Kbuild updates for v4.18

Merge tag 'kbuild-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild

Pull Kbuild updates from Masahiro Yamada:

 - improve fixdep to coalesce consecutive slashes in dep-files

 - fix several issues in the deb-pkg script's maintainer string generation

 - remove the unused CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX and clean up
   several tools and linker scripts

 - clean up modpost

 - allow enabling dead code/data elimination for PowerPC in EXPERT mode

 - improve two coccinelle scripts for better performance

 - pass endianness and machine size flags to sparse for all architectures

 - misc fixes

* tag 'kbuild-v4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild: (25 commits)
  kbuild: add machine size to CHECKFLAGS
  kbuild: add endianness flag to CHEKCFLAGS
  kbuild: $(CHECK) doesnt need NOSTDINC_FLAGS twice
  scripts: Fixed printf format mismatch
  scripts/tags.sh: use `find` for $ALLSOURCE_ARCHS generation
  coccinelle: deref_null: improve performance
  coccinelle: mini_lock: improve performance
  powerpc: Allow LD_DEAD_CODE_DATA_ELIMINATION to be selected
  kbuild: Allow LD_DEAD_CODE_DATA_ELIMINATION to be selectable if enabled
  kbuild: LD_DEAD_CODE_DATA_ELIMINATION no -ffunction-sections/-fdata-sections for module build
  kbuild: Fix asm-generic/vmlinux.lds.h for LD_DEAD_CODE_DATA_ELIMINATION
  modpost: constify *modname function argument where possible
  modpost: remove redundant is_vmlinux() test
  modpost: use strstarts() helper more widely
  modpost: pass struct elf_info pointer to get_modinfo()
  checkpatch: remove VMLINUX_SYMBOL() check
  vmlinux.lds.h: remove no-op macro VMLINUX_SYMBOL()
  kbuild: remove CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
  export.h: remove code for prefixing symbols with underscore
  depmod.sh: remove symbol prefix support
  ...
Commit 8715ee75fe by Linus Torvalds, 2018-06-06 11:00:15 -07:00
33 changed files with 349 additions and 443 deletions


@ -802,13 +802,12 @@ KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once)
endif
ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
KBUILD_CFLAGS += $(call cc-option,-fdata-sections,)
KBUILD_CFLAGS_KERNEL += $(call cc-option,-ffunction-sections,)
KBUILD_CFLAGS_KERNEL += $(call cc-option,-fdata-sections,)
endif
# arch Makefile may override CC so keep this after arch Makefile is included
NOSTDINC_FLAGS += -nostdinc -isystem $(call shell-cached,$(CC) -print-file-name=include)
CHECKFLAGS += $(NOSTDINC_FLAGS)
# warn about C99 declaration after statement
KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
@ -878,6 +877,12 @@ ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
# insure the checker run with the right endianness
CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)
# the checker needs the correct machine size
CHECKFLAGS += $(if $(CONFIG_64BIT),-m64,-m32)
# Default kernel image to build when no specific target is given.
# KBUILD_IMAGE may be overruled on the command line or
# set in the environment
@ -1763,7 +1768,7 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))
# Run depmod only if we have System.map and depmod is executable
quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
$(KERNELRELEASE) "$(patsubst y,_,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))"
$(KERNELRELEASE)
# Create temporary dir for module support files
# clean it up only when building all modules
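
The two CHECKFLAGS lines added above hand sparse the same machine size and byte order as the kernel configuration instead of whatever it assumes by default, which is also why the per-arch -m64/-m32 and -mbig-endian additions disappear from the arch Makefiles further down. A rough userspace sketch of what depends on these flags (the function and macro test are illustrative, not kernel code):

    /* checkflags-demo.c: sketch only.  With -m32 sparse treats 'unsigned
     * long' as 32 bits and flags the shift below as too big; with -m64 it
     * is fine.  The endianness flag selects the predefined byte-order
     * macros, so the #if can be resolved as in the real build. */
    unsigned long high_bit(void)
    {
            return 1UL << 40;     /* only legal on a 64-bit target */
    }

    #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    #define FIRST_BYTE_SHIFT 24   /* byte at the lowest address is the MSB */
    #else
    #define FIRST_BYTE_SHIFT 0
    #endif

    unsigned char first_byte(unsigned int word)
    {
            return (word >> FIRST_BYTE_SHIFT) & 0xff;
    }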


@ -597,21 +597,6 @@ config CC_STACKPROTECTOR_AUTO
endchoice
config LD_DEAD_CODE_DATA_ELIMINATION
bool
help
Select this if the architecture wants to do dead code and
data elimination with the linker by compiling with
-ffunction-sections -fdata-sections and linking with
--gc-sections.
This requires that the arch annotates or otherwise protects
its external entry points from being discarded. Linker scripts
must also merge .text.*, .data.*, and .bss.* correctly into
output sections. Care must be taken not to pull in unrelated
sections (e.g., '.text.init'). Typically '.' in section names
is used to distinguish them from label names / C identifiers.
config HAVE_ARCH_WITHIN_STACK_FRAMES
bool
help
@ -687,12 +672,6 @@ config MODULES_USE_ELF_REL
Modules only use ELF REL relocations. Modules with ELF RELA
relocations will give an error.
config HAVE_UNDERSCORE_SYMBOL_PREFIX
bool
help
Some architectures generate an _ in front of C symbols; things like
module loading and assembly files need to know about this.
config HAVE_IRQ_EXIT_ON_IRQ_STACK
bool
help


@ -11,7 +11,7 @@
NM := $(NM) -B
LDFLAGS_vmlinux := -static -N #-relax
CHECKFLAGS += -D__alpha__ -m64
CHECKFLAGS += -D__alpha__
cflags-y := -pipe -mno-fp-regs -ffixed-8
cflags-y += $(call cc-option, -fno-jump-tables)


@ -135,7 +135,7 @@ endif
KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
CHECKFLAGS += -D__arm__ -m32
CHECKFLAGS += -D__arm__
#Default value
head-y := arch/arm/kernel/head$(MMUEXT).o


@ -78,7 +78,7 @@ LDFLAGS += -maarch64linux
UTS_MACHINE := aarch64
endif
CHECKFLAGS += -D__aarch64__ -m64
CHECKFLAGS += -D__aarch64__
ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds


@ -18,7 +18,7 @@ READELF := $(CROSS_COMPILE)readelf
export AWK
CHECKFLAGS += -m64 -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
CHECKFLAGS += -D__ia64=1 -D__ia64__=1 -D_LP64 -D__LP64__
OBJCOPYFLAGS := --strip-all
LDFLAGS_vmlinux := -static


@ -309,9 +309,6 @@ ifdef CONFIG_MIPS
CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
ifdef CONFIG_64BIT
CHECKFLAGS += -m64
endif
endif
OBJCOPYFLAGS += --remove-section=.reginfo


@ -25,7 +25,6 @@ LDFLAGS_vmlinux :=
LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_CFLAGS += -pipe -ffixed-r10 -D__linux__
CHECKFLAGS += -mbig-endian
ifeq ($(CONFIG_OPENRISC_HAVE_INST_MUL),y)
KBUILD_CFLAGS += $(call cc-option,-mhard-mul)


@ -22,13 +22,13 @@ KBUILD_IMAGE := vmlinuz
KBUILD_DEFCONFIG := default_defconfig
NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1 -mbig-endian
CHECKFLAGS += -D__hppa__=1
LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
export LIBGCC
ifdef CONFIG_64BIT
UTS_MACHINE := parisc64
CHECKFLAGS += -D__LP64__=1 -m64
CHECKFLAGS += -D__LP64__=1
CC_ARCHES = hppa64
LD_BFD := elf64-hppa-linux
else # 32-bit


@ -198,6 +198,7 @@ config PPC
select HAVE_KPROBES
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP


@ -89,7 +89,7 @@ SECTIONS
*/
.text BLOCK(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#ifdef CONFIG_LD_HEAD_STUB_CATCH
*(.linker_stub_catch);
KEEP(*(.linker_stub_catch));
. = . ;
#endif
@ -98,7 +98,7 @@ SECTIONS
ALIGN_FUNCTION();
#endif
/* careful! __ftr_alt_* sections need to be close to .text */
*(.text.hot .text .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
@ -184,10 +184,10 @@ SECTIONS
.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
INIT_DATA
__vtop_table_begin = .;
*(.vtop_fixup);
KEEP(*(.vtop_fixup));
__vtop_table_end = .;
__ptov_table_begin = .;
*(.ptov_fixup);
KEEP(*(.ptov_fixup));
__ptov_table_end = .;
}
@ -208,26 +208,26 @@ SECTIONS
. = ALIGN(8);
__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
__start___ftr_fixup = .;
*(__ftr_fixup)
KEEP(*(__ftr_fixup))
__stop___ftr_fixup = .;
}
. = ALIGN(8);
__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
__start___mmu_ftr_fixup = .;
*(__mmu_ftr_fixup)
KEEP(*(__mmu_ftr_fixup))
__stop___mmu_ftr_fixup = .;
}
. = ALIGN(8);
__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
__start___lwsync_fixup = .;
*(__lwsync_fixup)
KEEP(*(__lwsync_fixup))
__stop___lwsync_fixup = .;
}
#ifdef CONFIG_PPC64
. = ALIGN(8);
__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
__start___fw_ftr_fixup = .;
*(__fw_ftr_fixup)
KEEP(*(__fw_ftr_fixup))
__stop___fw_ftr_fixup = .;
}
#endif
@ -240,7 +240,7 @@ SECTIONS
. = ALIGN(8);
.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
__machine_desc_start = . ;
*(.machine.desc)
KEEP(*(.machine.desc))
__machine_desc_end = . ;
}
#ifdef CONFIG_RELOCATABLE
@ -288,7 +288,7 @@ SECTIONS
.data : AT(ADDR(.data) - LOAD_OFFSET) {
DATA_DATA
*(.data.rel*)
*(.sdata)
*(SDATA_MAIN)
*(.sdata2)
*(.got.plt) *(.got)
*(.plt)
@ -303,7 +303,7 @@ SECTIONS
.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
__start_opd = .;
*(.opd)
KEEP(*(.opd))
__end_opd = .;
}


@ -18,7 +18,7 @@ KBUILD_CFLAGS += -m64
KBUILD_AFLAGS += -m64
UTS_MACHINE := s390x
STACK_SIZE := 16384
CHECKFLAGS += -D__s390__ -D__s390x__ -mbig-endian
CHECKFLAGS += -D__s390__ -D__s390x__
export LD_BFD


@ -39,7 +39,7 @@ else
# sparc64
#
CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__ -m64
CHECKFLAGS += -D__sparc__ -D__sparc_v9__ -D__arch64__
LDFLAGS := -m elf64_sparc
export BITS := 64
UTS_MACHINE := sparc64


@ -94,7 +94,7 @@ ifeq ($(CONFIG_X86_32),y)
else
BITS := 64
UTS_MACHINE := x86_64
CHECKFLAGS += -D__x86_64__ -m64
CHECKFLAGS += -D__x86_64__
biarch := -m64
KBUILD_AFLAGS += -m64


@ -19,42 +19,32 @@
#define KCRC_ALIGN 4
#endif
#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
#define KSYM(name) _##name
#else
#define KSYM(name) name
#endif
/*
* note on .section use: @progbits vs %progbits nastiness doesn't matter,
* since we immediately emit into those sections anyway.
*/
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
.globl KSYM(__ksymtab_\name)
.globl __ksymtab_\name
.section ___ksymtab\sec+\name,"a"
.balign KSYM_ALIGN
KSYM(__ksymtab_\name):
__put \val, KSYM(__kstrtab_\name)
__ksymtab_\name:
__put \val, __kstrtab_\name
.previous
.section __ksymtab_strings,"a"
KSYM(__kstrtab_\name):
#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
.asciz "_\name"
#else
__kstrtab_\name:
.asciz "\name"
#endif
.previous
#ifdef CONFIG_MODVERSIONS
.section ___kcrctab\sec+\name,"a"
.balign KCRC_ALIGN
KSYM(__kcrctab_\name):
__kcrctab_\name:
#if defined(CONFIG_MODULE_REL_CRCS)
.long KSYM(__crc_\name) - .
.long __crc_\name - .
#else
.long KSYM(__crc_\name)
.long __crc_\name
#endif
.weak KSYM(__crc_\name)
.weak __crc_\name
.previous
#endif
#endif
@ -84,12 +74,12 @@ KSYM(__kcrctab_\name):
#endif
#define EXPORT_SYMBOL(name) \
__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
__EXPORT_SYMBOL(name, KSYM_FUNC(name),)
#define EXPORT_SYMBOL_GPL(name) \
__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
__EXPORT_SYMBOL(name, KSYM_FUNC(name), _gpl)
#define EXPORT_DATA_SYMBOL(name) \
__EXPORT_SYMBOL(name, KSYM(name),)
__EXPORT_SYMBOL(name, name,)
#define EXPORT_DATA_SYMBOL_GPL(name) \
__EXPORT_SYMBOL(name, KSYM(name),_gpl)
__EXPORT_SYMBOL(name, name,_gpl)
#endif


@ -64,15 +64,24 @@
* generates .data.identifier sections, which need to be pulled in with
* .data. We don't want to pull in .data..other sections, which Linux
* has defined. Same for text and bss.
*
* RODATA_MAIN is not used because existing code already defines .rodata.x
* sections to be brought in with rodata.
*/
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#else
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
#define SBSS_MAIN .sbss
#endif
/*
@ -104,66 +113,66 @@
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_mcount_loc) = .; \
*(__mcount_loc) \
VMLINUX_SYMBOL(__stop_mcount_loc) = .;
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
__stop_mcount_loc = .;
#else
#define MCOUNT_REC()
#endif
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
*(_ftrace_annotated_branch) \
VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
KEEP(*(_ftrace_annotated_branch)) \
__stop_annotated_branch_profile = .;
#else
#define LIKELY_PROFILE()
#endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
*(_ftrace_branch) \
VMLINUX_SYMBOL(__stop_branch_profile) = .;
#define BRANCH_PROFILE() __start_branch_profile = .; \
KEEP(*(_ftrace_branch)) \
__stop_branch_profile = .;
#else
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
__start_kprobe_blacklist = .; \
KEEP(*(_kprobe_blacklist)) \
VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
__stop_kprobe_blacklist = .;
#else
#define KPROBE_BLACKLIST()
#endif
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__start_error_injection_whitelist) = .;\
__start_error_injection_whitelist = .; \
KEEP(*(_error_injection_whitelist)) \
VMLINUX_SYMBOL(__stop_error_injection_whitelist) = .;
__stop_error_injection_whitelist = .;
#else
#define ERROR_INJECT_WHITELIST()
#endif
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
__start_ftrace_events = .; \
KEEP(*(_ftrace_events)) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .; \
VMLINUX_SYMBOL(__start_ftrace_eval_maps) = .; \
__stop_ftrace_events = .; \
__start_ftrace_eval_maps = .; \
KEEP(*(_ftrace_eval_map)) \
VMLINUX_SYMBOL(__stop_ftrace_eval_maps) = .;
__stop_ftrace_eval_maps = .;
#else
#define FTRACE_EVENTS()
#endif
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
__stop___trace_bprintk_fmt = .;
#define TRACEPOINT_STR() __start___tracepoint_str = .; \
KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
__stop___tracepoint_str = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
@ -171,27 +180,27 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
__start_syscalls_metadata = .; \
KEEP(*(__syscalls_metadata)) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
__stop_syscalls_metadata = .;
#else
#define TRACE_SYSCALLS()
#endif
#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__start__bpf_raw_tp) = .; \
__start__bpf_raw_tp = .; \
KEEP(*(__bpf_raw_tp_map)) \
VMLINUX_SYMBOL(__stop__bpf_raw_tp) = .;
__stop__bpf_raw_tp = .;
#else
#define BPF_RAW_TP()
#endif
#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() . = ALIGN(8); \
VMLINUX_SYMBOL(__earlycon_table) = .; \
__earlycon_table = .; \
KEEP(*(__earlycon_table)) \
VMLINUX_SYMBOL(__earlycon_table_end) = .;
__earlycon_table_end = .;
#else
#define EARLYCON_TABLE()
#endif
@ -202,7 +211,7 @@
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__##name##_of_table) = .; \
__##name##_of_table = .; \
KEEP(*(__##name##_of_table)) \
KEEP(*(__##name##_of_table_end))
@ -217,18 +226,18 @@
#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .; \
__##name##_acpi_probe_table = .; \
KEEP(*(__##name##_acpi_probe_table)) \
VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
__##name##_acpi_probe_table_end = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
__dtb_start = .; \
KEEP(*(.dtb.init.rodata)) \
VMLINUX_SYMBOL(__dtb_end) = .;
__dtb_end = .;
/*
* .data section
@ -238,23 +247,23 @@
*(DATA_MAIN) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data) \
MEM_KEEP(exit.data) \
MEM_KEEP(init.data*) \
MEM_KEEP(exit.data*) \
*(.data.unlikely) \
VMLINUX_SYMBOL(__start_once) = .; \
__start_once = .; \
*(.data.once) \
VMLINUX_SYMBOL(__end_once) = .; \
__end_once = .; \
STRUCT_ALIGN(); \
*(__tracepoints) \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___jump_table) = .; \
__start___jump_table = .; \
KEEP(*(__jump_table)) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
__stop___jump_table = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
__start___verbose = .; \
KEEP(*(__verbose)) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
__stop___verbose = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
@ -266,10 +275,10 @@
*/
#define NOSAVE_DATA \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_begin) = .; \
__nosave_begin = .; \
*(.data..nosave) \
. = ALIGN(PAGE_SIZE); \
VMLINUX_SYMBOL(__nosave_end) = .;
__nosave_end = .;
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
@ -286,13 +295,13 @@
#define INIT_TASK_DATA(align) \
. = ALIGN(align); \
VMLINUX_SYMBOL(__start_init_task) = .; \
VMLINUX_SYMBOL(init_thread_union) = .; \
VMLINUX_SYMBOL(init_stack) = .; \
*(.data..init_task) \
*(.data..init_thread_info) \
. = VMLINUX_SYMBOL(__start_init_task) + THREAD_SIZE; \
VMLINUX_SYMBOL(__end_init_task) = .;
__start_init_task = .; \
init_thread_union = .; \
init_stack = .; \
KEEP(*(.data..init_task)) \
KEEP(*(.data..init_thread_info)) \
. = __start_init_task + THREAD_SIZE; \
__end_init_task = .;
/*
* Allow architectures to handle ro_after_init data on their
@ -300,9 +309,9 @@
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
VMLINUX_SYMBOL(__start_ro_after_init) = .; \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
VMLINUX_SYMBOL(__end_ro_after_init) = .;
__end_ro_after_init = .;
#endif
/*
@ -311,14 +320,14 @@
#define RO_DATA_SECTION(align) \
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_rodata) = .; \
__start_rodata = .; \
*(.rodata) *(.rodata.*) \
RO_AFTER_INIT_DATA /* Read only after init */ \
KEEP(*(__vermagic)) /* Kernel version magic */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
__start___tracepoints_ptrs = .; \
KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
__stop___tracepoints_ptrs = .; \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
@ -328,109 +337,109 @@
\
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
__start_pci_fixups_early = .; \
KEEP(*(.pci_fixup_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
__end_pci_fixups_early = .; \
__start_pci_fixups_header = .; \
KEEP(*(.pci_fixup_header)) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
__end_pci_fixups_header = .; \
__start_pci_fixups_final = .; \
KEEP(*(.pci_fixup_final)) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
__end_pci_fixups_final = .; \
__start_pci_fixups_enable = .; \
KEEP(*(.pci_fixup_enable)) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
__end_pci_fixups_enable = .; \
__start_pci_fixups_resume = .; \
KEEP(*(.pci_fixup_resume)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
__end_pci_fixups_resume = .; \
__start_pci_fixups_resume_early = .; \
KEEP(*(.pci_fixup_resume_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
__end_pci_fixups_resume_early = .; \
__start_pci_fixups_suspend = .; \
KEEP(*(.pci_fixup_suspend)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
__end_pci_fixups_suspend = .; \
__start_pci_fixups_suspend_late = .; \
KEEP(*(.pci_fixup_suspend_late)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
__end_pci_fixups_suspend_late = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
__start_builtin_fw = .; \
KEEP(*(.builtin_fw)) \
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
__end_builtin_fw = .; \
} \
\
TRACEDATA \
\
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
__start___ksymtab = .; \
KEEP(*(SORT(___ksymtab+*))) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
__stop___ksymtab = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
__start___ksymtab_gpl = .; \
KEEP(*(SORT(___ksymtab_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
__stop___ksymtab_gpl = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
__start___ksymtab_unused = .; \
KEEP(*(SORT(___ksymtab_unused+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
__stop___ksymtab_unused = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
__start___ksymtab_unused_gpl = .; \
KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
__stop___ksymtab_unused_gpl = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
__start___ksymtab_gpl_future = .; \
KEEP(*(SORT(___ksymtab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
__stop___ksymtab_gpl_future = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
__start___kcrctab = .; \
KEEP(*(SORT(___kcrctab+*))) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
__stop___kcrctab = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
__start___kcrctab_gpl = .; \
KEEP(*(SORT(___kcrctab_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
__stop___kcrctab_gpl = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
__start___kcrctab_unused = .; \
KEEP(*(SORT(___kcrctab_unused+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
__stop___kcrctab_unused = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
__start___kcrctab_unused_gpl = .; \
KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
__stop___kcrctab_unused_gpl = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
__start___kcrctab_gpl_future = .; \
KEEP(*(SORT(___kcrctab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
__stop___kcrctab_gpl_future = .; \
} \
\
/* Kernel symbol table: strings */ \
@ -447,18 +456,18 @@
\
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
__start___param = .; \
KEEP(*(__param)) \
VMLINUX_SYMBOL(__stop___param) = .; \
__stop___param = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___modver) = .; \
__start___modver = .; \
KEEP(*(__modver)) \
VMLINUX_SYMBOL(__stop___modver) = .; \
__stop___modver = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
__end_rodata = .; \
} \
. = ALIGN((align));
@ -469,9 +478,9 @@
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
__security_initcall_start = .; \
KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .; \
__security_initcall_end = .; \
}
/*
@ -487,58 +496,58 @@
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
*(.text..refcount) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
MEM_KEEP(init.text*) \
MEM_KEEP(exit.text*) \
/* sched.text is aling to function alignment to secure we have same
* address even at second ld pass when generating System.map */
#define SCHED_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__sched_text_start) = .; \
__sched_text_start = .; \
*(.sched.text) \
VMLINUX_SYMBOL(__sched_text_end) = .;
__sched_text_end = .;
/* spinlock.text is aling to function alignment to secure we have same
* address even at second ld pass when generating System.map */
#define LOCK_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__lock_text_start) = .; \
__lock_text_start = .; \
*(.spinlock.text) \
VMLINUX_SYMBOL(__lock_text_end) = .;
__lock_text_end = .;
#define CPUIDLE_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__cpuidle_text_start) = .; \
__cpuidle_text_start = .; \
*(.cpuidle.text) \
VMLINUX_SYMBOL(__cpuidle_text_end) = .;
__cpuidle_text_end = .;
#define KPROBES_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__kprobes_text_start) = .; \
__kprobes_text_start = .; \
*(.kprobes.text) \
VMLINUX_SYMBOL(__kprobes_text_end) = .;
__kprobes_text_end = .;
#define ENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__entry_text_start) = .; \
__entry_text_start = .; \
*(.entry.text) \
VMLINUX_SYMBOL(__entry_text_end) = .;
__entry_text_end = .;
#define IRQENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__irqentry_text_start) = .; \
__irqentry_text_start = .; \
*(.irqentry.text) \
VMLINUX_SYMBOL(__irqentry_text_end) = .;
__irqentry_text_end = .;
#define SOFTIRQENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__softirqentry_text_start) = .; \
__softirqentry_text_start = .; \
*(.softirqentry.text) \
VMLINUX_SYMBOL(__softirqentry_text_end) = .;
__softirqentry_text_end = .;
/* Section used for early init (in .S files) */
#define HEAD_TEXT *(.head.text)
#define HEAD_TEXT KEEP(*(.head.text))
#define HEAD_TEXT_SECTION \
.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
@ -551,9 +560,9 @@
#define EXCEPTION_TABLE(align) \
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ex_table) = .; \
__start___ex_table = .; \
KEEP(*(__ex_table)) \
VMLINUX_SYMBOL(__stop___ex_table) = .; \
__stop___ex_table = .; \
}
/*
@ -567,11 +576,11 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
__ctors_start = .; \
KEEP(*(.ctors)) \
KEEP(*(SORT(.init_array.*))) \
KEEP(*(.init_array)) \
VMLINUX_SYMBOL(__ctors_end) = .;
__ctors_end = .;
#else
#define KERNEL_CTORS()
#endif
@ -579,11 +588,11 @@
/* init and exit section handling */
#define INIT_DATA \
KEEP(*(SORT(___kentry+*))) \
*(.init.data) \
MEM_DISCARD(init.data) \
*(.init.data init.data.*) \
MEM_DISCARD(init.data*) \
KERNEL_CTORS() \
MCOUNT_REC() \
*(.init.rodata) \
*(.init.rodata .init.rodata.*) \
FTRACE_EVENTS() \
TRACE_SYSCALLS() \
KPROBE_BLACKLIST() \
@ -602,16 +611,16 @@
EARLYCON_TABLE()
#define INIT_TEXT \
*(.init.text) \
*(.init.text .init.text.*) \
*(.text.startup) \
MEM_DISCARD(init.text)
MEM_DISCARD(init.text*)
#define EXIT_DATA \
*(.exit.data) \
*(.exit.data .exit.data.*) \
*(.fini_array) \
*(.dtors) \
MEM_DISCARD(exit.data) \
MEM_DISCARD(exit.rodata)
MEM_DISCARD(exit.data*) \
MEM_DISCARD(exit.rodata*)
#define EXIT_TEXT \
*(.exit.text) \
@ -629,7 +638,7 @@
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
*(.dynsbss) \
*(.sbss) \
*(SBSS_MAIN) \
*(.scommon) \
}
@ -706,9 +715,9 @@
#define BUG_TABLE \
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___bug_table) = .; \
__start___bug_table = .; \
KEEP(*(__bug_table)) \
VMLINUX_SYMBOL(__stop___bug_table) = .; \
__stop___bug_table = .; \
}
#else
#define BUG_TABLE
@ -718,22 +727,22 @@
#define ORC_UNWIND_TABLE \
. = ALIGN(4); \
.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_orc_unwind_ip) = .; \
__start_orc_unwind_ip = .; \
KEEP(*(.orc_unwind_ip)) \
VMLINUX_SYMBOL(__stop_orc_unwind_ip) = .; \
__stop_orc_unwind_ip = .; \
} \
. = ALIGN(6); \
.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_orc_unwind) = .; \
__start_orc_unwind = .; \
KEEP(*(.orc_unwind)) \
VMLINUX_SYMBOL(__stop_orc_unwind) = .; \
__stop_orc_unwind = .; \
} \
. = ALIGN(4); \
.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(orc_lookup) = .; \
orc_lookup = .; \
. += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
LOOKUP_BLOCK_SIZE) + 1) * 4; \
VMLINUX_SYMBOL(orc_lookup_end) = .; \
orc_lookup_end = .; \
}
#else
#define ORC_UNWIND_TABLE
@ -743,9 +752,9 @@
#define TRACEDATA \
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__tracedata_start) = .; \
__tracedata_start = .; \
KEEP(*(.tracedata)) \
VMLINUX_SYMBOL(__tracedata_end) = .; \
__tracedata_end = .; \
}
#else
#define TRACEDATA
@ -753,24 +762,24 @@
#define NOTES \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_notes) = .; \
*(.note.*) \
VMLINUX_SYMBOL(__stop_notes) = .; \
__start_notes = .; \
KEEP(*(.note.*)) \
__stop_notes = .; \
}
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
VMLINUX_SYMBOL(__setup_start) = .; \
__setup_start = .; \
KEEP(*(.init.setup)) \
VMLINUX_SYMBOL(__setup_end) = .;
__setup_end = .;
#define INIT_CALLS_LEVEL(level) \
VMLINUX_SYMBOL(__initcall##level##_start) = .; \
__initcall##level##_start = .; \
KEEP(*(.initcall##level##.init)) \
KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
__initcall_start = .; \
KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
@ -781,22 +790,22 @@
INIT_CALLS_LEVEL(rootfs) \
INIT_CALLS_LEVEL(6) \
INIT_CALLS_LEVEL(7) \
VMLINUX_SYMBOL(__initcall_end) = .;
__initcall_end = .;
#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
__con_initcall_start = .; \
KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;
__con_initcall_end = .;
#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
__security_initcall_start = .; \
KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
__security_initcall_end = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
__initramfs_start = .; \
KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
KEEP(*(.init.ramfs.info))
@ -851,7 +860,7 @@
* sharing between subsections for different purposes.
*/
#define PERCPU_INPUT(cacheline) \
VMLINUX_SYMBOL(__per_cpu_start) = .; \
__per_cpu_start = .; \
*(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
@ -861,7 +870,7 @@
*(.data..percpu) \
*(.data..percpu..shared_aligned) \
PERCPU_DECRYPTED_SECTION \
VMLINUX_SYMBOL(__per_cpu_end) = .;
__per_cpu_end = .;
/**
* PERCPU_VADDR - define output section for percpu area
@ -888,12 +897,11 @@
* address, use PERCPU_SECTION.
*/
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
- LOAD_OFFSET) { \
__per_cpu_load = .; \
.data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
PERCPU_INPUT(cacheline) \
} phdr \
. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
. = __per_cpu_load + SIZEOF(.data..percpu);
/**
* PERCPU_SECTION - define output section for percpu area, simple version
@ -910,7 +918,7 @@
#define PERCPU_SECTION(cacheline) \
. = ALIGN(PAGE_SIZE); \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__per_cpu_load) = .; \
__per_cpu_load = .; \
PERCPU_INPUT(cacheline) \
}
@ -949,9 +957,9 @@
#define INIT_TEXT_SECTION(inittext_align) \
. = ALIGN(inittext_align); \
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(_sinittext) = .; \
_sinittext = .; \
INIT_TEXT \
VMLINUX_SYMBOL(_einittext) = .; \
_einittext = .; \
}
#define INIT_DATA_SECTION(initsetup_align) \
@ -966,8 +974,8 @@
#define BSS_SECTION(sbss_align, bss_align, stop_align) \
. = ALIGN(sbss_align); \
VMLINUX_SYMBOL(__bss_start) = .; \
__bss_start = .; \
SBSS(sbss_align) \
BSS(bss_align) \
. = ALIGN(stop_align); \
VMLINUX_SYMBOL(__bss_stop) = .;
__bss_stop = .;
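
All of the VMLINUX_SYMBOL() removals above are possible because the underscore-prefix configuration is gone, and the new KEEP() wrappers plus the TEXT_MAIN/DATA_MAIN/SDATA_MAIN patterns are what keep --gc-sections from discarding sections that are only referenced by name at runtime once -ffunction-sections/-fdata-sections split every definition into its own input section. A small stand-alone illustration (file, symbol, and section names are invented for the example):

    /* sections-demo.c
     * Compile with: gcc -c -ffunction-sections -fdata-sections sections-demo.c
     * and run objdump -h on the object to see one section per definition. */
    static int threshold = 42;            /* emitted as .data.threshold */

    int check(int v)                      /* emitted as .text.check */
    {
            return v > threshold;
    }

    /* A table entry nothing references directly: "used" stops the compiler
     * from dropping it, but --gc-sections will still discard .my_table at
     * link time unless the linker script wraps the input in KEEP(). */
    static int (*const my_table_entry)(int)
            __attribute__((used, section(".my_table"))) = check;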


@ -10,14 +10,8 @@
* hackers place grumpy comments in header files.
*/
/* Some toolchains use a `_' prefix for all user symbols. */
#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
#define __VMLINUX_SYMBOL(x) _##x
#define __VMLINUX_SYMBOL_STR(x) "_" #x
#else
#define __VMLINUX_SYMBOL(x) x
#define __VMLINUX_SYMBOL_STR(x) #x
#endif
/* Indirect, so macros are expanded before pasting. */
#define VMLINUX_SYMBOL(x) __VMLINUX_SYMBOL(x)
@ -46,14 +40,14 @@ extern struct module __this_module;
#if defined(CONFIG_MODULE_REL_CRCS)
#define __CRC_SYMBOL(sym, sec) \
asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
" .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
" .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \
" .weak __crc_" #sym " \n" \
" .long __crc_" #sym " - . \n" \
" .previous \n");
#else
#define __CRC_SYMBOL(sym, sec) \
asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
" .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
" .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
" .weak __crc_" #sym " \n" \
" .long __crc_" #sym " \n" \
" .previous \n");
#endif
#else
@ -66,7 +60,7 @@ extern struct module __this_module;
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
__attribute__((section("__ksymtab_strings"), aligned(1))) \
= VMLINUX_SYMBOL_STR(sym); \
= #sym; \
static const struct kernel_symbol __ksymtab_##sym \
__used \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \


@ -1038,6 +1038,33 @@ config CC_OPTIMIZE_FOR_SIZE
endchoice
config HAVE_LD_DEAD_CODE_DATA_ELIMINATION
bool
help
This requires that the arch annotates or otherwise protects
its external entry points from being discarded. Linker scripts
must also merge .text.*, .data.*, and .bss.* correctly into
output sections. Care must be taken not to pull in unrelated
sections (e.g., '.text.init'). Typically '.' in section names
is used to distinguish them from label names / C identifiers.
config LD_DEAD_CODE_DATA_ELIMINATION
bool "Dead code and data elimination (EXPERIMENTAL)"
depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
depends on EXPERT
help
Select this if the architecture wants to do dead code and
data elimination with the linker by compiling with
-ffunction-sections -fdata-sections, and linking with
--gc-sections.
This can reduce on disk and in-memory size of the kernel
code and static data, particularly for small configs and
on small systems. This has the possibility of introducing
silently broken kernel if the required annotations are not
present. This option is not well tested yet, so use at your
own risk.
config SYSCTL
bool
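
The new help text above describes the mechanism in general terms; it can be tried outside the kernel with an ordinary userspace link. A minimal sketch (the file name and symbols are made up):

    /* gc-demo.c
     * Build: gcc -ffunction-sections -fdata-sections \
     *            -Wl,--gc-sections -Wl,--print-gc-sections gc-demo.c -o gc-demo
     * The linker should report removing '.text.never_called': nothing
     * reachable from the entry point uses it and no KEEP() protects it. */
    #include <stdio.h>

    int never_called(int x)               /* external linkage, so it is emitted */
    {
            return x * 3;
    }

    int main(void)
    {
            puts("only reachable code survives --gc-sections");
            return 0;
    }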


@ -147,7 +147,6 @@ $(obj)/%.i: $(src)/%.c FORCE
cmd_gensymtypes_c = \
$(CPP) -D__GENKSYMS__ $(c_flags) $< | \
$(GENKSYMS) $(if $(1), -T $(2)) \
$(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
$(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
$(if $(KBUILD_PRESERVE),-p) \
-r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
@ -355,7 +354,6 @@ cmd_gensymtypes_S = \
sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \
$(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \
$(GENKSYMS) $(if $(1), -T $(2)) \
$(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \
$(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS)) \
$(if $(KBUILD_PRESERVE),-p) \
-r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
@ -487,15 +485,10 @@ targets += $(lib-target)
dummy-object = $(obj)/.lib_exports.o
ksyms-lds = $(dot-target).lds
ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
ref_prefix = EXTERN(_
else
ref_prefix = EXTERN(
endif
quiet_cmd_export_list = EXPORTS $@
cmd_export_list = $(OBJDUMP) -h $< | \
sed -ne '/___ksymtab/s/.*+\([^ ]*\).*/$(ref_prefix)\1)/p' >$(ksyms-lds);\
sed -ne '/___ksymtab/s/.*+\([^ ]*\).*/EXTERN(\1)/p' >$(ksyms-lds);\
rm -f $(dummy-object);\
echo | $(CC) $(a_flags) -c -o $(dummy-object) -x assembler -;\
$(LD) $(ld_flags) -r -o $@ -T $(ksyms-lds) $(dummy-object);\


@ -61,9 +61,6 @@ for mod in "$MODVERDIR"/*.mod; do
sed -n -e '3{s/ /\n/g;/^$/!p;}' "$mod"
done | sort -u |
while read sym; do
if [ -n "$CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX" ]; then
sym="${sym#_}"
fi
echo "#define __KSYM_${sym} 1"
done >> "$new_ksyms_file"


@ -115,7 +115,7 @@ static void usage(void)
*/
static void print_dep(const char *m, int slen, const char *dir)
{
int c, i;
int c, prev_c = '/', i;
printf(" $(wildcard %s/", dir);
for (i = 0; i < slen; i++) {
@ -124,7 +124,9 @@ static void print_dep(const char *m, int slen, const char *dir)
c = '/';
else
c = tolower(c);
putchar(c);
if (c != '/' || prev_c != '/')
putchar(c);
prev_c = c;
}
printf(".h) \\\n");
}
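
The hunk above only shows the changed lines of print_dep(); the stand-alone rework below fills in the elided context from the surrounding code so the effect can be run directly: a config option containing consecutive underscores now maps to a single path separator (the option name in main() is invented for the demo):

    /* coalesce-demo.c */
    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static void print_dep(const char *m, int slen, const char *dir)
    {
            int c, prev_c = '/', i;

            printf(" $(wildcard %s/", dir);
            for (i = 0; i < slen; i++) {
                    c = m[i];
                    if (c == '_')
                            c = '/';
                    else
                            c = tolower(c);
                    if (c != '/' || prev_c != '/')  /* skip duplicate slashes */
                            putchar(c);
                    prev_c = c;
            }
            printf(".h) \\\n");
    }

    int main(void)
    {
            const char *opt = "KBUILD__EXAMPLE";

            /* prints " $(wildcard include/config/kbuild/example.h) \" */
            print_dep(opt, strlen(opt), "include/config");
            return 0;
    }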


@ -5121,16 +5121,6 @@ sub process {
}
}
# make sure symbols are always wrapped with VMLINUX_SYMBOL() ...
# all assignments may have only one of the following with an assignment:
# .
# ALIGN(...)
# VMLINUX_SYMBOL(...)
if ($realfile eq 'vmlinux.lds.h' && $line =~ /(?:(?:^|\s)$Ident\s*=|=\s*$Ident(?:\s|$))/) {
WARN("MISSING_VMLINUX_SYMBOL",
"vmlinux.lds.h needs VMLINUX_SYMBOL() around C-visible symbols\n" . $herecurr);
}
# check for redundant bracing round if etc
if ($line =~ /(^.*)\bif\b/ && $1 !~ /else\s*$/) {
my ($level, $endln, @chunks) =


@ -67,12 +67,14 @@ identifier lock,unlock;
@@
*lock(E1@p,...);
<+... when != E1
... when != E1
when any
if (...) {
... when != E1
* return@r ...;
}
...+>
... when != E1
when any
*unlock@up(E1,...);
@script:python depends on org@


@ -14,18 +14,10 @@ virtual context
virtual org
virtual report
@ifm@
expression *E;
statement S1,S2;
position p1;
@@
if@p1 ((E == NULL && ...) || ...) S1 else S2
// The following two rules are separate, because both can match a single
// expression in different ways
@pr1 expression@
expression *ifm.E;
expression E;
identifier f;
position p1;
@@
@ -33,7 +25,7 @@ position p1;
(E != NULL && ...) ? <+...E->f@p1...+> : ...
@pr2 expression@
expression *ifm.E;
expression E;
identifier f;
position p2;
@@
@ -46,6 +38,14 @@ position p2;
sizeof(<+...E->f@p2...+>)
)
@ifm@
expression *E;
statement S1,S2;
position p1;
@@
if@p1 ((E == NULL && ...) || ...) S1 else S2
// For org and report modes
@r depends on !context && (org || report) exists@
@ -212,16 +212,8 @@ else S3
// The following three rules are duplicates of ifm, pr1 and pr2 respectively.
// It is need because the previous rule as already made a "change".
@ifm1 depends on context && !org && !report@
expression *E;
statement S1,S2;
position p1;
@@
if@p1 ((E == NULL && ...) || ...) S1 else S2
@pr11 depends on context && !org && !report expression@
expression *ifm1.E;
expression E;
identifier f;
position p1;
@@
@ -229,7 +221,7 @@ position p1;
(E != NULL && ...) ? <+...E->f@p1...+> : ...
@pr12 depends on context && !org && !report expression@
expression *ifm1.E;
expression E;
identifier f;
position p2;
@@
@ -242,6 +234,14 @@ position p2;
sizeof(<+...E->f@p2...+>)
)
@ifm1 depends on context && !org && !report@
expression *E;
statement S1,S2;
position p1;
@@
if@p1 ((E == NULL && ...) || ...) S1 else S2
@depends on context && !org && !report exists@
expression subE <= ifm1.E;
expression *ifm1.E;


@ -3,36 +3,17 @@
#
# A depmod wrapper used by the toplevel Makefile
if test $# -ne 3; then
echo "Usage: $0 /sbin/depmod <kernelrelease> <symbolprefix>" >&2
if test $# -ne 2; then
echo "Usage: $0 /sbin/depmod <kernelrelease>" >&2
exit 1
fi
DEPMOD=$1
KERNELRELEASE=$2
SYMBOL_PREFIX=$3
if ! test -r System.map -a -x "$DEPMOD"; then
exit 0
fi
# older versions of depmod don't support -P <symbol-prefix>
# support was added in module-init-tools 3.13
if test -n "$SYMBOL_PREFIX"; then
release=$("$DEPMOD" --version)
package=$(echo "$release" | cut -d' ' -f 1)
if test "$package" = "module-init-tools"; then
version=$(echo "$release" | cut -d' ' -f 2)
later=$(printf '%s\n' "$version" "3.13" | sort -V | tail -n 1)
if test "$later" != "$version"; then
# module-init-tools < 3.13, drop the symbol prefix
SYMBOL_PREFIX=""
fi
fi
if test -n "$SYMBOL_PREFIX"; then
SYMBOL_PREFIX="-P $SYMBOL_PREFIX"
fi
fi
# older versions of depmod require the version string to start with three
# numbers, so we cheat with a symlink here
depmod_hack_needed=true
@ -55,7 +36,7 @@ set -- -ae -F System.map
if test -n "$INSTALL_MOD_PATH"; then
set -- "$@" -b "$INSTALL_MOD_PATH"
fi
"$DEPMOD" "$@" "$KERNELRELEASE" $SYMBOL_PREFIX
"$DEPMOD" "$@" "$KERNELRELEASE"
ret=$?
if $depmod_hack_needed; then


@ -45,7 +45,6 @@ int in_source_file;
static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
flag_preserve, flag_warnings, flag_rel_crcs;
static const char *mod_prefix = "";
static int errors;
static int nsyms;
@ -693,10 +692,10 @@ void export_symbol(const char *name)
fputs(">\n", debugfile);
/* Used as a linker script. */
printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" :
printf(!flag_rel_crcs ? "__crc_%s = 0x%08lx;\n" :
"SECTIONS { .rodata : ALIGN(4) { "
"%s__crc_%s = .; LONG(0x%08lx); } }\n",
mod_prefix, name, crc);
"__crc_%s = .; LONG(0x%08lx); } }\n",
name, crc);
}
}
@ -769,7 +768,6 @@ int main(int argc, char **argv)
#ifdef __GNU_LIBRARY__
struct option long_opts[] = {
{"symbol-prefix", 1, 0, 's'},
{"debug", 0, 0, 'd'},
{"warnings", 0, 0, 'w'},
{"quiet", 0, 0, 'q'},
@ -789,9 +787,6 @@ int main(int argc, char **argv)
while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF)
#endif /* __GNU_LIBRARY__ */
switch (o) {
case 's':
mod_prefix = optarg;
break;
case 'd':
flag_debug++;
break;


@ -62,7 +62,6 @@ static struct sym_entry *table;
static unsigned int table_size, table_cnt;
static int all_symbols = 0;
static int absolute_percpu = 0;
static char symbol_prefix_char = '\0';
static int base_relative = 0;
int token_profit[0x10000];
@ -75,7 +74,6 @@ unsigned char best_table_len[256];
static void usage(void)
{
fprintf(stderr, "Usage: kallsyms [--all-symbols] "
"[--symbol-prefix=<prefix char>] "
"[--base-relative] < in.map > out.S\n");
exit(1);
}
@ -113,28 +111,22 @@ static int check_symbol_range(const char *sym, unsigned long long addr,
static int read_symbol(FILE *in, struct sym_entry *s)
{
char str[500];
char *sym, stype;
char sym[500], stype;
int rc;
rc = fscanf(in, "%llx %c %499s\n", &s->addr, &stype, str);
rc = fscanf(in, "%llx %c %499s\n", &s->addr, &stype, sym);
if (rc != 3) {
if (rc != EOF && fgets(str, 500, in) == NULL)
if (rc != EOF && fgets(sym, 500, in) == NULL)
fprintf(stderr, "Read error or end of file.\n");
return -1;
}
if (strlen(str) > KSYM_NAME_LEN) {
if (strlen(sym) > KSYM_NAME_LEN) {
fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n"
"Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n",
str, strlen(str), KSYM_NAME_LEN);
sym, strlen(sym), KSYM_NAME_LEN);
return -1;
}
sym = str;
/* skip prefix char */
if (symbol_prefix_char && str[0] == symbol_prefix_char)
sym++;
/* Ignore most absolute/undefined (?) symbols. */
if (strcmp(sym, "_text") == 0)
_text = s->addr;
@ -155,7 +147,7 @@ static int read_symbol(FILE *in, struct sym_entry *s)
is_arm_mapping_symbol(sym))
return -1;
/* exclude also MIPS ELF local symbols ($L123 instead of .L123) */
else if (str[0] == '$')
else if (sym[0] == '$')
return -1;
/* exclude debugging symbols */
else if (stype == 'N' || stype == 'n')
@ -163,14 +155,14 @@ static int read_symbol(FILE *in, struct sym_entry *s)
/* include the type field in the symbol name, so that it gets
* compressed together */
s->len = strlen(str) + 1;
s->len = strlen(sym) + 1;
s->sym = malloc(s->len + 1);
if (!s->sym) {
fprintf(stderr, "kallsyms failure: "
"unable to allocate required amount of memory\n");
exit(EXIT_FAILURE);
}
strcpy((char *)s->sym + 1, str);
strcpy((char *)s->sym + 1, sym);
s->sym[0] = stype;
s->percpu_absolute = 0;
@ -233,11 +225,6 @@ static int symbol_valid(struct sym_entry *s)
int i;
char *sym_name = (char *)s->sym + 1;
/* skip prefix char */
if (symbol_prefix_char && *sym_name == symbol_prefix_char)
sym_name++;
/* if --all-symbols is not specified, then symbols outside the text
* and inittext sections are discarded */
if (!all_symbols) {
@ -302,15 +289,9 @@ static void read_map(FILE *in)
static void output_label(char *label)
{
if (symbol_prefix_char)
printf(".globl %c%s\n", symbol_prefix_char, label);
else
printf(".globl %s\n", label);
printf(".globl %s\n", label);
printf("\tALGN\n");
if (symbol_prefix_char)
printf("%c%s:\n", symbol_prefix_char, label);
else
printf("%s:\n", label);
printf("%s:\n", label);
}
/* uncompress a compressed symbol. When this function is called, the best table
@ -424,7 +405,7 @@ static void write_src(void)
}
output_label("kallsyms_num_syms");
printf("\tPTR\t%d\n", table_cnt);
printf("\tPTR\t%u\n", table_cnt);
printf("\n");
/* table of offset markers, that give the offset in the compressed stream
@ -768,13 +749,7 @@ int main(int argc, char **argv)
all_symbols = 1;
else if (strcmp(argv[i], "--absolute-percpu") == 0)
absolute_percpu = 1;
else if (strncmp(argv[i], "--symbol-prefix=", 16) == 0) {
char *p = &argv[i][16];
/* skip quote */
if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
p++;
symbol_prefix_char = *p;
} else if (strcmp(argv[i], "--base-relative") == 0)
else if (strcmp(argv[i], "--base-relative") == 0)
base_relative = 1;
else
usage();


@ -121,10 +121,6 @@ kallsyms()
info KSYM ${2}
local kallsymopt;
if [ -n "${CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX}" ]; then
kallsymopt="${kallsymopt} --symbol-prefix=_"
fi
if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then
kallsymopt="${kallsymopt} --all-symbols"
fi


@ -19,9 +19,7 @@
#include <stdbool.h>
#include <errno.h>
#include "modpost.h"
#include "../../include/generated/autoconf.h"
#include "../../include/linux/license.h"
#include "../../include/linux/export.h"
/* Are we using CONFIG_MODVERSIONS? */
static int modversions = 0;
@ -123,7 +121,7 @@ void *do_nofail(void *ptr, const char *expr)
/* A list of all modules we processed */
static struct module *modules;
static struct module *find_module(char *modname)
static struct module *find_module(const char *modname)
{
struct module *mod;
@ -591,35 +589,32 @@ static void parse_elf_finish(struct elf_info *info)
static int ignore_undef_symbol(struct elf_info *info, const char *symname)
{
/* ignore __this_module, it will be resolved shortly */
if (strcmp(symname, VMLINUX_SYMBOL_STR(__this_module)) == 0)
if (strcmp(symname, "__this_module") == 0)
return 1;
/* ignore global offset table */
if (strcmp(symname, "_GLOBAL_OFFSET_TABLE_") == 0)
return 1;
if (info->hdr->e_machine == EM_PPC)
/* Special register function linked on all modules during final link of .ko */
if (strncmp(symname, "_restgpr_", sizeof("_restgpr_") - 1) == 0 ||
strncmp(symname, "_savegpr_", sizeof("_savegpr_") - 1) == 0 ||
strncmp(symname, "_rest32gpr_", sizeof("_rest32gpr_") - 1) == 0 ||
strncmp(symname, "_save32gpr_", sizeof("_save32gpr_") - 1) == 0 ||
strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
if (strstarts(symname, "_restgpr_") ||
strstarts(symname, "_savegpr_") ||
strstarts(symname, "_rest32gpr_") ||
strstarts(symname, "_save32gpr_") ||
strstarts(symname, "_restvr_") ||
strstarts(symname, "_savevr_"))
return 1;
if (info->hdr->e_machine == EM_PPC64)
/* Special register function linked on all modules during final link of .ko */
if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0 ||
if (strstarts(symname, "_restgpr0_") ||
strstarts(symname, "_savegpr0_") ||
strstarts(symname, "_restvr_") ||
strstarts(symname, "_savevr_") ||
strcmp(symname, ".TOC.") == 0)
return 1;
/* Do not ignore this symbol */
return 0;
}
#define CRC_PFX VMLINUX_SYMBOL_STR(__crc_)
#define KSYMTAB_PFX VMLINUX_SYMBOL_STR(__ksymtab_)
static void handle_modversions(struct module *mod, struct elf_info *info,
Elf_Sym *sym, const char *symname)
{
@ -628,13 +623,13 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
bool is_crc = false;
if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
strncmp(symname, "__ksymtab", 9) == 0)
strstarts(symname, "__ksymtab"))
export = export_from_secname(info, get_secindex(info, sym));
else
export = export_from_sec(info, get_secindex(info, sym));
/* CRC'd symbol */
if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
if (strstarts(symname, "__crc_")) {
is_crc = true;
crc = (unsigned int) sym->st_value;
if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) {
@ -647,13 +642,13 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
info->sechdrs[sym->st_shndx].sh_addr : 0);
crc = *crcp;
}
sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
sym_update_crc(symname + strlen("__crc_"), mod, crc,
export);
}
switch (sym->st_shndx) {
case SHN_COMMON:
if (!strncmp(symname, "__gnu_lto_", sizeof("__gnu_lto_")-1)) {
if (strstarts(symname, "__gnu_lto_")) {
/* Should warn here, but modpost runs before the linker */
} else
warn("\"%s\" [%s] is COMMON symbol\n", symname, mod->name);
@ -685,15 +680,10 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
}
#endif
#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
if (symname[0] != '_')
break;
else
symname++;
#endif
if (is_crc) {
const char *e = is_vmlinux(mod->name) ?"":".ko";
warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n", symname + strlen(CRC_PFX), mod->name, e);
warn("EXPORT symbol \"%s\" [%s%s] version generation failed, symbol will not be versioned.\n",
symname + strlen("__crc_"), mod->name, e);
}
mod->unres = alloc_symbol(symname,
ELF_ST_BIND(sym->st_info) == STB_WEAK,
@ -701,13 +691,13 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
break;
default:
/* All exported symbols */
if (strncmp(symname, KSYMTAB_PFX, strlen(KSYMTAB_PFX)) == 0) {
sym_add_exported(symname + strlen(KSYMTAB_PFX), mod,
if (strstarts(symname, "__ksymtab_")) {
sym_add_exported(symname + strlen("__ksymtab_"), mod,
export);
}
if (strcmp(symname, VMLINUX_SYMBOL_STR(init_module)) == 0)
if (strcmp(symname, "init_module") == 0)
mod->has_init = 1;
if (strcmp(symname, VMLINUX_SYMBOL_STR(cleanup_module)) == 0)
if (strcmp(symname, "cleanup_module") == 0)
mod->has_cleanup = 1;
break;
}
@ -734,16 +724,17 @@ static char *next_string(char *string, unsigned long *secsize)
return string;
}
static char *get_next_modinfo(void *modinfo, unsigned long modinfo_len,
const char *tag, char *info)
static char *get_next_modinfo(struct elf_info *info, const char *tag,
char *prev)
{
char *p;
unsigned int taglen = strlen(tag);
unsigned long size = modinfo_len;
char *modinfo = info->modinfo;
unsigned long size = info->modinfo_len;
if (info) {
size -= info - (char *)modinfo;
modinfo = next_string(info, &size);
if (prev) {
size -= prev - modinfo;
modinfo = next_string(prev, &size);
}
for (p = modinfo; p; p = next_string(p, &size)) {
@ -753,11 +744,10 @@ static char *get_next_modinfo(void *modinfo, unsigned long modinfo_len,
return NULL;
}
static char *get_modinfo(void *modinfo, unsigned long modinfo_len,
const char *tag)
static char *get_modinfo(struct elf_info *info, const char *tag)
{
return get_next_modinfo(modinfo, modinfo_len, tag, NULL);
return get_next_modinfo(info, tag, NULL);
}
/**
@ -1181,13 +1171,13 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
/* Check for pattern 1 */
if (match(tosec, init_data_sections) &&
match(fromsec, data_sections) &&
(strncmp(fromsym, "__param", strlen("__param")) == 0))
strstarts(fromsym, "__param"))
return 0;
/* Check for pattern 1a */
if (strcmp(tosec, ".init.text") == 0 &&
match(fromsec, data_sections) &&
(strncmp(fromsym, "__param_ops_", strlen("__param_ops_")) == 0))
strstarts(fromsym, "__param_ops_"))
return 0;
/* Check for pattern 2 */
@ -1542,8 +1532,7 @@ static void default_mismatch_handler(const char *modname, struct elf_info *elf,
from = find_elf_symbol2(elf, r->r_offset, fromsec);
fromsym = sym_name(elf, from);
if (!strncmp(fromsym, "reference___initcall",
sizeof("reference___initcall")-1))
if (strstarts(fromsym, "reference___initcall"))
return;
tosec = sec_name(elf, get_secindex(elf, sym));
@ -1940,7 +1929,7 @@ static char *remove_dot(char *s)
return s;
}
static void read_symbols(char *modname)
static void read_symbols(const char *modname)
{
const char *symname;
char *version;
@ -1961,7 +1950,7 @@ static void read_symbols(char *modname)
mod->skip = 1;
}
license = get_modinfo(info.modinfo, info.modinfo_len, "license");
license = get_modinfo(&info, "license");
if (!license && !is_vmlinux(modname))
warn("modpost: missing MODULE_LICENSE() in %s\n"
"see include/linux/module.h for "
@ -1973,8 +1962,7 @@ static void read_symbols(char *modname)
mod->gpl_compatible = 0;
break;
}
license = get_next_modinfo(info.modinfo, info.modinfo_len,
"license", license);
license = get_next_modinfo(&info, "license", license);
}
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
@ -1983,11 +1971,10 @@ static void read_symbols(char *modname)
handle_modversions(mod, &info, sym, symname);
handle_moddevtable(mod, &info, sym, symname);
}
if (!is_vmlinux(modname) ||
(is_vmlinux(modname) && vmlinux_section_warnings))
if (!is_vmlinux(modname) || vmlinux_section_warnings)
check_sec_ref(mod, modname, &info);
version = get_modinfo(info.modinfo, info.modinfo_len, "version");
version = get_modinfo(&info, "version");
if (version)
maybe_frob_rcs_version(modname, version, info.modinfo,
version - (char *)info.hdr);
@ -2174,9 +2161,7 @@ static void add_retpoline(struct buffer *b)
static void add_staging_flag(struct buffer *b, const char *name)
{
static const char *staging_dir = "drivers/staging";
if (strncmp(staging_dir, name, strlen(staging_dir)) == 0)
if (strstarts(name, "drivers/staging"))
buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n");
}
@ -2230,7 +2215,7 @@ static int add_versions(struct buffer *b, struct module *mod)
err = 1;
break;
}
buf_printf(b, "\t{ %#8x, __VMLINUX_SYMBOL_STR(%s) },\n",
buf_printf(b, "\t{ %#8x, \"%s\" },\n",
s->crc, s->name);
}
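
strstarts(), used throughout the modpost changes above, is the kernel's prefix-test helper; it mirrors the definition in include/linux/string.h and replaces the open-coded strncmp(..., strlen(...)) patterns. A self-contained sketch (the symbol name in main() is only an example):

    /* strstarts-demo.c */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static inline bool strstarts(const char *str, const char *prefix)
    {
            return strncmp(str, prefix, strlen(prefix)) == 0;
    }

    int main(void)
    {
            const char *symname = "__ksymtab_printk";  /* as in handle_modversions() */

            if (strstarts(symname, "__ksymtab_"))
                    printf("exported symbol: %s\n", symname + strlen("__ksymtab_"));
            return 0;
    }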


@ -71,22 +71,21 @@ if [ "$ARCH" = "um" ] ; then
packagename=user-mode-linux-$version
fi
# Try to determine maintainer and email values
if [ -n "$DEBEMAIL" ]; then
email=$DEBEMAIL
elif [ -n "$EMAIL" ]; then
email=$EMAIL
email=${DEBEMAIL-$EMAIL}
# use email string directly if it contains <email>
if echo $email | grep -q '<.*>'; then
maintainer=$email
else
email=$(id -nu)@$(hostname -f 2>/dev/null || hostname)
# or construct the maintainer string
user=${KBUILD_BUILD_USER-$(id -nu)}
name=${DEBFULLNAME-$user}
if [ -z "$email" ]; then
buildhost=${KBUILD_BUILD_HOST-$(hostname -f 2>/dev/null || hostname)}
email="$user@$buildhost"
fi
maintainer="$name <$email>"
fi
if [ -n "$DEBFULLNAME" ]; then
name=$DEBFULLNAME
elif [ -n "$NAME" ]; then
name=$NAME
else
name="Anonymous"
fi
maintainer="$name <$email>"
# Try to determine distribution
if [ -n "$KDEB_CHANGELOG_DIST" ]; then


@ -500,7 +500,7 @@ do_file(char const *const fname)
gpfx = 0;
switch (w2(ehdr->e_machine)) {
default:
fprintf(stderr, "unrecognized e_machine %d %s\n",
fprintf(stderr, "unrecognized e_machine %u %s\n",
w2(ehdr->e_machine), fname);
fail_file();
break;


@ -441,7 +441,7 @@ static unsigned find_secsym_ndx(unsigned const txtndx,
return symp - sym0;
}
}
fprintf(stderr, "Cannot find symbol for section %d: %s.\n",
fprintf(stderr, "Cannot find symbol for section %u: %s.\n",
txtndx, txtname);
fail_file();
}


@ -28,20 +28,11 @@ fi
# ignore userspace tools
ignore="$ignore ( -path ${tree}tools ) -prune -o"
# Find all available archs
find_all_archs()
{
ALLSOURCE_ARCHS=""
for arch in `ls ${tree}arch`; do
ALLSOURCE_ARCHS="${ALLSOURCE_ARCHS} "${arch##\/}
done
}
# Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
if [ "${ALLSOURCE_ARCHS}" = "" ]; then
ALLSOURCE_ARCHS=${SRCARCH}
elif [ "${ALLSOURCE_ARCHS}" = "all" ]; then
find_all_archs
ALLSOURCE_ARCHS=$(find ${tree}arch/ -mindepth 1 -maxdepth 1 -type d -printf '%f ')
fi
# find sources in arch/$ARCH