2007-10-15 20:25:06 +00:00
|
|
|
# Backward compatibility: fold the legacy EXTRA_* variables into the
# current per-directory flag variables so old Makefiles keep working.
asflags-y  += $(EXTRA_AFLAGS)
ccflags-y  += $(EXTRA_CFLAGS)
cppflags-y += $(EXTRA_CPPFLAGS)
ldflags-y  += $(EXTRA_LDFLAGS)
|
|
|
|
|
2009-04-19 09:04:26 +00:00
|
|
|
#
# flags that take effect in sub directories
# (exported so every descended-into Makefile inherits the accumulated value)
export KBUILD_SUBDIR_ASFLAGS := $(KBUILD_SUBDIR_ASFLAGS) $(subdir-asflags-y)
export KBUILD_SUBDIR_CCFLAGS := $(KBUILD_SUBDIR_CCFLAGS) $(subdir-ccflags-y)
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
# Figure out what we need to build from the various variables
# ===========================================================================

# When an object is listed to be built compiled-in and modular,
# only build the compiled-in version
obj-m := $(filter-out $(obj-y),$(obj-m))

# Libraries are always collected in one lib file.
# Filter out objects already built-in
lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m)))
|
|
|
|
|
|
|
|
|
|
|
|
# Handle objects in subdirs
# ---------------------------------------------------------------------------
# o if we encounter foo/ in $(obj-y), replace it by foo/built-in.o
#   and add the directory to the list of dirs to descend into: $(subdir-y)
# o if we encounter foo/ in $(obj-m), remove it from $(obj-m)
#   and add the directory to the list of dirs to descend into: $(subdir-m)

# Determine modorder.
# Unfortunately, we don't have information about ordering between -y
# and -m subdirs. Just put -y's first.
modorder := $(patsubst %/,%/modules.order, $(filter %/, $(obj-y)) $(obj-m:.o=.ko))

__subdir-y := $(patsubst %/,%,$(filter %/, $(obj-y)))
subdir-y   += $(__subdir-y)
__subdir-m := $(patsubst %/,%,$(filter %/, $(obj-m)))
subdir-m   += $(__subdir-m)
obj-y      := $(patsubst %/, %/built-in.o, $(obj-y))
obj-m      := $(filter-out %/, $(obj-m))

# Subdirectories we need to descend into
subdir-ym := $(sort $(subdir-y) $(subdir-m))
|
|
|
|
|
2014-04-28 07:26:18 +00:00
|
|
|
# if $(foo-objs) exists, foo.o is a composite object
# (a module may also list its parts in foo-y / foo-m)
multi-used-y := $(sort $(foreach m,$(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))), $(m))))
multi-used-m := $(sort $(foreach m,$(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))), $(m))))
multi-used   := $(multi-used-y) $(multi-used-m)
# Modules built from exactly one source object
single-used-m := $(sort $(filter-out $(multi-used-m),$(obj-m)))

# Build list of the parts of our composite objects, our composite
# objects depend on those (obviously)
multi-objs-y := $(foreach m, $(multi-used-y), $($(m:.o=-objs)) $($(m:.o=-y)))
multi-objs-m := $(foreach m, $(multi-used-m), $($(m:.o=-objs)) $($(m:.o=-y)))
multi-objs   := $(multi-objs-y) $(multi-objs-m)
|
|
|
|
|
2007-09-15 06:55:39 +00:00
|
|
|
# $(subdir-obj-y) is the list of objects in $(obj-y) which uses dir/ to
# tell kbuild to descend
subdir-obj-y := $(filter %/built-in.o, $(obj-y))

# $(obj-dirs) is a list of directories that contain object files
obj-dirs := $(dir $(multi-objs) $(obj-y))
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
# Replace multi-part objects by their individual parts, look at local dir only
real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y)
real-objs-m := $(foreach m, $(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))),$($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)),$(m)))
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
# Add subdir path
# (prefix every per-directory list with $(obj)/ so all names are relative
# to the top of the object tree)
extra-y       := $(addprefix $(obj)/,$(extra-y))
always        := $(addprefix $(obj)/,$(always))
targets       := $(addprefix $(obj)/,$(targets))
modorder      := $(addprefix $(obj)/,$(modorder))
obj-y         := $(addprefix $(obj)/,$(obj-y))
obj-m         := $(addprefix $(obj)/,$(obj-m))
lib-y         := $(addprefix $(obj)/,$(lib-y))
subdir-obj-y  := $(addprefix $(obj)/,$(subdir-obj-y))
real-objs-y   := $(addprefix $(obj)/,$(real-objs-y))
real-objs-m   := $(addprefix $(obj)/,$(real-objs-m))
single-used-m := $(addprefix $(obj)/,$(single-used-m))
multi-used-y  := $(addprefix $(obj)/,$(multi-used-y))
multi-used-m  := $(addprefix $(obj)/,$(multi-used-m))
multi-objs-y  := $(addprefix $(obj)/,$(multi-objs-y))
multi-objs-m  := $(addprefix $(obj)/,$(multi-objs-m))
subdir-ym     := $(addprefix $(obj)/,$(subdir-ym))
obj-dirs      := $(addprefix $(obj)/,$(obj-dirs))
|
|
|
|
|
|
|
|
# These flags are needed for modversions and compiling, so we define them here
# already
# $(modname_flags) #defines KBUILD_MODNAME as the name of the module it will
# end up in (or would, if it gets compiled in)
# Note: Files that end up in two or more modules are compiled without the
#       KBUILD_MODNAME definition. The reason is that any made-up name would
#       differ in different configs.
# name-fix quotes the name and maps '-' and ',' to '_' to yield a valid
# C identifier value.
name-fix = $(squote)$(quote)$(subst $(comma),_,$(subst -,_,$1))$(quote)$(squote)
basename_flags = -DKBUILD_BASENAME=$(call name-fix,$(basetarget))
# Only define KBUILD_MODNAME when the object belongs to exactly one module
modname_flags = $(if $(filter 1,$(words $(modname))),\
		-DKBUILD_MODNAME=$(call name-fix,$(modname)))
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-04-19 09:04:26 +00:00
|
|
|
# Per-object compile flags: global kbuild flags, then per-directory flags,
# then per-file CFLAGS_foo.o / AFLAGS_foo.o; CFLAGS_REMOVE_foo.o /
# AFLAGS_REMOVE_foo.o strip flags from the result for a single object.
orig_c_flags = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
               $(ccflags-y) $(CFLAGS_$(basetarget).o)
_c_flags     = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(orig_c_flags))
orig_a_flags = $(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(KBUILD_SUBDIR_ASFLAGS) \
               $(asflags-y) $(AFLAGS_$(basetarget).o)
_a_flags     = $(filter-out $(AFLAGS_REMOVE_$(basetarget).o), $(orig_a_flags))
_cpp_flags   = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(@F))
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2009-06-17 23:28:08 +00:00
|
|
|
#
# Enable gcov profiling flags for a file, directory or for all files depending
# on variables GCOV_PROFILE_obj.o, GCOV_PROFILE and CONFIG_GCOV_PROFILE_ALL
# (in this order)
#
ifeq ($(CONFIG_GCOV_KERNEL),y)
_c_flags += $(if $(patsubst n%,, \
		$(GCOV_PROFILE_$(basetarget).o)$(GCOV_PROFILE)$(CONFIG_GCOV_PROFILE_ALL)), \
		$(CFLAGS_GCOV))
endif
|
|
|
|
|
kasan: add kernel address sanitizer infrastructure
Kernel Address sanitizer (KASan) is a dynamic memory error detector. It
provides fast and comprehensive solution for finding use-after-free and
out-of-bounds bugs.
KASAN uses compile-time instrumentation for checking every memory access,
therefore GCC > v4.9.2 required. v4.9.2 almost works, but has issues with
putting symbol aliases into the wrong section, which breaks kasan
instrumentation of globals.
This patch only adds infrastructure for kernel address sanitizer. It's
not available for use yet. The idea and some code was borrowed from [1].
Basic idea:
The main idea of KASAN is to use shadow memory to record whether each byte
of memory is safe to access or not, and use compiler's instrumentation to
check the shadow memory on each memory access.
Address sanitizer uses 1/8 of the memory addressable in kernel for shadow
memory and uses direct mapping with a scale and offset to translate a
memory address to its corresponding shadow address.
Here is function to translate address to corresponding shadow address:
unsigned long kasan_mem_to_shadow(unsigned long addr)
{
return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}
where KASAN_SHADOW_SCALE_SHIFT = 3.
So for every 8 bytes there is one corresponding byte of shadow memory.
The following encoding used for each shadow byte: 0 means that all 8 bytes
of the corresponding memory region are valid for access; k (1 <= k <= 7)
means that the first k bytes are valid for access, and other (8 - k) bytes
are not; Any negative value indicates that the entire 8-bytes are
inaccessible. Different negative values used to distinguish between
different kinds of inaccessible memory (redzones, freed memory) (see
mm/kasan/kasan.h).
To be able to detect accesses to bad memory we need a special compiler.
Such compiler inserts a specific function calls (__asan_load*(addr),
__asan_store*(addr)) before each memory access of size 1, 2, 4, 8 or 16.
These functions check whether memory region is valid to access or not by
checking corresponding shadow memory. If access is not valid an error
printed.
Historical background of the address sanitizer from Dmitry Vyukov:
"We've developed the set of tools, AddressSanitizer (Asan),
ThreadSanitizer and MemorySanitizer, for user space. We actively use
them for testing inside of Google (continuous testing, fuzzing,
running prod services). To date the tools have found more than 10'000
scary bugs in Chromium, Google internal codebase and various
open-source projects (Firefox, OpenSSL, gcc, clang, ffmpeg, MySQL and
lots of others): [2] [3] [4].
The tools are part of both gcc and clang compilers.
We have not yet done massive testing under the Kernel AddressSanitizer
(it's kind of chicken and egg problem, you need it to be upstream to
start applying it extensively). To date it has found about 50 bugs.
Bugs that we've found in upstream kernel are listed in [5].
We've also found ~20 bugs in out internal version of the kernel. Also
people from Samsung and Oracle have found some.
[...]
As others noted, the main feature of AddressSanitizer is its
performance due to inline compiler instrumentation and simple linear
shadow memory. User-space Asan has ~2x slowdown on computational
programs and ~2x memory consumption increase. Taking into account that
kernel usually consumes only small fraction of CPU and memory when
running real user-space programs, I would expect that kernel Asan will
have ~10-30% slowdown and similar memory consumption increase (when we
finish all tuning).
I agree that Asan can well replace kmemcheck. We have plans to start
working on Kernel MemorySanitizer that finds uses of unitialized
memory. Asan+Msan will provide feature-parity with kmemcheck. As
others noted, Asan will unlikely replace debug slab and pagealloc that
can be enabled at runtime. Asan uses compiler instrumentation, so even
if it is disabled, it still incurs visible overheads.
Asan technology is easily portable to other architectures. Compiler
instrumentation is fully portable. Runtime has some arch-dependent
parts like shadow mapping and atomic operation interception. They are
relatively easy to port."
Comparison with other debugging features:
========================================
KMEMCHECK:
- KASan can do almost everything that kmemcheck can. KASan uses
compile-time instrumentation, which makes it significantly faster than
kmemcheck. The only advantage of kmemcheck over KASan is detection of
uninitialized memory reads.
Some brief performance testing showed that kasan could be
x500-x600 times faster than kmemcheck:
$ netperf -l 30
MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to localhost (127.0.0.1) port 0 AF_INET
Recv Send Send
Socket Socket Message Elapsed
Size Size Size Time Throughput
bytes bytes bytes secs. 10^6bits/sec
no debug: 87380 16384 16384 30.00 41624.72
kasan inline: 87380 16384 16384 30.00 12870.54
kasan outline: 87380 16384 16384 30.00 10586.39
kmemcheck: 87380 16384 16384 30.03 20.23
- Also kmemcheck couldn't work on several CPUs. It always sets
number of CPUs to 1. KASan doesn't have such limitation.
DEBUG_PAGEALLOC:
- KASan is slower than DEBUG_PAGEALLOC, but KASan works on sub-page
granularity level, so it able to find more bugs.
SLUB_DEBUG (poisoning, redzones):
- SLUB_DEBUG has lower overhead than KASan.
- SLUB_DEBUG in most cases are not able to detect bad reads,
KASan able to detect both reads and writes.
- In some cases (e.g. redzone overwritten) SLUB_DEBUG detect
bugs only on allocation/freeing of object. KASan catch
bugs right before it will happen, so we always know exact
place of first bad read/write.
[1] https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerForKernel
[2] https://code.google.com/p/address-sanitizer/wiki/FoundBugs
[3] https://code.google.com/p/thread-sanitizer/wiki/FoundBugs
[4] https://code.google.com/p/memory-sanitizer/wiki/FoundBugs
[5] https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerForKernel#Trophies
Based on work by Andrey Konovalov.
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Acked-by: Michal Marek <mmarek@suse.cz>
Signed-off-by: Andrey Konovalov <adech.fo@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Konstantin Serebryany <kcc@google.com>
Cc: Dmitry Chernenkov <dmitryc@google.com>
Cc: Yuri Gribov <tetra2005@gmail.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-02-13 22:39:17 +00:00
|
|
|
#
# Enable address sanitizer flags for kernel except some files or directories
# we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE)
#
ifeq ($(CONFIG_KASAN),y)
# Sanitize by default (trailing 'y'); a leading 'n' in either the per-object
# or per-directory variable opts the object out.
_c_flags += $(if $(patsubst n%,, \
		$(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
		$(CFLAGS_KASAN))
endif
|
|
|
|
|
2016-01-20 23:00:55 +00:00
|
|
|
# Undefined-behaviour sanitizer: opt-in via UBSAN_SANITIZE_obj.o /
# UBSAN_SANITIZE, or globally via CONFIG_UBSAN_SANITIZE_ALL.
ifeq ($(CONFIG_UBSAN),y)
_c_flags += $(if $(patsubst n%,, \
		$(UBSAN_SANITIZE_$(basetarget).o)$(UBSAN_SANITIZE)$(CONFIG_UBSAN_SANITIZE_ALL)), \
		$(CFLAGS_UBSAN))
endif
|
|
|
|
|
kernel: add kcov code coverage
kcov provides code coverage collection for coverage-guided fuzzing
(randomized testing). Coverage-guided fuzzing is a testing technique
that uses coverage feedback to determine new interesting inputs to a
system. A notable user-space example is AFL
(http://lcamtuf.coredump.cx/afl/). However, this technique is not
widely used for kernel testing due to missing compiler and kernel
support.
kcov does not aim to collect as much coverage as possible. It aims to
collect more or less stable coverage that is function of syscall inputs.
To achieve this goal it does not collect coverage in soft/hard
interrupts and instrumentation of some inherently non-deterministic or
non-interesting parts of kernel is disbled (e.g. scheduler, locking).
Currently there is a single coverage collection mode (tracing), but the
API anticipates additional collection modes. Initially I also
implemented a second mode which exposes coverage in a fixed-size hash
table of counters (what Quentin used in his original patch). I've
dropped the second mode for simplicity.
This patch adds the necessary support on kernel side. The complimentary
compiler support was added in gcc revision 231296.
We've used this support to build syzkaller system call fuzzer, which has
found 90 kernel bugs in just 2 months:
https://github.com/google/syzkaller/wiki/Found-Bugs
We've also found 30+ bugs in our internal systems with syzkaller.
Another (yet unexplored) direction where kcov coverage would greatly
help is more traditional "blob mutation". For example, mounting a
random blob as a filesystem, or receiving a random blob over wire.
Why not gcov. Typical fuzzing loop looks as follows: (1) reset
coverage, (2) execute a bit of code, (3) collect coverage, repeat. A
typical coverage can be just a dozen of basic blocks (e.g. an invalid
input). In such context gcov becomes prohibitively expensive as
reset/collect coverage steps depend on total number of basic
blocks/edges in program (in case of kernel it is about 2M). Cost of
kcov depends only on number of executed basic blocks/edges. On top of
that, kernel requires per-thread coverage because there are always
background threads and unrelated processes that also produce coverage.
With inlined gcov instrumentation per-thread coverage is not possible.
kcov exposes kernel PCs and control flow to user-space which is
insecure. But debugfs should not be mapped as user accessible.
Based on a patch by Quentin Casasnovas.
[akpm@linux-foundation.org: make task_struct.kcov_mode have type `enum kcov_mode']
[akpm@linux-foundation.org: unbreak allmodconfig]
[akpm@linux-foundation.org: follow x86 Makefile layout standards]
Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Cc: syzkaller <syzkaller@googlegroups.com>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Tavis Ormandy <taviso@google.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Quentin Casasnovas <quentin.casasnovas@oracle.com>
Cc: Kostya Serebryany <kcc@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Kees Cook <keescook@google.com>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: David Drysdale <drysdale@google.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-03-22 21:27:30 +00:00
|
|
|
# kcov code-coverage instrumentation: opt-in via KCOV_INSTRUMENT_obj.o /
# KCOV_INSTRUMENT, or globally via CONFIG_KCOV_INSTRUMENT_ALL.
ifeq ($(CONFIG_KCOV),y)
_c_flags += $(if $(patsubst n%,, \
		$(KCOV_INSTRUMENT_$(basetarget).o)$(KCOV_INSTRUMENT)$(CONFIG_KCOV_INSTRUMENT_ALL)), \
		$(CFLAGS_KCOV))
endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
# If building the kernel in a separate objtree expand all occurrences
# of -Idir to -I$(srctree)/dir except for absolute paths (starting with '/').

ifeq ($(KBUILD_SRC),)
# In-tree build: use the flags as-is
__c_flags   = $(_c_flags)
__a_flags   = $(_a_flags)
__cpp_flags = $(_cpp_flags)
else

# -I$(obj) locates generated .h files
# $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
#   and locates generated .h files
# FIXME: Replace both with specific CFLAGS* statements in the makefiles
__c_flags   = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \
	      $(call flags,_c_flags)
__a_flags   = $(call flags,_a_flags)
__cpp_flags = $(call flags,_cpp_flags)
endif
|
|
|
|
|
2008-11-21 20:50:02 +00:00
|
|
|
# Final command-line flags: dependency generation (-Wp,-MD), standard
# include paths, then the per-object flags assembled above.
c_flags   = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
	    $(__c_flags) $(modkern_cflags) \
	    $(basename_flags) $(modname_flags)

a_flags   = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
	    $(__a_flags) $(modkern_aflags)

cpp_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
	    $(__cpp_flags)

ld_flags  = $(LDFLAGS) $(ldflags-y)
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2013-05-31 10:14:20 +00:00
|
|
|
# Preprocessor flags used when running $(CPP) over .dts files before dtc
dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \
		-I$(srctree)/arch/$(SRCARCH)/boot/dts \
		-I$(srctree)/scripts/dtc/include-prefixes \
		-I$(srctree)/drivers/of/testcase-data \
		-undef -D__DTS__
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
# Finds the multi-part object the current object will be linked into
modname-multi = $(sort $(foreach m,$(multi-used),\
		$(if $(filter $(subst $(obj)/,,$*.o), $($(m:.o=-objs)) $($(m:.o=-y))),$(m:.o=))))
|
|
|
|
|
2014-08-19 07:34:20 +00:00
|
|
|
# Useful for describing the dependency of composite objects
# Usage:
#   $(call multi_depend, multi_used_targets, suffix_to_remove, suffix_to_add)
define multi_depend
$(foreach m, $(notdir $1), \
	$(eval $(obj)/$m: \
	$(addprefix $(obj)/, $(foreach s, $3, $($(m:%$(strip $2)=%$(s)))))))
endef
|
|
|
|
|
2011-05-23 04:04:43 +00:00
|
|
|
# Rules to regenerate checked-in parser sources (only when the developer
# explicitly asks with REGENERATE_PARSERS=1; normal builds use _shipped files)
ifdef REGENERATE_PARSERS

# GPERF
# ---------------------------------------------------------------------------
quiet_cmd_gperf = GPERF $@
      cmd_gperf = gperf -t --output-file $@ -a -C -E -g -k 1,3,$$ -p -t $<

.PRECIOUS: $(src)/%.hash.c_shipped
$(src)/%.hash.c_shipped: $(src)/%.gperf
	$(call cmd,gperf)

# LEX
# ---------------------------------------------------------------------------
LEX_PREFIX = $(if $(LEX_PREFIX_${baseprereq}),$(LEX_PREFIX_${baseprereq}),yy)

quiet_cmd_flex = LEX $@
      cmd_flex = flex -o$@ -L -P $(LEX_PREFIX) $<

.PRECIOUS: $(src)/%.lex.c_shipped
$(src)/%.lex.c_shipped: $(src)/%.l
	$(call cmd,flex)

# YACC
# ---------------------------------------------------------------------------
YACC_PREFIX = $(if $(YACC_PREFIX_${baseprereq}),$(YACC_PREFIX_${baseprereq}),yy)

quiet_cmd_bison = YACC $@
      cmd_bison = bison -o$@ -t -l -p $(YACC_PREFIX) $<

.PRECIOUS: $(src)/%.tab.c_shipped
$(src)/%.tab.c_shipped: $(src)/%.y
	$(call cmd,bison)

quiet_cmd_bison_h = YACC $@
      cmd_bison_h = bison -o/dev/null --defines=$@ -t -l -p $(YACC_PREFIX) $<

.PRECIOUS: $(src)/%.tab.h_shipped
$(src)/%.tab.h_shipped: $(src)/%.y
	$(call cmd,bison_h)

endif
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
# Shipped files
# ===========================================================================
# Pre-generated sources are kept in the tree as %_shipped; copy them into
# place so the build never needs the generating tool installed.

quiet_cmd_shipped = SHIPPED $@
cmd_shipped = cat $< > $@

$(obj)/%: $(src)/%_shipped
	$(call cmd,shipped)
|
|
|
|
|
|
|
|
# Commands useful for building a boot image
# ===========================================================================
#
# Use as following:
#
# target: source(s) FORCE
#	$(if_changed,ld/objcopy/gzip)
#
# and add target to extra-y so that we know we have to
# read in the saved command line

# Linking
# ---------------------------------------------------------------------------

quiet_cmd_ld = LD $@
cmd_ld = $(LD) $(LDFLAGS) $(ldflags-y) $(LDFLAGS_$(@F)) \
	 $(filter-out FORCE,$^) -o $@
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
# Objcopy
# ---------------------------------------------------------------------------

quiet_cmd_objcopy = OBJCOPY $@
cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
|
|
|
|
|
|
|
|
# Gzip
# ---------------------------------------------------------------------------

quiet_cmd_gzip = GZIP $@
# remove the target on failure so a truncated archive never looks up to date
cmd_gzip = (cat $(filter-out FORCE,$^) | gzip -n -f -9 > $@) || \
	(rm -f $@ ; false)
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2010-12-22 19:57:26 +00:00
|
|
|
# DTC
# ---------------------------------------------------------------------------
# Device tree compiler; ?= lets the user point at an external dtc binary
DTC ?= $(objtree)/scripts/dtc/dtc
|
2010-12-22 19:57:26 +00:00
|
|
|
|
2016-03-24 15:52:42 +00:00
|
|
|
# Disable noisy checks by default
ifeq ($(KBUILD_ENABLE_EXTRA_GCC_CHECKS),)
DTC_FLAGS += -Wno-unit_address_vs_reg \
	-Wno-simple_bus_reg \
	-Wno-unit_address_format \
	-Wno-pci_bridge \
	-Wno-pci_device_bus_num \
	-Wno-pci_device_reg
endif

# Extra-strict name checks only at warning level 2
ifeq ($(KBUILD_ENABLE_EXTRA_GCC_CHECKS),2)
DTC_FLAGS += -Wnode_name_chars_strict \
	-Wproperty_name_chars_strict
endif

# Per-target dtc flag overrides
DTC_FLAGS += $(DTC_FLAGS_$(basetarget))
|
|
|
|
|
2010-12-22 19:57:26 +00:00
|
|
|
# Generate an assembly file to wrap the output of the device tree compiler
# (the .dtb blob is embedded via .incbin between __dtb_*_begin/_end symbols)
quiet_cmd_dt_S_dtb= DTB $@
cmd_dt_S_dtb= \
( \
	echo '\#include <asm-generic/vmlinux.lds.h>'; \
	echo '.section .dtb.init.rodata,"a"'; \
	echo '.balign STRUCT_ALIGNMENT'; \
	echo '.global __dtb_$(*F)_begin'; \
	echo '__dtb_$(*F)_begin:'; \
	echo '.incbin "$<" '; \
	echo '__dtb_$(*F)_end:'; \
	echo '.global __dtb_$(*F)_end'; \
	echo '.balign STRUCT_ALIGNMENT'; \
) > $@

$(obj)/%.dtb.S: $(obj)/%.dtb
	$(call cmd,dt_S_dtb)
|
|
|
|
|
|
|
|
quiet_cmd_dtc = DTC     $@
# Compile a device tree source into a blob:
#  1. run the C preprocessor over the .dts (handles #include/#define),
#     writing the result to $(dtc-tmp);
#  2. run dtc on the preprocessed file.  -i $(dir $<) makes dtc search for
#     /include/d .dtsi files next to the original source, which matters for
#     separate-objdir (O=) builds where $(dtc-tmp) lives in the build tree;
#  3. merge the cpp and dtc dependency files into the final $(depfile).
cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \
	$(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
	$(DTC) -O dtb -o $@ -b 0 \
		-i $(dir $<) $(DTC_FLAGS) \
		-d $(depfile).dtc.tmp $(dtc-tmp) ; \
	cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile)
# Build a .dtb from the corresponding .dts source; if_changed_dep also
# rebuilds when the command line or any dependency recorded by dtc changes.
$(obj)/%.dtb: $(src)/%.dts FORCE
	$(call if_changed_dep,dtc)
2013-05-31 10:14:20 +00:00
|
|
|
# Name of the preprocessed intermediate .dts file, derived from the hidden
# dep-file name of the target.  Commas are replaced with underscores —
# NOTE(review): presumably because commas in the path break downstream
# tooling (e.g. dep-file handling); confirm against the kbuild history.
dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
# Bzip2
# ---------------------------------------------------------------------------

# Bzip2 and LZMA do not include size in file... so we have to fake that;
# append the size as a 32-bit littleendian number as gzip does.
# size_append sums the sizes of all files in $1, formats the total as an
# 8-digit hex number, then emits the four bytes in little-endian order as
# octal escapes for printf.  Must stay recursively expanded (=) so $1 is
# evaluated per $(call ...) site.
size_append = printf $(shell						\
dec_size=0;								\
for F in $1; do								\
	fsize=$$(stat -c "%s" $$F);					\
	dec_size=$$(expr $$dec_size + $$fsize);				\
done;									\
printf "%08x\n" $$dec_size |						\
	sed 's/\(..\)/\1 /g' | {					\
		read ch0 ch1 ch2 ch3;					\
		for ch in $$ch3 $$ch2 $$ch1 $$ch0; do			\
			printf '%s%03o' '\\' $$((0x$$ch)); 		\
		done;							\
	}								\
)
quiet_cmd_bzip2 = BZIP2   $@
# Compress all real prerequisites (FORCE filtered out) with bzip2 -9 and
# append the uncompressed size (see size_append).  On any failure the
# half-written target is removed so a later run does not see it as valid.
cmd_bzip2 = (cat $(filter-out FORCE,$^) | \
	bzip2 -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
	(rm -f $@ ; false)
# Lzma
# ---------------------------------------------------------------------------

quiet_cmd_lzma = LZMA    $@
# Same structure as cmd_bzip2: compress, append uncompressed size,
# remove the output on failure.
cmd_lzma = (cat $(filter-out FORCE,$^) | \
	lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
	(rm -f $@ ; false)
quiet_cmd_lzo = LZO     $@
# LZO: weaker compression than gzip but faster decompression.  Same
# structure as cmd_bzip2: compress, append uncompressed size, remove the
# output on failure.
cmd_lzo = (cat $(filter-out FORCE,$^) | \
	lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
	(rm -f $@ ; false)
quiet_cmd_lz4 = LZ4     $@
# lz4c -l emits the legacy frame format expected by the kernel
# decompressor; -c1 selects high compression.  Same failure handling as
# the other compressors above.
cmd_lz4 = (cat $(filter-out FORCE,$^) | \
	lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
	(rm -f $@ ; false)
# U-Boot mkimage
# ---------------------------------------------------------------------------

MKIMAGE := $(srctree)/scripts/mkuboot.sh

# SRCARCH just happens to match slightly more than ARCH (on sparc), so reduces
# the number of overrides in arch makefiles
UIMAGE_ARCH ?= $(SRCARCH)
# $(2) lets callers pass a compression type via $(call ...,uimage,<comp>);
# default is uncompressed.
UIMAGE_COMPRESSION ?= $(if $(2),$(2),none)
UIMAGE_OPTS-y ?=
UIMAGE_TYPE ?= kernel
# Architectures must provide a load address; the entry point defaults to it.
UIMAGE_LOADADDR ?= arch_must_set_this
UIMAGE_ENTRYADDR ?= $(UIMAGE_LOADADDR)
UIMAGE_NAME ?= 'Linux-$(KERNELRELEASE)'
UIMAGE_IN ?= $<
UIMAGE_OUT ?= $@
|
|
|
|
quiet_cmd_uimage = UIMAGE $(UIMAGE_OUT)
|
|
|
|
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(UIMAGE_ARCH) -O linux \
|
|
|
|
-C $(UIMAGE_COMPRESSION) $(UIMAGE_OPTS-y) \
|
|
|
|
-T $(UIMAGE_TYPE) \
|
|
|
|
-a $(UIMAGE_LOADADDR) -e $(UIMAGE_ENTRYADDR) \
|
|
|
|
-n $(UIMAGE_NAME) -d $(UIMAGE_IN) $(UIMAGE_OUT)
|
|
|
|
|
2011-01-13 01:01:22 +00:00
|
|
|
# XZ
# ---------------------------------------------------------------------------
# Use xzkern to compress the kernel image and xzmisc to compress other things.
#
# xzkern uses a big LZMA2 dictionary since it doesn't increase memory usage
# of the kernel decompressor. A BCJ filter is used if it is available for
# the target architecture. xzkern also appends uncompressed size of the data
# using size_append. The .xz format has the size information available at
# the end of the file too, but it's in more complex format and it's good to
# avoid changing the part of the boot code that reads the uncompressed size.
# Note that the bytes added by size_append will make the xz tool think that
# the file is corrupt. This is expected.
#
# xzmisc doesn't use size_append, so it can be used to create normal .xz
# files. xzmisc uses smaller LZMA2 dictionary than xzkern, because a very
# big dictionary would increase the memory usage too much in the multi-call
# decompression mode. A BCJ filter isn't used either.
quiet_cmd_xzkern = XZKERN  $@
cmd_xzkern = (cat $(filter-out FORCE,$^) | \
	sh $(srctree)/scripts/xz_wrap.sh && \
	$(call size_append, $(filter-out FORCE,$^))) > $@ || \
	(rm -f $@ ; false)
quiet_cmd_xzmisc = XZMISC  $@
# Plain .xz output (no size_append): CRC32 check and a modest 1 MiB
# dictionary to keep multi-call decompression memory usage low.
cmd_xzmisc = (cat $(filter-out FORCE,$^) | \
	xz --check=crc32 --lzma2=dict=1MiB) > $@ || \
	(rm -f $@ ; false)
# ASM offsets
# ---------------------------------------------------------------------------

# Default sed regexp - multiline due to syntax constraints
#
# Use [:space:] because LLVM's integrated assembler inserts <tab> around
# the .ascii directive whereas GCC keeps the <space> as-is.
#
# The script extracts the payload of each .ascii directive, then rewrites
# "->NAME VALUE comment" marker lines into "#define NAME VALUE /* comment */"
# and "->#text" lines into plain C comments.
define sed-offsets
	's:^[[:space:]]*\.ascii[[:space:]]*"\(.*\)".*:\1:; \
	/^->/{s:->#\(.*\):/* \1 */:; \
	s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
	s:->::; p;}'
endef
# Use filechk to avoid rebuilds when a header changes, but the resulting file
# does not
# $2 is the include-guard macro name; the generated header wraps the
# sed-offsets output (run over stdin, i.e. the compiler-generated asm)
# between #ifndef/#endif.
define filechk_offsets
	(set -e; \
	 echo "#ifndef $2"; \
	 echo "#define $2"; \
	 echo "/*"; \
	 echo " * DO NOT MODIFY."; \
	 echo " *"; \
	 echo " * This file was generated by Kbuild"; \
	 echo " */"; \
	 echo ""; \
	 sed -ne $(sed-offsets); \
	 echo ""; \
	 echo "#endif" )
endef