Mirror of https://github.com/torvalds/linux.git, synced 2024-11-07 20:51:47 +00:00
bb1f17b037
* create mm/init-mm.c, move init_mm there
* remove INIT_MM, initialize init_mm with a C99 designated initializer
* unexport init_mm on all arches: init_mm is already unexported on x86.

One strange place is some OMAP driver (drivers/video/omap/) which won't build as a module, but it already wants a get_vm_area() export. Somebody should look there.

[akpm@linux-foundation.org: add missing #includes]
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Mike Frysinger <vapier.adi@gmail.com>
Cc: Americo Wang <xiyou.wangcong@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
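For reference, a minimal sketch of what the new mm/init-mm.c described above could look like: init_mm defined once with a C99 designated initializer in place of the old per-arch INIT_MM macro. The field list here is an assumption based on the struct mm_struct layout of that era (mm_rb, pgd, mm_users, mm_count, mmap_sem, page_table_lock, mmlist, cpu_vm_mask); the file actually committed to the tree is authoritative.

#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/cpumask.h>

#include <asm/atomic.h>
#include <asm/pgtable.h>

/*
 * Sketch of the consolidated init_mm: one C99 designated initializer
 * replaces the per-arch INIT_MM() macro.  Field names assume the
 * 2.6.31-era struct mm_struct; consult mm/init-mm.c for the real thing.
 */
struct mm_struct init_mm = {
	.mm_rb		= RB_ROOT,
	.pgd		= swapper_pg_dir,
	.mm_users	= ATOMIC_INIT(2),
	.mm_count	= ATOMIC_INIT(1),
	.mmap_sem	= __RWSEM_INITIALIZER(init_mm.mmap_sem),
	.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
	.mmlist		= LIST_HEAD_INIT(init_mm.mmlist),
	.cpu_vm_mask	= CPU_MASK_ALL,
};

Note that there is no EXPORT_SYMBOL(init_mm) in this sketch, matching the "unexport init_mm on all arches" bullet above.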
44 lines
1.3 KiB
C
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/desc.h>

static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

/*
 * Initial thread structure.
 *
 * We need to make sure that this is THREAD_SIZE aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);

EXPORT_SYMBOL(init_task);

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data.cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;