f34e3b61f2
Currently, most of the per-cpu data that is accessed by different cpus has a ____cacheline_aligned_in_smp attribute. Move all this data to the new per-cpu shared data section: .data.percpu.shared_aligned. This will separate the percpu data which is referenced frequently by other cpus from the local-only percpu data.

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
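For context, a minimal sketch of how such a DEFINE_PER_CPU_SHARED_ALIGNED macro can be built. The MODULE guard and the helper name SHARED_ALIGNED_SECTION are assumptions inferred from the commit description, not copied from the tree:

/* Sketch: place a per-cpu variable in the shared-aligned percpu section. */
#ifdef MODULE
#define SHARED_ALIGNED_SECTION ".data.percpu"	/* assumption: modules have no shared section */
#else
#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
#endif

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
	__attribute__((__section__(SHARED_ALIGNED_SECTION)))	\
	__typeof__(type) per_cpu__##name			\
	____cacheline_aligned_in_smp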
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/desc.h>

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);

EXPORT_SYMBOL(init_mm);

/*
 * Initial task structure.
 *
 * We need to make sure that this is 8192-byte aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry.
 */
union thread_union init_thread_union
	__attribute__((__section__(".data.init_task"))) =
		{ INIT_THREAD_INFO(init_task) };
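
/*
 * For reference, the linker-script side of that alignment. This is a
 * sketch; the exact vmlinux.lds.S fragment is an assumption and varies
 * by architecture:
 *
 *	. = ALIGN(8192);
 *	.data.init_task : { *(.data.init_task) }
 *
 * Anything placed in the .data.init_task section therefore starts on an
 * 8192-byte boundary, which is what the process-stack handling relies on.
 */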

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);

EXPORT_SYMBOL(init_task);

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they can be placed in the per-cpu shared-aligned section,
 * .data.percpu.shared_aligned. Since TSS's are completely CPU-local,
 * we want them on exact cacheline boundaries, to eliminate cacheline
 * ping-pong.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
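
/*
 * For reference, a sketch of the alignment attribute used above. The
 * real definition lives in <linux/cache.h>; the exact shape shown here
 * is an assumption, not copied from that header:
 *
 *	#ifdef CONFIG_SMP
 *	#define ____cacheline_aligned_in_smp \
 *		__attribute__((__aligned__(SMP_CACHE_BYTES)))
 *	#else
 *	#define ____cacheline_aligned_in_smp
 *	#endif
 *
 * Under CONFIG_SMP the variable is padded out to a cacheline boundary;
 * on UP kernels the attribute compiles away to nothing.
 */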

/* Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#define ALIGN_TO_4K __attribute__((section(".data.init_task")))