mirror of
https://github.com/torvalds/linux.git
synced 2024-12-29 06:12:08 +00:00
f34e3b61f2
Currently most of the per cpu data, which is accessed by different cpus, has a ____cacheline_aligned_in_smp attribute. Move all this data to the new per cpu shared data section: .data.percpu.shared_aligned. This will separate the percpu data which is referenced frequently by other cpus from the local only percpu data. Signed-off-by: Fenghua Yu <fenghua.yu@intel.com> Acked-by: Suresh Siddha <suresh.b.siddha@intel.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Christoph Lameter <clameter@sgi.com> Cc: "Luck, Tony" <tony.luck@intel.com> Cc: Andi Kleen <ak@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
47 lines
1.2 KiB
C
47 lines
1.2 KiB
C
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
#include <linux/mqueue.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/desc.h>

static struct fs_struct init_fs = INIT_FS;
|
|
static struct files_struct init_files = INIT_FILES;
|
|
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
|
|
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
|
|
struct mm_struct init_mm = INIT_MM(init_mm);
|
|
|
|
EXPORT_SYMBOL(init_mm);
|
|
|
|
/*
|
|
* Initial thread structure.
|
|
*
|
|
* We need to make sure that this is THREAD_SIZE aligned due to the
|
|
* way process stacks are handled. This is done by having a special
|
|
* "init_task" linker map entry..
|
|
*/
|
|
union thread_union init_thread_union
|
|
__attribute__((__section__(".data.init_task"))) =
|
|
{ INIT_THREAD_INFO(init_task) };
|
|
|
|
/*
|
|
* Initial task structure.
|
|
*
|
|
* All other task structs will be allocated on slabs in fork.c
|
|
*/
|
|
struct task_struct init_task = INIT_TASK(init_task);
|
|
|
|
EXPORT_SYMBOL(init_task);
|
|
|
|
/*
|
|
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
|
|
* no more per-task TSS's.
|
|
*/
|
|
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
|
|
|