linux/arch/mips/kernel/vmlinux.lds.S
commit 19df0c2fef ("percpu: align percpu readmostly subsection to cacheline") by Tejun Heo
Currently the percpu readmostly subsection may share cachelines with other
percpu subsections, which can result in unnecessary cacheline bouncing and
performance degradation.

This patch adds a @cacheline parameter to the PERCPU() and PERCPU_VADDR()
linker macros and makes each architecture's linker script specify its
cacheline size and use it to align the percpu subsections.

This is based on Shaohua's x86-only patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Shaohua Li <shaohua.li@intel.com>
Date: 2011-01-25 14:26:50 +01:00
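
With this change an architecture passes its L1 cacheline size as the first
argument of PERCPU(); the MIPS usage appears further down in this script:

    PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)

A simplified sketch of the layout the generic macro produces (subsection names
are assumed from the include/asm-generic/vmlinux.lds.h of this era, and
"cacheline" stands for the new macro parameter; shown only to illustrate where
the cacheline argument takes effect):

    .data..percpu : {
        __per_cpu_start = .;
        *(.data..percpu..first)
        . = ALIGN(PAGE_SIZE);
        *(.data..percpu..page_aligned)
        . = ALIGN(cacheline);
        *(.data..percpu..readmostly)
        . = ALIGN(cacheline);
        *(.data..percpu)
        *(.data..percpu..shared_aligned)
        __per_cpu_end = .;
    }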


#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
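
/* The toolchain predefines "mips" as a macro on MIPS targets; redefine it to
 * itself so that OUTPUT_ARCH(mips) below is not expanded to something else.
 */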
#undef mips
#define mips mips
OUTPUT_ARCH(mips)
ENTRY(kernel_entry)
PHDRS {
	text PT_LOAD FLAGS(7);	/* RWX */
	note PT_NOTE FLAGS(4);	/* R__ */
}
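
/* On 32-bit kernels, jiffies aliases the low 32-bit word of jiffies_64;
 * on big-endian that word sits at offset 4, hence the "+ 4" below.
 */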
#ifdef CONFIG_32BIT
#ifdef CONFIG_CPU_LITTLE_ENDIAN
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
#else
jiffies = jiffies_64;
#endif
SECTIONS
{
#ifdef CONFIG_BOOT_ELF64
	/* Read-only sections, merged into text segment: */
	/* . = 0xc000000000000000; */

	/* This is the value for an Origin kernel, taken from an IRIX kernel. */
	/* . = 0xc00000000001c000; */

	/* Set the vaddr for the text segment to a value
	 *   >= 0xa800 0000 0001 9000 if symmon is not going to be configured
	 *   >= 0xa800 0000 0030 0000 otherwise
	 */

	/* . = 0xa800000000300000; */
	. = 0xffffffff80300000;
#endif
	. = VMLINUX_LOAD_ADDRESS;
	/* read-only */
	_text = .;	/* Text and read-only data */
	.text : {
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		*(.text.*)
		*(.fixup)
		*(.gnu.warning)
	} :text = 0
	_etext = .;	/* End of text section */

	EXCEPTION_TABLE(16)

	/* Exception table for data bus errors */
	__dbe_table : {
		__start___dbe_table = .;
		*(__dbe_table)
		__stop___dbe_table = .;
	}

	NOTES :text :note
	.dummy : { *(.dummy) } :text

	RODATA

	/* writeable */
	.data : {	/* Data */
		. = . + DATAOFFSET;	/* for CONFIG_MAPPED_KERNEL */

		INIT_TASK_DATA(PAGE_SIZE)
		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
		DATA_DATA
		CONSTRUCTORS
	}
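
	/* _gp points 0x8000 bytes past the start of the small-data area so the
	 * full +/-32 KiB range of signed 16-bit $gp-relative offsets covers the
	 * .lit8, .lit4 and .sdata sections placed below.
	 */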
	_gp = . + 0x8000;
	.lit8 : {
		*(.lit8)
	}
	.lit4 : {
		*(.lit4)
	}
	/* We want the small data sections together, so single-instruction offsets
	   can access them all, and initialized data all before uninitialized, so
	   we can shorten the on-disk segment size. */
	.sdata : {
		*(.sdata)
	}
	_edata = .;	/* End of data section */

	/* will be freed after init */
	. = ALIGN(PAGE_SIZE);	/* Init code and data */
	__init_begin = .;
	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)
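
	/* Machine/board description entries collected at link time, bounded by
	 * __mips_machines_start and __mips_machines_end.
	 */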
	. = ALIGN(4);
	.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
		__mips_machines_start = .;
		*(.mips.machines.init)
		__mips_machines_end = .;
	}

	/* .exit.text is discarded at runtime, not link time, to deal with
	 * references from .rodata
	 */
	.exit.text : {
		EXIT_TEXT
	}
	.exit.data : {
		EXIT_DATA
	}
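
	/* Page-aligned per-CPU data; the first argument is the L1 cacheline size
	 * used to align the percpu subsections (including the readmostly one).
	 */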
	PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
	. = ALIGN(PAGE_SIZE);
	__init_end = .;
	/* freed after init ends here */
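
	/* Uninitialized data; BSS_SECTION(sbss_align, bss_align, stop_align),
	 * with no additional alignment requested here.
	 */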
	BSS_SECTION(0, 0, 0)

	_end = . ;

	/* These mark the ABI of the kernel for debuggers. */
	.mdebug.abi32 : {
		KEEP(*(.mdebug.abi32))
	}
	.mdebug.abi64 : {
		KEEP(*(.mdebug.abi64))
	}

	/* This is the MIPS specific mdebug section. */
	.mdebug : {
		*(.mdebug)
	}

	STABS_DEBUG
	DWARF_DEBUG

	/* These must appear regardless of the current location counter (.). */
	.gptab.sdata : {
		*(.gptab.data)
		*(.gptab.sdata)
	}
	.gptab.sbss : {
		*(.gptab.bss)
		*(.gptab.sbss)
	}

	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : {
		/* ABI crap starts here */
		*(.MIPS.options)
		*(.options)
		*(.pdr)
		*(.reginfo)
	}
}