/* $Id: trampoline.S,v 1.26 2002/02/09 19:49:30 davem Exp $
 * trampoline.S: Jump start slave processors on sparc64.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/init.h>

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

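	/* OBP method and service name strings used by the hand-built
	 * "call-method" packages below.  "SUNW,itlb-load" and
	 * "SUNW,dtlb-load" are the firmware methods that install
	 * locked I-TLB/D-TLB entries for us.
	 */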
	.data
	.align	8
call_method:
	.asciz	"call-method"
	.align	8
itlb_load:
	.asciz	"SUNW,itlb-load"
	.align	8
dtlb_load:
	.asciz	"SUNW,dtlb-load"

	/* XXX __cpuinit this thing XXX */
#define TRAMP_STACK_SIZE	1024
	.align	16
tramp_stack:
	.skip	TRAMP_STACK_SIZE

	__CPUINIT
	.align		8
	.globl		sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
	BRANCH_IF_SUN4V(g1, niagara_startup)
	BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)

	ba,pt	%xcc, spitfire_startup
	 nop

cheetah_plus_startup:
	/* Preserve OBP chosen DCU and DCR register settings. */
	ba,pt	%xcc, cheetah_generic_startup
	 nop

cheetah_startup:
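	/* Program the dispatch control register (%asr18) and the data
	 * cache unit control register so the I/D caches and MMUs come
	 * up with the settings we want.
	 */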
	mov	DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr	%g1, %asr18

	sethi	%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or	%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx	%g5, 32, %g5
	or	%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa	%g5, [%g0] ASI_DCU_CONTROL_REG
	membar	#Sync
	/* fallthru */

cheetah_generic_startup:
	mov	TSB_EXTENSION_P, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync

	mov	TSB_EXTENSION_S, %g3
	stxa	%g0, [%g3] ASI_DMMU
	membar	#Sync

	mov	TSB_EXTENSION_N, %g3
	stxa	%g0, [%g3] ASI_DMMU
	stxa	%g0, [%g3] ASI_IMMU
	membar	#Sync
	/* fallthru */

niagara_startup:
	/* Disable STICK_INT interrupts. */
	sethi		%hi(0x80000000), %g5
	sllx		%g5, 32, %g5
	wr		%g5, %asr25

	ba,pt		%xcc, startup_continue
	 nop

spitfire_startup:
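	/* Enable the I/D caches and I/D MMUs via the LSU control register. */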
	mov		(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
	stxa		%g1, [%g0] ASI_LSU_CONTROL
	membar		#Sync

startup_continue:
	mov		%o0, %l0
	BRANCH_IF_SUN4V(g1, niagara_lock_tlb)

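	/* Set the interrupt-disable bit (bit 63) in %tick_cmpr so we do
	 * not take tick-compare interrupts during bringup.
	 */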
	sethi		%hi(0x80000000), %g2
	sllx		%g2, 32, %g2
	wr		%g2, 0, %tick_cmpr

	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
	 * We lock 'num_kernel_image_mappings' consecutive entries.
	 */

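	/* Take the byte lock that serializes entry into the PROM:
	 * ldstub atomically writes 0xff and returns the old value,
	 * so spin until we read back zero.
	 */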
	sethi		%hi(prom_entry_lock), %g2
1:	ldstub		[%g2 + %lo(prom_entry_lock)], %g1
	membar		#StoreLoad | #StoreStore
	brnz,pn		%g1, 1b
	 nop

	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x10], %l2
	add		%l2, -(192 + 128), %sp
	flushw

	/* Setup the loop variables:
	 * %l3: VADDR base
	 * %l4: TTE base
	 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
	 * %l6: Number of TTE entries to map
	 * %l7: Highest TTE entry number, we count down
	 */
	sethi		%hi(KERNBASE), %l3
	sethi		%hi(kern_locked_tte_data), %l4
	ldx		[%l4 + %lo(kern_locked_tte_data)], %l4
	clr		%l5
	sethi		%hi(num_kernel_image_mappings), %l6
	lduw		[%l6 + %lo(num_kernel_image_mappings)], %l6
	add		%l6, 1, %l6

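	/* Highest lockable TLB entry index: 15 on Cheetah-class chips,
	 * 63 (64-entry TLB) on Spitfire-class chips.
	 */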
	mov		15, %l7
	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)

	mov		63, %l7
2:

3:
	/* Lock into I-MMU */
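	/* Build a "call-method" argument array on the PROM stack by hand.
	 * The 64-bit cells at [%sp + 2047 (stack bias) + 128] are:
	 *   +0x00 service name ("call-method")
	 *   +0x08 number of input arguments (5)
	 *   +0x10 number of return values (1)
	 *   +0x18 method name ("SUNW,itlb-load" or "SUNW,dtlb-load")
	 *   +0x20 MMU ihandle (prom_mmu_ihandle_cache)
	 *   +0x28 virtual address to lock
	 *   +0x30 TTE data
	 *   +0x38 TLB entry index
	 * then the client interface handler saved at p1275buf + 0x08 is
	 * called with the array's address as its argument.
	 */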
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(itlb_load), %g2
	or		%g2, %lo(itlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]

	/* Each TTE maps 4MB, convert index to offset. */
	sllx		%l5, 22, %g1

	add		%l3, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
	add		%l4, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]	! TTE

	/* TTE index is highest minus loop index. */
	sub		%l7, %l5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x38]

	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	/* Lock into D-MMU */
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(dtlb_load), %g2
	or		%g2, %lo(dtlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]

	/* Each TTE maps 4MB, convert index to offset. */
	sllx		%l5, 22, %g1

	add		%l3, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
	add		%l4, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]	! TTE

	/* TTE index is highest minus loop index. */
	sub		%l7, %l5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x38]

	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	add		%l5, 1, %l5
	cmp		%l5, %l6
	bne,pt		%xcc, 3b
	 nop

	sethi		%hi(prom_entry_lock), %g2
	stb		%g0, [%g2 + %lo(prom_entry_lock)]
	membar		#StoreStore | #StoreLoad

	ba,pt		%xcc, after_lock_tlb
	 nop

niagara_lock_tlb:
	sethi		%hi(KERNBASE), %l3
	sethi		%hi(kern_locked_tte_data), %l4
	ldx		[%l4 + %lo(kern_locked_tte_data)], %l4
	clr		%l5
	sethi		%hi(num_kernel_image_mappings), %l6
	lduw		[%l6 + %lo(num_kernel_image_mappings)], %l6
	add		%l6, 1, %l6

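	/* On sun4v, ask the hypervisor to install permanent 4MB mappings
	 * instead of calling OBP: for each fast trap, %o5 holds the
	 * function number, %o0 the virtual address, %o1 the context (0),
	 * %o2 the TTE data and %o3 which MMU (I or D) to load.
	 */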
1:
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sllx		%l5, 22, %g2
	add		%l3, %g2, %o0
	clr		%o1
	add		%l4, %g2, %o2
	mov		HV_MMU_IMMU, %o3
	ta		HV_FAST_TRAP

	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
	sllx		%l5, 22, %g2
	add		%l3, %g2, %o0
	clr		%o1
	add		%l4, %g2, %o2
	mov		HV_MMU_DMMU, %o3
	ta		HV_FAST_TRAP

	add		%l5, 1, %l5
	cmp		%l5, %l6
	bne,pt		%xcc, 1b
	 nop

after_lock_tlb:
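	/* Run with a clean privileged %pstate, the FPU disabled until
	 * first use (%fprs = 0), and ASI_P as the default data ASI, then
	 * select kernel context 0 in the primary and secondary context
	 * registers (patched to use ASI_MMU on sun4v).
	 */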
	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr		%g0, 0, %fprs

	wr		%g0, ASI_P, %asi

	mov		PRIMARY_CONTEXT, %g7

661:	stxa		%g0, [%g7] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa		%g0, [%g7] ASI_MMU
	.previous

	membar		#Sync
	mov		SECONDARY_CONTEXT, %g7

661:	stxa		%g0, [%g7] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa		%g0, [%g7] ASI_MMU
	.previous

	membar		#Sync

	/* Everything we do here must be done with extreme care until we
	 * properly take over the trap table from the firmware and
	 * hypervisor.  In particular, we cannot make any references to
	 * %g6 (current thread pointer), %g4 (current task pointer), or
	 * %g5 (base of the current cpu's per-cpu area) before then.
	 *
	 * Get onto a temporary stack which is in the locked kernel image.
	 */
	sethi		%hi(tramp_stack), %g1
	or		%g1, %lo(tramp_stack), %g1
	add		%g1, TRAMP_STACK_SIZE, %g1
	sub		%g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
	mov		0, %fp

	/* Put garbage in these registers to trap any access to them. */
	set		0xdeadbeef, %g4
	set		0xdeadbeef, %g5
	set		0xdeadbeef, %g6

	call		init_irqwork_curcpu
	 nop

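	/* tlb_type 3 is "hypervisor" (sun4v); only in that case do we
	 * need to register this cpu's mondo queues with the hypervisor
	 * before taking interrupts.
	 */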
	sethi		%hi(tlb_type), %g3
	lduw		[%g3 + %lo(tlb_type)], %g2
	cmp		%g2, 3
	bne,pt		%icc, 1f
	 nop

	call		hard_smp_processor_id
	 nop

	call		sun4v_register_mondo_queues
	 nop

1:	call		init_cur_cpu_trap
	 ldx		[%l0], %o0

	/* Start using proper page size encodings in ctx register. */
	sethi		%hi(sparc64_kern_pri_context), %g3
	ldx		[%g3 + %lo(sparc64_kern_pri_context)], %g2
	mov		PRIMARY_CONTEXT, %g1

661:	stxa		%g2, [%g1] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa		%g2, [%g1] ASI_MMU
	.previous

	membar		#Sync

	wrpr		%g0, 0, %wstate

	/* As a hack, put &init_thread_union into %g6.
	 * prom_world() loads from here to restore the %asi
	 * register.
	 */
	sethi		%hi(init_thread_union), %g6
	or		%g6, %lo(init_thread_union), %g6

	sethi		%hi(is_sun4v), %o0
	lduw		[%o0 + %lo(is_sun4v)], %o0
	brz,pt		%o0, 1f
	 nop

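	/* sun4v: point scratchpad register 0 at this cpu's MMU fault
	 * status area (inside its trap_block) for our TLB miss handlers,
	 * then compute its physical address to hand to the firmware when
	 * we register the trap table below.
	 */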
	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
	add		%g2, TRAP_PER_CPU_FAULT_INFO, %g2
	stxa		%g2, [%g0] ASI_SCRATCHPAD

	/* Compute physical address:
	 *
	 * paddr = kern_base + (mmfsa_vaddr - KERNBASE)
	 */
	sethi		%hi(KERNBASE), %g3
	sub		%g2, %g3, %g2
	sethi		%hi(kern_base), %g3
	ldx		[%g3 + %lo(kern_base)], %g3
	add		%g2, %g3, %o1
	sethi		%hi(sparc64_ttable_tl0), %o0

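	/* Call OBP's set-trap-table client service directly: cell +0x00
	 * is the service name (prom_set_trap_table_name), +0x08 the
	 * argument count (two on sun4v: trap table address and MMFSA
	 * physical address; one on sun4u), +0x10 the return-value count,
	 * and the arguments follow.
	 */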
	set		prom_set_trap_table_name, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		2, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		0, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	stx		%o0, [%sp + 2047 + 128 + 0x18]
	stx		%o1, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	ba,pt		%xcc, 2f
	 nop

1:	sethi		%hi(sparc64_ttable_tl0), %o0
	set		prom_set_trap_table_name, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		0, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	stx		%o0, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

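	/* The trap table is ours now; switch from the trampoline stack
	 * to this cpu's real thread stack.  %l0 points at the cell the
	 * boot cpu filled in with our thread_info pointer; the initial
	 * %sp sits one stack frame below the top of the THREAD_SHIFT
	 * sized thread union, minus the stack bias.
	 */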
2:	ldx		[%l0], %g6
	ldx		[%g6 + TI_TASK], %g4

	mov		1, %g5
	sllx		%g5, THREAD_SHIFT, %g5
	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add		%g6, %g5, %sp
	mov		0, %fp

	rdpr		%pstate, %o1
	or		%o1, PSTATE_IE, %o1
	wrpr		%o1, 0, %pstate

	call		smp_callin
	 nop
	call		cpu_idle
	 mov		0, %o0
	call		cpu_panic
	 nop

1:	b,a,pt		%xcc, 1b

	.align		8
sparc64_cpu_startup_end: