/* $Id: trampoline.S,v 1.26 2002/02/09 19:49:30 davem Exp $
 * trampoline.S: Jump start slave processors on sparc64.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/pstate.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <asm/hypervisor.h>

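	/* Service and method name strings handed to the PROM's
	 * "call-method" client interface below.
	 */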
	.data
	.align	8
call_method:
	.asciz	"call-method"
	.align	8
itlb_load:
	.asciz	"SUNW,itlb-load"
	.align	8
dtlb_load:
	.asciz	"SUNW,dtlb-load"

	.text
	.align		8
	.globl		sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
	flushw

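	/* Dispatch on cpu type: sun4v/Niagara runs under the
	 * hypervisor, Cheetah and Cheetah+ are UltraSPARC-III class,
	 * and everything else takes the Spitfire path.
	 */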
	BRANCH_IF_SUN4V(g1, niagara_startup)
	BRANCH_IF_CHEETAH_BASE(g1, g5, cheetah_startup)
	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1, g5, cheetah_plus_startup)

	ba,pt		%xcc, spitfire_startup
	 nop

cheetah_plus_startup:
	/* Preserve OBP chosen DCU and DCR register settings. */
	ba,pt		%xcc, cheetah_generic_startup
	 nop

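	/* Cheetah: set the dispatch control register (%asr18) by
	 * hand, then enable the I/D caches and I/D MMUs through the
	 * DCU control register.
	 */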
cheetah_startup:
	mov		DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
	wr		%g1, %asr18

	sethi		%uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	or		%g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
	sllx		%g5, 32, %g5
	or		%g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
	stxa		%g5, [%g0] ASI_DCU_CONTROL_REG
	membar		#Sync

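	/* Clear out the TSB extension registers (primary, secondary
	 * and nucleus).
	 */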
cheetah_generic_startup:
	mov		TSB_EXTENSION_P, %g3
	stxa		%g0, [%g3] ASI_DMMU
	stxa		%g0, [%g3] ASI_IMMU
	membar		#Sync

	mov		TSB_EXTENSION_S, %g3
	stxa		%g0, [%g3] ASI_DMMU
	membar		#Sync

	mov		TSB_EXTENSION_N, %g3
	stxa		%g0, [%g3] ASI_DMMU
	stxa		%g0, [%g3] ASI_IMMU
	membar		#Sync

	/* fallthru */

niagara_startup:
	/* Disable STICK_INT interrupts. */
	sethi		%hi(0x80000000), %g5
	sllx		%g5, 32, %g5
	wr		%g5, %asr25

	ba,pt		%xcc, startup_continue
	 nop

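	/* Spitfire: enable the I/D caches and I/D MMUs via the LSU
	 * control register.
	 */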
spitfire_startup:
	mov		(LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
	stxa		%g1, [%g0] ASI_LSU_CONTROL
	membar		#Sync

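	/* Common path: raise %pil to mask interrupts, and set the
	 * top bit of %tick_cmpr, which should keep tick-compare
	 * interrupts disabled.
	 */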
startup_continue:
	wrpr		%g0, 15, %pil

	sethi		%hi(0x80000000), %g2
	sllx		%g2, 32, %g2
	wr		%g2, 0, %tick_cmpr

	BRANCH_IF_SUN4V(g1, niagara_lock_tlb)

	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
	 * We lock 2 consecutive entries if we are 'bigkernel'.
	 */
	mov		%o0, %l0

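	/* Take prom_entry_lock with ldstub so that only one cpu is
	 * inside the firmware at a time.
	 */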
	sethi		%hi(prom_entry_lock), %g2
1:	ldstub		[%g2 + %lo(prom_entry_lock)], %g1
	membar		#StoreLoad | #StoreStore
	brnz,pn		%g1, 1b
	 nop

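	/* Borrow the PROM's stack while calling the firmware; the
	 * pointer cached at p1275buf + 0x10 appears to be the CIF
	 * stack, and our own %sp is preserved in %l1.
	 */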
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x10], %l2
	mov		%sp, %l1
	add		%l2, -(192 + 128), %sp
	flushw

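	/* Build the P1275 argument array on the PROM stack: service
	 * ("call-method"), 5 input args, 1 return slot, then the
	 * method name, MMU ihandle, virtual address, TTE data and
	 * TLB entry index.
	 */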
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(itlb_load), %g2
	or		%g2, %lo(itlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(KERNBASE), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]

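	/* Lock entry 15 on Cheetah-class chips, entry 63 otherwise,
	 * then trap into the firmware through the CIF handler cached
	 * at p1275buf + 0x08.
	 */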
	mov		15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		63, %g2
1:
	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

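	/* A 'bigkernel' image needs a second locked 4MB ITLB entry
	 * covering KERNBASE + 0x400000.
	 */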
	sethi		%hi(bigkernel), %g2
	lduw		[%g2 + %lo(bigkernel)], %g2
	brz,pt		%g2, do_dtlb
	 nop

	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(itlb_load), %g2
	or		%g2, %lo(itlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(KERNBASE + 0x400000), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	sethi		%hi(0x400000), %g1
	add		%g2, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]

	mov		14, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		62, %g2
1:
	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

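	/* Now the same sequence over again, locking DTLB entries. */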
do_dtlb:
	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(dtlb_load), %g2
	or		%g2, %lo(dtlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(KERNBASE), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]

	mov		15, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		63, %g2
1:
	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

	sethi		%hi(bigkernel), %g2
	lduw		[%g2 + %lo(bigkernel)], %g2
	brz,pt		%g2, do_unlock
	 nop

	sethi		%hi(call_method), %g2
	or		%g2, %lo(call_method), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x00]
	mov		5, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x08]
	mov		1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x10]
	sethi		%hi(dtlb_load), %g2
	or		%g2, %lo(dtlb_load), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x18]
	sethi		%hi(prom_mmu_ihandle_cache), %g2
	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
	stx		%g2, [%sp + 2047 + 128 + 0x20]
	sethi		%hi(KERNBASE + 0x400000), %g2
	stx		%g2, [%sp + 2047 + 128 + 0x28]
	sethi		%hi(kern_locked_tte_data), %g2
	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
	sethi		%hi(0x400000), %g1
	add		%g2, %g1, %g2
	stx		%g2, [%sp + 2047 + 128 + 0x30]

	mov		14, %g2
	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)

	mov		62, %g2
1:
	stx		%g2, [%sp + 2047 + 128 + 0x38]
	sethi		%hi(p1275buf), %g2
	or		%g2, %lo(p1275buf), %g2
	ldx		[%g2 + 0x08], %o1
	call		%o1
	 add		%sp, (2047 + 128), %o0

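	/* Release prom_entry_lock for the next cpu coming up. */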
do_unlock:
	sethi		%hi(prom_entry_lock), %g2
	stb		%g0, [%g2 + %lo(prom_entry_lock)]
	membar		#StoreStore | #StoreLoad

	ba,pt		%xcc, after_lock_tlb
	 nop

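	/* sun4v has no OBP TLB locking; instead, ask the hypervisor
	 * for permanent mappings of the kernel image in both MMUs.
	 */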
niagara_lock_tlb:
	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
	sethi		%hi(KERNBASE), %o1
	clr		%o2
	sethi		%hi(kern_locked_tte_data), %o3
	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
	mov		HV_MMU_IMMU, %o4
	ta		HV_FAST_TRAP

	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
	sethi		%hi(KERNBASE), %o1
	clr		%o2
	sethi		%hi(kern_locked_tte_data), %o3
	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
	mov		HV_MMU_DMMU, %o4
	ta		HV_FAST_TRAP

	sethi		%hi(bigkernel), %g2
	lduw		[%g2 + %lo(bigkernel)], %g2
	brz,pt		%g2, after_lock_tlb
	 nop

	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
	sethi		%hi(KERNBASE + 0x400000), %o1
	clr		%o2
	sethi		%hi(kern_locked_tte_data), %o3
	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
	sethi		%hi(0x400000), %o4
	add		%o3, %o4, %o3
	mov		HV_MMU_IMMU, %o4
	ta		HV_FAST_TRAP

	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o0
	sethi		%hi(KERNBASE + 0x400000), %o1
	clr		%o2
	sethi		%hi(kern_locked_tte_data), %o3
	ldx		[%o3 + %lo(kern_locked_tte_data)], %o3
	sethi		%hi(0x400000), %o4
	add		%o3, %o4, %o3
	mov		HV_MMU_DMMU, %o4
	ta		HV_FAST_TRAP

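	/* Kernel mappings are locked in; restore our own stack and
	 * the %o0 argument we saved at entry, then set a known-good
	 * privileged %pstate.
	 */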
after_lock_tlb:
	mov		%l1, %sp
	flushw

	mov		%l0, %o0

	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
	wr		%g0, 0, %fprs

	/* XXX Buggy PROM... */
	srl		%o0, 0, %o0
	ldx		[%o0], %g6

	wr		%g0, ASI_P, %asi

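	/* Zero the primary and secondary MMU contexts.  On sun4v the
	 * patch section rewrites these stores to use ASI_MMU.
	 */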
	mov		PRIMARY_CONTEXT, %g7

661:	stxa		%g0, [%g7] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa		%g0, [%g7] ASI_MMU
	.previous

	membar		#Sync
	mov		SECONDARY_CONTEXT, %g7

661:	stxa		%g0, [%g7] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa		%g0, [%g7] ASI_MMU
	.previous

	membar		#Sync

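	/* %g6 holds our thread_info; point %sp at the top of its
	 * stack, minus an initial frame and the stack bias.
	 */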
	mov		1, %g5
	sllx		%g5, THREAD_SHIFT, %g5
	sub		%g5, (STACKFRAME_SZ + STACK_BIAS), %g5
	add		%g6, %g5, %sp
	mov		0, %fp

	wrpr		%g0, 0, %wstate
	wrpr		%g0, 0, %tl

	/* Load TBA, then we can resurface. */
	sethi		%hi(sparc64_ttable_tl0), %g5
	wrpr		%g5, %tba

	ldx		[%g6 + TI_TASK], %g4

	wrpr		%g0, 0, %wstate

	call		init_irqwork_curcpu
	 nop

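	/* Only hypervisor cpus (tlb_type == 3) need their mondo
	 * queues registered before traps can be taken.
	 */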
	sethi		%hi(tlb_type), %g3
	lduw		[%g3 + %lo(tlb_type)], %g2
	cmp		%g2, 3
	bne,pt		%icc, 1f
	 nop

	call		sun4v_init_mondo_queues
	 nop

1:	call		init_cur_cpu_trap
	 nop

	/* Start using proper page size encodings in ctx register. */
	sethi		%hi(sparc64_kern_pri_context), %g3
	ldx		[%g3 + %lo(sparc64_kern_pri_context)], %g2
	mov		PRIMARY_CONTEXT, %g1

661:	stxa		%g2, [%g1] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa		%g2, [%g1] ASI_MMU
	.previous

	membar		#Sync

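	/* Re-enable interrupts, hand our trap table to the firmware,
	 * and enter the kernel via smp_callin().
	 */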
	rdpr		%pstate, %o1
	or		%o1, PSTATE_IE, %o1
	wrpr		%o1, 0, %pstate

	call		prom_set_trap_table
	 sethi		%hi(sparc64_ttable_tl0), %o0

	call		smp_callin
	 nop
	call		cpu_idle
	 mov		0, %o0
	call		cpu_panic
	 nop

1:	b,a,pt		%xcc, 1b

	.align		8
sparc64_cpu_startup_end: