Mirror of https://github.com/torvalds/linux.git
Commit 517af33237

This way we don't need to lock the TSB into the TLB. The trick is that every TSB load/store is registered into a special instruction patch section. The default uses virtual addresses, and the patch instructions use physical address load/stores. We can't do this on all chips because only cheetah+ and later have the physical variant of the atomic quad load.

Signed-off-by: David S. Miller <davem@davemloft.net>
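The patch-section trick described in the commit message can be sketched as follows: the TSB quad-load macro emits the virtual-address load by default, and records the instruction's address plus a physical-ASI replacement in a dedicated section that boot code can rewrite on cheetah+ and later. This is a minimal sketch modeled on the kernel's asm/tsb.h; the section and ASI names shown (.tsb_ldquad_phys_patch, ASI_NUCLEUS_QUAD_LDD, ASI_QUAD_LDD_PHYS) follow the sparc64 sources, but treat the exact macro as an approximation rather than the definitive implementation.

/* Sketch: default (virtual) TSB quad load, plus a patch-section
 * entry recording the instruction address and its physical-ASI
 * replacement instruction.
 */
#define TSB_LOAD_QUAD(TSB, REG)				\
661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG;	\
	.section	.tsb_ldquad_phys_patch, "ax";		\
	.word		661b;					\
	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG;		\
	.previous;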
40 lines | 834 B | Assembly (sparc64)
/* ITLB ** ICACHE line 1: Context 0 check and TSB load	*/
	ldxa	[%g0] ASI_IMMU_TSB_8KB_PTR, %g1	! Get TSB 8K pointer
	ldxa	[%g0] ASI_IMMU, %g6		! Get TAG TARGET
	srlx	%g6, 48, %g5			! Get context
	brz,pn	%g5, kvmap_itlb			! Context 0 processing
	 nop					! Delay slot (fill me)
	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
	cmp	%g4, %g6			! Compare TAG
	sethi	%hi(_PAGE_EXEC), %g4		! Setup exec check

/* ITLB ** ICACHE line 2: TSB compare and TLB load	*/
	bne,pn	%xcc, tsb_miss_itlb		! Miss
	 mov	FAULT_CODE_ITLB, %g3
	andcc	%g5, %g4, %g0			! Executable?
	be,pn	%xcc, tsb_do_fault
	 nop					! Delay slot, fill me
	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load TLB
	retry					! Trap done
	nop

/* ITLB ** ICACHE line 3:			*/
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

/* ITLB ** ICACHE line 4:			*/
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
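For context, the patch section referenced above is consumed once at boot: on chips that support the physical quad load (cheetah+ and later), early setup code walks the recorded entries, overwrites each default instruction with its physical-address variant, and flushes the instruction cache so instruction fetch sees the new encoding. The sketch below shows the general shape of that pass; the entry layout and symbol names (__tsb_ldquad_phys_patch, __tsb_ldquad_phys_patch_end) are assumptions modeled on the sparc64 sources, not a verbatim copy.

/* Sketch of the boot-time pass that applies the patch section.
 * Called only when the CPU has the physical variant of the atomic
 * quad load; earlier chips keep the default virtual-address loads.
 */
struct tsb_ldquad_phys_patch_entry {
	unsigned int	addr;	/* address of the instruction to patch */
	unsigned int	insn;	/* physical-ASI replacement instruction */
};

extern struct tsb_ldquad_phys_patch_entry
	__tsb_ldquad_phys_patch, __tsb_ldquad_phys_patch_end;

static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *p = &__tsb_ldquad_phys_patch;

	while (p < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = p->addr;

		/* Overwrite the default (virtual) quad load with the
		 * physical variant, then flush so instruction fetch
		 * observes the rewritten instruction.
		 */
		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));
		p++;
	}
}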