/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* SFMMU primitives. These primitives should only be used by sfmmu
* routines.
*/
#if defined(lint)
#else /* lint */
#include "assym.h"
#endif /* lint */
#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/machthread.h>
#include <sys/trapstat.h>
/*
* sfmmu related subroutines
*/
#if defined (lint)

/* ARGSUSED */
void
sfmmu_ctx_steal_tl1(uint64_t sctx, uint64_t rctx)
{}

/* ARGSUSED */
void
sfmmu_raise_tsb_exception(uint64_t sctx, uint64_t rctx)
{}

/* ARGSUSED */
void
sfmmu_itlb_ld(caddr_t vaddr, int ctxnum, tte_t *tte)
{}

/* ARGSUSED */
void
sfmmu_dtlb_ld(caddr_t vaddr, int ctxnum, tte_t *tte)
{}

int
sfmmu_getctx_pri()
{ return(0); }

int
sfmmu_getctx_sec()
{ return(0); }

/* ARGSUSED */
void
sfmmu_setctx_sec(int ctx)
{}

/* ARGSUSED */
void
sfmmu_load_mmustate(sfmmu_t *sfmmup)
{
}
#else /* lint */
/*
* 1. If stealing ctx, flush all TLB entries whose ctx is ctx-being-stolen.
* 2. If processor is running in the ctx-being-stolen, set the
* context to the resv context. That is:
* If processor is in user-mode - pri-ctx and sec-ctx are both the
* ctx-being-stolen - change both registers to the resv ctx.
* If processor is in kernel-mode - pri-ctx is 0, sec-ctx is the
* ctx-being-stolen - just change the sec-ctx register to the resv ctx.
* When the thread returns to user-mode, user_rtt will change pri-ctx.
*
* Note: For multiple page size TLB, no need to set page sizes for
* DEMAP context.
*
* %g1 = ctx being stolen (victim)
* %g2 = invalid ctx to replace victim with
*/
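/*
 * A rough C sketch of the steps above (illustrative only; the helper
 * names below are not real kernel symbols):
 *
 *	sfmmu_ctx_steal_tl1(uint64_t victim, uint64_t resv)
 *	{
 *		demap_ctx_from_tlbs(victim);	(step 1)
 *		if (get_sec_ctx() == victim)
 *			set_sec_ctx(resv);	(step 2; user_rtt will
 *						 fix pri-ctx later)
 *	}
 */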
/*
* Flush TLBs.
*/
/* pri-ctx to victim */
/* fall through to the code below */
/*
* We enter here if we're just raising a TSB miss
* exception, without switching MMU contexts. In
* this case, there is no need to flush the TLB.
*/
!
! if (sec-ctx != victim) {
!	return
! } else {
!	if (pri-ctx == victim) {
!		write INVALID_CONTEXT to sec-ctx and pri-ctx
!	} else {
!		write INVALID_CONTEXT to sec-ctx only
!	}
! }
!
/* next instruction is retry so no membar sync */
2:
#ifdef DEBUG
1:
#endif /* DEBUG */
#ifdef CHEETAHPLUS_ERRATUM_34
!
!
1:
!
!
2:
!
! Found an unlocked or invalid entry; explicitly load
! our locked entry here.
!
sethi %hi(FLUSH_ADDR), %o1 ! Flush addr doesn't matter
0:
#endif /* CHEETAHPLUS_ERRATUM_34 */
stxa %o0, [%o5]ASI_IMMU ! Set up tag access
stxa %g1, [%g0]ASI_ITLB_IN ! Load TTE into the ITLB
flush %o1 ! Flush required for I-MMU
3:
retl
wrpr %g0, %o3, %pstate ! Enable interrupts
SET_SIZE(sfmmu_itlb_ld)
/*
* Load an entry into the DTLB.
*
* Special handling is required for locked entries since there
* are some TLB slots that are reserved for the kernel but not
* always held locked. We want to avoid loading locked TTEs
* into those slots since they could be displaced.
*/
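/*
 * In outline, roughly (a C-style sketch; the helper names are
 * illustrative, not real kernel symbols):
 *
 *	sfmmu_dtlb_ld(caddr_t vaddr, int ctxnum, tte_t *tte)
 *	{
 *		tagacc = (vaddr & ~MMU_PAGEOFFSET) | ctxnum;
 *		if (!tte_is_locked(tte)) {
 *			dtlb_load_unlocked(tagacc, tte);   (via ASI_DTLB_IN)
 *		} else {
 *			(scan the kernel-reserved slots top-down for an
 *			 invalid or unlocked entry to displace; panic if
 *			 every candidate slot holds a locked TTE)
 *			idx = find_displaceable_slot();
 *			dtlb_load_at_index(idx, tagacc, tte);
 *		}
 *	}
 */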
ENTRY_NP(sfmmu_dtlb_ld)
rdpr %pstate, %o3
#ifdef DEBUG
andcc %o3, PSTATE_IE, %g0 ! if interrupts already
bnz,pt %icc, 1f ! disabled, panic
nop
sethi %hi(panicstr), %g1
ldx [%g1 + %lo(panicstr)], %g1
tst %g1
bnz,pt %icc, 1f
nop
sethi %hi(sfmmu_panic1), %o0
call panic
or %o0, %lo(sfmmu_panic1), %o0
1:
#endif /* DEBUG */
wrpr %o3, PSTATE_IE, %pstate ! disable interrupts
srln %o0, MMU_PAGESHIFT, %o0
slln %o0, MMU_PAGESHIFT, %o0 ! clear page offset
or %o0, %o1, %o0 ! or in ctx to form tagacc
ldx [%o2], %g1
sethi %hi(ctx_pgsz_array), %o2 ! Check for T8s
ldn [%o2 + %lo(ctx_pgsz_array)], %o2
brz %o2, 1f
set MMU_TAG_ACCESS, %o5
ldub [%o2 + %o1], %o2 ! Cheetah+: set up tag access
sll %o2, TAGACCEXT_SHIFT, %o2 ! extension register so entry
set MMU_TAG_ACCESS_EXT, %o4 ! can go into T8 if unlocked
stxa %o2,[%o4]ASI_DMMU
membar #Sync
1:
andcc %g1, TTE_LCK_INT, %g0 ! Locked entries require
bnz,pn %icc, 2f ! special handling
sethi %hi(dtlb_resv_ttenum), %g3
stxa %o0,[%o5]ASI_DMMU ! Load unlocked TTE
stxa %g1,[%g0]ASI_DTLB_IN ! via DTLB_IN
membar #Sync
retl
wrpr %g0, %o3, %pstate ! enable interrupts
2:
ld [%g3 + %lo(dtlb_resv_ttenum)], %g3
sll %g3, 3, %g3 ! First reserved idx in TLB 0
sub %g3, (1 << 3), %g3 ! Decrement idx
3:
ldxa [%g3]ASI_DTLB_ACCESS, %g4 ! Load TTE from TLB 0
!
! If this entry isn't valid, we'll choose to displace it (regardless
! of the lock bit).
!
brgez,pn %g4, 4f ! TTE is >= 0 iff not valid
nop
andcc %g4, TTE_LCK_INT, %g0 ! Check for lock bit
bz,pn %icc, 4f ! If unlocked, go displace
nop
sub %g3, (1 << 3), %g3 ! Decrement idx
brgez %g3, 3b
nop
sethi %hi(sfmmu_panic5), %o0 ! We searched all entries and
call panic ! found no unlocked TTE so
or %o0, %lo(sfmmu_panic5), %o0 ! give up.
4:
stxa %o0,[%o5]ASI_DMMU ! Setup tag access
#ifdef OLYMPUS_SHARED_FTLB
stxa %g1,[%g0]ASI_DTLB_IN
#else
stxa %g1,[%g3]ASI_DTLB_ACCESS ! Displace entry at idx
#endif
membar #Sync
retl
wrpr %g0, %o3, %pstate ! enable interrupts
SET_SIZE(sfmmu_dtlb_ld)
ENTRY_NP(sfmmu_getctx_pri)
set MMU_PCONTEXT, %o0
retl
ldxa [%o0]ASI_MMU_CTX, %o0
SET_SIZE(sfmmu_getctx_pri)
ENTRY_NP(sfmmu_getctx_sec)
set MMU_SCONTEXT, %o0
set CTXREG_CTX_MASK, %o1
ldxa [%o0]ASI_MMU_CTX, %o0
retl
and %o0, %o1, %o0
SET_SIZE(sfmmu_getctx_sec)
/*
* Set the secondary context register for this process.
* %o0 = context number for this process.
*/
ENTRY_NP(sfmmu_setctx_sec)
/*
* From resume we call sfmmu_setctx_sec with interrupts disabled.
* But we can also get called from C with interrupts enabled. So,
* we need to check first. Also, resume saves state in %o3 and %o5,
* so we can't use those registers here.
*/
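/*
 * A minimal sketch of the intended flow (illustrative pseudocode;
 * the helpers are not real kernel symbols):
 *
 *	pstate = rdpr_pstate();
 *	if (pstate & PSTATE_IE)
 *		wrpr_pstate(pstate & ~PSTATE_IE);	(disable interrupts)
 *	stxa(ctx, MMU_SCONTEXT, ASI_MMU_CTX);		(write sec-ctx)
 *	wrpr_pstate(pstate);				(restore interrupts)
 */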
/* If interrupts are not disabled, then disable them */
1:
2:
1: retl
/*
* set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
* returns the detection value in %o0.
*
* Currently ASI_QUAD_LDD_PHYS is supported in processors as follows
* - cheetah+ and later (greater or equal to CHEETAH_PLUS_IMPL)
* - FJ OPL Olympus-C and later (less than SPITFIRE_IMPL)
*
*/
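/*
 * Equivalent C-style logic (a sketch; ktsb_phys is the real variable,
 * but get_cpu_impl() merely stands in for the GET_CPU_IMPL() macro):
 *
 *	impl = get_cpu_impl();
 *	ktsb_phys = (impl >= CHEETAH_PLUS_IMPL || impl < SPITFIRE_IMPL);
 *	return (ktsb_phys);
 */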
GET_CPU_IMPL(%o0);
4:
3: retl
/*
* Called to load MMU registers and tsbmiss area
* for the active process. This function should
* only be called from TL=0.
*
* %o0 - hat pointer
*/
/*
* From resume we call sfmmu_load_mmustate with interrupts disabled.
* But we can also get called from C with interrupts enabled. So,
* we need to check first. Also, resume saves state in %o5 and we
* can't use this register here.
*/
/* If interrupts are not disabled, then disable them */
1:
/*
* We need to set up the TSB base register, tsbmiss
* area, and load locked TTE(s) for the TSB.
*/
#ifdef UTSB_PHYS
/*
* UTSB_PHYS accesses user TSBs via physical addresses. The first
* TSB is in the MMU I/D TSB Base registers. The second TSB uses a
* designated ASI_SCRATCHPAD register as a pseudo TSB base register.
*/
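/*
 * Conceptually (an illustrative sketch, not the real macros):
 *
 *	set_tsb_base_regs(first_tsb_pa);	(MMU I/D TSB base registers)
 *	if (second_tsb_pa != 0)
 *		set_scratchpad_tsb_reg(second_tsb_pa);
 */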
2:
#else /* UTSB_PHYS */
/*
* We have a second TSB for this process, so we need to
* encode data for both the first and second TSB in our single
* TSB base register. See hat_sfmmu.h for details on what bits
* correspond to which TSB.
* We also need to load a locked TTE into the TLB for the second TSB
* in this case.
*/
/*
* Load the TTE for the first TSB at the appropriate location in
* the TLB
*/
#endif /* UTSB_PHYS */
3: retl
#endif /* lint */
#if defined (lint)
/*
* Invalidate all of the entries within the tsb, by setting the inv bit
* in the tte_tag field of each tsbe.
*
* We take advantage of the fact that TSBs are page aligned and a multiple
* of PAGESIZE in size, which lets us use block stores.
*
* See TSB_LOCK_ENTRY and the miss handlers for how this works in practice
* (in short, we set all bits in the upper word of the tag, and we give the
* invalid bit precedence over other tag bits in both places).
*/
/* ARGSUSED */
void
sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
{}
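/*
 * Functionally, the block-store loop in the assembly version is
 * equivalent to this C sketch (the tag field name is illustrative;
 * see hat_sfmmu.h for the real tsbe layout):
 *
 *	void
 *	sfmmu_inv_tsb_fast(caddr_t tsb_base, uint_t tsb_bytes)
 *	{
 *		struct tsbe *tsbe = (struct tsbe *)tsb_base;
 *		struct tsbe *end = (struct tsbe *)(tsb_base + tsb_bytes);
 *
 *		for (; tsbe < end; tsbe++)
 *			tsbe->tag_upper_word = 0xffffffff;   (all bits set,
 *							      inv included)
 *	}
 */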
#else /* lint */
#define VIS_BLOCKSIZE 64
! kpreempt_disable();
.sfmmu_inv_blkstart:
! stda %d0, [%i0+192]%asi ! in dly slot of branch that got us here
stda %d0, [%i0+128]%asi
stda %d0, [%i0+64]%asi
stda %d0, [%i0]%asi
add %i0, %i4, %i0
sub %i1, %i4, %i1
.sfmmu_inv_doblock:
cmp %i1, (4*VIS_BLOCKSIZE) ! check for completion
bgeu,a %icc, .sfmmu_inv_blkstart
stda %d0, [%i0+192]%asi
.sfmmu_inv_finish:
membar #Sync
btst FPRS_FEF, %l0 ! saved from above
bz,a .sfmmu_inv_finished
wr %l0, 0, %fprs ! restore fprs
! restore fpregs from stack
ldda [%l1]ASI_BLK_P, %d0
membar #Sync
wr %l0, 0, %fprs ! restore fprs
.sfmmu_inv_finished:
! kpreempt_enable();
ldsb [THREAD_REG + T_PREEMPT], %l3
dec %l3
stb %l3, [THREAD_REG + T_PREEMPT]
ret
restore
SET_SIZE(sfmmu_inv_tsb_fast)
#endif /* lint */
#if defined(lint)
/*
* Prefetch "struct tsbe" while walking TSBs.
* Prefetch 7 cache lines ahead of where we are now.
* #n_reads is being used since #one_read only applies to
* floating point reads, and we are not doing floating point
* reads. However, this has the negative side effect of polluting
* the ecache.
* The 448 comes from (7 * 64), which is how far ahead of our current
* address we want to prefetch.
*/
/*ARGSUSED*/
void
prefetch_tsbe_read(struct tsbe *tsbep)
{}
/* Prefetch the tsbe that we are about to write */
/*ARGSUSED*/
void
prefetch_tsbe_write(struct tsbe *tsbep)
{}
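/*
 * For reference, these map roughly onto the GCC/clang prefetch builtin
 * (an analogy only, not how this file is built):
 *
 *	prefetch_tsbe_read(t):	__builtin_prefetch((char *)t + 448, 0, 3);
 *	prefetch_tsbe_write(t):	__builtin_prefetch((char *)t, 1, 3);
 */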
#else /* lint */
ENTRY(prefetch_tsbe_read)
retl
prefetch [%o0+448], #n_reads
SET_SIZE(prefetch_tsbe_read)
ENTRY(prefetch_tsbe_write)
retl
prefetch [%o0], #n_writes
SET_SIZE(prefetch_tsbe_write)
#endif /* lint */