/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* SFMMU primitives. These primitives should only be used by sfmmu
* routines.
*/
#if defined(lint)
#include <sys/types.h>
#else /* lint */
#include "assym.h"
#endif /* lint */
#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>
/*
* sfmmu related subroutines
*/
#if defined (lint)
/* ARGSUSED */
void
sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
{}
int
sfmmu_getctx_pri()
{ return(0); }
int
sfmmu_getctx_sec()
{ return(0); }
/* ARGSUSED */
void
sfmmu_setctx_sec(int ctx)
{}
/* ARGSUSED */
void
sfmmu_load_mmustate(sfmmu_t *sfmmup)
{
}
#else /* lint */
/*
 * Invalidate the context of a specific victim, or of any process
 * currently running on this CPU.
*
* %g1 = sfmmup whose ctx is being stolen (victim)
* when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT.
* Note %g1 is the only input argument used by this xcall handler.
*/
ENTRY(sfmmu_raise_tsb_exception)
!
! if (victim == INVALID_CONTEXT) {
! if (sec-ctx > INVALID_CONTEXT)
! write INVALID_CONTEXT to sec-ctx
! if (pri-ctx > INVALID_CONTEXT)
! write INVALID_CONTEXT to pri-ctx
!
! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) {
! return
! } else {
! if (sec-ctx > INVALID_CONTEXT)
! write INVALID_CONTEXT to sec-ctx
!
! if (pri-ctx > INVALID_CONTEXT)
! write INVALID_CONTEXT to pri-ctx
! }
!
sethi %hi(ksfmmup), %g3
ldx [%g3 + %lo(ksfmmup)], %g3
cmp %g1, %g3
be,a,pn %xcc, ptl1_panic /* can't invalidate kernel ctx */
mov PTL1_BAD_RAISE_TSBEXCP, %g1
set INVALID_CONTEXT, %g2
cmp %g1, INVALID_CONTEXT
bne,pt %xcc, 1f /* called from wrap_around? */
mov MMU_SCONTEXT, %g3
ldxa [%g3]ASI_MMU_CTX, %g5 /* %g5 = sec-ctx */
cmp %g5, INVALID_CONTEXT /* kernel or invalid ctx ? */
ble,pn %xcc, 0f /* yes, no need to change */
mov MMU_PCONTEXT, %g7
stxa %g2, [%g3]ASI_MMU_CTX /* set invalid ctx */
membar #Sync
0:
ldxa [%g7]ASI_MMU_CTX, %g5 /* %g5 = pri-ctx */
cmp %g5, INVALID_CONTEXT /* kernel or invalid ctx? */
ble,pn %xcc, 6f /* yes, no need to change */
nop
stxa %g2, [%g7]ASI_MMU_CTX /* set pri-ctx to invalid */
membar #Sync
6: /* flushall tlb */
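!
! Flush the entire TLB with the MMU_DEMAP_ALL hypervisor fast trap.
! Save the %o registers the hcall clobbers in globals, pass no CPU
! list, and request both I-TLB and D-TLB demap; a non-zero return
! status from the hypervisor is fatal.
!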
mov %o0, %g3
mov %o1, %g4
mov %o2, %g6
mov %o5, %g7
mov %g0, %o0 ! XXX no cpu list yet
mov %g0, %o1 ! XXX no cpu list yet
mov MAP_ITLB | MAP_DTLB, %o2
mov MMU_DEMAP_ALL, %o5
ta FAST_TRAP
brz,pt %o0, 5f
nop
ba panic_bad_hcall
mov MMU_DEMAP_ALL, %o1
5:
mov %g3, %o0
mov %g4, %o1
mov %g6, %o2
mov %g7, %o5
ba 3f
nop
1:
/*
* %g1 = sfmmup
* %g2 = INVALID_CONTEXT
* %g3 = MMU_SCONTEXT
*/
CPU_TSBMISS_AREA(%g5, %g6) /* load cpu tsbmiss area */
ldx [%g5 + TSBMISS_UHATID], %g5 /* load usfmmup */
cmp %g5, %g1 /* is it the victim? */
bne,pt %xcc, 2f /* is our sec-ctx a victim? */
nop
ldxa [%g3]ASI_MMU_CTX, %g5 /* %g5 = sec-ctx */
cmp %g5, INVALID_CONTEXT /* kernel or invalid ctx ? */
ble,pn %xcc, 0f /* yes, no need to change */
mov MMU_PCONTEXT, %g7
stxa %g2, [%g3]ASI_MMU_CTX /* set sec-ctx to invalid */
membar #Sync
0:
ldxa [%g7]ASI_MMU_CTX, %g4 /* %g4 = pri-ctx */
cmp %g4, INVALID_CONTEXT /* is pri-ctx the victim? */
ble %icc, 3f /* no need to change pri-ctx */
nop
stxa %g2, [%g7]ASI_MMU_CTX /* set pri-ctx to invalid */
membar #Sync
3:
/* TSB program must be cleared - walkers do not check a context. */
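!
! Unregister the user TSBs by issuing the MMU_TSB_CTXNON0 hypervisor
! fast trap with a TSB count of zero; a failed hcall is fatal.
!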
mov %o0, %g3
mov %o1, %g4
mov %o5, %g7
clr %o0
clr %o1
mov MMU_TSB_CTXNON0, %o5
ta FAST_TRAP
brnz,a,pn %o0, ptl1_panic
mov PTL1_BAD_HCALL, %g1
mov %g3, %o0
mov %g4, %o1
mov %g7, %o5
2:
retry
SET_SIZE(sfmmu_raise_tsb_exception)
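/*
 * Return the current primary (sfmmu_getctx_pri) or secondary
 * (sfmmu_getctx_sec) context number by reading the corresponding MMU
 * context register through ASI_MMU_CTX.
 */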
ENTRY_NP(sfmmu_getctx_pri)
set MMU_PCONTEXT, %o0
retl
ldxa [%o0]ASI_MMU_CTX, %o0
SET_SIZE(sfmmu_getctx_pri)
ENTRY_NP(sfmmu_getctx_sec)
set MMU_SCONTEXT, %o0
retl
ldxa [%o0]ASI_MMU_CTX, %o0
SET_SIZE(sfmmu_getctx_sec)
/*
* Set the secondary context register for this process.
* %o0 = context number
*/
ENTRY_NP(sfmmu_setctx_sec)
/*
* From resume we call sfmmu_setctx_sec with interrupts disabled.
* But we can also get called from C with interrupts enabled. So,
* we need to check first.
*/
/* If interrupts are not disabled, then disable them */
rdpr %pstate, %g1
btst PSTATE_IE, %g1
bnz,a,pt %icc, 1f
wrpr %g1, PSTATE_IE, %pstate /* disable interrupts */
1:
mov MMU_SCONTEXT, %o1
sethi %hi(FLUSH_ADDR), %o4
stxa %o0, [%o1]ASI_MMU_CTX /* set 2nd context reg. */
flush %o4
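! the flush of FLUSH_ADDR above serves as a synchronizing instruction
! for the stxa to the secondary context register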
/*
 * If the routine was entered with interrupts enabled, re-enable them
 * now; otherwise keep interrupts disabled and return without enabling
 * them.
 * %g1 - old intr state
 */
btst PSTATE_IE, %g1
bnz,a,pt %icc, 2f
wrpr %g0, %g1, %pstate /* enable interrupts */
2: retl
nop
SET_SIZE(sfmmu_setctx_sec)
/*
 * Set ktsb_phys to 1; ASI_QUAD_LDD_PHYS is always supported on this
 * platform, so no runtime detection is needed.  The value (1) is
 * returned in %o0.
 */
ENTRY_NP(sfmmu_setup_4lp)
set ktsb_phys, %o2
mov 1, %o1
st %o1, [%o2]
retl
mov %o1, %o0
SET_SIZE(sfmmu_setup_4lp)
/*
* Called to load MMU registers and tsbmiss area
* for the active process. This function should
* only be called from TL=0.
*
* %o0 - hat pointer
*/
ENTRY_NP(sfmmu_load_mmustate)
#ifdef DEBUG
PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l1, %g1)
#endif /* DEBUG */
sethi %hi(ksfmmup), %o3
ldx [%o3 + %lo(ksfmmup)], %o3
cmp %o3, %o0
be,pn %xcc, 3f ! if kernel as, do nothing
nop
/*
 * We need to set up the TSB base registers and the tsbmiss
 * area, and pass the TSB information to the hypervisor.
 */
ldx [%o0 + SFMMU_TSB], %o1 ! %o1 = first tsbinfo
ldx [%o1 + TSBINFO_NEXTPTR], %g2 ! %g2 = second tsbinfo
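!
! Cache the user TSB base registers in the per-CPU scratchpad registers
! so the TSB miss handlers can locate the TSBs quickly at TL > 0.
!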
/* create/set first UTSBREG */
MAKE_UTSBREG(%o1, %o2, %o3) ! %o2 = user tsbreg
SET_UTSBREG(SCRATCHPAD_UTSBREG1, %o2, %o3)
brz,pt %g2, 2f
mov -1, %o2 ! use -1 if no second TSB
/* make 2nd UTSBREG */
MAKE_UTSBREG(%g2, %o2, %o3) ! %o2 = user tsbreg
2:
SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)
#ifdef DEBUG
/* check if hypervisor/hardware should handle user TSB */
sethi %hi(hv_use_non0_tsb), %o2
ld [%o2 + %lo(hv_use_non0_tsb)], %o2
brz,pn %o2, 5f
nop
#endif /* DEBUG */
CPU_ADDR(%o2, %o4) ! load CPU struct addr to %o2 using %o4
ldub [%o2 + CPU_TSTAT_FLAGS], %o1 ! load cpu_tstat_flag to %o1
/*
* %o0 = sfmmup
* %o2 = returned sfmmu cnum on this CPU
* %o4 = scratch
*/
SFMMU_CPU_CNUM(%o0, %o2, %o4)
mov %o5, %o4 ! preserve %o5 for resume
mov %o0, %o3 ! preserve %o0
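!
! Register the process TSBs with the hypervisor via MMU_TSB_CTXNON0.
! Pass a TSB count of zero when TLB statistics are being collected or
! when this process has an invalid context; otherwise use the TSB
! descriptor count and physical address from the hat's HV block.
!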
btst TSTAT_TLB_STATS, %o1
bnz,a,pn %icc, 4f ! ntsb = 0 if TLB stats enabled
clr %o0
cmp %o2, INVALID_CONTEXT
be,a,pn %icc, 4f
clr %o0 ! ntsb = 0 for invalid ctx
ldx [%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_CNT], %o0
4:
ldx [%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_PA], %o1
mov MMU_TSB_CTXNON0, %o5
ta FAST_TRAP ! set TSB info for user process
brnz,a,pn %o0, panic_bad_hcall
mov MMU_TSB_CTXNON0, %o1
mov %o3, %o0 ! restore %o0
mov %o4, %o5 ! restore %o5
5:
ldx [%o0 + SFMMU_ISMBLKPA], %o1 ! copy members of sfmmu
CPU_TSBMISS_AREA(%o2, %o3) ! we need to access from
stx %o1, [%o2 + TSBMISS_ISMBLKPA] ! sfmmu_tsb_miss into the
lduh [%o0 + SFMMU_FLAGS], %o3 ! per-CPU tsbmiss area.
stx %o0, [%o2 + TSBMISS_UHATID]
stuh %o3, [%o2 + TSBMISS_HATFLAGS]
3: retl
nop
SET_SIZE(sfmmu_load_mmustate)
#endif /* lint */
#if defined(lint)
/* Prefetch "struct tsbe" while walking TSBs */
/*ARGSUSED*/
void
prefetch_tsbe_read(struct tsbe *tsbep)
{}
/* Prefetch the tsbe that we are about to write */
/*ARGSUSED*/
void
prefetch_tsbe_write(struct tsbe *tsbep)
{}
#else /* lint */
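/*
 * No-op implementations: presumably retained so that common sfmmu code
 * can call them unconditionally, with prefetching left to platforms
 * that benefit from it.
 */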
ENTRY(prefetch_tsbe_read)
retl
nop
SET_SIZE(prefetch_tsbe_read)
ENTRY(prefetch_tsbe_write)
retl
nop
SET_SIZE(prefetch_tsbe_write)
#endif /* lint */