/* mach_sfmmu.h revision 9d0d62ad2e60e8f742a2e723d06e88352ee6a1f3 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* VM - Hardware Address Translation management.
*
* This file describes the contents of the sun reference mmu (sfmmu)
* specific hat data structures and the sfmmu specific hat procedures.
*/
#ifndef _VM_MACH_SFMMU_H
#define _VM_MACH_SFMMU_H
#include <sys/cheetahregs.h>
#include <sys/spitregs.h>
#include <sys/opl_olympus_regs.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* On sun4u platforms, user TSBs are accessed via virtual address by default.
* Platforms that support ASI_SCRATCHPAD registers can define UTSB_PHYS in the
* platform Makefile to access user TSBs via physical address but must also
* designate one ASI_SCRATCHPAD register to hold the second user TSB. To
* designate the user TSB scratchpad register, platforms must provide a
* definition for SCRATCHPAD_UTSBREG2 below.
*
* Platforms that use UTSB_PHYS do not allocate 2 locked TLB entries to access
* the user TSBs.
*/
#if defined(UTSB_PHYS)
#if defined(_OPL)
/*
 * OPL platforms provide the SCRATCHPAD_UTSBREG2 definition required
 * by UTSB_PHYS (see the block comment above), so nothing to do here.
 */
#else
/* Any other platform building with UTSB_PHYS must supply the register. */
#error "Compiling UTSB_PHYS but no SCRATCHPAD_UTSBREG2 specified"
#endif /* _OPL */
#endif /* UTSB_PHYS */
#ifdef _ASM
/*
* sfmmu_alloc_ctx().
* if is_shctx = 0 then we set the SCONTEXT to cnum and invalidate the
* SHARED_CONTEXT register. If is_shctx = 1 then only the SHARED_CONTEXT
* register is set.
* (See additional comments in sfmmu_alloc_ctx)
* Input:
* cnum = cnum
* tmp1 : %o4 scratch
* tmp2 : %o5 scratch
* label: used as local branch targets
*/
/* BEGIN CSTYLED */ \
/* END CSTYLED */
/*
* This macro is used in the MMU code to check if TL should be lowered from
* 2 to 1 to pop trapstat's state. See the block comment in trapstat.c
* for details.
*/
nop; \
9:
/*
* The following macros allow us to share majority of the
* SFMMU code between sun4u and sun4v platforms.
*/
/*
* Macro to switch to alternate global register on sun4u platforms
* (not applicable to sun4v platforms)
*/
#define USE_ALTERNATE_GLOBALS(scr) \
/*
* Macro to set %gl register value on sun4v platforms
* (not applicable to sun4u platforms)
*/
#define SET_GL_REG(val)		/* expands to nothing on sun4u (%gl is sun4v-only) */
/*
* Get MMU data tag access register value
*
* In:
* tagacc, scr1 = scratch registers
* Out:
* tagacc = MMU data tag access register value
*/
/*
* Get MMU data tag target register
*
* In:
* ttarget, scr1 = scratch registers
* Out:
* ttarget = MMU data tag target register value
*/
/*
* Get MMU data/instruction tag access register values
*
* In:
* dtagacc, itagacc, scr1, scr2 = scratch registers
* Out:
* dtagacc = MMU data tag access register value
* itagacc = MMU instruction tag access register value
*/
/*
* Get MMU data fault address from the tag access register
*
* In:
* daddr, scr1 = scratch registers
* Out:
* daddr = MMU data fault address
*/
/*
* Load ITLB entry
*
* In:
* tte = reg containing tte
* scr1, scr2, scr3, scr4 = scratch registers (not used)
*/
/*
* Load DTLB entry
*
* In:
* tte = reg containing tte
* scr1, scr2, scr3, scr4 = scratch register (not used)
*/
/*
* Returns PFN given the TTE and vaddr
*
* In:
* tte = reg containing tte
* vaddr = reg containing vaddr
* scr1, scr2, scr3 = scratch registers
* Out:
* tte = PFN value
*/
/* BEGIN CSTYLED */ \
/* END CSTYLED */ \
/* CSTYLED */ \
/*
* TTE_SET_REF_ML is a macro that updates the reference bit if it is
* not already set. Older sun4u platforms use the virtual address to
* flush entries from dcache, this is not available here but there are
* only two positions in the 64K dcache where the cache line can reside
* so we need to flush both of them.
*
* Parameters:
* tte = reg containing tte
* ttepa = physical pointer to tte
* tsbarea = tsb miss area
* tmp1 = tmp reg
* tmp2 = tmp reg
* label = temporary label
*/
/* BEGIN CSTYLED */ \
/* check reference bit */ \
nop; \
GET_CPU_IMPL(tmp1); \
/* update reference bit */ \
/* END CSTYLED */
/*
* TTE_SET_REFMOD_ML is a macro that updates the reference and modify bits
* if not already set.
*
* Parameters:
* tte = reg containing tte
* ttepa = physical pointer to tte
* tsbarea = tsb miss area
* tmp1 = tmp reg
* tmp2 = tmp reg
* label = temporary label
* exitlabel = label where to jump to if write perm bit not set.
*/
exitlabel) \
/* BEGIN CSTYLED */ \
/* check reference bit */ \
nop; \
nop; \
GET_CPU_IMPL(tmp1); \
/* update reference bit */ \
/* END CSTYLED */
#ifndef UTSB_PHYS
/*
* Synthesize TSB base register contents for a process with
* a single TSB.
*
* We patch the virtual address mask in at runtime since the
* number of significant virtual address bits in the TSB VA
* can vary depending upon the TSB slab size being used on the
* machine.
*
* In:
* tsbinfo = TSB info pointer (ro)
* vabase = value of utsb_vabase (ro)
* Out:
* tsbreg = value to program into TSB base register
*/
/* BEGIN CSTYLED */ \
/* END CSTYLED */
/*
* Synthesize TSB base register contents for a process with
* two TSBs. See hat_sfmmu.h for the layout of the TSB base
* register in this case.
*
* In:
* tsb1 = pointer to first TSB info (ro)
* tsb2 = pointer to second TSB info (ro)
* Out:
* tsbreg = value to program into TSB base register
*/
/* BEGIN CSTYLED */ \
/* END CSTYLED */
/*
* Load the locked TSB TLB entry.
*
* In:
* tsbinfo = tsb_info pointer as va (ro)
* tteidx = shifted index into TLB to load the locked entry (ro)
* va = virtual address at which to load the locked TSB entry (ro)
* Out:
* Scratch:
* tmp
*/
/*
* In the current implementation, TSBs usually come from physically
* contiguous chunks of memory up to 4MB in size, but 8K TSBs may be
* allocated from 8K chunks of memory under certain conditions. To
* prevent aliasing in the virtual address cache when the TSB slab is
* 8K in size we must align the reserved (TL>0) TSB virtual address to
* have the same low-order bits as the kernel (TL=0) TSB virtual address,
* and map 8K TSBs with an 8K TTE. In cases where the TSB reserved VA
* range is smaller than the assumed 4M we will patch the shift at
* runtime; otherwise we leave it alone (which is why RUNTIME_PATCH
* constant doesn't appear below).
*
* In:
* tsbinfo (ro)
* resva: reserved VA base for this TSB
* Out:
* resva: corrected VA for this TSB
*/
/* BEGIN CSTYLED */ \
nop ;\
/* END CSTYLED */
/*
* Determine the pointer of the entry in the first TSB to probe given
* the 8K TSB pointer register contents.
*
* In:
* tsbp8k = 8K TSB pointer register (ro)
* tmp = scratch register
* label = label for hot patching of utsb_vabase
*
* Out: tsbe_ptr = TSB entry address
*
* Note: This function is patched at runtime for performance reasons.
* Any changes here require sfmmu_patch_utsb to be updated as well.
*/
/* BEGIN CSTYLED */ \
/* tsbeptr = contents of utsb_vabase */ ;\
/* clear upper bits leaving just bits 21:0 of TSB ptr. */ ;\
/* finish clear */ ;\
/* or-in bits 41:22 of the VA to form the real pointer. */ ;\
/* END CSTYLED */
/*
* Determine the base address of the second TSB given the 8K TSB
* pointer register contents.
*
* In:
* tsbp8k = 8K TSB pointer register (ro)
* tmp = scratch register
* label = label for hot patching of utsb_vabase
*
* Out:
* tsbbase = TSB base address
*
* Note: This function is patched at runtime for performance reasons.
* Any changes here require sfmmu_patch_utsb to be updated as well.
*/
/* BEGIN CSTYLED */ \
/* tsbbase = contents of utsb4m_vabase */ ;\
/* clear upper bits leaving just bits 21:xx of TSB addr. */ ;\
/* clear lower bits leaving just 21:13 in 8:0 */ ;\
/* adjust TSB offset to bits 21:13 */ ;\
/* END CSTYLED */
/*
* Determine the size code of the second TSB given the 8K TSB
* pointer register contents.
*
* In:
* tsbp8k = 8K TSB pointer register (ro)
* Out:
* size = TSB size code
*/
/*
* Get the location in the 2nd TSB of the tsbe for this fault.
* Assumes that the second TSB only contains 4M mappings.
*
* In:
* tagacc = tag access register (clobbered)
* tsbp8k = contents of TSB8K pointer register (ro)
* tmp1, tmp2 = scratch registers
* label = label at which to patch in reserved TSB 4M VA range
* Out:
* tsbe_ptr = pointer to the tsbe in the 2nd TSB
*/
/* tsbe_ptr = TSB base address, tmp2 = junk */ \
/* tmp1 = TSB size code */ \
#else /* !UTSB_PHYS */
/*
* Determine the pointer of the entry in the first TSB to probe given
* the 8K TSB pointer register contents.
*
* In:
* tagacc = tag access register
* tsbe_ptr = 8K TSB pointer register
* tmp = scratch registers
*
* Out: tsbe_ptr = TSB entry address
*
* Note: This macro is a nop since the 8K TSB pointer register
* is the entry pointer and does not need to be decoded.
* It is defined to allow for code sharing with sun4v.
*/
#endif /* !UTSB_PHYS */
/*
* Load TSB base register. In the single TSB case this register
* contains utsb_vabase, bits 21:13 of tsbinfo->tsb_va, and the
* TSB size code in bits 2:0. See hat_sfmmu.h for the layout in
* the case where we have multiple TSBs per process.
*
* In:
* tsbreg = value to load (ro)
*/
#ifdef UTSB_PHYS
/* User TSBs are referenced by physical address: quad load via physical ASI. */
#define UTSB_PROBE_ASI	ASI_QUAD_LDD_PHYS
#else
/*
 * User TSBs are referenced by virtual address (the default; see the block
 * comment at the top of this header): non-faulting quad load ASI.
 * NOTE(review): exact ASI semantics are defined in the platform register
 * headers included above — confirm there.
 */
#define UTSB_PROBE_ASI	ASI_NQUAD_LD
#endif
/* BEGIN CSTYLED */ \
nop \
/* END CSTYLED */
/*
* Probe a TSB. If miss continue from the end of the macro for most probes
* except jump to TSB miss for 3rd ITSB probe. If hit retry faulted
* instruction for DTSB probes. For ITSB probes in case of TSB hit check
* execute bit and branch to exec_fault if the bit is not set otherwise retry
* faulted instruction. Do ITLB synthesis in case of hit in second ITSB if
* synthesis bit is set.
*
* tsbe_ptr = precomputed TSB entry pointer (in, ro)
* vpg_4m = 4M virtual page number for tag matching (in, ro)
* label = where to branch to if this is a miss (text)
*
* For trapstat, we have to explicitly use these registers.
* g4 = location tag will be retrieved into from TSB (out)
* g5 = location data(tte) will be retrieved into from TSB (out)
*
* In case of first tsb probe tsbe_ptr is %g1. For other tsb probes
* move tsbe_ptr into %g1 in case of hit for traptrace.
*
* If the probe fails and we continue from call site %g4-%g5 are clobbered.
* 2nd ITSB probe macro will also clobber %g6 in this case.
*/
/* BEGIN CSTYLED */ \
TT_TRACE(trace_tsbhit) ;\
retry /* retry faulted instruction */ ;\
/* END CSTYLED */
/* BEGIN CSTYLED */ \
TT_TRACE(trace_tsbhit) ;\
retry /* retry faulted instruction */ ;\
/* END CSTYLED */
/* BEGIN CSTYLED */ \
nop ;\
TT_TRACE(trace_tsbhit) ;\
retry /* retry faulted instruction */ ;\
/* END CSTYLED */
/* BEGIN CSTYLED */ \
TT_TRACE(trace_tsbhit) ;\
retry /* retry faulted instruction */ ;\
TT_TRACE(trace_tsbhit) ;\
retry /* retry faulted instruction */ ;\
/* END CSTYLED */
#ifdef UTSB_PHYS
/*
* Updates the context field in the tag access register with the shared
* context to force the next i/DTLB_STUFF() to load this mapping into
* the TLB with the shared context.
*/
/* BEGIN CSTYLED */ \
/* END CSTYLED */
/* BEGIN CSTYLED */ \
TT_TRACE(trace_tsbhit) ;\
retry /* retry faulted instruction */ ;\
/* END CSTYLED */
/* BEGIN CSTYLED */ \
/* END CSTYLED */
/* BEGIN CSTYLED */ \
/* END CSTYLED */
/* BEGIN CSTYLED */ \
TT_TRACE(trace_tsbhit) ;\
retry /* retry faulted instruction */ ;\
/* END CSTYLED */
/* BEGIN CSTYLED */ \
/* END CSTYLED */
/* BEGIN CSTYLED */ \
/* END CSTYLED */
/*
* The traptype is supplied by caller.
*
* If iTSB miss, store shctx into IMMU TAG ACCESS REG
* If dTSB miss, store shctx into DMMU TAG ACCESS REG
* Thus the [D|I]TLB_STUFF will work as expected.
*/
/* BEGIN CSTYLED */ \
nop ;\
/* END CSTYLED */
#endif /* UTSB_PHYS */
#endif /* _ASM */
#ifdef __cplusplus
}
#endif
#endif /* _VM_MACH_SFMMU_H */