/* trap_table.s, revision 1bd453f385f392a0415fad0b14efc9f5a545320f */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#if !defined(lint)
#include "assym.h"
#endif /* !lint */
#include <sys/asm_linkage.h>
#include <sys/privregs.h>
#include <sys/cheetahregs.h>
#include <sys/machtrap.h>
#include <sys/machthread.h>
#include <sys/psr_compat.h>
#include <sys/machparam.h>
#include <sys/traptrace.h>
#include <vm/hat_sfmmu.h>
#include <sys/archsystm.h>
/*
* WARNING: If you add a fast trap handler which can be invoked by a
* non-privileged user, you may have to use the FAST_TRAP_DONE macro
* instead of "done" instruction to return back to the user mode. See
* comments for the "fast_trap_done" entry point for more information.
*
* An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
* cases where you always want to process any pending interrupts before
* returning back to the user mode.
*/
#define	FAST_TRAP_DONE	\
	ba,a	fast_trap_done

#define	FAST_TRAP_DONE_CHK_INTR	\
	ba,a	fast_trap_done_chk_intr
/*
* SPARC V9 Trap Table
*
* Most of the trap handlers are made from common building
* blocks, and some are instantiated multiple times within
* the trap table. So, I build a bunch of macros, then
* populate the table using only the macros.
*
* Many macros branch to sys_trap. Its calling convention is:
* %g1 kernel trap handler
* %g2, %g3 args for above
 * %g4	desired %pil
*/
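/*
 * For example, a handler that funnels a trap to trap() at the current
 * %pil follows the convention like this (a sketch only; T_AST is just
 * an illustrative trap type, and -1 in %g4 is taken here to mean
 * "leave %pil unchanged"):
 *
 *	set	trap, %g1		! %g1 = kernel trap handler
 *	mov	T_AST, %g3		! %g3 = arg: trap type
 *	ba,pt	%xcc, sys_trap
 *	sub	%g0, 1, %g4		! %g4 = desired %pil
 */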
#ifdef TRAPTRACE
/*
* Tracing macro. Adds two instructions if TRAPTRACE is defined.
*/
#define	TT_TRACE(label)		\
	ba	label		;\
	rd	%pc, %g7
#define	TT_TRACE_INS	2
#define	TT_TRACE_L(label)	\
	ba	label		;\
	rd	%pc, %l4	;\
	clr	%l4
#define	TT_TRACE_L_INS	3
#else
#define	TT_TRACE(label)
#define	TT_TRACE_INS	0
#define	TT_TRACE_L(label)
#define	TT_TRACE_L_INS	0
#endif
/*
 * This macro is used to update per-CPU MMU stats in performance-critical
 * paths. It is only enabled in DEBUG kernels or when SFMMU_STAT_GATHER
 * is defined.
*/
#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
#define	HAT_PERCPU_DBSTAT(stat)
#else
#define HAT_PERCPU_DBSTAT(stat)
#endif /* DEBUG || SFMMU_STAT_GATHER */
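/*
 * A minimal sketch of what such a stat bump amounts to, assuming
 * "stat" is a byte offset into the CPU structure (the stripped macro
 * may instead branch to a shared wrapper routine):
 *	CPU_ADDR(%g1, %g2)		! %g1 = CPU pointer, %g2 scratch
 *	ld	[%g1 + stat], %g2	! load the counter
 *	inc	%g2
 *	st	%g2, [%g1 + stat]	! store it back
 */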
/*
 * This first set is funneled to trap() with %tt as the type.
 * trap() will then either panic or send the user a signal.
*/
/*
* NOT is used for traps that just shouldn't happen.
* It comes in both single and quadruple flavors.
*/
#if !defined(lint)
#endif /* !lint */
#define NOT \
.align 32
/*
* RED is for traps that use the red mode handler.
* We should never see these either.
*/
/*
* BAD is used for trap vectors we don't have a kernel
* handler for.
* It also comes in single and quadruple versions.
*/
#define DONE \
done; \
.align 32
/*
* TRAP vectors to the trap() function.
 * Its main use is for user errors.
*/
#if !defined(lint)
#endif /* !lint */
.align 32
/*
* SYSCALL is used for system calls on both ILP32 and LP64 kernels
* depending on the "which" parameter (should be either syscall_trap
* or syscall_trap32).
*/
.align 32
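/*
 * A sketch of the hand-off a SYSCALL(which) entry implies (the elided
 * body may also trace):
 *	set	(which), %g1		! syscall_trap or syscall_trap32
 *	ba,pt	%xcc, sys_trap
 *	sub	%g0, 1, %g4		! stay at current %pil
 */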
#define	FLUSHW()	\
	save		;\
	flushw		;\
	restore		;\
	FAST_TRAP_DONE	;\
	.align	32
/*
* GOTO just jumps to a label.
* It's used for things that can be fixed without going thru sys_trap.
*/
#define	GOTO(label)	\
	ba,a	label	;\
	.empty		;\
	.align	32
/*
* GOTO_TT just jumps to a label.
 * Correctable ECC error traps at levels 0 and 1 use this macro.
* It's used for things that can be fixed without going thru sys_trap.
*/
#define	GOTO_TT(label, ttlabel)	\
	TT_TRACE(ttlabel)	;\
	ba,a	label		;\
	.empty			;\
	.align	32
/*
* Privileged traps
* Takes breakpoint if privileged, calls trap() if not.
*/
.align 32
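/*
 * A sketch of the check this implies (take the breakpoint handler when
 * TSTATE_PRIV is set, else funnel to trap(); the elided body may differ):
 *	rdpr	%tstate, %g1
 *	btst	TSTATE_PRIV, %g1
 *	bnz,pn	%xcc, label		! privileged: breakpoint handler
 *	rdpr	%tt, %g3		! delay slot: %tt for trap()
 *	set	trap, %g1
 *	ba,pt	%xcc, sys_trap
 *	sub	%g0, 1, %g4
 */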
/*
* DTrace traps.
*/
#define DTRACE_FASTTRAP \
.align 32
#define DTRACE_PID \
.align 32
#define DTRACE_RETURN \
.align 32
/*
* REGISTER WINDOW MANAGEMENT MACROS
*/
/*
* various convenient units of padding
*/
/*
* CLEAN_WINDOW is the simple handler for cleaning a register window.
*/
#define	CLEAN_WINDOW	\
	TT_TRACE_L(trace_win)
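/*
 * A sketch of what the elided cleaning amounts to (bump %cleanwin,
 * zero the window's locals and outs, then retry):
 *	rdpr	%cleanwin, %l0
 *	inc	%l0
 *	wrpr	%l0, %cleanwin
 *	clr	%l0; ...; clr	%l7	! zero the locals
 *	clr	%o0; ...; clr	%o7	! zero the outs
 *	retry
 */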
#if !defined(lint)
/*
* If we get an unresolved tlb miss while in a window handler, the fault
* handler will resume execution at the last instruction of the window
 * handler, instead of delivering the fault to the kernel. Spill handlers
* use this to spill windows into the wbuf.
*
* The mixed handler works by checking %sp, and branching to the correct
* handler. This is done by branching back to label 1: for 32b frames,
 * or label 2: for 64b frames, which implies the handler order is: 32b,
 * 64b, mixed. The 1: and 2: labels are offset into the routines to
 * allow the branches' delay slots to contain useful instructions.
*/
/*
* SPILL_32bit spills a 32-bit-wide kernel register window. It
* assumes that the kernel context and the nucleus context are the
* same. The stack pointer is required to be eight-byte aligned even
* though this code only needs it to be four-byte aligned.
*/
#define SPILL_32bit(tail) \
TT_TRACE_L(trace_win) ;\
saved ;\
retry ;\
/*
* SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
* wide address space via the designated asi. It is used to spill
* non-kernel windows. The stack pointer is required to be eight-byte
* aligned even though this code only needs it to be four-byte
* aligned.
*/
TT_TRACE_L(trace_win) ;\
saved ;\
retry ;\
/*
* SPILL_32bit_tt1 spills a 32-bit-wide register window into a 32-bit
* wide address space via the designated asi. It is used to spill
* windows at tl>1 where performance isn't the primary concern and
* where we don't want to use unnecessary registers. The stack
* pointer is required to be eight-byte aligned even though this code
* only needs it to be four-byte aligned.
*/
TT_TRACE_L(trace_win) ;\
saved ;\
retry ;\
/*
* FILL_32bit fills a 32-bit-wide kernel register window. It assumes
* that the kernel context and the nucleus context are the same. The
* stack pointer is required to be eight-byte aligned even though this
* code only needs it to be four-byte aligned.
*/
#define FILL_32bit(tail) \
restored ;\
retry ;\
/*
* FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
* wide address space via the designated asi. It is used to fill
* non-kernel windows. The stack pointer is required to be eight-byte
* aligned even though this code only needs it to be four-byte
* aligned.
*/
restored ;\
retry ;\
/*
* FILL_32bit_tt1 fills a 32-bit-wide register window from a 32-bit
* wide address space via the designated asi. It is used to fill
* windows at tl>1 where performance isn't the primary concern and
* where we don't want to use unnecessary registers. The stack
* pointer is required to be eight-byte aligned even though this code
* only needs it to be four-byte aligned.
*/
TT_TRACE_L(trace_win) ;\
restored ;\
retry ;\
/*
* SPILL_64bit spills a 64-bit-wide kernel register window. It
* assumes that the kernel context and the nucleus context are the
* same. The stack pointer is required to be eight-byte aligned.
*/
#define SPILL_64bit(tail) \
TT_TRACE_L(trace_win) ;\
saved ;\
retry ;\
/*
* SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
* wide address space via the designated asi. It is used to spill
* non-kernel windows. The stack pointer is required to be eight-byte
* aligned.
*/
TT_TRACE_L(trace_win) ;\
saved ;\
retry ;\
/*
* SPILL_64bit_tt1 spills a 64-bit-wide register window into a 64-bit
* wide address space via the designated asi. It is used to spill
* windows at tl>1 where performance isn't the primary concern and
* where we don't want to use unnecessary registers. The stack
* pointer is required to be eight-byte aligned.
*/
TT_TRACE_L(trace_win) ;\
saved ;\
retry ;\
/*
* FILL_64bit fills a 64-bit-wide kernel register window. It assumes
* that the kernel context and the nucleus context are the same. The
* stack pointer is required to be eight-byte aligned.
*/
#define FILL_64bit(tail) \
restored ;\
retry ;\
/*
* FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
* wide address space via the designated asi. It is used to fill
* non-kernel windows. The stack pointer is required to be eight-byte
* aligned.
*/
restored ;\
retry ;\
/*
* FILL_64bit_tt1 fills a 64-bit-wide register window from a 64-bit
* wide address space via the designated asi. It is used to fill
* windows at tl>1 where performance isn't the primary concern and
* where we don't want to use unnecessary registers. The stack
* pointer is required to be eight-byte aligned.
*/
TT_TRACE_L(trace_win) ;\
restored ;\
	retry
#endif /* !lint */
/*
* SPILL_mixed spills either size window, depending on
* whether %sp is even or odd, to a 32-bit address space.
* This may only be used in conjunction with SPILL_32bit/
* SPILL_64bit. New versions of SPILL_mixed_{tt1,asi} would be
* needed for use with SPILL_{32,64}bit_{tt1,asi}. Particular
* attention should be paid to the instructions that belong
* in the delay slots of the branches depending on the type
* of spill handler being branched to.
 * Clear the upper 32 bits of %sp if it is odd.
 * We won't need to clear them in a 64-bit kernel.
*/
#define SPILL_mixed \
nop ;\
.align 128
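/*
 * A sketch of the dispatch this implies (the elided delay slots are
 * exactly what the caution above is about):
 *	btst	1, %sp			! 64-bit frames have odd (biased) %sp
 *	bz,pt	%xcc, 1b		! even: branch into the 32-bit handler
 *	nop
 *	ba,pt	%xcc, 2b		! odd: branch into the 64-bit handler
 *	srl	%sp, 0, %sp		! delay: clear upper 32 bits of %sp
 */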
/*
* FILL_mixed(ASI) fills either size window, depending on
* whether %sp is even or odd, from a 32-bit address space.
* This may only be used in conjunction with FILL_32bit/
* FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be
* needed for use with FILL_{32,64}bit_{tt1,asi}. Particular
* attention should be paid to the instructions that belong
* in the delay slots of the branches depending on the type
* of fill handler being branched to.
 * Clear the upper 32 bits of %sp if it is odd.
 * We won't need to clear them in a 64-bit kernel.
*/
#define FILL_mixed \
nop ;\
.align 128
/*
* SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
* respectively, into the address space via the designated asi. The
 * unbiased stack pointer is required to be eight-byte aligned (even in
 * the 32-bit case, though this code does not require such strict
 * alignment).
*
* With SPARC v9 the spill trap takes precedence over the cleanwin trap
* so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
* will cause cwp + 2 to be spilled but will not clean cwp + 1. That
* window may contain kernel data so in user_rtt we set wstate to call
 * these spill handlers on the first user spill trap. These handlers then
* spill the appropriate window but also back up a window and clean the
* window that didn't get a cleanwin trap.
*/
TT_TRACE_L(trace_win) ;\
b .spill_clean ;\
TT_TRACE_L(trace_win) ;\
b .spill_clean ;\
/*
* Floating point disabled.
*/
#define FP_DISABLED_TRAP \
nop ;\
.align 32
/*
* Floating point exceptions.
*/
#define FP_IEEE_TRAP \
nop ;\
.align 32
#define FP_TRAP \
nop ;\
.align 32
#if !defined(lint)
/*
* asynchronous traps at level 0 and level 1
*
* The first instruction must be a membar for UltraSPARC-III
* to stop RED state entry if the store queue has many
* pending bad stores (PRM, Chapter 11).
*/
.align 32
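/*
 * Hence the first instruction of such an entry is a sync membar (sketch):
 *	membar	#Sync
 *	... hand off to the async error handler via sys_trap ...
 */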
/*
* Defaults to BAD entry, but establishes label to be used for
* architecture-specific overwrite of trap table entry.
*/
#define	LABELED_BAD(table_name)	\
	.global	table_name	;\
table_name:			;\
	BAD
#endif /* !lint */
/*
* illegal instruction trap
*/
#define ILLTRAP_INSTR \
nop ;\
.align 32
/*
* tag overflow trap
*/
#define TAG_OVERFLOW \
nop ;\
.align 32
/*
* divide by zero trap
*/
#define DIV_BY_ZERO \
nop ;\
.align 32
/*
* trap instruction for V9 user trap handlers
*/
#define TRAP_INSTR \
nop ;\
.align 32
/*
* LEVEL_INTERRUPT is for level N interrupts.
* VECTOR_INTERRUPT is for the vector trap.
*/
#define LEVEL_INTERRUPT(level) \
.align 32
#define LEVEL14_INTERRUPT \
.align 32
#define VECTOR_INTERRUPT \
nop ;\
.empty ;\
.align 32
/*
* MMU Trap Handlers.
*/
#define	SWITCH_GLOBALS	/* mmu->alt, alt->mmu */	\
	rdpr	%pstate, %g5				;\
	wrpr	%g5, PSTATE_MG | PSTATE_AG, %pstate
#define IMMU_EXCEPTION \
.align 32
#define DMMU_EXCEPTION \
.align 32
#define DMMU_EXC_AG_PRIV \
.align 32
#define DMMU_EXC_AG_NOT_ALIGNED \
.align 32
/*
* SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
*/
#define DMMU_EXC_LDDF_NOT_ALIGNED \
.align 32
#define DMMU_EXC_STDF_NOT_ALIGNED \
.align 32
/*
* Flush the TLB using either the primary, secondary, or nucleus flush
* operation based on whether the ctx from the tag access register matches
* the primary or secondary context (flush the nucleus if neither matches).
*
* exits with:
* g2 = tag access register
* g3 = ctx number
*/
#if TAGACC_CTX_MASK != CTXREG_CTX_MASK
#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK"
#endif
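/*
 * A sketch of the ctx extraction this implies (MMU_TAG_ACCESS is the
 * D-MMU tag access register, read through ASI_DMMU; the elided body
 * then compares %g3 with the primary and secondary ctxs to choose the
 * demap operation):
 *	mov	MMU_TAG_ACCESS, %g1
 *	ldxa	[%g1]ASI_DMMU, %g2		! %g2 = tag access register
 *	and	%g2, TAGACC_CTX_MASK, %g3	! %g3 = ctx number
 */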
#define	DTLB_DEMAP_ENTRY
#if defined(cscope)
/*
* Define labels to direct cscope quickly to labels that
* are generated by macro expansion of DTLB_MISS().
*/
#endif
/*
* Needs to be exactly 32 instructions
*
* UTLB NOTE: If we don't hit on the 8k pointer then we branch
* to a special 4M tsb handler. It would be nice if that handler
* could live in this file but currently it seems better to allow
* it to fall thru to sfmmu_tsb_miss.
*/
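/*
 * In outline, the probe looks like this (a sketch; the real sequence
 * also constructs and compares the TSB tag and is padded with unimp
 * to exactly 32 instructions):
 *	ldxa	[%g0]ASI_DMMU_TSB_8K, %g1	! %g1 = 8K tsbe pointer
 *	ldda	[%g1]ASI_NQUAD_LD, %g4		! %g4 = tag, %g5 = data
 *	cmp	%g4, <tag>			! hit?
 *	bne,pn	%xcc, <4M/sfmmu_tsb_miss path>
 *	nop
 *	stxa	%g5, [%g0]ASI_DTLB_IN		! load the TTE into the TLB
 *	retry
 */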
#define DTLB_MISS(table_name) ;\
nop ;\
retry /* in %g5 */ ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
.align 128
#if defined(cscope)
/*
* Define labels to direct cscope quickly to labels that
* are generated by macro expansion of ITLB_MISS().
*/
#endif
/*
* Instruction miss handler.
* ldda instructions will have their ASI patched
* by sfmmu_patch_ktsb at runtime.
* MUST be EXACTLY 32 instructions or we'll break.
*/
#define ITLB_MISS(table_name) \
nop ;\
nop ;\
retry ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
.align 128
/*
* This macro is the first level handler for fast protection faults.
* It first demaps the tlb entry which generated the fault and then
* attempts to set the modify bit on the hash. It needs to be
* exactly 32 instructions.
*/
#define DTLB_PROT \
DTLB_DEMAP_ENTRY /* 20 instructions */ ;\
/* ;\
* At this point: ;\
* g1 = ???? ;\
* g2 = tag access register ;\
* g3 = ctx number ;\
* g4 = ???? ;\
*/ ;\
/* clobbers g1 and g6 */ ;\
nop ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
unimp 0 ;\
.align 128
#define DMMU_EXCEPTION_TL1 ;\
nop ;\
.align 32
#define MISALIGN_ADDR_TL1 ;\
nop ;\
.align 32
/*
* Trace a tsb hit
* g2 = tag access register (in)
* g3 - g4 = scratch (clobbered)
* g5 = tsbe data (in)
* g6 = scratch (clobbered)
* g7 = pc we jumped here from (in)
* ttextra = value to OR in to trap type (%tt) (in)
*/
#ifdef TRAPTRACE
#define	TRACE_TSBHIT(ttextra)	\
	GET_TRACE_TICK(%g6)
#else
#define TRACE_TSBHIT(ttextra)
#endif
#if defined(lint)
struct scb trap_table;
#else /* lint */
/*
* =======================================================================
* SPARC V9 TRAP TABLE
*
* The trap table is divided into two halves: the first half is used when
* taking traps when TL=0; the second half is used when taking traps from
* TL>0. Note that handlers in the second half of the table might not be able
* to make the same assumptions as handlers in the first half of the table.
*
* Worst case trap nesting so far:
*
* at TL=0 client issues software trap requesting service
* at TL=1 nucleus wants a register window
* at TL=3 processing TLB miss
* at TL=4 handle asynchronous error
*
* Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
*
* =======================================================================
*/
.section ".text"
.align 4
scb:
/* hardware traps */
NOT; /* 000 reserved */
RED; /* 001 power on reset */
RED; /* 002 watchdog reset */
RED; /* 003 externally initiated reset */
RED; /* 004 software initiated reset */
RED; /* 005 red mode exception */
IMMU_EXCEPTION; /* 008 instruction access exception */
NOT; /* 009 instruction access MMU miss */
/* 00A instruction access error */
ILLTRAP_INSTR; /* 010 illegal instruction */
NOT; /* 012 unimplemented LDD */
NOT; /* 013 unimplemented STD */
FP_DISABLED_TRAP; /* 020 fp disabled */
FP_IEEE_TRAP; /* 021 fp exception ieee 754 */
FP_TRAP; /* 022 fp exception other */
TAG_OVERFLOW; /* 023 tag overflow */
CLEAN_WINDOW; /* 024 - 027 clean window */
DIV_BY_ZERO; /* 028 division by zero */
NOT; /* 029 internal processor error */
DMMU_EXCEPTION; /* 030 data access exception */
NOT; /* 031 data access MMU miss */
/* 032 data access error */
NOT; /* 033 data access protection */
DMMU_EXC_AG_NOT_ALIGNED; /* 034 mem address not aligned */
DMMU_EXC_LDDF_NOT_ALIGNED; /* 035 LDDF mem address not aligned */
DMMU_EXC_STDF_NOT_ALIGNED; /* 036 STDF mem address not aligned */
DMMU_EXC_AG_PRIV; /* 037 privileged action */
NOT; /* 038 LDQF mem address not aligned */
NOT; /* 039 STQF mem address not aligned */
NOT; /* 040 async data error */
LEVEL14_INTERRUPT; /* 04E interrupt level 14 */
VECTOR_INTERRUPT; /* 060 interrupt vector */
DTLB_PROT; /* 06C data access protection */
NOT; /* 073 reserved */
NOT4; /* 080 spill 0 normal */
SPILL_mixed; /* 09C spill 7 normal */
NOT4; /* 0A0 spill 0 other */
NOT4; /* 0B4 spill 5 other */
NOT4; /* 0B8 spill 6 other */
NOT4; /* 0BC spill 7 other */
NOT4; /* 0C0 fill 0 normal */
FILL_mixed; /* 0DC fill 7 normal */
NOT4; /* 0E0 fill 0 other */
NOT4; /* 0E4 fill 1 other */
NOT4; /* 0E8 fill 2 other */
NOT4; /* 0EC fill 3 other */
NOT4; /* 0F0 fill 4 other */
NOT4; /* 0F4 fill 5 other */
NOT4; /* 0F8 fill 6 other */
NOT4; /* 0FC fill 7 other */
/* user traps */
FLUSHW(); /* 103 flush windows */
BAD; /* 105 range check ?? */
BAD; /* 107 unused */
BAD; /* 128 ST_SETV9STACK */
DTRACE_PID; /* 138 dtrace pid tracing provider */
DTRACE_FASTTRAP; /* 139 dtrace fasttrap provider */
DTRACE_RETURN; /* 13A dtrace pid return probe */
BAD; /* 141 unused */
#ifdef DEBUG_USER_TRAPTRACECTL
#else
#endif
BAD; /* 170 - unused */
BAD; /* 171 - unused */
#ifdef PTL1_PANIC_DEBUG
/* 17C test ptl1_panic */
#else
BAD; /* 17C unused */
#endif /* PTL1_PANIC_DEBUG */
/* reserved */
/* 00A instruction access error */
NOT4; /* 020 - 023 unused */
CLEAN_WINDOW; /* 024 - 027 clean window */
DMMU_EXCEPTION_TL1; /* 030 data access exception */
NOT; /* 031 unused */
/* 032 data access error */
NOT; /* 033 unused */
MISALIGN_ADDR_TL1; /* 034 mem address not aligned */
NOT; /* 060 unused */
DTLB_PROT; /* 06C data access protection */
NOT; /* 073 reserved */
NOT4; /* 080 spill 0 normal */
SPILL_mixed; /* 09C spill 7 normal */
NOT4; /* 0A0 spill 0 other */
NOT4; /* 0B4 spill 5 other */
NOT4; /* 0B8 spill 6 other */
NOT4; /* 0BC spill 7 other */
NOT4; /* 0C0 fill 0 normal */
FILL_mixed; /* 0DC fill 7 normal */
NOT; /* 103 reserved */
/*
* We only reserve the above four special case soft traps for code running
* at TL>0, so we can truncate the trap table here.
*/
/*
 * We get to exec_fault in the case of an instruction miss whose tte
 * has no execute bit set. We go to tl0 to handle it.
*
* g2 = tag access register (in)
* g3 - g4 = scratch (clobbered)
* g5 = tsbe data (in)
* g6 = scratch (clobbered)
*/
TRACE_TSBHIT(0x200)
1:
2:
1:
2:
3:
#ifdef SF_ERRATA_30 /* call causes fp-disabled */
#endif
#ifdef SF_ERRATA_30 /* call causes fp-disabled */
#else
#endif
1:
2:
1:
/*
* Register Inputs:
* %g5 user trap handler
* %g7 misaligned addr - for alignment traps only
*/
/*
* If the DTrace pid provider is single stepping a copied-out
* instruction, t->t_dtrace_step will be set. In that case we need
* to abort the single-stepping (since execution of the instruction
* was interrupted) and use the value of t->t_dtrace_npc as the %npc.
*/
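/*
 * A sketch of that check (T_DTRACE_STEP and T_DTRACE_NPC stand in for
 * the assym.h offsets of those thread fields; hypothetical names):
 *	ldub	[THREAD_REG + T_DTRACE_STEP], %g6
 *	brz,pt	%g6, 1f				! not single-stepping
 *	nop
 *	ldn	[THREAD_REG + T_DTRACE_NPC], %g6
 *	wrpr	%g6, %tnpc			! resume at t_dtrace_npc
 * 1:
 */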
1:
/* NOTREACHED */
1:
2:
3:
/* NOTREACHED */
/*
* Register Inputs:
* %g5 user trap handler
*/
/*
* If the DTrace pid provider is single stepping a copied-out
* instruction, t->t_dtrace_step will be set. In that case we need
* to abort the single-stepping (since execution of the instruction
* was interrupted) and use the value of t->t_dtrace_npc as the %npc.
*/
1:
/* NOTREACHED */
4:
/* NOTREACHED */
/*
 * Cheetah takes an unfinished_FPop trap for a certain range of operands
* to the "fitos" instruction. Instead of going through the slow
* software emulation path, we try to simulate the "fitos" instruction
* via "fitod" and "fdtos" provided the following conditions are met:
*
* fpu_exists is set (if DEBUG)
* not in privileged mode
* ftt is unfinished_FPop
* NXM IEEE trap is not enabled
* instruction at %tpc is "fitos"
*
* Usage:
* %g1 per cpu address
* %g2 %fsr
* %g6 user instruction
*
* Note that we can take a memory access related trap while trying
* to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
* flag to catch those traps and let the SFMMU code deal with page
* fault and data access exception.
*/
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
#endif
st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
set FITOS_INSTR_MASK, %g7
and %g6, %g7, %g7
set FITOS_INSTR, %g5
cmp %g7, %g5
bne,pn %xcc, .fp_exception_cont ! branch if not FITOS_INSTR
nop
/*
 * This is the unfinished_FPop trap for the "fitos" instruction. We
 * need to simulate "fitos" via the "fitod" and "fdtos" instruction
* sequence.
*
* We need a temporary FP register to do the conversion. Since
* both source and destination operands for the "fitos" instruction
 * have to be within %f0-%f31, we use an FP register from the upper
 * half of the register file as the temporary.
 */
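/*
 * A sketch of the simulated conversion (rs2/rd stand for the operand
 * registers decoded from the user instruction; %d62 is one such upper
 * temporary):
 *	fitod	%f<rs2>, %d62		! int -> double is exact
 *	fdtos	%d62, %f<rd>		! double -> single rounds once
 */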
/*
* Now convert data back into single precision
*/
#if DEBUG
/*
* Update FPop_unfinished trap kstat
*/
1:
/*
* Update fpu_sim_fitos kstat
*/
1:
#endif /* DEBUG */
/*
* Let _fp_exception deal with simulating FPop instruction.
* Note that we need to pass %fsr in %g2 (already read above).
*/
/*
* .spill_clean: clean the previous window, restore the wstate, and
* "done".
*
* Entry: %g7 contains new wstate
*/
nop ;\
nop ;\
/*
* Cheetah overwrites SFAR on a DTLB miss, hence read it now.
*/
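/*
 * A sketch of that read (MMU_SFAR is the D-SFAR offset within the
 * ASI_DMMU space; the elided code also sets the CPU_TL1_HDLR flag
 * around the user instruction fetch):
 *	mov	MMU_SFAR, %g5
 *	ldxa	[%g5]ASI_DMMU, %g5	! %g5 = misaligned vaddr
 */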
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
#endif
srl %g6, 23, %g1 ! using ldda or not?
and %g1, 1, %g1
brz,a,pt %g1, 2f ! check for ldda instruction
nop
srl %g6, 13, %g1 ! check immflag
and %g1, 1, %g1
rdpr %tstate, %g2 ! %tstate in %g2
brnz,a,pn %g1, 1f
	srl	%g2, 24, %g1		! get asi from %tstate (bits 31:24)
srl %g6, 5, %g1 ! get asi from instruction
and %g1, 0xFF, %g1 ! imm_asi field
1:
cmp %g1, ASI_P ! primary address space
be,a,pt %icc, 2f
nop
cmp %g1, ASI_PNF ! primary no fault address space
be,a,pt %icc, 2f
nop
cmp %g1, ASI_S ! secondary address space
be,a,pt %icc, 2f
nop
cmp %g1, ASI_SNF ! secondary no fault address space
bne,a,pn %icc, 3f
nop
2:
lduwa [%g5]ASI_USER, %g7 ! get first half of misaligned data
add %g5, 4, %g5 ! increment misaligned data address
lduwa [%g5]ASI_USER, %g5 ! get second half of misaligned data
sllx %g7, 32, %g7
or %g5, %g7, %g5 ! combine data
CPU_ADDR(%g7, %g1) ! save data on a per-cpu basis
stx %g5, [%g7 + CPU_TMP1] ! save in cpu_tmp1
srl %g6, 25, %g3 ! %g6 has the instruction
and %g3, 0x1F, %g3 ! %g3 has rd
LDDF_REG(%g3, %g7, %g4)
CPU_ADDR(%g1, %g4)
st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
FAST_TRAP_DONE
3:
CPU_ADDR(%g1, %g4)
st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
set T_USER, %g3 ! trap type in %g3
or %g3, T_LDDF_ALIGN, %g3
mov %g5, %g2 ! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! no fault little asi's
	sub	%g0, 1, %g4		! delay slot: stay at current %pil
/*
* Cheetah overwrites SFAR on a DTLB miss, hence read it now.
*/
#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
#endif
srl %g6, 23, %g1 ! using stda or not?
and %g1, 1, %g1
brz,a,pt %g1, 2f ! check for stda instruction
nop
srl %g6, 13, %g1 ! check immflag
and %g1, 1, %g1
rdpr %tstate, %g2 ! %tstate in %g2
brnz,a,pn %g1, 1f
	srl	%g2, 24, %g1		! get asi from %tstate (bits 31:24)
srl %g6, 5, %g1 ! get asi from instruction
and %g1, 0xFF, %g1 ! imm_asi field
1:
cmp %g1, ASI_P ! primary address space
be,a,pt %icc, 2f
nop
cmp %g1, ASI_S ! secondary address space
bne,a,pn %icc, 3f
nop
2:
srl %g6, 25, %g6
and %g6, 0x1F, %g6 ! %g6 has rd
CPU_ADDR(%g7, %g1)
STDF_REG(%g6, %g7, %g4) ! STDF_REG(REG, ADDR, TMP)
ldx [%g7 + CPU_TMP1], %g6
srlx %g6, 32, %g7
stuwa %g7, [%g5]ASI_USER ! first half
add %g5, 4, %g5 ! increment misaligned data address
stuwa %g6, [%g5]ASI_USER ! second half
CPU_ADDR(%g1, %g4)
st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
FAST_TRAP_DONE
3:
CPU_ADDR(%g1, %g4)
st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
4:
set T_USER, %g3 ! trap type in %g3
or %g3, T_STDF_ALIGN, %g3
mov %g5, %g2 ! misaligned vaddr in %g2
	set	fpu_trap, %g1		! goto C for the little and
	ba,pt	%xcc, sys_trap		! nofault little asi's
	sub	%g0, 1, %g4		! delay slot: stay at current %pil
#ifdef DEBUG_USER_TRAPTRACECTL
#endif /* DEBUG_USER_TRAPTRACECTL */
.getcc:
.setcc:
/*
* getpsr(void)
* Note that the xcc part of the ccr is not provided.
* The V8 code shows why the V9 trap is not faster:
* #define GETPSR_TRAP() \
* mov %psr, %i0; jmp %l2; rett %l2+4; nop;
*/
.getpsr:
/*
* setpsr(newpsr)
* Note that there is no support for ccr.xcc in the V9 code.
*/
.setpsr:
/*
* getlgrp
 * get the home lgrpid on which the calling thread is currently executing.
*/
.getlgrp:
/*
* Entry for old 4.x trap (trap 0).
*/
1:
!
!
/*
* Handler for software trap 9.
* Set trap0 emulation address for old 4.x system call trap.
* XXX - this should be a system call.
*/
/*
 * mmu_trap_tl1
 * trap handler for unexpected mmu traps.
 * It checks whether the trap is a user lddf/stdf alignment trap, in which
 * case we go to fpu_trap, or a user trap from the window handler, in which
 * case we save the state on the pcb. Otherwise, we go to ptl1_panic.
 */
#ifdef TRAPTRACE
#endif /* TRAPTRACE */
/*
* We are running on a Panther and have hit a DTLB parity error.
*/
/*
* AM is cleared on trap, so addresses are 64 bit
*/
/*
 * We are going to update cpu_m.tl1_hdlr using a physical address.
 * Flush the D$ line, so that stale data won't be accessed later.
*/
3:
2:
1:
/* tpc should be in the trap table */
/*
* Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers. These
* traps are valid only when kmdb is loaded. When the debugger is active,
* the code below is rewritten to transfer control to the appropriate
* debugger entry points.
*/
.align 8
.align 8
/*
* This entry is copied from OBP's trap table during boot.
*/
.align 8
/*
* if kernel, set PCONTEXT to 0 for debuggers
* if user, clear nucleus page sizes
*/
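/*
 * A sketch of the kernel case (PCONTEXT is reached through ASI_DMMU
 * at offset MMU_PCONTEXT; ordering the write with a membar is an
 * assumption here):
 *	mov	MMU_PCONTEXT, %g1
 *	stxa	%g0, [%g1]ASI_DMMU	! PCONTEXT = kcontext (0)
 *	membar	#Sync
 */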
1:
2:
3:
#ifdef TRAPTRACE
/*
* TRAPTRACE support.
* labels here are branched to with "rd %pc, %g7" in the delay slot.
* Return is done by "jmp %g7 + 4".
*/
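/*
 * That is, a caller looks like this (trace_gen is one such label):
 *	ba	trace_gen
 *	rd	%pc, %g7	! delay slot: %g7 = address of this rd
 * and each trace routine returns with:
 *	jmp	%g7 + 4
 *	nop
 */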
/*
* Trace a tsb hit
* g2 = tag access register (in)
* g3 - g4 = scratch (clobbered)
* g5 = tsbe data (in)
* g6 = scratch (clobbered)
* g7 = pc we jumped here from (in)
*/
TRACE_TSBHIT(0)
/*
* Trace a TSB miss
*
* g1 = tsb8k pointer (in)
* g2 = tag access register (in)
* g3 = tsb4m pointer (in)
* g5 - g6 = scratch (clobbered)
* g7 = pc we jumped here from (in)
*/
/*
* g2 = tag access register (in)
* g3 = ctx number (in)
*/
#endif /* TRAPTRACE */
/*
* expects offset into tsbmiss area in %g1 and return pc in %g7
*/
/*
* fast_trap_done, fast_trap_done_chk_intr:
*
 * Due to the design of the UltraSPARC pipeline, pending interrupts are not
* taken immediately after a RETRY or DONE instruction which causes IE to
* go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
* to execute first before taking any interrupts. If that instruction
* results in other traps, and if the corresponding trap handler runs
* entirely at TL=1 with interrupts disabled, then pending interrupts
* won't be taken until after yet another instruction following the %tpc
* or %tnpc.
*
 * A malicious user program can use this feature to block out interrupts
 * for extended durations, which can result in a send_mondo_timeout
 * kernel panic.
*
* This problem is addressed by servicing any pending interrupts via
* sys_trap before returning back to the user mode from a fast trap
* handler. The "done" instruction within a fast trap handler, which
* runs entirely at TL=1 with interrupts disabled, is replaced with the
* FAST_TRAP_DONE macro, which branches control to this fast_trap_done
* entry point.
*
* We check for any pending interrupts here and force a sys_trap to
* service those interrupts, if any. To minimize overhead, pending
 * interrupts are checked if the %tpc happens to lie on a 16K boundary,
* which allows a malicious program to execute at most 4K consecutive
* instructions before we service any pending interrupts. If a worst
* case fast trap handler takes about 2 usec, then interrupts will be
* blocked for at most 8 msec, less than a clock tick.
*
* For the cases where we don't know if the %tpc will cross a 16K
* boundary, we can't use the above optimization and always process
 * any pending interrupts via the fast_trap_done_chk_intr entry point.
*
* Entry Conditions:
* %pstate am:0 priv:1 ie:0
* globals are AG (not normal globals)
*/
fast_trap_done:
	rdpr	%tpc, %g5		! %tpc for the boundary check
	sethi	%hi(0xffffc000), %g6	! mask of all but the low 14 bits
	andncc	%g5, %g6, %g0		! check lower 14 bits of %tpc
bz,a,pn %icc, 1f ! branch if zero (lower 32 bits only)
ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g5
done
fast_trap_done_chk_intr:
ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g5
1: rd SOFTINT, %g6
and %g5, IRSR_BUSY, %g5
orcc %g5, %g6, %g0
bnz,pn %xcc, 2f ! branch if any pending intr
nop
done
2:
/*
 * We get here if there are any pending interrupts.
 * Adjust %tpc/%tnpc as we'll be resuming via the "retry"
 * instruction.
 */
/*
* Force a dummy sys_trap call so that interrupts can be serviced.
*/
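/*
 * A sketch of that hand-off ("fast_trap_dummy_call" is an illustrative
 * name for a handler that simply returns):
 *	set	fast_trap_dummy_call, %g1
 *	ba,pt	%xcc, sys_trap
 *	mov	-1, %g4			! delay slot: stay at current %pil
 */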
#endif /* lint */