trap_table.s revision 7c478bd95313f5f23a4c958a745db2134aa03244
2N/A/*
2N/A * CDDL HEADER START
2N/A *
2N/A * The contents of this file are subject to the terms of the
2N/A * Common Development and Distribution License, Version 1.0 only
2N/A * (the "License"). You may not use this file except in compliance
2N/A * with the License.
2N/A *
2N/A * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
2N/A * or http://www.opensolaris.org/os/licensing.
2N/A * See the License for the specific language governing permissions
2N/A * and limitations under the License.
2N/A *
2N/A * When distributing Covered Code, include this CDDL HEADER in each
2N/A * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
2N/A * If applicable, add the following below this CDDL HEADER, with the
2N/A * fields enclosed by brackets "[]" replaced with your own identifying
2N/A * information: Portions Copyright [yyyy] [name of copyright owner]
2N/A *
2N/A * CDDL HEADER END
2N/A */
2N/A/*
2N/A * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
2N/A * Use is subject to license terms.
2N/A */
2N/A
2N/A#pragma ident "%Z%%M% %I% %E% SMI"
2N/A
2N/A#if !defined(lint)
2N/A#include "assym.h"
2N/A#endif /* !lint */
2N/A#include <sys/asm_linkage.h>
2N/A#include <sys/privregs.h>
2N/A#include <sys/sun4asi.h>
2N/A#include <sys/cheetahregs.h>
2N/A#include <sys/machtrap.h>
2N/A#include <sys/machthread.h>
2N/A#include <sys/pcb.h>
2N/A#include <sys/pte.h>
2N/A#include <sys/mmu.h>
2N/A#include <sys/machpcb.h>
2N/A#include <sys/async.h>
2N/A#include <sys/intreg.h>
2N/A#include <sys/scb.h>
2N/A#include <sys/psr_compat.h>
2N/A#include <sys/syscall.h>
2N/A#include <sys/machparam.h>
2N/A#include <sys/traptrace.h>
2N/A#include <vm/hat_sfmmu.h>
2N/A#include <sys/archsystm.h>
2N/A#include <sys/utrap.h>
2N/A#include <sys/clock.h>
2N/A#include <sys/intr.h>
2N/A#include <sys/fpu/fpu_simulator.h>
2N/A#include <vm/seg_spt.h>
2N/A
2N/A/*
2N/A * WARNING: If you add a fast trap handler which can be invoked by a
2N/A * non-privileged user, you may have to use the FAST_TRAP_DONE macro
2N/A * instead of "done" instruction to return back to the user mode. See
2N/A * comments for the "fast_trap_done" entry point for more information.
2N/A *
2N/A * An alternate FAST_TRAP_DONE_CHK_INTR macro should be used for the
2N/A * cases where you always want to process any pending interrupts before
2N/A * returning back to the user mode.
2N/A */
2N/A#define FAST_TRAP_DONE \
2N/A ba,a fast_trap_done
2N/A
2N/A#define FAST_TRAP_DONE_CHK_INTR \
2N/A ba,a fast_trap_done_chk_intr
2N/A
2N/A/*
2N/A * SPARC V9 Trap Table
2N/A *
2N/A * Most of the trap handlers are made from common building
2N/A * blocks, and some are instantiated multiple times within
2N/A * the trap table. So, I build a bunch of macros, then
2N/A * populate the table using only the macros.
2N/A *
2N/A * Many macros branch to sys_trap. Its calling convention is:
2N/A * %g1 kernel trap handler
2N/A * %g2, %g3 args for above
 * %g4 desired %pil
2N/A */
2N/A
2N/A#ifdef TRAPTRACE
2N/A
2N/A/*
2N/A * Tracing macro. Adds two instructions if TRAPTRACE is defined.
2N/A */
2N/A#define TT_TRACE(label) \
2N/A ba label ;\
2N/A rd %pc, %g7
2N/A#define TT_TRACE_INS 2
2N/A
2N/A#define TT_TRACE_L(label) \
2N/A ba label ;\
2N/A rd %pc, %l4 ;\
2N/A clr %l4
2N/A#define TT_TRACE_L_INS 3
2N/A
2N/A#else
2N/A
2N/A#define TT_TRACE(label)
2N/A#define TT_TRACE_INS 0
2N/A
2N/A#define TT_TRACE_L(label)
2N/A#define TT_TRACE_L_INS 0
2N/A
2N/A#endif
2N/A
2N/A/*
2N/A * This macro is used to update per cpu mmu stats in perf critical
2N/A * paths. It is only enabled in debug kernels or if SFMMU_STAT_GATHER
2N/A * is defined.
2N/A */
2N/A#if defined(DEBUG) || defined(SFMMU_STAT_GATHER)
2N/A#define HAT_PERCPU_DBSTAT(stat) \
2N/A mov stat, %g1 ;\
2N/A ba stat_mmu ;\
2N/A rd %pc, %g7
2N/A#else
2N/A#define HAT_PERCPU_DBSTAT(stat)
2N/A#endif /* DEBUG || SFMMU_STAT_GATHER */
2N/A
2N/A/*
2N/A * This first set are funneled to trap() with %tt as the type.
2N/A * Trap will then either panic or send the user a signal.
2N/A */
2N/A/*
2N/A * NOT is used for traps that just shouldn't happen.
2N/A * It comes in both single and quadruple flavors.
2N/A */
2N/A#if !defined(lint)
2N/A .global trap
2N/A#endif /* !lint */
2N/A#define NOT \
2N/A TT_TRACE(trace_gen) ;\
2N/A set trap, %g1 ;\
2N/A rdpr %tt, %g3 ;\
2N/A ba,pt %xcc, sys_trap ;\
2N/A sub %g0, 1, %g4 ;\
2N/A .align 32
2N/A#define NOT4 NOT; NOT; NOT; NOT
2N/A/*
2N/A * RED is for traps that use the red mode handler.
2N/A * We should never see these either.
2N/A */
2N/A#define RED NOT
2N/A/*
2N/A * BAD is used for trap vectors we don't have a kernel
2N/A * handler for.
2N/A * It also comes in single and quadruple versions.
2N/A */
2N/A#define BAD NOT
2N/A#define BAD4 NOT4
2N/A
2N/A#define DONE \
2N/A done; \
2N/A .align 32
2N/A
2N/A/*
2N/A * TRAP vectors to the trap() function.
 * Its main use is for user errors.
2N/A */
2N/A#if !defined(lint)
2N/A .global trap
2N/A#endif /* !lint */
2N/A#define TRAP(arg) \
2N/A TT_TRACE(trace_gen) ;\
2N/A set trap, %g1 ;\
2N/A mov arg, %g3 ;\
2N/A ba,pt %xcc, sys_trap ;\
2N/A sub %g0, 1, %g4 ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * SYSCALL is used for system calls on both ILP32 and LP64 kernels
2N/A * depending on the "which" parameter (should be either syscall_trap
2N/A * or syscall_trap32).
2N/A */
2N/A#define SYSCALL(which) \
2N/A TT_TRACE(trace_gen) ;\
2N/A set (which), %g1 ;\
2N/A ba,pt %xcc, sys_trap ;\
2N/A sub %g0, 1, %g4 ;\
2N/A .align 32
2N/A
2N/A#define FLUSHW() \
2N/A set trap, %g1 ;\
2N/A mov T_FLUSHW, %g3 ;\
2N/A sub %g0, 1, %g4 ;\
2N/A save ;\
2N/A flushw ;\
2N/A restore ;\
2N/A FAST_TRAP_DONE ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * GOTO just jumps to a label.
2N/A * It's used for things that can be fixed without going thru sys_trap.
2N/A */
2N/A#define GOTO(label) \
2N/A .global label ;\
2N/A ba,a label ;\
2N/A .empty ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * GOTO_TT just jumps to a label.
2N/A * correctable ECC error traps at level 0 and 1 will use this macro.
2N/A * It's used for things that can be fixed without going thru sys_trap.
2N/A */
2N/A#define GOTO_TT(label, ttlabel) \
2N/A .global label ;\
2N/A TT_TRACE(ttlabel) ;\
2N/A ba,a label ;\
2N/A .empty ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * Privileged traps
2N/A * Takes breakpoint if privileged, calls trap() if not.
2N/A */
2N/A#define PRIV(label) \
2N/A rdpr %tstate, %g1 ;\
2N/A btst TSTATE_PRIV, %g1 ;\
2N/A bnz label ;\
2N/A rdpr %tt, %g3 ;\
2N/A set trap, %g1 ;\
2N/A ba,pt %xcc, sys_trap ;\
2N/A sub %g0, 1, %g4 ;\
2N/A .align 32
2N/A
2N/A
2N/A/*
2N/A * DTrace traps.
2N/A */
2N/A#define DTRACE_FASTTRAP \
2N/A .global dtrace_fasttrap_probe ;\
2N/A .global dtrace_fasttrap_probe_ptr ;\
2N/A sethi %hi(dtrace_fasttrap_probe_ptr), %g4 ;\
2N/A ldn [%g4 + %lo(dtrace_fasttrap_probe_ptr)], %g4 ;\
2N/A set dtrace_fasttrap_probe, %g1 ;\
2N/A brnz,pn %g4, user_trap ;\
2N/A sub %g0, 1, %g4 ;\
2N/A FAST_TRAP_DONE ;\
2N/A .align 32
2N/A
2N/A#define DTRACE_PID \
2N/A .global dtrace_pid_probe ;\
2N/A set dtrace_pid_probe, %g1 ;\
2N/A ba,pt %xcc, user_trap ;\
2N/A sub %g0, 1, %g4 ;\
2N/A .align 32
2N/A
2N/A#define DTRACE_RETURN \
2N/A .global dtrace_return_probe ;\
2N/A set dtrace_return_probe, %g1 ;\
2N/A ba,pt %xcc, user_trap ;\
2N/A sub %g0, 1, %g4 ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * REGISTER WINDOW MANAGEMENT MACROS
2N/A */
2N/A
2N/A/*
2N/A * various convenient units of padding
2N/A */
2N/A#define SKIP(n) .skip 4*(n)
2N/A
2N/A/*
2N/A * CLEAN_WINDOW is the simple handler for cleaning a register window.
2N/A */
2N/A#define CLEAN_WINDOW \
2N/A TT_TRACE_L(trace_win) ;\
2N/A rdpr %cleanwin, %l0; inc %l0; wrpr %l0, %cleanwin ;\
2N/A clr %l0; clr %l1; clr %l2; clr %l3 ;\
2N/A clr %l4; clr %l5; clr %l6; clr %l7 ;\
2N/A clr %o0; clr %o1; clr %o2; clr %o3 ;\
2N/A clr %o4; clr %o5; clr %o6; clr %o7 ;\
2N/A retry; .align 128
2N/A
2N/A#if !defined(lint)
2N/A
2N/A/*
2N/A * If we get an unresolved tlb miss while in a window handler, the fault
2N/A * handler will resume execution at the last instruction of the window
 * handler, instead of delivering the fault to the kernel. Spill handlers
2N/A * use this to spill windows into the wbuf.
2N/A *
2N/A * The mixed handler works by checking %sp, and branching to the correct
2N/A * handler. This is done by branching back to label 1: for 32b frames,
2N/A * or label 2: for 64b frames; which implies the handler order is: 32b,
2N/A * 64b, mixed. The 1: and 2: labels are offset into the routines to
 * allow the branches' delay slots to contain useful instructions.
2N/A */
2N/A
2N/A/*
2N/A * SPILL_32bit spills a 32-bit-wide kernel register window. It
2N/A * assumes that the kernel context and the nucleus context are the
2N/A * same. The stack pointer is required to be eight-byte aligned even
2N/A * though this code only needs it to be four-byte aligned.
2N/A */
2N/A#define SPILL_32bit(tail) \
2N/A srl %sp, 0, %sp ;\
2N/A1: st %l0, [%sp + 0] ;\
2N/A st %l1, [%sp + 4] ;\
2N/A st %l2, [%sp + 8] ;\
2N/A st %l3, [%sp + 12] ;\
2N/A st %l4, [%sp + 16] ;\
2N/A st %l5, [%sp + 20] ;\
2N/A st %l6, [%sp + 24] ;\
2N/A st %l7, [%sp + 28] ;\
2N/A st %i0, [%sp + 32] ;\
2N/A st %i1, [%sp + 36] ;\
2N/A st %i2, [%sp + 40] ;\
2N/A st %i3, [%sp + 44] ;\
2N/A st %i4, [%sp + 48] ;\
2N/A st %i5, [%sp + 52] ;\
2N/A st %i6, [%sp + 56] ;\
2N/A st %i7, [%sp + 60] ;\
2N/A TT_TRACE_L(trace_win) ;\
2N/A saved ;\
2N/A retry ;\
2N/A SKIP(31-19-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_32bit_/**/tail ;\
2N/A .empty
2N/A
2N/A/*
2N/A * SPILL_32bit_asi spills a 32-bit-wide register window into a 32-bit
2N/A * wide address space via the designated asi. It is used to spill
2N/A * non-kernel windows. The stack pointer is required to be eight-byte
2N/A * aligned even though this code only needs it to be four-byte
2N/A * aligned.
2N/A */
2N/A#define SPILL_32bit_asi(asi_num, tail) \
2N/A srl %sp, 0, %sp ;\
2N/A1: sta %l0, [%sp + %g0]asi_num ;\
2N/A mov 4, %g1 ;\
2N/A sta %l1, [%sp + %g1]asi_num ;\
2N/A mov 8, %g2 ;\
2N/A sta %l2, [%sp + %g2]asi_num ;\
2N/A mov 12, %g3 ;\
2N/A sta %l3, [%sp + %g3]asi_num ;\
2N/A add %sp, 16, %g4 ;\
2N/A sta %l4, [%g4 + %g0]asi_num ;\
2N/A sta %l5, [%g4 + %g1]asi_num ;\
2N/A sta %l6, [%g4 + %g2]asi_num ;\
2N/A sta %l7, [%g4 + %g3]asi_num ;\
2N/A add %g4, 16, %g4 ;\
2N/A sta %i0, [%g4 + %g0]asi_num ;\
2N/A sta %i1, [%g4 + %g1]asi_num ;\
2N/A sta %i2, [%g4 + %g2]asi_num ;\
2N/A sta %i3, [%g4 + %g3]asi_num ;\
2N/A add %g4, 16, %g4 ;\
2N/A sta %i4, [%g4 + %g0]asi_num ;\
2N/A sta %i5, [%g4 + %g1]asi_num ;\
2N/A sta %i6, [%g4 + %g2]asi_num ;\
2N/A sta %i7, [%g4 + %g3]asi_num ;\
2N/A TT_TRACE_L(trace_win) ;\
2N/A saved ;\
2N/A retry ;\
2N/A SKIP(31-25-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_32bit_/**/tail ;\
2N/A .empty
2N/A
2N/A/*
2N/A * SPILL_32bit_tt1 spills a 32-bit-wide register window into a 32-bit
2N/A * wide address space via the designated asi. It is used to spill
2N/A * windows at tl>1 where performance isn't the primary concern and
2N/A * where we don't want to use unnecessary registers. The stack
2N/A * pointer is required to be eight-byte aligned even though this code
2N/A * only needs it to be four-byte aligned.
2N/A */
2N/A#define SPILL_32bit_tt1(asi_num, tail) \
2N/A mov asi_num, %asi ;\
2N/A1: srl %sp, 0, %sp ;\
2N/A sta %l0, [%sp + 0]%asi ;\
2N/A sta %l1, [%sp + 4]%asi ;\
2N/A sta %l2, [%sp + 8]%asi ;\
2N/A sta %l3, [%sp + 12]%asi ;\
2N/A sta %l4, [%sp + 16]%asi ;\
2N/A sta %l5, [%sp + 20]%asi ;\
2N/A sta %l6, [%sp + 24]%asi ;\
2N/A sta %l7, [%sp + 28]%asi ;\
2N/A sta %i0, [%sp + 32]%asi ;\
2N/A sta %i1, [%sp + 36]%asi ;\
2N/A sta %i2, [%sp + 40]%asi ;\
2N/A sta %i3, [%sp + 44]%asi ;\
2N/A sta %i4, [%sp + 48]%asi ;\
2N/A sta %i5, [%sp + 52]%asi ;\
2N/A sta %i6, [%sp + 56]%asi ;\
2N/A sta %i7, [%sp + 60]%asi ;\
2N/A TT_TRACE_L(trace_win) ;\
2N/A saved ;\
2N/A retry ;\
2N/A SKIP(31-20-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_32bit_/**/tail ;\
2N/A .empty
2N/A
2N/A
2N/A/*
2N/A * FILL_32bit fills a 32-bit-wide kernel register window. It assumes
2N/A * that the kernel context and the nucleus context are the same. The
2N/A * stack pointer is required to be eight-byte aligned even though this
2N/A * code only needs it to be four-byte aligned.
2N/A */
2N/A#define FILL_32bit(tail) \
2N/A srl %sp, 0, %sp ;\
2N/A1: TT_TRACE_L(trace_win) ;\
2N/A ld [%sp + 0], %l0 ;\
2N/A ld [%sp + 4], %l1 ;\
2N/A ld [%sp + 8], %l2 ;\
2N/A ld [%sp + 12], %l3 ;\
2N/A ld [%sp + 16], %l4 ;\
2N/A ld [%sp + 20], %l5 ;\
2N/A ld [%sp + 24], %l6 ;\
2N/A ld [%sp + 28], %l7 ;\
2N/A ld [%sp + 32], %i0 ;\
2N/A ld [%sp + 36], %i1 ;\
2N/A ld [%sp + 40], %i2 ;\
2N/A ld [%sp + 44], %i3 ;\
2N/A ld [%sp + 48], %i4 ;\
2N/A ld [%sp + 52], %i5 ;\
2N/A ld [%sp + 56], %i6 ;\
2N/A ld [%sp + 60], %i7 ;\
2N/A restored ;\
2N/A retry ;\
2N/A SKIP(31-19-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_32bit_/**/tail ;\
2N/A .empty
2N/A
2N/A/*
2N/A * FILL_32bit_asi fills a 32-bit-wide register window from a 32-bit
2N/A * wide address space via the designated asi. It is used to fill
2N/A * non-kernel windows. The stack pointer is required to be eight-byte
2N/A * aligned even though this code only needs it to be four-byte
2N/A * aligned.
2N/A */
2N/A#define FILL_32bit_asi(asi_num, tail) \
2N/A srl %sp, 0, %sp ;\
2N/A1: TT_TRACE_L(trace_win) ;\
2N/A mov 4, %g1 ;\
2N/A lda [%sp + %g0]asi_num, %l0 ;\
2N/A mov 8, %g2 ;\
2N/A lda [%sp + %g1]asi_num, %l1 ;\
2N/A mov 12, %g3 ;\
2N/A lda [%sp + %g2]asi_num, %l2 ;\
2N/A lda [%sp + %g3]asi_num, %l3 ;\
2N/A add %sp, 16, %g4 ;\
2N/A lda [%g4 + %g0]asi_num, %l4 ;\
2N/A lda [%g4 + %g1]asi_num, %l5 ;\
2N/A lda [%g4 + %g2]asi_num, %l6 ;\
2N/A lda [%g4 + %g3]asi_num, %l7 ;\
2N/A add %g4, 16, %g4 ;\
2N/A lda [%g4 + %g0]asi_num, %i0 ;\
2N/A lda [%g4 + %g1]asi_num, %i1 ;\
2N/A lda [%g4 + %g2]asi_num, %i2 ;\
2N/A lda [%g4 + %g3]asi_num, %i3 ;\
2N/A add %g4, 16, %g4 ;\
2N/A lda [%g4 + %g0]asi_num, %i4 ;\
2N/A lda [%g4 + %g1]asi_num, %i5 ;\
2N/A lda [%g4 + %g2]asi_num, %i6 ;\
2N/A lda [%g4 + %g3]asi_num, %i7 ;\
2N/A restored ;\
2N/A retry ;\
2N/A SKIP(31-25-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_32bit_/**/tail ;\
2N/A .empty
2N/A
2N/A/*
2N/A * FILL_32bit_tt1 fills a 32-bit-wide register window from a 32-bit
2N/A * wide address space via the designated asi. It is used to fill
2N/A * windows at tl>1 where performance isn't the primary concern and
2N/A * where we don't want to use unnecessary registers. The stack
2N/A * pointer is required to be eight-byte aligned even though this code
2N/A * only needs it to be four-byte aligned.
2N/A */
2N/A#define FILL_32bit_tt1(asi_num, tail) \
2N/A mov asi_num, %asi ;\
2N/A1: srl %sp, 0, %sp ;\
2N/A TT_TRACE_L(trace_win) ;\
2N/A lda [%sp + 0]%asi, %l0 ;\
2N/A lda [%sp + 4]%asi, %l1 ;\
2N/A lda [%sp + 8]%asi, %l2 ;\
2N/A lda [%sp + 12]%asi, %l3 ;\
2N/A lda [%sp + 16]%asi, %l4 ;\
2N/A lda [%sp + 20]%asi, %l5 ;\
2N/A lda [%sp + 24]%asi, %l6 ;\
2N/A lda [%sp + 28]%asi, %l7 ;\
2N/A lda [%sp + 32]%asi, %i0 ;\
2N/A lda [%sp + 36]%asi, %i1 ;\
2N/A lda [%sp + 40]%asi, %i2 ;\
2N/A lda [%sp + 44]%asi, %i3 ;\
2N/A lda [%sp + 48]%asi, %i4 ;\
2N/A lda [%sp + 52]%asi, %i5 ;\
2N/A lda [%sp + 56]%asi, %i6 ;\
2N/A lda [%sp + 60]%asi, %i7 ;\
2N/A restored ;\
2N/A retry ;\
2N/A SKIP(31-20-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_32bit_/**/tail ;\
2N/A .empty
2N/A
2N/A
2N/A/*
2N/A * SPILL_64bit spills a 64-bit-wide kernel register window. It
2N/A * assumes that the kernel context and the nucleus context are the
2N/A * same. The stack pointer is required to be eight-byte aligned.
2N/A */
2N/A#define SPILL_64bit(tail) \
2N/A2: stx %l0, [%sp + V9BIAS64 + 0] ;\
2N/A stx %l1, [%sp + V9BIAS64 + 8] ;\
2N/A stx %l2, [%sp + V9BIAS64 + 16] ;\
2N/A stx %l3, [%sp + V9BIAS64 + 24] ;\
2N/A stx %l4, [%sp + V9BIAS64 + 32] ;\
2N/A stx %l5, [%sp + V9BIAS64 + 40] ;\
2N/A stx %l6, [%sp + V9BIAS64 + 48] ;\
2N/A stx %l7, [%sp + V9BIAS64 + 56] ;\
2N/A stx %i0, [%sp + V9BIAS64 + 64] ;\
2N/A stx %i1, [%sp + V9BIAS64 + 72] ;\
2N/A stx %i2, [%sp + V9BIAS64 + 80] ;\
2N/A stx %i3, [%sp + V9BIAS64 + 88] ;\
2N/A stx %i4, [%sp + V9BIAS64 + 96] ;\
2N/A stx %i5, [%sp + V9BIAS64 + 104] ;\
2N/A stx %i6, [%sp + V9BIAS64 + 112] ;\
2N/A stx %i7, [%sp + V9BIAS64 + 120] ;\
2N/A TT_TRACE_L(trace_win) ;\
2N/A saved ;\
2N/A retry ;\
2N/A SKIP(31-18-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_64bit_/**/tail ;\
2N/A .empty
2N/A
2N/A/*
2N/A * SPILL_64bit_asi spills a 64-bit-wide register window into a 64-bit
2N/A * wide address space via the designated asi. It is used to spill
2N/A * non-kernel windows. The stack pointer is required to be eight-byte
2N/A * aligned.
2N/A */
2N/A#define SPILL_64bit_asi(asi_num, tail) \
2N/A mov 0 + V9BIAS64, %g1 ;\
2N/A2: stxa %l0, [%sp + %g1]asi_num ;\
2N/A mov 8 + V9BIAS64, %g2 ;\
2N/A stxa %l1, [%sp + %g2]asi_num ;\
2N/A mov 16 + V9BIAS64, %g3 ;\
2N/A stxa %l2, [%sp + %g3]asi_num ;\
2N/A mov 24 + V9BIAS64, %g4 ;\
2N/A stxa %l3, [%sp + %g4]asi_num ;\
2N/A add %sp, 32, %g5 ;\
2N/A stxa %l4, [%g5 + %g1]asi_num ;\
2N/A stxa %l5, [%g5 + %g2]asi_num ;\
2N/A stxa %l6, [%g5 + %g3]asi_num ;\
2N/A stxa %l7, [%g5 + %g4]asi_num ;\
2N/A add %g5, 32, %g5 ;\
2N/A stxa %i0, [%g5 + %g1]asi_num ;\
2N/A stxa %i1, [%g5 + %g2]asi_num ;\
2N/A stxa %i2, [%g5 + %g3]asi_num ;\
2N/A stxa %i3, [%g5 + %g4]asi_num ;\
2N/A add %g5, 32, %g5 ;\
2N/A stxa %i4, [%g5 + %g1]asi_num ;\
2N/A stxa %i5, [%g5 + %g2]asi_num ;\
2N/A stxa %i6, [%g5 + %g3]asi_num ;\
2N/A stxa %i7, [%g5 + %g4]asi_num ;\
2N/A TT_TRACE_L(trace_win) ;\
2N/A saved ;\
2N/A retry ;\
2N/A SKIP(31-25-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_64bit_/**/tail ;\
2N/A .empty
2N/A
2N/A/*
2N/A * SPILL_64bit_tt1 spills a 64-bit-wide register window into a 64-bit
2N/A * wide address space via the designated asi. It is used to spill
2N/A * windows at tl>1 where performance isn't the primary concern and
2N/A * where we don't want to use unnecessary registers. The stack
2N/A * pointer is required to be eight-byte aligned.
2N/A */
2N/A#define SPILL_64bit_tt1(asi_num, tail) \
2N/A mov asi_num, %asi ;\
2N/A2: stxa %l0, [%sp + V9BIAS64 + 0]%asi ;\
2N/A stxa %l1, [%sp + V9BIAS64 + 8]%asi ;\
2N/A stxa %l2, [%sp + V9BIAS64 + 16]%asi ;\
2N/A stxa %l3, [%sp + V9BIAS64 + 24]%asi ;\
2N/A stxa %l4, [%sp + V9BIAS64 + 32]%asi ;\
2N/A stxa %l5, [%sp + V9BIAS64 + 40]%asi ;\
2N/A stxa %l6, [%sp + V9BIAS64 + 48]%asi ;\
2N/A stxa %l7, [%sp + V9BIAS64 + 56]%asi ;\
2N/A stxa %i0, [%sp + V9BIAS64 + 64]%asi ;\
2N/A stxa %i1, [%sp + V9BIAS64 + 72]%asi ;\
2N/A stxa %i2, [%sp + V9BIAS64 + 80]%asi ;\
2N/A stxa %i3, [%sp + V9BIAS64 + 88]%asi ;\
2N/A stxa %i4, [%sp + V9BIAS64 + 96]%asi ;\
2N/A stxa %i5, [%sp + V9BIAS64 + 104]%asi ;\
2N/A stxa %i6, [%sp + V9BIAS64 + 112]%asi ;\
2N/A stxa %i7, [%sp + V9BIAS64 + 120]%asi ;\
2N/A TT_TRACE_L(trace_win) ;\
2N/A saved ;\
2N/A retry ;\
2N/A SKIP(31-19-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_64bit_/**/tail ;\
2N/A .empty
2N/A
2N/A
2N/A/*
2N/A * FILL_64bit fills a 64-bit-wide kernel register window. It assumes
2N/A * that the kernel context and the nucleus context are the same. The
2N/A * stack pointer is required to be eight-byte aligned.
2N/A */
2N/A#define FILL_64bit(tail) \
2N/A2: TT_TRACE_L(trace_win) ;\
2N/A ldx [%sp + V9BIAS64 + 0], %l0 ;\
2N/A ldx [%sp + V9BIAS64 + 8], %l1 ;\
2N/A ldx [%sp + V9BIAS64 + 16], %l2 ;\
2N/A ldx [%sp + V9BIAS64 + 24], %l3 ;\
2N/A ldx [%sp + V9BIAS64 + 32], %l4 ;\
2N/A ldx [%sp + V9BIAS64 + 40], %l5 ;\
2N/A ldx [%sp + V9BIAS64 + 48], %l6 ;\
2N/A ldx [%sp + V9BIAS64 + 56], %l7 ;\
2N/A ldx [%sp + V9BIAS64 + 64], %i0 ;\
2N/A ldx [%sp + V9BIAS64 + 72], %i1 ;\
2N/A ldx [%sp + V9BIAS64 + 80], %i2 ;\
2N/A ldx [%sp + V9BIAS64 + 88], %i3 ;\
2N/A ldx [%sp + V9BIAS64 + 96], %i4 ;\
2N/A ldx [%sp + V9BIAS64 + 104], %i5 ;\
2N/A ldx [%sp + V9BIAS64 + 112], %i6 ;\
2N/A ldx [%sp + V9BIAS64 + 120], %i7 ;\
2N/A restored ;\
2N/A retry ;\
2N/A SKIP(31-18-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_64bit_/**/tail ;\
2N/A .empty
2N/A
2N/A/*
2N/A * FILL_64bit_asi fills a 64-bit-wide register window from a 64-bit
2N/A * wide address space via the designated asi. It is used to fill
2N/A * non-kernel windows. The stack pointer is required to be eight-byte
2N/A * aligned.
2N/A */
2N/A#define FILL_64bit_asi(asi_num, tail) \
2N/A mov V9BIAS64 + 0, %g1 ;\
2N/A2: TT_TRACE_L(trace_win) ;\
2N/A ldxa [%sp + %g1]asi_num, %l0 ;\
2N/A mov V9BIAS64 + 8, %g2 ;\
2N/A ldxa [%sp + %g2]asi_num, %l1 ;\
2N/A mov V9BIAS64 + 16, %g3 ;\
2N/A ldxa [%sp + %g3]asi_num, %l2 ;\
2N/A mov V9BIAS64 + 24, %g4 ;\
2N/A ldxa [%sp + %g4]asi_num, %l3 ;\
2N/A add %sp, 32, %g5 ;\
2N/A ldxa [%g5 + %g1]asi_num, %l4 ;\
2N/A ldxa [%g5 + %g2]asi_num, %l5 ;\
2N/A ldxa [%g5 + %g3]asi_num, %l6 ;\
2N/A ldxa [%g5 + %g4]asi_num, %l7 ;\
2N/A add %g5, 32, %g5 ;\
2N/A ldxa [%g5 + %g1]asi_num, %i0 ;\
2N/A ldxa [%g5 + %g2]asi_num, %i1 ;\
2N/A ldxa [%g5 + %g3]asi_num, %i2 ;\
2N/A ldxa [%g5 + %g4]asi_num, %i3 ;\
2N/A add %g5, 32, %g5 ;\
2N/A ldxa [%g5 + %g1]asi_num, %i4 ;\
2N/A ldxa [%g5 + %g2]asi_num, %i5 ;\
2N/A ldxa [%g5 + %g3]asi_num, %i6 ;\
2N/A ldxa [%g5 + %g4]asi_num, %i7 ;\
2N/A restored ;\
2N/A retry ;\
2N/A SKIP(31-25-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_64bit_/**/tail ;\
2N/A .empty
2N/A
2N/A/*
2N/A * FILL_64bit_tt1 fills a 64-bit-wide register window from a 64-bit
2N/A * wide address space via the designated asi. It is used to fill
2N/A * windows at tl>1 where performance isn't the primary concern and
2N/A * where we don't want to use unnecessary registers. The stack
2N/A * pointer is required to be eight-byte aligned.
2N/A */
2N/A#define FILL_64bit_tt1(asi_num, tail) \
2N/A mov asi_num, %asi ;\
2N/A TT_TRACE_L(trace_win) ;\
2N/A ldxa [%sp + V9BIAS64 + 0]%asi, %l0 ;\
2N/A ldxa [%sp + V9BIAS64 + 8]%asi, %l1 ;\
2N/A ldxa [%sp + V9BIAS64 + 16]%asi, %l2 ;\
2N/A ldxa [%sp + V9BIAS64 + 24]%asi, %l3 ;\
2N/A ldxa [%sp + V9BIAS64 + 32]%asi, %l4 ;\
2N/A ldxa [%sp + V9BIAS64 + 40]%asi, %l5 ;\
2N/A ldxa [%sp + V9BIAS64 + 48]%asi, %l6 ;\
2N/A ldxa [%sp + V9BIAS64 + 56]%asi, %l7 ;\
2N/A ldxa [%sp + V9BIAS64 + 64]%asi, %i0 ;\
2N/A ldxa [%sp + V9BIAS64 + 72]%asi, %i1 ;\
2N/A ldxa [%sp + V9BIAS64 + 80]%asi, %i2 ;\
2N/A ldxa [%sp + V9BIAS64 + 88]%asi, %i3 ;\
2N/A ldxa [%sp + V9BIAS64 + 96]%asi, %i4 ;\
2N/A ldxa [%sp + V9BIAS64 + 104]%asi, %i5 ;\
2N/A ldxa [%sp + V9BIAS64 + 112]%asi, %i6 ;\
2N/A ldxa [%sp + V9BIAS64 + 120]%asi, %i7 ;\
2N/A restored ;\
2N/A retry ;\
2N/A SKIP(31-19-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_64bit_/**/tail ;\
2N/A .empty
2N/A
2N/A#endif /* !lint */
2N/A
2N/A/*
2N/A * SPILL_mixed spills either size window, depending on
2N/A * whether %sp is even or odd, to a 32-bit address space.
2N/A * This may only be used in conjunction with SPILL_32bit/
2N/A * SPILL_64bit. New versions of SPILL_mixed_{tt1,asi} would be
2N/A * needed for use with SPILL_{32,64}bit_{tt1,asi}. Particular
2N/A * attention should be paid to the instructions that belong
2N/A * in the delay slots of the branches depending on the type
2N/A * of spill handler being branched to.
2N/A * Clear upper 32 bits of %sp if it is odd.
2N/A * We won't need to clear them in 64 bit kernel.
2N/A */
2N/A#define SPILL_mixed \
2N/A btst 1, %sp ;\
2N/A bz,a,pt %xcc, 1b ;\
2N/A srl %sp, 0, %sp ;\
2N/A ba,pt %xcc, 2b ;\
2N/A nop ;\
2N/A .align 128
2N/A
2N/A/*
2N/A * FILL_mixed(ASI) fills either size window, depending on
2N/A * whether %sp is even or odd, from a 32-bit address space.
2N/A * This may only be used in conjunction with FILL_32bit/
2N/A * FILL_64bit. New versions of FILL_mixed_{tt1,asi} would be
2N/A * needed for use with FILL_{32,64}bit_{tt1,asi}. Particular
2N/A * attention should be paid to the instructions that belong
2N/A * in the delay slots of the branches depending on the type
2N/A * of fill handler being branched to.
2N/A * Clear upper 32 bits of %sp if it is odd.
2N/A * We won't need to clear them in 64 bit kernel.
2N/A */
2N/A#define FILL_mixed \
2N/A btst 1, %sp ;\
2N/A bz,a,pt %xcc, 1b ;\
2N/A srl %sp, 0, %sp ;\
2N/A ba,pt %xcc, 2b ;\
2N/A nop ;\
2N/A .align 128
2N/A
2N/A
2N/A/*
2N/A * SPILL_32clean/SPILL_64clean spill 32-bit and 64-bit register windows,
2N/A * respectively, into the address space via the designated asi. The
2N/A * unbiased stack pointer is required to be eight-byte aligned (even for
2N/A * the 32-bit case even though this code does not require such strict
2N/A * alignment).
2N/A *
2N/A * With SPARC v9 the spill trap takes precedence over the cleanwin trap
2N/A * so when cansave == 0, canrestore == 6, and cleanwin == 6 the next save
2N/A * will cause cwp + 2 to be spilled but will not clean cwp + 1. That
2N/A * window may contain kernel data so in user_rtt we set wstate to call
2N/A * these spill handlers on the first user spill trap. These handler then
2N/A * spill the appropriate window but also back up a window and clean the
2N/A * window that didn't get a cleanwin trap.
2N/A */
2N/A#define SPILL_32clean(asi_num, tail) \
2N/A srl %sp, 0, %sp ;\
2N/A sta %l0, [%sp + %g0]asi_num ;\
2N/A mov 4, %g1 ;\
2N/A sta %l1, [%sp + %g1]asi_num ;\
2N/A mov 8, %g2 ;\
2N/A sta %l2, [%sp + %g2]asi_num ;\
2N/A mov 12, %g3 ;\
2N/A sta %l3, [%sp + %g3]asi_num ;\
2N/A add %sp, 16, %g4 ;\
2N/A sta %l4, [%g4 + %g0]asi_num ;\
2N/A sta %l5, [%g4 + %g1]asi_num ;\
2N/A sta %l6, [%g4 + %g2]asi_num ;\
2N/A sta %l7, [%g4 + %g3]asi_num ;\
2N/A add %g4, 16, %g4 ;\
2N/A sta %i0, [%g4 + %g0]asi_num ;\
2N/A sta %i1, [%g4 + %g1]asi_num ;\
2N/A sta %i2, [%g4 + %g2]asi_num ;\
2N/A sta %i3, [%g4 + %g3]asi_num ;\
2N/A add %g4, 16, %g4 ;\
2N/A sta %i4, [%g4 + %g0]asi_num ;\
2N/A sta %i5, [%g4 + %g1]asi_num ;\
2N/A sta %i6, [%g4 + %g2]asi_num ;\
2N/A sta %i7, [%g4 + %g3]asi_num ;\
2N/A TT_TRACE_L(trace_win) ;\
2N/A b .spill_clean ;\
2N/A mov WSTATE_USER32, %g7 ;\
2N/A SKIP(31-25-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_32bit_/**/tail ;\
2N/A .empty
2N/A
2N/A#define SPILL_64clean(asi_num, tail) \
2N/A mov 0 + V9BIAS64, %g1 ;\
2N/A stxa %l0, [%sp + %g1]asi_num ;\
2N/A mov 8 + V9BIAS64, %g2 ;\
2N/A stxa %l1, [%sp + %g2]asi_num ;\
2N/A mov 16 + V9BIAS64, %g3 ;\
2N/A stxa %l2, [%sp + %g3]asi_num ;\
2N/A mov 24 + V9BIAS64, %g4 ;\
2N/A stxa %l3, [%sp + %g4]asi_num ;\
2N/A add %sp, 32, %g5 ;\
2N/A stxa %l4, [%g5 + %g1]asi_num ;\
2N/A stxa %l5, [%g5 + %g2]asi_num ;\
2N/A stxa %l6, [%g5 + %g3]asi_num ;\
2N/A stxa %l7, [%g5 + %g4]asi_num ;\
2N/A add %g5, 32, %g5 ;\
2N/A stxa %i0, [%g5 + %g1]asi_num ;\
2N/A stxa %i1, [%g5 + %g2]asi_num ;\
2N/A stxa %i2, [%g5 + %g3]asi_num ;\
2N/A stxa %i3, [%g5 + %g4]asi_num ;\
2N/A add %g5, 32, %g5 ;\
2N/A stxa %i4, [%g5 + %g1]asi_num ;\
2N/A stxa %i5, [%g5 + %g2]asi_num ;\
2N/A stxa %i6, [%g5 + %g3]asi_num ;\
2N/A stxa %i7, [%g5 + %g4]asi_num ;\
2N/A TT_TRACE_L(trace_win) ;\
2N/A b .spill_clean ;\
2N/A mov WSTATE_USER64, %g7 ;\
2N/A SKIP(31-25-TT_TRACE_L_INS) ;\
2N/A ba,a,pt %xcc, fault_64bit_/**/tail ;\
2N/A .empty
2N/A
2N/A
2N/A/*
2N/A * Floating point disabled.
2N/A */
2N/A#define FP_DISABLED_TRAP \
2N/A TT_TRACE(trace_gen) ;\
2N/A ba,pt %xcc,.fp_disabled ;\
2N/A nop ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * Floating point exceptions.
2N/A */
2N/A#define FP_IEEE_TRAP \
2N/A TT_TRACE(trace_gen) ;\
2N/A ba,pt %xcc,.fp_ieee_exception ;\
2N/A nop ;\
2N/A .align 32
2N/A
2N/A#define FP_TRAP \
2N/A TT_TRACE(trace_gen) ;\
2N/A ba,pt %xcc,.fp_exception ;\
2N/A nop ;\
2N/A .align 32
2N/A
2N/A#if !defined(lint)
2N/A/*
2N/A * asynchronous traps at level 0 and level 1
2N/A *
2N/A * The first instruction must be a membar for UltraSPARC-III
2N/A * to stop RED state entry if the store queue has many
2N/A * pending bad stores (PRM, Chapter 11).
2N/A */
2N/A#define ASYNC_TRAP(ttype, ttlabel)\
2N/A membar #Sync ;\
2N/A TT_TRACE(ttlabel) ;\
2N/A ba async_err ;\
2N/A mov ttype, %g5 ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * Defaults to BAD entry, but establishes label to be used for
2N/A * architecture-specific overwrite of trap table entry.
2N/A */
2N/A#define LABELED_BAD(table_name) \
2N/A .global table_name ;\
2N/Atable_name: ;\
2N/A BAD
2N/A
2N/A#endif /* !lint */
2N/A
2N/A/*
2N/A * illegal instruction trap
2N/A */
2N/A#define ILLTRAP_INSTR \
2N/A membar #Sync ;\
2N/A TT_TRACE(trace_gen) ;\
2N/A or %g0, P_UTRAP4, %g2 ;\
2N/A or %g0, T_UNIMP_INSTR, %g3 ;\
2N/A sethi %hi(.check_v9utrap), %g4 ;\
2N/A jmp %g4 + %lo(.check_v9utrap) ;\
2N/A nop ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * tag overflow trap
2N/A */
2N/A#define TAG_OVERFLOW \
2N/A TT_TRACE(trace_gen) ;\
2N/A or %g0, P_UTRAP10, %g2 ;\
2N/A or %g0, T_TAG_OVERFLOW, %g3 ;\
2N/A sethi %hi(.check_v9utrap), %g4 ;\
2N/A jmp %g4 + %lo(.check_v9utrap) ;\
2N/A nop ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * divide by zero trap
2N/A */
2N/A#define DIV_BY_ZERO \
2N/A TT_TRACE(trace_gen) ;\
2N/A or %g0, P_UTRAP11, %g2 ;\
2N/A or %g0, T_IDIV0, %g3 ;\
2N/A sethi %hi(.check_v9utrap), %g4 ;\
2N/A jmp %g4 + %lo(.check_v9utrap) ;\
2N/A nop ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * trap instruction for V9 user trap handlers
2N/A */
2N/A#define TRAP_INSTR \
2N/A TT_TRACE(trace_gen) ;\
2N/A or %g0, T_SOFTWARE_TRAP, %g3 ;\
2N/A sethi %hi(.check_v9utrap), %g4 ;\
2N/A jmp %g4 + %lo(.check_v9utrap) ;\
2N/A nop ;\
2N/A .align 32
2N/A#define TRP4 TRAP_INSTR; TRAP_INSTR; TRAP_INSTR; TRAP_INSTR
2N/A
2N/A/*
2N/A * LEVEL_INTERRUPT is for level N interrupts.
2N/A * VECTOR_INTERRUPT is for the vector trap.
2N/A */
2N/A#define LEVEL_INTERRUPT(level) \
2N/A .global tt_pil/**/level ;\
2N/Att_pil/**/level: ;\
2N/A ba,pt %xcc, pil_interrupt ;\
2N/A mov level, %g4 ;\
2N/A .align 32
2N/A
2N/A#define LEVEL14_INTERRUPT \
2N/A ba pil14_interrupt ;\
2N/A mov PIL_14, %g4 ;\
2N/A .align 32
2N/A
2N/A#define VECTOR_INTERRUPT \
2N/A ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g1 ;\
2N/A btst IRSR_BUSY, %g1 ;\
2N/A bnz,pt %xcc, vec_interrupt ;\
2N/A nop ;\
2N/A ba,a,pt %xcc, vec_intr_spurious ;\
2N/A .empty ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * MMU Trap Handlers.
2N/A */
2N/A#define SWITCH_GLOBALS /* mmu->alt, alt->mmu */ \
2N/A rdpr %pstate, %g5 ;\
2N/A wrpr %g5, PSTATE_MG | PSTATE_AG, %pstate
2N/A
2N/A#define IMMU_EXCEPTION \
2N/A membar #Sync ;\
2N/A SWITCH_GLOBALS ;\
2N/A wr %g0, ASI_IMMU, %asi ;\
2N/A rdpr %tpc, %g2 ;\
2N/A ldxa [MMU_SFSR]%asi, %g3 ;\
2N/A ba,pt %xcc, .mmu_exception_end ;\
2N/A mov T_INSTR_EXCEPTION, %g1 ;\
2N/A .align 32
2N/A
2N/A#define DMMU_EXCEPTION \
2N/A SWITCH_GLOBALS ;\
2N/A wr %g0, ASI_DMMU, %asi ;\
2N/A ldxa [MMU_TAG_ACCESS]%asi, %g2 ;\
2N/A ldxa [MMU_SFSR]%asi, %g3 ;\
2N/A ba,pt %xcc, .mmu_exception_end ;\
2N/A mov T_DATA_EXCEPTION, %g1 ;\
2N/A .align 32
2N/A
2N/A#define DMMU_EXC_AG_PRIV \
2N/A wr %g0, ASI_DMMU, %asi ;\
2N/A ldxa [MMU_SFAR]%asi, %g2 ;\
2N/A ba,pt %xcc, .mmu_priv_exception ;\
2N/A ldxa [MMU_SFSR]%asi, %g3 ;\
2N/A .align 32
2N/A
2N/A#define DMMU_EXC_AG_NOT_ALIGNED \
2N/A wr %g0, ASI_DMMU, %asi ;\
2N/A ldxa [MMU_SFAR]%asi, %g2 ;\
2N/A ba,pt %xcc, .mmu_exception_not_aligned ;\
2N/A ldxa [MMU_SFSR]%asi, %g3 ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * SPARC V9 IMPL. DEP. #109(1) and (2) and #110(1) and (2)
2N/A */
2N/A#define DMMU_EXC_LDDF_NOT_ALIGNED \
2N/A btst 1, %sp ;\
2N/A bnz,pt %xcc, .lddf_exception_not_aligned ;\
2N/A wr %g0, ASI_DMMU, %asi ;\
2N/A ldxa [MMU_SFAR]%asi, %g2 ;\
2N/A ba,pt %xcc, .mmu_exception_not_aligned ;\
2N/A ldxa [MMU_SFSR]%asi, %g3 ;\
2N/A .align 32
2N/A
2N/A#define DMMU_EXC_STDF_NOT_ALIGNED \
2N/A btst 1, %sp ;\
2N/A bnz,pt %xcc, .stdf_exception_not_aligned ;\
2N/A wr %g0, ASI_DMMU, %asi ;\
2N/A ldxa [MMU_SFAR]%asi, %g2 ;\
2N/A ba,pt %xcc, .mmu_exception_not_aligned ;\
2N/A ldxa [MMU_SFSR]%asi, %g3 ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * Flush the TLB using either the primary, secondary, or nucleus flush
2N/A * operation based on whether the ctx from the tag access register matches
2N/A * the primary or secondary context (flush the nucleus if neither matches).
2N/A *
2N/A * Requires a membar #Sync before next ld/st.
2N/A * exits with:
2N/A * g2 = tag access register
2N/A * g3 = ctx number
2N/A */
2N/A#if TAGACC_CTX_MASK != CTXREG_CTX_MASK
2N/A#error "TAGACC_CTX_MASK != CTXREG_CTX_MASK"
2N/A#endif
2N/A#define DTLB_DEMAP_ENTRY \
2N/A mov MMU_TAG_ACCESS, %g1 ;\
2N/A mov MMU_PCONTEXT, %g5 ;\
2N/A ldxa [%g1]ASI_DMMU, %g2 ;\
2N/A sethi %hi(TAGACC_CTX_MASK), %g4 ;\
2N/A or %g4, %lo(TAGACC_CTX_MASK), %g4 ;\
2N/A and %g2, %g4, %g3 /* g3 = ctx */ ;\
2N/A ldxa [%g5]ASI_DMMU, %g6 /* g6 = primary ctx */ ;\
2N/A and %g6, %g4, %g6 /* &= CTXREG_CTX_MASK */ ;\
2N/A cmp %g3, %g6 ;\
2N/A be,pt %xcc, 1f ;\
2N/A andn %g2, %g4, %g1 /* ctx = primary */ ;\
2N/A mov MMU_SCONTEXT, %g5 ;\
2N/A ldxa [%g5]ASI_DMMU, %g6 /* g6 = secondary ctx */ ;\
2N/A and %g6, %g4, %g6 /* &= CTXREG_CTX_MASK */ ;\
2N/A cmp %g3, %g6 ;\
2N/A be,a,pt %xcc, 1f ;\
2N/A or %g1, DEMAP_SECOND, %g1 ;\
2N/A or %g1, DEMAP_NUCLEUS, %g1 ;\
2N/A1: stxa %g0, [%g1]ASI_DTLB_DEMAP /* MMU_DEMAP_PAGE */ ;\
2N/A membar #Sync
2N/A
2N/A#if defined(cscope)
2N/A/*
2N/A * Define labels to direct cscope quickly to labels that
2N/A * are generated by macro expansion of DTLB_MISS().
2N/A */
2N/A .global tt0_dtlbmiss
2N/Att0_dtlbmiss:
2N/A .global tt1_dtlbmiss
2N/Att1_dtlbmiss:
2N/A nop
2N/A#endif
2N/A
2N/A/*
2N/A * Needs to be exactly 32 instructions
2N/A *
2N/A * UTLB NOTE: If we don't hit on the 8k pointer then we branch
2N/A * to a special 4M tsb handler. It would be nice if that handler
2N/A * could live in this file but currently it seems better to allow
2N/A * it to fall thru to sfmmu_tsb_miss.
2N/A */
2N/A/*
2N/A * Register usage on the miss path: %g1 = 8K TSB entry pointer,
2N/A * %g2 = tag access, %g3 = ctx (zeroed on the sfmmu_tsb_miss_tt path
2N/A * to mean "no 4M TSB pointer"), %g4/%g5 = TSB entry tag/data from the
2N/A * atomic quad load, %g7 = tag to match.  The unimps pad to 32 instrs.
2N/A */
2N/A#define DTLB_MISS(table_name) ;\
2N/A .global table_name/**/_dtlbmiss ;\
2N/Atable_name/**/_dtlbmiss: ;\
2N/A HAT_PERCPU_DBSTAT(TSBMISS_DTLBMISS) /* 3 instr ifdef DEBUG */ ;\
2N/A mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\
2N/A ldxa [%g0]ASI_DMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\
2N/A ldxa [%g6]ASI_DMMU, %g2 /* g2 = tag access */ ;\
2N/A sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\
2N/A srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\
2N/A cmp %g3, INVALID_CONTEXT ;\
2N/A ble,pn %xcc, sfmmu_kdtlb_miss ;\
2N/A srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\
2N/A brlz,pn %g1, sfmmu_udtlb_slowpath ;\
2N/A nop ;\
2N/A ldda [%g1]ASI_NQUAD_LD, %g4 /* g4 = tag, %g5 data */ ;\
2N/A cmp %g4, %g7 ;\
2N/A bne,pn %xcc, sfmmu_tsb_miss_tt /* no 4M TSB, miss */ ;\
2N/A mov %g0, %g3 /* clear 4M tsbe ptr */ ;\
2N/A TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\
2N/A stxa %g5, [%g0]ASI_DTLB_IN /* trapstat expects TTE */ ;\
2N/A retry /* in %g5 */ ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A .align 128
2N/A
2N/A#if defined(cscope)
2N/A/*
2N/A * Define labels to direct cscope quickly to labels that
2N/A * are generated by macro expansion of ITLB_MISS().
2N/A */
2N/A .global tt0_itlbmiss
2N/Att0_itlbmiss:
2N/A .global tt1_itlbmiss
2N/Att1_itlbmiss:
2N/A nop
2N/A#endif
2N/A
2N/A/*
2N/A * Instruction miss handler.
2N/A * ldda instructions will have their ASI patched
2N/A * by sfmmu_patch_ktsb at runtime.
2N/A * MUST be EXACTLY 32 instructions or we'll break.
2N/A */
2N/A/*
2N/A * Same register usage as DTLB_MISS, plus a check of the TTE's execute
2N/A * permission bit: an 8K TSB hit without TTE_EXECPRM_INT set is routed
2N/A * to exec_fault rather than loaded into the ITLB.
2N/A */
2N/A#define ITLB_MISS(table_name) \
2N/A .global table_name/**/_itlbmiss ;\
2N/Atable_name/**/_itlbmiss: ;\
2N/A HAT_PERCPU_DBSTAT(TSBMISS_ITLBMISS) /* 3 instr ifdef DEBUG */ ;\
2N/A mov MMU_TAG_ACCESS, %g6 /* select tag acc */ ;\
2N/A ldxa [%g0]ASI_IMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\
2N/A ldxa [%g6]ASI_IMMU, %g2 /* g2 = tag access */ ;\
2N/A sllx %g2, TAGACC_CTX_LSHIFT, %g3 ;\
2N/A srlx %g3, TAGACC_CTX_LSHIFT, %g3 /* g3 = ctx */ ;\
2N/A cmp %g3, INVALID_CONTEXT ;\
2N/A ble,pn %xcc, sfmmu_kitlb_miss ;\
2N/A srlx %g2, TAG_VALO_SHIFT, %g7 /* g7 = tsb tag */ ;\
2N/A brlz,pn %g1, sfmmu_uitlb_slowpath /* if >1 TSB branch */ ;\
2N/A nop ;\
2N/A ldda [%g1]ASI_NQUAD_LD, %g4 /* g4 = tag, g5 = data */ ;\
2N/A cmp %g4, %g7 ;\
2N/A bne,pn %xcc, sfmmu_tsb_miss_tt /* br if 8k ptr miss */ ;\
2N/A mov %g0, %g3 /* no 4M TSB */ ;\
2N/A andcc %g5, TTE_EXECPRM_INT, %g0 /* check execute bit */ ;\
2N/A bz,pn %icc, exec_fault ;\
2N/A nop ;\
2N/A TT_TRACE(trace_tsbhit) /* 2 instr ifdef TRAPTRACE */ ;\
2N/A stxa %g5, [%g0]ASI_ITLB_IN /* trapstat expects %g5 */ ;\
2N/A retry ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A .align 128
2N/A
2N/A
2N/A/*
2N/A * This macro is the first level handler for fast protection faults.
2N/A * It first demaps the tlb entry which generated the fault and then
2N/A * attempts to set the modify bit on the hash. It needs to be
2N/A * exactly 32 instructions.
2N/A */
2N/A/*
2N/A * On entry to sfmmu_{u,k}prot_trap: %g1 = 8K TSB entry pointer,
2N/A * %g2 = tag access register, %g3 = ctx number (zero ctx = kernel).
2N/A */
2N/A#define DTLB_PROT \
2N/A DTLB_DEMAP_ENTRY /* 20 instructions */ ;\
2N/A /* ;\
2N/A * At this point: ;\
2N/A * g1 = ???? ;\
2N/A * g2 = tag access register ;\
2N/A * g3 = ctx number ;\
2N/A * g4 = ???? ;\
2N/A */ ;\
2N/A TT_TRACE(trace_dataprot) /* 2 instr ifdef TRAPTRACE */ ;\
2N/A /* clobbers g1 and g6 */ ;\
2N/A ldxa [%g0]ASI_DMMU_TSB_8K, %g1 /* g1 = tsbe ptr */ ;\
2N/A brnz,pt %g3, sfmmu_uprot_trap /* user trap */ ;\
2N/A nop ;\
2N/A ba,a,pt %xcc, sfmmu_kprot_trap /* kernel trap */ ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A unimp 0 ;\
2N/A .align 128
2N/A
2N/A/*
2N/A * Data access exception taken at TL>0: switch to the alternate
2N/A * globals and let mmu_trap_tl1 sort it out.
2N/A */
2N/A#define DMMU_EXCEPTION_TL1 ;\
2N/A SWITCH_GLOBALS ;\
2N/A ba,a,pt %xcc, mmu_trap_tl1 ;\
2N/A nop ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * Misaligned address at TL>0: also handled by mmu_trap_tl1 (no
2N/A * global switch needed here).
2N/A */
2N/A#define MISALIGN_ADDR_TL1 ;\
2N/A ba,a,pt %xcc, mmu_trap_tl1 ;\
2N/A nop ;\
2N/A .align 32
2N/A
2N/A/*
2N/A * Trace a tsb hit
2N/A * g1 = tsbe pointer (in/clobbered)
2N/A * g2 = tag access register (in)
2N/A * g3 - g4 = scratch (clobbered)
2N/A * g5 = tsbe data (in)
2N/A * g6 = scratch (clobbered)
2N/A * g7 = pc we jumped here from (in)
2N/A * ttextra = value to OR in to trap type (%tt) (in)
2N/A */
2N/A#ifdef TRAPTRACE
2N/A/*
2N/A * The cmp/movne pair below records the DMMU tag target unless %tt
2N/A * (with ttextra OR'd in) is a fast IMMU miss, in which case the
2N/A * IMMU tag target already in %g1 is kept.
2N/A */
2N/A#define TRACE_TSBHIT(ttextra) \
2N/A membar #Sync ;\
2N/A sethi %hi(FLUSH_ADDR), %g6 ;\
2N/A flush %g6 ;\
2N/A TRACE_PTR(%g3, %g6) ;\
2N/A GET_TRACE_TICK(%g6) ;\
2N/A stxa %g6, [%g3 + TRAP_ENT_TICK]%asi ;\
2N/A stxa %g2, [%g3 + TRAP_ENT_SP]%asi /* tag access */ ;\
2N/A stxa %g5, [%g3 + TRAP_ENT_F1]%asi /* tsb data */ ;\
2N/A rdpr %tnpc, %g6 ;\
2N/A stxa %g6, [%g3 + TRAP_ENT_F2]%asi ;\
2N/A stxa %g1, [%g3 + TRAP_ENT_F3]%asi /* tsb pointer */ ;\
2N/A stxa %g0, [%g3 + TRAP_ENT_F4]%asi ;\
2N/A rdpr %tpc, %g6 ;\
2N/A stxa %g6, [%g3 + TRAP_ENT_TPC]%asi ;\
2N/A rdpr %tl, %g6 ;\
2N/A stha %g6, [%g3 + TRAP_ENT_TL]%asi ;\
2N/A rdpr %tt, %g6 ;\
2N/A or %g6, (ttextra), %g6 ;\
2N/A stha %g6, [%g3 + TRAP_ENT_TT]%asi ;\
2N/A ldxa [%g0]ASI_IMMU, %g1 /* tag target */ ;\
2N/A ldxa [%g0]ASI_DMMU, %g4 ;\
2N/A cmp %g6, FAST_IMMU_MISS_TT ;\
2N/A movne %icc, %g4, %g1 ;\
2N/A stxa %g1, [%g3 + TRAP_ENT_TSTATE]%asi /* tsb tag */ ;\
2N/A stxa %g0, [%g3 + TRAP_ENT_TR]%asi ;\
2N/A TRACE_NEXT(%g3, %g4, %g6)
2N/A#else
2N/A#define TRACE_TSBHIT(ttextra)
2N/A#endif
2N/A
2N/A#if defined(lint)
2N/A
2N/A/* C-visible declarations so lint can type-check users of the table */
2N/Astruct scb trap_table;
2N/Astruct scb scb; /* trap_table/scb are the same object */
2N/A
2N/A#else /* lint */
2N/A
2N/A/*
2N/A * =======================================================================
2N/A * SPARC V9 TRAP TABLE
2N/A *
2N/A * The trap table is divided into two halves: the first half is used when
2N/A * taking traps when TL=0; the second half is used when taking traps from
2N/A * TL>0. Note that handlers in the second half of the table might not be able
2N/A * to make the same assumptions as handlers in the first half of the table.
2N/A *
2N/A * Worst case trap nesting so far:
2N/A *
2N/A * at TL=0 client issues software trap requesting service
2N/A * at TL=1 nucleus wants a register window
2N/A * at TL=2 register window clean/spill/fill takes a TLB miss
2N/A * at TL=3 processing TLB miss
2N/A * at TL=4 handle asynchronous error
2N/A *
2N/A * Note that a trap from TL=4 to TL=5 places Spitfire in "RED mode".
2N/A *
2N/A * =======================================================================
2N/A */
2N/A .section ".text"
2N/A .align 4
2N/A .global trap_table, scb, trap_table0, trap_table1, etrap_table
2N/A .type trap_table, #function
2N/A .type scb, #function
2N/Atrap_table:
2N/Ascb:
2N/Atrap_table0:
2N/A /* hardware traps */
2N/A NOT; /* 000 reserved */
2N/A RED; /* 001 power on reset */
2N/A RED; /* 002 watchdog reset */
2N/A RED; /* 003 externally initiated reset */
2N/A RED; /* 004 software initiated reset */
2N/A RED; /* 005 red mode exception */
2N/A NOT; NOT; /* 006 - 007 reserved */
2N/A IMMU_EXCEPTION; /* 008 instruction access exception */
2N/A NOT; /* 009 instruction access MMU miss */
2N/A ASYNC_TRAP(T_INSTR_ERROR, trace_gen);
2N/A /* 00A instruction access error */
2N/A NOT; NOT4; /* 00B - 00F reserved */
2N/A ILLTRAP_INSTR; /* 010 illegal instruction */
2N/A TRAP(T_PRIV_INSTR); /* 011 privileged opcode */
2N/A NOT; /* 012 unimplemented LDD */
2N/A NOT; /* 013 unimplemented STD */
2N/A NOT4; NOT4; NOT4; /* 014 - 01F reserved */
2N/A FP_DISABLED_TRAP; /* 020 fp disabled */
2N/A FP_IEEE_TRAP; /* 021 fp exception ieee 754 */
2N/A FP_TRAP; /* 022 fp exception other */
2N/A TAG_OVERFLOW; /* 023 tag overflow */
2N/A CLEAN_WINDOW; /* 024 - 027 clean window */
2N/A DIV_BY_ZERO; /* 028 division by zero */
2N/A NOT; /* 029 internal processor error */
2N/A NOT; NOT; NOT4; /* 02A - 02F reserved */
2N/A DMMU_EXCEPTION; /* 030 data access exception */
2N/A NOT; /* 031 data access MMU miss */
2N/A ASYNC_TRAP(T_DATA_ERROR, trace_gen);
2N/A /* 032 data access error */
2N/A NOT; /* 033 data access protection */
2N/A DMMU_EXC_AG_NOT_ALIGNED; /* 034 mem address not aligned */
2N/A DMMU_EXC_LDDF_NOT_ALIGNED; /* 035 LDDF mem address not aligned */
2N/A DMMU_EXC_STDF_NOT_ALIGNED; /* 036 STDF mem address not aligned */
2N/A DMMU_EXC_AG_PRIV; /* 037 privileged action */
2N/A NOT; /* 038 LDQF mem address not aligned */
2N/A NOT; /* 039 STQF mem address not aligned */
2N/A NOT; NOT; NOT4; /* 03A - 03F reserved */
2N/A NOT; /* 040 async data error */
2N/A LEVEL_INTERRUPT(1); /* 041 interrupt level 1 */
2N/A LEVEL_INTERRUPT(2); /* 042 interrupt level 2 */
2N/A LEVEL_INTERRUPT(3); /* 043 interrupt level 3 */
2N/A LEVEL_INTERRUPT(4); /* 044 interrupt level 4 */
2N/A LEVEL_INTERRUPT(5); /* 045 interrupt level 5 */
2N/A LEVEL_INTERRUPT(6); /* 046 interrupt level 6 */
2N/A LEVEL_INTERRUPT(7); /* 047 interrupt level 7 */
2N/A LEVEL_INTERRUPT(8); /* 048 interrupt level 8 */
2N/A LEVEL_INTERRUPT(9); /* 049 interrupt level 9 */
2N/A LEVEL_INTERRUPT(10); /* 04A interrupt level 10 */
2N/A LEVEL_INTERRUPT(11); /* 04B interrupt level 11 */
2N/A LEVEL_INTERRUPT(12); /* 04C interrupt level 12 */
2N/A LEVEL_INTERRUPT(13); /* 04D interrupt level 13 */
2N/A LEVEL14_INTERRUPT; /* 04E interrupt level 14 */
2N/A LEVEL_INTERRUPT(15); /* 04F interrupt level 15 */
2N/A NOT4; NOT4; NOT4; NOT4; /* 050 - 05F reserved */
2N/A VECTOR_INTERRUPT; /* 060 interrupt vector */
2N/A GOTO(kmdb_trap); /* 061 PA watchpoint */
2N/A GOTO(kmdb_trap); /* 062 VA watchpoint */
2N/A GOTO_TT(ce_err, trace_gen); /* 063 corrected ECC error */
2N/A /* 064-06F: each .align 128 miss handler spans four 32-byte entries */
2N/A ITLB_MISS(tt0); /* 064 instruction access MMU miss */
2N/A DTLB_MISS(tt0); /* 068 data access MMU miss */
2N/A DTLB_PROT; /* 06C data access protection */
2N/A LABELED_BAD(tt0_fecc); /* 070 fast ecache ECC error */
2N/A LABELED_BAD(tt0_dperr); /* 071 Cheetah+ dcache parity error */
2N/A LABELED_BAD(tt0_iperr); /* 072 Cheetah+ icache parity error */
2N/A NOT; /* 073 reserved */
2N/A NOT4; NOT4; NOT4; /* 074 - 07F reserved */
2N/A /* register window spill/fill handlers: 080 - 0FF */
2N/A NOT4; /* 080 spill 0 normal */
2N/A SPILL_32bit_asi(ASI_AIUP,sn0); /* 084 spill 1 normal */
2N/A SPILL_64bit_asi(ASI_AIUP,sn0); /* 088 spill 2 normal */
2N/A SPILL_32clean(ASI_AIUP,sn0); /* 08C spill 3 normal */
2N/A SPILL_64clean(ASI_AIUP,sn0); /* 090 spill 4 normal */
2N/A SPILL_32bit(not); /* 094 spill 5 normal */
2N/A SPILL_64bit(not); /* 098 spill 6 normal */
2N/A SPILL_mixed; /* 09C spill 7 normal */
2N/A NOT4; /* 0A0 spill 0 other */
2N/A SPILL_32bit_asi(ASI_AIUS,so0); /* 0A4 spill 1 other */
2N/A SPILL_64bit_asi(ASI_AIUS,so0); /* 0A8 spill 2 other */
2N/A SPILL_32bit_asi(ASI_AIUS,so0); /* 0AC spill 3 other */
2N/A SPILL_64bit_asi(ASI_AIUS,so0); /* 0B0 spill 4 other */
2N/A NOT4; /* 0B4 spill 5 other */
2N/A NOT4; /* 0B8 spill 6 other */
2N/A NOT4; /* 0BC spill 7 other */
2N/A NOT4; /* 0C0 fill 0 normal */
2N/A FILL_32bit_asi(ASI_AIUP,fn0); /* 0C4 fill 1 normal */
2N/A FILL_64bit_asi(ASI_AIUP,fn0); /* 0C8 fill 2 normal */
2N/A FILL_32bit_asi(ASI_AIUP,fn0); /* 0CC fill 3 normal */
2N/A FILL_64bit_asi(ASI_AIUP,fn0); /* 0D0 fill 4 normal */
2N/A FILL_32bit(not); /* 0D4 fill 5 normal */
2N/A FILL_64bit(not); /* 0D8 fill 6 normal */
2N/A FILL_mixed; /* 0DC fill 7 normal */
2N/A NOT4; /* 0E0 fill 0 other */
2N/A NOT4; /* 0E4 fill 1 other */
2N/A NOT4; /* 0E8 fill 2 other */
2N/A NOT4; /* 0EC fill 3 other */
2N/A NOT4; /* 0F0 fill 4 other */
2N/A NOT4; /* 0F4 fill 5 other */
2N/A NOT4; /* 0F8 fill 6 other */
2N/A NOT4; /* 0FC fill 7 other */
2N/A /* user traps */
2N/A GOTO(syscall_trap_4x); /* 100 old system call */
2N/A TRAP(T_BREAKPOINT); /* 101 user breakpoint */
2N/A TRAP(T_DIV0); /* 102 user divide by zero */
2N/A FLUSHW(); /* 103 flush windows */
2N/A GOTO(.clean_windows); /* 104 clean windows */
2N/A BAD; /* 105 range check ?? */
2N/A GOTO(.fix_alignment); /* 106 do unaligned references */
2N/A BAD; /* 107 unused */
2N/A SYSCALL(syscall_trap32); /* 108 ILP32 system call on LP64 */
2N/A GOTO(set_trap0_addr); /* 109 set trap0 address */
2N/A BAD; BAD; BAD4; /* 10A - 10F unused */
2N/A TRP4; TRP4; TRP4; TRP4; /* 110 - 11F V9 user trap handlers */
2N/A GOTO(.getcc); /* 120 get condition codes */
2N/A GOTO(.setcc); /* 121 set condition codes */
2N/A GOTO(.getpsr); /* 122 get psr */
2N/A GOTO(.setpsr); /* 123 set psr (some fields) */
2N/A GOTO(get_timestamp); /* 124 get timestamp */
2N/A GOTO(get_virtime); /* 125 get lwp virtual time */
2N/A PRIV(self_xcall); /* 126 self xcall */
2N/A GOTO(get_hrestime); /* 127 get hrestime */
2N/A BAD; /* 128 ST_SETV9STACK */
2N/A GOTO(.getlgrp); /* 129 get lgrpid */
2N/A BAD; BAD; BAD4; /* 12A - 12F unused */
2N/A BAD4; BAD4; /* 130 - 137 unused */
2N/A DTRACE_PID; /* 138 dtrace pid tracing provider */
2N/A DTRACE_FASTTRAP; /* 139 dtrace fasttrap provider */
2N/A DTRACE_RETURN; /* 13A dtrace pid return probe */
2N/A BAD; BAD4; /* 13B - 13F unused */
2N/A SYSCALL(syscall_trap) /* 140 LP64 system call */
2N/A BAD; /* 141 unused */
2N/A#ifdef DEBUG_USER_TRAPTRACECTL
2N/A GOTO(.traptrace_freeze); /* 142 freeze traptrace */
2N/A GOTO(.traptrace_unfreeze); /* 143 unfreeze traptrace */
2N/A#else
2N/A BAD; BAD; /* 142 - 143 unused */
2N/A#endif
2N/A BAD4; BAD4; BAD4; /* 144 - 14F unused */
2N/A BAD4; BAD4; BAD4; BAD4; /* 150 - 15F unused */
2N/A BAD4; BAD4; BAD4; BAD4; /* 160 - 16F unused */
2N/A BAD; /* 170 - unused */
2N/A BAD; /* 171 - unused */
2N/A BAD; BAD; /* 172 - 173 unused */
2N/A BAD4; BAD4; /* 174 - 17B unused */
2N/A#ifdef PTL1_PANIC_DEBUG
2N/A mov PTL1_BAD_DEBUG, %g1; GOTO(ptl1_panic);
2N/A /* 17C test ptl1_panic */
2N/A#else
2N/A BAD; /* 17C unused */
2N/A#endif /* PTL1_PANIC_DEBUG */
2N/A PRIV(kmdb_trap); /* 17D kmdb enter (L1-A) */
2N/A PRIV(kmdb_trap); /* 17E kmdb breakpoint */
2N/A PRIV(kctx_obp_bpt); /* 17F obp breakpoint */
2N/A /* reserved */
2N/A NOT4; NOT4; NOT4; NOT4; /* 180 - 18F reserved */
2N/A NOT4; NOT4; NOT4; NOT4; /* 190 - 19F reserved */
2N/A NOT4; NOT4; NOT4; NOT4; /* 1A0 - 1AF reserved */
2N/A NOT4; NOT4; NOT4; NOT4; /* 1B0 - 1BF reserved */
2N/A NOT4; NOT4; NOT4; NOT4; /* 1C0 - 1CF reserved */
2N/A NOT4; NOT4; NOT4; NOT4; /* 1D0 - 1DF reserved */
2N/A NOT4; NOT4; NOT4; NOT4; /* 1E0 - 1EF reserved */
2N/A NOT4; NOT4; NOT4; NOT4; /* 1F0 - 1FF reserved */
2N/Atrap_table1:
2N/A /* traps taken from TL>0; fewer handlers are valid here */
2N/A NOT4; NOT4; NOT; NOT; /* 000 - 009 unused */
2N/A ASYNC_TRAP(T_INSTR_ERROR + T_TL1, trace_gen);
2N/A /* 00A instruction access error */
2N/A NOT; NOT4; /* 00B - 00F unused */
2N/A NOT4; NOT4; NOT4; NOT4; /* 010 - 01F unused */
2N/A NOT4; /* 020 - 023 unused */
2N/A CLEAN_WINDOW; /* 024 - 027 clean window */
2N/A NOT4; NOT4; /* 028 - 02F unused */
2N/A DMMU_EXCEPTION_TL1; /* 030 data access exception */
2N/A NOT; /* 031 unused */
2N/A ASYNC_TRAP(T_DATA_ERROR + T_TL1, trace_gen);
2N/A /* 032 data access error */
2N/A NOT; /* 033 unused */
2N/A MISALIGN_ADDR_TL1; /* 034 mem address not aligned */
2N/A NOT; NOT; NOT; NOT4; NOT4 /* 035 - 03F unused */
2N/A NOT4; NOT4; NOT4; NOT4; /* 040 - 04F unused */
2N/A NOT4; NOT4; NOT4; NOT4; /* 050 - 05F unused */
2N/A NOT; /* 060 unused */
2N/A GOTO(kmdb_trap_tl1); /* 061 PA watchpoint */
2N/A GOTO(kmdb_trap_tl1); /* 062 VA watchpoint */
2N/A GOTO_TT(ce_err_tl1, trace_gen); /* 063 corrected ECC error */
2N/A /* 064-06F: each .align 128 miss handler spans four 32-byte entries */
2N/A ITLB_MISS(tt1); /* 064 instruction access MMU miss */
2N/A DTLB_MISS(tt1); /* 068 data access MMU miss */
2N/A DTLB_PROT; /* 06C data access protection */
2N/A LABELED_BAD(tt1_fecc); /* 070 fast ecache ECC error */
2N/A LABELED_BAD(tt1_dperr); /* 071 Cheetah+ dcache parity error */
2N/A LABELED_BAD(tt1_iperr); /* 072 Cheetah+ icache parity error */
2N/A NOT; /* 073 reserved */
2N/A NOT4; NOT4; NOT4; /* 074 - 07F reserved */
2N/A NOT4; /* 080 spill 0 normal */
2N/A SPILL_32bit_tt1(ASI_AIUP,sn1); /* 084 spill 1 normal */
2N/A SPILL_64bit_tt1(ASI_AIUP,sn1); /* 088 spill 2 normal */
2N/A SPILL_32bit_tt1(ASI_AIUP,sn1); /* 08C spill 3 normal */
2N/A SPILL_64bit_tt1(ASI_AIUP,sn1); /* 090 spill 4 normal */
2N/A SPILL_32bit(not); /* 094 spill 5 normal */
2N/A SPILL_64bit(not); /* 098 spill 6 normal */
2N/A SPILL_mixed; /* 09C spill 7 normal */
2N/A NOT4; /* 0A0 spill 0 other */
2N/A SPILL_32bit_tt1(ASI_AIUS,so1); /* 0A4 spill 1 other */
2N/A SPILL_64bit_tt1(ASI_AIUS,so1); /* 0A8 spill 2 other */
2N/A SPILL_32bit_tt1(ASI_AIUS,so1); /* 0AC spill 3 other */
2N/A SPILL_64bit_tt1(ASI_AIUS,so1); /* 0B0 spill 4 other */
2N/A NOT4; /* 0B4 spill 5 other */
2N/A NOT4; /* 0B8 spill 6 other */
2N/A NOT4; /* 0BC spill 7 other */
2N/A NOT4; /* 0C0 fill 0 normal */
2N/A FILL_32bit_tt1(ASI_AIUP,fn1); /* 0C4 fill 1 normal */
2N/A FILL_64bit_tt1(ASI_AIUP,fn1); /* 0C8 fill 2 normal */
2N/A FILL_32bit_tt1(ASI_AIUP,fn1); /* 0CC fill 3 normal */
2N/A FILL_64bit_tt1(ASI_AIUP,fn1); /* 0D0 fill 4 normal */
2N/A FILL_32bit(not); /* 0D4 fill 5 normal */
2N/A FILL_64bit(not); /* 0D8 fill 6 normal */
2N/A FILL_mixed; /* 0DC fill 7 normal */
2N/A NOT4; NOT4; NOT4; NOT4; /* 0E0 - 0EF unused */
2N/A NOT4; NOT4; NOT4; NOT4; /* 0F0 - 0FF unused */
2N/A LABELED_BAD(tt1_swtrap0); /* 100 fast ecache ECC error (cont) */
2N/A LABELED_BAD(tt1_swtrap1); /* 101 Ch+ D$ parity error (cont) */
2N/A LABELED_BAD(tt1_swtrap2); /* 102 Ch+ I$ parity error (cont) */
2N/A NOT; /* 103 reserved */
2N/A/*
2N/A * We only reserve the above four special case soft traps for code running
2N/A * at TL>0, so we can truncate the trap table here.
2N/A */
2N/Aetrap_table:
2N/A .size trap_table, (.-trap_table)
2N/A .size scb, (.-scb)
2N/A
2N/A/*
2N/A * We get to exec_fault in the case of an instruction miss and tte
2N/A * has no execute bit set. We go to tl0 to handle it.
2N/A *
2N/A * g1 = tsbe pointer (in/clobbered)
2N/A * g2 = tag access register (in)
2N/A * g3 - g4 = scratch (clobbered)
2N/A * g5 = tsbe data (in)
2N/A * g6 = scratch (clobbered)
2N/A * g7 = pc we jumped here from (in)
2N/A */
2N/A ALTENTRY(exec_fault)
2N/A TRACE_TSBHIT(0x200) ! record the hit that lacked exec perm
2N/A SWITCH_GLOBALS ! back to normal globals for sys_trap
2N/A mov MMU_TAG_ACCESS, %g4
2N/A ldxa [%g4]ASI_IMMU, %g2 ! arg1 = addr
2N/A mov T_INSTR_MMU_MISS, %g3 ! arg2 = traptype
2N/A set trap, %g1 ! sys_trap handler = trap()
2N/A ba,pt %xcc, sys_trap
2N/A mov -1, %g4 ! delay - %g4 arg to sys_trap
2N/A
2N/A/*
2N/A * Memory-address-not-aligned slow path (TL=0).
2N/A * Entry (from the DMMU_EXC_*_NOT_ALIGNED macros):
2N/A * %g2 = SFAR (the misaligned address), %g3 = SFSR.
2N/A * A user process with a registered unaligned-access utrap handler
2N/A * (P_UTRAP15) gets the trap delivered to it; otherwise fall into the
2N/A * common .mmu_exception_end path with T_ALIGNMENT.
2N/A */
2N/A.mmu_exception_not_aligned:
2N/A rdpr %tstate, %g1
2N/A btst TSTATE_PRIV, %g1
2N/A bnz,pn %icc, 2f ! kernel-mode fault: no utraps
2N/A nop
2N/A CPU_ADDR(%g1, %g4) ! load CPU struct addr
2N/A ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
2N/A ldn [%g1 + T_PROCP], %g1 ! load proc pointer
2N/A ldn [%g1 + P_UTRAPS], %g5 ! are there utraps?
2N/A brz,pt %g5, 2f
2N/A nop
2N/A ldn [%g5 + P_UTRAP15], %g5 ! unaligned utrap?
2N/A brz,pn %g5, 2f
2N/A nop
2N/A btst 1, %sp
2N/A bz,pt %xcc, 1f ! 32 bit user program
2N/A nop
2N/A ba,pt %xcc, .setup_v9utrap ! 64 bit user program
2N/A nop
2N/A1:
2N/A ba,pt %xcc, .setup_utrap
2N/A or %g2, %g0, %g7 ! delay - %g7 = misaligned addr
2N/A2:
2N/A ba,pt %xcc, .mmu_exception_end
2N/A mov T_ALIGNMENT, %g1 ! delay - soft trap type
2N/A
2N/A/*
2N/A * Privileged action slow path (TL=0), from DMMU_EXC_AG_PRIV:
2N/A * %g2 = SFAR, %g3 = SFSR.  A user process with a P_UTRAP16 handler
2N/A * gets it delivered via .setup_v9utrap; otherwise set T_PRIV_INSTR
2N/A * and fall through into .mmu_exception_end below.
2N/A */
2N/A.mmu_priv_exception:
2N/A rdpr %tstate, %g1
2N/A btst TSTATE_PRIV, %g1
2N/A bnz,pn %icc, 1f ! kernel-mode fault: no utraps
2N/A nop
2N/A CPU_ADDR(%g1, %g4) ! load CPU struct addr
2N/A ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
2N/A ldn [%g1 + T_PROCP], %g1 ! load proc pointer
2N/A ldn [%g1 + P_UTRAPS], %g5 ! are there utraps?
2N/A brz,pt %g5, 1f
2N/A nop
2N/A ldn [%g5 + P_UTRAP16], %g5
2N/A brnz,pt %g5, .setup_v9utrap
2N/A nop
2N/A1:
2N/A mov T_PRIV_INSTR, %g1 ! falls through to .mmu_exception_end
2N/A
2N/A/*
2N/A * Common MMU exception tail.
2N/A * %g1 = soft trap type, %g2 = address (tag access or SFAR),
2N/A * %g3 = SFSR.  If DTrace has NOFAULT set for this CPU, just flag
2N/A * BADADDR and retire the trap with done; otherwise go check for a
2N/A * TLB parity error.
2N/A */
2N/A.mmu_exception_end:
2N/A CPU_INDEX(%g4, %g5)
2N/A set cpu_core, %g5
2N/A sllx %g4, CPU_CORE_SHIFT, %g4
2N/A add %g4, %g5, %g4 ! %g4 = &cpu_core[cpuid]
2N/A lduh [%g4 + CPUC_DTRACE_FLAGS], %g5
2N/A andcc %g5, CPU_DTRACE_NOFAULT, %g0
2N/A bz %xcc, .mmu_exception_tlb_chk
2N/A or %g5, CPU_DTRACE_BADADDR, %g5 ! delay - set BADADDR flag
2N/A stuh %g5, [%g4 + CPUC_DTRACE_FLAGS]
2N/A done
2N/A
2N/A/*
2N/A * Decide whether this exception is really a Panther TLB parity error
2N/A * (SFSR parity bit); if so, jump to the matching parity handler,
2N/A * otherwise pack SFSR into the upper 32 bits of the trap-type arg and
2N/A * go to trap() via sys_trap.
2N/A * NOTE(review): itlb/dtlb_parity_trap are weak symbols; presumably
2N/A * they are always resolved on Panther systems, else the jmp at 3:
2N/A * would target 0 -- confirm against the link setup.
2N/A */
2N/A.mmu_exception_tlb_chk:
2N/A GET_CPU_IMPL(%g5) ! check SFSR.FT to see if this
2N/A cmp %g5, PANTHER_IMPL ! is a TLB parity error. But
2N/A bne 2f ! we only do this check while
2N/A mov 1, %g4 ! running on Panther CPUs
2N/A sllx %g4, PN_SFSR_PARITY_SHIFT, %g4 ! since US-I/II use the same
2N/A andcc %g3, %g4, %g0 ! bit for something else which
2N/A bz 2f ! will be handled later.
2N/A nop
2N/A.mmu_exception_is_tlb_parity:
2N/A .weak itlb_parity_trap
2N/A .weak dtlb_parity_trap
2N/A set itlb_parity_trap, %g4
2N/A cmp %g1, T_INSTR_EXCEPTION ! branch to the itlb or
2N/A be 3f ! dtlb parity handler
2N/A nop ! if this trap is due
2N/A set dtlb_parity_trap, %g4
2N/A cmp %g1, T_DATA_EXCEPTION ! to an IMMU exception
2N/A be 3f ! or DMMU exception.
2N/A nop
2N/A2:
2N/A sllx %g3, 32, %g3 ! SFSR in upper 32 bits,
2N/A or %g3, %g1, %g3 ! trap type in lower
2N/A set trap, %g1
2N/A ba,pt %xcc, sys_trap
2N/A sub %g0, 1, %g4
2N/A3:
2N/A jmp %g4 ! off to the appropriate
2N/A nop ! TLB parity handler
2N/A
2N/A/*
2N/A * fp disabled trap.  A privileged fp-disabled is normally fatal
2N/A * (ptl1_panic), except under SF_ERRATA_30 where a call instruction
2N/A * can spuriously raise it.  A user process with a P_UTRAP7 handler
2N/A * gets the utrap delivered; otherwise sys_trap(fp_disabled).
2N/A */
2N/A.fp_disabled:
2N/A CPU_ADDR(%g1, %g4) ! load CPU struct addr
2N/A ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
2N/A#ifdef SF_ERRATA_30 /* call causes fp-disabled */
2N/A brz,a,pn %g1, 2f ! no thread yet: treat as kernel
2N/A nop
2N/A#endif
2N/A rdpr %tstate, %g4
2N/A btst TSTATE_PRIV, %g4
2N/A#ifdef SF_ERRATA_30 /* call causes fp-disabled */
2N/A bnz,pn %icc, 2f
2N/A nop
2N/A#else
2N/A bnz,a,pn %icc, ptl1_panic
2N/A mov PTL1_BAD_FPTRAP, %g1
2N/A#endif
2N/A ldn [%g1 + T_PROCP], %g1 ! load proc pointer
2N/A ldn [%g1 + P_UTRAPS], %g5 ! are there utraps?
2N/A brz,a,pt %g5, 2f
2N/A nop
2N/A ldn [%g5 + P_UTRAP7], %g5 ! fp_disabled utrap?
2N/A brz,a,pn %g5, 2f
2N/A nop
2N/A btst 1, %sp
2N/A bz,a,pt %xcc, 1f ! 32 bit user program
2N/A nop
2N/A ba,a,pt %xcc, .setup_v9utrap ! 64 bit user program
2N/A nop
2N/A1:
2N/A ba,pt %xcc, .setup_utrap
2N/A or %g0, %g0, %g7 ! delay - no misaligned addr
2N/A2:
2N/A set fp_disabled, %g1 ! sys_trap handler
2N/A ba,pt %xcc, sys_trap
2N/A sub %g0, 1, %g4
2N/A
2N/A/*
2N/A * fp exception ieee 754.  Fatal if taken in privileged mode.  %fsr is
2N/A * staged through CPU_TMP1 into %g2 for the handler.  A user process
2N/A * with a P_UTRAP8 handler gets the utrap; otherwise
2N/A * sys_trap(_fp_ieee_exception).
2N/A */
2N/A.fp_ieee_exception:
2N/A rdpr %tstate, %g1
2N/A btst TSTATE_PRIV, %g1
2N/A bnz,a,pn %icc, ptl1_panic
2N/A mov PTL1_BAD_FPTRAP, %g1
2N/A CPU_ADDR(%g1, %g4) ! load CPU struct addr
2N/A stx %fsr, [%g1 + CPU_TMP1]
2N/A ldx [%g1 + CPU_TMP1], %g2 ! %g2 = %fsr
2N/A ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
2N/A ldn [%g1 + T_PROCP], %g1 ! load proc pointer
2N/A ldn [%g1 + P_UTRAPS], %g5 ! are there utraps?
2N/A brz,a,pt %g5, 1f
2N/A nop
2N/A ldn [%g5 + P_UTRAP8], %g5
2N/A brnz,a,pt %g5, .setup_v9utrap
2N/A nop
2N/A1:
2N/A set _fp_ieee_exception, %g1
2N/A ba,pt %xcc, sys_trap
2N/A sub %g0, 1, %g4
2N/A
2N/A/*
2N/A * Register Inputs:
2N/A * %g5 user trap handler
2N/A * %g7 misaligned addr - for alignment traps only
2N/A */
2N/A.setup_utrap:
2N/A set trap, %g1 ! setup in case we go
2N/A mov T_FLUSH_PCB, %g3 ! through sys_trap on
2N/A sub %g0, 1, %g4 ! the save instruction below
2N/A
2N/A /*
2N/A * If the DTrace pid provider is single stepping a copied-out
2N/A * instruction, t->t_dtrace_step will be set. In that case we need
2N/A * to abort the single-stepping (since execution of the instruction
2N/A * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
2N/A */
2N/A save %sp, -SA(MINFRAME32), %sp ! window for trap handler
2N/A CPU_ADDR(%g1, %g4) ! load CPU struct addr
2N/A ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
2N/A ldub [%g1 + T_DTRACE_STEP], %g2 ! load t->t_dtrace_step
2N/A rdpr %tnpc, %l2 ! arg1 == tnpc
2N/A brz,pt %g2, 1f
2N/A rdpr %tpc, %l1 ! arg0 == tpc
2N/A
2N/A ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast
2N/A ldn [%g1 + T_DTRACE_NPC], %l2 ! arg1 = t->t_dtrace_npc (step)
2N/A brz,pt %g2, 1f
2N/A st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags
2N/A stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast
2N/A1:
2N/A mov %g7, %l3 ! arg2 == misaligned address
2N/A
2N/A rdpr %tstate, %g1 ! cwp for trap handler
2N/A rdpr %cwp, %g4
2N/A bclr TSTATE_CWP_MASK, %g1 ! put the current cwp into
2N/A wrpr %g1, %g4, %tstate ! %tstate for the handler
2N/A wrpr %g0, %g5, %tnpc ! trap handler address
2N/A FAST_TRAP_DONE
2N/A /* NOTREACHED */
2N/A
2N/A/*
2N/A * Decide whether a software or unimplemented-instruction trap should
2N/A * be vectored to a registered V9 user trap handler.
2N/A * %g3 = trap type on entry; on success %g5 = utrap handler address
2N/A * and control goes to .setup_v9utrap, otherwise sys_trap(trap).
2N/A * For T_UNIMP_INSTR the trapping instruction is fetched with the
2N/A * user ASI under CPU_TL1_HDLR protection so a fault there is
2N/A * recoverable; a real illtrap falls through to trap().
2N/A */
2N/A.check_v9utrap:
2N/A rdpr %tstate, %g1
2N/A btst TSTATE_PRIV, %g1
2N/A bnz,a,pn %icc, 3f ! privileged: no utraps
2N/A nop
2N/A CPU_ADDR(%g4, %g1) ! load CPU struct addr
2N/A ldn [%g4 + CPU_THREAD], %g5 ! load thread pointer
2N/A ldn [%g5 + T_PROCP], %g5 ! load proc pointer
2N/A ldn [%g5 + P_UTRAPS], %g5 ! are there utraps?
2N/A
2N/A cmp %g3, T_SOFTWARE_TRAP
2N/A bne,a,pt %icc, 1f
2N/A nop
2N/A
2N/A brz,pt %g5, 3f ! if p_utraps == NULL goto trap()
2N/A rdpr %tt, %g3 ! delay - get actual hw trap type
2N/A
2N/A sub %g3, 254, %g1 ! UT_TRAP_INSTRUCTION_16 = p_utraps[18]
2N/A ba,pt %icc, 2f
2N/A smul %g1, CPTRSIZE, %g2 ! delay - byte offset into p_utraps
2N/A1:
2N/A brz,a,pt %g5, 3f ! if p_utraps == NULL goto trap()
2N/A nop
2N/A
2N/A cmp %g3, T_UNIMP_INSTR
2N/A bne,a,pt %icc, 2f
2N/A nop
2N/A
2N/A mov 1, %g1
2N/A st %g1, [%g4 + CPU_TL1_HDLR] ! set CPU_TL1_HDLR
2N/A rdpr %tpc, %g1 ! ld trapping instruction using
2N/A lduwa [%g1]ASI_AIUP, %g1 ! "AS IF USER" ASI which could fault
2N/A st %g0, [%g4 + CPU_TL1_HDLR] ! clr CPU_TL1_HDLR
2N/A
2N/A sethi %hi(0xc1c00000), %g4 ! setup mask for illtrap instruction
2N/A andcc %g1, %g4, %g4 ! and instruction with mask
2N/A bnz,a,pt %icc, 3f ! if %g4 == zero, %g1 is an ILLTRAP
2N/A nop ! fall thru to setup
2N/A2:
2N/A ldn [%g5 + %g2], %g5 ! %g5 = utrap handler (or NULL)
2N/A brnz,a,pt %g5, .setup_v9utrap
2N/A nop
2N/A3:
2N/A set trap, %g1
2N/A ba,pt %xcc, sys_trap
2N/A sub %g0, 1, %g4
2N/A /* NOTREACHED */
2N/A
2N/A/*
2N/A * Register Inputs:
2N/A * %g5 user trap handler
2N/A */
2N/A.setup_v9utrap:
2N/A set trap, %g1 ! setup in case we go
2N/A mov T_FLUSH_PCB, %g3 ! through sys_trap on
2N/A sub %g0, 1, %g4 ! the save instruction below
2N/A
2N/A /*
2N/A * If the DTrace pid provider is single stepping a copied-out
2N/A * instruction, t->t_dtrace_step will be set. In that case we need
2N/A * to abort the single-stepping (since execution of the instruction
2N/A * was interrupted) and use the value of t->t_dtrace_npc as the %npc.
2N/A */
2N/A save %sp, -SA(MINFRAME64), %sp ! window for trap handler
2N/A CPU_ADDR(%g1, %g4) ! load CPU struct addr
2N/A ldn [%g1 + CPU_THREAD], %g1 ! load thread pointer
2N/A ldub [%g1 + T_DTRACE_STEP], %g2 ! load t->t_dtrace_step
2N/A rdpr %tnpc, %l7 ! arg1 == tnpc
2N/A brz,pt %g2, 1f
2N/A rdpr %tpc, %l6 ! arg0 == tpc
2N/A
2N/A ldub [%g1 + T_DTRACE_AST], %g2 ! load t->t_dtrace_ast
2N/A ldn [%g1 + T_DTRACE_NPC], %l7 ! arg1 == t->t_dtrace_npc (step)
2N/A brz,pt %g2, 1f
2N/A st %g0, [%g1 + T_DTRACE_FT] ! zero all pid provider flags
2N/A stub %g2, [%g1 + T_ASTFLAG] ! aston(t) if t->t_dtrace_ast
2N/A1:
2N/A rdpr %tstate, %g2 ! cwp for trap handler
2N/A rdpr %cwp, %g4
2N/A bclr TSTATE_CWP_MASK, %g2
2N/A wrpr %g2, %g4, %tstate
2N/A
2N/A ldn [%g1 + T_PROCP], %g4 ! load proc pointer
2N/A ldn [%g4 + P_AS], %g4 ! load as pointer
2N/A ldn [%g4 + A_USERLIMIT], %g4 ! load as userlimit
2N/A cmp %l7, %g4 ! check for single-step set
2N/A bne,pt %xcc, 4f
2N/A nop
2N/A ldn [%g1 + T_LWP], %g1 ! load klwp pointer
2N/A ld [%g1 + PCB_STEP], %g4 ! load single-step flag
2N/A cmp %g4, STEP_ACTIVE ! step flags set in pcb?
2N/A bne,pt %icc, 4f
2N/A nop
2N/A /* single-step: stash the handler and force a FLTBOUNDS at userlimit */
2N/A stn %g5, [%g1 + PCB_TRACEPC] ! save trap handler addr in pcb
2N/A mov %l7, %g4 ! on entry to precise user trap
2N/A add %l6, 4, %l7 ! handler, %l6 == pc, %l7 == npc
2N/A ! at time of trap
2N/A wrpr %g0, %g4, %tnpc ! generate FLTBOUNDS,
2N/A ! %g4 == userlimit
2N/A FAST_TRAP_DONE
2N/A /* NOTREACHED */
2N/A4:
2N/A wrpr %g0, %g5, %tnpc ! trap handler address
2N/A FAST_TRAP_DONE_CHK_INTR
2N/A /* NOTREACHED */
2N/A
2N/A.fp_exception:
2N/A CPU_ADDR(%g1, %g4)
2N/A stx %fsr, [%g1 + CPU_TMP1]
2N/A ldx [%g1 + CPU_TMP1], %g2
2N/A
2N/A /*
2N/A * Cheetah takes unfinished_FPop trap for certain range of operands
2N/A * to the "fitos" instruction. Instead of going through the slow
2N/A * software emulation path, we try to simulate the "fitos" instruction
2N/A * via "fitod" and "fdtos" provided the following conditions are met:
2N/A *
2N/A * fpu_exists is set (if DEBUG)
2N/A * not in privileged mode
2N/A * ftt is unfinished_FPop
2N/A * NXM IEEE trap is not enabled
2N/A * instruction at %tpc is "fitos"
2N/A *
2N/A * Usage:
2N/A * %g1 per cpu address
2N/A * %g2 %fsr
2N/A * %g6 user instruction
2N/A *
2N/A * Note that we can take a memory access related trap while trying
2N/A * to fetch the user instruction. Therefore, we set CPU_TL1_HDLR
2N/A * flag to catch those traps and let the SFMMU code deal with page
2N/A * fault and data access exception.
2N/A */
2N/A#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2N/A sethi %hi(fpu_exists), %g7
2N/A ld [%g7 + %lo(fpu_exists)], %g7
2N/A brz,pn %g7, .fp_exception_cont
2N/A nop
2N/A#endif
2N/A rdpr %tstate, %g7 ! branch if in privileged mode
2N/A btst TSTATE_PRIV, %g7
2N/A bnz,pn %xcc, .fp_exception_cont
2N/A srl %g2, FSR_FTT_SHIFT, %g7 ! extract ftt from %fsr
2N/A and %g7, (FSR_FTT>>FSR_FTT_SHIFT), %g7
2N/A cmp %g7, FTT_UNFIN
2N/A set FSR_TEM_NX, %g5
2N/A bne,pn %xcc, .fp_exception_cont ! branch if NOT unfinished_FPop
2N/A andcc %g2, %g5, %g0
2N/A bne,pn %xcc, .fp_exception_cont ! branch if FSR_TEM_NX enabled
2N/A rdpr %tpc, %g5 ! get faulting PC
2N/A
2N/A or %g0, 1, %g7
2N/A st %g7, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2N/A lda [%g5]ASI_USER, %g6 ! get user's instruction
2N/A st %g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2N/A
2N/A set FITOS_INSTR_MASK, %g7
2N/A and %g6, %g7, %g7
2N/A set FITOS_INSTR, %g5
2N/A cmp %g7, %g5
2N/A bne,pn %xcc, .fp_exception_cont ! branch if not FITOS_INSTR
2N/A nop
2N/A
2N/A /*
2N/A * This is unfinished FPops trap for "fitos" instruction. We
2N/A * need to simulate "fitos" via "fitod" and "fdtos" instruction
2N/A * sequence.
2N/A *
2N/A * We need a temporary FP register to do the conversion. Since
2N/A * both source and destination operands for the "fitos" instruction
2N/A * have to be within %f0-%f31, we use an FP register from the upper
2N/A * half to guarantee that it won't collide with the source or the
2N/A * dest operand. However, we do have to save and restore its value.
2N/A *
2N/A * We use %d62 as a temporary FP register for the conversion and
2N/A * branch to appropriate instruction within the conversion tables
2N/A * based upon the rs2 and rd values.
2N/A */
2N/A
2N/A std %d62, [%g1 + CPU_TMP1] ! save original value
2N/A
2N/A srl %g6, FITOS_RS2_SHIFT, %g7
2N/A and %g7, FITOS_REG_MASK, %g7
2N/A set _fitos_fitod_table, %g4
2N/A sllx %g7, 2, %g7
2N/A jmp %g4 + %g7
2N/A ba,pt %xcc, _fitos_fitod_done
2N/A .empty
2N/A
2N/A_fitos_fitod_table:
2N/A fitod %f0, %d62
2N/A fitod %f1, %d62
2N/A fitod %f2, %d62
2N/A fitod %f3, %d62
2N/A fitod %f4, %d62
2N/A fitod %f5, %d62
2N/A fitod %f6, %d62
2N/A fitod %f7, %d62
2N/A fitod %f8, %d62
2N/A fitod %f9, %d62
2N/A fitod %f10, %d62
2N/A fitod %f11, %d62
2N/A fitod %f12, %d62
2N/A fitod %f13, %d62
2N/A fitod %f14, %d62
2N/A fitod %f15, %d62
2N/A fitod %f16, %d62
2N/A fitod %f17, %d62
2N/A fitod %f18, %d62
2N/A fitod %f19, %d62
2N/A fitod %f20, %d62
2N/A fitod %f21, %d62
2N/A fitod %f22, %d62
2N/A fitod %f23, %d62
2N/A fitod %f24, %d62
2N/A fitod %f25, %d62
2N/A fitod %f26, %d62
2N/A fitod %f27, %d62
2N/A fitod %f28, %d62
2N/A fitod %f29, %d62
2N/A fitod %f30, %d62
2N/A fitod %f31, %d62
2N/A_fitos_fitod_done:
2N/A
2N/A /*
2N/A * Now convert data back into single precision
2N/A */
2N/A srl %g6, FITOS_RD_SHIFT, %g7
2N/A and %g7, FITOS_REG_MASK, %g7
2N/A set _fitos_fdtos_table, %g4
2N/A sllx %g7, 2, %g7
2N/A jmp %g4 + %g7
2N/A ba,pt %xcc, _fitos_fdtos_done
2N/A .empty
2N/A
2N/A_fitos_fdtos_table:
2N/A fdtos %d62, %f0
2N/A fdtos %d62, %f1
2N/A fdtos %d62, %f2
2N/A fdtos %d62, %f3
2N/A fdtos %d62, %f4
2N/A fdtos %d62, %f5
2N/A fdtos %d62, %f6
2N/A fdtos %d62, %f7
2N/A fdtos %d62, %f8
2N/A fdtos %d62, %f9
2N/A fdtos %d62, %f10
2N/A fdtos %d62, %f11
2N/A fdtos %d62, %f12
2N/A fdtos %d62, %f13
2N/A fdtos %d62, %f14
2N/A fdtos %d62, %f15
2N/A fdtos %d62, %f16
2N/A fdtos %d62, %f17
2N/A fdtos %d62, %f18
2N/A fdtos %d62, %f19
2N/A fdtos %d62, %f20
2N/A fdtos %d62, %f21
2N/A fdtos %d62, %f22
2N/A fdtos %d62, %f23
2N/A fdtos %d62, %f24
2N/A fdtos %d62, %f25
2N/A fdtos %d62, %f26
2N/A fdtos %d62, %f27
2N/A fdtos %d62, %f28
2N/A fdtos %d62, %f29
2N/A fdtos %d62, %f30
2N/A fdtos %d62, %f31
2N/A_fitos_fdtos_done:
2N/A
2N/A ldd [%g1 + CPU_TMP1], %d62 ! restore %d62
2N/A
2N/A#if DEBUG
2N/A /*
2N/A * Update FPop_unfinished trap kstat
2N/A */
2N/A set fpustat+FPUSTAT_UNFIN_KSTAT, %g7
2N/A ldx [%g7], %g5
2N/A1:
2N/A add %g5, 1, %g6
2N/A
2N/A casxa [%g7] ASI_N, %g5, %g6
2N/A cmp %g5, %g6
2N/A bne,a,pn %xcc, 1b
2N/A or %g0, %g6, %g5
2N/A
2N/A /*
2N/A * Update fpu_sim_fitos kstat
2N/A */
2N/A set fpuinfo+FPUINFO_FITOS_KSTAT, %g7
2N/A ldx [%g7], %g5
2N/A1:
2N/A add %g5, 1, %g6
2N/A
2N/A casxa [%g7] ASI_N, %g5, %g6
2N/A cmp %g5, %g6
2N/A bne,a,pn %xcc, 1b
2N/A or %g0, %g6, %g5
2N/A#endif /* DEBUG */
2N/A
2N/A FAST_TRAP_DONE
2N/A
2N/A.fp_exception_cont:
2N/A	/*
2N/A	 * Let _fp_exception deal with simulating FPop instruction.
2N/A	 * Note that we need to pass %fsr in %g2 (already read above).
2N/A	 */
2N/A
2N/A	set	_fp_exception, %g1	! %g1 = C handler for sys_trap
2N/A	ba,pt	%xcc, sys_trap
2N/A	sub	%g0, 1, %g4		! delay: %g4 = -1, PIL argument to sys_trap
2N/A
2N/A.clean_windows:
2N/A	! Flush all register windows to the stack, then declare every window
2N/A	! "clean" (cleanwin = 0) and, for user threads, switch %wstate to the
2N/A	! clean variant so subsequent fills/spills use the clean handlers.
2N/A	! NOTE(review): the %g1/%g3/%g4 setup below looks like sys_trap
2N/A	! arguments, but this path never branches to sys_trap -- confirm
2N/A	! whether it is vestigial.
2N/A	set	trap, %g1
2N/A	mov	T_FLUSH_PCB, %g3
2N/A	sub	%g0, 1, %g4
2N/A	save
2N/A	flushw				! spill all active windows to memory
2N/A	restore
2N/A	wrpr	%g0, %g0, %cleanwin	! no clean windows
2N/A
2N/A	CPU_ADDR(%g4, %g5)
2N/A	ldn	[%g4 + CPU_MPCB], %g4	! %g4 = machpcb (zero if no user pcb)
2N/A	brz,a,pn %g4, 1f
2N/A	  nop
2N/A	ld	[%g4 + MPCB_WSTATE], %g5
2N/A	add	%g5, WSTATE_CLEAN_OFFSET, %g5
2N/A	wrpr	%g0, %g5, %wstate	! select the "clean" wstate variant
2N/A1:	FAST_TRAP_DONE
2N/A
2N/A/*
2N/A * .spill_clean: clean the previous window, restore the wstate, and
2N/A * "done".
2N/A *
2N/A * Entry: %g7 contains new wstate
2N/A */
2N/A.spill_clean:
2N/A	sethi	%hi(nwin_minus_one), %g5
2N/A	ld	[%g5 + %lo(nwin_minus_one)], %g5 ! %g5 = nwin - 1
2N/A	rdpr	%cwp, %g6			! %g6 = %cwp
2N/A	deccc	%g6				! %g6--
2N/A	movneg	%xcc, %g5, %g6			! if (%g6<0) %g6 = nwin-1
2N/A	wrpr	%g6, %cwp			! step back to the previous window
2N/A	TT_TRACE_L(trace_win)
2N/A	clr	%l0				! zero the locals so no stale
2N/A	clr	%l1				! data leaks through the window
2N/A	clr	%l2
2N/A	clr	%l3
2N/A	clr	%l4
2N/A	clr	%l5
2N/A	clr	%l6
2N/A	clr	%l7
2N/A	wrpr	%g0, %g7, %wstate		! install caller-supplied wstate
2N/A	saved					! mark the window as saved
2N/A	retry					! restores correct %cwp
2N/A
2N/A.fix_alignment:
2N/A	! Set p_fixalignment on the current process so subsequent misaligned
2N/A	! accesses are fixed up rather than signalled.
2N/A	CPU_ADDR(%g1, %g2)		! load CPU struct addr to %g1 using %g2
2N/A	ldn	[%g1 + CPU_THREAD], %g1	! load thread pointer
2N/A	ldn	[%g1 + T_PROCP], %g1	! %g1 = proc_t pointer
2N/A	mov	1, %g2
2N/A	stb	%g2, [%g1 + P_FIXALIGNMENT]
2N/A	FAST_TRAP_DONE
2N/A
2N/A/*
2N/A * STDF_REG(REG, ADDR, TMP): store the double FP register selected by
2N/A * REG (the instruction's 5-bit rd field) to [ADDR + CPU_TMP1] using a
2N/A * computed jump.  REG is scaled by 8 because each table slot is two
2N/A * instructions (ba + std).  The table interleaves %f0..%f30 with
2N/A * %f32..%f62: in the V9 double-register encoding, bit 0 of rd selects
2N/A * the upper register bank (e.g. encoded rd=1 names %d32).
2N/A * Clobbers REG and TMP.
2N/A */
2N/A#define STDF_REG(REG, ADDR, TMP) \
2N/A	sll	REG, 3, REG			;\
2N/Amark1:	set	start1, TMP			;\
2N/A	jmp	REG + TMP			;\
2N/A	nop					;\
2N/Astart1:	ba,pt	%xcc, done1			;\
2N/A	std	%f0, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f32, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f2, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f34, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f4, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f36, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f6, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f38, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f8, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f40, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f10, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f42, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f12, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f44, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f14, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f46, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f16, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f48, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f18, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f50, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f20, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f52, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f22, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f54, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f24, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f56, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f26, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f58, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f28, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f60, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f30, [ADDR + CPU_TMP1]		;\
2N/A	ba,pt	%xcc, done1			;\
2N/A	std	%f62, [ADDR + CPU_TMP1]		;\
2N/Adone1:
2N/A
2N/A/*
2N/A * LDDF_REG(REG, ADDR, TMP): load the double FP register selected by
2N/A * REG (the instruction's rd field) from [ADDR + CPU_TMP1] via the same
2N/A * computed-jump scheme as STDF_REG above: REG << 3 indexes two-
2N/A * instruction (ba + ldd) slots, with the lower and upper register
2N/A * banks interleaved per the V9 double-register rd encoding.
2N/A * Clobbers REG and TMP.
2N/A */
2N/A#define LDDF_REG(REG, ADDR, TMP) \
2N/A	sll	REG, 3, REG			;\
2N/Amark2:	set	start2, TMP			;\
2N/A	jmp	REG + TMP			;\
2N/A	nop					;\
2N/Astart2:	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f0		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f32		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f2		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f34		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f4		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f36		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f6		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f38		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f8		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f40		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f10		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f42		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f12		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f44		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f14		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f46		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f16		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f48		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f18		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f50		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f20		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f52		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f22		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f54		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f24		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f56		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f26		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f58		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f28		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f60		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f30		;\
2N/A	ba,pt	%xcc, done2			;\
2N/A	ldd	[ADDR + CPU_TMP1], %f62		;\
2N/Adone2:
2N/A
2N/A.lddf_exception_not_aligned:
2N/A	/*
2N/A	 * Emulate a user lddf/ldda to a misaligned (4-byte aligned)
2N/A	 * address: fetch the data as two 32-bit user loads, combine,
2N/A	 * and deposit it into the destination double FP register.
2N/A	 * Only the primary/secondary (and no-fault) big-endian ASIs are
2N/A	 * emulated here; everything else goes to fpu_trap in C.
2N/A	 *
2N/A	 * Cheetah overwrites SFAR on a DTLB miss, hence read it now.
2N/A	 */
2N/A	ldxa	[MMU_SFAR]%asi, %g5	! misaligned vaddr in %g5
2N/A
2N/A#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2N/A	sethi	%hi(fpu_exists), %g2	! check fpu_exists
2N/A	ld	[%g2 + %lo(fpu_exists)], %g2
2N/A	brz,a,pn %g2, 4f
2N/A	  nop
2N/A#endif
2N/A	CPU_ADDR(%g1, %g4)
2N/A	or	%g0, 1, %g4
2N/A	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2N/A
2N/A	rdpr	%tpc, %g2
2N/A	lda	[%g2]ASI_AIUP, %g6	! get the user's lddf instruction
2N/A	srl	%g6, 23, %g1		! using ldda or not?
2N/A	and	%g1, 1, %g1
2N/A	brz,a,pt %g1, 2f		! check for ldda instruction
2N/A	  nop
2N/A	srl	%g6, 13, %g1		! check immflag
2N/A	and	%g1, 1, %g1
2N/A	rdpr	%tstate, %g2		! %tstate in %g2
2N/A	brnz,a,pn %g1, 1f
2N/A	  srl	%g2, 31, %g1		! get asi from %tstate
2N/A	srl	%g6, 5, %g1		! get asi from instruction
2N/A	and	%g1, 0xFF, %g1		! imm_asi field
2N/A1:
2N/A	! Only emulate the "normal" address spaces; others go to C (3f).
2N/A	cmp	%g1, ASI_P		! primary address space
2N/A	be,a,pt %icc, 2f
2N/A	  nop
2N/A	cmp	%g1, ASI_PNF		! primary no fault address space
2N/A	be,a,pt %icc, 2f
2N/A	  nop
2N/A	cmp	%g1, ASI_S		! secondary address space
2N/A	be,a,pt %icc, 2f
2N/A	  nop
2N/A	cmp	%g1, ASI_SNF		! secondary no fault address space
2N/A	bne,a,pn %icc, 3f
2N/A	  nop
2N/A2:
2N/A	lduwa	[%g5]ASI_USER, %g7	! get first half of misaligned data
2N/A	add	%g5, 4, %g5		! increment misaligned data address
2N/A	lduwa	[%g5]ASI_USER, %g5	! get second half of misaligned data
2N/A
2N/A	sllx	%g7, 32, %g7
2N/A	or	%g5, %g7, %g5		! combine data
2N/A	CPU_ADDR(%g7, %g1)		! save data on a per-cpu basis
2N/A	stx	%g5, [%g7 + CPU_TMP1]	! save in cpu_tmp1
2N/A
2N/A	srl	%g6, 25, %g3		! %g6 has the instruction
2N/A	and	%g3, 0x1F, %g3		! %g3 has rd
2N/A	LDDF_REG(%g3, %g7, %g4)		! load %d(rd) from cpu_tmp1
2N/A
2N/A	CPU_ADDR(%g1, %g4)
2N/A	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2N/A	FAST_TRAP_DONE
2N/A3:
2N/A	CPU_ADDR(%g1, %g4)
2N/A	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2N/A4:
2N/A	set	T_USER, %g3		! trap type in %g3
2N/A	or	%g3, T_LDDF_ALIGN, %g3
2N/A	mov	%g5, %g2		! misaligned vaddr in %g2
2N/A	set	fpu_trap, %g1		! goto C for the little and
2N/A	ba,pt	%xcc, sys_trap		! no fault little asi's
2N/A	  sub	%g0, 1, %g4
2N/A
2N/A.stdf_exception_not_aligned:
2N/A	/*
2N/A	 * Emulate a user stdf/stda to a misaligned (4-byte aligned)
2N/A	 * address: spill the source double FP register to cpu_tmp1,
2N/A	 * then store it as two 32-bit user stores.  Only the primary
2N/A	 * and secondary big-endian ASIs are emulated; everything else
2N/A	 * goes to fpu_trap in C.
2N/A	 *
2N/A	 * Cheetah overwrites SFAR on a DTLB miss, hence read it now.
2N/A	 */
2N/A	ldxa	[MMU_SFAR]%asi, %g5	! misaligned vaddr in %g5
2N/A
2N/A#if defined(DEBUG) || defined(NEED_FPU_EXISTS)
2N/A	sethi	%hi(fpu_exists), %g7	! check fpu_exists
2N/A	ld	[%g7 + %lo(fpu_exists)], %g3
2N/A	brz,a,pn %g3, 4f
2N/A	  nop
2N/A#endif
2N/A	CPU_ADDR(%g1, %g4)
2N/A	or	%g0, 1, %g4
2N/A	st	%g4, [%g1 + CPU_TL1_HDLR] ! set tl1_hdlr flag
2N/A
2N/A	rdpr	%tpc, %g2
2N/A	lda	[%g2]ASI_AIUP, %g6	! get the user's stdf instruction
2N/A
2N/A	srl	%g6, 23, %g1		! using stda or not?
2N/A	and	%g1, 1, %g1
2N/A	brz,a,pt %g1, 2f		! check for stda instruction
2N/A	  nop
2N/A	srl	%g6, 13, %g1		! check immflag
2N/A	and	%g1, 1, %g1
2N/A	rdpr	%tstate, %g2		! %tstate in %g2
2N/A	brnz,a,pn %g1, 1f
2N/A	  srl	%g2, 31, %g1		! get asi from %tstate
2N/A	srl	%g6, 5, %g1		! get asi from instruction
2N/A	and	%g1, 0xFF, %g1		! imm_asi field
2N/A1:
2N/A	! Only emulate the "normal" address spaces; others go to C (3f).
2N/A	cmp	%g1, ASI_P		! primary address space
2N/A	be,a,pt %icc, 2f
2N/A	  nop
2N/A	cmp	%g1, ASI_S		! secondary address space
2N/A	bne,a,pn %icc, 3f
2N/A	  nop
2N/A2:
2N/A	srl	%g6, 25, %g6
2N/A	and	%g6, 0x1F, %g6		! %g6 has rd
2N/A	CPU_ADDR(%g7, %g1)
2N/A	STDF_REG(%g6, %g7, %g4)		! STDF_REG(REG, ADDR, TMP)
2N/A
2N/A	ldx	[%g7 + CPU_TMP1], %g6
2N/A	srlx	%g6, 32, %g7
2N/A	stuwa	%g7, [%g5]ASI_USER	! first half
2N/A	add	%g5, 4, %g5		! increment misaligned data address
2N/A	stuwa	%g6, [%g5]ASI_USER	! second half
2N/A
2N/A	CPU_ADDR(%g1, %g4)
2N/A	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2N/A	FAST_TRAP_DONE
2N/A3:
2N/A	CPU_ADDR(%g1, %g4)
2N/A	st	%g0, [%g1 + CPU_TL1_HDLR] ! clear tl1_hdlr flag
2N/A4:
2N/A	set	T_USER, %g3		! trap type in %g3
2N/A	or	%g3, T_STDF_ALIGN, %g3
2N/A	mov	%g5, %g2		! misaligned vaddr in %g2
2N/A	set	fpu_trap, %g1		! goto C for the little and
2N/A	ba,pt	%xcc, sys_trap		! nofault little asi's
2N/A	  sub	%g0, 1, %g4
2N/A
2N/A#ifdef DEBUG_USER_TRAPTRACECTL
2N/A
2N/A.traptrace_freeze:
2N/A mov %l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4
2N/A TT_TRACE_L(trace_win)
2N/A mov %g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0
2N/A set trap_freeze, %g1
2N/A mov 1, %g2
2N/A st %g2, [%g1]
2N/A FAST_TRAP_DONE
2N/A
2N/A.traptrace_unfreeze:
2N/A set trap_freeze, %g1
2N/A st %g0, [%g1]
2N/A mov %l0, %g1 ; mov %l1, %g2 ; mov %l2, %g3 ; mov %l4, %g4
2N/A TT_TRACE_L(trace_win)
2N/A mov %g4, %l4 ; mov %g3, %l2 ; mov %g2, %l1 ; mov %g1, %l0
2N/A FAST_TRAP_DONE
2N/A
2N/A#endif /* DEBUG_USER_TRAPTRACECTL */
2N/A
2N/A.getcc:
2N/A	! Fast trap: return the V8 %psr integer condition codes to the
2N/A	! user in the normal-globals %g1, extracted from %tstate.ccr.icc.
2N/A	CPU_ADDR(%g1, %g2)
2N/A	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
2N/A	stx	%o1, [%g1 + CPU_TMP2]		! save %o1
2N/A	rdpr	%tstate, %g3			! get tstate
2N/A	srlx	%g3, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
2N/A	set	PSR_ICC, %g2
2N/A	and	%o0, %g2, %o0			! mask out the rest
2N/A	srl	%o0, PSR_ICC_SHIFT, %o0		! right justify
2N/A	rdpr	%pstate, %o1
2N/A	wrpr	%o1, PSTATE_AG, %pstate		! get into normal globals
2N/A	mov	%o0, %g1			! move ccr to normal %g1
2N/A	wrpr	%g0, %o1, %pstate		! back into alternate globals
2N/A	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0 (alternate %g1
2N/A	ldx	[%g1 + CPU_TMP2], %o1		! restore %o1  still = CPU addr)
2N/A	FAST_TRAP_DONE
2N/A
2N/A.setcc:
2N/A	! Fast trap: take new V8 icc bits from the user's normal-globals
2N/A	! %g1 and install them into %tstate.ccr.icc.
2N/A	CPU_ADDR(%g1, %g2)
2N/A	stx	%o0, [%g1 + CPU_TMP1]		! save %o0
2N/A	stx	%o1, [%g1 + CPU_TMP2]		! save %o1
2N/A	rdpr	%pstate, %o0
2N/A	wrpr	%o0, PSTATE_AG, %pstate		! get into normal globals
2N/A	mov	%g1, %o1			! %o1 = user-supplied icc value
2N/A	wrpr	%g0, %o0, %pstate		! back to alternates
2N/A	sll	%o1, PSR_ICC_SHIFT, %g2
2N/A	set	PSR_ICC, %g3
2N/A	and	%g2, %g3, %g2			! mask out rest
2N/A	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g2	! position at tstate.ccr.icc
2N/A	rdpr	%tstate, %g3			! get tstate
2N/A	srl	%g3, 0, %g3			! clear upper word (old ccr)
2N/A	or	%g3, %g2, %g3			! or in new bits
2N/A	wrpr	%g3, %tstate
2N/A	ldx	[%g1 + CPU_TMP1], %o0		! restore %o0
2N/A	ldx	[%g1 + CPU_TMP2], %o1		! restore %o1
2N/A	FAST_TRAP_DONE
2N/A
2N/A/*
2N/A * getpsr(void)
2N/A * Returns a synthesized V8 %psr in %o0: icc from %tstate.ccr,
2N/A * psr.ef from %fprs.fef, plus the fixed impl/ver field.
2N/A * Note that the xcc part of the ccr is not provided.
2N/A * The V8 code shows why the V9 trap is not faster:
2N/A *	#define GETPSR_TRAP() \
2N/A *	mov %psr, %i0; jmp %l2; rett %l2+4; nop;
2N/A */
2N/A
2N/A	.type .getpsr, #function
2N/A.getpsr:
2N/A	rdpr	%tstate, %g1			! get tstate
2N/A	srlx	%g1, PSR_TSTATE_CC_SHIFT, %o0	! shift ccr to V8 psr
2N/A	set	PSR_ICC, %g2
2N/A	and	%o0, %g2, %o0			! mask out the rest
2N/A
2N/A	rd	%fprs, %g1			! get fprs
2N/A	and	%g1, FPRS_FEF, %g2		! mask out dirty upper/lower
2N/A	sllx	%g2, PSR_FPRS_FEF_SHIFT, %g2	! shift fef to V8 psr.ef
2N/A	or	%o0, %g2, %o0			! or result into psr.ef
2N/A
2N/A	set	V9_PSR_IMPLVER, %g2		! SI assigned impl/ver: 0xef
2N/A	or	%o0, %g2, %o0			! or psr.impl/ver
2N/A	FAST_TRAP_DONE
2N/A	SET_SIZE(.getpsr)
2N/A
2N/A/*
2N/A * setpsr(newpsr)
2N/A * Installs the V8 psr.icc bits (from %o0) into %tstate.ccr and the
2N/A * psr.ef bit into %fprs.fef, mirroring fef into the lwp's fpu state
2N/A * (fpu_fprs/fpu_en) so context switch code sees it.
2N/A * Note that there is no support for ccr.xcc in the V9 code.
2N/A */
2N/A
2N/A	.type .setpsr, #function
2N/A.setpsr:
2N/A	rdpr	%tstate, %g1			! get tstate
2N/A!	setx	TSTATE_V8_UBITS, %g2
2N/A	or 	%g0, CCR_ICC, %g3		! icc mask ...
2N/A	sllx	%g3, TSTATE_CCR_SHIFT, %g2	! ... at tstate.ccr position
2N/A
2N/A	andn	%g1, %g2, %g1			! zero current user bits
2N/A	set	PSR_ICC, %g2
2N/A	and	%g2, %o0, %g2			! clear all but psr.icc bits
2N/A	sllx	%g2, PSR_TSTATE_CC_SHIFT, %g3	! shift to tstate.ccr.icc
2N/A	wrpr	%g1, %g3, %tstate		! write tstate
2N/A
2N/A	set	PSR_EF, %g2
2N/A	and	%g2, %o0, %g2			! clear all but fp enable bit
2N/A	srlx	%g2, PSR_FPRS_FEF_SHIFT, %g4	! shift ef to V9 fprs.fef
2N/A	wr	%g0, %g4, %fprs			! write fprs
2N/A
2N/A	CPU_ADDR(%g1, %g2)			! load CPU struct addr to %g1
2N/A	ldn	[%g1 + CPU_THREAD], %g2		! load thread pointer
2N/A	ldn	[%g2 + T_LWP], %g3		! load klwp pointer
2N/A	ldn	[%g3 + LWP_FPU], %g2		! get lwp_fpu pointer
2N/A	stuw	%g4, [%g2 + FPU_FPRS]		! write fef value to fpu_fprs
2N/A	srlx	%g4, 2, %g4			! shift fef value to bit 0
2N/A	stub	%g4, [%g2 + FPU_EN]		! write fef value to fpu_en
2N/A	FAST_TRAP_DONE
2N/A	SET_SIZE(.setpsr)
2N/A
2N/A/*
2N/A * getlgrp
2N/A * get home lgrpid on which the calling thread is currently executing.
2N/A * Returns: %o0 = cpu_id of the current CPU, %o1 = lgrp id (sign-
2N/A * extended to 64 bits) from the thread's lpl.
2N/A */
2N/A	.type	.getlgrp, #function
2N/A.getlgrp:
2N/A	CPU_ADDR(%g1, %g2)	! load CPU struct addr to %g1 using %g2
2N/A	ld	[%g1 + CPU_ID], %o0	! load cpu_id
2N/A	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
2N/A	ldn	[%g2 + T_LPL], %g2	! load lpl pointer
2N/A	ld	[%g2 + LPL_LGRPID], %g1	! load lpl_lgrpid
2N/A	sra	%g1, 0, %o1		! sign-extend 32-bit lgrpid to %o1
2N/A	FAST_TRAP_DONE
2N/A	SET_SIZE(.getlgrp)
2N/A
2N/A/*
2N/A * Entry for old 4.x trap (trap 0).
2N/A * If the lwp has a trap0 emulation address set (pcb_trap0addr), vector
2N/A * user execution there (passing the saved %tnpc in normal %g6);
2N/A * otherwise fall through to the 32-bit syscall path, mapping the old
2N/A * 4.x mmap number to the current one.
2N/A */
2N/A	ENTRY_NP(syscall_trap_4x)
2N/A	CPU_ADDR(%g1, %g2)	! load CPU struct addr to %g1 using %g2
2N/A	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
2N/A	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
2N/A	ld	[%g2 + PCB_TRAP0], %g2	! lwp->lwp_pcb.pcb_trap0addr
2N/A	brz,pn	%g2, 1f		! has it been set?
2N/A	st	%l0, [%g1 + CPU_TMP1]	! delay - save some locals
2N/A	st	%l1, [%g1 + CPU_TMP2]
2N/A	rdpr	%tnpc, %l1	! save old tnpc
2N/A	wrpr	%g0, %g2, %tnpc	! setup tnpc
2N/A
2N/A	rdpr	%pstate, %l0
2N/A	wrpr	%l0, PSTATE_AG, %pstate	! switch to normal globals
2N/A	mov	%l1, %g6	! pass tnpc to user code in %g6
2N/A	wrpr	%l0, %g0, %pstate	! switch back to alternate globals
2N/A
2N/A	! Note that %g1 still contains CPU struct addr
2N/A	ld	[%g1 + CPU_TMP2], %l1	! restore locals
2N/A	ld	[%g1 + CPU_TMP1], %l0
2N/A	FAST_TRAP_DONE_CHK_INTR
2N/A1:
2N/A	! No emulation address: treat as a 32-bit syscall.
2N/A	mov	%g1, %l0	! %l0 = CPU struct addr
2N/A	st	%l1, [%g1 + CPU_TMP2]
2N/A	rdpr	%pstate, %l1
2N/A	wrpr	%l1, PSTATE_AG, %pstate
2N/A	!
2N/A	! check for old syscall mmap which is the only different one which
2N/A	! must be the same. Others are handled in the compatibility library.
2N/A	! (normal-globals %g1 here = the user's syscall number)
2N/A	!
2N/A	cmp	%g1, OSYS_mmap	! compare to old 4.x mmap
2N/A	movz	%icc, SYS_mmap, %g1
2N/A	wrpr	%g0, %l1, %pstate
2N/A	ld	[%l0 + CPU_TMP2], %l1	! restore locals
2N/A	ld	[%l0 + CPU_TMP1], %l0
2N/A	SYSCALL(syscall_trap32)
2N/A	SET_SIZE(syscall_trap_4x)
2N/A
2N/A/*
2N/A * Handler for software trap 9.
2N/A * Set trap0 emulation address for old 4.x system call trap.
2N/A * The new address is taken from the user's normal-globals %g1 and
2N/A * stored (word-aligned) in lwp->lwp_pcb.pcb_trap0addr.
2N/A * XXX - this should be a system call.
2N/A */
2N/A	ENTRY_NP(set_trap0_addr)
2N/A	CPU_ADDR(%g1, %g2)	! load CPU struct addr to %g1 using %g2
2N/A	ldn	[%g1 + CPU_THREAD], %g2	! load thread pointer
2N/A	ldn	[%g2 + T_LWP], %g2	! load klwp pointer
2N/A	st	%l0, [%g1 + CPU_TMP1]	! save some locals
2N/A	st	%l1, [%g1 + CPU_TMP2]
2N/A	rdpr	%pstate, %l0
2N/A	wrpr	%l0, PSTATE_AG, %pstate	! normal globals: read user's %g1
2N/A	mov	%g1, %l1		! %l1 = user-supplied trap0 address
2N/A	wrpr	%g0, %l0, %pstate	! back to alternate globals
2N/A	andn	%l1, 3, %l1		! force alignment
2N/A	st	%l1, [%g2 + PCB_TRAP0]	! lwp->lwp_pcb.pcb_trap0addr
2N/A	ld	[%g1 + CPU_TMP1], %l0	! restore locals
2N/A	ld	[%g1 + CPU_TMP2], %l1
2N/A	FAST_TRAP_DONE
2N/A	SET_SIZE(set_trap0_addr)
2N/A
2N/A/*
2N/A * mmu_trap_tl1
2N/A * trap handler for unexpected mmu traps.
2N/A * simply checks if the trap was a user lddf/stdf alignment trap, in which
2N/A * case we go to fpu_trap or a user trap from the window handler, in which
2N/A * case we go save the state on the pcb.  Otherwise, we go to ptl1_panic.
2N/A */
2N/A	.type	mmu_trap_tl1, #function
2N/Ammu_trap_tl1:
2N/A#ifdef	TRAPTRACE
2N/A	! Record a trap-trace entry for this TL>1 mmu trap.
2N/A	TRACE_PTR(%g5, %g6)
2N/A	GET_TRACE_TICK(%g6)
2N/A	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
2N/A	rdpr	%tl, %g6
2N/A	stha	%g6, [%g5 + TRAP_ENT_TL]%asi
2N/A	rdpr	%tt, %g6
2N/A	stha	%g6, [%g5 + TRAP_ENT_TT]%asi
2N/A	rdpr	%tstate, %g6
2N/A	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi
2N/A	stna	%sp, [%g5 + TRAP_ENT_SP]%asi
2N/A	stna	%g0, [%g5 + TRAP_ENT_TR]%asi
2N/A	rdpr	%tpc, %g6
2N/A	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
2N/A	set	MMU_SFAR, %g6
2N/A	ldxa	[%g6]ASI_DMMU, %g6
2N/A	stxa	%g6, [%g5 + TRAP_ENT_F1]%asi
2N/A	CPU_PADDR(%g7, %g6);
2N/A	add	%g7, CPU_TL1_HDLR, %g7
2N/A	lda	[%g7]ASI_MEM, %g6
2N/A	stxa	%g6, [%g5 + TRAP_ENT_F2]%asi
2N/A	set	0xdeadbeef, %g6
2N/A	stna	%g6, [%g5 + TRAP_ENT_F3]%asi
2N/A	stna	%g6, [%g5 + TRAP_ENT_F4]%asi
2N/A	TRACE_NEXT(%g5, %g6, %g7)
2N/A#endif /* TRAPTRACE */
2N/A
2N/A	! On Panther, a T_DATA_EXCEPTION with SFSR parity bit set is a
2N/A	! DTLB parity error and is routed to the parity handler instead.
2N/A	GET_CPU_IMPL(%g5)
2N/A	cmp	%g5, PANTHER_IMPL
2N/A	bne	mmu_trap_tl1_4
2N/A	  nop
2N/A	rdpr	%tt, %g5
2N/A	cmp	%g5, T_DATA_EXCEPTION
2N/A	bne	mmu_trap_tl1_4
2N/A	  nop
2N/A	wr	%g0, ASI_DMMU, %asi
2N/A	ldxa	[MMU_SFSR]%asi, %g5
2N/A	mov	1, %g6
2N/A	sllx	%g6, PN_SFSR_PARITY_SHIFT, %g6
2N/A	andcc	%g5, %g6, %g0
2N/A	bz	mmu_trap_tl1_4
2N/A	! (delay slot is the MMU_TAG_ACCESS load below -- executed on
2N/A	! both paths; harmless when the branch is taken)
2N/A
2N/A	/*
2N/A	 * We are running on a Panther and have hit a DTLB parity error.
2N/A	 */
2N/A	ldxa	[MMU_TAG_ACCESS]%asi, %g2
2N/A	mov	%g5, %g3
2N/A	ba,pt	%xcc, .mmu_exception_is_tlb_parity
2N/A	mov	T_DATA_EXCEPTION, %g1
2N/A
2N/Ammu_trap_tl1_4:
2N/A	CPU_PADDR(%g7, %g6);
2N/A	add	%g7, CPU_TL1_HDLR, %g7		! %g7 = &cpu_m.tl1_hdlr (PA)
2N/A	/*
2N/A	 * AM is cleared on trap, so addresses are 64 bit
2N/A	 */
2N/A	lda	[%g7]ASI_MEM, %g6
2N/A	brz,a,pt %g6, 1f
2N/A	  nop
2N/A	/*
2N/A	 * We are going to update cpu_m.tl1_hdlr using physical address.
2N/A	 * Flush the D$ line, so that stale data won't be accessed later.
2N/A	 */
2N/A	CPU_ADDR(%g6, %g5)
2N/A	add	%g6, CPU_TL1_HDLR, %g6		! %g6 = &cpu_m.tl1_hdlr (VA)
2N/A	GET_CPU_IMPL(%g5)
2N/A	cmp	%g5, CHEETAH_IMPL
2N/A	bl,pn	%icc, 3f			! pre-Cheetah: displacement flush
2N/A	  sethi	%hi(dcache_line_mask), %g5
2N/A	stxa	%g0, [%g7]ASI_DC_INVAL		! Cheetah+: invalidate by PA
2N/A	membar	#Sync
2N/A	ba,pt	%xcc, 2f
2N/A	  nop
2N/A3:
2N/A	ld	[%g5 + %lo(dcache_line_mask)], %g5
2N/A	and	%g6, %g5, %g5
2N/A	stxa	%g0, [%g5]ASI_DC_TAG		! invalidate by D$ tag index
2N/A	membar	#Sync
2N/A2:
2N/A	sta	%g0, [%g7]ASI_MEM		! clear tl1_hdlr flag (PA store)
2N/A	SWITCH_GLOBALS				! back to mmu globals
2N/A	ba,a,pt	%xcc, sfmmu_mmu_trap		! handle page faults
2N/A1:
2N/A	! tl1_hdlr not set: only a fault from within a window spill/fill
2N/A	! handler is recoverable here; anything else is a ptl1_panic.
2N/A	rdpr	%tt, %g5
2N/A	rdpr	%tl, %g7
2N/A	sub	%g7, 1, %g6
2N/A	wrpr	%g6, %tl
2N/A	rdpr	%tt, %g6			! %tt of the trap at TL-1
2N/A	wrpr	%g7, %tl
2N/A	and	%g6, WTRAP_TTMASK, %g6
2N/A	cmp	%g6, WTRAP_TYPE			! was it a window trap?
2N/A	bne,a,pn %xcc, ptl1_panic
2N/A	mov	PTL1_BAD_MMUTRAP, %g1
2N/A	rdpr	%tpc, %g7
2N/A	/* tpc should be in the trap table */
2N/A	set	trap_table, %g6
2N/A	cmp	%g7, %g6
2N/A	blt,a,pn %xcc, ptl1_panic
2N/A	mov	PTL1_BAD_MMUTRAP, %g1
2N/A	set	etrap_table, %g6
2N/A	cmp	%g7, %g6
2N/A	bge,a,pn %xcc, ptl1_panic
2N/A	mov	PTL1_BAD_MMUTRAP, %g1
2N/A	cmp	%g5, T_ALIGNMENT		! pick fault addr source:
2N/A	move	%icc, MMU_SFAR, %g6		!   alignment -> SFAR
2N/A	movne	%icc, MMU_TAG_ACCESS, %g6	!   otherwise -> tag access
2N/A	ldxa	[%g6]ASI_DMMU, %g6
2N/A	andn	%g7, WTRAP_ALIGN, %g7		/* 128 byte aligned */
2N/A	add	%g7, WTRAP_FAULTOFF, %g7	! resume at handler's fault path
2N/A	wrpr	%g0, %g7, %tnpc
2N/A	done
2N/A	SET_SIZE(mmu_trap_tl1)
2N/A
2N/A/*
2N/A * Several traps use kmdb_trap and kmdb_trap_tl1 as their handlers.  These
2N/A * traps are valid only when kmdb is loaded.  When the debugger is active,
2N/A * the code below is rewritten to transfer control to the appropriate
2N/A * debugger entry points.  Until then, the first instruction simply
2N/A * branches back to trap table entry 0; the jmp %g1 form is what runs
2N/A * once the ba,a has been patched out.
2N/A */
2N/A	.global	kmdb_trap
2N/A	.align	8
2N/Akmdb_trap:
2N/A	ba,a	trap_table0
2N/A	jmp	%g1 + 0
2N/A	nop
2N/A
2N/A	.global	kmdb_trap_tl1
2N/A	.align	8
2N/Akmdb_trap_tl1:
2N/A	ba,a	trap_table0
2N/A	jmp	%g1 + 0
2N/A	nop
2N/A
2N/A/*
2N/A * This entry is copied from OBP's trap table during boot.
2N/A * (NOT = "not implemented" stub until the copy happens.)
2N/A */
2N/A	.global	obp_bpt
2N/A	.align	8
2N/Aobp_bpt:
2N/A	NOT
2N/A
2N/A/*
2N/A * if kernel, set PCONTEXT to 0 for debuggers
2N/A * if user, clear nucleus page sizes
2N/A * Sanitizes the nucleus page-size bits in PCONTEXT (demapping both
2N/A * TLBs when they change) before jumping to the OBP breakpoint entry.
2N/A */
2N/A	.global	kctx_obp_bpt
2N/Akctx_obp_bpt:
2N/A	set	obp_bpt, %g2
2N/A1:
2N/A	mov	MMU_PCONTEXT, %g1
2N/A	ldxa	[%g1]ASI_DMMU, %g1
2N/A	srlx	%g1, CTXREG_NEXT_SHIFT, %g3
2N/A	brz,pt	%g3, 3f			! nucleus pgsz is 0, no problem
2N/A	  sllx	%g3, CTXREG_NEXT_SHIFT, %g3
2N/A	set	CTXREG_CTX_MASK, %g4	! check Pcontext
2N/A	btst	%g4, %g1
2N/A	bz,a,pt	%xcc, 2f
2N/A	  clr	%g3			! kernel:  PCONTEXT=0
2N/A	xor	%g3, %g1, %g3		! user:	clr N_pgsz0/1 bits
2N/A2:
2N/A	! Page-size bits change: demap everything, then write the new
2N/A	! PCONTEXT and flush so the IMMU sees it.
2N/A	set	DEMAP_ALL_TYPE, %g1
2N/A	stxa	%g0, [%g1]ASI_DTLB_DEMAP
2N/A	stxa	%g0, [%g1]ASI_ITLB_DEMAP
2N/A	mov	MMU_PCONTEXT, %g1
2N/A	stxa	%g3, [%g1]ASI_DMMU
2N/A	membar	#Sync
2N/A	sethi	%hi(FLUSH_ADDR), %g1
2N/A	flush	%g1			! flush required by immu
2N/A3:
2N/A	jmp	%g2			! enter obp_bpt
2N/A	  nop
2N/A
2N/A
2N/A#ifdef TRAPTRACE
2N/A/*
2N/A * TRAPTRACE support.
2N/A * labels here are branched to with "rd %pc, %g7" in the delay slot.
2N/A * Return is done by "jmp %g7 + 4".
2N/A */
2N/A
2N/Atrace_gen:
2N/A	! Generic trap-trace record: tick, tl, tt, tstate, sp, tpc.
2N/A	! Entered with return pc in %g7 (see TRAPTRACE comment above).
2N/A	TRACE_PTR(%g3, %g6)
2N/A	GET_TRACE_TICK(%g6)
2N/A	stxa	%g6, [%g3 + TRAP_ENT_TICK]%asi
2N/A	rdpr	%tl, %g6
2N/A	stha	%g6, [%g3 + TRAP_ENT_TL]%asi
2N/A	rdpr	%tt, %g6
2N/A	stha	%g6, [%g3 + TRAP_ENT_TT]%asi
2N/A	rdpr	%tstate, %g6
2N/A	stxa	%g6, [%g3 + TRAP_ENT_TSTATE]%asi
2N/A	stna	%sp, [%g3 + TRAP_ENT_SP]%asi
2N/A	rdpr	%tpc, %g6
2N/A	stna	%g6, [%g3 + TRAP_ENT_TPC]%asi
2N/A	TRACE_NEXT(%g3, %g4, %g5)
2N/A	jmp	%g7 + 4
2N/A	  nop
2N/A
2N/Atrace_win:
2N/A	! Window-trap trace record; return pc is in %l4 (not %g7).
2N/A	TRACE_WIN_INFO(0, %l0, %l1, %l2)
2N/A	! Keep the locals as clean as possible, caller cleans %l4
2N/A	clr	%l2
2N/A	clr	%l1
2N/A	jmp	%l4 + 4
2N/A	  clr	%l0
2N/A
2N/A/*
2N/A * Trace a tsb hit
2N/A * g1 = tsbe pointer (in/clobbered)
2N/A * g2 = tag access register (in)
2N/A * g3 - g4 = scratch (clobbered)
2N/A * g5 = tsbe data (in)
2N/A * g6 = scratch (clobbered)
2N/A * g7 = pc we jumped here from (in)
2N/A */
2N/A
2N/A	! Do not disturb %g5, it will be used after the trace
2N/A	ALTENTRY(trace_tsbhit)
2N/A	TRACE_TSBHIT(0)
2N/A	jmp	%g7 + 4
2N/A	  nop
2N/A
2N/A/*
2N/A * Trace a TSB miss
2N/A *
2N/A * g1 = tsb8k pointer (in)
2N/A * g2 = tag access register (in)
2N/A * g3 = tsb4m pointer (in)
2N/A * g4 = tsbe tag (in/clobbered)
2N/A * g5 - g6 = scratch (clobbered)
2N/A * g7 = pc we jumped here from (in)
2N/A */
2N/A	.global	trace_tsbmiss
2N/Atrace_tsbmiss:
2N/A	membar	#Sync
2N/A	sethi	%hi(FLUSH_ADDR), %g6
2N/A	flush	%g6
2N/A	TRACE_PTR(%g5, %g6)
2N/A	GET_TRACE_TICK(%g6)
2N/A	stxa	%g6, [%g5 + TRAP_ENT_TICK]%asi
2N/A	stxa	%g2, [%g5 + TRAP_ENT_SP]%asi		! tag access
2N/A	stxa	%g4, [%g5 + TRAP_ENT_F1]%asi		! tsb tag
2N/A	rdpr	%tnpc, %g6
2N/A	stxa	%g6, [%g5 + TRAP_ENT_F2]%asi
2N/A	stna	%g1, [%g5 + TRAP_ENT_F3]%asi		! tsb8k pointer
2N/A	srlx	%g1, 32, %g6
2N/A	stna	%g6, [%g5 + TRAP_ENT_F4]%asi		! tsb8k ptr, upper 32 bits
2N/A	rdpr	%tpc, %g6
2N/A	stna	%g6, [%g5 + TRAP_ENT_TPC]%asi
2N/A	rdpr	%tl, %g6
2N/A	stha	%g6, [%g5 + TRAP_ENT_TL]%asi
2N/A	rdpr	%tt, %g6
2N/A	or	%g6, TT_MMU_MISS, %g4
2N/A	stha	%g4, [%g5 + TRAP_ENT_TT]%asi
2N/A	cmp	%g6, FAST_IMMU_MISS_TT
2N/A	be,a	%icc, 1f			! pick tag target register:
2N/A	  ldxa	[%g0]ASI_IMMU, %g6		!   instruction miss -> IMMU
2N/A	ldxa	[%g0]ASI_DMMU, %g6		!   otherwise -> DMMU
2N/A1:	stxa	%g6, [%g5 + TRAP_ENT_TSTATE]%asi	! tag target
2N/A	stxa	%g3, [%g5 + TRAP_ENT_TR]%asi		! tsb4m pointer
2N/A	TRACE_NEXT(%g5, %g4, %g6)
2N/A	jmp	%g7 + 4
2N/A	  nop
2N/A
2N/A/*
2N/A * Trace a data-protection fault.
2N/A * g2 = tag access register (in)
2N/A * g3 = ctx number (in)
2N/A * g7 = pc we jumped here from (in); return via jmp %g7 + 4.
2N/A */
2N/Atrace_dataprot:
2N/A	membar	#Sync
2N/A	sethi	%hi(FLUSH_ADDR), %g6
2N/A	flush	%g6
2N/A	TRACE_PTR(%g1, %g6)
2N/A	GET_TRACE_TICK(%g6)
2N/A	stxa	%g6, [%g1 + TRAP_ENT_TICK]%asi
2N/A	rdpr	%tpc, %g6
2N/A	stna	%g6, [%g1 + TRAP_ENT_TPC]%asi
2N/A	rdpr	%tstate, %g6
2N/A	stxa	%g6, [%g1 + TRAP_ENT_TSTATE]%asi
2N/A	stxa	%g2, [%g1 + TRAP_ENT_SP]%asi	! tag access reg
2N/A	stxa	%g0, [%g1 + TRAP_ENT_TR]%asi	! unused fields zeroed
2N/A	stxa	%g0, [%g1 + TRAP_ENT_F1]%asi
2N/A	stxa	%g0, [%g1 + TRAP_ENT_F2]%asi
2N/A	stxa	%g0, [%g1 + TRAP_ENT_F3]%asi
2N/A	stxa	%g0, [%g1 + TRAP_ENT_F4]%asi
2N/A	rdpr	%tl, %g6
2N/A	stha	%g6, [%g1 + TRAP_ENT_TL]%asi
2N/A	rdpr	%tt, %g6
2N/A	stha	%g6, [%g1 + TRAP_ENT_TT]%asi
2N/A	TRACE_NEXT(%g1, %g4, %g5)
2N/A	jmp	%g7 + 4
2N/A	  nop
2N/A
2N/A#endif /* TRAPTRACE */
2N/A
2N/A/*
2N/A * expects offset into tsbmiss area in %g1 and return pc in %g7
2N/A * Increments the 32-bit counter at this CPU's tsbmiss_area + %g1;
2N/A * the store happens in the delay slot of the return jump.
2N/A */
2N/Astat_mmu:
2N/A	CPU_INDEX(%g5, %g6)
2N/A	sethi	%hi(tsbmiss_area), %g6
2N/A	sllx	%g5, TSBMISS_SHIFT, %g5
2N/A	or	%g6, %lo(tsbmiss_area), %g6
2N/A	add	%g6, %g5, %g6		/* g6 = tsbmiss area */
2N/A	ld	[%g6 + %g1], %g5
2N/A	add	%g5, 1, %g5
2N/A	jmp	%g7 + 4
2N/A	  st	%g5, [%g6 + %g1]
2N/A
2N/A
2N/A/*
2N/A * fast_trap_done, fast_trap_done_chk_intr:
2N/A *
2N/A * Due to the design of UltraSPARC pipeline, pending interrupts are not
2N/A * taken immediately after a RETRY or DONE instruction which causes IE to
2N/A * go from 0 to 1. Instead, the instruction at %tpc or %tnpc is allowed
2N/A * to execute first before taking any interrupts. If that instruction
2N/A * results in other traps, and if the corresponding trap handler runs
2N/A * entirely at TL=1 with interrupts disabled, then pending interrupts
2N/A * won't be taken until after yet another instruction following the %tpc
2N/A * or %tnpc.
2N/A *
2N/A * A malicious user program can use this feature to block out interrupts
2N/A * for extended durations, which can result in send_mondo_timeout kernel
2N/A * panic.
2N/A *
2N/A * This problem is addressed by servicing any pending interrupts via
2N/A * sys_trap before returning back to the user mode from a fast trap
2N/A * handler. The "done" instruction within a fast trap handler, which
2N/A * runs entirely at TL=1 with interrupts disabled, is replaced with the
2N/A * FAST_TRAP_DONE macro, which branches control to this fast_trap_done
2N/A * entry point.
2N/A *
2N/A * We check for any pending interrupts here and force a sys_trap to
2N/A * service those interrupts, if any. To minimize overhead, pending
2N/A * interrupts are checked if the %tpc happens to be at 16K boundary,
2N/A * which allows a malicious program to execute at most 4K consecutive
2N/A * instructions before we service any pending interrupts. If a worst
2N/A * case fast trap handler takes about 2 usec, then interrupts will be
2N/A * blocked for at most 8 msec, less than a clock tick.
2N/A *
2N/A * For the cases where we don't know if the %tpc will cross a 16K
2N/A * boundary, we can't use the above optimization and always process
2N/A * any pending interrupts via the fast_trap_done_chk_intr entry point.
2N/A *
2N/A * Entry Conditions:
2N/A * %pstate am:0 priv:1 ie:0
2N/A * globals are AG (not normal globals)
2N/A */
2N/A
2N/A .global fast_trap_done, fast_trap_done_chk_intr
2N/Afast_trap_done:
2N/A rdpr %tpc, %g5
2N/A sethi %hi(0xffffc000), %g6 ! 1's complement of 0x3fff
2N/A andncc %g5, %g6, %g0 ! check lower 14 bits of %tpc
2N/A bz,a,pn %icc, 1f ! branch if zero (lower 32 bits only)
2N/A ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g5
2N/A done
2N/A
2N/Afast_trap_done_chk_intr:
2N/A ldxa [%g0]ASI_INTR_RECEIVE_STATUS, %g5
2N/A
2N/A1: rd SOFTINT, %g6
2N/A and %g5, IRSR_BUSY, %g5
2N/A orcc %g5, %g6, %g0
2N/A bnz,pn %xcc, 2f ! branch if any pending intr
2N/A nop
2N/A done
2N/A
2N/A2:
2N/A /*
2N/A * We get here if there are any pending interrupts.
2N/A * Adjust %tpc/%tnpc as we'll be resuming via "retry"
2N/A * instruction.
2N/A */
2N/A rdpr %tnpc, %g5
2N/A wrpr %g0, %g5, %tpc
2N/A add %g5, 4, %g5
2N/A wrpr %g0, %g5, %tnpc
2N/A
2N/A /*
2N/A * Force a dummy sys_trap call so that interrupts can be serviced.
2N/A */
2N/A set fast_trap_dummy_call, %g1
2N/A ba,pt %xcc, sys_trap
2N/A mov -1, %g4
2N/A
2N/Afast_trap_dummy_call:
2N/A retl
2N/A nop
2N/A
2N/A#endif /* lint */
2N/A
2N/A