/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
#if !defined(lint)
#include "assym.h"
#endif /* lint */
#include <sys/asm_linkage.h>
#include <vm/hat_sfmmu.h>
#include <sys/machparam.h>
#include <sys/machcpuvar.h>
#include <sys/machthread.h>
#include <sys/privregs.h>
#include <sys/spitregs.h>
#ifdef TRAPTRACE
#include <sys/traptrace.h>
#endif /* TRAPTRACE */
#ifndef lint
/* BEGIN CSTYLED */
nop ;\
/* \
* flushtype = FLUSHALL_TYPE, flush the whole thing \
* tmp3 = cache size \
* tmp1 = cache line size \
*/ \
4: \
nop ;\
/* \
* flushtype = FLUSHPAGE_TYPE \
* arg1 = tag to compare against \
* arg2 = virtual color \
* tmp1 = cache line size \
* tmp2 = tag from cache \
* tmp3 = counter \
*/ \
2: \
4: \
nop ;\
5: \
nop ;\
/* \
* flushtype = FLUSHMATCH_TYPE \
* arg1 = tag to compare against \
* tmp1 = cache line size \
* tmp3 = cache size \
* arg2 = counter \
* tmp2 = cache tag \
*/ \
3: \
4: \
nop ;\
5: \
1:
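/*
 * A minimal sketch of the FLUSHPAGE_TYPE case described above, with
 * concrete registers standing in for the macro parameters (%o1 = tag
 * to compare, %o2 = virtual color, %o3 = cache line size, %o4 = tag
 * from cache, %o5 = counter). ASI_DC_TAG is the assumed diagnostic
 * ASI for D$ tag access; this is illustrative, not the original body.
 */
	sll	%o2, MMU_PAGESHIFT, %o2		! color -> D$ byte offset
	set	MMU_PAGESIZE, %o5
	sub	%o5, %o3, %o5			! start with the last line
6:
	ldxa	[%o2 + %o5]ASI_DC_TAG, %o4	! diagnostic read of the tag
	cmp	%o4, %o1			! line belongs to our page?
	bne,pt	%xcc, 7f
	  nop
	stxa	%g0, [%o2 + %o5]ASI_DC_TAG	! yes - invalidate it
	membar	#Sync
7:
	brgz,pt	%o5, 6b				! more lines in this page?
	  sub	%o5, %o3, %o5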
/*
* macro that flushes the entire dcache color
*/
/* \
* arg = virtual color \
* tmp2 = page size \
* tmp1 = cache line size \
*/ \
2: \
1:
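/*
 * A minimal sketch of such a color flush (illustrative only): walk one
 * page-sized slice of the D$ selected by the color and invalidate every
 * tag through the assumed ASI_DC_TAG diagnostic ASI. %o0 = virtual
 * color, %o1 = cache line size, %o2 = page size, as in the comment above.
 */
	sll	%o0, MMU_PAGESHIFT, %o0		! color -> D$ byte offset
	set	MMU_PAGESIZE, %o2
	sub	%o2, %o1, %o2			! last line first
6:
	stxa	%g0, [%o0 + %o2]ASI_DC_TAG	! invalidate one D$ line
	membar	#Sync
	brgz,pt	%o2, 6b
	  sub	%o2, %o1, %o2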
/*
* macro that flushes the entire dcache
*/
\
2: \
1:
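/*
 * A minimal sketch of a whole-D$ flush (illustrative): invalidate every
 * tag from the top of the cache down. Assumes %o0 = dcache size,
 * %o1 = dcache line size, and the ASI_DC_TAG diagnostic ASI.
 */
	sub	%o0, %o1, %o0			! index of the last line
6:
	stxa	%g0, [%o0]ASI_DC_TAG		! invalidate one D$ line
	membar	#Sync
	brgz,pt	%o0, 6b
	  sub	%o0, %o1, %o0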
/*
* macro that flushes the entire icache
*/
\
2: \
1:
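/*
 * A minimal sketch of a whole-I$ flush (illustrative): the same walk as
 * the D$ case, but through the assumed ASI_IC_TAG diagnostic ASI.
 * %o0 = icache size, %o1 = icache line size.
 */
	sub	%o0, %o1, %o0			! index of the last line
6:
	stxa	%g0, [%o0]ASI_IC_TAG		! invalidate one I$ line
	membar	#Sync
	brgz,pt	%o0, 6b
	  sub	%o0, %o1, %o0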
/*
 * Macro for getting to an offset from the 'cpu_private' ptr. The
 * 'cpu_private' ptr is in the machcpu structure.
 * r_or_s: Register or symbol offset from the 'cpu_private' ptr.
* scr1: Scratch, ptr is returned in this register.
* scr2: Scratch
*/
nop; \
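/*
 * A minimal sketch of such a macro (illustrative; assumes the assym.h
 * offsets T_CPU and CPU_PRIVATE):
 *
 *	ldn	[THREAD_REG + T_CPU], scr1	! scr1 = CPU ptr
 *	ldn	[scr1 + CPU_PRIVATE], scr1	! scr1 = cpu_private ptr
 *	add	scr1, r_or_s, scr1		! apply the offset
 */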
#ifdef HUMMINGBIRD
/*
* UltraSPARC-IIe processor supports both 4-way set associative and
 * direct map E$. For performance reasons, we flush E$ by placing it
 * in direct map mode for data load/store and restore the state after
 * we are done flushing it. Keep interrupts off while flushing in this
* manner.
*
* We flush the entire ecache by starting at one end and loading each
* successive ecache line for the 2*ecache-size range. We have to repeat
* the flush operation to guarantee that the entire ecache has been
* flushed.
*
* For flushing a specific physical address, we start at the aliased
* address and load at set-size stride, wrapping around at 2*ecache-size
* boundary and skipping the physical address being flushed. It takes
* 10 loads to guarantee that the physical address has been flushed.
*/
#define HB_ECACHE_FLUSH_CNT 2
#endif /* HUMMINGBIRD */
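/*
 * A minimal sketch of the per-address loop described above (illustrative;
 * %o0 = physaddr, %o1 = aliased addr, %g2 = E$ set size, %g3 = the
 * 2*ecache-size - 1 mask, %g4 = load count, matching the register usage
 * comments further below):
 */
	mov	10, %g4			! 10 loads guarantee the flush
6:
	ldxa	[%o1]ASI_MEM, %g0	! displacement load, bypassing D$
7:
	add	%o1, %g2, %o1		! step by one E$ set size
	and	%o1, %g3, %o1		! wrap at the 2*ecache-size boundary
	cmp	%o1, %o0		! skip the physaddr being flushed
	be,pn	%xcc, 7b		! ...step past it instead
	  nop
	subcc	%g4, 1, %g4
	bgu,pt	%xcc, 6b
	  nop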
/* END CSTYLED */
#endif /* !lint */
/*
* Spitfire MMU and Cache operations.
*/
#if defined(lint)
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
vtag_flushall(void)
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
/* ARGSUSED */
{
return ((uint64_t)0);
}
/* ARGSUSED */
{
return ((uint64_t)0);
}
/*ARGSUSED*/
void
kdi_flush_idcache(int dcache_size, int dcache_lsize,
	int icache_size, int icache_lsize)
{}
#else /* lint */
/*
* flush page from the tlb
*
* %o0 = vaddr
* %o1 = ctxnum
*/
#ifdef DEBUG
3:
#endif /* DEBUG */
/*
* disable ints
*/
/*
* Then, blow out the tlb
* Interrupts are disabled to prevent the secondary ctx register
* from changing underneath us.
*/
/*
* For KCONTEXT demaps use primary. type = page implicitly
*/
b 5f
1:
/*
* User demap. We need to set the secondary context properly.
* %o0 = vaddr
* %o1 = ctxnum
* %o3 = FLUSH_ADDR
*/
4:
5:
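/*
 * A minimal sketch of the user-demap step (illustrative; assumes the
 * MMU_SCONTEXT register offset, the DEMAP_SECOND encoding, and the
 * ASI_DTLB_DEMAP/ASI_ITLB_DEMAP ASIs from the sun4u headers):
 */
	set	MMU_SCONTEXT, %o4
	ldxa	[%o4]ASI_DMMU, %o2		! save current secondary ctx
	stxa	%o1, [%o4]ASI_DMMU		! secondary ctx = ctxnum
	membar	#Sync
	or	%o0, DEMAP_SECOND, %o0		! page demap, secondary ctx
	stxa	%g0, [%o0]ASI_DTLB_DEMAP	! demap from the D-TLB
	stxa	%g0, [%o0]ASI_ITLB_DEMAP	! ... and the I-TLB
	stxa	%o2, [%o4]ASI_DMMU		! restore secondary ctx
	flush	%o3				! %o3 = FLUSH_ADDR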
/*
* flush context from the tlb
*
* %o0 = ctxnum
* We disable interrupts to prevent the secondary ctx register changing
* underneath us.
*/
#ifdef DEBUG
1:
#endif /* DEBUG */
4:
5:
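/*
 * A minimal sketch of the context demap (illustrative; DEMAP_CTX_TYPE
 * and DEMAP_SECOND are the assumed demap-operation encodings):
 */
	set	MMU_SCONTEXT, %o3
	ldxa	[%o3]ASI_DMMU, %o2		! save current secondary ctx
	stxa	%o0, [%o3]ASI_DMMU		! secondary ctx = ctxnum
	membar	#Sync
	set	DEMAP_CTX_TYPE | DEMAP_SECOND, %g1
	stxa	%g0, [%g1]ASI_DTLB_DEMAP	! demap the whole context
	stxa	%g0, [%g1]ASI_ITLB_DEMAP
	stxa	%o2, [%o3]ASI_DMMU		! restore secondary ctx
	membar	#Sync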
.seg ".text"
.asciz "sfmmu_asm: unimplemented flush operation"
/*
* x-trap to flush page from tlb and tsb
*
* %g1 = vaddr, zero-extended on 32-bit kernel
* %g2 = ctxnum
*
* assumes TSBE_TAG = 0
*/
/* We need to set the secondary context properly. */
/*
* x-trap to flush pgcnt MMU_PAGESIZE pages from tlb
*
* %g1 = vaddr, zero-extended on 32-bit kernel
* %g2 = <zero32|ctx16|pgcnt16>
*
* NOTE: this handler relies on the fact that no
* interrupts or traps can occur during the loop
* issuing the TLB_DEMAP operations. It is assumed
* that interrupts are disabled and this code is
* fetching from the kernel locked text address.
*
* assumes TSBE_TAG = 0
*/
/* We need to set the secondary context properly. */
1:
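/*
 * A minimal sketch of the demap loop itself (illustrative; assumes the
 * secondary context has already been set as noted above, and that
 * DEMAP_SECOND and the demap ASIs carry their usual sun4u meanings):
 */
	set	0xffff, %g4
	and	%g2, %g4, %g4			! pgcnt from %g2<15:0>
	or	%g1, DEMAP_SECOND, %g1		! page demap, secondary ctx
6:
	stxa	%g0, [%g1]ASI_DTLB_DEMAP	! demap one page from D-TLB
	stxa	%g0, [%g1]ASI_ITLB_DEMAP	! ... and I-TLB
	deccc	%g4				! one page done
	bgu,pt	%xcc, 6b
	  add	%g1, MMU_PAGESIZE, %g1		! on to the next page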
/*
* x-trap to flush context from tlb
*
* %g1 = ctxnum
*/
/*
* vac_flushpage(pfnum, color)
* Flush 1 8k page of the D-$ with physical page = pfnum
* Algorithm:
 * The spitfire dcache is a 16k, direct-mapped, virtually indexed,
 * physically tagged cache. Given the pfnum we read all cache
* lines for the corresponding page in the cache (determined by
* the color). Each cache line is compared with
* the tag created from the pfnum. If the tags match we flush
* the line.
*/
.seg ".data"
.align 8
.seg ".text"
/*
* flush page from the d$
*
* %o0 = pfnum, %o1 = color
*/
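/*
 * A minimal sketch of the tag-match loop described above (illustrative;
 * the diagnostic tag layout is simplified, %o5 is assumed to hold the
 * D$ line size, and %o4 is scratch):
 */
	sll	%o1, MMU_PAGESHIFT, %o1		! color -> D$ byte offset
	set	MMU_PAGESIZE, %o2
	sub	%o2, %o5, %o2			! start with the last line
6:
	ldxa	[%o1 + %o2]ASI_DC_TAG, %o4	! read this line's ptag
	cmp	%o4, %o0			! does it match the pfnum tag?
	bne,pt	%xcc, 7f
	  nop
	stxa	%g0, [%o1 + %o2]ASI_DC_TAG	! match - flush the line
	membar	#Sync
7:
	brgz,pt	%o2, 6b
	  sub	%o2, %o5, %o2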
/*
* x-trap to flush page from the d$
*
* %g1 = pfnum, %g2 = color
*/
/*
* %o0 = vcolor
*/
/*
* %g1 = vcolor
*/
.asciz "ASI_INTR_DISPATCH_STATUS error: busy"
.align 4
/*
* Determine whether or not the IDSR is busy.
* Entry: no arguments
* Returns: 1 if busy, 0 otherwise
*/
1:
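/*
 * A minimal sketch (illustrative; assumes IDSR_BUSY from <sys/intreg.h>):
 */
	ldxa	[%g0]ASI_INTR_DISPATCH_STATUS, %g1	! read the IDSR
	clr	%o0				! assume not busy
	andcc	%g1, IDSR_BUSY, %g0		! busy bit set?
	bnz,a,pn %xcc, 6f
	  mov	1, %o0				! yes - return 1
6:
	retl
	  nop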
/*
* Setup interrupt dispatch data registers
* Entry:
* %o0 - function or inumber to call
* %o1, %o2 - arguments (2 uint64_t's)
*/
	.seg ".text"
#ifdef DEBUG
!
!
#endif /* DEBUG */
!
!
1:
!
!
!
!
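/*
 * A minimal sketch of loading the three dispatch data registers
 * (illustrative; IDDR_0, IDDR_1 and IDDR_2 are assumed names for the
 * data-register offsets, and ASI_INTR_DISPATCH for the dispatch ASI):
 */
	mov	IDDR_0, %g1
	stxa	%o0, [%g1]ASI_INTR_DISPATCH	! data 0: function or inum
	mov	IDDR_1, %g1
	stxa	%o1, [%g1]ASI_INTR_DISPATCH	! data 1: first argument
	mov	IDDR_2, %g1
	stxa	%o2, [%g1]ASI_INTR_DISPATCH	! data 2: second argument
	membar	#Sync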
/*
* Ship mondo to upaid
*/
#if defined(SF_ERRATA_54)
#endif
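/*
 * A minimal sketch of the dispatch itself (illustrative; the VA encoding
 * upaid << IDCR_PID_SHIFT | IDCR_OFFSET is an assumption about the
 * interrupt dispatch command register):
 */
	sll	%o0, IDCR_PID_SHIFT, %g1	! target upaid -> dispatch VA
	or	%g1, IDCR_OFFSET, %g1
	stxa	%g0, [%g1]ASI_INTR_DISPATCH	! fire the mondo
	membar	#Sync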
/*
* flush_instr_mem:
* Flush a portion of the I-$ starting at vaddr
* %o0 vaddr
* %o1 bytes to be flushed
*/
1:
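/*
 * A minimal sketch of the flush loop (illustrative; 'flush' operates on
 * 8-byte-aligned doublewords, so we step by 8):
 */
	brz,pn	%o1, 7f				! nothing to flush?
	  nop
6:
	flush	%o0				! flush one doubleword
	subcc	%o1, 8, %o1
	bgu,pt	%xcc, 6b
	  add	%o0, 8, %o0
7:
	retl
	  nop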
/*
* flush_ecache:
* Flush the entire e$ using displacement flush by reading through a
* physically contiguous area. We use mmu bypass asi (ASI_MEM) while
* reading this physical address range so that data doesn't go to d$.
* incoming arguments:
* %o0 - 64 bit physical address
* %o1 - size of address range to read
* %o2 - ecache linesize
*/
#ifndef HUMMINGBIRD
b 2f
1:
2:
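/*
 * A minimal sketch of the non-Hummingbird displacement loop (illustrative):
 * read the flush region through ASI_MEM at E$-line stride so every line
 * in the E$ is displaced without polluting the D$.
 */
	sub	%o1, %o2, %o1			! start at the last line
6:
	ldxa	[%o0 + %o1]ASI_MEM, %g0		! displace one E$ line
	brgz,pt	%o1, 6b
	  sub	%o1, %o2, %o1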
#else /* HUMMINGBIRD */
/*
* UltraSPARC-IIe processor supports both 4-way set associative
 * and direct map E$. For performance reasons, we flush E$ by
 * placing it in direct map mode for data load/store and restore
 * the state after we are done flushing it. It takes 2 iterations
* to guarantee that the entire ecache has been flushed.
*
* Keep the interrupts disabled while flushing E$ in this manner.
*/
2:
1:
#endif /* HUMMINGBIRD */
/*
* void kdi_flush_idcache(int dcache_size, int dcache_linesize,
* int icache_size, int icache_linesize)
*/
/*
* void get_ecache_dtag(uint32_t ecache_idx, uint64_t *data, uint64_t *tag,
* uint64_t *oafsr, uint64_t *acc_afsr)
*
* Get ecache data and tag. The ecache_idx argument is assumed to be aligned
* on a 64-byte boundary. The corresponding AFSR value is also read for each
 * 8 byte ecache data obtained. The data argument is assumed to be a pointer
 * to an array of 16 uint64_t's (e$ data & afsr values). The action to read the
* data and tag should be atomic to make sense. We will be executing at PIL15
* and will disable IE, so nothing can occur between the two reads. We also
* assume that the execution of this code does not interfere with what we are
* reading - not really possible, but we'll live with it for now.
* We also pass the old AFSR value before clearing it, and caller will take
* appropriate actions if the important bits are non-zero.
*
* If the caller wishes to track the AFSR in cases where the CP bit is
* set, an address should be passed in for acc_afsr. Otherwise, this
* argument may be null.
*
* Register Usage:
* i0: In: 32-bit e$ index
* i1: In: addr of e$ data
* i2: In: addr of e$ tag
* i3: In: addr of old afsr
* i4: In: addr of accumulated afsr - may be null
*/
bz 1f
1:
bz 2f
2:
bl,a 1b
#endif /* lint */
#if defined(lint)
/*
* The ce_err function handles trap type 0x63 (corrected_ECC_error) at tl=0.
* Steps: 1. GET AFSR 2. Get AFAR <40:4> 3. Get datapath error status
* 4. Clear datapath error bit(s) 5. Clear AFSR error bit
 * 6. package data in %g2 and %g3 7. call cpu_ce_error via sys_trap
* %g2: [ 52:43 UDB lower | 42:33 UDB upper | 32:0 afsr ] - arg #3/arg #1
* %g3: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
*/
void
ce_err(void)
{}
void
ce_err_tl1(void)
{}
/*
* The async_err function handles trap types 0x0A (instruction_access_error)
* and 0x32 (data_access_error) at TL = 0 and TL > 0. When we branch here,
* %g5 will have the trap type (with 0x200 set if we're at TL > 0).
*
 * Steps: 1. Get AFSR 2. Get AFAR <40:4> 3. If not UE error skip UDB registers.
 * 4. Else get and clear datapath error bit(s) 5. Clear AFSR error bits
 * 6. package data in %g2 and %g3 7. disable all cpu errors, because
 * trap is likely to be fatal 8. call cpu_async_error via sys_trap
*
* %g3: [ 63:53 tt | 52:43 UDB_L | 42:33 UDB_U | 32:0 afsr ] - arg #3/arg #1
* %g2: [ 40:4 afar ] - sys_trap->have_win: arg #4/arg #2
*/
void
async_err(void)
{}
/*
* The clr_datapath function clears any error bits set in the UDB regs.
*/
void
clr_datapath(void)
{}
/*
* The get_udb_errors() function gets the current value of the
* Datapath Error Registers.
*/
/*ARGSUSED*/
void
{
*udbh = 0;
*udbl = 0;
}
#else /* lint */
	ENTRY_NP(ce_err)
	ldxa	[%g0]ASI_AFSR, %g3	! save afsr in g3
	!
	! Check for a UE: we try to switch to the trap handler for the UE,
	! but since that's a hardware pipeline, we might get to the CE trap
	! before we can switch. The UDB and AFSR registers will have both the
	! UE and CE bits set but the UDB syndrome and the AFAR will be
	! for the UE.
	!
or %g0, 1, %g1 ! put 1 in g1
sllx %g1, 21, %g1 ! shift left to <21> afsr UE
andcc %g1, %g3, %g0 ! check for UE in afsr
bnz async_err ! handle the UE, not the CE
or %g0, 0x63, %g5 ! pass along the CE ttype
!
! Disable further CE traps to avoid recursion (stack overflow)
! and staying above XCALL_PIL for extended periods.
!
ldxa [%g0]ASI_ESTATE_ERR, %g2
andn %g2, 0x1, %g2 ! clear bit 0 - CEEN
stxa %g2, [%g0]ASI_ESTATE_ERR
membar #Sync ! required
!
! handle the CE
ldxa [%g0]ASI_AFAR, %g2 ! save afar in g2
set P_DER_H, %g4 ! put P_DER_H in g4
ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb upper half into g5
or %g0, 1, %g6 ! put 1 in g6
sllx %g6, 8, %g6 ! shift g6 to <8> sdb CE
andcc %g5, %g6, %g1 ! check for CE in upper half
sllx %g5, 33, %g5 ! shift upper bits to <42:33>
or %g3, %g5, %g3 ! or with afsr bits
bz,a 1f ! no error, goto 1f
nop
stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg error bit
membar #Sync ! membar sync required
1:
	set P_DER_L, %g4 ! put P_DER_L in g4
	ldxa [%g4]ASI_SDB_INTR_R, %g5 ! read sdb lower half into g5
	andcc %g5, %g6, %g1 ! check for CE in lower half
	sllx %g5, 43, %g5 ! shift lower bits to <52:43>
or %g3, %g5, %g3 ! or with afsr bits
bz,a 2f ! no error, goto 2f
nop
stxa %g1, [%g4]ASI_SDB_INTR_W ! clear sdb reg error bit
membar #Sync ! membar sync required
2:
or %g0, 1, %g4 ! put 1 in g4
sllx %g4, 20, %g4 ! shift left to <20> afsr CE
stxa %g4, [%g0]ASI_AFSR ! use g4 to clear afsr CE error
membar #Sync ! membar sync required
set cpu_ce_error, %g1 ! put *cpu_ce_error() in g1
rdpr %pil, %g6 ! read pil into %g6
subcc %g6, PIL_15, %g0
movneg %icc, PIL_14, %g4 ! run at pil 14 unless already at 15
sethi %hi(sys_trap), %g5
jmp %g5 + %lo(sys_trap) ! goto sys_trap
movge %icc, PIL_15, %g4 ! already at pil 15
SET_SIZE(ce_err)
ENTRY_NP(ce_err_tl1)
#ifndef TRAPTRACE
ldxa [%g0]ASI_AFSR, %g7
stxa %g7, [%g0]ASI_AFSR
membar #Sync
retry
#else
set ce_trap_tl1, %g1
sethi %hi(dis_err_panic1), %g4
jmp %g4 + %lo(dis_err_panic1)
nop
#endif
SET_SIZE(ce_err_tl1)
#ifdef TRAPTRACE
.celevel1msg:
.asciz "Softerror with trap tracing at tl1: AFAR 0x%08x.%08x AFSR 0x%08x.%08x";
ENTRY_NP(ce_trap_tl1)
! upper 32 bits of AFSR already in o3
mov %o4, %o0 ! save AFAR upper 32 bits
mov %o2, %o4 ! lower 32 bits of AFSR
mov %o1, %o2 ! lower 32 bits of AFAR
mov %o0, %o1 ! upper 32 bits of AFAR
set .celevel1msg, %o0
call panic
nop
SET_SIZE(ce_trap_tl1)
#endif
	!
	! async_err is the assembly glue code to get us from the actual trap
	! into the C error handler cpu_async_error(), via sys_trap.
	!
1:
2:
1:
2:
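	!
	! A minimal sketch of the tail (illustrative): disable further cpu
	! error traps, as documented above, then enter sys_trap with
	! cpu_async_error as the handler at PIL 15.
	!
	stxa	%g0, [%g0]ASI_ESTATE_ERR	! disable all cpu error traps
	membar	#Sync
	set	cpu_async_error, %g1		! C handler for sys_trap
	sethi	%hi(sys_trap), %g5
	jmp	%g5 + %lo(sys_trap)
	  or	%g0, PIL_15, %g4		! run the handler at PIL 15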
#endif /* lint */
#if defined(lint)
/*
* The itlb_rd_entry and dtlb_rd_entry functions return the tag portion of the
* tte, the virtual address, and the ctxnum of the specified tlb entry. They
* should only be used in places where you have no choice but to look at the
* tlb itself.
*
* Note: These two routines are required by the Estar "cpr" loadable module.
*/
/*ARGSUSED*/
void
{}
/*ARGSUSED*/
void
{}
#else /* lint */
/*
* NB - In Spitfire cpus, when reading a tte from the hardware, we
* need to clear [42-41] because the general definitions in pte.h
* define the PA to be [42-13] whereas Spitfire really uses [40-13].
* When cloning these routines for other cpus the "andn" below is not
* necessary.
*/
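/*
 * A minimal sketch of the D-TLB case (illustrative; assumes the
 * ASI_DTLB_ACCESS/ASI_DTLB_TAGREAD diagnostic ASIs, with %o0 = entry
 * index, %o1 = tte destination, %o2 = tag destination):
 */
	sllx	%o0, 3, %o0			! entry index -> diag VA
	ldxa	[%o0]ASI_DTLB_ACCESS, %g1	! read the tte data
	mov	3, %g2
	sllx	%g2, 41, %g2			! mask for PA bits <42:41>
	andn	%g1, %g2, %g1			! clear them (see NB above)
	ldxa	[%o0]ASI_DTLB_TAGREAD, %g2	! read the tag (va/ctx)
	stx	%g1, [%o1]			! return tte
	retl
	  stx	%g2, [%o2]			! return tag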
#if defined(SF_ERRATA_32)
#endif
#if defined(SF_ERRATA_32)
#endif
#endif /* lint */
#if defined(lint)
/*
* routines to get and set the LSU register
*/
get_lsu(void)
{
return ((uint64_t)0);
}
/*ARGSUSED*/
void
{}
#else /* lint */
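/*
 * Minimal sketches (illustrative; ASI_LSU is assumed to name the LSU
 * control register ASI):
 */
	ENTRY(get_lsu)
	retl
	  ldxa	[%g0]ASI_LSU, %o0	! read the LSU control register
	SET_SIZE(get_lsu)

	ENTRY(set_lsu)
	stxa	%o0, [%g0]ASI_LSU	! write the LSU control register
	membar	#Sync
	retl
	  nop
	SET_SIZE(set_lsu)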
#endif /* lint */
#ifndef lint
/*
* Clear the NPT (non-privileged trap) bit in the %tick
 * register. In an effort to make the change in the
* tick counter as consistent as possible, we disable
* all interrupts while we're changing the registers. We also
* ensure that the read and write instructions are in the same
* line in the instruction cache.
*/
2:
/* clearing NPT bit */
#if defined(BB_ERRATA_1)
#endif
1:
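/*
 * A minimal sketch of the NPT clear itself (illustrative):
 */
	rdpr	%tick, %g1		! current tick, NPT in bit <63>
	sllx	%g1, 1, %g1		! shift NPT out...
	srlx	%g1, 1, %g1		! ... and back, leaving <62:0>
	wrpr	%g1, 0, %tick		! write tick back with NPT clear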
/*
* get_ecache_tag()
* Register Usage:
* %o0: In: 32-bit E$ index
* Out: 64-bit E$ tag value
* %o1: In: 64-bit AFSR value after clearing sticky bits
* %o2: In: address of cpu private afsr storage
*/
bz 1f
1:
bz 2f
2:
/*
* check_ecache_line()
* Register Usage:
* %o0: In: 32-bit E$ index
* Out: 64-bit accumulated AFSR
* %o1: In: address of cpu private afsr storage
*/
bz 1f
1:
2:
bl,a 2b
bz 3f
3:
#endif /* lint */
#if defined(lint)
{
return ((uint64_t)0);
}
#else /* lint */
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
{
}
#else /* lint */
/*
* scrubphys - Pass in the aligned physical memory address that you want
* to scrub, along with the ecache size.
*
* 1) Displacement flush the E$ line corresponding to %addr.
* The first ldxa guarantees that the %addr is no longer in
 * M, O, or E (it goes to I, or to S if instruction fetch also happens).
* 2) "Write" the data using a CAS %addr,%g0,%g0.
* The casxa guarantees a transition from I to M or S to M.
* 3) Displacement flush the E$ line corresponding to %addr.
* The second ldxa pushes the M line out of the ecache, into the
* writeback buffers, on the way to memory.
* 4) The "membar #Sync" pushes the cache line out of the writeback
* buffers onto the bus, on the way to dram finally.
*
* This is a modified version of the algorithm suggested by Gary Lauterbach.
* In theory the CAS %addr,%g0,%g0 is supposed to mark the addr's cache line
* as modified, but then we found out that for spitfire, if it misses in the
* E$ it will probably install as an M, but if it hits in the E$, then it
 * will stay E if the store doesn't happen. So the first displacement flush
* should ensure that the CAS will miss in the E$. Arrgh.
*/
#ifndef HUMMINGBIRD
! addr == ecache_flushaddr
set ecache_flushaddr, %o3
ldx [%o3], %o3
rdpr %pstate, %o4
andn %o4, PSTATE_IE | PSTATE_AM, %o5
wrpr %o5, %g0, %pstate ! clear IE, AM bits
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
casxa [%o0]ASI_MEM, %g0, %g0
	ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
	membar #Sync ! push the line out of the w$
	wrpr %g0, %o4, %pstate ! restore earlier pstate value
#else /* HUMMINGBIRD */
/*
* UltraSPARC-IIe processor supports both 4-way set associative
 * and direct map E$. We need to reconfigure E$ to direct map
 * mode for data load/store before the displacement flush. Also, we
 * need to flush all 4 sets of the E$ to ensure that the physaddr
* has been flushed. Keep the interrupts disabled while flushing
* E$ in this manner.
*
* For flushing a specific physical address, we start at the
* aliased address and load at set-size stride, wrapping around
* at 2*ecache-size boundary and skipping fault physical address.
* It takes 10 loads to guarantee that the physical address has
* been flushed.
*
* Usage:
* %o0 physaddr
* %o5 physaddr - ecache_flushaddr
* %g1 UPA config (restored later)
* %g2 E$ set size
* %g3 E$ flush address range mask (i.e. 2 * E$ -1)
* %g4 #loads to flush phys address
* %g5 temp
*/
sethi %hi(ecache_associativity), %g5
ld [%g5 + %lo(ecache_associativity)], %g5
udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
xor %o0, %o2, %o1 ! calculate alias address
add %o2, %o2, %g3 ! 2 * ecachesize in case
! addr == ecache_flushaddr
sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
and %o1, %g3, %o1 ! and with xor'd address
2:
1:
#endif /* HUMMINGBIRD */
#endif /* lint */
#if defined(lint)
/*
* clearphys - Pass in the aligned physical memory address that you want
 * to push out from the ecache, zero-filled, as a 64 byte block.
 * Since this routine does not bypass the ecache, it is possible that
 * it could generate a UE error while trying to clear a bad line.
* This routine clears and restores the error enable flag.
* TBD - Hummingbird may need similar protection
*/
/* ARGSUSED */
void
{
}
#else /* lint */
#ifndef HUMMINGBIRD
set ecache_flushaddr, %o3
ldx [%o3], %o3
or %o4, %g0, %o2 ! saved ecache linesize
rdpr %pstate, %o4
andn %o4, PSTATE_IE | PSTATE_AM, %o5
wrpr %o5, %g0, %pstate ! clear IE, AM bits
ldxa [%g0]ASI_ESTATE_ERR, %g1
stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
membar #Sync
! need to put zeros in the cache line before displacing it
sub %o2, 8, %o2 ! get offset of last double word in ecache line
1:
stxa %g0, [%o0 + %o2]ASI_MEM ! put zeros in the ecache line
sub %o2, 8, %o2
brgez,a,pt %o2, 1b
nop
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
casxa [%o0]ASI_MEM, %g0, %g0
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
	membar #Sync
	wrpr %g0, %o4, %pstate ! restore earlier pstate value
#else /* HUMMINGBIRD... */
/*
* UltraSPARC-IIe processor supports both 4-way set associative
 * and direct map E$. We need to reconfigure E$ to direct map
 * mode for data load/store before the displacement flush. Also, we
 * need to flush all 4 sets of the E$ to ensure that the physaddr
* has been flushed. Keep the interrupts disabled while flushing
* E$ in this manner.
*
* For flushing a specific physical address, we start at the
* aliased address and load at set-size stride, wrapping around
* at 2*ecache-size boundary and skipping fault physical address.
* It takes 10 loads to guarantee that the physical address has
* been flushed.
*
* Usage:
* %o0 physaddr
* %o5 physaddr - ecache_flushaddr
* %g1 UPA config (restored later)
* %g2 E$ set size
* %g3 E$ flush address range mask (i.e. 2 * E$ -1)
* %g4 #loads to flush phys address
* %g5 temp
*/
or %o3, %g0, %o4 ! save ecache linesize
sethi %hi(ecache_associativity), %g5
ld [%g5 + %lo(ecache_associativity)], %g5
udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
xor %o0, %o2, %o1 ! calculate alias address
add %o2, %o2, %g3 ! 2 * ecachesize
sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
and %o1, %g3, %o1 ! and with xor'd address
1:
2:
3:
#endif /* HUMMINGBIRD... */
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
{
}
#else /* lint */
/*
* flushecacheline - This is a simpler version of scrubphys
* which simply does a displacement flush of the line in
* question. This routine is mainly used in handling async
* errors where we want to get rid of a bad line in ecache.
* Note that if the line is modified and it has suffered
 * data corruption - we are guaranteed that the hw will write
* a UE back to mark the page poisoned.
*/
#ifndef HUMMINGBIRD
! addr == ecache_flushaddr
set ecache_flushaddr, %o3
ldx [%o3], %o3
rdpr %pstate, %o4
andn %o4, PSTATE_IE | PSTATE_AM, %o5
wrpr %o5, %g0, %pstate ! clear IE, AM bits
ldxa [%g0]ASI_ESTATE_ERR, %g1
stxa %g0, [%g0]ASI_ESTATE_ERR ! disable errors
membar #Sync
ldxa [%o1 + %o3]ASI_MEM, %g0 ! load ecache_flushaddr + alias
membar #Sync
stxa %g1, [%g0]ASI_ESTATE_ERR ! restore error enable
	membar #Sync
	wrpr %g0, %o4, %pstate ! restore earlier pstate value
#else /* HUMMINGBIRD */
/*
* UltraSPARC-IIe processor supports both 4-way set associative
 * and direct map E$. We need to reconfigure E$ to direct map
 * mode for data load/store before the displacement flush. Also, we
 * need to flush all 4 sets of the E$ to ensure that the physaddr
* has been flushed. Keep the interrupts disabled while flushing
* E$ in this manner.
*
* For flushing a specific physical address, we start at the
* aliased address and load at set-size stride, wrapping around
* at 2*ecache-size boundary and skipping fault physical address.
* It takes 10 loads to guarantee that the physical address has
* been flushed.
*
* Usage:
* %o0 physaddr
* %o5 physaddr - ecache_flushaddr
* %g1 error enable register
* %g2 E$ set size
* %g3 E$ flush address range mask (i.e. 2 * E$ -1)
* %g4 UPA config (restored later)
* %g5 temp
*/
sethi %hi(ecache_associativity), %g5
ld [%g5 + %lo(ecache_associativity)], %g5
udivx %o2, %g5, %g2 ! set size (i.e. ecache_size/#sets)
xor %o0, %o2, %o1 ! calculate alias address
add %o2, %o2, %g3 ! 2 * ecachesize in case
! addr == ecache_flushaddr
sub %g3, 1, %g3 ! 2 * ecachesize -1 == mask
and %o1, %g3, %o1 ! and with xor'd address
2:
3:
#endif /* HUMMINGBIRD */
#endif /* lint */
#if defined(lint)
/* ARGSUSED */
void
{
}
#else /* lint */
/*
* ecache_scrubreq_tl1 is the crosstrap handler called at ecache_calls_a_sec Hz
* from the clock CPU. It atomically increments the outstanding request
* counter and, if there was not already an outstanding request,
* branches to setsoftint_tl1 to enqueue an intr_req for the given inum.
*/
!
! Arguments:
!
! Internal:
set setsoftint_tl1, %g6
!
! no need to use atomic instructions for the following
!
1:
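	!
	! A minimal sketch of the counter logic (illustrative; %g2 holds the
	! address of the outstanding-request counter, a hypothetical per-cpu
	! location, and %g6 = setsoftint_tl1 as set above):
	!
	ld	[%g2], %g3		! outstanding scrub requests
	add	%g3, 1, %g4
	brnz,pn	%g3, 6f			! request already pending?
	  st	%g4, [%g2]		! count this one either way
	jmp	%g6			! none pending: enqueue the softint
	  nop
6:
	retry				! already pending: just return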
#endif /* lint */
#if defined(lint)
/*ARGSUSED*/
void
{}
#else /* lint */
/*
 * write_ec_tag_parity() zeros the ecache tag, marks the state as
 * invalid, and writes good parity to the tag.
 * Input: %o1 = 32 bit E$ index
*/
ba 1f
/*
* Align on the ecache boundary in order to force
 * the critical code section onto the same ecache line.
*/
.align 64
1:
#endif /* lint */
#if defined(lint)
/*ARGSUSED*/
void
{}
#else /* lint */
/*
 * write_hb_ec_tag_parity() zeros the ecache tag, marks the state as
 * invalid, and writes good parity to the tag.
 * Input: %o1 = 32 bit E$ index
*/
ba 1f
/*
* Align on the ecache boundary in order to force
 * the critical code section onto the same ecache line.
*/
.align 64
1:
#ifdef HUMMINGBIRD
#else /* !HUMMINGBIRD */
#endif /* !HUMMINGBIRD */
#endif /* lint */
#define VIS_BLOCKSIZE 64
#if defined(lint)
/*ARGSUSED*/
int
{ return (0); }
#else
1:
/*
 * We're about to write a block full of either total garbage
* (not kernel data, don't worry) or user floating-point data
* (so it only _looks_ like garbage).
*/
1:
0:
1:
/*
* If tryagain is set (%i2) we tail-call dtrace_blksuword32_err()
* which deals with watchpoints. Otherwise, just return -1.
*/
1:
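/*
 * A minimal sketch of that error path (illustrative): %i2 is the
 * tryagain flag described in the comment above.
 */
	brnz,pn	%i2, 6f				! tryagain set?
	  nop
	ret					! no: just return -1
	  restore %g0, -1, %o0
6:
	sethi	%hi(dtrace_blksuword32_err), %g1
	jmp	%g1 + %lo(dtrace_blksuword32_err)	! tail-call with the
	  restore					! caller's arguments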
#endif /* lint */