/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* common code with bug fixes from original version in trap.c */
#include <sys/archsystm.h>
#include <sys/privregs.h>
#include <sys/simulate.h>
#include <sys/watchpoint.h>
#include <sys/machtrap.h>
#include <sys/byteorder.h>
op3 == 0x35))
static int aligndebug = 0;
/*
 * For the sake of those who must be compatible with architectures
 * that tolerate unaligned references, users can link their programs
 * to use a corrective trap handler that will fix unaligned references;
 * a special trap #6 (T_FIX_ALIGN) enables this 'feature'.
* Returns 1 for success, 0 for failure.
*/
int
{
int floatflg;
int fsrflg;
int immflg;
int lddstdflg;
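	/*
	 * Staging buffer for the simulated operand: the union overlays
	 * the same 16 bytes as doublewords, words, halfwords and bytes,
	 * so the code below can assemble or pick apart the data at
	 * whatever granularity the access size requires.
	 */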
union {
uint64_t l[2];
uint32_t i[4];
uint16_t s[8];
uint8_t c[16];
} data;
/* if not load or store do nothing */
return (0);
/* if ldstub or swap, do nothing */
return (0);
return (0);
if (floatflg) {
case 0: sz = 4;
break; /* ldf{a}/stf{a} */
if (rd == 0)
else if (rd == 1)
else
return (SIMU_ILLEGAL);
break;
break; /* ldqf{a}/stqf{a} */
break; /* lddf{a}/stdf{a} */
}
/*
* Fix to access extra double register encoding plus
* compensate to access the correct fpu_dreg.
*/
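		/*
		 * A sketch of that adjustment (not necessarily the exact
		 * statements used here): in V9 the low bit of the 5-bit rd
		 * field supplies bit 5 of the even double-register number,
		 *
		 *	rd = ((rd & 1) << 5) | (rd & 0x1e);
		 *
		 * after which the matching fpu_dreg entry is presumably
		 * indexed by rd / 2.
		 */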
return (SIMU_ILLEGAL);
}
} else {
switch (sz_bits) { /* map size bits to a number */
case 0: /* lduw{a} */
case 4: /* stw{a} */
case 8: /* ldsw{a} */
case 0xf: /* swap */
sz = 4; break;
case 1: /* ldub{a} */
case 5: /* stb{a} */
case 9: /* ldsb{a} */
case 0xd: /* ldstub */
sz = 1; break;
case 2: /* lduh{a} */
case 6: /* sth{a} */
case 0xa: /* ldsh{a} */
sz = 2; break;
case 3: /* ldd{a} */
case 7: /* std{a} */
lddstdflg = 1;
sz = 8; break;
case 0xb: /* ldx{a} */
case 0xe: /* stx{a} */
sz = 8; break;
}
}
/* only support primary and secondary asi's */
if (immflg) {
} else {
}
switch (asi) {
case ASI_P:
case ASI_S:
break;
case ASI_PNF:
case ASI_SNF:
nf = 1;
break;
case ASI_PL:
case ASI_SL:
ltlend = 1;
break;
case ASI_PNFL:
case ASI_SNFL:
ltlend = 1;
nf = 1;
break;
default:
return (0);
}
/*
	 * According to the Spitfire manual, a non-faulting store
	 * generates a data_access_exception trap; since that can't be
	 * fixed up here, signal it as an illegal instruction trap.
*/
return (SIMU_ILLEGAL);
}
if (aligndebug) {
printf("unaligned access at %p, instruction: 0x%x\n",
"signed" : "unsigned"));
printf("rd = %d, op3 = 0x%x, rs1 = %d, rs2 = %d, imm13=0x%x\n",
}
(void) flush_user_windows_to_stack(NULL);
return (SIMU_FAULT);
if (aligndebug)
/* check immediate bit and use immediate field or reg (rs2) */
if (immflg) {
int imm;
imm >>= 19;
} else {
return (SIMU_FAULT);
}
/*
* If this is a 32-bit program, chop the address accordingly. The
* intermediate uintptr_t casts prevent warnings under a certain
* compiler, and the temporary 32 bit storage is intended to force
* proper code generation and break up what would otherwise be a
* quadruple cast.
*/
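	/*
	 * A minimal sketch of that chop, assuming the effective address
	 * is held in "addr":
	 *
	 *	if (curproc->p_model == DATAMODEL_ILP32) {
	 *		caddr32_t addr32 = (caddr32_t)(uintptr_t)addr;
	 *		addr = (caddr_t)(uintptr_t)addr32;
	 *	}
	 */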
}
if (aligndebug)
goto badret;
}
/* a single bit differentiates ld and st */
if (floatflg) {
/* Ensure fp has been enabled */
if (fpu_exists) {
if (!(_fp_read_fprs() & FPRS_FEF))
fp_enable();
} else {
fp_enable();
}
/* if fpu_exists read fpu reg */
if (fpu_exists) {
if (fsrflg) {
_fp_read_pfsr(&data.l[0]);
} else {
if (sz == 4) {
data.i[0] = 0;
}
if (sz >= 8)
if (sz == 16)
}
} else {
if (fsrflg) {
/* Clear reserved bits, set version=7 */
} else {
if (sz == 4) {
data.i[0] = 0;
data.i[1] =
(unsigned)fp->
}
if (sz >= 8)
data.l[0] =
if (sz == 16)
data.l[1] =
}
}
} else {
if (lddstdflg) { /* combine the data */
return (SIMU_FAULT);
return (SIMU_FAULT);
if (ltlend) {
/*
* For STD, each 32-bit word is byte-
* swapped individually. For
* simplicity we don't want to do that
* below, so we swap the words now to
* get the desired result in the end.
*/
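				/*
				 * (Reversing the word order here and then
				 * byte-swapping the whole 64 bits at store
				 * time is equivalent to byte-swapping each
				 * 32-bit word in place.)
				 */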
} else {
}
} else {
return (SIMU_FAULT);
}
}
if (aligndebug) {
if (sz == 16) {
printf("data %x %x %x %x\n",
} else {
printf("data %x %x %x %x %x %x %x %x\n",
}
}
if (ltlend) {
if (sz == 1) {
goto badret;
} else if (sz == 2) {
goto badret;
} else if (sz == 4) {
goto badret;
} else {
goto badret;
}
} else {
if (sz == 1) {
goto badret;
} else if (sz == 2) {
goto badret;
} else if (sz == 4) {
goto badret;
} else {
goto badret;
}
}
} else { /* load */
if (sz == 1) {
if (ltlend) {
if (nf)
data.c[7] = 0;
else
goto badret;
}
} else {
if (nf)
data.c[7] = 0;
else
goto badret;
}
}
/* if signed and the sign bit is set extend it */
} else {
data.i[0] = 0; /* clear upper 32+24 bits */
data.s[2] = 0;
data.c[6] = 0;
}
} else if (sz == 2) {
if (ltlend) {
if (nf)
data.s[3] = 0;
else
goto badret;
}
} else {
if (nf)
data.s[3] = 0;
else
goto badret;
}
}
/* if signed and the sign bit is set extend it */
} else {
data.i[0] = 0; /* clear upper 32+16 bits */
data.s[2] = 0;
}
} else if (sz == 4) {
if (ltlend) {
if (!nf)
goto badret;
data.i[1] = 0;
}
} else {
if (!nf)
goto badret;
data.i[1] = 0;
}
}
/* if signed and the sign bit is set extend it */
} else {
data.i[0] = 0; /* clear upper 32 bits */
}
} else {
if (ltlend) {
if (!nf)
goto badret;
data.l[0] = 0;
}
} else {
if (!nf)
goto badret;
data.l[0] = 0;
}
}
}
if (aligndebug) {
if (sz == 16) {
printf("data %x %x %x %x\n",
} else {
printf("data %x %x %x %x %x %x %x %x\n",
}
}
if (floatflg) { /* if fpu_exists write fpu reg */
/* Ensure fp has been enabled */
if (fpu_exists) {
if (!(_fp_read_fprs() & FPRS_FEF))
fp_enable();
} else {
fp_enable();
}
		/* if fpu_exists write fpu reg */
if (fpu_exists) {
if (fsrflg) {
_fp_write_pfsr(&data.l[0]);
} else {
if (sz == 4)
if (sz >= 8)
if (sz == 16)
}
} else {
if (fsrflg) {
} else {
if (sz == 4)
(unsigned)data.i[1];
if (sz >= 8)
data.l[0];
if (sz == 16)
data.l[1];
}
}
} else {
if (lddstdflg) { /* split the data */
if (ltlend) {
/*
* For LDD, each 32-bit word is byte-
* swapped individually. We didn't
* do that above, but this will give
* us the desired result.
*/
} else {
}
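			/*
			 * Each destination register image is 64 bits wide
			 * with its word in the low half, so clear the upper
			 * words before data.l[0] and data.l[1] are written
			 * back to rd and rd+1.
			 */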
data.i[0] = 0;
data.i[2] = 0;
goto badret;
goto badret;
} else {
goto badret;
}
}
}
return (SIMU_SUCCESS);
return (SIMU_FAULT);
}
int
{
int immflg;
if (usermode)
else
(void) flush_user_windows_to_stack(NULL);
else
if (immflg) {
} else {
}
switch (asi) {
case ASI_P:
case ASI_S:
break;
case ASI_PNF:
case ASI_SNF:
nf = 1;
break;
case ASI_PL:
case ASI_SL:
ltlend = 1;
break;
case ASI_PNFL:
case ASI_SNFL:
ltlend = 1;
nf = 1;
break;
case ASI_AIUP:
case ASI_AIUS:
usermode = 1;
break;
case ASI_AIUPL:
case ASI_AIUSL:
usermode = 1;
ltlend = 1;
break;
default:
return (SIMU_ILLEGAL);
}
}
return (SIMU_FAULT);
/* check immediate bit and use immediate field or reg (rs2) */
if (immflg) {
int imm;
imm >>= 19;
} else {
return (SIMU_FAULT);
}
/*
* T_UNIMP_LDD and T_UNIMP_STD are higher priority than
* T_ALIGNMENT. So we have to make sure that the address is
* kosher before trying to use it, because the hardware hasn't
* checked it for us yet.
*/
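	/*
	 * For the doubleword access simulated here that means verifying
	 * that the low three bits of the address are clear, e.g. a check
	 * of the form ((uintptr_t)addr & 0x7) != 0.
	 */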
if (curproc->p_fixalignment)
else
return (SIMU_UNALIGN);
}
/*
* If this is a 32-bit program, chop the address accordingly. The
* intermediate uintptr_t casts prevent warnings under a certain
* compiler, and the temporary 32 bit storage is intended to force
* proper code generation and break up what would otherwise be a
* quadruple cast.
*/
}
return (SIMU_FAULT);
return (SIMU_FAULT);
if (ltlend) {
}
if (usermode) {
return (SIMU_FAULT);
} else {
}
} else { /* load */
if (usermode) {
if (nf)
data = 0;
else
return (SIMU_FAULT);
}
} else
if (ltlend) {
}
return (SIMU_FAULT);
return (SIMU_FAULT);
}
return (SIMU_SUCCESS);
}
/*
* simulate popc
*/
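/*
 * POPC writes the number of one bits in its operand (the sign-extended
 * simm13 or the value of rs2) to rd.  A minimal sketch of the counting
 * loop, assuming the operand has been placed in a 64-bit "val":
 *
 *	uint64_t cnt = 0;
 *	while (val != 0) {
 *		if (val & 1)
 *			cnt++;
 *		val >>= 1;
 *	}
 */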
static int
{
if (rs1 > 0)
return (SIMU_ILLEGAL);
(void) flush_user_windows_to_stack(NULL);
/* check immediate bit and use immediate field or reg (rs2) */
if (immflg) {
imm >>= 51;
if (imm != 0) {
cnt++;
}
} else {
return (SIMU_FAULT);
if (val != 0) {
cnt++;
}
}
return (SIMU_FAULT);
return (SIMU_SUCCESS);
}
/*
* simulate mulscc
*/
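/*
 * MULScc performs one step of the V8 integer multiply: the running
 * product in rs1 is shifted right one bit with (icc.n xor icc.v)
 * shifted in at the top, rs2 (or the immediate) is added if the low
 * bit of %y is set (otherwise zero is added), the sum goes to rd with
 * the condition codes updated, and %y is shifted right one bit with
 * the old low bit of rs1 shifted in.
 */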
static int
{
uint32_t c, d, v;
(void) flush_user_windows_to_stack(NULL);
d64 >>= 51;
} else {
if (inst & 0x1fe0) {
return (SIMU_ILLEGAL);
}
return (SIMU_FAULT);
}
}
return (SIMU_FAULT);
}
/* icc.n xor icc.v */
} else {
s2 = 0;
}
/* set the icc flags */
}
/* set the xcc flags */
if (ud64 == 0) {
}
return (SIMU_FAULT);
}
d64 >>= 1;
return (SIMU_SUCCESS);
}
/*
* simulate unimplemented instructions (popc, ldqf{a}, stqf{a})
*/
int
{
int nomatch = 0;
return (SIMU_ILLEGAL);
}
/*
 * When fixing dirty v8 instructions, there's a race if two processors
 * are executing the dirty executable at the same time. If one
 * cleans the instruction as the other is executing it, the second
* processor will see a clean instruction when it comes through this
* code and will return SIMU_ILLEGAL. To work around the race
* this code will keep track of the last illegal instruction seen
* by each lwp and will only take action if the illegal instruction
* is repeatable.
*/
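	/*
	 * Concretely, the lwp's machpcb holds the PC and instruction of
	 * the last illegal-instruction trap: a new instruction just
	 * records that state and returns SIMU_RETRY, while a repeat of
	 * the same instruction falls through to the cleanup below, with
	 * mpcb_illexccnt bounding how many times that is attempted.
	 */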
nomatch = 1;
/* instruction fields */
if (IS_IBIT_SET(inst)) {
} else {
}
if (fpu_exists) {
if (!(_fp_read_fprs() & FPRS_FEF))
fp_enable();
} else {
fp_enable();
}
fp_precise(rp);
return (SIMU_RETRY);
}
return (SIMU_ILLEGAL);
}
if (optype == OP_V8_LDSTR) {
}
	/* This is a new instruction, so illexccnt should be reset as well. */
if (nomatch) {
mpcb->mpcb_illexccnt = 0;
return (SIMU_RETRY);
}
/*
* In order to keep us from entering into an infinite loop while
* attempting to clean up faulty instructions, we will return
* SIMU_ILLEGAL once we've cleaned up the instruction as much
* as we can, and still end up here.
*/
return (SIMU_ILLEGAL);
/*
* The rest of the code handles v8 binaries with instructions
* that have dirty (non-zero) bits in reserved or 'ignored'
* fields; these will cause core dumps on v9 machines.
*
 * We only clean dirty instructions in 32-bit programs (i.e., v8)
* running on SPARCv9 processors. True v9 programs are forced
* to use the instruction set as intended.
*/
return (SIMU_ILLEGAL);
switch (optype) {
case OP_V8_BRANCH:
case OP_V8_CALL:
return (SIMU_ILLEGAL); /* these don't have ignored fields */
/*NOTREACHED*/
case OP_V8_ARITH:
switch (op3) {
case IOP_V8_RETT:
return (SIMU_ILLEGAL);
if (rd)
if (i == 0 && ignor)
break;
case IOP_V8_TCC:
if (i == 0 && ignor != 0) {
} else {
return (SIMU_ILLEGAL);
}
break;
case IOP_V8_JMPL:
case IOP_V8_RESTORE:
case IOP_V8_SAVE:
(i == 0 && ignor)) {
} else {
return (SIMU_ILLEGAL);
}
break;
case IOP_V8_FCMP:
if (rd == 0)
return (SIMU_ILLEGAL);
break;
case IOP_V8_RDASR:
/*
* The instruction specifies an invalid
* state register - better bail out than
* "fix" it when we're not sure what was
* intended.
*/
return (SIMU_ILLEGAL);
}
/*
* Note: this case includes the 'stbar'
* instruction (rs1 == 15 && i == 0).
*/
inst &= ~(0x3fff);
break;
case IOP_V8_SRA:
case IOP_V8_SRL:
case IOP_V8_SLL:
if (ignor == 0)
return (SIMU_ILLEGAL);
break;
case IOP_V8_ADD:
case IOP_V8_AND:
case IOP_V8_OR:
case IOP_V8_XOR:
case IOP_V8_SUB:
case IOP_V8_ANDN:
case IOP_V8_ORN:
case IOP_V8_XNOR:
case IOP_V8_ADDC:
case IOP_V8_UMUL:
case IOP_V8_SMUL:
case IOP_V8_SUBC:
case IOP_V8_UDIV:
case IOP_V8_SDIV:
case IOP_V8_ADDcc:
case IOP_V8_ANDcc:
case IOP_V8_ORcc:
case IOP_V8_XORcc:
case IOP_V8_SUBcc:
case IOP_V8_ANDNcc:
case IOP_V8_ORNcc:
case IOP_V8_XNORcc:
case IOP_V8_ADDCcc:
case IOP_V8_UMULcc:
case IOP_V8_SMULcc:
case IOP_V8_SUBCcc:
case IOP_V8_UDIVcc:
case IOP_V8_SDIVcc:
case IOP_V8_TADDcc:
case IOP_V8_TSUBcc:
case IOP_V8_TADDccTV:
case IOP_V8_TSUBccTV:
case IOP_V8_MULScc:
case IOP_V8_WRASR:
case IOP_V8_FLUSH:
if (i != 0 || ignor == 0)
return (SIMU_ILLEGAL);
break;
default:
return (SIMU_ILLEGAL);
}
break;
case OP_V8_LDSTR:
switch (op3) {
case IOP_V8_STFSR:
case IOP_V8_LDFSR:
return (SIMU_ILLEGAL);
if (rd)
if (i == 0 && ignor)
break;
default:
i == 0 && ignor)
else
return (SIMU_ILLEGAL);
break;
}
break;
default:
return (SIMU_ILLEGAL);
}
/*
 * We only create a COW page for MAP_PRIVATE mappings.
*/
return (SIMU_ILLEGAL);
}
/*
* A "flush" instruction using the user PC's vaddr will not work
* here, at least on Spitfire. Instead we create a temporary kernel
* mapping to the user's text page, then modify and flush that.
* Break COW by locking user page.
*/
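	/*
	 * In outline: as_fault(..., F_SOFTLOCK, S_READ) locks the user
	 * text page (breaking COW and keeping it resident), the page
	 * frame is mapped into the kernel, the cleaned instruction is
	 * written through that mapping and flushed, and the page is
	 * unlocked again before returning SIMU_RETRY.
	 */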
F_SOFTLOCK, S_READ))
return (SIMU_FAULT);
if (pf_is_memory(pfnum)) {
} else {
return (SIMU_FAULT);
}
return (SIMU_RETRY);
}
/*
* Simulate a "rd %tick" or "rd %stick" (%asr24) instruction.
*/
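/*
 * User-level reads of %tick and %stick can be disabled via the NPT bit
 * in those registers; when they are, the read traps and is simulated
 * here by reading the counter in the kernel and depositing the value
 * in the destination register.
 */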
int
{
/*
* Make sure this is either a %tick read (rs1 == 0x4) or
* a %stick read (rs1 == 0x18) instruction.
*/
if (rs1 == 0x4) {
(void) flush_user_windows_to_stack(NULL);
tick = gettick_counter();
return (SIMU_SUCCESS);
} else if (rs1 == 0x18) {
(void) flush_user_windows_to_stack(NULL);
stick = gethrtime_unscaled();
return (SIMU_SUCCESS);
}
}
return (SIMU_FAULT);
}
/*
* Get the value of a register for instruction simulation
* by using the regs or window structure pointers.
* Return 0 for success, and -1 for failure. If there is a failure,
 * save the faulting address using the badaddr pointer.
* We have 64 bit globals and outs, and 32 or 64 bit ins and locals.
*/
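/*
 * The globals and outs (regs 1-15) are available directly from the
 * saved struct regs; the locals and ins (regs 16-31) live in the
 * register window spilled to the user stack, which is why the code
 * below distinguishes a V9 (64-bit, biased) stack from a V8 stack
 * holding 32-bit copies.
 */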
int
{
int rv = 0;
if (reg == 0) {
*val = 0;
} else if (reg < 16) {
} else if (IS_V9STACK(sp)) {
rv = -1;
}
} else {
}
} else {
rv = -1;
}
} else {
}
}
return (rv);
}
/*
* Set the value of a register after instruction simulation
* by using the regs or window structure pointers.
 * Return 0 for success, and -1 for failure. If there is a failure,
 * save the faulting address using the badaddr pointer.
* We have 64 bit globals and outs, and 32 or 64 bit ins and locals.
*/
int
{
int rv = 0;
if (reg == 0) {
return (0);
} else if (reg < 16) {
} else if (IS_V9STACK(sp)) {
rv = -1;
}
/*
* We have changed a local or in register;
* nuke the watchpoint return windows.
*/
} else {
}
} else {
rv = -1;
}
/*
* We have changed a local or in register;
* nuke the watchpoint return windows.
*/
} else {
}
}
return (rv);
}
/*
 * Calculate a memory reference address from instruction operands,
 * used to return the address of a fault rather than that of the
 * instruction when an error occurs. This code is common to most of
 * the routines that simulate instructions.
*/
int
{
int sz;
int immflg;
int floatflg;
else
if (floatflg) {
}
/*
* Fix to access extra double register encoding plus
* compensate to access the correct fpu_dreg.
*/
if (sz > 4) {
}
} else {
case 0: /* lduw */
case 4: /* stw */
case 8: /* ldsw */
case 0xf: /* swap */
sz = 4; break;
case 1: /* ldub */
case 5: /* stb */
case 9: /* ldsb */
case 0xd: /* ldstub */
sz = 1; break;
case 2: /* lduh */
case 6: /* sth */
case 0xa: /* ldsh */
sz = 2; break;
case 3: /* ldd */
case 7: /* std */
case 0xb: /* ldx */
case 0xe: /* stx */
sz = 8; break;
}
}
(void) flush_user_windows_to_stack(NULL);
else
return (SIMU_FAULT);
/* check immediate bit and use immediate field or reg (rs2) */
if (immflg) {
int imm;
imm >>= 19;
} else {
return (SIMU_FAULT);
}
/*
* If this is a 32-bit program, chop the address accordingly. The
* intermediate uintptr_t casts prevent warnings under a certain
* compiler, and the temporary 32 bit storage is intended to force
* proper code generation and break up what would otherwise be a
* quadruple cast.
*/
}
}
/*
* Return the size of a load or store instruction (1, 2, 4, 8, 16, 64).
* Also compute the precise address by instruction disassembly.
* (v9 page faults only provide the page address via the hardware.)
* Return 0 on failure (not a load or store instruction).
*/
int
{
int sz = 0;
int immflg;
int floatflg;
return (4);
}
/*
* Fetch the instruction from user-level.
* We would like to assert this:
* ASSERT(USERMODE(rp->r_tstate));
* but we can't because we can reach this point from a
 * trap handler that calls trap() with T_USER even though r_tstate
* indicates a system trap, not a user trap.
*/
/* if not load or store do nothing. can't happen? */
return (0);
if (immflg)
else
if (floatflg) {
} else {
switch (op3 & 3) {
case 0:
break;
case 1:
if (rd == 0)
else
break;
case 2:
if (op3 == 0x3e)
else
break;
case 3:
break;
}
}
} else {
case 0: /* lduw */
case 4: /* stw */
case 8: /* ldsw */
case 0xf: /* swap */
sz = 4; break;
case 1: /* ldub */
case 5: /* stb */
case 9: /* ldsb */
case 0xd: /* ldstub */
sz = 1; break;
case 2: /* lduh */
case 6: /* sth */
case 0xa: /* ldsh */
sz = 2; break;
case 3: /* ldd */
case 7: /* std */
case 0xb: /* ldx */
case 0xe: /* stx */
sz = 8; break;
}
}
if (sz == 0) /* can't happen? */
return (0);
(void) flush_user_windows_to_stack(NULL);
return (0);
/* check immediate bit and use immediate field or reg (rs2) */
if (immflg) {
int imm;
imm >>= 19;
} else {
/*
* asi's in the 0xCx range are partial store
* instructions. For these, rs2 is a mask, not part of
* the address.
*/
return (0);
}
}
}
/*
* If this is a 32-bit program, chop the address accordingly. The
* intermediate uintptr_t casts prevent warnings under a certain
* compiler, and the temporary 32 bit storage is intended to force
* proper code generation and break up what would otherwise be a
* quadruple cast.
*/
}
return (sz);
}
/*
* Fetch an instruction from user-level.
* Deal with watchpoints, if they are in effect.
*/
{
/*
* If this is a 32-bit program, chop the address accordingly. The
* intermediate uintptr_t casts prevent warnings under a certain
* compiler, and the temporary 32 bit storage is intended to force
* proper code generation and break up what would otherwise be a
* quadruple cast.
*/
if (p->p_model == DATAMODEL_ILP32) {
}
instr = -1;
return (instr);
}