translate.c revision cbe8bbf4742531f9ff5382113e546df70173c34d
/*
* i386 translation
*
* Copyright (c) 2003 Fabrice Bellard
*
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Sun LGPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Sun elects to use only
* the Lesser General Public License version 2.1 (LGPLv2) at this time for any software where
* a choice of LGPL license versions is made available with the language indicating
* that LGPLv2 or any later version may be used, or where a choice of which version
* of the LGPL is applied is otherwise unspecified.
*/
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifndef VBOX
#include <inttypes.h>
#include <signal.h>
#include <assert.h>
#endif /* !VBOX */
#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "helper.h"
#include "tcg-op.h"
#define PREFIX_REPZ 0x01
#define PREFIX_REPNZ 0x02
#define PREFIX_LOCK 0x04
#define PREFIX_DATA 0x08
#define PREFIX_ADR 0x10
#ifdef TARGET_X86_64
#define X86_64_ONLY(x) x
#ifndef VBOX
#define X86_64_DEF(x...) x
#else
#define X86_64_DEF(x...) x
#endif
#if 1
#endif
#else
#define X86_64_ONLY(x) NULL
#ifndef VBOX
#define X86_64_DEF(x...)
#else
#define X86_64_DEF(x)
#endif
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif
//#define MACRO_TEST 1
/* global register indexes */
/* local temps */
/* local register indexes (only used inside old micro ops) */
#include "gen-icount.h"
#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
#ifdef VBOX
{
uint8_t b;
return b;
}
#define ldub_code(a) ldub_code_raw(a)
{
}
#define lduw_code(a) lduw_code_raw(a)
{
}
#define ldl_code(a) ldl_code_raw(a)
#endif /* VBOX */
typedef struct DisasContext {
/* current insn context */
int override; /* -1 if no override */
int prefix;
int is_jmp; /* 1 means jump (stop translation), 2 means CPU
static state change (stop translation) */
/* current block context */
int pe; /* protected mode */
int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
int lma; /* long mode active */
int code64; /* 64 bit code segment */
#endif
int ss32; /* 32 bit stack segment */
int cc_op; /* current CC operation */
int f_st; /* currently unused */
int vm86; /* vm86 mode */
#ifdef VBOX
int vme; /* CR4.VME */
int pvi; /* CR4.PVI */
int record_call; /* record calls for CSAM or not? */
#endif
int cpl;
int iopl;
int tf; /* TF cpu flag */
int singlestep_enabled; /* "hardware" single step enabled */
int jmp_opt; /* use direct block chaining for direct jumps */
int mem_index; /* select memory access functions */
struct TranslationBlock *tb;
int popl_esp_hack; /* for correct popl with esp base handling */
int rip_offset; /* only used in x86_64, but left for simplicity */
int cpuid_features;
int cpuid_ext_features;
int cpuid_ext2_features;
int cpuid_ext3_features;
} DisasContext;
static void gen_eob(DisasContext *s);
#ifdef VBOX
static void gen_check_external_event(void);
#endif
enum {
};
/* i386 shift ops */
enum {
OP_SHL1, /* undocumented */
OP_SAR = 7,
};
enum {
};
/* operand size */
enum {
OT_BYTE = 0,
};
enum {
/* I386 int registers */
OR_EAX, /* MUST be even numbered */
OR_A0, /* temporary register used when doing address evaluation */
};
#ifndef VBOX
static inline void gen_op_movl_T0_0(void)
#else /* VBOX */
DECLINLINE(void) gen_op_movl_T0_0(void)
#endif /* VBOX */
{
tcg_gen_movi_tl(cpu_T[0], 0);
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifdef TARGET_X86_64
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#endif
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_andl_T0_ffff(void)
#else /* VBOX */
DECLINLINE(void) gen_op_andl_T0_ffff(void)
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_movl_T0_T1(void)
#else /* VBOX */
DECLINLINE(void) gen_op_movl_T0_T1(void)
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_andl_A0_ffff(void)
#else /* VBOX */
DECLINLINE(void) gen_op_andl_A0_ffff(void)
#endif /* VBOX */
{
}
#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */
#if defined(WORDS_BIGENDIAN)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
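/* Illustrative sketch (not part of the translator, kept disabled): on a
 * little-endian host the offsets above make AL, AH and AX alias the low
 * bytes of the slot holding the full register, which is what the
 * gen_op_mov_* accessors below rely on. A minimal standalone
 * demonstration with a plain union, assuming a little-endian host: */
#if 0 /* illustrative only */
#include <stdint.h>
#include <stdio.h>

union x86_reg {
    uint32_t e;          /* EAX: REG_L_OFFSET == 0            */
    uint16_t w;          /* AX:  REG_W_OFFSET == 0            */
    uint8_t  b[4];       /* b[0] = AL (REG_B_OFFSET == 0),
                            b[1] = AH (REG_H_OFFSET == 1)     */
};

int main(void)
{
    union x86_reg r;
    r.e = 0x11223344;
    /* little-endian: AL is the least significant byte */
    printf("EAX=%08x AX=%04x AL=%02x AH=%02x\n", r.e, r.w, r.b[0], r.b[1]);
    return 0;
}
#endif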
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
switch(ot) {
case OT_BYTE:
} else {
}
break;
case OT_WORD:
break;
#ifdef TARGET_X86_64
case OT_LONG:
/* high part of register set to zero */
tcg_gen_movi_tl(cpu_tmp0, 0);
break;
default:
case OT_QUAD:
break;
#else
default:
case OT_LONG:
break;
#endif
}
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
switch(size) {
case 0:
break;
#ifdef TARGET_X86_64
case 1:
/* high part of register set to zero */
tcg_gen_movi_tl(cpu_tmp0, 0);
break;
default:
case 2:
break;
#else
default:
case 1:
break;
#endif
}
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
switch(ot) {
case OT_BYTE:
#ifndef VBOX
goto std_case;
#else
#endif
} else {
}
break;
default:
#ifndef VBOX
#endif
break;
}
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_movl_A0_reg(int reg)
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
#ifdef TARGET_X86_64
#endif
}
#ifdef TARGET_X86_64
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#endif
{
#ifdef TARGET_X86_64
if (CODE64(s))
else
#endif
}
#ifndef VBOX
static inline void gen_op_addl_T0_T1(void)
#else /* VBOX */
DECLINLINE(void) gen_op_addl_T0_T1(void)
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_jmp_T0(void)
#else /* VBOX */
DECLINLINE(void) gen_op_jmp_T0(void)
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
switch(size) {
case 0:
break;
case 1:
#ifdef TARGET_X86_64
#endif
break;
#ifdef TARGET_X86_64
case 2:
break;
#endif
}
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
switch(size) {
case 0:
break;
case 1:
#ifdef TARGET_X86_64
#endif
break;
#ifdef TARGET_X86_64
case 2:
break;
#endif
}
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
if (shift != 0)
#ifdef TARGET_X86_64
#endif
}
#ifdef VBOX
{
/* It seems segments don't get out of sync - if they in fact do, enable the code below. */
#ifdef FORCE_SEGMENT_SYNC
#if 1
/* Considering the poor quality of the TCG optimizer, it is better to call directly */
#else
/* Our segments could be outdated, so check the newselector field to see if an update is really needed */
int skip_label;
/* For other segments this check is a waste of time, and also TCG is unable to cope with this code */
return;
if (keepA0)
{
/* we need to store old cpu_A0 */
}
skip_label = gen_new_label();
if (keepA0)
{
}
#endif /* 0 */
#endif /* FORCE_SEGMENT_SYNC */
}
#endif
#ifndef VBOX
static inline void gen_op_movl_A0_seg(int reg)
#else /* VBOX */
#endif /* VBOX */
{
#ifdef VBOX
gen_op_seg_check(reg, false);
#endif
}
#ifndef VBOX
static inline void gen_op_addl_A0_seg(int reg)
#else /* VBOX */
#endif /* VBOX */
{
#ifdef VBOX
gen_op_seg_check(reg, true);
#endif
#ifdef TARGET_X86_64
#endif
}
#ifdef TARGET_X86_64
#ifndef VBOX
static inline void gen_op_movq_A0_seg(int reg)
#else /* VBOX */
#endif /* VBOX */
{
#ifdef VBOX
gen_op_seg_check(reg, false);
#endif
}
#ifndef VBOX
static inline void gen_op_addq_A0_seg(int reg)
#else /* VBOX */
#endif /* VBOX */
{
#ifdef VBOX
gen_op_seg_check(reg, true);
#endif
}
#ifndef VBOX
static inline void gen_op_movq_A0_reg(int reg)
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
if (shift != 0)
}
#endif
#ifndef VBOX
static inline void gen_op_lds_T0_A0(int idx)
#else /* VBOX */
#endif /* VBOX */
{
switch(idx & 3) {
case 0:
break;
case 1:
break;
default:
case 2:
break;
}
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
switch(idx & 3) {
case 0:
break;
case 1:
break;
case 2:
break;
default:
case 3:
break;
}
}
/* XXX: always use ldu or lds */
#ifndef VBOX
static inline void gen_op_ld_T0_A0(int idx)
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_ldu_T0_A0(int idx)
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_ld_T1_A0(int idx)
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
switch(idx & 3) {
case 0:
break;
case 1:
break;
case 2:
break;
default:
case 3:
break;
}
}
#ifndef VBOX
static inline void gen_op_st_T0_A0(int idx)
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_st_T1_A0(int idx)
#else /* VBOX */
#endif /* VBOX */
{
}
#ifdef VBOX
static void gen_check_external_event(void)
{
#if 1
/** @todo: once TCG codegen improves, we may want to use the version
from the #else branch */
#else
int skip_label;
skip_label = gen_new_label();
/* t0 = cpu_tmp0; */
/* Keep in sync with helper_check_external_event() */
/** @todo: predict branch as taken */
#endif
}
#if 0 /* unused code? */
static void gen_check_external_event2(void)
{
}
#endif
#endif
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifdef VBOX
{
gen_jmp_im(pc);
#ifdef VBOX_DUMP_STATE
#endif
}
#endif
#ifndef VBOX
static inline void gen_string_movl_A0_ESI(DisasContext *s)
#else /* VBOX */
#endif /* VBOX */
{
int override;
#ifdef TARGET_X86_64
if (s->aflag == 2) {
if (override >= 0) {
} else {
}
} else
#endif
if (s->aflag) {
/* 32 bit address */
if (override >= 0) {
} else {
}
} else {
/* 16 bit address, always override */
if (override < 0)
}
}
#ifndef VBOX
static inline void gen_string_movl_A0_EDI(DisasContext *s)
#else /* VBOX */
#endif /* VBOX */
{
#ifdef TARGET_X86_64
if (s->aflag == 2) {
} else
#endif
if (s->aflag) {
if (s->addseg) {
} else {
}
} else {
}
}
#ifndef VBOX
static inline void gen_op_movl_T0_Dshift(int ot)
#else /* VBOX */
#endif /* VBOX */
{
};
{
switch(ot) {
case OT_BYTE:
break;
case OT_WORD:
break;
case OT_LONG:
break;
default:
break;
}
}
{
switch(ot) {
case OT_BYTE:
break;
case OT_WORD:
break;
case OT_LONG:
break;
default:
break;
}
}
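/* Illustrative sketch (an assumption about the scheme used here, kept
 * disabled): gen_op_movl_T0_Dshift produces the per-element stride for
 * string instructions. QEMU keeps EFLAGS.DF as +1/-1 in env->df, so the
 * stride for operand-size code ot is just df times the element size.
 * A hypothetical standalone equivalent: */
#if 0 /* illustrative only */
#include <stdint.h>

static int64_t string_stride(int df /* +1 or -1 */, int ot /* 0..3 */)
{
    /* multiply rather than shift: shifting a negative value is UB in C */
    return (int64_t)df * (1 << ot);    /* +-1, +-2, +-4, +-8 */
}
#endif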
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
static void *helper_in_func[3] = {
};
static void *helper_out_func[3] = {
};
static void *gen_check_io_func[3] = {
};
{
int state_saved;
state_saved = 0;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
state_saved = 1;
}
if(s->flags & HF_SVMI_MASK) {
if (!state_saved) {
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
state_saved = 1;
}
}
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_update_cc_op(DisasContext *s)
#else /* VBOX */
#endif /* VBOX */
{
if (s->cc_op != CC_OP_DYNAMIC) {
gen_op_set_cc_op(s->cc_op);
s->cc_op = CC_OP_DYNAMIC;
}
}
static void gen_op_update1_cc(void)
{
}
static void gen_op_update2_cc(void)
{
}
#ifndef VBOX
static inline void gen_op_cmpl_T0_T1_cc(void)
#else /* VBOX */
DECLINLINE(void) gen_op_cmpl_T0_T1_cc(void)
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_testl_T0_T1_cc(void)
#else /* VBOX */
DECLINLINE(void) gen_op_testl_T0_T1_cc(void)
#endif /* VBOX */
{
}
static void gen_op_update_neg_cc(void)
{
}
/* compute eflags.C to reg */
{
#if TCG_TARGET_REG_BITS == 32
#else
#endif
}
/* compute all eflags to cc_src */
{
#if TCG_TARGET_REG_BITS == 32
#else
#endif
}
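/* Illustrative sketch (kept disabled): the two helpers above materialize
 * flags from the lazily recorded state. Instead of computing EFLAGS after
 * every arithmetic op, the translator records the operation kind (cc_op)
 * and its operands/result (cpu_cc_src/cpu_cc_dst) and derives individual
 * flags only when a consumer (jcc, setcc, pushf, ...) asks for them. A
 * toy model of the idea, with hypothetical names, for the 32-bit sub and
 * logic cases: */
#if 0 /* illustrative only */
#include <stdint.h>

enum { MY_CC_OP_SUBL, MY_CC_OP_LOGICL };

static int lazy_cf(int cc_op, uint32_t src /* src2 */, uint32_t dst /* result */)
{
    switch (cc_op) {
    case MY_CC_OP_SUBL:
        /* reconstruct src1 = dst + src (mod 2^32); CF iff src1 < src2 */
        return (uint32_t)(dst + src) < src;
    case MY_CC_OP_LOGICL:
    default:
        return 0;            /* logic ops clear CF */
    }
}

static int lazy_zf(uint32_t dst)
{
    return dst == 0;         /* ZF depends only on the result */
}
#endif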
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
switch(jcc_op) {
case JCC_O:
gen_compute_eflags(cpu_T[0]);
break;
case JCC_B:
break;
case JCC_Z:
gen_compute_eflags(cpu_T[0]);
break;
case JCC_BE:
break;
case JCC_S:
gen_compute_eflags(cpu_T[0]);
break;
case JCC_P:
gen_compute_eflags(cpu_T[0]);
break;
case JCC_L:
break;
default:
case JCC_LE:
break;
}
}
/* return true if setcc_slow is not needed (WARNING: must be kept in
sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
{
int jcc_op;
switch(s->cc_op) {
case CC_OP_SUBB:
case CC_OP_SUBW:
case CC_OP_SUBL:
case CC_OP_SUBQ:
goto slow_jcc;
break;
/* some jumps are easy to compute */
case CC_OP_ADDB:
case CC_OP_ADDW:
case CC_OP_ADDL:
case CC_OP_ADDQ:
case CC_OP_LOGICB:
case CC_OP_LOGICW:
case CC_OP_LOGICL:
case CC_OP_LOGICQ:
case CC_OP_INCB:
case CC_OP_INCW:
case CC_OP_INCL:
case CC_OP_INCQ:
case CC_OP_DECB:
case CC_OP_DECW:
case CC_OP_DECL:
case CC_OP_DECQ:
case CC_OP_SHLB:
case CC_OP_SHLW:
case CC_OP_SHLL:
case CC_OP_SHLQ:
goto slow_jcc;
break;
default:
return 0;
}
return 1;
}
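/* Illustrative sketch (kept disabled): the jcc_op/inv decoding used by
 * is_fast_jcc_case above and gen_jcc1 below follows the x86 condition
 * encoding -- the low opcode bit requests negation and bits 3..1 select
 * one of the eight base predicates (O, B, Z, BE, S, P, L, LE).
 * Standalone demo: */
#if 0 /* illustrative only */
#include <stdio.h>

static void print_cond(int b) /* b = low nibble of a Jcc opcode */
{
    static const char *base[8] = { "o", "b", "z", "be", "s", "p", "l", "le" };
    int inv    = b & 1;        /* odd opcodes are the negated forms */
    int jcc_op = (b >> 1) & 7; /* base predicate index              */
    printf("j%s%s\n", inv ? "n" : "", base[jcc_op]);
}

int main(void)
{
    int b;
    for (b = 0; b < 16; b++)
        print_cond(b);         /* jo jno jb jnb jz jnz ... jle jnle */
    return 0;
}
#endif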
/* generate a conditional jump to label 'l1' according to jump opcode
value 'b'. In the fast case, T0 is guaranteed not to be used. */
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
inv = b & 1;
switch(cc_op) {
case CC_OP_SUBB:
case CC_OP_SUBW:
case CC_OP_SUBL:
case CC_OP_SUBQ:
switch(jcc_op) {
case JCC_Z:
switch(size) {
case 0:
break;
case 1:
break;
#ifdef TARGET_X86_64
case 2:
break;
#endif
default:
t0 = cpu_cc_dst;
break;
}
break;
case JCC_S:
switch(size) {
case 0:
0, l1);
break;
case 1:
0, l1);
break;
#ifdef TARGET_X86_64
case 2:
0, l1);
break;
#endif
default:
0, l1);
break;
}
break;
case JCC_B:
goto fast_jcc_b;
case JCC_BE:
switch(size) {
case 0:
break;
case 1:
break;
#ifdef TARGET_X86_64
case 2:
break;
#endif
default:
t0 = cpu_cc_src;
break;
}
break;
case JCC_L:
goto fast_jcc_l;
case JCC_LE:
switch(size) {
case 0:
break;
case 1:
break;
#ifdef TARGET_X86_64
case 2:
break;
#endif
default:
t0 = cpu_cc_src;
break;
}
break;
default:
goto slow_jcc;
}
break;
/* some jumps are easy to compute */
case CC_OP_ADDB:
case CC_OP_ADDW:
case CC_OP_ADDL:
case CC_OP_ADDQ:
case CC_OP_ADCB:
case CC_OP_ADCW:
case CC_OP_ADCL:
case CC_OP_ADCQ:
case CC_OP_SBBB:
case CC_OP_SBBW:
case CC_OP_SBBL:
case CC_OP_SBBQ:
case CC_OP_LOGICB:
case CC_OP_LOGICW:
case CC_OP_LOGICL:
case CC_OP_LOGICQ:
case CC_OP_INCB:
case CC_OP_INCW:
case CC_OP_INCL:
case CC_OP_INCQ:
case CC_OP_DECB:
case CC_OP_DECW:
case CC_OP_DECL:
case CC_OP_DECQ:
case CC_OP_SHLB:
case CC_OP_SHLW:
case CC_OP_SHLL:
case CC_OP_SHLQ:
case CC_OP_SARB:
case CC_OP_SARW:
case CC_OP_SARL:
case CC_OP_SARQ:
switch(jcc_op) {
case JCC_Z:
goto fast_jcc_z;
case JCC_S:
goto fast_jcc_s;
default:
goto slow_jcc;
}
break;
default:
gen_setcc_slow_T0(s, jcc_op);
break;
}
}
/* XXX: does not work with gdbstub "ice" single step - not a
serious problem */
{
l1 = gen_new_label();
l2 = gen_new_label();
return l2;
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
if (use_icount)
gen_io_start();
/* Note: we must do this dummy write first to be restartable in
case of page fault. */
if (use_icount)
gen_io_end();
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
if (use_icount)
gen_io_start();
if (use_icount)
gen_io_end();
}
/* same method as Valgrind: we generate jumps to current or next
instruction */
#ifndef VBOX
{ \
int l2; \
gen_update_cc_op(s); \
/* a loop would cause two single step exceptions if ECX = 1 \
before rep string_insn */ \
if (!s->jmp_opt) \
}
#else /* VBOX */
{ \
int l2; \
gen_update_cc_op(s); \
/* a loop would cause two single step exceptions if ECX = 1 \
before rep string_insn */ \
if (!s->jmp_opt) \
}
#endif /* VBOX */
#ifndef VBOX
int nz) \
{ \
int l2; \
gen_update_cc_op(s); \
if (!s->jmp_opt) \
}
#else /* VBOX */
int nz) \
{ \
int l2;\
gen_update_cc_op(s); \
if (!s->jmp_opt) \
}
#endif /* VBOX */
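/* Illustrative sketch (kept disabled): the rep-string helper macros
 * above emit a loop whose shape is roughly the C function below -- the
 * count is tested first so rep with (E)CX == 0 executes nothing, one
 * element is processed, and control jumps back to the insn itself so
 * every iteration is a separate, interruptible translated step. This is
 * a sketch of the control flow, not of the emitted TCG ops: */
#if 0 /* illustrative only */
static void rep_movsb_shape(unsigned char *dst, const unsigned char *src,
                            unsigned long *ecx, long stride /* +-1 from DF */)
{
    while (*ecx != 0) {        /* l1: if ECX == 0 goto l2 */
        *dst = *src;           /*     one movsb element   */
        dst += stride;
        src += stride;
        (*ecx)--;              /*     ECX--               */
    }                          /*     jmp l1;  l2:        */
}
#endif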
static void *helper_fp_arith_ST0_FT0[8] = {
};
/* NOTE the exception in "r" op ordering */
static void *helper_fp_arith_STN_ST0[8] = {
NULL,
NULL,
};
/* if d == OR_TMP0, it means memory operand (address in A0) */
{
if (d != OR_TMP0) {
gen_op_mov_TN_reg(ot, 0, d);
} else {
}
switch(op) {
case OP_ADCL:
if (d != OR_TMP0)
gen_op_mov_reg_T0(ot, d);
else
break;
case OP_SBBL:
if (d != OR_TMP0)
gen_op_mov_reg_T0(ot, d);
else
break;
case OP_ADDL:
if (d != OR_TMP0)
gen_op_mov_reg_T0(ot, d);
else
break;
case OP_SUBL:
if (d != OR_TMP0)
gen_op_mov_reg_T0(ot, d);
else
break;
default:
case OP_ANDL:
if (d != OR_TMP0)
gen_op_mov_reg_T0(ot, d);
else
break;
case OP_ORL:
if (d != OR_TMP0)
gen_op_mov_reg_T0(ot, d);
else
break;
case OP_XORL:
if (d != OR_TMP0)
gen_op_mov_reg_T0(ot, d);
else
break;
case OP_CMPL:
break;
}
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
{
if (d != OR_TMP0)
gen_op_mov_TN_reg(ot, 0, d);
else
if (c > 0) {
} else {
}
if (d != OR_TMP0)
gen_op_mov_reg_T0(ot, d);
else
}
{
int shift_label;
mask = 0x3f;
else
mask = 0x1f;
/* load */
else
if (is_right) {
if (is_arith) {
} else {
}
} else {
}
/* store */
else
/* update eflags if non zero shift */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
/* XXX: inefficient */
shift_label = gen_new_label();
if (is_right)
else
}
{
int mask;
mask = 0x3f;
else
mask = 0x1f;
/* load */
else
if (op2 != 0) {
if (is_right) {
if (is_arith) {
} else {
}
} else {
}
}
/* store */
else
/* update eflags if non zero shift */
if (op2 != 0) {
if (is_right)
else
}
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
if (arg2 >= 0)
else
}
/* XXX: add faster immediate case */
int is_right)
{
/* XXX: inefficient, but we must use local temps */
mask = 0x3f;
else
mask = 0x1f;
/* load */
} else {
}
/* Must test zero case to avoid using undefined behaviour in TCG
shifts. */
label1 = gen_new_label();
else
/* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
fix TCG definition) */
if (is_right) {
} else {
}
/* store */
} else {
}
/* update eflags */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
label2 = gen_new_label();
if (is_right) {
}
}
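/* Illustrative sketch (kept disabled): why the zero-count test above is
 * needed -- in C, and likewise in TCG, a shift by a count >= the operand
 * width is undefined, so the count is masked (0x1f/0x3f, as the hardware
 * does) and the shift plus the EFLAGS update are skipped entirely when
 * the masked count is zero, matching hardware behaviour: */
#if 0 /* illustrative only */
#include <stdint.h>

static uint32_t shl32(uint32_t x, unsigned count)
{
    count &= 0x1f;             /* hardware masks the count mod 32 */
    if (count == 0)
        return x;              /* no UB, and flags stay untouched */
    return x << count;         /* count now in 1..31: well defined */
}
#endif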
static void *helper_rotc[8] = {
};
/* XXX: add faster immediate = 1 case */
int is_right)
{
int label1;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
/* load */
else
/* store */
else
/* update eflags */
label1 = gen_new_label();
}
/* XXX: add faster immediate case */
int is_right)
{
mask = 0x3f;
else
mask = 0x1f;
/* load */
} else {
}
/* Must test zero case to avoid using undefined behaviour in TCG
shifts. */
label1 = gen_new_label();
/* Note: we implement the Intel behaviour for shift count > 16 */
if (is_right) {
/* only needed if count > 16, but a test would complicate things */
} else {
/* XXX: not optimal */
}
} else {
if (is_right) {
} else {
}
}
/* store */
} else {
}
/* update eflags */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
label2 = gen_new_label();
if (is_right) {
} else {
}
}
{
if (s != OR_TMP1)
switch(op) {
case OP_ROL:
break;
case OP_ROR:
break;
case OP_SHL:
case OP_SHL1:
break;
case OP_SHR:
break;
case OP_SAR:
break;
case OP_RCL:
break;
case OP_RCR:
break;
}
}
{
switch(op) {
case OP_SHL:
case OP_SHL1:
break;
case OP_SHR:
break;
case OP_SAR:
break;
default:
/* currently not optimized */
break;
}
}
{
int havesib;
int base;
int index;
int scale;
int opreg;
must_add_seg = s->addseg;
if (override >= 0)
must_add_seg = 1;
if (s->aflag) {
havesib = 0;
index = 0;
scale = 0;
if (base == 4) {
havesib = 1;
}
switch (mod) {
case 0:
base = -1;
s->pc += 4;
}
} else {
disp = 0;
}
break;
case 1:
break;
default:
case 2:
#ifdef VBOX
#else
#endif
s->pc += 4;
break;
}
if (base >= 0) {
/* for correct popl handling with esp */
disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
if (s->aflag == 2) {
if (disp != 0) {
}
} else
#endif
{
if (disp != 0)
}
} else {
#ifdef TARGET_X86_64
if (s->aflag == 2) {
} else
#endif
{
}
}
/* XXX: index == 4 is always invalid */
#ifdef TARGET_X86_64
if (s->aflag == 2) {
} else
#endif
{
}
}
if (must_add_seg) {
if (override < 0) {
else
}
#ifdef TARGET_X86_64
if (s->aflag == 2) {
} else
#endif
{
}
}
} else {
switch (mod) {
case 0:
if (rm == 6) {
s->pc += 2;
rm = 0; /* avoid SS override */
goto no_rm;
} else {
disp = 0;
}
break;
case 1:
break;
default:
case 2:
s->pc += 2;
break;
}
switch(rm) {
case 0:
break;
case 1:
break;
case 2:
break;
case 3:
break;
case 4:
break;
case 5:
break;
case 6:
break;
default:
case 7:
break;
}
if (disp != 0)
if (must_add_seg) {
if (override < 0) {
else
}
}
}
disp = 0;
*offset_ptr = disp;
}
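/* Illustrative sketch (kept disabled): the effective-address computation
 * above starts from these ModRM/SIB field splits. A standalone decoder
 * for the 32-bit addressing forms, using hypothetical names: */
#if 0 /* illustrative only */
#include <stdint.h>

struct modrm32 {
    int mod, reg, rm;          /* ModRM fields                    */
    int scale, index, base;    /* SIB fields, valid if has_sib    */
    int has_sib;
    int disp_size;             /* 0, 1 or 4 displacement bytes    */
};

static void decode_modrm32(uint8_t modrm, uint8_t sib, struct modrm32 *d)
{
    d->mod = (modrm >> 6) & 3;
    d->reg = (modrm >> 3) & 7;
    d->rm  = modrm & 7;
    d->has_sib = (d->mod != 3 && d->rm == 4);   /* rm == 4: SIB follows */
    if (d->has_sib) {
        d->scale = (sib >> 6) & 3;
        d->index = (sib >> 3) & 7;
        d->base  = sib & 7;
    }
    if (d->mod == 1)
        d->disp_size = 1;                       /* disp8 */
    else if (d->mod == 2 || (d->mod == 0 && d->rm == 5))
        d->disp_size = 4;                       /* disp32; absolute
                                                   address when mod == 0 */
    else
        d->disp_size = 0;
    /* (mod == 0 with a SIB base of 5 also takes a disp32; the code
       above handles it, omitted here for brevity) */
}
#endif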
{
if (mod == 3)
return;
if (s->aflag) {
if (base == 4) {
}
switch (mod) {
case 0:
if (base == 5) {
s->pc += 4;
}
break;
case 1:
s->pc++;
break;
default:
case 2:
s->pc += 4;
break;
}
} else {
switch (mod) {
case 0:
if (rm == 6) {
s->pc += 2;
}
break;
case 1:
s->pc++;
break;
default:
case 2:
s->pc += 2;
break;
}
}
}
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
int override, must_add_seg;
must_add_seg = s->addseg;
if (s->override >= 0) {
must_add_seg = 1;
} else {
}
if (must_add_seg) {
#ifdef TARGET_X86_64
if (CODE64(s)) {
} else
#endif
{
}
}
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
OR_TMP0 */
{
if (mod == 3) {
if (is_store) {
} else {
}
} else {
if (is_store) {
} else {
}
}
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
switch(ot) {
case OT_BYTE:
s->pc++;
break;
case OT_WORD:
s->pc += 2;
break;
default:
case OT_LONG:
s->pc += 4;
break;
}
return ret;
}
#ifndef VBOX
static inline int insn_const_size(unsigned int ot)
#else /* VBOX */
#endif /* VBOX */
{
return 1 << ot;
else
return 4;
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
/* NOTE: we handle the case where the TB spans two pages here */
#ifdef VBOX
#endif /* VBOX */
/* jump to same page: we can use a direct jump */
} else {
/* jump to another page: currently not optimized */
gen_eob(s);
}
}
#ifndef VBOX
static inline void gen_jcc(DisasContext *s, int b,
#else /* VBOX */
#endif /* VBOX */
{
if (s->cc_op != CC_OP_DYNAMIC) {
gen_op_set_cc_op(s->cc_op);
s->cc_op = CC_OP_DYNAMIC;
}
if (s->jmp_opt) {
l1 = gen_new_label();
gen_goto_tb(s, 0, next_eip);
s->is_jmp = 3;
} else {
l1 = gen_new_label();
l2 = gen_new_label();
tcg_gen_br(l2);
gen_eob(s);
}
}
static void gen_setcc(DisasContext *s, int b)
{
if (is_fast_jcc_case(s, b)) {
/* nominal case: we use a jump */
/* XXX: make it faster by adding new instructions in TCG */
tcg_gen_movi_tl(t0, 0);
l1 = gen_new_label();
} else {
/* slow case: it is more efficient not to generate a jump,
although it is questionable whether this optimization is
worthwhile */
inv = b & 1;
gen_setcc_slow_T0(s, jcc_op);
if (inv) {
}
}
}
#ifndef VBOX
static inline void gen_op_movl_T0_seg(int seg_reg)
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_movl_seg_T0_vm(int seg_reg)
#else /* VBOX */
#endif /* VBOX */
{
#ifdef VBOX
flags |= DESC_CS_MASK;
/* Set the limit to 0xffff. */
gen_op_movl_T0_im(0xffff);
#endif
}
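/* Illustrative sketch (kept disabled): gen_op_movl_seg_T0_vm implements
 * the real-mode/vm86 rule that a segment load consults no descriptor
 * table -- the base is simply the selector shifted left by four, and a
 * linear address is base plus offset: */
#if 0 /* illustrative only */
#include <stdint.h>

static uint32_t vm86_linear(uint16_t selector, uint16_t offset)
{
    return ((uint32_t)selector << 4) + offset;
}
#endif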
/* move T0 to seg_reg and compute if the CPU state may change. Never
call this function with seg_reg == R_CS */
{
/* XXX: optimize by finding processor state dynamically */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
/* abort translation because the addseg value may change or
because ss32 may change. For R_SS, translation must always
stop as special handling must be done to disable hardware
interrupts for the next instruction */
s->is_jmp = 3;
} else {
s->is_jmp = 3;
}
}
#ifndef VBOX
static inline int svm_is_rep(int prefixes)
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void
#else /* VBOX */
DECLINLINE(void)
#endif /* VBOX */
{
/* no SVM activated; fast case */
return;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
}
#ifndef VBOX
static inline void
#else /* VBOX */
DECLINLINE(void)
#endif
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
} else
#endif
if (s->ss32) {
} else {
}
}
/* generate a push. It depends on ss32, addseg and dflag */
static void gen_push_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
if (s->dflag) {
gen_op_addq_A0_im(-8);
} else {
gen_op_addq_A0_im(-2);
}
} else
#endif
{
if (!s->dflag)
gen_op_addl_A0_im(-2);
else
gen_op_addl_A0_im(-4);
if (s->ss32) {
if (s->addseg) {
}
} else {
}
else
}
}
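/* Illustrative sketch (kept disabled): the push above pre-decrements the
 * stack pointer by the operand size chosen from dflag, then stores
 * through SS. A toy model over a flat byte array standing in for guest
 * memory, assuming a little-endian host: */
#if 0 /* illustrative only */
#include <stdint.h>
#include <string.h>

static void push32(uint8_t *stack_mem, uint32_t *esp, uint32_t val,
                   int dflag /* 1 = 32-bit operand, 0 = 16-bit */)
{
    int size = dflag ? 4 : 2;
    *esp -= size;                          /* decrement first...      */
    memcpy(stack_mem + *esp, &val, size);  /* ...then store at SS:ESP */
}
#endif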
/* generate a push. It depends on ss32, addseg and dflag */
/* slower version for T1, only used for call Ev */
static void gen_push_T1(DisasContext *s)
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
if (s->dflag) {
gen_op_addq_A0_im(-8);
} else {
gen_op_addq_A0_im(-2);
}
} else
#endif
{
if (!s->dflag)
gen_op_addl_A0_im(-2);
else
gen_op_addl_A0_im(-4);
if (s->ss32) {
if (s->addseg) {
}
} else {
}
else
}
}
/* two step pop is necessary for precise exceptions */
static void gen_pop_T0(DisasContext *s)
{
#ifdef TARGET_X86_64
if (CODE64(s)) {
} else
#endif
{
if (s->ss32) {
if (s->addseg)
} else {
}
}
}
static void gen_pop_update(DisasContext *s)
{
#ifdef TARGET_X86_64
gen_stack_update(s, 8);
} else
#endif
{
}
}
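/* Illustrative sketch (kept disabled): the two-step pop above exists
 * because the load may fault -- the fault handler must see the original
 * ESP so the instruction can restart cleanly, so ESP is only bumped once
 * the load is known to have succeeded. Toy model, flat 32-bit stack: */
#if 0 /* illustrative only */
#include <stdint.h>
#include <string.h>

static uint32_t pop_load(const uint8_t *stack_mem, uint32_t esp)
{
    uint32_t val;
    memcpy(&val, stack_mem + esp, 4);  /* step 1: this access may fault */
    return val;
}

static void pop_update(uint32_t *esp)
{
    *esp += 4;                         /* step 2: commit only afterwards */
}
#endif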
static void gen_stack_A0(DisasContext *s)
{
if (!s->ss32)
if (s->addseg)
}
/* NOTE: wrap around in 16 bit not fully handled */
static void gen_pusha(DisasContext *s)
{
int i;
if (!s->ss32)
if (s->addseg)
for(i = 0; i < 8; i++) {
}
}
/* NOTE: wrap around in 16 bit not fully handled */
static void gen_popa(DisasContext *s)
{
int i;
if (!s->ss32)
if (s->addseg)
for(i = 0; i < 8; i++) {
/* ESP is not reloaded */
if (i != 3) {
}
}
}
{
level &= 0x1f;
#ifdef TARGET_X86_64
if (CODE64(s)) {
/* push bp */
if (level) {
/* XXX: must save state */
cpu_T[1]);
}
} else
#endif
{
if (!s->ss32)
if (s->addseg)
/* push bp */
if (level) {
/* XXX: must save state */
tcg_const_i32(s->dflag),
cpu_T[1]);
}
}
}
{
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
s->is_jmp = 3;
}
/* an interrupt is different from an exception because of the
privilege checks */
{
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
s->is_jmp = 3;
}
{
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
s->is_jmp = 3;
}
/* generate a generic end of block. Trace exception is also generated
if needed */
static void gen_eob(DisasContext *s)
{
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
}
#ifdef VBOX
#endif /* VBOX */
if (s->singlestep_enabled) {
} else if (s->tf) {
} else {
tcg_gen_exit_tb(0);
}
s->is_jmp = 3;
}
/* generate a jump to eip. No segment change must happen before as a
direct call to the next block may occur */
{
if (s->jmp_opt) {
if (s->cc_op != CC_OP_DYNAMIC) {
gen_op_set_cc_op(s->cc_op);
s->cc_op = CC_OP_DYNAMIC;
}
s->is_jmp = 3;
} else {
gen_eob(s);
}
}
{
gen_jmp_tb(s, eip, 0);
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
#else /* VBOX */
#endif /* VBOX */
{
}
#ifndef VBOX
static inline void gen_op_movq_env_0(int d_offset)
#else /* VBOX */
#endif /* VBOX */
{
}
#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)
/* 3DNow! extensions */
/* pure SSE operations */
[0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
[0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
[0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
[0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
/* MMX ops and their SSE extensions */
[0x70] = { helper_pshufw_mmx,
};
};
};
};
static void *sse_op_table5[256] = {
[0x0c] = helper_pi2fw,
[0x0d] = helper_pi2fd,
[0x1c] = helper_pf2iw,
[0x1d] = helper_pf2id,
[0x8a] = helper_pfnacc,
[0x8e] = helper_pfpnacc,
[0x90] = helper_pfcmpge,
[0x94] = helper_pfmin,
[0x96] = helper_pfrcp,
[0x97] = helper_pfrsqrt,
[0x9a] = helper_pfsub,
[0x9e] = helper_pfadd,
[0xa0] = helper_pfcmpgt,
[0xa4] = helper_pfmax,
[0xaa] = helper_pfsubr,
[0xae] = helper_pfacc,
[0xb0] = helper_pfcmpeq,
[0xb4] = helper_pfmul,
[0xb7] = helper_pmulhrw_mmx,
[0xbb] = helper_pswapd,
};
struct sse_op_helper_s {
};
};
};
{
void *sse_op2;
b &= 0xff;
if (s->prefix & PREFIX_DATA)
b1 = 1;
else if (s->prefix & PREFIX_REPZ)
b1 = 2;
else if (s->prefix & PREFIX_REPNZ)
b1 = 3;
else
b1 = 0;
if (!sse_op2)
goto illegal_op;
if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
is_xmm = 1;
} else {
if (b1 == 0) {
/* MMX case */
is_xmm = 0;
} else {
is_xmm = 1;
}
}
if (s->flags & HF_TS_MASK) {
return;
}
if (s->flags & HF_EM_MASK) {
return;
}
goto illegal_op;
if (b == 0x0e) {
if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
goto illegal_op;
/* femms */
return;
}
if (b == 0x77) {
/* emms */
return;
}
/* prepare MMX state (XXX: optimize by storing fptt and fptags in
the static cpu state) */
if (!is_xmm) {
}
if (is_xmm)
if (sse_op2 == SSE_SPECIAL) {
b |= (b1 << 8);
switch(b) {
case 0x0e7: /* movntq */
if (mod == 3)
goto illegal_op;
break;
case 0x1e7: /* movntdq */
case 0x02b: /* movntps */
case 0x12b: /* movntpd */
if (mod == 3)
goto illegal_op;
break;
case 0x3f0: /* lddqu */
if (mod == 3)
goto illegal_op;
break;
case 0x6e: /* movd mm, ea */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
} else
#endif
{
}
break;
case 0x16e: /* movd xmm, ea */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
} else
#endif
{
}
break;
case 0x6f: /* movq mm, ea */
if (mod != 3) {
} else {
}
break;
case 0x010: /* movups */
case 0x110: /* movupd */
case 0x028: /* movaps */
case 0x128: /* movapd */
case 0x16f: /* movdqa xmm, ea */
case 0x26f: /* movdqu xmm, ea */
if (mod != 3) {
} else {
}
break;
case 0x210: /* movss xmm, ea */
if (mod != 3) {
} else {
}
break;
case 0x310: /* movsd xmm, ea */
if (mod != 3) {
} else {
}
break;
case 0x012: /* movlps */
case 0x112: /* movlpd */
if (mod != 3) {
} else {
/* movhlps */
}
break;
case 0x212: /* movsldup */
if (mod != 3) {
} else {
}
break;
case 0x312: /* movddup */
if (mod != 3) {
} else {
}
break;
case 0x016: /* movhps */
case 0x116: /* movhpd */
if (mod != 3) {
} else {
/* movlhps */
}
break;
case 0x216: /* movshdup */
if (mod != 3) {
} else {
}
break;
case 0x7e: /* movd ea, mm */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
} else
#endif
{
}
break;
case 0x17e: /* movd ea, xmm */
#ifdef TARGET_X86_64
if (s->dflag == 2) {
} else
#endif
{
}
break;
case 0x27e: /* movq xmm, ea */
if (mod != 3) {
} else {
}
break;
case 0x7f: /* movq ea, mm */
if (mod != 3) {
} else {
}
break;
case 0x011: /* movups */
case 0x111: /* movupd */
case 0x029: /* movaps */
case 0x129: /* movapd */
case 0x17f: /* movdqa ea, xmm */
case 0x27f: /* movdqu ea, xmm */
if (mod != 3) {
} else {
}
break;
case 0x211: /* movss ea, xmm */
if (mod != 3) {
} else {
}
break;
case 0x311: /* movsd ea, xmm */
if (mod != 3) {
} else {
}
break;
case 0x013: /* movlps */
case 0x113: /* movlpd */
if (mod != 3) {
} else {
goto illegal_op;
}
break;
case 0x017: /* movhps */
case 0x117: /* movhpd */
if (mod != 3) {
} else {
goto illegal_op;
}
break;
case 0x71: /* shift mm, im */
case 0x72:
case 0x73:
case 0x171: /* shift xmm, im */
case 0x172:
case 0x173:
if (is_xmm) {
} else {
}
if (!sse_op2)
goto illegal_op;
if (is_xmm) {
} else {
}
break;
case 0x050: /* movmskps */
break;
case 0x150: /* movmskpd */
break;
case 0x02a: /* cvtpi2ps */
case 0x12a: /* cvtpi2pd */
if (mod != 3) {
} else {
}
switch(b >> 8) {
case 0x0:
break;
default:
case 0x1:
break;
}
break;
case 0x22a: /* cvtsi2ss */
case 0x32a: /* cvtsi2sd */
} else {
}
break;
case 0x02c: /* cvttps2pi */
case 0x12c: /* cvttpd2pi */
case 0x02d: /* cvtps2pi */
case 0x12d: /* cvtpd2pi */
if (mod != 3) {
} else {
}
switch(b) {
case 0x02c:
break;
case 0x12c:
break;
case 0x02d:
break;
case 0x12d:
break;
}
break;
case 0x22c: /* cvttss2si */
case 0x32c: /* cvttsd2si */
case 0x22d: /* cvtss2si */
case 0x32d: /* cvtsd2si */
if (mod != 3) {
if ((b >> 8) & 1) {
} else {
}
} else {
}
(b & 1) * 4];
} else {
}
break;
case 0xc4: /* pinsrw */
case 0x1c4:
s->rip_offset = 1;
if (b1) {
val &= 7;
} else {
val &= 3;
}
break;
case 0xc5: /* pextrw */
case 0x1c5:
if (mod != 3)
goto illegal_op;
if (b1) {
val &= 7;
} else {
val &= 3;
}
break;
case 0x1d6: /* movq ea, xmm */
if (mod != 3) {
} else {
}
break;
case 0x2d6: /* movq2dq */
break;
case 0x3d6: /* movdq2q */
break;
case 0xd7: /* pmovmskb */
case 0x1d7:
if (mod != 3)
goto illegal_op;
if (b1) {
} else {
}
break;
case 0x138:
if (s->prefix & PREFIX_REPNZ)
goto crc32;
case 0x038:
b = modrm;
if (!sse_op2)
goto illegal_op;
goto illegal_op;
if (b1) {
if (mod == 3) {
} else {
switch (b) {
case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
break;
case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
break;
case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
break;
case 0x2a: /* movntdqa */
return;
default:
}
}
} else {
if (mod == 3) {
} else {
}
}
if (sse_op2 == SSE_SPECIAL)
goto illegal_op;
if (b == 0x17)
s->cc_op = CC_OP_EFLAGS;
break;
case 0x338: /* crc32 */
b = modrm;
if (b != 0xf0 && b != 0xf1)
goto illegal_op;
if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
goto illegal_op;
if (b == 0xf0)
if (s->prefix & PREFIX_DATA)
else
else
break;
case 0x03a:
case 0x13a:
b = modrm;
if (!sse_op2)
goto illegal_op;
goto illegal_op;
if (sse_op2 == SSE_SPECIAL) {
if (mod != 3)
switch (b) {
case 0x14: /* pextrb */
if (mod == 3)
else
break;
case 0x15: /* pextrw */
if (mod == 3)
else
break;
case 0x16:
if (mod == 3)
else
} else { /* pextrq */
if (mod == 3)
else
}
break;
case 0x17: /* extractps */
if (mod == 3)
else
break;
case 0x20: /* pinsrb */
if (mod == 3)
else
break;
case 0x21: /* insertps */
if (mod == 3)
else
if ((val >> 0) & 1)
break;
case 0x22:
if (mod == 3)
else
} else { /* pinsrq */
if (mod == 3)
else
}
break;
}
return;
}
if (b1) {
if (mod == 3) {
} else {
}
} else {
if (mod == 3) {
} else {
}
}
if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
s->cc_op = CC_OP_EFLAGS;
if (s->dflag == 2)
/* The helper must use entire 64-bit gp registers */
}
break;
default:
goto illegal_op;
}
} else {
/* generic MMX or SSE operation */
switch(b) {
case 0x70: /* pshufx insn */
case 0xc6: /* pshufx insn */
case 0xc2: /* compare insns */
s->rip_offset = 1;
break;
default:
break;
}
if (is_xmm) {
if (mod != 3) {
b == 0xc2)) {
/* specific case for SSE single instructions */
if (b1 == 2) {
/* 32 bit access */
} else {
/* 64 bit access */
}
} else {
}
} else {
}
} else {
if (mod != 3) {
} else {
}
}
switch(b) {
case 0x0f: /* 3DNow! data insns */
if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
goto illegal_op;
if (!sse_op2)
goto illegal_op;
break;
case 0x70: /* pshufx insn */
case 0xc6: /* pshufx insn */
break;
case 0xc2:
/* compare insns */
if (val >= 8)
goto illegal_op;
break;
case 0xf7:
/* maskmov : we must prepare A0 */
if (mod != 3)
goto illegal_op;
#ifdef TARGET_X86_64
if (s->aflag == 2) {
} else
#endif
{
if (s->aflag == 0)
}
break;
default:
break;
}
if (b == 0x2e || b == 0x2f) {
s->cc_op = CC_OP_EFLAGS;
}
}
}
#ifdef VBOX
/* Checks if it's an invalid lock sequence. Only a few instructions
can be used together with the lock prefix, and of those only the
forms that write a memory operand. So, this is kind of annoying
work to do...
The AMD manual lists the following instructions.
ADC
ADD
AND
BTC
BTR
BTS
CMPXCHG
CMPXCHG8B
CMPXCHG16B
DEC
INC
NEG
NOT
OR
SBB
SUB
XADD
XCHG
XOR */
{
/* X={8,16,32,64} Y={16,32,64} */
switch (b)
{
case 0x80:
case 0x81:
case 0x83:
break;
break;
return false;
break;
return false;
case 0xfe:
case 0xff:
break;
return false;
case 0xf6:
case 0xf7:
break;
return false;
case 0x0f:
switch (b)
{
case 0xba:
if (op < 5)
break;
break;
return false;
break;
return false;
/* /1: CMPXCHG8B mem64 or CMPXCHG16B mem128 */
case 0xc7:
if (op != 1)
break;
return false;
}
break;
}
/* illegal sequence. The s->pc is past the lock prefix and that
is sufficient for the TB, I think. */
return true;
}
#endif /* VBOX */
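/* Illustrative sketch (kept disabled): a condensed standalone version of
 * the rule is_invalid_lock_sequence enforces -- LOCK is legal only on
 * the read-modify-write instructions listed above and only for their
 * memory-operand forms (ModRM mod != 3). Opcode coverage here is
 * deliberately abbreviated: */
#if 0 /* illustrative only */
#include <stdbool.h>
#include <stdint.h>

static bool lock_prefix_ok(uint8_t op, uint8_t modrm)
{
    if (((modrm >> 6) & 3) == 3)
        return false;              /* register destination: never lockable */
    switch (op) {
    case 0x00: case 0x01:          /* add Eb,Gb / add Ev,Gv */
    case 0x08: case 0x09:          /* or                    */
    case 0x10: case 0x11:          /* adc                   */
    case 0x18: case 0x19:          /* sbb                   */
    case 0x20: case 0x21:          /* and                   */
    case 0x28: case 0x29:          /* sub                   */
    case 0x30: case 0x31:          /* xor                   */
    case 0x86: case 0x87:          /* xchg (lock implicit)  */
        return true;
    default:
        return false;              /* groups 0x80..0x83, 0xf6/0xf7,
                                      0xfe/0xff and the 0x0f map also
                                      need the ModRM.reg check done in
                                      the real function */
    }
}
#endif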
/* convert one instruction. s->is_jmp is set if the translation must
be stopped. Return the next pc value */
{
prefixes = 0;
s->override = -1;
rex_w = -1;
rex_r = 0;
#ifdef TARGET_X86_64
s->rex_x = 0;
s->rex_b = 0;
x86_64_hregs = 0;
#endif
s->rip_offset = 0; /* for relative ip address */
#ifdef VBOX
/* nike: seems to only slow things down */
# if 0
/* Always update EIP. Otherwise one must be very careful with generated code that can raise exceptions. */
# endif
#endif
s->pc++;
/* check prefixes */
#ifdef TARGET_X86_64
if (CODE64(s)) {
switch (b) {
case 0xf3:
prefixes |= PREFIX_REPZ;
goto next_byte;
case 0xf2:
prefixes |= PREFIX_REPNZ;
goto next_byte;
case 0xf0:
prefixes |= PREFIX_LOCK;
goto next_byte;
case 0x2e:
goto next_byte;
case 0x36:
goto next_byte;
case 0x3e:
goto next_byte;
case 0x26:
goto next_byte;
case 0x64:
goto next_byte;
case 0x65:
goto next_byte;
case 0x66:
prefixes |= PREFIX_DATA;
goto next_byte;
case 0x67:
prefixes |= PREFIX_ADR;
goto next_byte;
case 0x40 ... 0x4f:
/* REX prefix */
goto next_byte;
}
if (rex_w == 1) {
/* 0x66 is ignored if rex.w is set */
dflag = 2;
} else {
if (prefixes & PREFIX_DATA)
dflag ^= 1;
}
if (!(prefixes & PREFIX_ADR))
aflag = 2;
} else
#endif
{
switch (b) {
case 0xf3:
prefixes |= PREFIX_REPZ;
goto next_byte;
case 0xf2:
prefixes |= PREFIX_REPNZ;
goto next_byte;
case 0xf0:
prefixes |= PREFIX_LOCK;
goto next_byte;
case 0x2e:
goto next_byte;
case 0x36:
goto next_byte;
case 0x3e:
goto next_byte;
case 0x26:
goto next_byte;
case 0x64:
goto next_byte;
case 0x65:
goto next_byte;
case 0x66:
prefixes |= PREFIX_DATA;
goto next_byte;
case 0x67:
prefixes |= PREFIX_ADR;
goto next_byte;
}
if (prefixes & PREFIX_DATA)
dflag ^= 1;
if (prefixes & PREFIX_ADR)
aflag ^= 1;
}
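/* Illustrative sketch (kept disabled): a standalone version of the
 * 32-bit prefix loop above -- prefix bytes are consumed one at a time
 * into a bitmask, 0x66/0x67 later toggle the default operand/address
 * size, and the first non-prefix byte is the opcode (the 64-bit path
 * additionally consumes 0x40..0x4f REX bytes): */
#if 0 /* illustrative only */
#include <stdint.h>

#define PFX_REPZ  0x01
#define PFX_REPNZ 0x02
#define PFX_LOCK  0x04
#define PFX_DATA  0x08
#define PFX_ADR   0x10

static const uint8_t *scan_prefixes(const uint8_t *p, int *prefixes)
{
    *prefixes = 0;
    for (;;) {
        switch (*p) {
        case 0xf3: *prefixes |= PFX_REPZ;  break;
        case 0xf2: *prefixes |= PFX_REPNZ; break;
        case 0xf0: *prefixes |= PFX_LOCK;  break;
        case 0x66: *prefixes |= PFX_DATA;  break;
        case 0x67: *prefixes |= PFX_ADR;   break;
        case 0x2e: case 0x36: case 0x3e:   /* CS SS DS */
        case 0x26: case 0x64: case 0x65:   /* ES FS GS: the translator
                                              records these in
                                              s->override instead */
            break;
        default:
            return p;                      /* opcode byte */
        }
        p++;
    }
}
#endif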
/* lock generation */
#ifndef VBOX
if (prefixes & PREFIX_LOCK)
#else /* VBOX */
if (prefixes & PREFIX_LOCK) {
if (is_invalid_lock_sequence(s, pc_start, b)) {
return s->pc;
}
}
#endif /* VBOX */
/* now check op code */
switch(b) {
case 0x0f:
/**************************/
/* extended op code */
goto reswitch;
/**************************/
/* arith & logic */
case 0x00 ... 0x05:
case 0x08 ... 0x0d:
case 0x10 ... 0x15:
case 0x18 ... 0x1d:
case 0x20 ... 0x25:
case 0x28 ... 0x2d:
case 0x30 ... 0x35:
case 0x38 ... 0x3d:
{
f = (b >> 1) & 3;
if ((b & 1) == 0)
else
switch(f) {
case 0: /* OP Ev, Gv */
if (mod != 3) {
/* xor reg, reg optimisation */
break;
} else {
}
break;
case 1: /* OP Gv, Ev */
if (mod != 3) {
goto xor_zero;
} else {
}
break;
case 2: /* OP A, Iv */
break;
}
}
break;
case 0x82:
if (CODE64(s))
goto illegal_op;
case 0x80: /* GRP1 */
case 0x81:
case 0x83:
{
int val;
if ((b & 1) == 0)
else
if (mod != 3) {
if (b == 0x83)
s->rip_offset = 1;
else
} else {
}
switch(b) {
default:
case 0x80:
case 0x81:
case 0x82:
break;
case 0x83:
break;
}
}
break;
/**************************/
/* inc, dec, and other misc arith */
case 0x40 ... 0x47: /* inc Gv */
break;
case 0x48 ... 0x4f: /* dec Gv */
break;
case 0xf6: /* GRP3 */
case 0xf7:
if ((b & 1) == 0)
else
if (mod != 3) {
if (op == 0)
} else {
}
switch(op) {
case 0: /* test */
break;
case 2: /* not */
if (mod != 3) {
} else {
}
break;
case 3: /* neg */
if (mod != 3) {
} else {
}
break;
case 4: /* mul */
switch(ot) {
case OT_BYTE:
/* XXX: use 32 bit mul which could be faster */
s->cc_op = CC_OP_MULB;
break;
case OT_WORD:
/* XXX: use 32 bit mul which could be faster */
s->cc_op = CC_OP_MULW;
break;
default:
case OT_LONG:
#ifdef TARGET_X86_64
#else
{
}
#endif
s->cc_op = CC_OP_MULL;
break;
#ifdef TARGET_X86_64
case OT_QUAD:
s->cc_op = CC_OP_MULQ;
break;
#endif
}
break;
case 5: /* imul */
switch(ot) {
case OT_BYTE:
/* XXX: use 32 bit mul which could be faster */
s->cc_op = CC_OP_MULB;
break;
case OT_WORD:
/* XXX: use 32 bit mul which could be faster */
s->cc_op = CC_OP_MULW;
break;
default:
case OT_LONG:
#ifdef TARGET_X86_64
#else
{
}
#endif
s->cc_op = CC_OP_MULL;
break;
#ifdef TARGET_X86_64
case OT_QUAD:
s->cc_op = CC_OP_MULQ;
break;
#endif
}
break;
case 6: /* div */
switch(ot) {
case OT_BYTE:
break;
case OT_WORD:
break;
default:
case OT_LONG:
break;
#ifdef TARGET_X86_64
case OT_QUAD:
break;
#endif
}
break;
case 7: /* idiv */
switch(ot) {
case OT_BYTE:
break;
case OT_WORD:
break;
default:
case OT_LONG:
break;
#ifdef TARGET_X86_64
case OT_QUAD:
break;
#endif
}
break;
default:
goto illegal_op;
}
break;
case 0xfe: /* GRP4 */
case 0xff: /* GRP5 */
if ((b & 1) == 0)
else
goto illegal_op;
}
if (CODE64(s)) {
/* operand size for jumps is 64 bit */
/* for far calls/jumps, the operand is 16 or 32 bit, even
in long mode */
} else if (op == 6) {
/* default push size is 64 bit */
}
}
if (mod != 3) {
} else {
}
switch(op) {
case 0: /* inc Ev */
if (mod != 3)
else
break;
case 1: /* dec Ev */
if (mod != 3)
else
break;
case 2: /* call Ev */
/* XXX: optimize if memory (no 'and' is necessary) */
#ifdef VBOX_WITH_CALL_RECORD
if (s->record_call)
#endif
if (s->dflag == 0)
gen_push_T1(s);
gen_eob(s);
break;
case 3: /* lcall Ev */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
} else {
}
gen_eob(s);
break;
case 4: /* jmp Ev */
if (s->dflag == 0)
gen_eob(s);
break;
case 5: /* ljmp Ev */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
cpu_T[1],
} else {
}
gen_eob(s);
break;
case 6: /* push Ev */
gen_push_T0(s);
break;
default:
goto illegal_op;
}
break;
case 0x84: /* test Ev, Gv */
case 0x85:
if ((b & 1) == 0)
else
break;
case 0xa8: /* test eAX, Iv */
case 0xa9:
if ((b & 1) == 0)
else
break;
#ifdef TARGET_X86_64
if (dflag == 2) {
} else
#endif
if (dflag == 1) {
} else {
}
break;
#ifdef TARGET_X86_64
if (dflag == 2) {
} else
#endif
if (dflag == 1) {
} else {
}
break;
case 0x1af: /* imul Gv, Ev */
case 0x69: /* imul Gv, Ev, I */
case 0x6b:
if (b == 0x69)
else if (b == 0x6b)
s->rip_offset = 1;
if (b == 0x69) {
} else if (b == 0x6b) {
} else {
}
#ifdef TARGET_X86_64
} else
#endif
#ifdef TARGET_X86_64
#else
{
}
#endif
} else {
/* XXX: use 32 bit mul which could be faster */
}
break;
case 0x1c0:
case 0x1c1: /* xadd Ev, Gv */
if ((b & 1) == 0)
else
if (mod == 3) {
} else {
}
break;
case 0x1b0:
case 0x1b1: /* cmpxchg Ev, Gv */
{
if ((b & 1) == 0)
else
if (mod == 3) {
} else {
rm = 0; /* avoid warning */
}
label1 = gen_new_label();
if (mod == 3) {
label2 = gen_new_label();
} else {
/* always store */
}
}
break;
case 0x1c7: /* cmpxchg8b */
goto illegal_op;
#ifdef TARGET_X86_64
if (dflag == 2) {
if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
goto illegal_op;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
} else
#endif
{
if (!(s->cpuid_features & CPUID_CX8))
goto illegal_op;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
}
s->cc_op = CC_OP_EFLAGS;
break;
/**************************/
case 0x50 ... 0x57: /* push */
gen_push_T0(s);
break;
case 0x58 ... 0x5f: /* pop */
if (CODE64(s)) {
} else {
}
gen_pop_T0(s);
/* NOTE: order is important for pop %sp */
gen_pop_update(s);
break;
case 0x60: /* pusha */
if (CODE64(s))
goto illegal_op;
gen_pusha(s);
break;
case 0x61: /* popa */
if (CODE64(s))
goto illegal_op;
gen_popa(s);
break;
case 0x68: /* push Iv */
case 0x6a:
if (CODE64(s)) {
} else {
}
if (b == 0x68)
else
gen_push_T0(s);
break;
case 0x8f: /* pop Ev */
if (CODE64(s)) {
} else {
}
gen_pop_T0(s);
if (mod == 3) {
/* NOTE: order is important for pop %sp */
gen_pop_update(s);
} else {
/* NOTE: order is important too for MMU exceptions */
s->popl_esp_hack = 0;
gen_pop_update(s);
}
break;
case 0xc8: /* enter */
{
int level;
s->pc += 2;
}
break;
case 0xc9: /* leave */
/* XXX: exception not precise (ESP is updated before potential exception) */
if (CODE64(s)) {
} else if (s->ss32) {
} else {
}
gen_pop_T0(s);
if (CODE64(s)) {
} else {
}
gen_pop_update(s);
break;
case 0x06: /* push es */
case 0x0e: /* push cs */
case 0x16: /* push ss */
case 0x1e: /* push ds */
if (CODE64(s))
goto illegal_op;
gen_op_movl_T0_seg(b >> 3);
gen_push_T0(s);
break;
case 0x1a0: /* push fs */
case 0x1a8: /* push gs */
gen_push_T0(s);
break;
case 0x07: /* pop es */
case 0x17: /* pop ss */
case 0x1f: /* pop ds */
if (CODE64(s))
goto illegal_op;
reg = b >> 3;
gen_pop_T0(s);
gen_pop_update(s);
/* if reg == SS, inhibit interrupts/trace. */
/* If several instructions disable interrupts, only the
_first_ does it */
s->tf = 0;
}
if (s->is_jmp) {
gen_eob(s);
}
break;
case 0x1a1: /* pop fs */
case 0x1a9: /* pop gs */
gen_pop_T0(s);
gen_pop_update(s);
if (s->is_jmp) {
gen_eob(s);
}
break;
/**************************/
/* mov */
case 0x88:
case 0x89: /* mov Gv, Ev */
if ((b & 1) == 0)
else
/* generate a generic store */
break;
case 0xc6:
case 0xc7: /* mov Ev, Iv */
if ((b & 1) == 0)
else
if (mod != 3) {
}
if (mod != 3)
else
break;
case 0x8a:
case 0x8b: /* mov Ev, Gv */
#ifdef VBOX /* dtrace hot fix */
if (prefixes & PREFIX_LOCK)
goto illegal_op;
#endif
if ((b & 1) == 0)
else
break;
case 0x8e: /* mov seg, Gv */
goto illegal_op;
/* if reg == SS, inhibit interrupts/trace */
/* If several instructions disable interrupts, only the
_first_ does it */
s->tf = 0;
}
if (s->is_jmp) {
gen_eob(s);
}
break;
case 0x8c: /* mov Gv, seg */
if (reg >= 6)
goto illegal_op;
if (mod == 3)
else
break;
case 0x1b6: /* movzbS Gv, Eb */
case 0x1b7: /* movzwS Gv, Eb */
case 0x1be: /* movsbS Gv, Eb */
case 0x1bf: /* movswS Gv, Eb */
{
int d_ot;
/* d_ot is the size of destination */
/* ot is the size of source */
if (mod == 3) {
switch(ot | (b & 8)) {
case OT_BYTE:
break;
case OT_BYTE | 8:
break;
case OT_WORD:
break;
default:
case OT_WORD | 8:
break;
}
} else {
if (b & 8) {
} else {
}
}
}
break;
case 0x8d: /* lea */
if (mod == 3)
goto illegal_op;
/* we must ensure that no segment is added */
s->override = -1;
s->addseg = 0;
break;
case 0xa0: /* mov EAX, Ov */
case 0xa1:
case 0xa2: /* mov Ov, EAX */
case 0xa3:
{
if ((b & 1) == 0)
else
#ifdef TARGET_X86_64
if (s->aflag == 2) {
s->pc += 8;
} else
#endif
{
if (s->aflag) {
} else {
}
}
if ((b & 2) == 0) {
} else {
}
}
break;
case 0xd7: /* xlat */
#ifdef TARGET_X86_64
if (s->aflag == 2) {
} else
#endif
{
if (s->aflag == 0)
else
}
break;
case 0xb0 ... 0xb7: /* mov R, Ib */
break;
case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
if (dflag == 2) {
/* 64 bit case */
s->pc += 8;
} else
#endif
{
}
break;
case 0x91 ... 0x97: /* xchg R, EAX */
goto do_xchg_reg;
case 0x86:
case 0x87: /* xchg Ev, Gv */
if ((b & 1) == 0)
else
if (mod == 3) {
} else {
/* for xchg, lock is implicit */
if (!(prefixes & PREFIX_LOCK))
if (!(prefixes & PREFIX_LOCK))
}
break;
case 0xc4: /* les Gv */
if (CODE64(s))
goto illegal_op;
goto do_lxx;
case 0xc5: /* lds Gv */
if (CODE64(s))
goto illegal_op;
goto do_lxx;
case 0x1b2: /* lss Gv */
goto do_lxx;
case 0x1b4: /* lfs Gv */
goto do_lxx;
case 0x1b5: /* lgs Gv */
if (mod == 3)
goto illegal_op;
/* load the segment first to handle exceptions properly */
/* then put the data */
if (s->is_jmp) {
gen_eob(s);
}
break;
/************************/
/* shifts */
case 0xc0:
case 0xc1:
/* shift Ev,Ib */
shift = 2;
grp2:
{
if ((b & 1) == 0)
else
if (mod != 3) {
if (shift == 2) {
s->rip_offset = 1;
}
} else {
}
/* simpler op */
if (shift == 0) {
} else {
if (shift == 2) {
}
}
}
break;
case 0xd0:
case 0xd1:
/* shift Ev,1 */
shift = 1;
goto grp2;
case 0xd2:
case 0xd3:
/* shift Ev,cl */
shift = 0;
goto grp2;
case 0x1a4: /* shld imm */
op = 0;
shift = 1;
goto do_shiftd;
case 0x1a5: /* shld cl */
op = 0;
shift = 0;
goto do_shiftd;
case 0x1ac: /* shrd imm */
op = 1;
shift = 1;
goto do_shiftd;
case 0x1ad: /* shrd cl */
op = 1;
shift = 0;
if (mod != 3) {
} else {
}
if (shift) {
} else {
}
break;
/************************/
/* floats */
case 0xd8 ... 0xdf:
/* if CR0.EM or CR0.TS are set, generate an FPU exception */
/* XXX: what to do if illegal op ? */
break;
}
if (mod != 3) {
/* memory op */
switch(op) {
case 0x00 ... 0x07: /* fxxxs */
case 0x10 ... 0x17: /* fixxxl */
case 0x20 ... 0x27: /* fxxxl */
case 0x30 ... 0x37: /* fixxx */
{
int op1;
switch(op >> 4) {
case 0:
break;
case 1:
break;
case 2:
break;
case 3:
default:
break;
}
if (op1 == 3) {
/* fcomp needs pop */
}
}
break;
case 0x08: /* flds */
case 0x0a: /* fsts */
case 0x0b: /* fstps */
case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
switch(op & 7) {
case 0:
switch(op >> 4) {
case 0:
break;
case 1:
break;
case 2:
break;
case 3:
default:
break;
}
break;
case 1:
/* XXX: the corresponding CPUID bit must be tested! */
switch(op >> 4) {
case 1:
break;
case 2:
break;
case 3:
default:
break;
}
break;
default:
switch(op >> 4) {
case 0:
break;
case 1:
break;
case 2:
break;
case 3:
default:
break;
}
break;
}
break;
case 0x0c: /* fldenv mem */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
break;
case 0x0d: /* fldcw mem */
break;
case 0x0e: /* fnstenv mem */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
break;
case 0x0f: /* fnstcw mem */
break;
case 0x1d: /* fldt mem */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
break;
case 0x1f: /* fstpt mem */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
break;
case 0x2c: /* frstor mem */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
break;
case 0x2e: /* fnsave mem */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
break;
case 0x2f: /* fnstsw mem */
break;
case 0x3c: /* fbld */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
break;
case 0x3e: /* fbstp */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
break;
case 0x3d: /* fildll */
break;
case 0x3f: /* fistpll */
break;
default:
goto illegal_op;
}
} else {
/* register float ops */
switch(op) {
case 0x08: /* fld sti */
break;
case 0x09: /* fxchg sti */
case 0x29: /* fxchg4 sti, undocumented op */
case 0x39: /* fxchg7 sti, undocumented op */
break;
case 0x0a: /* grp d9/2 */
switch(rm) {
case 0: /* fnop */
/* check exceptions (FreeBSD FPU probe) */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
break;
default:
goto illegal_op;
}
break;
case 0x0c: /* grp d9/4 */
switch(rm) {
case 0: /* fchs */
break;
case 1: /* fabs */
break;
case 4: /* ftst */
break;
case 5: /* fxam */
break;
default:
goto illegal_op;
}
break;
case 0x0d: /* grp d9/5 */
{
switch(rm) {
case 0:
break;
case 1:
break;
case 2:
break;
case 3:
break;
case 4:
break;
case 5:
break;
case 6:
break;
default:
goto illegal_op;
}
}
break;
case 0x0e: /* grp d9/6 */
switch(rm) {
case 0: /* f2xm1 */
break;
case 1: /* fyl2x */
break;
case 2: /* fptan */
break;
case 3: /* fpatan */
break;
case 4: /* fxtract */
break;
case 5: /* fprem1 */
break;
case 6: /* fdecstp */
break;
default:
case 7: /* fincstp */
break;
}
break;
case 0x0f: /* grp d9/7 */
switch(rm) {
case 0: /* fprem */
break;
case 1: /* fyl2xp1 */
break;
case 2: /* fsqrt */
break;
case 3: /* fsincos */
break;
case 5: /* fscale */
break;
case 4: /* frndint */
break;
case 6: /* fsin */
break;
default:
case 7: /* fcos */
break;
}
break;
case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
{
int op1;
if (op >= 0x20) {
if (op >= 0x30)
} else {
}
}
break;
case 0x02: /* fcom */
case 0x22: /* fcom2, undocumented op */
break;
case 0x03: /* fcomp */
case 0x23: /* fcomp3, undocumented op */
case 0x32: /* fcomp5, undocumented op */
break;
case 0x15: /* da/5 */
switch(rm) {
case 1: /* fucompp */
break;
default:
goto illegal_op;
}
break;
case 0x1c:
switch(rm) {
case 0: /* feni (287 only, just do nop here) */
break;
case 1: /* fdisi (287 only, just do nop here) */
break;
case 2: /* fclex */
break;
case 3: /* fninit */
break;
case 4: /* fsetpm (287 only, just do nop here) */
break;
default:
goto illegal_op;
}
break;
case 0x1d: /* fucomi */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
s->cc_op = CC_OP_EFLAGS;
break;
case 0x1e: /* fcomi */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
s->cc_op = CC_OP_EFLAGS;
break;
case 0x28: /* ffree sti */
break;
case 0x2a: /* fst sti */
break;
case 0x2b: /* fstp sti */
case 0x0b: /* fstp1 sti, undocumented op */
case 0x3a: /* fstp8 sti, undocumented op */
case 0x3b: /* fstp9 sti, undocumented op */
break;
case 0x2c: /* fucom st(i) */
break;
case 0x2d: /* fucomp st(i) */
break;
case 0x33: /* de/3 */
switch(rm) {
case 1: /* fcompp */
break;
default:
goto illegal_op;
}
break;
case 0x38: /* ffreep sti, undocumented op */
break;
case 0x3c: /* df/4 */
switch(rm) {
case 0:
break;
default:
goto illegal_op;
}
break;
case 0x3d: /* fucomip */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
s->cc_op = CC_OP_EFLAGS;
break;
case 0x3e: /* fcomip */
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
s->cc_op = CC_OP_EFLAGS;
break;
case 0x10 ... 0x13: /* fcmovxx */
case 0x18 ... 0x1b:
{
(JCC_B << 1),
(JCC_Z << 1),
(JCC_BE << 1),
(JCC_P << 1),
};
l1 = gen_new_label();
}
break;
default:
goto illegal_op;
}
}
break;
/************************/
/* string ops */
case 0xa4: /* movsS */
case 0xa5:
if ((b & 1) == 0)
else
} else {
}
break;
case 0xaa: /* stosS */
case 0xab:
if ((b & 1) == 0)
else
} else {
}
break;
case 0xac: /* lodsS */
case 0xad:
if ((b & 1) == 0)
else
} else {
}
break;
case 0xae: /* scasS */
case 0xaf:
if ((b & 1) == 0)
else
if (prefixes & PREFIX_REPNZ) {
} else if (prefixes & PREFIX_REPZ) {
} else {
}
break;
case 0xa6: /* cmpsS */
case 0xa7:
if ((b & 1) == 0)
else
if (prefixes & PREFIX_REPNZ) {
} else if (prefixes & PREFIX_REPZ) {
} else {
}
break;
case 0x6c: /* insS */
case 0x6d:
if ((b & 1) == 0)
else
} else {
if (use_icount) {
}
}
break;
case 0x6e: /* outsS */
case 0x6f:
if ((b & 1) == 0)
else
} else {
if (use_icount) {
}
}
break;
/************************/
/* port I/O */
case 0xe4:
case 0xe5:
if ((b & 1) == 0)
else
if (use_icount)
gen_io_start();
if (use_icount) {
gen_io_end();
}
break;
case 0xe6:
case 0xe7:
if ((b & 1) == 0)
else
#ifdef VBOX /* bird: Linux writes to this port to delay I/O. */
if (val == 0x80)
break;
#endif /* VBOX */
if (use_icount)
gen_io_start();
if (use_icount) {
gen_io_end();
}
break;
case 0xec:
case 0xed:
if ((b & 1) == 0)
else
if (use_icount)
gen_io_start();
if (use_icount) {
gen_io_end();
}
break;
case 0xee:
case 0xef:
if ((b & 1) == 0)
else
if (use_icount)
gen_io_start();
if (use_icount) {
gen_io_end();
}
break;
/************************/
/* control */
case 0xc2: /* ret im */
s->pc += 2;
gen_pop_T0(s);
s->dflag = 2;
if (s->dflag == 0)
gen_eob(s);
break;
case 0xc3: /* ret */
gen_pop_T0(s);
gen_pop_update(s);
if (s->dflag == 0)
gen_eob(s);
break;
case 0xca: /* lret im */
s->pc += 2;
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
tcg_const_i32(s->dflag),
tcg_const_i32(val));
} else {
gen_stack_A0(s);
/* pop offset */
if (s->dflag == 0)
/* NOTE: keeping EIP updated is not a problem in case of
exception */
/* pop selector */
/* add stack offset */
}
gen_eob(s);
break;
case 0xcb: /* lret */
val = 0;
goto do_lret;
case 0xcf: /* iret */
if (!s->pe) {
/* real mode */
s->cc_op = CC_OP_EFLAGS;
} else if (s->vm86) {
#ifdef VBOX
#else
if (s->iopl != 3) {
#endif
} else {
s->cc_op = CC_OP_EFLAGS;
}
} else {
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
tcg_const_i32(s->dflag),
s->cc_op = CC_OP_EFLAGS;
}
gen_eob(s);
break;
case 0xe8: /* call im */
{
if (dflag)
else
if (s->dflag == 0)
tval &= 0xffff;
else if (!CODE64(s))
tval &= 0xffffffff;
gen_push_T0(s);
}
break;
case 0x9a: /* lcall im */
{
if (CODE64(s))
goto illegal_op;
}
goto do_lcall;
case 0xe9: /* jmp im */
if (dflag)
else
if (s->dflag == 0)
tval &= 0xffff;
else if(!CODE64(s))
tval &= 0xffffffff;
break;
case 0xea: /* ljmp im */
{
if (CODE64(s))
goto illegal_op;
}
goto do_ljmp;
case 0xeb: /* jmp Jb */
if (s->dflag == 0)
tval &= 0xffff;
break;
case 0x70 ... 0x7f: /* jcc Jb */
goto do_jcc;
case 0x180 ... 0x18f: /* jcc Jv */
if (dflag) {
} else {
}
if (s->dflag == 0)
tval &= 0xffff;
break;
case 0x190 ... 0x19f: /* setcc Gv */
gen_setcc(s, b);
break;
case 0x140 ... 0x14f: /* cmov Gv, Ev */
{
int l1;
if (mod != 3) {
} else {
}
#ifdef TARGET_X86_64
/* XXX: specific Intel behaviour ? */
l1 = gen_new_label();
tcg_gen_movi_tl(cpu_tmp0, 0);
} else
#endif
{
l1 = gen_new_label();
}
}
break;
/************************/
/* flags */
case 0x9c: /* pushf */
#ifdef VBOX
#else
#endif
} else {
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
#ifdef VBOX
else
#endif
gen_push_T0(s);
}
break;
case 0x9d: /* popf */
#ifdef VBOX
#else
#endif
} else {
gen_pop_T0(s);
if (s->cpl == 0) {
if (s->dflag) {
} else {
}
} else {
if (s->dflag) {
} else {
}
} else {
if (s->dflag) {
} else {
#ifdef VBOX
else
#endif
}
}
}
gen_pop_update(s);
s->cc_op = CC_OP_EFLAGS;
/* abort translation because TF flag may change */
gen_eob(s);
}
break;
case 0x9e: /* sahf */
    if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
        goto illegal_op;
    gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_dst, 0);
    s->cc_op = CC_OP_EFLAGS;
    break;
case 0x9f: /* lahf */
    if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
        goto illegal_op;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_compute_eflags(cpu_T[0]);
    /* Note: gen_compute_eflags() only gives the condition codes */
    tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
    gen_op_mov_reg_T0(OT_BYTE, R_AH);
    break;
case 0xf5: /* cmc */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_compute_eflags(cpu_cc_src);
    tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
    s->cc_op = CC_OP_EFLAGS;
    break;
case 0xf8: /* clc */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
    s->cc_op = CC_OP_EFLAGS;
    break;
case 0xf9: /* stc */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_compute_eflags(cpu_cc_src);
    tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
    s->cc_op = CC_OP_EFLAGS;
    break;
case 0xfc: /* cld */
    tcg_gen_movi_i32(cpu_tmp2_i32, 1);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
    break;
case 0xfd: /* std */
    tcg_gen_movi_i32(cpu_tmp2_i32, -1);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
    break;
/************************/
/* bit operations */
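/* For bt/bts/btr/btc with a memory operand, the register bit offset is
   first folded into the effective address (the operand acts as a bit
   string), then the word containing the bit is loaded, tested and, for the
   writing variants, stored back. */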
case 0x1ba: /* bt/bts/btr/btc Gv, im */
    ot = dflag + OT_WORD;
    modrm = ldub_code(s->pc++);
    op = (modrm >> 3) & 7;
    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod != 3) {
        s->rip_offset = 1;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, rm);
    }
    /* load shift */
    val = ldub_code(s->pc++);
    gen_op_movl_T1_im(val);
    if (op < 4)
        goto illegal_op;
    op -= 4;
    goto bt_op;
case 0x1a3: /* bt Gv, Ev */
    op = 0;
    goto do_btx;
case 0x1ab: /* bts */
    op = 1;
    goto do_btx;
case 0x1b3: /* btr */
    op = 2;
    goto do_btx;
case 0x1bb: /* btc */
    op = 3;
 do_btx:
    ot = dflag + OT_WORD;
    modrm = ldub_code(s->pc++);
    reg = ((modrm >> 3) & 7) | rex_r;
    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    gen_op_mov_TN_reg(OT_LONG, 1, reg);
    if (mod != 3) {
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        /* specific case: we need to add a displacement */
        gen_exts(ot, cpu_T[1]);
        tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, rm);
    }
 bt_op:
    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
    switch(op) {
    case 0: /* bt */
        tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
        tcg_gen_movi_tl(cpu_cc_dst, 0);
        break;
    case 1: /* bts */
        tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
        tcg_gen_movi_tl(cpu_tmp0, 1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        break;
    case 2: /* btr */
        tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
        tcg_gen_movi_tl(cpu_tmp0, 1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
        tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        break;
    default:
    case 3: /* btc */
        tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
        tcg_gen_movi_tl(cpu_tmp0, 1);
        tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        break;
    }
    s->cc_op = CC_OP_SARB + ot;
    if (op != 0) {
        if (mod != 3)
            gen_op_st_T0_A0(ot + s->mem_index);
        else
            gen_op_mov_reg_T0(ot, rm);
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_movi_tl(cpu_cc_dst, 0);
    }
    break;
case 0x1bc: /* bsf */
case 0x1bd: /* bsr */
    {
        int label1;
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T[0]);
        label1 = gen_new_label();
        tcg_gen_movi_tl(cpu_cc_dst, 0);
        tcg_gen_brcond_tl(TCG_COND_EQ, cpu_T[0], tcg_const_tl(0), label1);
        if (b & 1) {
            tcg_gen_helper_1_1(helper_bsr, cpu_T[0], cpu_T[0]);
        } else {
            tcg_gen_helper_1_1(helper_bsf, cpu_T[0], cpu_T[0]);
        }
        gen_op_mov_reg_T0(ot, reg);
        tcg_gen_movi_tl(cpu_cc_dst, 1);
        gen_set_label(label1);
        tcg_gen_discard_tl(cpu_cc_src);
        s->cc_op = CC_OP_LOGICB + ot;
    }
    break;
/************************/
/* bcd */
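/* The ASCII/BCD adjust instructions were removed from the 64-bit
   instruction set, hence the CODE64() check at the top of every case. */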
case 0x27: /* daa */
    if (CODE64(s))
        goto illegal_op;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    tcg_gen_helper_0_0(helper_daa);
    s->cc_op = CC_OP_EFLAGS;
    break;
case 0x2f: /* das */
    if (CODE64(s))
        goto illegal_op;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    tcg_gen_helper_0_0(helper_das);
    s->cc_op = CC_OP_EFLAGS;
    break;
case 0x37: /* aaa */
    if (CODE64(s))
        goto illegal_op;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    tcg_gen_helper_0_0(helper_aaa);
    s->cc_op = CC_OP_EFLAGS;
    break;
case 0x3f: /* aas */
    if (CODE64(s))
        goto illegal_op;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    tcg_gen_helper_0_0(helper_aas);
    s->cc_op = CC_OP_EFLAGS;
    break;
case 0xd4: /* aam */
    if (CODE64(s))
        goto illegal_op;
    val = ldub_code(s->pc++);
    if (val == 0) {
        gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
    } else {
        tcg_gen_helper_0_1(helper_aam, tcg_const_i32(val));
        s->cc_op = CC_OP_LOGICB;
    }
    break;
case 0xd5: /* aad */
    if (CODE64(s))
        goto illegal_op;
    val = ldub_code(s->pc++);
    tcg_gen_helper_0_1(helper_aad, tcg_const_i32(val));
    s->cc_op = CC_OP_LOGICB;
    break;
/************************/
/* misc */
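/* Note that 0x90 is a plain nop only without a REP prefix: with REP it is
   the pause hint (SVM_EXIT_PAUSE intercept), and with REX.B in long mode
   it would be xchg r8d, eax (see the XXX below). */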
case 0x90: /* nop */
    /* XXX: xchg + rex handling */
    /* XXX: correct lock test for all insn */
    if (prefixes & PREFIX_LOCK)
        goto illegal_op;
    if (prefixes & PREFIX_REPZ) {
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
    }
    break;
case 0x9b: /* fwait */
    if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
        (HF_MP_MASK | HF_TS_MASK)) {
        gen_exception(s, EXCP07_PREM, pc_start - s->cs_base);
    } else {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_helper_0_0(helper_fwait);
    }
    break;
case 0xcc: /* int3 */
#ifdef VBOX
    if (s->vm86 && s->iopl != 3 && !s->vme) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else
#endif
        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
    break;
case 0xcd: /* int N */
    val = ldub_code(s->pc++);
#ifdef VBOX
    if (s->vm86 && s->iopl != 3 && !s->vme) {
#else
    if (s->vm86 && s->iopl != 3) {
#endif
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else {
        gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
    }
    break;
case 0xce: /* into */
    if (CODE64(s))
        goto illegal_op;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(pc_start - s->cs_base);
    tcg_gen_helper_0_1(helper_into, tcg_const_i32(s->pc - pc_start));
    break;
case 0xf1: /* icebp (undocumented, exits to external debugger) */
    gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
    gen_debug(s, pc_start - s->cs_base);
#else
    /* start debug */
    tb_flush(cpu_single_env);
    cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
    break;
case 0xfa: /* cli */
    if (!s->vm86) {
        if (s->cpl <= s->iopl) {
            tcg_gen_helper_0_0(helper_cli);
        } else {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        }
    } else {
        if (s->iopl == 3) {
            tcg_gen_helper_0_0(helper_cli);
#ifdef VBOX
        } else if (s->iopl != 3 && s->vme) {
            tcg_gen_helper_0_0(helper_cli_vme);
#endif
        } else {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        }
    }
    break;
case 0xfb: /* sti */
    if (!s->vm86) {
        if (s->cpl <= s->iopl) {
        gen_sti:
            tcg_gen_helper_0_0(helper_sti);
            /* interrupts are enabled only for the first insn after sti */
            /* If several instructions disable interrupts, only the
               _first_ does it */
            if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                tcg_gen_helper_0_0(helper_set_inhibit_irq);
            /* give a chance to handle pending irqs */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        } else {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        }
    } else {
        if (s->iopl == 3) {
            goto gen_sti;
#ifdef VBOX
        } else if (s->iopl != 3 && s->vme) {
            tcg_gen_helper_0_0(helper_sti_vme);
            /* give a chance to handle pending irqs */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
#endif
        } else {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        }
    }
    break;
case 0x62: /* bound */
    if (CODE64(s))
        goto illegal_op;
    ot = dflag ? OT_LONG : OT_WORD;
    modrm = ldub_code(s->pc++);
    reg = (modrm >> 3) & 7;
    mod = (modrm >> 6) & 3;
    if (mod == 3)
        goto illegal_op;
    gen_op_mov_TN_reg(ot, 0, reg);
    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
    gen_jmp_im(pc_start - s->cs_base);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
    if (ot == OT_WORD)
        tcg_gen_helper_0_2(helper_boundw, cpu_A0, cpu_tmp2_i32);
    else
        tcg_gen_helper_0_2(helper_boundl, cpu_A0, cpu_tmp2_i32);
    break;
case 0x1c8 ... 0x1cf: /* bswap reg */
    reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        gen_op_mov_TN_reg(OT_QUAD, 0, reg);
        tcg_gen_bswap_i64(cpu_T[0], cpu_T[0]);
        gen_op_mov_reg_T0(OT_QUAD, reg);
    } else
    {
        TCGv tmp0;
        gen_op_mov_TN_reg(OT_LONG, 0, reg);
        tmp0 = tcg_temp_new(TCG_TYPE_I32);
        tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]);
        tcg_gen_bswap_i32(tmp0, tmp0);
        tcg_gen_extu_i32_i64(cpu_T[0], tmp0);
        gen_op_mov_reg_T0(OT_LONG, reg);
    }
#else
    {
        gen_op_mov_TN_reg(OT_LONG, 0, reg);
        tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]);
        gen_op_mov_reg_T0(OT_LONG, reg);
    }
#endif
    break;
case 0xd6: /* salc */
    if (CODE64(s))
        goto illegal_op;
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_compute_eflags_c(cpu_T[0]);
    tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
    gen_op_mov_reg_T0(OT_BYTE, R_EAX);
    break;
case 0xe0: /* loopnz */
case 0xe1: /* loopz */
case 0xe2: /* loop */
case 0xe3: /* jecxz */
    {
        int l1, l2, l3;

        tval = (int8_t)insn_get(s, OT_BYTE);
        next_eip = s->pc - s->cs_base;
        tval += next_eip;
        if (s->dflag == 0)
            tval &= 0xffff;

        /* l1 = branch taken, l3 = branch not taken (fall through),
           l2 = common exit point */
        l1 = gen_new_label();
        l2 = gen_new_label();
        l3 = gen_new_label();
        b &= 3;
        switch(b) {
        case 0: /* loopnz */
        case 1: /* loopz */
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_op_add_reg_im(s->aflag, R_ECX, -1);
            gen_op_jz_ecx(s->aflag, l3);
            gen_compute_eflags(cpu_tmp0);
            tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
            if (b == 0) {
                tcg_gen_brcond_tl(TCG_COND_EQ, cpu_tmp0, tcg_const_tl(0), l1);
            } else {
                tcg_gen_brcond_tl(TCG_COND_NE, cpu_tmp0, tcg_const_tl(0), l1);
            }
            break;
        case 2: /* loop */
            gen_op_add_reg_im(s->aflag, R_ECX, -1);
            gen_op_jnz_ecx(s->aflag, l1);
            break;
        default:
        case 3: /* jcxz */
            gen_op_jz_ecx(s->aflag, l1);
            break;
        }

        gen_set_label(l3);
        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(tval);
        gen_set_label(l2);
        gen_eob(s);
    }
    break;
case 0x130: /* wrmsr */
case 0x132: /* rdmsr */
    if (s->cpl != 0) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        if (b & 2) {
            tcg_gen_helper_0_0(helper_rdmsr);
        } else {
            tcg_gen_helper_0_0(helper_wrmsr);
        }
    }
    break;
case 0x131: /* rdtsc */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(pc_start - s->cs_base);
    if (use_icount)
        gen_io_start();
    tcg_gen_helper_0_0(helper_rdtsc);
    if (use_icount) {
        gen_io_end();
        gen_jmp(s, s->pc - s->cs_base);
    }
    break;
case 0x133: /* rdpmc */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(pc_start - s->cs_base);
    tcg_gen_helper_0_0(helper_rdpmc);
    break;
case 0x134: /* sysenter */
#ifndef VBOX
    /* On Intel CPUs, SYSENTER is also valid in 64-bit mode */
    if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
#else
    /** @todo: make things right */
    if (CODE64(s))
#endif
        goto illegal_op;
    if (!s->pe) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else {
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_helper_0_0(helper_sysenter);
        gen_eob(s);
    }
    break;
case 0x135: /* sysexit */
#ifndef VBOX
    /* On Intel CPUs, SYSEXIT is also valid in 64-bit mode */
    if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
#else
    /** @todo: make things right */
    if (CODE64(s))
#endif
        goto illegal_op;
    if (!s->pe) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else {
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_helper_0_0(helper_sysexit);
        gen_eob(s);
    }
    break;
#ifdef TARGET_X86_64
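/* syscall/sysret are only compiled in for TARGET_X86_64 builds; 32-bit
   guests use the sysenter/sysexit pair handled above. */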
case 0x105: /* syscall */
    /* XXX: is it usable in real mode ? */
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
    gen_jmp_im(pc_start - s->cs_base);
    tcg_gen_helper_0_1(helper_syscall, tcg_const_i32(s->pc - pc_start));
    gen_eob(s);
    break;
case 0x107: /* sysret */
    if (!s->pe) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else {
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_helper_0_1(helper_sysret, tcg_const_i32(s->dflag));
        /* condition codes are modified only in long mode */
        if (s->lma)
            s->cc_op = CC_OP_EFLAGS;
        gen_eob(s);
    }
    break;
#endif
case 0x1a2: /* cpuid */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(pc_start - s->cs_base);
    tcg_gen_helper_0_0(helper_cpuid);
    break;
case 0xf4: /* hlt */
    if (s->cpl != 0) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_helper_0_1(helper_hlt, tcg_const_i32(s->pc - pc_start));
        s->is_jmp = 3;
    }
break;
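/* Group 0x0f 0x00: the operation is selected by the reg field of the modrm
   byte (0=sldt, 1=str, 2=lldt, 3=ltr, 4=verr, 5=verw); all of them require
   protected mode outside vm86. */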
case 0x100:
    modrm = ldub_code(s->pc++);
    mod = (modrm >> 6) & 3;
    op = (modrm >> 3) & 7;
    switch(op) {
    case 0: /* sldt */
        if (!s->pe || s->vm86)
            goto illegal_op;
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
        tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, ldt.selector));
        ot = OT_WORD;
        if (mod == 3)
            ot += s->dflag;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
        break;
    case 2: /* lldt */
        if (!s->pe || s->vm86)
            goto illegal_op;
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            gen_jmp_im(pc_start - s->cs_base);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            tcg_gen_helper_0_1(helper_lldt, cpu_tmp2_i32);
        }
        break;
    case 1: /* str */
        if (!s->pe || s->vm86)
            goto illegal_op;
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
        tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, tr.selector));
        ot = OT_WORD;
        if (mod == 3)
            ot += s->dflag;
        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
        break;
    case 3: /* ltr */
        if (!s->pe || s->vm86)
            goto illegal_op;
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            gen_jmp_im(pc_start - s->cs_base);
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            tcg_gen_helper_0_1(helper_ltr, cpu_tmp2_i32);
        }
        break;
    case 4: /* verr */
    case 5: /* verw */
        if (!s->pe || s->vm86)
            goto illegal_op;
        gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        if (op == 4)
            tcg_gen_helper_0_1(helper_verr, cpu_T[0]);
        else
            tcg_gen_helper_0_1(helper_verw, cpu_T[0]);
        s->cc_op = CC_OP_EFLAGS;
        break;
    default:
        goto illegal_op;
    }
break;
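/* Group 0x0f 0x01: descriptor-table and system management instructions,
   again selected by the modrm reg field (0=sgdt, 1=sidt or monitor/mwait,
   2=lgdt, 3=lidt or the SVM instructions when mod == 3, 4=smsw, 6=lmsw,
   7=invlpg or swapgs). */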
case 0x101:
    modrm = ldub_code(s->pc++);
#ifdef VBOX
    /* 0f 01 f9 */
    if (modrm == 0xf9)
    {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
            goto illegal_op;
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_helper_0_0(helper_rdtscp);
        break;
    }
#endif
    mod = (modrm >> 6) & 3;
    op = (modrm >> 3) & 7;
    rm = modrm & 7;
    switch(op) {
    case 0: /* sgdt */
        if (mod == 3)
            goto illegal_op;
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
        gen_op_st_T0_A0(OT_WORD + s->mem_index);
        gen_add_A0_im(s, 2);
        tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
        if (!s->dflag)
            gen_op_andl_T0_im(0xffffff);
        gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
        break;
    case 1:
        if (mod == 3) {
            switch (rm) {
            case 0: /* monitor */
                if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                    s->cpl != 0)
                    goto illegal_op;
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
#ifdef TARGET_X86_64
                if (s->aflag == 2) {
                    gen_op_movq_A0_reg(R_EAX);
                } else
#endif
                {
                    gen_op_movl_A0_reg(R_EAX);
                    if (s->aflag == 0)
                        gen_op_andl_A0_ffff();
                }
                gen_add_A0_ds_seg(s);
                tcg_gen_helper_0_1(helper_monitor, cpu_A0);
                break;
            case 1: /* mwait */
                if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                    s->cpl != 0)
                    goto illegal_op;
                if (s->cc_op != CC_OP_DYNAMIC) {
                    gen_op_set_cc_op(s->cc_op);
                    s->cc_op = CC_OP_DYNAMIC;
                }
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_helper_0_1(helper_mwait, tcg_const_i32(s->pc - pc_start));
                gen_eob(s);
                break;
            default:
                goto illegal_op;
            }
        } else { /* sidt */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
            if (!s->dflag)
                gen_op_andl_T0_im(0xffffff);
            gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
        }
        break;
    case 2: /* lgdt */
    case 3: /* lidt */
        if (mod == 3) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            switch(rm) {
            case 0: /* VMRUN */
                if (!(s->flags & HF_SVME_MASK) || !s->pe)
                    goto illegal_op;
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                    break;
                } else {
                    tcg_gen_helper_0_2(helper_vmrun,
                                       tcg_const_i32(s->aflag),
                                       tcg_const_i32(s->pc - pc_start));
                    tcg_gen_exit_tb(0);
                    s->is_jmp = 3;
                }
                break;
            case 1: /* VMMCALL */
                if (!(s->flags & HF_SVME_MASK))
                    goto illegal_op;
                tcg_gen_helper_0_0(helper_vmmcall);
                break;
            case 2: /* VMLOAD */
                if (!(s->flags & HF_SVME_MASK) || !s->pe)
                    goto illegal_op;
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                    break;
                } else {
                    tcg_gen_helper_0_1(helper_vmload,
                                       tcg_const_i32(s->aflag));
                }
                break;
            case 3: /* VMSAVE */
                if (!(s->flags & HF_SVME_MASK) || !s->pe)
                    goto illegal_op;
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                    break;
                } else {
                    tcg_gen_helper_0_1(helper_vmsave,
                                       tcg_const_i32(s->aflag));
                }
                break;
            case 4: /* STGI */
                if ((!(s->flags & HF_SVME_MASK) &&
                     !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                    !s->pe)
                    goto illegal_op;
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                    break;
                } else {
                    tcg_gen_helper_0_0(helper_stgi);
                }
                break;
            case 5: /* CLGI */
                if (!(s->flags & HF_SVME_MASK) || !s->pe)
                    goto illegal_op;
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                    break;
                } else {
                    tcg_gen_helper_0_0(helper_clgi);
                }
                break;
            case 6: /* SKINIT */
                if ((!(s->flags & HF_SVME_MASK) &&
                     !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                    !s->pe)
                    goto illegal_op;
                tcg_gen_helper_0_0(helper_skinit);
                break;
            case 7: /* INVLPGA */
                if (!(s->flags & HF_SVME_MASK) || !s->pe)
                    goto illegal_op;
                if (s->cpl != 0) {
                    gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                    break;
                } else {
                    tcg_gen_helper_0_1(helper_invlpga,
                                       tcg_const_i32(s->aflag));
                }
                break;
            default:
                goto illegal_op;
            }
        } else if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start,
                                    op == 2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T1_A0(OT_WORD + s->mem_index);
            gen_add_A0_im(s, 2);
            gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            if (!s->dflag)
                gen_op_andl_T0_im(0xffffff);
            if (op == 2) {
                tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
                tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State, gdt.limit));
            } else {
                tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
                tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State, idt.limit));
            }
        }
        break;
    case 4: /* smsw */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
        tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, cr[0]));
        gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
        break;
    case 6: /* lmsw */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            tcg_gen_helper_0_1(helper_lmsw, cpu_T[0]);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 7: /* invlpg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (mod == 3) {
#ifdef TARGET_X86_64
                if (CODE64(s) && rm == 0) {
                    /* swapgs */
                    tcg_gen_ld_tl(cpu_T[0], cpu_env,
                                  offsetof(CPUX86State, segs[R_GS].base));
                    tcg_gen_ld_tl(cpu_T[1], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(cpu_T[1], cpu_env,
                                  offsetof(CPUX86State, segs[R_GS].base));
                    tcg_gen_st_tl(cpu_T[0], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                } else
#endif
                {
                    goto illegal_op;
                }
            } else {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_helper_0_1(helper_invlpg, cpu_A0);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            }
        }
        break;
default:
goto illegal_op;
}
break;
case 0x108: /* invd */
case 0x109: /* wbinvd */
    if (s->cpl != 0) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else {
        gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
        /* nothing to do */
    }
    break;
case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        int d_ot;
        /* d_ot is the size of destination */
        d_ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod == 3) {
            gen_op_mov_TN_reg(OT_LONG, 0, rm);
            /* sign extend */
            if (d_ot == OT_QUAD)
                tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(d_ot, reg);
        } else {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (d_ot == OT_QUAD) {
                gen_op_lds_T0_A0(OT_LONG + s->mem_index);
            } else {
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
            }
            gen_op_mov_reg_T0(d_ot, reg);
        }
    } else
#endif
    {
        int label1;
        TCGv t0, t1, t2;
#ifdef VBOX
        TCGv a0;
#endif
        if (!s->pe || s->vm86)
            goto illegal_op;
        t0 = tcg_temp_local_new(TCG_TYPE_TL);
        t1 = tcg_temp_local_new(TCG_TYPE_TL);
        t2 = tcg_temp_local_new(TCG_TYPE_TL);
#ifdef VBOX
        a0 = tcg_temp_local_new(TCG_TYPE_TL);
#endif
        ot = OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = modrm & 7;
        if (mod != 3) {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
#ifdef VBOX
            tcg_gen_mov_tl(a0, cpu_A0);
#endif
        } else {
            gen_op_mov_v_reg(ot, t0, rm);
        }
        gen_op_mov_v_reg(ot, t1, reg);
        tcg_gen_andi_tl(cpu_tmp0, t0, 3);
        tcg_gen_andi_tl(t1, t1, 3);
        tcg_gen_movi_tl(t2, 0);
        label1 = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
        tcg_gen_andi_tl(t0, t0, ~3);
        tcg_gen_or_tl(t0, t0, t1);
        tcg_gen_movi_tl(t2, CC_Z);
        gen_set_label(label1);
        if (mod != 3) {
#ifdef VBOX
            /* cpu_A0 doesn't survive the branch, use the saved copy */
            gen_op_st_v(ot + s->mem_index, t0, a0);
#else
            gen_op_st_v(ot + s->mem_index, t0, cpu_A0);
#endif
        } else {
            gen_op_mov_reg_v(ot, rm, t0);
        }
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
        s->cc_op = CC_OP_EFLAGS;
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        tcg_temp_free(t2);
#ifdef VBOX
        tcg_temp_free(a0);
#endif
    }
    break;
case 0x102: /* lar */
case 0x103: /* lsl */
    {
        int label1;
        if (!s->pe || s->vm86)
            goto illegal_op;
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        if (b == 0x102)
            tcg_gen_helper_1_1(helper_lar, cpu_T[0], cpu_T[0]);
        else
            tcg_gen_helper_1_1(helper_lsl, cpu_T[0], cpu_T[0]);
        tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
        label1 = gen_new_label();
        tcg_gen_brcond_tl(TCG_COND_EQ, cpu_tmp0, tcg_const_tl(0), label1);
        gen_op_mov_reg_T0(ot, reg);
        gen_set_label(label1);
        s->cc_op = CC_OP_EFLAGS;
    }
    break;
case 0x118:
    modrm = ldub_code(s->pc++);
    mod = (modrm >> 6) & 3;
    op = (modrm >> 3) & 7;
    switch(op) {
    case 0: /* prefetchnta */
    case 1: /* prefetcht0 */
    case 2: /* prefetcht1 */
    case 3: /* prefetcht2 */
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        /* nothing more to do */
        break;
    default: /* nop (multi byte) */
        gen_nop_modrm(s, modrm);
        break;
    }
    break;
case 0x119 ... 0x11f: /* nop (multi byte) */
    modrm = ldub_code(s->pc++);
    gen_nop_modrm(s, modrm);
    break;
case 0x120: /* mov reg, crN */
case 0x122: /* mov crN, reg */
    if (s->cpl != 0) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else {
        modrm = ldub_code(s->pc++);
#ifndef VBOX /* mod bits are always understood to be 11 (0xc0) regardless of actual content; see AMD manuals */
        if ((modrm & 0xc0) != 0xc0)
            goto illegal_op;
#endif
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (CODE64(s))
            ot = OT_QUAD;
        else
            ot = OT_LONG;
        switch(reg) {
        case 0:
        case 2:
        case 3:
        case 4:
        case 8:
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0 + reg);
                gen_op_mov_TN_reg(ot, 0, rm);
                tcg_gen_helper_0_2(helper_movl_crN_T0,
                                   tcg_const_i32(reg), cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0 + reg);
#if !defined(CONFIG_USER_ONLY)
                if (reg == 8)
                    tcg_gen_helper_1_0(helper_movtl_T0_cr8, cpu_T[0]);
                else
#endif
                    tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, cr[reg]));
                gen_op_mov_reg_T0(ot, rm);
            }
            break;
        default:
            goto illegal_op;
        }
    }
    break;
case 0x121: /* mov reg, drN */
case 0x123: /* mov drN, reg */
    if (s->cpl != 0) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else {
        modrm = ldub_code(s->pc++);
#ifndef VBOX /* mod bits are always understood to be 11 (0xc0) regardless of actual content; see AMD manuals */
        if ((modrm & 0xc0) != 0xc0)
            goto illegal_op;
#endif
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | rex_r;
        if (CODE64(s))
            ot = OT_QUAD;
        else
            ot = OT_LONG;
        /* XXX: do it dynamically with CR4.DE bit */
        if (reg == 4 || reg == 5 || reg >= 8)
            goto illegal_op;
        if (b & 2) {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
            gen_op_mov_TN_reg(ot, 0, rm);
            tcg_gen_helper_0_2(helper_movl_drN_T0,
                               tcg_const_i32(reg), cpu_T[0]);
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
            tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, dr[reg]));
            gen_op_mov_reg_T0(ot, rm);
        }
    }
    break;
case 0x106: /* clts */
    if (s->cpl != 0) {
        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
    } else {
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
        tcg_gen_helper_0_0(helper_clts);
        /* abort block because static cpu state changed */
        gen_jmp_im(s->pc - s->cs_base);
        gen_eob(s);
    }
    break;
case 0x1c3: /* MOVNTI reg, mem */
    if (!(s->cpuid_features & CPUID_SSE2))
        goto illegal_op;
    ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
    modrm = ldub_code(s->pc++);
    mod = (modrm >> 6) & 3;
    if (mod == 3)
        goto illegal_op;
    reg = ((modrm >> 3) & 7) | rex_r;
    /* generate a generic store */
    gen_ldst_modrm(s, modrm, ot, reg, 1);
    break;
case 0x1ae:
    modrm = ldub_code(s->pc++);
    mod = (modrm >> 6) & 3;
    op = (modrm >> 3) & 7;
    switch(op) {
    case 0: /* fxsave */
        if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
            (s->flags & HF_EM_MASK))
            goto illegal_op;
        if (s->flags & HF_TS_MASK) {
            gen_exception(s, EXCP07_PREM, pc_start - s->cs_base);
            break;
        }
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_helper_0_2(helper_fxsave,
                           cpu_A0, tcg_const_i32((s->dflag == 2)));
        break;
    case 1: /* fxrstor */
        if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
            (s->flags & HF_EM_MASK))
            goto illegal_op;
        if (s->flags & HF_TS_MASK) {
            gen_exception(s, EXCP07_PREM, pc_start - s->cs_base);
            break;
        }
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_helper_0_2(helper_fxrstor,
                           cpu_A0, tcg_const_i32((s->dflag == 2)));
        break;
    case 2: /* ldmxcsr */
    case 3: /* stmxcsr */
        if (s->flags & HF_TS_MASK) {
            gen_exception(s, EXCP07_PREM, pc_start - s->cs_base);
            break;
        }
        if ((s->flags & (HF_EM_MASK | HF_OSFXSR_MASK)) != HF_OSFXSR_MASK ||
            mod == 3)
            goto illegal_op;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        if (op == 2) {
            gen_op_ld_T0_A0(OT_LONG + s->mem_index);
            tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
        } else {
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_T0_A0(OT_LONG + s->mem_index);
        }
        break;
    case 5: /* lfence */
    case 6: /* mfence */
        if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        break;
    case 7: /* sfence / clflush */
        if ((modrm & 0xc7) == 0xc0) {
            /* sfence */
            /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
            if (!(s->cpuid_features & CPUID_SSE))
                goto illegal_op;
        } else {
            /* clflush */
            if (!(s->cpuid_features & CPUID_CLFLUSH))
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        }
        break;
    default:
        goto illegal_op;
    }
    break;
case 0x10d: /* 3DNow! prefetch(w) */
    modrm = ldub_code(s->pc++);
    mod = (modrm >> 6) & 3;
    if (mod == 3)
        goto illegal_op;
    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
    /* ignore for now */
    break;
case 0x1aa: /* rsm */
    gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
    if (!(s->flags & HF_SMM_MASK))
        goto illegal_op;
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
    gen_jmp_im(s->pc - s->cs_base);
    tcg_gen_helper_0_0(helper_rsm);
    gen_eob(s);
    break;
case 0x1b8: /* SSE4.2 popcnt */
    if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
         PREFIX_REPZ)
        goto illegal_op;
    if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
        goto illegal_op;
    modrm = ldub_code(s->pc++);
    reg = ((modrm >> 3) & 7);
    if (s->prefix & PREFIX_DATA)
        ot = OT_WORD;
    else if (s->dflag != 2)
        ot = OT_LONG;
    else
        ot = OT_QUAD;
    gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
    tcg_gen_helper_1_2(helper_popcnt,
                       cpu_T[0], cpu_T[0], tcg_const_i32(ot));
    gen_op_mov_reg_T0(ot, reg);
    s->cc_op = CC_OP_EFLAGS;
    break;
case 0x10e ... 0x10f:
    /* 3DNow! instructions, ignore prefixes */
    s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
case 0x110 ... 0x117:
case 0x128 ... 0x12f:
case 0x138 ... 0x13a:
case 0x150 ... 0x177:
case 0x17c ... 0x17f:
case 0x1c2:
case 0x1c4 ... 0x1c6:
case 0x1d0 ... 0x1fe:
    gen_sse(s, b, pc_start, rex_r);
    break;
default:
goto illegal_op;
}
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        tcg_gen_helper_0_0(helper_unlock);
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        tcg_gen_helper_0_0(helper_unlock);
    /* XXX: ensure that no lock was generated */
    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
    return s->pc;
}
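/* Called once at startup: registers the TCG global variables used by the
   translator and the helper functions declared in helper.h. */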
void optimize_flags_init(void)
{
#if TCG_TARGET_REG_BITS == 32
    assert(sizeof(CCTable) == (1 << 3));
#else
    assert(sizeof(CCTable) == (1 << 4));
#endif
    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new(TCG_TYPE_I32,
                                   TCG_AREG0, offsetof(CPUState, cc_op), "cc_op");
    cpu_cc_src = tcg_global_mem_new(TCG_TYPE_TL,
                                    TCG_AREG0, offsetof(CPUState, cc_src), "cc_src");
    cpu_cc_dst = tcg_global_mem_new(TCG_TYPE_TL,
                                    TCG_AREG0, offsetof(CPUState, cc_dst), "cc_dst");

    /* register helpers */
#define DEF_HELPER(ret, name, params) tcg_register_helper(name, #name);
#include "helper.h"
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
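/* search_pc mode is used to restore the guest state after a fault inside
   generated code: the block is re-translated and gen_opc_pc[] /
   gen_opc_cc_op[] record, for each guest instruction, what is needed to
   rebuild EIP and cc_op (see gen_pc_load() below). */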
#ifndef VBOX
static inline void gen_intermediate_code_internal(CPUState *env,
#else /* VBOX */
DECLINLINE(void) gen_intermediate_code_internal(CPUState *env,
#endif /* VBOX */
                                                 TranslationBlock *tb,
                                                 int search_pc)
{
int num_insns;
int max_insns;
/* generate intermediate code */
#ifdef VBOX
#ifdef VBOX_WITH_CALL_RECORD
else
dc->record_call = 0;
#endif
#endif
dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        if (dc->cpl == 3)
            dc->mem_index = 2 * 4;
        else
            dc->mem_index = 1 * 4;
    }
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif
lj = -1;
num_insns = 0;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;
for(;;) {
        if (env->nb_breakpoints > 0) {
            for(j = 0; j < env->nb_breakpoints; j++) {
                if (env->breakpoints[j] == pc_ptr) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
#ifdef VBOX
#ifdef DEBUG
/*
if(cpu_check_code_raw(env, pc_ptr, env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK))) == ERROR_SUCCESS)
{
//should never happen as the jump to the patch code terminates the translation block
dprintf(("QEmu is about to execute instructions in our patch block at %08X!!\n", pc_ptr));
}
*/
#endif
{
break;
}
#endif /* VBOX */
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if the translation grows too long, stop generation too */
        if (gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
}
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    /* don't forget to fill in the last values */
if (search_pc) {
j = gen_opc_ptr - gen_opc_buf;
lj++;
while (lj <= j)
gen_opc_instr_start[lj++] = 0;
}
#ifdef DEBUG_DISAS
    if (loglevel & CPU_LOG_TB_CPU) {
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
    if (loglevel & CPU_LOG_TB_IN_ASM) {
        int disas_flags;
        fprintf(logfile, "----------------\n");
        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        target_disas(logfile, pc_start, pc_ptr - pc_start, disas_flags);
        fprintf(logfile, "\n");
    }
#endif
    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}
void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    int cc_op;
#ifdef DEBUG_DISAS
    if (loglevel & CPU_LOG_TB_OP) {
        int i;
        fprintf(logfile, "RESTORE:\n");
        for(i = 0; i <= pc_pos; i++) {
            if (gen_opc_instr_start[i]) {
                fprintf(logfile, "0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
            }
        }
    }
#endif
    env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}