/* test-i386.c revision 4af48bf7c72ef1e201c64bd475377b5af9d8e8a1 */
/*
* x86 CPU test
*
* Copyright (c) 2003 Fabrice Bellard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
/*
* Oracle GPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Oracle elects to use only
* the General Public License version 2 (GPLv2) at this time for any software where
* a choice of GPL license versions is made available with the language indicating
* that GPLv2 or any later version may be used, or where a choice of which version
* of the GPL is applied is otherwise unspecified.
*/
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <math.h>
#include <signal.h>
#include <setjmp.h>
#include <errno.h>
#include <sys/ucontext.h>
#if !defined(__x86_64__)
//#define TEST_VM86
#define TEST_SEGS
#endif
//#define LINUX_VM86_IOPL_FIX
//#define TEST_P4_FLAGS
#ifdef __SSE__
#define TEST_SSE
#define TEST_CMOV 1
#define TEST_FCOMI 1
#else
#define TEST_CMOV 0
#define TEST_FCOMI 0
#endif
#if defined(__x86_64__)
#define FMT64X "%016lx"
#define FMTLX "%016lx"
#define X86_64_ONLY(x) x
#else
#define FMTLX "%08lx"
#define X86_64_ONLY(x)
#endif
#ifdef TEST_VM86
#include <asm/vm86.h>
#endif
#define xglue(x, y) x ## y
#define tostring(s) #s
#define CC_C 0x0001
#define CC_P 0x0004
#define CC_A 0x0010
#define CC_Z 0x0040
#define CC_S 0x0080
#define CC_O 0x0800
#if defined(__x86_64__)
static inline long i2l(long v)
{
return v | ((v ^ 0xabcd) << 32);
}
#else
static inline long i2l(long v)
{
return v;
}
#endif
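/* For example, on x86_64 i2l(0x12345678) yields 0x1234fdb512345678: the low
   32 bits keep the original value and the high 32 bits hold value ^ 0xabcd,
   so an operation that silently truncates to 32 bits shows up in the output. */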
#include "test-i386.h"
#include "test-i386.h"
#include "test-i386.h"
#include "test-i386.h"
#include "test-i386.h"
#include "test-i386.h"
#define OP_CC
#include "test-i386.h"
#define OP_CC
#include "test-i386.h"
#define OP_CC
#define OP1
#include "test-i386.h"
#define OP_CC
#define OP1
#include "test-i386.h"
#define OP_CC
#define OP1
#include "test-i386.h"
#define OP_CC
#define OP1
#include "test-i386.h"
#include "test-i386-shift.h"
#include "test-i386-shift.h"
#include "test-i386-shift.h"
#include "test-i386-shift.h"
#include "test-i386-shift.h"
#define OP_CC
#include "test-i386-shift.h"
#define OP_CC
#include "test-i386-shift.h"
#define OP_SHIFTD
#define OP_NOBYTE
#include "test-i386-shift.h"
#define OP_SHIFTD
#define OP_NOBYTE
#include "test-i386-shift.h"
/* XXX: should be more precise ? */
#define OP_NOBYTE
#include "test-i386-shift.h"
#define OP_NOBYTE
#include "test-i386-shift.h"
#define OP_NOBYTE
#include "test-i386-shift.h"
#define OP_NOBYTE
#include "test-i386-shift.h"
/* lea test (modrm support) */
{\
: "=r" (res)\
}
{\
: "=r" (res)\
}
#define TEST_LEA16(STR)\
{\
: "=wq" (res)\
}
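/* The TEST_LEA/TEST_LEAQ macro bodies above are truncated in this copy.  A
   hedged sketch of the pattern they follow (the macro name and the operand
   bindings here are illustrative, not taken from the original): each
   expansion evaluates one addressing mode with lea into res and prints it. */
#define TEST_LEA_SKETCH(STR)\
{\
    asm("lea " STR ", %0"\
        : "=r" (res)\
        : "a" (eax), "b" (ebx), "c" (ecx), "d" (edx), "S" (esi), "D" (edi));\
    printf("lea %s = " FMTLX "\n", STR, res);\
}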
void test_lea(void)
{
TEST_LEA("0x4000");
TEST_LEA("(%%eax)");
TEST_LEA("(%%ebx)");
TEST_LEA("(%%ecx)");
TEST_LEA("(%%edx)");
TEST_LEA("(%%esi)");
TEST_LEA("(%%edi)");
TEST_LEA("0x40(%%eax)");
TEST_LEA("0x40(%%ebx)");
TEST_LEA("0x40(%%ecx)");
TEST_LEA("0x40(%%edx)");
TEST_LEA("0x40(%%esi)");
TEST_LEA("0x40(%%edi)");
TEST_LEA("0x4000(%%eax)");
TEST_LEA("0x4000(%%ebx)");
TEST_LEA("0x4000(%%ecx)");
TEST_LEA("0x4000(%%edx)");
TEST_LEA("0x4000(%%esi)");
TEST_LEA("0x4000(%%edi)");
TEST_LEA("(%%eax, %%ecx)");
TEST_LEA("(%%ebx, %%edx)");
TEST_LEA("(%%ecx, %%ecx)");
TEST_LEA("(%%edx, %%ecx)");
TEST_LEA("(%%esi, %%ecx)");
TEST_LEA("(%%edi, %%ecx)");
TEST_LEA("0x40(%%eax, %%ecx)");
TEST_LEA("0x4000(%%ebx, %%edx)");
TEST_LEA("(%%ecx, %%ecx, 2)");
TEST_LEA("(%%edx, %%ecx, 4)");
TEST_LEA("(%%esi, %%ecx, 8)");
TEST_LEA("(,%%eax, 2)");
TEST_LEA("(,%%ebx, 4)");
TEST_LEA("(,%%ecx, 8)");
TEST_LEA("0x40(,%%eax, 2)");
TEST_LEA("0x40(,%%ebx, 4)");
TEST_LEA("0x40(,%%ecx, 8)");
TEST_LEA("-10(%%ecx, %%ecx, 2)");
TEST_LEA("-10(%%edx, %%ecx, 4)");
TEST_LEA("-10(%%esi, %%ecx, 8)");
TEST_LEA("0x4000(%%ecx, %%ecx, 2)");
TEST_LEA("0x4000(%%edx, %%ecx, 4)");
TEST_LEA("0x4000(%%esi, %%ecx, 8)");
#if defined(__x86_64__)
TEST_LEAQ("0x4000");
TEST_LEAQ("0x4000(%%rip)");
TEST_LEAQ("(%%rax)");
TEST_LEAQ("(%%rbx)");
TEST_LEAQ("(%%rcx)");
TEST_LEAQ("(%%rdx)");
TEST_LEAQ("(%%rsi)");
TEST_LEAQ("(%%rdi)");
TEST_LEAQ("0x40(%%rax)");
TEST_LEAQ("0x40(%%rbx)");
TEST_LEAQ("0x40(%%rcx)");
TEST_LEAQ("0x40(%%rdx)");
TEST_LEAQ("0x40(%%rsi)");
TEST_LEAQ("0x40(%%rdi)");
TEST_LEAQ("0x4000(%%rax)");
TEST_LEAQ("0x4000(%%rbx)");
TEST_LEAQ("0x4000(%%rcx)");
TEST_LEAQ("0x4000(%%rdx)");
TEST_LEAQ("0x4000(%%rsi)");
TEST_LEAQ("0x4000(%%rdi)");
TEST_LEAQ("(%%rax, %%rcx)");
TEST_LEAQ("(%%rbx, %%rdx)");
TEST_LEAQ("(%%rcx, %%rcx)");
TEST_LEAQ("(%%rdx, %%rcx)");
TEST_LEAQ("(%%rsi, %%rcx)");
TEST_LEAQ("(%%rdi, %%rcx)");
TEST_LEAQ("0x40(%%rax, %%rcx)");
TEST_LEAQ("0x4000(%%rbx, %%rdx)");
TEST_LEAQ("(%%rcx, %%rcx, 2)");
TEST_LEAQ("(%%rdx, %%rcx, 4)");
TEST_LEAQ("(%%rsi, %%rcx, 8)");
TEST_LEAQ("(,%%rax, 2)");
TEST_LEAQ("(,%%rbx, 4)");
TEST_LEAQ("(,%%rcx, 8)");
TEST_LEAQ("0x40(,%%rax, 2)");
TEST_LEAQ("0x40(,%%rbx, 4)");
TEST_LEAQ("0x40(,%%rcx, 8)");
TEST_LEAQ("-10(%%rcx, %%rcx, 2)");
TEST_LEAQ("-10(%%rdx, %%rcx, 4)");
TEST_LEAQ("-10(%%rsi, %%rcx, 8)");
TEST_LEAQ("0x4000(%%rcx, %%rcx, 2)");
TEST_LEAQ("0x4000(%%rdx, %%rcx, 4)");
TEST_LEAQ("0x4000(%%rsi, %%rcx, 8)");
#else
/* limited 16 bit addressing test */
TEST_LEA16("0x4000");
TEST_LEA16("(%%bx)");
TEST_LEA16("(%%si)");
TEST_LEA16("(%%di)");
TEST_LEA16("0x40(%%bx)");
TEST_LEA16("0x40(%%si)");
TEST_LEA16("0x40(%%di)");
TEST_LEA16("0x4000(%%bx)");
TEST_LEA16("0x4000(%%si)");
TEST_LEA16("(%%bx,%%si)");
TEST_LEA16("(%%bx,%%di)");
TEST_LEA16("0x40(%%bx,%%si)");
TEST_LEA16("0x40(%%bx,%%di)");
TEST_LEA16("0x4000(%%bx,%%si)");
TEST_LEA16("0x4000(%%bx,%%di)");
#endif
}
{\
int res;\
asm("movl $1, %0\n\t"\
"cmpl %2, %1\n\t"\
"movl $0, %0\n\t"\
"1:\n\t"\
: "=r" (res)\
\
asm("movl $0, %0\n\t"\
"cmpl %2, %1\n\t"\
: "=r" (res)\
if (TEST_CMOV) {\
asm("cmpl %2, %1\n\t"\
: "=r" (res)\
asm("cmpl %2, %1\n\t"\
: "=r" (res)\
asm("cmpl %2, %1\n\t"\
: "=r" (res)\
} \
}
/* various jump tests */
void test_jcc(void)
{
TEST_JCC("s", 0, 0);
TEST_JCC("ns", 0, 0);
}
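/* The TEST_JCC macro body above is truncated.  Besides the jcc-over-mov
   pattern visible in the fragment, the same predicate can be read directly
   with set<cc>; a hedged sketch (function name and values are illustrative): */
static __attribute__((unused)) void jcc_setcc_sketch(void)
{
    int a = 2, b = 3;
    unsigned char below;
    asm("cmpl %2, %1\n\t"
        "setb %0"             /* CF set by cmp means unsigned a < b */
        : "=q" (below)
        : "r" (a), "r" (b)
        : "cc");
    printf("cmp %d,%d: below=%d\n", a, b, below);
}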
{\
for(i = 0; i < sizeof(ecx_vals) / sizeof(long); i++) {\
asm("test %2, %2\n\t"\
"movl $1, %0\n\t"\
insn " 1f\n\t" \
"movl $0, %0\n\t"\
"1:\n\t"\
: "=a" (res)\
}\
}\
}
void test_loop(void)
{
const long ecx_vals[] = {
0,
1,
0x10000,
0x10001,
#if defined(__x86_64__)
0x100000000L,
0x100000001L,
#endif
};
int i, res;
#if !defined(__x86_64__)
TEST_LOOP("jcxz");
TEST_LOOP("loopw");
TEST_LOOP("loopzw");
TEST_LOOP("loopnzw");
#endif
TEST_LOOP("jecxz");
TEST_LOOP("loopl");
TEST_LOOP("loopzl");
TEST_LOOP("loopnzl");
}
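/* Note on the semantics exercised above: loop/loopz/loopnz decrement (E)CX and
   branch while it is non-zero (and, for the z/nz forms, while ZF matches),
   whereas j(e)cxz branches only when (E)CX is already zero; the ecx_vals table
   covers the zero, small and above-32-bit cases. */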
#ifdef TEST_P4_FLAGS
#else
#endif
#include "test-i386-muldiv.h"
#include "test-i386-muldiv.h"
{
flags = 0;
asm volatile ("push %4\n\t"
"popf\n\t"
"imulw %w2, %w0\n\t"
"pushf\n\t"
"pop %1\n\t"
}
{
flags = 0;
asm volatile ("push %4\n\t"
"popf\n\t"
"imull %k2, %k0\n\t"
"pushf\n\t"
"pop %1\n\t"
}
#if defined(__x86_64__)
{
flags = 0;
asm volatile ("push %4\n\t"
"popf\n\t"
"imulq %2, %0\n\t"
"pushf\n\t"
"pop %1\n\t"
}
#endif
{\
flags = 0;\
res = 0;\
asm volatile ("push %3\n\t"\
"popf\n\t"\
"pushf\n\t"\
"pop %1\n\t"\
}
#define CC_MASK (0)
#define OP div
#include "test-i386-muldiv.h"
#define OP idiv
#include "test-i386-muldiv.h"
void test_mul(void)
{
#if defined(__x86_64__)
#endif
}
{\
asm("xor %1, %1\n"\
"mov $0x12345678, %0\n"\
: "r" (val));\
}
void test_bsx(void)
{
#if defined(__x86_64__)
#endif
}
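/* The TEST_BSX macro body above is truncated.  A hedged sketch (function name
   and test value are illustrative) of the bsf behaviour it exercises: ZF is
   set when the source is zero, otherwise the destination receives the index
   of the lowest set bit (bsr gives the highest). */
static __attribute__((unused)) void bsx_sketch(void)
{
    unsigned long val = i2l(0x12340128);
    unsigned long res;
    unsigned char zf;
    asm("bsf %2, %0\n\t"
        "setz %1"
        : "=&r" (res), "=q" (zf)
        : "r" (val)
        : "cc");
    printf("bsf(" FMTLX ")=" FMTLX " zf=%d\n", val, res, zf);
}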
/**********************************************/
union float64u {
double d;
uint64_t l;
};
void test_fops(double a, double b)
{
printf("a=%f b=%f a+b=%f\n", a, b, a + b);
printf("a=%f b=%f a-b=%f\n", a, b, a - b);
printf("a=%f b=%f a*b=%f\n", a, b, a * b);
printf("a=%f b=%f a/b=%f\n", a, b, a / b);
/* just to test some op combining */
}
void fpu_clear_exceptions(void)
{
struct __attribute__((packed)) {
long double fpregs[8];
} float_env32;
}
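/* The body of fpu_clear_exceptions() is truncated above.  A minimal sketch of
   one way to clear pending x87 exception flags (fnclex; the original may well
   use an fnstenv/fldenv sequence instead): */
static __attribute__((unused)) void fpu_clear_exceptions_sketch(void)
{
    asm volatile ("fnclex");  /* clear the exception bits in the FPU status word */
}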
/* XXX: display exception bits when supported */
#define FPUS_EMASK 0x0000
//#define FPUS_EMASK 0x007f
void test_fcmp(double a, double b)
{
asm("fcom %2\n"
"fstsw %%ax\n"
: "=a" (fpus)
: "t" (a), "u" (b));
printf("fcom(%f %f)=%04lx \n",
asm("fucom %2\n"
"fstsw %%ax\n"
: "=a" (fpus)
: "t" (a), "u" (b));
printf("fucom(%f %f)=%04lx\n",
if (TEST_FCOMI) {
/* test f(u)comi instruction */
asm("fcomi %3, %2\n"
"fstsw %%ax\n"
"pushf\n"
"pop %0\n"
: "t" (a), "u" (b));
printf("fcomi(%f %f)=%04lx %02lx\n",
asm("fucomi %3, %2\n"
"fstsw %%ax\n"
"pushf\n"
"pop %0\n"
: "t" (a), "u" (b));
printf("fucomi(%f %f)=%04lx %02lx\n",
}
asm volatile("fxam\n"
"fstsw %%ax\n"
: "=a" (fpus)
: "t" (a));
}
void test_fcvt(double a)
{
float fa;
long double la;
int i;
int ia;
double ra;
fa = a;
la = a;
*(unsigned short *)((char *)(&la) + 8));
/* test all roundings */
for(i=0;i<4;i++) {
}
}
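/* The "test all roundings" loop above has lost its body.  A hedged sketch of
   the usual pattern (names and print format are illustrative): rewrite the
   rounding-control field (bits 10-11) of the x87 control word, convert, and
   restore.  RC values 0..3 select nearest, down, up and truncate. */
static __attribute__((unused)) void fcvt_rounding_sketch(double a)
{
    unsigned short saved, cw;
    int i, ia;
    asm volatile ("fnstcw %0" : "=m" (saved));
    for (i = 0; i < 4; i++) {
        cw = (saved & ~0x0c00) | (i << 10);
        asm volatile ("fldcw %0" : : "m" (cw));
        asm volatile ("fldl %1\n\t"
                      "fistpl %0"
                      : "=m" (ia)
                      : "m" (a));
        printf("rc=%d: (int)%f = %d\n", i, a, ia);
    }
    asm volatile ("fldcw %0" : : "m" (saved));
}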
#define TEST(N) \
asm("fld" #N : "=t" (a)); \
void test_fconst(void)
{
double a;
TEST(1);
TEST(z);
}
void test_fbcd(double a)
{
unsigned short bcd[5];
double b;
printf("a=%f bcd=%04x%04x%04x%04x%04x b=%f\n",
}
{\
for(i=0;i<5;i++)\
for(i=0;i<5;i++)\
for(i=0;i<5;i++)\
printf("fpuc=%04x fpus=%04x fptag=%04x\n",\
}
void test_fenv(void)
{
struct __attribute__((packed)) {
long double fpregs[8];
} float_env32;
struct __attribute__((packed)) {
long double fpregs[8];
} float_env16;
double dtab[8];
double rtab[8];
int i;
for(i=0;i<8;i++)
dtab[i] = i + 1;
/* test for ffree */
for(i=0;i<5;i++)
asm volatile("ffree %st(2)");
asm volatile ("fninit");
}
{\
double res;\
asm("push %3\n"\
"popf\n"\
: "=t" (res)\
printf("fcmov%s eflags=0x%04lx-> %f\n", \
}
void test_fcmov(void)
{
double a, b;
long eflags, i;
a = 1.0;
b = 2.0;
for(i = 0; i < 4; i++) {
eflags = 0;
if (i & 1)
if (i & 2)
}
TEST_FCMOV(a, b, 0, "u");
TEST_FCMOV(a, b, 0, "nu");
}
void test_floats(void)
{
test_fcvt(0.5);
test_fcvt(-0.5);
test_fcvt(32768);
test_fcvt(-1e20);
test_fconst();
test_fbcd(1234567890123456.0);
test_fbcd(-123451234567890.0);
test_fenv();
if (TEST_CMOV) {
test_fcmov();
}
}
/**********************************************/
#if !defined(__x86_64__)
{\
asm ("push %3\n\t"\
"popf\n\t"\
#op "\n\t"\
"pushf\n\t"\
"pop %1\n\t"\
printf("%-10s A=%08x R=%08x CCIN=%04x CC=%04x\n",\
}
void test_bcd(void)
{
}
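/* The TEST_BCD macro above is truncated.  A hedged sketch (values are
   illustrative) of the packed-BCD pattern it tests: add two BCD bytes, let
   daa adjust AL, then read back EFLAGS. */
static __attribute__((unused)) void bcd_sketch(void)
{
    unsigned int res, flags;
    asm ("addb $0x28, %%al\n\t"   /* 0x19 + 0x28 = 0x41 in binary */
         "daa\n\t"                /* adjusted to 0x47, the BCD sum 19 + 28 */
         "pushf\n\t"
         "pop %1"
         : "=a" (res), "=r" (flags)
         : "0" (0x19)
         : "cc");
    printf("daa: 19 + 28 -> %02x flags=%08x\n", res & 0xff, flags);
}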
#endif
{\
: "0" (op0));\
}
{\
}
void test_xchg(void)
{
#if defined(__x86_64__)
#endif
#if defined(__x86_64__)
#endif
#if defined(__x86_64__)
#endif
{
int res;
res = 0x12345678;
}
#if defined(__x86_64__)
#endif
#if defined(__x86_64__)
#endif
#if defined(__x86_64__)
#endif
#if defined(__x86_64__)
#endif
#if defined(__x86_64__)
#endif
{
long i, eflags;
for(i = 0; i < 2; i++) {
op0 = 0x123456789abcdLL;
if (i == 0)
op1 = 0xfbca765423456LL;
else
op2 = 0x6532432432434LL;
asm("cmpxchg8b %2\n"
"pushf\n"
"pop %3\n"
}
}
}
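/* For reference on the (truncated) test above: cmpxchg8b m64 compares EDX:EAX
   with the 64-bit memory operand; if they are equal it stores ECX:EBX there
   and sets ZF, otherwise it loads the memory value into EDX:EAX and clears
   ZF. */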
#ifdef TEST_SEGS
/**********************************************/
/* segmentation tests */
#include <unistd.h>
{
}
#define modify_ldt_ldt_s user_desc
#endif
{\
res = 0x12345678;\
"movl $0, %1\n"\
"jnz 1f\n"\
"movl $1, %1\n"\
"1:\n"\
}
{\
long a, b, c; \
a = (op1); \
b = (op2); \
"movl $0,%1\n"\
"jnz 1f\n"\
"movl $1,%1\n"\
"1:\n"\
: "=r" (a), "=r" (c) : "0" (a), "r" (b)); \
}
/* NOTE: we use Linux modify_ldt syscall */
void test_segs(void)
{
struct modify_ldt_ldt_s ldt;
long long ldt_table[3];
char tmp;
struct {
ldt.read_exec_only = 0;
ldt.seg_not_present = 0;
ldt.read_exec_only = 0;
ldt.seg_not_present = 0;
#if 0
{
int i;
for(i=0;i<3;i++)
}
#endif
/* do some tests with fs or gs */
asm volatile ("pushl %%gs\n"
"movl %1, %%gs\n"
"gs movzbl 0x1, %0\n"
"popl %%gs\n"
: "=r" (res)
tmp = 0xa5;
asm volatile ("pushl %%ebp\n\t"
"pushl %%ds\n\t"
"movl %2, %%ds\n\t"
"movl %3, %%ebp\n\t"
"movzbl 0x1, %0\n\t"
"movzbl (%%ebp), %1\n\t"
"popl %%ds\n\t"
"popl %%ebp\n\t"
asm volatile("lfs %2, %0\n\t"
"movl %%fs, %1\n\t"
: "m" (segoff));
}
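/* test_segs() above relies on the Linux modify_ldt(2) syscall.  A minimal,
   illustrative sketch of installing a 32-bit data segment in the LDT (the
   function name and base/limit values are made up; struct user_desc and the
   MODIFY_LDT_* constants come from <asm/ldt.h>): */
#include <sys/syscall.h>
#include <asm/ldt.h>
static __attribute__((unused)) int install_ldt_entry_sketch(void *base, unsigned int limit)
{
    struct user_desc ldt;
    memset(&ldt, 0, sizeof(ldt));
    ldt.entry_number = 1;                 /* selector = (1 << 3) | 4 | 3 = 0x0f */
    ldt.base_addr = (unsigned long)base;
    ldt.limit = limit;
    ldt.seg_32bit = 1;
    ldt.contents = MODIFY_LDT_CONTENTS_DATA;
    ldt.read_exec_only = 0;
    ldt.limit_in_pages = 0;
    ldt.seg_not_present = 0;
    return syscall(SYS_modify_ldt, 1, &ldt, sizeof(ldt));  /* 1 = write entry */
}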
/* 16 bit code test */
extern char code16_start, code16_end;
extern char code16_func1;
extern char code16_func2;
extern char code16_func3;
void test_code16(void)
{
struct modify_ldt_ldt_s ldt;
/* build a code segment */
ldt.read_exec_only = 0;
ldt.limit_in_pages = 0;
ldt.seg_not_present = 0;
/* call the first function */
asm volatile ("lcall %1, %2"
: "=a" (res)
asm volatile ("lcall %2, %3"
asm volatile ("lcall %1, %2"
: "=a" (res)
}
#endif
#if defined(__x86_64__)
asm(".globl func_lret\n"
"func_lret:\n"
"movl $0x87654641, %eax\n"
"lretq\n");
#else
asm(".globl func_lret\n"
"func_lret:\n"
"movl $0x87654321, %eax\n"
"lret\n"
".globl func_iret\n"
"func_iret:\n"
"movl $0xabcd4321, %eax\n"
"iret\n");
#endif
extern char func_lret;
extern char func_iret;
void test_misc(void)
{
char table[256];
long res, i;
res = 0x12345678;
#if defined(__x86_64__)
#if 0
{
/* XXX: see if Intel Core2 and AMD64 behavior really
   differ. Here we implemented the Intel way, which is not yet
   compatible with QEMU. */
static struct __attribute__((packed)) {
} desc;
long cs_sel;
asm volatile ("push %1\n"
"call func_lret\n"
: "=a" (res)
asm volatile ("xor %%rax, %%rax\n"
"rex64 lcall *(%%rcx)\n"
: "=a" (res)
: "c" (&desc)
: "memory", "cc");
asm volatile ("push %2\n"
"mov $ 1f, %%rax\n"
"push %%rax\n"
"rex64 ljmp *(%%rcx)\n"
"1:\n"
: "=a" (res)
: "memory", "cc");
}
#endif
#else
asm volatile ("push %%cs ; call %1"
: "=a" (res)
asm volatile ("pushf ; push %%cs ; call %1"
: "=a" (res)
#endif
#if defined(__x86_64__)
/* specific popl test */
asm volatile ("push $12345432 ; push $0x9abcdef ; pop (%%rsp) ; pop %0"
: "=g" (res));
#else
/* specific popl test */
asm volatile ("pushl $12345432 ; pushl $0x9abcdef ; popl (%%esp) ; popl %0"
: "=g" (res));
/* specific popw test */
asm volatile ("pushl $12345432 ; pushl $0x9abcdef ; popw (%%esp) ; addl $2, %%esp ; popl %0"
: "=g" (res));
#endif
}
{\
\
ecx = 17;\
\
asm volatile ("push $0\n\t"\
"popf\n\t"\
DF "\n\t"\
"cld\n\t"\
"pushf\n\t"\
"pop %4\n\t"\
}
void test_string(void)
{
int i;
for(i = 0;i < sizeof(str_buffer); i++)
str_buffer[i] = i + 0x56;
/* XXX: better tests */
}
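/* Background for the (truncated) TEST_STRING macro above: the rep-prefixed
   string instructions take their pointers in (E)SI/(E)DI and their count in
   (E)CX and honour the direction flag; the DF argument of the macro
   presumably selects "std" or nothing before the operation, with the
   following cld restoring the default increment direction. */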
#ifdef TEST_VM86
/* VM86 test */
{
}
{
}
{
}
{
}
extern char vm86_code_start;
extern char vm86_code_end;
#define VM86_CODE_CS 0x100
#define VM86_CODE_IP 0x100
void test_vm86(void)
{
struct vm86plus_struct ctx;
struct vm86_regs *r;
if (vm86_mem == MAP_FAILED) {
printf("ERROR: could not map vm86 memory");
return;
}
/* init basic registers */
r->eip = VM86_CODE_IP;
r->esp = 0xfffe;
seg = VM86_CODE_CS;
/* move code to proper address. We use the same layout as a .com
DOS program. */
/* mark int 0x21 as being emulated */
for(;;) {
case VM86_INTx:
{
if (int_num != 0x21)
goto unknown_int;
switch(ah) {
case 0x00: /* exit */
goto the_end;
case 0x02: /* write char */
{
putchar(c);
}
break;
case 0x09: /* write string */
{
for(;;) {
c = *ptr++;
if (c == '$')
break;
putchar(c);
}
}
break;
case 0xff: /* extension: write eflags number in edx */
v = (int)r->edx;
#ifndef LINUX_VM86_IOPL_FIX
v &= ~0x3000;
#endif
printf("%08x\n", v);
break;
default:
goto the_end;
}
}
break;
case VM86_SIGNAL:
/* a signal came, we just ignore that */
break;
case VM86_STI:
break;
default:
goto the_end;
}
}
printf("VM86 end\n");
}
#endif
/* exception tests */
#define REG_TRAPNO TRAPNO
#endif
#if defined(__x86_64__)
#endif
int v1;
int tab[2];
{
printf("si_signo=%d si_errno=%d si_code=%d",
printf(" si_addr=0x%08lx",
printf("\n");
printf("\n");
}
void test_exceptions(void)
{
volatile int val;
/* test division by zero reporting */
printf("DIVZ exception:\n");
/* now divide by zero */
v1 = 0;
}
#if !defined(__x86_64__)
printf("BOUND exception:\n");
/* bound exception */
tab[0] = 1;
}
#endif
#ifdef TEST_SEGS
printf("segment exceptions:\n");
/* load an invalid segment */
asm volatile ("movl %0, %%fs" : : "r" ((0x1234 << 3) | 1));
}
/* null data segment is valid */
asm volatile ("movl %0, %%fs" : : "r" (3));
/* null stack segment */
asm volatile ("movl %0, %%ss" : : "r" (3));
}
{
struct modify_ldt_ldt_s ldt;
ldt.read_exec_only = 0;
/* segment not present */
}
}
#endif
/* test SEGV reporting */
printf("PF exception:\n");
val = 1;
/* we add a nop to test a weird PC retrieval case */
asm volatile ("nop");
/* now store in an invalid address */
*(char *)0x1234 = 1;
}
/* test SEGV reporting */
printf("PF exception:\n");
val = 1;
/* read from an invalid address */
v1 = *(char *)0x1234;
}
/* test illegal instruction reporting */
printf("UD2 exception:\n");
/* now execute an invalid instruction */
asm volatile("ud2");
}
printf("lock nop exception:\n");
/* now execute an invalid instruction */
asm volatile("lock nop");
}
printf("INT exception:\n");
asm volatile ("int $0xfd");
}
asm volatile ("int $0x01");
}
asm volatile (".byte 0xcd, 0x03");
}
asm volatile ("int $0x04");
}
asm volatile ("int $0x05");
}
printf("INT3 exception:\n");
asm volatile ("int3");
}
printf("CLI exception:\n");
asm volatile ("cli");
}
printf("STI exception:\n");
asm volatile ("cli");
}
#if !defined(__x86_64__)
printf("INTO exception:\n");
/* overflow exception */
asm volatile ("addl $1, %0 ; into" : : "r" (0x7fffffff));
}
#endif
printf("OUTB exception:\n");
asm volatile ("outb %%al, %%dx" : : "d" (0x4321), "a" (0));
}
printf("INB exception:\n");
}
printf("REP OUTSB exception:\n");
}
printf("REP INSB exception:\n");
}
printf("HLT exception:\n");
asm volatile ("hlt");
}
printf("single step exception:\n");
val = 0;
asm volatile ("pushf\n"
"orl $0x00100, (%%esp)\n"
"popf\n"
"movl $0xabcd, %0\n"
}
}
#if !defined(__x86_64__)
/* specific precise single step test */
{
}
void test_single_step(void)
{
volatile int val;
int i;
val = 0;
asm volatile ("pushf\n"
"orl $0x00100, (%%esp)\n"
"popf\n"
"movl $0xabcd, %0\n"
/* jmp test */
"movl $3, %%ecx\n"
"1:\n"
"addl $1, %0\n"
"decl %%ecx\n"
"jnz 1b\n"
/* movsb: the single step should stop at each movsb iteration */
"movl $sstep_buf1, %%esi\n"
"movl $sstep_buf2, %%edi\n"
"movl $0, %%ecx\n"
"rep movsb\n"
"movl $3, %%ecx\n"
"rep movsb\n"
"movl $1, %%ecx\n"
"rep movsb\n"
/* cmpsb: the single step should stop at each cmpsb iteration */
"movl $sstep_buf1, %%esi\n"
"movl $sstep_buf2, %%edi\n"
"movl $0, %%ecx\n"
"rep cmpsb\n"
"movl $4, %%ecx\n"
"rep cmpsb\n"
/* getpid() syscall: single step should skip one
instruction */
"movl $20, %%eax\n"
"int $0x80\n"
"movl $0, %%eax\n"
/* when modifying SS, trace is not done on the next
instruction */
"movl %%ss, %%ecx\n"
"movl %%ecx, %%ss\n"
"addl $1, %0\n"
"movl $1, %%eax\n"
"movl %%ecx, %%ss\n"
"jmp 1f\n"
"addl $1, %0\n"
"1:\n"
"movl $1, %%eax\n"
"pushl %%ecx\n"
"popl %%ss\n"
"addl $1, %0\n"
"movl $1, %%eax\n"
"pushf\n"
"andl $~0x00100, (%%esp)\n"
"popf\n"
: "=m" (val)
:
: "cc", "memory", "eax", "ecx", "esi", "edi");
for(i = 0; i < 4; i++)
}
/* self modifying code test */
static uint8_t code[] = {
0xb8, 0x1, 0x00, 0x00, 0x00, /* movl $1, %eax */
0xc3, /* ret */
};
asm(".section \".data\"\n"
"smc_code2:\n"
"movl 4(%esp), %eax\n"
"movl %eax, smc_patch_addr2 + 1\n"
"nop\n"
"nop\n"
"nop\n"
"nop\n"
"nop\n"
"nop\n"
"nop\n"
"nop\n"
"smc_patch_addr2:\n"
"movl $1, %eax\n"
"ret\n"
".previous\n"
);
typedef int FuncType(void);
extern int smc_code2(int);
void test_self_modifying_code(void)
{
int i;
printf("self modifying code:\n");
for(i = 2; i <= 4; i++) {
code[1] = i;
}
/* more difficult test: the modified code is just after the
modifying instruction. This is forbidden by the Intel specs, but it
is used by old DOS programs */
for(i = 2; i <= 4; i++) {
}
}
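/* Sketch of the basic self-modifying-code pattern exercised above: copy a tiny
   "movl $imm, %eax ; ret" stub into writable+executable memory, patch the
   immediate and call it.  The mmap-based buffer and the function name are
   illustrative; the original test patches its static code[] array in place. */
#include <sys/mman.h>
static __attribute__((unused)) void smc_sketch(void)
{
    static const uint8_t stub[] = {
        0xb8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */
        0xc3,                         /* ret */
    };
    uint8_t *buf;
    int i;
    buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return;
    memcpy(buf, stub, sizeof(stub));
    for (i = 2; i <= 4; i++) {
        buf[1] = i;  /* patch the low byte of the movl immediate */
        printf("smc sketch: func() = %d\n", ((FuncType *)buf)());
    }
    munmap(buf, 4096);
}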
#endif
long enter_stack[4096];
#if defined(__x86_64__)
#define RSP "%%rsp"
#define RBP "%%rbp"
#else
#define RSP "%%esp"
#define RBP "%%ebp"
#endif
{\
for(i=1;i<=32;i++)\
*--stack_ptr = i;\
: "[esp_val]" (esp_val),\
"[ebp_val]" (ebp_val));\
}
static void test_enter(void)
{
#if defined(__x86_64__)
#else
#endif
}
#ifdef TEST_SSE
typedef union {
double d[2];
float s[4];
uint32_t l[4];
uint64_t q[2];
} XMMReg;
static uint64_t __attribute__((aligned(16))) test_values[4][2] = {
{ 0x456723c698694873, 0xdc515cff944a58ec },
{ 0x1f297ccd58bad7ab, 0x41f21efba9e3e146 },
{ 0x007c62c2085427f8, 0x231be9e8cde7438d },
{ 0x0f76255a085427f8, 0xc233e9e8c4c9439a },
};
{\
#op,\
a.q[1], a.q[0],\
b.q[1], b.q[0],\
r.q[1], r.q[0]);\
}
{\
int i;\
for(i=0;i<2;i++) {\
a.q[0] = test_values[2*i][0];\
}\
}
{\
int i;\
for(i=0;i<2;i++) {\
a.q[0] = test_values[2*i][0];\
#op,\
a.q[0],\
b.q[0],\
r.q[0]);\
}\
}
{\
a.q[0] = test_values[0][0];\
b.q[0] = test_values[1][0];\
#op,\
a.q[1], a.q[0],\
b.q[1], b.q[0],\
ib,\
r.q[1], r.q[0]);\
}
{\
int i;\
for(i=0;i<2;i++) {\
a.q[0] = test_values[2*i][0];\
#op,\
a.q[1], a.q[0],\
ib,\
r.q[1], r.q[0]);\
}\
}
{\
int i;\
for(i=0;i<2;i++) {\
a.q[0] = test_values[2*i][0];\
#op,\
a.q[1], a.q[0],\
ib,\
r.q[1], r.q[0]);\
}\
}
{\
int i;\
for(i=0;i<2;i++) {\
a.q[0] = test_values[2*i][0];\
b.q[0] = ib;\
b.q[1] = 0;\
#op,\
a.q[1], a.q[0],\
b.q[1], b.q[0],\
r.q[1], r.q[0]);\
}\
}
{\
int i, reg;\
for(i=0;i<2;i++) {\
a.q[0] = test_values[2*i][0];\
#op,\
a.q[1], a.q[0],\
reg);\
}\
}
#define SSE_OPS(a) \
#define SSE_OPD(a) \
{\
unsigned int eflags;\
XMMReg a, b;\
asm volatile (#op " %2, %1\n"\
"pushf\n"\
"pop %0\n"\
: "=m" (eflags)\
printf("%-9s: a=%f b=%f cc=%04x\n",\
}
{
}
#define CVT_OP_XMM(op)\
{\
#op,\
a.q[1], a.q[0],\
r.q[1], r.q[0]);\
}
/* Force %xmm0 usage to avoid the case where both register indexes are 0
   to test instruction decoding more extensively */
#define CVT_OP_XMM2MMX(op)\
{\
: "%xmm0"); \
asm volatile("emms\n"); \
#op,\
a.q[1], a.q[0],\
r.q[0]);\
}
#define CVT_OP_MMX2XMM(op)\
{\
asm volatile("emms\n"); \
#op,\
a.q[0],\
r.q[1], r.q[0]);\
}
#define CVT_OP_REG2XMM(op)\
{\
#op,\
a.l[0],\
r.q[1], r.q[0]);\
}
#define CVT_OP_XMM2REG(op)\
{\
#op,\
a.q[1], a.q[0],\
r.l[0]);\
}
struct fpxstate {
};
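/* The field list of struct fpxstate is truncated in this copy.  For reference,
   fxsave/fxrstor operate on the 512-byte FXSAVE area: FPU control/status/tag
   words, last opcode, instruction and data pointers, MXCSR and MXCSR_MASK,
   eight 16-byte slots for the x87/MMX registers and sixteen 16-byte slots for
   the XMM registers, followed by reserved space. */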
void test_fxsave(void)
{
int i, nb_xmm;
XMMReg a, b;
a.q[0] = test_values[0][0];
b.q[0] = test_values[1][0];
asm("movdqa %2, %%xmm0\n"
"movdqa %3, %%xmm7\n"
#if defined(__x86_64__)
"movdqa %2, %%xmm15\n"
#endif
" fld1\n"
" fldpi\n"
" fldln2\n"
" fxsave %0\n"
" fxrstor %0\n"
" fxsave %1\n"
" fninit\n"
: "m" (a), "m" (b));
for(i = 0; i < 3; i++) {
i,
}
#if defined(__x86_64__)
nb_xmm = 16;
#else
nb_xmm = 8;
#endif
for(i = 0; i < nb_xmm; i++) {
i,
}
}
void test_sse(void)
{
XMMReg r, a, b;
int i;
asm volatile ("pinsrw $1, %1, %0" : "=y" (r.q[0]) : "r" (0x12345678));
a.q[0] = test_values[0][0];
asm volatile ("pextrw $1, %1, %0" : "=r" (r.l[0]) : "y" (a.q[0]));
asm volatile ("pmovmskb %1, %0" : "=r" (r.l[0]) : "y" (a.q[0]));
{
r.q[0] = -1;
r.q[1] = -1;
a.q[0] = test_values[0][0];
b.q[0] = test_values[1][0];
asm volatile("maskmovq %1, %0" :
: "y" (a.q[0]), "y" (b.q[0]), "D" (&r)
: "memory");
"maskmov",
r.q[0],
a.q[0],
b.q[0]);
asm volatile("maskmovdqu %1, %0" :
: "memory");
"maskmov",
r.q[1], r.q[0],
a.q[1], a.q[0],
b.q[1], b.q[0]);
}
asm volatile ("emms");
/* FPU specific ops */
{
}
for(i = 0; i < 2; i++) {
a.s[0] = 2.7;
a.s[1] = 3.4;
a.s[2] = 4;
a.s[3] = -6.3;
b.s[0] = 45.7;
b.s[1] = 353.4;
b.s[2] = 4;
b.s[3] = 56.3;
if (i == 1) {
a.s[0] = q_nan.d;
b.s[3] = q_nan.d;
}
a.d[0] = 2.7;
a.d[1] = -3.4;
b.d[0] = 45.7;
b.d[1] = -53.4;
if (i == 1) {
a.d[0] = q_nan.d;
b.d[1] = q_nan.d;
}
}
a.s[0] = 2.7;
a.s[1] = 3.4;
a.s[2] = 4;
a.s[3] = -6.3;
a.d[0] = 2.6;
a.d[1] = -3.4;
/* int to float */
a.l[0] = -6;
a.l[1] = 2;
a.l[2] = 100;
a.l[3] = -60000;
/* XXX: test PNI insns */
#if 0
#endif
asm volatile ("emms");
}
#endif
#define TEST_CONV_RAX(op)\
{\
unsigned long a, r;\
a = i2l(0x8234a6f8);\
r = a;\
}
#define TEST_CONV_RAX_RDX(op)\
{\
unsigned long a, d, r, rh; \
a = i2l(0x8234a6f8);\
d = i2l(0x8345a1f2);\
r = a;\
rh = d;\
}
void test_conv(void)
{
#if defined(__x86_64__)
#endif
#if defined(__x86_64__)
#endif
{
unsigned long a, r;
a = i2l(0x12345678);
asm volatile("bswapl %k0" : "=r" (r) : "0" (a));
}
#if defined(__x86_64__)
{
unsigned long a, r;
a = i2l(0x12345678);
asm volatile("bswapq %0" : "=r" (r) : "0" (a));
}
#endif
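/* For reference: bswapl reverses the four bytes of the low 32 bits, e.g.
   0x12345678 becomes 0x78563412 (the upper half of the 64-bit register is
   zeroed on x86_64), while bswapq reverses all eight bytes. */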
}
extern void *__start_initcall;
extern void *__stop_initcall;
int main(int argc, char **argv)
{
void **ptr;
void (*func)(void);
ptr = &__start_initcall;
while (ptr != &__stop_initcall) {
func = *ptr++;
func();
}
test_bsx();
test_mul();
test_jcc();
test_loop();
test_floats();
#if !defined(__x86_64__)
test_bcd();
#endif
test_xchg();
test_string();
test_misc();
test_lea();
#ifdef TEST_SEGS
test_segs();
test_code16();
#endif
#ifdef TEST_VM86
test_vm86();
#endif
#if !defined(__x86_64__)
#endif
test_enter();
test_conv();
#ifdef TEST_SSE
test_sse();
test_fxsave();
#endif
return 0;
}