/** @file
 * IPRT - AMD64 and x86 Specific Assembly Functions.
 * (asm-amd64-x86.h, revision a828fbe5454430a560bd0b69e6d1752ad4634c67)
*/
/*
* Copyright (C) 2006-2010 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef ___iprt_asm_amd64_x86_h
#define ___iprt_asm_amd64_x86_h

#include <iprt/types.h>

#if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
# error "Not on AMD64 or x86"
#endif
#if defined(_MSC_VER) && RT_INLINE_ASM_USES_INTRIN
# include <intrin.h>
/* Emit the intrinsics at all optimization levels. */
# pragma intrinsic(_ReadWriteBarrier)
# pragma intrinsic(__writemsr)
# pragma intrinsic(__outbytestring)
# pragma intrinsic(__outwordstring)
# pragma intrinsic(__outdword)
# pragma intrinsic(__outdwordstring)
# pragma intrinsic(__inbytestring)
# pragma intrinsic(__inwordstring)
# pragma intrinsic(__indwordstring)
# pragma intrinsic(__writecr0)
# pragma intrinsic(__writecr3)
# pragma intrinsic(__writecr4)
# ifdef RT_ARCH_AMD64
# pragma intrinsic(__writecr8)
# endif
#endif
/** @defgroup grp_rt_asm_amd64_x86 AMD64 and x86 Specific ASM Routines
* @ingroup grp_rt_asm
* @{
*/
/** @todo find a more proper place for this structure? */
#pragma pack(1)
/** IDTR */
typedef struct RTIDTR
{
    /** Size of the IDT. */
    uint16_t    cbIdt;
    /** Address of the IDT. */
    uintptr_t   pIdt;
} RTIDTR, *PRTIDTR;
#pragma pack()
#pragma pack(1)
/** GDTR */
typedef struct RTGDTR
{
    /** Size of the GDT. */
    uint16_t    cbGdt;
    /** Address of the GDT. */
    uintptr_t   pGdt;
} RTGDTR, *PRTGDTR;
#pragma pack()
/**
* Gets the content of the IDTR CPU register.
* @param pIdtr Where to store the IDTR contents.
*/
#else
{
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Sets the content of the IDTR CPU register.
* @param pIdtr Where to load the IDTR contents from
*/
#else
{
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Gets the content of the GDTR CPU register.
* @param pGdtr Where to store the GDTR contents.
*/
#else
{
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
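/* A minimal sketch, not taken from this header: assuming GCC-style inline
 * assembly, the descriptor-table registers can be read/written directly
 * against the packed RTIDTR/RTGDTR structures above (SIDT/SGDT store a
 * 16-bit limit followed by the base; LIDT/LGDT load the same format).  The
 * "Example" suffixes mark the names as hypothetical. */
DECLINLINE(void) ASMGetIDTRExample(PRTIDTR pIdtr)
{
    __asm__ __volatile__("sidt %0" : "=m" (*pIdtr));
}

DECLINLINE(void) ASMGetGDTRExample(PRTGDTR pGdtr)
{
    __asm__ __volatile__("sgdt %0" : "=m" (*pGdtr));
}

DECLINLINE(void) ASMSetIDTRExample(const RTIDTR *pIdtr)
{
    __asm__ __volatile__("lidt %0" : : "m" (*pIdtr));
}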
/**
 * Get the CS register.
 * @returns CS.
*/
#else
{
# else
{
}
# endif
return SelCS;
}
#endif
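/* A minimal sketch, not taken from this header: assuming GCC-style inline
 * assembly, a segment selector is read with a plain MOV into a 16-bit
 * register.  RTSEL is IPRT's selector type; the "Example" suffix marks the
 * name as hypothetical.  The DS/ES/FS/GS/SS getters below follow the same
 * pattern (TR is read with STR instead). */
DECLINLINE(RTSEL) ASMGetCSExample(void)
{
    RTSEL SelCS;
    __asm__ __volatile__("movw %%cs, %0\n\t" : "=r" (SelCS));
    return SelCS;
}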
/**
* Get the DS register.
* @returns DS.
*/
#else
{
# else
{
}
# endif
return SelDS;
}
#endif
/**
* Get the ES register.
* @returns ES.
*/
#else
{
# else
{
}
# endif
return SelES;
}
#endif
/**
* Get the FS register.
* @returns FS.
*/
#else
{
# else
{
}
# endif
return SelFS;
}
#endif
/**
* Get the GS register.
* @returns GS.
*/
#else
{
# else
{
}
# endif
return SelGS;
}
#endif
/**
* Get the SS register.
* @returns SS.
*/
#else
{
# else
{
}
# endif
return SelSS;
}
#endif
/**
* Get the TR register.
* @returns TR.
*/
#else
{
# else
{
}
# endif
return SelTR;
}
#endif
/**
* Get the [RE]FLAGS register.
* @returns [RE]FLAGS.
*/
#else
{
# ifdef RT_ARCH_AMD64
"popq %0\n\t"
: "=r" (uFlags));
# else
"popl %0\n\t"
: "=r" (uFlags));
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uFlags;
}
#endif
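/* A minimal sketch, not taken from this header: assuming GCC-style inline
 * assembly, [R/E]FLAGS is sampled by pushing the flags register and popping
 * it into a general register, which is what the popq/popl fragments above
 * hint at.  Hypothetical name. */
DECLINLINE(RTCCUINTREG) ASMGetFlagsExample(void)
{
    RTCCUINTREG uFlags;
#ifdef RT_ARCH_AMD64
    __asm__ __volatile__("pushfq\n\t"
                         "popq %0\n\t"
                         : "=r" (uFlags));
#else
    __asm__ __volatile__("pushfl\n\t"
                         "popl %0\n\t"
                         : "=r" (uFlags));
#endif
    return uFlags;
}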
/**
* Set the [RE]FLAGS register.
* @param uFlags The new [RE]FLAGS value.
*/
#else
{
# ifdef RT_ARCH_AMD64
"popfq\n\t"
: : "g" (uFlags));
# else
"popfl\n\t"
: : "g" (uFlags));
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
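/* The mirror-image sketch for the setter documented above (again a
 * hypothetical name, GCC-style assembly only): the new value is pushed and
 * POPFQ/POPFL loads it into [R/E]FLAGS. */
DECLINLINE(void) ASMSetFlagsExample(RTCCUINTREG uFlags)
{
#ifdef RT_ARCH_AMD64
    __asm__ __volatile__("pushq %0\n\t"
                         "popfq\n\t"
                         : : "g" (uFlags));
#else
    __asm__ __volatile__("pushl %0\n\t"
                         "popfl\n\t"
                         : : "g" (uFlags));
#endif
}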
/**
* Gets the content of the CPU timestamp counter register.
*
* @returns TSC.
*/
#else
{
RTUINT64U u;
# else
u.u = __rdtsc();
# else
{
}
# endif
# endif
return u.u;
}
#endif
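/* A minimal sketch, not taken from this header: RDTSC returns the timestamp
 * counter in EDX:EAX, which the RTUINT64U union reassembles into a 64-bit
 * value.  Hypothetical name, GCC-style assembly only. */
DECLINLINE(uint64_t) ASMReadTSCExample(void)
{
    RTUINT64U u;
    __asm__ __volatile__("rdtsc" : "=a" (u.s.Lo), "=d" (u.s.Hi));
    return u.u;
}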
/**
* Performs the cpuid instruction returning all registers.
*
* @param uOperator CPUID operation (eax).
* @param pvEAX Where to store eax.
* @param pvEBX Where to store ebx.
* @param pvECX Where to store ecx.
* @param pvEDX Where to store edx.
* @remark We're using void pointers to ease the use of special bitfield structures and such.
*/
#else
{
# ifdef RT_ARCH_AMD64
: "=a" (uRAX),
"=b" (uRBX),
"=c" (uRCX),
"=d" (uRDX)
: "0" (uOperator));
# else
"cpuid\n\t"
"xchgl %%ebx, %1\n\t"
: "0" (uOperator));
# endif
int aInfo[4];
# else
{
}
# endif
}
#endif
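/* A minimal sketch, not taken from this header: CPUID takes the leaf in EAX
 * and returns its results in EAX, EBX, ECX and EDX.  This sketch assumes a
 * non-PIC build so EBX can be named directly ("=b"); 32-bit PIC builds must
 * preserve EBX around the instruction, which is what the xchgl lines in the
 * fragment above do.  Hypothetical name and plain uint32_t pointers instead
 * of the void pointers used by the documented API. */
DECLINLINE(void) ASMCpuIdExample(uint32_t uOperator, uint32_t *puEAX, uint32_t *puEBX, uint32_t *puECX, uint32_t *puEDX)
{
    uint32_t uEAX, uEBX, uECX, uEDX;
    __asm__ __volatile__("cpuid"
                         : "=a" (uEAX), "=b" (uEBX), "=c" (uECX), "=d" (uEDX)
                         : "0" (uOperator));
    *puEAX = uEAX;
    *puEBX = uEBX;
    *puECX = uECX;
    *puEDX = uEDX;
}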
/**
* Performs the cpuid instruction returning all registers.
 * Some subfunctions of cpuid take ECX as an additional parameter (currently known for EAX=4).
*
* @param uOperator CPUID operation (eax).
* @param uIdxECX ecx index
* @param pvEAX Where to store eax.
* @param pvEBX Where to store ebx.
* @param pvECX Where to store ecx.
* @param pvEDX Where to store edx.
* @remark We're using void pointers to ease the use of special bitfield structures and such.
*/
DECLASM(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX);
#else
DECLINLINE(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
# ifdef RT_ARCH_AMD64
__asm__ ("cpuid\n\t"
: "=a" (uRAX),
"=b" (uRBX),
"=c" (uRCX),
"=d" (uRDX)
: "0" (uOperator),
"2" (uIdxECX));
# else
__asm__ ("xchgl %%ebx, %1\n\t"
"cpuid\n\t"
"xchgl %%ebx, %1\n\t"
: "0" (uOperator),
"2" (uIdxECX));
# endif
int aInfo[4];
/* ??? another intrinsic ??? */
# else
{
}
# endif
}
#endif
/**
* Performs the cpuid instruction returning ecx and edx.
*
* @param uOperator CPUID operation (eax).
* @param pvECX Where to store ecx.
* @param pvEDX Where to store edx.
* @remark We're using void pointers to ease the use of special bitfield structures and such.
*/
#else
{
}
#endif
/**
* Performs the cpuid instruction returning eax.
*
* @param uOperator CPUID operation (eax).
* @returns EAX after cpuid operation.
*/
#else
{
# ifdef RT_ARCH_AMD64
__asm__ ("cpuid"
: "=a" (xAX)
: "0" (uOperator)
: "rbx", "rcx", "rdx");
__asm__ ("push %%ebx\n\t"
"cpuid\n\t"
"pop %%ebx\n\t"
: "=a" (xAX)
: "0" (uOperator)
: "ecx", "edx");
# else
__asm__ ("cpuid"
: "=a" (xAX)
: "0" (uOperator)
: "edx", "ecx", "ebx");
# endif
int aInfo[4];
# else
{
}
# endif
}
#endif
/**
* Performs the cpuid instruction returning ebx.
*
* @param uOperator CPUID operation (eax).
* @returns EBX after cpuid operation.
*/
#else
{
# ifdef RT_ARCH_AMD64
__asm__ ("cpuid"
: "=a" (uSpill),
"=b" (xBX)
: "0" (uOperator)
: "rdx", "rcx");
__asm__ ("push %%ebx\n\t"
"cpuid\n\t"
"mov %%ebx, %%edx\n\t"
"pop %%ebx\n\t"
: "=a" (uOperator),
"=d" (xBX)
: "0" (uOperator)
: "ecx");
# else
__asm__ ("cpuid"
: "=a" (uOperator),
"=b" (xBX)
: "0" (uOperator)
: "edx", "ecx");
# endif
int aInfo[4];
# else
{
}
# endif
}
#endif
/**
* Performs the cpuid instruction returning ecx.
*
* @param uOperator CPUID operation (eax).
* @returns ECX after cpuid operation.
*/
#else
{
# ifdef RT_ARCH_AMD64
__asm__ ("cpuid"
: "=a" (uSpill),
"=c" (xCX)
: "0" (uOperator)
: "rbx", "rdx");
__asm__ ("push %%ebx\n\t"
"cpuid\n\t"
"pop %%ebx\n\t"
: "=a" (uOperator),
"=c" (xCX)
: "0" (uOperator)
: "edx");
# else
__asm__ ("cpuid"
: "=a" (uOperator),
"=c" (xCX)
: "0" (uOperator)
: "ebx", "edx");
# endif
int aInfo[4];
# else
{
}
# endif
}
#endif
/**
* Performs the cpuid instruction returning edx.
*
* @param uOperator CPUID operation (eax).
* @returns EDX after cpuid operation.
*/
#else
{
# ifdef RT_ARCH_AMD64
__asm__ ("cpuid"
: "=a" (uSpill),
"=d" (xDX)
: "0" (uOperator)
: "rbx", "rcx");
__asm__ ("push %%ebx\n\t"
"cpuid\n\t"
"pop %%ebx\n\t"
: "=a" (uOperator),
"=d" (xDX)
: "0" (uOperator)
: "ecx");
# else
__asm__ ("cpuid"
: "=a" (uOperator),
"=d" (xDX)
: "0" (uOperator)
: "ebx", "ecx");
# endif
int aInfo[4];
# else
{
}
# endif
}
#endif
/**
* Checks if the current CPU supports CPUID.
*
* @returns true if CPUID is supported.
*/
DECLINLINE(bool) ASMHasCpuId(void)
{
#ifdef RT_ARCH_AMD64
return true; /* ASSUME that all amd64 compatible CPUs have cpuid. */
#else /* !RT_ARCH_AMD64 */
    bool fRet = false;
# if RT_INLINE_ASM_GNU_STYLE
    uint32_t u1;
    uint32_t u2;
    __asm__ ("pushf\n\t"
             "pop %1\n\t"
             "mov %1, %2\n\t"
             "xorl $0x200000, %1\n\t"
             "push %1\n\t"
             "popf\n\t"
             "pushf\n\t"
             "pop %1\n\t"
             "cmpl %1, %2\n\t"
             "setne %0\n\t"
             "push %2\n\t"
             "popf\n\t"
             : "=m" (fRet), "=r" (u1), "=r" (u2));
# else
{
}
# endif
return fRet;
#endif /* !RT_ARCH_AMD64 */
}
/**
* Gets the APIC ID of the current CPU.
*
* @returns the APIC ID.
*/
#else
{
# ifdef RT_ARCH_AMD64
: "=a" (uSpill),
"=b" (xBX)
: "0" (1)
: "rcx", "rdx");
"cpuid\n\t"
"xchgl %%ebx,%1\n\t"
: "=a" (uSpill),
"=rm" (xBX)
: "0" (1)
: "ecx", "edx");
# else
: "=a" (uSpill),
"=b" (xBX)
: "0" (1)
: "ecx", "edx");
# endif
int aInfo[4];
# else
{
}
# endif
}
#endif
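/* A minimal sketch, not taken from this header: the initial local APIC ID is
 * bits 31:24 of EBX from CPUID leaf 1, so the getter only needs that one
 * register.  Built on the hypothetical ASMCpuIdExample() sketch earlier in
 * this file. */
DECLINLINE(uint8_t) ASMGetApicIdExample(void)
{
    uint32_t uEAX, uEBX, uECX, uEDX;
    ASMCpuIdExample(1, &uEAX, &uEBX, &uECX, &uEDX);
    return (uint8_t)(uEBX >> 24);
}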
/**
 * Tests if it is a genuine Intel CPU based on the ASMCpuId(0) output.
*
* @param uEBX EBX return from ASMCpuId(0)
* @param uECX ECX return from ASMCpuId(0)
* @param uEDX EDX return from ASMCpuId(0)
*/
{
}
/**
* Tests if this is a genuine Intel CPU.
*
* @remarks ASSUMES that cpuid is supported by the CPU.
*/
DECLINLINE(bool) ASMIsIntelCpu(void)
{
}
/**
 * Tests if it is an authentic AMD CPU based on the ASMCpuId(0) output.
*
* @param uEBX EBX return from ASMCpuId(0)
* @param uECX ECX return from ASMCpuId(0)
* @param uEDX EDX return from ASMCpuId(0)
*/
{
}
/**
* Tests if this is an authentic AMD CPU.
*
* @remarks ASSUMES that cpuid is supported by the CPU.
*/
DECLINLINE(bool) ASMIsAmdCpu(void)
{
}
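/* A minimal sketch, not taken from this header: CPUID leaf 0 returns the
 * vendor string split across EBX, EDX and ECX ("GenuineIntel" /
 * "AuthenticAMD"), so the checks documented above reduce to three constant
 * compares.  Hypothetical names; the constants are the little-endian dwords
 * of the vendor strings. */
DECLINLINE(bool) ASMIsIntelCpuExExample(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
    return uEBX == UINT32_C(0x756e6547)   /* 'Genu' */
        && uEDX == UINT32_C(0x49656e69)   /* 'ineI' */
        && uECX == UINT32_C(0x6c65746e);  /* 'ntel' */
}

DECLINLINE(bool) ASMIsAmdCpuExExample(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
    return uEBX == UINT32_C(0x68747541)   /* 'Auth' */
        && uEDX == UINT32_C(0x69746e65)   /* 'enti' */
        && uECX == UINT32_C(0x444d4163);  /* 'cAMD' */
}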
/**
* Extracts the CPU family from ASMCpuId(1) or ASMCpuId(0x80000001)
*
* @returns Family.
* @param uEAX EAX return from ASMCpuId(1) or ASMCpuId(0x80000001).
*/
{
}
/**
* Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), Intel variant.
*
* @returns Model.
* @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
*/
{
}
/**
* Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), AMD variant.
*
* @returns Model.
* @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
*/
{
}
/**
* Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001)
*
* @returns Model.
* @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
 * @param fIntel Whether it's an Intel CPU. Use ASMIsIntelCpuEx() or ASMIsIntelCpu().
*/
{
}
/**
* Extracts the CPU stepping from ASMCpuId(1) or ASMCpuId(0x80000001)
*
 * @returns Stepping.
* @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
*/
{
return uEAX & 0xf;
}
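/* A minimal sketch, not taken from this header: family, model and stepping
 * are bit-fields of EAX from CPUID leaf 1 (or 0x80000001).  The extended
 * family field only applies when the base family is 0xf, and the extended
 * model field when the family is 0xf (or 0x6 on Intel).  Hypothetical
 * names. */
DECLINLINE(uint32_t) ASMGetCpuFamilyExample(uint32_t uEAX)
{
    return ((uEAX >> 8) & 0xf) == 0xf
         ? ((uEAX >> 20) & 0x7f) + 0xf                 /* extended family + base family */
         : ((uEAX >> 8) & 0xf);
}

DECLINLINE(uint32_t) ASMGetCpuModelIntelExample(uint32_t uEAX)
{
    return ((uEAX >> 8) & 0xf) == 0xf || ((uEAX >> 8) & 0xf) == 0x6
         ? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0) /* extended model in bits 7:4 */
         : ((uEAX >> 4) & 0xf);
}

DECLINLINE(uint32_t) ASMGetCpuSteppingExample(uint32_t uEAX)
{
    return uEAX & 0xf;
}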
/**
* Get cr0.
* @returns cr0.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uCR0;
}
#endif
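/* A minimal sketch, not taken from this header: control registers are read
 * and written with MOV to/from CRn; CR2, CR3 and CR4 below follow the same
 * pattern.  Hypothetical names, GCC-style assembly only. */
DECLINLINE(RTCCUINTREG) ASMGetCR0Example(void)
{
    RTCCUINTREG uCR0;
#ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr0, %0\n\t" : "=r" (uCR0));
#else
    __asm__ __volatile__("movl %%cr0, %0\n\t" : "=r" (uCR0));
#endif
    return uCR0;
}

DECLINLINE(void) ASMSetCR0Example(RTCCUINTREG uCR0)
{
#ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %0, %%cr0\n\t" : : "r" (uCR0));
#else
    __asm__ __volatile__("movl %0, %%cr0\n\t" : : "r" (uCR0));
#endif
}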
/**
* Sets the CR0 register.
* @param uCR0 The new CR0 value.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Get cr2.
* @returns cr2.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uCR2;
}
#endif
/**
* Sets the CR2 register.
 * @param uCR2 The new CR2 value.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Get cr3.
* @returns cr3.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uCR3;
}
#endif
/**
* Sets the CR3 register.
*
* @param uCR3 New CR3 value.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Reloads the CR3 register.
*/
DECLASM(void) ASMReloadCR3(void);
#else
DECLINLINE(void) ASMReloadCR3(void)
{
# if RT_INLINE_ASM_USES_INTRIN
    __writecr3(__readcr3());
# elif RT_INLINE_ASM_GNU_STYLE
    RTCCUINTREG u;
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr3, %0\n\t"
                         "movq %0, %%cr3\n\t"
                         : "=r" (u));
#  else
    __asm__ __volatile__("movl %%cr3, %0\n\t"
                         "movl %0, %%cr3\n\t"
                         : "=r" (u));
#  endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Get cr4.
* @returns cr4.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
/*mov eax, cr4*/
_emit 0x0f
_emit 0x20
_emit 0xe0
# endif
}
# endif
return uCR4;
}
#endif
/**
* Sets the CR4 register.
*
* @param uCR4 New CR4 value.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
_emit 0x0F
_emit 0x22
# endif
}
# endif
}
#endif
/**
* Get cr8.
* @returns cr8.
* @remark The lock prefix hack for access from non-64-bit modes is NOT used and 0 is returned.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
{
}
# endif
return uCR8;
# else /* !RT_ARCH_AMD64 */
return 0;
# endif /* !RT_ARCH_AMD64 */
}
#endif
/**
* Enables interrupts (EFLAGS.IF).
*/
DECLASM(void) ASMIntEnable(void);
#else
DECLINLINE(void) ASMIntEnable(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm("sti\n");
# elif RT_INLINE_ASM_USES_INTRIN
    _enable();
# else
    __asm sti
# endif
}
#endif
/**
* Disables interrupts (!EFLAGS.IF).
*/
DECLASM(void) ASMIntDisable(void);
#else
DECLINLINE(void) ASMIntDisable(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm("cli\n");
# elif RT_INLINE_ASM_USES_INTRIN
    _disable();
# else
    __asm cli
# endif
}
#endif
/**
* Disables interrupts and returns previous xFLAGS.
*/
#else
{
# ifdef RT_ARCH_AMD64
"cli\n\t"
"popq %0\n\t"
: "=r" (xFlags));
# else
"cli\n\t"
"popl %0\n\t"
: "=r" (xFlags));
# endif
xFlags = ASMGetFlags();
_disable();
# else
__asm {
}
# endif
return xFlags;
}
#endif
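/* Usage sketch, not from this header: the disable-and-return-flags helper
 * pairs with the flags setter to form an interrupt-safe critical section.
 * The names ASMIntDisableFlags and ASMSetFlags are assumed here, since the
 * fragments above do not show them. */
DECLINLINE(void) ASMProtectedIncrementExample(volatile uint32_t *puCounter)
{
    RTCCUINTREG const fSavedFlags = ASMIntDisableFlags(); /* assumed name */
    *puCounter += 1;                      /* work that must not be interrupted */
    ASMSetFlags(fSavedFlags);             /* assumed name; restores EFLAGS.IF */
}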
/**
* Are interrupts enabled?
*
* @returns true / false.
*/
{
}
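/* A minimal sketch, not taken from this header: interrupts are enabled iff
 * bit 9 (IF) of [R/E]FLAGS is set.  Built on the hypothetical
 * ASMGetFlagsExample() sketch earlier in this file. */
DECLINLINE(bool) ASMIntAreEnabledExample(void)
{
    return !!(ASMGetFlagsExample() & UINT32_C(0x200)); /* X86_EFL_IF */
}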
/**
* Halts the CPU until interrupted.
*/
#else
DECLINLINE(void) ASMHalt(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("hlt\n\t");
# else
    __asm {
        hlt
    }
# endif
}
#endif
/**
* Reads a machine specific register.
*
* @returns Register content.
* @param uRegister Register to read.
*/
#else
{
RTUINT64U u;
: "=a" (u.s.Lo),
"=d" (u.s.Hi)
: "c" (uRegister));
# else
{
}
# endif
return u.u;
}
#endif
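/* A minimal sketch, not taken from this header: RDMSR takes the MSR index in
 * ECX and returns the 64-bit value in EDX:EAX, reassembled via RTUINT64U as
 * in the fragment above.  Hypothetical name, GCC-style assembly only. */
DECLINLINE(uint64_t) ASMRdMsrExample(uint32_t uRegister)
{
    RTUINT64U u;
    __asm__ __volatile__("rdmsr"
                         : "=a" (u.s.Lo), "=d" (u.s.Hi)
                         : "c" (uRegister));
    return u.u;
}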
/**
* Writes a machine specific register.
*
* @param uRegister Register to write to.
* @param u64Val Value to write.
*/
#else
{
RTUINT64U u;
u.u = u64Val;
::"a" (u.s.Lo),
"d" (u.s.Hi),
"c" (uRegister));
__writemsr(uRegister, u.u);
# else
{
}
# endif
}
#endif
/**
* Reads low part of a machine specific register.
*
* @returns Register content.
* @param uRegister Register to read.
*/
#else
{
: "=a" (u32)
: "c" (uRegister)
: "edx");
# else
{
}
# endif
return u32;
}
#endif
/**
* Reads high part of a machine specific register.
*
* @returns Register content.
* @param uRegister Register to read.
*/
#else
{
: "=d" (u32)
: "c" (uRegister)
: "eax");
# else
{
}
# endif
return u32;
}
#endif
/**
* Gets dr0.
*
* @returns dr0.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uDR0;
}
#endif
/**
* Gets dr1.
*
* @returns dr1.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uDR1;
}
#endif
/**
* Gets dr2.
*
* @returns dr2.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uDR2;
}
#endif
/**
* Gets dr3.
*
* @returns dr3.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uDR3;
}
#endif
/**
* Gets dr6.
*
* @returns dr6.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uDR6;
}
#endif
/**
* Reads and clears DR6.
*
* @returns DR6.
*/
#else
{
# ifdef RT_ARCH_AMD64
"movq %1, %%dr6\n\t"
: "=r" (uDR6)
: "r" (uNewValue));
# else
"movl %1, %%dr6\n\t"
: "=r" (uDR6)
: "r" (uNewValue));
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uDR6;
}
#endif
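/* A minimal sketch, not taken from this header: DR6 is "cleared" by writing
 * its architectural reset pattern 0xffff0ff0 (bits 31:16 and 11:4 read as
 * ones), which is what the uNewValue operand in the fragment above carries.
 * Hypothetical name, GCC-style assembly only. */
DECLINLINE(RTCCUINTREG) ASMGetAndClearDR6Example(void)
{
    RTCCUINTREG uDR6;
    RTCCUINTREG const uNewValue = 0xffff0ff0;
#ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%dr6, %0\n\t"
                         "movq %1, %%dr6\n\t"
                         : "=r" (uDR6)
                         : "r" (uNewValue));
#else
    __asm__ __volatile__("movl %%dr6, %0\n\t"
                         "movl %1, %%dr6\n\t"
                         : "=r" (uDR6)
                         : "r" (uNewValue));
#endif
    return uDR6;
}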
/**
* Gets dr7.
*
* @returns dr7.
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
return uDR7;
}
#endif
/**
* Sets dr0.
*
* @param uDRVal Debug register value to write
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Sets dr1.
*
* @param uDRVal Debug register value to write
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Sets dr2.
*
* @param uDRVal Debug register value to write
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Sets dr3.
*
* @param uDRVal Debug register value to write
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Sets dr6.
*
* @param uDRVal Debug register value to write
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
* Sets dr7.
*
* @param uDRVal Debug register value to write
*/
#else
{
# ifdef RT_ARCH_AMD64
# else
# endif
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
/**
 * Writes an 8-bit unsigned integer to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param u8 8-bit integer to write.
*/
#else
{
:: "Nd" (Port),
"a" (u8));
# else
{
}
# endif
}
#endif
/**
 * Reads an 8-bit unsigned integer from an I/O port, ordered.
*
* @returns 8-bit integer.
* @param Port I/O port to read from.
*/
#else
{
: "=a" (u8)
: "Nd" (Port));
# else
{
}
# endif
return u8;
}
#endif
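/* A minimal sketch, not taken from this header: OUT/IN move a byte between AL
 * and the I/O port in DX (or an 8-bit immediate, hence the "Nd" constraint
 * seen above).  Hypothetical names, GCC-style assembly only. */
DECLINLINE(void) ASMOutU8Example(RTIOPORT Port, uint8_t u8)
{
    __asm__ __volatile__("outb %b1, %w0\n\t"
                         :
                         : "Nd" (Port), "a" (u8));
}

DECLINLINE(uint8_t) ASMInU8Example(RTIOPORT Port)
{
    uint8_t u8;
    __asm__ __volatile__("inb %w1, %b0\n\t"
                         : "=a" (u8)
                         : "Nd" (Port));
    return u8;
}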
/**
* Writes a 16-bit unsigned integer to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param u16 16-bit integer to write.
*/
#else
{
:: "Nd" (Port),
"a" (u16));
# else
{
}
# endif
}
#endif
/**
* Reads a 16-bit unsigned integer from an I/O port, ordered.
*
* @returns 16-bit integer.
* @param Port I/O port to read from.
*/
#else
{
: "=a" (u16)
: "Nd" (Port));
# else
{
}
# endif
return u16;
}
#endif
/**
* Writes a 32-bit unsigned integer to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param u32 32-bit integer to write.
*/
#else
{
:: "Nd" (Port),
"a" (u32));
# else
{
}
# endif
}
#endif
/**
* Reads a 32-bit unsigned integer from an I/O port, ordered.
*
* @returns 32-bit integer.
* @param Port I/O port to read from.
*/
#else
{
: "=a" (u32)
: "Nd" (Port));
# else
{
}
# endif
return u32;
}
#endif
/**
* Writes a string of 8-bit unsigned integer items to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param pau8 Pointer to the string buffer.
* @param c The number of items to write.
*/
#else
{
: "+S" (pau8),
"+c" (c)
: "d" (Port));
# else
{
}
# endif
}
#endif
/**
* Reads a string of 8-bit unsigned integer items from an I/O port, ordered.
*
* @param Port I/O port to read from.
* @param pau8 Pointer to the string buffer (output).
* @param c The number of items to read.
*/
#else
{
: "+D" (pau8),
"+c" (c)
: "d" (Port));
# else
{
}
# endif
}
#endif
/**
* Writes a string of 16-bit unsigned integer items to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param pau16 Pointer to the string buffer.
* @param c The number of items to write.
*/
#else
{
: "+S" (pau16),
"+c" (c)
: "d" (Port));
# else
{
}
# endif
}
#endif
/**
* Reads a string of 16-bit unsigned integer items from an I/O port, ordered.
*
* @param Port I/O port to read from.
* @param pau16 Pointer to the string buffer (output).
* @param c The number of items to read.
*/
#else
{
: "+D" (pau16),
"+c" (c)
: "d" (Port));
# else
{
}
# endif
}
#endif
/**
* Writes a string of 32-bit unsigned integer items to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param pau32 Pointer to the string buffer.
* @param c The number of items to write.
*/
#else
{
: "+S" (pau32),
"+c" (c)
: "d" (Port));
# else
{
}
# endif
}
#endif
/**
* Reads a string of 32-bit unsigned integer items from an I/O port, ordered.
*
* @param Port I/O port to read from.
* @param pau32 Pointer to the string buffer (output).
* @param c The number of items to read.
*/
#else
{
: "+D" (pau32),
"+c" (c)
: "d" (Port));
# else
{
}
# endif
}
#endif
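/* A minimal sketch, not taken from this header: the string forms use REP
 * OUTS/INS with DX as the port, [R/E]SI or [R/E]DI as the buffer pointer and
 * [R/E]CX as the count, which is what the "+S"/"+D"/"+c"/"d" constraints
 * above express.  Hypothetical name, 8-bit write variant only. */
DECLINLINE(void) ASMOutStrU8Example(RTIOPORT Port, const uint8_t *pau8, size_t c)
{
    __asm__ __volatile__("rep; outsb\n\t"
                         : "+S" (pau8), "+c" (c)
                         : "d" (Port));
}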
/**
* Invalidate page.
*
* @param pv Address of the page to invalidate.
*/
#else
{
# else
{
# ifdef RT_ARCH_AMD64
# else
# endif
}
# endif
}
#endif
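/* A minimal sketch, not taken from this header: INVLPG drops the TLB entry
 * for a single linear address, which is cheaper than the full flush performed
 * by the CR3 reload in ASMReloadCR3() above.  Hypothetical name, GCC-style
 * assembly only. */
DECLINLINE(void) ASMInvalidatePageExample(void *pv)
{
    __asm__ __volatile__("invlpg %0\n\t"
                         : : "m" (*(uint8_t *)pv)
                         : "memory");
}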
/**
* Write back the internal caches and invalidate them.
*/
DECLASM(void) ASMWriteBackAndInvalidateCaches(void);
#else
DECLINLINE(void) ASMWriteBackAndInvalidateCaches(void)
{
# if RT_INLINE_ASM_USES_INTRIN
    __wbinvd();
# elif RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("wbinvd");
# else
    __asm
    {
        wbinvd
    }
# endif
}
#endif
/**
* Invalidate internal and (perhaps) external caches without first
* flushing dirty cache lines. Use with extreme care.
*/
DECLASM(void) ASMInvalidateInternalCaches(void);
#else
DECLINLINE(void) ASMInvalidateInternalCaches(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("invd");
# else
    __asm
    {
        invd
    }
# endif
}
#endif
/**
 * Memory fence, waits for any pending writes and reads to complete.
 * Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
*/
DECLINLINE(void) ASMMemoryFenceSSE2(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0xae,0xf0\n\t"); /* mfence */
#elif RT_INLINE_ASM_USES_INTRIN
    _mm_mfence();
#else
    __asm
    {
        _emit   0x0f
        _emit   0xae
        _emit   0xf0
    }
#endif
}
/**
* Memory store fence, waits for any writes to complete.
* Requires the X86_CPUID_FEATURE_EDX_SSE CPUID bit set.
*/
DECLINLINE(void) ASMWriteFenceSSE(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0xae,0xf8\n\t"); /* sfence */
#elif RT_INLINE_ASM_USES_INTRIN
    _mm_sfence();
#else
    __asm
    {
        _emit   0x0f
        _emit   0xae
        _emit   0xf8
    }
#endif
}
/**
* Memory load fence, waits for any pending reads to complete.
* Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
*/
DECLINLINE(void) ASMReadFenceSSE2(void)
{
#if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (".byte 0x0f,0xae,0xe8\n\t"); /* lfence */
#elif RT_INLINE_ASM_USES_INTRIN
    _mm_lfence();
#else
    __asm
    {
        _emit   0x0f
        _emit   0xae
        _emit   0xe8
    }
#endif
}
/** @} */
#endif