/** @file
 * IPRT - AMD64 and x86 Specific Assembly Functions.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef ___iprt_asm_amd64_x86_h
#define ___iprt_asm_amd64_x86_h

#include <iprt/types.h>
#if !defined(RT_ARCH_AMD64) && !defined(RT_ARCH_X86)
# error "Not on AMD64 or x86"
#endif

#if defined(_MSC_VER) && RT_INLINE_ASM_USES_INTRIN
# include <intrin.h>
  /* Emit the intrinsics at all optimization levels. */
# pragma intrinsic(_ReadWriteBarrier)
# pragma intrinsic(__cpuid)
# pragma intrinsic(_enable)
# pragma intrinsic(_disable)
# pragma intrinsic(__rdtsc)
# pragma intrinsic(__readmsr)
# pragma intrinsic(__writemsr)
# pragma intrinsic(__outbyte)
# pragma intrinsic(__outbytestring)
# pragma intrinsic(__outword)
# pragma intrinsic(__outwordstring)
# pragma intrinsic(__outdword)
# pragma intrinsic(__outdwordstring)
# pragma intrinsic(__inbyte)
# pragma intrinsic(__inbytestring)
# pragma intrinsic(__inword)
# pragma intrinsic(__inwordstring)
# pragma intrinsic(__indword)
# pragma intrinsic(__indwordstring)
# pragma intrinsic(__invlpg)
# pragma intrinsic(__wbinvd)
# pragma intrinsic(__readcr0)
# pragma intrinsic(__readcr2)
# pragma intrinsic(__readcr3)
# pragma intrinsic(__readcr4)
# pragma intrinsic(__writecr0)
# pragma intrinsic(__writecr3)
# pragma intrinsic(__writecr4)
# pragma intrinsic(__readdr)
# pragma intrinsic(__writedr)
# ifdef RT_ARCH_AMD64
#  pragma intrinsic(__readcr8)
#  pragma intrinsic(__writecr8)
# endif
# if RT_INLINE_ASM_USES_INTRIN >= 15
#  pragma intrinsic(__readeflags)
#  pragma intrinsic(__writeeflags)
# endif
#endif



/** @defgroup grp_rt_asm_amd64_x86  AMD64 and x86 Specific ASM Routines
 * @ingroup grp_rt_asm
 * @{
 */

/** @todo find a more proper place for this structure? */
#pragma pack(1)
/** IDTR */
typedef struct RTIDTR
{
    /** Size of the IDT. */
    uint16_t    cbIdt;
    /** Address of the IDT. */
    uintptr_t   pIdt;
} RTIDTR, *PRTIDTR;
#pragma pack()

#pragma pack(1)
/** GDTR */
typedef struct RTGDTR
{
    /** Size of the GDT. */
    uint16_t    cbGdt;
    /** Address of the GDT. */
    uintptr_t   pGdt;
} RTGDTR, *PRTGDTR;
#pragma pack()


/**
 * Gets the content of the IDTR CPU register.
 * @param   pIdtr   Where to store the IDTR contents.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(void) ASMGetIDTR(PRTIDTR pIdtr);
#else
DECLINLINE(void) ASMGetIDTR(PRTIDTR pIdtr)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("sidt %0" : "=m" (*pIdtr));
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [pIdtr]
        sidt    [rax]
#  else
        mov     eax, [pIdtr]
        sidt    [eax]
#  endif
    }
# endif
}
#endif
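
/* Usage sketch (editorial addition, not from the original sources): reading the
 * IDTR into the packed RTIDTR structure defined above.
 *
 * @code
 *      RTIDTR Idtr;
 *      ASMGetIDTR(&Idtr);
 *      // Idtr.cbIdt now holds the limit stored by SIDT, Idtr.pIdt the linear base.
 * @endcode
 */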


/**
 * Sets the content of the IDTR CPU register.
 * @param   pIdtr   Where to load the IDTR contents from
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(void) ASMSetIDTR(const RTIDTR *pIdtr);
#else
DECLINLINE(void) ASMSetIDTR(const RTIDTR *pIdtr)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("lidt %0" : : "m" (*pIdtr));
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [pIdtr]
        lidt    [rax]
#  else
        mov     eax, [pIdtr]
        lidt    [eax]
#  endif
    }
# endif
}
#endif


/**
 * Gets the content of the GDTR CPU register.
 * @param   pGdtr   Where to store the GDTR contents.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(void) ASMGetGDTR(PRTGDTR pGdtr);
#else
DECLINLINE(void) ASMGetGDTR(PRTGDTR pGdtr)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("sgdt %0" : "=m" (*pGdtr));
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        mov     rax, [pGdtr]
        sgdt    [rax]
#  else
        mov     eax, [pGdtr]
        sgdt    [eax]
#  endif
    }
# endif
}
#endif

/**
 * Get the cs register.
 * @returns cs.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(RTSEL) ASMGetCS(void);
#else
DECLINLINE(RTSEL) ASMGetCS(void)
{
    RTSEL SelCS;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%cs, %0\n\t" : "=r" (SelCS));
# else
    __asm
    {
        mov     ax, cs
        mov     [SelCS], ax
    }
# endif
    return SelCS;
}
#endif


/**
 * Get the DS register.
 * @returns DS.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(RTSEL) ASMGetDS(void);
#else
DECLINLINE(RTSEL) ASMGetDS(void)
{
    RTSEL SelDS;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%ds, %0\n\t" : "=r" (SelDS));
# else
    __asm
    {
        mov     ax, ds
        mov     [SelDS], ax
    }
# endif
    return SelDS;
}
#endif


/**
 * Get the ES register.
 * @returns ES.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(RTSEL) ASMGetES(void);
#else
DECLINLINE(RTSEL) ASMGetES(void)
{
    RTSEL SelES;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%es, %0\n\t" : "=r" (SelES));
# else
    __asm
    {
        mov     ax, es
        mov     [SelES], ax
    }
# endif
    return SelES;
}
#endif


/**
 * Get the FS register.
 * @returns FS.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(RTSEL) ASMGetFS(void);
#else
DECLINLINE(RTSEL) ASMGetFS(void)
{
    RTSEL SelFS;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%fs, %0\n\t" : "=r" (SelFS));
# else
    __asm
    {
        mov     ax, fs
        mov     [SelFS], ax
    }
# endif
    return SelFS;
}
#endif


/**
 * Get the GS register.
 * @returns GS.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(RTSEL) ASMGetGS(void);
#else
DECLINLINE(RTSEL) ASMGetGS(void)
{
    RTSEL SelGS;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%gs, %0\n\t" : "=r" (SelGS));
# else
    __asm
    {
        mov     ax, gs
        mov     [SelGS], ax
    }
# endif
    return SelGS;
}
#endif


/**
 * Get the SS register.
 * @returns SS.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(RTSEL) ASMGetSS(void);
#else
DECLINLINE(RTSEL) ASMGetSS(void)
{
    RTSEL SelSS;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movw %%ss, %0\n\t" : "=r" (SelSS));
# else
    __asm
    {
        mov     ax, ss
        mov     [SelSS], ax
    }
# endif
    return SelSS;
}
#endif


/**
 * Get the TR register.
 * @returns TR.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(RTSEL) ASMGetTR(void);
#else
DECLINLINE(RTSEL) ASMGetTR(void)
{
    RTSEL SelTR;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("str %w0\n\t" : "=r" (SelTR));
# else
    __asm
    {
        str     ax
        mov     [SelTR], ax
    }
# endif
    return SelTR;
}
#endif


/**
 * Get the LDTR register.
 * @returns LDTR.
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(RTSEL) ASMGetLDTR(void);
#else
DECLINLINE(RTSEL) ASMGetLDTR(void)
{
    RTSEL SelLDTR;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("sldt %w0\n\t" : "=r" (SelLDTR));
# else
    __asm
    {
        sldt    ax
        mov     [SelLDTR], ax
    }
# endif
    return SelLDTR;
}
#endif


/**
 * Get the access rights for the segment selector.
 *
 * @returns The access rights on success or ~0U on failure.
 * @param   uSel        The selector value.
 *
 * @remarks Using ~0U for failure is chosen because valid access rights always
 *          have bits 0:7 as 0 (on both Intel & AMD).
 */
#if RT_INLINE_ASM_EXTERNAL
DECLASM(uint32_t) ASMGetSegAttr(uint32_t uSel);
#else
DECLINLINE(uint32_t) ASMGetSegAttr(uint32_t uSel)
{
    uint32_t uAttr;
    /* LAR only accesses 16-bit of the source operand, but eax for the
       destination operand is required for getting the full 32-bit access rights. */
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("lar %1, %%eax\n\t"
                         "jz done%=\n\t"
                         "movl $0xffffffff, %%eax\n\t"
                         "done%=:\n\t"
                         "movl %%eax, %0\n\t"
                         : "=r" (uAttr)
                         : "r" (uSel)
                         : "cc", "%eax");
# else
    __asm
    {
        lar     eax, [uSel]
        jz      done
        mov     eax, 0ffffffffh
    done:
        mov     [uAttr], eax
    }
# endif
    return uAttr;
}
#endif
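
/* Usage sketch (editorial addition, not from the original sources): probing a
 * selector's access rights.  The present (P) bit is assumed here to land in
 * bit 15 of the LAR result; treat that bit position as an assumption.
 *
 * @code
 *      uint32_t uAttr = ASMGetSegAttr(ASMGetSS());
 *      if (uAttr != ~0U && (uAttr & (1U << 15)))
 *      {
 *          // selector is valid and the descriptor is marked present
 *      }
 * @endcode
 */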


/**
 * Get the [RE]FLAGS register.
 * @returns [RE]FLAGS.
 */
#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
DECLASM(RTCCUINTREG) ASMGetFlags(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetFlags(void)
{
    RTCCUINTREG uFlags;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("pushfq\n\t"
                         "popq %0\n\t"
                         : "=r" (uFlags));
#  else
    __asm__ __volatile__("pushfl\n\t"
                         "popl %0\n\t"
                         : "=r" (uFlags));
#  endif
# elif RT_INLINE_ASM_USES_INTRIN >= 15
    uFlags = __readeflags();
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        pushfq
        pop     [uFlags]
#  else
        pushfd
        pop     [uFlags]
#  endif
    }
# endif
    return uFlags;
}
#endif


/**
 * Set the [RE]FLAGS register.
 * @param   uFlags      The new [RE]FLAGS value.
 */
#if RT_INLINE_ASM_EXTERNAL && RT_INLINE_ASM_USES_INTRIN < 15
DECLASM(void) ASMSetFlags(RTCCUINTREG uFlags);
#else
DECLINLINE(void) ASMSetFlags(RTCCUINTREG uFlags)
{
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ __volatile__("pushq %0\n\t"
                         "popfq\n\t"
                         : : "g" (uFlags));
#  else
    __asm__ __volatile__("pushl %0\n\t"
                         "popfl\n\t"
                         : : "g" (uFlags));
#  endif
# elif RT_INLINE_ASM_USES_INTRIN >= 15
    __writeeflags(uFlags);
# else
    __asm
    {
#  ifdef RT_ARCH_AMD64
        push    [uFlags]
        popfq
#  else
        push    [uFlags]
        popfd
#  endif
    }
# endif
}
#endif
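
/* Usage sketch (editorial addition, not from the original sources): the common
 * save / restore pattern around code that may change the flags register.
 *
 * @code
 *      RTCCUINTREG const fSavedFlags = ASMGetFlags();
 *      // ... code that may modify IF or other flag bits ...
 *      ASMSetFlags(fSavedFlags);
 * @endcode
 */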


/**
 * Gets the content of the CPU timestamp counter register.
 *
 * @returns TSC.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint64_t) ASMReadTSC(void);
#else
DECLINLINE(uint64_t) ASMReadTSC(void)
{
    RTUINT64U u;
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("rdtsc\n\t" : "=a" (u.s.Lo), "=d" (u.s.Hi));
# else
#  if RT_INLINE_ASM_USES_INTRIN
    u.u = __rdtsc();
#  else
    __asm
    {
        rdtsc
        mov     [u.s.Lo], eax
        mov     [u.s.Hi], edx
    }
#  endif
# endif
    return u.u;
}
#endif
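
/* Usage sketch (editorial addition, not from the original sources): measuring a
 * cycle delta.  RDTSC is not a serializing instruction, so on out-of-order CPUs
 * the reads may be reordered relative to the code being measured.
 *
 * @code
 *      uint64_t const uTscStart = ASMReadTSC();
 *      // ... work to be measured ...
 *      uint64_t const cTicks    = ASMReadTSC() - uTscStart;
 * @endcode
 */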


/**
 * Performs the cpuid instruction returning all registers.
 *
 * @param   uOperator   CPUID operation (eax).
 * @param   pvEAX       Where to store eax.
 * @param   pvEBX       Where to store ebx.
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 * @remark  We're using void pointers to ease the use of special bitfield structures and such.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMCpuId(uint32_t uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX);
#else
DECLINLINE(void) ASMCpuId(uint32_t uOperator, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    RTCCUINTREG uRAX, uRBX, uRCX, uRDX;
    __asm__ __volatile__ ("cpuid\n\t"
                          : "=a" (uRAX),
                            "=b" (uRBX),
                            "=c" (uRCX),
                            "=d" (uRDX)
                          : "0" (uOperator), "2" (0));
    *(uint32_t *)pvEAX = (uint32_t)uRAX;
    *(uint32_t *)pvEBX = (uint32_t)uRBX;
    *(uint32_t *)pvECX = (uint32_t)uRCX;
    *(uint32_t *)pvEDX = (uint32_t)uRDX;
#  else
    __asm__ __volatile__ ("xchgl %%ebx, %1\n\t"
                          "cpuid\n\t"
                          "xchgl %%ebx, %1\n\t"
                          : "=a" (*(uint32_t *)pvEAX),
                            "=r" (*(uint32_t *)pvEBX),
                            "=c" (*(uint32_t *)pvECX),
                            "=d" (*(uint32_t *)pvEDX)
                          : "0" (uOperator), "2" (0));
#  endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuid(aInfo, uOperator);
    *(uint32_t *)pvEAX = aInfo[0];
    *(uint32_t *)pvEBX = aInfo[1];
    *(uint32_t *)pvECX = aInfo[2];
    *(uint32_t *)pvEDX = aInfo[3];

# else
    uint32_t uEAX;
    uint32_t uEBX;
    uint32_t uECX;
    uint32_t uEDX;
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        cpuid
        mov     [uEAX], eax
        mov     [uEBX], ebx
        mov     [uECX], ecx
        mov     [uEDX], edx
        pop     ebx
    }
    *(uint32_t *)pvEAX = uEAX;
    *(uint32_t *)pvEBX = uEBX;
    *(uint32_t *)pvECX = uECX;
    *(uint32_t *)pvEDX = uEDX;
# endif
}
#endif
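
/* Usage sketch (editorial addition, not from the original sources): reading the
 * vendor identification string from leaf 0, which CPUID returns in EBX, EDX,
 * ECX order.
 *
 * @code
 *      uint32_t uEAX, uEBX, uECX, uEDX;
 *      char     szVendor[13];
 *      ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
 *      memcpy(&szVendor[0], &uEBX, 4);
 *      memcpy(&szVendor[4], &uEDX, 4);
 *      memcpy(&szVendor[8], &uECX, 4);
 *      szVendor[12] = '\0';
 * @endcode
 */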


/**
 * Performs the CPUID instruction with EAX and ECX input returning ALL output
 * registers.
 *
 * @param   uOperator   CPUID operation (eax).
 * @param   uIdxECX     ecx index
 * @param   pvEAX       Where to store eax.
 * @param   pvEBX       Where to store ebx.
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 * @remark  We're using void pointers to ease the use of special bitfield structures and such.
 */
#if RT_INLINE_ASM_EXTERNAL || RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX);
#else
DECLINLINE(void) ASMCpuId_Idx_ECX(uint32_t uOperator, uint32_t uIdxECX, void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX)
{
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    RTCCUINTREG uRAX, uRBX, uRCX, uRDX;
    __asm__ ("cpuid\n\t"
             : "=a" (uRAX),
               "=b" (uRBX),
               "=c" (uRCX),
               "=d" (uRDX)
             : "0" (uOperator),
               "2" (uIdxECX));
    *(uint32_t *)pvEAX = (uint32_t)uRAX;
    *(uint32_t *)pvEBX = (uint32_t)uRBX;
    *(uint32_t *)pvECX = (uint32_t)uRCX;
    *(uint32_t *)pvEDX = (uint32_t)uRDX;
#  else
    __asm__ ("xchgl %%ebx, %1\n\t"
             "cpuid\n\t"
             "xchgl %%ebx, %1\n\t"
             : "=a" (*(uint32_t *)pvEAX),
               "=r" (*(uint32_t *)pvEBX),
               "=c" (*(uint32_t *)pvECX),
               "=d" (*(uint32_t *)pvEDX)
             : "0" (uOperator),
               "2" (uIdxECX));
#  endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuidex(aInfo, uOperator, uIdxECX);
    *(uint32_t *)pvEAX = aInfo[0];
    *(uint32_t *)pvEBX = aInfo[1];
    *(uint32_t *)pvECX = aInfo[2];
    *(uint32_t *)pvEDX = aInfo[3];

# else
    uint32_t uEAX;
    uint32_t uEBX;
    uint32_t uECX;
    uint32_t uEDX;
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        mov     ecx, [uIdxECX]
        cpuid
        mov     [uEAX], eax
        mov     [uEBX], ebx
        mov     [uECX], ecx
        mov     [uEDX], edx
        pop     ebx
    }
    *(uint32_t *)pvEAX = uEAX;
    *(uint32_t *)pvEBX = uEBX;
    *(uint32_t *)pvECX = uECX;
    *(uint32_t *)pvEDX = uEDX;
# endif
}
#endif
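
/* Usage sketch (editorial addition, not from the original sources): querying a
 * sub-leaf, e.g. the structured extended feature flags leaf 7 with ECX = 0.
 *
 * @code
 *      uint32_t uEAX, uEBX, uECX, uEDX;
 *      ASMCpuId_Idx_ECX(7, 0, &uEAX, &uEBX, &uECX, &uEDX);
 *      // uEBX/uECX/uEDX now hold the feature bits for sub-leaf 0.
 * @endcode
 */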


/**
 * CPUID variant that initializes all 4 registers before the CPUID instruction.
 *
 * @returns The EAX result value.
 * @param   uOperator   CPUID operation (eax).
 * @param   uInitEBX    The value to assign EBX prior to the CPUID instruction.
 * @param   uInitECX    The value to assign ECX prior to the CPUID instruction.
 * @param   uInitEDX    The value to assign EDX prior to the CPUID instruction.
 * @param   pvEAX       Where to store eax. Optional.
 * @param   pvEBX       Where to store ebx. Optional.
 * @param   pvECX       Where to store ecx. Optional.
 * @param   pvEDX       Where to store edx. Optional.
 */
DECLASM(uint32_t) ASMCpuIdExSlow(uint32_t uOperator, uint32_t uInitEBX, uint32_t uInitECX, uint32_t uInitEDX,
                                 void *pvEAX, void *pvEBX, void *pvECX, void *pvEDX);


/**
 * Performs the cpuid instruction returning ecx and edx.
 *
 * @param   uOperator   CPUID operation (eax).
 * @param   pvECX       Where to store ecx.
 * @param   pvEDX       Where to store edx.
 * @remark  We're using void pointers to ease the use of special bitfield structures and such.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMCpuId_ECX_EDX(uint32_t uOperator, void *pvECX, void *pvEDX);
#else
DECLINLINE(void) ASMCpuId_ECX_EDX(uint32_t uOperator, void *pvECX, void *pvEDX)
{
    uint32_t uEBX;
    ASMCpuId(uOperator, &uOperator, &uEBX, pvECX, pvEDX);
}
#endif


/**
 * Performs the cpuid instruction returning eax.
 *
 * @param   uOperator   CPUID operation (eax).
 * @returns EAX after cpuid operation.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint32_t) ASMCpuId_EAX(uint32_t uOperator);
#else
DECLINLINE(uint32_t) ASMCpuId_EAX(uint32_t uOperator)
{
    RTCCUINTREG xAX;
# if RT_INLINE_ASM_GNU_STYLE
#  ifdef RT_ARCH_AMD64
    __asm__ ("cpuid"
             : "=a" (xAX)
             : "0" (uOperator)
             : "rbx", "rcx", "rdx");
#  elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
    __asm__ ("push %%ebx\n\t"
             "cpuid\n\t"
             "pop %%ebx\n\t"
             : "=a" (xAX)
             : "0" (uOperator)
             : "ecx", "edx");
#  else
    __asm__ ("cpuid"
             : "=a" (xAX)
             : "0" (uOperator)
             : "edx", "ecx", "ebx");
#  endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuid(aInfo, uOperator);
    xAX = aInfo[0];

# else
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        cpuid
        mov     [xAX], eax
        pop     ebx
    }
# endif
    return (uint32_t)xAX;
}
#endif
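
/* Usage sketch (editorial addition, not from the original sources): leaf 0
 * reports the highest supported standard CPUID leaf in EAX.
 *
 * @code
 *      uint32_t const uMaxStdLeaf = ASMCpuId_EAX(0);
 * @endcode
 */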
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync/**
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync * Performs the cpuid instruction returning ebx.
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync *
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync * @param uOperator CPUID operation (eax).
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync * @returns EBX after cpuid operation.
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync */
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsyncDECLASM(uint32_t) ASMCpuId_EBX(uint32_t uOperator);
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync#else
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsyncDECLINLINE(uint32_t) ASMCpuId_EBX(uint32_t uOperator)
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync{
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync RTCCUINTREG xBX;
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync# if RT_INLINE_ASM_GNU_STYLE
21fe567f85453e9865063d3d51464d189de5a867vboxsync# ifdef RT_ARCH_AMD64
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync RTCCUINTREG uSpill;
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync __asm__ ("cpuid"
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync : "=a" (uSpill),
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync "=b" (xBX)
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync : "0" (uOperator)
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync : "rdx", "rcx");
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync __asm__ ("push %%ebx\n\t"
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync "cpuid\n\t"
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync "mov %%ebx, %%edx\n\t"
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync "pop %%ebx\n\t"
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync : "=a" (uOperator),
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync "=d" (xBX)
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync : "0" (uOperator)
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync : "ecx");
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync# else
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync __asm__ ("cpuid"
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync : "=a" (uOperator),
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync "=b" (xBX)
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync : "0" (uOperator)
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync : "edx", "ecx");
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync# endif
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync# elif RT_INLINE_ASM_USES_INTRIN
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync int aInfo[4];
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync __cpuid(aInfo, uOperator);
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync xBX = aInfo[1];
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync# else
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync __asm
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync {
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync push ebx
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync mov eax, [uOperator]
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync cpuid
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync mov [xBX], ebx
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync pop ebx
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync }
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync# endif
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync return (uint32_t)xBX;
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync}
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync#endif
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync
1c2c968fd241148110002d75b2c0fdeddc211e14vboxsync
/**
 * Performs the cpuid instruction returning ecx.
 *
 * @param uOperator CPUID operation (eax).
 * @returns ECX after cpuid operation.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint32_t) ASMCpuId_ECX(uint32_t uOperator);
#else
DECLINLINE(uint32_t) ASMCpuId_ECX(uint32_t uOperator)
{
    RTCCUINTREG xCX;
# if RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
    RTCCUINTREG uSpill;
    __asm__ ("cpuid"
             : "=a" (uSpill),
               "=c" (xCX)
             : "0" (uOperator)
             : "rbx", "rdx");
# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
    __asm__ ("push %%ebx\n\t"
             "cpuid\n\t"
             "pop %%ebx\n\t"
             : "=a" (uOperator),
               "=c" (xCX)
             : "0" (uOperator)
             : "edx");
# else
    __asm__ ("cpuid"
             : "=a" (uOperator),
               "=c" (xCX)
             : "0" (uOperator)
             : "ebx", "edx");
# endif

# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuid(aInfo, uOperator);
    xCX = aInfo[2];

# else
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        cpuid
        mov     [xCX], ecx
        pop     ebx
    }
# endif
    return (uint32_t)xCX;
}
#endif


/**
 * Performs the cpuid instruction returning edx.
 *
 * @param uOperator CPUID operation (eax).
 * @returns EDX after cpuid operation.
 */
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint32_t) ASMCpuId_EDX(uint32_t uOperator);
#else
DECLINLINE(uint32_t) ASMCpuId_EDX(uint32_t uOperator)
{
    RTCCUINTREG xDX;
# if RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
    RTCCUINTREG uSpill;
    __asm__ ("cpuid"
             : "=a" (uSpill),
               "=d" (xDX)
             : "0" (uOperator)
             : "rbx", "rcx");
# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
    __asm__ ("push %%ebx\n\t"
             "cpuid\n\t"
             "pop %%ebx\n\t"
             : "=a" (uOperator),
               "=d" (xDX)
             : "0" (uOperator)
             : "ecx");
# else
    __asm__ ("cpuid"
             : "=a" (uOperator),
               "=d" (xDX)
             : "0" (uOperator)
             : "ebx", "ecx");
# endif
# elif RT_INLINE_ASM_USES_INTRIN
    int aInfo[4];
    __cpuid(aInfo, uOperator);
    xDX = aInfo[3];
# else
    __asm
    {
        push    ebx
        mov     eax, [uOperator]
        cpuid
        mov     [xDX], edx
        pop     ebx
    }
# endif
    return (uint32_t)xDX;
}
#endif

/**
* Checks if the current CPU supports CPUID.
*
* @returns true if CPUID is supported.
*/
DECLINLINE(bool) ASMHasCpuId(void)
{
#ifdef RT_ARCH_AMD64
return true; /* ASSUME that all amd64 compatible CPUs have cpuid. */
#else /* !RT_ARCH_AMD64 */
bool fRet = false;
# if RT_INLINE_ASM_GNU_STYLE
uint32_t u1;
uint32_t u2;
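    /* Probe for CPUID by toggling the ID flag (bit 21) in EFLAGS; if the
       change sticks, the CPU implements the CPUID instruction. */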
__asm__ ("pushf\n\t"
"pop %1\n\t"
"mov %1, %2\n\t"
"xorl $0x200000, %1\n\t"
"push %1\n\t"
"popf\n\t"
"pushf\n\t"
"pop %1\n\t"
"cmpl %1, %2\n\t"
"setne %0\n\t"
"push %2\n\t"
"popf\n\t"
: "=m" (fRet), "=r" (u1), "=r" (u2));
# else
__asm
{
pushfd
pop eax
mov ebx, eax
xor eax, 0200000h
push eax
popfd
pushfd
pop eax
cmp eax, ebx
setne fRet
push ebx
popfd
}
# endif
return fRet;
#endif /* !RT_ARCH_AMD64 */
}
/**
* Gets the APIC ID of the current CPU.
*
* @returns the APIC ID.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint8_t) ASMGetApicId(void);
#else
DECLINLINE(uint8_t) ASMGetApicId(void)
{
RTCCUINTREG xBX;
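    /* CPUID leaf 1 returns the initial local APIC ID in bits 31:24 of EBX. */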
# if RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
RTCCUINTREG uSpill;
__asm__ __volatile__ ("cpuid"
: "=a" (uSpill),
"=b" (xBX)
: "0" (1)
: "rcx", "rdx");
# elif (defined(PIC) || defined(__PIC__)) && defined(__i386__)
RTCCUINTREG uSpill;
__asm__ __volatile__ ("mov %%ebx,%1\n\t"
"cpuid\n\t"
"xchgl %%ebx,%1\n\t"
: "=a" (uSpill),
"=rm" (xBX)
: "0" (1)
: "ecx", "edx");
# else
RTCCUINTREG uSpill;
__asm__ __volatile__ ("cpuid"
: "=a" (uSpill),
"=b" (xBX)
: "0" (1)
: "ecx", "edx");
# endif
# elif RT_INLINE_ASM_USES_INTRIN
int aInfo[4];
__cpuid(aInfo, 1);
xBX = aInfo[1];
# else
__asm
{
push ebx
mov eax, 1
cpuid
mov [xBX], ebx
pop ebx
}
# endif
return (uint8_t)(xBX >> 24);
}
#endif
/**
 * Tests if this is a genuine Intel CPU based on the ASMCpuId(0) output.
*
* @returns true/false.
* @param uEBX EBX return from ASMCpuId(0)
* @param uECX ECX return from ASMCpuId(0)
* @param uEDX EDX return from ASMCpuId(0)
*/
DECLINLINE(bool) ASMIsIntelCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
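    /* "Genu", "ineI" and "ntel" in little endian (EBX, EDX, ECX), i.e. "GenuineIntel". */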
return uEBX == UINT32_C(0x756e6547)
&& uECX == UINT32_C(0x6c65746e)
&& uEDX == UINT32_C(0x49656e69);
}
/**
* Tests if this is a genuine Intel CPU.
*
* @returns true/false.
* @remarks ASSUMES that cpuid is supported by the CPU.
*/
DECLINLINE(bool) ASMIsIntelCpu(void)
{
uint32_t uEAX, uEBX, uECX, uEDX;
ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
return ASMIsIntelCpuEx(uEBX, uECX, uEDX);
}
/**
 * Tests if this is an authentic AMD CPU based on the ASMCpuId(0) output.
*
* @returns true/false.
* @param uEBX EBX return from ASMCpuId(0)
* @param uECX ECX return from ASMCpuId(0)
* @param uEDX EDX return from ASMCpuId(0)
*/
DECLINLINE(bool) ASMIsAmdCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
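    /* "Auth", "enti" and "cAMD" in little endian (EBX, EDX, ECX), i.e. "AuthenticAMD". */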
return uEBX == UINT32_C(0x68747541)
&& uECX == UINT32_C(0x444d4163)
&& uEDX == UINT32_C(0x69746e65);
}
/**
* Tests if this is an authentic AMD CPU.
*
* @returns true/false.
* @remarks ASSUMES that cpuid is supported by the CPU.
*/
DECLINLINE(bool) ASMIsAmdCpu(void)
{
uint32_t uEAX, uEBX, uECX, uEDX;
ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
return ASMIsAmdCpuEx(uEBX, uECX, uEDX);
}
/**
 * Tests if this is a Centaur/VIA CPU ("CentaurHauls") based on the ASMCpuId(0) output.
*
* @returns true/false.
* @param uEBX EBX return from ASMCpuId(0).
* @param uECX ECX return from ASMCpuId(0).
* @param uEDX EDX return from ASMCpuId(0).
*/
DECLINLINE(bool) ASMIsViaCentaurCpuEx(uint32_t uEBX, uint32_t uECX, uint32_t uEDX)
{
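    /* "Cent", "aurH" and "auls" in little endian (EBX, EDX, ECX), i.e. "CentaurHauls". */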
return uEBX == UINT32_C(0x746e6543)
&& uECX == UINT32_C(0x736c7561)
&& uEDX == UINT32_C(0x48727561);
}
/**
 * Tests if this is a Centaur/VIA CPU ("CentaurHauls").
*
* @returns true/false.
* @remarks ASSUMES that cpuid is supported by the CPU.
*/
DECLINLINE(bool) ASMIsViaCentaurCpu(void)
{
uint32_t uEAX, uEBX, uECX, uEDX;
ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    return ASMIsViaCentaurCpuEx(uEBX, uECX, uEDX);
}
/**
* Checks whether ASMCpuId_EAX(0x00000000) indicates a valid range.
 *
* @returns true/false.
* @param uEAX The EAX value of CPUID leaf 0x00000000.
*
* @note This only succeeds if there are at least two leaves in the range.
* @remarks The upper range limit is just some half reasonable value we've
* picked out of thin air.
*/
DECLINLINE(bool) ASMIsValidStdRange(uint32_t uEAX)
{
return uEAX >= UINT32_C(0x00000001) && uEAX <= UINT32_C(0x000fffff);
}
/**
* Checks whether ASMCpuId_EAX(0x80000000) indicates a valid range.
*
* @returns true/false.
* @param uEAX The EAX value of CPUID leaf 0x80000000.
*
* @note This only succeeds if there are at least two leaves in the range.
* @remarks The upper range limit is just some half reasonable value we've
* picked out of thin air.
*/
DECLINLINE(bool) ASMIsValidExtRange(uint32_t uEAX)
{
return uEAX >= UINT32_C(0x80000001) && uEAX <= UINT32_C(0x800fffff);
}
/**
* Extracts the CPU family from ASMCpuId(1) or ASMCpuId(0x80000001)
*
* @returns Family.
* @param uEAX EAX return from ASMCpuId(1) or ASMCpuId(0x80000001).
*/
DECLINLINE(uint32_t) ASMGetCpuFamily(uint32_t uEAX)
{
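    /* When the base family field (bits 11:8) is 0xf, the extended family field
       (bits 27:20) is added to it; otherwise the base field alone is the family. */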
return ((uEAX >> 8) & 0xf) == 0xf
? ((uEAX >> 20) & 0x7f) + 0xf
: ((uEAX >> 8) & 0xf);
}
/**
* Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), Intel variant.
*
* @returns Model.
* @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
*/
DECLINLINE(uint32_t) ASMGetCpuModelIntel(uint32_t uEAX)
{
return ((uEAX >> 8) & 0xf) == 0xf || (((uEAX >> 8) & 0xf) == 0x6) /* family! */
? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
: ((uEAX >> 4) & 0xf);
}
/**
* Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001), AMD variant.
*
* @returns Model.
* @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
*/
DECLINLINE(uint32_t) ASMGetCpuModelAMD(uint32_t uEAX)
{
return ((uEAX >> 8) & 0xf) == 0xf
? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
: ((uEAX >> 4) & 0xf);
}
/**
* Extracts the CPU model from ASMCpuId(1) or ASMCpuId(0x80000001)
*
* @returns Model.
* @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
 * @param fIntel Whether it's an Intel CPU. Use ASMIsIntelCpuEx() or ASMIsIntelCpu().
*/
DECLINLINE(uint32_t) ASMGetCpuModel(uint32_t uEAX, bool fIntel)
{
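    /* The extended model field (bits 19:16) is prepended to the base model
       (bits 7:4) for family 0xf, and for family 6 on Intel parts. */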
return ((uEAX >> 8) & 0xf) == 0xf || (((uEAX >> 8) & 0xf) == 0x6 && fIntel) /* family! */
? ((uEAX >> 4) & 0xf) | ((uEAX >> 12) & 0xf0)
: ((uEAX >> 4) & 0xf);
}
/**
* Extracts the CPU stepping from ASMCpuId(1) or ASMCpuId(0x80000001)
*
 * @returns Stepping.
* @param uEAX EAX from ASMCpuId(1) or ASMCpuId(0x80000001).
*/
DECLINLINE(uint32_t) ASMGetCpuStepping(uint32_t uEAX)
{
return uEAX & 0xf;
}
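
/*
 * Usage sketch (illustrative only, not part of the original header): combining
 * the CPUID helpers above to decode the host CPU signature.  The function name
 * is made up for the example.
 */
#if 0
static void ExampleDecodeCpuSignature(void)
{
    uint32_t uEAX, uEBX, uECX, uEDX;
    bool     fIntel;
    ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
    if (ASMIsValidStdRange(uEAX))
    {
        fIntel = ASMIsIntelCpuEx(uEBX, uECX, uEDX);
        ASMCpuId(1, &uEAX, &uEBX, &uECX, &uEDX);
        /* Leaf 1 EAX holds the family/model/stepping signature. */
        (void)ASMGetCpuFamily(uEAX);
        (void)ASMGetCpuModel(uEAX, fIntel);
        (void)ASMGetCpuStepping(uEAX);
    }
}
#endif
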
/**
* Get cr0.
* @returns cr0.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetCR0(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetCR0(void)
{
RTCCUINTREG uCR0;
# if RT_INLINE_ASM_USES_INTRIN
uCR0 = __readcr0();
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr0, %0\n\t" : "=r" (uCR0));
# else
    __asm__ __volatile__("movl %%cr0, %0\n\t" : "=r" (uCR0));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, cr0
mov [uCR0], rax
# else
mov eax, cr0
mov [uCR0], eax
# endif
}
# endif
return uCR0;
}
#endif
/**
* Sets the CR0 register.
* @param uCR0 The new CR0 value.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMSetCR0(RTCCUINTREG uCR0);
#else
DECLINLINE(void) ASMSetCR0(RTCCUINTREG uCR0)
{
# if RT_INLINE_ASM_USES_INTRIN
__writecr0(uCR0);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %0, %%cr0\n\t" :: "r" (uCR0));
# else
__asm__ __volatile__("movl %0, %%cr0\n\t" :: "r" (uCR0));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [uCR0]
mov cr0, rax
# else
mov eax, [uCR0]
mov cr0, eax
# endif
}
# endif
}
#endif
/**
* Get cr2.
* @returns cr2.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetCR2(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetCR2(void)
{
RTCCUINTREG uCR2;
# if RT_INLINE_ASM_USES_INTRIN
uCR2 = __readcr2();
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr2, %0\n\t" : "=r" (uCR2));
# else
    __asm__ __volatile__("movl %%cr2, %0\n\t" : "=r" (uCR2));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, cr2
mov [uCR2], rax
# else
mov eax, cr2
mov [uCR2], eax
# endif
}
# endif
return uCR2;
}
#endif
/**
* Sets the CR2 register.
 * @param uCR2 The new CR2 value.
*/
#if RT_INLINE_ASM_EXTERNAL
DECLASM(void) ASMSetCR2(RTCCUINTREG uCR2);
#else
DECLINLINE(void) ASMSetCR2(RTCCUINTREG uCR2)
{
# if RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %0, %%cr2\n\t" :: "r" (uCR2));
# else
__asm__ __volatile__("movl %0, %%cr2\n\t" :: "r" (uCR2));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [uCR2]
mov cr2, rax
# else
mov eax, [uCR2]
mov cr2, eax
# endif
}
# endif
}
#endif
/**
* Get cr3.
* @returns cr3.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetCR3(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetCR3(void)
{
RTCCUINTREG uCR3;
# if RT_INLINE_ASM_USES_INTRIN
uCR3 = __readcr3();
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr3, %0\n\t" : "=r" (uCR3));
# else
    __asm__ __volatile__("movl %%cr3, %0\n\t" : "=r" (uCR3));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, cr3
mov [uCR3], rax
# else
mov eax, cr3
mov [uCR3], eax
# endif
}
# endif
return uCR3;
}
#endif
/**
* Sets the CR3 register.
*
* @param uCR3 New CR3 value.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMSetCR3(RTCCUINTREG uCR3);
#else
DECLINLINE(void) ASMSetCR3(RTCCUINTREG uCR3)
{
# if RT_INLINE_ASM_USES_INTRIN
__writecr3(uCR3);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %0, %%cr3\n\t" : : "r" (uCR3));
# else
__asm__ __volatile__("movl %0, %%cr3\n\t" : : "r" (uCR3));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [uCR3]
mov cr3, rax
# else
mov eax, [uCR3]
mov cr3, eax
# endif
}
# endif
}
#endif
/**
* Reloads the CR3 register.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMReloadCR3(void);
#else
DECLINLINE(void) ASMReloadCR3(void)
{
# if RT_INLINE_ASM_USES_INTRIN
__writecr3(__readcr3());
# elif RT_INLINE_ASM_GNU_STYLE
RTCCUINTREG u;
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %%cr3, %0\n\t"
"movq %0, %%cr3\n\t"
: "=r" (u));
# else
__asm__ __volatile__("movl %%cr3, %0\n\t"
"movl %0, %%cr3\n\t"
: "=r" (u));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, cr3
mov cr3, rax
# else
mov eax, cr3
mov cr3, eax
# endif
}
# endif
}
#endif
/**
* Get cr4.
* @returns cr4.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetCR4(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetCR4(void)
{
RTCCUINTREG uCR4;
# if RT_INLINE_ASM_USES_INTRIN
uCR4 = __readcr4();
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
    __asm__ __volatile__("movq %%cr4, %0\n\t" : "=r" (uCR4));
# else
    __asm__ __volatile__("movl %%cr4, %0\n\t" : "=r" (uCR4));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, cr4
mov [uCR4], rax
# else
push eax /* just in case */
/*mov eax, cr4*/
_emit 0x0f
_emit 0x20
_emit 0xe0
mov [uCR4], eax
pop eax
# endif
}
# endif
return uCR4;
}
#endif
/**
* Sets the CR4 register.
*
* @param uCR4 New CR4 value.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMSetCR4(RTCCUINTREG uCR4);
#else
DECLINLINE(void) ASMSetCR4(RTCCUINTREG uCR4)
{
# if RT_INLINE_ASM_USES_INTRIN
__writecr4(uCR4);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %0, %%cr4\n\t" : : "r" (uCR4));
# else
__asm__ __volatile__("movl %0, %%cr4\n\t" : : "r" (uCR4));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [uCR4]
mov cr4, rax
# else
mov eax, [uCR4]
_emit 0x0F
_emit 0x22
_emit 0xE0 /* mov cr4, eax */
# endif
}
# endif
}
#endif
/**
* Get cr8.
* @returns cr8.
* @remark The lock prefix hack for access from non-64-bit modes is NOT used and 0 is returned.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetCR8(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetCR8(void)
{
# ifdef RT_ARCH_AMD64
RTCCUINTREG uCR8;
# if RT_INLINE_ASM_USES_INTRIN
uCR8 = __readcr8();
# elif RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__("movq %%cr8, %0\n\t" : "=r" (uCR8));
# else
__asm
{
mov rax, cr8
mov [uCR8], rax
}
# endif
return uCR8;
# else /* !RT_ARCH_AMD64 */
return 0;
# endif /* !RT_ARCH_AMD64 */
}
#endif
/**
* Enables interrupts (EFLAGS.IF).
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMIntEnable(void);
#else
DECLINLINE(void) ASMIntEnable(void)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm("sti\n");
# elif RT_INLINE_ASM_USES_INTRIN
_enable();
# else
__asm sti
# endif
}
#endif
/**
* Disables interrupts (!EFLAGS.IF).
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMIntDisable(void);
#else
DECLINLINE(void) ASMIntDisable(void)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm("cli\n");
# elif RT_INLINE_ASM_USES_INTRIN
_disable();
# else
__asm cli
# endif
}
#endif
/**
* Disables interrupts and returns previous xFLAGS.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMIntDisableFlags(void);
#else
DECLINLINE(RTCCUINTREG) ASMIntDisableFlags(void)
{
RTCCUINTREG xFlags;
# if RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("pushfq\n\t"
"cli\n\t"
"popq %0\n\t"
: "=r" (xFlags));
# else
__asm__ __volatile__("pushfl\n\t"
"cli\n\t"
"popl %0\n\t"
: "=r" (xFlags));
# endif
# elif RT_INLINE_ASM_USES_INTRIN && !defined(RT_ARCH_X86)
xFlags = ASMGetFlags();
_disable();
# else
__asm {
pushfd
cli
pop [xFlags]
}
# endif
return xFlags;
}
#endif
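
/*
 * Usage sketch (illustrative only, not part of the original header): the usual
 * save/disable/restore pattern around a short critical section, assuming the
 * ASMSetFlags() counterpart to ASMGetFlags() from earlier in this file.
 */
#if 0
static void ExampleWithInterruptsDisabled(void)
{
    RTCCUINTREG const fSavedFlags = ASMIntDisableFlags();
    /* ... touch state that must not be interrupted ... */
    ASMSetFlags(fSavedFlags);
}
#endif
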
/**
* Are interrupts enabled?
*
* @returns true / false.
*/
DECLINLINE(RTCCUINTREG) ASMIntAreEnabled(void)
{
RTCCUINTREG uFlags = ASMGetFlags();
return uFlags & 0x200 /* X86_EFL_IF */ ? true : false;
}
/**
* Halts the CPU until interrupted.
*/
#if RT_INLINE_ASM_EXTERNAL
DECLASM(void) ASMHalt(void);
#else
DECLINLINE(void) ASMHalt(void)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("hlt\n\t");
# else
__asm {
hlt
}
# endif
}
#endif
/**
* Reads a machine specific register.
*
* @returns Register content.
* @param uRegister Register to read.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint64_t) ASMRdMsr(uint32_t uRegister);
#else
DECLINLINE(uint64_t) ASMRdMsr(uint32_t uRegister)
{
RTUINT64U u;
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("rdmsr\n\t"
: "=a" (u.s.Lo),
"=d" (u.s.Hi)
: "c" (uRegister));
# elif RT_INLINE_ASM_USES_INTRIN
u.u = __readmsr(uRegister);
# else
__asm
{
mov ecx, [uRegister]
rdmsr
mov [u.s.Lo], eax
mov [u.s.Hi], edx
}
# endif
return u.u;
}
#endif
/**
* Writes a machine specific register.
*
* @param uRegister Register to write to.
* @param u64Val Value to write.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMWrMsr(uint32_t uRegister, uint64_t u64Val);
#else
DECLINLINE(void) ASMWrMsr(uint32_t uRegister, uint64_t u64Val)
{
RTUINT64U u;
u.u = u64Val;
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("wrmsr\n\t"
::"a" (u.s.Lo),
"d" (u.s.Hi),
"c" (uRegister));
# elif RT_INLINE_ASM_USES_INTRIN
__writemsr(uRegister, u.u);
# else
__asm
{
mov ecx, [uRegister]
mov edx, [u.s.Hi]
mov eax, [u.s.Lo]
wrmsr
}
# endif
}
#endif
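
/*
 * Usage sketch (illustrative only, not part of the original header): a simple
 * read-modify-write of an MSR.  MSR 0x1b is the architectural IA32_APIC_BASE
 * register and bit 11 its global enable bit; rdmsr/wrmsr require ring-0.
 */
#if 0
static void ExampleReadModifyWriteMsr(void)
{
    uint64_t u64 = ASMRdMsr(0x0000001b /* IA32_APIC_BASE */);
    u64 |= UINT64_C(1) << 11;          /* set the APIC global enable bit */
    ASMWrMsr(0x0000001b, u64);
}
#endif
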
/**
* Reads a machine specific register, extended version (for AMD).
*
* @returns Register content.
* @param uRegister Register to read.
* @param uXDI RDI/EDI value.
*/
#if RT_INLINE_ASM_EXTERNAL
DECLASM(uint64_t) ASMRdMsrEx(uint32_t uRegister, RTCCUINTREG uXDI);
#else
DECLINLINE(uint64_t) ASMRdMsrEx(uint32_t uRegister, RTCCUINTREG uXDI)
{
RTUINT64U u;
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("rdmsr\n\t"
: "=a" (u.s.Lo),
"=d" (u.s.Hi)
: "c" (uRegister),
"D" (uXDI));
# else
__asm
{
mov ecx, [uRegister]
xchg edi, [uXDI]
rdmsr
mov [u.s.Lo], eax
mov [u.s.Hi], edx
xchg edi, [uXDI]
}
# endif
return u.u;
}
#endif
/**
* Writes a machine specific register, extended version (for AMD).
*
* @param uRegister Register to write to.
* @param uXDI RDI/EDI value.
* @param u64Val Value to write.
*/
#if RT_INLINE_ASM_EXTERNAL
DECLASM(void) ASMWrMsrEx(uint32_t uRegister, RTCCUINTREG uXDI, uint64_t u64Val);
#else
DECLINLINE(void) ASMWrMsrEx(uint32_t uRegister, RTCCUINTREG uXDI, uint64_t u64Val)
{
RTUINT64U u;
u.u = u64Val;
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("wrmsr\n\t"
::"a" (u.s.Lo),
"d" (u.s.Hi),
"c" (uRegister),
"D" (uXDI));
# else
__asm
{
mov ecx, [uRegister]
xchg edi, [uXDI]
mov edx, [u.s.Hi]
mov eax, [u.s.Lo]
wrmsr
xchg edi, [uXDI]
}
# endif
}
#endif
/**
* Reads low part of a machine specific register.
*
* @returns Register content.
* @param uRegister Register to read.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint32_t) ASMRdMsr_Low(uint32_t uRegister);
#else
DECLINLINE(uint32_t) ASMRdMsr_Low(uint32_t uRegister)
{
uint32_t u32;
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("rdmsr\n\t"
: "=a" (u32)
: "c" (uRegister)
: "edx");
# elif RT_INLINE_ASM_USES_INTRIN
u32 = (uint32_t)__readmsr(uRegister);
# else
__asm
{
mov ecx, [uRegister]
rdmsr
mov [u32], eax
}
# endif
return u32;
}
#endif
/**
* Reads high part of a machine specific register.
*
* @returns Register content.
* @param uRegister Register to read.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint32_t) ASMRdMsr_High(uint32_t uRegister);
#else
DECLINLINE(uint32_t) ASMRdMsr_High(uint32_t uRegister)
{
uint32_t u32;
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("rdmsr\n\t"
: "=d" (u32)
: "c" (uRegister)
: "eax");
# elif RT_INLINE_ASM_USES_INTRIN
u32 = (uint32_t)(__readmsr(uRegister) >> 32);
# else
__asm
{
mov ecx, [uRegister]
rdmsr
mov [u32], edx
}
# endif
return u32;
}
#endif
/**
* Gets dr0.
*
* @returns dr0.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetDR0(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetDR0(void)
{
RTCCUINTREG uDR0;
# if RT_INLINE_ASM_USES_INTRIN
uDR0 = __readdr(0);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %%dr0, %0\n\t" : "=r" (uDR0));
# else
__asm__ __volatile__("movl %%dr0, %0\n\t" : "=r" (uDR0));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, dr0
mov [uDR0], rax
# else
mov eax, dr0
mov [uDR0], eax
# endif
}
# endif
return uDR0;
}
#endif
/**
* Gets dr1.
*
* @returns dr1.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetDR1(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetDR1(void)
{
RTCCUINTREG uDR1;
# if RT_INLINE_ASM_USES_INTRIN
uDR1 = __readdr(1);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %%dr1, %0\n\t" : "=r" (uDR1));
# else
__asm__ __volatile__("movl %%dr1, %0\n\t" : "=r" (uDR1));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, dr1
mov [uDR1], rax
# else
mov eax, dr1
mov [uDR1], eax
# endif
}
# endif
return uDR1;
}
#endif
/**
* Gets dr2.
*
* @returns dr2.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetDR2(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetDR2(void)
{
RTCCUINTREG uDR2;
# if RT_INLINE_ASM_USES_INTRIN
uDR2 = __readdr(2);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %%dr2, %0\n\t" : "=r" (uDR2));
# else
__asm__ __volatile__("movl %%dr2, %0\n\t" : "=r" (uDR2));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, dr2
mov [uDR2], rax
# else
mov eax, dr2
mov [uDR2], eax
# endif
}
# endif
return uDR2;
}
#endif
/**
* Gets dr3.
*
* @returns dr3.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetDR3(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetDR3(void)
{
RTCCUINTREG uDR3;
# if RT_INLINE_ASM_USES_INTRIN
uDR3 = __readdr(3);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %%dr3, %0\n\t" : "=r" (uDR3));
# else
__asm__ __volatile__("movl %%dr3, %0\n\t" : "=r" (uDR3));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, dr3
mov [uDR3], rax
# else
mov eax, dr3
mov [uDR3], eax
# endif
}
# endif
return uDR3;
}
#endif
/**
* Gets dr6.
*
* @returns dr6.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetDR6(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetDR6(void)
{
RTCCUINTREG uDR6;
# if RT_INLINE_ASM_USES_INTRIN
uDR6 = __readdr(6);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %%dr6, %0\n\t" : "=r" (uDR6));
# else
__asm__ __volatile__("movl %%dr6, %0\n\t" : "=r" (uDR6));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, dr6
mov [uDR6], rax
# else
mov eax, dr6
mov [uDR6], eax
# endif
}
# endif
return uDR6;
}
#endif
/**
* Reads and clears DR6.
*
* @returns DR6.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetAndClearDR6(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetAndClearDR6(void)
{
RTCCUINTREG uDR6;
# if RT_INLINE_ASM_USES_INTRIN
uDR6 = __readdr(6);
    __writedr(6, 0xffff0ff0U); /* 31-16 and 11-4 are 1's, 15-12, 3-0 and 63-32 are zero. */
# elif RT_INLINE_ASM_GNU_STYLE
    RTCCUINTREG uNewValue = 0xffff0ff0U; /* 31-16 and 11-4 are 1's, 15-12, 3-0 and 63-32 are zero. */
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %%dr6, %0\n\t"
"movq %1, %%dr6\n\t"
: "=r" (uDR6)
: "r" (uNewValue));
# else
__asm__ __volatile__("movl %%dr6, %0\n\t"
"movl %1, %%dr6\n\t"
: "=r" (uDR6)
: "r" (uNewValue));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, dr6
mov [uDR6], rax
mov rcx, rax
        mov     ecx, 0ffff0ff0h; /* 31-16 and 11-4 are 1's, 15-12, 3-0 and 63-32 are zero. */
mov dr6, rcx
# else
mov eax, dr6
mov [uDR6], eax
        mov     ecx, 0ffff0ff0h; /* 31-16 and 11-4 are 1's, 15-12 and 3-0 are zero. */
mov dr6, ecx
# endif
}
# endif
return uDR6;
}
#endif
/**
* Gets dr7.
*
* @returns dr7.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(RTCCUINTREG) ASMGetDR7(void);
#else
DECLINLINE(RTCCUINTREG) ASMGetDR7(void)
{
RTCCUINTREG uDR7;
# if RT_INLINE_ASM_USES_INTRIN
uDR7 = __readdr(7);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %%dr7, %0\n\t" : "=r" (uDR7));
# else
__asm__ __volatile__("movl %%dr7, %0\n\t" : "=r" (uDR7));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, dr7
mov [uDR7], rax
# else
mov eax, dr7
mov [uDR7], eax
# endif
}
# endif
return uDR7;
}
#endif
/**
* Sets dr0.
*
* @param uDRVal Debug register value to write
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMSetDR0(RTCCUINTREG uDRVal);
#else
DECLINLINE(void) ASMSetDR0(RTCCUINTREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
__writedr(0, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %0, %%dr0\n\t" : : "r" (uDRVal));
# else
__asm__ __volatile__("movl %0, %%dr0\n\t" : : "r" (uDRVal));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [uDRVal]
mov dr0, rax
# else
mov eax, [uDRVal]
mov dr0, eax
# endif
}
# endif
}
#endif
/**
* Sets dr1.
*
* @param uDRVal Debug register value to write
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMSetDR1(RTCCUINTREG uDRVal);
#else
DECLINLINE(void) ASMSetDR1(RTCCUINTREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
__writedr(1, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %0, %%dr1\n\t" : : "r" (uDRVal));
# else
__asm__ __volatile__("movl %0, %%dr1\n\t" : : "r" (uDRVal));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [uDRVal]
mov dr1, rax
# else
mov eax, [uDRVal]
mov dr1, eax
# endif
}
# endif
}
#endif
/**
* Sets dr2.
*
* @param uDRVal Debug register value to write
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMSetDR2(RTCCUINTREG uDRVal);
#else
DECLINLINE(void) ASMSetDR2(RTCCUINTREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
__writedr(2, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %0, %%dr2\n\t" : : "r" (uDRVal));
# else
__asm__ __volatile__("movl %0, %%dr2\n\t" : : "r" (uDRVal));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [uDRVal]
mov dr2, rax
# else
mov eax, [uDRVal]
mov dr2, eax
# endif
}
# endif
}
#endif
/**
* Sets dr3.
*
* @param uDRVal Debug register value to write
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMSetDR3(RTCCUINTREG uDRVal);
#else
DECLINLINE(void) ASMSetDR3(RTCCUINTREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
__writedr(3, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %0, %%dr3\n\t" : : "r" (uDRVal));
# else
__asm__ __volatile__("movl %0, %%dr3\n\t" : : "r" (uDRVal));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [uDRVal]
mov dr3, rax
# else
mov eax, [uDRVal]
mov dr3, eax
# endif
}
# endif
}
#endif
/**
* Sets dr6.
*
* @param uDRVal Debug register value to write
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMSetDR6(RTCCUINTREG uDRVal);
#else
DECLINLINE(void) ASMSetDR6(RTCCUINTREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
__writedr(6, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %0, %%dr6\n\t" : : "r" (uDRVal));
# else
__asm__ __volatile__("movl %0, %%dr6\n\t" : : "r" (uDRVal));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [uDRVal]
mov dr6, rax
# else
mov eax, [uDRVal]
mov dr6, eax
# endif
}
# endif
}
#endif
/**
* Sets dr7.
*
* @param uDRVal Debug register value to write
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMSetDR7(RTCCUINTREG uDRVal);
#else
DECLINLINE(void) ASMSetDR7(RTCCUINTREG uDRVal)
{
# if RT_INLINE_ASM_USES_INTRIN
__writedr(7, uDRVal);
# elif RT_INLINE_ASM_GNU_STYLE
# ifdef RT_ARCH_AMD64
__asm__ __volatile__("movq %0, %%dr7\n\t" : : "r" (uDRVal));
# else
__asm__ __volatile__("movl %0, %%dr7\n\t" : : "r" (uDRVal));
# endif
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [uDRVal]
mov dr7, rax
# else
mov eax, [uDRVal]
mov dr7, eax
# endif
}
# endif
}
#endif
/**
 * Writes an 8-bit unsigned integer to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param u8 8-bit integer to write.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutU8(RTIOPORT Port, uint8_t u8);
#else
DECLINLINE(void) ASMOutU8(RTIOPORT Port, uint8_t u8)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("outb %b1, %w0\n\t"
:: "Nd" (Port),
"a" (u8));
# elif RT_INLINE_ASM_USES_INTRIN
__outbyte(Port, u8);
# else
__asm
{
mov dx, [Port]
mov al, [u8]
out dx, al
}
# endif
}
#endif
/**
 * Reads an 8-bit unsigned integer from an I/O port, ordered.
*
* @returns 8-bit integer.
* @param Port I/O port to read from.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint8_t) ASMInU8(RTIOPORT Port);
#else
DECLINLINE(uint8_t) ASMInU8(RTIOPORT Port)
{
uint8_t u8;
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("inb %w1, %b0\n\t"
: "=a" (u8)
: "Nd" (Port));
# elif RT_INLINE_ASM_USES_INTRIN
u8 = __inbyte(Port);
# else
__asm
{
mov dx, [Port]
in al, dx
mov [u8], al
}
# endif
return u8;
}
#endif
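
/*
 * Usage sketch (illustrative only, not part of the original header): reading a
 * CMOS/RTC register through the standard index/data port pair 0x70/0x71 using
 * the port I/O wrappers above.
 */
#if 0
static uint8_t ExampleReadCmosReg(uint8_t bReg)
{
    ASMOutU8(0x70, bReg);   /* select the CMOS register */
    return ASMInU8(0x71);   /* read the selected register */
}
#endif
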
/**
* Writes a 16-bit unsigned integer to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param u16 16-bit integer to write.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutU16(RTIOPORT Port, uint16_t u16);
#else
DECLINLINE(void) ASMOutU16(RTIOPORT Port, uint16_t u16)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("outw %w1, %w0\n\t"
:: "Nd" (Port),
"a" (u16));
# elif RT_INLINE_ASM_USES_INTRIN
__outword(Port, u16);
# else
__asm
{
mov dx, [Port]
mov ax, [u16]
out dx, ax
}
# endif
}
#endif
/**
* Reads a 16-bit unsigned integer from an I/O port, ordered.
*
* @returns 16-bit integer.
* @param Port I/O port to read from.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint16_t) ASMInU16(RTIOPORT Port);
#else
DECLINLINE(uint16_t) ASMInU16(RTIOPORT Port)
{
uint16_t u16;
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("inw %w1, %w0\n\t"
: "=a" (u16)
: "Nd" (Port));
# elif RT_INLINE_ASM_USES_INTRIN
u16 = __inword(Port);
# else
__asm
{
mov dx, [Port]
in ax, dx
mov [u16], ax
}
# endif
return u16;
}
#endif
/**
* Writes a 32-bit unsigned integer to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param u32 32-bit integer to write.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutU32(RTIOPORT Port, uint32_t u32);
#else
DECLINLINE(void) ASMOutU32(RTIOPORT Port, uint32_t u32)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("outl %1, %w0\n\t"
:: "Nd" (Port),
"a" (u32));
# elif RT_INLINE_ASM_USES_INTRIN
__outdword(Port, u32);
# else
__asm
{
mov dx, [Port]
mov eax, [u32]
out dx, eax
}
# endif
}
#endif
/**
* Reads a 32-bit unsigned integer from an I/O port, ordered.
*
* @returns 32-bit integer.
* @param Port I/O port to read from.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(uint32_t) ASMInU32(RTIOPORT Port);
#else
DECLINLINE(uint32_t) ASMInU32(RTIOPORT Port)
{
uint32_t u32;
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("inl %w1, %0\n\t"
: "=a" (u32)
: "Nd" (Port));
# elif RT_INLINE_ASM_USES_INTRIN
u32 = __indword(Port);
# else
__asm
{
mov dx, [Port]
in eax, dx
mov [u32], eax
}
# endif
return u32;
}
#endif
/**
* Writes a string of 8-bit unsigned integer items to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param pau8 Pointer to the string buffer.
* @param c The number of items to write.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutStrU8(RTIOPORT Port, uint8_t const *pau8, size_t c);
#else
DECLINLINE(void) ASMOutStrU8(RTIOPORT Port, uint8_t const *pau8, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("rep; outsb\n\t"
: "+S" (pau8),
"+c" (c)
: "d" (Port));
# elif RT_INLINE_ASM_USES_INTRIN
__outbytestring(Port, (unsigned char *)pau8, (unsigned long)c);
# else
__asm
{
mov dx, [Port]
mov ecx, [c]
mov eax, [pau8]
xchg esi, eax
rep outsb
xchg esi, eax
}
# endif
}
#endif
/**
* Reads a string of 8-bit unsigned integer items from an I/O port, ordered.
*
* @param Port I/O port to read from.
* @param pau8 Pointer to the string buffer (output).
* @param c The number of items to read.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMInStrU8(RTIOPORT Port, uint8_t *pau8, size_t c);
#else
DECLINLINE(void) ASMInStrU8(RTIOPORT Port, uint8_t *pau8, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("rep; insb\n\t"
: "+D" (pau8),
"+c" (c)
: "d" (Port));
# elif RT_INLINE_ASM_USES_INTRIN
__inbytestring(Port, pau8, (unsigned long)c);
# else
__asm
{
mov dx, [Port]
mov ecx, [c]
mov eax, [pau8]
xchg edi, eax
rep insb
xchg edi, eax
}
# endif
}
#endif
/**
* Writes a string of 16-bit unsigned integer items to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param pau16 Pointer to the string buffer.
* @param c The number of items to write.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutStrU16(RTIOPORT Port, uint16_t const *pau16, size_t c);
#else
DECLINLINE(void) ASMOutStrU16(RTIOPORT Port, uint16_t const *pau16, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("rep; outsw\n\t"
: "+S" (pau16),
"+c" (c)
: "d" (Port));
# elif RT_INLINE_ASM_USES_INTRIN
__outwordstring(Port, (unsigned short *)pau16, (unsigned long)c);
# else
__asm
{
mov dx, [Port]
mov ecx, [c]
mov eax, [pau16]
xchg esi, eax
rep outsw
xchg esi, eax
}
# endif
}
#endif
/**
* Reads a string of 16-bit unsigned integer items from an I/O port, ordered.
*
* @param Port I/O port to read from.
* @param pau16 Pointer to the string buffer (output).
* @param c The number of items to read.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMInStrU16(RTIOPORT Port, uint16_t *pau16, size_t c);
#else
DECLINLINE(void) ASMInStrU16(RTIOPORT Port, uint16_t *pau16, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("rep; insw\n\t"
: "+D" (pau16),
"+c" (c)
: "d" (Port));
# elif RT_INLINE_ASM_USES_INTRIN
__inwordstring(Port, pau16, (unsigned long)c);
# else
__asm
{
mov dx, [Port]
mov ecx, [c]
mov eax, [pau16]
xchg edi, eax
rep insw
xchg edi, eax
}
# endif
}
#endif
/**
* Writes a string of 32-bit unsigned integer items to an I/O port, ordered.
*
* @param Port I/O port to write to.
* @param pau32 Pointer to the string buffer.
* @param c The number of items to write.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMOutStrU32(RTIOPORT Port, uint32_t const *pau32, size_t c);
#else
DECLINLINE(void) ASMOutStrU32(RTIOPORT Port, uint32_t const *pau32, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("rep; outsl\n\t"
: "+S" (pau32),
"+c" (c)
: "d" (Port));
# elif RT_INLINE_ASM_USES_INTRIN
__outdwordstring(Port, (unsigned long *)pau32, (unsigned long)c);
# else
__asm
{
mov dx, [Port]
mov ecx, [c]
mov eax, [pau32]
xchg esi, eax
rep outsd
xchg esi, eax
}
# endif
}
#endif
/**
* Reads a string of 32-bit unsigned integer items from an I/O port, ordered.
*
* @param Port I/O port to read from.
* @param pau32 Pointer to the string buffer (output).
* @param c The number of items to read.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMInStrU32(RTIOPORT Port, uint32_t *pau32, size_t c);
#else
DECLINLINE(void) ASMInStrU32(RTIOPORT Port, uint32_t *pau32, size_t c)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("rep; insl\n\t"
: "+D" (pau32),
"+c" (c)
: "d" (Port));
# elif RT_INLINE_ASM_USES_INTRIN
__indwordstring(Port, (unsigned long *)pau32, (unsigned long)c);
# else
__asm
{
mov dx, [Port]
mov ecx, [c]
mov eax, [pau32]
xchg edi, eax
rep insd
xchg edi, eax
}
# endif
}
#endif
/**
* Invalidate page.
*
* @param pv Address of the page to invalidate.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMInvalidatePage(void *pv);
#else
DECLINLINE(void) ASMInvalidatePage(void *pv)
{
# if RT_INLINE_ASM_USES_INTRIN
__invlpg(pv);
# elif RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("invlpg %0\n\t"
: : "m" (*(uint8_t *)pv));
# else
__asm
{
# ifdef RT_ARCH_AMD64
mov rax, [pv]
invlpg [rax]
# else
mov eax, [pv]
invlpg [eax]
# endif
}
# endif
}
#endif
/**
* Write back the internal caches and invalidate them.
*/
#if RT_INLINE_ASM_EXTERNAL && !RT_INLINE_ASM_USES_INTRIN
DECLASM(void) ASMWriteBackAndInvalidateCaches(void);
#else
DECLINLINE(void) ASMWriteBackAndInvalidateCaches(void)
{
# if RT_INLINE_ASM_USES_INTRIN
__wbinvd();
# elif RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("wbinvd");
# else
__asm
{
wbinvd
}
# endif
}
#endif
/**
* Invalidate internal and (perhaps) external caches without first
* flushing dirty cache lines. Use with extreme care.
*/
#if RT_INLINE_ASM_EXTERNAL
DECLASM(void) ASMInvalidateInternalCaches(void);
#else
DECLINLINE(void) ASMInvalidateInternalCaches(void)
{
# if RT_INLINE_ASM_GNU_STYLE
__asm__ __volatile__("invd");
# else
__asm
{
invd
}
# endif
}
#endif
/**
* Memory load/store fence, waits for any pending writes and reads to complete.
* Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
*/
DECLINLINE(void) ASMMemoryFenceSSE2(void)
{
#if RT_INLINE_ASM_GNU_STYLE
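    /* The raw bytes 0F AE F0 encode the MFENCE instruction. */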
__asm__ __volatile__ (".byte 0x0f,0xae,0xf0\n\t");
#elif RT_INLINE_ASM_USES_INTRIN
_mm_mfence();
#else
__asm
{
_emit 0x0f
_emit 0xae
_emit 0xf0
}
#endif
}
/**
* Memory store fence, waits for any writes to complete.
* Requires the X86_CPUID_FEATURE_EDX_SSE CPUID bit set.
*/
DECLINLINE(void) ASMWriteFenceSSE(void)
{
#if RT_INLINE_ASM_GNU_STYLE
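    /* The raw bytes 0F AE F8 encode the SFENCE instruction. */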
__asm__ __volatile__ (".byte 0x0f,0xae,0xf8\n\t");
#elif RT_INLINE_ASM_USES_INTRIN
_mm_sfence();
#else
__asm
{
_emit 0x0f
_emit 0xae
_emit 0xf8
}
#endif
}
/**
* Memory load fence, waits for any pending reads to complete.
* Requires the X86_CPUID_FEATURE_EDX_SSE2 CPUID bit set.
*/
DECLINLINE(void) ASMReadFenceSSE2(void)
{
#if RT_INLINE_ASM_GNU_STYLE
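    /* The raw bytes 0F AE E8 encode the LFENCE instruction. */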
__asm__ __volatile__ (".byte 0x0f,0xae,0xe8\n\t");
#elif RT_INLINE_ASM_USES_INTRIN
_mm_lfence();
#else
__asm
{
_emit 0x0f
_emit 0xae
_emit 0xe8
}
#endif
}
/** @} */
#endif