PDMAllCritSect.cpp revision 04e639b004793691f051abcd5b3c811c6b6b6f86
af062818b47340eef15700d2f0211576ba3506eevboxsync/* $Id$ */
af062818b47340eef15700d2f0211576ba3506eevboxsync/** @file
af062818b47340eef15700d2f0211576ba3506eevboxsync * PDM - Critical Sections, All Contexts.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/*
af062818b47340eef15700d2f0211576ba3506eevboxsync * Copyright (C) 2006-2007 Sun Microsystems, Inc.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * This file is part of VirtualBox Open Source Edition (OSE), as
af062818b47340eef15700d2f0211576ba3506eevboxsync * available from http://www.virtualbox.org. This file is free software;
af062818b47340eef15700d2f0211576ba3506eevboxsync * you can redistribute it and/or modify it under the terms of the GNU
af062818b47340eef15700d2f0211576ba3506eevboxsync * General Public License (GPL) as published by the Free Software
af062818b47340eef15700d2f0211576ba3506eevboxsync * Foundation, in version 2 as it comes in the "COPYING" file of the
af062818b47340eef15700d2f0211576ba3506eevboxsync * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
af062818b47340eef15700d2f0211576ba3506eevboxsync * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
af062818b47340eef15700d2f0211576ba3506eevboxsync * Clara, CA 95054 USA or visit http://www.sun.com if you need
af062818b47340eef15700d2f0211576ba3506eevboxsync * additional information or have any questions.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
589fd26cedb2b4ebbed14f2964cad03cc8ebbca2vboxsync
4b9d6701570cb98fd36e209314239d104ec584d3vboxsync
4b9d6701570cb98fd36e209314239d104ec584d3vboxsync/*******************************************************************************
589fd26cedb2b4ebbed14f2964cad03cc8ebbca2vboxsync* Header Files *
589fd26cedb2b4ebbed14f2964cad03cc8ebbca2vboxsync*******************************************************************************/
589fd26cedb2b4ebbed14f2964cad03cc8ebbca2vboxsync#define LOG_GROUP LOG_GROUP_PDM//_CRITSECT
589fd26cedb2b4ebbed14f2964cad03cc8ebbca2vboxsync#include "PDMInternal.h"
589fd26cedb2b4ebbed14f2964cad03cc8ebbca2vboxsync#include <VBox/pdmcritsect.h>
589fd26cedb2b4ebbed14f2964cad03cc8ebbca2vboxsync#include <VBox/mm.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/vmm.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/vm.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/err.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/hwaccm.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <VBox/log.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <iprt/asm.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#include <iprt/assert.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef IN_RING3
af062818b47340eef15700d2f0211576ba3506eevboxsync# include <iprt/semaphore.h>
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/*******************************************************************************
af062818b47340eef15700d2f0211576ba3506eevboxsync* Defined Constants And Macros *
af062818b47340eef15700d2f0211576ba3506eevboxsync*******************************************************************************/
af062818b47340eef15700d2f0211576ba3506eevboxsync/** The number loops to spin for in ring-3. */
af062818b47340eef15700d2f0211576ba3506eevboxsync#define PDMCRITSECT_SPIN_COUNT_R3 20
af062818b47340eef15700d2f0211576ba3506eevboxsync/** The number loops to spin for in ring-0. */
af062818b47340eef15700d2f0211576ba3506eevboxsync#define PDMCRITSECT_SPIN_COUNT_R0 256
af062818b47340eef15700d2f0211576ba3506eevboxsync/** The number loops to spin for in the raw-mode context. */
af062818b47340eef15700d2f0211576ba3506eevboxsync#define PDMCRITSECT_SPIN_COUNT_RC 256
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef PDMCRITSECT_STRICT
af062818b47340eef15700d2f0211576ba3506eevboxsync# define PDMCRITSECT_STRICT_ARGS_DECL RTHCUINTPTR uId, RT_SRC_POS_DECL
af062818b47340eef15700d2f0211576ba3506eevboxsync# define PDMCRITSECT_STRICT_ARGS_PASS_ON uId, RT_SRC_POS_ARGS
af062818b47340eef15700d2f0211576ba3506eevboxsync#else
af062818b47340eef15700d2f0211576ba3506eevboxsync# define PDMCRITSECT_STRICT_ARGS_DECL int iDummy
af062818b47340eef15700d2f0211576ba3506eevboxsync# define PDMCRITSECT_STRICT_ARGS_PASS_ON 0
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/* Undefine the automatic VBOX_STRICT API mappings. */
af062818b47340eef15700d2f0211576ba3506eevboxsync#undef PDMCritSectEnter
af062818b47340eef15700d2f0211576ba3506eevboxsync#undef PDMCritSectTryEnter
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Gets the ring-3 native thread handle of the calling thread.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns native thread handle (ring-3).
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pCritSect The critical section. This is used in R0 and RC.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncDECL_FORCE_INLINE(RTNATIVETHREAD) pdmCritSectGetNativeSelf(PCPDMCRITSECT pCritSect)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef IN_RING3
af062818b47340eef15700d2f0211576ba3506eevboxsync NOREF(pCritSect);
af062818b47340eef15700d2f0211576ba3506eevboxsync RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
af062818b47340eef15700d2f0211576ba3506eevboxsync#else
af062818b47340eef15700d2f0211576ba3506eevboxsync AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
af062818b47340eef15700d2f0211576ba3506eevboxsync VERR_SEM_DESTROYED);
af062818b47340eef15700d2f0211576ba3506eevboxsync PVM pVM = pCritSect->s.CTX_SUFF(pVM); AssertPtr(pVM);
af062818b47340eef15700d2f0211576ba3506eevboxsync PVMCPU pVCpu = VMMGetCpu(pVM); AssertPtr(pVCpu);
af062818b47340eef15700d2f0211576ba3506eevboxsync RTNATIVETHREAD hNativeSelf = pVCpu->hNativeThread; Assert(hNativeSelf != NIL_RTNATIVETHREAD);
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync return hNativeSelf;
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
/**
 * Tail code called when we've won the battle for the lock.
 *
 * The caller has already won the cLockers race (uncontended claim or a
 * 0-result from the increment/wait path) before getting here.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect   The critical section.
 * @param   hNativeSelf The native handle of this thread.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PDMCRITSECT_STRICT_ARGS_DECL)
{
    /* Must be unowned and not flagged for a pending unlock at this point. */
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));
    Assert(!(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK));

    /* First entry: nesting depth 1 and record the calling thread as owner. */
    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# if defined(PDMCRITSECT_STRICT) && defined(IN_RING3)
    /* Lock-validator bookkeeping; strict ring-3 builds only. */
    RTThreadWriteLockInc(RTLockValidatorSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, PDMCRITSECT_STRICT_ARGS_PASS_ON));
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef IN_RING3
/**
 * Deals with the contended case in ring-3.
 *
 * Registers as a waiter and blocks on the critsect event semaphore until the
 * lock is granted or the section is destroyed; interrupted waits are retried.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect   The critsect.
 * @param   hNativeSelf The native thread handle.
 */
static int pdmR3CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PDMCRITSECT_STRICT_ARGS_DECL)
{
    /*
     * Start waiting.
     */
    /* Register as a waiter; if the count hits 0 the owner just left and we won. */
    if (ASMAtomicIncS32(&pCritSect->s.Core.cLockers) == 0)
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);
    STAM_COUNTER_INC(&pCritSect->s.StatContentionR3);

    /*
     * The wait loop.
     */
    PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
    SUPSEMEVENT hEvent = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
# ifdef PDMCRITSECT_STRICT
    /* Auto-adopt alien threads so the lock validator can track them. */
    RTTHREAD hSelf = RTThreadSelfAutoAdopt();
    RTLockValidatorCheckOrder(pCritSect->s.Core.pValidatorRec, hSelf, 0, NULL, 0, NULL);
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        RTThreadBlocking(hSelf, RTTHREADSTATE_CRITSECT, pCritSect->s.Core.pValidatorRec, 0, NULL, 0, NULL);
# endif
        int rc = SUPSemEventWaitNoResume(pSession, hEvent, RT_INDEFINITE_WAIT);
# ifdef PDMCRITSECT_STRICT
        RTThreadUnblocked(hSelf, RTTHREADSTATE_CRITSECT);
# endif
        /* Check for destruction before trusting the wait status. */
        if (RT_UNLIKELY(pCritSect->s.Core.u32Magic != RTCRITSECT_MAGIC))
            return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);
        /* Interrupted waits are benign and retried; anything else is unexpected. */
        AssertMsg(rc == VERR_INTERRUPTED, ("rc=%Rrc\n", rc));
    }
    /* won't get here */
}
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif /* IN_RING3 */
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
/**
 * Common worker for the debug and normal APIs.
 *
 * Fast path claims an unowned section or bumps the nesting count; on
 * contention it spins briefly, then blocks (R3) or returns rcBusy (R0/RC).
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PDMCRITSECT_STRICT_ARGS_DECL)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... (cLockers is -1 when free; the -1 -> 0 transition claims it) */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     * cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);  /* per-context spin count macro */
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3CritSectEnterContended(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);
#else
    /*
     * Return busy.
     */
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif
}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Enters a PDM critical section.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns VINF_SUCCESS if entered successfully.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns rcBusy when encountering a busy critical section in GC/R0.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns VERR_SEM_DESTROYED if the critical section is dead.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pCritSect The PDM critical section to enter.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param rcBusy The status code to return when we're in GC or R0
af062818b47340eef15700d2f0211576ba3506eevboxsync * and the section is busy.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncVMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifndef PDMCRITSECT_STRICT
af062818b47340eef15700d2f0211576ba3506eevboxsync return pdmCritSectEnter(pCritSect, rcBusy, PDMCRITSECT_STRICT_ARGS_PASS_ON);
af062818b47340eef15700d2f0211576ba3506eevboxsync#else
af062818b47340eef15700d2f0211576ba3506eevboxsync /* No need for a second code instance. */
af062818b47340eef15700d2f0211576ba3506eevboxsync return PDMCritSectEnterDebug(pCritSect, rcBusy, (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Enters a PDM critical section, with location information for debugging.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns VINF_SUCCESS if entered successfully.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns rcBusy when encountering a busy critical section in GC/R0.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @returns VERR_SEM_DESTROYED if the critical section is dead.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pCritSect The PDM critical section to enter.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param rcBusy The status code to return when we're in GC or R0
af062818b47340eef15700d2f0211576ba3506eevboxsync * and the section is busy.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param uId Some kind of locking location ID. Typically a
af062818b47340eef15700d2f0211576ba3506eevboxsync * return address up the stack. Optional (0).
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pszFile The file where the lock is being acquired from.
af062818b47340eef15700d2f0211576ba3506eevboxsync * Optional.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param iLine The line number in that file. Optional (0).
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pszFunction The functionn where the lock is being acquired
af062818b47340eef15700d2f0211576ba3506eevboxsync * from. Optional.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncVMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef PDMCRITSECT_STRICT
af062818b47340eef15700d2f0211576ba3506eevboxsync return pdmCritSectEnter(pCritSect, rcBusy, PDMCRITSECT_STRICT_ARGS_PASS_ON);
af062818b47340eef15700d2f0211576ba3506eevboxsync#else
af062818b47340eef15700d2f0211576ba3506eevboxsync /* No need for a second code instance. */
af062818b47340eef15700d2f0211576ba3506eevboxsync return PDMCritSectEnter(pCritSect, rcBusy);
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
/**
 * Common worker for the debug and normal APIs.
 *
 * Like the enter worker but non-blocking: either the section is claimed
 * immediately (first entry or nested re-entry) or VERR_SEM_BUSY is returned.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
static int pdmCritSectTryEnter(PPDMCRITSECT pCritSect, PDMCRITSECT_STRICT_ARGS_DECL)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* Not owned ... (cLockers is -1 when free; the -1 -> 0 transition claims it) */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, PDMCRITSECT_STRICT_ARGS_PASS_ON);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        return VINF_SUCCESS;
    }

    /* no spinning */

    /*
     * Return busy.
     */
#ifdef IN_RING3
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionR3);
#else
    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Try enter a critical section.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @retval VINF_SUCCESS on success.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @retval VERR_SEM_BUSY if the critsect was owned.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
af062818b47340eef15700d2f0211576ba3506eevboxsync * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pCritSect The critical section.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncVMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifndef PDMCRITSECT_STRICT
af062818b47340eef15700d2f0211576ba3506eevboxsync return pdmCritSectTryEnter(pCritSect, PDMCRITSECT_STRICT_ARGS_PASS_ON);
af062818b47340eef15700d2f0211576ba3506eevboxsync#else
af062818b47340eef15700d2f0211576ba3506eevboxsync /* No need for a second code instance. */
af062818b47340eef15700d2f0211576ba3506eevboxsync return PDMCritSectTryEnterDebug(pCritSect, (uintptr_t)ASMReturnAddress(), RT_SRC_POS);
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync/**
af062818b47340eef15700d2f0211576ba3506eevboxsync * Try enter a critical section, with location information for debugging.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @retval VINF_SUCCESS on success.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @retval VERR_SEM_BUSY if the critsect was owned.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @retval VERR_SEM_NESTED if nested enter on a no nesting section. (Asserted.)
af062818b47340eef15700d2f0211576ba3506eevboxsync * @retval VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
af062818b47340eef15700d2f0211576ba3506eevboxsync *
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pCritSect The critical section.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param uId Some kind of locking location ID. Typically a
af062818b47340eef15700d2f0211576ba3506eevboxsync * return address up the stack. Optional (0).
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pszFile The file where the lock is being acquired from.
af062818b47340eef15700d2f0211576ba3506eevboxsync * Optional.
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param iLine The line number in that file. Optional (0).
af062818b47340eef15700d2f0211576ba3506eevboxsync * @param pszFunction The functionn where the lock is being acquired
af062818b47340eef15700d2f0211576ba3506eevboxsync * from. Optional.
af062818b47340eef15700d2f0211576ba3506eevboxsync */
af062818b47340eef15700d2f0211576ba3506eevboxsyncVMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect, RTHCUINTPTR uId, RT_SRC_POS_DECL)
af062818b47340eef15700d2f0211576ba3506eevboxsync{
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef PDMCRITSECT_STRICT
af062818b47340eef15700d2f0211576ba3506eevboxsync return pdmCritSectTryEnter(pCritSect, PDMCRITSECT_STRICT_ARGS_PASS_ON);
af062818b47340eef15700d2f0211576ba3506eevboxsync#else
af062818b47340eef15700d2f0211576ba3506eevboxsync /* No need for a second code instance. */
af062818b47340eef15700d2f0211576ba3506eevboxsync return PDMCritSectTryEnter(pCritSect);
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif
af062818b47340eef15700d2f0211576ba3506eevboxsync}
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync#ifdef IN_RING3
/**
 * Enters a PDM critical section.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
VMMR3DECL(int) PDMR3CritSectEnterEx(PPDMCRITSECT pCritSect, bool fCallRing3)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_INTERNAL_ERROR);
    /* NOTE(review): on a ring-3 call done on behalf of RZ code, the lock
       validator ownership taken by the enter above is undone again —
       presumably because the logical owner is the RZ caller, not this
       thread; confirm against the VMMRZCallRing3 path. */
    if (    rc == VINF_SUCCESS
        && fCallRing3
        && pCritSect->s.Core.pValidatorRec
        && pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
        RTThreadWriteLockDec(RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec));
    return rc;
}
af062818b47340eef15700d2f0211576ba3506eevboxsync#endif /* IN_RING3 */
af062818b47340eef15700d2f0211576ba3506eevboxsync
af062818b47340eef15700d2f0211576ba3506eevboxsync
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * Handles three cases: a nested leave (just decrement the counters), a real
 * leave where the event semaphore can be signalled (ring-3, or ring-0 with
 * interrupts enabled), and a deferred leave queued for ring-3 processing via
 * PDMCritSectFF() (ring-0 with interrupts disabled, or raw-mode context).
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    Assert(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        /* Still owned after this; just unwind one nesting level and the
           matching lockers count. */
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        return;
    }

#ifdef IN_RING0
    /* In ring-0 we may only signal the support driver event semaphore when
       interrupts are enabled; otherwise fall through to the queued-leave
       path in the else-branch below. */
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        /* Grab and clear any one-shot exit event before releasing ownership. */
        RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        /* Tell the lock validator we no longer own this section. */
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTThreadWriteLockDec(RTLockValidatorUnsetOwner(pCritSect->s.Core.pValidatorRec));
#  endif
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        /* Barrier so the owner/nesting updates above are not reordered past
           the cLockers decrement that publishes the release. */
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT    hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif
# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            /* Optimistically release; the cmpxchg below detects racers. */
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;
            /* darn, someone raced in on us. */
            /* Roll the release back: re-take ownership and nesting so the
               queued-leave path below can hand it off to ring-3. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        /* The queued entry is processed by PDMCritSectFF() when the
           VMCPU_FF_PDM_CRITSECT force-action flag is serviced in ring-3. */
        PVM      pVM   = pCritSect->s.CTX_SUFF(pVM);    AssertPtr(pVM);
        PVMCPU   pVCpu = VMMGetCpu(pVM);                AssertPtr(pVCpu);
        uint32_t i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
#if defined(IN_RING3) || defined(IN_RING0)
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * Walks the per-VCPU queue filled by the deferred path of PDMCritSectLeave(),
 * leaves each section for real, then resets the queue and clears the
 * force-action flag.
 *
 * @param   pVCpu   The VMCPU handle.
 */
VMMDECL(void) PDMCritSectFF(PVMCPU pVCpu)
{
    const RTUINT cQueued = pVCpu->pdm.s.cQueuedCritSectLeaves;
    Assert(cQueued > 0);

    RTUINT iEntry = 0;
    while (iEntry < cQueued)
    {
# ifdef IN_RING3
        PPDMCRITSECT pCritSect = pVCpu->pdm.s.apQueuedCritSectsLeaves[iEntry];
# else
        /* The queue stores ring-3 pointers; translate to the current context. */
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[iEntry]);
# endif
        PDMCritSectLeave(pCritSect);
        LogFlow(("PDMR3CritSectFF: %p\n", pCritSect));
        iEntry++;
    }

    pVCpu->pdm.s.cQueuedCritSectLeaves = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_PDM_CRITSECT);
}
#endif /* IN_RING3 || IN_RING0 */
/**
 * Checks whether the calling thread owns the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)
{
#ifdef IN_RING3
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM    pVM   = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    PVMCPU pVCpu = VMMGetCpu(pVM);
    AssertPtr(pVCpu);
    /* A section with a pending (queued) unlock is no longer considered owned. */
    return pCritSect->s.Core.NativeThreadOwner == pVCpu->hNativeThread
        && !(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK);
#endif
}
/**
 * Checks whether the specified VCPU is the owner of the critical section.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   idCpu       VCPU id.
 */
VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, VMCPUID idCpu)
{
#ifdef IN_RING3
    NOREF(idCpu);
    return RTCritSectIsOwner(&pCritSect->s.Core);
#else
    PVM pVM = pCritSect->s.CTX_SUFF(pVM);
    AssertPtr(pVM);
    Assert(idCpu < pVM->cCpus);
    if (pCritSect->s.Core.NativeThreadOwner != pVM->aCpus[idCpu].hNativeThread)
        return false;
    /* A section with a pending (queued) unlock is no longer considered owned. */
    return !(pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK);
#endif
}
/**
 * Checks if somebody currently owns the critical section.
 *
 * @returns true if locked.
 * @returns false if not locked.
 *
 * @param   pCritSect   The critical section.
 *
 * @remarks This doesn't prove that no deadlocks will occur later on; it's
 *          just a debugging tool.
 */
VMMDECL(bool) PDMCritSectIsOwned(PCPDMCRITSECT pCritSect)
{
    if (pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD)
        return false;
    /* Ignore sections whose unlock has been queued for ring-3. */
    return (pCritSect->s.Core.fFlags & PDMCRITSECT_FLAGS_PENDING_UNLOCK) == 0;
}
/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectHasWaiters(PCPDMCRITSECT pCritSect)
{
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == pdmCritSectGetNativeSelf(pCritSect));
    /* More lockers than our own nestings means others are queued behind us. */
    int32_t const cLockers  = pCritSect->s.Core.cLockers;
    int32_t const cNestings = pCritSect->s.Core.cNestings;
    return cLockers >= cNestings;
}
/**
 * Checks if a critical section is initialized or not.
 *
 * Thin wrapper over the IPRT core; works in all contexts.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
VMMDECL(bool) PDMCritSectIsInitialized(PCPDMCRITSECT pCritSect)
{
    return RTCritSectIsInitialized(&pCritSect->s.Core);
}
/**
 * Gets the recursion depth.
 *
 * Thin wrapper over the IPRT core; works in all contexts.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
VMMDECL(uint32_t) PDMCritSectGetRecursion(PCPDMCRITSECT pCritSect)
{
    return RTCritSectGetRecursion(&pCritSect->s.Core);
}