/* PDMAllCritSect.cpp revision 757277acca3d6ac6c5df59c2163122494405ed2a */
/* $Id$ */
/** @file
* PDM - Critical Sections, All Contexts.
*/
/*
* Copyright (C) 2006-2007 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include "PDMInternal.h"
#include <iprt/asm-amd64-x86.h>
#ifdef IN_RING3
# include <iprt/lockvalidator.h>
# include <iprt/semaphore.h>
#endif
#if defined(IN_RING3) || defined(IN_RING0)
# include <iprt/thread.h>
#endif
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
/** The number of loops to spin for in ring-3. */
#define PDMCRITSECT_SPIN_COUNT_R3 20
/** The number of loops to spin for in ring-0. */
#define PDMCRITSECT_SPIN_COUNT_R0 256
/** The number of loops to spin for in the raw-mode context. */
#define PDMCRITSECT_SPIN_COUNT_RC 256
/* Undefine the automatic VBOX_STRICT API mappings. */
/**
* Gets the ring-3 native thread handle of the calling thread.
*
* @returns native thread handle (ring-3).
* @param pCritSect The critical section. This is used in R0 and RC.
*/
{
#ifdef IN_RING3
#else
AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%RX32\n", pCritSect->s.Core.u32Magic),
#endif
return hNativeSelf;
}
/**
 * Tail code called when we've won the battle for the lock.
 *
 * @returns VINF_SUCCESS.
 *
 * @param   pCritSect       The critical section.
 * @param   hNativeSelf     The native handle of this thread.
 * @param   pSrcPos         The source position of the lock operation (lock
 *                          validator only; unused in non-strict builds).
 */
DECL_FORCE_INLINE(int) pdmCritSectEnterFirst(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    AssertMsg(pCritSect->s.Core.NativeThreadOwner == NIL_RTNATIVETHREAD, ("NativeThreadOwner=%p\n", pCritSect->s.Core.NativeThreadOwner));

    /* Take ownership: one nesting level, owned by the calling thread. */
    ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
    ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeSelf);

# ifdef PDMCRITSECT_STRICT
    RTLockValidatorRecExclSetOwner(pCritSect->s.Core.pValidatorRec, NIL_RTTHREAD, pSrcPos, true);
# else
    NOREF(pSrcPos);
# endif

    STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
    return VINF_SUCCESS;
}
/**
 * Deals with the contended case in ring-3 and ring-0.
 *
 * NOTE(review): this body appears truncated — the semaphore handle lookup,
 * the lockers increment, the hThreadSelf initialisation, and the actual
 * semaphore wait call (which should assign 'rc') are all missing.  Restore
 * from upstream before building.
 *
 * @returns VINF_SUCCESS or VERR_SEM_DESTROYED.
 * @param   pCritSect       The critsect.
 * @param   hNativeSelf     The native thread handle.
 * @param   pSrcPos         The source position of the lock operation
 *                          (lock validator; may be NULL).
 */
static int pdmR3R0CritSectEnterContended(PPDMCRITSECT pCritSect, RTNATIVETHREAD hNativeSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Start waiting.
     */
# ifdef IN_RING3
# else
# endif
    /*
     * The wait loop.
     */
# ifdef IN_RING3
# ifdef PDMCRITSECT_STRICT
    /* Lock-order validation before blocking (strict builds only). */
    int rc2 = RTLockValidatorRecExclCheckOrder(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos, RT_INDEFINITE_WAIT);
    if (RT_FAILURE(rc2))
        return rc2;
# else
# endif
# endif
    for (;;)
    {
# ifdef PDMCRITSECT_STRICT
        /* Deadlock detection hook: registers this thread as blocking on the section. */
        int rc9 = RTLockValidatorRecExclCheckBlocking(pCritSect->s.Core.pValidatorRec, hThreadSelf, pSrcPos,
        RT_INDEFINITE_WAIT, RTTHREADSTATE_CRITSECT, true);
        if (RT_FAILURE(rc9))
            return rc9;
# endif
# ifdef IN_RING3
# endif
        /* NOTE(review): the two statements below look scrambled — the wait
           call assigning 'rc' is missing and the success check should come
           before the destroyed-section return.  Confirm against upstream. */
        return VERR_SEM_DESTROYED;
        if (rc == VINF_SUCCESS)
    }
    /* won't get here */
}
#endif /* IN_RING3 || IN_RING0 */
/**
 * Common worker for the debug and normal APIs.
 *
 * NOTE(review): this body appears heavily truncated — the destroyed-section
 * check, the lock-free compare-exchange fast paths, the spin-loop retry
 * logic, and the slow-path / ring-3-call statements are missing.  Restore
 * from upstream before building.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 * @param   pSrcPos     The source position of the lock operation (lock
 *                      validator; may be NULL).
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    /*
     * See if we're lucky.
     */
    /* NOP ... */
        return VINF_SUCCESS;
    /* ... not owned ... */
    /* ... or nested. */
    {
        return VINF_SUCCESS;
    }
    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    while (cSpinsLeft-- > 0)
    {
        ASMNopPause();
        /* NOTE(review): the four lines below are the tail of a truncated
           comment (its opening marker and first half are missing); they are
           not code.  Restore the full comment from upstream. */
        cli'ed pendingpreemption check up front using sti w/ instruction fusing
        for avoiding races. Hmm ... This is assuming the other party is actually
        executing code on another CPU ... which we could keep track of if we
        wanted. */
    }
#ifdef IN_RING3
    /*
     * Take the slow path.
     */
#else
# ifdef IN_RING0
    /* NOTE(review): the five lines below are the tail of a truncated
       doc-comment (about being in VT-x/AMD-V context with preemption
       disabled); the opening marker is missing.  Restore from upstream. */
    * and would be better off switching out of that while waiting for
    * the lock. Several of the locks jumps back to ring-3 just to
    * get the lock, the ring-3 code will then call the kernel to do
    * the lock wait and when the call return it will call ring-0
    * again and resume via in setjmp style. Not very efficient. */
# if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                              * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        {
        }
        else
        {
            /* NOTE(review): '????' placeholders — the preempt-state argument
               was lost; this #if 0 block doesn't compile as-is. */
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);
            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
        }
        return rc;
    }
# else
    /*
     * Since preemption hasn't been disabled, we can block here in ring-0.
     */
    && ASMIntAreEnabled())
# endif
#endif /* IN_RING0 */
    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
    }
    /*
     * Return busy.
     */
    return rcBusy;
#endif /* !IN_RING3 */
}
/**
 * Enters a PDM critical section.
 *
 * NOTE(review): the function signature line is missing (presumably
 * VMMDECL(int) PDMCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy)), and
 * the statements assigning 'rc' in each branch were stripped.  Restore from
 * upstream before building.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 */
{
    int rc;
#ifndef IN_RING3
    /* VINF_SUCCESS means the caller wants us to resolve contention via ring-3. */
    if (rcBusy == VINF_SUCCESS)
    {
# ifndef PDMCRITSECT_STRICT
# else
# endif
        if (rc == VERR_SEM_BUSY)
        {
        }
    }
    else
#endif /* !IN_RING3 */
    {
#ifndef PDMCRITSECT_STRICT
#else
#endif
    }
    return rc;
}
/**
 * Enters a PDM critical section, with location information for debugging.
 *
 * NOTE(review): the body is gutted — both preprocessor branches are empty
 * and there is no return statement for this int-returning function.
 * Restore the pdmCritSectEnter() forwarding calls from upstream.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.  Pass VINF_SUCCESS to
 *                      acquire the critical section through a ring-3
 *                      call if necessary.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file.  Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from.  Optional.
 */
VMMDECL(int) PDMCritSectEnterDebug(PPDMCRITSECT pCritSect, int rcBusy, RTHCUINTPTR uId, RT_SRC_POS_DECL)
{
#ifdef PDMCRITSECT_STRICT
#else
#endif
}
/**
 * Common worker for the debug and normal try-enter APIs.
 *
 * NOTE(review): the function signature line is missing (presumably a
 * DECL_FORCE_INLINE(int) pdmCritSectTryEnter worker taking the section and
 * a PCRTLOCKVALSRCPOS), and the fast-path compare-exchange statements were
 * stripped.  Restore from upstream before building.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
{
    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    /*
     * See if we're lucky.
     */
    /* NOP ... */
        return VINF_SUCCESS;
    /* ... not owned ... */
    /* ... or nested. */
    {
        return VINF_SUCCESS;
    }
    /* no spinning */
    /*
     * Return busy.
     */
#ifdef IN_RING3
#else
#endif
    LogFlow(("PDMCritSectTryEnter: locked\n"));
    return VERR_SEM_BUSY;
}
/**
 * Try enter a critical section.
 *
 * NOTE(review): the function signature line is missing (presumably
 * VMMDECL(int) PDMCritSectTryEnter(PPDMCRITSECT pCritSect)), and the
 * forwarding calls to the common worker were stripped.  Restore from
 * upstream before building.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 */
{
#ifndef PDMCRITSECT_STRICT
#else
#endif
}
/**
 * Try enter a critical section, with location information for debugging.
 *
 * NOTE(review): the function signature line is missing (presumably
 * VMMDECL(int) PDMCritSectTryEnterDebug(PPDMCRITSECT pCritSect,
 * RTHCUINTPTR uId, RT_SRC_POS_DECL)), and the body statements were
 * stripped.  Restore from upstream before building.
 *
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_SEM_BUSY if the critsect was owned.
 * @retval  VERR_SEM_NESTED if nested enter on a no nesting section.  (Asserted.)
 * @retval  VERR_SEM_DESTROYED if RTCritSectDelete was called while waiting.
 *
 * @param   pCritSect   The critical section.
 * @param   uId         Some kind of locking location ID.  Typically a
 *                      return address up the stack.  Optional (0).
 * @param   pszFile     The file where the lock is being acquired from.
 *                      Optional.
 * @param   iLine       The line number in that file.  Optional (0).
 * @param   pszFunction The function where the lock is being acquired
 *                      from.  Optional.
 */
{
#ifdef PDMCRITSECT_STRICT
#else
#endif
}
#ifdef IN_RING3
/**
 * Enters a PDM critical section (ring-3 only).
 *
 * NOTE(review): the function signature line is missing, and the 'if'
 * condition below is unterminated (no closing parenthesis or body) — the
 * enter call assigning 'rc' and the fCallRing3 handling were stripped.
 * Restore from upstream before building.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   fCallRing3  Whether this is a VMMRZCallRing3() request.
 */
{
    if (    rc == VINF_SUCCESS
        &&  fCallRing3
    return rc;
}
#endif /* IN_RING3 */
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * NOTE(review): the function signature line is missing (presumably
 * VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)), and most of the
 * body's statements — NOP/ownership checks, nesting decrement, owner clear,
 * lockers decrement, semaphore signalling, and the ring-3 leave-queueing —
 * were stripped.  Restore from upstream before building.
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));
    /* Check for NOP sections before asserting ownership. */
        return;
    /*
     * Nested leave.
     */
    {
        return;
    }
#ifdef IN_RING0
    /* In ring-0 we can only signal the wakeup semaphore when it is safe to
       call into the support driver, hence the interrupt-enabled check. */
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
# if defined(PDMCRITSECT_STRICT)
# endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        /* stop and decrement lockers. */
        {
            /* Someone is waiting, wake up one of them. */
        }
# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
        }
# endif
# if defined(DEBUG_bird) && defined(IN_RING0)
# endif
    }
#endif /* IN_RING3 || IN_RING0 */
#ifdef IN_RING0
    else
#endif
    {
        /*
         * Try leave it.
         */
        {
            return;
            /* darn, someone raced in on us. */
        }
        /*
         * Queue the request.
         */
    }
#endif /* IN_RING0 || IN_RC */
}
/**
 * Process the critical sections queued for ring-3 'leave'.
 *
 * NOTE(review): the function signature line is missing, as are the queue
 * count ('c') initialisation, the ring-3 branch of the #ifdef, and the
 * actual per-section leave call.  Restore from upstream before building.
 *
 * @param   pVCpu       The VMCPU handle.
 */
{
    for (RTUINT i = 0; i < c; i++)
    {
# ifdef IN_RING3
# else
        /* Translate the queued ring-3 pointer to the current context. */
        PPDMCRITSECT pCritSect = (PPDMCRITSECT)MMHyperR3ToCC(pVCpu->CTX_SUFF(pVM), pVCpu->pdm.s.apQueuedCritSectsLeaves[i]);
# endif
    }
}
#endif /* IN_RING3 || IN_RING0 */
/**
 * Checks the caller is the owner of the critical section.
 *
 * NOTE(review): the function signature line is missing (presumably
 * VMMDECL(bool) PDMCritSectIsOwner(PCPDMCRITSECT pCritSect)), and the
 * ring-3 branch plus the R0/RC owner comparison were stripped; only the
 * trailing 'return false' survives.  Restore from upstream before building.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 */
{
#ifdef IN_RING3
#else
    return false;
#endif
}
/**
 * Checks the specified VCPU is the owner of the critical section.
 *
 * NOTE(review): the function signature line is missing (presumably
 * VMMDECL(bool) PDMCritSectIsOwnerEx(PCPDMCRITSECT pCritSect, PVMCPU
 * pVCpu)), and both branch bodies were stripped except the trailing
 * 'return false'.  Restore from upstream before building.
 *
 * @returns true if owner.
 * @returns false if not owner.
 * @param   pCritSect   The critical section.
 * @param   pVCpu       The virtual CPU handle.
 */
{
#ifdef IN_RING3
#else
    return false;
#endif
}
/**
 * Checks if anyone is waiting on the critical section we own.
 *
 * NOTE(review): the function signature line and the entire body (assertions
 * plus the cLockers/cNestings comparison) are missing.  Restore from
 * upstream before building.
 *
 * @returns true if someone is waiting.
 * @returns false if no one is waiting.
 * @param   pCritSect   The critical section.
 */
{
}
/**
 * Checks if a critical section is initialized or not.
 *
 * NOTE(review): the function signature line and the body (the u32Magic
 * comparison) are missing.  Restore from upstream before building.
 *
 * @returns true if initialized.
 * @returns false if not initialized.
 * @param   pCritSect   The critical section.
 */
{
}
/**
 * Gets the recursion depth.
 *
 * NOTE(review): the function signature line and the body (returning
 * the core nesting count) are missing.  Restore from upstream before
 * building.
 *
 * @returns The recursion depth.
 * @param   pCritSect   The critical section.
 */
{
}