/* lockvalidator.cpp revision 769de0e40d6afe45e51ec291952c12e89d0182b6 */
/* $Id$ */
/** @file
* IPRT - Lock Validator.
*/
/*
* Copyright (C) 2009-2010 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include <iprt/lockvalidator.h>
#include <iprt/semaphore.h>
#include "internal/lockvalidator.h"
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
* Deadlock detection stack entry.
*/
typedef struct RTLOCKVALDDENTRY
{
/** The current record. */
/** The current entry number if pRec is a shared one. */
/** The thread state of the thread we followed to get to pFirstSibling.
* This is only used for validating a deadlock stack. */
/** The thread we followed to get to pFirstSibling.
* This is only used for validating a deadlock stack. */
/** What pThread is waiting on, i.e. where we entered the circular list of
* siblings. This is used for validating a deadlock stack as well as
* terminating the sibling walk. */
/**
* Deadlock detection stack.
*/
typedef struct RTLOCKVALDDSTACK
{
/** The number stack entries. */
uint32_t c;
/** The stack entries. */
RTLOCKVALDDENTRY a[32];
/** Pointer to a deadlock detction stack. */
typedef RTLOCKVALDDSTACK *PRTLOCKVALDDSTACK;
/**
* Reference to another class.
*/
typedef struct RTLOCKVALCLASSREF
{
/** The class. */
/** The number of lookups of this class. */
/** Indicates whether the entry was added automatically during order checking
* (true) or manually via the API (false). */
bool fAutodidacticism;
/** Reserved / explicit alignment padding. */
bool afReserved[3];
/** Pointer to a class reference. */
typedef RTLOCKVALCLASSREF *PRTLOCKVALCLASSREF;
/** Pointer to a chunk of class references. */
typedef struct RTLOCKVALCLASSREFCHUNK *PRTLOCKVALCLASSREFCHUNK;
/**
* Chunk of class references.
*/
typedef struct RTLOCKVALCLASSREFCHUNK
{
/** Array of refs. */
#if 0 /** @todo for testing alloction of new chunks. */
#else
#endif
/** Pointer to the next chunk. */
PRTLOCKVALCLASSREFCHUNK volatile pNext;
/**
* Lock class.
*/
typedef struct RTLOCKVALCLASSINT
{
/** AVL node core. */
/** Magic value (RTLOCKVALCLASS_MAGIC). */
/** Reference counter. See RTLOCKVALCLASS_MAX_REFS. */
/** Whether the class is allowed to teach it self new locking order rules. */
bool fAutodidact;
/** Whether this class is in the tree. */
bool fInTree;
/** The minimum wait interval for which we do deadlock detection
* (milliseconds). */
/** The minimum wait interval for which we do order checks (milliseconds). */
/** More padding. */
/** Classes that may be taken prior to this one.
* This is a linked list where each node contains a chunk of locks so that we
* reduce the number of allocations as well as localize the data. */
/** Hash table containing frequently encountered prior locks. */
#define RTLOCKVALCLASS_HASH_STATS
#ifdef RTLOCKVALCLASS_HASH_STATS
/** Hash hits. */
/** Hash misses. */
uint32_t volatile cHashMisses;
#endif
/** Where this class was created.
* This is mainly used for finding automatically created lock classes.
* @remarks The strings are stored after this structure so we won't crash
* spawned it. */
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
/** Macro that asserts that a pointer is aligned correctly.
* Only used when fighting bugs. */
#if 1
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) \
#else
# define RTLOCKVAL_ASSERT_PTR_ALIGN(p) do { } while (0)
#endif
/** Hashes the class handle (pointer) into an apPriorLocksHash index. */
#define RTLOCKVALCLASS_HASH(hClass) \
/ sizeof(PRTLOCKVALCLASSREF)) )
/** The max value for RTLOCKVALCLASSINT::cRefs. */
/** The max value for RTLOCKVALCLASSREF::cLookups. */
/** The absolute max value for RTLOCKVALCLASSREF::cLookups at which it will
* be set back to RTLOCKVALCLASSREF_MAX_LOOKUPS. */
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/** Serializing object destruction and deadlock detection.
*
* This makes sure that none of the memory examined by the deadlock detection
* code will become invalid (reused for other purposes or made not present)
* while the detection is in progress.
*
* NS: RTLOCKVALREC*, RTTHREADINT and RTLOCKVALDRECSHRD::papOwners destruction.
* EW: Deadlock detection and some related activities.
*/
/** Whether the lock validator is enabled or disabled.
* Only applies to new locks. */
static bool volatile g_fLockValidatorEnabled = true;
/** Set if the lock validator is quiet. */
#ifdef RT_STRICT
static bool volatile g_fLockValidatorQuiet = false;
#else
static bool volatile g_fLockValidatorQuiet = true;
#endif
/** Set if the lock validator may panic. */
#ifdef RT_STRICT
static bool volatile g_fLockValidatorMayPanic = true;
#else
static bool volatile g_fLockValidatorMayPanic = false;
#endif
/** Serializing class tree insert and lookups. */
/** Class tree. */
/** Critical section serializing the teaching new rules to the classes. */
static RTCRITSECT g_LockValClassTeachCS;
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
/**
* Lazy initialization of the lock validator globals.
*/
static void rtLockValidatorLazyInit(void)
{
/* One-shot guard: 0/1 stored in a u32 so we can use the atomic cmpxchg. */
static uint32_t volatile s_fInitializing = false;
if (ASMAtomicCmpXchgU32(&s_fInitializing, true, false))
{
/* Create the class-tree RW lock on first use. */
if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
{
/* NOTE(review): 'rc' has no visible declaration -- the semaphore creation
   call appears to have been lost in extraction; confirm against upstream. */
if (RT_SUCCESS(rc))
}
{
if (RT_SUCCESS(rc))
}
/** @todo register some cleanup callback if we care. */
ASMAtomicWriteU32(&s_fInitializing, false);
}
else
/* NOTE(review): the else-branch statement (presumably a yield/wait for the
   racing initializer) is missing from this extraction. */
}
/** Wrapper around ASMAtomicReadPtr.
 *
 * Atomically reads a record-union pointer field.
 *
 * @returns The current value of @a *ppRec.
 * @param   ppRec   The volatile pointer field to read.
 */
DECL_FORCE_INLINE(PRTLOCKVALRECUNION) rtLockValidatorReadRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec)
{
/* NOTE(review): the statement declaring/assigning 'p' (presumably the
   ASMAtomicReadPtr call) is missing from this extraction -- verify against
   the upstream file. */
return p;
}
/** Wrapper around ASMAtomicWritePtr.
 *
 * Atomically writes @a pRecNew to the record-union pointer field @a ppRec.
 *
 * @param   ppRec    The volatile pointer field to write.
 * @param   pRecNew  The new value.
 */
DECL_FORCE_INLINE(void) rtLockValidatorWriteRecUnionPtr(PRTLOCKVALRECUNION volatile *ppRec, PRTLOCKVALRECUNION pRecNew)
{
/* NOTE(review): empty body -- the ASMAtomicWritePtr call appears to have
   been lost in extraction; verify against upstream. */
}
/** Wrapper around ASMAtomicReadPtr. */
/* NOTE(review): the signature of this wrapper is missing from the
   extraction; only the body survives. Presumably another atomic pointer
   read helper (thread handle or owner pointer) -- confirm upstream. */
{
return p;
}
/** Wrapper around ASMAtomicUoReadPtr.
 *
 * Unordered (no-fence) atomic read of a shared-owner pointer field.
 *
 * @returns The current value of @a *ppOwner.
 * @param   ppOwner  The volatile owner pointer field to read.
 */
DECL_FORCE_INLINE(PRTLOCKVALRECSHRDOWN) rtLockValidatorUoReadSharedOwner(PRTLOCKVALRECSHRDOWN volatile *ppOwner)
{
/* NOTE(review): the ASMAtomicUoReadPtr statement producing 'p' is missing
   from this extraction. */
return p;
}
/**
* Reads a volatile thread handle field and returns the thread name.
*
* @returns Thread name (read only).
* @param phThread The thread handle field.
*/
{
if (!pThread)
return "<NIL>";
return "<INVALID>";
return "<BAD-THREAD-MAGIC>";
}
/**
* Launch a simple assertion like complaint w/ panic.
*
* @param pszFile Where from - file.
* @param iLine Where from - line.
* @param pszFunction Where from - function.
* @param pszWhat What we're complaining about.
* @param ... Format arguments.
*/
{
{
}
}
/**
* Describes the lock.
*
* @param pszPrefix Message prefix.
* @param pRec The lock record we're working on.
* @param pszSuffix Message suffix.
*/
static void rtLockValidatorComplainAboutLock(const char *pszPrefix, PRTLOCKVALRECUNION pRec, const char *pszSuffix)
{
{
{
case RTLOCKVALRECEXCL_MAGIC:
pRec->Excl.SrcPos.pszFile, pRec->Excl.SrcPos.uLine, pRec->Excl.SrcPos.pszFunction, pRec->Excl.SrcPos.uId,
break;
case RTLOCKVALRECSHRD_MAGIC:
break;
{
pRec->ShrdOwner.SrcPos.pszFile, pRec->ShrdOwner.SrcPos.uLine, pRec->ShrdOwner.SrcPos.pszFunction, pRec->ShrdOwner.SrcPos.uId,
else
pRec->ShrdOwner.SrcPos.pszFile, pRec->ShrdOwner.SrcPos.uLine, pRec->ShrdOwner.SrcPos.pszFunction, pRec->ShrdOwner.SrcPos.uId,
break;
}
default:
RTAssertMsg2AddWeak("%spRec=%p u32Magic=%#x (bad)%s", pszPrefix, pRec, pRec->Core.u32Magic, pszSuffix);
break;
}
}
}
/**
* Launch the initial complaint.
*
* @param pszWhat What we're complaining about.
* @param pSrcPos Where we are complaining from, as it were.
* @param pThreadSelf The calling thread.
* @param pRec The main lock involved. Can be NULL.
*/
static void rtLockValidatorComplainFirst(const char *pszWhat, PCRTLOCKVALSRCPOS pSrcPos, PRTTHREADINT pThreadSelf, PRTLOCKVALRECUNION pRec)
{
{
ASMCompilerBarrier(); /* paranoia */
RTAssertMsg1Weak("RTLockValidator", pSrcPos ? pSrcPos->uLine : 0, pSrcPos ? pSrcPos->pszFile : NULL, pSrcPos ? pSrcPos->pszFunction : NULL);
RTAssertMsg2Weak("%s [uId=%p thrd=%s]\n", pszWhat, pSrcPos->uId, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
else
RTAssertMsg2Weak("%s [thrd=%s]\n", pszWhat, VALID_PTR(pThreadSelf) ? pThreadSelf->szName : "<NIL>");
}
}
/**
* Continue bitching.
*
* @param pszFormat Format string.
* @param ... Format arguments.
*/
static void rtLockValidatorComplainMore(const char *pszFormat, ...)
{
{
}
}
/**
* Raise a panic if enabled.
*/
static void rtLockValidatorComplainPanic(void)
{
}
/**
* Copy a source position record.
*
* @param pDst The destination.
* @param pSrc The source. Can be NULL.
*/
{
if (pSrc)
{
}
else
{
}
}
/**
* Init a source position record.
*
* @param pSrcPos The source position record.
*/
{
#if HC_ARCH_BITS == 64
pSrcPos->u32Padding = 0;
#endif
}
/* sdbm:
This algorithm was created for sdbm (a public-domain reimplementation of
ndbm) database library. it was found to do well in scrambling bits,
causing better distribution of the keys and fewer splits. it also happens
to be a good general hashing function with good distribution. the actual
function is hash(i) = hash(i - 1) * 65599 + str[i]; what is included below
is the faster version used in gawk. [there is even a faster, duff-device
version] the magic constant 65599 was picked out of thin air while
experimenting with different constants, and turns out to be a prime.
this is one of the algorithms used in berkeley db (see sleepycat) and
elsewhere. */
{
int c;
while ((c = *pu8++))
return hash;
}
/**
* Hashes the specified source position.
*
* @returns Hash.
* @param pSrcPos The source position record.
*/
{
|| pSrcPos->pszFunction)
{
uHash = 0;
if (pSrcPos->pszFunction)
}
else
{
}
return uHash;
}
/**
* Compares two source positions.
*
* @returns 0 if equal, < 0 if pSrcPos1 is smaller than pSrcPos2, > 0 if
* otherwise.
* @param pSrcPos1 The first source position.
* @param pSrcPos2 The second source position.
*/
{
if (iDiff != 0)
return iDiff;
if (iDiff != 0)
return iDiff;
return 0;
}
/**
 * Serializes destruction of RTLOCKVALREC* and RTTHREADINT structures.
 *
 * Enters the cross-roads semaphore from the "destruction" (NS) direction so
 * that no deadlock detection runs while records are being torn down.
 */
DECLHIDDEN(void) rtLockValidatorSerializeDestructEnter(void)
{
/* NOTE(review): the statement obtaining 'hXRoads' and the RTSemXRoads
   enter call inside the if are missing from this extraction. */
if (hXRoads != NIL_RTSEMXROADS)
}
/**
 * Call after rtLockValidatorSerializeDestructEnter.
 *
 * Leaves the cross-roads semaphore from the "destruction" (NS) direction.
 */
DECLHIDDEN(void) rtLockValidatorSerializeDestructLeave(void)
{
/* NOTE(review): 'hXRoads' acquisition and the leave call are missing from
   this extraction. */
if (hXRoads != NIL_RTSEMXROADS)
}
/**
 * Serializes deadlock detection against destruction of the objects being
 * inspected.
 *
 * Enters the cross-roads semaphore from the "detection" (EW) direction.
 */
DECLINLINE(void) rtLockValidatorSerializeDetectionEnter(void)
{
/* NOTE(review): 'hXRoads' acquisition and the enter call are missing from
   this extraction. */
if (hXRoads != NIL_RTSEMXROADS)
}
/**
 * Call after rtLockValidatorSerializeDetectionEnter.
 *
 * Leaves the cross-roads semaphore from the "detection" (EW) direction.
 */
DECLHIDDEN(void) rtLockValidatorSerializeDetectionLeave(void)
{
/* NOTE(review): 'hXRoads' acquisition and the leave call are missing from
   this extraction. */
if (hXRoads != NIL_RTSEMXROADS)
}
/**
* Initializes the per thread lock validator data.
*
* @param pPerThread The data.
*/
{
/* ASSUMES the rest has already been zeroed. */
}
{
if (!pThis)
return VERR_NO_MEMORY;
pThis->afReserved[i] = false;
pThis->au32Reserved[i] = 0;
{
}
pThis->CreatePos.pszFile = pSrcPos->pszFile ? (char *)memcpy(pszDst, pSrcPos->pszFile, cbFile) : NULL;
pThis->CreatePos.pszFunction= pSrcPos->pszFunction ? (char *)memcpy(pszDst, pSrcPos->pszFunction, cbFunction) : NULL;
return VINF_SUCCESS;
}
{
1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/);
}
/**
* Internal class retainer.
* @returns The new reference count.
* @param pClass The class.
*/
{
if (cRefs > RTLOCKVALCLASS_MAX_REFS)
return cRefs;
}
/**
* Validates and retains a lock validator class.
*
* @returns @a hClass on success, NIL_RTLOCKVALCLASS on failure.
* @param hClass The class handle. NIL_RTLOCKVALCLASS is ok.
*/
{
if (hClass == NIL_RTLOCKVALCLASS)
return hClass;
return hClass;
}
/**
* Internal class releaser.
* @returns The new reference count.
* @param pClass The class.
*/
{
else if (!cRefs)
return cRefs;
}
/**
* Destroys a class once there are not more references to it.
*
* @param Class The class.
*/
{
while (pChunk)
{
{
if (pClass2 != NIL_RTLOCKVALCLASS)
{
}
}
}
}
{
if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
while (pClass)
{
{
break;
}
}
if (RT_SUCCESS(rcLock))
return pClass;
}
{
if (hClass == NIL_RTLOCKVALCLASS)
{
/*
* Create a new class and insert it into the tree.
*/
1 /*cMsMinDeadlock*/, 1 /*cMsMinOrder*/);
if (RT_SUCCESS(rc))
{
if (g_hLockValClassTreeRWLock == NIL_RTSEMRW)
if (RT_SUCCESS(rcLock))
return hClass;
}
}
return hClass;
}
{
return rtLockValidatorClassRetain(pClass);
}
{
return rtLockValidatorClassRelease(pClass);
}
/**
* Worker for rtLockValidatorClassIsPriorClass that does a linear search thru
* all the chunks for @a pPriorClass.
*
* @returns true / false.
* @param pClass The class to search.
* @param pPriorClass The class to search for.
*/
static bool rtLockValidatorClassIsPriorClassByLinearSearch(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
{
{
{
}
/* update the hash table entry. */
if ( !(*ppHashEntry)
#ifdef RTLOCKVALCLASS_HASH_STATS
#endif
return true;
}
}
return false;
}
/**
* Checks if @a pPriorClass is a known prior class.
*
* @returns true / false.
* @param pClass The class to search.
* @param pPriorClass The class to search for.
*/
DECL_FORCE_INLINE(bool) rtLockValidatorClassIsPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass)
{
/*
* Hash lookup here.
*/
if ( pRef
{
#ifdef RTLOCKVALCLASS_HASH_STATS
#endif
return true;
}
}
/**
* Adds a class to the prior list.
*
* @returns VINF_SUCCESS, VERR_NO_MEMORY or VERR_SEM_LV_WRONG_ORDER.
* @param pClass The class to work on.
* @param pPriorClass The class to add.
* @param fAutodidacticism Whether we're teaching ourselfs (true) or
* somebody is teaching us via the API (false).
*/
static int rtLockValidatorClassAddPriorClass(RTLOCKVALCLASSINT *pClass, RTLOCKVALCLASSINT *pPriorClass, bool fAutodidacticism)
{
/*
* Check that there are no conflict (no assert since we might race each other).
*/
int rc = VERR_INTERNAL_ERROR_5;
{
{
/*
* Scan the table for a free entry, allocating a new chunk if necessary.
*/
{
bool fDone = false;
{
if (fDone)
{
rc = VINF_SUCCESS;
break;
}
}
if (fDone)
break;
/* If no more chunks, allocate a new one and insert the class before linking it. */
{
if (!pNew)
{
rc = VERR_NO_MEMORY;
break;
}
{
}
rc = VINF_SUCCESS;
break;
}
} /* chunk loop */
}
else
rc = VINF_SUCCESS;
}
else
if (RT_SUCCESS(rcLock))
return rc;
}
{
}
/**
* Checks if all owners are blocked - shared record operated in signaller mode.
*
* @returns true / false accordingly.
* @param pRec The record.
* @param pThreadSelf The current thread.
*/
DECL_FORCE_INLINE(bool) rtLockValidatorDdAreAllThreadsBlocked(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf)
{
/* NOTE(review): heavily truncated -- the declarations of 'cEntries',
   'cAllocated', 'pEntry' and 'pCurThread', plus several conditions, are
   missing from this extraction; compare against upstream before editing. */
if (cEntries == 0)
return false;
/* Walk the allocated owner table; every live owner must be asleep
   (or be the calling thread) for us to call it "all blocked". */
for (uint32_t i = 0; i < cAllocated; i++)
{
if ( pEntry
{
if (!pCurThread)
return false;
return false;
&& pCurThread != pThreadSelf)
return false;
/* Stop early once we've accounted for every known entry. */
if (--cEntries == 0)
break;
}
else
}
return true;
}
/**
* Verifies the deadlock stack before calling it a deadlock.
*
* @retval VERR_SEM_LV_DEADLOCK if it's a deadlock.
* @retval VERR_SEM_LV_ILLEGAL_UPGRADE if it's a deadlock on the same lock.
* @retval VERR_TRY_AGAIN if something changed.
*
* @param pStack The deadlock detection stack.
* @param pThreadSelf The current thread.
*/
/* NOTE(review): the signature of this function (the deadlock-stack verifier
   documented immediately above) is missing from the extraction, as are the
   conditions guarding each VERR_TRY_AGAIN return. 'c' is presumably the
   stack entry count -- confirm against upstream. */
{
{
for (uint32_t i = 1; i < c; i++)
{
return VERR_TRY_AGAIN;
return VERR_TRY_AGAIN;
return VERR_TRY_AGAIN;
/* ASSUMES the signaller records won't have siblings! */
return VERR_TRY_AGAIN;
}
}
/* A single-entry "deadlock" means the thread is blocking on a lock it
   already owns, i.e. an illegal upgrade rather than a cycle. */
if (c == 1)
return VERR_SEM_LV_ILLEGAL_UPGRADE;
return VERR_SEM_LV_DEADLOCK;
}
/**
* Checks for stack cycles caused by another deadlock before returning.
*
* @retval VINF_SUCCESS if the stack is simply too small.
* @retval VERR_SEM_LV_EXISTING_DEADLOCK if a cycle was detected.
*
* @param pStack The deadlock detection stack.
*/
/* NOTE(review): signature missing from the extraction -- this is the
   stack-overflow / existing-deadlock checker documented above. */
{
{
return VERR_SEM_LV_EXISTING_DEADLOCK;
}
/* Complain only once per process about the undersized detection stack. */
static bool volatile s_fComplained = false;
if (!s_fComplained)
{
s_fComplained = true;
rtLockValidatorComplain(RT_SRC_POS, "lock validator stack is too small! (%zu entries)\n", RT_ELEMENTS(pStack->a));
}
return VINF_SUCCESS;
}
/**
* Worker for rtLockValidatorDeadlockDetection that does the actual deadlock
* detection.
*
* @retval VINF_SUCCESS
* @retval VERR_SEM_LV_DEADLOCK
* @retval VERR_SEM_LV_EXISTING_DEADLOCK
* @retval VERR_SEM_LV_ILLEGAL_UPGRADE
* @retval VERR_TRY_AGAIN
*
* @param pStack The stack to use.
* @param pOriginalRec The original record.
* @param pThreadSelf The calling thread.
*/
/* NOTE(review): this function is visibly truncated -- the local variable
   declarations ('pRec', 'pNextRec', 'pNextThread', 'iEntry', 'i',
   'u32Magic', 'pFirstSibling', ...), the switch head, and most condition
   expressions are missing from the extraction. The comments below describe
   only what the surviving text shows; confirm everything against the
   upstream IPRT lockvalidator.cpp before modifying. */
static int rtLockValidatorDdDoDetection(PRTLOCKVALDDSTACK pStack, PRTLOCKVALRECUNION const pOriginalRec,
PRTTHREADINT const pThreadSelf)
{
pStack->c = 0;
/* We could use a single RTLOCKVALDDENTRY variable here, but the
compiler may make a better job of it when using individual variables. */
{
/*
* Process the current record.
*/
/* Find the next relevant owner thread and record. */
{
case RTLOCKVALRECEXCL_MAGIC:
for (;;)
{
if ( !pNextThread
break;
if ( !RTTHREAD_IS_SLEEPING(enmNextState)
&& pNextThread != pThreadSelf)
break;
break;
}
if (!pNextRec)
{
if ( pRec
&& pRec != pFirstSibling)
continue;
}
break;
case RTLOCKVALRECSHRD_MAGIC:
{
/* Skip to the next sibling if same side. ASSUMES reader priority. */
/** @todo The read side of a read-write lock is problematic if
* the implementation prioritizes writers over readers because
* that means we should could deadlock against current readers
* if a writer showed up. If the RW sem implementation is
* wrapping some native API, it's not so easy to detect when we
* should do this and when we shouldn't. Checking when we
* shouldn't is subject to wakeup scheduling and cannot easily
* be made reliable.
*
* At the moment we circumvent all this mess by declaring that
* readers has priority. This is TRUE on linux, but probably
* isn't on Solaris and FreeBSD. */
if ( pRec == pFirstSibling
{
continue;
}
}
/* Scan the owner table for blocked owners. */
|| iEntry != UINT32_MAX
)
)
{
while (++iEntry < cAllocated)
{
if (pEntry)
{
for (;;)
{
break;
if ( !pNextThread
break;
if ( !RTTHREAD_IS_SLEEPING(enmNextState)
&& pNextThread != pThreadSelf)
break;
break;
}
if (pNextRec)
break;
}
else
}
if (pNextRec)
break;
}
/* Advance to the next sibling, if any. */
&& pRec != pFirstSibling)
{
iEntry = UINT32_MAX;
continue;
}
break;
break;
default:
break;
}
if (pNextRec)
{
/*
* Recurse and check for deadlock.
*/
pStack->c++;
&& ( i != 0
)
)
iEntry = UINT32_MAX;
}
else
{
/*
* No deadlock here, unwind the stack and deal with any unfinished
* business there.
*/
for (;;)
{
/* pop */
if (i == 0)
return VINF_SUCCESS;
i--;
/* Examine it. */
if (u32Magic == RTLOCKVALRECEXCL_MAGIC)
else if (u32Magic == RTLOCKVALRECSHRD_MAGIC)
{
break; /* continue processing this record. */
}
else
{
|| u32Magic == RTLOCKVALRECSHRD_MAGIC_DEAD);
continue;
}
/* Any next record to advance to? */
if ( !pRec
continue;
iEntry = UINT32_MAX;
break;
}
/* Restore the rest of the state and update the stack. */
pStack->c = i;
}
}
}
/**
* Check for the simple no-deadlock case.
*
* @returns true if no deadlock, false if further investigation is required.
*
* @param pOriginalRec The original record.
*/
{
{
if ( !pThread
return true;
if (!RTTHREAD_IS_SLEEPING(enmState))
return true;
}
return false;
}
/**
* Worker for rtLockValidatorDeadlockDetection that bitches about a deadlock.
*
* @param pStack The chain of locks causing the deadlock.
* @param pRec The record relating to the current thread's lock
* operation.
* @param pThreadSelf This thread.
* @param pSrcPos Where we are going to deadlock.
* @param rc The return code.
*/
{
{
const char *pszWhat;
switch (rc)
{
}
rtLockValidatorComplainFirst(pszWhat, pSrcPos, pThreadSelf, pStack->a[0].pRec != pRec ? pRec : NULL);
{
char szPrefix[24];
else
}
rtLockValidatorComplainMore("---- end of deadlock chain ----\n");
}
}
/**
* Perform deadlock detection.
*
* @retval VINF_SUCCESS
* @retval VERR_SEM_LV_DEADLOCK
* @retval VERR_SEM_LV_EXISTING_DEADLOCK
* @retval VERR_SEM_LV_ILLEGAL_UPGRADE
*
* @param pRec The record relating to the current thread's lock
* operation.
* @param pThreadSelf The current thread.
* @param pSrcPos The position of the current lock operation.
*/
static int rtLockValidatorDeadlockDetection(PRTLOCKVALRECUNION pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
/* NOTE(review): truncated -- the stack declaration, the initial detection
   call producing 'rc', and the retry-loop head ('iLoop') are missing from
   this extraction. As written, real detection only compiles for DEBUG_bird
   builds; everyone else gets VINF_SUCCESS. */
#ifdef DEBUG_bird
if (RT_SUCCESS(rc))
return VINF_SUCCESS;
/* Retry a few times if the detector saw the lock graph mutate under it. */
if (rc == VERR_TRY_AGAIN)
{
{
if (RT_SUCCESS_NP(rc))
return VINF_SUCCESS;
if (rc != VERR_TRY_AGAIN)
break;
/* Give up and assume no deadlock after too many races. */
if (iLoop >= 3)
return VINF_SUCCESS;
}
}
return rc;
#else
return VINF_SUCCESS;
#endif
}
/**
* Unlinks all siblings.
*
* This is used during record deletion and assumes no races.
*
* @param pCore One of the siblings.
*/
{
/* ASSUMES sibling destruction doesn't involve any races and that all
related records are to be disposed off now. */
while (pSibling)
{
PRTLOCKVALRECUNION volatile *ppCoreNext;
{
case RTLOCKVALRECEXCL_MAGIC:
break;
case RTLOCKVALRECSHRD_MAGIC:
break;
default:
AssertFailed();
ppCoreNext = NULL;
break;
}
if (RT_UNLIKELY(ppCoreNext))
break;
}
}
{
/*
* Validate input.
*/
/*
* Link them (circular list).
*/
{
}
{
}
else
return VINF_SUCCESS;
}
{
pRec->afReserved[0] = 0;
pRec->cRecursion = 0;
/* Lazy initialization. */
}
{
if (!pRec)
return VERR_NO_MEMORY;
return VINF_SUCCESS;
}
{
}
{
if (pRec)
{
}
}
{
return;
if (hThreadSelf == NIL_RTTHREAD)
{
}
{
pRec->cRecursion++;
}
else
{
/*
* Update the record.
*/
/*
* Push the lock onto the lock stack.
*/
/** @todo push it onto the per-thread lock stack. */
}
}
{
return VINF_SUCCESS;
return VINF_SUCCESS;
}
{
return;
{
/*
* Pop (remove) the lock.
*/
/*
* Update the record.
*/
}
}
{
return VINF_SUCCESS;
pRec->cRecursion++;
return VINF_SUCCESS;
}
{
return VINF_SUCCESS;
pRec->cRecursion--;
return VINF_SUCCESS;
}
RTDECL(int) RTLockValidatorRecExclRecursionMixed(PRTLOCKVALRECEXCL pRec, PRTLOCKVALRECCORE pRecMixed, PCRTLOCKVALSRCPOS pSrcPos)
{
return VINF_SUCCESS;
pRec->cRecursion++;
return VINF_SUCCESS;
}
{
return VINF_SUCCESS;
pRec->cRecursion--;
return VINF_SUCCESS;
}
RTDECL(int) RTLockValidatorRecExclCheckOrder(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
return VINF_SUCCESS;
/*
* Check it locks we're currently holding.
*/
/** @todo later */
/*
* If missing order rules, add them.
*/
return VINF_SUCCESS;
}
{
/*
* Fend off wild life.
*/
return VINF_SUCCESS;
{
}
/*
* Record the location.
*/
/*
* Don't do deadlock detection if we're recursing.
*
* On some hosts we don't do recursion accounting our selves and there
* isn't any other place to check for this.
*/
int rc = VINF_SUCCESS;
{
if (!fRecursiveOk)
{
}
}
/*
* Perform deadlock detection.
*/
else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
if (RT_SUCCESS(rc))
else
{
}
return rc;
}
RTDECL(int) RTLockValidatorRecExclCheckOrderAndBlocking(PRTLOCKVALRECEXCL pRec, RTTHREAD hThreadSelf,
{
if (RT_SUCCESS(rc))
rc = RTLockValidatorRecExclCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, enmSleepState, fReallySleeping);
return rc;
}
RTDECL(void) RTLockValidatorRecSharedInit(PRTLOCKVALRECSHRD pRec, RTLOCKVALCLASS hClass, uint32_t uSubClass,
{
/* the table */
pRec->iLastEntry = 0;
pRec->cAllocated = 0;
pRec->fReallocating = false;
#if HC_ARCH_BITS == 32
#endif
}
{
/*
* Flip it into table realloc mode and take the destruction lock.
*/
{
}
{
}
}
/**
* Locates an owner (thread) in a shared lock record.
*
* @returns Pointer to the owner entry on success, NULL on failure..
* @param pShared The shared lock record.
* @param hThread The thread (owner) to find.
* @param piEntry Where to optionally return the table in index.
* Optional.
*/
{
if (papOwners)
{
{
{
if (piEntry)
return pEntry;
}
}
}
return NULL;
}
RTDECL(int) RTLockValidatorRecSharedCheckOrder(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
return VINF_SUCCESS;
/*
* Check it locks we're currently holding.
*/
/** @todo later */
/*
* If missing order rules, add them.
*/
return VINF_SUCCESS;
}
{
/*
* Fend off wild life.
*/
return VINF_SUCCESS;
{
}
/*
* Record the location.
*/
/*
* Don't do deadlock detection if we're recursing.
*/
int rc = VINF_SUCCESS;
: NULL;
if (pEntry)
{
if (!fRecursiveOk)
{
}
}
/*
* Perform deadlock detection.
*/
else if (!rtLockValidatorIsSimpleNoDeadlockCase(pRecU))
if (RT_SUCCESS(rc))
else
{
}
return rc;
}
RTDECL(int) RTLockValidatorRecSharedCheckOrderAndBlocking(PRTLOCKVALRECSHRD pRec, RTTHREAD hThreadSelf,
{
if (RT_SUCCESS(rc))
rc = RTLockValidatorRecSharedCheckBlocking(pRec, hThreadSelf, pSrcPos, fRecursiveOk, enmSleepState, fReallySleeping);
return rc;
}
/**
* Allocates and initializes an owner entry for the shared lock record.
*
* @returns The new owner entry.
* @param pRec The shared lock record.
* @param pThreadSelf The calling thread and owner. Used for record
* initialization and allocation.
* @param pSrcPos The source position.
*/
rtLockValidatorRecSharedAllocOwner(PRTLOCKVALRECSHRD pRec, PRTTHREADINT pThreadSelf, PCRTLOCKVALSRCPOS pSrcPos)
{
/*
* Check if the thread has any statically allocated records we can easily
* make use of.
*/
unsigned iEntry = ASMBitFirstSetU32(ASMAtomicUoReadU32(&pThreadSelf->LockValidator.bmFreeShrdOwners));
if ( iEntry > 0
{
pEntry->fStaticAlloc = true;
}
else
{
if (RT_UNLIKELY(!pEntry))
return NULL;
pEntry->fStaticAlloc = false;
}
#if HC_ARCH_BITS == 32
#endif
if (pSrcPos)
else
return pEntry;
}
/**
* Frees an owner entry allocated by rtLockValidatorRecSharedAllocOwner.
*
* @param pEntry The owner entry.
*/
{
if (pEntry)
{
if (pEntry->fStaticAlloc)
{
}
else
{
}
}
}
/**
* Make more room in the table.
*
* @retval true on success
* @retval false if we're out of memory or running into a bad race condition
* (probably a bug somewhere). No longer holding the lock.
*
* @param pShared The shared lock record.
*/
{
for (unsigned i = 0; i < 1000; i++)
{
/*
* Switch to the other data access direction.
*/
if (i >= 10)
{
RTThreadSleep(i >= 100);
}
/*
* Try grab the privilege to reallocating the table.
*/
{
{
/*
* Ok, still not enough space. Reallocate the table.
*/
#if 0 /** @todo enable this after making sure growing works flawlessly. */
#else
#endif
(cAllocated + cInc) * sizeof(void *));
if (!papOwners)
{
/* RTMemRealloc will assert */
return false;
}
while (cInc-- > 0)
{
cAllocated++;
}
}
}
break;
return true;
}
AssertFailed(); /* too many iterations or destroyed while racing. */
return false;
}
/**
* Adds an owner entry to a shared lock record.
*
* @returns true on success, false on serious race or we're if out of memory.
* @param pShared The shared lock record.
* @param pEntry The owner entry.
*/
DECLINLINE(bool) rtLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry)
{
/* NOTE(review): truncated -- the lock-enter, the table-full check that
   calls the realloc worker, and the free-slot CAS inside the retry loop
   are missing from this extraction; confirm against upstream. */
{
return false; /* the worker leave the lock */
/* Bounded retry: racing removers may free a slot between scans. */
for (unsigned i = 0; i < 100; i++)
{
{
{
return true;
}
}
Assert(i != 25);
}
AssertFailed();
}
return false;
}
/**
* Remove an owner entry from a shared lock record and free it.
*
* @param pShared The shared lock record.
* @param pEntry The owner entry to remove.
* @param iEntry The last known index.
*/
DECLINLINE(void) rtLockValidatorRecSharedRemoveAndFreeOwner(PRTLOCKVALRECSHRD pShared, PRTLOCKVALRECSHRDOWN pEntry,
{
/*
* Remove it from the table.
*/
AssertReturnVoidStmt(pShared->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
{
/* this shouldn't happen yet... */
AssertFailed();
break;
}
/*
* Successfully removed, now free it.
*/
}
RTDECL(void) RTLockValidatorRecSharedResetOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
return;
/*
* Free all current owners.
*/
{
AssertReturnVoidStmt(pRec->Core.u32Magic == RTLOCKVALRECSHRD_MAGIC, rtLockValidatorSerializeDetectionLeave());
{
PRTLOCKVALRECSHRDOWN pEntry = (PRTLOCKVALRECSHRDOWN)ASMAtomicXchgPtr((void * volatile *)&papEntries[iEntry], NULL);
if (pEntry)
{
break;
}
iEntry++;
}
}
if (hThread != NIL_RTTHREAD)
{
/*
* Allocate a new owner entry and insert it into the table.
*/
if ( pEntry
}
}
RTDECL(void) RTLockValidatorRecSharedAddOwner(PRTLOCKVALRECSHRD pRec, RTTHREAD hThread, PCRTLOCKVALSRCPOS pSrcPos)
{
return;
if (hThread == NIL_RTTHREAD)
{
}
/*
* Recursive?
*
* Note! This code can be optimized to try avoid scanning the table on
* insert. However, that's annoying work that makes the code big,
* so it can wait til later sometime.
*/
if (pEntry)
{
pEntry->cRecursion++;
return;
}
/*
* Allocate a new owner entry and insert it into the table.
*/
if ( pEntry
}
{
return;
/*
* Find the entry hope it's a recursive one.
*/
{
pEntry->cRecursion--;
}
else
}
{
return VINF_SUCCESS;
if (hThreadSelf == NIL_RTTHREAD)
{
}
/*
* Locate the entry for this thread in the table.
*/
if (RT_UNLIKELY(!pEntry))
{
return VERR_SEM_LV_NOT_OWNER;
}
/*
* Check the release order.
*/
{
/** @todo order validation */
}
/*
* Release the ownership or unwind a level of recursion.
*/
pEntry->cRecursion--;
else
return VINF_SUCCESS;
}
{
return VINF_SUCCESS;
if (hThreadSelf == NIL_RTTHREAD)
{
}
/*
* Locate the entry for this thread in the table.
*/
if (RT_UNLIKELY(!pEntry))
{
return VERR_SEM_LV_NOT_SIGNALLER;
}
return VINF_SUCCESS;
}
{
if (Thread == NIL_RTTHREAD)
return 0;
if (!pThread)
return VERR_INVALID_HANDLE;
return cWriteLocks;
}
{
}
{
}
{
if (Thread == NIL_RTTHREAD)
return 0;
if (!pThread)
return VERR_INVALID_HANDLE;
return cReadLocks;
}
{
}
{
}
{
if (pThread)
{
if (RTTHREAD_IS_SLEEPING(enmState))
{
if (RTTHREAD_IS_SLEEPING(enmState))
{
if (pRec)
{
{
case RTLOCKVALRECEXCL_MAGIC:
break;
break;
case RTLOCKVALRECSHRD_MAGIC:
break;
}
}
}
}
}
return pvLock;
}
{
bool fRet = false;
if (pThread)
{
}
return fRet;
}
{
}
/**
 * Queries whether the lock validator is enabled for new locks.
 *
 * @returns true if enabled, false if disabled.
 */
RTDECL(bool) RTLockValidatorIsEnabled(void)
{
    bool const fEnabled = ASMAtomicUoReadBool(&g_fLockValidatorEnabled);
    return fEnabled;
}
{
}
/**
 * Queries whether the lock validator is operating in quiet mode.
 *
 * @returns true if quiet (no complaints), false otherwise.
 */
RTDECL(bool) RTLockValidatorAreQuiet(void)
{
    bool const fQuiet = ASMAtomicUoReadBool(&g_fLockValidatorQuiet);
    return fQuiet;
}
{
}
/**
 * Queries whether the lock validator is allowed to panic on violations.
 *
 * @returns true if panicking is permitted, false otherwise.
 */
RTDECL(bool) RTLockValidatorMayPanic(void)
{
    bool const fMayPanic = ASMAtomicUoReadBool(&g_fLockValidatorMayPanic);
    return fMayPanic;
}