MMAllHyper.cpp revision 52194993b9d83198a057fea598ec8506e1a3bbaa
/* $Id$ */
/** @file
* MM - Memory Manager - Hypervisor Memory Area, All Contexts.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_MM_HYPER_HEAP
#include "MMInternal.h"
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
/** Asserts that a value is aligned on MMHYPER_HEAP_ALIGN_MIN. */
#define ASSERT_ALIGN(u1) AssertMsg(!((u1) & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("u1=%#x (%d)\n", u1, u1))
/* NOTE(review): the lines below are fragments of several do/while(0) assertion
 * macros (chunk offset/size sanity checks); their '#define' heads and parts of
 * their bodies are missing from this copy of the file, so they cannot be
 * reconstructed here with confidence — TODO restore from upstream. */
Assert(MMHYPERCHUNK_GET_OFFPREV(pChunk) >= (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
} while (0)
/* Fragment: checks that a chunk's offNext stays inside the heap area. */
ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
} while (0)
} while (0)
/* Fragment: statistics-only variant asserting offStat alignment on free chunks. */
#ifdef VBOX_WITH_STATISTICS
do { if (MMHYPERCHUNK_ISFREE(pChunk)) \
{ \
AssertMsg(!((pChunk)->offStat & (MMHYPER_HEAP_ALIGN_MIN - 1)), ("offStat=%RX32\n", (pChunk)->offStat)); \
} \
} while (0)
#else
} while (0)
#endif
} while (0)
} while (0)
/* Fragment: free-list prev-link sanity check (head side). */
ASSERT_GE(((pChunk)->offPrev & (MMHYPER_HEAP_ALIGN_MIN - 1)), (intptr_t)(pHeap)->CTX_SUFF(pbHeap) - (intptr_t)(pChunk)); \
("pChunk=%p offChunk=%#x offFreeHead=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap),\
(pHeap)->offFreeHead)); \
} while (0)
/* Fragment: free-list next-link sanity check (tail side). */
ASSERT_L((pChunk)->offNext, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
("pChunk=%p offChunk=%#x offFreeTail=%#x\n", (pChunk), (uintptr_t)(pChunk) - (uintptr_t)(pHeap)->CTX_SUFF(pbHeap), \
(pHeap)->offFreeTail)); \
} while (0)
/* Fragment: upper bound on the size of the last chunk before the page-aligned area. */
else \
ASSERT_LE((pChunk)->cb, (uintptr_t)(pHeap)->CTX_SUFF(pbHeap) + (pHeap)->offPageAligned - (uintptr_t)(pChunk)); \
} while (0)
} while (0)
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
/* NOTE(review): the internal function prototypes that belong in this section
 * are missing from this copy; only the conditional-compilation skeleton
 * remains. TODO restore from upstream. */
#ifdef VBOX_WITH_STATISTICS
#ifdef IN_RING3
#endif
#endif
#ifdef MMHYPER_HEAP_STRICT
#endif
/**
 * Locks the hypervisor heap.
 * This might call back to Ring-3 in order to deal with lock contention in GC and R3.
 *
 * @param pVM The VM handle.
 */
/* NOTE(review): the function signature and the statements that acquire the
 * lock (and assign 'rc') are missing from this copy; only the early-init
 * shortcut and the contention-handling skeleton remain. */
{
#ifdef IN_RING3
/* Presumably guarded by an early-init check that was lost in truncation:
 * before the lock is set up we run single-threaded and succeed at once. */
return VINF_SUCCESS; /* early init */
#else
/* VERR_GENERAL_FAILURE appears to signal lock contention here; the RC/R0
 * resolution code is missing — TODO confirm against upstream. */
if (rc == VERR_GENERAL_FAILURE)
{
# ifdef IN_RC
# else
# endif
}
#endif
return rc;
}
/**
 * Unlocks the hypervisor heap.
 *
 * @param pVM The VM handle.
 */
/* NOTE(review): the function signature and the actual unlock call are missing
 * from this copy; only the early-init shortcut remains. */
{
#ifdef IN_RING3
/* Presumably guarded by an early-init check that was lost in truncation. */
return; /* early init */
#endif
}
/**
 * Allocates memory in the Hypervisor (RC VMM) area.
 * The returned memory is of course zeroed.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param cb Number of bytes to allocate.
 * @param uAlignment Required memory alignment in bytes.
 * Values are 0,8,16,32 and PAGE_SIZE.
 * 0 -> default alignment, i.e. 8 bytes.
 * @param enmTag The statistics tag.
 * @param ppv Where to store the address to the allocated
 * memory.
 */
/* NOTE(review): the function signature and many statements (cbAligned
 * computation, heap/stat lookup, the actual chunk/page allocation calls) are
 * missing from this copy; the remaining skeleton is annotated below. */
{
/*
 * Validate input and adjust it to reasonable values.
 */
switch (uAlignment)
{
case 8:
case 16:
case 32:
/* NOTE(review): the condition guarding this failure branch (likely an
 * overflow/size sanity check on cbAligned) is missing — as shown, the
 * small-alignment cases would reject unconditionally. TODO confirm. */
{
AssertMsgFailed(("Nice try.\n"));
return VERR_INVALID_PARAMETER;
}
break;
case PAGE_SIZE:
/* Page-aligned requests of zero (after alignment) are invalid. */
if (!cbAligned)
{
AssertMsgFailed(("Nice try.\n"));
return VERR_INVALID_PARAMETER;
}
break;
default:
return VERR_INVALID_PARAMETER;
}
/*
 * Get heap and statistics.
 */
#ifdef VBOX_WITH_STATISTICS
if (!pStat)
{
AssertMsgFailed(("Failed to allocate statistics!\n"));
return VERR_MM_HYPER_NO_MEMORY;
}
#endif
if (uAlignment < PAGE_SIZE)
{
/*
 * Allocate a chunk.
 */
if (pChunk)
{
#ifdef VBOX_WITH_STATISTICS
pStat->cAllocations++;
#else
#endif
Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, pv));
return VINF_SUCCESS;
}
}
else
{
/*
 * Allocate page aligned memory.
 */
if (pv)
{
#ifdef VBOX_WITH_STATISTICS
pStat->cAllocations++;
#endif
/* ASMMemZero32(pv, cbAligned); - not required since memory is alloc-only and SUPPageAlloc zeros it. */
Log2(("MMHyperAlloc: cb=%#x uAlignment=%#x returns VINF_SUCCESS and *ppv=%p\n", cb, uAlignment, ppv));
return VINF_SUCCESS;
}
}
/* Allocation failed; the failure-statistics bump below looks like it should
 * be a cFailures counter rather than cAllocations — TODO confirm upstream. */
#ifdef VBOX_WITH_STATISTICS
pStat->cAllocations++;
#endif
return VERR_MM_HYPER_NO_MEMORY;
}
/**
 * Wrapper for mmHyperAllocInternal
 */
/* NOTE(review): the signature and the lock/call/unlock statements are missing
 * from this copy; as shown, 'rc' would be returned uninitialized. */
{
int rc;
return rc;
}
/**
 * Allocates a chunk of memory from the specified heap.
 * The caller validates the parameters of this request.
 *
 * @returns Pointer to the allocated chunk.
 * @returns NULL on failure.
 * @param pHeap The heap.
 * @param cb Size of the memory block to allocate.
 * @param uAlignment The alignment specifications for the allocated block.
 * @internal
 */
/* NOTE(review): the signature and most of the free-list walking / splitting
 * logic are missing from this copy; the remaining skeleton is annotated
 * below, but the control flow cannot be fully reconstructed here. */
{
#ifdef MMHYPER_HEAP_STRICT
#endif
#ifdef MMHYPER_HEAP_STRICT_FENCE
#endif
/*
 */
return NULL;
/*
 * Small alignments - from the front of the heap.
 *
 * Must split off free chunks at the end to prevent messing up the
 * last free node which we take the page aligned memory from the top of.
 */
PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeHead);
while (pFree)
{
{
if (offAlign)
{
/*
 * Adjust the node in front.
 * Because of multiple alignments we need to special case allocation of the first block.
 */
if (offAlign)
{
{
/* just add a bit of memory to it. */
PMMHYPERCHUNKFREE pPrev = (PMMHYPERCHUNKFREE)((char *)pFree + MMHYPERCHUNK_GET_OFFPREV(&Free.core));
}
else
{
/* make new head node, mark it USED for simplicity. */
}
Log3(("mmHyperAllocChunk: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - offAlign, -(int)offAlign));
/* Recreate pFree node and adjusting everything... */
{
}
{
}
else
{
}
else
}
/*
 * Split off a new FREE chunk?
 */
{
/*
 * Move the FREE chunk up to make room for the new USED chunk.
 */
{
}
{
}
else
{
}
else
/*
 * Update the old FREE node making it a USED node.
 */
}
else
{
/*
 * Link out of free list.
 */
{
{
}
else
{
}
}
else
{
{
}
else
{
}
}
}
break;
}
}
/* next */
}
#ifdef MMHYPER_HEAP_STRICT_FENCE
#endif
#ifdef MMHYPER_HEAP_STRICT
#endif
return pRet;
}
/**
 * Allocates one or more pages of memory from the specified heap.
 * The caller validates the parameters of this request.
 *
 * @returns Pointer to the allocated chunk.
 * @returns NULL on failure.
 * @param pHeap The heap.
 * @param cb Size of the memory block to allocate.
 * @internal
 */
/* NOTE(review): the signature and several statements (size checks, pvRet
 * computation, free-list unlink details) are missing from this copy. */
{
#ifdef MMHYPER_HEAP_STRICT
#endif
/*
 */
return NULL;
/*
 * Page aligned chunks.
 *
 * Page aligned chunks can only be allocated from the last FREE chunk.
 * This is for reasons of simplicity and fragmentation. Page aligned memory
 * must also be allocated in page aligned sizes. Page aligned memory cannot
 * be freed either.
 *
 * So, for this to work, the last FREE chunk needs to end on a page aligned
 * boundary.
 */
PMMHYPERCHUNKFREE pFree = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail);
{
return NULL;
}
void *pvRet;
{
/*
 * Simple, just cut the top of the free node and return it.
 */
AssertMsg(RT_ALIGN_P(pvRet, PAGE_SIZE) == pvRet, ("pvRet=%p cb=%#x pFree=%p pFree->cb=%#x\n", pvRet, cb, pFree, pFree->cb));
}
else
{
/*
 * Unlink the FREE node.
 */
Log3(("mmHyperAllocPages: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pFree->cb, -(int32_t)pFree->cb));
/* a scrap of spare memory (unlikely)? add it to the previous chunk. */
{
AssertMsg(MMHYPERCHUNK_GET_OFFPREV(&pFree->core), ("How the *beep* did someone manage to allocated up all the heap with page aligned memory?!?\n"));
#ifdef VBOX_WITH_STATISTICS
#endif
Log3(("mmHyperAllocPages: Added %d to %p (page align)\n", (uintptr_t)pvRet - (uintptr_t)pFree, pFree));
}
/* unlink from FREE chain. */
{
}
else
{
}
}
#ifdef MMHYPER_HEAP_STRICT
#endif
return pvRet;
}
#ifdef VBOX_WITH_STATISTICS
/**
 * Get the statistic record for a tag.
 *
 * @returns Pointer to a stat record.
 * @returns NULL on failure.
 * @param pHeap The heap.
 * @param enmTag The tag.
 */
/* NOTE(review): the signature, the AVL-tree lookup, and the record
 * initialization/insert statements are missing from this copy. */
{
/* try look it up first. */
if (!pStat)
{
/* try allocate a new one */
PMMHYPERCHUNK pChunk = mmHyperAllocChunk(pHeap, RT_ALIGN(sizeof(*pStat), MMHYPER_HEAP_ALIGN_MIN), MMHYPER_HEAP_ALIGN_MIN);
if (!pChunk)
return NULL;
}
/* Register with STAM in ring-3; elsewhere it must be deferred. */
if (!pStat->fRegistered)
{
# ifdef IN_RING3
# else
/** @todo schedule a R3 action. */
# endif
}
return pStat;
}
# ifdef IN_RING3
/**
 * Registers statistics with STAM.
 *
 */
/* NOTE(review): the function signature (pVM, pStat, and presumably the tag
 * string pszTag) is missing from this copy; the body below is intact. */
{
/* Idempotent: skip records that are already registered. */
if (pStat->fRegistered)
return;
STAMR3RegisterF(pVM, &pStat->cbCurAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Number of bytes currently allocated.", "/MM/HyperHeap/%s", pszTag);
STAMR3RegisterF(pVM, &pStat->cAllocations, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of alloc calls.", "/MM/HyperHeap/%s/cAllocations", pszTag);
STAMR3RegisterF(pVM, &pStat->cFrees, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of free calls.", "/MM/HyperHeap/%s/cFrees", pszTag);
STAMR3RegisterF(pVM, &pStat->cFailures, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT, "Number of failures.", "/MM/HyperHeap/%s/cFailures", pszTag);
STAMR3RegisterF(pVM, &pStat->cbAllocated, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of allocated bytes.", "/MM/HyperHeap/%s/cbAllocated", pszTag);
STAMR3RegisterF(pVM, &pStat->cbFreed, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Total number of freed bytes.", "/MM/HyperHeap/%s/cbFreed", pszTag);
STAMR3RegisterF(pVM, &pStat->cbMaxAllocated, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Max number of bytes allocated at the same time.","/MM/HyperHeap/%s/cbMaxAllocated", pszTag);
pStat->fRegistered = true;
}
# endif /* IN_RING3 */
#endif /* VBOX_WITH_STATISTICS */
/**
 * Free memory allocated using MMHyperAlloc().
 * The caller validates the parameters of this request.
 *
 * @returns VBox status code.
 * @param pVM The VM to operate on.
 * @param pv The memory to free.
 * @remark Try avoid free hyper memory.
 */
/* NOTE(review): the signature, validation asserts (only their format-string
 * fragments remain), statistics bookkeeping, and the worker call are missing
 * from this copy. */
{
/* Freeing NULL is a harmless no-op. */
if (!pv)
return VINF_SUCCESS;
("Invalid pointer %p!\n", pv),
/*
 * Get the heap and stats.
 * Validate the chunk at the same time.
 */
("%p: Not used!\n", pv),
/* statistics */
#ifdef VBOX_WITH_STATISTICS
#else
#endif
/* The heap structure. */
/* Some more verifications using additional info from pHeap. */
#ifdef MMHYPER_HEAP_STRICT
#endif
#if defined(VBOX_WITH_STATISTICS) || defined(MMHYPER_HEAP_FREE_POISON)
/* calc block size. */
#endif
#ifdef MMHYPER_HEAP_FREE_POISON
/* poison the block */
#endif
#ifdef MMHYPER_HEAP_FREE_DELAY
# ifdef MMHYPER_HEAP_FREE_POISON
/*
 * Check poison.
 */
while (i-- > 0)
{
("caller=%RTptr cb=%#zx off=%#x: %.*Rhxs\n",
}
# endif /* MMHYPER_HEAP_FREE_POISON */
/*
 * Delayed freeing.
 */
int rc = VINF_SUCCESS;
{
PMMHYPERCHUNK pChunkFree = (PMMHYPERCHUNK)((uintptr_t)pHeap + pHeap->aDelayedFrees[pHeap->iDelayedFree].offChunk);
}
#else /* !MMHYPER_HEAP_FREE_POISON */
/*
 * Call the worker.
 */
#endif /* !MMHYPER_HEAP_FREE_POISON */
/*
 * Update statistics.
 */
#ifdef VBOX_WITH_STATISTICS
if (RT_SUCCESS(rc))
{
}
else
#endif
return rc;
}
/**
 * Wrapper for mmHyperFreeInternal
 */
/* NOTE(review): the signature and the lock/call/unlock statements are missing
 * from this copy; as shown, 'rc' would be returned uninitialized. */
{
int rc;
return rc;
}
/**
 * Free memory a memory chunk.
 *
 * @returns VBox status code.
 * @param pHeap The heap.
 * @param pChunk The memory chunk to free.
 */
/* NOTE(review): the signature and most of the free-list search/link/merge
 * statements are missing from this copy; only the comment skeleton and a few
 * statements remain, so the merge logic cannot be reconstructed here. */
{
/*
 * Insert into the free list (which is sorted on address).
 *
 * We'll search towards the end of the heap to locate the
 * closest FREE chunk.
 */
{
{
{
{
break;
}
}
}
if (!pRight)
pRight = (PMMHYPERCHUNKFREE)((char *)pHeap->CTX_SUFF(pbHeap) + pHeap->offFreeTail); /** @todo this can't be correct! 'pLeft = .. ; else' I think */
if (pRight)
{
{
}
}
}
{
return VERR_INVALID_POINTER;
}
/*
 * Head free chunk list?
 */
if (!pLeft)
{
if (pRight)
{
}
else
{
}
}
else
{
/*
 * Can we merge with left hand free chunk?
 */
{
{
MMHYPERCHUNK_SET_OFFPREV(((PMMHYPERCHUNK)((char *)pLeft + pLeft->core.offNext)), -(int32_t)pLeft->core.offNext);
}
else
Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pLeft->cb, -(int32_t)pLeft->cb));
}
/*
 * No, just link it into the free list then.
 */
else
{
if (pRight)
{
}
else
{
}
}
}
/*
 * Can we merge with right hand free chunk?
 */
{
/* core */
{
}
else
/* free */
{
}
else
{
}
Log3(("mmHyperFree: cbFree %d -> %d (%d)\n", pHeap->cbFree, pHeap->cbFree - pRight->cb, -(int32_t)pRight->cb));
}
/* calculate the size. */
else
pFree->cb = pHeap->offPageAligned - ((uintptr_t)pFree - (uintptr_t)pHeap->CTX_SUFF(pbHeap)) - sizeof(MMHYPERCHUNK);
#ifdef MMHYPER_HEAP_STRICT
#endif
return VINF_SUCCESS;
}
#if defined(DEBUG) || defined(MMHYPER_HEAP_STRICT)
/**
 * Dumps a heap chunk to the log.
 *
 * @param pHeap Pointer to the heap.
 * @param pCur Pointer to the chunk.
 */
/* NOTE(review): the signature, the USED/FREE discrimination conditions, and
 * the argument lists of several Log() calls are missing from this copy. */
{
{
{
#ifdef IN_RING3
Log(("%p %06x USED offNext=%06x offPrev=-%06x %s%s\n",
#else
Log(("%p %06x USED offNext=%06x offPrev=-%06x %d%s\n",
#endif
}
else
Log(("%p %06x USED offNext=%06x offPrev=-%06x\n",
}
else
Log(("%p %06x FREE offNext=%06x offPrev=-%06x : cb=%06x offNext=%06x offPrev=-%06x\n",
pCur->core.offNext, -MMHYPERCHUNK_GET_OFFPREV(&pCur->core), pCur->cb, pCur->offNext, pCur->offPrev));
}
#endif /* DEBUG || MMHYPER_HEAP_STRICT */
#ifdef MMHYPER_HEAP_STRICT
/**
 * Internal consistency check.
 */
/* NOTE(review): the signature, the chunk-walk initialization, and most of the
 * per-chunk assertions are missing from this copy. */
{
for (;;)
{
else
if (pPrev)
("pPrev->core.offNext=%d offPrev=%d\n", pPrev->core.offNext, MMHYPERCHUNK_GET_OFFPREV(&pCur->core)));
# ifdef MMHYPER_HEAP_STRICT_FENCE
{
{
}
/* Verify the fence words trailing the chunk are all intact. */
uint32_t *pu32Bad = ASMMemIsAllU32((uint8_t *)pu32End - cbFence, cbFence - sizeof(uint32_t), MMHYPER_HEAP_STRICT_FENCE_U32);
if (RT_UNLIKELY(pu32Bad))
{
}
}
# endif
/* next */
break;
}
}
#endif
/**
 * Performs consistency checks on the heap if MMHYPER_HEAP_STRICT was
 * defined at build time.
 *
 * @param pVM Pointer to the shared VM structure.
 */
/* NOTE(review): the signature and the lock/check/unlock statements are
 * missing from this copy. */
{
#ifdef MMHYPER_HEAP_STRICT
#endif
}
#ifdef DEBUG
/**
 * Dumps the hypervisor heap to Log.
 * @param pVM VM Handle.
 */
/* NOTE(review): the signature and the chunk-walk statements are missing from
 * this copy; only the framing log statements remain. */
{
Log(("MMHyperHeapDump: *** heap dump - start ***\n"));
for (;;)
{
/* next */
break;
}
Log(("MMHyperHeapDump: *** heap dump - end ***\n"));
}
#endif
/**
 * Query the amount of free memory in the hypervisor heap.
 *
 * @returns Number of free bytes in the hypervisor heap.
 */
/* NOTE(review): signature and body (presumably returning the heap's cbFree)
 * are missing from this copy — TODO restore from upstream. */
{
}
/**
 * Query the size the hypervisor heap.
 *
 * @returns The size of the hypervisor heap in bytes.
 */
/* NOTE(review): signature and body are missing from this copy — TODO restore
 * from upstream. */
{
}
/**
 * Query the address and size the hypervisor memory area.
 *
 * @returns Base address of the hypervisor area.
 * @param pVM VM Handle.
 * @param pcb Where to store the size of the hypervisor area. (out)
 */
/* NOTE(review): the signature, the body of the 'if (pcb)' store, and the
 * return statement are missing from this copy — as shown the 'if' has no
 * statement. TODO restore from upstream. */
{
if (pcb)
}
/**
 * Checks if an address is within the hypervisor memory area.
 *
 * @returns true if inside.
 * @returns false if outside.
 * @param pVM VM handle.
 * @param GCPtr The pointer to check.
 */
/* NOTE(review): signature and body (the range comparison) are missing from
 * this copy — TODO restore from upstream. */
{
}