/* PDMAsyncCompletionFile.cpp revision d116d2fdc8bc55e97a36032d13c0532de69d6aca */
/* $Id$ */
/** @file
* PDM Async I/O - Transport data asynchronous in R3 using EMT.
*/
/*
* Copyright (C) 2006-2008 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include "PDMInternal.h"
#include <iprt/critsect.h>
#include <iprt/semaphore.h>
#include "PDMAsyncCompletionFileInternal.h"
/**
* Frees a task.
*
* @returns nothing.
* @param pEndpoint Pointer to the endpoint the segment was for.
* @param pTask The task to free.
*/
{
/* Try the per endpoint cache first. */
{
/* Add it to the list. */
}
else if (false)
{
/* Bigger class cache */
}
else
{
}
}
/**
* Allocates a task segment
*
* @returns Pointer to the new task segment or NULL
* @param pEndpoint Pointer to the endpoint
*/
{
/* Try the small per endpoint cache first. */
{
/* Try the bigger endpoint class cache. */
PPDMASYNCCOMPLETIONEPCLASSFILE pEndpointClass = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;
#if 0
/* We start with the assigned slot id to distribute the load when allocating new tasks. */
do
{
pTask = (PPDMASYNCCOMPLETIONTASK)ASMAtomicXchgPtr((void * volatile *)&pEndpointClass->apTaskCache[iSlot], NULL);
if (pTask)
break;
#endif
if (!pTask)
{
/*
* Allocate completely new.
* If this fails we return NULL.
*/
sizeof(PDMACTASKFILE),
(void **)&pTask);
if (RT_FAILURE(rc))
}
#if 0
else
{
/* Remove the first element and put the rest into the slot again. */
/* Put back into the list adding any new tasks. */
while (true)
{
bool fChanged = ASMAtomicCmpXchgPtr((void * volatile *)&pEndpointClass->apTaskCache[iSlot], pTaskHeadNew, NULL);
if (fChanged)
break;
PPDMASYNCCOMPLETIONTASK pTaskHead = (PPDMASYNCCOMPLETIONTASK)ASMAtomicXchgPtr((void * volatile *)&pEndpointClass->apTaskCache[iSlot], NULL);
/* The new task could be taken inbetween */
if (pTaskHead)
{
/* Go to the end of the probably much shorter new list. */
/* Concatenate */
}
/* Another round trying to change the list. */
}
/* We got a task from the global cache so decrement the counter */
}
#endif
}
else
{
/* Grab a free task from the head. */
AssertMsg(pEndpoint->cTasksCached > 0, ("No tasks cached but list contains more than one element\n"));
}
return pTask;
}
{
/*
* Get pending tasks.
*/
/* Reverse the list to process in FIFO order. */
if (pTasks)
{
while (pTask)
{
}
}
return pTasks;
}
{
if (!fWokenUp)
{
int rc = VINF_SUCCESS;
if (fWaitingEventSem)
}
}
static int pdmacFileAioMgrWaitForBlockingEvent(PPDMACEPFILEMGR pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT enmEvent)
{
int rc = VINF_SUCCESS;
/* Wakeup the async I/O manager */
/* Wait for completion. */
ASMAtomicWriteU32((volatile uint32_t *)&pAioMgr->enmBlockingEvent, PDMACEPFILEAIOMGRBLOCKINGEVENT_INVALID);
return rc;
}
static int pdmacFileAioMgrAddEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
int rc;
if (RT_SUCCESS(rc))
return rc;
}
static int pdmacFileAioMgrRemoveEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
int rc;
ASMAtomicWritePtr((void * volatile *)&pAioMgr->BlockingEventData.RemoveEndpoint.pEndpoint, pEndpoint);
return rc;
}
static int pdmacFileAioMgrCloseEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
int rc;
ASMAtomicWritePtr((void * volatile *)&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint, pEndpoint);
return rc;
}
{
int rc;
return rc;
}
{
do
{
} while (!ASMAtomicCmpXchgPtr((void * volatile *)&pEndpoint->pTasksNewHead, (void *)pTask, (void *)pNext));
return VINF_SUCCESS;
}
{
{
}
else
{
}
}
{
int rc = VINF_SUCCESS;
|| (enmTransfer == PDMACTASKFILETRANSFER_WRITE));
for (unsigned i = 0; i < cSegments; i++)
{
/* Send it off to the I/O manager. */
}
return VINF_SUCCESS;
}
/**
* Creates a new async I/O manager.
*
* @returns VBox status code.
* @param pEpClass Pointer to the endpoint class data.
* @param ppAioMgr Where to store the pointer to the new async I/O manager on success.
*/
static int pdmacFileAioMgrCreate(PPDMASYNCCOMPLETIONEPCLASSFILE pEpClass, PPPDMACEPFILEMGR ppAioMgr)
{
int rc = VINF_SUCCESS;
LogFlowFunc((": Entered\n"));
rc = MMR3HeapAllocZEx(pEpClass->Core.pVM, MM_TAG_PDM_ASYNC_COMPLETION, sizeof(PDMACEPFILEMGR), (void **)&pAioMgrNew);
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
/* Init the rest of the manager. */
if (!pAioMgrNew->fFailsafe)
if (RT_SUCCESS(rc))
{
0,
0,
? "F"
: "N");
if (RT_SUCCESS(rc))
{
/* Link it into the list. */
if (pEpClass->pAioMgrHead)
*ppAioMgr = pAioMgrNew;
return VINF_SUCCESS;
}
}
}
}
}
}
return rc;
}
/**
* Destroys a async I/O manager.
*
* @returns nothing.
* @param pAioMgr The async I/O manager to destroy.
*/
static void pdmacFileAioMgrDestroy(PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile, PPDMACEPFILEMGR pAioMgr)
{
/* Unlink from the list. */
if (pPrev)
else
if (pNext)
pEpClassFile->cAioMgrs--;
/* Free the ressources. */
}
{
int rc = VINF_SUCCESS;
if (RT_FAILURE(rc))
{
LogRel(("AIO: Async I/O manager not supported (rc=%Rrc). Falling back to failsafe manager\n",
rc));
pEpClassFile->fFailsafe = true;
}
else
{
pEpClassFile->fFailsafe = false;
}
/* Init critical section. */
if (RT_SUCCESS(rc))
{
/* Init cache structure */
if (RT_FAILURE(rc))
}
return rc;
}
{
/* All endpoints should be closed at this point. */
/* Destroy all left async I/O managers. */
while (pEpClassFile->pAioMgrHead)
}
{
int rc = VINF_SUCCESS;
("PDMAsyncCompletion: Invalid flag specified\n"), VERR_INVALID_PARAMETER);
if (!pEpClassFile->fFailsafe)
{
/*
* We only disable the cache if the size of the file is a multiple of 512.
* Certain hosts like Windows, Linux and Solaris require that transfer sizes
* are aligned to the volume sector size.
* If not we just make sure that the data is written to disk with RTFILE_O_WRITE_THROUGH
* which will trash the host cache but ensures that the host cache will not
* contain dirty buffers.
*/
if (RT_SUCCESS(rc))
{
{
}
}
}
/* Open with final flags. */
if (RT_SUCCESS(rc))
{
/* Initialize the segment cache */
sizeof(PDMACTASKFILE),
(void **)&pEpFile->pTasksFreeHead);
if (RT_SUCCESS(rc))
{
pEpFile->cTasksCached = 0;
if (pEpClassFile->fFailsafe)
{
/* Safe mode. Every file has its own async I/O manager. */
}
else
{
if (fFlags & PDMACEP_FILE_FLAGS_CACHING)
{
if (RT_FAILURE(rc))
{
LogRel(("AIOMgr: Endpoint for \"%s\" was opened with caching but initializing cache failed. Disabled caching\n", pszUri));
}
}
/* Check for an idling one or create new if not found */
if (!pEpClassFile->pAioMgrHead)
{
}
else
{
}
}
/* Assign the endpoint to the thread. */
if (RT_FAILURE(rc))
}
if (RT_FAILURE(rc))
}
return rc;
}
{
/* Make sure that all tasks finished for this endpoint. */
/*
* If the async I/O manager is in failsafe mode this is the only endpoint
* he processes and thus can be destroyed now.
*/
/* Free cached tasks. */
while (pTask)
{
}
/* Free the cached data. */
return VINF_SUCCESS;
}
{
else
}
{
return VERR_NOT_SUPPORTED;
else
}
{
return VERR_NOT_SUPPORTED;
pTaskFile->cbTransferLeft = 0;
return VINF_SUCCESS;
}
{
}
{
/* u32Version */
/* pcszName */
"File",
/* enmClassType */
/* cbEndpointClassGlobal */
sizeof(PDMASYNCCOMPLETIONEPCLASSFILE),
/* cbEndpoint */
sizeof(PDMASYNCCOMPLETIONENDPOINTFILE),
/* cbTask */
sizeof(PDMASYNCCOMPLETIONTASKFILE),
/* pfnInitialize */
/* pfnTerminate */
/* pfnEpInitialize. */
/* pfnEpClose */
/* pfnEpRead */
/* pfnEpWrite */
/* pfnEpFlush */
/* pfnEpGetSize */
/* u32VersionEnd */
};