/* $Id$ */
/** @file
* VBox WDDM Miniport driver
*/
/*
* Copyright (C) 2011-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#include "VBoxMPWddm.h"
#include "common/VBoxMPCommon.h"
/*
* Public hardware buffer methods.
*/
{
return VINF_SUCCESS;
WARN(("VBoxVBVAEnable failed!"));
return VERR_GENERAL_FAILURE;
}
{
return VINF_SUCCESS;
}
int vboxVbvaCreate(PVBOXMP_DEVEXT pDevExt, VBOXVBVAINFO *pVbva, ULONG offBuffer, ULONG cbBuffer, D3DDDI_VIDEO_PRESENT_SOURCE_ID srcId)
{
cbBuffer);
if (RT_SUCCESS(rc))
{
}
else
{
}
return rc;
}
{
int rc = VINF_SUCCESS;
return rc;
}
{
// if (rect.left < 0) rect.left = 0;
// if (rect.right > (int)ppdev->cxScreen) rect.right = ppdev->cxScreen;
// if (rect.bottom > (int)ppdev->cyScreen) rect.bottom = ppdev->cyScreen;
if (VBoxVBVAWrite(&pSrc->Vbva.Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, &hdr, sizeof(hdr)))
return VINF_SUCCESS;
WARN(("VBoxVBVAWrite failed"));
return VERR_GENERAL_FAILURE;
}
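/*
 * Illustrative sketch (not part of the driver build): how a dirty RECT could be
 * clamped to the visible screen and packed into a position/size header before it
 * is pushed through VBoxVBVAWrite, mirroring the clipping that is commented out
 * above. The SKETCHHDR type and its field layout are assumptions made for this
 * sketch only; the real header used above is VBVACMDHDR.
 */
#if 0
typedef struct SKETCHHDR { int16_t x, y; uint16_t w, h; } SKETCHHDR;

static void sketchPackDirtyRect(const RECT *pRect, ULONG cxScreen, ULONG cyScreen, SKETCHHDR *pHdr)
{
    RECT r = *pRect;
    /* Clamp to the screen bounds. */
    if (r.left < 0)                 r.left = 0;
    if (r.top < 0)                  r.top = 0;
    if (r.right > (LONG)cxScreen)   r.right = cxScreen;
    if (r.bottom > (LONG)cyScreen)  r.bottom = cyScreen;

    pHdr->x = (int16_t)r.left;
    pHdr->y = (int16_t)r.top;
    pHdr->w = (uint16_t)(r.right - r.left);
    pHdr->h = (uint16_t)(r.bottom - r.top);
}
#endif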
/* command vbva ring buffer */
/* customized VBVA implementation */
/* Forward declarations of internal functions. */
DECLINLINE(void) vboxVBVAExFlush(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
}
{
return VINF_SUCCESS;
}
{
return (VBOXCMDVBVA_CTL*)VBoxSHGSMICommandAlloc(&pHGSMICtx->heapCtx, cbCtl, HGSMI_CH_VBVA, VBVA_CMDVBVA_CTL);
}
{
}
{
if (!pHdr)
{
WARN(("VBoxSHGSMICommandPrepSynch returnd NULL"));
return VERR_INVALID_PARAMETER;
}
if (offCmd == HGSMIOFFSET_VOID)
{
WARN(("VBoxSHGSMICommandOffset returnd NULL"));
return VERR_INVALID_PARAMETER;
}
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
if (!RT_SUCCESS(rc))
return rc;
}
else
}
else
return rc;
}
static int vboxCmdVbvaCtlSubmitAsync(PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, VBOXCMDVBVA_CTL * pCtl, PFNVBOXSHGSMICMDCOMPLETION_IRQ pfnCompletionIrq, void *pvCompletionIrq)
{
const VBOXSHGSMIHEADER* pHdr = VBoxSHGSMICommandPrepAsynchIrq(&pHGSMICtx->heapCtx, pCtl, pfnCompletionIrq, pvCompletionIrq, VBOXSHGSMI_FLAG_GH_ASYNCH_FORCE);
if (!pHdr)
{
WARN(("VBoxSHGSMICommandPrepAsynchIrq returnd NULL"));
return VERR_INVALID_PARAMETER;
}
if (offCmd == HGSMIOFFSET_VOID)
{
WARN(("VBoxSHGSMICommandOffset returnd NULL"));
return VERR_INVALID_PARAMETER;
}
if (RT_SUCCESS(rc))
{
return rc;
}
else
return rc;
}
static int vboxVBVAExCtlSubmitEnableDisable(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, bool fEnable)
{
VBOXCMDVBVA_CTL_ENABLE *pCtl = (VBOXCMDVBVA_CTL_ENABLE*)vboxCmdVbvaCtlCreate(pHGSMICtx, sizeof (*pCtl));
if (!pCtl)
{
WARN(("vboxCmdVbvaCtlCreate failed"));
return VERR_NO_MEMORY;
}
if (RT_SUCCESS(rc))
{
if (!RT_SUCCESS(rc))
}
else
return rc;
}
/*
* Public hardware buffer methods.
*/
{
bool bRc = false;
#if 0 /* All callers check this */
if (ppdev->bHGSMISupported)
#endif
{
pVBVA->indexRecordFirst = 0;
pVBVA->indexRecordFree = 0;
pCtx->fHwBufferOverflow = false;
}
if (!bRc)
{
}
return bRc;
}
{
LogFlowFunc(("\n"));
pCtx->fHwBufferOverflow = false;
return;
}
{
bool bRc = false;
// LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));
{
{
/* All slots in the records queue are used. */
}
{
/* Even after flush there is no place. Fail the request. */
LogFunc(("no space in the queue of records!!! first %d, last %d\n",
}
else
{
/* Initialize the record. */
// LogFunc(("indexRecordNext = %d\n", indexRecordNext));
/* Remember which record we are using. */
bRc = true;
}
}
return bRc;
}
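/*
 * Illustrative sketch (not part of the driver build): the record-slot bookkeeping
 * the begin-update path above relies on. Records live in a fixed-size circular
 * array; the queue is full when advancing indexRecordFree would collide with
 * indexRecordFirst. The helper name and the array-size constant are made up for
 * this sketch.
 */
#if 0
#define SKETCH_MAX_RECORDS 64

static bool sketchAllocRecordIndex(uint32_t *pIndexRecordFirst, uint32_t *pIndexRecordFree,
                                   uint32_t *pIndexAllocated)
{
    uint32_t indexRecordNext = (*pIndexRecordFree + 1) % SKETCH_MAX_RECORDS;
    if (indexRecordNext == *pIndexRecordFirst)
        return false;                       /* all slots in the records queue are used */
    *pIndexAllocated = *pIndexRecordFree;   /* remember which record we are using */
    *pIndexRecordFree = indexRecordNext;
    return true;
}
#endif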
{
// LogFunc(("\n"));
/* Mark the record completed. */
pCtx->fHwBufferOverflow = false;
return;
}
{
&& (
)
);
}
#ifdef DEBUG
{
if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pVBVA->indexRecordFirst, pVBVA->indexRecordFree))
{
WARN(("invalid record set"));
}
{
WARN(("invalid data set"));
}
}
#endif
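/*
 * Illustrative sketch (not part of the driver build): the kind of check
 * vboxVBVAExIsEntryInRange performs, i.e. whether u32Pos lies in the half-open
 * circular interval [u32First, u32Free) of record indices. A minimal version,
 * written independently of the real implementation used above.
 */
#if 0
static bool sketchIsIndexInRange(uint32_t u32First, uint32_t u32Pos, uint32_t u32Free)
{
    if (u32First == u32Free)
        return false;                   /* empty interval */
    if (u32First < u32Free)             /* interval does not wrap */
        return u32Pos >= u32First && u32Pos < u32Free;
    /* interval wraps around the end of the record array */
    return u32Pos >= u32First || u32Pos < u32Free;
}
#endif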
/*
* Private operations.
*/
{
}
{
}
{
if (i32Diff <= 0)
{
/* Chunk will not cross buffer boundary. */
}
else
{
/* Chunk crosses buffer boundary. */
}
return;
}
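/*
 * Illustrative sketch (not part of the driver build): placing cb bytes of data at
 * offset off32 in a ring buffer of cbData bytes, splitting the copy in two when it
 * crosses the end of the buffer -- the two cases distinguished by the comments
 * above. The helper and parameter names are made up for this sketch.
 */
#if 0
static void sketchRingPlaceData(uint8_t *pu8Ring, uint32_t cbData,
                                uint32_t off32, const void *pv, uint32_t cb)
{
    int32_t i32Diff = (int32_t)(off32 + cb) - (int32_t)cbData;
    if (i32Diff <= 0)
    {
        /* Chunk will not cross buffer boundary: single copy. */
        memcpy(pu8Ring + off32, pv, cb);
    }
    else
    {
        /* Chunk crosses buffer boundary: fill up to the end, then wrap to the start. */
        uint32_t cbFirst = cb - (uint32_t)i32Diff;
        memcpy(pu8Ring + off32, pv, cbFirst);
        memcpy(pu8Ring, (const uint8_t *)pv + cbFirst, (uint32_t)i32Diff);
    }
}
#endif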
{
{
return false;
}
// LogFunc(("%d\n", cb));
while (cb > 0)
{
// LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
// pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));
if (cbChunk >= cbHwBufferAvail)
{
if (cbChunk >= cbHwBufferAvail)
{
WARN(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
cb, cbHwBufferAvail));
{
WARN(("Buffer overflow!!!\n"));
pCtx->fHwBufferOverflow = true;
Assert(false);
return false;
}
}
}
}
return true;
}
/*
* Public writer to the hardware buffer.
*/
{
return 0;
}
RTDECL(void*) VBoxVBVAExAllocContiguous(PVBVAEXBUFFERCONTEXT pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, uint32_t cb)
{
{
return NULL;
}
// LogFunc(("%d\n", cb));
{
WARN(("requested to allocate buffer of size %d bigger than the VBVA ring buffer size %d", cb, pVBVA->cbData));
return NULL;
}
if (cbHwBufferContiguousAvail < cb)
{
{
/* the entire contiguous part is smaller than the requested buffer */
return NULL;
}
if (cbHwBufferContiguousAvail < cb)
{
/* This is really bad: the host did not clean up the buffer even after we requested a flush. */
WARN(("Host did not clean up the buffer!"));
return NULL;
}
}
}
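/*
 * Illustrative sketch (not part of the driver build): how the contiguous space
 * available for an allocation like the one above can be computed. Only the bytes
 * between the current free offset and the end of the ring are usable without
 * wrapping, and never more than the total amount of free space. Names are made up
 * for this sketch; RT_MIN is the IPRT minimum macro.
 */
#if 0
static uint32_t sketchContiguousAvail(uint32_t off32Free, uint32_t cbRing, uint32_t cbFreeTotal)
{
    uint32_t cbTillEnd = cbRing - off32Free;    /* bytes before the write position wraps */
    return RT_MIN(cbTillEnd, cbFreeTotal);
}
#endif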
{
return !!(u32HostEvents & VBVA_F_STATE_PROCESSING);
}
{
#ifdef DEBUG
#endif
}
{
}
{
if (!pVBVA)
{
return false;
}
{
return true;
}
return false;
}
void *pvFlush)
{
}
static void* vboxVBVAExIterCur(PVBVAEXBUFFERITERBASE pIter, struct VBVABUFFER *pVBVA, uint32_t *pcbBuffer, bool *pfProcessed)
{
if (cbRecord == VBVA_F_RECORD_PARTIAL)
return NULL;
if (pcbBuffer)
if (pfProcessed)
*pfProcessed = !vboxVBVAExIsEntryInRange(pVBVA->indexRecordFirst, pIter->iCurRecord, pVBVA->indexRecordFree);
}
{
}
{
if (vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, iCurRecord, pVBVA->indexRecordFree))
{
/* Even if the command gets completed by the time we read pCtx->pVBVA->aRecords[iCurRecord].cbRecord below,
 * that cbRecord value will still be valid: it can only be modified by a submitter,
 * and we are in the submitter context now. */
pIter->Base.off32CurCmd = vboxVBVAExSubst(pVBVA->off32Free, pCtx->pVBVA->aRecords[iCurRecord].cbRecord, pVBVA->cbData);
}
else
{
/* no data */
}
}
RTDECL(void*) VBoxVBVAExBIterNext(PVBVAEXBUFFERBACKWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
if (!vboxVBVAExIsEntryInRange(indexRecordFirstUncompleted, pIter->Base.iCurRecord, pVBVA->indexRecordFree))
return NULL;
/* Even if the command gets completed by the time we read pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord below,
 * that cbRecord value will still be valid: it can only be modified by a submitter,
 * and we are in the submitter context now. */
pIter->Base.off32CurCmd = vboxVBVAExSubst(pIter->Base.off32CurCmd, pCtx->pVBVA->aRecords[pIter->Base.iCurRecord].cbRecord, pVBVA->cbData);
return pvBuffer;
}
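/*
 * Illustrative sketch (not part of the driver build): the modular arithmetic the
 * backward iterator relies on. Stepping back over a record of cb bytes from the
 * current data offset has to wrap around the start of the ring buffer; this is
 * what vboxVBVAExSubst is used for above. The helper name is made up for this
 * sketch, and cb is assumed to never exceed the ring size.
 */
#if 0
static uint32_t sketchRingSub(uint32_t off32, uint32_t cb, uint32_t cbRing)
{
    return off32 >= cb ? off32 - cb : cbRing - (cb - off32);
}
#endif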
{
}
RTDECL(void*) VBoxVBVAExCFIterNext(PVBVAEXBUFFERFORWARDITER pIter, uint32_t *pcbBuffer, bool *pfProcessed)
{
if (!vboxVBVAExIsEntryInRange(pCtx->indexRecordFirstUncompleted, pIter->Base.iCurRecord, indexRecordFree))
return NULL;
if (!pvData)
return NULL;
if (pcbBuffer)
return pvData;
}
/**/
{
return VINF_SUCCESS;
WARN(("VBoxVBVAExEnable failed!"));
return VERR_GENERAL_FAILURE;
}
{
return VINF_SUCCESS;
}
{
int rc = VINF_SUCCESS;
return rc;
}
static void vboxCmdVbvaDdiNotifyCompleteIrq(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, UINT u32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
{
switch (enmComplType)
{
break;
break;
Assert(0);
break;
default:
break;
}
pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
}
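/*
 * Illustrative sketch (not part of the driver build): the general WDDM pattern the
 * function above follows when reporting a completed DMA packet from interrupt
 * context; the preempted and faulted cases use the same callback with the
 * DmaPreempted/DmaFaulted members instead. The surrounding locking and node/engine
 * selection are omitted, and the helper name is made up for this sketch.
 */
#if 0
static void sketchNotifyDmaCompleted(PVBOXMP_DEVEXT pDevExt, UINT u32FenceId)
{
    DXGKARGCB_NOTIFY_INTERRUPT_DATA notify;
    memset(&notify, 0, sizeof(notify));
    notify.InterruptType = DXGK_INTERRUPT_DMA_COMPLETED;
    notify.DmaCompleted.SubmissionFenceId = u32FenceId;
    notify.DmaCompleted.NodeOrdinal = 0;
    notify.DmaCompleted.EngineOrdinal = 0;
    /* Must be called at device interrupt level (from the ISR or a synchronized routine). */
    pDevExt->u.primary.DxgkInterface.DxgkCbNotifyInterrupt(pDevExt->u.primary.DxgkInterface.DeviceHandle, &notify);
    /* Queue the DPC so dxgkrnl picks the notification up via the driver's DPC routine. */
    pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pDevExt->u.primary.DxgkInterface.DeviceHandle);
}
#endif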
typedef struct VBOXCMDVBVA_NOTIFYCOMPLETED_CB
{
volatile UINT *pu32FenceId;
{
if (*pData->pu32FenceId)
{
*pData->pu32FenceId = 0;
pData->pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pData->pDevExt->u.primary.DxgkInterface.DeviceHandle);
return TRUE;
}
return FALSE;
}
static int vboxCmdVbvaDdiNotifyComplete(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, volatile UINT *pu32FenceId, DXGK_INTERRUPT_TYPE enmComplType)
{
&Data,
0, /* IN ULONG MessageNumber */
&bDummy);
if (!NT_SUCCESS(Status))
{
return VERR_GENERAL_FAILURE;
}
return Status;
}
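/*
 * Illustrative sketch (not part of the driver build): the DxgkCbSynchronizeExecution
 * pattern used above. The routine passed in runs synchronized with the device
 * interrupt, so the fence bookkeeping it does cannot race with the ISR. The context
 * struct and helper names are made up for this sketch.
 */
#if 0
typedef struct SKETCH_SYNC_CTX
{
    PVBOXMP_DEVEXT pDevExt;
    volatile UINT *pu32FenceId;
} SKETCH_SYNC_CTX;

static BOOLEAN sketchSyncRoutine(PVOID pvCtx)
{
    SKETCH_SYNC_CTX *pCtx = (SKETCH_SYNC_CTX *)pvCtx;
    /* Runs at device interrupt level: the fence can be consumed and cleared
     * here without racing the interrupt handler. */
    if (*pCtx->pu32FenceId)
    {
        *pCtx->pu32FenceId = 0;
        pCtx->pDevExt->u.primary.DxgkInterface.DxgkCbQueueDpc(pCtx->pDevExt->u.primary.DxgkInterface.DeviceHandle);
        return TRUE;
    }
    return FALSE;
}

static NTSTATUS sketchSyncWithIsr(PVBOXMP_DEVEXT pDevExt, volatile UINT *pu32FenceId)
{
    SKETCH_SYNC_CTX Ctx;
    BOOLEAN bDummy = FALSE;
    Ctx.pDevExt = pDevExt;
    Ctx.pu32FenceId = pu32FenceId;
    return pDevExt->u.primary.DxgkInterface.DxgkCbSynchronizeExecution(
               pDevExt->u.primary.DxgkInterface.DeviceHandle,
               sketchSyncRoutine,
               &Ctx,
               0,           /* MessageNumber */
               &bDummy);
}
#endif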
static int vboxCmdVbvaFlush(PVBOXMP_DEVEXT pDevExt, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
{
/* Issue the flush command. */
sizeof (VBVACMDVBVAFLUSH),
if (!pFlush)
{
WARN(("VBoxHGSMIBufferAlloc failed\n"));
return VERR_OUT_OF_RESOURCES;
}
return VINF_SUCCESS;
}
typedef struct VBOXCMDVBVA_CHECK_COMPLETED_CB
{
{
if (pCompleted->pVbva)
return bRc;
}
static uint32_t vboxCmdVbvaCheckCompleted(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, bool fPingHost, HGSMIGUESTCOMMANDCONTEXT *pCtx, bool fBufferOverflow)
{
if (fPingHost)
context.u32FenceID = 0;
&context,
0, /* IN ULONG MessageNumber */
&bRet);
return context.u32FenceID;
}
DECLCALLBACK(void) vboxCmdVbvaFlushCb(struct VBVAEXBUFFERCONTEXT *pCtx, PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx, void *pvFlush)
{
}
{
cbBuffer);
if (RT_SUCCESS(rc))
{
}
else
{
}
return rc;
}
int VBoxCmdVbvaSubmit(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva, struct VBOXCMDVBVA_HDR *pCmd, uint32_t cbCmd)
{
int rc = VINF_SUCCESS;
{
WARN(("buffer does not fit the vbva buffer, we do not support splitting buffers"));
return VERR_NOT_SUPPORTED;
}
{
WARN(("VBoxVBVAExBufferBeginUpdate failed!"));
return VERR_GENERAL_FAILURE;
}
void* pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
if (!pvBuffer)
{
WARN(("failed to allocate contiguous buffer, trying nopping the tail"));
if (!cbTail)
{
WARN(("this is not a free tail case, cbTail is NULL"));
return VERR_BUFFER_OVERFLOW;
}
pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbTail);
{
WARN(("VBoxVBVAExBufferBeginUpdate 2 failed!"));
return VERR_GENERAL_FAILURE;
}
pvBuffer = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
if (!pvBuffer)
{
WARN(("failed to allocate contiguous buffer, failing"));
return VERR_GENERAL_FAILURE;
}
}
{
/* Issue the submit command. */
sizeof (VBVACMDVBVASUBMIT),
if (!pSubmit)
{
WARN(("VBoxHGSMIBufferAlloc failed\n"));
return VERR_OUT_OF_RESOURCES;
}
pSubmit->u32Reserved = 0;
}
return VINF_SUCCESS;
}
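/*
 * Illustrative sketch (not part of the driver build): the "NOP the tail" fallback
 * the submit path above uses. When the contiguous free space up to the end of the
 * ring is too small for the command, that tail is claimed and marked as a NOP
 * record the host will skip, so the real command can then be allocated contiguously
 * from the start of the ring. The end-update/begin-update cycle performed between
 * the two allocations above is omitted here, and the helper name is made up.
 */
#if 0
static void *sketchAllocContiguousWithTailNop(PVBOXMP_DEVEXT pDevExt, VBOXCMDVBVA *pVbva,
                                              uint32_t cbCmd, uint32_t cbTail)
{
    void *pv = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
    if (pv)
        return pv;

    if (!cbTail)
        return NULL;                            /* not a free-tail case, nothing to NOP out */

    /* Claim the remaining tail and turn it into a NOP record. */
    pv = VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbTail);
    if (!pv)
        return NULL;
    *(uint8_t *)pv = VBOXCMDVBVA_OPTYPE_NOP;    /* first byte is the opcode, see the NOP checks below */

    /* Retry from the start of the ring, which is now the only contiguous free area. */
    return VBoxVBVAExAllocContiguous(&pVbva->Vbva, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, cbCmd);
}
#endif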
{
bool fProcessed;
{
if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
continue;
continue;
{
break;
}
/* we have canceled the command successfully */
return true;
}
return false;
}
{
bool fHasCommandsCompletedPreempted = false;
bool fProcessed;
{
if (!fProcessed)
break;
if (*pu8Cmd == VBOXCMDVBVA_OPTYPE_NOP)
continue;
|| u8State == VBOXCMDVBVA_STATE_CANCELLED);
if (u8State == VBOXCMDVBVA_STATE_IN_PROGRESS)
{
if (u32FenceID)
}
else
{
/* to prevent concurrent notifications from DdiPreemptCommand */
pCmd->u32FenceID = 0;
}
if (u32FenceID)
fHasCommandsCompletedPreempted = true;
}
return fHasCommandsCompletedPreempted;
}
{
return vboxCmdVbvaCheckCompleted(pDevExt, pVbva, fPingHost, &VBoxCommonFromDeviceExt(pDevExt)->guestCtx, false /* fBufferOverflow */);
}
static uint32_t vboxCVDdiSysMemElBuild(VBOXCMDVBVA_SYSMEMEL *pEl, PMDL pMdl, uint32_t iPfn, uint32_t cPages)
{
--cPages;
for ( ; cPages && cStoredPages < VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX; --cPages, ++cStoredPages, cur = next)
{
break;
}
return cPages;
}
uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
{
if (cbBuffer < sizeof (VBOXCMDVBVA_PAGING_TRANSFER))
{
WARN(("cbBuffer < sizeof (VBOXCMDVBVA_PAGING_TRANSFER)"));
goto done;
}
uint32_t i = 0;
for (; cPages && cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER); ++cEls, cbBuffer-=sizeof (VBOXCMDVBVA_SYSMEMEL), ++pEl, ++i)
{
}
done:
return cbInitBuffer - cbBuffer;
}
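/*
 * Illustrative sketch (not part of the driver build): detecting a run of physically
 * contiguous pages in an MDL, which is what the element builders above do before
 * packing a run into a VBOXCMDVBVA_SYSMEMEL. The helper name and output parameters
 * are made up for this sketch; cPagesMax is assumed to be at least 1, and
 * iPfn + cPagesMax must not exceed the number of pages described by the MDL.
 */
#if 0
static uint32_t sketchMdlContiguousRun(PMDL pMdl, uint32_t iPfn, uint32_t cPagesMax,
                                       PFN_NUMBER *pPfnStart, uint32_t *pcPagesRun)
{
    PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
    PFN_NUMBER PfnFirst = paPfns[iPfn];
    uint32_t cRun = 1;

    /* Extend the run while the next PFN is exactly the previous one plus one. */
    while (cRun < cPagesMax && paPfns[iPfn + cRun] == PfnFirst + cRun)
        ++cRun;

    *pPfnStart = PfnFirst;
    *pcPagesRun = cRun;
    return cRun;        /* pages consumed from the MDL starting at iPfn */
}
#endif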