VBoxGuest.cpp revision 296cdbced70651cbf4c98767398d0c898885b54b
/** @file
*
* VBoxGuest -- VirtualBox Win32 guest support driver
*
* Copyright (C) 2006-2007 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
// enable backdoor logging
//#define LOG_ENABLED
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include "VBoxGuest_Internal.h"
#ifdef TARGET_NT4
#include "NTLegacy.h"
#else
#include "VBoxGuestPnP.h"
#endif
#include "Helper.h"
#include <excpt.h>
#include <stdio.h>
#include <VBox/VBoxGuestLib.h>
#include <VBoxGuestInternal.h>
#ifdef TARGET_NT4
/*
* XP DDK #defines ExFreePool to ExFreePoolWithTag. The latter does not exist
* on NT4, so... The same for ExAllocatePool.
*/
#endif
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
extern "C"
{
}
#ifdef VBOX_WITH_HGCM
#endif
#ifdef DEBUG
#endif
/*******************************************************************************
* Exported Functions *
*******************************************************************************/
#ifdef ALLOC_PRAGMA
/* Note: at least the isr handler should be in non-pageable memory! */
/*#pragma alloc_text (PAGE, VBoxGuestDpcHandler)
#pragma alloc_text (PAGE, VBoxGuestIsrHandler) */
#endif
/**
 * Driver entry point.
 *
 * NOTE(review): this file appears to be a stripped/partial extraction — the
 * function signature and the declarations of majorVersion, minorVersion,
 * buildNumber, winVersion and rc are not visible here; confirm against the
 * complete source before modifying.
 *
 * @returns appropriate status code.
 * @param pDrvObj Pointer to driver object.
 * @param pRegPath Registry base path.
 */
{
dprintf(("VBoxGuest::DriverEntry: Running on Windows NT version %d.%d, build %d\n", majorVersion, minorVersion, buildNumber));
#ifdef DEBUG
#endif
/* Map the reported NT version onto the driver's winVersion constant and
 * refuse to load on anything older than NT4 or newer than known versions. */
switch (majorVersion)
{
case 6: /* Windows Vista or Windows 7 (based on minor ver) */
switch (minorVersion)
{
case 0: /* Note: Also could be Windows 2008 Server! */
/* NOTE(review): no winVersion assignment is visible for 6.0 (Vista) in this
 * extraction — presumably removed; verify against the full source. */
break;
case 1: /* Note: Also could be Windows 2008 Server R2! */
winVersion = WIN7;
break;
default:
dprintf(("VBoxGuest::DriverEntry: Unknown version of Windows, refusing!\n"));
return STATUS_DRIVER_UNABLE_TO_LOAD;
}
break;
case 5:
switch (minorVersion)
{
case 2:
winVersion = WIN2K3;
break;
case 1:
winVersion = WINXP;
break;
case 0:
winVersion = WIN2K;
break;
default:
dprintf(("VBoxGuest::DriverEntry: Unknown version of Windows, refusing!\n"));
return STATUS_DRIVER_UNABLE_TO_LOAD;
}
break;
case 4:
winVersion = WINNT4;
break;
default:
dprintf(("VBoxGuest::DriverEntry: At least Windows NT4 required!\n"));
return STATUS_DRIVER_UNABLE_TO_LOAD;
}
/*
 * Setup the driver entry points in pDrvObj.
 */
#ifdef TARGET_NT4
#else
#endif
return rc;
}
#ifndef TARGET_NT4
/**
 * Handle request from the Plug & Play subsystem
 *
 * NOTE(review): signature and most statements are missing from this
 * extraction (declarations of rc, rc2, devName, deviceObject and the calls
 * whose results are tested below are not visible).
 *
 * @returns NT status code
 * @param pDrvObj Driver object
 * @param pDevObj Device object
 */
{
dprintf(("VBoxGuest::VBoxGuestAddDevice\n"));
/*
 * Create device.
 */
rc = IoCreateDevice(pDrvObj, sizeof(VBOXGUESTDEVEXT), &devName, FILE_DEVICE_UNKNOWN, 0, FALSE, &deviceObject);
if (!NT_SUCCESS(rc))
{
return rc;
}
/* NOTE(review): the call whose status this second check tests (presumably a
 * symbolic-link creation) is not visible here — confirm against full source.
 * Also note these error paths return without a visible IoDeleteDevice. */
if (!NT_SUCCESS(rc))
{
return rc;
}
/*
 * Setup the device extension.
 */
{
dprintf(("VBoxGuest::VBoxGuestAddDevice: IoAttachDeviceToDeviceStack did not give a nextLowerDrive\n"));
return STATUS_DEVICE_NOT_CONNECTED;
}
#ifdef VBOX_WITH_HGCM
if (RT_FAILURE(rc2))
{
dprintf(("VBoxGuest::VBoxGuestAddDevice: RTSpinlockCreate failed\n"));
return STATUS_DRIVER_UNABLE_TO_LOAD;
}
#endif
/* NOTE(review): the #endif below has no matching #if visible in this view —
 * extraction artifact; verify preprocessor balance in the full source. */
#endif
/* VBoxGuestPower is pageable; ensure we are not called at elevated IRQL */
/* Driver is ready now. */
return rc;
}
#endif
/**
 * Unload the driver.
 *
 * NOTE(review): signature missing in this extraction; the KeWaitForSingleObject
 * calls referenced by the log messages are not visible either.
 *
 * @param pDrvObj Driver object.
 */
{
dprintf(("VBoxGuest::VBoxGuestUnload\n"));
#ifdef TARGET_NT4
/* On NT4 the driver creates its own worker/idle threads; wait for them to
 * exit before tearing the driver down. */
if (pDevExt->workerThread)
{
dprintf(("VBoxGuest::VBoxGuestUnload: waiting for the worker thread to terminate...\n"));
dprintf(("VBoxGuest::VBoxGuestUnload: returned from KeWaitForSingleObject for worker thread\n"));
}
if (pDevExt->idleThread)
{
dprintf(("VBoxGuest::VBoxGuestUnload: waiting for the idle thread to terminate...\n"));
dprintf(("VBoxGuest::VBoxGuestUnload: returned from KeWaitForSingleObject for idle thread\n"));
}
#endif
/*
 * I don't think it's possible to unload a driver which processes have
 * opened, at least we'll blindly assume that here.
 */
#ifdef VBOX_WITH_HGCM
{
}
#endif
/* NOTE(review): unmatched #endif below — extraction artifact; verify. */
#endif
dprintf(("VBoxGuest::VBoxGuestUnload: returning\n"));
}
/**
 * Create (i.e. Open) file entry point.
 *
 * NOTE(review): signature and the declarations of pFileObj, pSession and rcNt
 * are missing from this extraction.
 *
 * @param pDevObj Device object.
 * @param pIrp Request packet.
 */
{
dprintf(("VBoxGuest::VBoxGuestCreate\n"));
/*
 * We are not remotely similar to a directory...
 * (But this is possible.)
 */
{
dprintf(("VBoxGuest::VBoxGuestCreate: we're not a directory!\n"));
return STATUS_NOT_A_DIRECTORY;
}
#ifdef VBOX_WITH_HGCM
if (pFileObj)
{
/* pSession presumably allocated just above (removed in this view) —
 * failure means out of memory. */
if (RT_UNLIKELY(!pSession))
{
dprintf(("VBoxGuestCreate: no memory!\n"));
return STATUS_NO_MEMORY;
}
/* NOTE(review): truncated dprintf call below — extraction artifact. */
dprintf(("VBoxGuestCreate: pDevExt=%p pFileObj=%p pSession=%p\n",
}
#endif
return rcNt;
}
/**
 * Close file entry point.
 *
 * NOTE(review): signature missing; the loop header for the aHGCMClientIds
 * scan and the disconnect call inside it are not visible in this extraction.
 *
 * @param pDevObj Device object.
 * @param pIrp Request packet.
 */
{
dprintf(("VBoxGuest::VBoxGuestClose\n"));
/* NOTE(review): truncated dprintf call below — extraction artifact. */
dprintf(("VBoxGuest::VBoxGuestClose: pDevExt=%p pFileObj=%p pSession=%p\n",
#ifdef VBOX_WITH_HGCM
if (pFileObj)
{
if (RT_UNLIKELY(!pSession))
{
dprintf(("VBoxGuestClose: no FsContext!\n"));
}
else
{
/* Tear down any HGCM client ids still registered for this session. */
if (pSession->aHGCMClientIds[i])
{
pSession->aHGCMClientIds[i] = 0;
}
}
}
#endif
return STATUS_SUCCESS;
}
#ifdef VBOX_WITH_HGCM
/* NOTE(review): the two bodies below belong to the HGCM callback worker and
 * the user-mode VBoxHGCMCallback wrapper; both signatures and most statements
 * (the wait-loop condition, KeWaitForSingleObject call, timeout computation)
 * were removed in this extraction. Treat as documentation-only context. */
{
/* Possible problem with request completion right between the fu32Flags check and KeWaitForSingleObject
* call; introduce a timeout to make sure we don't wait indefinitely.
*/
if (u32Timeout == RT_INDEFINITE_WAIT)
else
{
}
{
/* Specifying UserMode so killing the user process will abort the wait. */
);
continue;
/* Anything other than STATUS_WAIT_0 means timeout, termination, or an
 * external-event signal — stop waiting. */
if (rc != STATUS_WAIT_0)
{
dprintf(("VBoxHGCMCallback: The external event was signalled or the wait timed out or terminated rc = 0x%08X.\n", rc));
break;
}
}
return;
}
{
dprintf(("VBoxHGCMCallback\n"));
return VINF_SUCCESS;
}
/**
 * HGCM request callback used for kernel-mode callers (e.g. shared folders).
 *
 * Fix: the log message previously said "VBoxHGCMCallback", which is the name
 * of a different callback in this file — backdoor logs could not distinguish
 * which callback fired. It now logs this function's own name.
 *
 * @returns VINF_SUCCESS.
 * @param pHeader HGCM request header (unused here).
 * @param pvData  Opaque context pointer (unused here).
 * @param u32Data Context data (unused here).
 */
DECLVBGL(int) VBoxHGCMCallbackKernelMode (VMMDevHGCMRequestHeader *pHeader, void *pvData, uint32_t u32Data)
{
dprintf(("VBoxHGCMCallbackKernelMode\n"));
return VINF_SUCCESS;
}
/* NOTE(review): first body is VBoxHGCMCallbackInterruptible (signature
 * removed); second is vboxHGCMVerifyIOBuffers, whose buffer-length
 * comparisons were removed — only the failure branches remain visible. */
{
dprintf(("VBoxHGCMCallbackInterruptible\n"));
return VINF_SUCCESS;
}
{
/* Output buffer too small for the request — reject. */
{
dprintf(("VBoxGuest::vboxHGCMVerifyIOBuffers: OutputBufferLength %d < %d\n",
return STATUS_INVALID_PARAMETER;
}
/* Input buffer too small for the request — reject. */
{
dprintf(("VBoxGuest::vboxHGCMVerifyIOBuffers: InputBufferLength %d < %d\n",
return STATUS_INVALID_PARAMETER;
}
return STATUS_SUCCESS;
}
#endif /* VBOX_WITH_HGCM */
/* NOTE(review): an entirely stripped body followed by a bool-returning helper
 * that issues a VMMDev request (allocation and perform calls removed in this
 * extraction); returns true only when both the alloc and the request succeed. */
{
}
{
bool result = false;
if (RT_SUCCESS (rc))
{
if (RT_FAILURE (rc))
{
dprintf (("VBoxGuest::VBoxGuestDeviceControl: error issuing request to VMMDev! "
"rc = %Rrc\n", rc));
}
else
{
result = true;
}
}
return result;
}
#ifdef VBOX_WITH_MANAGEMENT
/* VBoxGuestSetBalloonSize (signature removed in this extraction): grows or
 * shrinks the memory balloon to cBalloonChunks chunks, telling the host about
 * each chunk's physical pages via VMMDevReq_ChangeMemBalloon.
 * NOTE(review): the loop headers, allocation calls (MmAllocatePagesForMdl /
 * ExAllocatePool) and VbglGRPerform calls are not visible here. */
{
int rc = VINF_SUCCESS;
uint32_t i;
/* Reject balloon sizes beyond the precomputed maximum. */
{
AssertMsgFailed(("VBoxGuestSetBalloonSize illegal balloon size %d (max=%d)\n", cBalloonChunks, pDevExt->MemBalloon.cMaxBalloonChunks));
return VERR_INVALID_PARAMETER;
}
return VINF_SUCCESS; /* nothing to do */
/* Allocate request packet */
rc = VbglGRAlloc((VMMDevRequestHeader **)&req, RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]), VMMDevReq_ChangeMemBalloon);
if (RT_FAILURE(rc))
return rc;
{
/* inflate */
{
#ifndef TARGET_NT4
/*
* Use MmAllocatePagesForMdl to specify the range of physical addresses we wish to use.
*/
if (pMdl)
{
{
rc = VERR_NO_MEMORY;
goto end;
}
}
#else
if (!pvBalloon)
{
rc = VERR_NO_MEMORY;
goto end;
}
{
rc = VERR_NO_MEMORY;
goto end;
}
else
{
__try {
}
{
dprintf(("MmProbeAndLockPages failed!\n"));
rc = VERR_NO_MEMORY;
goto end;
}
}
#endif
/* Copy manually as RTGCPHYS is always 64 bits */
/* NOTE(review): if PFN_NUMBER is 32-bit, pPageDesc[j] << PAGE_SHIFT could
 * overflow before widening to RTGCPHYS — confirm against full source. */
for (uint32_t j=0;j<VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;j++)
req->aPhysPage[j] = pPageDesc[j] << PAGE_SHIFT; /* PFN_NUMBER is physical page nr, so shift left by 12 to get the physical address */
req->header.size = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
if (RT_FAILURE(rc))
{
dprintf(("VBoxGuest::VBoxGuestSetBalloonSize: error issuing request to VMMDev! "
"rc = %Rrc\n", rc));
#ifndef TARGET_NT4
#else
#endif
goto end;
}
else
{
#ifndef TARGET_NT4
#else
#endif
}
}
}
else
{
/* deflate */
{
if (pMdl)
{
#ifdef TARGET_NT4
#endif
/* Copy manually as RTGCPHYS is always 64 bits */
for (uint32_t j = 0; j < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; j++)
req->aPhysPage[j] = pPageDesc[j] << PAGE_SHIFT; /* PFN_NUMBER is physical page nr, so shift left by 12 to get the physical address */
req->header.size = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
if (RT_FAILURE(rc))
{
AssertMsgFailed(("VBoxGuest::VBoxGuestSetBalloonSize: error issuing request to VMMDev! rc = %Rrc\n", rc));
break;
}
/* Free the ballooned memory */
#ifndef TARGET_NT4
#else
#endif
}
}
}
end:
return rc;
}
/* VBoxGuestQueryMemoryBalloon (signature removed): asks the host for the
 * desired balloon size via VMMDevReq_GetMemBalloonChangeRequest and adjusts
 * the balloon accordingly. */
{
/* just perform the request */
dprintf(("VBoxGuestQueryMemoryBalloon\n"));
int rc = VbglGRAlloc((VMMDevRequestHeader **)&req, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
if (RT_SUCCESS(rc))
{
if (RT_FAILURE(rc))
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl VBOXGUEST_IOCTL_CHECK_BALLOON: error issuing request to VMMDev! "
"rc = %Rrc\n", rc));
}
else
{
/* Lazily allocate the MDL tracking array on first use.
 * NOTE(review): the NULL check that should sit between the allocation and
 * the VERR_NO_MEMORY return was removed in this extraction; also note the
 * early return appears to leak the VbglGR request — verify in full source. */
{
pDevExt->MemBalloon.paMdlMemBalloon = (PMDL *)ExAllocatePool(PagedPool, req->cPhysMemChunks * sizeof(PMDL));
return VERR_NO_MEMORY;
}
/* ignore out of memory failures */
if (rc == VERR_NO_MEMORY)
rc = VINF_SUCCESS;
if (pMemBalloonSize)
}
}
return rc;
}
#endif
/* NOTE(review): three stripped fragments — balloon init (returns success when
 * management support is compiled out), balloon cleanup, and the atomic
 * test-and-clear helper whose loop body was removed. */
{
#ifdef VBOX_WITH_MANAGEMENT
#else
return VINF_SUCCESS;
#endif
}
{
#ifdef VBOX_WITH_MANAGEMENT
{
/* Clean up the memory balloon leftovers */
}
#endif
}
/** A quick implementation of AtomicTestAndClear for uint32_t and multiple
* bits.
*/
{
AssertPtrReturn(pu32Bits, 0);
u32Mask));
while (iBitOffset > 0)
{
if (fSet)
}
return u32Result;
}
/**
 * Device I/O Control entry point.
 *
 * NOTE(review): signature, the switch-statement header, the IOCTL case labels
 * for several branches, and most buffer-length comparisons were removed in
 * this extraction; only the bodies remain. Do not restructure without the
 * complete source.
 *
 * @param pDevObj Device object.
 * @param pIrp Request packet.
 */
{
//dprintf(("VBoxGuest::VBoxGuestDeviceControl\n"));
unsigned cbOut = 0;
{
/* VBOXGUEST_IOCTL_GETVMMDEVPORT: report the VMMDev I/O port info. */
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_GETVMMDEVPORT\n"));
{
break;
}
cbOut = sizeof(VBoxGuestPortInfo);
break;
}
/* VBOXGUEST_IOCTL_WAITEVENT: block until one of the requested events fires. */
{
/* Need to be extended to support multiple waiters for an event,
* array of counters for each event, event mask is computed, each
* time a wait event is arrived.
*/
//dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_WAITEVENT\n"));
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: OutputBufferLength %d < sizeof(VBoxGuestWaitEventInfo)\n",
break;
}
dprintf(("VBoxGuest::VBoxGuestDeviceControl: InputBufferLength %d < sizeof(VBoxGuestWaitEventInfo)\n",
break;
}
if (!eventInfo->u32EventMaskIn) {
dprintf (("VBoxGuest::VBoxGuestDeviceControl: Invalid input mask %#x\n",
break;
}
eventInfo->u32EventFlagsOut = 0;
/* Possible problem with request completion right between the pending event check and KeWaitForSingleObject
* call; introduce a timeout (if none was specified) to make sure we don't wait indefinitely.
*/
for (;;)
{
dprintf (("mask = 0x%x, pending = 0x%x\n",
if (u32EventsPending)
{
break;
}
//dprintf(("VBOXGUEST_IOCTL_WAITEVENT: Wait returned %d -> event %x\n", rc, eventInfo->u32EventFlagsOut));
continue;
if (rc != STATUS_SUCCESS)
{
/* There was a timeout or wait was interrupted, etc. */
break;
}
}
cbOut = sizeof(VBoxGuestWaitEventInfo);
break;
}
case VBOXGUEST_IOCTL_VMMREQUEST(0): /* (The size isn't relevant on NT.)*/
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_VMMREQUEST\n"));
/* NOTE(review): the macro below lost several of its continuation lines in
 * this extraction; also note stray trailing backslashes on two dprintf
 * lines further down — verify against the complete source. */
#define CHECK_SIZE(s) \
{ \
dprintf(("VBoxGuest::VBoxGuestDeviceControl: OutputBufferLength %d < %d\n", \
break; \
} \
dprintf(("VBoxGuest::VBoxGuestDeviceControl: InputBufferLength %d < %d\n", \
break; \
}
/* get the request header */
CHECK_SIZE(sizeof(VMMDevRequestHeader));
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: vmmdevGetRequestSize failed!\n")); \
break;
}
/* make sure the buffers suit the request */
if (RT_FAILURE(rc))
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc %d!!\n",
requestHeader->size, vmmdevGetRequestSize(requestHeader->requestType), requestHeader->requestType, rc));
break;
}
/* just perform the request */
if (RT_SUCCESS(rc))
{
/* copy the request information */
if (RT_FAILURE(rc))
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl VBOXGUEST_IOCTL_VMMREQUEST: Error issuing request to VMMDev! "
"rc = %Rrc\n", rc));
}
else
{
/* copy result */
}
}
else
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: VbglGRAlloc failed!\n")); \
}
break;
}
/* Filter-mask IOCTL (case label removed in this extraction). */
{
dprintf (("VBoxGuest::VBoxGuestDeviceControl: InputBufferLength %d < %d\n",
sizeof (VBoxGuestFilterMaskInfo)));
break;
}
{
}
break;
}
#ifdef VBOX_WITH_HGCM
/* HGCM offers blocking IOCTLSs just like waitevent and actually
* uses the same waiting code.
*/
#ifdef RT_ARCH_AMD64
#endif /* RT_ARCH_AMD64 */
/* VBOXGUEST_IOCTL_HGCM_CONNECT (case label removed). */
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_HGCM_CONNECT\n"));
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: OutputBufferLength %d != sizeof(VBoxGuestHGCMConnectInfo) %d\n",
break;
}
dprintf(("VBoxGuest::VBoxGuestDeviceControl: InputBufferLength %d != sizeof(VBoxGuestHGCMConnectInfo) %d\n",
break;
}
/* If request will be processed asynchronously, execution will
* go to VBoxHGCMCallback. There it will wait for the request event, signalled from IRQ.
* On IRQ arrival, the VBoxHGCMCallback(s) will check the request memory and, if completion
* flag is set, returns.
*/
int rc = VbglR0HGCMInternalConnect (ptr, pIrp->RequestorMode == KernelMode? VBoxHGCMCallbackKernelMode :VBoxHGCMCallback,
if (RT_FAILURE(rc))
{
}
else
{
{
dprintf(("VBOXGUEST_IOCTL_HGCM_CONNECT: pDevExt=%p pFileObj=%p pSession=%p\n",
/*
* Append the client id to the client id table.
* If the table has somehow become filled up, we'll disconnect the session.
*/
unsigned i;
if (!pSession->aHGCMClientIds[i])
{
break;
}
{
static unsigned s_cErrors = 0;
if (s_cErrors++ < 32)
dprintf(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
VbglR0HGCMInternalDisconnect(&Info, pIrp->RequestorMode == KernelMode? VBoxHGCMCallbackKernelMode :VBoxHGCMCallback, pDevExt, RT_INDEFINITE_WAIT);
break;
}
}
else
{
/* @fixme, r=Leonid. I have no clue what to do in cases where
* pStack->FileObject==NULL. Can't populate list of HGCM ID's...
* But things worked before, so do nothing for now.
*/
dprintf(("VBOXGUEST_IOCTL_HGCM_CONNECT: pDevExt=%p, pStack->FileObject=%p\n", pDevExt, pStack->FileObject));
}
}
} break;
#ifdef RT_ARCH_AMD64
#endif /* RT_ARCH_AMD64 */
/* VBOXGUEST_IOCTL_HGCM_DISCONNECT (case label removed). */
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_HGCM_DISCONNECT\n"));
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: OutputBufferLength %d != sizeof(VBoxGuestHGCMDisconnectInfo) %d\n",
break;
}
dprintf(("VBoxGuest::VBoxGuestDeviceControl: InputBufferLength %d != sizeof(VBoxGuestHGCMDisconnectInfo) %d\n",
break;
}
unsigned i=0;
/* See comment in VBOXGUEST_IOCTL_HGCM_CONNECT */
if (pStack->FileObject)
{
dprintf(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: pDevExt=%p pFileObj=%p pSession=%p\n",
{
break;
}
{
static unsigned s_cErrors = 0;
if (s_cErrors++ > 32)
break;
}
}
/* If request will be processed asynchronously, execution will
* go to VBoxHGCMCallback. There it will wait for the request event, signalled from IRQ.
* On IRQ arrival, the VBoxHGCMCallback(s) will check the request memory and, if completion
* flag is set, returns.
*/
int rc = VbglR0HGCMInternalDisconnect (ptr, pIrp->RequestorMode == KernelMode? VBoxHGCMCallbackKernelMode :VBoxHGCMCallback, pDevExt, RT_INDEFINITE_WAIT);
if (RT_FAILURE(rc))
{
}
else
{
}
if (pStack->FileObject)
{
}
} break;
#ifdef RT_ARCH_AMD64
case VBOXGUEST_IOCTL_HGCM_CALL_32(0): /* (The size isn't relevant on NT.) */
{
/* A 32 bit application call. */
int rc;
dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_HGCM_CALL_32\n"));
sizeof (VBoxGuestHGCMCallInfo));
if (Status != STATUS_SUCCESS)
{
break;
}
/* @todo: Old guest OpenGL driver used the same IOCtl code for both 32 and 64 bit binaries.
* This is a protection, and can be removed if there were no 64 bit driver.
*/
if (!IoIs32bitProcess(pIrp))
{
break;
}
uint32_t fFlags = pIrp->RequestorMode == KernelMode ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
if (RT_FAILURE(rc))
{
}
else
{
}
} break;
#endif /* RT_ARCH_AMD64 */
case VBOXGUEST_IOCTL_HGCM_CALL(0): /* (The size isn't relevant on NT.) */
{
int rc;
dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_HGCM_CALL\n"));
sizeof (VBoxGuestHGCMCallInfo));
if (Status != STATUS_SUCCESS)
{
break;
}
uint32_t fFlags = pIrp->RequestorMode == KernelMode ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
if (RT_FAILURE(rc))
{
}
else
{
}
} break;
case VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0): /* (The size isn't relevant on NT.) */
{
/* This IOCTL is not used by shared folders, so VBoxHGCMCallbackKernelMode is not used. */
dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_HGCM_CALL_TIMED\n"));
sizeof (VBoxGuestHGCMCallInfoTimed));
if (Status != STATUS_SUCCESS)
{
break;
}
int rc;
uint32_t fFlags = pIrp->RequestorMode == KernelMode ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
if (pInfo->fInterruptible)
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: calling VBoxHGCMCall interruptible, timeout %lu ms\n",
pInfo->u32Timeout));
}
else
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: calling VBoxHGCMCall, timeout %lu ms\n",
pInfo->u32Timeout));
}
if (RT_FAILURE(rc))
{
}
else
{
}
} break;
#endif /* VBOX_WITH_HGCM */
/* VRDP session enable/disable branches (case labels removed). */
{
if (!pDevExt->fVRDPEnabled)
{
}
break;
}
{
if (pDevExt->fVRDPEnabled)
{
pDevExt->ulOldActiveConsoleId = 0;
}
break;
}
#endif
#ifdef VBOX_WITH_MANAGEMENT
/* Balloon check IOCTL (case label removed). */
{
{
dprintf(("VBoxGuest::VBoxGuestDeviceControl: OutputBufferLength %d != sizeof(ULONG) %d\n",
break;
}
if (RT_FAILURE(rc))
{
}
else
{
pInfo->fHandleInR3 = false;
}
break;
}
#endif
case VBOXGUEST_IOCTL_LOG(0): /* The size isn't relevant on NT. */
{
/* Enable this only for debugging:
dprintf(("VBoxGuest::VBoxGuestDeviceControl: VBOXGUEST_IOCTL_LOG %.*s\n", (int)pStack->Parameters.DeviceIoControl.InputBufferLength, pBuf));
*/
cbOut = 0;
break;
}
default:
break;
}
//dprintf(("VBoxGuest::VBoxGuestDeviceControl: returned cbOut=%d rc=%#x\n", cbOut, Status));
return Status;
}
/**
 * IRP_MJ_SYSTEM_CONTROL handler
 *
 * NOTE(review): signature and the pass-down call (and its return) are not
 * visible in this extraction.
 *
 * @returns NT status code
 * @param pDevObj Device object.
 * @param pIrp IRP.
 */
{
dprintf(("VBoxGuest::VBoxGuestSystemControl\n"));
/* Always pass it on to the next driver. */
}
/**
 * IRP_MJ_SHUTDOWN handler
 *
 * Notifies the host of the power-state change via a VMMDev request.
 * NOTE(review): signature, the request setup and the VbglGRPerform call are
 * not visible in this extraction.
 *
 * @returns NT status code
 * @param pDevObj Device object.
 * @param pIrp IRP.
 */
{
dprintf(("VBoxGuest::VBoxGuestShutdown\n"));
{
if (RT_FAILURE(rc))
{
dprintf(("VBoxGuest::PowerStateRequest: error performing request to VMMDev! "
"rc = %Rrc\n", rc));
}
}
return STATUS_SUCCESS;
}
/**
 * Stub function for functions we don't implemented.
 *
 * NOTE(review): signature removed in this extraction.
 *
 * @returns STATUS_NOT_SUPPORTED
 * @param pDevObj Device object.
 * @param pIrp IRP.
 */
{
dprintf(("VBoxGuest::VBoxGuestNotSupportedStub\n"));
return STATUS_NOT_SUPPORTED;
}
/**
 * DPC handler
 *
 * NOTE(review): signature and the event-signalling statement the comment
 * below describes are not visible in this extraction.
 *
 * @param dpc DPC descriptor.
 * @param pDevObj Device object.
 * @param irp Interrupt request packet.
 * @param context Context specific pointer.
 */
{
/* Unblock handlers waiting for arrived events.
*
* Events are very low things, there is one event flag (1 or more bit)
* for each event. Each event is processed by exactly one handler.
*
* Assume that we trust additions and that other drivers will
* handle its respective events without trying to fetch all events.
*
* Anyway design assures that wrong event processing will affect only guest.
*
* Event handler calls VMMDev IOCTL for waiting an event.
* It supplies event mask. IOCTL blocks on EventNotification.
* Here we just signal an the EventNotification to all waiting
* threads, the IOCTL handler analyzes events and either
* return to caller or blocks again.
*
* If we do not have too many events this is a simple and good
* approach. Other way is to have as many Event objects as the callers
* and wake up only callers waiting for the specific event.
*
* Now with the 'wake up all' appoach we probably do not need the DPC
* handler and can signal event directly from ISR.
*
*/
dprintf(("VBoxGuest::VBoxGuestDpcHandler\n"));
}
/**
 * ISR handler
 *
 * NOTE(review): signature, the pending-events read, the acknowledge request
 * and the fIRQTaken assignment are not visible in this extraction.
 *
 * @return BOOLEAN indicates whether the IRQ came from us (TRUE) or not (FALSE)
 * @param interrupt Interrupt that was triggered.
 * @param serviceContext Context specific pointer.
 */
{
/* NOTE(review): truncated dprintf call below — extraction artifact. */
dprintf(("VBoxGuest::VBoxGuestIsrHandler haveEvents = %d\n",
/*
* now we have to find out whether it was our IRQ. Read the event mask
* from our device to see if there are any pending events
*/
{
/* Acknowlegde events. */
if (RT_SUCCESS(rc))
{
dprintf(("VBoxGuest::VBoxGuestIsrHandler: acknowledge events succeeded %#x\n",
}
else
{
/* This can't be actually. This is sign of a serious problem. */
dprintf(("VBoxGuest::VBoxGuestIsrHandler: "
"acknowledge events failed rc = %Rrc\n", rc));
}
/* Mark IRQ as taken, there were events for us. */
}
return fIRQTaken;
}
/**
 * Worker thread to do periodic things such as notify other
 * drivers of events.
 *
 * NOTE(review): signature, the wait call inside the loop, and the thread
 * dereference/termination calls are not visible in this extraction.
 *
 * @param pDevExt device extension pointer
 */
{
dprintf(("VBoxGuest::vboxWorkerThread entered\n"));
/* perform the hypervisor address space reservation */
do
{
/* Nothing to do here yet. */
/*
* Go asleep unless we're supposed to terminate
*/
if (!pDevExt->stopThread)
{
{
}
}
} while (!pDevExt->stopThread);
dprintf(("VBoxGuest::vboxWorkerThread: we've been asked to terminate!\n"));
if (pDevExt->workerThread)
{
}
dprintf(("VBoxGuest::vboxWorkerThread: now really gone!\n"));
}
/**
 * Create driver worker threads
 *
 * NOTE(review): signature, KeInitializeEvent/PsCreateSystemThread calls and
 * the rc declaration are not visible; only stray argument lines remain. The
 * bare `#define OBJ_KERNEL_HANDLE 0` after an empty `#ifdef OBJ_KERNEL_HANDLE`
 * block suggests a removed `#undef` — confirm against the full source.
 *
 * @returns NTSTATUS NT status code
 * @param pDevExt VBoxGuest device extension
 */
{
dprintf(("VBoxGuest::createThreads\n"));
// first setup the request semaphore
// the API has slightly changed after NT4
#ifdef TARGET_NT4
#ifdef OBJ_KERNEL_HANDLE
#endif
#define OBJ_KERNEL_HANDLE 0
#endif
/*
* The worker thread
*/
NULL,
NULL,
NULL);
(HANDLE)0L,
NULL,
pDevExt);
NULL,
NULL);
/*
* The idle thread
*/
#if 0 /// @todo Windows "sees" that time is lost and reports 100% usage
(HANDLE)0L,
NULL,
pDevExt);
NULL,
NULL);
#endif
return rc;
}
/**
 * Helper routine to reserve address space for the hypervisor
 * and communicate its position.
 *
 * NOTE(review): signature, the VbglGRPerform calls, the MmMapIoSpace call and
 * the 4MB-alignment arithmetic are not visible in this extraction.
 *
 * @param pDevExt Device extension structure.
 */
{
// @todo rc handling
int rc = VbglGRAlloc ((VMMDevRequestHeader **)&req, sizeof (VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
if (RT_SUCCESS(rc))
{
/* Zero start/size to ask the host how much it wants, rather than telling it. */
req->hypervisorStart = 0;
req->hypervisorSize = 0;
if (RT_SUCCESS(rc))
{
if (!hypervisorSize)
{
dprintf(("VBoxGuest::reserveHypervisorMemory: host returned 0, not doing anything\n"));
return;
}
dprintf(("VBoxGuest::reserveHypervisorMemory: host wants %u bytes of hypervisor address space\n", hypervisorSize));
// Map fictive physical memory into the kernel address space to reserve virtual
// address space. This API does not perform any checks but just allocate the
// The hypervisor only likes 4MB aligned virtual addresses, so we have to allocate
// 4MB more than we are actually supposed to in order to guarantee that. Maybe we
// can come up with a less lavish algorithm lateron.
if (!pDevExt->hypervisorMapping)
{
dprintf(("VBoxGuest::reserveHypervisorMemory: MmMapIoSpace returned NULL!\n"));
return;
}
dprintf(("VBoxGuest::reserveHypervisorMemory: MmMapIoSpace returned %p\n", pDevExt->hypervisorMapping));
/* NOTE(review): truncated dprintf call below — extraction artifact. */
dprintf(("VBoxGuest::reserveHypervisorMemory: communicating %p to host\n",
/* align at 4MB */
/* issue request */
if (RT_FAILURE(rc))
{
dprintf(("VBoxGuest::reserveHypervisorMemory: error communicating physical address to VMMDev! "
"rc = %Rrc\n", rc));
}
}
else
{
}
}
#ifdef RT_ARCH_X86
/* Allocate locked executable memory that can be used for patching guest code. */
{
int rc = VbglGRAlloc ((VMMDevRequestHeader **)&req, sizeof (VMMDevReqPatchMemory), VMMDevReq_RegisterPatchMemory);
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
if (RT_FAILURE(rc))
{
dprintf(("VBoxGuest::reserveHypervisorMemory: VMMDevReq_RegisterPatchMemory error! "
"rc = %Rrc\n", rc));
}
}
else
{
}
}
}
#endif
return;
}
/**
 * Helper function to unregister a virtual address space mapping
 *
 * Deregisters the patch memory (x86 only), tells the host the hypervisor
 * mapping is gone, and unmaps the fictive I/O space reservation.
 *
 * Fix: the final log message misspelled the function name
 * ("unreserveHypervisorMemmory" -> "unreserveHypervisorMemory"), which broke
 * log grepping. NOTE(review): this extraction is missing the signature and
 * several calls (VbglGRPerform, MmUnmapIoSpace, VbglGRFree).
 *
 * @param pDevExt Device extension
 */
{
#ifdef RT_ARCH_X86
/* Remove the locked executable memory range that can be used for patching guest code. */
if (pDevExt->PatchMemObj)
{
int rc = VbglGRAlloc ((VMMDevRequestHeader **)&req, sizeof (VMMDevReqPatchMemory), VMMDevReq_DeregisterPatchMemory);
if (RT_SUCCESS(rc))
{
if (RT_FAILURE(rc))
{
dprintf(("VBoxGuest::reserveHypervisorMemory: VMMDevReq_DeregisterPatchMemory error! "
"rc = %Rrc\n", rc));
/* We intentially leak the memory object here as there still could
* be references to it!!!
*/
}
else
{
}
}
}
#endif
int rc = VbglGRAlloc ((VMMDevRequestHeader **)&req, sizeof (VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
if (RT_SUCCESS(rc))
{
/* tell the hypervisor that the mapping is no longer available */
req->hypervisorStart = 0;
req->hypervisorSize = 0;
if (RT_FAILURE(rc))
{
dprintf(("VBoxGuest::unreserveHypervisorMemory: error communicating physical address to VMMDev! "
"rc = %Rrc\n", rc));
}
}
if (!pDevExt->hypervisorMapping)
{
dprintf(("VBoxGuest::unreserveHypervisorMemory: there is no mapping, returning\n"));
return;
}
// unmap fictive IO space
dprintf(("VBoxGuest::unreserveHypervisorMemory: done\n"));
}
/**
 * Idle thread that runs at the lowest priority possible
 * and whenever scheduled, makes a VMMDev call to give up
 * timeslices. This is so prevent Windows from thinking that
 * nothing is happening on the machine and doing stupid things
 * that would steal time from other VMs it doesn't know of.
 *
 * Fix: the error dprintf used the %Rrc format specifier without passing rc,
 * so the log line printed garbage (varargs mismatch); rc is now supplied.
 * NOTE(review): this extraction is missing the signature, the req
 * declaration, the priority-setting call and the idle-request perform call.
 *
 * @param pDevExt device extension pointer
 */
{
dprintf(("VBoxGuest::vboxIdleThread entered\n"));
/* set priority as low as possible */
/* allocate VMMDev request structure */
int rc = VbglGRAlloc((VMMDevRequestHeader **)&req, sizeof (VMMDevReqHypervisorInfo), VMMDevReq_Idle);
if (RT_FAILURE(rc))
{
dprintf(("VBoxGuest::vboxIdleThread: error %Rrc allocating request structure!\n", rc));
return;
}
do
{
//dprintf(("VBoxGuest: performing idle request..\n"));
/* perform idle request */
} while (!pDevExt->stopThread);
dprintf(("VBoxGuest::vboxIdleThread leaving\n"));
}
#ifdef DEBUG
/* Debug-only self-test fragments for the atomic test-and-clear helper.
 * Fix: the failure-message format string printed "u32Exp=ox%x" instead of
 * "u32Exp=0x%x" — a typo that made the logged expectation value misleading.
 * NOTE(review): both signatures and the comparison expression preceding the
 * AssertLogRelMsgFailed call were removed in this extraction. */
{
)
AssertLogRelMsgFailed(("%s: TEST FAILED: u32Mask=0x%x, u32Bits (before)=0x%x, u32Bits (after)=0x%x, u32Result=0x%x, u32Exp=0x%x\n",
u32Result));
}
{
/* Exercise one fixed input; further cases presumably removed. */
testAtomicTestAndClearBitsU32(0x11, 0, 0);
}
#endif