VBoxGuest.cpp revision 51228ad2dda4aaa302b47ca536427910b38ce881
/* $Id$ */
/** @file
* VBoxGuest - Guest Additions Driver, Common Code.
*/
/*
* Copyright (C) 2007-2009 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_DEFAULT
#include "VBoxGuestInternal.h"
#ifdef VBOX_WITH_HGCM
#endif
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
#ifdef VBOX_WITH_HGCM
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
#endif
/**
* Reserves memory in which the VMM can relocate any guest mappings
* that are floating around.
*
* This operation is a little bit tricky since the VMM might not accept
* just any address because of address clashes between the three contexts
* it operates in, so use a small stack to perform this operation.
*
* @returns VBox status code (ignored).
* @param pDevExt The device extension.
*/
{
/*
* Query the required space.
*/
int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
if (RT_FAILURE(rc))
return rc;
pReq->hypervisorStart = 0;
pReq->hypervisorSize = 0;
{
return rc;
}
/*
* The VMM will report back if there is nothing it wants to map, like for
* instance in VT-x and AMD-V mode.
*/
if (pReq->hypervisorSize == 0)
Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
else
{
/*
* We have to try several times since the host can be picky
* about certain addresses.
*/
bool fBitched = false;
{
/*
* Reserve space, or if that isn't supported, create a object for
* some fictive physical memory and map that in to kernel space.
*
* To make the code a bit uglier, most systems cannot help with
* 4MB alignment, so we have to deal with that in addition to
* having two ways of getting the memory.
*/
if (rc == VERR_NOT_SUPPORTED)
{
}
if (rc == VERR_NOT_SUPPORTED)
{
if (hFictive == NIL_RTR0MEMOBJ)
{
if (RT_FAILURE(rc))
break;
}
uAlignment = _4M;
rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
if (rc == VERR_NOT_SUPPORTED)
{
rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
}
}
if (RT_FAILURE(rc))
{
LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
fBitched = true;
break;
}
/*
* Try set it.
*/
if ( uAlignment == PAGE_SIZE
AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
if (RT_SUCCESS(rc))
{
Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
break;
}
}
/*
* Cleanup failed attempts.
*/
while (iTry-- > 0)
if ( RT_FAILURE(rc)
&& hFictive != NIL_RTR0PTR)
LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
}
/*
* We ignore failed attempts for now.
*/
return VINF_SUCCESS;
}
/**
* Undo what vboxGuestInitFixateGuestMappings did.
*
* @param pDevExt The device extension.
*/
{
{
/*
* Tell the host that we're going to free the memory we reserved for
* it, then free it up. (Leak the memory if anything goes wrong here.)
*/
int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
if (RT_SUCCESS(rc))
{
pReq->hypervisorStart = 0;
pReq->hypervisorSize = 0;
}
if (RT_SUCCESS(rc))
{
}
else
}
}
/**
* Sets the interrupt filter mask during initialization and termination.
*
* This will ASSUME that we're the ones in charge over the mask, so
* we'll simply clear all bits we don't set.
*
* @returns VBox status code (ignored).
* @param pDevExt The device extension.
* @param fMask The new mask.
*/
{
if (RT_SUCCESS(rc))
{
if ( RT_FAILURE(rc)
LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
}
return rc;
}
/**
* Report guest information to the VMMDev.
*
* @returns VBox status code.
* @param pDevExt The device extension.
* @param enmOSType The OS type to report.
*/
{
if (RT_SUCCESS(rc))
{
if ( RT_FAILURE(rc)
LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
}
return rc;
}
/**
* Initializes the VBoxGuest device extension when the
* device driver is loaded.
*
* The native code locates the VMMDev on the PCI bus and retrieve
* the MMIO and I/O port ranges, this function will take care of
* mapping the MMIO memory (if present). Upon successful return
* the native code should set up the interrupt handler.
*
* @returns VBox status code.
*
* @param pDevExt The device extension. Allocated by the native code.
* @param IOPortBase The base of the I/O port range.
* @param pvMMIOBase The base of the MMIO memory mapping.
* This is optional, pass NULL if not present.
* @param cbMMIO The size of the MMIO memory mapping.
* This is optional, pass 0 if not present.
* @param enmOSType The guest OS type to report to the VMMDev.
* @param fFixedEvents Events that will be enabled upon init and no client
* will ever be allowed to mask.
*/
{
/*
* Adjust fFixedEvents.
*/
#ifdef VBOX_WITH_HGCM
#endif
/*
* Initialize the data.
*/
#ifdef VBOX_WITH_HGCM
#endif
pDevExt->f32PendingEvents = 0;
pDevExt->u32ClipboardClientId = 0;
pDevExt->u32MousePosChangedSeq = 0;
/*
* If there is an MMIO region validate the version and size.
*/
if (pvMMIOBase)
{
{
Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
}
else /* try live without it. */
LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
}
/*
* Create the wait and session spinlocks.
*/
if (RT_SUCCESS(rc))
if (RT_FAILURE(rc))
{
return rc;
}
/*
* Initialize the guest library and report the guest info back to VMMDev,
* set the interrupt control filter mask, and fixate the guest mappings
* made by the VMM.
*/
if (RT_SUCCESS(rc))
{
rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
/*
* Disable guest graphics capability by default. The guest specific
* graphics driver will re-enable this when it is necessary.
*/
if (RT_SUCCESS(rc))
{
Log(("VBoxGuestInitDevExt: returns success\n"));
return VINF_SUCCESS;
}
}
}
/* failure cleanup */
}
else
}
else
return rc; /* (failed) */
}
/**
* Deletes all the items in a wait chain.
* @param pWait The head of the chain.
*/
{
{
int rc2;
}
}
/**
* Destroys the VBoxGuest device extension.
*
* The native code should call this before the driver is unloaded,
* but don't call this on shutdown.
*
* @param pDevExt The device extension.
*/
{
int rc2;
Log(("VBoxGuestDeleteDevExt:\n"));
LogRel(("VBoxGuest: The additions driver is terminating.\n"));
/*
* Unfix the guest mappings, filter all events and clear
* all capabilities.
*/
/*
* Cleanup resources.
*/
#ifdef VBOX_WITH_HGCM
#endif
pDevExt->IOPortBase = 0;
}
/**
* Creates a VBoxGuest user session.
*
* The native code calls this when a ring-3 client opens the device.
* Use VBoxGuestCreateKernelSession when a ring-0 client connects.
*
* @returns VBox status code.
* @param pDevExt The device extension.
* @param ppSession Where to store the session on success.
*/
{
if (RT_UNLIKELY(!pSession))
{
LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
return VERR_NO_MEMORY;
}
LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
return VINF_SUCCESS;
}
/**
* Creates a VBoxGuest kernel session.
*
* The native code calls this when a ring-0 client connects to the device.
* Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
*
* @returns VBox status code.
* @param pDevExt The device extension.
* @param ppSession Where to store the session on success.
*/
{
if (RT_UNLIKELY(!pSession))
{
LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
return VERR_NO_MEMORY;
}
LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
return VINF_SUCCESS;
}
/**
* Closes a VBoxGuest session.
*
* @param pDevExt The device extension.
* @param pSession The session to close (and free).
*/
{
unsigned i; NOREF(i);
Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
#ifdef VBOX_WITH_HGCM
if (pSession->aHGCMClientIds[i])
{
pSession->aHGCMClientIds[i] = 0;
}
#endif
}
/**
* Links the wait-for-event entry into the tail of the given list.
*
* @param pList The list to link it into.
* @param pWait The wait for event entry to append.
*/
{
if (pTail)
else
}
/**
* Unlinks the wait-for-event entry.
*
* @param pList The list to unlink it from.
* @param pWait The wait for event entry to unlink.
*/
{
if (pNext)
else
if (pPrev)
else
}
/**
* Allocates a wait-for-event entry.
*
* @returns The wait-for-event entry.
* @param pDevExt The device extension.
* @param pSession The session that's allocating this. Can be NULL.
*/
{
/*
* Allocate it one way or the other.
*/
if (pWait)
{
if (pWait)
}
if (!pWait)
{
static unsigned s_cErrors = 0;
int rc;
if (!pWait)
{
if (s_cErrors++ < 32)
LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
return NULL;
}
if (RT_FAILURE(rc))
{
if (s_cErrors++ < 32)
return NULL;
}
}
/*
* Zero members just as a precaution.
*/
pWait->fReqEvents = 0;
pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
#endif
return pWait;
}
/**
* Frees the wait-for-event entry.
* The caller must own the wait spinlock!
*
* @param pDevExt The device extension.
* @param pWait The wait-for-event entry to free.
*/
{
pWait->fReqEvents = 0;
pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
#endif
}
/**
* Frees the wait-for-event entry.
*
* @param pDevExt The device extension.
* @param pWait The wait-for-event entry to free.
*/
{
}
/**
* Modifies the guest capabilities.
*
* Should be called during driver init and termination.
*
* @returns VBox status code.
* @param fOr The Or mask (what to enable).
* @param fNot The Not mask (what to disable).
*/
{
if (RT_FAILURE(rc))
{
Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
return rc;
}
if (RT_FAILURE(rc))
{
}
return rc;
}
/**
* Implements the fast (no input or output) type of IOCtls.
*
* This is currently just a placeholder stub inherited from the support driver code.
*
* @returns VBox status code.
* @param iFunction The IOCtl function number.
* @param pDevExt The device extension.
* @param pSession The session.
*/
int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Placeholder inherited from the support driver code: no fast I/O
     * controls are implemented yet, so every request is logged and
     * rejected with VERR_NOT_SUPPORTED.
     */
    Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
    int rc = VERR_NOT_SUPPORTED;
    return rc;
}
/**
 * Worker for the GETVMMDEVPORT IOCtl.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param   pDevExt             The device extension.
 * @param   pInfo               Where the port info should be returned.
 * @param   pcbDataReturned     Where to store the amount of returned data. Can be NULL.
 *
 * NOTE(review): in this extract pInfo's members are never written — the
 * statements copying the I/O port / MMIO details out of pDevExt appear to
 * be missing. Confirm against the full revision before relying on this.
 */
static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
{
Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
/* Report the size of the returned structure when the caller asks for it. */
if (pcbDataReturned)
*pcbDataReturned = sizeof(*pInfo);
return VINF_SUCCESS;
}
/**
* Worker VBoxGuestCommonIOCtl_WaitEvent.
* The caller enters the spinlock, we may or may not leave it.
*
* @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
*/
{
if (fMatches)
{
else
return VINF_SUCCESS;
}
return VERR_TIMEOUT;
}
{
pInfo->u32EventFlagsOut = 0;
if (pcbDataReturned)
*pcbDataReturned = sizeof(*pInfo);
/*
* Copy and verify the input mask.
*/
if (RT_UNLIKELY(iEvent < 0))
{
return VERR_INVALID_PARAMETER;
}
/*
* Check the condition up front, before doing the wait-for-event allocations.
*/
if (rc == VINF_SUCCESS)
return rc;
if (!pInfo->u32TimeoutIn)
{
Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
return VERR_TIMEOUT;
}
if (!pWait)
return VERR_NO_MEMORY;
/*
* We've got the wait entry now, re-enter the spinlock and check for the condition.
* If the wait condition is met, return.
* Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
*/
if (rc == VINF_SUCCESS)
{
return rc;
}
if (fInterruptible)
else
/*
* There is one special case here and that's when the semaphore is
* destroyed upon device driver unload. This shouldn't happen of course,
* but in case it does, just get out of here ASAP.
*/
if (rc == VERR_SEM_DESTROYED)
return rc;
/*
* Unlink the wait item and dispose of it.
*/
/*
* Now deal with the return code.
*/
if ( fResEvents
&& fResEvents != UINT32_MAX)
{
else
rc = VINF_SUCCESS;
}
else if ( fResEvents == UINT32_MAX
|| rc == VERR_INTERRUPTED)
{
rc == VERR_INTERRUPTED;
Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
}
else if (rc == VERR_TIMEOUT)
{
Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
}
else
{
if (RT_SUCCESS(rc))
{
static unsigned s_cErrors = 0;
if (s_cErrors++ < 32)
}
}
return rc;
}
/**
 * Worker for the CANCEL_ALL_WAITEVENTS IOCtl: wakes up every wait-for-event
 * entry that belongs to the given session.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waiters should be cancelled.
 *
 * NOTE(review): this extract looks truncated — the Solaris branch references
 * pWait without a visible declaration, the statements that actually walk the
 * wait list and signal the semaphores are missing, and the local 'rc' is set
 * but never used. Confirm against the full revision.
 */
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
#if defined(RT_OS_SOLARIS)
#endif
int rc = 0;
Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
/*
* Walk the event list and wake up anyone with a matching session.
*
* Note! On Solaris we have to do really ugly stuff here because
* RTSemEventMultiSignal cannot be called with interrupts disabled.
* The hack is racy, but what we can we do... (Eliminate this
* termination hack, perhaps?)
*/
#if defined(RT_OS_SOLARIS)
do
{
{
/* HACK ALERT! This races wakeup + reuse! */
break;
}
} while (pWait);
#else
{
}
#endif
return VINF_SUCCESS;
}
{
/*
* Validate the header and request size.
*/
{
Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
return VERR_INVALID_PARAMETER;
}
{
Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
return VERR_INVALID_PARAMETER;
}
if (RT_FAILURE(rc))
{
Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc %d!!\n",
return rc;
}
/*
* Make a copy of the request in the physical memory heap so
* the VBoxGuestLibrary can more easily deal with the request.
* (This is really a waste of time since the OS or the OS specific
* it does makes things a bit simpler wrt to phys address.)
*/
if (RT_FAILURE(rc))
{
Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
return rc;
}
if ( RT_SUCCESS(rc)
{
if (pcbDataReturned)
*pcbDataReturned = cbReq;
}
else if (RT_FAILURE(rc))
else
{
Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
}
return rc;
}
/**
 * Worker for the CTL_FILTER_MASK IOCtl: updates the event filter mask on
 * the VMMDev on behalf of the caller.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pInfo       The filter mask change request (or/not masks).
 *
 * NOTE(review): this extract appears truncated — 'rc' and 'pReq' are used
 * without visible declarations, so the VbglGRAlloc/VbglGRPerform calls (and
 * the closing arguments of the first Log statement) seem to have been lost.
 * Confirm against the full revision.
 */
static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
{
/* Bail out if the request buffer could not be allocated. */
if (RT_FAILURE(rc))
{
Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
return rc;
}
/* The host processed the request but reported a failure. */
if (RT_FAILURE(rc))
{
Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
}
return rc;
}
#ifdef VBOX_WITH_HGCM
/** Worker for VBoxGuestHGCMAsyncWaitCallback*. */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
{
/*
* Check to see if the condition was met by the time we got here.
*
* We create a simple poll loop here for dealing with out-of-memory
* conditions since the caller isn't necessarily able to deal with
* us returning too early.
*/
for (;;)
{
{
return VINF_SUCCESS;
}
if (pWait)
break;
if (fInterruptible)
return VERR_INTERRUPTED;
RTThreadSleep(1);
}
/*
* Re-enter the spinlock and re-check for the condition.
* If the condition is met, return.
* Otherwise link us into the HGCM wait list and go to sleep.
*/
{
return VINF_SUCCESS;
}
int rc;
if (fInterruptible)
else
if (rc == VERR_SEM_DESTROYED)
return rc;
/*
* Unlink, free and return.
*/
if ( RT_FAILURE(rc)
&& rc != VERR_TIMEOUT
&& ( !fInterruptible
|| rc != VERR_INTERRUPTED))
return rc;
}
/**
* This is a callback for dealing with async waits.
*
* It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
*/
/*
 * Non-interruptible async-wait callback: forwards to the common worker with
 * fInterruptible = false and u32User as the millisecond timeout.
 *
 * NOTE(review): the body in this extract is truncated — only the trailing
 * arguments of the forwarding call (presumably to
 * VBoxGuestHGCMAsyncWaitCallbackWorker) remain. Confirm against the full
 * revision.
 */
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
{
false /* fInterruptible */,
u32User /* cMillies */);
}
/**
* This is a callback for dealing with async waits with a timeout.
*
* It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
*/
{
true /* fInterruptible */,
u32User /* cMillies */ );
}
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
{
/*
* The VbglHGCMConnect call will invoke the callback if the HGCM
* call is performed in an ASYNC fashion. The function is not able
* to deal with cancelled requests.
*/
Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
if (RT_SUCCESS(rc))
{
Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
{
/*
* Append the client id to the client id table.
* If the table has somehow become filled up, we'll disconnect the session.
*/
unsigned i;
if (!pSession->aHGCMClientIds[i])
{
break;
}
{
static unsigned s_cErrors = 0;
if (s_cErrors++ < 32)
LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
return VERR_TOO_MANY_OPEN_FILES;
}
}
if (pcbDataReturned)
*pcbDataReturned = sizeof(*pInfo);
}
return rc;
}
static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
{
/*
* Validate the client id and invalidate its entry while we're in the call.
*/
unsigned i;
{
break;
}
{
static unsigned s_cErrors = 0;
if (s_cErrors++ > 32)
return VERR_INVALID_HANDLE;
}
/*
* The VbglHGCMConnect call will invoke the callback if the HGCM
* call is performed in an ASYNC fashion. The function is not able
* to deal with cancelled requests.
*/
int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
if (RT_SUCCESS(rc))
{
if (pcbDataReturned)
*pcbDataReturned = sizeof(*pInfo);
}
/* Update the client id array according to the result. */
return rc;
}
{
/*
* Some more validations.
*/
{
return VERR_INVALID_PARAMETER;
}
#ifdef RT_ARCH_AMD64
if (f32bit)
else
#endif
{
LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
return VERR_INVALID_PARAMETER;
}
/*
* Validate the client id.
*/
unsigned i;
break;
{
static unsigned s_cErrors = 0;
if (s_cErrors++ > 32)
return VERR_INVALID_HANDLE;
}
/*
* The VbglHGCMCall call will invoke the callback if the HGCM
* call is performed in an ASYNC fashion. This function can
* deal with cancelled requests, so we let user more requests
* be interruptible (should add a flag for this later I guess).
*/
int rc;
uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
#ifdef RT_ARCH_AMD64
if (f32bit)
{
if (fInterruptible)
rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
else
rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
}
else
#endif
{
if (fInterruptible)
rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
else
rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
}
if (RT_SUCCESS(rc))
{
if (pcbDataReturned)
}
else
{
if (rc != VERR_INTERRUPTED)
else
}
return rc;
}
/**
* @returns VBox status code. Unlike the other HGCM IOCtls this will combine
* the VbglHGCMConnect/Disconnect return code with the Info.result.
*/
/**
 * Worker for the CLIPBOARD_CONNECT IOCtl: disconnects any previous clipboard
 * HGCM client and establishes a new connection.
 *
 * @returns VBox status code; combines the VbglHGCMConnect/Disconnect return
 *          code with the Info.result (see header comment above).
 * @param   pDevExt         The device extension.
 * @param   pu32ClientId    Where to return the new client id.
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 *
 * NOTE(review): this extract appears truncated — 'Info' is used without a
 * visible declaration, the VbglR0HGCMInternalConnect call and the assignment
 * to *pu32ClientId are missing, and the RT_SUCCESS(rc) guard around the
 * "failed to disconnect" LogRel looks inverted (presumably RT_FAILURE in the
 * full revision). Confirm before relying on this code.
 */
static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
{
int rc;
Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
/*
* If there is an old client, try to disconnect it first.
*/
if (pDevExt->u32ClipboardClientId != 0)
{
rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
if (RT_SUCCESS(rc))
{
LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
return rc;
}
{
Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
}
pDevExt->u32ClipboardClientId = 0;
}
/*
* Try connect.
*/
Info.u32ClientID = 0;
if (RT_FAILURE(rc))
{
return rc;
}
{
return rc;
}
Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
/* Report the size of the returned client id when the caller asks for it. */
if (pcbDataReturned)
*pcbDataReturned = sizeof(uint32_t);
return VINF_SUCCESS;
}
#endif /* VBOX_WITH_HGCM */
/**
* Guest backdoor logging.
*
* @returns VBox status code.
*
* @param pch The log message (need not be NULL terminated).
* @param cbData Size of the buffer.
* @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
*/
{
if (pcbDataReturned)
*pcbDataReturned = 0;
return VINF_SUCCESS;
}
/**
* Common IOCtl for user to kernel and kernel to kernel communication.
*
* This function only does the basic validation and then invokes
* worker functions that takes care of each specific function.
*
* @returns VBox status code.
*
* @param iFunction The requested function.
* @param pDevExt The device extension.
* @param pSession The client session.
* @param cbData The max size of the data buffer.
* @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
*/
{
Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
/*
* Make sure the returned data size is set to zero.
*/
if (pcbDataReturned)
*pcbDataReturned = 0;
/*
* Define some helper macros to simplify validation.
*/
#define CHECKRET_RING0(mnemonic) \
do { \
{ \
return VERR_PERMISSION_DENIED; \
} \
} while (0)
do { \
{ \
return VERR_BUFFER_OVERFLOW; \
} \
{ \
return VERR_INVALID_POINTER; \
} \
} while (0)
/*
* Deal with variably sized requests first.
*/
int rc = VINF_SUCCESS;
if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
{
rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
}
#ifdef VBOX_WITH_HGCM
/*
* These ones are a bit tricky.
*/
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
{
rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
fInterruptible, false /*f32bit*/,
0, cbData, pcbDataReturned);
}
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
{
false /*f32bit*/,
}
# ifdef RT_ARCH_AMD64
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
{
rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
fInterruptible, true /*f32bit*/,
0, cbData, pcbDataReturned);
}
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
{
true /*f32bit*/,
}
# endif
#endif /* VBOX_WITH_HGCM */
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
{
}
else
{
switch (iFunction)
{
CHECKRET_RING0("GETVMMDEVPORT");
break;
break;
if (cbData != 0)
break;
break;
#ifdef VBOX_WITH_HGCM
# ifdef RT_ARCH_AMD64
# endif
rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
break;
# ifdef RT_ARCH_AMD64
# endif
rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
break;
break;
#endif /* VBOX_WITH_HGCM */
default:
{
break;
}
}
}
Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
return rc;
}
/**
* Common interrupt service routine.
*
* This deals with events and with waking up thread waiting for those events.
*
* @returns true if it was our interrupt, false if it wasn't.
* @param pDevExt The VBoxGuest device extension.
*/
{
bool fMousePositionChanged = false;
int rc = 0;
bool fOurIrq;
/*
* Make sure we've initialized the device extension.
*/
if (RT_UNLIKELY(!pReq))
return false;
/*
* Enter the spinlock and check if it's our IRQ or not.
*
* Note! Solaris cannot do RTSemEventMultiSignal with interrupts disabled
* so we're entering the spinlock without disabling them. This works
* fine as long as we never called in a nested fashion.
*/
#if defined(RT_OS_SOLARIS)
#else
#endif
if (fOurIrq)
{
/*
* Acknowledge events.
* We don't use VbglGRPerform here as it may take another spinlocks.
*/
ASMCompilerBarrier(); /* paranoia */
{
/*
* VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
*/
{
fMousePositionChanged = true;
}
#ifdef VBOX_WITH_HGCM
/*
*/
if (fEvents & VMMDEV_EVENT_HGCM)
{
if ( !pWait->fResEvents
{
}
fEvents &= ~VMMDEV_EVENT_HGCM;
}
#endif
/*
* Normal FIFO waiter evaluation.
*/
&& !pWait->fResEvents)
{
if (!fEvents)
break;
}
}
else /* something is serious wrong... */
Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d (events=%#x)!!\n",
}
else
LogFlow(("VBoxGuestCommonISR: not ours\n"));
/*
* Work the poll and async notification queues on OSes that implements that.
* Do this outside the spinlock to prevent some recursive spinlocking.
*/
#if defined(RT_OS_SOLARIS)
#else
#endif
{
}
return fOurIrq;
}