/* VBoxGuest.cpp revision ac2f8170e087931242786c8fe40ba16f457e18e0 */
/* $Id$ */
/** @file
* VBoxGuest - Guest Additions Driver, Common Code.
*/
/*
* Copyright (C) 2007-2014 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_DEFAULT
#include "VBoxGuestInternal.h"
#include "VBoxGuest2.h"
#include <iprt/asm-amd64-x86.h>
#ifdef VBOX_WITH_HGCM
#endif
#include "version-generated.h"
#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
# include "revision-generated.h"
#endif
#ifdef RT_OS_WINDOWS
# ifndef CTL_CODE
# include <Windows.h>
# endif
#endif
#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
#endif
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
#ifdef VBOX_WITH_HGCM
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
#endif
static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags);
#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
/** Return the mask of VMM device events that this session is allowed to see,
* ergo, all events except those in "acquire" mode which have not been acquired
* by this session. */
DECLINLINE(uint32_t) VBoxGuestCommonGetHandledEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /* Fast path: no capability is in "acquire" mode, so every valid event is
       visible to every session. */
    if (!pDevExt->u32AcquireModeGuestCaps)
        return VMMDEV_EVENT_VALID_EVENT_MASK;

    /** @note VMMDEV_EVENT_VALID_EVENT_MASK should actually be the mask of valid
     *        capabilities, but that doesn't affect this code. */
    uint32_t u32AllowedGuestCaps = pSession->u32AquiredGuestCaps | (VMMDEV_EVENT_VALID_EVENT_MASK & ~pDevExt->u32AcquireModeGuestCaps);

    /* Start from the acquire-style events and keep only those whose backing
       capability this session has NOT been allowed; those must be hidden. */
    uint32_t u32CleanupEvents = VBOXGUEST_ACQUIRE_STYLE_EVENTS;
    if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
        u32CleanupEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
    if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
        u32CleanupEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;

    return VMMDEV_EVENT_VALID_EVENT_MASK & ~u32CleanupEvents;
}
/**
 * Atomically fetches and clears the pending events this session both requested
 * and is allowed to see (wait spinlock must be held by the caller).
 *
 * @returns The matched (and now cleared) event mask; 0 if none matched.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   fReqEvents  The events the caller is waiting for.
 */
DECLINLINE(uint32_t) VBoxGuestCommonGetAndCleanPendingEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fReqEvents)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents & VBoxGuestCommonGetHandledEventsLocked(pDevExt, pSession);
    if (fMatches)
        /* Consume the matched events so no other waiter sees them again. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
    return fMatches; /* all paths return a value now (the original fell off the end when nothing matched -- UB) */
}
/** Puts a capability in "acquire" or "set" mode and returns the mask of
* capabilities currently in the other mode. Once a capability has been put in
* one of the two modes it can no longer be removed from that mode. */
DECLINLINE(bool) VBoxGuestCommonGuestCapsModeSet(PVBOXGUESTDEVEXT pDevExt, uint32_t fCaps, bool fAcquire, uint32_t *pu32OtherVal)
{
    /* pVal is the mask for the requested mode, fNotVal the mask for the
       opposite mode; a capability may live in at most one of the two. */
    uint32_t *pVal = fAcquire ? &pDevExt->u32AcquireModeGuestCaps : &pDevExt->u32SetModeGuestCaps;
    const uint32_t fNotVal = !fAcquire ? pDevExt->u32AcquireModeGuestCaps : pDevExt->u32SetModeGuestCaps;
    bool fResult = true;
    if (!(fNotVal & fCaps))
        *pVal |= fCaps; /* not claimed by the other mode: record it in ours */
    else
    {
        /* Capability already committed to the other mode; modes are sticky. */
        AssertMsgFailed(("trying to change caps mode\n"));
        fResult = false;
    }
    if (pu32OtherVal)
        *pu32OtherVal = fNotVal;
    return fResult;
}
/**
* Sets the interrupt filter mask during initialization and termination.
*
 * This will ASSUME that we're the ones in charge of the mask, so
* we'll simply clear all bits we don't set.
*
* @returns VBox status code (ignored).
* @param fMask The new mask.
*/
{
int rc;
if (RT_FAILURE(rc))
return rc;
}
/**
* Sets the guest capabilities to the host.
*
* This will ASSUME that we're the ones in charge of the mask, so
* we'll simply clear all bits we don't set.
*
* @returns VBox status code.
* @param fMask The new mask.
*/
{
int rc;
if (RT_FAILURE(rc))
return rc;
}
/**
* Sets the mouse status to the host.
*
* This will ASSUME that we're the ones in charge of the mask, so
* we'll simply clear all bits we don't set.
*
* @returns VBox status code.
* @param fMask The new mask.
*/
{
int rc;
pReq->pointerXPos = 0;
pReq->pointerYPos = 0;
if (RT_FAILURE(rc))
return rc;
}
/** Host flags to be updated by a given invocation of the
* vboxGuestUpdateHostFlags() method. */
enum
{
    HostFlags_FilterMask   = 1,           /**< Update the VMMDev event filter mask. */
    HostFlags_Capabilities = 2,           /**< Update the reported guest capabilities. */
    HostFlags_MouseStatus  = 4,           /**< Update the reported mouse status. */
    HostFlags_All          = 7,           /**< All of the above. */
    HostFlags_SizeHack = (unsigned)-1     /**< Force the enum to full unsigned width. */
};
{
unsigned cSessions = 0;
int rc = VINF_SUCCESS;
{
++cSessions;
}
if (!cSessions)
return rc;
}
/** Check which host flags in a given category are being asserted by some guest
* session and assert exactly those on the host which are being asserted by one
* or more sessions. pCallingSession is purely for sanity checking and can be
* NULL.
* @note Takes the session spin-lock.
*/
unsigned enmFlags)
{
int rc;
if (RT_SUCCESS(rc))
sizeof(*pCapabilitiesReq),
if (RT_SUCCESS(rc))
sizeof(*pStatusReq), VMMDevReq_SetMouseStatus);
if (RT_SUCCESS(rc))
if (RT_SUCCESS(rc))
{
/* Since VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR is inverted in the session
* capabilities we invert it again before sending it to the host. */
if (enmFlags & HostFlags_FilterMask)
if (enmFlags & HostFlags_Capabilities)
if (enmFlags & HostFlags_MouseStatus)
}
if (pFilterReq)
if (pCapabilitiesReq)
if (pStatusReq)
return rc;
}
/*******************************************************************************
* Global Variables *
*******************************************************************************/
static const uint32_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
* Drag in the rest of IRPT since we share it with the
* rest of the kernel modules on Solaris.
*/
{
/* VirtioNet */
/* RTSemMutex* */
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
/**
* Reserves memory in which the VMM can relocate any guest mappings
* that are floating around.
*
* This operation is a little bit tricky since the VMM might not accept
* just any address because of address clashes between the three contexts
* it operates in, so use a small stack to perform this operation.
*
* @returns VBox status code (ignored).
* @param pDevExt The device extension.
*/
{
/*
* Query the required space.
*/
int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
if (RT_FAILURE(rc))
return rc;
pReq->hypervisorStart = 0;
pReq->hypervisorSize = 0;
{
return rc;
}
/*
* The VMM will report back if there is nothing it wants to map, like for
* instance in VT-x and AMD-V mode.
*/
if (pReq->hypervisorSize == 0)
LogFlowFunc(("Nothing to do\n"));
else
{
/*
* We have to try several times since the host can be picky
* about certain addresses.
*/
bool fBitched = false;
{
/*
* Reserve space, or if that isn't supported, create a object for
* some fictive physical memory and map that in to kernel space.
*
* To make the code a bit uglier, most systems cannot help with
* 4MB alignment, so we have to deal with that in addition to
* having two ways of getting the memory.
*/
if (rc == VERR_NOT_SUPPORTED)
{
}
/*
* If both RTR0MemObjReserveKernel calls above failed because either not supported or
* not implemented at all at the current platform, try to map the memory object into the
* virtual kernel space.
*/
if (rc == VERR_NOT_SUPPORTED)
{
if (hFictive == NIL_RTR0MEMOBJ)
{
rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
if (RT_FAILURE(rc))
break;
}
uAlignment = _4M;
rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
if (rc == VERR_NOT_SUPPORTED)
{
rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
}
}
if (RT_FAILURE(rc))
{
LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
fBitched = true;
break;
}
/*
* Try set it.
*/
if ( uAlignment == PAGE_SIZE
AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
if (RT_SUCCESS(rc))
{
Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
break;
}
}
/*
* Cleanup failed attempts.
*/
while (iTry-- > 0)
if ( RT_FAILURE(rc)
&& hFictive != NIL_RTR0PTR)
LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
}
/*
* We ignore failed attempts for now.
*/
return VINF_SUCCESS;
}
/**
* Undo what vboxGuestInitFixateGuestMappings did.
*
* @param pDevExt The device extension.
*/
{
{
/*
* Tell the host that we're going to free the memory we reserved for
* it, the free it up. (Leak the memory if anything goes wrong here.)
*/
int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
if (RT_SUCCESS(rc))
{
pReq->hypervisorStart = 0;
pReq->hypervisorSize = 0;
}
if (RT_SUCCESS(rc))
{
}
else
}
}
/**
* Inflate the balloon by one chunk represented by an R0 memory object.
*
* The caller owns the balloon mutex.
*
* @returns IPRT status code.
* @param pMemObj Pointer to the R0 memory object.
* @param pReq The pre-allocated request for performing the VMMDev call.
*/
{
int rc;
{
}
if (RT_FAILURE(rc))
return rc;
}
/**
* Deflate the balloon by one chunk - info the host and free the memory object.
*
* The caller owns the balloon mutex.
*
* @returns IPRT status code.
* @param pMemObj Pointer to the R0 memory object.
* The memory object will be freed afterwards.
* @param pReq The pre-allocated request for performing the VMMDev call.
*/
{
int rc;
{
}
if (RT_FAILURE(rc))
{
return rc;
}
if (RT_FAILURE(rc))
{
LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
return rc;
}
return VINF_SUCCESS;
}
/**
*
* This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
* the mutex.
*
* @returns VBox status code.
* @param pDevExt The device extension.
* @param pSession The session.
* @param cBalloonChunks The new size of the balloon in chunks of 1MB.
* @param pfHandleInR3 Where to return the handle-in-ring3 indicator
* (VINF_SUCCESS if set).
*/
static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
{
    /* NOTE(review): large parts of this function (condition expressions, loop
     * headers, the VbglGR request allocation and the inflate/deflate calls)
     * appear to have been lost during extraction; the orphan braces and the
     * 'break' statements below reflect that.  Compare with the full source
     * before relying on any of this. */
    int rc = VINF_SUCCESS;
    {
        uint32_t i;
        /* Presumably: reject balloon sizes beyond cMaxChunks -- TODO confirm. */
        {
            LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
            return VERR_INVALID_PARAMETER;
        }
        return VINF_SUCCESS; /* nothing to do */
        /* Lazily allocate the per-chunk R0 memory object array. */
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            {
                LogRel(("vboxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
        }
        if (RT_FAILURE(rc))
            return rc;
        {
            /* inflate */
            {
                if (RT_FAILURE(rc))
                {
                    if (rc == VERR_NOT_SUPPORTED)
                    {
                        /* not supported -- fall back to the R3-allocated memory. */
                        rc = VINF_SUCCESS;
                        Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
                    }
                    /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
                     * cannot allocate more memory => don't try further, just stop here */
                    /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
                    break;
                }
                if (RT_FAILURE(rc))
                {
                    break;
                }
            }
        }
        else
        {
            /* deflate */
            {
                if (RT_FAILURE(rc))
                {
                    break;
                }
            }
        }
    }
    /*
     * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
     * the balloon changes via the other API.
     */
    return rc;
}
/**
* Sends heartbeat to host.
*
* @returns VBox status code.
*/
static int VBoxGuestHeartbeatSend(void)
{
    /* NOTE(review): 'rc' is used here but never declared or assigned in this
     * extract; the line performing the heartbeat VMMDev request appears to
     * have been lost.  Verify against the full source. */
    if (RT_SUCCESS(rc))
    {
    }
    return rc;
}
/**
* Configure the host to check guest's heartbeat
* and get heartbeat interval from the host.
*
* @returns VBox status code.
* @param pDevExt The device extension.
* @param fEnabled Set true to enable guest heartbeat checks on host.
*/
{
Log(("VBoxGuestHeartbeatHostConfigure: VbglGRAlloc VBoxGuestHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
if (RT_SUCCESS(rc))
{
pReq->cNsInterval = 0;
Log(("VBoxGuestHeartbeatHostConfigure: VbglGRPerform VBoxGuestHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
}
return rc;
}
/**
* Callback for heartbeat timer.
*/
{
int rc = VBoxGuestHeartbeatSend();
if (RT_FAILURE(rc))
{
}
}
/**
* Helper to reinit the VBoxVMM communication after hibernation.
*
* @returns VBox status code.
* @param pDevExt The device extension.
* @param enmOSType The OS type.
*/
{
if (RT_SUCCESS(rc))
{
if (RT_FAILURE(rc))
}
else
return rc;
}
/**
*
* Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
*
* @returns VBox status code.
* @param pDevExt The device extension.
* @param pSession The session.
* @param u64ChunkAddr The address of the chunk to add to / remove from the
* balloon.
* @param fInflate Inflate if true, deflate if false.
*/
{
int rc = VINF_SUCCESS;
uint32_t i;
if (fInflate)
{
{
LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
return VERR_INVALID_PARAMETER;
}
{
pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
{
LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
return VERR_NO_MEMORY;
}
}
}
else
{
{
AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
return VERR_INVALID_PARAMETER;
}
}
/*
* Enumerate all memory objects and check if the object is already registered.
*/
{
if ( fInflate
&& !pMemObj
{
if (fInflate)
return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
break;
}
}
if (!pMemObj)
{
if (fInflate)
{
/* no free object pointer found -- should not happen */
return VERR_NO_MEMORY;
}
/* cannot free this memory as it wasn't provided before */
return VERR_NOT_FOUND;
}
/*
* Try inflate / default the balloon as requested.
*/
if (RT_FAILURE(rc))
return rc;
if (fInflate)
{
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
else
{
RTR0MemObjFree(*pMemObj, true);
}
}
}
else
{
if (RT_SUCCESS(rc))
else
}
return rc;
}
/**
* Cleanup the memory balloon of a session.
*
* Will request the balloon mutex, so it must be valid and the caller must not
* own it already.
*
* @param pDevExt The device extension.
* @param pDevExt The session. Can be NULL at unload.
*/
{
{
{
int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
if (RT_SUCCESS(rc))
{
uint32_t i;
{
if (RT_FAILURE(rc))
{
LogRelFunc(("Deflating balloon failed with rc=%Rrc; will leak %u chunks\n",
break;
}
}
}
else
LogRelFunc(("Failed to allocate VMMDev request buffer, rc=%Rrc; will leak %u chunks\n",
}
}
}
/**
* Initializes the VBoxGuest device extension when the
* device driver is loaded.
*
* The native code locates the VMMDev on the PCI bus and retrieve
* the MMIO and I/O port ranges, this function will take care of
* mapping the MMIO memory (if present). Upon successful return
* the native code should set up the interrupt handler.
*
* @returns VBox status code.
*
* @param pDevExt The device extension. Allocated by the native code.
* @param IOPortBase The base of the I/O port range.
* @param pvMMIOBase The base of the MMIO memory mapping.
* This is optional, pass NULL if not present.
* @param cbMMIO The size of the MMIO memory mapping.
* This is optional, pass 0 if not present.
* @param enmOSType The guest OS type to report to the VMMDev.
* @param fFixedEvents Events that will be enabled upon init and no client
* will ever be allowed to mask.
*/
{
/*
* Create the release log.
*/
static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
#ifdef DEBUG
"VBOXGUEST_LOG",
#else
"VBOXGUEST_RELEASE_LOG",
#endif
if (RT_SUCCESS(rc))
{
/* Explicitly flush the log in case of VBOXGUEST_RELEASE_LOG=buffered. */
}
/** @todo Add native hook for getting logger config parameters and setting
* them. On Linux we use the module parameter stuff (see vboxguestLinuxModInit). */
#endif
/*
* Adjust fFixedEvents.
*/
#ifdef VBOX_WITH_HGCM
#endif
/*
* Initialize the data.
*/
#ifdef VBOX_WITH_HGCM
#endif
#endif
pDevExt->fLoggingEnabled = false;
pDevExt->f32PendingEvents = 0;
pDevExt->u32MousePosChangedSeq = 0;
/*
* If there is an MMIO region validate the version and size.
*/
if (pvMMIOBase)
{
{
LogFlowFunc(("VMMDevMemory: mapping=%p size=%#RX32 (%#RX32), version=%#RX32\n",
}
else /* try live without it. */
LogRelFunc(("Bogus VMMDev memory; u32Version=%RX32 (expected %RX32), u32Size=%RX32 (expected <= %RX32)\n",
}
pDevExt->u32SetModeGuestCaps = 0;
pDevExt->u32GuestCaps = 0;
/*
* Create the wait and session spinlocks as well as the ballooning mutex.
*/
if (RT_SUCCESS(rc))
rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
if (RT_FAILURE(rc))
{
return rc;
}
if (RT_FAILURE(rc))
{
return rc;
}
/*
* Initialize the guest library and report the guest info back to VMMDev,
* set the interrupt control filter mask, and fixate the guest mappings
* made by the VMM.
*/
if (RT_SUCCESS(rc))
{
rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
/* Set the fixed event and disable the guest graphics capability
* by default. The guest specific graphics driver will re-enable
* the graphics capability if and when appropriate. */
if (RT_SUCCESS(rc))
{
if (RT_FAILURE(rc))
/* Make sure that heartbeat checking is disabled. */
if (RT_SUCCESS(rc))
{
if (RT_SUCCESS(rc))
{
LogFlowFunc(("Setting up heartbeat to trigger every %RU64 sec\n", pDevExt->cNsHeartbeatInterval / 1000000000));
if (RT_SUCCESS(rc))
{
if (RT_FAILURE(rc))
}
if (RT_FAILURE(rc))
{
LogRelFunc(("Failed to set up the timer, guest heartbeat is disabled\n"));
/* Disable host heartbeat check if we failed */
VBoxGuestHeartbeatHostConfigure(pDevExt, false);
}
}
else
}
LogFlowFunc(("VBoxGuestInitDevExt: returns success\n"));
return VINF_SUCCESS;
}
else
}
else
}
else
}
else
#endif
return rc; /* (failed) */
}
/**
* Deletes all the items in a wait chain.
* @param pList The head of the chain.
*/
{
while (!RTListIsEmpty(pList))
{
int rc2;
}
}
/**
* Destroys the VBoxGuest device extension.
*
* The native code should call this before the driver is loaded,
* but don't call this on shutdown.
*
* @param pDevExt The device extension.
*/
{
int rc2;
Log(("VBoxGuestDeleteDevExt:\n"));
Log(("VBoxGuest: The additions driver is terminating.\n"));
/*
* Stop and destroy HB timer and
* disable host heartbeat checking.
*/
if (pDevExt->pHeartbeatTimer)
{
VBoxGuestHeartbeatHostConfigure(pDevExt, false);
}
/*
* Clean up the bits that involves the host first.
*/
{
LogRelFunc(("session list not empty!\n"));
}
/* Update the host flags (mouse status etc) not to reflect this session. */
pDevExt->fFixedEvents = 0;
/*
* Cleanup all the other resources.
*/
#ifdef VBOX_WITH_HGCM
#endif
#endif
pDevExt->IOPortBase = 0;
#endif
}
/**
* Creates a VBoxGuest user session.
*
* The native code calls this when a ring-3 client opens the device.
* Use VBoxGuestCreateKernelSession when a ring-0 client connects.
*
* @returns VBox status code.
* @param pDevExt The device extension.
* @param ppSession Where to store the session on success.
*/
{
if (RT_UNLIKELY(!pSession))
{
LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
return VERR_NO_MEMORY;
}
LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
return VINF_SUCCESS;
}
/**
* Creates a VBoxGuest kernel session.
*
* The native code calls this when a ring-0 client connects to the device.
* Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
*
* @returns VBox status code.
* @param pDevExt The device extension.
* @param ppSession Where to store the session on success.
*/
{
if (RT_UNLIKELY(!pSession))
{
LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
return VERR_NO_MEMORY;
}
LogFlowFunc(("pSession=%p proc=%RTproc (%d) r0proc=%p\n",
pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
return VINF_SUCCESS;
}
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
/**
* Closes a VBoxGuest session.
*
* @param pDevExt The device extension.
* @param pSession The session to close (and free).
*/
{
unsigned i; NOREF(i);
LogFlowFunc(("pSession=%p proc=%RTproc (%d) r0proc=%p\n",
pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
#ifdef VBOX_WITH_HGCM
if (pSession->aHGCMClientIds[i])
{
pSession->aHGCMClientIds[i] = 0;
}
#endif
/* Update the host flags (mouse status etc) not to reflect this session. */
#ifdef RT_OS_WINDOWS
#endif
);
}
/**
* Allocates a wait-for-event entry.
*
* @returns The wait-for-event entry.
* @param pDevExt The device extension.
* @param pSession The session that's allocating this. Can be NULL.
*/
{
/*
* Allocate it one way or the other.
*/
if (pWait)
{
if (pWait)
}
if (!pWait)
{
static unsigned s_cErrors = 0;
int rc;
if (!pWait)
{
if (s_cErrors++ < 32)
LogRelFunc(("Out of memory, returning NULL\n"));
return NULL;
}
if (RT_FAILURE(rc))
{
if (s_cErrors++ < 32)
return NULL;
}
}
/*
* Zero members just as an precaution.
*/
pWait->fReqEvents = 0;
pWait->fResEvents = 0;
pWait->fPendingWakeUp = false;
#endif
#ifdef VBOX_WITH_HGCM
#endif
return pWait;
}
/**
* Frees the wait-for-event entry.
*
* The caller must own the wait spinlock !
* The entry must be in a list!
*
* @param pDevExt The device extension.
* @param pWait The wait-for-event entry to free.
*/
{
pWait->fReqEvents = 0;
pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
#endif
if (pWait->fPendingWakeUp)
else
#endif
{
}
}
/**
* Frees the wait-for-event entry.
*
* @param pDevExt The device extension.
* @param pWait The wait-for-event entry to free.
*/
{
}
/**
* Processes the wake-up list.
*
* All entries in the wake-up list gets signalled and moved to the woken-up
* list.
*
* @param pDevExt The device extension.
*/
{
{
for (;;)
{
int rc;
if (!pWait)
break;
pWait->fPendingWakeUp = true;
pWait->fPendingWakeUp = false;
{
}
else
{
}
}
}
}
#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
/**
* Modifies the guest capabilities.
*
* Should be called during driver init and termination.
*
* @returns VBox status code.
* @param fOr The Or mask (what to enable).
* @param fNot The Not mask (what to disable).
*/
{
if (RT_FAILURE(rc))
{
LogFlowFunc(("Failed to allocate %u (%#x) bytes to cache the request; rc=%Rrc\n",
return rc;
}
if (RT_FAILURE(rc))
return rc;
}
/**
* Implements the fast (no input or output) type of IOCtls.
*
* This is currently just a placeholder stub inherited from the support driver code.
*
* @returns VBox status code.
* @param iFunction The IOCtl function number.
* @param pDevExt The device extension.
* @param pSession The session.
*/
/**
 * Handles the fast (no input/output) type of I/O controls.
 *
 * Placeholder stub inherited from the support driver code: no fast I/O
 * controls are implemented, so every function number is rejected.
 *
 * @returns VERR_NOT_SUPPORTED, always.
 * @param   iFunction   The IOCtl function number (ignored).
 * @param   pDevExt     The device extension (ignored).
 * @param   pSession    The session (ignored).
 */
int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    NOREF(iFunction);
    NOREF(pDevExt);
    NOREF(pSession);
    return VERR_NOT_SUPPORTED;
}
/**
* Return the VMM device port.
*
 * @returns IPRT status code.
* @param pDevExt The device extension.
* @param pInfo The request info.
* @param pcbDataReturned (out) contains the number of bytes to return.
*/
/**
 * Returns the VMM device port information to the caller.
 *
 * NOTE(review): as visible here this only reports the output size; the lines
 * presumably filling in pInfo from pDevExt appear to be missing from this
 * extract -- confirm against the full source.
 *
 * @returns VINF_SUCCESS, always.
 * @param   pDevExt          The device extension.
 * @param   pInfo            The request info buffer.
 * @param   pcbDataReturned  (out, optional) Receives the number of bytes to return.
 */
static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
{
    if (pcbDataReturned != NULL)
        *pcbDataReturned = sizeof *pInfo;
    return VINF_SUCCESS;
}
#ifndef RT_OS_WINDOWS
/**
* Set the callback for the kernel mouse handler.
*
 * @returns IPRT status code.
* @param pDevExt The device extension.
* @param pNotify The new callback information.
*/
/**
 * Sets the callback for the kernel mouse handler.
 *
 * NOTE(review): as visible here the function is a no-op; the code presumably
 * storing pNotify into the device extension (under the session spinlock)
 * appears to be missing from this extract -- confirm against the full source.
 *
 * @returns VINF_SUCCESS, always.
 * @param   pDevExt  The device extension.
 * @param   pNotify  The new callback information.
 */
int VBoxGuestCommonIOCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
{
    NOREF(pDevExt);
    NOREF(pNotify);
    return VINF_SUCCESS;
}
#endif
/**
* Worker VBoxGuestCommonIOCtl_WaitEvent.
*
* The caller enters the spinlock, we leave it.
*
* @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
*/
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestWaitEventInfo *pInfo,
{
{
else
pSession->fPendingCancelWaitEvents = false;
return VINF_SUCCESS;
}
return VERR_TIMEOUT;
}
{
int iEvent;
int rc;
pInfo->u32EventFlagsOut = 0;
if (pcbDataReturned)
*pcbDataReturned = sizeof(*pInfo);
/*
* Copy and verify the input mask.
*/
if (RT_UNLIKELY(iEvent < 0))
{
return VERR_INVALID_PARAMETER;
}
/*
* Check the condition up front, before doing the wait-for-event allocations.
*/
if (rc == VINF_SUCCESS)
return rc;
if (!pInfo->u32TimeoutIn)
{
LogFlowFunc(("Returning VERR_TIMEOUT\n"));
return VERR_TIMEOUT;
}
if (!pWait)
return VERR_NO_MEMORY;
/*
* We've got the wait entry now, re-enter the spinlock and check for the condition.
* If the wait condition is met, return.
* Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
*/
if (rc == VINF_SUCCESS)
{
return rc;
}
if (fInterruptible)
else
/*
* There is one special case here and that's when the semaphore is
* destroyed upon device driver unload. This shouldn't happen of course,
* but in case it does, just get out of here ASAP.
*/
if (rc == VERR_SEM_DESTROYED)
return rc;
/*
* Unlink the wait item and dispose of it.
*/
/*
* Now deal with the return code.
*/
if ( fResEvents
&& fResEvents != UINT32_MAX)
{
else
rc = VINF_SUCCESS;
}
else if ( fResEvents == UINT32_MAX
|| rc == VERR_INTERRUPTED)
{
LogFlowFunc(("Returning VERR_INTERRUPTED\n"));
}
else if (rc == VERR_TIMEOUT)
{
LogFlowFunc(("Returning VERR_TIMEOUT (2)\n"));
}
else
{
if (RT_SUCCESS(rc))
{
static unsigned s_cErrors = 0;
if (s_cErrors++ < 32)
}
}
return rc;
}
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    int rc = 0; /* NOTE(review): never read again in this extract. */
    /* Was at least one WAITEVENT in process for this session? If not we
     * set a flag that the next call should be interrupted immediately. This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool fCancelledOne = false;
    LogFlowFunc(("CANCEL_ALL_WAITEVENTS\n"));
    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    /* NOTE(review): the loop header and the wake-up/signal calls appear to
     * have been lost during extraction; the '#else'/'#endif' directives below
     * are missing their opening '#if'.  Verify against the full source. */
    {
        {
            fCancelledOne = true;
#else
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
#endif
    return VINF_SUCCESS;
}
/**
* Checks if the VMM request is allowed in the context of the given session.
*
* @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
* @param pSession The calling session.
* @param enmType The request type.
* @param pReqHdr The request.
*/
static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
VMMDevRequestHeader const *pReqHdr)
{
/*
* Categorize the request being made.
*/
/** @todo This need quite some more work! */
enum
{
kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
} enmRequired;
switch (enmType)
{
/*
* Deny access to anything we don't know or provide specialized I/O controls for.
*/
#ifdef VBOX_WITH_HGCM
case VMMDevReq_HGCMConnect:
case VMMDevReq_HGCMDisconnect:
# ifdef VBOX_WITH_64_BITS_GUESTS
case VMMDevReq_HGCMCall32:
case VMMDevReq_HGCMCall64:
# else
case VMMDevReq_HGCMCall:
# endif /* VBOX_WITH_64_BITS_GUESTS */
case VMMDevReq_HGCMCancel:
case VMMDevReq_HGCMCancel2:
#endif /* VBOX_WITH_HGCM */
default:
break;
/*
* There are a few things only this driver can do (and it doesn't use
* the VMMRequst I/O control route anyway, but whatever).
*/
break;
/*
* Trusted users apps only.
*/
case VMMDevReq_WriteCoreDump:
break;
/*
* Anyone. But not for CapsAcquire mode
*/
{
uint32_t fAcquireCaps = 0;
{
AssertFailed();
break;
}
/* hack to adjust the notcaps.
* @todo: move to a better place
* user-mode apps are allowed to pass any mask to the notmask,
* the driver cleans up them accordingly */
/* do not break, make it fall through to the below enmRequired setting */
}
/*
* Anyone.
*/
case VMMDevReq_GetMouseStatus:
case VMMDevReq_SetMouseStatus:
case VMMDevReq_GetHostVersion:
case VMMDevReq_Idle:
case VMMDevReq_GetHostTime:
case VMMDevReq_SetPowerStatus:
case VMMDevReq_LogString:
case VMMDevReq_GetSessionId:
break;
/*
* Depends on the request parameters...
*/
/** @todo this have to be changed into an I/O control and the facilities
* tracked in the session so they can automatically be failed when the
* session terminates without reporting the new status.
*
* The information presented by IGuest is not reliable without this! */
{
break;
break;
default:
break;
}
break;
}
/*
* Check against the session.
*/
switch (enmRequired)
{
default:
case kLevel_NoOne:
break;
case kLevel_OnlyVBoxGuest:
case kLevel_OnlyKernel:
return VINF_SUCCESS;
break;
case kLevel_TrustedUsers:
case kLevel_AllUsers:
return VINF_SUCCESS;
}
return VERR_PERMISSION_DENIED;
}
{
int rc;
/*
* Validate the header and request size.
*/
{
LogRelFunc(("Invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
return VERR_INVALID_PARAMETER;
}
{
LogRelFunc(("Invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
return VERR_INVALID_PARAMETER;
}
if (RT_FAILURE(rc))
{
LogFlowFunc(("Invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc\n",
return rc;
}
if (RT_FAILURE(rc))
{
return rc;
}
/*
* Make a copy of the request in the physical memory heap so
* the VBoxGuestLibrary can more easily deal with the request.
* (This is really a waste of time since the OS or the OS specific
* it does makes things a bit simpler wrt to phys address.)
*/
if (RT_FAILURE(rc))
{
LogFlowFunc(("Failed to allocate %u (%#x) bytes to cache the request; rc=%Rrc\n",
return rc;
}
if ( RT_SUCCESS(rc)
{
if (pcbDataReturned)
*pcbDataReturned = cbReq;
}
else if (RT_FAILURE(rc))
else
{
LogFlowFunc(("Request execution failed; VMMDev rc=%Rrc\n",
}
return rc;
}
{
int rc;
return VERR_INVALID_PARAMETER;
return rc;
}
{
int rc;
return VERR_INVALID_PARAMETER;
return rc;
}
/**
* Sets the mouse status features for this session and updates them
* globally.
*
* @returns VBox status code.
*
* @param pDevExt The device extention.
* @param pSession The session.
* @param fFeatures New bitmap of enabled features.
*/
{
int rc;
if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
return VERR_INVALID_PARAMETER;
/* Since this is more of a negative feature we invert it to get the real
* feature (when the guest does not need the host cursor). */
return rc;
}
#ifdef VBOX_WITH_HGCM
/** Worker for VBoxGuestHGCMAsyncWaitCallback*. */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
{
int rc;
/*
* Check to see if the condition was met by the time we got here.
*
* We create a simple poll loop here for dealing with out-of-memory
* conditions since the caller isn't necessarily able to deal with
* us returning too early.
*/
for (;;)
{
{
return VINF_SUCCESS;
}
if (pWait)
break;
if (fInterruptible)
return VERR_INTERRUPTED;
RTThreadSleep(1);
}
/*
* Re-enter the spinlock and re-check for the condition.
* If the condition is met, return.
* Otherwise link us into the HGCM wait list and go to sleep.
*/
{
return VINF_SUCCESS;
}
if (fInterruptible)
else
if (rc == VERR_SEM_DESTROYED)
return rc;
/*
* Unlink, free and return.
*/
if ( RT_FAILURE(rc)
&& rc != VERR_TIMEOUT
&& ( !fInterruptible
|| rc != VERR_INTERRUPTED))
return rc;
}
/**
* This is a callback for dealing with async waits.
*
* It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
*/
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
{
/* NOTE(review): the opening of the forwarded call (presumably to
 * VBoxGuestHGCMAsyncWaitCallbackWorker, with pHdr and the device extension
 * recovered from pvUser) is missing; only the trailing argument lines
 * survive: non-interruptible, u32User used as the millisecond timeout. */
false /* fInterruptible */,
u32User /* cMillies */);
}
/**
* This is a callback for dealing with async waits with a timeout.
*
* It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
*/
/* NOTE(review): the signature line (presumably the interruptible variant of
 * VBoxGuestHGCMAsyncWaitCallback) and the opening of the forwarded call are
 * missing from this view; the surviving arguments show the interruptible
 * flavor with u32User as the millisecond timeout.  Confirm against upstream. */
{
true /* fInterruptible */,
u32User /* cMillies */ );
}
/* NOTE(review): the signature is missing — this looks like the HGCMConnect
 * IOCtl worker, judging by the client-id table handling and the log text
 * below.  The VbglR0HGCMInternalConnect call, the loop header over
 * pSession->aHGCMClientIds and several other statements were lost to
 * extraction; reconstruct from the upstream file before use. */
{
int rc;
/*
* The VbglHGCMConnect call will invoke the callback if the HGCM
* call is performed in an ASYNC fashion. The function is not able
* to deal with cancelled requests.
*/
LogFlowFunc(("%.128s\n",
if (RT_SUCCESS(rc))
{
LogFlowFunc(("u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
{
/*
* Append the client id to the client id table.
* If the table has somehow become filled up, we'll disconnect the session.
*/
unsigned i;
/* Look for a free slot in the session's client-id table. */
if (!pSession->aHGCMClientIds[i])
{
break;
}
{
/* Table full: rate-limited release log, then fail the connect. */
static unsigned s_cErrors = 0;
if (s_cErrors++ < 32)
LogRelFunc(("Too many HGCMConnect calls for one session\n"));
return VERR_TOO_MANY_OPEN_FILES;
}
}
if (pcbDataReturned)
*pcbDataReturned = sizeof(*pInfo);
}
return rc;
}
/* NOTE(review): the continuation of the signature and several body
 * statements (the client-id lookup loop header, logging, and the client-id
 * restore-on-failure logic) are missing from this view. */
static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
{
/*
* Validate the client id and invalidate its entry while we're in the call.
*/
int rc;
unsigned i;
{
break;
}
{
static unsigned s_cErrors = 0;
/* NOTE(review): a statement (presumably a rate-limited LogRel) appears to
 * be missing here — as written, the return below became conditional on the
 * error counter, which cannot be the intended behavior. */
if (s_cErrors++ > 32)
return VERR_INVALID_HANDLE;
}
/*
* The VbglHGCMConnect call will invoke the callback if the HGCM
* call is performed in an ASYNC fashion. The function is not able
* to deal with cancelled requests.
*/
rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
if (RT_SUCCESS(rc))
{
if (pcbDataReturned)
*pcbDataReturned = sizeof(*pInfo);
}
/* Update the client id array according to the result. */
return rc;
}
/* NOTE(review): the signature is missing — this is the HGCMCall IOCtl
 * worker, judging by the VbglR0HGCMInternalCall/Call32 dispatch below.  The
 * size validation conditions, the client-id lookup loop, and declarations
 * used here (fFlags, cbExtra, f32bit, fUserData, fInterruptible, cMillies)
 * were lost to extraction; reconstruct from the upstream file before use. */
{
unsigned i;
int rc;
/*
* Some more validations.
*/
{
return VERR_INVALID_PARAMETER;
}
#ifdef RT_ARCH_AMD64
if (f32bit)
else
#endif
{
LogRelFunc(("cbData=%#zx (%zu) required size is %#zx (%zu)\n",
return VERR_INVALID_PARAMETER;
}
/*
* Validate the client id.
*/
break;
{
static unsigned s_cErrors = 0;
/* NOTE(review): as in the disconnect worker, a statement appears to be
 * missing here, making the return conditional on the error counter. */
if (s_cErrors++ > 32)
return VERR_INVALID_HANDLE;
}
/*
* The VbglHGCMCall call will invoke the callback if the HGCM
* call is performed in an ASYNC fashion. This function can
* deal with cancelled requests, so we let user more requests
* be interruptible (should add a flag for this later I guess).
*/
/* Ring-0 sessions without user data run with the KERNEL flag; everything
 * else uses the USER flag (user-mode buffers get validated/locked). */
fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
#ifdef RT_ARCH_AMD64
if (f32bit)
{
if (fInterruptible)
rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
else
rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
}
else
#endif
{
if (fInterruptible)
rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
else
rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
}
if (RT_SUCCESS(rc))
{
if (pcbDataReturned)
}
else
{
/* Interrupted/timed-out calls are expected; only other failures get the
 * rate-limited release log. */
if ( rc != VERR_INTERRUPTED
&& rc != VERR_TIMEOUT)
{
static unsigned s_cErrors = 0;
if (s_cErrors++ < 32)
LogRelFunc(("%s-bit call failed; rc=%Rrc\n",
}
else
LogFlowFunc(("%s-bit call failed; rc=%Rrc\n",
}
return rc;
}
#endif /* VBOX_WITH_HGCM */
/**
* Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
*
* Ask the host for the size of the balloon and try to set it accordingly. If
* this approach fails because it's not supported, return with fHandleInR3 set
* and let the user land supply memory we can lock via the other ioctl.
*
* @returns VBox status code.
*
* @param pDevExt The device extension.
* @param pSession The session.
* @param pInfo The output buffer.
* @param pcbDataReturned Where to store the amount of returned data. Can
* be NULL.
*/
/* NOTE(review): the continuation of the signature and much of the body —
 * presumably balloon-owner/mutex handling and the host request that assigns
 * 'rc' — are missing from this view; 'rc' is returned without a visible
 * definition.  Reconstruct from the upstream file before use. */
static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
{
/*
* owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
*/
{
}
{
if (RT_SUCCESS(rc))
{
/*
* This is a response to that event. Setting this bit means that
* we request the value from the host and change the guest memory
* balloon according to this value.
*/
if (RT_SUCCESS(rc))
{
Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
pInfo->fHandleInR3 = false;
/* Ignore various out of memory failures. */
if ( rc == VERR_NO_MEMORY
|| rc == VERR_NO_PHYS_MEMORY
|| rc == VERR_NO_CONT_MEMORY)
rc = VINF_SUCCESS;
if (pcbDataReturned)
*pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
}
else
}
}
else
return rc;
}
/**
* Handle a request for changing the memory balloon.
*
* @returns VBox status code.
*
* @param pDevExt The device extension.
* @param pSession The session.
* @param pInfo The change request structure (input).
* @param pcbDataReturned Where to store the amount of returned data. Can
* be NULL.
*/
/* NOTE(review): the continuation of the signature and most statements are
 * missing from this view; 'rc' is returned without a visible definition.
 * Reconstruct from the upstream file before use. */
static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
{
{
/*
* owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
*/
{
if (pcbDataReturned)
*pcbDataReturned = 0;
}
else
}
else
return rc;
}
/**
* Handle a request for writing a core dump of the guest on the host.
*
* @returns VBox status code.
*
* @param pDevExt The device extension.
* @param pInfo The output buffer.
*/
static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
{
/* NOTE(review): the request allocation and the host call that assign 'rc'
 * appear to be missing from this view (only the error paths survive), and
 * there is no success-path return.  Reconstruct from the upstream file. */
if (RT_FAILURE(rc))
{
LogFlowFunc(("Failed to allocate %u (%#x) bytes to cache the request; rc=%Rrc\n",
return rc;
}
if (RT_FAILURE(rc))
return rc;
}
/**
* Guest backdoor logging.
*
* @returns VBox status code.
*
* @param pDevExt The device extension.
* @param pch The log message (need not be NULL terminated).
* @param cbData Size of the buffer.
* @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
* @param fUserSession Whether the call originates from a user session.
*/
static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned, bool fUserSession)
{
/* NOTE(review): the statement bodies of the three branches below (the
 * actual Log/LogRel emission calls) are missing from this view, leaving a
 * dangling if/else chain that will not compile as-is. */
if (pDevExt->fLoggingEnabled)
else if (!fUserSession)
else
if (pcbDataReturned)
*pcbDataReturned = 0;
return VINF_SUCCESS;
}
/* NOTE(review): signature missing (extraction damage).  This body validates a
 * capability mask, rejecting any bit outside SEAMLESS,
 * GUEST_HOST_WINDOW_MAPPING and GRAPHICS; presumably the caps-validation
 * helper used by the acquire path below — confirm against upstream. */
{
if (fCaps & (~(VMMDEV_GUEST_SUPPORTS_SEAMLESS | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING | VMMDEV_GUEST_SUPPORTS_GRAPHICS)))
return false;
return true;
}
/** Check whether any unreported VMM device events should be reported to any of
* the currently listening sessions. In addition, report any events in
* @a fGenFakeEvents.
* @note This is called by GUEST_CAPS_ACQUIRE in case any pending events can now
* be dispatched to the session which acquired capabilities. The fake
* events are a hack to wake up threads in that session which would not
* otherwise be woken.
* @todo Why not just use CANCEL_ALL_WAITEVENTS to do the waking up rather than
* adding additional code to the driver?
* @todo Why does acquiring capabilities block and unblock events? Capabilities
* are supposed to control what is reported to the host, we already have
* separate requests for blocking and unblocking events. */
static void VBoxGuestCommonCheckEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fGenFakeEvents)
{
/* NOTE(review): the spinlock handling, the wait-list iteration header and
 * the wake-up calls are missing from this view; the surviving lines (and
 * the unbalanced #else/#endif directives) will not compile as-is.
 * Reconstruct from the upstream file before use. */
{
&& !pWait->fResEvents)
{
#else
#endif
if (!fEvents)
break;
}
}
#endif
}
/** Switch the capabilities in @a fOrMask to "acquire" mode if they are not
* already in "set" mode. If @a enmFlags is not set to
* VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE, also try to acquire those
* capabilities for the current session and release those in @a fNotFlag. */
static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags)
{
/* NOTE(review): the validation conditions, spinlock handling, the
 * session-capability bookkeeping guarding the returns below, and the host
 * notification call that assigns 'rc' are missing from this view; only the
 * error/success exits and comments survive.  Reconstruct from upstream. */
{
LogRelFunc(("pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- invalid fOrMask\n",
return VERR_INVALID_PARAMETER;
}
{
LogRelFunc(("pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- invalid enmFlags %d\n",
return VERR_INVALID_PARAMETER;
}
{
LogRelFunc(("pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- calling caps acquire for set caps\n",
return VERR_INVALID_STATE;
}
{
LogRelFunc(("pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- configured acquire caps: 0x%x\n",
return VINF_SUCCESS;
}
/* the fNotMask no need to have all values valid,
* invalid ones will simply be ignored */
if (!fOtherConflictingCaps)
{
if (fSessionOrCaps)
{
}
if (fSessionNotCaps)
{
}
}
{
return VERR_RESOURCE_BUSY;
}
/* now do host notification outside the lock */
if (!fSessionOrCaps && !fSessionNotCaps)
{
/* no changes, return */
return VINF_SUCCESS;
}
if (RT_FAILURE(rc))
{
/* Failure branch
* this is generally bad since e.g. failure to release the caps may result in other sessions not being able to use it
* so we are not trying to restore the caps back to their values before the VBoxGuestCommonGuestCapsAcquire call,
* but just pretend everything is OK.
* @todo: better failure handling mechanism? */
}
/* success! */
uint32_t fGenFakeEvents = 0;
{
/* generate the seamless change event so that the r3 app could synch with the seamless state
* although this introduces a false alarming of r3 client, it still solves the problem of
* client state inconsistency in multiuser environment */
}
/* since the acquire filter mask has changed, we need to process events in any way to ensure they go from pending events field
* to the proper (un-filtered) entries */
return VINF_SUCCESS;
}
/**
 * Handle VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE.
 *
 * Thin IOCtl wrapper around VBoxGuestCommonGuestCapsAcquire().  The worker
 * status is delivered to the ring-3 caller via the request structure
 * (pAcquire->rc); the IOCtl itself always reports VINF_SUCCESS so the
 * structure is copied back even when the acquire failed.
 *
 * @returns VINF_SUCCESS always (worker status is in pAcquire->rc).
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   pAcquire    The acquire request (input/output).
 */
static int VBoxGuestCommonIOCTL_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestCapsAquire *pAcquire)
{
    int rc = VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags);
    /* Propagate the worker status through the request structure.  The
     * previous (truncated) code returned VINF_SUCCESS only inside the
     * RT_FAILURE() branch and fell off the end of a non-void function on the
     * success path, which is undefined behavior. */
    pAcquire->rc = rc;
    return VINF_SUCCESS;
}
/**
* Common IOCtl for user to kernel and kernel to kernel communication.
*
* This function only does the basic validation and then invokes
* worker functions that take care of each specific function.
*
* @returns VBox status code.
*
* @param iFunction The requested function.
* @param pDevExt The device extension.
* @param pSession The client session.
* @param cbData The max size of the data buffer.
* @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
*/
/* NOTE(review): the signature line (the common IOCtl dispatcher, per the doc
 * comment above), the condition lines inside the CHECKRET_* helper macros,
 * and all 'case VBOXGUEST_IOCTL_*:' labels of the switch below are missing
 * from this view.  Reconstruct from the upstream file before use. */
{
int rc;
LogFlowFunc(("iFunction=%#x, pDevExt=%p, pSession=%p, pvData=%p, cbData=%zu\n",
/*
* Make sure the returned data size is set to zero.
*/
if (pcbDataReturned)
*pcbDataReturned = 0;
/*
* Define some helper macros to simplify validation.
*/
#define CHECKRET_RING0(mnemonic) \
do { \
{ \
return VERR_PERMISSION_DENIED; \
} \
} while (0)
do { \
{ \
return VERR_BUFFER_OVERFLOW; \
} \
{ \
return VERR_INVALID_POINTER; \
} \
} while (0)
do { \
{ \
return VERR_BUFFER_OVERFLOW; \
} \
{ \
return VERR_INVALID_POINTER; \
} \
} while (0)
/*
* Deal with variably sized requests first.
*/
rc = VINF_SUCCESS;
if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
{
rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
}
#ifdef VBOX_WITH_HGCM
/*
* These ones are a bit tricky.
*/
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
{
rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
0, cbData, pcbDataReturned);
}
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
{
false /*f32bit*/, false /* fUserData */,
}
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
{
bool fInterruptible = true;
rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
0, cbData, pcbDataReturned);
}
# ifdef RT_ARCH_AMD64
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
{
rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
0, cbData, pcbDataReturned);
}
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
{
true /*f32bit*/, false /* fUserData */,
}
# endif
#endif /* VBOX_WITH_HGCM */
else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
{
rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned, pSession->fUserSession);
}
else
{
/* NOTE(review): every case label was lost to extraction; the bodies below
 * can no longer be matched to their requests from this view alone. */
switch (iFunction)
{
CHECKRET_RING0("GETVMMDEVPORT");
break;
#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
rc = VBoxGuestCommonIOCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
break;
#endif
break;
if (cbData != 0)
break;
CHECKRET_MIN_SIZE("CTL_FILTER_MASK",
sizeof(VBoxGuestFilterMaskInfo));
break;
#ifdef VBOX_WITH_HGCM
# ifdef RT_ARCH_AMD64
# endif
rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
break;
# ifdef RT_ARCH_AMD64
# endif
rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
break;
#endif /* VBOX_WITH_HGCM */
rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
break;
rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
break;
break;
break;
CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
break;
#endif
*pcbDataReturned = sizeof(VBoxGuestCapsAquire);
break;
CHECKRET_MIN_SIZE("SET_GUEST_CAPABILITIES",
sizeof(VBoxGuestSetCapabilitiesInfo));
break;
default:
{
LogRelFunc(("Unknown request iFunction=%#x, stripped size=%#x\n",
break;
}
}
}
return rc;
}
/**
* Common interrupt service routine.
*
* This deals with events and with waking up threads waiting for those events.
*
* @returns true if it was our interrupt, false if it wasn't.
* @param pDevExt The VBoxGuest device extension.
*/
/* NOTE(review): the signature line (the common ISR, per the doc comment), the
 * acknowledge-request setup, the spinlock enter/leave calls and the wait-list
 * iteration headers are missing from this view; the surviving lines (and the
 * unbalanced #else/#endif directives) only outline the flow and will not
 * compile as-is.  Reconstruct from the upstream file before use. */
{
bool fMousePositionChanged = false;
int rc = 0;
bool fOurIrq;
/*
* Make sure we've initialized the device extension.
*/
if (RT_UNLIKELY(!pReq))
return false;
/*
* Enter the spinlock and check if it's our IRQ or not.
*/
if (fOurIrq)
{
/*
* Acknowledge events.
* We don't use VbglGRPerform here as it may take other spinlocks.
*/
ASMCompilerBarrier(); /* paranoia */
{
#ifndef DEBUG_andy
#endif
/*
* VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
*/
{
fMousePositionChanged = true;
#ifndef RT_OS_WINDOWS
#endif
}
#ifdef VBOX_WITH_HGCM
/*
*/
if (fEvents & VMMDEV_EVENT_HGCM)
{
{
{
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
# else
# endif
}
}
/* HGCM events have been dispatched above; drop the bit. */
fEvents &= ~VMMDEV_EVENT_HGCM;
}
#endif
/*
* Normal FIFO waiter evaluation.
*/
{
&& !pWait->fResEvents)
{
#else
#endif
if (!fEvents)
break;
}
}
}
else /* something is seriously wrong... */
LogFlowFunc(("Acknowledging events failed, rc=%Rrc (events=%#x)\n",
}
#ifndef DEBUG_andy
else
LogFlowFunc(("Not ours\n"));
#endif
/*
* Do wake-ups.
* Note. On Windows this isn't possible at this IRQL, so a DPC will take
* care of it. Same on darwin, doing it in the work loop callback.
*/
#endif
/*
* Work the poll and async notification queues on OSes that implement that.
* (Do this outside the spinlock to prevent some recursive spinlocking.)
*/
{
}
return fOurIrq;
}