/* $Id$ */
/** @file
* VMM - Host Context Ring 0.
*/
/*
* Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include <VBox/vmm/vmm.h>
#include <VBox/sup.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/stam.h>
#include <VBox/vmm/tm.h>
#include "VMMInternal.h"
#include <VBox/vmm/vm.h>
#ifdef VBOX_WITH_PCI_PASSTHROUGH
# include <VBox/vmm/pdmpci.h>
#endif
#include <VBox/vmm/gvmm.h>
#include <VBox/vmm/gmm.h>
#include <VBox/vmm/gim.h>
#include <VBox/intnet.h>
#include <VBox/vmm/hm.h>
#include <VBox/err.h>
#include <VBox/log.h>

#include <iprt/asm-amd64-x86.h>
#include <iprt/assert.h>
#include <iprt/crc.h>
#include <iprt/mp.h>
#include <iprt/once.h>
#include <iprt/stdarg.h>
#include <iprt/string.h>
#include <iprt/thread.h>
#include <iprt/timer.h>

#include "dtrace/VBoxVMM.h"

#if defined(_MSC_VER) && defined(RT_ARCH_AMD64)
# pragma intrinsic(_AddressOfReturnAddress)
#endif
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
RT_C_DECLS_BEGIN
#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
extern uint64_t __udivdi3(uint64_t, uint64_t);
extern uint64_t __umoddi3(uint64_t, uint64_t);
#endif
RT_C_DECLS_END
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/** Drag in necessary library bits.
* The runtime lives here (in VMMR0.r0) and VBoxDD*R0.r0 links against us. */
PFNRT g_VMMR0Deps[] =
{
    /* Assumption: the exact symbol list was stripped; these are representative
       IPRT bits matching the headers included above. */
    (PFNRT)RTCrc32,
    (PFNRT)RTOnce,
#if defined(RT_ARCH_X86) && (defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD))
    (PFNRT)__udivdi3,
    (PFNRT)__umoddi3,
#endif
    NULL
};
#ifdef RT_OS_SOLARIS
/* Dependency information for the native solaris loader. */
extern "C" { char _depends_on[] = "vboxdrv"; }
#endif
/**
* Initialize the module.
* This is called when we're first loaded.
*
* @returns 0 on success.
* @returns VBox status on failure.
* @param hMod Image handle for use in APIs.
*/
DECLEXPORT(int) ModuleInit(void *hMod)
{
#ifdef VBOX_WITH_DTRACE_R0
    /*
     * The first thing to do is register the static tracepoints.
     * (Deregistration is automatic.)
     */
    int rc2 = SUPR0TracerRegisterModule(hMod, &g_VTGObjHeader);
    if (RT_FAILURE(rc2))
        return rc2;
#endif
LogFlow(("ModuleInit:\n"));
#ifdef VBOX_WITH_64ON32_CMOS_DEBUG
    /*
     * Display the CMOS debug code.
     */
    ASMOutU8(0x72, 0x03);                   /* Assumption: CMOS bank 2, index 3, per the 64-on-32 debug convention. */
    uint8_t bDebugCode = ASMInU8(0x73);
    LogRel(("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode));
    RTLogComPrintf("CMOS Debug Code: %#x (%d)\n", bDebugCode, bDebugCode);
#endif
    /*
     * Initialize the VMM, GVMM, GMM, HM, PGM (Darwin) and INTNET.
     */
    int rc = vmmInitFormatTypes();
    if (RT_SUCCESS(rc))
    {
        rc = GVMMR0Init();
        if (RT_SUCCESS(rc))
        {
            rc = GMMR0Init();
            if (RT_SUCCESS(rc))
            {
                rc = HMR0Init();
                if (RT_SUCCESS(rc))
                {
                    rc = PGMRegisterStringFormatTypes();
                    if (RT_SUCCESS(rc))
                    {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                        rc = PGMR0DynMapInit();
#endif
                        if (RT_SUCCESS(rc))
                        {
                            rc = IntNetR0Init();
                            if (RT_SUCCESS(rc))
                            {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                                rc = PciRawR0Init();
#endif
                                if (RT_SUCCESS(rc))
                                {
                                    rc = CPUMR0ModuleInit();
                                    if (RT_SUCCESS(rc))
                                    {
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                                        rc = vmmR0TripleFaultHackInit();
                                        if (RT_SUCCESS(rc))
#endif
                                        {
                                            LogFlow(("ModuleInit: returns success.\n"));
                                            return VINF_SUCCESS;
                                        }

                                        /*
                                         * Bail out.
                                         */
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
                                        vmmR0TripleFaultHackTerm();
#endif
                                    }
                                    else
                                        LogRel(("ModuleInit: CPUMR0ModuleInit -> %Rrc\n", rc));
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                                    PciRawR0Term();
#endif
                                }
                                else
                                    LogRel(("ModuleInit: PciRawR0Init -> %Rrc\n", rc));
                                IntNetR0Term();
                            }
                            else
                                LogRel(("ModuleInit: IntNetR0Init -> %Rrc\n", rc));
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                            PGMR0DynMapTerm();
#endif
                        }
                        else
                            LogRel(("ModuleInit: PGMR0DynMapInit -> %Rrc\n", rc));
                        PGMDeregisterStringFormatTypes();
                    }
                    else
                        LogRel(("ModuleInit: PGMRegisterStringFormatTypes -> %Rrc\n", rc));
                    HMR0Term();
                }
                else
                    LogRel(("ModuleInit: HMR0Init -> %Rrc\n", rc));
                GMMR0Term();
            }
            else
                LogRel(("ModuleInit: GMMR0Init -> %Rrc\n", rc));
            GVMMR0Term();
        }
        else
            LogRel(("ModuleInit: GVMMR0Init -> %Rrc\n", rc));
        vmmTermFormatTypes();
    }
    else
        LogRel(("ModuleInit: vmmInitFormatTypes -> %Rrc\n", rc));

    LogFlow(("ModuleInit: failed %Rrc\n", rc));
    return rc;
}
/**
* Terminate the module.
* This is called when we're finally unloaded.
*
* @param hMod Image handle for use in APIs.
*/
DECLEXPORT(void) ModuleTerm(void *hMod)
{
    NOREF(hMod);
    LogFlow(("ModuleTerm:\n"));

    /*
     * Terminate the CPUM module (Local APIC cleanup).
     */
    CPUMR0ModuleTerm();

    /*
     * Terminate the internal network service.
     */
    IntNetR0Term();

    /*
     * PGM (Darwin), HM and PciRaw global cleanup.
     */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
    PGMR0DynMapTerm();
#endif
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0Term();
#endif
    PGMDeregisterStringFormatTypes();
    HMR0Term();
#ifdef VBOX_WITH_TRIPLE_FAULT_HACK
    vmmR0TripleFaultHackTerm();
#endif

    /*
     * Destroy the GMM and GVMM instances.
     */
    GMMR0Term();
    GVMMR0Term();

    vmmTermFormatTypes();
    LogFlow(("ModuleTerm: returns\n"));
}
/**
* Initiates the R0 driver for a particular VM instance.
*
* @returns VBox status code.
*
* @param pVM Pointer to the VM.
* @param uSvnRev The SVN revision of the ring-3 part.
* @param uBuildType Build type indicator.
* @thread EMT.
*/
static int vmmR0InitVM(PVM pVM, uint32_t uSvnRev, uint32_t uBuildType)
{
    /*
     * Match the SVN revisions and build type.
     */
    if (uSvnRev != VMMGetSvnRev())
    {
        LogRel(("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev()));
        SUPR0Printf("VMMR0InitVM: Revision mismatch, r3=%d r0=%d\n", uSvnRev, VMMGetSvnRev());
        return VERR_VMM_R0_VERSION_MISMATCH;
    }
    if (uBuildType != vmmGetBuildType())
    {
        LogRel(("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType()));
        SUPR0Printf("VMMR0InitVM: Build type mismatch, r3=%#x r0=%#x\n", uBuildType, vmmGetBuildType());
        return VERR_VMM_R0_VERSION_MISMATCH;
    }
    if (   !VALID_PTR(pVM)
        || pVM->pVMR0 != pVM)
        return VERR_INVALID_PARAMETER;

#ifdef LOG_ENABLED
    /*
     * Register the EMT R0 logger instance for VCPU 0.
     */
    PVMCPU pVCpu = &pVM->aCpus[0];
    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
    if (pR0Logger)
    {
# if 0 /* testing of the logger. */
        LogCom(("vmmR0InitVM: offScratch=%d fFlags=%#x fDestFlags=%#x\n", pR0Logger->Logger.offScratch, pR0Logger->Logger.fFlags, pR0Logger->Logger.fDestFlags));
        LogCom(("vmmR0InitVM: returned successfully from direct logger call.\n"));
        LogCom(("vmmR0InitVM: returned successfully from direct flush call.\n"));
        LogCom(("vmmR0InitVM: returned successfully from direct logger call (2). offScratch=%d\n", pR0Logger->Logger.offScratch));
        RTLogPrintf("hello ring-0 logger (RTLogPrintf)\n");
# endif
        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
        pR0Logger->fRegistered = true;
    }
#endif /* LOG_ENABLED */

    /*
     * Check if the host supports high resolution timers or not.
     */
    if (   pVM->vmm.s.fUsePeriodicPreemptionTimers
        && !RTTimerCanDoHighResolution())
        pVM->vmm.s.fUsePeriodicPreemptionTimers = false;

    /*
     * Initialize the per VM data for GVMM and GMM.
     */
    int rc = GVMMR0InitVM(pVM);
//    if (RT_SUCCESS(rc))
//        rc = GMMR0InitPerVMData(pVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Init HM, CPUM and PGM (Darwin only).
         */
        rc = HMR0InitVM(pVM);
        if (RT_SUCCESS(rc))
        {
            rc = CPUMR0InitVM(pVM);
            if (RT_SUCCESS(rc))
            {
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
                rc = PGMR0DynMapInitVM(pVM);
#endif
                if (RT_SUCCESS(rc))
                {
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                    rc = PciRawR0InitVM(pVM);
#endif
                    if (RT_SUCCESS(rc))
                    {
                        rc = GIMR0InitVM(pVM); /* Assumption: the stripped innermost init call, inferred from the extra success check. */
                        if (RT_SUCCESS(rc))
                        {
                            GVMMR0DoneInitVM(pVM);
                            return rc;
                        }

                        /* bail out */
#ifdef VBOX_WITH_PCI_PASSTHROUGH
                        PciRawR0TermVM(pVM);
#endif
                    }
                }
            }
            HMR0TermVM(pVM);
        }
    }

    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return rc;
}
/**
* Terminates the R0 bits for a particular VM instance.
*
* This is normally called by ring-3 as part of the VM termination process, but
* may alternatively be called during the support driver session cleanup when
* the VM object is destroyed (see GVMM).
*
* @returns VBox status code.
*
* @param pVM Pointer to the VM.
* @param pGVM Pointer to the global VM structure. Optional.
* @thread EMT or session clean up thread.
*/
VMMR0_INT_DECL(int) VMMR0TermVM(PVM pVM, PGVM pGVM)
{
#ifdef VBOX_WITH_PCI_PASSTHROUGH
    PciRawR0TermVM(pVM);
#endif

    /*
     * Tell GVMM what we're up to and check that we only do this once.
     */
    if (GVMMR0DoingTermVM(pVM, pGVM))
    {
        GIMR0TermVM(pVM); /* Assumption: mirrors the GIMR0InitVM call in vmmR0InitVM. */

        /** @todo I wish to call PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu])
         *        here to make sure we don't leak any shared pages if we crash... */
#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
        PGMR0DynMapTermVM(pVM);
#endif
        HMR0TermVM(pVM);
    }

    /*
     * Deregister the logger.
     */
    RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pVM->pSession);
    return VINF_SUCCESS;
}
/**
* Creates R0 thread-context hooks for the current EMT thread.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
*
* @thread EMT(pVCpu)
*/
VMMR0_INT_DECL(int) VMMR0ThreadCtxHooksCreate(PVMCPU pVCpu)
{
    VMCPU_ASSERT_EMT(pVCpu);
    /* Assumption: the hook handle member is vmm.s.hR0ThreadCtx. */
    Assert(pVCpu->vmm.s.hR0ThreadCtx == NIL_RTTHREADCTX);
    int rc = RTThreadCtxHooksCreate(&pVCpu->vmm.s.hR0ThreadCtx);
#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
    if (   RT_FAILURE(rc)
        && rc != VERR_NOT_SUPPORTED)
    {
        Log(("RTThreadCtxHooksCreate failed! rc=%Rrc pVCpu=%p idCpu=%RU32\n", rc, pVCpu, pVCpu->idCpu));
        return rc;
    }
#endif
    return VINF_SUCCESS;
}
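
/* The hooks created here are registered lazily from the VMMR0_DO_HM_RUN path
   via VMMR0ThreadCtxHooksRegister(pVCpu, vmmR0ThreadCtxCallback), so ring-0
   can react when the EMT is preempted or resumed while executing guest code. */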
/**
* Releases the object reference for the thread-context hook.
*
* @param pVCpu Pointer to the VMCPU.
* @remarks Can be called from any thread.
*/
VMMR0_INT_DECL(void) VMMR0ThreadCtxHooksRelease(PVMCPU pVCpu)
{
    RTThreadCtxHooksRelease(pVCpu->vmm.s.hR0ThreadCtx);
}
/**
* Registers the thread-context hook for this VCPU.
*
* @returns VBox status code.
* @param pVCpu Pointer to the VMCPU.
* @param pfnThreadHook Pointer to the thread-context callback.
*
* @thread EMT(pVCpu)
*/
VMMR0_INT_DECL(int) VMMR0ThreadCtxHooksRegister(PVMCPU pVCpu, PFNRTTHREADCTXHOOK pfnThreadHook)
{
    VMCPU_ASSERT_EMT(pVCpu);
    return RTThreadCtxHooksRegister(pVCpu->vmm.s.hR0ThreadCtx, pfnThreadHook, pVCpu);
}
/**
* Deregisters the thread-context hook for this VCPU.
*
* @param pVCpu Pointer to the VMCPU.
*
* @thread EMT(pVCpu)
*/
VMMR0_INT_DECL(void) VMMR0ThreadCtxHooksDeregister(PVMCPU pVCpu)
{
    /* Clear the VCPU <-> host CPU mapping as we've left HM context. See @bugref{7726} comment #19. */
    ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
    if (pVCpu->vmm.s.hR0ThreadCtx != NIL_RTTHREADCTX)
    {
        int rc = RTThreadCtxHooksDeregister(pVCpu->vmm.s.hR0ThreadCtx);
        AssertRC(rc);
    }
}
/**
* Whether thread-context hooks are created (implying they're supported) on this
* platform.
*
* @returns true if the hooks are created, false otherwise.
* @param pVCpu Pointer to the VMCPU.
*/
VMMR0_INT_DECL(bool) VMMR0ThreadCtxHooksAreCreated(PVMCPU pVCpu)
{
    return pVCpu->vmm.s.hR0ThreadCtx != NIL_RTTHREADCTX;
}
/**
* Whether thread-context hooks are registered for this VCPU.
*
* @returns true if registered, false otherwise.
* @param pVCpu Pointer to the VMCPU.
*/
VMMR0_INT_DECL(bool) VMMR0ThreadCtxHooksAreRegistered(PVMCPU pVCpu)
{
    return RTThreadCtxHooksAreRegistered(pVCpu->vmm.s.hR0ThreadCtx);
}
/**
* VMM ring-0 thread-context callback.
*
* This does common HM state updating and calls the HM-specific thread-context
* callback.
*
* @param enmEvent The thread-context event.
* @param pvUser Opaque pointer to the VMCPU.
*
* @thread EMT(pvUser)
*/
static DECLCALLBACK(void) vmmR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, void *pvUser)
{
    PVMCPU pVCpu = (PVMCPU)pvUser;

    switch (enmEvent)
    {
        case RTTHREADCTXEVENT_RESUMED:
        {
            /*
             * Linux may call us with preemption enabled (really!) but technically we
             * cannot get preempted here, otherwise we end up in an infinite recursion
             * scenario (i.e. preempted in resume hook -> preempt hook -> resume hook...
             * ad infinitum). Let's just disable preemption for now...
             */
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);

            /* We need to update the VCPU <-> host CPU mapping. */
            RTCPUID  idHostCpu;
            uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
            pVCpu->iHostCpuSet = iHostCpuSet;
            ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);

            /* In the very unlikely event that the GIP delta for the CPU we're
               rescheduled needs calculating, try force a return to ring-3.
               We unfortunately cannot do the measurements right here. */
            if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);

            /* Invoke the HM-specific thread-context callback. */
            HMR0ThreadCtxCallback(enmEvent, pvUser);

            /* Restore preemption. */
            RTThreadPreemptRestore(&PreemptState);
            break;
        }

        case RTTHREADCTXEVENT_PREEMPTING:
        {
            /* Invoke the HM-specific thread-context callback. */
            HMR0ThreadCtxCallback(enmEvent, pvUser);

            /*
             * Sigh. See VMMGetCpu() used by VMCPU_ASSERT_EMT(). We cannot let several VCPUs
             * have the same host CPU associated with it.
             */
            pVCpu->iHostCpuSet = UINT32_MAX;
            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
            break;
        }

        default:
            /* Invoke the HM-specific thread-context callback. */
            HMR0ThreadCtxCallback(enmEvent, pvUser);
            break;
    }
}
#ifdef VBOX_WITH_STATISTICS
/**
 * Record return code statistics.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rc          The status code.
 */
static void vmmR0RecordRC(PVM pVM, PVMCPU pVCpu, int rc)
{
    /*
     * Collect statistics.  (Note: where the original statements were stripped,
     * the StatRZ* member names below are assumed from the VMMInternal.h naming
     * convention.)
     */
    switch (rc)
    {
        case VINF_SUCCESS:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetNormal);
            break;
        case VINF_EM_RAW_INTERRUPT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterrupt);
            break;
        case VINF_EM_RAW_INTERRUPT_HYPER:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptHyper);
            break;
        case VINF_EM_RAW_GUEST_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGuestTrap);
            break;
        case VINF_EM_RAW_RING_SWITCH:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitch);
            break;
        case VINF_EM_RAW_RING_SWITCH_INT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRingSwitchInt);
            break;
        case VINF_EM_RAW_STALE_SELECTOR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetStaleSelector);
            break;
        case VINF_EM_RAW_IRET_TRAP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIRETTrap);
            break;
        case VINF_IOM_R3_IOPORT_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIORead);
            break;
        case VINF_IOM_R3_IOPORT_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOWrite);
            break;
        case VINF_IOM_R3_MMIO_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIORead);
            break;
        case VINF_IOM_R3_MMIO_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOWrite);
            break;
        case VINF_IOM_R3_MMIO_READ_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOReadWrite);
            break;
        case VINF_PATM_HC_MMIO_PATCH_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchRead);
            break;
        case VINF_PATM_HC_MMIO_PATCH_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMMIOPatchWrite);
            break;
        case VINF_CPUM_R3_MSR_READ:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRRead);
            break;
        case VINF_CPUM_R3_MSR_WRITE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMSRWrite);
            break;
        case VINF_EM_RAW_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetEmulate);
            break;
        case VINF_EM_RAW_EMULATE_IO_BLOCK:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIOBlockEmulate);
            break;
        case VINF_PATCH_EMULATE_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchEmulate);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetLdtFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetGdtFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetIdtFault);
            break;
        case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTssFault);
            break;
        case VINF_CSAM_PENDING_ACTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetCSAMTask);
            break;
        case VINF_PGM_SYNC_CR3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetSyncCR3);
            break;
        case VINF_PATM_PATCH_INT3:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchInt3);
            break;
        case VINF_PATM_PATCH_TRAP_PF:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchPF);
            break;
        case VINF_PATM_PATCH_TRAP_GP:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchGP);
            break;
        case VINF_PATM_PENDING_IRQ_AFTER_IRET:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchIretIRQ);
            break;
        case VINF_EM_RESCHEDULE_REM:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetRescheduleREM);
            break;
        case VINF_EM_RAW_TO_R3:
            if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3TMVirt);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3HandyPages);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_QUEUES))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3PDMQueues);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Rendezvous);
            else if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3DMA);
            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Timer);
            else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3CritSect);
            else
                STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetToR3Unknown);
            break;
        case VINF_EM_RAW_TIMER_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetTimerPending);
            break;
        case VINF_EM_RAW_INTERRUPT_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetInterruptPending);
            break;
        case VINF_VMM_CALL_HOST:
            switch (pVCpu->vmm.s.enmCallRing3Operation)
            {
                case VMMCALLRING3_PDM_CRIT_SECT_ENTER:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMCritSectEnter);
                    break;
                case VMMCALLRING3_PDM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPDMLock);
                    break;
                case VMMCALLRING3_PGM_POOL_GROW:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMPoolGrow);
                    break;
                case VMMCALLRING3_PGM_LOCK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMLock);
                    break;
                case VMMCALLRING3_PGM_MAP_CHUNK:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMMapChunk);
                    break;
                case VMMCALLRING3_PGM_ALLOCATE_HANDY_PAGES:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallPGMAllocHandy);
                    break;
                case VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallRemReplay);
                    break;
                case VMMCALLRING3_VMM_LOGGER_FLUSH:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallLogFlush);
                    break;
                case VMMCALLRING3_VM_SET_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetError);
                    break;
                case VMMCALLRING3_VM_SET_RUNTIME_ERROR:
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRZCallVMSetRuntimeError);
                    break;
                case VMMCALLRING3_VM_R0_ASSERTION:
                default:
                    break;
            }
            break;
        case VINF_PATM_DUPLICATE_FUNCTION:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPATMDuplicateFn);
            break;
        case VINF_PGM_CHANGE_MODE:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMChangeMode);
            break;
        case VINF_PGM_POOL_FLUSH_PENDING:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPGMFlushPending);
            break;
        case VINF_EM_PENDING_REQUEST:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPendingRequest);
            break;
        case VINF_EM_HM_PATCH_TPR_INSTR:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetPatchTPR);
            break;
        default:
            STAM_COUNTER_INC(&pVM->vmm.s.StatRZRetMisc);
            break;
    }
}
#endif /* VBOX_WITH_STATISTICS */
/**
* Unused ring-0 entry point that used to be called from the interrupt gate.
*
* Will be removed one of the next times we do a major SUPDrv version bump.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param enmOperation Which operation to execute.
* @param pvArg Argument to the operation.
* @remarks Assume called with interrupts disabled.
*/
VMMR0DECL(int) VMMR0EntryInt(PVM pVM, VMMR0OPERATION enmOperation, void *pvArg)
{
    /*
     * We're returning VERR_NOT_SUPPORTED here so we've got something else
     * than -1, which the interrupt gate glue code might return.
     */
    NOREF(pVM); NOREF(enmOperation); NOREF(pvArg);
    return VERR_NOT_SUPPORTED;
}
/**
* The Ring 0 entry point, called by the fast-ioctl path.
*
* @param pVM Pointer to the VM.
* The return code is stored in pVM->vmm.s.iLastGZRc.
* @param idCpu The Virtual CPU ID of the calling EMT.
* @param enmOperation Which operation to execute.
* @remarks Assume called with interrupts _enabled_.
*/
VMMR0DECL(void) VMMR0EntryFast(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation)
{
    /*
     * Validation.
     */
    if (RT_UNLIKELY(idCpu >= pVM->cCpus))
        return;
    PVMCPU pVCpu = &pVM->aCpus[idCpu];
    if (RT_UNLIKELY(pVCpu->hNativeThreadR0 != RTThreadNativeSelf()))
        return;

    /*
     * Perform requested operation.
     */
    switch (enmOperation)
    {
        /*
         * Switch to GC and run guest raw mode code.
         * Disable interrupts before doing the world switch.
         */
        case VMMR0_DO_RAW_RUN:
        {
#ifdef VBOX_WITH_RAW_MODE
            /* Some safety precautions first.  (Assumption: the original check was
               stripped; verifying the hyper CR3 mirrors the VERR_PGM_NO_CR3_SHADOW_ROOT
               handling in VMMR0_DO_CALL_HYPERVISOR.) */
            if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
            {
                pVCpu->vmm.s.iLastGZRc = VERR_PGM_NO_CR3_SHADOW_ROOT;
                break;
            }
#endif

            /*
             * Disable preemption.
             */
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);

            /*
             * Get the host CPU identifiers, make sure they are valid and that
             * we've got a TSC delta for the CPU.
             */
            RTCPUID  idHostCpu;
            uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
            if (RT_LIKELY(   iHostCpuSet < RTCPUSET_MAX_CPUS
                          && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
            {
                /*
                 * Commit the CPU identifiers and update the periodic preemption timer if it's active.
                 */
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
                CPUMR0SetLApic(pVCpu, iHostCpuSet);
#endif
                pVCpu->iHostCpuSet = iHostCpuSet;
                ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);

                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

                /*
                 * We might need to disable VT-x if the active switcher turns off paging.
                 */
                bool fVTxDisabled;
                int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable interrupts and run raw-mode code.  The loop is for efficiently
                     * dispatching tracepoints that fired in raw-mode context.
                     */
                    RTCCUINTREG uFlags = ASMIntDisableFlags();

                    for (;;)
                    {
                        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
                        TMNotifyStartOfExecution(pVCpu);

                        rc = pVM->vmm.s.pfnR0ToRawMode(pVM);
                        pVCpu->vmm.s.iLastGZRc = rc;

                        TMNotifyEndOfExecution(pVCpu);
                        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

                        if (rc != VINF_VMM_CALL_TRACER)
                            break;
                        SUPR0TracerUmodProbeFire(pVM->pSession, &pVCpu->vmm.s.TracerCtx);
                    }

                    /*
                     * Re-enable VT-x before we dispatch any pending host interrupts and
                     * re-enable interrupts.
                     */
                    HMR0LeaveSwitcher(pVM, fVTxDisabled);

                    if (    rc == VINF_EM_RAW_INTERRUPT
                        ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                        TRPMR0DispatchHostInterrupt(pVM);

                    ASMSetFlags(uFlags);

                    /* Fire dtrace probe and collect statistics. */
                    VBOXVMM_R0_VMM_RETURN_TO_RING3_RC(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
#ifdef VBOX_WITH_STATISTICS
                    STAM_COUNTER_INC(&pVM->vmm.s.StatRunRC);
                    vmmR0RecordRC(pVM, pVCpu, rc);
#endif
                }
                else
                    pVCpu->vmm.s.iLastGZRc = rc;

                /*
                 * Invalidate the host CPU identifiers as we restore preemption.
                 */
                pVCpu->iHostCpuSet = UINT32_MAX;
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
                RTThreadPreemptRestore(&PreemptState);
            }
            /*
             * Invalid CPU set index or TSC delta in need of measuring.
             */
            else
            {
                RTThreadPreemptRestore(&PreemptState);
                if (iHostCpuSet < RTCPUSET_MAX_CPUS)
                {
                    int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
                                                            2 /*cMsWaitRetry*/, 5 * RT_MS_1SEC /*cMsWaitThread*/,
                                                            0 /*default cTries*/);
                    if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
                        pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
                    else
                        pVCpu->vmm.s.iLastGZRc = rc;
                }
                else
                    pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
            }
            break;
        }
/*
* Run guest code using the available hardware acceleration technology.
*/
case VMMR0_DO_HM_RUN:
        {
            /*
             * Disable preemption.
             */
            RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
            RTThreadPreemptDisable(&PreemptState);

            /*
             * Get the host CPU identifiers, make sure they are valid and that
             * we've got a TSC delta for the CPU.
             */
            RTCPUID  idHostCpu;
            uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
            if (RT_LIKELY(   iHostCpuSet < RTCPUSET_MAX_CPUS
                          && SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
            {
                pVCpu->iHostCpuSet = iHostCpuSet;
                ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);

                /*
                 * Update the periodic preemption timer if it's active.
                 */
                if (pVM->vmm.s.fUsePeriodicPreemptionTimers)
                    GVMMR0SchedUpdatePeriodicPreemptionTimer(pVM, pVCpu->idHostCpu, TMCalcHostTimerFrequency(pVM, pVCpu));

#ifdef LOG_ENABLED
                /*
                 * Ugly: Lazy registration of ring 0 loggers.
                 */
                if (pVCpu->idCpu > 0)
                {
                    PVMMR0LOGGER pR0Logger = pVCpu->vmm.s.pR0LoggerR0;
                    if (   pR0Logger
                        && RT_UNLIKELY(!pR0Logger->fRegistered))
                    {
                        RTLogSetDefaultInstanceThread(&pR0Logger->Logger, (uintptr_t)pVM->pSession);
                        pR0Logger->fRegistered = true;
                    }
                }
#endif

                int  rc;
                bool fPreemptRestored = false;
                if (!HMR0SuspendPending())
                {
                    /*
                     * Register thread-context hooks if required.
                     */
                    if (    VMMR0ThreadCtxHooksAreCreated(pVCpu)
                        && !VMMR0ThreadCtxHooksAreRegistered(pVCpu))
                    {
                        rc = VMMR0ThreadCtxHooksRegister(pVCpu, vmmR0ThreadCtxCallback);
                        AssertRC(rc);
                    }

                    /*
                     * Enter HM context.
                     */
                    rc = HMR0Enter(pVM, pVCpu);
                    if (RT_SUCCESS(rc))
                    {
                        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);

                        /*
                         * When preemption hooks are in place, enable preemption now that
                         * we're in HM context.
                         */
                        if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
                        {
                            fPreemptRestored = true;
                            RTThreadPreemptRestore(&PreemptState);
                        }

                        /*
                         * Setup the longjmp machinery and execute guest code (calls HMR0RunGuestCode).
                         */
                        rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, HMR0RunGuestCode, pVM, pVCpu);
                        pVCpu->vmm.s.iLastGZRc = rc;

                        /*
                         * Assert sanity on the way out.  Using manual assertions code here as normal
                         * assertions are going to panic the host since we're outside the setjmp/longjmp zone.
                         */
                        if (RT_UNLIKELY(   VMCPU_GET_STATE(pVCpu) != VMCPUSTATE_STARTED_HM
                                        && RT_SUCCESS_NP(rc)))
                        {
                            pVM->vmm.s.szRing0AssertMsg1[0] = '\0';
                            RTStrPrintf(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2),
                                        "Got VMCPU state %d, expected %d.\n", VMCPU_GET_STATE(pVCpu), VMCPUSTATE_STARTED_HM);
                            rc = pVCpu->vmm.s.iLastGZRc = VERR_VMM_WRONG_HM_VMCPU_STATE;
                        }
                        VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
                    }
                    else
                        pVCpu->vmm.s.iLastGZRc = rc;
                }
                /*
                 * The system is about to go into suspend mode; go back to ring 3.
                 */
                else
                    rc = pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_INTERRUPT;

                /*
                 * Invalidate the host CPU identifiers as we restore preemption.
                 */
                pVCpu->iHostCpuSet = UINT32_MAX;
                ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
                if (!fPreemptRestored)
                    RTThreadPreemptRestore(&PreemptState);

                /* Fire dtrace probe and collect statistics. */
                VBOXVMM_R0_VMM_RETURN_TO_RING3_HM(pVCpu, CPUMQueryGuestCtxPtr(pVCpu), rc);
#ifdef VBOX_WITH_STATISTICS
                vmmR0RecordRC(pVM, pVCpu, rc);
#endif
            }
            /*
             * Invalid CPU set index or TSC delta in need of measuring.
             */
            else
            {
                RTThreadPreemptRestore(&PreemptState);
                if (iHostCpuSet < RTCPUSET_MAX_CPUS)
                {
                    int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
                                                            2 /*cMsWaitRetry*/, 5 * RT_MS_1SEC /*cMsWaitThread*/,
                                                            0 /*default cTries*/);
                    if (RT_SUCCESS(rc) || rc == VERR_CPU_OFFLINE)
                        pVCpu->vmm.s.iLastGZRc = VINF_EM_RAW_TO_R3;
                    else
                        pVCpu->vmm.s.iLastGZRc = rc;
                }
                else
                    pVCpu->vmm.s.iLastGZRc = VERR_INVALID_CPU_INDEX;
            }
            break;
        }
/*
* For profiling.
*/
        case VMMR0_DO_NOP:
            pVCpu->vmm.s.iLastGZRc = VINF_SUCCESS;
            break;

        /*
         * Impossible.
         */
        default:
            AssertMsgFailed(("%#x\n", enmOperation));
            pVCpu->vmm.s.iLastGZRc = VERR_NOT_SUPPORTED;
            break;
}
}
/**
* Validates a session or VM session argument.
*
* @returns true / false accordingly.
 * @param   pVM             Pointer to the VM.
 * @param   pClaimedSession The session claimed to own the VM, if any.
 * @param   pSession        The session argument.
*/
DECLINLINE(bool) vmmR0IsValidSession(PVM pVM, PSUPDRVSESSION pClaimedSession, PSUPDRVSESSION pSession)
{
/* This must be set! */
if (!pSession)
return false;
/* Only one out of the two. */
if (pVM && pClaimedSession)
return false;
    if (pVM)
        pClaimedSession = pVM->pSession;
    return pClaimedSession == pSession;
}
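
/* Example: the VMMR0_DO_INTNET_* cases below call this with the pSession
   member of the request packet as pClaimedSession.  With a NULL pVM the
   claimed session must equal the caller's session; with a non-NULL pVM the
   VM's own session is compared instead and no session may be claimed. */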
/**
 * VMMR0EntryEx worker function, either called directly or, whenever possible,
 * called through a longjmp so we can exit safely on failure.
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
 *                      is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
 *                      NIL_RTR0PTR.
* @param enmOperation Which operation to execute.
* @param pReqHdr This points to a SUPVMMR0REQHDR packet. Optional.
* The support driver validates this if it's present.
* @param u64Arg Some simple constant argument.
* @param pSession The session of the caller.
* @remarks Assume called with interrupts _enabled_.
*/
static int vmmR0EntryExWorker(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReqHdr, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
/*
* Common VM pointer validation.
*/
if (pVM)
{
        if (RT_UNLIKELY(   !VALID_PTR(pVM)
                        || !VALID_PTR(pVM->pVMR0)))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p! (op=%d)\n", pVM, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(   pVM->enmVMState < VMSTATE_CREATING
                        || pVM->enmVMState > VMSTATE_TERMINATED
                        || pVM->pVMR0 != pVM))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid pVM=%p:{enmVMState=%d, .pVMR0=%p}! (op=%d)\n",
                        pVM, pVM->enmVMState, pVM->pVMR0, enmOperation);
            return VERR_INVALID_POINTER;
        }
        if (RT_UNLIKELY(   idCpu >= pVM->cCpus
                        && idCpu != NIL_VMCPUID))
        {
            SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u cCpus=%u\n", idCpu, pVM->cCpus);
            return VERR_INVALID_PARAMETER;
        }
    }
    else if (RT_UNLIKELY(idCpu != NIL_VMCPUID))
    {
        SUPR0Printf("vmmR0EntryExWorker: Invalid idCpu=%u\n", idCpu);
        return VERR_INVALID_PARAMETER;
    }
switch (enmOperation)
{
/*
* GVM requests
*/
        case VMMR0_DO_GVMM_CREATE_VM:
            if (pVM || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return GVMMR0CreateVMReq((PGVMMCREATEVMREQ)pReqHdr);

        case VMMR0_DO_GVMM_DESTROY_VM:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0DestroyVM(pVM);

        case VMMR0_DO_GVMM_REGISTER_VMCPU:
        {
            if (!pVM)
                return VERR_INVALID_PARAMETER;
            return GVMMR0RegisterVCpu(pVM, idCpu);
        }

        case VMMR0_DO_GVMM_SCHED_HALT:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedHalt(pVM, idCpu, u64Arg);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUp(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_POKE:
            if (pReqHdr || u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoke(pVM, idCpu);

        case VMMR0_DO_GVMM_SCHED_WAKE_UP_AND_POKE_CPUS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedWakeUpAndPokeCpusReq(pVM, (PGVMMSCHEDWAKEUPANDPOKECPUSREQ)pReqHdr);

        case VMMR0_DO_GVMM_SCHED_POLL:
            if (pReqHdr || u64Arg > 1)
                return VERR_INVALID_PARAMETER;
            return GVMMR0SchedPoll(pVM, idCpu, !!u64Arg);

        case VMMR0_DO_GVMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0QueryStatisticsReq(pVM, (PGVMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GVMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GVMMR0ResetStatisticsReq(pVM, (PGVMMRESETSTATISTICSSREQ)pReqHdr);
/*
* Initialize the R0 part of a VM instance.
*/
        case VMMR0_DO_VMMR0_INIT:
            return vmmR0InitVM(pVM, RT_LODWORD(u64Arg), RT_HIDWORD(u64Arg));
/*
* Terminate the R0 part of a VM instance.
*/
        case VMMR0_DO_VMMR0_TERM:
            return VMMR0TermVM(pVM, NULL);
/*
* Attempt to enable hm mode and check the current setting.
*/
case VMMR0_DO_HM_ENABLE:
return HMR0EnableAllCpus(pVM);
/*
* Setup the hardware accelerated session.
*/
case VMMR0_DO_HM_SETUP_VM:
return HMR0SetupVM(pVM);
/*
* Switch to RC to execute Hypervisor function.
*/
case VMMR0_DO_CALL_HYPERVISOR:
        {
            /*
             * Validate input / context.
             */
            if (RT_UNLIKELY(idCpu != 0))
                return VERR_INVALID_CPU_ID;
            if (RT_UNLIKELY(pVM->cCpus != 1))
                return VERR_INVALID_PARAMETER;
            PVMCPU pVCpu = &pVM->aCpus[idCpu];
#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
            if (RT_UNLIKELY(!PGMGetHyperCR3(pVCpu)))
                return VERR_PGM_NO_CR3_SHADOW_ROOT;
#endif

            /*
             * Disable interrupts.
             */
            RTCCUINTREG fFlags = ASMIntDisableFlags();

            /*
             * Get the host CPU identifiers, make sure they are valid and that
             * we've got a TSC delta for the CPU.
             */
            RTCPUID  idHostCpu;
            uint32_t iHostCpuSet = RTMpCurSetIndexAndId(&idHostCpu);
            if (RT_UNLIKELY(iHostCpuSet >= RTCPUSET_MAX_CPUS))
            {
                ASMSetFlags(fFlags);
                return VERR_INVALID_CPU_INDEX;
            }
            if (RT_UNLIKELY(!SUPIsTscDeltaAvailableForCpuSetIndex(iHostCpuSet)))
            {
                ASMSetFlags(fFlags);
                int rc = SUPR0TscDeltaMeasureBySetIndex(pVM->pSession, iHostCpuSet, 0 /*fFlags*/,
                                                        2 /*cMsWaitRetry*/, 5 * RT_MS_1SEC /*cMsWaitThread*/,
                                                        0 /*default cTries*/);
                return rc;
            }

            /*
             * Commit the CPU identifiers.
             */
#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
            CPUMR0SetLApic(pVCpu, iHostCpuSet);
#endif
            pVCpu->iHostCpuSet = iHostCpuSet;
            ASMAtomicWriteU32(&pVCpu->idHostCpu, idHostCpu);

            /*
             * We might need to disable VT-x if the active switcher turns off paging.
             */
            bool fVTxDisabled;
            int rc = HMR0EnterSwitcher(pVM, pVM->vmm.s.enmSwitcher, &fVTxDisabled);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Go through the wormhole...
                 */
                rc = pVM->vmm.s.pfnR0ToRawMode(pVM);

                /*
                 * Re-enable VT-x before we dispatch any pending host interrupts.
                 */
                HMR0LeaveSwitcher(pVM, fVTxDisabled);

                if (    rc == VINF_EM_RAW_INTERRUPT
                    ||  rc == VINF_EM_RAW_INTERRUPT_HYPER)
                    TRPMR0DispatchHostInterrupt(pVM);
            }

            /*
             * Invalidate the host CPU identifiers as we restore interrupts.
             */
            pVCpu->iHostCpuSet = UINT32_MAX;
            ASMAtomicWriteU32(&pVCpu->idHostCpu, NIL_RTCPUID);
            ASMSetFlags(fFlags);
            return rc;
        }
/*
* PGM wrappers.
*/
        case VMMR0_DO_PGM_ALLOCATE_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_FLUSH_HANDY_PAGES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysFlushHandyPages(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_ALLOCATE_LARGE_HANDY_PAGE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysAllocateLargeHandyPage(pVM, &pVM->aCpus[idCpu]);

        case VMMR0_DO_PGM_PHYS_SETUP_IOMMU:
            if (idCpu != 0)
                return VERR_INVALID_CPU_ID;
            return PGMR0PhysSetupIommu(pVM);
/*
* GMM wrappers.
*/
        case VMMR0_DO_GMM_INITIAL_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0InitialReservationReq(pVM, idCpu, (PGMMINITIALRESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_UPDATE_RESERVATION:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UpdateReservationReq(pVM, idCpu, (PGMMUPDATERESERVATIONREQ)pReqHdr);

        case VMMR0_DO_GMM_ALLOCATE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0AllocatePagesReq(pVM, idCpu, (PGMMALLOCATEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreePagesReq(pVM, idCpu, (PGMMFREEPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_FREE_LARGE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FreeLargePageReq(pVM, idCpu, (PGMMFREELARGEPAGEREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_HYPERVISOR_MEM_STATS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryHypervisorMemoryStatsReq(pVM, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_QUERY_MEM_STATS:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryMemoryStatsReq(pVM, idCpu, (PGMMMEMSTATSREQ)pReqHdr);

        case VMMR0_DO_GMM_BALLOONED_PAGES:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0BalloonedPagesReq(pVM, idCpu, (PGMMBALLOONEDPAGESREQ)pReqHdr);

        case VMMR0_DO_GMM_MAP_UNMAP_CHUNK:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0MapUnmapChunkReq(pVM, (PGMMMAPUNMAPCHUNKREQ)pReqHdr);

        case VMMR0_DO_GMM_SEED_CHUNK:
            if (pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0SeedChunk(pVM, idCpu, (RTR3PTR)u64Arg);

        case VMMR0_DO_GMM_REGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0RegisterSharedModuleReq(pVM, idCpu, (PGMMREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_UNREGISTER_SHARED_MODULE:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0UnregisterSharedModuleReq(pVM, idCpu, (PGMMUNREGISTERSHAREDMODULEREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_SHARED_MODULES:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (   u64Arg
                || pReqHdr)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetSharedModules(pVM, idCpu);

#ifdef VBOX_WITH_PAGE_SHARING
        case VMMR0_DO_GMM_CHECK_SHARED_MODULES:
        {
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            if (   u64Arg
                || pReqHdr)
                return VERR_INVALID_PARAMETER;

            PVMCPU pVCpu = &pVM->aCpus[idCpu];
            Assert(pVCpu->hNativeThreadR0 == RTThreadNativeSelf());

# ifdef DEBUG_sandervl
            /* Make sure that log flushes can jump back to ring-3; annoying to get an incomplete log (this is risky though as the code doesn't take this into account). */
            /* Todo: this can have bad side effects for unexpected jumps back to r3. */
            int rc = GMMR0CheckSharedModulesStart(pVM);
            if (rc == VINF_SUCCESS)
            {
                rc = vmmR0CallRing3SetJmp(&pVCpu->vmm.s.CallRing3JmpBufR0, GMMR0CheckSharedModules, pVM, pVCpu); /* this may resume code. */
                Assert(   rc == VINF_SUCCESS
                       || (rc == VINF_VMM_CALL_HOST && pVCpu->vmm.s.enmCallRing3Operation == VMMCALLRING3_VMM_LOGGER_FLUSH));
                GMMR0CheckSharedModulesEnd(pVM);
            }
# else
            int rc = GMMR0CheckSharedModules(pVM, pVCpu);
# endif
            return rc;
        }
#endif

#if defined(VBOX_STRICT) && HC_ARCH_BITS == 64
        case VMMR0_DO_GMM_FIND_DUPLICATE_PAGE:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0FindDuplicatePageReq(pVM, (PGMMFINDDUPLICATEPAGEREQ)pReqHdr);
#endif

        case VMMR0_DO_GMM_QUERY_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0QueryStatisticsReq(pVM, (PGMMQUERYSTATISTICSSREQ)pReqHdr);

        case VMMR0_DO_GMM_RESET_STATISTICS:
            if (u64Arg)
                return VERR_INVALID_PARAMETER;
            return GMMR0ResetStatisticsReq(pVM, (PGMMRESETSTATISTICSSREQ)pReqHdr);
/*
* A quick GCFGM mock-up.
*/
/** @todo GCFGM with proper access control, ring-3 management interface and all that. */
case VMMR0_DO_GCFGM_SET_VALUE:
        case VMMR0_DO_GCFGM_QUERY_VALUE:
        {
            if (pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            PGCFGMVALUEREQ pReq = (PGCFGMVALUEREQ)pReqHdr;
            if (pReq->Hdr.cbReq != sizeof(*pReq))
                return VERR_INVALID_PARAMETER;
            int rc;
            if (enmOperation == VMMR0_DO_GCFGM_SET_VALUE)
            {
                rc = GVMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0SetConfig(pReq->pSession, &pReq->szName[0], pReq->u64Value);
            }
            else
            {
                rc = GVMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
                //if (rc == VERR_CFGM_VALUE_NOT_FOUND)
                //    rc = GMMR0QueryConfig(pReq->pSession, &pReq->szName[0], &pReq->u64Value);
            }
return rc;
}
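
        /* The commented-out GMMR0SetConfig/GMMR0QueryConfig calls above sketch
           a fallback to GMM for values GVMM doesn't know about; only the GVMM
           path is currently live. */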
/*
* PDM Wrappers.
*/
        case VMMR0_DO_PDM_DRIVER_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DriverCallReqHandler(pVM, (PPDMDRIVERCALLREQHANDLERREQ)pReqHdr);
        }

        case VMMR0_DO_PDM_DEVICE_CALL_REQ_HANDLER:
        {
            if (!pVM || !pReqHdr || u64Arg || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PDMR0DeviceCallReqHandler(pVM, (PPDMDEVICECALLREQHANDLERREQ)pReqHdr);
        }
/*
* Requests to the internal networking service.
*/
case VMMR0_DO_INTNET_OPEN:
        {
            PINTNETOPENREQ pReq = (PINTNETOPENREQ)pReqHdr;
            if (u64Arg || !pReq || !vmmR0IsValidSession(pVM, pReq->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0OpenReq(pSession, pReq);
        }

        case VMMR0_DO_INTNET_IF_CLOSE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFCLOSEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfCloseReq(pSession, (PINTNETIFCLOSEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_GET_BUFFER_PTRS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFGETBUFFERPTRSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfGetBufferPtrsReq(pSession, (PINTNETIFGETBUFFERPTRSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_PROMISCUOUS_MODE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetPromiscuousModeReq(pSession, (PINTNETIFSETPROMISCUOUSMODEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_MAC_ADDRESS:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETMACADDRESSREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetMacAddressReq(pSession, (PINTNETIFSETMACADDRESSREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SET_ACTIVE:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSETACTIVEREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSetActiveReq(pSession, (PINTNETIFSETACTIVEREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_SEND:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfSendReq(pSession, (PINTNETIFSENDREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfWaitReq(pSession, (PINTNETIFWAITREQ)pReqHdr);

        case VMMR0_DO_INTNET_IF_ABORT_WAIT:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PINTNETIFABORTWAITREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return IntNetR0IfAbortWaitReq(pSession, (PINTNETIFABORTWAITREQ)pReqHdr);
#ifdef VBOX_WITH_PCI_PASSTHROUGH
/*
* Requests to host PCI driver service.
*/
case VMMR0_DO_PCIRAW_REQ:
            if (u64Arg || !pReqHdr || !vmmR0IsValidSession(pVM, ((PPCIRAWSENDREQ)pReqHdr)->pSession, pSession) || idCpu != NIL_VMCPUID)
                return VERR_INVALID_PARAMETER;
            return PciRawR0ProcessReq(pSession, pVM, (PPCIRAWSENDREQ)pReqHdr);
#endif
/*
* For profiling.
*/
case VMMR0_DO_NOP:
case VMMR0_DO_SLOW_NOP:
return VINF_SUCCESS;
/*
* For testing Ring-0 APIs invoked in this environment.
*/
case VMMR0_DO_TESTS:
/** @todo make new test */
return VINF_SUCCESS;
#if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
        case VMMR0_DO_TEST_SWITCHER3264:
            if (idCpu == NIL_VMCPUID)
                return VERR_INVALID_CPU_ID;
            return HMR0TestSwitcher3264(pVM);
#endif
default:
            /*
             * We're returning VERR_NOT_SUPPORTED here so we've got something else
             * than -1, which the interrupt gate glue code might return.
             */
            Log(("operation %#x is not supported\n", enmOperation));
return VERR_NOT_SUPPORTED;
}
}
/**
* Argument for vmmR0EntryExWrapper containing the arguments for VMMR0EntryEx.
*/
typedef struct VMMR0ENTRYEXARGS
{
    PVM                 pVM;
    VMCPUID             idCpu;
    VMMR0OPERATION      enmOperation;
    PSUPVMMR0REQHDR     pReq;
    uint64_t            u64Arg;
    PSUPDRVSESSION      pSession;
} VMMR0ENTRYEXARGS;
/** Pointer to a vmmR0EntryExWrapper argument package. */
typedef VMMR0ENTRYEXARGS *PVMMR0ENTRYEXARGS;

/**
 * This is just a longjmp wrapper function for VMMR0EntryEx calls.
 *
 * @returns VBox status code.
 * @param   pvArgs      The argument package.
 */
static DECLCALLBACK(int) vmmR0EntryExWrapper(void *pvArgs)
{
    return vmmR0EntryExWorker(((PVMMR0ENTRYEXARGS)pvArgs)->pVM,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->idCpu,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->enmOperation,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pReq,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->u64Arg,
                              ((PVMMR0ENTRYEXARGS)pvArgs)->pSession);
}
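
/* The wrapper exists because vmmR0CallRing3SetJmpEx only forwards a single
   void pointer: VMMR0EntryEx below packs its six parameters into a
   stack-allocated VMMR0ENTRYEXARGS and passes this wrapper as the callback. */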
/**
* The Ring 0 entry point, called by the support library (SUP).
*
* @returns VBox status code.
* @param pVM Pointer to the VM.
* @param idCpu Virtual CPU ID argument. Must be NIL_VMCPUID if pVM
* is NIL_RTR0PTR, and may be NIL_VMCPUID if it isn't
* @param enmOperation Which operation to execute.
* @param pReq Pointer to the SUPVMMR0REQHDR packet. Optional.
* @param u64Arg Some simple constant argument.
* @param pSession The session of the caller.
* @remarks Assume called with interrupts _enabled_.
*/
VMMR0DECL(int) VMMR0EntryEx(PVM pVM, VMCPUID idCpu, VMMR0OPERATION enmOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession)
{
    /*
     * Requests that should only happen on the EMT thread will be
     * wrapped in a setjmp so we can assert without causing trouble.
     */
    if (   VALID_PTR(pVM)
        && pVM->pVMR0
        && idCpu < pVM->cCpus)
    {
        switch (enmOperation)
        {
            /* These might/will be called before VMMR3Init. */
            case VMMR0_DO_GMM_INITIAL_RESERVATION:
            case VMMR0_DO_GMM_UPDATE_RESERVATION:
            case VMMR0_DO_GMM_ALLOCATE_PAGES:
            case VMMR0_DO_GMM_FREE_PAGES:
            case VMMR0_DO_GMM_BALLOONED_PAGES:
            /* On the mac we might not have a valid jmp buf, so check these as well. */
            case VMMR0_DO_VMMR0_INIT:
            case VMMR0_DO_VMMR0_TERM:
            {
                PVMCPU pVCpu = &pVM->aCpus[idCpu];

                if (!pVCpu->vmm.s.CallRing3JmpBufR0.pvSavedStack)
                    break;

                /** @todo validate this EMT claim... GVM knows. */
                VMMR0ENTRYEXARGS Args;
                Args.pVM          = pVM;
                Args.idCpu        = idCpu;
                Args.enmOperation = enmOperation;
                Args.pReq         = pReq;
                Args.u64Arg       = u64Arg;
                Args.pSession     = pSession;
                return vmmR0CallRing3SetJmpEx(&pVCpu->vmm.s.CallRing3JmpBufR0, vmmR0EntryExWrapper, &Args);
            }

            default:
                break;
        }
    }
    return vmmR0EntryExWorker(pVM, idCpu, enmOperation, pReq, u64Arg, pSession);
}
/**
* Checks whether we've armed the ring-0 long jump machinery.
*
* @returns @c true / @c false
* @param pVCpu Pointer to the VMCPU.
* @thread EMT
* @sa VMMIsLongJumpArmed
*/
VMMR0_INT_DECL(bool) VMMR0IsLongJumpArmed(PVMCPU pVCpu)
{
#ifdef RT_ARCH_X86
    return pVCpu->vmm.s.CallRing3JmpBufR0.eip
        && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
#else
    return pVCpu->vmm.s.CallRing3JmpBufR0.rip
        && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
#endif
}
/**
* Checks whether we've done a ring-3 long jump.
*
* @returns @c true / @c false
* @param pVCpu Pointer to the VMCPU.
* @thread EMT
*/
VMMR0_INT_DECL(bool) VMMR0IsInRing3LongJump(PVMCPU pVCpu)
{
    return pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call;
}
/**
* Internal R0 logger worker: Flush logger.
*
* @param pLogger The logger instance to flush.
* @remark This function must be exported!
*/
VMMR0DECL(void) vmmR0LoggerFlush(PRTLOGGER pLogger)
{
#ifdef LOG_ENABLED
    /*
     * Convert the pLogger into a VM handle and 'call' back to Ring-3.
     * (This is a bit paranoid code.)
     */
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (   !VALID_PTR(pR0Logger)
        || !VALID_PTR(pR0Logger + 1)
        || pLogger->u32Magic != RTLOGGER_MAGIC)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pLogger=%p!\n", pLogger);
# endif
        return;
    }
    if (pR0Logger->fFlushingDisabled)
        return; /* quietly */

    PVM pVM = pR0Logger->pVM;
    if (   !VALID_PTR(pVM)
        || pVM->pVMR0 != pVM)
    {
# ifdef DEBUG
        SUPR0Printf("vmmR0LoggerFlush: pVM=%p! pVMR0=%p! pLogger=%p\n", pVM, pVM->pVMR0, pLogger);
# endif
        return;
    }

    PVMCPU pVCpu = VMMGetCpu(pVM);
    if (pVCpu)
    {
        /*
         * Check that the jump buffer is armed.
         */
# ifdef RT_ARCH_X86
        if (   !pVCpu->vmm.s.CallRing3JmpBufR0.eip
            || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# else
        if (   !pVCpu->vmm.s.CallRing3JmpBufR0.rip
            || pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
# endif
        {
# ifdef DEBUG
            SUPR0Printf("vmmR0LoggerFlush: Jump buffer isn't armed!\n");
# endif
            return;
        }
        VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VMM_LOGGER_FLUSH, 0);
    }
# ifdef DEBUG
    else
        SUPR0Printf("vmmR0LoggerFlush: invalid VCPU context!\n");
# endif
#else
    NOREF(pLogger);
#endif
}
/**
* Internal R0 logger worker: Custom prefix.
*
* @returns Number of chars written.
*
* @param pLogger The logger instance.
* @param pchBuf The output buffer.
* @param cchBuf The size of the buffer.
* @param pvUser User argument (ignored).
*/
VMMR0DECL(size_t) vmmR0LoggerPrefix(PRTLOGGER pLogger, char *pchBuf, size_t cchBuf, void *pvUser)
{
    NOREF(pvUser);
#ifdef LOG_ENABLED
    PVMMR0LOGGER pR0Logger = (PVMMR0LOGGER)((uintptr_t)pLogger - RT_OFFSETOF(VMMR0LOGGER, Logger));
    if (   !VALID_PTR(pR0Logger)
        || cchBuf < 2)
        return 0;

    /* Assumption: the prefix is the VCPU id rendered as two hex digits. */
    static const char s_szHex[17] = "0123456789abcdef";
    VMCPUID const     idCpu       = pR0Logger->idCpu;
    pchBuf[1] = s_szHex[ idCpu       & 15];
    pchBuf[0] = s_szHex[(idCpu >> 4) & 15];
    return 2;
#else
    NOREF(pLogger); NOREF(pchBuf); NOREF(cchBuf);
    return 0;
#endif
}
#ifdef LOG_ENABLED
/**
* Disables flushing of the ring-0 debug log.
*
* @param pVCpu Pointer to the VMCPU.
*/
VMMR0DECL(void) VMMR0LogFlushDisable(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = true;
}
/**
* Enables flushing of the ring-0 debug log.
*
* @param pVCpu Pointer to the VMCPU.
*/
VMMR0DECL(void) VMMR0LogFlushEnable(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled = false;
}
/**
 * Checks if log flushing is disabled or not.
 *
 * @returns true if flushing is disabled, false if it is enabled.
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR0DECL(bool) VMMR0IsLogFlushDisabled(PVMCPU pVCpu)
{
    if (pVCpu->vmm.s.pR0LoggerR0)
        return pVCpu->vmm.s.pR0LoggerR0->fFlushingDisabled;
    return true;
}
#endif /* LOG_ENABLED */
/**
* Jump back to ring-3 if we're the EMT and the longjmp is armed.
*
* @returns true if the breakpoint should be hit, false if it should be ignored.
*/
DECLEXPORT(bool) RTCALL RTAssertShouldPanic(void)
{
#if 0
    return true;
#else
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
    {
        PVMCPU pVCpu = VMMGetCpu(pVM);
        if (pVCpu)
        {
#ifdef RT_ARCH_X86
            if (   pVCpu->vmm.s.CallRing3JmpBufR0.eip
                && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#else
            if (   pVCpu->vmm.s.CallRing3JmpBufR0.rip
                && !pVCpu->vmm.s.CallRing3JmpBufR0.fInRing3Call)
#endif
            {
                int rc = VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_VM_R0_ASSERTION, 0);
                return RT_FAILURE_NP(rc);
            }
        }
    }
#ifdef RT_OS_LINUX
    return true;
#else
    return false;
#endif
#endif
}
/**
* Override this so we can push it up to ring-3.
*
* @param pszExpr Expression. Can be NULL.
* @param uLine Location line number.
* @param pszFile Location file name.
* @param pszFunction Location function name.
*/
DECLEXPORT(void) RTCALL RTAssertMsg1Weak(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction)
{
    /*
     * To the log.
     */
    LogAlways(("\n!!R0-Assertion Failed!!\n"
               "Expression: %s\n"
               "Location  : %s(%d) %s\n",
               pszExpr, pszFile, uLine, pszFunction));

    /*
     * To the global VMM buffer.
     */
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);
    if (pVM)
        RTStrPrintf(pVM->vmm.s.szRing0AssertMsg1, sizeof(pVM->vmm.s.szRing0AssertMsg1),
                    "\n!!R0-Assertion Failed!!\n"
                    "Expression: %s\n"
                    "Location  : %s(%d) %s\n",
                    pszExpr, pszFile, uLine, pszFunction);

    /*
     * Continue the normal way.
     */
    RTAssertMsg1(pszExpr, uLine, pszFile, pszFunction);
}
/**
* Callback for RTLogFormatV which writes to the ring-3 log port.
* See PFNLOGOUTPUT() for details.
*/
static DECLCALLBACK(size_t) rtLogOutput(void *pv, const char *pachChars, size_t cbChars)
{
    NOREF(pv);
    for (size_t i = 0; i < cbChars; i++)
        LogAlways(("%c", pachChars[i]));
    return cbChars;
}
/**
* Override this so we can push it up to ring-3.
*
* @param pszFormat The format string.
* @param va Arguments.
*/
DECLEXPORT(void) RTCALL RTAssertMsg2WeakV(const char *pszFormat, va_list va)
{
    PVM pVM = GVMMR0GetVMByEMT(NIL_RTNATIVETHREAD);

    /*
     * Push the message to the loggers.
     */
    PRTLOGGER pLog = RTLogGetDefaultInstance(); /* Don't initialize it here... */
    if (pLog)
    {
        va_list vaCopy;
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }
    pLog = RTLogRelDefaultInstance();
    if (pLog)
    {
        va_list vaCopy;
        va_copy(vaCopy, va);
        RTLogFormatV(rtLogOutput, pLog, pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Push it to the global VMM buffer.
     */
    if (pVM)
    {
        va_list vaCopy;
        va_copy(vaCopy, va);
        RTStrPrintfV(pVM->vmm.s.szRing0AssertMsg2, sizeof(pVM->vmm.s.szRing0AssertMsg2), pszFormat, vaCopy);
        va_end(vaCopy);
    }

    /*
     * Continue the normal way.
     */
    RTAssertMsg2V(pszFormat, va);
}