/* PATM.cpp revision 1f107f4d641b44a79acfdef08e6d4022242e4fe2 */
/* $Id$ */
/** @file
* PATM - Dynamic Guest OS Patching Manager
*
* NOTE: Never ever reuse patch memory!!
*/
/*
* Copyright (C) 2006-2007 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include "PATMInternal.h"
#include "PATMPatch.h"
#include <VBox/disopcode.h>
#include "PATMA.h"
//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
//#define PATM_DISABLE_ALL
/**
* Refresh trampoline patch state.
*/
typedef struct PATMREFRESHPATCH
{
/** Pointer to the VM structure. */
/** The trampoline patch record. */
/** The new patch we want to jump to. */
/*
* Private structure used during disassembly
*/
typedef struct
{
} PATMDISASM, *PPATMDISASM;
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
#ifdef LOG_ENABLED // keep gcc quiet
#endif
#ifdef VBOX_WITH_STATISTICS
#endif
/** Converts a host-context (HC) pointer inside the patch memory block into its
 *  guest-context (GC) equivalent by rebasing the offset onto pPatchMemGC. */
#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
/** Converts a guest-context (GC) patch pointer back into host context (HC);
 *  inverse of patmPatchHCPtr2PatchGCPtr. */
#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
#ifdef VBOX_WITH_DEBUGGER
static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs);
/** Command descriptors. */
{
/* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, fFlags, pfnHandler pszSyntax, ....pszDescription */
};
#endif
/* Don't want to break saved states, so put it here as a global variable. */
static unsigned int cIDTHandlersDisabled = 0;
/**
* Initializes the PATM.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
int rc;
/* These values can't change as they are hardcoded in patch code (old saved states!) */
AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
/* Allocate patch memory and GC patch state memory. */
/* Add another page in case the generated code is much larger than expected. */
/** @todo bad safety precaution */
rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
if (RT_FAILURE(rc))
{
return rc;
}
/* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address */
/*
*
* Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
* Note2: This doesn't really belong here, but we need access to it for relocation purposes
*
*/
/* Hypervisor memory for patch statistics */
/* Memory for patch lookup trees. */
rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
/* Check CFGM option. */
if (RT_FAILURE(rc))
# ifdef PATM_DISABLE_ALL
pVM->fPATMEnabled = false;
# else
pVM->fPATMEnabled = true;
# endif
#endif
if (RT_FAILURE(rc))
return rc;
/*
* Register save and load state notifiers.
*/
rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
#ifdef VBOX_WITH_DEBUGGER
/*
* Debugger commands.
*/
static bool s_fRegisteredCmds = false;
if (!s_fRegisteredCmds)
{
if (RT_SUCCESS(rc2))
s_fRegisteredCmds = true;
}
#endif
#ifdef VBOX_WITH_STATISTICS
STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
#endif /* VBOX_WITH_STATISTICS */
return rc;
}
/**
* Finalizes HMA page attributes.
*
* @returns VBox status code.
* @param pVM The VM handle.
*/
{
/* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
if (RT_FAILURE(rc))
rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
if (RT_FAILURE(rc))
rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
if (RT_FAILURE(rc))
return rc;
}
/**
* (Re)initializes PATM
*
* @param pVM The VM.
*/
{
int rc;
/*
* Assert alignment and sizes.
*/
/*
* Setup any fixed pointers and offsets.
*/
#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
#ifndef PATM_DISABLE_ALL
pVM->fPATMEnabled = true;
#endif
#endif
AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
/*
* (Re)Initialize PATM structure
*/
/* Lowest and highest patched instruction */
/* Generate all global functions to be used by future patches. */
/* We generate a fake patch in order to use the existing code for relocation. */
rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
if (RT_FAILURE(rc))
{
Log(("Out of memory!!!!\n"));
return VERR_NO_MEMORY;
}
/* Update free pointer in patch memory. */
/* Round to next 8 byte boundary. */
return rc;
}
/**
* Applies relocations to data and code managed by this
* component. This function will be called at init and
* whenever the VMM need to relocate it self inside the GC.
*
* The PATM will update the addresses used by the switcher.
*
* @param pVM The VM.
*/
{
if (delta)
{
/* Update CPUMCTX guest context pointer. */
/* If we are running patch code right now, then also adjust EIP. */
/* Deal with the global patch functions. */
}
}
/**
* Terminates the PATM.
*
* Termination means cleaning up and freeing all resources,
* the VM it self is at this point powered off or suspended.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
/* Memory was all allocated from the two MM heaps and requires no freeing. */
return VINF_SUCCESS;
}
/**
* PATM reset callback.
*
* @returns VBox status code.
* @param pVM The VM which is reset.
*/
{
Log(("PATMR3Reset\n"));
/* Free all patches. */
while (true)
{
PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
if (pPatchRec)
{
}
else
break;
}
if (RT_SUCCESS(rc))
return rc;
}
/**
 * Disassembler callback for reading instruction bytes.
 *
 * NOTE(review): this excerpt appears truncated by extraction -- the
 * declarations of 'orgsize', 'rc' and the conditions guarding the inner
 * blocks below are missing; verify against the complete source before
 * relying on the control flow shown here.
 *
 * @returns VBox status code; VERR_INVALID_PARAMETER for a zero-length read.
 * @param   pDisState   Disassembler CPU state.
 * @param   pbDst       Destination buffer for the opcode bytes.
 * @param   uSrcAddr    Source address to read from.
 * @param   cbToRead    Number of bytes to read.
 */
DECLCALLBACK(int) patmReadBytes(PDISCPUSTATE pDisState, uint8_t *pbDst, RTUINTPTR uSrcAddr, uint32_t cbToRead)
{
/* Reject zero-length reads up front. */
if (cbToRead == 0)
return VERR_INVALID_PARAMETER;
/*
 * Trap/interrupt handler typically call common code on entry. Which might already have patches inserted.
 * As we currently don't support calling patch code from patch code, we'll let it read the original opcode bytes instead.
 */
/** @todo could change in the future! */
{
/* Byte-by-byte copy loop; 'orgsize' and 'rc' come from code missing in this excerpt. */
for (int i = 0; i < orgsize; i++)
{
if (RT_FAILURE(rc))
break;
uSrcAddr++;
pbDst++;
cbToRead--;
}
/* Entire request satisfied from the original opcode bytes -> done. */
if (cbToRead == 0)
return VINF_SUCCESS;
#ifdef VBOX_STRICT
{
}
#endif
}
{
}
/* pInstrHC is the base address; adjust according to the GC pointer. */
return VINF_SUCCESS;
}
/* NOTE(review): presumably disassembles one instruction and formats it to a
 * string (judging by the name) -- TODO confirm; the parameter list is cut
 * mid-declaration and the body is missing in this excerpt. */
DECLINLINE(bool) patmR3DisInstrToStr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
{
}
/* NOTE(review): presumably a plain disassembly wrapper (no string output,
 * judging by the name) -- TODO confirm; the parameter list is cut
 * mid-declaration and the body is missing in this excerpt. */
DECLINLINE(bool) patmR3DisInstr(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC, uint32_t fReadFlags,
{
}
/* NOTE(review): presumably a disassembly wrapper that skips string formatting
 * and operand-mode selection (judging by the name) -- TODO confirm; the
 * parameter list is cut mid-declaration and the body is missing here. */
DECLINLINE(bool) patmR3DisInstrNoStrOpMode(PVM pVM, PPATCHINFO pPatch, RTGCPTR32 InstrGCPtr32, uint8_t *pbInstrHC,
{
}
#ifdef LOG_ENABLED
do { \
if (LogIsEnabled()) \
} while (0)
const char *pszComment1, const char *pszComment2)
{
char szOutput[128];
szOutput[0] = '\0';
}
#else
# define PATM_LOG_PATCH_INSTR(a_pVM, a_pPatch, a_fFlags, a_szComment1, a_szComment2) do { } while (0)
#endif
/**
* Callback function for RTAvloU32DoWithAll
*
* Updates all fixups in the patches
*
* @returns VBox status code.
* @param pNode Current node
* @param pParam The VM to operate on.
*/
{
int rc;
/* Nothing to do if the patch is not active. */
return 0;
/*
* Apply fixups
*/
while (true)
{
/* Get the record that's closest from above */
if (pRec == 0)
break;
{
case FIXUP_ABSOLUTE:
Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
{
}
else
{
rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
if ( rc == VERR_PAGE_NOT_PRESENT
|| rc == VERR_PAGE_TABLE_NOT_PRESENT)
{
Log(("PATM: Patch page not present -> check later!\n"));
rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
}
else
{
Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
/*
* Disable patch; this is not a good solution
*/
/* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
}
else
if (RT_SUCCESS(rc))
{
rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
}
}
break;
case FIXUP_REL_JMPTOPATCH:
{
{
#if 0 /** @todo '*(int32_t*)pRec->pRelocPos' crashes on restore of an XP VM here. pRelocPos=0x8000dbe2180a (bird) */
Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
#else
Log(("Relative fixup (g2p) ???????? -> %08X at %08X (source=%08x, target=%08x)\n", displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
#endif
{
}
else
#endif
{
oldJump[0] = 0xE9;
}
else
{
continue; //this should never happen!!
}
/*
* Read old patch jump and compare it to the one we previously installed
*/
rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
if ( rc == VERR_PAGE_NOT_PRESENT
|| rc == VERR_PAGE_TABLE_NOT_PRESENT)
{
rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
}
else
{
Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
/*
* Disable patch; this is not a good solution
*/
/* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
}
else
if (RT_SUCCESS(rc))
{
}
else
}
else
Log(("Skip the guest jump to patch code for this disabled patch %RGv - %08X\n", pPatch->patch.pPrivInstrGC, pRec->pRelocPos));
break;
}
case FIXUP_REL_JMPTOGUEST:
{
Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
break;
}
default:
AssertMsg(0, ("Invalid fixup type!!\n"));
return VERR_INVALID_PARAMETER;
}
}
return 0;
}
/**
* \#PF Handler callback for virtual access handler ranges.
*
* Important to realize that a physical page in a range can have aliases, and
* for ALL and WRITE handlers these will also trigger.
*
* @returns VINF_SUCCESS if the handler have carried out the operation.
* @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
* @param pVM VM Handle.
* @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
* @param pvPtr The HC mapping of that address.
* @param enmAccessType The access type.
* @param pvUser User argument.
*/
/* NOTE(review): the parameter list is cut mid-declaration in this excerpt
 * (trailing parameters missing). The visible body performs no work of its
 * own and always defers the access to PGM's default handling. */
DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf,
{
/** @todo could be the wrong virtual address (alias) */
return VINF_PGM_HANDLER_DO_DEFAULT;
}
#ifdef VBOX_WITH_DEBUGGER
/**
* Callback function for RTAvloU32DoWithAll
*
* Enables the patch that's being enumerated
*
* @returns 0 (continue enumeration).
* @param pNode Current node
* @param pVM The VM to operate on.
*/
{
return 0;
}
#endif /* VBOX_WITH_DEBUGGER */
#ifdef VBOX_WITH_DEBUGGER
/**
* Callback function for RTAvloU32DoWithAll
*
* Disables the patch that's being enumerated
*
* @returns 0 (continue enumeration).
* @param pNode Current node
* @param pVM The VM to operate on.
*/
{
return 0;
}
#endif
/**
* Returns the host context pointer and size of the patch memory block
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pcb Size of the patch memory block
*/
{
if (pcb)
}
/**
* Returns the guest context pointer and size of the patch memory block
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pcb Size of the patch memory block
*/
{
if (pcb)
}
/**
* Returns the host context pointer of the GC context structure
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
}
/**
* Checks whether the HC address is part of our patch region
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pAddrGC Guest context address
*/
{
return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
}
/**
* Allows or disallow patching of privileged instructions executed by the guest OS
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
{
return VINF_SUCCESS;
}
/**
* Convert a GC patch block pointer to a HC patch pointer
*
* @returns HC pointer or NULL if it's not a GC patch pointer
* @param pVM The VM to operate on.
* @param pAddrGC GC pointer
*/
{
if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
else
return NULL;
}
/**
*
* @returns 0 - disabled, 1 - enabled
* @param pVM The VM to operate on.
*/
{
return pVM->fPATMEnabled;
}
/**
* Convert guest context address to host context pointer
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pCacheRec Address conversion cache record
* @param pGCPtr Guest context pointer
*
* @returns Host context pointer or NULL in case of an error
*
*/
/* NOTE(review): body is truncated in this excerpt -- the assignment of 'rc'
 * and the declaration of 'pHCPtr' are missing; the visible code only shows
 * the failure path. Verify against the complete source. */
R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATMP2GLOOKUPREC pCacheRec, RCPTRTYPE(uint8_t *) pGCPtr)
{
int rc;
{
}
/* Release previous lock if any. */
{
}
/* Only page-not-present style failures are expected; anything else trips the assertion. */
if (rc != VINF_SUCCESS)
{
AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("MMR3PhysGCVirt2HCVirtEx failed for %08X\n", pGCPtr));
return NULL;
}
return pHCPtr;
}
/* Calculates and fills in all branch targets
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Current patch block pointer
*
*/
{
unsigned nrJumpRecs = 0;
/*
* Set all branch targets inside the patch block.
* We remove all jump records as they are no longer needed afterwards.
*/
while (true)
{
if (pRec == 0)
break;
nrJumpRecs++;
/* HC in patch block to GC in patch block. */
{
/* Special case: call function replacement patch from this patch block.
*/
if (!pFunctionRec)
{
int rc;
else
if (RT_FAILURE(rc))
{
/* Failure for some reason -> mark exit point with int 3. */
/* Set a breakpoint at the very beginning of the recompiled instruction */
*pPatchHC = 0xCC;
continue;
}
}
else
{
}
}
else
if (pBranchTargetGC == 0)
{
AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
return VERR_PATCHING_REFUSED;
}
/* Our jumps *always* have a dword displacement (to make things easier). */
Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
}
return VINF_SUCCESS;
}
/* Add an illegal instruction record
*
* @param pVM The VM to operate on.
* @param pPatch Patch structure ptr
* @param pInstrGC Guest context pointer to privileged instruction
*
*/
{
}
{
if (pRec)
return true;
else
return false;
}
/**
* Add a patch to guest lookup record
*
* @param pVM The VM to operate on.
* @param pPatch Patch structure ptr
* @param pPatchInstrHC Guest context pointer to patch block
* @param pInstrGC Guest context pointer to privileged instruction
* @param enmType Lookup type
* @param fDirty Dirty flag
*
*/
/** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
/* NOTE(review): body is truncated in this excerpt -- the LogFlowFunc
 * argument list is cut mid-statement, and the declarations/uses of
 * 'pPatchToGuestRec' and 'pGuestToPatchRec' lookups are incomplete.
 * Verify against the complete source before relying on this flow. */
void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
{
bool ret;
uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
LogFlowFunc(("pVM=%#p pPatch=%#p pPatchInstrHC=%#p pInstrGC=%#x enmType=%d fDirty=%RTbool\n",
if (enmType == PATM_LOOKUP_PATCH2GUEST)
{
return; /* already there */
}
#ifdef VBOX_STRICT
else
{
}
#endif
/* Single allocation covers both record types (patch-to-guest followed by guest-to-patch). */
pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
/* GC to patch address */
if (enmType == PATM_LOOKUP_BOTHDIR)
{
if (!pGuestToPatchRec)
{
}
}
}
/**
* Removes a patch to guest lookup record
*
* @param pVM The VM to operate on.
* @param pPatch Patch structure ptr
* @param pPatchInstrGC Guest context pointer to patch block
*/
{
uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
if (pPatchToGuestRec)
{
{
}
}
}
/**
* RTAvlPVDestroy callback.
*/
{
return 0;
}
/**
* Empty the specified tree (PV tree, MMR3 heap)
*
* @param pVM The VM to operate on.
* @param ppTree Tree to empty
*/
{
}
/**
* RTAvlU32Destroy callback.
*/
{
return 0;
}
/**
* Empty the specified tree (U32 tree, MMR3 heap)
*
* @param pVM The VM to operate on.
* @param ppTree Tree to empty
*/
{
}
/**
* Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pCpu CPU disassembly state
* @param pInstrGC Guest context pointer to privileged instruction
* @param pCurInstrGC Guest context pointer to the current instruction
* @param pCacheRec Cache record ptr
*
*/
static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): this function body is truncated by extraction -- the 'pPatch'
 * declaration, several 'if' conditions, and the switch statement keyword for
 * the exit-point dispatch below are missing. Verify against the full source. */
bool fIllegalInstr = false;
/*
 * Preliminary heuristics:
 *- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
 *- no near or far returns; no int xx, no into
 *
 * Note: Later on we can impose less stricter guidelines if the need arises
 */
/* Bail out if the patch gets too big. */
{
fIllegalInstr = true;
}
else
{
/* No unconditional jumps or calls without fixed displacements. */
)
{
|| (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
)
{
fIllegalInstr = true;
}
}
/* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
{
&& pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
{
Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
/* We turn this one into a int 3 callable patch. */
}
}
else
/* no nested pushfs just yet; nested cli is allowed for cli patches though. */
{
{
fIllegalInstr = true;
}
}
/* no far returns */
{
fIllegalInstr = true;
}
{
/* No int xx or into either. */
fIllegalInstr = true;
}
}
/* Illegal instruction -> end of analysis phase for this code block */
return VINF_SUCCESS;
/* Check for exit points. */
{
case OP_SYSEXIT:
return VINF_SUCCESS; /* duplicate it; will fault or emulated in GC. */
case OP_SYSENTER:
case OP_ILLUD2:
/* This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more. */
Log(("Illegal opcode (0xf 0xb) -> return here\n"));
return VINF_SUCCESS;
case OP_STI:
case OP_POPF:
/* If out exit point lies within the generated patch jump, then we have to refuse!! */
if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
{
return VERR_PATCHING_REFUSED;
}
{
{
return VINF_SUCCESS;
/* Or else we need to duplicate more instructions, because we can't jump back yet! */
Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
}
break; /* sti doesn't mark the end of a pushf block; only popf does. */
}
/* else: fall through. */
case OP_RETN: /* exit point for function replacement */
return VINF_SUCCESS;
case OP_IRET:
return VINF_SUCCESS; /* exitpoint */
case OP_CPUID:
case OP_CALL:
case OP_JMP:
break;
default:
{
return VINF_SUCCESS; /* exit point */
}
break;
}
/* If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump. */
if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
{
/* The end marker for this kind of patch is any instruction at a location outside our patch jump. */
return VINF_SUCCESS;
}
return VWRN_CONTINUE_ANALYSIS;
}
/**
* Analyses the instructions inside a function for compliance
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pCpu CPU disassembly state
* @param pInstrGC Guest context pointer to privileged instruction
* @param pCurInstrGC Guest context pointer to the current instruction
* @param pCacheRec Cache record ptr
*
*/
static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): this function body is truncated by extraction -- the 'pPatch'
 * declaration, several guarding conditions, and the switch keyword for the
 * exit-point dispatch below are missing. Verify against the full source. */
bool fIllegalInstr = false;
//Preliminary heuristics:
//- no call instructions
//- ret ends a block
// bail out if the patch gets too big
{
Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
fIllegalInstr = true;
}
else
{
// no unconditional jumps or calls without fixed displacements
)
{
|| (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
)
{
fIllegalInstr = true;
}
}
else /* no far returns */
{
fIllegalInstr = true;
}
else /* no int xx or into either */
if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
{
fIllegalInstr = true;
}
#if 0
{
Log(("Illegal instructions for function patch!!\n"));
return VERR_PATCHING_REFUSED;
}
#endif
}
/* Illegal instruction -> end of analysis phase for this code block */
{
return VINF_SUCCESS;
}
// Check for exit points
{
case OP_ILLUD2:
//This appears to be some kind of kernel panic in Linux 2.4; no point to analyse more
Log(("Illegal opcode (0xf 0xb) -> return here\n"));
return VINF_SUCCESS;
case OP_IRET:
case OP_SYSEXIT: /* will fault or emulated in GC */
case OP_RETN:
return VINF_SUCCESS;
case OP_POPF:
case OP_STI:
return VWRN_CONTINUE_ANALYSIS;
default:
{
return VINF_SUCCESS; /* exit point */
}
return VWRN_CONTINUE_ANALYSIS;
}
return VWRN_CONTINUE_ANALYSIS;
}
/**
* Recompiles the instructions in a code block
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pCpu CPU disassembly state
* @param pInstrGC Guest context pointer to privileged instruction
* @param pCurInstrGC Guest context pointer to the current instruction
* @param pCacheRec Cache record ptr
*
*/
static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): this excerpt is incomplete -- many conditions and generator calls have
 * been elided (undeclared locals such as pPatch/pTargetGC/pNextInstrGC appear below).
 * The visible structure is an opcode dispatch that either duplicates the instruction
 * into patch memory or generates replacement code; verify details against the full
 * source. */
int rc = VINF_SUCCESS;
bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
&& !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
{
/*
* Been there, done that; so insert a jump (we don't want to duplicate code)
* no need to record this instruction as it's glue code that never crashes (it had better not!)
*/
}
{
}
else
if (RT_FAILURE(rc))
return rc;
/* Note: Never do a direct return unless a failure is encountered! */
/* Clear recompilation of next instruction flag; we are doing that right here. */
/* Add lookup record for patch to guest address translation */
patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
/* Update lowest and highest instruction address for this patch */
else
/* Illegal instruction -> end of recompile phase for this code block. */
{
goto end;
}
/* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
* Indirect calls are handled below.
*/
{
if (pTargetGC == 0)
{
return VERR_PATCHING_REFUSED;
}
{
if (RT_FAILURE(rc))
goto end;
}
else
rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
if (RT_SUCCESS(rc))
goto end;
}
{
case OP_CLI:
{
/* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
* until we've found the proper exit point(s).
*/
if ( pCurInstrGC != pInstrGC
)
{
Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
}
/* Set by irq inhibition; no longer valid now. */
if (RT_SUCCESS(rc))
break;
}
case OP_MOV:
{
/* mov ss, src? */
{
/** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
}
#if 0 /* necessary for Haiku */
else
&& (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
{
/* mov GPR, ss */
if (RT_SUCCESS(rc))
break;
}
#endif
}
goto duplicate_instr;
case OP_POP:
{
}
goto duplicate_instr;
case OP_STI:
{
/* In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
{
fInhibitIRQInstr = true;
}
if (RT_SUCCESS(rc))
{
unsigned opsize;
int disret;
{ /* Force pNextInstrHC out of scope after using it */
if (pNextInstrHC == NULL)
{
AssertFailed();
return VERR_PATCHING_REFUSED;
}
// Disassemble the next instruction
}
if (disret == false)
{
AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
return VERR_PATCHING_REFUSED;
}
|| pReturnInstrGC <= pInstrGC
)
{
/* Not an exit point for function duplication patches */
&& RT_SUCCESS(rc))
{
}
else
}
else {
Log(("PATM: sti occurred too soon; refusing patch!\n"));
}
}
break;
}
case OP_POPF:
{
/* Not an exit point for IDT handler or function replacement patches */
/* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
fGenerateJmpBack = false;
rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
if (RT_SUCCESS(rc))
{
if (fGenerateJmpBack == false)
{
/* Not an exit point for IDT handler or function replacement patches */
}
else
{
}
}
break;
}
case OP_PUSHF:
if (RT_SUCCESS(rc))
break;
case OP_PUSH:
{
if (RT_SUCCESS(rc))
break;
}
goto duplicate_instr;
case OP_IRET:
if (RT_SUCCESS(rc))
{
}
break;
case OP_ILLUD2:
/* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
if (RT_SUCCESS(rc))
Log(("Illegal opcode (0xf 0xb)\n"));
break;
case OP_CPUID:
if (RT_SUCCESS(rc))
break;
case OP_STR:
case OP_SLDT:
if (RT_SUCCESS(rc))
break;
case OP_SGDT:
case OP_SIDT:
if (RT_SUCCESS(rc))
break;
case OP_RETN:
/* retn is an exit point for function patches */
if (RT_SUCCESS(rc))
break;
case OP_SYSEXIT:
/* Duplicate it, so it can be emulated in GC (or fault). */
if (RT_SUCCESS(rc))
break;
case OP_CALL:
/* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
* In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
*/
{
if (RT_SUCCESS(rc))
{
}
break;
}
goto gen_illegal_instr;
case OP_JMP:
/* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
* In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
*/
{
if (RT_SUCCESS(rc))
break;
}
goto gen_illegal_instr;
case OP_INT3:
case OP_INT:
case OP_INTO:
goto gen_illegal_instr;
case OP_MOV_DR:
/* Note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
{
if (RT_SUCCESS(rc))
break;
}
goto duplicate_instr;
case OP_MOV_CR:
/* Note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
{
if (RT_SUCCESS(rc))
break;
}
goto duplicate_instr;
default:
{
if (RT_SUCCESS(rc))
}
else
{
Log(("patmPatchGenDuplicate\n"));
if (RT_SUCCESS(rc))
}
break;
}
end:
if ( !fInhibitIRQInstr
{
int rc2;
{
Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
}
else
{
}
if (RT_FAILURE(rc2))
}
if (RT_SUCCESS(rc))
{
// If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
&& !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
)
{
// The end marker for this kind of patch is any instruction at a location outside our patch jump
Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
}
}
return rc;
}
#ifdef LOG_ENABLED
/* Add a disasm jump record (temporary, to prevent duplicate analysis)
*
* @param pVM The VM to operate on.
* @param pPatch Patch structure ptr
* @param pInstrGC Guest context pointer to privileged instruction
*
*/
{
/* NOTE(review): function signature and body statements are missing from this excerpt;
 * presumably records pInstrGC in the patch's disasm-jump tree (see comment above) --
 * confirm against the full source. */
}
/**
* Checks if jump target has been analysed before.
*
* @returns true if the jump target was analysed before, false otherwise.
* @param pPatch Patch struct
* @param pInstrGC Jump target
*
*/
{
/* NOTE(review): signature and the lookup producing 'pRec' are elided in this excerpt;
 * presumably an AVL-tree lookup of pInstrGC -- confirm against the full source. */
if (pRec)
return true;
return false;
}
/**
* For proper disassembly of the final patch block
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pCpu CPU disassembly state
* @param pInstrGC Guest context pointer to privileged instruction
* @param pCurInstrGC Guest context pointer to the current instruction
* @param pCacheRec Cache record ptr
*
*/
int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): callback used when disassembling the final patch block for logging;
 * the conditions guarding each branch are elided in this excerpt -- verify against
 * the full source. Returns VINF_SUCCESS to stop or VWRN_CONTINUE_ANALYSIS to keep
 * disassembling. */
{
/* Could be an int3 inserted in a call patch. Check to be sure */
{ /* Force pOrgJumpHC out of scope after using it */
return VINF_SUCCESS;
}
return VWRN_CONTINUE_ANALYSIS;
}
{
/* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
return VWRN_CONTINUE_ANALYSIS;
}
)
{
return VINF_SUCCESS;
}
return VINF_SUCCESS;
return VWRN_CONTINUE_ANALYSIS;
}
/**
* Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to the initial privileged instruction
* @param pCurInstrGC Guest context pointer to the current instruction
* @param pfnPATMR3Disasm Callback for testing the disassembled instruction
* @param pCacheRec Cache record ptr
*
*/
int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): this excerpt is incomplete -- the disassembly call that sets 'disret'/
 * 'opsize' and the branch-target computation are elided. Visible structure: walk the
 * code stream, invoke the callback per instruction, and recurse/stop on jumps.
 * Confirm details against the full source. */
int rc = VWRN_CONTINUE_ANALYSIS;
bool disret;
char szOutput[256];
/* We need this to determine branch targets (and for disassembling). */
while (rc == VWRN_CONTINUE_ANALYSIS)
{
if (pCurInstrHC == NULL)
{
goto end;
}
{
else
{
rc = VINF_SUCCESS;
goto end;
}
}
else
if (disret == false)
{
Log(("Disassembly failed (probably page not present) -> return to caller\n"));
rc = VINF_SUCCESS;
goto end;
}
if (rc != VWRN_CONTINUE_ANALYSIS) {
break; //done!
}
/* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
)
{
if (pTargetGC == 0)
{
break;
}
{
//jump back to guest code
rc = VINF_SUCCESS;
goto end;
}
{
rc = VINF_SUCCESS;
goto end;
}
{
/* New jump, let's check it. */
if (rc != VINF_SUCCESS) {
break; //done!
}
}
{
/* Unconditional jump; return to caller. */
rc = VINF_SUCCESS;
goto end;
}
}
pCurInstrGC += opsize;
}
end:
return rc;
}
/**
* Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to the initial privileged instruction
* @param pCurInstrGC Guest context pointer to the current instruction
* @param pfnPATMR3Disasm Callback for testing the disassembled instruction
* @param pCacheRec Cache record ptr
*
*/
int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): the call producing 'rc' (presumably patmr3DisasmCode) is elided in
 * this excerpt -- confirm against the full source. */
/* Free all disasm jump records. */
return rc;
}
#endif /* LOG_ENABLED */
/**
* Detects if the specified address falls within a 5 byte jump generated for an active patch.
* If so, this patch is permanently disabled.
*
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to instruction
* @param pConflictGC Guest context pointer to check
*
* @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
*
*/
{
/* NOTE(review): function signature and the conflict-handling body are elided in this
 * excerpt -- confirm against the full source. */
PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
if (pTargetPatch)
{
}
/* No active patch covers the conflicting address. */
return VERR_PATCH_NO_CONFLICT;
}
/**
* Recompile the code stream until the callback function detects a failure or decides everything is acceptable
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to privileged instruction
* @param pCurInstrGC Guest context pointer to the current instruction
* @param pfnPATMR3Recompile Callback for testing the disassembled instruction
* @param pCacheRec Cache record ptr
*
*/
static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): this excerpt is incomplete -- the disassembly calls, branch-target
 * computation ('addr') and several loop conditions are elided; undeclared locals
 * (pPatch, pNextInstrGC, cpunext, opsize) appear below. Visible structure: walk the
 * guest code, invoke the recompile callback per instruction, handle irq-inhibiting
 * instructions by also recompiling the following instruction, and follow/recurse on
 * branch targets. Confirm details against the full source. */
int rc = VWRN_CONTINUE_ANALYSIS;
bool disret;
#ifdef LOG_ENABLED
char szOutput[256];
#endif
while (rc == VWRN_CONTINUE_RECOMPILE)
{
if (pCurInstrHC == NULL)
{
goto end;
}
#ifdef LOG_ENABLED
#else
#endif
if (disret == false)
{
Log(("Disassembly failed (probably page not present) -> return to caller\n"));
/* Add lookup record for patch to guest address translation */
patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
goto end;
}
if (rc != VWRN_CONTINUE_RECOMPILE)
{
/* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
if ( rc == VINF_SUCCESS
{
/* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
* Recompile the next instruction as well
*/
if (pNextInstrHC == NULL)
{
goto end;
}
disret = patmR3DisInstr(pVM, pPatch, pNextInstrGC, pNextInstrHC, PATMREAD_ORGCODE, &cpunext, &opsizenext);
if (disret == false)
{
goto end;
}
{
case OP_IRET: /* inhibit cleared in generated code */
case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
case OP_HLT:
break; /* recompile these */
default:
{
Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
goto end; /** @todo should be ok to ignore instruction fusing in this case */
}
break;
}
/* Note: after a cli we must continue to a proper exit point */
{
if (RT_SUCCESS(rc))
{
rc = VINF_SUCCESS;
goto end;
}
break;
}
else
}
else
break; /* done! */
}
/** @todo continue with the instructions following the jump and then recompile the jump target code */
/* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
)
{
if (addr == 0)
{
break;
}
/* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
{
Log(("patmRecompileCodeStream continue passed conditional jump\n"));
/* First we need to finish this linear code stream until the next exit point. */
if (RT_FAILURE(rc))
{
break; //fatal error
}
}
{
/* New code; let's recompile it. */
Log(("patmRecompileCodeStream continue with jump\n"));
/*
* If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
* this patch so we can continue our analysis
*
* We rely on CSAM to detect and resolve conflicts
*/
if(pTargetPatch)
{
Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
}
if(pTargetPatch)
if (RT_FAILURE(rc))
{
break; //done!
}
}
/* Always return to caller here; we're done! */
rc = VINF_SUCCESS;
goto end;
}
else
{
rc = VINF_SUCCESS;
goto end;
}
pCurInstrGC += opsize;
}
end:
return rc;
}
/**
* Generate the jump from guest to patch code
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
* @param pCacheRec Guest translation lookup cache record
*/
static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
{
/* NOTE(review): this excerpt is incomplete -- the conditions selecting between the
 * three jump-encoding branches, the declarations of 'temp'/'pPB', and a matching '#if'
 * for the stray '#endif' below are elided. Visible intent: assemble a relative jmp in
 * 'temp' (opcode at [0], rel32 displacement at [1] or [2]), register a
 * FIXUP_REL_JMPTOPATCH relocation when fAddFixup is set, then write the jump over the
 * guest's privileged instruction. Confirm against the full source. */
int rc;
{
{
// jmp [PatchCode]
if (fAddFixup)
{
if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
{
Log(("Relocation failed for the jump in the guest code!!\n"));
return VERR_PATCHING_REFUSED;
}
}
*(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
}
else
{
// jmp [PatchCode]
if (fAddFixup)
{
if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
{
Log(("Relocation failed for the jump in the guest code!!\n"));
return VERR_PATCHING_REFUSED;
}
}
*(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
}
else
{
Assert(0);
return VERR_PATCHING_REFUSED;
}
}
else
#endif
{
// jmp [PatchCode]
if (fAddFixup)
{
if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
{
Log(("Relocation failed for the jump in the guest code!!\n"));
return VERR_PATCHING_REFUSED;
}
}
*(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
}
rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
if (rc == VINF_SUCCESS)
return rc;
}
/**
* Remove the jump from guest to patch code
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
*/
{
/* NOTE(review): function signature and the disassembly calls inside the DEBUG loops
 * (which set 'disret'/'opsize' and declare 'i') are elided in this excerpt -- the
 * loops presumably log the instructions before/after restoration. Confirm against
 * the full source. */
#ifdef DEBUG
char szOutput[256];
bool disret;
while (i < pPatch->cbPrivInstr)
{
if (disret == false)
break;
i += opsize;
}
#endif
/* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
#ifdef DEBUG
if (rc == VINF_SUCCESS)
{
i = 0;
while (i < pPatch->cbPrivInstr)
{
if (disret == false)
break;
i += opsize;
}
}
#endif
return rc;
}
/**
* Generate the call from guest to patch code
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
* @param pInstrHC HC address where to insert the jump
* @param pCacheRec Guest translation cache record
*/
static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, PPATMP2GLOOKUPREC pCacheRec, bool fAddFixup = true)
{
/* NOTE(review): the declarations of 'temp' and 'pPB' are elided in this excerpt.
 * Visible intent: assemble a near call/jump in 'temp' (rel32 at offset 1), register a
 * FIXUP_REL_JMPTOPATCH relocation when fAddFixup is set, then overwrite the guest's
 * instruction with it. Confirm against the full source. */
int rc;
// jmp [PatchCode]
if (fAddFixup)
{
if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
{
Log(("Relocation failed for the jump in the guest code!!\n"));
return VERR_PATCHING_REFUSED;
}
}
*(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
return rc;
}
/**
* Creates a patch for a privileged instruction block. (Function name is elided in this
* excerpt; the Log string below suggests PATMR3PatchBlock -- confirm against the full
* source.)
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to privileged instruction
* @param pInstrHC Host context pointer to privileged instruction
* @param uOpcode Instruction opcode
* @param uOpSize Size of starting instruction
* @param pPatchRec Patch record
*
* @note returns failure if patching is not allowed or possible
*
*/
{
/* NOTE(review): this excerpt is incomplete -- the signature, the recompile call, the
 * 'failure:' label, and many statements are elided; 'pPatch' is undeclared here.
 * Comments are limited to what the visible code shows. */
int rc = VERR_PATCHING_REFUSED;
uint32_t orgOffsetPatchMem = ~0;
bool fInserted;
/* Save original offset (in case of failures later on) */
switch (uOpcode)
{
case OP_MOV:
break;
case OP_CLI:
case OP_PUSHF:
/* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
/* Note: special precautions are taken when disabling and enabling such patches. */
break;
default:
{
return VERR_INVALID_PARAMETER;
}
}
if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
/* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
)
{
Log(("Patch jump would cross page boundary -> refuse!!\n"));
goto failure;
}
pPatch->nrPatch2GuestRecs = 0;
#ifdef PATM_ENABLE_CALL
#endif
pPatch->uCurPatchOffset = 0;
if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
{
/* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
if (RT_FAILURE(rc))
goto failure;
}
/***************************************************************************************************************************/
/* Note: We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
/***************************************************************************************************************************/
#ifdef VBOX_WITH_STATISTICS
{
if (RT_FAILURE(rc))
goto failure;
}
#endif
/* Free leftover lock if any. */
{
}
if (rc != VINF_SUCCESS)
{
goto failure;
}
/* Calculated during analysis. */
{
/* Most likely cause: we encountered an illegal instruction very early on. */
/** @todo could turn it into an int3 callable patch. */
Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
goto failure;
}
/* size of patch block */
/* Update free pointer in patch memory. */
/* Round to next 8 byte boundary. */
/*
* Insert into patch to guest lookup tree
*/
LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
if (!fInserted)
{
goto failure;
}
/* Note that patmr3SetBranchTargets can install additional patches!! */
if (rc != VINF_SUCCESS)
{
goto failure;
}
#ifdef LOG_ENABLED
Log(("Patch code ----------------------------------------------------------\n"));
patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, &cacheRec);
/* Free leftover lock if any. */
{
}
Log(("Patch code ends -----------------------------------------------------\n"));
#endif
/* make a copy of the guest code bytes that will be overwritten */
rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
{
/*uint8_t bASMInt3 = 0xCC; - unused */
/* Replace first opcode byte with 'int 3'. */
if (RT_FAILURE(rc))
goto failure;
/* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
}
else
{
Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
/* now insert a jump in the guest code */
if (RT_FAILURE(rc))
goto failure;
}
Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
return VINF_SUCCESS;
pPatch->nrJumpRecs = 0;
/* Turn this cli patch into a dummy. */
pPatch->pPatchBlockOffset = 0;
// Give back the patch memory we no longer need
return rc;
}
/**
* Patch IDT handler
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to privileged instruction
* @param uOpSize Size of starting instruction
* @param pPatchRec Patch record
* @param pCacheRec Cache record ptr
*
* @note returns failure if patching is not allowed or possible
*
*/
static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, uint32_t uOpSize, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): this excerpt is incomplete -- the declarations of pPatch/pCurInstrGC/
 * pCurInstrHC/pJmpInstrGC, the 'failure:' label and several generator calls are
 * elided. Comments describe visible intent only; confirm against the full source. */
bool disret;
uint32_t orgOffsetPatchMem = ~0;
/*
* In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
* and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
* condition here and only patch the common entrypoint once.
*/
disret = patmR3DisInstr(pVM, pPatch, pCurInstrGC, pCurInstrHC, PATMREAD_ORGCODE, &cpuPush, &opsize);
{
int rc;
pCurInstrGC += opsize;
if ( disret
)
{
bool fInserted;
PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
if (pJmpPatch == 0)
{
/* Patch it first! */
if (rc != VINF_SUCCESS)
goto failure;
}
goto failure;
/* save original offset (in case of failures later on) */
pPatch->uCurPatchOffset = 0;
pPatch->nrPatch2GuestRecs = 0;
#ifdef VBOX_WITH_STATISTICS
if (RT_FAILURE(rc))
goto failure;
#endif
/* Install fake cli patch (to clear the virtual IF) */
if (RT_FAILURE(rc))
goto failure;
/* Add lookup record for patch to guest address translation (for the push) */
patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
/* Duplicate push. */
if (RT_FAILURE(rc))
goto failure;
/* Generate jump to common entrypoint. */
if (RT_FAILURE(rc))
goto failure;
/* size of patch block */
/* Update free pointer in patch memory. */
/* Round to next 8 byte boundary */
/* There's no jump from guest to patch code. */
pPatch->cbPatchJump = 0;
#ifdef LOG_ENABLED
Log(("Patch code ----------------------------------------------------------\n"));
patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
Log(("Patch code ends -----------------------------------------------------\n"));
#endif
/*
* Insert into patch to guest lookup tree
*/
LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
return VINF_SUCCESS;
}
}
/* Give back the patch memory we no longer need */
if (orgOffsetPatchMem != (uint32_t)~0)
}
/**
* Install a trampoline to call a guest trap handler directly
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to privileged instruction
* @param pPatchRec Patch record
* @param pCacheRec Cache record ptr
*
*/
static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): this excerpt is incomplete -- 'pPatch' is undeclared, the generator
 * calls producing 'rc' and the 'failure:' label are elided. Comments are limited to
 * what the visible code shows; confirm against the full source. */
int rc = VERR_PATCHING_REFUSED;
uint32_t orgOffsetPatchMem = ~0;
bool fInserted;
// save original offset (in case of failures later on)
pPatch->uCurPatchOffset = 0;
pPatch->nrPatch2GuestRecs = 0;
#ifdef VBOX_WITH_STATISTICS
if (RT_FAILURE(rc))
goto failure;
#endif
if (RT_FAILURE(rc))
goto failure;
/* size of patch block */
/* Update free pointer in patch memory. */
/* Round to next 8 byte boundary */
/* There's no jump from guest to patch code. */
pPatch->cbPatchJump = 0;
#ifdef LOG_ENABLED
Log(("Patch code ----------------------------------------------------------\n"));
patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
Log(("Patch code ends -----------------------------------------------------\n"));
#endif
/*
* Insert into patch to guest lookup tree
*/
LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
return VINF_SUCCESS;
AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
/* Turn this cli patch into a dummy. */
pPatch->pPatchBlockOffset = 0;
/* Give back the patch memory we no longer need */
return rc;
}
#ifdef LOG_ENABLED
/**
* Check if the instruction is patched as a common idt handler
*
* @returns true or false
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to the instruction
*
*/
{
/* NOTE(review): signature and the lookup/condition guarding the first return are
 * elided in this excerpt -- confirm against the full source. */
return true;
return false;
}
#endif //LOG_ENABLED
/**
* Duplicates a complete function
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to privileged instruction
* @param pPatchRec Patch record
* @param pCacheRec Cache record ptr
*
*/
static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): this excerpt is incomplete -- 'pPatch' is undeclared, the recompile
 * call, the depth check condition and the 'failure:' label are elided. Comments are
 * limited to what the visible code shows; confirm against the full source. */
int rc = VERR_PATCHING_REFUSED;
uint32_t orgOffsetPatchMem = ~0;
bool fInserted;
/* Save original offset (in case of failures later on). */
/* We will not go on indefinitely with call instruction handling. */
{
Log(("patmDuplicateFunction: maximum callback depth reached!!\n"));
return VERR_PATCHING_REFUSED;
}
#ifdef PATM_ENABLE_CALL
#endif
pPatch->nrPatch2GuestRecs = 0;
pPatch->uCurPatchOffset = 0;
/* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
if (RT_FAILURE(rc))
goto failure;
#ifdef VBOX_WITH_STATISTICS
if (RT_FAILURE(rc))
goto failure;
#endif
if (rc != VINF_SUCCESS)
{
goto failure;
}
//size of patch block
//update free pointer in patch memory
/* Round to next 8 byte boundary. */
/*
* Insert into patch to guest lookup tree
*/
LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
if (!fInserted)
{
goto failure;
}
/* Note that patmr3SetBranchTargets can install additional patches!! */
if (rc != VINF_SUCCESS)
{
goto failure;
}
#ifdef LOG_ENABLED
Log(("Patch code ----------------------------------------------------------\n"));
patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pCacheRec);
Log(("Patch code ends -----------------------------------------------------\n"));
#endif
return VINF_SUCCESS;
pPatch->nrJumpRecs = 0;
/* Turn this cli patch into a dummy. */
pPatch->pPatchBlockOffset = 0;
// Give back the patch memory we no longer need
return rc;
}
/**
* Creates trampoline code to jump inside an existing patch
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to privileged instruction
* @param pPatchRec Patch record
*
*/
{
/* NOTE(review): this excerpt is incomplete -- the signature, the declarations of
 * pPatch/pPage/pPatchToJmp/pPatchTargetGC/pTrampRec, the generator calls, and the
 * 'failure:' label are elided. Comments describe visible intent only; confirm
 * against the full source. */
uint32_t orgOffsetPatchMem = ~0;
int rc = VERR_PATCHING_REFUSED;
bool fInserted = false;
/* Save original offset (in case of failures later on). */
/* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
/** @todo we already checked this before */
PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
if (pPatchPage)
{
uint32_t i;
for (i=0;i<pPatchPage->cCount;i++)
{
if (pPatchPage->aPatch[i])
{
{
if (pPatchTargetGC)
{
PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatchToJmp->Patch2GuestAddrTree, offsetPatch, false);
pPatchToGuestRec->fJumpTarget = true;
Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv (patch target %RRv)\n", pPatchToJmp->pPrivInstrGC, pPatchTargetGC));
break;
}
}
}
}
}
/*
* Only record the trampoline patch if this is the first patch to the target
* or we recorded other patches already.
* The goal is to refuse refreshing function duplicates if the guest
* modifies code after a saved state was loaded because it is not possible
* to save the relation between trampoline and target without changing the
* saved state version.
*/
{
if (!pTrampRec)
return VERR_NO_MEMORY; /* or better return VERR_PATCHING_REFUSED to let the VM continue? */
}
pPatch->nrPatch2GuestRecs = 0;
pPatch->uCurPatchOffset = 0;
/* Note: Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
if (RT_FAILURE(rc))
goto failure;
#ifdef VBOX_WITH_STATISTICS
if (RT_FAILURE(rc))
goto failure;
#endif
if (RT_FAILURE(rc))
goto failure;
/*
* Insert into patch to guest lookup tree
*/
LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
fInserted = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
if (!fInserted)
{
goto failure;
}
/* size of patch block */
/* Update free pointer in patch memory. */
/* Round to next 8 byte boundary */
/* There's no jump from guest to patch code. */
pPatch->cbPatchJump = 0;
/* Enable the patch. */
/* We allow this patch to be called as a function. */
if (pTrampRec)
{
}
return VINF_SUCCESS;
pPatch->nrJumpRecs = 0;
/* Turn this cli patch into a dummy. */
pPatch->pPatchBlockOffset = 0;
// Give back the patch memory we no longer need
if (pTrampRec)
return rc;
}
/**
* Duplicates a function and/or installs a trampoline for it
* (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pCtx Guest context
*
*/
{
/* NOTE(review): this excerpt is incomplete -- the signature, the declarations of
 * pPage and the per-patch lookup producing pPatchTargetGC, and the patch-installation
 * calls setting 'rc' are elided. Comments describe visible intent only; confirm
 * against the full source. */
int rc;
RTRCPTR pPatchTargetGC = 0;
/* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
if (pPatchPage)
{
uint32_t i;
for (i=0;i<pPatchPage->cCount;i++)
{
if (pPatchPage->aPatch[i])
{
{
if (pPatchTargetGC)
{
break;
}
}
}
}
}
if (pPatchTargetGC)
{
/* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
}
else
{
}
if (rc == VINF_SUCCESS)
{
}
if (pPatchTargetGC)
{
}
else
{
/* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
}
return VINF_SUCCESS;
}
/**
* Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pCpu Disassembly CPU structure ptr
* @param pInstrGC Guest context pointer to privileged instruction
* @param pCacheRec Cache record ptr
*
*/
static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): this excerpt is incomplete -- the declarations of pPatch/pTargetGC/
 * pPatchFunction/pTmpInstrHC, the jump-following disassembly inside the for(;;) loop,
 * the call-generation step, and the 'failure:' label are elided. Comments describe
 * visible intent only; confirm against the full source. */
int rc = VERR_PATCHING_REFUSED;
bool disret;
#ifdef LOG_ENABLED
char szOutput[256];
#endif
/* Only near relative calls/jumps (5 bytes) are supported here. */
Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);
if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
{
goto failure;
}
if (pTargetGC == 0)
{
goto failure;
}
if (pPatchFunction == NULL)
{
for(;;)
{
/* It could be an indirect call (call -> jmp dest).
* Note that it's dangerous to assume the jump will never change...
*/
if (pTmpInstrHC == 0)
break;
break;
if (pTargetGC == 0)
{
break;
}
break;
}
if (pPatchFunction == 0)
{
goto failure;
}
}
// make a copy of the guest code bytes that will be overwritten
rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
/* Now replace the original call in the guest code */
if (RT_FAILURE(rc))
goto failure;
/* Lowest and highest address for write monitoring. */
return VINF_SUCCESS;
/* Turn this patch into a dummy. */
return rc;
}
/**
* Replace the address in an MMIO instruction with the cached version.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context point to privileged instruction
* @param pCpu Disassembly CPU structure ptr
* @param pCacheRec Cache record ptr
*
* @note returns failure if patching is not allowed or possible
*
*/
static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATMP2GLOOKUPREC pCacheRec)
{
/* NOTE(review): pPB and pPatch are used below but never declared in the visible
 * text, and the two leading unconditional 'goto failure;' lines make everything
 * after them unreachable as shown -- source lines were lost during extraction.
 * Consult the complete upstream file before changing anything here. */
int rc = VERR_PATCHING_REFUSED;
goto failure;
goto failure;
if (pPB == 0)
goto failure;
/* Add relocation record for cached data access. */
if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
{
Log(("Relocation failed for cached mmio address!!\n"));
return VERR_PATCHING_REFUSED;
}
/* Save original instruction. */
rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
/* Replace address with that of the cached item. */
/* The absolute address operand occupies the last sizeof(RTRCPTR) bytes of the instruction. */
rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
if (RT_FAILURE(rc))
{
goto failure;
}
return VINF_SUCCESS;
/* Turn this patch into a dummy. */
/* NOTE(review): unreachable as shown; a 'failure:' label presumably preceded this. */
return rc;
}
/**
* Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context point to privileged instruction
* @param pPatch Patch record
*
* @note returns failure if patching is not allowed or possible
*
*/
{
bool disret;
/* Convert GC to HC address. */
/* Disassemble mmio instruction. */
if (disret == false)
{
Log(("Disassembly failed (probably page not present) -> return to caller\n"));
return VERR_PATCHING_REFUSED;
}
if (opsize > MAX_INSTR_SIZE)
return VERR_PATCHING_REFUSED;
return VERR_PATCHING_REFUSED;
/* Add relocation record for cached data access. */
if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
{
Log(("Relocation failed for cached mmio address!!\n"));
return VERR_PATCHING_REFUSED;
}
/* Replace address with that of the cached item. */
/* Lowest and highest address for write monitoring. */
return VINF_SUCCESS;
}
/**
* Activates an int3 patch
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
*/
{
int rc;
/* Replace first opcode byte with 'int 3'. */
rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &bASMInt3, sizeof(bASMInt3));
return rc;
}
/**
* Deactivates an int3 patch
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
*/
{
int rc;
/* Restore first opcode byte. */
rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
return rc;
}
/**
* Replace an instruction with a breakpoint (0xCC), that is handled dynamically
* in the raw-mode context.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context point to privileged instruction
* @param pInstrHC Host context point to privileged instruction
* @param pCpu Disassembly CPU structure ptr
* @param pPatch Patch record
*
* @note returns failure if patching is not allowed or possible
*
*/
VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu,
/* NOTE(review): the parameter list above is cut mid-declaration (a trailing
 * PPATCHINFO pPatch parameter is presumably missing -- pPatch is used below);
 * the extracted source lost lines here. */
{
int rc;
/* Note: Do not use patch memory here! It might called during patch installation too. */
/* Save the original instruction. */
rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
pPatch->cbPatchJump = sizeof(bASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
/* Replace first opcode byte with 'int 3'. */
if (RT_FAILURE(rc))
goto failure;
/* Lowest and highest address for write monitoring. */
return VINF_SUCCESS;
/* Turn this patch into a dummy. */
/* NOTE(review): unreachable as shown; a 'failure:' label presumably preceded this. */
return VERR_PATCHING_REFUSED;
}
/**
* Patch a jump instruction at specified location
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC Guest context point to privileged instruction
* @param pInstrHC Host context point to privileged instruction
* @param pCpu Disassembly CPU structure ptr
* @param pPatchRec Patch record
*
* @note returns failure if patching is not allowed or possible
*
*/
int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
{
/* NOTE(review): pPatch is used but not declared in the visible text (presumably
 * &pPatchRec->patch), and the switch statement's header line is missing before
 * the case labels below -- lines were lost during extraction. Verify against
 * the complete upstream file before editing. */
int rc = VERR_PATCHING_REFUSED;
pPatch->uCurPatchOffset = 0;
pPatch->cbPatchBlockSize = 0;
/*
* Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
* make sure this never happens. (unless a trap is triggered (intentionally or not))
*/
/* NOTE(review): missing 'switch (...)' header here. */
{
case OP_JO:
case OP_JNO:
case OP_JC:
case OP_JNC:
case OP_JE:
case OP_JNE:
case OP_JBE:
case OP_JNBE:
case OP_JS:
case OP_JNS:
case OP_JP:
case OP_JNP:
case OP_JL:
case OP_JNL:
case OP_JLE:
case OP_JNLE:
case OP_JMP:
/* NOTE(review): the conditions guarding these two gotos are missing. */
goto failure;
goto failure;
{
/* Refuse a patch jump that would straddle a page boundary. */
AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
goto failure;
}
break;
default:
goto failure;
}
// make a copy of the guest code bytes that will be overwritten
rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
/* Now insert a jump in the guest code. */
/*
* A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
* references the target instruction in the conflict patch.
*/
/* Resolve the guest-side jump target (next instruction + rel32 operand) to its patch-memory address. */
RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
/* Free leftover lock if any. */
{
}
if (RT_FAILURE(rc))
goto failure;
Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
/* Lowest and highest address for write monitoring. */
return VINF_SUCCESS;
/* Turn this cli patch into a dummy. */
/* NOTE(review): unreachable as shown; a 'failure:' label presumably preceded this. */
return rc;
}
#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
/**
* Gives hint to PATM about supervisor guest instructions
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstr Guest context point to privileged instruction
* @param flags Patch flags
*/
{
}
/**
* Patch privileged instruction at specified location
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstr Guest context point to privileged instruction (0:32 flat address)
* @param flags Patch flags
*
* @note returns failure if patching is not allowed or possible
*/
{
bool disret;
int rc;
if ( !pVM
|| pInstrGC == 0
|| (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
{
AssertFailed();
return VERR_INVALID_PARAMETER;
}
if (PATMIsEnabled(pVM) == false)
return VERR_PATCHING_REFUSED;
/* Test for patch conflict only with patches that actually change guest code. */
{
AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
if (pConflictPatch != 0)
return VERR_PATCHING_REFUSED;
}
if (!(flags & PATMFL_CODE32))
{
/** @todo Only 32 bits code right now */
AssertMsgFailed(("PATMR3InstallPatch: We don't support 16 bits code at this moment!!\n"));
return VERR_NOT_IMPLEMENTED;
}
/* We ran out of patch memory; don't bother anymore. */
return VERR_PATCHING_REFUSED;
/* Make sure the code selector is wide open; otherwise refuse. */
{
if (pInstrGCFlat != pInstrGC)
{
Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
return VERR_PATCHING_REFUSED;
}
}
/* Note: the OpenBSD specific check will break if we allow additional patches to be installed (int 3)) */
if (!(flags & PATMFL_GUEST_SPECIFIC))
{
/* New code. Make sure CSAM has a go at it first. */
}
/* Note: obsolete */
&& (flags & PATMFL_MMIO_ACCESS))
{
void *pvPatchCoreOffset;
/* Find the patch record. */
pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
if (pvPatchCoreOffset == NULL)
{
return VERR_PATCH_NOT_FOUND; //fatal error
}
}
if (pPatchRec)
{
/* Hints about existing patches are ignored. */
if (flags & PATMFL_INSTR_HINT)
return VERR_PATCHING_REFUSED;
{
Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
}
{
/* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
{
}
else
/** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
if (RT_SUCCESS(rc))
return VWRN_PATCH_ENABLED;
return rc;
}
{
/*
* The patch might have been overwritten.
*/
{
/* Patch must have been overwritten; remove it and pretend nothing happened. */
if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
{
if (flags & PATMFL_IDTHANDLER)
pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
return VERR_PATM_ALREADY_PATCHED; /* already done once */
}
}
if (RT_FAILURE(rc))
return VERR_PATCHING_REFUSED;
}
else
{
AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
/* already tried it once! */
return VERR_PATCHING_REFUSED;
}
}
if (rc != VINF_SUCCESS)
{
return rc;
}
/* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
{
return VERR_PATCHING_REFUSED;
}
/* Initialize cache record for guest address translations. */
bool fInserted;
/* Allocate patch record. */
if (RT_FAILURE(rc))
{
Log(("Out of memory!!!!\n"));
return VERR_NO_MEMORY;
}
/* Insert patch record into the lookup tree. */
if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
{
/*
* Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
*/
PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
if (pPatchNear)
{
if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
{
Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
/*
* Leave the new patch active as it's marked unusable; to prevent us from checking it over and over again
*/
return VERR_PATCHING_REFUSED;
}
}
}
pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
{
Log(("Out of memory!!!!\n"));
return VERR_NO_MEMORY;
}
disret = patmR3DisInstrNoStrOpMode(pVM, &pPatchRec->patch, pInstrGC, NULL, PATMREAD_ORGCODE, &cpu, &opsize);
if (disret == false)
{
Log(("Disassembly failed (probably page not present) -> return to caller\n"));
return VERR_PATCHING_REFUSED;
}
if (opsize > MAX_INSTR_SIZE)
return VERR_PATCHING_REFUSED;
/* Restricted hinting for now. */
/* Initialize cache record patch pointer. */
/* Allocate statistics slot */
{
}
else
{
Log(("WARNING: Patch index wrap around!!\n"));
}
{
}
else
{
}
else
{
}
else
{
}
else
{
}
else
{
}
else
{
#ifdef VBOX_WITH_STATISTICS
if ( rc == VINF_SUCCESS
{
}
#endif
}
else
{
{
case OP_SYSENTER:
case OP_PUSH:
if (rc == VINF_SUCCESS)
{
if (rc == VINF_SUCCESS)
Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
return rc;
}
break;
default:
break;
}
}
else
{
{
case OP_SYSENTER:
if (rc == VINF_SUCCESS)
{
Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
return VINF_SUCCESS;
}
break;
case OP_JO:
case OP_JNO:
case OP_JC:
case OP_JNC:
case OP_JE:
case OP_JNE:
case OP_JBE:
case OP_JNBE:
case OP_JS:
case OP_JNS:
case OP_JP:
case OP_JNP:
case OP_JL:
case OP_JNL:
case OP_JLE:
case OP_JNLE:
case OP_JECXZ:
case OP_LOOP:
case OP_LOOPNE:
case OP_LOOPE:
case OP_JMP:
{
break;
}
return VERR_NOT_IMPLEMENTED;
#endif
case OP_PUSHF:
case OP_CLI:
Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
break;
case OP_STR:
case OP_SGDT:
case OP_SLDT:
case OP_SIDT:
case OP_CPUID:
case OP_LSL:
case OP_LAR:
case OP_SMSW:
case OP_VERW:
case OP_VERR:
case OP_IRET:
break;
default:
return VERR_NOT_IMPLEMENTED;
}
}
if (rc != VINF_SUCCESS)
{
{
}
}
else
{
/* Keep track upper and lower boundaries of patched instructions */
Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
rc = VINF_SUCCESS;
/* Patch hints are not enabled by default. Only when the are actually encountered. */
{
}
#ifdef VBOX_WITH_STATISTICS
/* Register statistics counter */
{
STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
#ifndef DEBUG_sandervl
/* Full breakdown for the GUI. */
STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
/// @todo change the state to be a callback so we can get a state mnemonic instead.
STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
#endif
}
#endif
}
/* Free leftover lock if any. */
return rc;
}
/**
* Query instruction size
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
* @param pInstrGC Instruction address
*/
{
if (rc == VINF_SUCCESS)
{
bool disret;
disret = patmR3DisInstr(pVM, pPatch, pInstrGC, pInstrHC, PATMREAD_ORGCODE | PATMREAD_NOCHECK, &cpu, &opsize);
if (disret)
return opsize;
}
return 0;
}
/**
* Add patch to page record
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPage Page address
* @param pPatch Patch record
*/
{
int rc;
if (pPatchPage)
{
{
rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
if (RT_FAILURE(rc))
{
Log(("Out of memory!!!!\n"));
return VERR_NO_MEMORY;
}
}
pPatchPage->cCount++;
}
else
{
bool fInserted;
if (RT_FAILURE(rc))
{
Log(("Out of memory!!!!\n"));
return VERR_NO_MEMORY;
}
rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
if (RT_FAILURE(rc))
{
Log(("Out of memory!!!!\n"));
return VERR_NO_MEMORY;
}
}
/* Get the closest guest instruction (from below) */
PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
if (pGuestToPatchRec)
{
LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
if ( pPatchPage->pLowestAddrGC == 0
{
/* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
{
/* Get the closest guest instruction (from above) */
pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
if (pGuestToPatchRec)
{
{
}
}
}
}
}
/* Get the closest guest instruction (from above) */
pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
if (pGuestToPatchRec)
{
LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
if ( pPatchPage->pHighestAddrGC == 0
{
/* Increase by instruction size. */
//// Assert(size);
}
}
return VINF_SUCCESS;
}
/**
* Remove patch from page record
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPage Page address
* @param pPatch Patch record
*/
{
int rc;
if (!pPatchPage)
return VERR_INVALID_PARAMETER;
{
uint32_t i;
/* Used by multiple patches */
for (i=0;i<pPatchPage->cCount;i++)
{
{
pPatchPage->aPatch[i] = 0;
break;
}
}
/* close the gap between the remaining pointers. */
{
memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
}
pPatchPage->cCount--;
}
else
{
pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
}
return VINF_SUCCESS;
}
/**
* Insert page records for all guest pages that contain instructions that were recompiled for this patch
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
*/
{
int rc;
/* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
/** @todo optimize better (large gaps between current and next used page) */
{
/* Get the closest guest instruction (from above) */
PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
if ( pGuestToPatchRec
)
{
/* Code in page really patched -> add record */
}
}
return VINF_SUCCESS;
}
/**
* Remove page records for all guest pages that contain instructions that were recompiled for this patch
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
*/
{
int rc;
/* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
{
/* Get the closest guest instruction (from above) */
PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
if ( pGuestToPatchRec
&& PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
)
{
/* Code in page really patched -> remove record */
}
}
return VINF_SUCCESS;
}
/**
* Notifies PATM about a (potential) write to code that has been patched.
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param GCPtr GC pointer to write address
* @param cbWrite Nr of bytes to write
*
*/
{
/* Quick boundary check */
)
return VINF_SUCCESS;
{
PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
if (pPatchPage)
{
uint32_t i;
bool fValidPatchWrite = false;
/* Quick check to see if the write is in the patched part of the page */
{
break;
}
for (i=0;i<pPatchPage->cCount;i++)
{
if (pPatchPage->aPatch[i])
{
//unused: bool fForceBreak = false;
/** @todo inefficient and includes redundant checks for multiple pages. */
{
if ( pPatch->cbPatchJump
{
/* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
if (rc == VINF_SUCCESS)
/* Note: jump back to the start as the pPatchPage has been deleted or changed */
goto loop_start;
continue;
}
/* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
if (!pPatchInstrGC)
{
if (pPatchInstrGC)
{
/* Check if this is not a write into a gap between two patches */
pPatchInstrGC = 0;
}
}
if (pPatchInstrGC)
{
uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
fValidPatchWrite = true;
PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
{
{
LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
/* Note: jump back to the start as the pPatchPage has been deleted or changed */
goto loop_start;
}
else
{
/* Replace the patch instruction with a breakpoint; when it's hit, then we'll attempt to recompile the instruction again. */
pPatchToGuestRec->fDirty = true;
*pInstrHC = 0xCC;
}
}
/* else already marked dirty */
}
}
}
} /* for each patch */
if (fValidPatchWrite == false)
{
/* Write to a part of the page that either:
* - old code page that's no longer in active use.
*/
pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
if (pPatchPage)
{
for (i=0;i<pPatchPage->cCount;i++)
{
{
/* Note: possibly dangerous assumption that all future writes will be harmless. */
{
LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
}
else
{
LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
}
/* Note: jump back to the start as the pPatchPage has been deleted or changed */
goto invalid_write_loop_start;
}
} /* for */
}
}
}
}
return VINF_SUCCESS;
}
/**
* Disable all patches in a flushed page
*
* @returns VBox status code
* @param pVM The VM to operate on.
* @param addr GC address of the page to flush
*/
/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
*/
{
PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
if (pPatchPage)
{
int i;
/* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
{
if (pPatchPage->aPatch[i])
{
}
}
}
return VINF_SUCCESS;
}
/**
* Checks if the instructions at the specified address has been patched already.
*
* @returns boolean, patched or not
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to instruction
*/
{
return true;
return false;
}
/**
* Query the opcode of the original code that was overwritten by the 5 bytes patch jump
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstrGC GC address of instr
* @param pByte opcode byte pointer (OUT)
*
*/
{
/** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
/* Shortcut. */
if ( !PATMIsEnabled(pVM)
{
return VERR_PATCH_NOT_FOUND;
}
pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
// if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
if ( pPatchRec
{
{
}
return VINF_SUCCESS;
}
return VERR_PATCH_NOT_FOUND;
}
/**
* Disable patch for privileged instruction at specified location
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstr Guest context point to privileged instruction
*
* @note returns failure if patching is not allowed or possible
*
*/
{
if (pPatchRec)
{
int rc = VINF_SUCCESS;
/* Already disabled? */
return VINF_SUCCESS;
/* Clear the IDT entries for the patch we're disabling. */
/* Note: very important as we clear IF in the patch itself */
/** @todo this needs to be changed */
{
{
if (++cIDTHandlersDisabled < 256)
}
}
/* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, function, trampoline or idt patches) */
if ( pPatch->pPatchBlockOffset
{
}
/* IDT or function patches haven't changed any guest code. */
{
Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
{
/* Let's first check if the guest code is still the same. */
if (rc == VINF_SUCCESS)
{
RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
)
{
Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
/* Remove it completely */
return VWRN_PATCH_REMOVED;
}
}
else
{
Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
}
}
else
{
AssertMsgFailed(("Patch was refused!\n"));
return VERR_PATCH_ALREADY_DISABLED;
}
}
else
{
/* Let's first check if the guest code is still the same. */
if (rc == VINF_SUCCESS)
{
if (temp[0] != 0xCC)
{
Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
/* Remove it completely */
return VWRN_PATCH_REMOVED;
}
}
}
if (rc == VINF_SUCCESS)
{
/* Save old state and mark this one as disabled (so it can be enabled later on). */
{
/* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
}
else
{
}
}
return VINF_SUCCESS;
}
Log(("Patch not found!\n"));
return VERR_PATCH_NOT_FOUND;
}
/**
* Permanently disable patch for privileged instruction at specified location
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstr Guest context instruction pointer
* @param pConflictAddr Guest context pointer which conflicts with specified patch
* @param pConflictPatch Conflicting patch
*
*/
static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
{
/* NOTE(review): the 'if' condition at the top is cut mid-expression and a stray
 * '#endif' below has no matching '#if' in the visible text -- lines were lost
 * during extraction (likely the PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
 * conditional block, cf. the '#endif' at the end of patmPatchJump above).
 * Verify against the complete upstream file before editing. */
bool disret;
int rc;
/*
* If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
* with one that jumps right into the conflict patch.
* Otherwise we must disable the conflicting patch to avoid serious problems.
*/
if ( disret == true
{
/* Hint patches must be enabled first. */
{
/* Enabling might fail if the patched code has changed in the meantime. */
if (rc != VINF_SUCCESS)
return rc;
}
if (RT_SUCCESS(rc))
{
return VINF_SUCCESS;
}
}
#endif
{
/* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
if (rc == VWRN_PATCH_REMOVED)
return VINF_SUCCESS;
if (RT_SUCCESS(rc))
{
if (rc == VERR_PATCH_NOT_FOUND)
return VINF_SUCCESS; /* removed already */
if (RT_SUCCESS(rc))
{
return VINF_SUCCESS;
}
}
/* else turned into unusable patch (see below) */
}
else
{
Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
if (rc == VWRN_PATCH_REMOVED)
return VINF_SUCCESS;
}
/* No need to monitor the code anymore. */
{
}
/* Conflicting patch could not be salvaged; report it disabled. */
return VERR_PATCH_DISABLED;
}
/**
* Enable patch for privileged instruction at specified location
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstr Guest context point to privileged instruction
*
* @note returns failure if patching is not allowed or possible
*
*/
{
if (pPatchRec)
{
int rc = VINF_SUCCESS;
{
{
/* Let's first check if the guest code is still the same. */
if (rc2 == VINF_SUCCESS)
{
{
Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
/* Remove it completely */
return VERR_PATCH_NOT_FOUND;
}
/* Free leftover lock if any. */
{
}
if (RT_FAILURE(rc2))
return rc2;
#ifdef DEBUG
{
char szOutput[256];
uint32_t i = 0;
bool disret;
while(i < pPatch->cbPatchJump)
{
i += opsize;
}
}
#endif
}
}
else
{
/* Let's first check if the guest code is still the same. */
{
Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
return VERR_PATCH_NOT_FOUND;
}
if (RT_FAILURE(rc2))
return rc2;
}
/* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
if (pPatch->pPatchBlockOffset)
}
else
return rc;
}
return VERR_PATCH_NOT_FOUND;
}
/**
* Remove patch for privileged instruction at specified location
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatchRec Patch record
* @param fForceRemove Remove *all* patches
*/
/* NOTE(review): signature and several statements are missing from this extract;
 * comments cover only the visible lines. */
{
/* Strictly forbidden to remove such patches. There can be dependencies!! */
/* The guarding condition is not visible here — presumably tests patch flags. */
{
return VERR_ACCESS_DENIED;
}
/* Note: NEVER EVER REUSE PATCH MEMORY */
/* Note: PATMR3DisablePatch puts a breakpoint (0xCC) at the entry of this patch */
/* Drop the record from the by-patch-address AVL tree, keyed by block offset. */
{
pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
}
{
}
#ifdef VBOX_WITH_STATISTICS
{
#ifndef DEBUG_sandervl
#endif
}
#endif
/* Note: no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
pPatch->nrPatch2GuestRecs = 0;
/* Note: might fail, because it has already been removed (e.g. during reset). */
/* Free the patch record */
return VINF_SUCCESS;
}
/**
* RTAvlU32DoWithAll() worker.
* Checks whether the current trampoline instruction is the jump to the target patch
* and updates the displacement to jump to the new target.
*
* @returns VBox status code.
* @retval VERR_ALREADY_EXISTS if the jump was found.
* @param pNode The current patch to guest record to check.
* @param pvUser The refresh state.
*/
/* NOTE(review): signature and some statements are missing from this extract. */
{
/*
* Check if the patch instruction starts with a jump.
* ASSUMES that there is no other patch to guest record that starts
* with a jump.
*/
/* 0xE9 is the opcode byte of a near JMP rel32. */
if (*pPatchInstr == 0xE9)
{
/* Jump found, update the displacement. */
/* rel32 is relative to the end of the 5-byte jump instruction, hence the
 * SIZEOF_NEARJUMP32 term; Core.Key is the offset into patch memory. */
int32_t displ = pPatchTargetGC - (pVM->patm.s.pPatchMemGC + pPatch2GuestRec->Core.Key + SIZEOF_NEARJUMP32);
LogFlow(("Updating trampoline patch new patch target %RRv, new displacment %d (old was %d)\n",
return VERR_ALREADY_EXISTS; /** @todo better return code */
}
return VINF_SUCCESS;
}
/**
* Attempt to refresh the patch by recompiling its entire code block
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatchRec Patch record
*/
/* NOTE(review): signature and many statements (including the 'failure:' label
 * targeted by the gotos below) are missing from this extract. */
{
int rc;
/* Refreshing only makes sense for these patch types. */
AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
{
if (!pPatch->pTrampolinePatchesHead)
{
/*
* It is sometimes possible that there are trampoline patches to this patch
* but they are not recorded (after a saved state load for example).
* Refuse to refresh those patches.
* Can hurt performance in theory if the patched code is modified by the guest
* and is executed often. However most of the time states are saved after the guest
* code was modified and is not updated anymore afterwards so this shouldn't be a
* big problem.
*/
Log(("patmR3RefreshPatch: refused because external jumps to this patch exist but the jumps are not recorded\n"));
return VERR_PATCHING_REFUSED;
}
Log(("patmR3RefreshPatch: external jumps to this patch exist, updating\n"));
}
/** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
/** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
#ifdef VBOX_WITH_STATISTICS
{
#ifndef DEBUG_sandervl
#endif
}
#endif
/** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
/* Attempt to install a new patch. */
/* Recompile with the same relevant flag set as the original patch. */
rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
if (RT_SUCCESS(rc))
{
/* Determine target address in new patch */
if (!pPatchTargetGC)
{
goto failure;
}
/* Reset offset into patch memory to put the next code blocks right at the beginning. */
pPatch->uCurPatchOffset = 0;
/* insert jump to new patch in old patch block */
if (RT_FAILURE(rc))
goto failure;
/* Remove old patch (only do that when everything is finished) */
/* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
/* Used by another patch, so don't remove it! */
{
/* Update all trampoline patches to jump to the new patch. */
while (pTrampRec)
{
/*
* We have to find the right patch2guest record because there might be others
* for statistics.
*/
rc = VINF_SUCCESS;
}
/* Clear the list of trampoline patches for the old patch (safety precaution). */
}
}
if (RT_FAILURE(rc))
{
LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactiving old one. \n", pInstrGC));
/* Remove the new inactive patch */
/* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
/* Enable again in case the dirty instruction is near the end and there are safe code paths. */
}
return rc;
}
/**
* Find patch for privileged instruction at specified location
*
* @returns Patch structure pointer if found; else NULL
* @param pVM The VM to operate on.
* @param pInstr Guest context point to instruction that might lie within 5 bytes of an existing patch jump
* @param fIncludeHints Include hinted patches or not
*
*/
/* NOTE(review): signature and the success-return statements are missing from
 * this extract. */
{
/* GetBestFit with fAbove=false returns the closest record at or below pInstrGC. */
PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
/* if the patch is enabled, the pointer is not identical to the privileged patch ptr and it lies within 5 bytes of this priv instr ptr, then we've got a hit! */
if (pPatchRec)
{
{
}
else
/* Optionally also accept hint patches. */
if ( fIncludeHints
{
}
}
return NULL;
}
/**
* Checks whether the GC address is inside a generated patch jump
*
* @returns true -> yes, false -> no
* @param pVM The VM to operate on.
* @param pAddr Guest context address
* @param pPatchAddr Guest context patch address (if true)
*/
/* NOTE(review): signature and the lookup call are missing from this extract;
 * 'addr' is presumably a local RTRCPTR declared above — TODO confirm. */
{
/* Nothing can be inside a patch jump when patching is disabled altogether. */
if (PATMIsEnabled(pVM) == false)
return false;
/* pPatchAddr is an optional out parameter; redirect it to a local when the
 * caller doesn't care, so the code below can write through it unconditionally. */
if (pPatchAddr == NULL)
pPatchAddr = &addr;
*pPatchAddr = 0;
if (pPatch)
/* A non-zero *pPatchAddr means a patch jump covering pAddr was found. */
return *pPatchAddr == 0 ? false : true;
}
/**
* Remove patch for privileged instruction at specified location
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pInstr Guest context point to privileged instruction
*
* @note returns failure if patching is not allowed or possible
*
*/
/* NOTE(review): signature, the record lookup and the disable/remove call are
 * missing from this extract. */
{
if (pPatchRec)
{
/* VWRN_PATCH_REMOVED from the (not visible) call means the patch is gone —
 * report plain success to the caller. */
if (rc == VWRN_PATCH_REMOVED)
return VINF_SUCCESS;
}
/* Callers are expected to only pass addresses of existing patches. */
AssertFailed();
return VERR_PATCH_NOT_FOUND;
}
/**
* Mark patch as dirty
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
*
* @note returns failure if patching is not allowed or possible
*
*/
/* NOTE(review): signature and most statements are missing from this extract. */
{
/* Only patches with an actual patch code block need this treatment. */
if (pPatch->pPatchBlockOffset)
{
}
/* Put back the replaced instruction. */
if (rc == VWRN_PATCH_REMOVED)
return VINF_SUCCESS;
/* Note: we don't restore patch pages for patches that are not enabled! */
/* Note: be careful when changing this behaviour!! */
/* The patch pages are no longer marked for self-modifying code detection */
{
}
/* Paranoia; make sure this patch is not somewhere in the callchain, so prevent ret instructions from succeeding. */
return VINF_SUCCESS;
}
/**
* Query the corresponding GC instruction pointer from a pointer inside the patch block itself
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch block structure pointer
* @param pPatchGC GC address in patch block
*/
/* NOTE(review): the function signature is missing from this extract. */
{
/* Get the closest record from below. */
/* The tree is keyed by offset into patch memory, hence the pPatchMemGC bias. */
PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
if (pPatchToGuestRec)
return pPatchToGuestRec->pOrgInstrGC;
/* No translation record -> no corresponding guest address. */
return 0;
}
/* Converts Guest code GC ptr to Patch code GC ptr (if found)
*
* @returns corresponding GC pointer in patch block
* @param pVM The VM to operate on.
* @param pPatch Current patch block pointer
* @param pInstrGC Guest context pointer to privileged instruction
*
*/
/* NOTE(review): signature and the success-return statement are missing from
 * this extract. */
{
/* Only look when the guest->patch translation tree exists at all. */
if (pPatch->Guest2PatchAddrTree)
{
/* Exact-match lookup, keyed directly by the guest address. */
PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
if (pGuestToPatchRec)
}
return 0;
}
/* Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
*
* @returns corresponding GC pointer in patch block
* @param pVM The VM to operate on.
* @param pPatch Current patch block pointer
* @param pInstrGC Guest context pointer to privileged instruction
*
*/
/* NOTE(review): signature and the success-return statement are missing from
 * this extract; only the not-found path is visible. */
{
/* Closest record at or below pInstrGC (fAbove=false). */
PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
if (pGuestToPatchRec)
return 0;
}
/* Converts Guest code GC ptr to Patch code GC ptr (if found)
*
* @returns corresponding GC pointer in patch block
* @param pVM The VM to operate on.
* @param pInstrGC Guest context pointer to privileged instruction
*
*/
/* NOTE(review): signature and the success-return statement are missing from
 * this extract. */
{
/* Closest patch record at or below pInstrGC. */
PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
/* Only enabled patches whose private instruction lies at or below pInstrGC qualify. */
if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
else
return 0;
}
/**
* Query the corresponding GC instruction pointer from a pointer inside the patch block itself
*
* @returns original GC instruction pointer or 0 if not found
* @param pVM The VM to operate on.
* @param pPatchGC GC address in patch block
* @param pEnmState State of the translated address (out)
*
*/
/* NOTE(review): signature and most of the state-classification chain are
 * missing from this extract; the dangling 'else' lines below belong to
 * conditions that were cut. */
{
void *pvPatchCoreOffset;
/* Find the patch covering pPatchGC; the tree is keyed by offset into patch memory. */
pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
if (pvPatchCoreOffset == 0)
{
Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
return 0;
}
/* Optionally classify the translated address for the caller. */
if (pEnmState)
{
if ( !pPrivInstrGC
{
/* No usable translation -> report 0. */
pPrivInstrGC = 0;
}
else
{
}
else
{
}
else
{
}
else
{
}
else
}
return pPrivInstrGC;
}
/**
* Returns the GC pointer of the patch for the specified GC address
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pAddrGC Guest context address
*/
/* NOTE(review): signature, the record lookup and the success-return statement
 * are missing from this extract. */
{
/* Find the patch record. */
/** @todo we should only use patches that are enabled! always did this, but it's incorrect! */
/* Dirty patches are accepted too — see the todo above. */
if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
else
return 0;
}
/**
* Attempt to recover dirty instructions
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pCtx CPU context
* @param pPatch Patch record
* @param pPatchToGuestRec Patch to guest address record
* @param pEip GC pointer of trapping instruction
*/
/* NOTE(review): large parts of this function's body are missing from this
 * extract (declarations of cbDirty, pRec, pCurPatchInstrGC, cbLeft, cbFiller,
 * pPatchFillHC, etc.); the comments below describe only the visible lines. */
static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
{
int rc;
/* Running total of dirty bytes found so far. */
cbDirty = 0;
/* Find all adjacent dirty instructions */
while (true)
{
/* Never touch an instruction that was reused as a jump target elsewhere. */
if (pRec->fJumpTarget)
{
LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pOrgInstrGC));
return VERR_PATCHING_REFUSED;
}
/* Restore original instruction opcode byte so we can check if the write was indeed safe. */
/* Only harmless instructions are acceptable. */
if ( RT_FAILURE(rc)
{
if (RT_SUCCESS(rc))
else
/* Count at least one dirty byte so the fallback path below still triggers. */
if (!cbDirty)
cbDirty = 1;
break;
}
#ifdef DEBUG
char szBuf[256];
#endif
/* Mark as clean; if we fail we'll let it always fault. */
/* Remove old lookup record. */
/* Let's see if there's another dirty instruction right after. */
/* fAbove=true: closest record strictly above the current patch offset. */
pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
break; /* no more dirty instructions */
/* In case of complex instructions the next guest instruction could be quite far off. */
}
if ( RT_SUCCESS(rc)
)
{
{
bool fValidInstr;
if ( !fValidInstr
)
{
if ( pTargetGC >= pOrgInstrGC
)
{
/* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
fValidInstr = true;
}
}
/* If the instruction is completely harmless (which implies a 1:1 patch copy). */
if ( rc == VINF_SUCCESS
&& fValidInstr
)
{
#ifdef DEBUG
char szBuf[256];
#endif
/* Copy the new instruction. */
/* Add a new lookup record for the duplicated instruction. */
}
else
{
#ifdef DEBUG
char szBuf[256];
#endif
/* Restore the old lookup record for the duplicated instruction. */
/** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
break;
}
/* Check if we expanded a complex guest instruction into a patch stream (e.g. call) */
if (!cbLeft)
{
/* If the next patch instruction doesn't correspond to the next guest instruction, then we have some extra room to fill. */
if (RTAvlU32Get(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC) == NULL)
{
pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
if (pRec)
{
/* Enough filler room for a near jump (5 bytes) over the gap. */
if (cbFiller >= SIZEOF_NEARJUMP32)
{
/* 0xE9 = near JMP rel32 opcode. */
pPatchFillHC[0] = 0xE9;
#ifdef DEBUG
char szBuf[256];
#endif
}
else
{
/* Too small for a jump; fill byte by byte (presumably with NOP/INT3 —
 * the store itself is not visible in this extract). */
for (unsigned i = 0; i < cbFiller; i++)
{
#ifdef DEBUG
char szBuf[256];
#endif
}
}
}
}
}
}
}
else
if (RT_SUCCESS(rc))
{
}
else
{
/* Mark the whole instruction stream with breakpoints. */
if (cbDirty)
{
/* Recovery failed; try a full patch refresh, and disable on failure. */
if (RT_FAILURE(rc))
{
LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
}
/* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
}
}
return rc;
}
/**
* Handle trap inside patch code
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pCtx CPU context
* @param pEip GC pointer of trapping instruction
* @param ppNewEip GC pointer to new instruction
*/
/* NOTE(review): the signature and large parts of this function are missing
 * from this extract (declarations of offset, pNewEip, pPatchToGuestRec,
 * cacheRec, cpu, retaddr, pVCpu, pTrapRec, and the fPIF branch conditions);
 * comments below describe only the visible lines. */
{
PPATMPATCHREC pPatch = 0;
void *pvPatchCoreOffset;
int rc ;
/* Initialize the out parameter and local result before any early return. */
pNewEip = 0;
*ppNewEip = 0;
/* Find the patch record. */
/* Note: there might not be a patch to guest translation record (global function) */
pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
if (pvPatchCoreOffset)
{
/* Sanity: the trapping offset must lie inside this patch's code block. */
Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
{
{
/* Function duplication patches set fPIF to 1 on entry */
}
}
else
{
{
/* Function duplication patches set fPIF to 1 on entry */
}
}
else
{
/* A disable request was queued while we were inside the patch; honor it now. */
Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
}
/* Translate the patch offset back to the original guest instruction. */
pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
}
else
AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
/* Check if we were interrupted in PATM generated instruction code. */
{
if ( rc == VINF_SUCCESS
)
{
{
if ( rc == VINF_SUCCESS
{
/* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
/* Reset the PATM stack. */
Log(("Faulting push -> go back to the original instruction\n"));
/* continue at the original instruction */
return VINF_SUCCESS;
}
}
/* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
if (rc == VINF_SUCCESS)
{
/* The guest page *must* be present. */
if ( rc == VINF_SUCCESS
{
return VINF_PATCH_CONTINUE;
}
}
}
else
{
/* Invalidated patch or first instruction overwritten.
* We can ignore the fPIF state in this case.
*/
/* Reset the PATM stack. */
Log(("Call to invalidated patch -> go back to the original instruction\n"));
/* continue at the original instruction */
return VINF_SUCCESS;
}
char szBuf[256];
/* Disassemble the faulting code for the release assertion message below. */
DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, DBGF_DISAS_FLAGS_DEFAULT_MODE, szBuf, sizeof(szBuf), NULL);
/* Very bad. We crashed in emitted code. Probably stack? */
if (pPatch)
{
("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
}
else
}
/* From here on, we must have a valid patch to guest translation. */
if (pvPatchCoreOffset == 0)
{
return VERR_PATCH_NOT_FOUND;
}
/* Dirty instruction: attempt in-place recovery first. */
if (pPatchToGuestRec->fDirty)
{
if (RT_SUCCESS(rc))
{
/* Retry the current instruction. */
}
else
{
/* Reset the PATM stack. */
}
return rc;
}
#ifdef VBOX_STRICT
{
bool disret;
disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
{
Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
}
}
#endif
/* Return original address, correct by subtracting the CS base address. */
/* Reset the PATM stack. */
{
/* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
#ifdef VBOX_STRICT
bool disret;
disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_ORGCODE,
if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
{
disret = patmR3DisInstr(pVM, &pPatch->patch, pNewEip, PATMGCVirtToHCVirt(pVM, &cacheRec, pNewEip), PATMREAD_RAWCODE,
Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
}
#endif
}
Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
#ifdef LOG_ENABLED
#endif
/* A trap inside the bytes overwritten by the 5-byte patch jump is unrecoverable. */
if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
{
/* We can't jump back to code that we've overwritten with a 5 byte jump! */
Log(("Disabling patch at location %RRv due to trap too close to the privileged instruction \n", pPatch->patch.pPrivInstrGC));
return VERR_PATCH_DISABLED;
}
/** @todo compare to nr of successful runs. add some aging algorithm and determine the best time to disable the patch */
{
Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
//we are only wasting time, back out the patch
pTrapRec->pNextPatchInstr = 0;
return VERR_PATCH_DISABLED;
}
#endif
return VINF_SUCCESS;
}
/**
* Handle page-fault in monitored page
*
* @returns VBox status code.
* @param pVM The VM to operate on.
*/
/* NOTE(review): signature, the 'addr' declaration and the loop body are
 * missing from this extract. */
{
/* Closest patch at or below the faulting address. */
PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
/* Only react when an enabled patch's private instruction shares the faulting page. */
if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
{
if (rc == VWRN_PATCH_REMOVED)
return VINF_SUCCESS;
/* Advance past this record before scanning for further patches on the page. */
addr++;
}
/* Walk any remaining patches above 'addr' (loop body not visible here). */
for(;;)
{
pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
break;
{
}
}
return VINF_SUCCESS;
}
#ifdef VBOX_WITH_STATISTICS
/* Statistics-only helper returning a short descriptive string for a patch's
 * type. NOTE(review): the signature and the flag-test conditions are missing
 * from this extract; the dangling 'else' lines belong to cut conditions. */
{
{
return "SYSENT";
}
else
{
/* Interrupt/trap gate handlers: format the gate number into a static buffer.
 * NOTE(review): static buffer makes this non-reentrant — acceptable for
 * logging/statistics use only. */
static char szTrap[16];
if (iGate < 256)
RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
else
return szTrap;
}
else
return "DUPFUNC";
else
return "FUNCCALL";
else
return "TRAMP";
else
}
/* Statistics-only helper mapping a patch state value to a three-letter tag.
 * NOTE(review): the signature and the 'switch' line itself are missing from
 * this extract; only the case labels are visible. */
{
{
case PATCH_ENABLED:
return "ENA";
case PATCH_DISABLED:
return "DIS";
case PATCH_DIRTY:
return "DIR";
case PATCH_UNUSABLE:
return "UNU";
case PATCH_REFUSED:
return "REF";
case PATCH_DISABLE_PENDING:
return "DIP";
default:
/* Unknown state value — should not happen. */
AssertFailed();
return " ";
}
}
/**
* Resets the sample.
* @param pVM The VM handle.
* @param pvSample The sample registered using STAMR3RegisterCallback.
*/
/* NOTE(review): signature and body statements are missing from this extract. */
{
}
/**
* Prints the sample into the buffer.
*
* @param pVM The VM handle.
* @param pvSample The sample registered using STAMR3RegisterCallback.
* @param pszBuf The buffer to print into.
* @param cchBuf The size of the buffer.
*/
/* NOTE(review): signature and body statements are missing from this extract. */
{
}
/**
* Returns the GC address of the corresponding patch statistics counter
*
* @returns Stat address
* @param pVM The VM to operate on.
* @param pPatch Patch structure
*/
/* NOTE(review): the function signature is missing from this extract. */
{
/* Stats live in a GC-mapped array of STAMRATIOU32 indexed by uPatchIdx;
 * return the address of the u32A member of this patch's slot. */
return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
}
#endif /* VBOX_WITH_STATISTICS */
#ifdef VBOX_WITH_DEBUGGER
/**
* The '.patmoff' command.
*
* @returns VBox status.
* @param pCmd Pointer to the command descriptor (as registered).
* @param pCmdHlp Pointer to command helper functions.
* @param pVM Pointer to the current VM (if any).
* @param paArgs Pointer to (readonly) array of arguments.
* @param cArgs Number of arguments in the array.
*/
/* NOTE(review): the no-VM error path and the return statement are missing
 * from this extract. */
static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
{
/*
* Validate input.
*/
if (!pVM)
/* Globally disallow PATM patching. */
PATMR3AllowPatching(pVM, false);
}
/**
* The '.patmon' command.
*
* @returns VBox status.
* @param pCmd Pointer to the command descriptor (as registered).
* @param pCmdHlp Pointer to command helper functions.
* @param pVM Pointer to the current VM (if any).
* @param paArgs Pointer to (readonly) array of arguments.
* @param cArgs Number of arguments in the array.
*/
/* NOTE(review): the no-VM error path and the return statement are missing
 * from this extract. */
static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs)
{
/*
* Validate input.
*/
if (!pVM)
/* Globally (re-)enable PATM patching. */
PATMR3AllowPatching(pVM, true);
}
#endif