PATMPatch.cpp revision 13f7948dbc7ae32c3e30086604ad6ade2ead3671
/* $Id$ */
/** @file
* PATMPatch - Dynamic Guest OS Instruction patches
*
* NOTE: CSAM assumes patch memory is never reused!!
*/
/*
* Copyright (C) 2006-2007 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_PATM
#include "PATMInternal.h"
#include <VBox/disopcode.h>
#include <stdlib.h>
#include <stdio.h>
#include "PATMA.h"
#include "PATMPatch.h"
/* internal structure for passing more information about call fixups to patmPatchGenCode */
typedef struct
{
/**
 * Record a 32-bit fixup for a location inside the patch memory block.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param pVM       The VM to operate on.
 * @param pPatch    Patch record.
 * @param pRelocHC  Host context pointer to the location that needs fixing up.
 * @param uType     Fixup type (FIXUP_ABSOLUTE, FIXUP_REL_JMPTOPATCH or FIXUP_REL_JMPTOGUEST).
 * @param pSource   Source address (required for the relative fixup types).
 * @param pDest     Destination address (required for the relative fixup types).
 *
 * NOTE(review): the record-allocation logic is missing from this view of the
 * file; only the assertion and logging remain. Confirm against the full
 * revision before relying on this function.
 */
int patmPatchAddReloc32(PVM pVM, PPATCHINFO pPatch, uint8_t *pRelocHC, uint32_t uType, RTRCPTR pSource, RTRCPTR pDest)
{
    Assert(uType == FIXUP_ABSOLUTE || ((uType == FIXUP_REL_JMPTOPATCH || uType == FIXUP_REL_JMPTOGUEST) && pSource && pDest));
    /* Translate the host context pointer into its guest context equivalent for
     * logging: GC = GC base + (HC pointer - HC base). The original expression
     * subtracted the GC base, which cancels out and logs the raw HC pointer. */
    LogFlow(("patmPatchAddReloc32 type=%d pRelocGC=%RRv source=%RRv dest=%RRv\n", uType, pRelocHC - pVM->patm.s.pPatchMemHC + pVM->patm.s.pPatchMemGC, pSource, pDest));
    return VINF_SUCCESS;
}
/**
 * Record a jump inside the generated patch code.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param pVM       The VM to operate on.
 * @param pPatch    Patch record whose jump counter is bumped.
 * @param pJumpHC   Host context address of the jump instruction (unused in this view).
 * @param offset    Offset of the displacement within the instruction (unused in this view).
 * @param pTargetGC Guest context jump target (unused in this view).
 * @param opcode    Jump opcode (unused in this view).
 */
int patmPatchAddJump(PVM pVM, PPATCHINFO pPatch, uint8_t *pJumpHC, uint32_t offset, RTRCPTR pTargetGC, uint32_t opcode)
{
    /* Only the bookkeeping survives in this view: count the jump record. */
    ++pPatch->nrJumpRecs;
    return VINF_SUCCESS;
}
\
{ \
return VERR_NO_MEMORY; \
}
/**
 * Emit a pre-assembled PATM code template into the patch buffer and process
 * its relocation/fixup records, optionally appending a jump back to guest code.
 *
 * @returns Size (in bytes) of the generated patch block.
 * @param pVM           The VM to operate on.
 * @param pPatch        Patch record.
 * @param pPB           Host context pointer into the patch buffer to write at.
 * @param pAsmRecord    Assembly template descriptor (code bytes + uReloc fixup table).
 * @param pReturnAddrGC Guest address to jump back to when fGenJump is set.
 * @param fGenJump      Whether to generate the jump back to guest code.
 * @param pCallInfo     Optional extra information for call fixups (defaults to 0).
 *
 * NOTE(review): this function body is heavily truncated in this view — 'j' is
 * read uninitialized by the loop at the top, 'dest' is assigned without a
 * visible declaration, and several case bodies are empty. Confirm against the
 * full revision before editing; comments below only describe what is visible.
 */
static uint32_t patmPatchGenCode(PVM pVM, PPATCHINFO pPatch, uint8_t *pPB, PPATCHASMRECORD pAsmRecord, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fGenJump,
PPATMCALLINFO pCallInfo = 0)
{
uint32_t i, j;
// Copy the code block
// Process all fixups
{
/* NOTE(review): 'j' has no visible initialization before this loop. */
for (;j<pAsmRecord->size;j++)
{
{
#ifdef VBOX_STRICT
else
#endif
/**
* BE VERY CAREFUL WITH THESE FIXUPS. TAKE INTO ACCOUNT THAT PROBLEMS MAY ARISE WHEN RESTORING A SAVED STATE WITH
* A DIFFERENT HYPERVISOR LAYOUT.
*/
switch (pAsmRecord->uReloc[i])
{
case PATM_VMFLAGS:
break;
case PATM_PENDINGACTION:
break;
case PATM_FIXUP:
/* Offset in uReloc[i+1] is from the base of the function. */
/* GC destination = GC patch base + function-relative offset + (block HC pointer - HC patch base). */
dest = (RTGCUINTPTR32)pVM->patm.s.pPatchMemGC + pAsmRecord->uReloc[i+1] + (RTGCUINTPTR32)(pPB - pVM->patm.s.pPatchMemHC);
break;
#ifdef VBOX_WITH_STATISTICS
case PATM_ALLPATCHCALLS:
break;
case PATM_IRETEFLAGS:
break;
case PATM_IRETCS:
break;
case PATM_IRETEIP:
break;
case PATM_PERPATCHCALLS:
break;
#endif
case PATM_STACKPTR:
break;
/* The first part of our PATM stack is used to store offsets of patch return addresses; the 2nd
* part to store the original return addresses.
*/
case PATM_STACKBASE:
break;
case PATM_STACKBASE_GUEST:
break;
case PATM_RETURNADDR: /* absolute guest address; no fixup required */
break;
case PATM_PATCHNEXTBLOCK: /* relative address of instruction following this block */
/** @note hardcoded assumption that we must return to the instruction following this block */
break;
case PATM_CALLTARGET: /* relative to patch address; no fixup required */
/* Address must be filled in later. (see patmr3SetBranchTargets) */
break;
case PATM_PATCHBASE: /* Patch GC base address */
break;
case PATM_CPUID_STD_PTR:
/* @todo dirty hack when correcting this fixup (state restore) */
break;
case PATM_CPUID_EXT_PTR:
/* @todo dirty hack when correcting this fixup (state restore) */
break;
case PATM_CPUID_CENTAUR_PTR:
/* @todo dirty hack when correcting this fixup (state restore) */
break;
case PATM_CPUID_DEF_PTR:
/* @todo dirty hack when correcting this fixup (state restore) */
break;
case PATM_CPUID_STD_MAX:
break;
case PATM_CPUID_EXT_MAX:
break;
case PATM_CPUID_CENTAUR_MAX:
break;
case PATM_INTERRUPTFLAG:
break;
case PATM_INHIBITIRQADDR:
break;
case PATM_NEXTINSTRADDR:
/* pNextInstrGC can be 0 if several instructions, that inhibit irqs, follow each other */
break;
case PATM_CURINSTRADDR:
break;
case PATM_VM_FORCEDACTIONS:
/* @todo dirty assumptions when correcting this fixup during saved state loading. */
break;
case PATM_TEMP_EAX:
break;
case PATM_TEMP_ECX:
break;
case PATM_TEMP_EDI:
break;
case PATM_TEMP_EFLAGS:
break;
case PATM_TEMP_RESTORE_FLAGS:
break;
/* NOTE(review): unreachable second break below -- likely residue of a removed case label. */
break;
case PATM_CALL_RETURN_ADDR:
break;
/* Relative address of global patm lookup and call function. */
{
/* GC address of the instruction following the call = GC base + (HC address after displacement - HC base). */
RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
/* Relative value is target minus address of instruction after the actual call instruction. */
break;
}
case PATM_RETURN_FUNCTION:
{
RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
/* Relative value is target minus address of instruction after the actual call instruction. */
break;
}
case PATM_IRET_FUNCTION:
{
RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
/* Relative value is target minus address of instruction after the actual call instruction. */
break;
}
{
RTRCPTR pInstrAfterCall = pVM->patm.s.pPatchMemGC + (RTGCUINTPTR32)(&pPB[j] + sizeof(RTRCPTR) - pVM->patm.s.pPatchMemHC);
/* Relative value is target minus address of instruction after the actual call instruction. */
break;
}
default:
/* Unknown fixup type: fail hard rather than emit broken patch code. */
AssertRelease(0);
break;
}
{
}
break;
}
}
}
/* Add the jump back to guest code (if required) */
if (fGenJump)
{
/* Displacement for the near jump: target minus the address of the byte after the jump instruction. */
int32_t displ = pReturnAddrGC - (PATCHCODE_PTR_GC(pPatch) + pPatch->uCurPatchOffset + pAsmRecord->offJump - 1 + SIZEOF_NEARJUMP32);
/* Add lookup record for patch to guest address translation */
patmr3AddP2GLookupRecord(pVM, pPatch, &pPB[pAsmRecord->offJump - 1], pReturnAddrGC, PATM_LOOKUP_PATCH2GUEST);
}
// Calculate the right size of this patch block
{
return pAsmRecord->size;
}
else {
// if a jump instruction is present and we don't want one, then subtract SIZEOF_NEARJUMP32
}
}
/* Read bytes and check for overwritten instructions. */
{
/*
* Could be patched already; make sure this is checked!
*/
{
if (RT_SUCCESS(rc2))
{
}
else
break; /* no more */
}
return VINF_SUCCESS;
}
/**
 * Duplicate the current guest instruction into the patch block.
 *
 * @returns VBox status code (always VINF_SUCCESS in this view; the copying
 *          logic is not visible here).
 * @param pVM         The VM to operate on.
 * @param pPatch      Patch record.
 * @param pCpu        Disassembly state of the instruction to duplicate.
 * @param pCurInstrGC Guest context address of the current instruction.
 */
int patmPatchGenDuplicate(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
{
    return VINF_SUCCESS;
}
{
return VINF_SUCCESS;
}
{
return VINF_SUCCESS;
}
/*
* Generate an STI patch
*/
{
return VINF_SUCCESS;
}
/**
 * Generate a POPF replacement patch (16- or 32-bit operand size), optionally
 * with a jump back to the guest code afterwards.
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param pVM           The VM to operate on.
 * @param pPatch        Patch record.
 * @param pReturnAddrGC Guest address to return to after the patched popf.
 * @param fSizeOverride True for a 16-bit (operand-size-prefixed) popf.
 * @param fGenJumpBack  Whether to append the jump back to guest code.
 *
 * NOTE(review): 'size', 'pPB' and 'callInfo' have no visible declarations in
 * this view of the function — confirm against the full revision.
 */
int patmPatchGenPopf(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fSizeOverride, bool fGenJumpBack)
{
/* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
if (fSizeOverride == true)
{
Log(("operand size override!!\n"));
/* 16-bit popf template; the _NoExit variant omits the jump back to guest code. */
size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf16Record : &PATMPopf16Record_NoExit , pReturnAddrGC, fGenJumpBack, &callInfo);
}
else
{
/* 32-bit popf template. */
size = patmPatchGenCode(pVM, pPatch, pPB, (fGenJumpBack) ? &PATMPopf32Record : &PATMPopf32Record_NoExit, pReturnAddrGC, fGenJumpBack, &callInfo);
}
return VINF_SUCCESS;
}
{
if (fSizeOverride == true)
{
Log(("operand size override!!\n"));
}
else
{
}
return VINF_SUCCESS;
}
{
return VINF_SUCCESS;
}
/**
 * Generate a patch for a LOOP/LOOPNE/LOOPE/JECXZ instruction.
 *
 * @returns VBox status code; VERR_INVALID_PARAMETER for any other opcode.
 * @param pVM           The VM to operate on.
 * @param pPatch        Patch record.
 * @param pTargetGC     Guest context jump target.
 * @param opcode        One of OP_LOOP, OP_LOOPNE, OP_LOOPE, OP_JECXZ.
 * @param fSizeOverride Operand size override flag.
 *
 * NOTE(review): the per-opcode template selection and 'pPatchAsmRec'
 * declaration are missing from this view — confirm against the full revision.
 */
int patmPatchGenLoop(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
{
/* Only the ecx-conditional jump family is supported here. */
switch (opcode)
{
case OP_LOOP:
break;
case OP_LOOPNE:
break;
case OP_LOOPE:
break;
case OP_JECXZ:
break;
default:
return VERR_INVALID_PARAMETER;
}
Log(("PatchGenLoop %d jump %d to %08x offrel=%d\n", opcode, pPatch->nrJumpRecs, pTargetGC, pPatchAsmRec->offRelJump));
// Generate the patch code
if (fSizeOverride)
{
}
return VINF_SUCCESS;
}
/**
 * Generate a relative jump from patch code to patch code (no relocation
 * record required since both ends live in patch memory).
 *
 * @returns VBox status code; VERR_PATCHING_REFUSED for loop/jecxz opcodes.
 * @param pVM           The VM to operate on.
 * @param pPatch        Patch record.
 * @param pTargetGC     Guest context jump target.
 * @param opcode        Conditional/unconditional jump opcode (OP_Jcc / OP_JMP).
 * @param fSizeOverride Operand size override flag.
 *
 * NOTE(review): the per-case opcode emission is missing in this view, 'offset'
 * has no visible declaration, and the block before 'else offset++;' has lost
 * its 'if' — this does not parse as shown. Confirm against the full revision.
 */
int patmPatchGenRelJump(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pTargetGC, uint32_t opcode, bool fSizeOverride)
{
// internal relative jumps from patch code to patch code; no relocation record required
switch (opcode)
{
case OP_JO:
break;
case OP_JNO:
break;
case OP_JC:
break;
case OP_JNC:
break;
case OP_JE:
break;
case OP_JNE:
break;
case OP_JBE:
break;
case OP_JNBE:
break;
case OP_JS:
break;
case OP_JNS:
break;
case OP_JP:
break;
case OP_JNP:
break;
case OP_JL:
break;
case OP_JNL:
break;
case OP_JLE:
break;
case OP_JNLE:
break;
case OP_JMP:
/* If interrupted here, then jump to the target instruction. Used by PATM.cpp for jumping to known instructions. */
/* Add lookup record for patch to guest address translation */
/* 0xE9 = near jump with 32-bit displacement. */
pPB[0] = 0xE9;
break;
case OP_JECXZ:
case OP_LOOP:
case OP_LOOPNE:
case OP_LOOPE:
default:
return VERR_PATCHING_REFUSED;
}
/* Two-byte 0x0F-prefixed conditional jump encoding; presumably guarded by an
 * 'if (opcode != OP_JMP)' style condition that is missing from this view. */
{
pPB[0] = 0xF;
offset += 2;
}
else offset++;
return VINF_SUCCESS;
}
/*
* Rewrite call to dynamic or currently unknown function (on-demand patching of function)
*/
/**
 * Rewrite a call to a dynamic or currently unknown function (on-demand
 * patching of functions).
 *
 * @returns VBox status code.
 * @param pVM         The VM to operate on.
 * @param pPatch      Patch record.
 * @param pCpu        Disassembly state of the call instruction.
 * @param pCurInstrGC Guest context address of the call instruction.
 * @param pTargetGC   Guest context call target (direct calls only).
 * @param fIndirect   True for an indirect call (target not known statically).
 *
 * NOTE(review): 'rc' is compared against VERR_NO_MEMORY below before any
 * visible assignment, and 'offset', 'i', 'size', 'pPB' and 'callInfo' have no
 * visible declarations — the body is truncated in this view; confirm against
 * the full revision.
 */
int patmPatchGenCall(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RTRCPTR pCurInstrGC, RTRCPTR pTargetGC, bool fIndirect)
{
int rc;
/** @note Don't check for IF=1 here. The ret instruction will do this. */
/** @note It's dangerous to do this for 'normal' patches. the jump target might be inside the generated patch jump. (seen this!) */
/* 1: Clear PATM interrupt flag on entry. */
if (rc == VERR_NO_MEMORY)
return rc;
/* 2: We must push the target address onto the stack before appending the indirect call code. */
if (fIndirect)
{
Log(("patmPatchGenIndirectCall\n"));
/* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
* a page fault. The assembly code restores the stack afterwards.
*/
offset = 0;
/* include prefix byte to make sure we don't use the incorrect selector register. */
i = 2; /* standard offset of modrm bytes */
i++; //skip operand prefix
i++; //skip segment prefix
/* Copy the remaining instruction bytes (modrm and following) from guest memory. */
rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->opsize - i);
}
else
{
AssertMsg(PATMIsPatchGCAddr(pVM, pTargetGC) == false, ("Target is already a patch address (%RRv)?!?\n", pTargetGC));
/** @todo wasting memory as the complex search is overkill and we need only one lookup slot... */
/* Relative call to patch code (patch to patch -> no fixup). */
Log(("PatchGenCall from %RRv (next=%RRv) to %RRv\n", pCurInstrGC, pCurInstrGC + pCpu->opsize, pTargetGC));
/* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
* a page fault. The assembly code restores the stack afterwards.
*/
offset = 0;
}
/* align this block properly to make sure the jump table will not be misaligned. */
if (size)
for (i=0;i<size;i++)
{
}
/* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
size = patmPatchGenCode(pVM, pPatch, pPB, (fIndirect) ? &PATMCallIndirectRecord : &PATMCallRecord, 0, false, &callInfo);
/* Need to set PATM_INTERRUPTFLAG after the patched ret returns here. */
if (rc == VERR_NO_MEMORY)
return rc;
return VINF_SUCCESS;
}
/**
* Generate indirect jump to unknown destination
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
* @param pCpu Disassembly state
* @param pCurInstrGC Current instruction address
*/
{
int rc;
/* 1: Clear PATM interrupt flag on entry. */
if (rc == VERR_NO_MEMORY)
return rc;
/* 2: We must push the target address onto the stack before appending the indirect call code. */
Log(("patmPatchGenIndirectJump\n"));
/* We push it onto the stack here, so the guest's context isn't ruined when this happens to cause
* a page fault. The assembly code restores the stack afterwards.
*/
offset = 0;
/* include prefix byte to make sure we don't use the incorrect selector register. */
i = 2; /* standard offset of modrm bytes */
i++; //skip operand prefix
i++; //skip segment prefix
rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->opsize - i);
/* align this block properly to make sure the jump table will not be misaligned. */
if (size)
for (i=0;i<size;i++)
{
}
/* 3: Generate code to lookup address in our local cache; call hypervisor PATM code if it can't be located. */
return VINF_SUCCESS;
}
/**
* Generate return instruction
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch structure
* @param pCpu Disassembly struct
* @param pCurInstrGC Current instruction pointer
*
*/
/**
 * Generate a return instruction patch.
 *
 * @returns VBox status code.
 * @param pVM         The VM to operate on.
 * @param pPatch      Patch structure.
 * @param pCpu        Disassembly state of the ret instruction.
 * @param pCurInstrGC Guest context address of the current instruction.
 *
 * NOTE(review): the body is truncated in this view — the '&&' line below is
 * the tail of a lost 'if' condition, and 'size'/'rc' have no visible
 * declarations. Confirm against the full revision before editing.
 */
int patmPatchGenRet(PVM pVM, PPATCHINFO pPatch, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pCurInstrGC)
{
/* Remember start of this patch for below. */
/** @note optimization: multiple identical ret instruction in a single patch can share a single patched ret. */
&& pPatch->pTempInfo->uPatchRetParam1 == (uint32_t)pCpu->param1.parval) /* nr of bytes popped off the stack should be identical of course! */
{
}
/* Jump back to the original instruction if IF is set again. */
/* align this block properly to make sure the jump table will not be misaligned. */
if (size)
for (int i=0;i<size;i++)
/* Duplicate the ret or ret n instruction; it will use the PATM return address */
if (rc == VINF_SUCCESS)
{
}
return rc;
}
/**
* Generate all global patm functions
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch structure
*
*/
{
int size = 0;
/* Round to next 8 byte boundary. */
/* Round to next 8 byte boundary. */
/* Round to next 8 byte boundary. */
return VINF_SUCCESS;
}
/**
* Generate illegal instruction (int 3)
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch structure
*
*/
{
pPB[0] = 0xCC;
return VINF_SUCCESS;
}
/**
* Check virtual IF flag and jump back to original guest code if set
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch structure
* @param pCurInstrGC Guest context pointer to the current instruction
*
*/
{
/* Add lookup record for patch to guest address translation */
/* Generate code to check for IF=1 before executing the call to the duplicated function. */
return VINF_SUCCESS;
}
/**
* Set PATM interrupt flag
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch structure
* @param pInstrGC Corresponding guest instruction
*
*/
{
/* Add lookup record for patch to guest address translation */
return VINF_SUCCESS;
}
/**
* Clear PATM interrupt flag
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch structure
* @param pInstrGC Corresponding guest instruction
*
*/
{
/* Add lookup record for patch to guest address translation */
return VINF_SUCCESS;
}
/**
* Clear PATM inhibit irq flag
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch structure
* @param pNextInstrGC Next guest instruction
*/
{
int size;
Assert((pPatch->flags & (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION)) != (PATMFL_GENERATE_JUMPTOGUEST|PATMFL_DUPLICATE_FUNCTION));
/* Add lookup record for patch to guest address translation */
else
return VINF_SUCCESS;
}
/**
* Generate an interrupt handler entrypoint
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
* @param pIntHandlerGC IDT handler address
*
** @todo must check if virtual IF is already cleared on entry!!!!!!!!!!!!!!!!!!!!!!!
*/
{
int rc = VINF_SUCCESS;
/* Add lookup record for patch to guest address translation */
/* Generate entrypoint for the interrupt handler (correcting CS in the interrupt stack frame) */
(pPatch->flags & PATMFL_INTHANDLER_WITH_ERRORCODE) ? &PATMIntEntryRecordErrorCode : &PATMIntEntryRecord,
0, false);
// Interrupt gates set IF to 0
return rc;
}
/**
* Generate a trap handler entrypoint
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
* @param pTrapHandlerGC IDT handler address
*/
{
/* Add lookup record for patch to guest address translation */
/* Generate entrypoint for the trap handler (correcting CS in the interrupt stack frame) */
(pPatch->flags & PATMFL_TRAPHANDLER_WITH_ERRORCODE) ? &PATMTrapEntryRecordErrorCode : &PATMTrapEntryRecord,
pTrapHandlerGC, true);
return VINF_SUCCESS;
}
#ifdef VBOX_WITH_STATISTICS
{
/* Add lookup record for stats code -> guest handler. */
/* Generate code to keep calling statistics for this patch */
return VINF_SUCCESS;
}
#endif
/**
* Debug register moves to or from general purpose registers
* mov GPR, DRx
* mov DRx, GPR
*
* @todo: if we ever want to support hardware debug registers natively, then
* this will need to be changed!
*/
{
int rc = VINF_SUCCESS;
mod = 0; //effective address (only)
{
Assert(0); // You not come here. Illegal!
// mov DRx, GPR
}
else
{
// mov GPR, DRx
}
return rc;
}
/*
* Control register moves to or from general purpose registers
* mov GPR, CRx
* mov CRx, GPR
*/
{
int rc = VINF_SUCCESS;
mod = 0; //effective address (only)
{
Assert(0); // You not come here. Illegal!
// mov CRx, GPR
}
else
{
// mov GPR, DRx
}
/// @todo: make this an array in the context structure
switch (ctrlreg)
{
case USE_REG_CR0:
break;
case USE_REG_CR2:
break;
case USE_REG_CR3:
break;
case USE_REG_CR4:
break;
default: /* Shut up compiler warning. */
AssertFailed();
offset = 0;
break;
}
return rc;
}
/*
* mov GPR, SS
*/
{
/* push ss */
offset = 0;
/* checks and corrects RPL of pushed ss*/
/* pop general purpose register */
offset = 0;
return VINF_SUCCESS;
}
/**
* Generate an sldt or str patch instruction
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
* @param pCpu Disassembly state
* @param pCurInstrGC Guest instruction address
*/
{
// sldt %Ew
int rc = VINF_SUCCESS;
uint32_t i;
/** @todo segment prefix (untested) */
{
/* Register operand */
// 8B 15 [32 bits addr] mov edx, CPUMCTX.tr/ldtr
/* Modify REG part according to destination of original instruction */
{
}
else
{
}
}
else
{
/* Memory operand */
//50 push eax
//52 push edx
//8D 15 48 7C 42 00 lea edx, dword ptr [dest]
//66 A1 48 7C 42 00 mov ax, CPUMCTX.tr/ldtr
//66 89 02 mov word ptr [edx],ax
//5A pop edx
//58 pop eax
{
}
// duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
i = 3; /* standard offset of modrm bytes */
i++; //skip operand prefix
i++; //skip segment prefix
rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->opsize - i);
{
}
else
{
}
}
return rc;
}
/**
* Generate an sgdt or sidt patch instruction
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
* @param pCpu Disassembly state
* @param pCurInstrGC Guest instruction address
*/
{
int rc = VINF_SUCCESS;
uint32_t i;
/* @todo segment prefix (untested) */
// sgdt %Ms
// sidt %Ms
{
case OP_SGDT:
break;
case OP_SIDT:
break;
default:
return VERR_INVALID_PARAMETER;
}
//50 push eax
//52 push edx
//8D 15 48 7C 42 00 lea edx, dword ptr [dest]
//66 A1 48 7C 42 00 mov ax, CPUMCTX.gdtr.limit
//66 89 02 mov word ptr [edx],ax
//A1 48 7C 42 00 mov eax, CPUMCTX.gdtr.base
//89 42 02 mov dword ptr [edx+2],eax
//5A pop edx
//58 pop eax
{
}
// duplicate and modify modrm byte and additional bytes if present (e.g. direct address)
i = 3; /* standard offset of modrm bytes */
i++; //skip operand prefix
i++; //skip segment prefix
rc = patmPatchReadBytes(pVM, &pPB[offset], (RTRCPTR)((RTGCUINTPTR32)pCurInstrGC + i), pCpu->opsize - i);
return rc;
}
/**
* Generate a cpuid patch instruction
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
* @param pCurInstrGC Guest instruction address
*/
{
return VINF_SUCCESS;
}
/**
* Generate the jump from guest to patch code
*
* @returns VBox status code.
* @param pVM The VM to operate on.
* @param pPatch Patch record
* @param pTargetGC Guest target jump
* @param fClearInhibitIRQs Clear inhibit irq flag
*/
/**
 * Generate the jump from patch code back to guest code.
 *
 * @returns VBox status code.
 * @param pVM              The VM to operate on.
 * @param pPatch           Patch record.
 * @param pReturnAddrGC    Guest target address to jump to.
 * @param fClearInhibitIRQs Clear the inhibit-irq flag first.
 *
 * NOTE(review): the code generation calls are missing from this view, so the
 * VERR_NO_MEMORY check below is dead code as shown (rc is VINF_SUCCESS at
 * that point). Confirm against the full revision.
 */
int patmPatchGenJumpToGuest(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pReturnAddrGC, bool fClearInhibitIRQs)
{
int rc = VINF_SUCCESS;
if (fClearInhibitIRQs)
{
if (rc == VERR_NO_MEMORY)
return rc;
}
/* Add lookup record for patch to guest address translation */
/* Generate code to jump to guest code if IF=1, else fault. */
return rc;
}
/*
* Relative jump from patch code to patch code (no fixup required)
*/
/**
 * Generate a relative jump from patch code to patch code (no fixup record
 * required since both ends live in patch memory).
 *
 * @returns VBox status code (VINF_SUCCESS in this view; the jump emission is
 *          not visible here).
 * @param pVM              The VM to operate on.
 * @param pPatch           Patch record.
 * @param pCurInstrGC      Guest context address of the current instruction.
 * @param pPatchAddrGC     Patch code target address.
 * @param fAddLookupRecord Whether to record a patch-to-guest translation entry.
 */
int patmPatchGenPatchJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pCurInstrGC, RCPTRTYPE(uint8_t *) pPatchAddrGC, bool fAddLookupRecord)
{
    if (fAddLookupRecord)
    {
        /* Record the patch -> guest address translation for this jump. */
    }
    return VINF_SUCCESS;
}