/* memsafer-r3.cpp revision c2037ceff701a3ec59df64530b011d5007801550 */
/* $Id$ */
/** @file
* IPRT - Memory Allocate for Sensitive Data, generic heap-based implementation.
*/
/*
* Copyright (C) 2006-2014 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#include <iprt/memsafer.h>
#include <iprt/critsect.h>
#ifdef IN_SUP_R3
#endif
/*******************************************************************************
* Defined Constants And Macros *
*******************************************************************************/
/** Allocation size alignment (power of two).
 * NOTE(review): not referenced in the visible portion of this file; presumably
 * used by size rounding in the allocation path -- confirm against the
 * complete source. */
#define RTMEMSAFER_ALIGN 16
/*******************************************************************************
* Structures and Typedefs *
*******************************************************************************/
/**
 * Allocators.
 *
 * NOTE(review): the enumerator list and the closing brace of this enum are
 * missing -- the file appears truncated.  The surviving doc comments suggest
 * three values: an invalid method, RTMemPageAlloc, and SUPR3PageAllocEx.
 */
typedef enum RTMEMSAFERALLOCATOR
{
/** Invalid method. */
/** RTMemPageAlloc. */
/** SUPR3PageAllocEx. */
/**
 * Tracking node (lives on normal heap).
 *
 * NOTE(review): the member declarations are missing; only their doc comments
 * remain.  Expected members (per the comments): an AVL node core, the
 * allocation flags, the user-memory offset, the requested size, the page
 * count including the two guard pages, and the allocator kind.
 */
typedef struct RTMEMSAFERNODE
{
/** Node core.
 * The core key is a scrambled pointer to the user memory. */
/** The allocation flags. */
/** The offset into the allocation of the user memory. */
/** The requested allocation size. */
/** The allocation size in pages, this includes the two guard pages. */
/** The allocator used for this node. */
/** Pointer to an allocation tracking node. */
typedef RTMEMSAFERNODE *PRTMEMSAFERNODE;
/*******************************************************************************
* Global Variables *
*******************************************************************************/
/** Init once structure for this module.
 * NOTE(review): the variable declaration itself is missing here (file appears
 * truncated). */
/** Critical section protecting the allocation tree. */
static RTCRITSECTRW g_MemSaferCritSect;
/** Tree of allocation nodes. */
static AVLPVTREE g_pMemSaferTree;
/** XOR scrambler value for memory. */
static uintptr_t g_uMemSaferScramblerXor;
/** XOR scrambler value for pointers. */
static uintptr_t g_uMemSaferPtrScramblerXor;
/** Pointer rotate shift count.
 * NOTE(review): the corresponding variable declaration is missing. */
/**
 * @callback_method_impl{FNRTONCE, Inits globals.}
 *
 * NOTE(review): the function signature is missing (truncated view); the body
 * just initializes the critical section guarding the allocation tree.
 */
{
    return RTCritSectRwInit(&g_MemSaferCritSect);
}
/**
 * @callback_method_impl{PFNRTONCECLEANUP, Cleans up globals.}
 *
 * NOTE(review): signature missing, and the non-lazy cleanup statements
 * (presumably deleting the critical section) are gone from the if-body.
 */
{
    if (!fLazyCleanUpOk)
    {
    }
}
/*
 * NOTE(review): looks like the pointer (un)scrambling helper; its signature
 * and the 64-bit XOR/rotate arithmetic inside the #if are missing, and uPtr
 * is not declared in the visible code.
 */
{
#if ARCH_BITS == 64
#else
# error "Unsupported/missing ARCH_BITS."
#endif
    return (void *)uPtr;
}
/**
 * Inserts a tracking node into the tree.
 *
 * @param   pThis   The allocation tracking node to insert.
 *
 * NOTE(review): the signature and the body statements (presumably pointer
 * scrambling, critsect enter/leave, AVL insert) are missing from this view.
 */
{
}
/**
 * Finds a tracking node in the tree.
 *
 * @returns The allocation tracking node for @a pvUser. NULL if not found.
 * @param   pvUser  The user pointer to the allocation.
 *
 * NOTE(review): the signature and lookup statements are missing; only the
 * return survives and pThis is not declared in the visible code.
 */
{
    return pThis;
}
/**
 * Removes a tracking node from the tree.
 *
 * @returns The allocation tracking node for @a pvUser. NULL if not found.
 * @param   pvUser  The user pointer to the allocation.
 *
 * NOTE(review): the signature and removal statements are missing from this
 * view.
 */
{
    return pThis;
}
/*
 * NOTE(review): memory scrambling helper (body only; signature missing).
 * The XOR against the scrambler value and the cb decrement inside the loop
 * are missing, so as shown the loop would never terminate; pu and pThis are
 * undeclared in the visible code.
 */
{
#ifdef RT_STRICT
    AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);
#endif
    /* Note! This isn't supposed to be safe, just less obvious. */
    while (cb > 0)
    {
        pu++;
    }
    return VINF_SUCCESS;
}
/*
 * NOTE(review): memory unscrambling helper, mirroring the scrambler above
 * (body only; signature missing).  The XOR and cb decrement inside the loop
 * are missing, so as shown the loop would never terminate; pu and pThis are
 * undeclared in the visible code.
 */
{
#ifdef RT_STRICT
    AssertMsgReturn(cb == pThis->cbUser, ("cb=%#zx != %#zx\n", cb, pThis->cbUser), VERR_INVALID_PARAMETER);
#endif
    /* Note! This isn't supposed to be safe, just less obvious. */
    while (cb > 0)
    {
        pu++;
    }
    return VINF_SUCCESS;
}
/**
 * Initializes the pages.
 *
 * Fills the memory with random bytes in order to make it less obvious where the
 * secret data starts and ends. We also zero the user memory in case the
 * allocator does not do this.
 *
 * @param   pThis       The allocation tracer node. The Core.Key member
 *                      will be set.
 * @param   pvPages     The pages to initialize.
 *
 * NOTE(review): the signature, the zeroing of the user area, the randomizing
 * of the lead-in bytes and the Core.Key assignment are missing; only the
 * randomization of the tail padding (between the end of the user area and the
 * last guard page) survives, and pbUser is undeclared in the visible code.
 */
{
    RTRandBytes(pbUser + pThis->cbUser, (size_t)pThis->cPages * PAGE_SIZE - PAGE_SIZE - pThis->offUser - pThis->cbUser);
}
/**
 * Allocates and initializes pages from the support driver and initializes it.
 *
 * @returns VBox status code.
 * @param   pThis   The allocator node. Core.Key will be set on successful
 *                  return (unscrambled).
 *
 * NOTE(review): the function signature is missing (truncated view), and so is
 * the first SUPR3PageProtect call that the inner RT_SUCCESS(rc) check and the
 * VERR_NOT_SUPPORTED branch refer to (presumably protecting the front guard
 * page).  The error-path cleanup (freeing pvPages) is also absent.
 */
{
#ifdef IN_SUP_R3
    /*
     * Try allocate the memory.
     */
    void *pvPages;
    int rc = SUPR3PageAllocEx(pThis->cPages, 0 /* fFlags */, &pvPages, NULL /* pR0Ptr */, NULL /* paPages */);
    if (RT_SUCCESS(rc))
    {
        /*
         * Configure the guard pages.
         * SUPR3PageProtect isn't supported on all hosts, we ignore that.
         */
        if (RT_SUCCESS(rc))
        {
            /* BUG(review): the byte offset of the tail guard page should almost
               certainly be (pThis->cPages - 1) * PAGE_SIZE, not
               (pThis->cPages - PAGE_SIZE) * PAGE_SIZE -- compare with the
               RTMemProtect call in the IPRT page-allocator variant below. */
            rc = SUPR3PageProtect(pvPages, NIL_RTR0PTR, (pThis->cPages - PAGE_SIZE) * PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
        }
        else if (rc == VERR_NOT_SUPPORTED)
            return VINF_SUCCESS;
        /* failed. */
    }
    return rc;
#else /* !IN_SUP_R3 */
    return VERR_NOT_SUPPORTED;
#endif /* !IN_SUP_R3 */
}
/**
 * Allocates and initializes pages using the IPRT page allocator API.
 *
 * @returns VBox status code.
 * @param   pThis   The allocator node. Core.Key will be set on successful
 *                  return (unscrambled).
 *
 * NOTE(review): the signature is missing; pvPages is never declared/assigned
 * and rc is read uninitialized in the visible code -- the RTMemPageAlloc call
 * and the front-guard-page RTMemProtect call appear to have been lost.  The
 * failure cleanup is also absent, so control can run off the end of the
 * function on the failure paths shown.
 */
{
    /*
     * Try allocate the memory.
     */
    int rc;
    if (pvPages)
    {
        /*
         * Configure the guard pages.
         */
        if (RT_SUCCESS(rc))
        {
            /* Make the tail guard page inaccessible. */
            rc = RTMemProtect((uint8_t *)pvPages + (size_t)(pThis->cPages - 1U) * PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE);
            if (RT_SUCCESS(rc))
                return VINF_SUCCESS;
        }
        /* failed. */
    }
    else
        return rc;
}
/**
 * Allocates zeroed memory for sensitive data (contract in iprt/memsafer.h).
 *
 * NOTE(review): much of this function is missing from this view: validation
 * of ppvNew/fFlags, the once-init call that sets rc, the tracker-node
 * allocation that sets pThis, size/offset preparation, the calls to the two
 * page allocators (the if/else-if statements below have lost their bodies),
 * and the node insertion plus *ppvNew assignment before the success return.
 */
RTDECL(int) RTMemSaferAllocZExTag(void **ppvNew, size_t cb, uint32_t fFlags, const char *pszTag) RT_NO_THROW
{
    /*
     * Validate input.
     */
    AssertReturn(cb <= 32U*_1M - PAGE_SIZE * 3U, VERR_ALLOCATION_TOO_BIG); /* Max 32 MB minus padding and guard pages. */
    /*
     * Initialize globals.
     */
    if (RT_SUCCESS(rc))
    {
        /*
         * Allocate a tracker node first.
         */
        if (pThis)
        {
            /*
             * Prepare the allocation.
             */
            /*
             * Try allocate the memory, using the best allocator by default and
             * falling back on the less safe one.
             */
            if (RT_SUCCESS(rc))
            else if (!(fFlags & RTMEMSAFER_F_REQUIRE_NOT_PAGABLE))
            {
                if (RT_SUCCESS(rc))
            }
            if (RT_SUCCESS(rc))
            {
                /*
                 * Insert the node.
                 */
                return VINF_SUCCESS;
            }
        }
        else
            rc = VERR_NO_MEMORY;
    }
    return rc;
}
/*
 * NOTE(review): this appears to be the body of the free routine; the
 * signature is missing, as are the tracking-node lookup (pThis), the
 * user-memory wiping, the pbPages/cbPages setup, the per-allocator case
 * labels and page-freeing calls, the tracking-node free, and the statement
 * belonging to the trailing 'else' (presumably an assertion on the NULL/zero
 * arguments).
 */
{
    if (pv)
    {
        /*
         * Wipe the user memory first.
         */
        /*
         * Free the pages.
         */
        switch (pThis->enmAllocator)
        {
#ifdef IN_SUP_R3
            /* Lift the guard-page protection before handing the pages back. */
            SUPR3PageProtect(pbPages, NIL_RTR0PTR, (uint32_t)(cbPages - PAGE_SIZE), PAGE_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
            break;
#endif
            break;
            default:
                AssertFailed();
        }
        /*
         * Free the tracking node.
         */
    }
    else
}
/**
 * The simplest reallocation method: allocate new block, copy over the data,
 * free old block.
 *
 * @returns IPRT status code.
 *
 * NOTE(review): the allocation call that sets rc and pvNew, the copy of
 * min(cbOld, cbNew) bytes, and the free of the old block are missing from
 * this view; rc is used undeclared.
 */
static int rtMemSaferReallocSimpler(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag)
{
    void *pvNew;
    if (RT_SUCCESS(rc))
    {
    }
    return rc;
}
/**
 * Reallocates memory holding sensitive data (contract in iprt/memsafer.h).
 *
 * NOTE(review): most of the controlling conditions and statements are missing
 * from this view -- the node lookup setting pThis, the grow/shrink and flag
 * comparisons, the in-place grow / move / wipe code, and the free call on the
 * cbNew == 0 path -- leaving several bare brace blocks below.
 */
RTDECL(int) RTMemSaferReallocZExTag(size_t cbOld, void *pvOld, size_t cbNew, void **ppvNew, uint32_t fFlags, const char *pszTag) RT_NO_THROW
{
    int rc;
    /* Real realloc. */
    {
        AssertMsgStmt(cbOld == pThis->cbUser, ("cbOld=%#zx != %#zx\n", cbOld, pThis->cbUser), cbOld = pThis->cbUser);
        {
            {
                /*
                 * Is there enough room for us to grow?
                 */
                {
                    {
                        /*
                         * Sufficient space after the current allocation.
                         */
                    }
                    else
                    {
                        /*
                         * Have to move the allocation to make enough room at the
                         * end. In order to make it a little less predictable and
                         * maybe avoid a relocation or two in the next call, divide
                         * the page offset by four until it fits.
                         */
                        do
                    }
                    rc = VINF_SUCCESS;
                }
                else
                {
                    /*
                     * Not enough space, allocate a new block and copy over the data.
                     */
                }
            }
            else
            {
                /*
                 * Shrinking the allocation, just wipe the memory that is no longer
                 * being used.
                 */
                {
                }
                rc = VINF_SUCCESS;
            }
        }
        {
            /*
             * New flags added. Allocate a new block and copy over the old one.
             */
        }
        else
        {
            /* Compatible flags. */
        }
    }
    /*
     * First allocation. Pass it on.
     */
    else if (!cbOld)
    {
    }
    /*
     * Free operation. Pass it on.
     */
    else
    {
        rc = VINF_SUCCESS;
    }
    return rc;
}
/*
 * NOTE(review): body of a pointer-returning allocation wrapper (likely
 * RTMemSaferAllocZTag); the signature and the call that sets rc and pvNew are
 * missing from this view.
 */
{
    if (RT_SUCCESS(rc))
        return pvNew;
    return NULL;
}
/**
 * Pointer-returning wrapper around the extended realloc API.
 *
 * NOTE(review): the local declarations of pvNew and rc and the call to
 * RTMemSaferReallocZExTag are missing from this view; both identifiers are
 * used undeclared below.
 */
RTDECL(void *) RTMemSaferReallocZTag(size_t cbOld, void *pvOld, size_t cbNew, const char *pszTag) RT_NO_THROW
{
    if (RT_SUCCESS(rc))
        return pvNew;
    return NULL;
}