VmdkHDDCore.cpp revision 78dfd747e1837f5de0bc5625c39dff4b98e3d4d4
/* $Id$ */
/** @file
* VMDK Disk image, Core Code.
*/
/*
* Copyright (C) 2006-2007 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VD_VMDK
#include "VBoxHDD-Internal.h"
/*******************************************************************************
* Constants And Macros, Structures and Typedefs *
*******************************************************************************/
/** Maximum encoded string size (including NUL) we allow for VMDK images.
* Deliberately not set high to avoid running out of descriptor space. */
#define VMDK_ENCODED_COMMENT_MAX 1024
/** VMDK descriptor DDB entry for PCHS cylinders. */
#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
/** VMDK descriptor DDB entry for PCHS heads. */
#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
/** VMDK descriptor DDB entry for PCHS sectors. */
#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
/** VMDK descriptor DDB entry for LCHS cylinders. */
#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
/** VMDK descriptor DDB entry for LCHS heads. */
#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
/** VMDK descriptor DDB entry for LCHS sectors. */
#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
/** VMDK descriptor DDB entry for image UUID. */
#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
/** VMDK descriptor DDB entry for image modification UUID. */
#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
/** VMDK descriptor DDB entry for parent image UUID. */
#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
/** VMDK descriptor DDB entry for parent image modification UUID. */
#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
/** No compression for streamOptimized files. */
#define VMDK_COMPRESSION_NONE 0
/** Deflate compression for streamOptimized files. */
#define VMDK_COMPRESSION_DEFLATE 1
/** Marker that the actual GD value is stored in the footer. */
#define VMDK_GD_AT_END 0xffffffffffffffffULL
/** Marker for end-of-stream in streamOptimized images. */
#define VMDK_MARKER_EOS 0
/** Marker for grain table block in streamOptimized images. */
#define VMDK_MARKER_GT 1
/** Marker for grain directory block in streamOptimized images. */
#define VMDK_MARKER_GD 2
/** Marker for footer in streamOptimized images. */
#define VMDK_MARKER_FOOTER 3
/** Dummy marker for "don't check the marker value". */
#define VMDK_MARKER_IGNORE 0xffffffffU
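/* Rough layout of a streamOptimized data area, per the VMDK 1.1 spec: a
 * sequence of compressed grains, interspersed with grain table markers (each
 * followed by its grain table), then a grain directory marker plus the grain
 * directory, a footer marker plus the footer, and finally an end-of-stream
 * marker. */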
/**
* Magic number for hosted images created by VMware Workstation 4, VMware
* Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
*/
/**
* VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
* this header is also used for monolithic flat images.
*/
#pragma pack(1)
typedef struct SparseExtentHeader
{
bool uncleanShutdown;
char singleEndLineChar;
char nonEndLineChar;
char doubleEndLineChar1;
char doubleEndLineChar2;
} SparseExtentHeader;
#pragma pack()
/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
* divisible by the default grain size (64K) */
/** VMDK streamOptimized file format marker. The type field may or may not
* be actually valid, but there's always data to read there. */
#pragma pack(1)
typedef struct VMDKMARKER
{
uint64_t uSector;
uint32_t cbSize;
uint32_t uType;
} VMDKMARKER;
#pragma pack()
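/* A marker whose size field is zero is pure metadata and its type field (one
 * of the VMDK_MARKER_* values above) is meaningful. A non-zero size announces
 * a compressed grain, in which case the bytes at the type position are
 * already grain data - hence the caveat in the comment above. */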
#ifdef VBOX_WITH_VMDK_ESX
/** @todo the ESX code is not tested, not used, and lacks error messages. */
/**
* Magic number for images created by VMware GSX Server 3 or ESX Server 3.
*/
#pragma pack(1)
typedef struct COWDisk_Header
{
/* The spec incompletely documents quite a few further fields, but states
* that they are unused by the current format. Replace them by padding. */
char reserved1[1604];
char reserved2[8];
char padding[396];
} COWDisk_Header;
#pragma pack()
#endif /* VBOX_WITH_VMDK_ESX */
#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
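/** Convert a sector number/count into a byte offset/count. VMDK uses 512-byte
 * sectors throughout, making this the inverse of VMDK_BYTE2SECTOR. */
#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)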
/**
* VMDK extent type.
*/
typedef enum VMDKETYPE
{
/** Hosted sparse extent. */
VMDKETYPE_HOSTED_SPARSE = 1,
/** Flat extent. */
VMDKETYPE_FLAT,
/** Zero extent. */
VMDKETYPE_ZERO,
/** VMFS extent, used by ESX. */
VMDKETYPE_VMFS
#ifdef VBOX_WITH_VMDK_ESX
,
/** ESX sparse extent. */
VMDKETYPE_ESX_SPARSE
#endif /* VBOX_WITH_VMDK_ESX */
} VMDKETYPE, *PVMDKETYPE;
/**
* VMDK access type for an extent.
*/
typedef enum VMDKACCESS
{
/** No access allowed. */
VMDKACCESS_NOACCESS = 0,
/** Read-only access. */
VMDKACCESS_READONLY,
/** Read-write access. */
VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
/** Forward declaration for PVMDKIMAGE. */
typedef struct VMDKIMAGE *PVMDKIMAGE;
/**
* Extents files entry. Used for opening a particular file only once.
*/
typedef struct VMDKFILE
{
/** Pointer to filename. Local copy. */
const char *pszFilename;
/** File open flags for consistency checking. */
unsigned fOpen;
/** File handle. */
/** Handle for asynchronous access if requested. */
void *pStorage;
/** Flag whether to use File or pStorage. */
bool fAsyncIO;
/** Reference counter. */
unsigned uReferences;
/** Flag whether the file should be deleted on last close. */
bool fDelete;
/** Pointer to the image we belong to. */
/** Pointer to next file descriptor. */
/** Pointer to the previous file descriptor. */
/**
* VMDK extent data structure.
*/
typedef struct VMDKEXTENT
{
/** File handle. */
/** Base name of the image extent. */
const char *pszBasename;
/** Full name of the image extent. */
const char *pszFullname;
/** Number of sectors in this extent. */
/** Number of sectors per block (grain in VMDK speak). */
/** Starting sector number of descriptor. */
/** Size of descriptor in sectors. */
/** Starting sector number of grain directory. */
/** Starting sector number of redundant grain directory. */
/** Total number of metadata sectors. */
/** Nominal size (i.e. as described by the descriptor) of this extent. */
/** Sector offset (i.e. as described by the descriptor) of this extent. */
/** Number of entries in a grain table. */
/** Number of sectors reachable via a grain directory entry. */
/** Number of entries in the grain directory. */
/** Pointer to the next free sector. Legacy information. Do not use. */
/** Number of this extent in the list of images. */
/** Pointer to the descriptor (NULL if no descriptor in this extent). */
char *pDescData;
/** Pointer to the grain directory. */
/** Pointer to the redundant grain directory. */
/** VMDK version of this extent. 1=1.0/1.1 */
/** Type of this extent. */
/** Access to this extent. */
/** Flag whether this extent is marked as unclean. */
bool fUncleanShutdown;
/** Flag whether the metadata in the extent header needs to be updated. */
bool fMetaDirty;
/** Flag whether there is a footer in this extent. */
bool fFooter;
/** Compression type for this extent. */
/** Last grain which has been written to. Only for streamOptimized extents. */
/** Sector number of last grain which has been written to. Only for
* streamOptimized extents. */
/** Data size of last grain which has been written to. Only for
* streamOptimized extents. */
/** Starting sector of the decompressed grain buffer. */
/** Decompressed grain buffer for streamOptimized extents. */
void *pvGrain;
/** Reference to the image in which this extent is used. Do not use this
* on a regular basis to avoid passing pImage references to functions
* explicitly. */
} VMDKEXTENT, *PVMDKEXTENT;
/**
* Grain table cache size. Allocated per image.
*/
#define VMDK_GT_CACHE_SIZE 256
/**
* Grain table block size. Smaller than an actual grain table block to allow
* more grain table blocks to be cached without having to allocate excessive
* amounts of memory for the cache.
*/
#define VMDK_GT_CACHELINE_SIZE 128
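/* With these values one cache line covers 128 32-bit grain table entries,
 * i.e. 512 bytes (exactly one sector) of grain table data, and the whole
 * cache holds at most 256 * 512 bytes = 128K of grain table data per image. */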
/**
* Maximum number of lines in a descriptor file. Not worth the effort of
* making it variable. Descriptor files are generally very short (~20 lines).
*/
#define VMDK_DESCRIPTOR_LINES_MAX 100U
/**
* Parsed descriptor information. Allows easy access and update of the
* descriptor (whether separate file or not). Free form text files suck.
*/
typedef struct VMDKDESCRIPTOR
{
/** Line number of first entry of the disk descriptor. */
unsigned uFirstDesc;
/** Line number of first entry in the extent description. */
unsigned uFirstExtent;
/** Line number of first disk database entry. */
unsigned uFirstDDB;
/** Total number of lines. */
unsigned cLines;
/** Total amount of memory available for the descriptor. */
/** Set if descriptor has been changed and not yet written to disk. */
bool fDirty;
/** Array of pointers to the data in the descriptor. */
char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
/** Array of line indices pointing to the next non-comment line. */
unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
/**
* Cache entry for translating extent/sector to a sector number in that
* extent.
*/
typedef struct VMDKGTCACHEENTRY
{
/** Extent number for which this entry is valid. */
/** GT data block number. */
/** Data part of the cache entry. */
/**
* Cache data structure for blocks of grain table entries. For now this is a
* fixed size direct mapping cache, but this should be adapted to the size of
* the sparse image and maybe converted to a set-associative cache. The
* implementation below implements a write-through cache with write allocate.
*/
typedef struct VMDKGTCACHE
{
/** Cache entries. */
/** Number of cache entries (currently unused). */
unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
/**
* Complete VMDK image data structure. Mainly a collection of extents and a few
* extra global data fields.
*/
typedef struct VMDKIMAGE
{
/** Pointer to the image extents. */
/** Number of image extents. */
unsigned cExtents;
/** Pointer to the files list, for opening a file referenced multiple
* times only once (happens mainly with raw partition access). */
/** Base image name. */
const char *pszFilename;
/** Descriptor file if applicable. */
/** Pointer to the per-disk VD interface list. */
/** Error interface. */
/** Error interface callbacks. */
/** Async I/O interface. */
/** Async I/O interface callbacks. */
/**
* Pointer to an array of task handles for task submission.
* This is an optimization because the task number to submit is not known
* and allocating/freeing an array in the read/write functions every time
* is too expensive.
*/
void **apTask;
/** Entries available in the task handle array. */
unsigned cTask;
/** Open flags passed by VBoxHD layer. */
unsigned uOpenFlags;
/** Image flags defined during creation or determined during open. */
unsigned uImageFlags;
/** Total size of the image. */
/** Physical geometry of this image. */
/** Logical geometry of this image. */
/** Image UUID. */
/** Image modification UUID. */
/** Parent image UUID. */
/** Parent image modification UUID. */
/** Pointer to grain table cache, if this image contains sparse extents. */
/** Pointer to the descriptor (NULL if no separate descriptor file). */
char *pDescData;
/** Allocation size of the descriptor file. */
/** Parsed descriptor file content. */
} VMDKIMAGE;
/** State for the input callout of the inflate reader. */
typedef struct VMDKINFLATESTATE
{
/* File where the data is stored. */
/* Total size of the data to read. */
/* Offset in the file to read. */
/* Current read position. */
/** State for the output callout of the deflate writer. */
typedef struct VMDKDEFLATESTATE
{
/* File where the data is to be stored. */
/* Offset in the file to write at. */
/* Current write position. */
/*******************************************************************************
* Static Variables *
*******************************************************************************/
/** NULL-terminated array of supported file extensions. */
static const char *const s_apszVmdkFileExtensions[] =
{
"vmdk",
};
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
bool fDelete);
/**
* Internal: signal an error to the frontend.
*/
const char *pszFormat, ...)
{
return rc;
}
/**
* Internal: open a file (using a file descriptor cache to ensure each file
* is only opened once - anything else can cause locking problems).
*/
{
int rc = VINF_SUCCESS;
{
{
pVmdkFile->uReferences++;
*ppVmdkFile = pVmdkFile;
return rc;
}
}
/* If we get here, there's no matching entry in the cache. */
{
*ppVmdkFile = NULL;
return VERR_NO_MEMORY;
}
{
*ppVmdkFile = NULL;
return VERR_NO_MEMORY;
}
{
? true
: false,
}
else
{
}
if (RT_SUCCESS(rc))
{
*ppVmdkFile = pVmdkFile;
}
else
{
*ppVmdkFile = NULL;
}
return rc;
}
/**
* Internal: close a file, updating the file descriptor cache.
*/
{
int rc = VINF_SUCCESS;
pVmdkFile->uReferences--;
if (pVmdkFile->uReferences == 0)
{
/* Unchain the element from the list. */
if (pNext)
if (pPrev)
else
{
}
else
{
}
}
*ppVmdkFile = NULL;
return rc;
}
/**
* Internal: read from a file distinguishing between async and normal operation
*/
{
else
}
/**
* Internal: write to a file distinguishing between async and normal operation
*/
{
else
}
/**
* Internal: get the size of a file distinguishing between async and normal operation
*/
{
{
AssertMsgFailed(("TODO\n"));
return 0;
}
else
}
/**
* Internal: set the size of a file distinguishing between async and normal operation
*/
{
{
AssertMsgFailed(("TODO\n"));
return VERR_NOT_SUPPORTED;
}
else
}
/**
* Internal: flush a file distinguishing between async and normal operation
*/
{
else
}
static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
{
if (pInflateState->iOffset < 0)
{
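/* First call for this stream: report exactly one prefix byte to the
 * decompressor before any file data, since the grain on disk stores a
 * bare deflate stream without RTZip's leading stream-type byte. */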
if (pcbBuf)
*pcbBuf = 1;
pInflateState->iOffset = 0;
return VINF_SUCCESS;
}
if (RT_FAILURE(rc))
return rc;
return VINF_SUCCESS;
}
/**
* Internal: read from a file and inflate the compressed data,
* distinguishing between async and normal operation
*/
{
{
AssertMsgFailed(("TODO\n"));
return VERR_NOT_SUPPORTED;
}
else
{
int rc;
if (RT_FAILURE(rc))
return rc;
if ( uMarker != VMDK_MARKER_IGNORE
return VERR_VD_VMDK_INVALID_FORMAT;
{
/* Compressed grain marker. Data follows immediately. */
if (puLBA)
if (pcbMarkerData)
}
else
{
{
return VERR_VD_VMDK_INVALID_FORMAT;
}
{
if (pcbMarkerData)
}
else
{
return VERR_VD_VMDK_INVALID_FORMAT;
}
}
/* Sanity check - the expansion ratio should be much less than 2. */
return VERR_VD_VMDK_INVALID_FORMAT;
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return rc;
if (cbActuallyRead != cbToRead)
return rc;
}
}
{
if (pDeflateState->iOffset < 0)
{
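/* First call for this stream: drop the leading stream-type byte emitted by
 * the RTZip compressor, as it is not part of the on-disk VMDK format. */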
cbBuf--;
pDeflateState->iOffset = 0;
}
if (!cbBuf)
return VINF_SUCCESS;
if (RT_FAILURE(rc))
return rc;
return VINF_SUCCESS;
}
/**
* Internal: deflate the uncompressed data and write to a file,
* distinguishing between async and normal operation
*/
{
{
AssertMsgFailed(("TODO\n"));
return VERR_NOT_SUPPORTED;
}
else
{
int rc;
if (uMarker == VMDK_MARKER_IGNORE)
{
/* Compressed grain marker. Data follows immediately. */
}
else
{
/** @todo implement creating the other marker types */
return VERR_NOT_IMPLEMENTED;
}
rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
if (RT_FAILURE(rc))
return rc;
if (RT_SUCCESS(rc))
if (RT_SUCCESS(rc))
{
if (pcbMarkerData)
/* Set the file size to remove old garbage in case the block is
* rewritten. Cannot cause data loss as the code calling this
* guarantees that data gets only appended. */
if (uMarker == VMDK_MARKER_IGNORE)
{
/* Compressed grain marker. */
if (RT_FAILURE(rc))
return rc;
}
else
{
/** @todo implement creating the other marker types */
return VERR_NOT_IMPLEMENTED;
}
}
return rc;
}
}
/**
* Internal: check if all files are closed, prevent leaking resources.
*/
{
{
LogRel(("VMDK: leaking reference to file \"%s\"\n",
pVmdkFile->pszFilename));
else
if (RT_SUCCESS(rc))
}
return rc;
}
/**
* Internal: truncate a string (at a UTF8 code point boundary) and encode the
* critical non-ASCII characters.
*/
static char *vmdkEncodeString(const char *psz)
{
{
char *pszDstPrev = pszDst;
if (Cp == '\\')
{
}
else if (Cp == '\n')
{
}
else if (Cp == '\r')
{
}
else
{
pszDst = pszDstPrev;
break;
}
}
*pszDst = '\0';
}
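/* Example: a comment containing a newline is stored with the newline written
 * out as the two characters '\' and 'n', so the value stays on a single
 * descriptor line; vmdkDecodeString below reverses the encoding. */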
/**
* Internal: decode a string and store it into the specified string.
*/
{
int rc = VINF_SUCCESS;
char szBuf[4];
if (!cb)
return VERR_BUFFER_OVERFLOW;
{
if (Cp == '\\')
{
if (CpQ == 'n')
else if (CpQ == 'r')
else if (CpQ == '\0')
{
break;
}
else
}
else
/* Need to leave space for terminating NUL. */
{
break;
}
}
*psz = '\0';
return rc;
}
{
int rc = VINF_SUCCESS;
unsigned i;
goto out;
if (!pGD)
{
rc = VERR_NO_MEMORY;
goto out;
}
/* The VMDK 1.1 spec talks about compressed grain directories, but real
* life files don't have them. The spec is wrong in creative ways. */
if (RT_FAILURE(rc))
{
rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
goto out;
}
if (pExtent->uSectorRGD)
{
if (!pRGD)
{
rc = VERR_NO_MEMORY;
goto out;
}
/* The VMDK 1.1 spec talks about compressed grain directories, but real
* life files don't have them. The spec is wrong in creative ways. */
if (RT_FAILURE(rc))
{
rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
goto out;
}
/* Check grain table and redundant grain table for consistency. */
if (!pTmpGT1)
{
rc = VERR_NO_MEMORY;
goto out;
}
if (!pTmpGT2)
{
rc = VERR_NO_MEMORY;
goto out;
}
i < pExtent->cGDEntries;
{
/* If no grain table is allocated skip the entry. */
continue;
{
/* Just one grain directory entry refers to a not yet allocated
* grain table or both grain directory copies refer to the same
* grain table. Not allowed. */
rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
goto out;
}
/* The VMDK 1.1 spec talks about compressed grain tables, but real
* life files don't have them. The spec is wrong in creative ways. */
if (RT_FAILURE(rc))
{
rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
goto out;
}
/* The VMDK 1.1 spec talks about compressed grain tables, but real
* life files don't have them. The spec is wrong in creative ways. */
if (RT_FAILURE(rc))
{
rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
goto out;
}
{
rc = vmdkError(pExtent->pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
goto out;
}
}
/** @todo figure out what to do for unclean VMDKs. */
}
{
uint32_t uLastGrainSector = 0;
if (!pTmpGT)
{
rc = VERR_NO_MEMORY;
goto out;
}
{
/* If no grain table is allocated skip the entry. */
if (*pGDTmp == 0)
continue;
/* The VMDK 1.1 spec talks about compressed grain tables, but real
* life files don't have them. The spec is wrong in creative ways. */
if (RT_FAILURE(rc))
{
rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
goto out;
}
uint32_t j;
{
/* If no grain is allocated skip the entry. */
if (uGTTmp == 0)
continue;
{
rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: grain table in '%s' contains a violation of the ordering assumptions"), pExtent->pszFullname);
goto out;
}
}
}
/* streamOptimized extents need a grain decompress buffer. */
{
rc = VERR_NO_MEMORY;
goto out;
}
if (uLastGrainSector)
{
pExtent->pvGrain, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain), VMDK_MARKER_IGNORE, &uLBA, &cbMarker);
if (RT_FAILURE(rc))
goto out;
}
}
out:
if (RT_FAILURE(rc))
return rc;
}
bool fPreAlloc)
{
int rc = VINF_SUCCESS;
unsigned i;
if (fPreAlloc)
else
cbGTRounded = 0;
if (!pGD)
{
rc = VERR_NO_MEMORY;
goto out;
}
if (!pRGD)
{
rc = VERR_NO_MEMORY;
goto out;
}
cbOverhead = RT_ALIGN_64(VMDK_SECTOR2BYTE(uStartSector) + 2 * (cbGDRounded + cbGTRounded), VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
/* For streamOptimized extents put the end-of-stream marker at the end. */
else
if (RT_FAILURE(rc))
goto out;
if (fPreAlloc)
{
for (i = 0; i < pExtent->cGDEntries; i++)
{
pRGD[i] = uOffsetSectors;
/* Write the redundant grain directory entry to disk. */
if (RT_FAILURE(rc))
{
rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
goto out;
}
}
for (i = 0; i < pExtent->cGDEntries; i++)
{
pGD[i] = uOffsetSectors;
/* Write the grain directory entry to disk. */
if (RT_FAILURE(rc))
{
rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
goto out;
}
}
}
/* streamOptimized extents need a grain decompress buffer. */
{
{
rc = VERR_NO_MEMORY;
goto out;
}
}
out:
if (RT_FAILURE(rc))
return rc;
}
{
{
}
{
}
}
char **ppszUnquoted, char **ppszNext)
{
char *pszQ;
char *pszUnquoted;
/* Skip over whitespace. */
pszStr++;
if (*pszStr != '"')
{
pszQ++;
}
else
{
pszStr++;
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
}
if (!pszUnquoted)
return VERR_NO_MEMORY;
if (ppszNext)
return VINF_SUCCESS;
}
const char *pszLine)
{
return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
pDescriptor->cLines++;
pDescriptor->fDirty = true;
return VINF_SUCCESS;
}
{
const char *pszValue;
while (uStart != 0)
{
{
/* Key matches, check for a '=' (preceded by whitespace). */
pszValue++;
if (*pszValue == '=')
{
break;
}
}
}
return !!uStart;
}
unsigned uStart,
{
char *pszTmp;
unsigned uLast = 0;
while (uStart != 0)
{
{
/* Key matches, check for a '=' (preceded by whitespace). */
pszTmp++;
if (*pszTmp == '=')
{
pszTmp++;
pszTmp++;
break;
}
}
}
if (uStart)
{
if (pszValue)
{
/* Key already exists, replace existing value. */
/* Check for buffer overflow. */
return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
}
else
{
{
if (pDescriptor->aNextLines[i])
else
}
pDescriptor->cLines--;
/* Adjust starting line numbers of following descriptor sections. */
pDescriptor->uFirstDDB--;
}
}
else
{
/* Key doesn't exist, append after the last entry in this category. */
if (!pszValue)
{
/* Key doesn't exist, and it should be removed. Simply a no-op. */
return VINF_SUCCESS;
}
/* Check for buffer overflow. */
return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
{
else
pDescriptor->aNextLines[i] = 0;
}
pDescriptor->cLines++;
/* Adjust starting line numbers of following descriptor sections. */
pDescriptor->uFirstDDB++;
}
pDescriptor->fDirty = true;
return VINF_SUCCESS;
}
{
const char *pszValue;
&pszValue))
return VERR_VD_VMDK_VALUE_NOT_FOUND;
}
{
const char *pszValue;
char *pszValueUnquoted;
&pszValue))
return VERR_VD_VMDK_VALUE_NOT_FOUND;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
char *pszValueQuoted;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
if (!uEntry)
return;
/* Move everything including \0 in the entry marking the end of buffer. */
{
if (pDescriptor->aNextLines[i])
else
}
pDescriptor->cLines--;
if (pDescriptor->uFirstDDB)
pDescriptor->uFirstDDB--;
return;
}
{
char *pszTmp;
char szExt[1024];
/* Find last entry in extent description. */
while (uStart)
{
}
if (enmType == VMDKETYPE_ZERO)
{
}
else
{
if (!uSectorOffset)
else
}
/* Check for buffer overflow. */
return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
{
else
pDescriptor->aNextLines[i] = 0;
}
pDescriptor->cLines++;
/* Adjust starting line numbers of following descriptor sections. */
pDescriptor->uFirstDDB++;
pDescriptor->fDirty = true;
return VINF_SUCCESS;
}
{
const char *pszValue;
char *pszValueUnquoted;
&pszValue))
return VERR_VD_VMDK_VALUE_NOT_FOUND;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
const char *pszValue;
char *pszValueUnquoted;
&pszValue))
return VERR_VD_VMDK_VALUE_NOT_FOUND;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
const char *pszValue;
char *pszValueUnquoted;
&pszValue))
return VERR_VD_VMDK_VALUE_NOT_FOUND;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
int rc;
char *pszValQuoted;
if (pszVal)
{
if (RT_FAILURE(rc))
return rc;
}
else
pszValQuoted = NULL;
if (pszValQuoted)
return rc;
}
{
char *pszUuid;
if (RT_FAILURE(rc))
return rc;
pszUuid);
return rc;
}
{
char *pszValue;
if (RT_FAILURE(rc))
return rc;
pszValue);
return rc;
}
{
int rc = VINF_SUCCESS;
unsigned cLine = 0, uLastNonEmptyLine = 0;
while (*pTmp != '\0')
{
if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
goto out;
}
{
if (*pTmp == '\r')
{
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
goto out;
}
else
{
/* Get rid of CR character. */
*pTmp = '\0';
}
}
pTmp++;
}
/* Get rid of LF character. */
if (*pTmp == '\n')
{
*pTmp = '\0';
pTmp++;
}
}
/* Pointer right after the end of the used part of the buffer. */
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
goto out;
}
/* Initialize those, because we need to be able to reopen an image. */
pDescriptor->uFirstDesc = 0;
pDescriptor->uFirstExtent = 0;
pDescriptor->uFirstDDB = 0;
for (unsigned i = 0; i < cLine; i++)
{
{
{
/* An extent descriptor. */
{
/* Incorrect ordering of entries. */
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (!pDescriptor->uFirstExtent)
{
pDescriptor->uFirstExtent = i;
uLastNonEmptyLine = 0;
}
}
{
/* A disk database entry. */
{
/* Incorrect ordering of entries. */
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (!pDescriptor->uFirstDDB)
{
pDescriptor->uFirstDDB = i;
uLastNonEmptyLine = 0;
}
}
else
{
/* A normal entry. */
{
/* Incorrect ordering of entries. */
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (!pDescriptor->uFirstDesc)
{
pDescriptor->uFirstDesc = i;
uLastNonEmptyLine = 0;
}
}
if (uLastNonEmptyLine)
uLastNonEmptyLine = i;
}
}
out:
return rc;
}
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
int rc;
pDescriptor->uFirstDesc = 0;
pDescriptor->uFirstExtent = 0;
pDescriptor->uFirstDDB = 0;
pDescriptor->cLines = 0;
pDescriptor->fDirty = false;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
/* The trailing space is created by VMware, too. */
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
/* Now that the framework is in place, use the normal functions to insert
* the remaining keys. */
char szBuf[9];
"CID", szBuf);
if (RT_FAILURE(rc))
goto out;
"parentCID", "ffffffff");
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
out:
return rc;
}
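/* For reference, a descriptor created this way looks roughly as follows
 * (all names and values illustrative):
 *
 *   # Disk DescriptorFile
 *   version=1
 *   CID=fffffffe
 *   parentCID=ffffffff
 *   createType="monolithicSparse"
 *
 *   # Extent description
 *   RW 2097152 SPARSE "example.vmdk"
 *
 *   # The Disk Data Base
 *   #DDB
 *
 *   ddb.geometry.cylinders = "1024"
 *   ddb.geometry.heads = "16"
 *   ddb.geometry.sectors = "63"
 */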
{
int rc;
unsigned cExtents;
unsigned uLine;
&pImage->Descriptor);
if (RT_FAILURE(rc))
return rc;
/* Check version, must be 1. */
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
if (uVersion != 1)
return vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
/* Get image creation type and determine image flags. */
const char *pszCreateType;
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
RTStrFree((char *)(void *)pszCreateType);
/* Count the number of extent config entries. */
uLine != 0;
/* nothing */;
{
/* Monolithic image, must have only one extent (already opened). */
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
}
{
/* Non-monolithic image, extents need to be allocated. */
if (RT_FAILURE(rc))
return rc;
}
{
/* Access type of the extent. */
{
pszLine += 2;
}
{
pszLine += 6;
}
{
pszLine += 8;
}
else
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
if (*pszLine++ != ' ')
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
/* Nominal size of the extent. */
if (RT_FAILURE(rc))
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
if (*pszLine++ != ' ')
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
/* Type of the extent. */
#ifdef VBOX_WITH_VMDK_ESX
/** @todo Add the ESX extent types. Not necessary for now because
* the ESX extent types are only used inside an ESX server. They are
* automatically converted if the VMDK is exported. */
#endif /* VBOX_WITH_VMDK_ESX */
{
pszLine += 6;
}
{
pszLine += 4;
}
{
pszLine += 4;
}
{
pszLine += 4;
}
else
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
{
/* This one has no basename or offset. */
if (*pszLine == ' ')
pszLine++;
if (*pszLine != '\0')
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
}
else
{
/* All other extent types have basename and optional offset. */
if (*pszLine++ != ' ')
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
/* Basename of the image. Surrounded by quotes. */
char *pszBasename;
if (RT_FAILURE(rc))
return rc;
if (*pszLine == ' ')
{
pszLine++;
if (*pszLine != '\0')
{
/* Optional offset in extent specified. */
if (RT_FAILURE(rc))
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
}
}
if (*pszLine != '\0')
return vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
}
}
/* Determine PCHS geometry (autogenerate if necessary). */
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
{
/* Mark PCHS geometry as not yet valid (can't do the calculation here
* as the total image size isn't known yet). */
}
/* Determine LCHS geometry (set to 0 if not specified). */
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
{
}
/* Get image UUID. */
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
{
/* Image without UUID. Probably created by VMware and not yet used by
* VirtualBox. The UUID can only be stored when opened in read/write
* mode, so don't bother producing a sensible UUID otherwise. */
else
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
}
}
else if (RT_FAILURE(rc))
return rc;
/* Get image modification UUID. */
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
{
/* Image without UUID. Probably created by VMware and not yet used by
* VirtualBox. The UUID can only be stored when opened in read/write
* mode, so don't bother producing a sensible UUID otherwise. */
else
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
}
}
else if (RT_FAILURE(rc))
return rc;
/* Get UUID of parent image. */
&pImage->ParentUuid);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
{
/* Image without UUID. Probably created by VMware and not yet used by
* VirtualBox. The UUID can only be stored when opened in read/write
* mode, so don't bother producing a sensible UUID otherwise. */
else
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
}
}
else if (RT_FAILURE(rc))
return rc;
/* Get parent image modification UUID. */
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
{
/* Image without UUID. Probably created by VMware and not yet used by
* VirtualBox. The UUID can only be stored when opened in read/write
* mode, so don't bother producing a sensible UUID otherwise. */
else
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
}
}
else if (RT_FAILURE(rc))
return rc;
return VINF_SUCCESS;
}
/**
* Internal: write/update the descriptor part of the image.
*/
{
int rc = VINF_SUCCESS;
{
/* Separate descriptor file. */
uOffset = 0;
cbLimit = 0;
}
else
{
/* Embedded descriptor file. */
}
/* Bail out if there is no file to write to. */
return VERR_INVALID_PARAMETER;
{
return vmdkError(pImage, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
uOffset++;
}
if (cbLimit)
{
/* Inefficient, but simple. */
{
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
uOffset++;
}
}
else
{
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
}
return rc;
}
/**
* Internal: validate the consistency check values in a binary header.
*/
static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
{
int rc = VINF_SUCCESS;
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
return rc;
}
{
rc = vmdkError(pImage, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
return rc;
}
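/* The following check relies on the fixed end-of-line detection bytes stored
 * in the header (a LF, a space, a CR and a LF in that order). If they no
 * longer match, the file has been mangled by CR/LF text-mode conversion. */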
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
return rc;
}
return rc;
}
/**
* Internal: read metadata belonging to an extent with binary header, i.e.
* as found in monolithic files.
*/
{
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
goto out;
}
if (RT_FAILURE(rc))
goto out;
{
/* Read the footer, which isn't compressed and comes before the
* end-of-stream marker. This is bending the VMDK 1.1 spec, but that's
* VMware reality. Theory and practice have very little in common. */
if (RT_FAILURE(rc))
{
goto out;
}
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
goto out;
}
if (RT_FAILURE(rc))
goto out;
}
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
goto out;
}
{
}
else
{
pExtent->uSectorRGD = 0;
}
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
goto out;
}
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
goto out;
}
/* Fix up the number of descriptor sectors, as some flat images have
* really just one, and this causes failures when inserting the UUID
* values and other extra information. */
{
/* Do it the easy way - just fix it for flat images which have no
* other complicated metadata which needs space too. */
}
out:
if (RT_FAILURE(rc))
return rc;
}
/**
* Internal: read additional metadata belonging to an extent. For those
* extents which have no additional metadata just verify the information.
*/
{
int rc = VINF_SUCCESS;
/* The image must be a multiple of a sector in size and contain the data
* area (flat images only). If not, it means the image is at least
* truncated, or even seriously garbled. */
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
goto out;
}
/* disabled the size check again as there are too many too short vmdks out there */
&& (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
goto out;
}
#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
goto out;
/* The spec says that this must be a power of two and greater than 8,
* but probably they meant not less than 8. */
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
goto out;
}
/* This code requires that a grain table must hold a power of two multiple
* of the number of entries per GT cache entry. */
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
goto out;
}
out:
if (RT_FAILURE(rc))
return rc;
}
/**
* Internal: write/update the metadata for a sparse extent.
*/
{
{
{
}
else
{
}
}
else
{
{
}
else
{
}
}
if (RT_FAILURE(rc))
rc = vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
return rc;
}
#ifdef VBOX_WITH_VMDK_ESX
/**
* Internal: unused code to read the metadata of a sparse ESX extent.
*
* Such extents never leave ESX server, so this isn't ever used.
*/
{
if (RT_FAILURE(rc))
goto out;
{
goto out;
}
/* The spec says that this must be between 1 sector and 1MB. This code
* assumes it's a power of two, so check that requirement, too. */
|| pExtent->cSectorsPerGrain == 0
{
goto out;
}
pExtent->uDescriptorSector = 0;
pExtent->cDescriptorSectors = 0;
pExtent->uSectorRGD = 0;
pExtent->cOverheadSectors = 0;
{
goto out;
}
{
/* Inconsistency detected. Computed number of GD entries doesn't match
* stored value. Better be safe than sorry. */
goto out;
}
out:
if (RT_FAILURE(rc))
return rc;
}
#endif /* VBOX_WITH_VMDK_ESX */
/**
* Internal: free the memory used by the extent data structure, optionally
* deleting the referenced files.
*/
bool fDelete)
{
{
}
{
/* Do not delete raw extents, these have full and base names equal. */
&& pExtent->pszFullname
}
if (pExtent->pszBasename)
{
}
if (pExtent->pszFullname)
{
}
{
}
}
/**
* Internal: allocate grain table cache if necessary for this image.
*/
{
/* Allocate grain table cache if any sparse extent is present. */
{
#ifdef VBOX_WITH_VMDK_ESX
#endif /* VBOX_WITH_VMDK_ESX */
)
{
/* Allocate grain table cache. */
return VERR_NO_MEMORY;
for (unsigned i = 0; i < VMDK_GT_CACHE_SIZE; i++)
{
}
break;
}
}
return VINF_SUCCESS;
}
/**
* Internal: allocate the given number of extents.
*/
{
int rc = VINF_SUCCESS;
if (pImage)
{
for (unsigned i = 0; i < cExtents; i++)
{
}
}
else
rc = VERR_NO_MEMORY;
return rc;
}
/**
* Internal: Open an image, constructing all necessary data structures.
*/
{
int rc;
/* Try to get error interface. */
if (pImage->pInterfaceError)
/* Try to get async I/O interface. */
if (pImage->pInterfaceAsyncIO)
/*
* Open the image.
* We don't have to check for asynchronous access because
* we only support raw access and the opened file is a description
* file where no data is stored.
*/
if (RT_FAILURE(rc))
{
/* Do NOT signal an appropriate error here, as the VD layer has the
* choice of retrying the open if it failed. */
goto out;
}
/* Read magic (if present). */
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
goto out;
}
/* Handle the file according to its magic number. */
{
/* It's a hosted single-extent image. */
if (RT_FAILURE(rc))
goto out;
/* The opened file is passed to the extent. No separate descriptor
* file, so no need to keep anything open for the image. */
if (!pExtent->pszFullname)
{
rc = VERR_NO_MEMORY;
goto out;
}
if (RT_FAILURE(rc))
goto out;
/* As we're dealing with a monolithic image here, there must
* be a descriptor embedded in the image file. */
{
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
goto out;
}
/* HACK: extend the descriptor if it is unusually small and it fits in
* the unused space after the image header. Allows opening VMDK files
* with extremely small descriptors in read/write mode. */
{
pExtent->fMetaDirty = true;
}
/* Read the descriptor from the extent. */
{
rc = VERR_NO_MEMORY;
goto out;
}
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
goto out;
}
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
/* Mark the extent as unclean if opened in read-write mode. */
if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
{
pExtent->fUncleanShutdown = true;
pExtent->fMetaDirty = true;
}
}
else
{
{
rc = VERR_NO_MEMORY;
goto out;
}
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
goto out;
}
{
/* Likely the read is truncated. Better fail a bit too early
* (normally the descriptor is much smaller than our buffer). */
rc = vmdkError(pImage, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
goto out;
/*
* We have to check for the asynchronous open flag. The
* extents are parsed and the type of all are known now.
* Check if every extent is either FLAT or ZERO.
*/
if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
{
{
{
/*
* Opened image contains at least one extent that is neither flat nor zero.
* Return error but don't set error message as the caller
* has the chance to open in non async I/O mode.
*/
goto out;
}
}
}
{
if (pExtent->pszBasename)
{
/* Hack to figure out whether the specified name in the
* extent descriptor is absolute. Doesn't always work, but
* should be good enough for now. */
char *pszFullname;
/** @todo implement proper path absolute check. */
{
if (!pszFullname)
{
rc = VERR_NO_MEMORY;
goto out;
}
}
else
{
if (!pszDirname)
{
rc = VERR_NO_MEMORY;
goto out;
}
if (RT_FAILURE(rc))
goto out;
}
}
else
{
case VMDKETYPE_HOSTED_SPARSE:
if (RT_FAILURE(rc))
{
/* Do NOT signal an appropriate error here, as the VD
* layer has the choice of retrying the open if it
* failed. */
goto out;
}
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
/* Mark extent as unclean if opened in read-write mode. */
if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
{
pExtent->fUncleanShutdown = true;
pExtent->fMetaDirty = true;
}
break;
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
if (RT_FAILURE(rc))
{
/* Do NOT signal an appropriate error here, as the VD
* layer has the choice of retrying the open if it
* failed. */
goto out;
}
break;
case VMDKETYPE_ZERO:
/* Nothing to do. */
break;
default:
}
}
}
/* Make sure this is not reached accidentally with an error status. */
/* Determine PCHS geometry if not set. */
{
{
}
}
/* Update the image metadata now in case it has changed. */
if (RT_FAILURE(rc))
goto out;
/* Figure out a few per-image constants from the extents. */
{
#ifdef VBOX_WITH_VMDK_ESX
#endif /* VBOX_WITH_VMDK_ESX */
)
{
/* Here used to be a check whether the nominal size of an extent
* is a multiple of the grain size. The spec says that this is
* always the case, but unfortunately some files out there in the
* wild violate the spec (e.g. ReactOS 0.3.1). */
}
}
{
{
break;
}
}
if (RT_FAILURE(rc))
goto out;
out:
if (RT_FAILURE(rc))
vmdkFreeImage(pImage, false);
return rc;
}
/**
* Internal: create VMDK images for raw disk/partition access.
*/
{
int rc = VINF_SUCCESS;
{
/* Full raw disk access. This requires setting up a descriptor
* file and open the (flat) raw disk. */
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
/* Create raw disk descriptor file. */
false);
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
/* Set up basename for extent description. Cannot use StrDup. */
if (!pszBasename)
return VERR_NO_MEMORY;
/* For raw disks the full name is identical to the base name. */
if (!pExtent->pszFullname)
return VERR_NO_MEMORY;
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = false;
/* Open flat image, the raw disk. */
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
}
else
{
/* Raw partition access. This requires setting up a descriptor
* file, write the partition information to a flat extent and
* open all the (flat) raw disk partitions. */
/* First pass over the partitions to determine how many
* extents we need. One partition can require up to 4 extents.
* One to skip over unpartitioned space, one for the
* partitioning data, one to skip over unpartitioned space
* and one for the partition data. */
unsigned cExtents = 0;
for (unsigned i = 0; i < pRaw->cPartitions; i++)
{
if (pPart->cbPartitionData)
{
return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partitioning information in '%s'"), pImage->pszFilename);
cExtents++;
cExtents++;
}
if (pPart->cbPartition)
{
return vmdkError(pImage, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: cannot go backwards for partition data in '%s'"), pImage->pszFilename);
cExtents++;
cExtents++;
}
}
/* Another extent for filling up the rest of the image. */
cExtents++;
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
/* Create raw partition descriptor file. */
false);
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
/* Create base filename for the partition table extent. */
/** @todo remove fixed buffer without creating memory leaks. */
char pszPartition[1024];
if (!pszBaseBase)
return VERR_NO_MEMORY;
/* Second pass over the partitions, now define all extents. */
uint64_t uPartOffset = 0;
cExtents = 0;
uStart = 0;
for (unsigned i = 0; i < pRaw->cPartitions; i++)
{
if (pPart->cbPartitionData)
{
{
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = false;
}
/* Set up basename for extent description. Can't use StrDup. */
if (!pszBasename)
return VERR_NO_MEMORY;
/* Set up full name for partition extent. */
if (!pszDirname)
return VERR_NO_MEMORY;
char *pszFullname;
if (RT_FAILURE(rc))
return rc;
pExtent->fMetaDirty = false;
/* Create partition table flat image. */
false);
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
}
if (pPart->cbPartition)
{
{
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = false;
}
if (pPart->pszRawDevice)
{
/* Set up basename for extent descr. Can't use StrDup. */
if (!pszBasename)
return VERR_NO_MEMORY;
/* For raw disks full name is identical to base name. */
if (!pExtent->pszFullname)
return VERR_NO_MEMORY;
pExtent->fMetaDirty = false;
/* Open flat image, the raw partition. */
false);
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
}
else
{
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = false;
}
}
}
/* Another extent for filling up the rest of the image. */
{
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = false;
}
}
"fullDevice" : "partitionedDevice");
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
return rc;
}
/**
* Internal: create a regular (i.e. file-backed) VMDK image.
*/
unsigned uImageFlags,
unsigned uPercentStart, unsigned uPercentSpan)
{
int rc = VINF_SUCCESS;
unsigned cExtents = 1;
{
/* Do proper extent computation: need one smaller extent if the total
* size isn't evenly divisible by the split size. */
if (cbSize % VMDK_2G_SPLIT_SIZE)
cExtents++;
}
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
/* Basename strings needed for constructing the extent names. */
/* Create separate descriptor file if necessary. */
{
false);
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
}
else
/* Set up all extents. */
for (unsigned i = 0; i < cExtents; i++)
{
/* Set up fullname/basename for extent description. Cannot use StrDup
* for basename, as it is not guaranteed that the memory can be freed
* with RTMemTmpFree, which must be used as in other code paths
* StrDup is not usable. */
{
if (!pszBasename)
return VERR_NO_MEMORY;
}
else
{
char *pszTmp;
if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
{
if (cExtents == 1)
else
i+1, pszBasenameExt);
}
else
if (RT_FAILURE(rc))
return rc;
if (!pszBasename)
return VERR_NO_MEMORY;
}
char *pszFullname;
if (RT_FAILURE(rc))
return rc;
/* Create file for extent. */
false);
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
{
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
/* Fill image with zeroes. We do this for every fixed-size image since on some systems
* (for example Windows Vista), it takes ages to write a block near the end of a sparse
* file and the guest could complain about an ATA timeout. */
/** @todo Starting with Linux 2.6.23, there is an fallocate() system call.
* Currently supported file systems are ext4 and ocfs2. */
/* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
if (!pvBuf)
return VERR_NO_MEMORY;
/* Write data to all image blocks. */
{
if (RT_FAILURE(rc))
{
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
}
if (pfnProgress)
{
pvUser);
if (RT_FAILURE(rc))
{
return rc;
}
}
}
}
/* Place descriptor file information (where integrated). */
{
/* The descriptor is part of the (only) extent. */
}
if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
{
{
/* The spec says version is 1 for all VMDKs, but the vast
* majority of streamOptimized VMDKs actually contain
* version 3 - so go with the majority. Both are accepted. */
}
}
else
{
else
}
pExtent->fUncleanShutdown = true;
pExtent->fMetaDirty = true;
if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
{
1),
true);
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
}
pvUser);
cbRemaining -= cbExtent;
}
const char *pszDescType = NULL;
if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
{
pszDescType = "vmfs";
else
? "monolithicFlat" : "twoGbMaxExtentFlat";
}
else
{
pszDescType = "streamOptimized";
else
{
? "monolithicSparse" : "twoGbMaxExtentSparse";
}
}
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
return rc;
}
/**
* Internal: The actual code for creating any VMDK variant currently in
* existence on hosted environments.
*/
unsigned uImageFlags, const char *pszComment,
unsigned uPercentStart, unsigned uPercentSpan)
{
int rc;
/* Try to get error interface. */
if (pImage->pInterfaceError)
/* Try to get async I/O interface. */
if (pImage->pInterfaceAsyncIO)
&pImage->Descriptor);
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if ( (uImageFlags & VD_IMAGE_FLAGS_FIXED)
{
/* Raw disk image (includes raw partition). */
/* As the comment is misused, zap it so that no garbage comment
* is set below. */
pszComment = NULL;
}
else
{
/* Regular fixed or sparse image (monolithic or split). */
}
if (RT_FAILURE(rc))
goto out;
{
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
goto out;
}
}
if ( pPCHSGeometry->cCylinders != 0
&& pPCHSGeometry->cHeads != 0
&& pPCHSGeometry->cSectors != 0)
{
if (RT_FAILURE(rc))
goto out;
}
if ( pLCHSGeometry->cCylinders != 0
&& pLCHSGeometry->cHeads != 0
&& pLCHSGeometry->cSectors != 0)
{
if (RT_FAILURE(rc))
goto out;
}
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
{
rc = vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
goto out;
}
out:
if (RT_FAILURE(rc))
return rc;
}
/**
* Internal: Update image comment.
*/
{
char *pszCommentEncoded;
if (pszComment)
{
if (!pszCommentEncoded)
return VERR_NO_MEMORY;
}
else
"ddb.comment", pszCommentEncoded);
if (pszComment)
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
return VINF_SUCCESS;
}
/**
* Internal. Free all allocated space for representing an image, and optionally
* delete the image from disk.
*/
{
{
/* Mark all extents as clean. */
{
#ifdef VBOX_WITH_VMDK_ESX
#endif /* VBOX_WITH_VMDK_ESX */
)
{
}
}
}
(void)vmdkFlushImage(pImage);
{
}
{
}
{
}
}
/**
* Internal. Flush image data (and metadata) to disk.
*/
{
int rc = VINF_SUCCESS;
/* Update descriptor if changed. */
{
if (RT_FAILURE(rc))
goto out;
}
{
{
{
case VMDKETYPE_HOSTED_SPARSE:
if (RT_FAILURE(rc))
goto out;
{
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
}
break;
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
/** @todo update the header. */
break;
#endif /* VBOX_WITH_VMDK_ESX */
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
/* Nothing to do. */
break;
case VMDKETYPE_ZERO:
default:
AssertMsgFailed(("extent with type %d marked as dirty\n",
break;
}
}
{
case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
/** @todo implement proper path absolute check. */
break;
case VMDKETYPE_ZERO:
/* No need to do anything for this extent. */
break;
default:
break;
}
}
out:
return rc;
}
/**
* Internal. Find extent corresponding to the sector number in the disk.
*/
{
int rc = VINF_SUCCESS;
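/* Walk the extent list until the extent covering the requested sector
 * is found. */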
{
{
break;
}
}
if (pExtent)
else
return rc;
}
/**
* Internal. Hash function for placing the grain table hash entries.
*/
unsigned uExtent)
{
/** @todo this hash function is quite simple, maybe use a better one which
* scrambles the bits better. */
}
/**
* Internal. Get sector number in the extent file from the relative sector
* number in the extent.
*/
{
int rc;
return VERR_OUT_OF_RANGE;
if (!uGTSector)
{
/* There is no grain table referenced by this grain directory
* entry. So there is absolutely no data in this area. */
*puExtentSector = 0;
return VINF_SUCCESS;
}
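/* The grain directory entry points to a grain table; look up the grain
 * table entry for this sector, going through the grain table cache and
 * loading the cache line from the file on a miss. */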
{
/* Cache miss, fetch data from disk. */
VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
}
if (uGrainSector)
else
*puExtentSector = 0;
return VINF_SUCCESS;
}
/**
* Internal. Allocates a new grain table (if necessary), writes the grain
* and updates the grain table. The cache is also updated by this operation.
* This is separate from vmdkGetSector, because that should be as fast as
* possible. Most code from vmdkGetSector also appears here.
*/
{
int rc;
return VERR_OUT_OF_RANGE;
else
uRGTSector = 0; /**< avoid compiler warning */
if (!uGTSector)
{
/* There is no grain table referenced by this grain directory
* entry. So there is absolutely no data in this area. Allocate
* a new grain table and put the reference to it in the GDs. */
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
/* For writable streamOptimized extents the final sector is the
 * end-of-stream marker, which will be re-added after the grain table.
 * If the file has a footer it will also be re-added before the EOS marker. */
{
uGTSector--;
{
uGTSector--;
uEOSOff = 512;
rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after grain table in '%s'"), pExtent->pszFullname);
}
pExtent->uLastGrainSector = 0;
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after grain table in '%s'"), pExtent->pszFullname);
}
/* Normally the grain table is preallocated for hosted sparse extents
* that support more than 32 bit sector numbers. So this shouldn't
* ever happen on a valid extent. */
if (uGTSector > UINT32_MAX)
return VERR_VD_VMDK_INVALID_HEADER;
/* Write the grain table by writing the required number of grain table
 * cache chunks. This avoids dynamic memory allocation at the cost of
 * being a bit slower, but as this case occurs pretty infrequently that
 * should be acceptable. */
for (unsigned i = 0;
i++)
{
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
}
{
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
/* For writable streamOptimized extents the final sector is the
 * end-of-stream marker, which will be re-added after the grain table.
 * If the file has a footer it will also be re-added before the EOS marker. */
{
uRGTSector--;
{
uRGTSector--;
uEOSOff = 512;
rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uRGTSector) + pExtent->cGTEntries * sizeof(uint32_t));
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after redundant grain table in '%s'"), pExtent->pszFullname);
}
pExtent->uLastGrainSector = 0;
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after redundant grain table in '%s'"), pExtent->pszFullname);
}
/* Normally the redundant grain table is preallocated for hosted
* sparse extents that support more than 32 bit sector numbers. So
* this shouldn't ever happen on a valid extent. */
if (uRGTSector > UINT32_MAX)
return VERR_VD_VMDK_INVALID_HEADER;
/* Write the backup grain table by writing the required number of grain
 * table cache chunks. This avoids dynamic memory allocation at the cost
 * of being a bit slower, but as this case occurs pretty infrequently
 * that should be acceptable. */
for (unsigned i = 0;
i++)
{
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
}
}
/* Update the grain directory on disk. Doing this before writing the
 * grain table would result in a garbled extent if the operation is
 * aborted for some reason; done in this order the worst that can
 * happen is some unused sectors in the extent. */
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
{
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
}
/* As the final step update the in-memory copy of the GDs. */
}
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
/* Write the data. Always a full grain, or we're in big trouble. */
{
/* For streamOptimized extents this is a little more difficult: the
 * cached data also needs to be updated so that the last written block
 * is handled properly, unnecessary gaps should be avoided, and the
 * end-of-stream marker needs to be written as well. */
if (!pExtent->uLastGrainSector)
{
cbExtentSize -= 512;
cbExtentSize -= 512;
}
else
if (RT_FAILURE(rc))
{
pExtent->uGrainSector = 0;
pExtent->uLastGrainSector = 0;
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
}
{
uEOSOff = 512;
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after allocated data block in '%s'"), pExtent->pszFullname);
}
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after allocated data block in '%s'"), pExtent->pszFullname);
}
else
{
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
}
/* Update the grain table (and the cache). */
{
/* Cache miss, fetch data from disk. */
VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
}
else
{
/* Cache hit. Convert grain table block back to disk format, otherwise
* the code below will write garbage for all but the updated entry. */
for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
}
/* Update grain table on disk. */
VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
{
/* Update backup grain table on disk. */
VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
}
#ifdef VBOX_WITH_VMDK_ESX
{
pExtent->fMetaDirty = true;
}
#endif /* VBOX_WITH_VMDK_ESX */
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
static int vmdkCheckIfValid(const char *pszFilename)
{
int rc = VINF_SUCCESS;
if ( !pszFilename
|| !*pszFilename
{
goto out;
}
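/* A temporary image structure is allocated purely for this check and
 * freed again below. */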
if (!pImage)
{
rc = VERR_NO_MEMORY;
goto out;
}
/** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
* much as possible in vmdkOpenImage. */
vmdkFreeImage(pImage, false);
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnOpen */
void **ppBackendData)
{
LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
int rc;
/* Check open flags. All valid flags are supported. */
if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
{
goto out;
}
/* Check remaining arguments. */
if ( !VALID_PTR(pszFilename)
|| !*pszFilename
{
goto out;
}
if (!pImage)
{
rc = VERR_NO_MEMORY;
goto out;
}
if (RT_SUCCESS(rc))
*ppBackendData = pImage;
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnCreate */
unsigned uImageFlags, const char *pszComment,
unsigned uOpenFlags, unsigned uPercentStart,
void **ppBackendData)
{
LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
int rc;
if (pIfProgress)
{
}
/* Check open flags. All valid flags are supported. */
if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
{
goto out;
}
/* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
if ( !cbSize
{
goto out;
}
/* Check remaining arguments. */
if ( !VALID_PTR(pszFilename)
|| !*pszFilename
|| !VALID_PTR(pPCHSGeometry)
|| !VALID_PTR(pLCHSGeometry)
{
goto out;
}
if (!pImage)
{
rc = VERR_NO_MEMORY;
goto out;
}
{
rc = VERR_NO_MEMORY;
goto out;
}
if (RT_SUCCESS(rc))
{
/* So far the image is opened in read/write mode. Make sure the
 * image is opened in read-only mode if the caller requested that. */
if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
{
vmdkFreeImage(pImage, false);
if (RT_FAILURE(rc))
goto out;
}
*ppBackendData = pImage;
}
else
{
}
out:
return rc;
}
/**
* Replaces a fragment of a string with the specified string.
*
* @returns Pointer to the allocated UTF-8 string.
* @param pszWhere UTF-8 string to search in.
* @param pszWhat UTF-8 string to search for.
* @param pszByWhat UTF-8 string to replace the found string with.
*/
{
if (!pszFoundStr)
return NULL;
if (pszNewStr)
{
}
return pszNewStr;
}
/** @copydoc VBOXHDDBACKEND::pfnRename */
{
int rc = VINF_SUCCESS;
char **apszOldName = NULL;
char **apszNewName = NULL;
char **apszNewLines = NULL;
char *pszOldDescName = NULL;
bool fImageFreed = false;
bool fEmbeddedDesc = false;
char *pszNewBaseName = NULL;
char *pszOldBaseName = NULL;
char *pszNewFullName = NULL;
char *pszOldFullName = NULL;
const char *pszOldImageName;
unsigned i, line;
/* Check arguments. */
if ( !pImage
|| !VALID_PTR(pszFilename)
|| !*pszFilename)
{
goto out;
}
/*
 * Allocate arrays to store both the old and the new names of renamed
 * files, in case we have to roll back the changes. The arrays are
 * initialized with zeros; an entry is only filled in once the
 * corresponding item has actually been changed.
 */
{
rc = VERR_NO_MEMORY;
goto out;
}
/* Save the descriptor size and position. */
{
/* Separate descriptor file. */
fEmbeddedDesc = false;
}
else
{
/* Embedded descriptor file. */
fEmbeddedDesc = true;
}
/* Save the descriptor content. */
for (i = 0; i < DescriptorCopy.cLines; i++)
{
if (!DescriptorCopy.aLines[i])
{
rc = VERR_NO_MEMORY;
goto out;
}
}
/* Prepare both old and new base names used for string replacement. */
/* Prepare both old and new full names used for string replacement. */
/* --- Up to this point we have not done any damage yet. --- */
/* Save the old name for easy access to the old descriptor file. */
/* Save old image name. */
/* Update the descriptor with modified extent names. */
i < cExtents;
{
/* Assume that vmdkStrReplace will fail. */
rc = VERR_NO_MEMORY;
/* Update the descriptor. */
if (!apszNewLines[i])
goto rollback;
}
/* Make sure the descriptor gets written back. */
/* Flush the descriptor now, in case it is embedded. */
(void)vmdkFlushImage(pImage);
for (i = 0; i < cExtents; i++)
{
/* Compose new name for the extent. */
if (!apszNewName[i])
goto rollback;
/* Close the extent file. */
/* Rename the extent file. */
if (RT_FAILURE(rc))
goto rollback;
/* Remember the old name. */
}
/* Release all old stuff. */
vmdkFreeImage(pImage, false);
fImageFreed = true;
/* Note: the last elements of the old/new name arrays are reserved for
 * storing the descriptor's names.
 */
/* Rename the descriptor file if it's separate. */
if (!fEmbeddedDesc)
{
if (RT_FAILURE(rc))
goto rollback;
/* Save old name only if we may need to change it back. */
}
/* Update pImage with the new information. */
/* Open the new image. */
if (RT_SUCCESS(rc))
goto out;
/* Roll back all changes in case of failure. */
if (RT_FAILURE(rc))
{
int rrc;
if (!fImageFreed)
{
/*
* Some extents may have been closed, close the rest. We will
* re-open the whole thing later.
*/
vmdkFreeImage(pImage, false);
}
/* Rename files back. */
for (i = 0; i <= cExtents; i++)
{
if (apszOldName[i])
{
}
}
/* Restore the old descriptor. */
if (fEmbeddedDesc)
{
}
else
{
/* Shouldn't be null for separate descriptor.
* There will be no access to the actual content.
*/
}
/* Get rid of the stuff we implanted. */
/* Re-open the image back. */
}
out:
for (i = 0; i < DescriptorCopy.cLines; i++)
if (DescriptorCopy.aLines[i])
if (apszOldName)
{
for (i = 0; i <= cExtents; i++)
if (apszOldName[i])
RTStrFree(apszOldName[i]);
}
if (apszNewName)
{
for (i = 0; i <= cExtents; i++)
if (apszNewName[i])
RTStrFree(apszNewName[i]);
}
if (apszNewLines)
{
for (i = 0; i < cExtents; i++)
if (apszNewLines[i])
RTStrFree(apszNewLines[i]);
}
if (pszOldDescName)
if (pszOldBaseName)
if (pszNewBaseName)
if (pszOldFullName)
if (pszNewFullName)
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnClose */
{
int rc = VINF_SUCCESS;
/* Freeing a never allocated image (e.g. because the open failed) is
 * not signalled as an error. After all, nothing bad happens. */
if (pImage)
{
}
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnRead */
{
LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
int rc;
|| cbToRead == 0)
{
goto out;
}
&pExtent, &uSectorExtentRel);
if (RT_FAILURE(rc))
goto out;
/* Check access permissions as defined in the extent descriptor. */
{
goto out;
}
/* Clip read range to remain in this extent. */
cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
/* Handle the read according to the current extent type. */
{
case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
if (RT_FAILURE(rc))
goto out;
/* Clip read range to at most the rest of the grain. */
cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
if (uSectorExtentAbs == 0)
else
{
{
{
if (RT_FAILURE(rc))
{
pExtent->uGrainSector = 0;
goto out;
}
}
}
else
{
}
}
break;
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
break;
case VMDKETYPE_ZERO:
break;
}
if (pcbActuallyRead)
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnWrite */
{
LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
int rc;
{
goto out;
}
if (cbToWrite == 0)
{
goto out;
}
/* No size check here, will do that later when the extent is located.
* There are sparse images out there which according to the spec are
* invalid, because the total size is not a multiple of the grain size.
* Also for sparse images which are stitched together in odd ways (not at
* grain boundaries, and with the nominal size not being a multiple of the
* grain size), this would prevent writing to the last grain. */
&pExtent, &uSectorExtentRel);
if (RT_FAILURE(rc))
goto out;
/* Check access permissions as defined in the extent descriptor. */
{
goto out;
}
/* Handle the write according to the current extent type. */
{
case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
if (RT_FAILURE(rc))
goto out;
/* Clip write range to at most the rest of the grain. */
cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
{
goto out;
}
if (uSectorExtentAbs == 0)
{
{
/* Full block write to a previously unallocated block.
* Check if the caller wants to avoid the automatic alloc. */
if (!(fWrite & VD_WRITE_NO_ALLOC))
{
/* Allocate GT and find out where to store the grain. */
}
else
*pcbPreRead = 0;
*pcbPostRead = 0;
}
else
{
/* Clip write range to remain in this extent. */
cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
}
}
else
{
{
{
if (RT_FAILURE(rc))
{
pExtent->uGrainSector = 0;
pExtent->uLastGrainSector = 0;
goto out;
}
}
if (RT_FAILURE(rc))
{
pExtent->uGrainSector = 0;
pExtent->uLastGrainSector = 0;
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
}
{
uEOSOff = 512;
rc = vmdkWriteMetaSparseExtent(pExtent, VMDK_SECTOR2BYTE(uSectorExtentAbs) + RT_ALIGN(cbGrain, 512));
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write footer after data block in '%s'"), pExtent->pszFullname);
}
if (RT_FAILURE(rc))
return vmdkError(pExtent->pImage, rc, RT_SRC_POS, N_("VMDK: cannot write end-of-stream marker after data block in '%s'"), pExtent->pszFullname);
}
else
{
}
}
break;
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
/* Clip write range to remain in this extent. */
cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
break;
case VMDKETYPE_ZERO:
/* Clip write range to remain in this extent. */
cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
break;
}
if (pcbWriteProcess)
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnFlush */
static int vmdkFlush(void *pBackendData)
{
int rc;
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
static unsigned vmdkGetVersion(void *pBackendData)
{
if (pImage)
return VMDK_IMAGE_VERSION;
else
return 0;
}
/** @copydoc VBOXHDDBACKEND::pfnGetSize */
{
if (pImage)
else
return 0;
}
/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
{
if (pImage)
{
{
if (RT_SUCCESS(rc))
}
{
{
if (RT_SUCCESS(rc))
}
}
}
return cb;
}
/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
static int vmdkGetPCHSGeometry(void *pBackendData,
{
int rc;
if (pImage)
{
{
rc = VINF_SUCCESS;
}
else
}
else
LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
static int vmdkSetPCHSGeometry(void *pBackendData,
{
LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
int rc;
if (pImage)
{
{
goto out;
}
if (RT_FAILURE(rc))
goto out;
rc = VINF_SUCCESS;
}
else
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
static int vmdkGetLCHSGeometry(void *pBackendData,
{
int rc;
if (pImage)
{
{
rc = VINF_SUCCESS;
}
else
}
else
LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
static int vmdkSetLCHSGeometry(void *pBackendData,
{
LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
int rc;
if (pImage)
{
{
goto out;
}
if (RT_FAILURE(rc))
goto out;
rc = VINF_SUCCESS;
}
else
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
static unsigned vmdkGetImageFlags(void *pBackendData)
{
unsigned uImageFlags;
if (pImage)
else
uImageFlags = 0;
return uImageFlags;
}
/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
static unsigned vmdkGetOpenFlags(void *pBackendData)
{
unsigned uOpenFlags;
if (pImage)
else
uOpenFlags = 0;
return uOpenFlags;
}
/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
{
int rc;
/* Image must be opened and the new flags must be valid. Only the
 * read-only, info and async I/O flags are supported. */
if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO)))
{
goto out;
}
/* Implement this operation via reopening the image. */
vmdkFreeImage(pImage, false);
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetComment */
{
LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
int rc;
if (pImage)
{
const char *pszCommentEncoded = NULL;
"ddb.comment", &pszCommentEncoded);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
goto out;
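/* Decode the comment stored in the descriptor (if any) into the
 * caller-provided buffer; an absent comment yields an empty string. */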
if (pszComment && pszCommentEncoded)
else
{
if (pszComment)
*pszComment = '\0';
rc = VINF_SUCCESS;
}
if (pszCommentEncoded)
RTStrFree((char *)(void *)pszCommentEncoded);
}
else
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetComment */
{
int rc;
{
goto out;
}
if (pImage)
else
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
{
int rc;
if (pImage)
{
rc = VINF_SUCCESS;
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
{
int rc;
if (pImage)
{
{
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
rc = VINF_SUCCESS;
}
else
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
{
int rc;
if (pImage)
{
rc = VINF_SUCCESS;
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
{
int rc;
if (pImage)
{
{
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
rc = VINF_SUCCESS;
}
else
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
{
int rc;
if (pImage)
{
rc = VINF_SUCCESS;
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
{
int rc;
if (pImage)
{
{
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
rc = VINF_SUCCESS;
}
else
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
{
int rc;
if (pImage)
{
rc = VINF_SUCCESS;
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
{
int rc;
if (pImage)
{
{
if (RT_FAILURE(rc))
return vmdkError(pImage, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
rc = VINF_SUCCESS;
}
else
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnDump */
static void vmdkDump(void *pBackendData)
{
if (pImage)
{
RTLogPrintf("Header: Geometry PCHS=%u/%u/%u LCHS=%u/%u/%u cbSector=%llu\n",
}
}
{
int rc = VERR_NOT_IMPLEMENTED;
return rc;
}
{
int rc = VERR_NOT_IMPLEMENTED;
return rc;
}
{
int rc = VERR_NOT_IMPLEMENTED;
return rc;
}
{
int rc = VERR_NOT_IMPLEMENTED;
return rc;
}
{
int rc = VERR_NOT_IMPLEMENTED;
return rc;
}
static bool vmdkIsAsyncIOSupported(void *pvBackendData)
{
bool fAsyncIOSupported = false;
if (pImage)
{
/* We only support async I/O if the image consists exclusively of FLAT or ZERO extents. */
fAsyncIOSupported = true;
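/* Scan all extents; any extent that is neither FLAT nor ZERO disables
 * async I/O for the whole image. */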
{
{
fAsyncIOSupported = false;
break; /* Stop search */
}
}
}
return fAsyncIOSupported;
}
{
int rc = VINF_SUCCESS;
unsigned cTasksToSubmit = 0;
|| cbRead == 0)
{
goto out;
}
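/* Process the read segment by segment: map each piece onto the extent
 * backing it, clip it, and queue one async task per flat piece. */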
{
&pExtent, &uSectorExtentRel);
if (RT_FAILURE(rc))
goto out;
/* Check access permissions as defined in the extent descriptor. */
{
goto out;
}
/* Clip read range to remain in this extent. */
cbToRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
/* Clip read range to remain within the current data segment. */
{
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
{
/* Setup new task. */
void *pTask;
rc = pImage->pInterfaceAsyncIOCallbacks->pfnPrepareRead(pImage->pInterfaceAsyncIO->pvUser, pExtent->pFile->pStorage,
if (RT_FAILURE(rc))
{
goto out;
}
/* Check for enough room first. */
{
/* We reached the maximum, resize the array. Try to realloc the memory first. */
if (!apTaskNew)
{
/* We failed. Allocate completely new. */
if (!apTaskNew)
{
/* Damn, we are out of memory. */
rc = VERR_NO_MEMORY;
goto out;
}
/* Copy task handles over. */
for (unsigned i = 0; i < cTasksToSubmit; i++)
/* Free old memory. */
}
}
break;
}
case VMDKETYPE_ZERO:
break;
default:
}
/* Go to next extent if there is no space left in current one. */
if (!cbLeftInCurrentSegment)
{
paSegCurrent++;
cSeg--;
}
}
if (cTasksToSubmit == 0)
{
/* The request was completely within a ZERO extent, nothing to do. */
}
else
{
/* Submit tasks. */
NULL /* Nothing required after read. */);
}
out:
return rc;
}
{
int rc = VINF_SUCCESS;
unsigned cTasksToSubmit = 0;
|| cbWrite == 0)
{
goto out;
}
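/* Same approach as for reads: walk the request segment by segment,
 * clip to the containing extent, and queue an async write task for
 * each flat piece. */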
{
&pExtent, &uSectorExtentRel);
if (RT_FAILURE(rc))
goto out;
/* Check access permissions as defined in the extent descriptor. */
{
goto out;
}
/* Clip write range to remain in this extent. */
cbToWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
/* Clip write range to remain within the current data segment. */
{
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
{
/* Setup new task. */
void *pTask;
rc = pImage->pInterfaceAsyncIOCallbacks->pfnPrepareWrite(pImage->pInterfaceAsyncIO->pvUser, pExtent->pFile->pStorage,
if (RT_FAILURE(rc))
{
goto out;
}
/* Check for enough room first. */
{
/* We reached the maximum, resize the array. Try to realloc the memory first. */
if (!apTaskNew)
{
/* We failed. Allocate completely new. */
if (!apTaskNew)
{
/* Damn, we are out of memory. */
rc = VERR_NO_MEMORY;
goto out;
}
/* Copy task handles over. */
for (unsigned i = 0; i < cTasksToSubmit; i++)
/* Free old memory. */
}
}
break;
}
case VMDKETYPE_ZERO:
/* Nothing left to do. */
break;
default:
}
/* Go to next extent if there is no space left in current one. */
if (!cbLeftInCurrentSegment)
{
paSegCurrent++;
cSeg--;
}
}
if (cTasksToSubmit == 0)
{
/* The request was completely within a ZERO extent, nothing to do. */
}
else
{
/* Submit tasks. */
NULL /* Nothing required after write. */);
}
out:
return rc;
}
{
/* pszBackendName */
"VMDK",
/* cbSize */
sizeof(VBOXHDDBACKEND),
/* uBackendCaps */
/* papszFileExtensions */
/* paConfigInfo */
NULL,
/* hPlugin */
/* pfnCheckIfValid */
/* pfnOpen */
/* pfnCreate */
/* pfnRename */
/* pfnClose */
/* pfnRead */
/* pfnWrite */
/* pfnFlush */
/* pfnGetVersion */
/* pfnGetSize */
/* pfnGetFileSize */
/* pfnGetPCHSGeometry */
/* pfnSetPCHSGeometry */
/* pfnGetLCHSGeometry */
/* pfnSetLCHSGeometry */
/* pfnGetImageFlags */
/* pfnGetOpenFlags */
/* pfnSetOpenFlags */
/* pfnGetComment */
/* pfnSetComment */
/* pfnGetUuid */
/* pfnSetUuid */
/* pfnGetModificationUuid */
/* pfnSetModificationUuid */
/* pfnGetParentUuid */
/* pfnSetParentUuid */
/* pfnGetParentModificationUuid */
/* pfnSetParentModificationUuid */
/* pfnDump */
/* pfnGetTimeStamp */
/* pfnGetParentTimeStamp */
/* pfnSetParentTimeStamp */
/* pfnGetParentFilename */
/* pfnSetParentFilename */
/* pfnIsAsyncIOSupported */
/* pfnAsyncRead */
/* pfnAsyncWrite */
/* pfnComposeLocation */
/* pfnComposeName */
};