/* $Id$ */
/** @file
* VMDK disk image, core code.
*/
/*
* Copyright (C) 2006-2011 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*******************************************************************************
* Header Files *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_VD_VMDK
#include <VBox/vd-plugin.h>
/*******************************************************************************
* Constants And Macros, Structures and Typedefs *
*******************************************************************************/
/** Maximum encoded string size (including NUL) we allow for VMDK images.
* Deliberately not set high to avoid running out of descriptor space. */
#define VMDK_ENCODED_COMMENT_MAX 1024
/** VMDK descriptor DDB entry for PCHS cylinders. */
#define VMDK_DDB_GEO_PCHS_CYLINDERS "ddb.geometry.cylinders"
/** VMDK descriptor DDB entry for PCHS heads. */
#define VMDK_DDB_GEO_PCHS_HEADS "ddb.geometry.heads"
/** VMDK descriptor DDB entry for PCHS sectors. */
#define VMDK_DDB_GEO_PCHS_SECTORS "ddb.geometry.sectors"
/** VMDK descriptor DDB entry for LCHS cylinders. */
#define VMDK_DDB_GEO_LCHS_CYLINDERS "ddb.geometry.biosCylinders"
/** VMDK descriptor DDB entry for LCHS heads. */
#define VMDK_DDB_GEO_LCHS_HEADS "ddb.geometry.biosHeads"
/** VMDK descriptor DDB entry for LCHS sectors. */
#define VMDK_DDB_GEO_LCHS_SECTORS "ddb.geometry.biosSectors"
/** VMDK descriptor DDB entry for image UUID. */
#define VMDK_DDB_IMAGE_UUID "ddb.uuid.image"
/** VMDK descriptor DDB entry for image modification UUID. */
#define VMDK_DDB_MODIFICATION_UUID "ddb.uuid.modification"
/** VMDK descriptor DDB entry for parent image UUID. */
#define VMDK_DDB_PARENT_UUID "ddb.uuid.parent"
/** VMDK descriptor DDB entry for parent image modification UUID. */
#define VMDK_DDB_PARENT_MODIFICATION_UUID "ddb.uuid.parentmodification"
/** No compression for streamOptimized files. */
#define VMDK_COMPRESSION_NONE 0
/** Deflate compression for streamOptimized files. */
#define VMDK_COMPRESSION_DEFLATE 1
/** Marker that the actual GD value is stored in the footer. */
#define VMDK_GD_AT_END 0xffffffffffffffffULL
/** Marker for end-of-stream in streamOptimized images. */
#define VMDK_MARKER_EOS 0
/** Marker for grain table block in streamOptimized images. */
#define VMDK_MARKER_GT 1
/** Marker for grain directory block in streamOptimized images. */
#define VMDK_MARKER_GD 2
/** Marker for footer in streamOptimized images. */
#define VMDK_MARKER_FOOTER 3
/** Marker for unknown purpose in streamOptimized images.
* Shows up in very recent images created by vSphere, but only sporadically.
* They "forgot" to document that one in the VMDK specification. */
#define VMDK_MARKER_UNSPECIFIED 4
/** Dummy marker for "don't check the marker value". */
#define VMDK_MARKER_IGNORE 0xffffffffU
/**
 * Magic number for hosted images created by VMware Workstation 4, VMware
 * Workstation 5, VMware Server or VMware Player. Not necessarily sparse.
 */
#define VMDK_SPARSE_MAGICNUMBER 0x564d444b /* 'V' 'M' 'D' 'K' */
/**
* VMDK hosted binary extent header. The "Sparse" is a total misnomer, as
* this header is also used for monolithic flat images.
*/
#pragma pack(1)
typedef struct SparseExtentHeader
{
    uint32_t    magicNumber;
    uint32_t    version;
    uint32_t    flags;
    uint64_t    capacity;
    uint64_t    grainSize;
    uint64_t    descriptorOffset;
    uint64_t    descriptorSize;
    uint32_t    numGTEsPerGT;
    uint64_t    rgdOffset;
    uint64_t    gdOffset;
    uint64_t    overHead;
    bool        uncleanShutdown;
    char        singleEndLineChar;
    char        nonEndLineChar;
    char        doubleEndLineChar1;
    char        doubleEndLineChar2;
    uint16_t    compressAlgorithm;
    uint8_t     pad[433];
} SparseExtentHeader;
#pragma pack()
/** VMDK capacity for a single chunk when 2G splitting is turned on. Should be
 * divisible by the default grain size (64K). */
#define VMDK_2G_SPLIT_SIZE (2047 * 1024 * 1024)
/** VMDK streamOptimized file format marker. The type field may or may not
* be actually valid, but there's always data to read there. */
#pragma pack(1)
typedef struct VMDKMARKER
{
    uint64_t uSector;
    uint32_t cbSize;
    uint32_t uType;
} VMDKMARKER, *PVMDKMARKER;
#pragma pack()
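/*
 * Illustrative sketch (not part of the original code): how a reader of a
 * streamOptimized stream can classify a just-read marker. Any marker with
 * cbSize != 0 announces a compressed grain (uSector is the grain's starting
 * sector, cbSize the number of deflated bytes following the 12-byte marker
 * head); cbSize == 0 means uType selects one of the metadata types above.
 */
#if 0 /* example only */
static uint32_t vmdkExampleMarkerType(const VMDKMARKER *pMarker)
{
    if (pMarker->cbSize != 0)
        return VMDK_MARKER_IGNORE; /* compressed grain, data follows */
    return pMarker->uType; /* EOS, GT, GD, FOOTER or the undocumented 4 */
}
#endif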
#ifdef VBOX_WITH_VMDK_ESX
/** @todo the ESX code is not tested, not used, and lacks error messages. */
/**
 * Magic number for images created by VMware GSX Server 3 or ESX Server 3.
 */
#define VMDK_ESX_SPARSE_MAGICNUMBER 0x44574f43 /* 'C' 'O' 'W' 'D' */
#pragma pack(1)
typedef struct COWDisk_Header
{
/* The spec incompletely documents quite a few further fields, but states
* that they are unused by the current format. Replace them by padding. */
char reserved1[1604];
char reserved2[8];
    char padding[396];
} COWDisk_Header;
#pragma pack()
#endif /* VBOX_WITH_VMDK_ESX */
/** Convert sector number/size to byte offset/size. */
#define VMDK_SECTOR2BYTE(u) ((uint64_t)(u) << 9)
/** Convert byte offset/size to sector number/size. */
#define VMDK_BYTE2SECTOR(u) ((u) >> 9)
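/* Compile-time sanity examples for the two conversions above: one default
 * 64K grain is 128 sectors of 512 bytes. AssertCompile is the IPRT
 * compile-time assertion. */
#if 0 /* example only */
AssertCompile(VMDK_BYTE2SECTOR(65536) == 128);
AssertCompile(VMDK_SECTOR2BYTE(128)   == 65536);
#endif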
/**
* VMDK extent type.
*/
typedef enum VMDKETYPE
{
    /** Hosted sparse extent. */
    VMDKETYPE_HOSTED_SPARSE = 1,
    /** Flat extent. */
    VMDKETYPE_FLAT,
    /** Zero extent. */
    VMDKETYPE_ZERO,
    /** VMFS extent, used by ESX. */
    VMDKETYPE_VMFS
#ifdef VBOX_WITH_VMDK_ESX
    ,
    /** ESX sparse extent. */
    VMDKETYPE_ESX_SPARSE
#endif /* VBOX_WITH_VMDK_ESX */
} VMDKETYPE, *PVMDKETYPE;
/**
 * VMDK access type for an extent.
 */
typedef enum VMDKACCESS
{
    /** No access allowed. */
    VMDKACCESS_NOACCESS = 0,
    /** Read-only access. */
    VMDKACCESS_READONLY,
    /** Read-write access. */
    VMDKACCESS_READWRITE
} VMDKACCESS, *PVMDKACCESS;
/** Forward declaration for PVMDKIMAGE. */
typedef struct VMDKIMAGE *PVMDKIMAGE;
/**
 * Extent file entry. Used for opening a particular file only once.
 */
typedef struct VMDKFILE
{
/** Pointer to filename. Local copy. */
const char *pszFilename;
/** File open flags for consistency checking. */
unsigned fOpen;
/** Flag whether this file has been opened for async I/O. */
bool fAsyncIO;
/** Reference counter. */
unsigned uReferences;
/** Flag whether the file should be deleted on last close. */
bool fDelete;
    /** Pointer to the image we belong to (for debugging purposes). */
    PVMDKIMAGE pImage;
    /** Pointer to next file descriptor. */
    struct VMDKFILE *pNext;
    /** Pointer to the previous file descriptor. */
    struct VMDKFILE *pPrev;
} VMDKFILE, *PVMDKFILE;
/**
* VMDK extent data structure.
*/
typedef struct VMDKEXTENT
{
    /** File handle. */
    PVMDKFILE   pFile;
    /** Base name of the image extent. */
    const char  *pszBasename;
    /** Full name of the image extent. */
    const char  *pszFullname;
    /** Number of sectors in this extent. */
    uint64_t    cSectors;
    /** Number of sectors per block (grain in VMDK speak). */
    uint64_t    cSectorsPerGrain;
    /** Starting sector number of descriptor. */
    uint64_t    uDescriptorSector;
    /** Size of descriptor in sectors. */
    uint64_t    cDescriptorSectors;
    /** Starting sector number of grain directory. */
    uint64_t    uSectorGD;
    /** Starting sector number of redundant grain directory. */
    uint64_t    uSectorRGD;
    /** Total number of metadata sectors. */
    uint64_t    cOverheadSectors;
    /** Nominal size (i.e. as described by the descriptor) of this extent. */
    uint64_t    cNominalSectors;
    /** Sector offset (i.e. as described by the descriptor) of this extent. */
    uint64_t    uSectorOffset;
    /** Number of entries in a grain table. */
    uint32_t    cGTEntries;
    /** Number of sectors reachable via a grain directory entry. */
    uint32_t    cSectorsPerGDEntry;
    /** Number of entries in the grain directory. */
    uint32_t    cGDEntries;
    /** Pointer to the next free sector. Legacy information. Do not use. */
    uint64_t    uFreeSector;
    /** Number of this extent in the list of images. */
    uint32_t    uExtent;
    /** Pointer to the descriptor (NULL if no descriptor in this extent). */
    char        *pDescData;
    /** Pointer to the grain directory. */
    uint32_t    *pGD;
    /** Pointer to the redundant grain directory. */
    uint32_t    *pRGD;
    /** VMDK version of this extent. 1=1.0/1.1 */
    uint32_t    uVersion;
    /** Type of this extent. */
    VMDKETYPE   enmType;
    /** Access to this extent. */
    VMDKACCESS  enmAccess;
    /** Flag whether this extent is marked as unclean. */
    bool        fUncleanShutdown;
    /** Flag whether the metadata in the extent header needs to be updated. */
    bool        fMetaDirty;
    /** Flag whether there is a footer in this extent. */
    bool        fFooter;
    /** Compression type for this extent. */
    uint16_t    uCompression;
    /** Append position for writing new grain. Only for sparse extents. */
    uint64_t    uAppendPosition;
    /** Last grain which was accessed. Only for streamOptimized extents. */
    uint32_t    uLastGrainAccess;
    /** Starting sector corresponding to the grain buffer. */
    uint32_t    uGrainSectorAbs;
    /** Grain number corresponding to the grain buffer. */
    uint32_t    uGrain;
    /** Actual size of the compressed data, only valid for reading. */
    uint32_t    cbGrainStreamRead;
    /** Size of compressed grain buffer for streamOptimized extents. */
    size_t      cbCompGrain;
    /** Compressed grain buffer for streamOptimized extents, with marker. */
    void        *pvCompGrain;
    /** Decompressed grain buffer for streamOptimized extents. */
    void        *pvGrain;
    /** Reference to the image in which this extent is used. Do not use this
     * on a regular basis to avoid passing pImage references to functions
     * explicitly. */
    struct VMDKIMAGE *pImage;
} VMDKEXTENT, *PVMDKEXTENT;
/**
* Grain table cache size. Allocated per image.
*/
#define VMDK_GT_CACHE_SIZE 256
/**
* Grain table block size. Smaller than an actual grain table block to allow
* more grain table blocks to be cached without having to allocate excessive
* amounts of memory for the cache.
*/
#define VMDK_GT_CACHELINE_SIZE 128
/**
* Maximum number of lines in a descriptor file. Not worth the effort of
* making it variable. Descriptor files are generally very short (~20 lines),
* with the exception of sparse files split in 2G chunks, which need for the
* maximum size (almost 2T) exactly 1025 lines for the disk database.
*/
#define VMDK_DESCRIPTOR_LINES_MAX 1100U
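/* Worked example for the limit above: a maximal 2G-split image of almost 2T
 * needs about 2T / 2G = 1024 extent lines; with one extra line that is the
 * 1025 mentioned, and the remaining headroom up to 1100 covers the header,
 * descriptor and disk database sections. */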
/**
* Parsed descriptor information. Allows easy access and update of the
* descriptor (whether separate file or not). Free form text files suck.
*/
typedef struct VMDKDESCRIPTOR
{
/** Line number of first entry of the disk descriptor. */
unsigned uFirstDesc;
/** Line number of first entry in the extent description. */
unsigned uFirstExtent;
/** Line number of first disk database entry. */
unsigned uFirstDDB;
/** Total number of lines. */
unsigned cLines;
    /** Total amount of memory available for the descriptor. */
    size_t cbDescAlloc;
/** Set if descriptor has been changed and not yet written to disk. */
bool fDirty;
/** Array of pointers to the data in the descriptor. */
char *aLines[VMDK_DESCRIPTOR_LINES_MAX];
/** Array of line indices pointing to the next non-comment line. */
unsigned aNextLines[VMDK_DESCRIPTOR_LINES_MAX];
} VMDKDESCRIPTOR, *PVMDKDESCRIPTOR;
/**
 * Cache entry for translating extent/sector to a sector number in that
 * extent.
 */
typedef struct VMDKGTCACHEENTRY
{
    /** Extent number for which this entry is valid. */
    uint32_t    uExtent;
    /** GT data block number. */
    uint64_t    uGTBlock;
    /** Data part of the cache entry. */
    uint32_t    aGTData[VMDK_GT_CACHELINE_SIZE];
} VMDKGTCACHEENTRY, *PVMDKGTCACHEENTRY;
/**
* Cache data structure for blocks of grain table entries. For now this is a
* fixed size direct mapping cache, but this should be adapted to the size of
* the sparse image and maybe converted to a set-associative cache. The
* implementation below implements a write-through cache with write allocate.
*/
typedef struct VMDKGTCACHE
{
    /** Cache entries. */
    VMDKGTCACHEENTRY aGTCache[VMDK_GT_CACHE_SIZE];
/** Number of cache entries (currently unused). */
unsigned cEntries;
} VMDKGTCACHE, *PVMDKGTCACHE;
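/*
 * Illustrative sketch (not part of the original code) of the direct-mapped
 * lookup implied by the structures above: a GT block number indexes the
 * cache by simple modulo, and an entry hits only if both the extent number
 * and the GT block number match.
 */
#if 0 /* example only */
static uint32_t *vmdkExampleGTCacheLookup(PVMDKGTCACHE pCache, uint32_t uExtent,
                                          uint64_t uGTBlock)
{
    PVMDKGTCACHEENTRY pEntry = &pCache->aGTCache[uGTBlock % VMDK_GT_CACHE_SIZE];
    if (pEntry->uExtent == uExtent && pEntry->uGTBlock == uGTBlock)
        return pEntry->aGTData;  /* hit: VMDK_GT_CACHELINE_SIZE entries */
    return NULL;                 /* miss: caller fetches the block from disk */
}
#endif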
/**
* Complete VMDK image data structure. Mainly a collection of extents and a few
* extra global data fields.
*/
typedef struct VMDKIMAGE
{
    /** Image name. */
    const char        *pszFilename;
    /** Descriptor file if applicable. */
    PVMDKFILE         pFile;
    /** Pointer to the per-disk VD interface list. */
    PVDINTERFACE      pVDIfsDisk;
    /** Pointer to the per-image VD interface list. */
    PVDINTERFACE      pVDIfsImage;
    /** Error interface. */
    PVDINTERFACEERROR pIfError;
    /** I/O interface. */
    PVDINTERFACEIOINT pIfIo;
    /** Pointer to the image extents. */
    PVMDKEXTENT       pExtents;
    /** Number of image extents. */
    unsigned          cExtents;
    /** Pointer to the files list, for opening a file referenced multiple
     * times only once (happens mainly with raw partition access). */
    PVMDKFILE         pFiles;
    /**
     * Pointer to an array of segment entries for async I/O.
     * This is an optimization because the task number to submit is not known
     * and allocating/freeing an array in the read/write functions every time
     * is too expensive.
     */
    PPDMDATASEG       paSegments;
    /** Entries available in the segments array. */
    unsigned          cSegments;
    /** Open flags passed by VBoxHD layer. */
    unsigned          uOpenFlags;
    /** Image flags defined during creation or determined during open. */
    unsigned          uImageFlags;
    /** Total size of the image. */
    uint64_t          cbSize;
    /** Physical geometry of this image. */
    VDGEOMETRY        PCHSGeometry;
    /** Logical geometry of this image. */
    VDGEOMETRY        LCHSGeometry;
    /** Image UUID. */
    RTUUID            ImageUuid;
    /** Image modification UUID. */
    RTUUID            ModificationUuid;
    /** Parent image UUID. */
    RTUUID            ParentUuid;
    /** Parent image modification UUID. */
    RTUUID            ParentModificationUuid;
    /** Pointer to grain table cache, if this image contains sparse extents. */
    PVMDKGTCACHE      pGTCache;
    /** Pointer to the descriptor (NULL if no separate descriptor file). */
    char              *pDescData;
    /** Allocation size of the descriptor file. */
    size_t            cbDescAlloc;
    /** Parsed descriptor file content. */
    VMDKDESCRIPTOR    Descriptor;
} VMDKIMAGE;
typedef struct VMDKCOMPRESSIO
{
    /* Image this operation relates to. */
    PVMDKIMAGE pImage;
    /* Current read position. */
    ssize_t iOffset;
    /* Size of the compressed grain buffer (available data). */
    size_t cbCompGrain;
    /* Pointer to the compressed grain buffer. */
    void *pvCompGrain;
} VMDKCOMPRESSIO;
/** Tracks async grain allocation. */
typedef struct VMDKGRAINALLOCASYNC
{
    /** Flag whether the allocation failed. */
    bool        fIoErr;
    /** Current number of transfers pending.
     * If reached 0 and there is an error the old state is restored. */
    unsigned    cIoXfersPending;
    /** Sector number. */
    uint64_t    uSector;
    /** Flag whether the grain table needs to be updated. */
    bool        fGTUpdateNeeded;
    /** Extent the allocation happens. */
    PVMDKEXTENT pExtent;
    /** Position of the new grain, required for the grain table update. */
    uint64_t    uGrainOffset;
    /** Grain table sector. */
    uint64_t    uGTSector;
    /** Backup grain table sector. */
    uint64_t    uRGTSector;
} VMDKGRAINALLOCASYNC, *PVMDKGRAINALLOCASYNC;
/*******************************************************************************
* Static Variables *
*******************************************************************************/
/** NULL-terminated array of supported file extensions. */
static const VDFILEEXTENSION s_aVmdkFileExtensions[] =
{
{"vmdk", VDTYPE_HDD},
};
/*******************************************************************************
* Internal Functions *
*******************************************************************************/
static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                               bool fDelete);
static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq);
/**
* Internal: open a file (using a file descriptor cache to ensure each file
* is only opened once - anything else can cause locking problems).
*/
{
int rc = VINF_SUCCESS;
{
{
pVmdkFile->uReferences++;
*ppVmdkFile = pVmdkFile;
return rc;
}
}
/* If we get here, there's no matching entry in the cache. */
{
*ppVmdkFile = NULL;
return VERR_NO_MEMORY;
}
{
*ppVmdkFile = NULL;
return VERR_NO_MEMORY;
}
if (RT_SUCCESS(rc))
{
*ppVmdkFile = pVmdkFile;
}
else
{
*ppVmdkFile = NULL;
}
return rc;
}
/**
* Internal: close a file, updating the file descriptor cache.
*/
{
int rc = VINF_SUCCESS;
pVmdkFile->uReferences--;
if (pVmdkFile->uReferences == 0)
{
/* Unchain the element from the list. */
if (pNext)
if (pPrev)
else
}
*ppVmdkFile = NULL;
return rc;
}
static DECLCALLBACK(int) vmdkFileInflateHelper(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
{
size_t cbInjected = 0;
if (pInflateState->iOffset < 0)
{
cbBuf--;
cbInjected = 1;
}
if (!cbBuf)
{
if (pcbBuf)
*pcbBuf = cbInjected;
return VINF_SUCCESS;
}
cbBuf);
return VINF_SUCCESS;
}
/**
* Internal: read from a file and inflate the compressed data,
* distinguishing between async and normal operation
*/
{
{
AssertMsgFailed(("TODO\n"));
return VERR_NOT_SUPPORTED;
}
else
{
int rc;
if (!pcvMarker)
{
NULL);
if (RT_FAILURE(rc))
return rc;
}
else
if (cbCompSize == 0)
{
AssertMsgFailed(("VMDK: corrupted marker\n"));
return VERR_VD_VMDK_INVALID_FORMAT;
}
/* Sanity check - the expansion ratio should be much less than 2. */
return VERR_VD_VMDK_INVALID_FORMAT;
/* Compressed grain marker. Data follows immediately. */
512)
if (puLBA)
if (pcbMarkerData)
512);
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return rc;
if (cbActuallyRead != cbToRead)
return rc;
}
}
{
if (pDeflateState->iOffset < 0)
{
cbBuf--;
}
if (!cbBuf)
return VINF_SUCCESS;
return VERR_BUFFER_OVERFLOW;
return VINF_SUCCESS;
}
/**
* Internal: deflate the uncompressed data and write to a file,
* distinguishing between async and normal operation
*/
{
{
AssertMsgFailed(("TODO\n"));
return VERR_NOT_SUPPORTED;
}
else
{
int rc;
if (RT_FAILURE(rc))
return rc;
if (RT_SUCCESS(rc))
if (RT_SUCCESS(rc))
{
/* pad with zeroes to get to a full sector size */
if (uSize % 512)
{
uSizeAlign - uSize);
uSize = uSizeAlign;
}
if (pcbMarkerData)
*pcbMarkerData = uSize;
/* Compressed grain marker. Data follows immediately. */
if (RT_FAILURE(rc))
return rc;
}
return rc;
}
}
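/*
 * On-disk layout produced by the deflate path above, per the VMDK 1.1 spec:
 * a grain marker (only the first 12 bytes, uSector and cbSize, form the
 * marker head; the deflated RFC 1951 stream follows immediately), then zero
 * padding up to the next 512-byte sector boundary. Size arithmetic sketch:
 */
#if 0 /* example only */
size_t cbMarkerHead = RT_OFFSETOF(VMDKMARKER, uType);          /* 12 bytes */
size_t cbOnDisk     = RT_ALIGN_Z(cbMarkerHead + cbCompressed, 512);
#endif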
/**
* Internal: check if all files are closed, prevent leaking resources.
*/
{
{
LogRel(("VMDK: leaking reference to file \"%s\"\n",
pVmdkFile->pszFilename));
if (RT_SUCCESS(rc))
}
return rc;
}
/**
 * Internal: truncate a string (at a UTF-8 code point boundary) and encode the
 * critical non-ASCII characters.
 */
static char *vmdkEncodeString(const char *psz)
{
{
char *pszDstPrev = pszDst;
if (Cp == '\\')
{
}
else if (Cp == '\n')
{
}
else if (Cp == '\r')
{
}
else
{
pszDst = pszDstPrev;
break;
}
}
*pszDst = '\0';
}
/**
* Internal: decode a string and store it into the specified string.
*/
{
int rc = VINF_SUCCESS;
char szBuf[4];
if (!cb)
return VERR_BUFFER_OVERFLOW;
{
if (Cp == '\\')
{
if (CpQ == 'n')
else if (CpQ == 'r')
else if (CpQ == '\0')
{
break;
}
else
}
else
/* Need to leave space for terminating NUL. */
{
break;
}
}
*psz = '\0';
return rc;
}
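/*
 * Round-trip example for the two helpers above: the descriptor is a plain
 * text file, so backslash, LF and CR in a comment are escaped on encode and
 * restored on decode. The decode signature is assumed to be
 * (encoded, destination, size) and the encoded result is assumed to be a
 * string duplicated with RTStrDup, hence freed with RTStrFree.
 */
#if 0 /* example only */
char *pszEnc = vmdkEncodeString("line1\nline2"); /* -> "line1\\nline2" */
char  szDec[32];
vmdkDecodeString(pszEnc, szDec, sizeof(szDec));  /* -> "line1\nline2" */
RTStrFree(pszEnc);
#endif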
/**
* Internal: free all buffers associated with grain directories.
*/
{
{
}
{
}
}
/**
* Internal: allocate the compressed/uncompressed buffers for streamOptimized
* images.
*/
{
int rc = VINF_SUCCESS;
{
/* streamOptimized extents need a compressed grain buffer, which must
* be big enough to hold uncompressible data (which needs ~8 bytes
* more than the uncompressed data), the marker and padding. */
if (!pExtent->pvCompGrain)
{
rc = VERR_NO_MEMORY;
goto out;
}
/* streamOptimized extents need a decompressed grain buffer. */
{
rc = VERR_NO_MEMORY;
goto out;
}
}
out:
if (RT_FAILURE(rc))
return rc;
}
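/*
 * Sizing sketch for the compressed grain buffer allocated above (an
 * assumption based on the comment, not the elided allocation code):
 * worst-case deflate output can exceed its input by a few bytes, so the
 * buffer must hold one full grain plus ~8 bytes overhead plus the marker,
 * rounded up to a whole sector.
 */
#if 0 /* example only */
size_t cbCompGrain = RT_ALIGN_Z(  VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain)
                                + 8                   /* deflate overhead */
                                + sizeof(VMDKMARKER), /* marker */
                                512);
#endif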
/**
* Internal: allocate all buffers associated with grain directories.
*/
{
int rc = VINF_SUCCESS;
if (!pGD)
{
rc = VERR_NO_MEMORY;
goto out;
}
if (pExtent->uSectorRGD)
{
if (!pRGD)
{
rc = VERR_NO_MEMORY;
goto out;
}
}
out:
if (RT_FAILURE(rc))
return rc;
}
{
int rc = VINF_SUCCESS;
unsigned i;
goto out;
{
goto out;
}
if (RT_FAILURE(rc))
goto out;
/* The VMDK 1.1 spec seems to talk about compressed grain directories,
* but in reality they are not compressed. */
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not read grain directory in '%s': %Rrc"), pExtent->pszFullname);
goto out;
}
if (pExtent->uSectorRGD)
{
/* The VMDK 1.1 spec seems to talk about compressed grain directories,
* but in reality they are not compressed. */
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not read redundant grain directory in '%s'"), pExtent->pszFullname);
goto out;
}
/* Check grain table and redundant grain table for consistency. */
if (!pTmpGT1)
{
rc = VERR_NO_MEMORY;
goto out;
}
if (!pTmpGT2)
{
rc = VERR_NO_MEMORY;
goto out;
}
i < pExtent->cGDEntries;
{
/* If no grain table is allocated skip the entry. */
continue;
{
/* Just one grain directory entry refers to a not yet allocated
* grain table or both grain directory copies refer to the same
* grain table. Not allowed. */
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent references to grain directory in '%s'"), pExtent->pszFullname);
goto out;
}
/* The VMDK 1.1 spec seems to talk about compressed grain tables,
* but in reality they are not compressed. */
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading grain table in '%s'"), pExtent->pszFullname);
goto out;
}
/* The VMDK 1.1 spec seems to talk about compressed grain tables,
* but in reality they are not compressed. */
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading backup grain table in '%s'"), pExtent->pszFullname);
goto out;
}
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistency between grain table and backup grain table in '%s'"), pExtent->pszFullname);
goto out;
}
}
/** @todo figure out what to do for unclean VMDKs. */
}
out:
if (RT_FAILURE(rc))
return rc;
}
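/*
 * Illustrative sketch (not part of the original code) of the two-level
 * translation the grain directory implements: a logical sector selects a
 * grain directory entry, which names the sector of a grain table; the grain
 * table entry then gives the absolute starting sector of the grain in the
 * extent file, with 0 meaning "not allocated". pGT is assumed to be the
 * grain table block already read through the GT cache.
 */
#if 0 /* example only */
static uint64_t vmdkExampleGrainToSector(PVMDKEXTENT pExtent,
                                         const uint32_t *pGT, uint64_t uSector)
{
    /* First level: which grain directory entry covers this sector. */
    uint64_t uGDIndex = uSector / pExtent->cSectorsPerGDEntry;
    NOREF(uGDIndex); /* pExtent->pGD[uGDIndex] is where pGT was read from */
    /* Second level: which grain table entry within that table. */
    uint64_t uGTIndex = (uSector % pExtent->cSectorsPerGDEntry)
                      / pExtent->cSectorsPerGrain;
    return pGT[uGTIndex]; /* absolute grain start sector, 0 = unallocated */
}
#endif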
{
int rc = VINF_SUCCESS;
unsigned i;
if (fPreAlloc)
{
+ cbGTRounded;
}
else
{
/* Use a dummy start sector for layout computation. */
if (uStartSector == VMDK_GD_AT_END)
uStartSector = 1;
cbGTRounded = 0;
}
/* For streamOptimized extents there is only one grain directory,
* and for all others take redundant grain directory into account. */
{
}
else
{
}
if (RT_FAILURE(rc))
goto out;
{
pExtent->uSectorRGD = 0;
}
else
{
}
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (fPreAlloc)
{
{
for (i = 0; i < pExtent->cGDEntries; i++)
{
/* Write the redundant grain directory entry to disk. */
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new redundant grain directory entry in '%s'"), pExtent->pszFullname);
goto out;
}
}
}
for (i = 0; i < pExtent->cGDEntries; i++)
{
/* Write the grain directory entry to disk. */
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write new grain directory entry in '%s'"), pExtent->pszFullname);
goto out;
}
}
}
out:
if (RT_FAILURE(rc))
return rc;
}
static int vmdkStringUnquote(PVMDKIMAGE pImage, const char *pszStr,
                             char **ppszUnquoted, char **ppszNext)
{
char *pszQ;
char *pszUnquoted;
/* Skip over whitespace. */
pszStr++;
if (*pszStr != '"')
{
pszQ++;
}
else
{
pszStr++;
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrectly quoted value in descriptor in '%s'"), pImage->pszFilename);
}
if (!pszUnquoted)
return VERR_NO_MEMORY;
if (ppszNext)
return VINF_SUCCESS;
}
const char *pszLine)
{
return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
pDescriptor->cLines++;
pDescriptor->fDirty = true;
return VINF_SUCCESS;
}
{
const char *pszValue;
while (uStart != 0)
{
{
/* Key matches, check for a '=' (preceded by whitespace). */
pszValue++;
if (*pszValue == '=')
{
break;
}
}
}
return !!uStart;
}
unsigned uStart,
{
char *pszTmp;
unsigned uLast = 0;
while (uStart != 0)
{
{
/* Key matches, check for a '=' (preceded by whitespace). */
pszTmp++;
if (*pszTmp == '=')
{
pszTmp++;
pszTmp++;
break;
}
}
}
if (uStart)
{
if (pszValue)
{
/* Key already exists, replace existing value. */
/* Check for buffer overflow. */
return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
}
else
{
{
if (pDescriptor->aNextLines[i])
else
}
pDescriptor->cLines--;
/* Adjust starting line numbers of following descriptor sections. */
pDescriptor->uFirstDDB--;
}
}
else
{
/* Key doesn't exist, append after the last entry in this category. */
if (!pszValue)
{
/* Key doesn't exist, and it should be removed. Simply a no-op. */
return VINF_SUCCESS;
}
/* Check for buffer overflow. */
return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
{
else
pDescriptor->aNextLines[i] = 0;
}
pDescriptor->cLines++;
/* Adjust starting line numbers of following descriptor sections. */
pDescriptor->uFirstDDB++;
}
pDescriptor->fDirty = true;
return VINF_SUCCESS;
}
{
const char *pszValue;
&pszValue))
return VERR_VD_VMDK_VALUE_NOT_FOUND;
}
{
const char *pszValue;
char *pszValueUnquoted;
&pszValue))
return VERR_VD_VMDK_VALUE_NOT_FOUND;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
char *pszValueQuoted;
if (!pszValueQuoted)
return VERR_NO_STR_MEMORY;
return rc;
}
{
if (!uEntry)
return;
/* Move everything including \0 in the entry marking the end of buffer. */
{
if (pDescriptor->aNextLines[i])
else
}
pDescriptor->cLines--;
if (pDescriptor->uFirstDDB)
pDescriptor->uFirstDDB--;
return;
}
{
char *pszTmp;
char szExt[1024];
/* Find last entry in extent description. */
while (uStart)
{
}
if (enmType == VMDKETYPE_ZERO)
{
}
else if (enmType == VMDKETYPE_FLAT)
{
}
else
{
}
/* Check for buffer overflow. */
return vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
{
else
pDescriptor->aNextLines[i] = 0;
}
pDescriptor->cLines++;
/* Adjust starting line numbers of following descriptor sections. */
pDescriptor->uFirstDDB++;
pDescriptor->fDirty = true;
return VINF_SUCCESS;
}
{
const char *pszValue;
char *pszValueUnquoted;
&pszValue))
return VERR_VD_VMDK_VALUE_NOT_FOUND;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
const char *pszValue;
char *pszValueUnquoted;
&pszValue))
return VERR_VD_VMDK_VALUE_NOT_FOUND;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
const char *pszValue;
char *pszValueUnquoted;
&pszValue))
return VERR_VD_VMDK_VALUE_NOT_FOUND;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
int rc;
char *pszValQuoted;
if (pszVal)
{
if (!pszValQuoted)
return VERR_NO_STR_MEMORY;
}
else
pszValQuoted = NULL;
if (pszValQuoted)
return rc;
}
{
char *pszUuid;
if (!pszUuid)
return VERR_NO_STR_MEMORY;
pszUuid);
return rc;
}
{
char *pszValue;
if (!pszValue)
return VERR_NO_STR_MEMORY;
pszValue);
return rc;
}
{
int rc = VINF_SUCCESS;
unsigned cLine = 0, uLastNonEmptyLine = 0;
while (*pTmp != '\0')
{
if (cLine >= VMDK_DESCRIPTOR_LINES_MAX)
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor too big in '%s'"), pImage->pszFilename);
goto out;
}
{
if (*pTmp == '\r')
{
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: unsupported end of line in descriptor in '%s'"), pImage->pszFilename);
goto out;
}
else
{
/* Get rid of CR character. */
*pTmp = '\0';
}
}
pTmp++;
}
/* Get rid of LF character. */
if (*pTmp == '\n')
{
*pTmp = '\0';
pTmp++;
}
}
/* Pointer right after the end of the used part of the buffer. */
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor does not start as expected in '%s'"), pImage->pszFilename);
goto out;
}
/* Initialize those, because we need to be able to reopen an image. */
pDescriptor->uFirstDesc = 0;
pDescriptor->uFirstExtent = 0;
pDescriptor->uFirstDDB = 0;
for (unsigned i = 0; i < cLine; i++)
{
{
{
/* An extent descriptor. */
{
/* Incorrect ordering of entries. */
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (!pDescriptor->uFirstExtent)
{
pDescriptor->uFirstExtent = i;
uLastNonEmptyLine = 0;
}
}
{
/* A disk database entry. */
{
/* Incorrect ordering of entries. */
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (!pDescriptor->uFirstDDB)
{
pDescriptor->uFirstDDB = i;
uLastNonEmptyLine = 0;
}
}
else
{
/* A normal entry. */
{
/* Incorrect ordering of entries. */
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect ordering of entries in descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (!pDescriptor->uFirstDesc)
{
pDescriptor->uFirstDesc = i;
uLastNonEmptyLine = 0;
}
}
if (uLastNonEmptyLine)
uLastNonEmptyLine = i;
}
}
out:
return rc;
}
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return rc;
return rc;
}
{
int rc;
pDescriptor->uFirstDesc = 0;
pDescriptor->uFirstExtent = 0;
pDescriptor->uFirstDDB = 0;
pDescriptor->cLines = 0;
pDescriptor->fDirty = false;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
/* The trailing space is created by VMware, too. */
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
/* Now that the framework is in place, use the normal functions to insert
* the remaining keys. */
char szBuf[9];
"CID", szBuf);
if (RT_FAILURE(rc))
goto out;
"parentCID", "ffffffff");
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
out:
return rc;
}
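/*
 * For reference, a freshly created descriptor as assembled by the function
 * above has this general shape (values illustrative, not taken from the
 * code):
 *
 *   # Disk DescriptorFile
 *   version=1
 *   CID=fffffffe
 *   parentCID=ffffffff
 *   createType="monolithicSparse"
 *
 *   # Extent description
 *   RW 2097152 SPARSE "example.vmdk"
 *
 *   # The Disk Data Base
 *   #DDB
 *
 *   ddb.geometry.cylinders = "1024"
 *   ddb.geometry.heads = "16"
 *   ddb.geometry.sectors = "63"
 */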
{
int rc;
unsigned cExtents;
unsigned uLine;
unsigned i;
&pImage->Descriptor);
if (RT_FAILURE(rc))
return rc;
/* Check version, must be 1. */
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error finding key 'version' in descriptor in '%s'"), pImage->pszFilename);
if (uVersion != 1)
return vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: unsupported format version in descriptor in '%s'"), pImage->pszFilename);
/* Get image creation type and determine image flags. */
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get image type from descriptor in '%s'"), pImage->pszFilename);
RTStrFree((char *)(void *)pszCreateType);
/* Count the number of extent config entries. */
uLine != 0;
/* nothing */;
{
/* Monolithic image, must have only one extent (already opened). */
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image may only have one extent in '%s'"), pImage->pszFilename);
}
{
/* Non-monolithic image, extents need to be allocated. */
if (RT_FAILURE(rc))
return rc;
}
{
/* Access type of the extent. */
{
pszLine += 2;
}
{
pszLine += 6;
}
{
pszLine += 8;
}
else
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
if (*pszLine++ != ' ')
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
/* Nominal size of the extent. */
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
if (*pszLine++ != ' ')
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
/* Type of the extent. */
#ifdef VBOX_WITH_VMDK_ESX
/** @todo Add the ESX extent types. Not necessary for now because
* the ESX extent types are only used inside an ESX server. They are
* automatically converted if the VMDK is exported. */
#endif /* VBOX_WITH_VMDK_ESX */
{
pszLine += 6;
}
{
pszLine += 4;
}
{
pszLine += 4;
}
{
pszLine += 4;
}
else
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
{
/* This one has no basename or offset. */
if (*pszLine == ' ')
pszLine++;
if (*pszLine != '\0')
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
}
else
{
/* All other extent types have basename and optional offset. */
if (*pszLine++ != ' ')
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
/* Basename of the image. Surrounded by quotes. */
char *pszBasename;
if (RT_FAILURE(rc))
return rc;
if (*pszLine == ' ')
{
pszLine++;
if (*pszLine != '\0')
{
/* Optional offset in extent specified. */
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
}
}
if (*pszLine != '\0')
return vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: parse error in extent description in '%s'"), pImage->pszFilename);
}
}
/* Determine PCHS geometry (autogenerate if necessary). */
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting PCHS geometry from extent description in '%s'"), pImage->pszFilename);
{
/* Mark PCHS geometry as not yet valid (can't do the calculation here
* as the total image size isn't known yet). */
}
/* Determine LCHS geometry (set to 0 if not specified). */
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting LCHS geometry from extent description in '%s'"), pImage->pszFilename);
{
}
/* Get image UUID. */
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
{
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox; the descriptor is only updated in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
else
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
}
}
else if (RT_FAILURE(rc))
return rc;
/* Get image modification UUID. */
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
{
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox; the descriptor is only updated in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
else
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image modification UUID in descriptor in '%s'"), pImage->pszFilename);
}
}
else if (RT_FAILURE(rc))
return rc;
/* Get UUID of parent image. */
&pImage->ParentUuid);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
{
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox; the descriptor is only updated in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
else
{
if (RT_FAILURE(rc))
return rc;
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent UUID in descriptor in '%s'"), pImage->pszFilename);
}
}
else if (RT_FAILURE(rc))
return rc;
/* Get parent image modification UUID. */
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
{
        /* Image without UUID. Probably created by VMware and not yet used
         * by VirtualBox; the descriptor is only updated in read/write
         * mode, so don't bother producing a sensible UUID otherwise. */
else
{
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in descriptor in '%s'"), pImage->pszFilename);
}
}
else if (RT_FAILURE(rc))
return rc;
return VINF_SUCCESS;
}
/**
 * Internal: prepares the descriptor for writing to the image.
 */
{
int rc = VINF_SUCCESS;
/*
* Allocate temporary descriptor buffer.
* In case there is no limit allocate a default
* and increase if required.
*/
unsigned offDescriptor = 0;
if (!pszDescriptor)
return VERR_NO_MEMORY;
{
/*
* Increase the descriptor if there is no limit and
* there is not enough room left for this line.
*/
{
if (cbLimit)
{
rc = vdIfError(pImage->pIfError, VERR_BUFFER_OVERFLOW, RT_SRC_POS, N_("VMDK: descriptor too long in '%s'"), pImage->pszFilename);
break;
}
else
{
char *pszDescriptorNew = NULL;
LogFlow(("Increasing descriptor cache\n"));
if (!pszDescriptorNew)
{
rc = VERR_NO_MEMORY;
break;
}
}
}
if (cb > 0)
{
offDescriptor += cb;
}
}
if (RT_SUCCESS(rc))
{
*ppvData = pszDescriptor;
*pcbData = offDescriptor;
}
else if (pszDescriptor)
return rc;
}
/**
 * Internal: write/update the descriptor part of the image.
 */
{
int rc = VINF_SUCCESS;
void *pvDescriptor;
{
/* Separate descriptor file. */
uOffset = 0;
cbLimit = 0;
}
else
{
/* Embedded descriptor file. */
}
/* Bail out if there is no file to write to. */
return VERR_INVALID_PARAMETER;
if (RT_SUCCESS(rc))
{
if (RT_FAILURE(rc))
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
{
if (RT_FAILURE(rc))
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
}
if (RT_SUCCESS(rc))
}
return rc;
}
/**
 * Internal: write/update the descriptor part of the image - async version.
 */
{
int rc = VINF_SUCCESS;
void *pvDescriptor;
{
/* Separate descriptor file. */
uOffset = 0;
cbLimit = 0;
}
else
{
/* Embedded descriptor file. */
}
/* Bail out if there is no file to write to. */
return VERR_INVALID_PARAMETER;
if (RT_SUCCESS(rc))
{
if ( RT_FAILURE(rc)
&& rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
}
{
if (RT_FAILURE(rc))
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
}
if (RT_SUCCESS(rc))
return rc;
}
/**
* Internal: validate the consistency check values in a binary header.
*/
static int vmdkValidateHeader(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, const SparseExtentHeader *pHeader)
{
int rc = VINF_SUCCESS;
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect magic in sparse extent header in '%s'"), pExtent->pszFullname);
return rc;
}
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_UNSUPPORTED_VERSION, RT_SRC_POS, N_("VMDK: incorrect version in sparse extent header in '%s', not a VMDK 1.0/1.1 conforming file"), pExtent->pszFullname);
return rc;
}
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: corrupted by CR/LF translation in '%s'"), pExtent->pszFullname);
return rc;
}
return rc;
}
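/*
 * The checks above compare against spec constants: the magic is the bytes
 * 'K','D','M','V' on disk (0x564d444b when read little-endian), the version
 * must be 1 (or 3, which streamOptimized images commonly use), and the
 * end-of-line probe characters written at creation time must have survived
 * any text-mode transfer unmangled:
 */
#if 0 /* example only */
Assert(RT_LE2H_U32(pHeader->magicNumber) == 0x564d444b /* 'V','M','D','K' */);
Assert(pHeader->singleEndLineChar  == '\n');
Assert(pHeader->nonEndLineChar     == ' ');
Assert(pHeader->doubleEndLineChar1 == '\r');
Assert(pHeader->doubleEndLineChar2 == '\n');
#endif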
/**
* Internal: read metadata belonging to an extent with binary header, i.e.
* as found in monolithic files.
*/
static int vmdkReadBinaryMetaExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                                    bool fMagicAlreadyRead)
{
int rc;
if (!fMagicAlreadyRead)
else
{
sizeof(Header)
NULL);
}
if (RT_FAILURE(rc))
{
vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent header in '%s'"), pExtent->pszFullname);
goto out;
}
if (RT_FAILURE(rc))
goto out;
{
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot get size of '%s'"), pExtent->pszFullname);
goto out;
}
}
{
/* Read the footer, which comes before the end-of-stream marker. */
if (RT_FAILURE(rc))
{
vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading extent footer in '%s'"), pExtent->pszFullname);
goto out;
}
if (RT_FAILURE(rc))
goto out;
/* Prohibit any writes to this extent. */
pExtent->uAppendPosition = 0;
}
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: inconsistent embedded descriptor config in '%s'"), pExtent->pszFullname);
goto out;
}
{
}
else
{
pExtent->uSectorRGD = 0;
}
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot resolve grain directory offset in '%s'"), pExtent->pszFullname);
goto out;
}
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: incorrect grain directory size in '%s'"), pExtent->pszFullname);
goto out;
}
/* Fix up the number of descriptor sectors, as some flat images have
* really just one, and this causes failures when inserting the UUID
* values and other extra information. */
{
/* Do it the easy way - just fix it for flat images which have no
* other complicated metadata which needs space too. */
}
out:
if (RT_FAILURE(rc))
return rc;
}
/**
* Internal: read additional metadata belonging to an extent. For those
* extents which have no additional metadata just verify the information.
*/
{
int rc = VINF_SUCCESS;
    /* disabled the check as there are too many truncated vmdk images out there */
#ifdef VBOX_WITH_VMDK_STRICT_SIZE_CHECK
/* The image must be a multiple of a sector in size and contain the data
* area (flat images only). If not, it means the image is at least
* truncated, or even seriously garbled. */
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error getting size in '%s'"), pExtent->pszFullname);
goto out;
}
&& (pExtent->enmType != VMDKETYPE_FLAT || pExtent->cNominalSectors + pExtent->uSectorOffset > VMDK_BYTE2SECTOR(cbExtentSize)))
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: file size is not a multiple of 512 in '%s', file is truncated or otherwise garbled"), pExtent->pszFullname);
goto out;
}
#endif /* VBOX_WITH_VMDK_STRICT_SIZE_CHECK */
goto out;
/* The spec says that this must be a power of two and greater than 8,
* but probably they meant not less than 8. */
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: invalid extent grain size %u in '%s'"), pExtent->cSectorsPerGrain, pExtent->pszFullname);
goto out;
}
/* This code requires that a grain table must hold a power of two multiple
* of the number of entries per GT cache entry. */
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: grain table cache size problem in '%s'"), pExtent->pszFullname);
goto out;
}
if (RT_FAILURE(rc))
goto out;
/* Prohibit any writes to this streamOptimized extent. */
pExtent->uAppendPosition = 0;
else
{
pExtent->cbGrainStreamRead = 0;
}
out:
if (RT_FAILURE(rc))
return rc;
}
/**
 * Internal: write/update the metadata for a sparse extent.
 */
{
{
{
}
else
{
}
}
else
{
{
}
else
{
}
}
if (RT_FAILURE(rc))
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
return rc;
}
/**
 * Internal: write/update the metadata for a sparse extent - async version.
 */
{
{
{
}
else
{
}
}
else
{
{
}
else
{
}
}
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
return rc;
}
#ifdef VBOX_WITH_VMDK_ESX
/**
* Internal: unused code to read the metadata of a sparse ESX extent.
*
* Such extents never leave ESX server, so this isn't ever used.
*/
{
if (RT_FAILURE(rc))
{
vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading ESX sparse extent header in '%s'"), pExtent->pszFullname);
goto out;
}
{
goto out;
}
/* The spec says that this must be between 1 sector and 1MB. This code
* assumes it's a power of two, so check that requirement, too. */
|| pExtent->cSectorsPerGrain == 0
{
goto out;
}
pExtent->uDescriptorSector = 0;
pExtent->cDescriptorSectors = 0;
pExtent->uSectorRGD = 0;
pExtent->cOverheadSectors = 0;
{
goto out;
}
{
/* Inconsistency detected. Computed number of GD entries doesn't match
* stored value. Better be safe than sorry. */
goto out;
}
out:
if (RT_FAILURE(rc))
return rc;
}
#endif /* VBOX_WITH_VMDK_ESX */
/**
* Internal: free the buffers used for streamOptimized images.
*/
{
if (pExtent->pvCompGrain)
{
}
{
}
}
/**
* Internal: free the memory used by the extent data structure, optionally
* deleting the referenced files.
*/
static void vmdkFreeExtentData(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
                               bool fDelete)
{
{
}
{
/* Do not delete raw extents, these have full and base names equal. */
&& pExtent->pszFullname
}
if (pExtent->pszBasename)
{
}
if (pExtent->pszFullname)
{
}
}
/**
* Internal: allocate grain table cache if necessary for this image.
*/
{
/* Allocate grain table cache if any sparse extent is present. */
{
#ifdef VBOX_WITH_VMDK_ESX
#endif /* VBOX_WITH_VMDK_ESX */
)
{
/* Allocate grain table cache. */
return VERR_NO_MEMORY;
for (unsigned j = 0; j < VMDK_GT_CACHE_SIZE; j++)
{
}
break;
}
}
return VINF_SUCCESS;
}
/**
* Internal: allocate the given number of extents.
*/
{
int rc = VINF_SUCCESS;
if (pExtents)
{
for (unsigned i = 0; i < cExtents; i++)
{
}
}
else
rc = VERR_NO_MEMORY;
return rc;
}
/**
* Internal: Open an image, constructing all necessary data structures.
*/
{
int rc;
/*
* Open the image.
* We don't have to check for asynchronous access because
* we only support raw access and the opened file is a description
 * file where no data is stored.
*/
false /* fAsyncIO */);
if (RT_FAILURE(rc))
{
/* Do NOT signal an appropriate error here, as the VD layer has the
* choice of retrying the open if it failed. */
goto out;
}
/* Read magic (if present). */
if (RT_FAILURE(rc))
{
vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error reading the magic number in '%s'"), pImage->pszFilename);
goto out;
}
/* Handle the file according to its magic number. */
{
/* It's a hosted single-extent image. */
if (RT_FAILURE(rc))
goto out;
/* The opened file is passed to the extent. No separate descriptor
* file, so no need to keep anything open for the image. */
if (!pExtent->pszFullname)
{
rc = VERR_NO_MEMORY;
goto out;
}
if (RT_FAILURE(rc))
goto out;
/* As we're dealing with a monolithic image here, there must
* be a descriptor embedded in the image file. */
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: monolithic image without descriptor in '%s'"), pImage->pszFilename);
goto out;
}
        /* HACK: extend the descriptor if it is unusually small and it fits in
         * the unused space after the image header. Allows opening VMDK files
         * with extremely small descriptors in read/write mode. */
{
pExtent->fMetaDirty = true;
}
/* Read the descriptor from the extent. */
{
rc = VERR_NO_MEMORY;
goto out;
}
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pExtent->pszFullname);
goto out;
}
if (RT_FAILURE(rc))
goto out;
{
goto out;
}
if (RT_FAILURE(rc))
goto out;
/* Mark the extent as unclean if opened in read-write mode. */
if ( !(uOpenFlags & VD_OPEN_FLAGS_READONLY)
{
pExtent->fUncleanShutdown = true;
pExtent->fMetaDirty = true;
}
}
else
{
        /* Allocate at least 10K, and make sure that there is 5K free space
         * in case new entries need to be added to the descriptor. Never
         * allocate more than 128K, because that's not a valid descriptor
         * file and will result in the correct "truncated read" error
         * handling. */
if (RT_FAILURE(rc))
goto out;
/* If the descriptor file is shorter than 50 bytes it can't be valid. */
if (cbFileSize < 50)
{
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: descriptor in '%s' is too short"), pImage->pszFilename);
goto out;
}
else
{
rc = VERR_NO_MEMORY;
goto out;
}
/* Don't reread the place where the magic would live in a sparse
* image if it's a descriptor based one. */
cbFileSize - sizeof(u32Magic)),
&cbRead);
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: read error for descriptor in '%s'"), pImage->pszFilename);
goto out;
}
{
/* Likely the read is truncated. Better fail a bit too early
* (normally the descriptor is much smaller than our buffer). */
rc = vdIfError(pImage->pIfError, VERR_VD_VMDK_INVALID_HEADER, RT_SRC_POS, N_("VMDK: cannot read descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
goto out;
/*
* We have to check for the asynchronous open flag. The
* extents are parsed and the type of all are known now.
* Check if every extent is either FLAT or ZERO.
*/
if (uOpenFlags & VD_OPEN_FLAGS_ASYNC_IO)
{
unsigned cFlatExtents = 0;
{
{
                    /*
                     * The opened image contains at least one extent that is
                     * neither flat nor zero. Return an error but don't set an
                     * error message, as the caller has the chance to open the
                     * image in non-async I/O mode.
                     */
goto out;
}
cFlatExtents++;
}
}
{
if (pExtent->pszBasename)
{
/* Hack to figure out whether the specified name in the
* extent descriptor is absolute. Doesn't always work, but
* should be good enough for now. */
char *pszFullname;
/** @todo implement proper path absolute check. */
{
if (!pszFullname)
{
rc = VERR_NO_MEMORY;
goto out;
}
}
else
{
if (!pszDirname)
{
rc = VERR_NO_MEMORY;
goto out;
}
if (!pszFullname)
{
goto out;
}
}
}
else
{
case VMDKETYPE_HOSTED_SPARSE:
false /* fCreate */),
false /* fAsyncIO */);
if (RT_FAILURE(rc))
{
/* Do NOT signal an appropriate error here, as the VD
* layer has the choice of retrying the open if it
* failed. */
goto out;
}
false /* fMagicAlreadyRead */);
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
goto out;
/* Mark extent as unclean if opened in read-write mode. */
if (!(uOpenFlags & VD_OPEN_FLAGS_READONLY))
{
pExtent->fUncleanShutdown = true;
pExtent->fMetaDirty = true;
}
break;
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
false /* fCreate */),
true /* fAsyncIO */);
if (RT_FAILURE(rc))
{
/* Do NOT signal an appropriate error here, as the VD
* layer has the choice of retrying the open if it
* failed. */
goto out;
}
break;
case VMDKETYPE_ZERO:
/* Nothing to do. */
break;
default:
}
}
}
/* Make sure this is not reached accidentally with an error status. */
/* Determine PCHS geometry if not set. */
{
{
}
}
    /* Update the image metadata now in case it has changed. */
if (RT_FAILURE(rc))
goto out;
/* Figure out a few per-image constants from the extents. */
{
#ifdef VBOX_WITH_VMDK_ESX
#endif /* VBOX_WITH_VMDK_ESX */
)
{
/* Here used to be a check whether the nominal size of an extent
* is a multiple of the grain size. The spec says that this is
* always the case, but unfortunately some files out there in the
* wild violate the spec (e.g. ReactOS 0.3.1). */
}
}
{
{
break;
}
}
out:
if (RT_FAILURE(rc))
vmdkFreeImage(pImage, false);
return rc;
}
/**
 * Internal: create an image using raw disk access, either a full disk or
 * selected partitions.
 */
{
int rc = VINF_SUCCESS;
{
        /* Full raw disk access. This requires setting up a descriptor
         * file and opening the (flat) raw disk. */
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
/* Create raw disk descriptor file. */
true /* fCreate */),
false /* fAsyncIO */);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
/* Set up basename for extent description. Cannot use StrDup. */
if (!pszBasename)
return VERR_NO_MEMORY;
/* For raw disks the full name is identical to the base name. */
if (!pExtent->pszFullname)
return VERR_NO_MEMORY;
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = false;
/* Open flat image, the raw disk. */
false /* fCreate */),
false /* fAsyncIO */);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
}
else
{
        /* Raw partition access. This requires setting up a descriptor
         * file, writing the partition information to a flat extent and
         * opening all the (flat) raw disk partitions. */
/* First pass over the partition data areas to determine how many
* extents we need. One data area can require up to 2 extents, as
* it might be necessary to skip over unpartitioned space. */
unsigned cExtents = 0;
for (unsigned i = 0; i < pRaw->cPartDescs; i++)
{
return vdIfError(pImage->pIfError, VERR_INVALID_PARAMETER, RT_SRC_POS, N_("VMDK: incorrect partition data area ordering set up by the caller in '%s'"), pImage->pszFilename);
cExtents++;
cExtents++;
}
/* Another extent for filling up the rest of the image. */
cExtents++;
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
/* Create raw partition descriptor file. */
true /* fCreate */),
false /* fAsyncIO */);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
/* Create base filename for the partition table extent. */
/** @todo remove fixed buffer without creating memory leaks. */
char pszPartition[1024];
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: invalid filename '%s'"), pImage->pszFilename);
if (!pszBaseBase)
return VERR_NO_MEMORY;
/* Second pass over the partitions, now define all extents. */
uint64_t uPartOffset = 0;
cExtents = 0;
uStart = 0;
for (unsigned i = 0; i < pRaw->cPartDescs; i++)
{
{
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = false;
/* go to next extent */
}
if (pPart->pvPartitionData)
{
/* Set up basename for extent description. Can't use StrDup. */
if (!pszBasename)
return VERR_NO_MEMORY;
/* Set up full name for partition extent. */
if (!pszDirname)
return VERR_NO_STR_MEMORY;
if (!pszDirname)
return VERR_NO_STR_MEMORY;
pExtent->fMetaDirty = false;
/* Create partition table flat image. */
true /* fCreate */),
false /* fAsyncIO */);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not write partition data to '%s'"), pExtent->pszFullname);
}
else
{
if (pPart->pszRawDevice)
{
/* Set up basename for extent descr. Can't use StrDup. */
if (!pszBasename)
return VERR_NO_MEMORY;
                    /* For raw disks the full name is identical to the base name. */
if (!pExtent->pszFullname)
return VERR_NO_MEMORY;
pExtent->fMetaDirty = false;
/* Open flat image, the raw partition. */
false /* fCreate */),
false /* fAsyncIO */);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
}
else
{
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = false;
}
}
}
/* Another extent for filling up the rest of the image. */
{
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = false;
}
}
"fullDevice" : "partitionedDevice");
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
return rc;
}
/**
* Internal: create a regular (i.e. file-backed) VMDK image.
*/
unsigned uImageFlags,
unsigned uPercentStart, unsigned uPercentSpan)
{
int rc = VINF_SUCCESS;
unsigned cExtents = 1;
{
/* Do proper extent computation: need one smaller extent if the total
* size isn't evenly divisible by the split size. */
if (cbSize % VMDK_2G_SPLIT_SIZE)
cExtents++;
}
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
/* Basename strings needed for constructing the extent names. */
/* Create separate descriptor file if necessary. */
{
true /* fCreate */),
false /* fAsyncIO */);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
}
else
/* Set up all extents. */
for (unsigned i = 0; i < cExtents; i++)
{
        /* Set up basename for extent description. Cannot use StrDup
         * for basename, as it is not guaranteed that the memory can be freed
         * with RTMemTmpFree, which must be used as in other code paths;
         * hence StrDup is not usable. */
{
if (!pszBasename)
return VERR_NO_MEMORY;
}
else
{
char *pszTmp;
if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
{
if (cExtents == 1)
else
i+1, pszBasenameExt);
}
else
if (!pszTmp)
return VERR_NO_STR_MEMORY;
if (!pszBasename)
return VERR_NO_MEMORY;
}
if (!pszBasedirectory)
return VERR_NO_STR_MEMORY;
if (!pszFullname)
return VERR_NO_STR_MEMORY;
/* Create file for extent. */
true /* fCreate */),
false /* fAsyncIO */);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
{
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set size of new file '%s'"), pExtent->pszFullname);
/* Fill image with zeroes. We do this for every fixed-size image since on some systems
* (for example Windows Vista), it takes ages to write a block near the end of a sparse
* file and the guest could complain about an ATA timeout. */
            /** @todo Starting with Linux 2.6.23, there is a fallocate() system call.
             * Currently supported file systems are ext4 and ocfs2. */
/* Allocate a temporary zero-filled buffer. Use a bigger block size to optimize writing */
if (!pvBuf)
return VERR_NO_MEMORY;
/* Write data to all image blocks. */
{
if (RT_FAILURE(rc))
{
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: writing block failed for '%s'"), pImage->pszFilename);
}
if (pfnProgress)
{
if (RT_FAILURE(rc))
{
return rc;
}
}
}
}
/* Place descriptor file information (where integrated). */
{
/* The descriptor is part of the (only) extent. */
}
if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
{
{
/* The spec says version is 1 for all VMDKs, but the vast
* majority of streamOptimized VMDKs actually contain
* version 3 - so go with the majority. Both are accepted. */
}
}
else
{
else
}
pExtent->fUncleanShutdown = true;
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = true;
if (!(uImageFlags & VD_IMAGE_FLAGS_FIXED))
{
/* fPreAlloc should never be false because VMware can't use such images. */
1),
true /* fPreAlloc */);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
}
cbRemaining -= cbExtent;
}
{
        /* VirtualBox doesn't care, but VMware ESX freaks out if the wrong
         * controller type is set in an image. */
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set controller type to lsilogic in '%s'"), pImage->pszFilename);
}
const char *pszDescType = NULL;
if (uImageFlags & VD_IMAGE_FLAGS_FIXED)
{
pszDescType = "vmfs";
else
? "monolithicFlat" : "twoGbMaxExtentFlat";
}
else
{
pszDescType = "streamOptimized";
else
{
? "monolithicSparse" : "twoGbMaxExtentSparse";
}
}
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
return rc;
}
/**
* Internal: Create a real stream optimized VMDK using only linear writes.
*/
unsigned uImageFlags,
unsigned uPercentStart, unsigned uPercentSpan)
{
int rc;
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new extent list in '%s'"), pImage->pszFilename);
/* Basename strings needed for constructing the extent names. */
/* No separate descriptor file. */
/* Set up all extents. */
    /* Set up basename for extent description. Cannot use StrDup
     * for basename, as it is not guaranteed that the memory can be freed
     * with RTMemTmpFree, which must be used as in other code paths;
     * hence StrDup is not usable. */
if (!pszBasename)
return VERR_NO_MEMORY;
if (!pszFullname)
return VERR_NO_STR_MEMORY;
/* Create file for extent. Make it write only, no reading allowed. */
true /* fCreate */)
& ~RTFILE_O_READ,
false /* fAsyncIO */);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
/* Place descriptor file information. */
/* The descriptor is part of the (only) extent. */
/* The spec says version is 1 for all VMDKs, but the vast
* majority of streamOptimized VMDKs actually contain
* version 3 - so go with the majority. Both are accepted. */
pExtent->fUncleanShutdown = false;
pExtent->uSectorOffset = 0;
pExtent->fMetaDirty = true;
/* Create grain directory, without preallocating it straight away. It will
* be constructed on the fly when writing out the data and written when
* closing the image. The end effect is that the full grain directory is
* allocated, which is a requirement of the VMDK specs. */
false /* fPreAlloc */);
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new grain directory in '%s'"), pExtent->pszFullname);
"streamOptimized");
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not set the image type in '%s'"), pImage->pszFilename);
return rc;
}
/**
* Internal: The actual code for creating any VMDK variant currently in
* existence on hosted environments.
*/
unsigned uImageFlags, const char *pszComment,
unsigned uPercentStart, unsigned uPercentSpan)
{
int rc;
&pImage->Descriptor);
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if ( (uImageFlags & VD_IMAGE_FLAGS_FIXED)
{
/* Raw disk image (includes raw partition). */
/* As the comment is misused, zap it so that no garbage comment
* is set below. */
pszComment = NULL;
}
else
{
{
/* Stream optimized sparse image (monolithic). */
}
else
{
/* Regular fixed or sparse image (monolithic or split). */
}
}
if (RT_FAILURE(rc))
goto out;
{
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not insert the extent list into descriptor in '%s'"), pImage->pszFilename);
goto out;
}
}
if ( pPCHSGeometry->cCylinders != 0
&& pPCHSGeometry->cHeads != 0
&& pPCHSGeometry->cSectors != 0)
{
if (RT_FAILURE(rc))
goto out;
}
if ( pLCHSGeometry->cCylinders != 0
&& pLCHSGeometry->cHeads != 0
&& pLCHSGeometry->cSectors != 0)
{
if (RT_FAILURE(rc))
goto out;
}
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in new descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in new descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in new descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent modification UUID in new descriptor in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
goto out;
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot set image comment in '%s'"), pImage->pszFilename);
goto out;
}
{
/* streamOptimized is a bit special: we cannot trigger the flush
* until all data has been written. So we write the necessary
* information explicitly. */
pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64( pImage->Descriptor.aLines[pImage->Descriptor.cLines]
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK header in '%s'"), pImage->pszFilename);
goto out;
}
if (RT_FAILURE(rc))
{
rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write VMDK descriptor in '%s'"), pImage->pszFilename);
goto out;
}
}
else
out:
if (RT_FAILURE(rc))
return rc;
}
/**
* Internal: Update image comment.
*/
{
char *pszCommentEncoded;
if (pszComment)
{
if (!pszCommentEncoded)
return VERR_NO_MEMORY;
}
else
"ddb.comment", pszCommentEncoded);
if (pszComment)
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image comment in descriptor in '%s'"), pImage->pszFilename);
return VINF_SUCCESS;
}
/**
* Internal. Clear the grain table buffer for real stream optimized writing.
*/
{
uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
for (uint32_t i = 0; i < cCacheLines; i++)
VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t));
}
/**
* Internal. Flush the grain table buffer for real stream optimized writing.
*/
{
int rc = VINF_SUCCESS;
uint32_t cCacheLines = RT_ALIGN(pExtent->cGTEntries, VMDK_GT_CACHELINE_SIZE) / VMDK_GT_CACHELINE_SIZE;
/* VMware does not write out completely empty grain tables in the case
* of streamOptimized images, which according to my interpretation of
* the VMDK 1.1 spec is bending the rules. Since they do it and we can
* handle it without problems, do it the same way and save some bytes. */
bool fAllZero = true;
for (uint32_t i = 0; i < cCacheLines; i++)
{
/* Check whether this cache line contains only zero entries; the
* conversion to little endian happens later, just before writing. */
if (*pGTTmp)
{
fAllZero = false;
break;
}
if (!fAllZero)
break;
}
if (fAllZero)
return VINF_SUCCESS;
if (!uFileOffset)
return VERR_INTERNAL_ERROR;
/* Align to sector, as the previous write could have been any size. */
/* Grain table marker. */
uFileOffset += 512;
return VERR_INTERNAL_ERROR;
for (uint32_t i = 0; i < cCacheLines; i++)
{
/* Convert the grain table to little endian in place, as it will not
* be used at all after this function has been called. */
VMDK_GT_CACHELINE_SIZE * sizeof(uint32_t),
NULL);
if (RT_FAILURE(rc))
break;
}
return rc;
}
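/*
 * Illustrative sketch, not part of the original code: the all-zero scan
 * above, shown standalone. A grain table whose entries are all zero
 * references no grains, so it can be omitted from the stream entirely.
 * The helper name and the flat array parameter are assumptions.
 */
#if 0
static bool vmdkExampleGTIsAllZero(const uint32_t *paGTEntries, uint32_t cEntries)
{
    for (uint32_t i = 0; i < cEntries; i++)
        if (paGTEntries[i] != 0)
            return false; /* at least one grain is referenced */
    return true;          /* the whole table can be skipped */
}
#endif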
/**
* Internal. Free all allocated space for representing an image, and optionally
* delete the image from disk.
*/
{
int rc = VINF_SUCCESS;
/* Freeing a never allocated image (e.g. because the open failed) is
* not signalled as an error. After all, nothing bad happens. */
if (pImage)
{
{
{
/* Check if all extents are clean. */
{
}
}
else
{
/* Mark all extents as clean. */
{
#ifdef VBOX_WITH_VMDK_ESX
#endif /* VBOX_WITH_VMDK_ESX */
)
{
}
/* From now on it's not safe to append any more data. */
}
}
}
{
/* No need to write any pending data if the file will be deleted
* or if the new file wasn't successfully created. */
{
{
}
if (!uFileOffset)
return VERR_INTERNAL_ERROR;
/* From now on it's not safe to append any more data. */
pExtent->uAppendPosition = 0;
/* Grain directory marker. */
pMarker->uSector = RT_H2LE_U64(VMDK_BYTE2SECTOR(RT_ALIGN_64(pExtent->cGDEntries * sizeof(uint32_t), 512))); /* byte-swap last, so the alignment is computed on the host-endian value */
uFileOffset += 512;
/* Write grain directory in little endian style. The array will
* not be used after this, so convert in place. */
NULL);
512);
/* Footer marker. */
uFileOffset += 512;
uFileOffset += 512;
/* End-of-stream marker. */
}
}
else
{
}
{
}
{
}
}
return rc;
}
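/*
 * Illustrative sketch, not part of the original code: the order of the tail
 * blocks written above when a freshly written streamOptimized extent is
 * closed. The callback is hypothetical; each marker occupies one 512-byte
 * sector and is directly followed by its payload (the grain directory resp.
 * the footer), except for the end-of-stream marker, which comes last and
 * has no payload.
 */
#if 0
static int vmdkExampleWriteStreamTail(int (*pfnWriteMarkerAndPayload)(uint32_t uMarkerType))
{
    int rc = pfnWriteMarkerAndPayload(VMDK_MARKER_GD);     /* GD marker + grain directory */
    if (RT_SUCCESS(rc))
        rc = pfnWriteMarkerAndPayload(VMDK_MARKER_FOOTER); /* footer marker + footer */
    if (RT_SUCCESS(rc))
        rc = pfnWriteMarkerAndPayload(VMDK_MARKER_EOS);    /* end-of-stream marker */
    return rc;
}
#endif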
/**
* Internal. Flush image data (and metadata) to disk.
*/
{
int rc = VINF_SUCCESS;
/* Update descriptor if changed. */
{
if (RT_FAILURE(rc))
goto out;
}
{
{
{
case VMDKETYPE_HOSTED_SPARSE:
{
if (RT_FAILURE(rc))
goto out;
}
else
{
/* Simply skip writing anything if the streamOptimized
* image hasn't just been created. */
if (!uFileOffset)
break;
if (RT_FAILURE(rc))
goto out;
}
break;
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
/** @todo update the header. */
break;
#endif /* VBOX_WITH_VMDK_ESX */
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
/* Nothing to do. */
break;
case VMDKETYPE_ZERO:
default:
AssertMsgFailed(("extent with type %d marked as dirty\n",
break;
}
}
{
case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
/** @todo implement a proper absolute-path check. */
break;
case VMDKETYPE_ZERO:
/* No need to do anything for this extent. */
break;
default:
break;
}
}
out:
return rc;
}
/**
* Internal. Find extent corresponding to the sector number in the disk.
*/
{
int rc = VINF_SUCCESS;
{
{
break;
}
}
if (pExtent)
else
return rc;
}
/**
* Internal. Hash function for placing the grain table hash entries.
*/
unsigned uExtent)
{
/** @todo this hash function is quite simple, maybe use a better one which
* scrambles the bits better; one candidate is sketched after this function. */
}
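/*
 * Illustrative sketch, not part of the original code: one way to address the
 * @todo above. A standard 64-bit finalizer (Murmur3-style) scrambles the
 * bits of the sector number and extent index before reducing modulo the
 * hash table size. The helper name and parameters are assumptions.
 */
#if 0
static unsigned vmdkExampleGTCacheHash(uint64_t uSector, unsigned uExtent, unsigned cEntries)
{
    uint64_t u = uSector ^ ((uint64_t)uExtent << 32);
    u ^= u >> 33;
    u *= UINT64_C(0xff51afd7ed558ccd);
    u ^= u >> 33;
    u *= UINT64_C(0xc4ceb9fe1a85ec53);
    u ^= u >> 33;
    return (unsigned)(u % cEntries);
}
#endif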
/**
* Internal. Get sector number in the extent file from the relative sector
* number in the extent.
*/
{
int rc;
/* For newly created and readonly/sequentially opened streamOptimized
* images this must be a no-op, as the grain directory is not there. */
&& pExtent->uAppendPosition)
{
*puExtentSector = 0;
return VINF_SUCCESS;
}
return VERR_OUT_OF_RANGE;
if (!uGTSector)
{
/* There is no grain table referenced by this grain directory
* entry. So there is absolutely no data in this area. */
*puExtentSector = 0;
return VINF_SUCCESS;
}
{
/* Cache miss, fetch data from disk. */
VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
}
if (uGrainSector)
else
*puExtentSector = 0;
return VINF_SUCCESS;
}
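/*
 * Illustrative sketch, not part of the original code: the two-level lookup
 * performed above, with the grain table cache omitted. A sector is mapped
 * through the grain directory (one entry per grain table) and then through
 * the grain table (one entry per grain); a zero entry at either level means
 * the area holds no data. All names and the read callback are assumptions.
 */
#if 0
static int vmdkExampleLookupGrain(const uint32_t *paGDEntries, uint64_t uSector,
                                  uint64_t cSectorsPerGrain, uint32_t cGTEntries,
                                  int (*pfnReadGTEntry)(uint32_t uGTSector, uint32_t iEntry,
                                                        uint32_t *puValue),
                                  uint64_t *puGrainSector)
{
    uint64_t iGrain = uSector / cSectorsPerGrain;
    uint32_t uGTSector = paGDEntries[iGrain / cGTEntries]; /* grain directory level */
    if (!uGTSector)
    {
        *puGrainSector = 0; /* no grain table -> sparse hole */
        return VINF_SUCCESS;
    }
    uint32_t uGrainSector = 0;
    int rc = pfnReadGTEntry(uGTSector, (uint32_t)(iGrain % cGTEntries), &uGrainSector);
    if (RT_FAILURE(rc))
        return rc;
    *puGrainSector = uGrainSector; /* 0 means the grain is unallocated */
    return VINF_SUCCESS;
}
#endif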
/**
* Internal. Get sector number in the extent file from the relative sector
* number in the extent - version for async access.
*/
{
int rc;
return VERR_OUT_OF_RANGE;
if (!uGTSector)
{
/* There is no grain table referenced by this grain directory
* entry. So there is absolutely no data in this area. */
*puExtentSector = 0;
return VINF_SUCCESS;
}
{
/* Cache miss, fetch data from disk. */
VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (RT_FAILURE(rc))
return rc;
/* We can release the metadata transfer immediately. */
for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
}
if (uGrainSector)
else
*puExtentSector = 0;
return VINF_SUCCESS;
}
/**
* Internal. Allocates a new grain table (if necessary), writes the grain
* and updates the grain table. The cache is also updated by this operation.
* This is separate from vmdkGetSector, because that should be as fast as
* possible. Most code from vmdkGetSector also appears here.
*/
{
int rc;
return VERR_OUT_OF_RANGE;
else
uRGTSector = 0; /* avoid compiler warning */
if (!uGTSector)
{
/* There is no grain table referenced by this grain directory
* entry. So there is absolutely no data in this area. Allocate
* a new grain table and put the reference to it in the GDs. */
if (!uFileOffset)
return VERR_INTERNAL_ERROR;
/* Normally the grain table is preallocated for hosted sparse extents
* that support more than 32 bit sector numbers. So this shouldn't
* ever happen on a valid extent. */
if (uGTSector > UINT32_MAX)
return VERR_VD_VMDK_INVALID_HEADER;
/* Write grain table by writing the required number of grain table
* cache chunks. This avoids dynamic memory allocation, but is a bit
* slower. As this is a pretty infrequently occurring case, it
* should be acceptable. */
for (unsigned i = 0;
i++)
{
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
}
512);
{
if (!uFileOffset)
return VERR_INTERNAL_ERROR;
/* Normally the redundant grain table is preallocated for hosted
* sparse extents that support more than 32 bit sector numbers. So
* this shouldn't ever happen on a valid extent. */
if (uRGTSector > UINT32_MAX)
return VERR_VD_VMDK_INVALID_HEADER;
/* Write backup grain table by writing the required number of grain
* table cache chunks. This avoids dynamic memory allocation, but is a
* bit slower. As this is a pretty infrequently occurring case,
* it should be acceptable. */
for (unsigned i = 0;
i++)
{
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
}
}
/* Update the grain directory on disk (doing it before writing the
* grain table will result in a garbled extent if the operation is
* aborted for some reason; otherwise the worst that can happen is
* some unused sectors in the extent). */
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
{
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
}
/* As the final step update the in-memory copy of the GDs. */
}
if (!uFileOffset)
return VERR_INTERNAL_ERROR;
/* Write the data. Always a full grain, or we're in big trouble. */
{
return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
/* Invalidate cache, just in case some code incorrectly allows mixing
* of reads and writes. Normally shouldn't be needed. */
pExtent->uGrainSectorAbs = 0;
/* Write compressed data block and the markers. */
if (RT_FAILURE(rc))
{
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
}
}
else
{
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
}
/* Update the grain table (and the cache). */
{
/* Cache miss, fetch data from disk. */
VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
}
else
{
/* Cache hit. Convert grain table block back to disk format, otherwise
* the code below will write garbage for all but the updated entry. */
for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
}
/* Update grain table on disk. */
VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
{
/* Update backup grain table on disk. */
VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
}
#ifdef VBOX_WITH_VMDK_ESX
{
pExtent->fMetaDirty = true;
}
#endif /* VBOX_WITH_VMDK_ESX */
return rc;
}
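/*
 * Illustrative sketch, not part of the original code: the crash-safe
 * ordering used above when a new grain table is allocated. The table (and
 * its backup) is written first; only then is the grain directory entry
 * switched to reference it. An abort in between leaves merely unused
 * sectors, never a directory entry pointing at garbage. The callbacks are
 * hypothetical.
 */
#if 0
static int vmdkExampleAllocGTOrdered(uint64_t uGTSector,
                                     int (*pfnWriteGT)(uint64_t uGTSector),
                                     int (*pfnWriteGDEntry)(uint64_t uGTSector))
{
    int rc = pfnWriteGT(uGTSector);      /* 1. write the new (zero-filled) grain table */
    if (RT_SUCCESS(rc))
        rc = pfnWriteGDEntry(uGTSector); /* 2. publish it in the grain directory */
    return rc;
}
#endif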
/**
* Internal. Writes the grain and also if necessary the grain tables.
* Uses the grain table cache as a true grain table.
*/
{
int rc;
/* Very strict requirements: always write at least one full grain, with
* proper alignment. Everything else would require reading of already
* written data, which we don't support for obvious reasons. The only
* exception is the last grain, and only if the image size specifies
* that only some portion holds data. In any case the write must be
* within the image limits, no "overshoot" allowed. */
if ( cbWrite == 0
return VERR_INVALID_PARAMETER;
/* Clip write range to at most the rest of the grain. */
cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSector % pExtent->cSectorsPerGrain));
/* Do not allow going back. */
return VERR_VD_VMDK_INVALID_WRITE;
/* Zero byte write optimization. Since we don't tell VBoxHDD that we need
* to allocate something, we also need to detect the situation ourselves. */
return VINF_SUCCESS;
if (uGDEntry != uLastGDEntry)
{
if (RT_FAILURE(rc))
return rc;
{
if (RT_FAILURE(rc))
return rc;
}
}
if (!uFileOffset)
return VERR_INTERNAL_ERROR;
/* Align to sector, as the previous write could have been any size. */
/* Paranoia check: extent type, grain table buffer presence and
* grain table buffer space. Also grain table entry must be clear. */
return VERR_INTERNAL_ERROR;
/* Update grain table entry. */
{
}
if (RT_FAILURE(rc))
{
pExtent->uGrainSectorAbs = 0;
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write compressed data block in '%s'"), pExtent->pszFullname);
}
return rc;
}
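/*
 * Illustrative sketch, not part of the original code: the clipping rule
 * used above. A write may not cross a grain boundary, so it is clipped to
 * the remainder of the grain containing uSector; 512-byte sectors are
 * assumed, matching VMDK_SECTOR2BYTE elsewhere in this file. The helper
 * name is an assumption.
 */
#if 0
static uint64_t vmdkExampleClipToGrain(uint64_t uSector, uint64_t cbWrite,
                                       uint64_t cSectorsPerGrain)
{
    uint64_t cSectorsLeft = cSectorsPerGrain - uSector % cSectorsPerGrain;
    return RT_MIN(cbWrite, cSectorsLeft * 512); /* never past the grain boundary */
}
#endif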
/**
* Internal: Updates the grain table during an async grain allocation.
*/
{
int rc = VINF_SUCCESS;
LogFlowFunc(("pImage=%#p pExtent=%#p pCache=%#p pIoCtx=%#p pGrainAlloc=%#p\n",
/* Update the grain table (and the cache). */
{
/* Cache miss, fetch data from disk. */
LogFlow(("Cache miss, fetch data from disk\n"));
VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
{
pGrainAlloc->fGTUpdateNeeded = true;
/* Leave early, we will be called again after the read completed. */
LogFlowFunc(("Metadata read in progress, leaving\n"));
return rc;
}
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
}
else
{
/* Cache hit. Convert grain table block back to disk format, otherwise
* the code below will write garbage for all but the updated entry. */
for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
}
pGrainAlloc->fGTUpdateNeeded = false;
/* Update grain table on disk. */
VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
{
/* Update backup grain table on disk. */
VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
}
#ifdef VBOX_WITH_VMDK_ESX
{
pExtent->fMetaDirty = true;
}
#endif /* VBOX_WITH_VMDK_ESX */
return rc;
}
/**
* Internal - complete the grain allocation by updating the on-disk grain table if required.
*/
static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
{
int rc = VINF_SUCCESS;
LogFlowFunc(("pBackendData=%#p pIoCtx=%#p pvUser=%#p rcReq=%Rrc\n",
if (!pGrainAlloc->cIoXfersPending)
{
/* Grain allocation completed. */
}
return rc;
}
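/*
 * Illustrative sketch, not part of the original code: the pending-transfer
 * pattern used by the async grain allocation. Every metadata transfer that
 * returns VERR_VD_ASYNC_IO_IN_PROGRESS bumps a counter; each completion
 * drops it, and only the transfer that brings it to zero finalizes the
 * allocation. The names are assumptions.
 */
#if 0
typedef struct VMDKEXAMPLEGRAINALLOC
{
    uint32_t cIoXfersPending; /* number of metadata transfers still in flight */
} VMDKEXAMPLEGRAINALLOC;

static void vmdkExampleXferStarted(VMDKEXAMPLEGRAINALLOC *pAlloc)
{
    pAlloc->cIoXfersPending++;
}

static bool vmdkExampleXferCompleted(VMDKEXAMPLEGRAINALLOC *pAlloc)
{
    /* Returns true only for the last completing transfer. */
    return --pAlloc->cIoXfersPending == 0;
}
#endif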
/**
* Internal. Allocates a new grain table (if necessary) - async version.
*/
{
int rc;
LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
if (!pGrainAlloc)
return VERR_NO_MEMORY;
{
return VERR_OUT_OF_RANGE;
}
else
uRGTSector = 0; /* avoid compiler warning */
if (!uGTSector)
{
LogFlow(("Allocating new grain table\n"));
/* There is no grain table referenced by this grain directory
* entry. So there is absolutely no data in this area. Allocate
* a new grain table and put the reference to it in the GDs. */
if (!uFileOffset)
return VERR_INTERNAL_ERROR;
/* Normally the grain table is preallocated for hosted sparse extents
* that support more than 32 bit sector numbers. So this shouldn't
* ever happen on a valid extent. */
if (uGTSector > UINT32_MAX)
return VERR_VD_VMDK_INVALID_HEADER;
/* Write grain table by writing the required number of grain table
* cache chunks. Allocate memory dynamically here or we flood the
* metadata cache with very small entries. */
if (!paGTDataTmp)
return VERR_NO_MEMORY;
if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
else if (RT_FAILURE(rc))
{
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
}
+ cbGTDataTmp, 512);
{
if (!uFileOffset)
return VERR_INTERNAL_ERROR;
/* Normally the redundant grain table is preallocated for hosted
* sparse extents that support more than 32 bit sector numbers. So
* this shouldn't ever happen on a valid extent. */
if (uRGTSector > UINT32_MAX)
{
return VERR_VD_VMDK_INVALID_HEADER;
}
/* Write grain table by writing the required number of grain table
* cache chunks. Allocate memory dynamically here or we flood the
* metadata cache with very small entries. */
if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
else if (RT_FAILURE(rc))
{
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
}
}
/* Update the grain directory on disk (doing it before writing the
* grain table will result in a garbled extent if the operation is
* aborted for some reason; otherwise the worst that can happen is
* some unused sectors in the extent). */
if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
{
if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
}
/* As the final step update the in-memory copy of the GDs. */
}
if (!uFileOffset)
return VERR_INTERNAL_ERROR;
/* Write the data. Always a full grain, or we're in big trouble. */
if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
else if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
if (!pGrainAlloc->cIoXfersPending)
{
/* Grain allocation completed. */
}
return rc;
}
/**
* Internal. Reads the contents by sequentially going over the compressed
* grains (hoping that they are in sequence).
*/
{
int rc;
/* Do not allow going back. */
return VERR_VD_VMDK_INVALID_STATE;
/* After a previous error do not attempt to recover, as it would need
* seeking (in the general case backwards, which is forbidden). */
if (!pExtent->uGrainSectorAbs)
return VERR_VD_VMDK_INVALID_STATE;
/* Check if we need to read something from the image or if what we have
* in the buffer is enough to fulfill the request. */
{
/* Get the marker from the next data block and skip everything which
* is not a compressed grain. If it's a compressed grain for the
* requested sector (or a later one), read it. */
do
{
NULL);
if (RT_FAILURE(rc))
return rc;
{
/* A marker for something else than a compressed grain. */
NULL);
if (RT_FAILURE(rc))
return rc;
{
case VMDK_MARKER_EOS:
/* Read (or mostly skip) to the end of the file. Uses the
* Marker (LBA sector) as it is unused anyway. This
* makes sure that really everything is read in the
* success case. If this read fails it means the image
* is truncated, but this is harmless, so ignore it. */
+ 511,
break;
case VMDK_MARKER_GT:
break;
case VMDK_MARKER_GD:
break;
case VMDK_MARKER_FOOTER:
uGrainSectorAbs += 2;
break;
case VMDK_MARKER_UNSPECIFIED:
/* Skip over the contents of the unspecified marker
* type 4, which exists in some vSphere-created files. */
/** @todo figure out what the payload means. */
uGrainSectorAbs += 1;
break;
default:
pExtent->uGrainSectorAbs = 0;
return VERR_VD_VMDK_INVALID_STATE;
}
pExtent->cbGrainStreamRead = 0;
}
else
{
/* A compressed grain marker. If it is at or after the sector we are
* interested in, read and decompress the data. */
{
continue;
}
if (RT_FAILURE(rc))
{
pExtent->uGrainSectorAbs = 0;
return rc;
}
{
pExtent->uGrainSectorAbs = 0;
return VERR_VD_VMDK_INVALID_STATE;
}
break;
}
{
/* Must set a non-zero value for pExtent->cbGrainStreamRead or
* the next read would try to get more data, and we're at EOF. */
}
}
{
/* The next data block we have is not for this area, so just return
* that there is no data. */
return VERR_VD_BLOCK_FREE;
}
cbRead);
return VINF_SUCCESS;
}
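/*
 * Illustrative sketch, not part of the original code: the on-disk layout of
 * a streamOptimized marker, as described by the VMDK specification and
 * consumed by the loop above. For a compressed grain cbSize is non-zero and
 * gives the compressed payload size; for metadata blocks cbSize is zero and
 * uType carries one of the VMDK_MARKER_* values. The struct name is an
 * assumption.
 */
#if 0
#pragma pack(1)
typedef struct VMDKEXAMPLEMARKER
{
    uint64_t uSector; /* grain LBA, or a marker-specific value (e.g. payload size in sectors) */
    uint32_t cbSize;  /* compressed grain size in bytes; 0 for metadata markers */
    uint32_t uType;   /* VMDK_MARKER_EOS/GT/GD/FOOTER; only meaningful if cbSize == 0 */
} VMDKEXAMPLEMARKER;
#pragma pack()
#endif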
/**
* Replaces a fragment of a string with the specified string.
*
* @returns Pointer to the allocated UTF-8 string, or NULL if the fragment
*          was not found or the allocation failed.
* @param pszWhere UTF-8 string to search in.
* @param pszWhat UTF-8 string to search for.
* @param pszByWhat UTF-8 string to replace the found string with.
*/
const char *pszByWhat)
{
if (!pszFoundStr)
return NULL;
if (pszNewStr)
{
}
return pszNewStr;
}
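/*
 * Illustrative sketch, not part of the original code: the replace-one-
 * fragment logic documented above, using plain C string routines. Only the
 * first occurrence is replaced; the result pairs with RTStrFree like the
 * other strings in this file. The helper name is an assumption.
 */
#if 0
static char *vmdkExampleStrReplace(const char *pszWhere, const char *pszWhat,
                                   const char *pszByWhat)
{
    const char *pszFound = strstr(pszWhere, pszWhat); /* first occurrence only */
    if (!pszFound)
        return NULL;
    size_t cchPrefix = (size_t)(pszFound - pszWhere);
    size_t cchWhat   = strlen(pszWhat);
    size_t cchBy     = strlen(pszByWhat);
    size_t cchSuffix = strlen(pszFound + cchWhat);
    char *pszNew = RTStrAlloc(cchPrefix + cchBy + cchSuffix + 1);
    if (!pszNew)
        return NULL;
    memcpy(pszNew, pszWhere, cchPrefix);
    memcpy(pszNew + cchPrefix, pszByWhat, cchBy);
    memcpy(pszNew + cchPrefix + cchBy, pszFound + cchWhat, cchSuffix + 1); /* incl. '\0' */
    return pszNew;
}
#endif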
/** @copydoc VBOXHDDBACKEND::pfnCheckIfValid */
{
LogFlowFunc(("pszFilename=\"%s\" pVDIfsDisk=%#p pVDIfsImage=%#p penmType=%#p\n",
int rc = VINF_SUCCESS;
if ( !pszFilename
|| !*pszFilename
{
goto out;
}
if (!pImage)
{
rc = VERR_NO_MEMORY;
goto out;
}
/** @todo speed up this test open (VD_OPEN_FLAGS_INFO) by skipping as
* much as possible in vmdkOpenImage. */
vmdkFreeImage(pImage, false);
if (RT_SUCCESS(rc))
*penmType = VDTYPE_HDD;
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnOpen */
{
LogFlowFunc(("pszFilename=\"%s\" uOpenFlags=%#x pVDIfsDisk=%#p pVDIfsImage=%#p ppBackendData=%#p\n", pszFilename, uOpenFlags, pVDIfsDisk, pVDIfsImage, ppBackendData));
int rc;
/* Check open flags. All valid flags are supported. */
if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
{
goto out;
}
/* Check remaining arguments. */
if ( !VALID_PTR(pszFilename)
|| !*pszFilename
{
goto out;
}
if (!pImage)
{
rc = VERR_NO_MEMORY;
goto out;
}
if (RT_SUCCESS(rc))
*ppBackendData = pImage;
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnCreate */
unsigned uImageFlags, const char *pszComment,
unsigned uPercentStart, unsigned uPercentSpan,
{
LogFlowFunc(("pszFilename=\"%s\" cbSize=%llu uImageFlags=%#x pszComment=\"%s\" pPCHSGeometry=%#p pLCHSGeometry=%#p Uuid=%RTuuid uOpenFlags=%#x uPercentStart=%u uPercentSpan=%u pVDIfsDisk=%#p pVDIfsImage=%#p pVDIfsOperation=%#p ppBackendData=%#p\n", pszFilename, cbSize, uImageFlags, pszComment, pPCHSGeometry, pLCHSGeometry, pUuid, uOpenFlags, uPercentStart, uPercentSpan, pVDIfsDisk, pVDIfsImage, pVDIfsOperation, ppBackendData));
int rc;
if (pIfProgress)
{
}
/* Check the image flags. */
if ((uImageFlags & ~VD_VMDK_IMAGE_FLAGS_MASK) != 0)
{
goto out;
}
/* Check open flags. All valid flags are supported. */
if (uOpenFlags & ~VD_OPEN_FLAGS_MASK)
{
goto out;
}
/* Check size. Maximum 2TB-64K for sparse images, otherwise unlimited. */
if ( !cbSize
{
goto out;
}
/* Check remaining arguments. */
if ( !VALID_PTR(pszFilename)
|| !*pszFilename
|| !VALID_PTR(pPCHSGeometry)
|| !VALID_PTR(pLCHSGeometry)
#ifndef VBOX_WITH_VMDK_ESX
&& !(uImageFlags & VD_IMAGE_FLAGS_FIXED))
#endif
{
goto out;
}
if (!pImage)
{
rc = VERR_NO_MEMORY;
goto out;
}
/* Descriptors for split images can be pretty large, especially if the
* filename is long. So prepare for the worst, and allocate quite some
* memory for the descriptor in this case. */
else
{
rc = VERR_NO_MEMORY;
goto out;
}
if (RT_SUCCESS(rc))
{
/* So far the image is opened in read/write mode. Make sure the
* image is opened in read-only mode if the caller requested that. */
if (uOpenFlags & VD_OPEN_FLAGS_READONLY)
{
vmdkFreeImage(pImage, false);
if (RT_FAILURE(rc))
goto out;
}
*ppBackendData = pImage;
}
else
{
}
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnRename */
{
int rc = VINF_SUCCESS;
char **apszOldName = NULL;
char **apszNewName = NULL;
char **apszNewLines = NULL;
char *pszOldDescName = NULL;
bool fImageFreed = false;
bool fEmbeddedDesc = false;
unsigned cExtents = 0;
char *pszNewBaseName = NULL;
char *pszOldBaseName = NULL;
char *pszNewFullName = NULL;
char *pszOldFullName = NULL;
const char *pszOldImageName;
unsigned i, line;
/* Check arguments. */
if ( !pImage
|| !VALID_PTR(pszFilename)
|| !*pszFilename)
{
goto out;
}
/*
* Allocate an array to store both old and new names of renamed files
* in case we have to roll back the changes. Arrays are initialized
* with zeros. We actually save stuff when and if we change it.
*/
{
rc = VERR_NO_MEMORY;
goto out;
}
/* Save the descriptor size and position. */
{
/* Separate descriptor file. */
fEmbeddedDesc = false;
}
else
{
/* Embedded descriptor file. */
fEmbeddedDesc = true;
}
/* Save the descriptor content. */
for (i = 0; i < DescriptorCopy.cLines; i++)
{
if (!DescriptorCopy.aLines[i])
{
rc = VERR_NO_MEMORY;
goto out;
}
}
/* Prepare both old and new base names used for string replacement. */
/* Prepare both old and new full names used for string replacement. */
/* --- Up to this point we have not done any damage yet. --- */
/* Save the old name for easy access to the old descriptor file. */
/* Save old image name. */
/* Update the descriptor with modified extent names. */
i < cExtents;
{
/* Assume that vmdkStrReplace will fail. */
rc = VERR_NO_MEMORY;
/* Update the descriptor. */
if (!apszNewLines[i])
goto rollback;
}
/* Make sure the descriptor gets written back. */
/* Flush the descriptor now, in case it is embedded. */
for (i = 0; i < cExtents; i++)
{
/* Compose new name for the extent. */
if (!apszNewName[i])
goto rollback;
/* Close the extent file. */
/* Rename the extent file. */
if (RT_FAILURE(rc))
goto rollback;
/* Remember the old name. */
}
/* Release all old stuff. */
vmdkFreeImage(pImage, false);
fImageFreed = true;
/* Last elements of the old and new name arrays are intended for
* storing the descriptor's names.
*/
/* Rename the descriptor file if it's separate. */
if (!fEmbeddedDesc)
{
if (RT_FAILURE(rc))
goto rollback;
/* Save old name only if we may need to change it back. */
}
/* Update pImage with the new information. */
/* Open the new image. */
if (RT_SUCCESS(rc))
goto out;
/* Roll back all changes in case of failure. */
if (RT_FAILURE(rc))
{
int rrc;
if (!fImageFreed)
{
/*
* Some extents may have been closed, close the rest. We will
* re-open the whole thing later.
*/
vmdkFreeImage(pImage, false);
}
/* Rename files back. */
for (i = 0; i <= cExtents; i++)
{
if (apszOldName[i])
{
}
}
/* Restore the old descriptor. */
false /* fCreate */),
false /* fAsyncIO */);
if (fEmbeddedDesc)
{
}
else
{
/* Shouldn't be NULL for a separate descriptor.
* There will be no access to the actual content.
*/
}
/* Get rid of the stuff we implanted. */
/* Re-open the image back. */
}
out:
for (i = 0; i < DescriptorCopy.cLines; i++)
if (DescriptorCopy.aLines[i])
if (apszOldName)
{
for (i = 0; i <= cExtents; i++)
if (apszOldName[i])
RTStrFree(apszOldName[i]);
}
if (apszNewName)
{
for (i = 0; i <= cExtents; i++)
if (apszNewName[i])
RTStrFree(apszNewName[i]);
}
if (apszNewLines)
{
for (i = 0; i < cExtents; i++)
if (apszNewLines[i])
RTStrFree(apszNewLines[i]);
}
if (pszOldDescName)
if (pszOldBaseName)
if (pszNewBaseName)
if (pszOldFullName)
if (pszNewFullName)
return rc;
}
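/*
 * Illustrative sketch, not part of the original code: the rename-with-
 * rollback pattern used above. Files are renamed one by one while the old
 * names are remembered; on failure everything renamed so far is moved back
 * (best effort). The helper name is an assumption; RTFileMove is the IPRT
 * routine for renaming files.
 */
#if 0
static int vmdkExampleRenameAll(char **papszOld, char **papszNew, unsigned cFiles)
{
    unsigned i;
    int rc = VINF_SUCCESS;
    for (i = 0; i < cFiles; i++)
    {
        rc = RTFileMove(papszOld[i], papszNew[i], 0 /* fMove */);
        if (RT_FAILURE(rc))
            break;
    }
    if (RT_FAILURE(rc))
        while (i-- > 0)
            RTFileMove(papszNew[i], papszOld[i], 0 /* fMove */); /* roll back */
    return rc;
}
#endif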
/** @copydoc VBOXHDDBACKEND::pfnClose */
{
int rc;
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnRead */
{
LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
int rc;
|| cbToRead == 0)
{
goto out;
}
&pExtent, &uSectorExtentRel);
if (RT_FAILURE(rc))
goto out;
/* Check access permissions as defined in the extent descriptor. */
{
goto out;
}
/* Clip read range to remain in this extent. */
cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
/* Handle the read according to the current extent type. */
{
case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
if (RT_FAILURE(rc))
goto out;
/* Clip read range to at most the rest of the grain. */
cbToRead = RT_MIN(cbToRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
if (uSectorExtentAbs == 0)
{
else
}
else
{
{
{
if (RT_FAILURE(rc))
{
pExtent->uGrainSectorAbs = 0;
goto out;
}
}
}
else
{
}
}
break;
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
break;
case VMDKETYPE_ZERO:
break;
}
if (pcbActuallyRead)
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnWrite */
{
LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
int rc;
{
goto out;
}
if (cbToWrite == 0)
{
goto out;
}
/* No size check here, will do that later when the extent is located.
* There are sparse images out there which according to the spec are
* invalid, because the total size is not a multiple of the grain size.
* Also for sparse images which are stitched together in odd ways (not at
* grain boundaries, and with the nominal size not being a multiple of the
* grain size), this would prevent writing to the last grain. */
&pExtent, &uSectorExtentRel);
if (RT_FAILURE(rc))
goto out;
/* Check access permissions as defined in the extent descriptor. */
{
goto out;
}
/* Handle the write according to the current extent type. */
{
case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
if (RT_FAILURE(rc))
goto out;
/* Clip write range to at most the rest of the grain. */
cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
{
goto out;
}
if (uSectorExtentAbs == 0)
{
{
{
/* Full block write to a previously unallocated block.
* Check if the caller wants to avoid the automatic alloc. */
if (!(fWrite & VD_WRITE_NO_ALLOC))
{
/* Allocate GT and store the grain. */
}
else
*pcbPreRead = 0;
*pcbPostRead = 0;
}
else
{
/* Clip write range to remain in this extent. */
cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
}
}
else
{
}
}
else
{
{
/* A partial write to a streamOptimized image is simply
* invalid. It requires rewriting already compressed data
* which is somewhere between expensive and impossible. */
pExtent->uGrainSectorAbs = 0;
}
else
{
}
}
break;
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
/* Clip write range to remain in this extent. */
cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
break;
case VMDKETYPE_ZERO:
/* Clip write range to remain in this extent. */
cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
break;
}
if (pcbWriteProcess)
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnFlush */
static int vmdkFlush(void *pBackendData)
{
int rc = VINF_SUCCESS;
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetVersion */
static unsigned vmdkGetVersion(void *pBackendData)
{
if (pImage)
return VMDK_IMAGE_VERSION;
else
return 0;
}
/** @copydoc VBOXHDDBACKEND::pfnGetSize */
{
if (pImage)
else
return 0;
}
/** @copydoc VBOXHDDBACKEND::pfnGetFileSize */
{
if (pImage)
{
{
if (RT_SUCCESS(rc))
}
{
{
if (RT_SUCCESS(rc))
}
}
}
return cb;
}
/** @copydoc VBOXHDDBACKEND::pfnGetPCHSGeometry */
{
int rc;
if (pImage)
{
{
rc = VINF_SUCCESS;
}
else
}
else
LogFlowFunc(("returns %Rrc (PCHS=%u/%u/%u)\n", rc, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetPCHSGeometry */
{
LogFlowFunc(("pBackendData=%#p pPCHSGeometry=%#p PCHS=%u/%u/%u\n", pBackendData, pPCHSGeometry, pPCHSGeometry->cCylinders, pPCHSGeometry->cHeads, pPCHSGeometry->cSectors));
int rc;
if (pImage)
{
{
goto out;
}
{
goto out;
}
if (RT_FAILURE(rc))
goto out;
rc = VINF_SUCCESS;
}
else
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetLCHSGeometry */
{
int rc;
if (pImage)
{
{
rc = VINF_SUCCESS;
}
else
}
else
LogFlowFunc(("returns %Rrc (LCHS=%u/%u/%u)\n", rc, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetLCHSGeometry */
{
LogFlowFunc(("pBackendData=%#p pLCHSGeometry=%#p LCHS=%u/%u/%u\n", pBackendData, pLCHSGeometry, pLCHSGeometry->cCylinders, pLCHSGeometry->cHeads, pLCHSGeometry->cSectors));
int rc;
if (pImage)
{
{
goto out;
}
{
goto out;
}
if (RT_FAILURE(rc))
goto out;
rc = VINF_SUCCESS;
}
else
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetImageFlags */
static unsigned vmdkGetImageFlags(void *pBackendData)
{
unsigned uImageFlags;
if (pImage)
else
uImageFlags = 0;
return uImageFlags;
}
/** @copydoc VBOXHDDBACKEND::pfnGetOpenFlags */
static unsigned vmdkGetOpenFlags(void *pBackendData)
{
unsigned uOpenFlags;
if (pImage)
else
uOpenFlags = 0;
return uOpenFlags;
}
/** @copydoc VBOXHDDBACKEND::pfnSetOpenFlags */
{
int rc;
/* Image must be opened and the new flags must be valid. */
if (!pImage || (uOpenFlags & ~(VD_OPEN_FLAGS_READONLY | VD_OPEN_FLAGS_INFO | VD_OPEN_FLAGS_ASYNC_IO | VD_OPEN_FLAGS_SHAREABLE | VD_OPEN_FLAGS_SEQUENTIAL)))
{
goto out;
}
/* StreamOptimized images need special treatment: reopen is prohibited. */
{
rc = VINF_SUCCESS;
else
goto out;
}
/* Implement this operation via reopening the image. */
vmdkFreeImage(pImage, false);
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetComment */
{
LogFlowFunc(("pBackendData=%#p pszComment=%#p cbComment=%zu\n", pBackendData, pszComment, cbComment));
int rc;
if (pImage)
{
const char *pszCommentEncoded = NULL;
"ddb.comment", &pszCommentEncoded);
if (rc == VERR_VD_VMDK_VALUE_NOT_FOUND)
else if (RT_FAILURE(rc))
goto out;
if (pszComment && pszCommentEncoded)
else
{
if (pszComment)
*pszComment = '\0';
rc = VINF_SUCCESS;
}
if (pszCommentEncoded)
RTStrFree((char *)(void *)pszCommentEncoded);
}
else
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetComment */
{
int rc;
{
goto out;
}
{
goto out;
}
if (pImage)
else
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetUuid */
{
int rc;
if (pImage)
{
rc = VINF_SUCCESS;
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetUuid */
{
int rc;
if (pImage)
{
{
{
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing image UUID in descriptor in '%s'"), pImage->pszFilename);
rc = VINF_SUCCESS;
}
else
}
else
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetModificationUuid */
{
int rc;
if (pImage)
{
rc = VINF_SUCCESS;
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetModificationUuid */
{
int rc;
if (pImage)
{
{
{
/* Only touch the modification UUID if it changed. */
{
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing modification UUID in descriptor in '%s'"), pImage->pszFilename);
}
rc = VINF_SUCCESS;
}
else
}
else
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetParentUuid */
{
int rc;
if (pImage)
{
rc = VINF_SUCCESS;
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetParentUuid */
{
int rc;
if (pImage)
{
{
{
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
rc = VINF_SUCCESS;
}
else
}
else
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnGetParentModificationUuid */
{
int rc;
if (pImage)
{
rc = VINF_SUCCESS;
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnSetParentModificationUuid */
{
int rc;
if (pImage)
{
{
{
if (RT_FAILURE(rc))
return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error storing parent image UUID in descriptor in '%s'"), pImage->pszFilename);
rc = VINF_SUCCESS;
}
else
}
else
}
else
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnDump */
static void vmdkDump(void *pBackendData)
{
if (pImage)
{
vdIfErrorMessage(pImage->pIfError, "Header: uuidModification={%RTuuid}\n", &pImage->ModificationUuid);
vdIfErrorMessage(pImage->pIfError, "Header: uuidParentModification={%RTuuid}\n", &pImage->ParentModificationUuid);
}
}
/** @copydoc VBOXHDDBACKEND::pfnAsyncRead */
{
LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
int rc;
|| cbRead == 0)
{
goto out;
}
&pExtent, &uSectorExtentRel);
if (RT_FAILURE(rc))
goto out;
/* Check access permissions as defined in the extent descriptor. */
{
goto out;
}
/* Clip read range to remain in this extent. */
cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
/* Handle the read according to the current extent type. */
{
case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
if (RT_FAILURE(rc))
goto out;
/* Clip read range to at most the rest of the grain. */
cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
if (uSectorExtentAbs == 0)
else
{
AssertMsg(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), ("Async I/O is not supported for stream optimized VMDK's\n"));
}
break;
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
break;
case VMDKETYPE_ZERO:
rc = VINF_SUCCESS;
break;
}
if (pcbActuallyRead)
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnAsyncWrite */
{
LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
int rc;
{
goto out;
}
if (cbWrite == 0)
{
goto out;
}
/* No size check here, will do that later when the extent is located.
* There are sparse images out there which according to the spec are
* invalid, because the total size is not a multiple of the grain size.
* Also for sparse images which are stitched together in odd ways (not at
* grain boundaries, and with the nominal size not being a multiple of the
* grain size), this would prevent writing to the last grain. */
&pExtent, &uSectorExtentRel);
if (RT_FAILURE(rc))
goto out;
/* Check access permissions as defined in the extent descriptor. */
{
goto out;
}
/* Handle the write according to the current extent type. */
{
case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
if (RT_FAILURE(rc))
goto out;
/* Clip write range to at most the rest of the grain. */
cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
{
goto out;
}
if (uSectorExtentAbs == 0)
{
{
/* Full block write to a previously unallocated block.
* Check if the caller wants to avoid the automatic alloc. */
if (!(fWrite & VD_WRITE_NO_ALLOC))
{
/* Allocate GT and find out where to store the grain. */
}
else
*pcbPreRead = 0;
*pcbPostRead = 0;
}
else
{
/* Clip write range to remain in this extent. */
cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
}
}
else
{
}
break;
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
/* Clip write range to remain in this extent. */
cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
break;
case VMDKETYPE_ZERO:
/* Clip write range to remain in this extent. */
cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
break;
}
if (pcbWriteProcess)
out:
return rc;
}
/** @copydoc VBOXHDDBACKEND::pfnAsyncFlush */
{
int rc = VINF_SUCCESS;
/* Update descriptor if changed. */
/** @todo The descriptor is never updated because
* it remains unchanged during normal operation (only vmdkRename updates it).
* So this part has not been tested so far and requires testing as soon
* as the descriptor might change during async I/O.
*/
{
if ( RT_FAILURE(rc)
&& rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
goto out;
}
{
{
{
case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
goto out;
{
if (!uFileOffset)
{
goto out;
}
goto out;
}
break;
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
/* Nothing to do. */
break;
case VMDKETYPE_ZERO:
default:
AssertMsgFailed(("extent with type %d marked as dirty\n",
break;
}
}
{
case VMDKETYPE_HOSTED_SPARSE:
#ifdef VBOX_WITH_VMDK_ESX
case VMDKETYPE_ESX_SPARSE:
#endif /* VBOX_WITH_VMDK_ESX */
case VMDKETYPE_VMFS:
case VMDKETYPE_FLAT:
/*
* Don't ignore block devices like in the sync case
* (they have an absolute path).
* We might have unwritten data in the writeback cache and
* the async I/O manager will handle these requests properly
* even if the block device doesn't support these requests.
*/
break;
case VMDKETYPE_ZERO:
/* No need to do anything for this extent. */
break;
default:
break;
}
}
out:
return rc;
}
{
/* pszBackendName */
"VMDK",
/* cbSize */
sizeof(VBOXHDDBACKEND),
/* uBackendCaps */
| VD_CAP_VFS,
/* paFileExtensions */
/* paConfigInfo */
NULL,
/* hPlugin */
/* pfnCheckIfValid */
/* pfnOpen */
/* pfnCreate */
/* pfnRename */
/* pfnClose */
/* pfnRead */
/* pfnWrite */
/* pfnFlush */
/* pfnGetVersion */
/* pfnGetSize */
/* pfnGetFileSize */
/* pfnGetPCHSGeometry */
/* pfnSetPCHSGeometry */
/* pfnGetLCHSGeometry */
/* pfnSetLCHSGeometry */
/* pfnGetImageFlags */
/* pfnGetOpenFlags */
/* pfnSetOpenFlags */
/* pfnGetComment */
/* pfnSetComment */
/* pfnGetUuid */
/* pfnSetUuid */
/* pfnGetModificationUuid */
/* pfnSetModificationUuid */
/* pfnGetParentUuid */
/* pfnSetParentUuid */
/* pfnGetParentModificationUuid */
/* pfnSetParentModificationUuid */
/* pfnDump */
/* pfnGetTimeStamp */
NULL,
/* pfnGetParentTimeStamp */
NULL,
/* pfnSetParentTimeStamp */
NULL,
/* pfnGetParentFilename */
NULL,
/* pfnSetParentFilename */
NULL,
/* pfnAsyncRead */
/* pfnAsyncWrite */
/* pfnAsyncFlush */
/* pfnComposeLocation */
/* pfnComposeName */
/* pfnCompact */
NULL,
/* pfnResize */
NULL,
/* pfnDiscard */
};