dmu_send.c revision feaa74e41c407fe56e66a47e097c2842d4f65b9f
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
#include <sys/dmu_impl.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/zfs_onexit.h>
static char *dmu_recv_tag = "dmu_recv_tag";
/*
* The list of data whose inclusion in a send stream can be pending from
* one call to backup_cb to another. Multiple calls to dump_free() and
* dump_freeobjects() can be aggregated into a single DRR_FREE or
* DRR_FREEOBJECTS replay record.
*/
typedef enum {
	PENDING_NONE,
	PENDING_FREE,
	PENDING_FREEOBJECTS
} pendop_t;
struct backuparg {
int err;
};
static int
{
}
static int
{
/*
* If there is a pending op, but it's not PENDING_FREE, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_FREE records can only be aggregated with
 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
 * aggregated with other DRR_FREEOBJECTS records).
*/
return (EINTR);
}
/*
* There should never be a PENDING_FREE if length is -1
* (because dump_dnode is the only place where this
* function is called with a -1, and only after flushing
* any pending record).
*/
/*
* Check to see whether this free block can be aggregated
 * with the pending one.
*/
return (0);
} else {
/* not a continuation. Push out pending record */
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
}
}
/* create a FREE record and make it pending */
if (length == -1ULL) {
return (EINTR);
} else {
}
return (0);
}
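/*
 * Illustrative sketch, not part of the original file: the aggregation
 * decision dump_free() makes, reduced to its core.  A free request for
 * the same object that begins exactly where the pending DRR_FREE record
 * ends simply extends that record; anything else means the pending
 * record has to be pushed out to the stream first.  The struct and
 * function names below are hypothetical stand-ins for the backuparg
 * bookkeeping.
 */
struct pending_free_sketch {
	uint64_t object;	/* object whose blocks are being freed */
	uint64_t offset;	/* start of the pending free range */
	uint64_t length;	/* length of the pending free range */
};

/* Returns 1 if the new free was merged, 0 if the caller must flush first. */
static int
sketch_try_merge_free(struct pending_free_sketch *pending,
    uint64_t object, uint64_t offset, uint64_t length)
{
	if (pending->object == object &&
	    pending->offset + pending->length == offset) {
		pending->length += length;	/* contiguous: aggregate */
		return (1);
	}
	return (0);	/* not a continuation */
}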
static int
{
/*
* If there is any kind of pending aggregation (currently either
* a grouping of free objects or free blocks), push it out to
* the stream, since aggregation can't be done across operations
* of different types.
*/
return (EINTR);
}
/* write a DATA record */
return (EINTR);
return (EINTR);
return (0);
}
static int
{
return (EINTR);
}
/* write a SPILL record */
return (EINTR);
return (EINTR);
return (0);
}
static int
{
/*
* If there is a pending op, but it's not PENDING_FREEOBJECTS,
* push it out, since free block aggregation can only be done for
* blocks of the same type (i.e., DRR_FREE records can only be
 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
 * can only be aggregated with other DRR_FREEOBJECTS records).
*/
return (EINTR);
}
/*
* See whether this free object array can be aggregated
 * with the pending one.
*/
return (0);
} else {
/* can't be aggregated. Push out pending record */
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
}
}
/* write a FREEOBJECTS record */
return (0);
}
static int
{
return (EINTR);
}
/* write an OBJECT record */
return (EINTR);
return (EINTR);
/* free anything past the end of the file */
return (EINTR);
return (EINTR);
return (0);
}
/* ARGSUSED */
static int
{
int err = 0;
return (EINTR);
return (0);
return (0);
} else if (type == DMU_OT_DNODE) {
int i;
return (EIO);
for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
(DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
if (err)
break;
}
return (EIO);
} else { /* it's a level-0 block of a regular object */
return (EIO);
}
return (err);
}
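/*
 * Illustrative sketch, not part of the original file: how the DMU_OT_DNODE
 * branch of backup_cb above derives object numbers.  A metadnode block of
 * blksz bytes holds (blksz >> DNODE_SHIFT) dnodes, and dnode i of block
 * blkid corresponds to object (blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT))
 * + i, matching the expression in the loop above.  The shift macros come
 * from the dnode headers the loop already relies on.
 */
static uint64_t
sketch_dnode_block_to_object(uint64_t blkid, int i)
{
	return ((blkid << (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i);
}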
int
{
int err;
/* tosnap must be a snapshot */
return (EINVAL);
/* fromsnap must be an earlier snapshot from the same fs as tosnap */
return (EXDEV);
if (fromorigin) {
if (fromsnap)
return (EINVAL);
if (err)
return (err);
} else {
}
}
#ifdef _KERNEL
return (EINVAL);
if (version == ZPL_VERSION_SA) {
}
}
#endif
if (fromorigin)
if (fromds)
if (fromds)
if (fromorigin)
}
if (err) {
return (err);
}
}
return (0);
}
struct recvbeginsyncarg {
const char *tofs;
const char *tosnap;
void *tag;
char clonelastname[MAXNAMELEN];
};
/* ARGSUSED */
static int
{
int err;
/* make sure it's a snap in the same pool */
return (EXDEV);
return (EINVAL);
return (ENODEV);
}
return (0);
}
static void
{
/* Create and open new dataset. */
}
}
/* ARGSUSED */
static int
{
int err;
/* must not have any changes since most recent snapshot */
return (ETXTBSY);
/* new snapshot name must not exist */
if (err == 0)
return (EEXIST);
return (err);
/* if incremental, most recent snapshot must match fromguid */
return (ENODEV);
/*
* most recent snapshot must match fromguid, or there are no
* changes since the fromguid one
*/
while (obj != 0) {
if (err)
return (ENODEV);
return (ENODEV);
}
break; /* it's ok */
}
}
if (obj == 0)
return (ENODEV);
}
} else {
/* if full, most recent snapshot must be $ORIGIN */
return (ENODEV);
}
/* temporary clone name must not exist */
if (err == 0)
return (EEXIST);
return (err);
return (0);
}
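/*
 * Illustrative sketch, not part of the original file: the shape of the
 * fromguid search in the loop above.  Starting from the most recent
 * snapshot, walk backwards through the snapshot chain until a snapshot
 * whose guid matches the stream's fromguid is found; running off the end
 * of the chain means the incremental source is missing, which the check
 * reports as ENODEV.  sketch_snap_t is a hypothetical stand-in for the
 * dsl_dataset objects the real code holds and releases as it walks.
 */
typedef struct sketch_snap {
	uint64_t guid;
	struct sketch_snap *prev;	/* previous (older) snapshot, or NULL */
} sketch_snap_t;

static int
sketch_find_fromguid(const sketch_snap_t *newest, uint64_t fromguid)
{
	const sketch_snap_t *s;

	for (s = newest; s != NULL; s = s->prev) {
		if (s->guid == fromguid)
			return (0);	/* it's ok */
	}
	return (ENODEV);	/* fromguid not found in the snapshot chain */
}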
/* ARGSUSED */
static void
{
/* create and open the temporary clone */
/*
* If we actually created a non-clone, we need to create the
* objset in our new dataset.
*/
}
}
static boolean_t
{
int featureflags;
/* Verify pool version supports SA if SA_SPILL feature set */
return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
}
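/*
 * Illustrative sketch, not part of the original file: the shape of the
 * check above.  DMU_GET_FEATUREFLAGS() extracts the feature bits from the
 * begin record's drr_versioninfo; a stream that used the SA spill-block
 * format can only be received into a pool at or above SPA_VERSION_SA.
 * The function name and the explicit spa_vers parameter are assumptions
 * made for the sketch.
 */
static boolean_t
sketch_stream_needs_unsupported_sa(uint64_t drr_versioninfo, uint64_t spa_vers)
{
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drr_versioninfo);

	return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_vers < SPA_VERSION_SA);
}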
/*
* NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
* succeeds; otherwise we will leak the holds on the datasets.
*/
int
{
int err = 0;
struct recvbeginsyncarg rbsa = { 0 };
int flags;
else
return (EINVAL);
if (byteswap) {
}
return (EINVAL);
if (flags & DRR_FLAG_CI_DATA)
/*
* Process the begin in syncing context.
*/
/* open the dataset we are logically receiving into */
if (err == 0) {
return (ENOTSUP);
}
/* target fs already exists; recv into temp clone */
/* Can't recv a clone into an existing fs */
if (flags & DRR_FLAG_CLONE) {
return (EINVAL);
}
/* must not have an incremental recv already in progress */
return (EBUSY);
}
/* tmp clone name is: tofs/%tosnap */
"%%%s", tosnap);
if (err) {
return (err);
}
/* target fs does not exist; must be a full backup or clone */
char *cp;
/*
* If it's a non-clone incremental, we are missing the
* target fs, so fail the recv.
*/
return (ENOENT);
/* Open the parent of tofs */
*cp = '\0';
*cp = '/';
if (err)
return (err);
return (ENOTSUP);
}
if (err)
return (err);
}
return (err);
}
struct restorearg {
	int err;
	int byteswap;
	vnode_t *vp;
	char *buf;
	uint64_t voff;
	int bufsize; /* amount of memory allocated for buf */
	zio_cksum_t cksum;
	avl_tree_t *guid_to_ds_map;
};
typedef struct guid_map_entry {
	uint64_t	guid;
	dsl_dataset_t	*gme_ds;
	avl_node_t	avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}
/*
* This function is a callback used by dmu_objset_find() (which
* enumerates the object sets) to build an avl tree that maps guids
* to datasets. The resulting table is used when processing DRR_WRITE_BYREF
* send stream records. These records, which are used in dedup'ed
* streams, do not contain data themselves, but refer to a copy
* of the data block that has already been written because it was
* earlier in the stream. That previous copy is identified by the
* guid of the dataset with the referenced data.
*/
int
{
dsl_pool_t *dp;
int err;
return (0);
if (err) {
/*
* Skip this snapshot and move on. It's not
* clear why this would ever happen, but the
 * remainder of the snapshot stream can be
* processed.
*/
return (0);
}
}
return (0);
}
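/*
 * Illustrative sketch, not part of the original file: how a dedup'ed
 * stream consumer resolves a reference guid to its dataset using the avl
 * tree built by the callback above.  The field names follow the
 * guid_map_entry_t definition earlier in this file; the function name is
 * hypothetical.
 */
static dsl_dataset_t *
sketch_lookup_ds_by_guid(avl_tree_t *guid_map, uint64_t guid)
{
	guid_map_entry_t gmesrch, *gmep;
	avl_index_t where;

	gmesrch.guid = guid;
	gmep = avl_find(guid_map, &gmesrch, &where);
	return (gmep != NULL ? gmep->gme_ds : NULL);
}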
static void
free_guid_map_onexit(void *arg)
{
}
}
static void *
{
void *rv;
int done = 0;
/* some things will require 8-byte alignment, so everything must */
return (NULL);
}
else
return (rv);
}
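/*
 * Illustrative sketch, not part of the original file: why restore_read()
 * insists on 8-byte lengths.  Variable-length payloads in the stream
 * (e.g. a dnode's bonus buffer) are read in chunks rounded up to the next
 * multiple of 8 so every record stays 8-byte aligned; P2ROUNDUP() is the
 * usual macro for that rounding.
 */
static int
sketch_aligned_len(int len)
{
	return (P2ROUNDUP(len, 8));
}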
static void
{
case DRR_BEGIN:
break;
case DRR_OBJECT:
/* DO64(drr_object.drr_allocation_txg); */
break;
case DRR_FREEOBJECTS:
break;
case DRR_WRITE:
break;
case DRR_WRITE_BYREF:
break;
case DRR_FREE:
break;
case DRR_SPILL:
break;
case DRR_END:
break;
}
}
static int
{
int err;
return (EINVAL);
}
return (EINVAL);
if (drro->drr_bonuslen) {
}
/* currently free, want to be allocated */
if (err) {
return (err);
}
} else {
/* currently allocated, want to be allocated */
}
if (err) {
return (EINVAL);
}
if (err) {
return (err);
}
tx);
drro->drr_bonuslen);
}
}
return (0);
}
/* ARGSUSED */
static int
struct drr_freeobjects *drrfo)
{
return (EINVAL);
int err;
continue;
if (err)
return (err);
}
return (0);
}
static int
{
void *data;
int err;
return (EINVAL);
return (EINVAL);
if (err) {
return (err);
}
return (0);
}
/*
* Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
* streams to refer to a copy of the data that is already on the
* system because it came in earlier in the stream. This function
* finds the earlier copy of the data, and uses that copy instead of
* data from the stream to fulfill this write.
*/
static int
struct drr_write_byref *drrwbr)
{
int err;
return (EINVAL);
/*
* If the GUID of the referenced dataset is different from the
* GUID of the target dataset, find the referenced dataset.
*/
return (EINVAL);
}
return (EINVAL);
} else {
}
return (err);
if (err) {
return (err);
}
return (0);
}
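/*
 * Illustrative sketch, not part of the original file: the essence of
 * replaying a WRITE_BYREF record.  The record carries no data, so the
 * receiver reads the referenced block back out of the already-received
 * dataset (located by guid, as above) and writes that copy at the
 * record's own object/offset.  Buffer sizing, tx setup, and error paths
 * are omitted; the function name is hypothetical.
 */
static int
sketch_copy_byref_block(objset_t *ref_os, objset_t *os,
    uint64_t ref_object, uint64_t ref_offset,
    uint64_t object, uint64_t offset, uint64_t length,
    void *buf, dmu_tx_t *tx)
{
	int err;

	err = dmu_read(ref_os, ref_object, ref_offset, length, buf,
	    DMU_READ_PREFETCH);
	if (err == 0)
		dmu_write(os, object, offset, length, buf, tx);
	return (err);
}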
static int
{
void *data;
int err;
return (EINVAL);
return (EINVAL);
return (err);
}
if (err) {
return (err);
}
return (0);
}
/* ARGSUSED */
static int
{
int err;
return (EINVAL);
return (EINVAL);
return (err);
}
/*
* NB: callers *must* call dmu_recv_end() if this succeeds.
*/
int
{
struct restorearg ra = { 0 };
int featureflags;
{
/* compute checksum of drr_begin record */
} else {
}
}
}
/* these were verified in dmu_recv_begin */
/*
* Open the objset we are modifying.
*/
/* if this stream is dedup'ed, set up the avl tree for guid mapping */
if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
if (cleanup_fd == -1) {
goto out;
}
cleanup_fd = -1;
goto out;
}
if (*action_handlep == 0) {
sizeof (guid_map_entry_t),
(void *)ra.guid_to_ds_map,
goto out;
} else {
(void **)&ra.guid_to_ds_map);
goto out;
}
}
/*
* Read records and process them.
*/
goto out;
}
case DRR_OBJECT:
{
/*
* We need to make a copy of the record header,
* because restore_{object,write} may need to
* restore_read(), which will invalidate drr.
*/
break;
}
case DRR_FREEOBJECTS:
{
struct drr_freeobjects drrfo =
break;
}
case DRR_WRITE:
{
break;
}
case DRR_WRITE_BYREF:
{
struct drr_write_byref drrwbr =
break;
}
case DRR_FREE:
{
break;
}
case DRR_END:
{
/*
* We compare against the *previous* checksum
* value, because the stored checksum is of
* everything before the DRR_END record.
*/
goto out;
}
case DRR_SPILL:
{
break;
}
default:
goto out;
}
}
out:
/*
* destroy what we created, so we don't leave it in the
* inconsistent restoring state.
*/
B_FALSE);
}
}
}
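/*
 * Illustrative sketch, not part of the original file: the DRR_END check
 * described above.  The sender's drr_checksum covers every byte of the
 * stream that precedes the DRR_END record, so the receiver must compare
 * it against the running checksum it had saved *before* folding the
 * DRR_END record itself into that running value.  The function name is
 * hypothetical; ZIO_CHECKSUM_EQUAL() is the standard word-by-word
 * comparison macro.
 */
static boolean_t
sketch_end_record_ok(zio_cksum_t cksum_before_end, struct drr_end *drre)
{
	return (ZIO_CHECKSUM_EQUAL(cksum_before_end, drre->drr_checksum));
}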
struct recvendsyncarg {
char *tosnap;
};
static int
{
}
static void
{
/* set snapshot's creation time and guid */
}
static int
{
struct recvendsyncarg resa;
int err;
/*
* XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
* expects it to have a ds_user_ptr (and zil), but clone_swap()
* can close it.
*/
if (err)
goto out;
} else {
B_FALSE);
return (EBUSY);
}
if (err) {
/* swap back */
}
out:
return (err);
}
static int
{
struct recvendsyncarg resa;
int err;
/*
* XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
* expects it to have a ds_user_ptr (and zil), but clone_swap()
* can close it.
*/
if (err) {
/* clean up the fs we just recv'd into */
} else {
/* release the hold from dmu_recv_begin */
}
return (err);
}
int
{
return (dmu_recv_existing_end(drc));
else
return (dmu_recv_new_end(drc));
}