dnode_sync.c revision d2b3cbbd7f3a37bc7c01b526d3eb312acd070423
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, 2014 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/range_tree.h>
#include <sys/zfeature.h>
static void
dnode_increase_indirection(dnode_t *dn, dmu_tx_t *tx)
{
int i;
/* this dnode can't be paged out because it's dirty */
/* check for existing blkptrs in the dnode */
	for (i = 0; i < nblkptr; i++)
		if (!BP_IS_HOLE(&dn->dn_phys->dn_blkptr[i]))
			break;
if (i != nblkptr) {
/* transfer dnode's block pointers to new indirect block */
}
/* set dbuf's parent pointers to new indirect buf */
for (i = 0; i < nblkptr; i++) {
continue;
#ifdef DEBUG
#endif /* DEBUG */
continue;
}
		if (db->db.db_data)
			child->db_blkptr = (blkptr_t *)db->db.db_data + i;
		else
			child->db_blkptr = NULL;
		dprintf_dbuf_bp(child, child->db_blkptr,
		    "changed db_blkptr to new indirect %s", "");
}
}
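
/*
 * An illustrative sketch of the "increase indirection" step above, assuming
 * the reader only needs the general shape: the pointers that currently live
 * in the object's root are copied into a newly allocated top-level indirect
 * node, the old root slots become holes, and the object gains one level.
 * The real function additionally repoints any cached child buffers at the
 * new indirect buffer, as its comments describe.  All example_* names below
 * are hypothetical and are not part of ZFS.
 */
typedef struct example_indirect {
	uint64_t ei_ptrs[3];		/* block pointers held by this node */
} example_indirect_t;

typedef struct example_object {
	uint64_t eo_rootptrs[3];	/* pointers embedded in the object */
	int eo_nlevels;
	example_indirect_t *eo_top;	/* new top-level indirect node */
} example_object_t;

static void
example_increase_indirection(example_object_t *eo, example_indirect_t *newtop)
{
	int i;

	/* transfer the object's embedded pointers to the new indirect node */
	for (i = 0; i < 3; i++) {
		newtop->ei_ptrs[i] = eo->eo_rootptrs[i];
		eo->eo_rootptrs[i] = 0;	/* the root slot is now a hole */
	}

	/* the object now has one more level of indirection */
	eo->eo_top = newtop;
	eo->eo_nlevels++;
}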
static void
free_blocks(dnode_t *dn, blkptr_t *bp, int num, dmu_tx_t *tx)
{
uint64_t bytesfreed = 0;
	for (int i = 0; i < num; i++, bp++) {
		if (BP_IS_HOLE(bp))
			continue;
/*
* Save some useful information on the holes being
* punched, including logical size, type, and indirection
* level. Retaining birth time enables detection of when
* holes are punched for reducing the number of free
* records transmitted during a zfs send.
*/
}
}
}
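
/*
 * An illustrative sketch of the hole-punching described in the comment
 * above, assuming a simplified block pointer.  The physical address is
 * discarded, but the logical size, type, and indirection level are retained,
 * and the hole is stamped with the current transaction as its birth time so
 * that a later incremental send can tell the hole is newly punched.  All
 * example_* names are hypothetical and are not part of ZFS.
 */
typedef struct example_blkptr {
	uint64_t eb_physaddr;		/* 0 means hole */
	uint64_t eb_lsize;		/* logical size */
	int eb_type;			/* block type */
	int eb_level;			/* indirection level */
	uint64_t eb_birth_txg;		/* txg in which this bp was born */
} example_blkptr_t;

static void
example_punch_hole(example_blkptr_t *bp, uint64_t current_txg)
{
	uint64_t lsize = bp->eb_lsize;
	int type = bp->eb_type;
	int level = bp->eb_level;
	example_blkptr_t zero = { 0 };

	/* clear the whole pointer: physical address and all attributes */
	*bp = zero;

	/* restore the logical attributes and record the hole's birth */
	bp->eb_lsize = lsize;
	bp->eb_type = type;
	bp->eb_level = level;
	bp->eb_birth_txg = current_txg;
}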
#ifdef ZFS_DEBUG
static void
free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
int j;
continue;
/* data_old better be zeroed */
if (dr) {
if (buf[j] != 0) {
panic("freed data not zero: "
"child=%p i=%d off=%d num=%d\n",
}
}
}
/*
* db_data better be zeroed unless it's dirty in a
* future txg.
*/
if (buf[j] != 0) {
panic("freed data not zero: "
"child=%p i=%d off=%d num=%d\n",
}
}
}
}
}
#endif
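
/*
 * An illustrative sketch of the debug-only verification above: after a range
 * has been freed, every word of the affected buffers should read back as
 * zero unless a later transaction has already re-dirtied them.  The
 * example_* name is hypothetical.
 */
static void
example_verify_zeroed(const uint64_t *buf, int nwords)
{
	int i;

	for (i = 0; i < nwords; i++) {
		if (buf[i] != 0) {
			panic("freed data not zero: off=%d val=%llx\n",
			    i, (unsigned long long)buf[i]);
		}
	}
}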
static void
free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
{
/*
* There is a small possibility that this block will not be cached:
* 1 - if level > 1 and there are no children with level <= 1
* 2 - if this block was evicted since we read it from
* dmu_tx_hold_free().
*/
} else {
}
} else {
if (BP_IS_HOLE(bp))
continue;
}
}
/* If this whole block is free, free ourself too. */
	for (i = 0, bp = db->db.db_data; i < 1 << epbs; i++, bp++) {
		if (!BP_IS_HOLE(bp))
			break;
}
if (i == 1 << epbs) {
/* didn't find any non-holes */
} else {
/*
* Partial block free; must be marked dirty so that it
* will be written out.
*/
}
}
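
/*
 * An illustrative sketch of the recursive shape described above: free the
 * requested children, then scan this block's remaining pointers.  If every
 * one is a hole the block itself can be freed by the caller; otherwise it is
 * only partially free and must be left dirty so the surviving pointers get
 * written out.  All example_* names are hypothetical.
 */
typedef struct example_indirect_blk {
	uint64_t ib_ptrs[128];		/* child block pointers; 0 == hole */
	int ib_nptrs;			/* number of valid slots */
	int ib_dirty;			/* must be written out this txg */
} example_indirect_blk_t;

/* Returns 1 if the block became entirely free, 0 if it remains partial. */
static int
example_free_children(example_indirect_blk_t *ib, int first, int count)
{
	int i;

	/* punch holes for the requested children */
	for (i = first; i < first + count && i < ib->ib_nptrs; i++)
		ib->ib_ptrs[i] = 0;

	/* if this whole block is now free, the caller can free it too */
	for (i = 0; i < ib->ib_nptrs; i++) {
		if (ib->ib_ptrs[i] != 0)
			break;
	}
	if (i == ib->ib_nptrs)
		return (1);

	/* partial block free; must be marked dirty so it is written out */
	ib->ib_dirty = 1;
	return (0);
}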
/*
* Traverse the indicated range of the provided file
* and "free" all the blocks contained there.
*/
static void
dnode_sync_free_range_impl(dnode_t *dn, uint64_t blkid, uint64_t nblks,
    dmu_tx_t *tx)
{
	if (blkid > dn->dn_maxblkid)
		return;

	if (blkid + nblks > dn->dn_maxblkid) {
		nblks = dn->dn_maxblkid - blkid + 1;
		trunc = B_TRUE;
	}
/* There are no indirect blocks in the object */
if (dnlevel == 1) {
		if (blkid >= dn->dn_phys->dn_nblkptr) {
			/* this range was never made persistent */
			return;
}
} else {
if (BP_IS_HOLE(bp))
continue;
}
}
if (trunc) {
}
}
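
/*
 * An illustrative sketch of how a flat range of block ids maps onto
 * top-level slots when indirect levels exist, assuming epbs is the log2 of
 * the number of pointers per indirect block.  Each top-level pointer covers
 * (1 << (epbs * (nlevels - 1))) leaf blocks, so the range
 * [blkid, blkid + nblks) is carved into per-slot pieces before descending.
 * All example_* names are hypothetical.
 */
static void
example_free_range(uint64_t blkid, uint64_t nblks, int nlevels, int epbs,
    void (*free_in_slot)(uint64_t slot, uint64_t first, uint64_t last))
{
	/* number of leaf blocks addressed by one top-level pointer */
	uint64_t span = 1ULL << (epbs * (nlevels - 1));
	uint64_t first_slot = blkid / span;
	uint64_t last_slot = (blkid + nblks - 1) / span;
	uint64_t slot;

	for (slot = first_slot; slot <= last_slot; slot++) {
		uint64_t first = (slot == first_slot) ? blkid % span : 0;
		uint64_t last = (slot == last_slot) ?
		    (blkid + nblks - 1) % span : span - 1;

		/* free the leaves [first, last] beneath this top-level slot */
		free_in_slot(slot, first, last);
	}
}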
typedef struct dnode_sync_free_range_arg {
	dnode_t *dsfra_dnode;
	dmu_tx_t *dsfra_tx;
} dnode_sync_free_range_arg_t;
static void
dnode_sync_free_range(void *arg, uint64_t blkid, uint64_t nblks)
{
}
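
/*
 * An illustrative sketch of the arg-struct-plus-callback pattern used above
 * to walk the set of freed ranges: the caller packs the context the callback
 * needs into a small struct and hands it, together with the callback, to the
 * range walker, which invokes the callback once per segment and then drops
 * the segment.  All example_* names are hypothetical.
 */
typedef struct example_range {
	uint64_t er_start;
	uint64_t er_len;
} example_range_t;

typedef void example_range_func_t(void *arg, uint64_t start, uint64_t len);

static void
example_ranges_vacate(example_range_t *ranges, int nranges,
    example_range_func_t *func, void *arg)
{
	int i;

	for (i = 0; i < nranges; i++) {
		/* hand the segment to the callback, then forget it */
		func(arg, ranges[i].er_start, ranges[i].er_len);
		ranges[i].er_start = 0;
		ranges[i].er_len = 0;
	}
}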
/*
* Try to kick all the dnode's dbufs out of the cache...
*/
void
dnode_evict_dbufs(dnode_t *dn)
{
int progress;
int pass = 0;
do {
#ifdef DEBUG
#endif /* DEBUG */
} else {
}
}
/*
* NB: we need to drop dn_dbufs_mtx between passes so
* that any DB_EVICTING dbufs can make progress.
* Ideally, we would have some cv we could wait on, but
* since we don't, just wait a bit to give the other
* thread a chance to run.
*/
if (evicting)
delay(1);
pass++;
} while (progress);
}
}
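
/*
 * An illustrative sketch of the retry pattern the comment above describes:
 * keep making eviction passes while progress is being made, drop the lock
 * between passes so concurrent evictions can finish, and sleep briefly when
 * another thread still holds a buffer mid-eviction.  The example_* name and
 * the evict_one_pass callback are hypothetical.
 */
static void
example_evict_all(kmutex_t *lock, int (*evict_one_pass)(int *evicting))
{
	int progress;

	do {
		int evicting = 0;

		mutex_enter(lock);
		progress = evict_one_pass(&evicting);
		mutex_exit(lock);

		/*
		 * The lock is dropped between passes; if another thread
		 * is mid-eviction, give it a moment to run before we
		 * try again.
		 */
		if (evicting)
			delay(1);
	} while (progress);
}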
static void
dnode_undirty_dbufs(list_t *list)
{
/* XXX - use dbuf_undirty()? */
} else {
}
}
}
static void
dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
{
/*
* Our contents should have been freed in dnode_sync() by the
* free range record inserted by the caller of dnode_free().
*/
/*
* XXX - It would be nice to assert this, but we may still
* have residual holds from async evictions from the arc...
*
* zfs_obj_to_path() also depends on this being
* commented out.
*
* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
*/
/* Undirty next bits */
/* ASSERT(blkptrs are zero); */
dn->dn_maxblkid = 0;
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
/*
* Now that we've released our hold, the dnode may
 * be evicted, so we mustn't access it.
*/
}
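
/*
 * An illustrative sketch of the discipline described above: reset the
 * object's bookkeeping while a hold is still held, then drop the hold and
 * touch nothing afterwards, because the object may be evicted as soon as
 * the last reference goes away.  All example_* names are hypothetical.
 */
typedef struct example_node {
	uint64_t en_maxblkid;
	uint64_t en_allocated_txg;
	uint64_t en_free_txg;
	int en_holds;
} example_node_t;

static void
example_free_node(example_node_t *en)
{
	/* reset per-object state while we still hold a reference */
	en->en_maxblkid = 0;
	en->en_allocated_txg = 0;
	en->en_free_txg = 0;

	/* drop our hold; the node may be evicted immediately afterwards */
	en->en_holds--;
	/* no further access to *en past this point */
}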
/*
* Write out the dnode's dirty buffers.
*/
void
dnode_sync(dnode_t *dn, dmu_tx_t *tx)
{
static const dnode_phys_t zerodn = { 0 };
} else {
/* Once we account for it, we should always account for it. */
}
/* The dnode is newly allocated or reallocated */
/* this is a first alloc, not a realloc */
}
}
}
SPA_MINBLOCKSIZE) == 0);
dnp->dn_datablkszsec ||
}
		if (dn->dn_next_bonuslen[txgoff] == DN_ZERO_BONUSLEN)
			dnp->dn_bonuslen = 0;
		else
			dnp->dn_bonuslen = dn->dn_next_bonuslen[txgoff];
}
}
/*
* We will either remove a spill block when a file is being removed
* or we have been asked to remove it.
*/
kill_spill = B_TRUE;
}
}
/*
* Just take the live (open-context) values for checksum and compress.
* Strictly speaking it's a future leak, but nothing bad happens if we
* start using the new checksum or compress algorithm a little early.
*/
if (kill_spill) {
}
/* process all the "freed" ranges in the file */
}
	if (freeing_dnode) {
		dnode_sync_free(dn, tx);
		return;
}
/* this should only happen on a realloc */
/* zero the new blkptrs we are gaining */
			bzero(dnp->dn_blkptr + dnp->dn_nblkptr,
			    sizeof (blkptr_t) *
			    (dn->dn_next_nblkptr[txgoff] - dnp->dn_nblkptr));
#ifdef ZFS_DEBUG
} else {
int i;
/* the blkptrs we are losing better be unallocated */
			for (i = dn->dn_next_nblkptr[txgoff];
			    i < dnp->dn_nblkptr; i++)
				ASSERT(BP_IS_HOLE(&dnp->dn_blkptr[i]));
#endif
}
}
}
}
/*
* Although we have dropped our reference to the dnode, it
 * can't be evicted until it's written, and we haven't yet
* initiated the IO for the dnode's dbuf.
*/
}
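
/*
 * An illustrative sketch of the zero-template trick behind the
 * "static const dnode_phys_t zerodn = { 0 }" declaration above: a freshly
 * allocated on-disk structure can be recognized by comparing it against an
 * all-zero instance of the same type.  All example_* names are hypothetical.
 */
typedef struct example_phys {
	uint64_t ep_type;
	uint64_t ep_used;
	uint64_t ep_flags;
} example_phys_t;

static int
example_is_untouched(const example_phys_t *ep)
{
	static const example_phys_t example_zero = { 0 };

	return (bcmp(ep, &example_zero, sizeof (example_phys_t)) == 0);
}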