dmu_tx.c revision 4a7f2a75ca428b8c3910f76ab6cea0dfadba8914
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/zfs_context.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}
dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os->os_dsl_dataset);
	return (tx);
}
dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}
int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}
int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os->os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (dn && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}
void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}
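/*
 * Editorial sketch (not part of the original source): dmu_tx_check_ioerr()
 * is used two ways by the hold functions below.  With a NULL zio the block
 * is read synchronously and the error returned directly; with a non-NULL
 * zio (from zio_root(..., ZIO_FLAG_CANFAIL)) the reads are queued so that
 * several blocks can be checked in parallel and the errors collected once
 * with zio_wait(), e.g.:
 *
 *	zio = zio_root(os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	err = dmu_tx_check_ioerr(zio, dn, 1, blkid);
 *	if (err == 0)
 *		err = zio_wait(zio);
 */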
static void
dmu_tx_count_indirects(dmu_tx_hold_t *txh, dmu_buf_impl_t *db,
    boolean_t freeable, dmu_buf_impl_t **history)
{
	dnode_t *dn = db->db_dnode;
	int i = db->db_level + 1;
	uint64_t space = 1ULL << dn->dn_indblkshift;

	if (i >= dn->dn_nlevels)
		return;

	db = db->db_parent;
	if (history[i] == db) {
		/* this indirect block has already been counted */
		return;
	}
	history[i] = db;

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;

	dmu_tx_count_indirects(txh, db, freeable, history);
}
/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		dmu_buf_impl_t *history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta = dn->dn_datablksz - off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid)
			bzero(history, sizeof (history));
		while (start <= dn->dn_maxblkid) {
			spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			db = dbuf_hold_level(dn, 0, start, FTAG);
			rw_exit(&dn->dn_struct_rwlock);

			if (db->db_blkptr && dsl_dataset_block_freeable(ds,
			    db->db_blkptr->blk_birth)) {
				txh->txh_space_tooverwrite += dn->dn_datablksz;
				txh->txh_space_tounref += dn->dn_datablksz;
				dmu_tx_count_indirects(txh, db, TRUE, history);
			} else {
				txh->txh_space_towrite += dn->dn_datablksz;
				if (db->db_blkptr)
					txh->txh_space_tounref +=
					    bp_get_dasize(spa, db->db_blkptr);
				dmu_tx_count_indirects(txh, db, FALSE, history);
			}
			dbuf_rele(db, FTAG);

			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 * (Reconstructed estimate; the exact
				 * bookkeeping in this revision may differ.)
				 */
				start >>= min_ibs - SPA_BLKPTRSHIFT;
				while (start != 0) {
					txh->txh_space_towrite +=
					    1ULL << dn->dn_indblkshift;
					start >>= min_ibs - SPA_BLKPTRSHIFT;
				}
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}
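/*
 * Editorial note (illustrative arithmetic, not from the original file):
 * in the worst case above, min_bs = SPA_MINBLOCKSHIFT (9) and
 * epbs = min_ibs - SPA_BLKPTRSHIFT.  Assuming min_ibs = DN_MIN_INDBLKSHIFT
 * (10) and SPA_BLKPTRSHIFT = 7, epbs = 3, so the final loop shifts
 * start/end down by 3 bits per level and runs at most
 * ceil((64 - 9) / 3) = 19 times, charging (end - start + 1) << max_ibs
 * bytes of new indirect blocks per level.
 */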
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = txh->txh_tx->tx_objset->os->os_meta_dnode;
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels - 1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}
void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_level() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;
		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp->blk_birth))
				space += bp_get_dasize(spa, bp);
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (new_blkid > blkid) {
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		dbuf = dbuf_hold_level(dn, 1, blkid >> epbs, FTAG);

		txh->txh_memory_tohold += dbuf->db.db_size;
		if (txh->txh_memory_tohold > DMU_MAX_ACCESS) {
			txh->txh_tx->tx_err = E2BIG;
			dbuf_rele(dbuf, FTAG);
			break;
		}
		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;
		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, bp[i].blk_birth))
				space += bp_get_dasize(spa, &bp[i]);
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}
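/*
 * Editorial note (illustrative arithmetic, not from the original file):
 * with 128K data blocks and a 16K indirect block (epbs = 14 - 7 = 7),
 * freeing 1GB spans 8192 level-0 blocks, which are mapped by
 * 8192 >> 7 = 64 level-1 blocks; dmu_tx_count_free() would then hold
 * roughly 64 << dn_indblkshift = 1MB in txh_memory_tohold while the
 * block pointers are examined.
 */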
void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The above count_write's
	 * have already taken care of the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}
void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(dmu_ot[dn->dn_type].ot_byteswap, ==, zap_byteswap);

	if (dn->dn_maxblkid == 0 && !add) {
		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    dn->dn_phys->dn_blkptr[0].blk_birth)) {
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		} else {
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
			txh->txh_space_tounref +=
			    BP_GET_ASIZE(dn->dn_phys->dn_blkptr);
		}
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(&dn->dn_objset->os, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	/*
	 * 3 blocks overwritten: target leaf, ptrtbl block, header block
	 * 3 new blocks written if adding: new split leaf, 2 grown ptrtbl blocks
	 */
	dmu_tx_count_write(txh, dn->dn_maxblkid * dn->dn_datablksz,
	    (3 + (add ? 3 : 0)) << dn->dn_datablkshift);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		txh->txh_space_towrite += 3 << dn->dn_indblkshift;
}
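/*
 * Editorial note (illustrative arithmetic, not from the original file):
 * for a fat-zap with dn_maxblkid = 100000 and epbs = 7, the loop above
 * runs twice (100000 >> 7 = 781, 781 >> 7 = 6, 6 >> 7 = 0), so
 * 2 * (3 << dn_indblkshift) bytes are charged for indirect twigs on top
 * of the (3 + 3) data blocks counted for an add.
 */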
void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}
void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}
int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}
#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn = db->db_dnode;

	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset->os);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj)
		return;

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT)
		return;

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * buffer so that we don't need to hold it
				 * when creating a new object.
				 */
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DB_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset)
			return;
	}
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT3U(tx->tx_txg, ==, 0);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}
static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}
/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	return (0);
}
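/*
 * Editorial sketch of the typical consumer pattern described above
 * (illustrative only; locking and error handling vary by caller):
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, size);
 *	err = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (err == ERESTART) {
 *		(drop the caller's locks)
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		(reacquire locks and retry from dmu_tx_create())
 *	} else if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, size, buf, tx);
 *	dmu_tx_commit(tx);
 */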
void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}
void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}
void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}
void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while ((txh = list_head(&tx->tx_holds)) != NULL) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}
uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}