/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
*/
#include <sys/dmu_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_pool.h>
#include <sys/zfs_context.h>
dmu_tx_t *
{
#ifdef ZFS_DEBUG
#endif
return (tx);
}
dmu_tx_t *
{
return (tx);
}
dmu_tx_t *
{
return (tx);
}
int
{
}
int
{
}
static dmu_tx_hold_t *
{
int err;
if (object != DMU_NEW_OBJECT) {
if (err) {
return (NULL);
}
/*
* dn->dn_assigned_txg == tx->tx_txg doesn't pose a
* problem, but there's no way for it to happen (for
* now, at least).
*/
}
}
#ifdef ZFS_DEBUG
#endif
return (txh);
}
void
{
/*
* If we're syncing, they can manipulate any object anyhow, and
* the hold on the dnode_t can cause problems.
*/
if (!dmu_tx_is_syncing(tx)) {
object, THT_NEWOBJECT, 0, 0);
}
}
static int
{
int err;
return (err);
}
static void
{
return;
} else {
}
if (freeable) {
} else {
}
if (bp) {
}
}
/* ARGSUSED */
static void
{
int err = 0;
if (len == 0)
return;
if (dn) {
int delta;
/*
* For i/o error checking, read the first and last level-0
* blocks (if they are not aligned), and all the level-1 blocks.
*/
if (dn->dn_maxblkid == 0) {
if (err)
goto out;
}
} else {
/* first level-0 block */
if (err)
goto out;
}
/* last level-0 block */
if (err)
goto out;
}
/* level-1 blocks */
if (nlvls > 1) {
if (err)
goto out;
}
}
if (err)
goto out;
}
if (dn->dn_maxblkid > 0) {
/*
* The blocksize can't change,
* so we can make a more precise estimate.
*/
} else {
/*
* The blocksize can increase up to the recordsize,
* or if it is already more than the recordsize,
* up to the next power of 2.
*/
}
/*
* If this write is not off the end of the file
* we need to account for overwrites/unref.
*/
for (int l = 0; l < DN_MAX_LEVELS; l++)
history[l] = -1ULL;
}
if (err) {
return;
}
history);
/*
* Account for new indirects appearing
* before this IO gets assigned into a txg.
*/
(void) refcount_add_many(
}
goto out;
}
}
}
/*
* 'end' is the last thing we will access, not one past.
* This way we won't overflow when accessing the last byte.
*/
/*
* The object contains at most 2^(64 - min_bs) blocks,
* and each indirect level maps 2^epbs.
*/
if (start != 0) {
/*
* We also need a new blkid=0 indirect block
* to reference any existing file data.
*/
}
}
out:
2 * DMU_MAX_ACCESS)
if (err)
}
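/*
 * Illustrative sketch (an addition, not part of the original source): the
 * alignment test behind "read the first and last level-0 blocks (if they
 * are not aligned)" in the comment above.  The helper names are made up;
 * off, len and datablkshift follow the conventions used in this file.
 */
static boolean_t
example_first_block_unaligned(uint64_t off, int datablkshift)
{
	/* the first level-0 block only needs an i/o-error read if the
	 * write does not start on a block boundary */
	return ((off & ((1ULL << datablkshift) - 1)) != 0);
}

static boolean_t
example_last_block_unaligned(uint64_t off, uint64_t len, int datablkshift)
{
	/* likewise, the last level-0 block only matters if the write
	 * does not end on a block boundary */
	return (((off + len) & ((1ULL << datablkshift) - 1)) != 0);
}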
static void
{
} else {
}
}
}
void
{
return;
}
static void
{
int epbs;
if (dn->dn_nlevels == 0)
return;
/*
* The struct_rwlock protects us against dn_nlevels
* changing, in case (against all odds) we manage to dirty &
* sync out the changes after we check for being dirty.
* Also, dbuf_hold_impl() wants us to have the struct_rwlock.
*/
if (dn->dn_maxblkid == 0) {
blkid = 0;
nblks = 1;
} else {
return;
}
} else {
return;
}
}
int i;
for (i = 0; i < nblks; i++) {
}
}
nl1blks = 1;
nblks = 0;
}
while (nblks) {
break;
}
if (err) {
break;
}
break;
}
}
if (err) {
break;
}
/*
* We don't check memory_tohold against DMU_MAX_ACCESS because
* memory_tohold is an over-estimation (especially the >L1
* indirect blocks), so it could fail. Callers should have
* already verified that they will not be holding too much
* memory.
*/
if (err != 0) {
break;
}
for (i = 0; i < tochk; i++) {
}
}
++nl1blks;
}
/*
* Add in memory requirements of higher-level indirects.
* This assumes a worst-possible scenario for dn_nlevels and a
* worst-possible distribution of l1-blocks over the region to free.
*/
{
/*
* Here we don't use DN_MAX_LEVELS, but calculate it with the
* given datablkshift and indblkshift. This makes the
* difference between 19 and 8 on large files.
*/
FTAG);
}
}
/* account for new level 1 indirect blocks that might show up */
if (skipped > 0) {
}
}
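/*
 * Illustrative sketch (an assumption, not the original code): an upper bound
 * on the number of block levels implied by a given data block size and
 * indirect block size, per the comment above about not using DN_MAX_LEVELS.
 * Each indirect level maps 2^epbs block pointers, where
 * epbs = indblkshift - SPA_BLKPTRSHIFT; level 0 covers datablkshift bits of
 * the 64-bit object offset and each further level covers another epbs bits.
 */
static int
example_max_levels(int datablkshift, int indblkshift)
{
	int epbs = indblkshift - SPA_BLKPTRSHIFT;

	return (1 + ((64 - datablkshift) + epbs - 1) / epbs);
}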
/*
* This function marks the transaction as being a "net free". The end
* result is that refquotas will be disabled for this transaction, and
* this transaction will be able to use half of the pool space overhead
* (see dsl_pool_adjustedsize()). Therefore this function should only
* be called for transactions that we expect will not cause a net increase
* in the amount of space used (but it's OK if that is occasionally not true).
*/
void
{
DMU_NEW_OBJECT, THT_FREE, 0, 0);
/*
* Pretend that this operation will free 1GB of space. This
* should be large enough to cancel out the largest write.
* We don't want to use something like UINT64_MAX, because that would
* cause overflows when doing math with these values (e.g. in
* dmu_tx_try_assign()).
*/
}
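/*
 * Illustrative usage sketch (an addition, not part of the original file): a
 * caller that expects a transaction to free more space than it consumes
 * might combine dmu_tx_hold_free() with dmu_tx_mark_netfree() roughly like
 * this.  The helper name and the object argument are placeholders.
 */
static int
example_net_free_tx(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	/* declare the intent to free the whole object's data... */
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	/* ...and tell the DMU the net effect is a free, so refquota
	 * enforcement is relaxed for this tx */
	dmu_tx_mark_netfree(tx);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	/* ... perform the free in this tx ... */
	dmu_tx_commit(tx);
	return (0);
}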
void
{
int err;
return;
return;
if (len == DMU_OBJECT_END)
/*
* For i/o error checking, we read the first and last level-0
* blocks if they are not aligned, and all the level-1 blocks.
*
* Note: dbuf_free_range() assumes that we have not instantiated
* any level-0 dbufs that will be completely freed. Therefore we must
* exercise care to not read or count the first and last blocks
* if they are blocksize-aligned.
*/
if (dn->dn_datablkshift == 0) {
} else {
/* first block will be modified if it is not aligned */
/* last block will be modified if it is not aligned */
}
/*
* Check level-1 blocks.
*/
/*
* dnode_reallocate() can result in an object with indirect
* blocks having an odd data block size. In this case,
* just check the single block.
*/
if (dn->dn_datablkshift == 0)
break;
if (err) {
return;
}
if (err) {
return;
}
}
if (err) {
return;
}
}
}
void
{
int err;
return;
/*
* We will be able to fit a new object's entries into one leaf
* block. So there will be at most 2 blocks total,
* including the header block.
*/
return;
}
/*
* If there is only one block (i.e. this is a micro-zap)
* and we are not adding anything, the accounting is simple.
*/
if (err) {
return;
}
/*
* Use max block size here, since we don't know how much
* the size will change between now and the dbuf dirty call.
*/
} else {
}
if (!BP_IS_HOLE(bp)) {
}
return;
}
/*
* access the name in this fat-zap so that we'll check
* for i/o errors to the leaf blocks, etc.
*/
return;
}
}
/*
* If the modified blocks are scattered to the four winds,
* we'll have to modify an indirect twig for each. We can make
* modifications at up to 3 locations:
* - header block at the beginning of the object
* - target leaf block
* - end of the object, where we might need to write:
* - a new leaf block if the target block needs to be split
* - the new pointer table, if it is growing
* - the new cookie table, if it is growing
*/
if (ds_phys->ds_prev_snap_obj != 0) {
} else {
}
}
}
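/*
 * Illustrative usage sketch (an addition, not original code): holding a ZAP
 * object before adding an entry to it.  "add" is nonzero because the caller
 * intends to create the entry, and the name is passed so that the fat-zap
 * leaf blocks can be checked for i/o errors as described above.  The helper
 * name is made up.
 */
static int
example_zap_add_tx(objset_t *os, uint64_t zapobj, const char *name)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_zap(tx, zapobj, TRUE, name);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	/* the actual zap_add() call on zapobj would go here */
	dmu_tx_commit(tx);
	return (0);
}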
void
{
if (txh)
}
void
{
}
int
{
int holds = 0;
/*
* By asserting that the tx is assigned, we're counting the
* number of dn_tx_holds, which is the same as the number of
* dn_holds. Otherwise, we'd be counting dn_holds, but
* dn_tx_holds could be 0.
*/
/* if (tx->tx_anyobj == TRUE) */
/* return (0); */
holds++;
}
return (holds);
}
#ifdef ZFS_DEBUG
void
{
return;
}
/* XXX No checking on the meta dnode for now */
return;
}
match_object = TRUE;
/* XXX txh_arg2 better not be zero... */
dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
case THT_WRITE:
match_offset = TRUE;
/*
* We will let this hold work for the bonus
* or spill buffer so that we don't need to
* hold it when creating a new object.
*/
if (blkid == DMU_BONUS_BLKID ||
blkid == DMU_SPILL_BLKID)
match_offset = TRUE;
/*
* They might have to increase nlevels,
* thus dirtying the new TLIBs. Or they
* might have to change the block size,
* thus dirtying the new lvl=0 blk=0.
*/
if (blkid == 0)
match_offset = TRUE;
break;
case THT_FREE:
/*
* We will dirty all the level 1 blocks in
* the free range and perhaps the first and
* last level 0 block.
*/
match_offset = TRUE;
break;
case THT_SPILL:
if (blkid == DMU_SPILL_BLKID)
match_offset = TRUE;
break;
case THT_BONUS:
if (blkid == DMU_BONUS_BLKID)
match_offset = TRUE;
break;
case THT_ZAP:
match_offset = TRUE;
break;
case THT_NEWOBJECT:
match_object = TRUE;
break;
default:
ASSERT(!"bad txh_type");
}
}
if (match_object && match_offset) {
return;
}
}
panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
}
#endif
/*
* If we can't do 10 iops, something is wrong. Let us go ahead
* and hit zfs_dirty_data_max.
*/
/*
* We delay transactions when we've determined that the backend storage
* isn't able to accommodate the rate of incoming writes.
*
* If there is already a transaction waiting, we delay relative to when
* that transaction finishes waiting. This way the calculated min_time
* is independent of the number of threads concurrently executing
* transactions.
*
* If we are the only waiter, wait relative to when the transaction
* started, rather than the current time. This credits the transaction for
* "time already served", e.g. reading indirect blocks.
*
* The minimum time for a transaction to take is calculated as:
* min_time = scale * (dirty - min) / (max - dirty)
* min_time is then capped at zfs_delay_max_ns.
*
* The delay has two degrees of freedom that can be adjusted via tunables.
* The percentage of dirty data at which we start to delay is defined by
* zfs_delay_min_dirty_percent. This should typically be at or above
* zfs_vdev_async_write_active_max_dirty_percent so that we only start to
* delay after writing at full speed has failed to keep up with the incoming
* write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
* speaking, this variable determines the amount of delay at the midpoint of
* the curve.
*
* delay
* 10ms +-------------------------------------------------------------*+
* | *|
* 9ms + *+
* | *|
* 8ms + *+
* | * |
* 7ms + * +
* | * |
* 6ms + * +
* | * |
* 5ms + * +
* | * |
* 4ms + * +
* | * |
* 3ms + * +
* | * |
* 2ms + (midpoint) * +
* | | ** |
* 1ms + v *** +
* | zfs_delay_scale ----------> ******** |
* 0 +-------------------------------------*********----------------+
* 0% <- zfs_dirty_data_max -> 100%
*
* Note that since the delay is added to the outstanding time remaining on the
* most recent transaction, the delay is effectively the inverse of IOPS.
* Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
* was chosen such that small changes in the amount of accumulated dirty data
* in the first 3/4 of the curve yield relatively small differences in the
* amount of delay.
*
* The effects can be easier to understand when the amount of delay is
* represented on a log scale:
*
* delay
* 100ms +-------------------------------------------------------------++
* + +
* | |
* + *+
* 10ms + *+
* + ** +
* | (midpoint) ** |
* + | ** +
* 1ms + v **** +
* + zfs_delay_scale ----------> ***** +
* | **** |
* + **** +
* 100us + ** +
* + * +
* | * |
* + * +
* 10us + * +
* + +
* | |
* + +
* +--------------------------------------------------------------+
* 0% <- zfs_dirty_data_max -> 100%
*
* Note here that only as the amount of dirty data approaches its limit does
* the delay start to increase rapidly. The goal of a properly tuned system
* should be to keep the amount of dirty data out of that range by first
* ensuring that the appropriate limits are set for the I/O scheduler to reach
* optimal throughput on the backend storage, and then by changing the value
* of zfs_delay_scale to increase the steepness of the curve.
*/
static void
{
if (dirty <= delay_min_bytes)
return;
/*
* The caller has already waited until we are under the max.
* We make them pass us the amount of dirty data so we don't
* have to handle the case of it being >= the max, which could
* cause a divide-by-zero if it's == the max.
*/
return;
#ifdef _KERNEL
continue;
#else
#endif
}
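/*
 * Illustrative sketch (an assumption about the arithmetic, not the function
 * above): the delay curve described in the block comment, expressed
 * directly.  zfs_delay_scale, zfs_delay_min_dirty_percent, zfs_dirty_data_max
 * and zfs_delay_max_ns are the existing tunables; the helper name is made up
 * and the caller is assumed to guarantee dirty < zfs_dirty_data_max.
 */
static hrtime_t
example_tx_delay_ns(uint64_t dirty)
{
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t min_time;

	if (dirty <= delay_min_bytes)
		return (0);

	/* min_time = scale * (dirty - min) / (max - dirty), per the
	 * comment above, then capped at zfs_delay_max_ns */
	min_time = zfs_delay_scale * (dirty - delay_min_bytes) /
	    (zfs_dirty_data_max - dirty);

	return (MIN(min_time, zfs_delay_max_ns));
}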
static int
{
if (spa_suspended(spa)) {
/*
* If the user has indicated a blocking failure mode
* then return ERESTART which will block in dmu_tx_wait().
* Otherwise, return EIO so that an error can get
* propagated back to the VOP calls.
*
* Note that we always honor the txg_how flag regardless
* of the failuremode setting.
*/
}
}
/*
* NB: No error returns are allowed after txg_hold_open, but
* before processing the dnode holds, due to the
* dmu_tx_unassign() logic.
*/
}
if (dn->dn_assigned_txg == 0)
}
}
/*
* If a snapshot has been taken since we made our estimates,
* assume that we won't be able to free or overwrite anything.
*/
tx->tx_lastsnap_txg) {
towrite += tooverwrite;
tooverwrite = tofree = 0;
}
/* needed allocation: worst-case estimate of write space */
/* freed space estimate: worst-case overwrite + free estimate */
/* convert unrefd space to worst-case estimate */
/* calculate memory footprint estimate */
#ifdef ZFS_DEBUG
/*
* Add in 'tohold' to account for our dirty holds on this memory
* XXX - the "fudge" factor is to account for skipped blocks that
* we missed because dnode_next_offset() misses in-core-only blocks.
*/
#endif
if (err)
return (err);
}
return (0);
}
static void
{
return;
/*
* Walk the transaction's hold list, removing the hold on the
* associated dnode, and notifying waiters if the refcount drops to 0.
*/
continue;
dn->dn_assigned_txg = 0;
}
}
}
/*
* Assign tx to a transaction group. txg_how can be one of:
*
* (1) TXG_WAIT. If the current open txg is full, waits until there's
* a new one. This should be used when you're not holding locks.
* It will only fail if we're truly out of space (or over quota).
*
* (2) TXG_NOWAIT. If we can't assign into the current open txg without
* blocking, returns immediately with ERESTART. This should be used
* whenever you're holding locks. On an ERESTART error, the caller
* should drop locks, do a dmu_tx_wait(tx), and try again.
*
* (3) TXG_WAITED. Like TXG_NOWAIT, but indicates that dmu_tx_wait()
* has already been called on behalf of this operation (though
* most likely on a different tx).
*/
int
{
int err;
txg_how == TXG_WAITED);
/* If we might wait, we must not hold the config lock. */
if (txg_how == TXG_WAITED)
return (err);
}
return (0);
}
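/*
 * Illustrative usage sketch (an addition, not original code): the
 * TXG_NOWAIT/ERESTART pattern described in the comment above, as used by
 * callers that hold locks.  On ERESTART the tx is aborted and rebuilt after
 * dmu_tx_wait(); the held object/range and the lock handling (indicated only
 * by comments) are placeholders.
 */
static int
example_assign_nowait(objset_t *os, uint64_t object, uint64_t off, int len)
{
	dmu_tx_t *tx;
	int err;

top:
	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, off, len);
	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err != 0) {
		if (err == ERESTART) {
			/* caller-held locks would be dropped here */
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			/* ...and reacquired here before retrying */
			goto top;
		}
		dmu_tx_abort(tx);
		return (err);
	}
	/* ... do the write in this tx ... */
	dmu_tx_commit(tx);
	return (0);
}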
void
{
if (tx->tx_wait_dirty) {
/*
* dmu_tx_try_assign() has determined that we need to wait
* because we've consumed much or all of the dirty buffer
* space.
*/
/*
* Note: setting tx_waited only has effect if the caller
* used TXG_WAIT. Otherwise they are going to destroy
* this tx and try again. The common case, zfs_write(),
* uses TXG_WAIT.
*/
/*
* If the pool is suspended we need to wait until it
* is resumed. Note that it's possible that the pool
* has become active after this thread has tried to
* obtain a tx. If that's the case then tx_lasttried_txg
* would not have been set.
*/
} else if (tx->tx_needassign_txh) {
/*
* A dnode is assigned to the quiescing txg. Wait for its
* transaction to complete.
*/
} else {
}
}
void
{
#ifdef ZFS_DEBUG
return;
if (delta > 0) {
} else {
}
#endif
}
static void
{
}
#ifdef ZFS_DEBUG
#endif
}
void
{
/*
* Go through the transaction's hold list and remove holds on
* associated dnodes, notifying waiters if no holds remain.
*/
continue;
dn->dn_assigned_txg = 0;
}
}
if (tx->tx_tempreserve_cookie)
#ifdef ZFS_DEBUG
dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
#endif
}
void
{
/*
* Call any registered callbacks with an error code.
*/
}
{
}
{
}
void
{
}
/*
* Call all the commit callbacks on a list, with a given error code.
*/
void
{
}
}
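/*
 * Illustrative usage sketch (an addition, not original code): registering a
 * commit callback on an assigned tx.  The callback is invoked once the tx's
 * txg has synced (error == 0) or with a nonzero error if the tx is aborted
 * or the pool fails; it must tolerate both cases.  The helper names and the
 * cookie payload are placeholders.
 */
static void
example_commit_cb(void *arg, int error)
{
	if (error == 0) {
		/* the transaction's txg reached stable storage */
	}
	kmem_free(arg, sizeof (uint64_t));
}

static void
example_register_cb(dmu_tx_t *tx)
{
	uint64_t *cookie = kmem_alloc(sizeof (uint64_t), KM_SLEEP);

	/* tx must already be assigned for dmu_tx_get_txg() */
	*cookie = dmu_tx_get_txg(tx);
	dmu_tx_callback_register(tx, example_commit_cb, cookie);
}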
/*
* Interface to hold a bunch of attributes.
* Used for creating new files.
* attrsize is the total size of all attributes
* to be added during object creation.
*
*/
/*
* Hold the necessary attribute name for attribute registration.
* This should be a very rare case where it is needed. If it does
* happen it would only happen on the first write to the file system.
*/
static void
{
int i;
if (!sa->sa_need_attr_registration)
return;
for (i = 0; i != sa->sa_num_attrs; i++) {
if (sa->sa_reg_attr_obj)
else
}
}
}
void
{
THT_SPILL, 0, 0);
return;
/* If blkptr doesn't exist then add space to towrite */
} else {
} else {
}
if (!BP_IS_HOLE(bp)) {
}
}
}
void
{
return;
else {
}
return;
THT_SPILL, 0, 0);
}
/*
* Hold SA attribute
*
* dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *, attribute, add, size)
*
* variable_size is the total size of all variable sized attributes
* passed to this function. It is not the total size of all
* variable size attributes that *may* exist on this object.
*/
void
{
return;
}
} else {
if (dn->dn_have_spill) {
}
}
}
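/*
 * Illustrative usage sketch (an addition, not original code; assumes the SA
 * types from sys/sa.h are available): the two SA hold entry points.
 * dmu_tx_hold_sa_create() is used before a new object's SA bonus/spill is
 * populated; dmu_tx_hold_sa() is used when updating attributes on an
 * existing handle, with may_grow set if the update might enlarge the SA
 * layout and thus dirty the spill block.
 */
static void
example_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, int new_obj_attrsize)
{
	if (hdl == NULL) {
		/* creating a new object: reserve room for all of its
		 * initial attributes */
		dmu_tx_hold_sa_create(tx, new_obj_attrsize);
	} else {
		/* updating an existing object; B_TRUE means the
		 * attributes may grow */
		dmu_tx_hold_sa(tx, hdl, B_TRUE);
	}
}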