lufs_top.c revision 31d4cf520a749c6f68cc90540de415399538680b
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2015 by Delphix. All rights reserved.
*/
#include <sys/sysmacros.h>
/*
* FILE SYSTEM INTERFACE TO TRANSACTION OPERATIONS (TOP; like VOP)
*/
/*
* declare a delta
*/
void
top_delta(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb, delta_t dtyp,
int (*func)(), ulong_t arg)
{
threadtrans_t *tp = tsd_get(topkey);
if (tp) {
tp->any_deltas = 1;
}
}
/*
* cancel a delta
*/
void
top_cancel(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb, int flags)
{
int metadata = flags & (I_DIR|I_IBLK|I_SHAD|I_QUOTA);
if (metadata)
/*
* needed for the roll thread's heuristic
*/
}
/*
* check if this delta has been canceled (metadata -> userdata)
*/
int
top_iscancel(ufsvfs_t *ufsvfsp, offset_t mof, off_t nb)
{
return (1);
return (1);
return (0);
}
/*
* put device into error state
*/
void
top_seterror(ufsvfs_t *ufsvfsp)
{
}
/*
 * issue an empty sync op to help empty the delta/log map or the log
 */
static void
top_issue_sync(void *arg)
{
int error = 0;
if (!error) {
}
}
static void
top_issue_from_taskq(void *arg)
{
ufsvfs_t *ufsvfsp = (ufsvfs_t *)arg;
mt_map_t *mtm = ufsvfsp->vfs_log->un_logmap;

top_issue_sync(arg);

/*
* We were called from the taskq_dispatch() in top_begin_async(), so
* decrement mtm_taskq_sync_count and wake up the thread waiting
* on the mtm_cv if the mtm_taskq_sync_count hits zero.
*/
mutex_enter(&mtm->mtm_lock);
mtm->mtm_taskq_sync_count--;
if (mtm->mtm_taskq_sync_count == 0) {
cv_signal(&mtm->mtm_cv);
}
mutex_exit(&mtm->mtm_lock);
}
/*
* MOBY TRANSACTION ROUTINES
* begin a moby transaction
* sync ops enter until first sync op finishes
* async ops enter until last sync op finishes
* end a moby transaction
* outstanding deltas are pushed thru log
* log buffer is committed (incore only)
* next trans is open to async ops
* log buffer is committed on the log
* next trans is open to sync ops
*/
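/*
 * Callers bracket each logged operation with a begin/end pair:
 * top_begin_sync()/top_end_sync() for synchronous operations and
 * top_begin_async()/top_end_async() for asynchronous ones.
 */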
/*ARGSUSED*/
void
top_begin_sync(ufsvfs_t *ufsvfsp, top_t topid, ulong_t size, int *error)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
mt_map_t *mtm = ul->un_logmap;
/*
* Error the fsync immediately if this is an nfs thread
* and its last transaction has already been committed.
* The only transactions outstanding are those
* where no commit has even started
* (last_async_tid == mtm->mtm_tid)
* or those where a commit is in progress
* (last_async_tid == mtm->mtm_committid)
*/
*error = 1;
return;
}
}
/*
* If there are already other synchronous transactions
* and we haven't allowed async ones to start yet
* then just wait for the commit to complete.
*/
do {
*error = 1;
return;
}
/*
* We know we're in the window where a thread is
* committing a transaction in top_end_sync() and
* has allowed async threads to start but hasn't
* got the completion on the commit write to
* allow sync threads to start.
* So wait for that commit completion, then retest
* the quick nfs check; if that fails,
* go on to start a transaction.
*/
do {
/* tp is set above if T_DONTPEND */
*error = 1;
return;
}
}
}
/*
* current transaction closed to sync ops; try for next transaction
*/
/*
* We know a commit is in progress; if we are trying to
* commit and we haven't allowed async ones to start yet,
* then just wait for the commit completion.
*/
if ((size == TOP_COMMIT_SIZE) &&
do {
*error = 1;
return;
}
/*
* next transaction is full; try for next transaction
*/
goto retry;
}
/*
* we are in the next transaction; wait for it to start
*/
mtm->mtm_wantin++;
/*
* The corresponding cv_broadcast wakes up
* all threads that have been validated to go into
* the next transaction. However, because spurious
* cv_wait wakeups are possible we use a sequence
* number to check that the commit and cv_broadcast
* have really occurred. We couldn't use mtm_tid
* because on error that doesn't get incremented.
*/
do {
} else {
/*
* if the current transaction is full; try the next one
*/
/*
* log is over reserved and no one will unresv the space
* so generate empty sync op to unresv the space
*/
if (mtm->mtm_activesync == 0) {
goto retry;
}
goto retry;
}
/*
* we are in the current transaction
*/
mtm->mtm_active++;
mtm->mtm_activesync++;
}
}
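/*
 * count of async transaction begins that failed with EWOULDBLOCK
 * because the caller asked not to block (tryasync)
 */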
int tryfail_cnt;
int
top_begin_async(ufsvfs_t *ufsvfsp, top_t topid, ulong_t size, int tryasync)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
mt_map_t *mtm = ul->un_logmap;
threadtrans_t *tp;

tp = tsd_get(topkey);
if (tp == NULL) {
tp = kmem_zalloc(sizeof (threadtrans_t), KM_SLEEP);
(void) tsd_set(topkey, tp);
}
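/*
 * reset this thread's per-transaction delta accounting
 */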
tp->deltas_size = 0;
tp->any_deltas = 0;
/*
* current transaction closed to async ops; try for next transaction
*/
if (tryasync) {
tryfail_cnt++;
return (EWOULDBLOCK);
}
goto retry;
}
/*
* if the current transaction is full; try the next one
*/
!panicstr) {
/*
* log is overreserved and no one will unresv the space
* so generate empty sync op to unresv the space
* We need TOP_SYNC_FORCED because we want to know when
* a top_end_sync is completed.
* mtm_taskq_sync_count is needed because we want to keep track
* of the pending top_issue_sync dispatches so that during
* forced umount we can wait for these to complete.
* mtm_taskq_sync_count is decremented in top_issue_sync and
* can remain set even after top_end_sync completes.
* We have a window between the clearing of TOP_SYNC_FORCED
* flag and the decrementing of mtm_taskq_sync_count.
* If in this window new async transactions start consuming
* log space, the log can get overreserved.
* Subsequently a new async transaction would fail to generate
* an empty sync transaction via the taskq, since it finds
* the mtm_taskq_sync_count set. This can cause a hang.
* Hence we do not test for mtm_taskq_sync_count being zero.
* Instead, the TOP_SYNC_FORCED flag is tested here.
*/
if ((mtm->mtm_activesync == 0) &&
/*
* Set flag to stop multiple forced empty
* sync transactions. Increment mtm_taskq_sync_count.
*/
(void) taskq_dispatch(system_taskq,
top_issue_from_taskq, ufsvfsp, TQ_SLEEP);
if (tryasync) {
tryfail_cnt++;
return (EWOULDBLOCK);
}
goto retry;
}
if (tryasync) {
tryfail_cnt++;
return (EWOULDBLOCK);
}
goto retry;
}
/*
* we are in the current transaction
*/
mtm->mtm_active++;
return (0);
}
/*ARGSUSED*/
void
top_end_sync(ufsvfs_t *ufsvfsp, int *ep, top_t topid, ulong_t size)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
mt_map_t *mtm = ul->un_logmap;
mtm->mtm_activesync--;
mtm->mtm_active--;
/*
* wait for last syncop to complete
*/
do {
goto out;
}
/*
* last syncop; close current transaction to all ops
*/
/*
* wait for last asyncop to finish
*/
while (mtm->mtm_active) {
}
/*
* push dirty metadata thru the log
*/
top_roll_debug(ul));
/*
* Empty the cancellist, but save it for logmap_free_cancel
*/
/*
* allow async ops
*/
/*
* Hold the un_log_mutex here until we are done writing
* the commit record to prevent any more deltas to be written
* to the log after we allow async operations.
*/
/*
* asynchronously write the commit record,
*/
/*
* wait for outstanding log writes (e.g., commits) to finish
*/
/*
* Now that we are sure the commit has been written to the log
* we can free any canceled deltas. If we free them before
* guaranteeing that the commit was written, we could panic before
* the commit, but after an async thread has allocated and written
* to a canceled, freed block.
*/
/*
* now, allow all ops
*/
mtm->mtm_wantin = 0;
mtm->mtm_closed = 0;
ul->un_resv_wantin = 0;
/*
* Finish any other synchronous transactions and
* start any waiting new synchronous transactions
*/
/*
* if the logmap is getting full; roll something
*/
if (logmap_need_roll_sync(mtm)) {
}
out:
}
/*ARGSUSED*/
void
top_end_async(ufsvfs_t *ufsvfsp, top_t topid, ulong_t size)
{
ml_unit_t *ul = ufsvfsp->vfs_log;
mt_map_t *mtm = ul->un_logmap;
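/*
 * wakeup_needed is set when this thread is the last active op leaving
 * the transaction, so that threads waiting for it to drain can be woken
 */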
int wakeup_needed = 0;
}
if (tp->any_deltas) {
}
mtm->mtm_active--;
if ((mtm->mtm_active == 0) &&
wakeup_needed = 1;
}
if (wakeup_needed)
/*
* Generate a sync op if the log, logmap, or deltamap are heavily used.
* Unless we are possibly holding any VM locks, since if we are holding
* any VM locks and we issue a top_end_sync(), we could deadlock.
*/
if ((mtm->mtm_activesync == 0) &&
ldl_need_commit(ul)) &&
(topid != TOP_GETPAGE)) {
}
/*
* roll something from the log if the logmap is too full
*/
if (logmap_need_roll_async(mtm))
}
/*
* Called from roll thread;
* buffer set for reading master
* Returns
* 0 - success, can continue with next buffer
* 1 - failure due to logmap deltas being in use
*/
int
top_read_roll(rollbuf_t *rbp, ml_unit_t *ul)
{
buf_t *bp = &rbp->rb_bh;
/*
* get a list of deltas
*/
/* logmap deltas are in use */
return (1);
}
/*
* no deltas were found, nothing to roll
*/
return (0);
}
/*
* If there is one cached roll buffer that covers all the deltas then
* we can use that instead of copying to a separate roll buffer.
*/
return (0);
}
/*
* Set up the read.
* If no read is needed logmap_setup_read() returns 0.
*/
/*
* async read the data from master
*/
(void) bdev_strategy(bp);
} else {
}
return (0);
}
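/*
 * tunable: when non-zero, top_log() logs deltas through a caller-supplied
 * cached roll buffer (crb) rather than copying to a separate roll buffer
 */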
int ufs_crb_enable = 1;
/*
* move deltas from deltamap into the log
*/
void
top_log(ufsvfs_t *ufsvfsp, char *va, offset_t vamof, off_t nb,
caddr_t buf, uint32_t bufsz)
{
mapentry_t *me;
/*
* needed for the roll thread's heuristic
*/
if (buf && ufs_crb_enable) {
/*
* Move any deltas to the logmap. Split requests that
* straddle MAPBLOCKSIZE hash boundaries (i.e. summary info).
*/
if (me) {
}
}
} else {
/*
* if there are deltas
*/
if (me) {
/*
* move to logmap
*/
}
}
}
static void
top_threadtrans_destroy(void *tp)
{
kmem_free(tp, sizeof (threadtrans_t));
}
void
_init_top(void)
{
ASSERT(top_init_debug());
/*
* set up the delta layer
*/
_init_map();
/*
* Initialise the thread specific data transaction key
*/
tsd_create(&topkey, top_threadtrans_destroy);
}