/* lufs_thread.c revision d3d50737e566cade9a08d73d2af95105ac7cd960 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include <sys/sysmacros.h>
#include <sys/fssnap_if.h>
#include <sys/inttypes.h>
#include <sys/tnf_probe.h>
/*
* Kernel threads for logging
* Currently only one for rolling the log (one per log).
*/
#define	LUFS_DEFAULT_NUM_ROLL_BUFS	16
#define	LUFS_DEFAULT_MIN_ROLL_BUFS	4
#define	LUFS_DEFAULT_MAX_ROLL_BUFS	64

/*
 * Macros
 */

/*
 * Tunables
 */
long logmap_maxnme = 1536;
int trans_roll_tics = 0;

/*
 * Roll-buffer count tunables consumed by the 8K roll-buffer sizing
 * code below: lufs_num_roll_bufs is clamped to the inclusive range
 * [lufs_min_roll_bufs, lufs_max_roll_bufs] before use.
 *
 * NOTE(review): these variable definitions were missing from this
 * revision even though the LUFS_DEFAULT_* macros above and the
 * clamping code later in the file both reference them — restored.
 */
uint32_t lufs_num_roll_bufs = LUFS_DEFAULT_NUM_ROLL_BUFS;
uint32_t lufs_min_roll_bufs = LUFS_DEFAULT_MIN_ROLL_BUFS;
uint32_t lufs_max_roll_bufs = LUFS_DEFAULT_MAX_ROLL_BUFS;
/*
 * Key for thread specific data for the roll thread to
 * bypass snapshot throttling.
 * NOTE(review): the key variable this comment describes appears to
 * be missing below (e.g. "uint_t lufs_tsd_key;") — restore it.
 */
/*
* externs
*/
extern kcondvar_t ml_scan_cv;
extern int maxphys;
/*
 * NOTE(review): truncated definition — the function name, parameter
 * list and entire body were lost; only "static void", an empty brace
 * pair and a stray extra "}" remain.  Its position (between the
 * externs and the roll-buffer sizing function) suggests this was the
 * roll thread's wait/wakeup helper — TODO: restore from the revision
 * named at the top of this file.
 */
static void
{
}
}
/*
* returns the number of 8K buffers to use for rolling the log
*/
static uint32_t
{
/*
* sanity validate the tunable lufs_num_roll_bufs
*/
if (lufs_num_roll_bufs < lufs_min_roll_bufs) {
return (lufs_min_roll_bufs);
}
if (lufs_num_roll_bufs > lufs_max_roll_bufs) {
return (lufs_max_roll_bufs);
}
return (lufs_num_roll_bufs);
}
/*
* Find something to roll, then if we don't have cached roll buffers
* covering all the deltas in that MAPBLOCK then read the master
* and overlay the deltas.
* returns:
* 0 if successful
* 1 on finding nothing to roll
* 2 on error
*/
/*
 * NOTE(review): truncated definition — presumably log_roll_read().
 * The function-name line and all parameters except "int *retnbuf"
 * were lost, and many statements are missing throughout the body
 * (declarations of mof/bp/lrr_wait, the logmap scan for deltas to
 * roll, buffer setup, the actual read and delta-overlay calls, the
 * do/while condition, and the "flush_bufs:" label targeted by the
 * gotos).  Braces are unbalanced and "error" is used without a
 * visible initialization.  The retained comments describe the
 * intended flow; TODO: restore from the revision named at the top
 * of this file.
 */
int
int *retnbuf)
{
int i;
int error;
int nbuf;
/*
 * Make sure there is really something to roll
 */
mof = 0;
/* NOTE(review): the "nothing to roll" test around this return is missing */
return (1);
}
/*
 * build some master blocks + deltas to roll forward
 */
nbuf = 0;
do {
/*
 * Check for the case of a new delta to a set up buffer
 */
/* CSTYLED */);
/* Flush out the current set of buffers */
goto flush_bufs;
}
}
/*
 * Work out what to roll next. If it isn't cached then read
 * it asynchronously from the master.
 */
/* logmap deltas were in use */
if (nbuf == 0) {
/*
 * On first buffer wait for the logmap user
 * to finish by grabbing the logmap lock
 * exclusively rather than spinning
 */
lrr_wait++;
return (1);
}
/* we have at least one buffer - flush it */
goto flush_bufs;
}
nbuf++;
}
mof += MAPBLOCKSIZE;
/* NOTE(review): the do/while closing condition and the "flush_bufs:"
 * label are missing between here and the check below */
/*
 * If there was nothing to roll cycle back
 */
if (nbuf == 0) {
return (1);
}
/*
 * For each buffer, if it isn't cached then wait for the read to
 * finish and overlay the deltas.
 */
if (trans_not_wait(bp)) {
/* NOTE(review): dangling argument — the error-reporting call line
 * (presumably ldl_seterror()) is missing above this string */
"Error reading master during ufs log roll");
error = 1;
}
/*
 * sync read the data from the log
 */
error = 1;
}
}
/*
 * reset the age bit in the age list
 */
error = 1;
}
}
if (error)
return (2);
return (0);
}
/*
* Write out a cached roll buffer
*/
/*
 * NOTE(review): truncated definition — presumably log_roll_write_crb().
 * The function-name/parameter line is missing (ufsvfsp and bp are
 * referenced but never declared), and the snapshot branch body —
 * presumably a fssnap_strategy() call, given the fssnap_if.h include
 * above — was lost.  TODO: restore from the revision named at the
 * top of this file.
 */
void
{
/* if snapshots are enabled, call it */
if (ufsvfsp->vfs_snapshot) {
} else {
(void) bdev_strategy(bp);
}
}
/*
* Write out a set of non cached roll buffers
*/
/*
 * NOTE(review): truncated definition — presumably log_roll_write_bufs().
 * The function-name/parameter line is missing (ufsvfsp, bp and secmap
 * are used but never declared), both inner for-loops have lost their
 * bit-test/break condition lines, the snapshot-branch call is gone
 * (leaving an "if ... else" with no then-statement), and the buf_t
 * clone allocation code is missing.  The surviving skeleton walks a
 * 16-bit sector map, writing each contiguous run of dirty sectors.
 * TODO: restore from the revision named at the top of this file.
 */
void
{
int j, k;
do { /* for each contiguous block of sectors */
/* find start of next sector to write */
for (j = 0; j < 16; ++j) {
/* NOTE(review): the secmap bit test guarding this break is missing */
break;
secmap >>= 1;
}
/* calculate number of sectors */
secmap >>= 1;
j++;
for (k = 1; j < 16; ++j) {
/* NOTE(review): the secmap bit test guarding this break is missing */
break;
secmap >>= 1;
k++;
}
/* if snapshots are enabled, call it */
if (ufsvfsp->vfs_snapshot)
else
(void) bdev_strategy(bp);
if (secmap) {
/*
 * Allocate another buf_t to handle
 * the next write in this MAPBLOCK
 * Chain them via b_list.
 */
}
} while (secmap);
}
/*
* Asynchronously roll the deltas, using the sector map
* in each rollbuf_t.
*/
/*
 * NOTE(review): truncated definition — presumably log_roll_write().
 * The function-name/parameter line is missing, the blkno-ordering
 * loop has lost its comparisons and list manipulation (only orphaned
 * braces, an "#ifdef lint" shell and a "break" remain), the write
 * issuing loop lost its calls to the crb/bufs writers, and the
 * error path that leads to "return (1)" has no visible condition.
 * bp and bp2 are used but never declared.  TODO: restore from the
 * revision named at the top of this file.
 */
int
{
/*
 * Order the buffers by blkno
 */
#ifdef lint
#endif
} else {
}
break;
}
}
}
}
/*
 * issue the in-order writes
 */
} else {
}
/* null out the rb_next link for next set of rolling */
}
/*
 * wait for all the writes to finish
 */
if (trans_not_wait(bp)) {
/* NOTE(review): dangling argument — the error-reporting call line
 * (presumably ldl_seterror()) is missing above this string */
"Error writing master during ufs log roll");
}
/*
 * Now wait for all the "cloned" buffer writes (if any)
 * and free those headers
 */
while (bp2) {
if (trans_not_wait(bp2)) {
"Error writing master during ufs log roll");
}
/* NOTE(review): the bp2 list-advance/free statements are missing,
 * so as written this loop would never terminate */
}
}
return (1);
return (0);
}
/*
 * NOTE(review): truncated definition — presumably trans_roll(), the
 * per-log kernel thread that rolls deltas from the log back to the
 * master file system.  The function-name/parameter line is missing,
 * as are: the CPR/callb setup (only the dangling "trans_roll" name
 * string remains), the TSD registration for snapshot-throttle
 * bypass, the buffer allocation and header-init loops, the mutex
 * handling, the "again:" and "rollsomething:" labels targeted by
 * the gotos, the switch statement that "case 2:" belongs to, and
 * the calls that roll/free deltas and log space.  nmblk, logmap and
 * ul are used but never declared.  TODO: restore from the revision
 * named at the top of this file.
 */
void
{
int i;
int doingforceroll;
int nbuf;
/* NOTE(review): dangling argument — presumably the tail of a
 * CALLB_CPR_INIT(..., "trans_roll") thread setup call */
"trans_roll");
/*
 * We do not want the roll thread's writes to be
 * throttled by the snapshot.
 * If they are throttled then we can have a deadlock
 * between the roll thread and the snapshot taskq thread:
 * roll thread wants the throttling semaphore and
 * the snapshot taskq thread cannot release the semaphore
 * because it is writing to the log and the log is full.
 */
/*
 * setup some roll parameters
 */
if (trans_roll_tics == 0)
nmblk = log_roll_buffers();
/*
 * allocate the buffers and buffer headers
 */
/*
 * initialize the buffer headers
 */
}
doingforceroll = 0;
/*
 * LOOP FOREVER
 */
/*
 * exit on demand
 */
thread_exit();
/* NOTREACHED */
}
/*
 * MT_SCAN debug mode
 * don't roll except in FORCEROLL situations
 */
goto again;
}
/*
 * If we've finished a force roll cycle then wakeup any
 * waiters.
 */
if (doingforceroll) {
doingforceroll = 0;
} else {
}
/*
 * If someone wants us to roll something; then do it
 */
doingforceroll = 1;
goto rollsomething;
}
/*
 * Log is busy, check if logmap is getting full.
 */
if (logmap_need_roll(logmap)) {
goto rollsomething;
}
/*
 * Check if the log is idle and is not empty
 */
goto rollsomething;
}
/*
 * Log is busy, check if it's getting full
 */
if (ldl_need_roll(ul)) {
goto rollsomething;
}
/*
 * nothing to do; wait a bit and then start over
 */
goto again;
/*
 * ROLL SOMETHING
 */
/*
 * Use the cached roll buffers, or read the master
 * and overlay the deltas
 */
/* FALLTHROUGH */
/* NOTE(review): orphaned case label — the enclosing switch on the
 * roll-read result is missing */
case 2: goto again;
/* default case is success */
}
/*
 * Asynchronously write out the deltas
 */
goto again;
/*
 * free up the deltas in the logmap
 */
}
/*
 * free up log space; if possible
 */
/*
 * LOOP
 */
goto again;
}