/* rdc_io.c revision 3270659f55e0928d6edec3d26217cc29398a8149 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifdef _SunOS_5_6
/*
* on 2.6 both dki_lock.h and rpc/types.h define bool_t, so we
* define _RPC_TYPES_H ourselves anyway and make it look like we
* included it. Yuck.
*/
#define _RPC_TYPES_H
typedef int enum_t;
#else
#ifndef DS_DDICT
#endif
#endif /* _SunOS_5_6 */
#include <sys/nsc_thread.h>
#include "rdc_io.h"
#include "rdc_bitmap.h"
#include "rdc_update.h"
#include "rdc_ioctl.h"
#include "rdcsrv.h"
#include "rdc_diskq.h"
/*
 * NOTE(review): presumably flags that the kernel RPC/network service is
 * shutting down — confirm against the rdcsrv usage before relying on it.
 */
volatile int net_exit;
#ifdef DEBUG
/* DEBUG-only tunables sizing the sync thread pool (see RDC threadset code) */
int RDC_MAX_SYNC_THREADS = 8;
int rdc_maxthreads_last = 8;
#endif
/* NOTE(review): lock scope not visible in this chunk — verify what it guards */
static kmutex_t net_blk_lock;
/*
* rdc_conf_lock is used as a global device configuration lock.
* the transition of an rdc set between configured and unconfigured is
* atomic.
*
* krdc->group->lock is used to protect state changes of a configured rdc
* set (e.g. changes to urdc->flags), such as enabled to disabled and vice
* versa.
*
* rdc_many_lock is also used to protect changes in group membership. A group
* linked list cannot change while this lock is held. The many list and the
* multi-hop list are both protected by rdc_many_lock.
*/
int rdc_debug = 0;		/* debug switch; 0 disables diagnostic output */
int rdc_debug_sleep = 0;	/* debug switch; presumably adds delays — confirm */
static int rdc_net_hnd_id = 1;	/* next network handle id to hand out */
extern kmutex_t rdc_clnt_lock;	/* defined in the RPC client code */
static void rdc_ditemsfree(rdc_net_dataset_t *);
void rdc_clnt_destroy(void);
unsigned long rdc_async_timeout;	/* async flush timeout; units not visible here */
int rdc_max_qitems = RDC_ASYNCTHR == RDC_ASYNCTHR ? RDC_MAX_QITEMS : RDC_MAX_QITEMS;
int rdc_asyncthr = RDC_ASYNCTHR;	/* number of async flusher threads (tunable) */
static nsc_svc_t *rdc_volume_update;	/* nsctl service token; see _rdc_unload() */
static int rdc_prealloc_handle = 1;	/* pre-allocate sync buffer handle (see _rdc_sync) */
/*
* Forward declare all statics that are used before defined
* to enforce parameter checking
*
* Some (if not all) of these could be removed if the code were reordered
*/
static void rdc_volume_update_svc(intptr_t);
void rdc_kstat_create(int index);
void rdc_kstat_delete(int index);
static int rdc_checkforbitmap(int, nsc_off_t);
static int rdc_installbitmap(int, void *, int, nsc_off_t, int, int *, int);
static rdc_group_t *rdc_newgroup();
/*
* RDC threadset tunables
*/
/*
* Private threadset manipulation variables
*/
static int rdc_threads_hysteresis = 2;
/* hysteresis for threadset resizing */
static int rdc_sets_active; /* number of sets currently enabled */
#ifdef DEBUG
#endif
/*
* rdc_thread_deconfigure - rdc is being deconfigured, stop any
* thread activity.
*
* Inherently single-threaded by the Solaris module unloading code.
*/
/*
 * Drop the module-global io/flush threadset references so no further
 * thread work is issued after deconfiguration.  The threadsets
 * themselves are presumably torn down elsewhere — confirm; only the
 * pointer clears are visible here.
 */
static void
rdc_thread_deconfigure(void)
{
_rdc_ioset = NULL;
_rdc_flset = NULL;
}
/*
* rdc_thread_configure - rdc is being configured, initialize the
* threads we need for flushing aync volumes.
*
* Must be called with rdc_conf_lock held.
*/
static int
rdc_thread_configure(void)
{
return (EINVAL);
return (EINVAL);
if ((sync_info.rdc_syncset =
return (EINVAL);
return (0);
}
/*
* rdc_thread_tune - called to tune the size of the rdc threadset.
*
* Called from the config code when an rdc_set has been enabled or disabled.
* 'sets' is the increment to the number of active rdc_sets.
*
* Must be called with rdc_conf_lock held.
*/
static void
rdc_thread_tune(int sets)
{
int change = 0;
int nthreads;
if (sets < 0)
while (sets--) {
rdc_sets_active += incr;
if (rdc_sets_active >= nthreads)
else if ((rdc_sets_active <
}
#ifdef DEBUG
if (change) {
"nsets %d, nthreads %d, nthreads change %d",
}
#endif
}
/*
* _rdc_unload() - cache is being unloaded,
* deallocate any dual copy structures allocated during cache
* loading.
*/
void
_rdc_unload(void)
{
int i;
if (rdc_volume_update) {
(void) nsc_unregister_svc(rdc_volume_update);
}
if (rdc_k_info != NULL) {
for (i = 0; i < rdc_max_sets; i++) {
krdc = &rdc_k_info[i];
}
}
#ifdef DEBUG
#endif
if (rdc_k_info != NULL)
if (rdc_u_info != NULL)
rdc_k_info = NULL;
rdc_u_info = NULL;
rdc_max_sets = 0;
}
/*
* _rdc_load() - rdc is being loaded, Allocate anything
* that will be needed while the cache is loaded but doesn't really
* depend on configuration parameters.
*
*/
int
_rdc_load(void)
{
int i;
#ifdef DEBUG
#endif
if ((i = nsc_max_devices()) < rdc_max_sets)
rdc_max_sets = i;
/* following case for partial installs that may fail */
if (!rdc_max_sets)
rdc_max_sets = 1024;
if (!rdc_k_info)
return (ENOMEM);
if (!rdc_u_info) {
return (ENOMEM);
}
for (i = 0; i < rdc_max_sets; i++) {
krdc = &rdc_k_info[i];
}
return (0);
}
static void
{
}
/*
* _rdc_configure() - cache is being configured.
*
* Initialize dual copy structures
*/
int
_rdc_configure(void)
{
int index;
krdc->bitmap_size = 0;
krdc->bitmap_write = 0;
krdc->disk_status = 0;
}
return (0);
}
return (0);
}
/*
* _rdc_deconfigure - rdc is being deconfigured, shut down any
* dual copy operations and return to an unconfigured state.
*/
void
_rdc_deconfigure(void)
{
int index;
krdc->bitmap_size = 0;
krdc->bitmap_write = 0;
krdc->disk_status = 0;
}
}
/*
* Lock primitives, containing checks that lock ordering isn't broken
*/
/*ARGSUSED*/
void
{
}
/* ARGSUSED */
void
{
}
void
{
}
void
{
}
/*
* Suspend and disable operations use this function to wait until it is safe
* to do continue, without trashing data structures used by other ioctls.
*/
static void
{
while (krdc->busy_count > 0)
}
/*
* Other ioctls use this function to hold off disable and suspend.
*/
void
{
krdc->busy_count++;
}
/*
* Other ioctls use this function to allow disable and suspend to continue.
*/
void
{
if (krdc->busy_count <= 0)
return;
krdc->busy_count--;
}
/*
* Remove the rdc set from its group, and destroy the group if no longer in
* use.
*/
static void
{
rdc_k_info_t *p;
/*
* lock queue while looking at thrnum
*/
/*
* Assure the we've stopped and the flusher thread has not
* fallen back to sleep
*/
delay(2);
}
}
return;
}
/*
* Always clear the group field.
* no, you need it set in rdc_flush_memq().
* to call rdc_group_log()
* krdc->group = NULL;
*/
/* Take this rdc structure off the group list */
;
}
/*
* Add the rdc set to its group, setting up a new group if it's the first one.
*/
static int
{
int index;
int rc = 0;
/*
* Look for matching group name, primary host name and secondary
* host name.
*/
if (urdc->group_name[0] == 0)
break;
if (!IS_CONFIGURED(ktmp))
continue;
NSC_MAXPATH) != 0)
continue;
MAX_RDC_HOST_SIZE) != 0) {
/* Same group name, different primary interface */
return (-1);
}
MAX_RDC_HOST_SIZE) != 0) {
/* Same group name, different secondary interface */
return (-1);
}
/* Group already exists, so add this set to the group */
if (((options & RDC_OPT_ASYNC) == 0) &&
/* Must be same mode as existing group members */
return (-1);
}
if (((options & RDC_OPT_ASYNC) != 0) &&
/* Must be same mode as existing group members */
return (-1);
}
/* cannont reconfigure existing group into new queue this way */
if ((cmd != RDC_CMD_RESUME) &&
return (RDC_EQNOADD);
}
return (0);
}
/* This must be a new group */
group = rdc_newgroup();
/*
* Tune the thread set by one for each thread created
*/
rdc_thread_tune(1);
rc = -1;
goto fail;
}
} else {
/* XXX check here for resume or enable and act accordingly */
if (cmd == RDC_CMD_RESUME) {
} else if (cmd == RDC_CMD_ENABLE) {
" enabling memory queue",
urdc->disk_queue);
}
}
}
fail:
return (rc);
}
/*
* Move the set to a new group if possible
*/
static int
{
char tmpq[NSC_MAXPATH];
int index;
int rc = -1;
/*
* Look for matching group name, primary host name and secondary
* host name.
*/
}
continue;
if (urdc->group_name[0] == 0)
break;
if (!IS_CONFIGURED(ktmp))
continue;
NSC_MAXPATH) != 0)
continue;
MAX_RDC_HOST_SIZE) != 0)
goto bad;
MAX_RDC_HOST_SIZE) != 0)
goto bad;
/* Group already exists, so add this set to the group */
if (((options & RDC_OPT_ASYNC) == 0) &&
/* Must be same mode as existing group members */
goto bad;
}
if (((options & RDC_OPT_ASYNC) != 0) &&
/* Must be same mode as existing group members */
goto bad;
}
goto good;
}
/* This must be a new group */
group = rdc_newgroup();
rc = -1;
goto bad;
}
if (urdc->disk_queue[0] == 0) {
} else {
goto bad;
}
good:
if (options & RDC_OPT_ASYNC) {
} else {
}
/* Group now empty, so destroy */
if (RDC_IS_DISKQ(old_group)) {
}
/*
* Assure the we've stopped and the flusher thread has not
* fallen back to sleep
*/
delay(2);
}
}
return (0);
}
/* Take this rdc structure off the old group list */
;
return (0);
bad:
/* Leave existing group status alone */
return (rc);
}
/*
* Set flags for an rdc set, setting the group flags as necessary.
*/
void
{
if (vflags) {
/* normal volume flags */
if (ssflags)
if (ssflags)
}
if (sflags) {
/* Sync state flags that are protected by a different lock */
}
if (bflags) {
/* Bmap state flags that are protected by a different lock */
}
}
/*
* Clear flags for an rdc set, clearing the group flags as necessary.
*/
void
{
if (vflags) {
/* normal volume flags */
}
if (sflags) {
/* Sync state flags that are protected by a different lock */
}
if (bflags) {
/* Bmap state flags that are protected by a different lock */
}
}
/*
* Get the flags for an rdc set.
*/
int
{
}
/*
* Initialise flags for an rdc set.
*/
static void
{
urdc->sync_flags = 0;
urdc->bmap_flags = 0;
}
/*
* Set flags for a many group.
*/
void
{
if (flags == 0)
return;
if (!IS_ENABLED(urdc))
continue;
}
}
/*
* Clear flags for a many group.
*/
void
{
if (flags == 0)
return;
/*
* We must maintain the mflags based on the set of flags for
* all the urdc's that are chained up.
*/
/*
* First look through all the urdc's and remove bits from
* the 'flags' variable that are in use elsewhere.
*/
if (!IS_ENABLED(utmp))
continue;
if (flags == 0)
break;
}
/*
* Now clear flags as necessary.
*/
if (flags != 0) {
if (!IS_ENABLED(utmp))
continue;
}
}
}
int
{
}
void
{
return;
if (flags & RDC_LOGGING)
if (flags & RDC_VOL_FAILED)
if (flags & RDC_BMP_FAILED)
}
/*
* rdc_lor(source, dest, len)
* logically OR memory pointed to by source and dest, copying result into dest.
*/
void
{
int i;
return;
for (i = 0; i < len; i++)
}
static int
{
int status;
if (status) {
return (RDC_EGETSIZE);
}
(void) spcs_s_inttostring(
/*
* Cheat, and covert to int, until we have
* spcs_s_unsignedlonginttostring().
*/
status = (int)remote_size;
return (RDC_ESIZE);
}
return (0);
}
static void
{
struct net_bdata6 bd;
int index;
int rc;
#ifdef DEBUG_IIUPDATE
#endif
/* don't understand what the client intends to do */
return;
}
if (index < 0)
return;
/*
* warn II that this volume is in use by sndr so
* II can validate the sizes of the master vs shadow
* and avoid trouble later down the line with
* size mis-matches between urdc->volume_size and
* what is returned from nsc_partsize() which may
* be the size of the master when replicating the shadow
*/
if (index >= 0)
return;
}
do {
#ifdef DEBUG_IIUPDATE
#endif
return;
}
/* 1->many - all must be logging */
if (!IS_ENABLED(urdc))
continue;
break;
}
}
#ifdef DEBUG_IIUPDATE
#endif
do {
return;
}
if (!IS_ENABLED(urdc))
continue;
break;
}
}
/* II (or something else) has updated us, so no need for a sync */
}
if (krdc->bitmap_write > 0)
(void) rdc_write_bitmap(krdc);
}
/*
* rdc_check()
*
* Return 0 if the set is configured, enabled and the supplied
* addressing information matches the in-kernel config, otherwise
* return 1.
*/
static int
{
if (!IS_ENABLED(urdc))
return (1);
NSC_MAXPATH) != 0) {
#ifdef DEBUG
#endif
return (1);
}
#ifdef DEBUG
#endif
return (1);
}
NSC_MAXPATH) != 0) {
#ifdef DEBUG
#endif
return (1);
}
#ifdef DEBUG
#endif
return (1);
}
return (0);
}
/*
* Lookup enabled sets for a bitmap match
*/
int
rdc_lookup_bitmap(char *pathname)
{
#ifdef DEBUG
#endif
int index;
#ifdef DEBUG
#endif
if (!IS_ENABLED(urdc))
continue;
NSC_MAXPATH) == 0)
return (index);
} else {
NSC_MAXPATH) == 0)
return (index);
}
}
return (-1);
}
/*
* Translate a pathname to index into rdc_k_info[].
* Returns first match that is enabled.
*/
int
{
int index;
if (!IS_ENABLED(urdc))
continue;
continue;
NSC_MAXPATH) == 0)
return (index);
} else {
NSC_MAXPATH) == 0)
return (index);
}
}
if (allow_disabling == 0) {
/* None found, or only a disabling one found, so try again */
allow_disabling = 1;
goto restart;
}
return (-1);
}
/*
* Translate a pathname to index into rdc_k_info[].
* Returns first match that is configured.
*
* Used by enable & resume code.
* Must be called with rdc_conf_lock held.
*/
int
rdc_lookup_configured(char *pathname)
{
int index;
if (!IS_CONFIGURED(krdc))
continue;
NSC_MAXPATH) == 0)
return (index);
} else {
NSC_MAXPATH) == 0)
return (index);
}
}
return (-1);
}
/*
* Looks up a configured set with matching secondary interface:volume
* to check for illegal many-to-one volume configs. To be used during
* enable and resume processing.
*
* Must be called with rdc_conf_lock held.
*/
static int
{
int index;
if (!IS_CONFIGURED(krdc))
continue;
continue;
continue;
break;
}
if (index < rdc_max_sets)
return (index);
else
return (-1);
}
/*
* Looks up an rdc set to check if it is already configured, to be used from
* functions called from the config ioctl where the interface names can be
* used for comparison.
*
* Must be called with rdc_conf_lock held.
*/
int
{
int index;
if (!IS_CONFIGURED(krdc))
continue;
NSC_MAXPATH) != 0)
continue;
MAX_RDC_HOST_SIZE) != 0)
continue;
NSC_MAXPATH) != 0)
continue;
MAX_RDC_HOST_SIZE) != 0)
continue;
break;
}
if (index < rdc_max_sets)
return (index);
else
return (-1);
}
/*
* Looks up a secondary hostname and device, to be used from
* functions called from the config ioctl where the interface names can be
* used for comparison.
*
* Must be called with rdc_conf_lock held.
*/
int
{
int index;
if (!IS_CONFIGURED(krdc))
continue;
NSC_MAXPATH) != 0)
continue;
MAX_RDC_HOST_SIZE) != 0)
continue;
break;
}
if (index < rdc_max_sets)
return (index);
else
return (-1);
}
/*
* Looks up an rdc set to see if it is currently enabled, to be used on the
* server so that the interface addresses must be used for comparison, as
* the interface names may differ from those used on the client.
*
*/
int
{
#ifdef DEBUG
#endif
int index;
#ifdef DEBUG
#endif
if (!IS_ENABLED(urdc))
continue;
continue;
continue;
continue;
}
continue;
}
break;
}
if (index < rdc_max_sets)
return (index);
else
return (-1);
}
/*
* Return index of first multihop or 1-to-many
* Behavior controlled by setting ismany.
* ismany TRUE (one-to-many)
* ismany FALSE (multihops)
*
*/
static int
{
char *pathname;
int index;
int role;
/* this host is the primary of the krdc set */
if (ismany) {
/*
* 1-many sets are linked by primary :
* look for matching primary on this host
*/
role = RDC_PRIMARY;
} else {
/*
* multihop sets link primary to secondary :
* look for matching secondary on this host
*/
role = 0;
}
} else {
/* this host is the secondary of the krdc set */
if (ismany) {
/*
* 1-many sets are linked by primary, so if
* this host is the secondary of the set this
* cannot require 1-many linkage.
*/
return (-1);
} else {
/*
* multihop sets link primary to secondary :
* look for matching primary on this host
*/
role = RDC_PRIMARY;
}
}
if (!IS_CONFIGURED(ktmp)) {
continue;
}
if (role == RDC_PRIMARY) {
/*
* Find a primary that is this host and is not
* krdc but shares the same data volume as krdc.
*/
break;
}
} else {
/*
* Find a secondary that is this host and is not
* krdc but shares the same data volume as krdc.
*/
break;
}
}
}
if (index < rdc_max_sets)
return (index);
else
return (-1);
}
/*
* Returns secondary match that is configured.
*
* Used by enable & resume code.
* Must be called with rdc_conf_lock held.
*/
static int
rdc_lookup_secondary(char *pathname)
{
int index;
if (!IS_CONFIGURED(krdc))
continue;
NSC_MAXPATH) == 0)
return (index);
}
}
return (-1);
}
static nsc_fd_t *
{
int rc;
}
static void
{
urdc->direct_file[0] = 0;
}
}
}
#ifdef DEBUG_MANY
static void
{
rdc_k_info_t *p = start;
do {
p->multi_next);
delay(10);
p = p->many_next;
q = &rdc_u_info[p->index];
} while (p && p != start);
}
#endif /* DEBUG_MANY */
static int
{
int mindex;
int domulti;
/* Now find companion krdc */
#ifdef DEBUG_MANY
"!add_to_multi: lookup_multimany: mindex %d prim %s sec %s",
#endif
if (mindex >= 0) {
domulti = 1;
/*
* We are adding a new primary to a many
* group that is the target of a multihop, just
* ignore it since we are linked in elsewhere.
*/
domulti = 0;
}
if (domulti) {
/* Is previous leg using direct file I/O? */
if (utmp->direct_file[0] != 0) {
/* It is, so cannot proceed */
return (-1);
}
} else {
/* Is this leg using direct file I/O? */
if (urdc->direct_file[0] != 0) {
/* It is, so cannot proceed */
return (-1);
}
}
}
} else {
#ifdef DEBUG_MANY
#endif
}
return (0);
}
/*
* Add a new set to the circular list of 1-to-many primaries and chain
* up any multihop as well.
*/
static int
{
int oindex;
if (add_to_multi(krdc) < 0) {
return (-1);
}
if (oindex < 0) {
#ifdef DEBUG_MANY
#endif
return (0);
}
#ifdef DEBUG_MANY
#endif
#ifdef DEBUG_MANY
#endif
return (0);
}
/*
* Remove a set from the circular list of 1-to-many primaries.
*/
static void
{
rdc_k_info_t *p, *q;
#ifdef DEBUG_MANY
#endif
/* remove from multihop */
q->multi_next = NULL;
}
return;
}
/* search */
;
/*
* old was part of a multihop, so switch multi pointers
* to someone remaining on the many chain
*/
q->multi_next = p;
p->multi_next = q;
}
#ifdef DEBUG_MANY
if (p == old) {
} else {
print_many(p);
}
#endif
}
static int
{
int index;
char *rhost;
char *local_file;
char *local_bitmap;
char *diskq;
int rc;
return (RDC_EEMPTY);
}
/* Next check there aren't any enabled rdc sets which match. */
if (rdc_lookup_byname(rdc_set) >= 0) {
return (RDC_EENABLED);
}
if (rdc_lookup_many2one(rdc_set) >= 0) {
return (RDC_EMANY2ONE);
}
return (RDC_ENETCONFIG);
}
return (RDC_ENETBUF);
}
return (RDC_ENETBUF);
}
/* Check that the local data volume isn't in use as a bitmap */
if (options & RDC_OPT_PRIMARY)
else
if (rdc_lookup_bitmap(local_file) >= 0) {
return (RDC_EVOLINUSE);
}
/* check that the secondary data volume isn't in use */
if (!(options & RDC_OPT_PRIMARY)) {
if (rdc_lookup_secondary(local_file) >= 0) {
return (RDC_EVOLINUSE);
}
}
/* check that the local data vol is not in use as a diskqueue */
if (options & RDC_OPT_PRIMARY) {
return (RDC_EVOLINUSE);
}
}
/* Check that the bitmap isn't in use as a data volume */
if (options & RDC_OPT_PRIMARY)
else
if (rdc_lookup_configured(local_bitmap) >= 0) {
return (RDC_EBMPINUSE);
}
/* Check that the bitmap isn't already in use as a bitmap */
if (rdc_lookup_bitmap(local_bitmap) >= 0) {
return (RDC_EBMPINUSE);
}
/* check that the diskq (if here) is not in use */
return (RDC_EDISKQINUSE);
}
/* Set urdc->volume_size */
if (index < 0) {
if (options & RDC_OPT_PRIMARY)
else
return (RDC_EOPEN);
}
/* copy relevant parts of rdc_set to urdc field by field */
/*
* before we try to add to group, or create one, check out
* if we are doing the wrong thing with the diskq
*/
return (RDC_EQWRONGMODE);
}
if (rc == RDC_EQNOADD) {
return (RDC_EQNOADD);
} else {
return (RDC_EGROUP);
}
}
/*
* maxfbas was set in rdc_dev_open as primary's maxfbas.
* If diskq's maxfbas is smaller, then use diskq's.
*/
if (RDC_SUCCESS(rc)) {
if (rc == 0) {
#ifdef DEBUG
"!_rdc_enable: diskq maxfbas = %"
NSC_SZFMT ", primary maxfbas = %"
#endif
} else {
"!_rdc_enable: diskq maxfbas failed (%d)",
rc);
}
} else {
"!_rdc_enable: diskq reserve failed (%d)", rc);
}
}
}
if (options & RDC_OPT_PRIMARY)
if (options & RDC_OPT_ASYNC)
if (add_to_many(krdc) < 0) {
rc = RDC_EMULTI;
goto fail;
}
/* Configured but not enabled */
/* Configured but not enabled */
/*
* The rdc set is configured but not yet enabled. Other operations must
* ignore this set until it is enabled.
*/
else
else
else
/* Still unknown */
else
}
if (options & RDC_OPT_PRIMARY) {
} else {
}
if (options & RDC_OPT_ASYNC)
goto fail;
}
/* Don't set krdc->intf here */
goto bmpfail;
else {
#ifdef DEBUG
#endif
}
/* Configured but not enabled */
/* And finally */
/* Should we set the whole group logging? */
if (rdc_intercept(krdc) != 0) {
if (options & RDC_OPT_PRIMARY)
else
#ifdef DEBUG
#endif
rc = RDC_EREGISTER;
goto bmpfail;
}
#ifdef DEBUG
#endif
return (0);
if (options & RDC_OPT_PRIMARY)
else
rc = RDC_EBITMAP;
(void) rdc_unintercept(krdc);
}
fail:
}
/* Configured but not enabled */
/* Configured but not enabled */
/* Configured but not enabled */
return (rc);
}
static int
{
int rc;
char itmp[10];
rc = RDC_EEINVAL;
(void) spcs_s_inttostring(
goto done;
}
rc = RDC_EEINVAL;
(void) spcs_s_inttostring(
goto done;
}
rc = RDC_EEINVAL;
(void) spcs_s_inttostring(
goto done;
}
done:
return (rc);
}
/* ARGSUSED */
static int
{
disk_queue *q;
#ifdef DEBUG
#else
return (RDC_EALREADY);
}
#endif
}
return (RDC_EQNOTEMPTY);
}
(void) rdc_unintercept(krdc);
#ifdef DEBUG
#endif
/* Configured but not enabled */
/*
* No new io can come in through the io provider.
* Wait for the async flusher to finish.
*/
#ifdef DEBUG
#endif
do {
/* ok, force it to happen... */
do {
"!SNDR: async I/O pending and not flushed "
"for %s during disable",
#ifdef DEBUG
NSC_SZFMT " head: 0x%p tail: 0x%p",
#endif
}
}
if (ip) {
}
/* Configured but not enabled */
/* Must not hold group lock during this function */
delay(2);
(void) rdc_clear_state(krdc);
/* Configured but not enabled */
/*
* we should now unregister the queue, with no conflicting
* locks held. This is the last(only) member of the group
*/
}
/* Configured but not enabled */
#ifdef DEBUG
if (krdc->dcio_bitmap)
"dcio_bitmap");
#endif
krdc->bitmap_size = 0;
krdc->bitmap_write = 0;
krdc->disk_status = 0;
return (0);
}
static int
{
int index;
int rc;
if (index >= 0)
return (RDC_EALREADY);
}
/* A resume or enable failed */
return (RDC_EALREADY);
}
return (rc);
}
/*
* Checks whether the state of one of the other sets in the 1-many or
* multi-hop config should prevent a sync from starting on this one.
* Return NULL if no just cause or impediment is found, otherwise return
* a pointer to the offending set.
*/
static rdc_u_info_t *
{
/*
* In the reverse sync case we need to check the previous leg of
* the multi-hop config. The link to that set can be from any of
* the 1-many list, so as we go through we keep an eye open for it.
*/
/* This set links to the first leg */
if (IS_ENABLED(utmp))
}
if (!IS_ENABLED(utmp))
continue;
if (options & RDC_OPT_FORWARD) {
/*
* Reverse sync needed is bad, as it means a
* reverse sync in progress or started and
* didn't complete, so this primary volume
* is not consistent. So we shouldn't copy
* it to its secondary.
*/
return (utmp);
}
} else {
/* Reverse, so see if we need to spot kmulti */
/* This set links to the first leg */
if (!IS_ENABLED(
}
/*
* Non-logging is bad, as the bitmap will
* be updated with the bits for this sync.
*/
return (utmp);
}
}
}
}
if (kmulti) {
if (IS_REPLICATING(utmp)) {
/*
* Replicating is bad as data is already flowing to
* the target of the requested sync operation.
*/
return (utmp);
}
/*
* Forward sync in progress is bad, as data is
* already flowing to the target of the requested
* sync operation.
* Reverse sync in progress is bad, as the primary
* has already decided which data to copy.
*/
return (utmp);
}
/*
* Clear the "sync needed" flags, as the multi-hop secondary
* will be updated via this requested sync operation, so does
* not need to complete its aborted forward sync.
*/
}
if (!IS_ENABLED(utmp))
continue;
/*
* Clear any "reverse sync needed" flags, as the
* volume will be updated via this requested
* sync operation, so does not need to complete
* its aborted reverse sync.
*/
}
}
return (NULL);
}
static void
_rdc_sync_wrthr(void *thrinfo)
{
int rc;
int tries = 0;
goto failed;
}
goto failed;
}
/*
* The following is to handle
* the case where the secondary side
* has thrown our buffer handle token away in a
* attempt to preserve its health on restart
*/
(void) nsc_free_buf(handle);
tries++;
goto retry;
}
goto failed;
}
(void) nsc_free_buf(handle);
return;
(void) nsc_free_buf(handle);
}
/*
* see above comments on _rdc_sync_wrthr
*/
static void
_rdc_sync_rdthr(void *thrinfo)
{
int rc;
goto failed;
}
goto failed;
}
if (!RDC_SUCCESS(rc)) {
goto failed;
}
if (!RDC_SUCCESS(rc)) {
goto failed;
}
(void) nsc_free_buf(handle);
return;
(void) nsc_free_buf(handle);
}
/*
* _rdc_sync_wrthr
* sync loop write thread
* if there are avail threads, we have not
* used up the pipe, so the sync loop will, if
*/
void
_rdc_sync_thread(void *thrinfo)
{
int rc;
if (!RDC_SUCCESS(rc))
goto failed;
else
#ifdef DEBUG
#else
/*EMPTY*/
#endif
}
/*
* done with this, get rid of it.
* the status is not freed, it should still be a status chain
* that _rdc_sync() has the head of
*/
/*
* decrement the global sync thread num
*/
/* LINTED */
/*
* krdc specific stuff
*/
}
int
{
/* alloc here, free in the sync thread */
tmp =
return (-1);
return (0);
}
{
sync_status_t *s;
s->offset = -1;
return (s);
}
void
{
sync_status_t *s;
while (status) {
status = s;
}
}
int
{
#ifdef DEBUG_SYNCSTATUS
int i = 0;
#endif
while (status) {
return (-1);
}
#ifdef DEBUG_SYNCSTATUS
i++;
#endif
}
#ifdef DEBUGSYNCSTATUS
#endif
return (0);
}
/* tunable: non-zero presumably enables multi-threaded sync in _rdc_sync() — confirm */
int mtsync = 1;
/*
* _rdc_sync() : rdc sync loop
*
*/
static void
{
nsc_size_t size = 0;
int rtype;
int sts;
int reserved = 0;
int sync_completed = 0;
int tries = 0;
int rc;
int queuing = 0;
/* flusher is handling the sync in the update case */
queuing = 1;
goto sync_done;
}
/*
*/
if (sts != 0)
goto failed_noincr;
reserved = 1;
/*
* pre-allocate a handle if we can - speeds up the sync.
*/
if (rdc_prealloc_handle) {
#ifdef DEBUG
if (!alloc_h) {
"!rdc sync: failed to pre-alloc handle");
}
#endif
} else {
}
/*
* as this while loop can also move data, it is counted as a
* sync loop thread
*/
/* LINTED */
} else {
}
goto failed; /* halt sync */
}
len = 0;
/* skip unnecessary chunks */
krdc->syncbitpos++;
}
/* check for boundary */
goto sync_done;
}
/* find maximal length we can transfer */
krdc->syncbitpos++;
/* we can only read maxfbas anyways */
break;
}
} else {
}
/* truncate to the io provider limit */
/*
* If the update is larger than a bitmap chunk,
* then truncate to a whole number of bitmap
* chunks.
*
* If the update is smaller than a bitmap
* chunk, this must be the last write.
*/
}
}
/*
* Find out if we can reserve a thread here ...
* note: skip the mutex for the first check, if the number
* is up there, why bother even grabbing the mutex to
* only realize that we can't have a thread anyways
*/
if (sync_status == NULL) {
ss = sync_status =
} else {
}
#ifdef DEBUG
"allocate status for mt sync");
#endif
goto retry;
}
/*
* syncinfo protected by sync_info lock but
* not part of the sync_info structure
* be careful if moving
*/
if (_rdc_setup_syncthr(&syncinfo,
}
#ifdef DEBUG
"mt sync");
#endif
goto retry;
} else {
}
/* LINTED */
goto threaded;
}
}
else
if (sts > 0) {
(void) nsc_free_buf(handle);
}
goto failed;
}
/* overwrite buffer with remote data */
if (!RDC_SUCCESS(sts)) {
#ifdef DEBUG
"!rdc sync: remote read failed (%d)", sts);
#endif
goto failed;
}
/* commit locally */
if (!RDC_SUCCESS(sts)) {
/* reverse sync needed already set */
"write failed during sync");
goto failed;
}
} else {
/* send local data to remote */
/*
* The following is to handle
* the case where the secondary side
* has thrown our buffer handle token away in a
* attempt to preserve its health on restart
*/
(void) nsc_free_buf(handle);
tries++;
goto retry;
}
#ifdef DEBUG
"!rdc sync: remote write failed (%d) 0x%x",
#endif
goto failed;
}
}
(void) nsc_free_buf(handle);
#ifdef DEBUG
#else
;
/*EMPTY*/
#endif
} else {
}
/*
*/
if (alloc_h) {
(void) nsc_free_handle(alloc_h);
}
reserved = 0;
delay(2);
if (sts != 0) {
goto failed;
}
reserved = 1;
if (rdc_prealloc_handle) {
#ifdef DEBUG
if (!alloc_h) {
"failed to pre-alloc handle");
}
#endif
}
}
}
sync_completed = 1;
}
/*
* if sync_completed is 0 here,
* we know that the main sync thread failed anyway
* so just free the statuses and fail
*/
sync_completed = 0; /* at least 1 thread failed */
}
/*
* we didn't increment, we didn't even sync,
* so don't dec sync_info.active_thr
*/
if (!queuing) {
/* LINTED */
}
if (handle) {
(void) nsc_free_buf(handle);
}
if (alloc_h) {
(void) nsc_free_handle(alloc_h);
}
if (reserved) {
}
if (sync_completed) {
} else {
}
}
if (sync_completed) {
} else {
}
if (sync_completed)
else
}
static int
{
int rc = 0;
int busy = 0;
int index;
int sync_completed = 0;
int thrcount;
if (index >= 0)
rc = RDC_EALREADY;
goto notstarted;
}
busy = 1;
/* A resume or enable failed or we raced with a teardown */
rc = RDC_EALREADY;
goto notstarted;
}
goto notstarted_unlock;
}
rc = RDC_EALREADY;
goto notstarted_unlock;
}
goto notstarted_unlock;
}
/*
* cannot reverse sync when queuing, need to go logging first
*/
rc = RDC_EQNORSYNC;
goto notstarted_unlock;
}
rc = RDC_EADDTOIF;
goto notstarted_unlock;
}
if (urdc->volume_size == 0) {
/* Implies reserve failed when previous resume was done */
}
if (urdc->volume_size == 0) {
rc = RDC_ENOBMAP;
goto notstarted_unlock;
}
if (rdc_resume_bitmap(krdc) < 0) {
rc = RDC_ENOBMAP;
goto notstarted_unlock;
}
}
if (rdc_reset_bitmap(krdc)) {
rc = RDC_EBITMAP;
goto notstarted_unlock;
}
}
rc = RDC_ESTATE;
goto notstarted_unlock;
}
}
/*
* there is a small window where _rdc_sync is still
* running, but has cleared the RDC_SYNCING flag.
* Use aux_state which is only cleared
* after _rdc_sync had done its 'death' broadcast.
*/
#ifdef DEBUG
"RDC_AUXSYNCIP set, SYNCING off");
}
#endif
rc = RDC_ESYNCING;
goto notstarted_unlock;
}
rc = RDC_ESYNCING;
goto notstarted_unlock;
}
if ((options & RDC_OPT_FORWARD) &&
/* cannot forward sync if a reverse sync is needed */
goto notstarted_unlock;
}
/* Check if the rdc set is accessible on the remote node */
/*
* Remote end may be inaccessible, or the rdc set is not
* enabled at the remote end.
*/
rc = RDC_ECONNOPEN;
goto notstarted_unlock;
}
if (options & RDC_OPT_REVERSE)
else
if (krdc->remote_index < 0) {
/*
* Remote note probably not in a valid state to be synced,
* as the state was fetched OK above.
*/
rc = RDC_ERSTATE;
goto notstarted_unlock;
}
if (rc != 0) {
goto notstarted_unlock;
}
if (options & RDC_OPT_REVERSE) {
/* LINTED */
(RDC_VOL_FAILED | RDC_SYNC_NEEDED))) {
}
}
} else {
}
if (options & RDC_OPT_UPDATE) {
if (rdc_net_getbmap(index,
rc = RDC_ENOBMAP;
if (options & RDC_OPT_REVERSE) {
}
"failed to read remote bitmap");
goto failed;
}
} else {
/*
* This is a full sync (not an update sync), mark the
* entire bitmap dirty
*/
}
/*
* allow diskq->memq flusher to wake up
*/
/*
* if this is a full sync on a non-diskq set or
* a diskq set that has failed, clear the async flag
*/
if ((!(options & RDC_OPT_UPDATE)) ||
/* full syncs, or core queue are synchronous */
}
/*
* if the queue failed because it was full, lets see
* if we can restart it. After _rdc_sync() is done
* the modes will switch and we will begin disk
* queuing again. NOTE: this should only be called
* once per group, as it clears state for all group
* members, also clears the async flag for all members
*/
} else {
/* don't add insult to injury by flushing a dead queue */
/*
* if we are updating, and a diskq and
* the async thread isn't active, start
* it up.
*/
if ((options & RDC_OPT_UPDATE) &&
while ((thrcount-- > 0) &&
}
}
}
}
/*
* For a reverse sync, merge the current bitmap with all other sets
* that share this volume.
*/
if (options & RDC_OPT_REVERSE) {
if (!IS_ENABLED(umany))
continue;
/* May merge more than once */
goto retry_many;
}
}
}
if (IS_ENABLED(umulti)) {
goto retry_multi;
}
}
}
}
if (krdc->bitmap_write == 0) {
if (rdc_write_bitmap_fill(krdc) >= 0)
}
if (krdc->bitmap_write > 0)
(void) rdc_write_bitmap(krdc);
if (options & RDC_OPT_REVERSE) {
(void) _rdc_sync_event_notify(RDC_SYNC_START,
}
/* Now set off the sync itself */
if (nsc_create_process(
/*
* We used to just return here,
* but we need to clear the AUXSYNCIP bit
* and there is a very small chance that
* someone may be waiting on the disk_status flag.
*/
rc = RDC_ENOPROC;
/*
* need the group lock held at failed.
*/
goto failed;
}
busy = 0;
char siztmp1[16];
(void) spcs_s_inttostring(
0);
} else
sync_completed = 1;
/*
* We use this flag now to make halt_sync() wait for
* us to terminate and let us take the group lock.
*/
krdc->disk_status = 0;
}
(void) _rdc_sync_event_notify(RDC_SYNC_DONE,
}
if (busy) {
}
return (rc);
}
/* ARGSUSED */
static int
{
#ifdef DEBUG
#else
return (RDC_EALREADY);
}
#endif
}
(void) rdc_unintercept(krdc);
#ifdef DEBUG
#endif
/* Configured but not enabled */
#ifdef DEBUG
#endif
do {
/* ok, force it to happen... */
do {
"!SNDR: async I/O pending and not flushed "
"for %s during suspend",
#ifdef DEBUG
NSC_SZFMT " head: 0x%p tail: 0x%p",
#endif
}
}
if (ip) {
}
/* Configured but not enabled */
/* Must not hold group lock during this function */
delay(2);
/* Don't rdc_clear_state, unlike _rdc_disable */
/* Configured but not enabled */
/*
* we should now unregister the queue, with no conflicting
* locks held. This is the last(only) member of the group
*/
}
/* Configured but not enabled */
#ifdef DEBUG
if (krdc->dcio_bitmap)
"dcio_bitmap");
#endif
krdc->bitmap_size = 0;
krdc->bitmap_write = 0;
krdc->disk_status = 0;
return (0);
}
static int
{
int index;
int rc;
if (index >= 0)
return (RDC_EALREADY);
}
/* A resume or enable failed */
return (RDC_EALREADY);
}
return (rc);
}
static int
{
int index;
char *rhost;
char *local_file;
char *local_bitmap;
return (RDC_EEMPTY);
}
/* Next check there aren't any enabled rdc sets which match. */
if (rdc_lookup_byname(rdc_set) >= 0) {
return (RDC_EENABLED);
}
if (rdc_lookup_many2one(rdc_set) >= 0) {
return (RDC_EMANY2ONE);
}
return (RDC_ENETCONFIG);
}
return (RDC_ENETBUF);
}
return (RDC_ENETBUF);
}
/* Check that the local data volume isn't in use as a bitmap */
if (options & RDC_OPT_PRIMARY)
else
if (rdc_lookup_bitmap(local_file) >= 0) {
return (RDC_EVOLINUSE);
}
/* check that the secondary data volume isn't in use */
if (!(options & RDC_OPT_PRIMARY)) {
if (rdc_lookup_secondary(local_file) >= 0) {
return (RDC_EVOLINUSE);
}
}
/* Check that the bitmap isn't in use as a data volume */
if (options & RDC_OPT_PRIMARY)
else
if (rdc_lookup_configured(local_bitmap) >= 0) {
return (RDC_EBMPINUSE);
}
/* Check that the bitmap isn't already in use as a bitmap */
if (rdc_lookup_bitmap(local_bitmap) >= 0) {
return (RDC_EBMPINUSE);
}
/* Set urdc->volume_size */
if (index < 0) {
if (options & RDC_OPT_PRIMARY)
else
return (RDC_EOPEN);
}
/* copy relevant parts of rdc_set to urdc field by field */
return (RDC_EQWRONGMODE);
}
/*
* init flags now so that state left by failures in add_to_group()
* are preserved.
*/
/* don't return a failure here, continue with resume */
} else { /* some other group add failure */
return (RDC_EGROUP);
}
}
/*
* maxfbas was set in rdc_dev_open as primary's maxfbas.
* If diskq's maxfbas is smaller, then use diskq's.
*/
if (RDC_SUCCESS(rc)) {
if (rc == 0) {
#ifdef DEBUG
"!_rdc_resume: diskq maxfbas = %"
NSC_SZFMT ", primary maxfbas = %"
#endif
maxfbas);
} else {
"!_rdc_resume: diskq maxfbas failed (%d)",
rc);
}
} else {
"!_rdc_resume: diskq reserve failed (%d)", rc);
}
}
}
if (options & RDC_OPT_PRIMARY)
if (options & RDC_OPT_ASYNC)
if (add_to_many(krdc) < 0) {
rc = RDC_EMULTI;
goto fail;
}
/* Configured but not enabled */
if (urdc->volume_size == 0) {
if (options & RDC_OPT_PRIMARY)
else
}
/* Configured but not enabled */
/*
* The rdc set is configured but not yet enabled. Other operations must
* ignore this set until it is enabled.
*/
/* Set tunable defaults, we'll pick up tunables from the header later */
if (options & RDC_OPT_PRIMARY) {
} else {
}
if (options & RDC_OPT_ASYNC)
goto fail;
}
/* Don't set krdc->intf here */
/* if the bitmap resume isn't clean, it will clear queuing flag */
(void) rdc_resume_bitmap(krdc);
if ((rc1 == RDC_EQNOADD) ||
IS_QSTATE(q, RDC_QBADRESUME)) {
}
}
else {
#ifdef DEBUG
#endif
}
/* Configured but not enabled */
/* And finally */
/* Should we set the whole group logging? */
if (rdc_intercept(krdc) != 0) {
if (options & RDC_OPT_PRIMARY)
else
#ifdef DEBUG
#endif
rc = RDC_EREGISTER;
goto bmpfail;
}
#ifdef DEBUG
#endif
return (0);
if (options & RDC_OPT_PRIMARY)
else
rc = RDC_EBITMAP;
(void) rdc_unintercept(krdc);
}
fail:
/* Don't unset krdc->intf here, unlike _rdc_enable */
/* Configured but not enabled */
/* Configured but not enabled */
/* Configured but not enabled */
return (rc);
}
static int
{
char itmp[10];
int rc;
(void) spcs_s_inttostring(
rc = RDC_EEINVAL;
goto done;
}
(void) spcs_s_inttostring(
rc = RDC_EEINVAL;
goto done;
}
done:
return (rc);
}
/*
* if rdc_group_log is called because a volume has failed,
* we must discard the queue to preserve write ordering.
* later perhaps, we can keep queuing, but we would have to
* rewrite the i/o path to accommodate that. currently, if there
* is a volume failure, the buffers are satisfied remotely and
* there is no way to satisfy them from the current diskq config
* phew, if we do that.. it will be difficult
*/
int
{
rdc_k_info_t *p;
rdc_u_info_t *q;
q = &rdc_u_info[p->index];
if (IS_STATE(q, RDC_VOL_FAILED))
return (0);
if (p == krdc)
break;
}
return (1);
}
/*
* wait here, until all in flight async i/o's have either
* finished or failed. Avoid the race with r_net_state()
* which tells remote end to log.
*/
void
{
volatile int *inflitems;
if (RDC_IS_DISKQ(grp))
else
}
void
{
rdc_k_info_t *p;
rdc_u_info_t *q;
int do_group;
disk_queue *dq;
if (!IS_ENABLED(urdc))
return;
(rdc_can_queue(krdc))) {
} else {
}
do_group = 1;
do_group = 0;
else if ((urdc->group_name[0] == 0) ||
do_group = 0;
if (do_group) {
q = &rdc_u_info[p->index];
if (!IS_ENABLED(q))
continue;
if ((rdc_get_vflags(q) & RDC_LOGGING) ||
(rdc_get_vflags(q) & RDC_SYNCING)) {
do_group = 0;
break;
}
}
}
do_group = 1;
if (do_group) {
#ifdef DEBUG
#endif
/* Set group logging at the same PIT under rdc_many_lock */
q = &rdc_u_info[p->index];
if (!IS_ENABLED(q))
continue;
"consistency group member following leader");
if (RDC_IS_DISKQ(p->group))
flag_op(q, RDC_QUEUING);
}
/*
* This can cause the async threads to fail,
* which in turn will call rdc_group_log()
* again. Release the lock and re-acquire.
*/
delay(2);
/*
* a little lazy, but neat. recall dump_alloc_bufs to
* ensure that the queue pointers & seq are reset properly
* after we have waited for inflight stuff
*/
delay(2);
/* fail or user request */
}
if (flag & RDC_ALLREMOTE) {
/* Tell other node to start logging */
}
p = p->group_next) {
(void) rdc_net_state(p->index,
}
}
}
q = &rdc_u_info[p->index];
if (!IS_ENABLED(q))
continue;
rdc_write_state(q);
}
} else {
/* No point in time is possible, just deal with single set */
} else {
"failed to read remote state");
== EAGAIN)
delay(2);
}
return;
}
}
return;
}
delay(2);
/*
* a little lazy, but neat. recall dump_alloc_bufs to
* ensure that the queue pointers & seq are reset
* properly after we have waited for inflight stuff
*/
delay(2);
if (flag & RDC_ALLREMOTE) {
/* Tell other node to start logging */
}
}
}
}
/*
* just in case any threads were in flight during log cleanup
*/
}
}
static int
{
return (RDC_EALREADY);
}
else
return (RDC_EADDTOIF);
}
return (RDC_ESYNCING);
}
return (0);
}
static int
{
int rc = 0;
int index;
if (index >= 0)
return (RDC_EALREADY);
}
/* A resume or enable failed */
return (RDC_EALREADY);
}
return (rc);
}
static int
{
int index;
int need_check = 0;
if (index >= 0)
return (RDC_EALREADY);
}
return (0);
}
/* A resume or enable failed */
return (RDC_EALREADY);
}
return (RDC_EALREADY);
}
(RDC_SYNCING | RDC_PRIMARY)) {
return (0);
}
need_check = 1;
}
if (need_check) {
return (0);
return (EIO);
}
}
return (0);
}
static int
{
int rc = 0;
int index;
if (index >= 0)
return (RDC_EALREADY);
}
/* A resume or enable failed */
return (RDC_EALREADY);
}
rc = RDC_EALREADY;
goto done;
}
*rvp = RDC_ACTIVE;
else
*rvp = RDC_INACTIVE;
done:
return (rc);
}
static int
{
int rc = -2;
int index;
if (index >= 0)
return (RDC_EALREADY);
}
/* A resume or enable failed */
return (RDC_EALREADY);
}
rc = RDC_EALREADY;
goto done;
}
(void) rdc_reset_bitmap(krdc);
/* Move to a new bitmap if necessary */
NSC_MAXPATH) != 0) {
} else {
/* simulate a successful rdc_move_bitmap */
rc = 0;
}
}
NSC_MAXPATH) != 0) {
/* simulate a successful rdc_move_bitmap */
rc = 0;
} else {
}
}
if (rc == -1) {
goto done;
}
/*
* At this point we fail any other type of reconfig
* if not in logging mode and we did not do a bitmap reconfig
*/
/* no other changes possible unless logging */
goto done;
}
rc = 0;
/* Change direct file if necessary */
NSC_MAXPATH)) {
goto notlogging;
}
if (urdc->direct_file[0]) {
else
}
}
/* Change group if necessary */
NSC_MAXPATH) != 0) {
char orig_group[NSC_MAXPATH];
goto notlogging;
if (rc == RDC_EQNOADD) {
goto done;
} else if (rc < 0) {
rc = RDC_EGROUP;
goto done;
}
if (rc >= 0) {
goto notlogging;
krdc->bitmap_ref =
(uchar_t *)kmem_zalloc(
} else {
if (krdc->bitmap_ref) {
}
}
}
} else {
goto notlogging;
rc = RDC_EGROUPMODE;
goto done;
}
}
goto notlogging;
/* switch to sync */
sizeof (urdc->disk_queue));
}
if (krdc->bitmap_ref) {
}
goto notlogging;
/* switch to async */
krdc->bitmap_ref =
(uchar_t *)kmem_zalloc(
}
}
}
/* Reverse concept of primary and secondary */
/*
* Disallow role reversal for advanced configurations
*/
return (RDC_EMASTER);
}
/* copy primary parts of urdc to rdc_set field by field */
/* Now overwrite urdc primary */
/* Now overwrite urdc secondary */
}
} else {
}
}
if (!krdc->bitmap_ref)
krdc->bitmap_ref =
KM_SLEEP);
"!rdc_reconfig: bitmap_ref alloc %"
NSC_SZFMT " failed",
return (-1);
}
}
/* Primary, so reverse sync needed */
/* Secondary, so forward sync needed */
}
/*
* rewrite bitmap header
*/
}
done:
return (rc);
/* no other changes possible unless logging */
return (RDC_ENOTLOGGING);
}
static int
{
int rc = 0;
int index;
int cleared_error = 0;
if (index >= 0)
return (RDC_EALREADY);
}
/* A resume or enable failed */
return (RDC_EALREADY);
}
rc = RDC_EALREADY;
goto done;
}
if (rdc_reset_bitmap(krdc) == 0)
}
/* Fix direct file if necessary */
else {
}
}
}
if (cleared_error) {
/* cleared an error so we should be in logging mode */
}
done:
return (rc);
}
static int
{
rdc_k_info_t *p;
rdc_u_info_t *q;
int rc = 0;
int index;
if (index >= 0)
return (RDC_EALREADY);
}
/* A resume or enable failed */
return (RDC_EALREADY);
}
rc = RDC_EALREADY;
goto done;
}
q = &rdc_u_info[p->index];
rdc_write_state(q);
}
}
q = &rdc_u_info[p->index];
rdc_write_state(q);
}
}
rc = RDC_EQNOQUEUE;
goto done;
}
/* queue will fail if this fails */
}
rc = RDC_EQNOQUEUE;
goto done;
}
/* queue will fail if this fails */
}
q = &rdc_u_info[p->index];
rdc_write_state(q);
}
}
else
/* Changed autosync, so update rest of the group */
q = &rdc_u_info[p->index];
rdc_write_state(q);
}
}
done:
return (rc);
}
/*
* Yet another standard thing that is not standard ...
*/
#ifndef offsetof
#endif
static int
{
int rc = 0;
int index;
char *ptr;
extern int rdc_status_copy32(const void *, void *, size_t, int);
if (index >= 0)
return (RDC_EALREADY);
}
/* A resume or enable failed */
return (RDC_EALREADY);
}
rc = RDC_EALREADY;
goto done;
}
/*
* sneak out qstate in urdc->flags
* this is harmless because its value is not used
* in urdc->flags. the real qstate is kept in
* group->diskq->disk_hdr.h.state
*/
}
mode);
} else {
}
/* clear out qstate from flags */
if (rc)
done:
return (rc);
}
/*
* Overwrite the bitmap with one supplied by the
* user.
* Copy into all bitmaps that are tracking this volume.
*/
int
{
int rc;
int *indexvec;
int index;
int indexit;
int i;
int groupind;
/* Must be modulo FBA */
"boundary %llu", (unsigned long long)off);
return (EINVAL);
}
/* Must be modulo FBA */
"boundary %d", bmapsz);
return (EINVAL);
}
if (index >= 0) {
}
return (rc);
}
/*
* I now have this set, and I want to take the group
* lock on it, and all the group locks of all the
* sets on the many and multi-hop links.
* I have to take the many lock while traversing the
* I think I also need to set the busy count on this
* set, otherwise when I drop the conf_lock, what
* will stop some other process from coming in and
* issuing a disable?
*/
groupind = 0;
indexit = 0;
/*
* Take this initial sets group lock first.
*/
goto retrylock;
}
groupind++;
if (rc) {
goto done;
}
indexit++;
/*
* attempt to take the group lock,
* if we don't already have it.
*/
goto done;
}
for (i = 0; i < groupind; i++) {
/* already have the group lock */
break;
}
/*
* didn't find our lock in our collection,
* attempt to take group lock.
*/
if (i >= groupind) {
for (i = 0; i < groupind; i++) {
mutex_exit(grouplocks[i]);
}
goto retrylock;
}
groupind++;
}
if (rc == 0) {
indexit++;
} else {
goto done;
}
}
}
goto done;
}
/*
* This can't be in our group already.
*/
for (i = 0; i < groupind; i++) {
mutex_exit(grouplocks[i]);
}
goto retrylock;
}
groupind++;
if (rc == 0) {
indexit++;
} else {
goto done;
}
}
indexit);
done:
for (i = 0; i < groupind; i++) {
mutex_exit(grouplocks[i]);
}
return (rc);
}
static int
{
if (!IS_ENABLED(urdc)) {
return (EIO);
}
return (ENXIO);
}
return (ENOENT);
}
return (ENOSPC);
}
return (0);
}
/*
* Copy the user supplied bitmap to this set.
*/
static int
{
int rc;
int len;
int left;
int copied;
int index;
rc = 0;
copied = 0;
while (left > 0) {
if (left > RDC_MAXDATA) {
len = RDC_MAXDATA;
} else {
}
goto out;
}
if (op == RDC_BITMAPSET) {
len);
} else {
}
/*
* Maybe this should be just done once outside of
* the loop? (Less work, but leaves a window
* where the bits_set doesn't match the bitmap).
*/
if (krdc->bitmap_write > 0) {
fba)) {
"!installbitmap: "
"write_bitmap_fba failed "
"on fba number %" NSC_SZFMT
" set %s:%s", fba,
goto out;
}
}
}
}
}
out:
return (rc);
}
/*
* _rdc_config
*/
int
{
int rc = 0;
struct knetconfig *knconf;
struct rdc_config *uap;
int cmd;
return (EFAULT);
}
#ifdef DEBUG
#endif
return (EFAULT);
}
#ifdef DEBUG
#endif
return (EFAULT);
}
} else {
}
#ifdef DEBUG
#endif
return (EFAULT);
}
#ifndef _SunOS_5_6
if ((mode & DATAMODEL_LP64) == 0) {
} else {
#endif
#ifndef _SunOS_5_6
}
#endif
if (rc) {
#ifdef DEBUG
#endif
goto out;
}
if (rc) {
#ifdef DEBUG
#endif
goto out;
}
} /* !NULL netconfig */
/* copy relevant parts of rdc_config to uap field by field */
/*
* Initialise the threadset if it has not already been done.
*
* This has to be done now, not in rdcattach(), because
* rdcattach() can be called before nskernd is running (eg.
* boot -r) in which case the nst_init() would fail and hence
* the attach would fail.
*
* Threadset creation is locked by the rdc_conf_lock,
* destruction is inherently single threaded as it is done in
* _rdc_unload() which must be the last thing performed by
* rdcdetach().
*/
if (_rdc_ioset == NULL) {
rc = rdc_thread_configure();
}
rc = RDC_ENOTHREADS;
goto outuap;
}
}
case RDC_CMD_ENABLE:
break;
case RDC_CMD_DISABLE:
break;
case RDC_CMD_COPY:
break;
case RDC_CMD_LOG:
break;
case RDC_CMD_RECONFIG:
break;
case RDC_CMD_RESUME:
break;
case RDC_CMD_SUSPEND:
break;
case RDC_CMD_TUNABLE:
break;
case RDC_CMD_WAIT:
break;
case RDC_CMD_HEALTH:
break;
case RDC_CMD_STATUS:
break;
case RDC_CMD_RESET:
break;
case RDC_CMD_ADDQ:
break;
case RDC_CMD_REMQ:
break;
/* FALLTHRU */
case RDC_CMD_KILLQ:
break;
case RDC_CMD_INITQ:
break;
default:
break;
}
/*
* Tune the threadset size after a successful rdc_set addition
* or removal.
*/
}
out:
if (pf)
if (p)
kmem_free(p, KNC_STRSIZE);
if (knconf)
return (rc);
}
/*
* krdc->group->lock held on entry to halt_sync()
*/
static void
{
/*
* If a sync is in progress, halt it
*/
break;
}
}
}
/*
* return size in blocks
*/
mirror_getsize(int index)
{
if (rc == 0)
}
/*
* Create a new dataset for this transfer, and add it to the list
* of datasets via the net_dataset pointer in the krdc.
*/
rdc_net_add_set(int index)
{
if (index >= rdc_max_sets) {
return (NULL);
}
return (NULL);
}
RDC_DSMEMUSE(sizeof (*dset));
if (!IS_ENABLED(urdc)) {
/* raced with a disable command */
RDC_DSMEMUSE(-sizeof (*dset));
return (NULL);
}
/*
* Shared the id generator, (and the locks).
*/
if (++rdc_net_hnd_id == 0)
rdc_net_hnd_id = 1;
#ifdef DEBUG
"rdc_net_add_set duplicate id %p:%d %p:%d",
}
}
}
#endif
return (dset);
}
/*
* fetch the previously added dataset.
*/
{
if (index >= rdc_max_sets) {
return (NULL);
}
if (dset) {
}
return (dset);
}
/*
* Decrement the inuse counter. Data may be freed.
*/
void
{
if (index >= rdc_max_sets) {
return;
}
}
}
/*
* Mark that we are finished with this set. Decrement inuse
* counter, mark as needing deletion, and
* remove from linked list.
*/
void
{
if (index >= rdc_max_sets) {
return;
}
}
}
/*
* free all the memory associated with this set, and remove from
* list.
* Enters and exits with dc_sleep lock held.
*/
void
{
#ifdef DEBUG
int found = 0;
#endif
#ifdef DEBUG
found = 1;
#endif
break;
}
}
#ifdef DEBUG
if (found == 0) {
"dataset 0x%p in krdc list", (void *)dset);
}
#endif
/*
* unlinked from list. Free all the data
*/
/*
* free my core.
*/
RDC_DSMEMUSE(-sizeof (*dset));
}
/*
* Free all the dataitems and the data it points to.
*/
static void
{
while (ditem) {
RDC_DSMEMUSE(-sizeof (*ditem));
}
}
/*
* allocate and initialize a rdc_aio_t
*/
{
rdc_aio_t *p;
if (p == NULL) {
#ifdef DEBUG
#endif
return (NULL);
} else {
p->next = n; /* overload */
p->handle = h;
p->qpos = -1;
p->iostatus = s; /* overload */
/* set up seq later, in case thr create fails */
}
return (p);
}
/*
* rdc_aio_buf_get
* get an aio_buf
*/
{
aio_buf_t *p;
if (index >= rdc_max_sets) {
return (NULL);
}
mutex_enter(&h->aio_lock);
p = h->rdc_anon;
p = p->next;
mutex_exit(&h->aio_lock);
return (p);
}
/*
* rdc_aio_buf_del
* delete a aio_buf
*/
void
{
mutex_enter(&h->aio_lock);
p = NULL;
p = *pp;
break;
}
}
if (p) {
kmem_free(p, sizeof (*p));
}
mutex_exit(&h->aio_lock);
}
/*
* rdc_aio_buf_add
* Add a aio_buf.
*/
{
aio_buf_t *p;
p = kmem_zalloc(sizeof (*p), KM_NOSLEEP);
if (p == NULL) {
return (NULL);
}
mutex_enter(&h->aio_lock);
h->rdc_anon = p;
mutex_exit(&h->aio_lock);
return (p);
}
/*
* kmemalloc a new group structure and setup the common
* fields.
*/
static rdc_group_t *
{
/*
* add default number of threads to the flusher thread set, plus
* one extra thread for the disk queue flusher
*/
return (group);
}
void
{
/* try to remove flusher threads that this group added to _rdc_flset */
}