/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Network data replicator Client side */
#include <sys/byteorder.h>
#ifdef _SunOS_2_6
/*
* on 2.6 both dki_lock.h and rpc/types.h define bool_t so we
* define enum_t here as it is all we need from rpc/types.h
* anyway and make it look like we included it. Yuck.
*/
#define _RPC_TYPES_H
typedef int enum_t;
#else
#ifndef DS_DDICT
#endif
#endif /* _SunOS_2_6 */
#ifndef DS_DDICT
#endif
#include <sys/nsc_thread.h>
#ifdef DS_DDICT
#endif
#include "rdc_io.h"
#include "rdc_clnt.h"
#include "rdc_bitmap.h"
#include "rdc_diskq.h"
#ifdef DEBUG
/* Debug knob: when nonzero, rdc_writer() below returns without spawning flusher threads. */
int noflush = 0;
#endif
/* Forward declarations for the async flusher entry points defined later in this file. */
static void _rdc_remote_flush(rdc_aio_t *);
void rdc_flush_memq(int index);
void rdc_flush_diskq(int index);
int rdc_drain_net_queue(int index);
void rdc_flusher_thread(int index);
int rdc_writer(int index);
/* NOTE(review): not referenced in the visible portion of this file -- presumably counts client-handle exhaustion events; confirm against full source. */
static int rdc_clnt_toomany;
#ifdef DEBUG
/* Count of RPC replies observed to arrive out of sequence (bumped in _rdc_remote_flush). */
static int rdc_ooreply;
#endif
static enum clnt_stat
{
return (stat);
}
int
{
struct timeval t;
*sizeptr = 0;
if (krdc->remote_index < 0)
return (EINVAL);
t.tv_sec = rdc_rpc_tmout;
t.tv_usec = 0;
#ifdef DEBUG
"!rdc_net_getsize: null intf for index %d", index);
#endif
if (err == 0)
} else {
xdr_u_longlong_t, (char *)sizeptr, &t);
}
return (err);
}
int
{
struct timeval t;
int err;
struct set_state s;
unsigned short *sp;
t.tv_sec = rdc_rpc_tmout;
t.tv_usec = 0;
(char *)&remote_index, &t);
} else {
(char *)&remote_index, &t);
}
if (err)
return (-1);
else
return (remote_index);
}
/*
* rdc_net_getbmap
* gets the bitmaps from remote side and or's them with remote bitmap
*/
int
{
struct timeval t;
int err;
struct bmap b;
if (krdc->remote_index < 0)
return (EINVAL);
t.tv_sec = rdc_rpc_tmout;
t.tv_usec = 0;
#ifdef DEBUG
"!rdc_net_getbmap: null intf for index %d", index);
#endif
(char *)&err, &t);
} else {
(char *)&err, &t);
}
return (err);
}
/*
 * Protocol override: when nonzero this value is consulted (see the
 * "if (sndr_proto)" test in the state routine below) in place of the
 * negotiated SNDR RPC version.
 * NOTE(review): exact semantics can't be confirmed here -- the code that
 * assigns from it is missing in this extraction.
 */
int sndr_proto = 0;
/*
* return state corresponding to rdc_host
*/
int
int *mirror_down, int network)
{
int err;
struct timeval t;
int state;
struct set_state s;
#ifdef sparc
#endif
unsigned short *sp;
char *setp = (char *)&s;
network) /* fail fast */
return (-1);
s.flag = 0;
t.tv_sec = rdc_rpc_tmout;
t.tv_usec = 0;
if (sndr_proto)
else
RDC_VERS_MIN)) {
/* set_state struct changed with v7 of protocol */
#ifdef sparc
s4.netaddrlen);
#else
/* x64 can not use protocols < 7 */
return (-1);
#endif
}
goto again;
}
#ifdef DEBUG
#endif
if (err) {
return (-1);
}
if (state == -1)
return (-1);
if (serial_mode)
if (use_mirror)
if (mirror_down)
return (0);
}
{ (int)RDC_OK, xdr_readok },
};
/*
* Reply from remote read (client side)
*/
static bool_t
{
}
static int
{
int ret = 0;
switch (status) {
case RDCERR_NOENT:
break;
case RDCERR_NOMEM:
break;
default:
break;
}
}
return (ret);
}
int
{
struct timeval t;
int rpc_flag;
int err;
int ret;
int transflag;
return (EINVAL);
#ifdef DEBUG
#endif
return (EINVAL);
}
int reserved = 0;
if (RDC_SUCCESS(ret)) {
reserved = 1;
}
if (RDC_SUCCESS(ret)) {
fba_len);
if (RDC_SUCCESS(ret)) {
(void) nsc_free_buf(remote_h);
return (0);
}
}
if (remote_h)
(void) nsc_free_buf(remote_h);
if (reserved)
}
t.tv_sec = rdc_rpc_tmout;
t.tv_usec = 0;
else
rpc_flag = 0;
#ifdef DEBUG
"!rdc_net_read: null intf for index %d", local_index);
#endif
/*
* switch on proto version.
*/
/* send setup rpc */
(char *)&ret, &t);
} else {
/* send setup rpc */
(char *)&ret, &t);
}
if (err) {
#ifdef DEBUG
#endif
else
goto remote_rerror;
}
if (ret == 0) { /* No valid index from r_net_read */
#ifdef DEBUG
"!rdc_net_read: no valid index from r_net_read");
#endif
return (ENOBUFS);
}
/* move onto to data xfer rpcs */
} else {
}
/* find starting position in handle */
while (len) {
} else {
}
}
if (len == 0) {
/* last data xfer rpc - tell server to cleanup */
}
}
/* error */
#ifdef DEBUG
#endif
return (ENOMEM);
}
/* get data from remote end */
#ifdef DEBUG
"!rdc_net_read: null intf for index %d",
#endif
}
/*CONSTCOND*/
xdr_rdresult, (char *)&rr, &t);
} else {
xdr_rdresult, (char *)&rr, &t);
}
}
if (err) {
#ifdef DEBUG
#endif
} else {
}
goto remote_rerror;
}
if (!ret)
goto remote_rerror;
}
/* copy into handle */
/* update counters */
} else {
}
}
if (sv_len == 0) {
/* goto next vector */
vec++;
}
}
return (0);
}
/*
* rdc_net_write
* Main remote write client side
* Handles protocol selection as well as requests for remote allocation
* and data transfer
* Does local IO for FCAL
* caller must clear bitmap on success
*/
int
{
struct timeval t;
int sv_len;
int err;
int ret;
int transflag;
int translen;
int transendoblk;
char *transptr;
int vflags;
return (EINVAL);
/* if not a diskq buffer */
#ifdef DEBUG
#endif
return (EINVAL);
}
t.tv_sec = rdc_rpc_tmout;
t.tv_usec = 0;
/* FCAL IO */
int reserved = 0;
if (RDC_SUCCESS(ret)) {
reserved = 1;
}
if (RDC_SUCCESS(ret)) {
fba_len);
if (RDC_SUCCESS(ret))
if (RDC_SUCCESS(ret)) {
(void) nsc_free_buf(remote_h);
return (0);
}
}
if (remote_h)
(void) nsc_free_buf(remote_h);
if (reserved)
}
/*
* At this point we must decide which protocol we are using and
* do the right thing
*/
if (netres) {
} else {
}
else
transflag = 0;
#ifdef DEBUG
#endif
/*
* find starting position in vector
*/
else
} else {
}
transendoblk = 0;
while (fba_len) {
if (!transptr) {
#ifdef DEBUG
"!rdc_net_write: walked off end of handle!");
#endif
goto remote_error;
}
} else {
}
}
if (fba_len == 0) {
/* last data xfer - tell server to commit */
transendoblk = 1;
}
#ifdef DEBUG
"!rdc_net_write: null intf for index %d",
#endif
}
ret = 0;
(char *)&ret, &t);
if (ret >= 0) {
} else {
}
} else {
(char *)&dlist6, xdr_netwriteres,
(char *)netresptr, &t);
}
}
if (err) {
#ifdef DEBUG
"!rdc_net_write(5): cd %d err %d ret %d",
#endif
goto remote_error;
}
/* Error from r_net_write5 */
#ifdef DEBUG
"!rdc_net_write: r_net_write(5) "
"returned: %d",
#endif
sizeof (net_pendvec_t));
goto remote_error;
#ifdef DEBUG
"!rdc_net_write: no valid index from "
"r_net_write(5)");
#endif
sizeof (net_pendvec_t));
goto remote_error;
}
} else {
}
/* update counters */
}
if (sv_len <= 0) {
/* goto next vector */
vec++;
}
}
/*
* this can't happen.....
*/
sizeof (net_pendvec_t));
return (0);
}
void
{
vecp++;
}
}
/*
* rdc_dump_alloc_bufs_cd
* Dump allocated buffers (rdc_net_hnd's) for the specified cd.
* this could be the flusher failing, if so, don't do the delay forever
* Returns: 0 (success), EAGAIN (caller needs to try again).
*/
int
{
net_queue *q;
/* cannot do anything! */
#ifdef DEBUG
index);
#endif
return (0);
}
} else {
}
/*
* Now dump the async queue anonymous buffers
* if we are a diskq, the we are using the diskq mutex.
* However, we are flushing from diskq to memory queue
* so we now need to grab the memory lock also
*/
mutex_enter(&q->net_qlock);
if (q->qfill_sleeping == RDC_QFILL_AWAKE) {
#ifdef DEBUG_DISKQ
"!dumpalloccd sending diskq->memq flush to sleep");
#endif
q->qfflags |= RDC_QFILLSLEEP;
mutex_exit(&q->net_qlock);
delay(5);
mutex_enter(&q->net_qlock);
}
}
}
} else {
}
}
}
q->nitems--;
}
}
}
}
mutex_exit(&q->net_qlock);
}
return (0);
}
/*
* rdc_dump_alloc_bufs
* We have an error on the link
* Try to dump all of the allocated bufs so we can cleanly recover
* and not hang
*/
void
{
int repeat;
int index;
do {
repeat = 0;
repeat = 1;
delay(2);
}
}
} while (repeat);
}
}
/*
* returns 1 if the throttle should throttle, 0 if not.
*/
int
{
/* ---T----H----N--- */
mutex_enter(QLOCK(q));
mutex_exit(QLOCK(q));
return (0);
}
mutex_exit(QLOCK(q));
return (1);
}
void
{
/*
* Throttle entries on queue
*/
/* Need to take the 1-many case into account, checking all sets */
/* ADD HANDY HEURISTIC HERE TO SLOW DOWN IO */
/*
* this may be the last set standing in a one to many setup.
* we may also be stuck in unintercept, after marking
* the volume as not enabled, but have not removed it
* from the many list resulting in an endless loop if
* we just continue here. Lets jump over this stuff
* and check to see if we are the only dude here.
*/
if (!IS_ENABLED(urdc))
goto thischeck;
goto thischeck;
delay(2);
q->throttle_delay++;
}
}
/* do a much more aggressive delay, get disk flush going */
while ((!IS_QSTATE(q, RDC_QNOBLOCK)) &&
(_rdc_diskq_isfull(q, len)) &&
if (print_msg) {
" disk queue %s full",
&urdc->disk_queue[0]);
print_msg = 0;
}
goto thischeck;
delay(10);
q->throttle_delay += 10;
"%s full & not flushing. giving up",
&urdc->disk_queue[0]);
" logging mode",
RDC_DOLOG | RDC_NOFAIL);
mutex_enter(QLOCK(q));
cv_broadcast(&q->qfullcv);
mutex_exit(QLOCK(q));
}
}
if ((IS_QSTATE(q, RDC_QNOBLOCK)) &&
_rdc_diskq_isfull(q, len) &&
if (print_msg) {
&urdc->disk_queue[0]);
print_msg = 0;
}
RDC_DOLOG | RDC_NOFAIL);
mutex_enter(QLOCK(q));
cv_broadcast(&q->qfullcv);
mutex_exit(QLOCK(q));
}
}
break;
}
}
/* Counter: number of times an incoming I/O was joined/coalesced onto an already-queued aio (incremented in the enqueue path below). */
static int rdc_joins = 0;
int
{
int rc;
if (rdc_coalesce == 0)
return (0); /* don't even try */
return (0); /* existing queue is empty */
}
MAX_RDC_FBAS) {
return (0); /* I/O to big */
}
&h);
if (!RDC_SUCCESS(rc)) {
if (h != NULL)
(void) nsc_free_buf(h);
return (0); /* couldn't do coalesce */
}
if (!RDC_SUCCESS(rc)) {
(void) nsc_free_buf(h);
return (0); /* couldn't do coalesce */
}
if (!RDC_SUCCESS(rc)) {
(void) nsc_free_buf(h);
return (0); /* couldn't do coalesce */
}
bitmask = 0;
/*
* bump the ref count back up
*/
return (1); /* new I/O succeeds last I/O queued */
}
return (0);
}
int
{
net_queue *q;
mutex_enter(&q->net_qlock);
rdc_joins++;
goto out;
}
/* adding to empty q */
#ifdef DEBUG
"rdc enqueue: q %p, qhead 0, q blocks %" NSC_SZFMT
", nitems %" NSC_SZFMT,
}
#endif
} else {
/* discontiguous, add aio to q tail */
}
q->nitems++;
}
out:
#ifdef DEBUG
/* sum the q and check for sanity */
{
rdc_aio_t *a;
nitems++;
}
}
}
#endif
mutex_exit(&q->net_qlock);
if (q->nitems > q->nitems_hwm) {
q->nitems_hwm = q->nitems;
}
if (q->blocks > q->blocks_hwm) {
q->blocks_hwm = q->blocks;
}
return (0);
}
int
nsc_buf_t *h)
{
int rc;
if (!aio) {
return (ENOMEM);
}
return (rc);
}
return (-1); /* keep lint quiet */
}
/*
* Async Network RDC flusher
*/
/*
* don't allow any new writer threads to start if a member of the set
* is disable pending
*/
int
{
int rc = 0;
do {
rc = 1;
break;
}
return (rc);
}
/*
* rdc_writer -- spawn new writer if not running already
* called after enqueing the dirty blocks
*/
int
{
nsthread_t *t;
int tries;
if (RDC_IS_DISKQ(group))
else
#ifdef DEBUG
if (noflush) {
return (0);
}
#endif
return (0);
}
/*
* We also need to check if we are starting a new
* sequence, and if so don't create a new thread,
* as we must ensure that the start of new sequence
* requests arrives first to re-init the server.
*/
return (0);
}
/*
* For version 6,
* see if we can fit in another thread.
*/
group->rdc_thrnum++;
} else {
}
/*
* If we got here, we know that we have not exceeded the allowed
* number of async threads for our group. If we run out of threads
* in _rdc_flset, we add a new thread to the set.
*/
tries = 0;
do {
/* first try to grab a thread from the free list */
break;
}
/* that failed; add a thread to the set and try again */
break;
}
if (tries) {
}
if (t) {
return (1);
}
group->rdc_thrnum--;
group->rdc_writer = 0;
/*
* Race with remove_from_group while write thread was
* failing to be created.
*/
#ifdef DEBUG
#endif
return (-1);
}
return (-1);
}
/*
* Either we need to flush the
* kmem (net_queue) queue or the disk (disk_queue)
* determine which, and do it.
*/
void
{
return;
return;
} else { /* uh-oh, big time */
}
}
void
{
net_queue *q;
int dowork;
#ifdef DEBUG
#endif
return;
}
#ifdef DEBUG
#endif
goto thread_death;
}
#ifdef DEBUG_DISABLE
/*
* Need to continue as we may be trying to flush IO
* while trying to disable or suspend
*/
}
#endif
dowork = 1;
/* CONSTCOND */
while (dowork) {
break;
#ifdef DEBUG
#endif
break;
}
mutex_enter(&q->net_qlock);
#ifdef DEBUG
if (q->nitems != 0 ||
q->blocks != 0 ||
q->net_qtail != 0) {
"rdc_flush_memq(1): q %p, q blocks %"
", qhead %p qtail %p",
}
#endif
mutex_exit(&q->net_qlock);
break;
}
/* aio remove from q */
q->nitems--;
/*
* in flight numbers.
*/
q->inflitems++;
#ifdef DEBUG
if (q->nitems != 0 ||
q->blocks != 0 ||
q->net_qtail != 0) {
"rdc_flush_memq(2): q %p, q blocks %"
", qhead %p qtail %p",
(void *) q->net_qhead,
(void *) q->net_qtail);
}
}
#ifndef NSC_MULTI_TERABYTE
if (q->blocks < 0) {
"rdc_flush_memq(3): q %p, q blocks %" NSC_SZFMT
", nitems %d, qhead %p, qtail %p",
}
#else
/* blocks and nitems are unsigned for NSC_MULTI_TERABYTE */
#endif
#endif
mutex_exit(&q->net_qlock);
mutex_enter(&q->net_qlock);
q->inflitems--;
/*
* We are an old thread, and the
* queue sequence has been reset
* during the network write above.
* As such we mustn't pull another
* job from the queue until the
* first sequence message has been ack'ed.
* Just die instead. Unless this thread
* is the first sequence that has just
* been ack'ed
*/
dowork = 0;
}
}
mutex_exit(&q->net_qlock);
goto thread_death;
}
"memq flush aio status not RDC_IO_DONE");
}
break;
}
group->rdc_thrnum--;
group->rdc_writer = 0;
/*
* all threads must be dead.
*/
/*
* Group now empty, so destroy
* Race with remove_from_group while write thread was running
*/
#ifdef DEBUG
#endif
return;
}
}
/*
* rdc_flush_diskq
* disk queue flusher
*/
void
{
disk_queue *q;
int dowork;
int rc;
#ifdef DEBUG
#endif
return;
}
#ifdef DEBUG
#endif
return;
}
#ifdef DEBUG_DISABLE
/*
* Need to continue as we may be trying to flush IO
* while trying to disable or suspend
*/
}
#endif
#ifdef DEBUG
#endif
goto thread_death;
}
dowork = 1;
/* CONSTCOND */
while (dowork) {
break;
#ifdef DEBUG
#endif
break;
}
do {
rc = 0;
goto thread_death;
goto thread_death;
}
delay(40);
}
break;
}
mutex_enter(QLOCK(q));
q->inflitems++;
mutex_exit(QLOCK(q));
mutex_enter(QLOCK(q));
q->inflitems--;
/*
* We are an old thread, and the
* queue sequence has been reset
* during the network write above.
* As such we mustn't pull another
* job from the queue until the
* first sequence message has been ack'ed.
* Just die instead. Unless of course,
* this thread is the first sequence that
* has just been ack'ed.
*/
dowork = 0;
}
}
mutex_exit(QLOCK(q));
if (group) { /* seq gets bumped on dequeue */
mutex_enter(QLOCK(q));
rdc_dump_iohdrs(q);
SET_QNXTIO(q, QHEAD(q));
SET_QCOALBOUNDS(q, QHEAD(q));
mutex_exit(QLOCK(q));
}
break;
}
goto thread_death;
}
"diskq flush aio status not RDC_IO_DONE");
}
#ifdef DEBUG_DISABLE
"!rdc_flush_diskq: DISABLE PENDING after IO!");
}
#endif
break;
if (IS_QSTATE(q, RDC_QDISABLEPEND)) {
#ifdef DEBUG
#endif
break;
}
}
mutex_enter(QLOCK(q));
group->rdc_thrnum--;
group->rdc_writer = 0;
}
}
mutex_exit(QLOCK(q));
/*
* Group now empty, so destroy
* Race with remove_from_group while write thread was running
*/
#ifdef DEBUG
#endif
return;
}
mutex_exit(QLOCK(q));
}
/*
* _rdc_remote_flush
* Flush a single block ANON block
* this function will flush from either the disk queue
* or the memory queue. The appropriate locks must be
* taken out etc, etc ...
*/
static void
{
int reserved = 0;
int rc;
int vflags;
/* Where did we get this aio from anyway? */
if (RDC_IS_DISKQ(group)) {
} else {
}
/*
* quench transmission if we are too far ahead of the
* server Q, or it will overflow.
* Must fail all requests while asyncdis is set.
* It will be cleared when the last thread to be discarded
* sets the asyncstall counter to zero.
* Note the thread within rdc_net_write
* also bumps the asyncstall counter.
*/
goto failed;
}
/* don't go to sleep if we have gone logging! */
goto failed;
}
group->asyncstall++;
group->asyncstall--;
if (group->asyncstall == 0) {
}
goto failed;
}
/*
* See if we have gone into logging mode
* since sleeping.
*/
goto failed;
}
}
waitq = 0;
}
if (rc != 0) {
#ifdef DEBUG
#endif
goto failed;
}
reserved = 1;
/*
* Case where we are multihop and calling with no ANON bufs
* Need to do the read to fill the buf.
*/
if (!RDC_SUCCESS(rc)) {
#ifdef DEBUG
"!_rdc_remote_flush: alloc_buf, index %d, pos %"
#endif
goto failed;
}
}
if (group->asyncstall == 0) {
}
goto failed;
}
group->asyncstall++;
if (krdc->remote_index < 0) {
/*
* this should be ok, we are flushing, not rev syncing.
* remote_index could be -1 if we lost a race with
* resume and the flusher trys to flush an io from
* another set that has not resumed
*/
}
/*
* double check for logging, no check in net_write()
* skip the write if you can, otherwise, if logging
* avoid clearing the bit .. you don't know whose bit it may
* also be.
*/
group->asyncstall--;
goto failed;
}
group->asyncstall--;
if (group->asyncstall == 0) {
}
goto failed;
}
goto failed;
}
if (rc != 0) {
#ifdef DEBUG
"!_rdc_remote_flush: write, index %d, pos %" NSC_SZFMT
"rc %d seq %u group seq %u seqack %u qpos %" NSC_SZFMT,
#endif
"!Hard timeout detected (%d sec) "
"on SNDR set %s:%s",
}
goto failed;
} else {
}
if (RDC_IS_DISKQ(group)) {
/* free locally alloc'd handle */
}
(void) _rdc_rsrv_diskq(group);
}
} else {
}
0xffffffff, RDC_BIT_BUMP);
/* tell queue data has been flushed */
} else { /* throw away queue, logging */
rdc_dump_iohdrs(q);
SET_QNXTIO(q, QHEAD(q));
SET_QCOALBOUNDS(q, QHEAD(q));
}
}
}
/*
* Check to see if the reply has arrived out of
* order, if so don't update seqack.
*/
}
#ifdef DEBUG
else {
rdc_ooreply++;
}
#endif
if (group->asyncstall) {
}
}
/*
* see if we have any pending async requests we can mark
* as done.
*/
/*
* we must always still be in the same group.
*/
if (!(vflags &
0xffffffff, RDC_BIT_BUMP);
/* update queue info */
} else { /* we've gone logging */
rdc_dump_iohdrs(q);
SET_QNXTIO(q, QHEAD(q));
SET_QCOALBOUNDS(q, QHEAD(q));
}
}
}
/*
* see if we can re-start transmission
*/
}
#ifdef DEBUG
else {
rdc_ooreply++;
}
#endif
if (group->asyncstall) {
}
vecp++;
}
}
return;
/* perhaps we have a few threads stuck .. */
if (group->asyncstall) {
}
if (RDC_IS_DISKQ(group)) {
/* free locally alloc'd handle */
}
(void) _rdc_rsrv_diskq(group);
}
} else {
}
}
if (reserved) {
}
}
/* make sure that the bit is still set */
}
/*
* rdc_drain_disk_queue
* drain the async network queue for the whole group. Bail out if nothing
* happens in 20 sec
* returns -1 if it bails before the queues are drained.
*/
int
{
long blocks;
/* Sanity checking */
if (index > rdc_max_sets)
return (0);
/*
* If there is no group or diskq configured, we can leave now
*/
return (0);
/*
* No need to wait if EMPTY and threads are gone
*/
counter = 0;
/*
* Capture counters to determine if progress is being made
*/
/*
* Wait
*/
/*
* Has the group or disk queue gone away while delayed?
*/
return (0);
/*
* Are we still seeing progress?
*/
/*
* No progress seen, increment retry counter
*/
if (counter++ > NUM_RETRIES) {
return (-1);
}
} else {
/*
* Reset counter, as we've made progress
*/
counter = 0;
}
}
return (0);
}
/*
* decide what needs to be drained, disk or core
* and drain it
*/
/*
 * NOTE(review): this extraction is truncated -- the function name/parameter
 * line and local declarations (group, index) are missing.  From the
 * surrounding comment this is the generic drain dispatcher: it selects the
 * disk-queue or memory-queue drain routine based on the group's queue type,
 * returning 0 when there is no group or the queue type is unknown.
 */
int
{
if (!group)
return (0);
if (RDC_IS_DISKQ(group))
return (rdc_drain_disk_queue(index));
if (RDC_IS_MEMQ(group))
return (rdc_drain_net_queue(index));
/* oops.. */
#ifdef DEBUG
/* NOTE(review): the cmn_err()-style call that consumed this string is missing from the extraction. */
"attempting drain of unknown Q type");
#endif
return (0);
}
/*
* rdc_drain_net_queue
* drain the async network queue for the whole group. Bail out if nothing
* happens in 20 sec
* returns -1 if it bails before the queues are drained.
*/
/*
 * NOTE(review): truncated in extraction -- the signature line (the comment
 * above names this rdc_drain_net_queue(index)), the queue lookup, the loop
 * body that samples q->blocks/q->nitems, and the `bail` declaration and
 * delay are missing.  Visible contract: returns 0 on success or when there
 * is nothing to drain, -1 if the bail-out counter expires first.
 */
int
{
volatile net_queue *q;
/* Sanity checking */
if (index > rdc_max_sets)
return (0);
return (0);
/* LINTED */
return (0);
/* CONSTCOND */
while (1) {
break;
}
(--bail <= 0)) {
break;
}
}
if (bail <= 0)
return (-1);
return (0);
}
/*
* rdc_dump_queue
* We want to release all the blocks currently on the network flushing queue
* We already have them logged in the bitmap.
*/
void
{
net_queue *q;
/*
* gotta have both locks here for diskq
*/
if (RDC_IS_DISKQ(group)) {
mutex_enter(&q->net_qlock);
if (q->qfill_sleeping == RDC_QFILL_AWAKE) {
#ifdef DEBUG_DISKQ
"!dumpq sending diskq->memq flusher to sleep");
#endif
q->qfflags |= RDC_QFILLSLEEP;
mutex_exit(&q->net_qlock);
delay(5);
mutex_enter(&q->net_qlock);
}
}
if (RDC_IS_DISKQ(group)) {
(void) _rdc_rsrv_diskq(group);
} else {
}
/* if the q is on disk, dump the q->iohdr chain */
if (RDC_IS_DISKQ(group)) {
/* back up the nxtio pointer */
}
while (q->net_qhead) {
if (RDC_IS_DISKQ(group)) {
}
} else {
}
}
}
}
q->blocks = 0;
q->nitems = 0;
/*
* See if we have stalled threads.
*/
done:
if (group->asyncstall) {
}
if (RDC_IS_DISKQ(group)) {
mutex_exit(&q->net_qlock);
}
}
/*
* rdc_clnt_get
* Get a CLIENT handle and cache it
*/
static int
{
int retries;
int ret;
int num_clnts = 0;
if (rch) {
*rch = 0;
}
if (clp) {
*clp = 0;
}
cred = ddi_get_cred();
ch = rdc_chtable;
plistp = &rdc_chtable;
/* find the right ch_list chain */
/* found the correct chain to walk */
break;
}
}
/* walk the ch_list and try and find a free client */
/* suitable handle to reuse */
break;
}
}
}
/* alloc a temporary handle and return */
if (ret != 0) {
"!rdc_call: tli_kcreate failed %d", ret);
return (ret);
}
*rch = 0;
return (ret);
}
/* reuse a cached handle */
ch->ch_timesused++;
if (ret != 0) {
return (ret);
}
NULL);
return (0);
} else {
/*
* Consecutive calls to CLNT_CALL() on the same client handle
* get the same transaction ID. We want a new xid per call,
* so we first reinitialise the handle.
*/
return (0);
}
}
/* create new handle and cache it */
if (ch) {
if (ch->ch_protofmly)
}
if (ret != 0) {
if (ch)
return (ret);
}
if (ch)
return (ret);
}
/* NOTE(review): not referenced in the visible portion of this file -- presumably a running count of RPC client handles created; confirm against full source. */
long rdc_clnt_count = 0;
/*
* rdc_clnt_call
* Arguments:
* rdc_srv_t *svp - rdc servinfo
* rpcproc_t proc; - rpcid
* rpcvers_t vers; - protocol version
* xdrproc_t xargs;- xdr function
* caddr_t argsp;- args to xdr function
* xdrproc_t xres;- xdr function
* caddr_t resp;- args to xdr function
* struct timeval timeout;
* Performs RPC client call using specific protocol and version
*/
/*
 * Retry wrapper around the kernel RPC call (named rdc_clnt_call by the
 * comment block above): loops on recoverable RPC failures, exiting
 * immediately on success, interrupt, or a program-version mismatch (the
 * caller is expected to downgrade `vers` and retry -- see the downgrade
 * loops below).
 * NOTE(review): truncated in extraction -- the signature line, the client
 * handle acquisition (rdc_clnt_get) and the CLNT_CALL invocation that sets
 * `err`, and the loop termination/backoff logic are missing.
 */
int
{
int err;
int tries = 0;
return (err);
do {
switch (err) {
case RPC_SUCCESS: /* bail now */
goto done;
case RPC_INTR: /* No recovery from this */
goto done;
case RPC_PROGVERSMISMATCH:
goto done;
case RPC_TLIERROR:
/* fall thru */
case RPC_XPRTFAILED:
/* Delay here to err on side of caution */
/* fall thru */
case RPC_VERSMISMATCH:
default:
if (IS_UNRECOVERABLE_RPC(err)) {
goto done;
}
tries++;
/*
* The call is in progress (over COTS)
* Try the CLNT_CALL again, but don't
* print a noisy error message
*/
if (err == RPC_INPROGRESS)
break;
}
done:
return (err);
}
/*
* Call an rpc from the client side, not caring which protocol is used.
*/
int
{
int rc;
} else {
vers = RDC_VERS_MAX;
}
do {
if (rc == RPC_PROGVERSMISMATCH) {
/*
* Downgrade and try again.
*/
vers--;
}
}
return (rc);
}
/*
* Call an rpc from the client side, starting with protocol specified
*/
int
{
int rc;
do {
if (rc == RPC_PROGVERSMISMATCH) {
/*
* Downgrade and try again.
*/
vers--;
}
}
return (rc);
}
/*
* rdc_clnt_free
* Free a client structure into the cache, or if this was a temporary
* handle allocated above MAXCLIENTS, destroy it.
*/
/*
 * NOTE(review): truncated in extraction -- the signature (per the comment
 * above, this is rdc_clnt_free) and the conditional structure are missing.
 * Visible intent: a cached client is released by clearing its inuse flag;
 * a temporary handle allocated beyond MAXCLIENTS is destroyed outright.
 */
static void
{
/* cached client, just clear inuse flag and return */
return;
}
/* temporary handle allocated above MAXCLIENTS, so destroy it */
}
}
/*
* _rdc_clnt_destroy
* Free a chain (ch_list or ch_next) of cached clients
*/
/*
 * Named _rdc_clnt_destroy by the comment block above: walks a chain of
 * cached client handles rooted at *p, destroying unused entries and
 * counting (deliberately leaking) any still marked inuse; returns the
 * leak count.
 * NOTE(review): truncated in extraction -- the signature line, the `ch`
 * declaration, the unlink statements, and the CLNT_DESTROY/free calls
 * are missing, leaving unmatched braces in the visible text.
 */
static int
{
int leak = 0;
if (!p)
return (0);
while (*p != NULL) {
ch = *p;
/*
* unlink from the chain
* - this leaks the client if it was inuse
*/
/* unused client - destroy it */
}
}
if (ch->ch_protofmly)
} else {
/* remember client leak */
leak++;
}
}
return (leak);
}
/*
* rdc_clnt_destroy
* Free client caching table on unconfigure
*/
/*
 * Tear down the client-handle cache at unconfigure time, warning if any
 * in-use clients have to be leaked.
 * NOTE(review): truncated in extraction -- the loops that walk the
 * ch_list/ch_next chains (presumably via _rdc_clnt_destroy, accumulating
 * into `leak`) are missing, as is the cmn_err() call that consumed the
 * dangling format string below.
 */
void
rdc_clnt_destroy(void)
{
int leak = 0;
/* destroy each ch_list chain */
}
/* destroy the main ch_next chain */
if (leak) {
/* we are about to leak clients */
"!rdc_clnt_destroy: leaking %d inuse clients", leak);
}
}
#ifdef DEBUG
/*
* Function to send an asynchronous net_data6 request
* direct to a server to allow the generation of
* out of order requests for ZatoIchi tests.
*/
int
{
int index;
char *data;
int datasz;
char *datap;
int rc;
struct timeval t;
int i;
rc = 0;
*rvp = 0;
/*
* copyin the user's arguments.
*/
return (EFAULT);
}
/*
* search by the secondary host and file.
*/
if (!IS_CONFIGURED(krdc))
continue;
if (!IS_ENABLED(urdc))
continue;
continue;
continue;
MAX_RDC_HOST_SIZE) == 0) &&
NSC_MAXPATH) == 0)) {
break;
}
}
if (index >= rdc_max_sets) {
return (ENOENT);
}
"start %d length %d sub start %d sub length %d",
return (EIO);
}
}
/* LINTED */
}
/*
* Fill in the net databuffer prior to transmission.
*/
return (EIO);
} else {
}
} else {
}
t.tv_sec = rdc_rpc_tmout;
t.tv_usec = 0;
&t);
if (rc == 0) {
}
"pendcnt %d",
sizeof (net_pendvec_t));
}
sizeof (net_pendvec_t));
} else {
}
return (rc);
}
/*
* Function to send an net_read6 request
* direct to a server to allow the generation of
* read requests.
*/
int
{
int index;
int ret;
struct timeval t;
int err;
*rvp = 0;
return (EFAULT);
}
} else {
return (EFAULT);
}
}
switch (readgen.rpcversion) {
case 5:
case 6:
break;
default:
return (EINVAL);
}
if (index >= 0) {
}
return (ENODEV);
}
/*
* we should really call setbusy here.
*/
t.tv_sec = rdc_rpc_tmout;
t.tv_usec = 0;
goto out;
}
} else {
}
} else {
}
if (err == 0) {
ret = 0;
} else {
}
} else {
xdr_rdresult, (char *)&rr, &t);
} else {
xdr_rdresult, (char *)&rr, &t);
}
if (err == 0) {
goto out;
}
goto out;
}
ret = 0;
} else {
goto out;
}
}
out:
}
return (ret);
}
#endif