/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* kRPC Server for sndr
*/
#ifdef _SunOS_2_6
/*
* on 2.6 both dki_lock.h and rpc/types.h define bool_t, so we
* define _RPC_TYPES_H ourselves anyway and make it look like we
* included it. Yuck.
*/
#define _RPC_TYPES_H
typedef int enum_t;
#else
#ifndef DS_DDICT
#endif
#endif /* _SunOS_2_6 */
#ifndef DS_DDICT
#endif
#include <sys/nsc_thread.h>
#ifdef DS_DDICT
#endif
#include "rdc_io.h"
#include "rdc_bitmap.h"
#include "rdcsrv.h"
/*
 * Forward declarations and module-scope state for the kRPC server.
 *
 * NOTE(review): the three lines below that consist only of trailing
 * parameter lists ("...int);") appear to be the tail ends of static
 * prototypes whose leading lines were lost in extraction -- recover
 * the full declarations from the original rdc_svc.c before building.
 */
static rdc_sleepq_t *rdc_newsleepq();
static void rdc_delsleepq(rdc_sleepq_t *);
rdc_net_dataset_t *, uint_t, int);
int, int);
char *, int, int);
/* direction for dsetcopy() */
static int rdc_rread_slow;	/* rough count of slow-path remote reads */
#ifdef DEBUG
static int rdc_netwrite6;	/* debug: enables r_net_write6 tracing output */
static int rdc_stall0;		/* debug: stall flag toggled via rdc_stallzero() */
static int rdc_sleepcnt;	/* debug: outstanding sleepq elements */
int rdc_datasetcnt;		/* debug: outstanding net datasets */
#endif
int
{
int ack = 0;
if (rdc_sync_event.daemon_waiting) {
/* signalled or timed out */
ack = 0;
} else {
if (rdc_sync_event.ack)
ack = 1;
else
ack = -1;
}
}
return (ack);
}
int
int *rvp)
{
int rc = 0;
master[0] = '\0';
*rvp = 0;
return (EFAULT);
if (rdc_sync_event.kernel_waiting &&
/* We haven't been away too long */
if (master[0])
else
rdc_sync_event.ack = 0;
}
} else {
mode);
mode);
}
return (rc);
}
static int
{
return (0);
if (!IS_ENABLED(urdc)) {
return (0);
}
if (option == CCIO_RSYNC) {
/* Reverse sync */
/*
* Reverse sync needed or in progress.
*/
return (-1);
}
} else {
/* Forward sync */
/*
* Reverse syncing is bad, as that means that data
* is already flowing to the target of the requested
* sync operation.
*/
return (-1);
}
/*
* Clear "reverse sync needed" on all 1-many volumes.
* The data on them will be updated from the primary of this
* requested sync operation, so the aborted reverse sync need
* not be completed.
*/
}
if (!IS_ENABLED(utmp))
continue;
}
}
}
}
return (0);
}
/*
* r_net_null
* Proc 0 Null action
*/
static void
{
}
/*
* r_net_read
*/
static void
{
int nocache;
int sv_len;
int vecsz = 0;
if (!st) {
return;
}
#ifdef DEBUG
"!r_net_read: EPROTO cd out or not enabled");
#endif
return;
}
/* setup rpc */
if (!IS_ENABLED(urdc)) {
st = 0;
return;
}
nocache);
if (st != 0) {
} else {
"has failed in cleanup");
}
}
}
return;
}
/* data rpc */
#ifdef DEBUG
}
#endif
if (dset) {
}
goto cleanup;
}
if (!IS_ENABLED(urdc)) {
goto cleanup;
}
/* find place in vector */
/*
* IF the data is in a single sb_vec entry
* THEN
* we can just point to that
* ELSE
* we have to alloc a local buffer,
* copy the data in and then point to
* the local buffer.
*/
/* fast */
} else {
/* slow */
rdc_rread_slow++; /* rough count */
if (!buffer) {
} else {
}
}
}
if (dset) {
if (!st ||
/*
* RPC reply failed, OR
* Last RPC for this IO operation, OR
* We are failing this IO operation.
*
* Do cleanup.
*/
} else {
}
}
if (buffer)
if (vector) {
}
}
/*
* r_net_read (v6)
*/
static void
{
int nocache;
int sv_len;
int vecsz = 0;
if (!st) {
return;
}
#ifdef DEBUG
#endif
return;
}
/* setup rpc */
if (!IS_ENABLED(urdc)) {
st = 0;
return;
}
nocache);
if (st != 0) {
} else {
"has failed in cleanup");
}
}
}
return;
}
/* data rpc */
#ifdef DEBUG
}
#endif
if (dset) {
}
goto cleanup;
}
if (!IS_ENABLED(urdc)) {
goto cleanup;
}
/* find place in vector */
/*
* IF the data is in a single sb_vec entry
* THEN
* we can just point to that
* ELSE
* we have to alloc a local buffer,
* copy the data in and then point to
* the local buffer.
*/
/* fast */
} else {
/* slow */
rdc_rread_slow++; /* rough count */
if (!buffer) {
} else {
}
}
}
if (dset) {
if (!st ||
/*
* RPC reply failed, OR
* Last RPC for this IO operation, OR
* We are failing this IO operation.
*
* Do cleanup.
*/
} else {
}
}
if (buffer)
if (vector) {
}
}
/*
* r_net_write (Version 5)
* 0 reply indicates error
* >0 reply indicates a net handle index
* <0 reply indicates errno
* ret net handle index
* ret2 general error
* ret3 multi-hop errors (never returned)
*/
static void
{
int nocache;
int ret = 0;
int ret2 = 0;
int st;
goto out;
}
if (!st) {
#ifdef DEBUG
#endif
goto out;
}
#ifdef DEBUG
#endif
goto out;
}
#ifdef DEBUG
#endif
goto out;
}
}
/* -1 index says allocate a buffer */
#ifdef DEBUG
"failed to add dataset");
#endif
goto out;
} else {
}
goto out;
}
RDC_DSMEMUSE(sizeof (rdc_net_dataitem_t));
/*
* If this is a single transfer, then we don't
* need to allocate any memory for the data,
* just point the ditem data pointer to the
* existing buffer.
*/
/*
* So we don't free it twice.
*/
} else {
/*
* Allocate the memory for the complete
* transfer.
*/
goto out;
}
/*
* Copy the data to the new buffer.
*/
/*
* free the old data buffer.
*/
}
} else {
#ifdef DEBUG
"!r_net_write5: net_get_set failed cd %d idx %d",
#endif
goto out;
}
/*
* We have to copy the data from the rpc buffer
* to the data in ditem.
*/
}
}
out:
if (!RDC_SUCCESS(ret2)) {
if (ret2 > 0)
} else
}
/*
* On Error we must cleanup.
* If we have a handle, free it.
* If we have a network handle, free it.
*/
#ifdef DEBUG
#endif
if (dset) {
}
} else {
if (dset) {
}
}
}
}
/*
* r_net_write (Version 6)
* index 0 = error, or net handle index.
* result = 0 , ok.
* result = 1, pending write.
* result < 0 error, and is the -errno.
* ret net handle index.
* ret2 general error.
*/
static void
{
int ret = 0;
int ret2 = 0;
int st;
int nocache;
goto out;
}
if (!st) {
#ifdef DEBUG
"!r_net_write6:SVC_GETARGS failed: st %d", st);
#endif
goto out;
}
#ifdef DEBUG
#endif
goto out;
}
#ifdef DEBUG
"!r_net_write6: cd logging or not enabled (%x)",
#endif
goto out;
}
#ifdef DEBUG
"!r_net_write6: No group structure for set %s:%s",
#endif
goto out;
}
#ifdef DEBUG
if (rdc_netwrite6) {
"!r_net_write6: idx %d seq %u current seq %u pos %llu "
"len %d sfba %llu nfba %d endoblk %d",
}
#endif
}
/* -1 index says allocate a net dataset */
#ifdef DEBUG
"!r_net_write6: failed to add dataset");
#endif
goto out;
} else {
}
goto out;
}
RDC_DSMEMUSE(sizeof (rdc_net_dataitem_t));
/*
* If this is a single transfer, then we don't
* need to allocate any memory for the data,
* just point the ditem data pointer to the
* existing buffer.
*/
/*
* So we don't free it twice.
*/
} else {
/*
* Allocate the memory for the complete
* transfer.
*/
goto out;
}
/*
* Copy the data to the new buffer.
*/
/*
* free the old data buffer.
*/
}
} else {
#ifdef DEBUG
"!r_net_write6: net_get_set failed cd %d idx %d "
"packet sequence %u expected seq %u",
#endif
goto out;
}
/*
* We have to copy the data from the rpc buffer
* to the data in ditem.
*/
}
#ifdef DEBUG
rdc_stallzero(2);
}
#endif
/*
* magic marker, start of sequence.
*/
/*
* see if some threads are stuck.
*/
}
}
/*
* see if we are allowed through here to
* do the write, or if we have to q the
* request and send back a pending reply.
*/
int maxseq;
/*
* Check that we have room.
*/
/*
* skip magic values.
*/
}
#ifdef DEBUG
"size %d exceeded seqack %u "
"this seq %u maxseq %u seq %u",
#endif
int, maxseq);
if (!(rdc_get_vflags(urdc) &
RDC_VOL_FAILED)) {
}
goto out;
}
sq = rdc_newsleepq();
goto out;
}
io_kstats));
}
/*
* pending state.
*/
(char *)&netret);
}
return;
}
}
#ifdef DEBUG
if (!RDC_SUCCESS(ret2)) {
ret2);
}
#endif
}
}
}
out:
if (!RDC_SUCCESS(ret2)) {
} else {
}
}
}
/*
* On Error we must cleanup.
* If we have a handle, free it.
* If we have a network handle, free it.
* If we hold the main nsc buffer, free it.
*/
#ifdef DEBUG
#endif
if (dset) {
}
} else {
if (dset) {
}
}
}
}
/*
* r_net_ping4
*
* received on the primary.
*/
static void
{
int e, ret = 0;
if (e) {
/* update specified interface */
RDC_MAXADDR) == 0) &&
RDC_MAXADDR) == 0)) {
/* Update the rpc protocol version to use */
break;
}
}
} else {
#ifdef DEBUG
#endif
}
}
/*
* r_net_ping7
*
* received on the primary.
*/
static void
{
int e, ret = 0;
unsigned short *sp;
if (e) {
/* update specified interface */
/* Update the rpc protocol version to use */
break;
}
}
} else {
#ifdef DEBUG
#endif
}
}
/*
* r_net_bmap (v5)
* WARNING acts as both client and server
*/
static void
{
struct bmap b;
if (e == TRUE) {
}
}
}
/*
* r_net_bmap (v6)
* WARNING acts as both client and server
*/
static void
{
struct bmap6 b;
if (e == TRUE) {
ret = RDC_SEND_BITMAP(&b);
}
}
/*
* If the bitmap send has succeeded, clear it.
*/
if (ret == 0) {
#ifdef DEBUG
#endif
}
}
/*
* r_net_bdata
*/
static void
{
/*
* We have to convert it to the internal form here,
* net_data6, when we know that we will have to convert
* it back to the v5 variant for transmission.
*/
goto out;
if (e == TRUE) {
}
}
out:
}
/*
* r_net_bdata v6
*/
static void
{
/*
* just allocate the bigger block, regardless of < V7
* bd.size will dictate how much we lor into our bitmap
* the other option would be write r_net_bdata7 that is identical
* to this function, but a V7 alloc.
*/
goto out;
if (e == TRUE) {
}
/*
* Write the merged bitmap.
*/
#ifdef DEBUG
#endif
}
out:
}
/*
* r_net_getsize (v5)
*/
static void
{
if (e) {
RDC_DISABLEPEND) == 0))
}
}
/*
* r_net_getsize (v6)
*/
static void
{
int e, index;
/*
* small change in semantics here, as we can't return
* -1 over the wire anymore.
*/
ret = 0;
if (e) {
RDC_DISABLEPEND) == 0))
}
}
/*
* r_net_state4
*/
static void
{
int options;
int log = 0;
int done = 0;
int slave = 0;
int rev_sync = 0;
if (e) {
#ifdef DEBUG
"!r_net_state: no index or disable pending");
#endif
return;
}
if (!IS_ENABLED(urdc)) {
index = -1;
#ifdef DEBUG
#endif
return;
}
index = -1;
return;
}
#ifdef DEBUG
"!r_net_state: no valid krdc %p\n", (void*)krdc);
#endif
index = -1;
return;
}
index = -1;
#ifdef DEBUG
#endif
return;
}
else
if (options & CCIO_SLAVE) {
/*
* mark that the bitmap needs clearing.
*/
/* Starting forward sync */
if (urdc->volume_size == 0)
if (urdc->volume_size == 0) {
index = -1;
goto out;
}
if (rdc_resume_bitmap(krdc) < 0) {
index = -1;
goto out;
}
}
index = -1;
goto out;
}
slave = 1;
} else if (options & CCIO_RSYNC) {
/*
* mark that the bitmap needs clearing.
*/
/* Starting reverse sync */
RDC_VOL_FAILED | RDC_BMP_FAILED)) {
index = -1;
goto out;
}
index = -1;
goto out;
}
rev_sync = 1;
/* Sync completed OK */
}
} else if (options & CCIO_ENABLELOG) {
/* Sync aborted or logging started */
}
log = 1;
}
out:
if (slave) {
} else {
index = -1;
}
} else if (rev_sync) {
/* Check to see if volume is mounted */
} else {
index = -1;
}
} else if (done) {
/*
* special case...
* if this set is in a group, then sndrsyncd will
* make sure that all sets in the group are REP
* before updating the config to "update", telling
* sndrsyncd that it is ok to take another snapshot
* on a following sync. The important part about
* the whole thing is that syncd needs kernel stats.
* however, this thread must set the set busy to
* avoid disables. since this is the only
* sync_event_notify() that will cause a status
* call back into the kernel, and we will not be
* accessing the group structure, we have to wakeup now
*/
(void) _rdc_sync_event_notify(RDC_SYNC_DONE,
}
}
if (!done) {
}
if (log) {
"Sync aborted or logging started");
}
}
/*
* r_net_state
*/
static void
{
int options;
int log = 0;
int done = 0;
int slave = 0;
int rev_sync = 0;
unsigned short *sp;
if (e) {
#ifdef DEBUG
"!r_net_state: no index or disable pending");
#endif
return;
}
if (!IS_ENABLED(urdc)) {
index = -1;
#ifdef DEBUG
#endif
return;
}
index = -1;
return;
}
#ifdef DEBUG
"!r_net_state: no valid krdc %p\n", (void*)krdc);
#endif
index = -1;
return;
}
index = -1;
#ifdef DEBUG
#endif
return;
}
else
if (options & CCIO_SLAVE) {
/*
* mark that the bitmap needs clearing.
*/
/* Starting forward sync */
if (urdc->volume_size == 0)
if (urdc->volume_size == 0) {
index = -1;
goto out;
}
if (rdc_resume_bitmap(krdc) < 0) {
index = -1;
goto out;
}
}
index = -1;
goto out;
}
slave = 1;
} else if (options & CCIO_RSYNC) {
/*
* mark that the bitmap needs clearing.
*/
/* Starting reverse sync */
RDC_VOL_FAILED | RDC_BMP_FAILED)) {
index = -1;
goto out;
}
index = -1;
goto out;
}
rev_sync = 1;
/* Sync completed OK */
}
} else if (options & CCIO_ENABLELOG) {
/* Sync aborted or logging started */
}
log = 1;
}
out:
if (slave) {
} else {
index = -1;
}
} else if (rev_sync) {
/* Check to see if volume is mounted */
} else {
index = -1;
}
} else if (done) {
/*
* special case...
* if this set is in a group, then sndrsyncd will
* make sure that all sets in the group are REP
* before updating the config to "update", telling
* sndrsyncd that it is ok to take another snapshot
* on a following sync. The important part about
* the whole thing is that syncd needs kernel stats.
* however, this thread must set the set busy to
* avoid disables. since this is the only
* sync_event_notify() that will cause a status
* call back into the kernel, and we will not be
* accessing the group structure, we have to wakeup now
*/
(void) _rdc_sync_event_notify(RDC_SYNC_DONE,
}
}
if (!done) {
}
if (log) {
"Sync aborted or logging started");
}
}
/*
* r_net_getstate4
* Return our state to client
*/
static void
{
if (e) {
if (index >= 0) {
ret = 0;
ret |= 4;
ret |= 2;
ret |= 1;
}
}
}
/*
* r_net_getstate7
* Return our state to client
*/
static void
{
unsigned short *sp;
if (e) {
/*
* strncpy(rdc_set.primary.file, state.pfile, RDC_MAXNAMLEN);
*/
if (index >= 0) {
ret = 0;
ret |= 4;
ret |= 2;
ret |= 1;
}
}
}
/*
*/
static int
{
int sv_len;
int len;
int n;
#ifdef DEBUG
"!rdc: dsetcopy: parameters failed bdata %p, dset %p "
#endif
return (FALSE);
}
if (fba_len > MAX_RDC_FBAS ||
#ifdef DEBUG
"!rdc: dsetcopy: params failed fba_len %" NSC_SZFMT
#endif
return (FALSE);
}
if (!len) {
#ifdef DEBUG
#endif
return (FALSE);
}
#ifdef DEBUG
#endif
}
/* should never happen */
#ifdef DEBUG
#endif
return (FALSE); /* Don't overrun handle */
}
/* find starting position in vector */
/*
* Copy data
*/
while (len) {
if (!sv_addr) /* end of vec - how did this happen? */
break;
else
sv_len -= n;
len -= n;
sv_addr += n;
data += n;
if (sv_len <= 0) {
/* goto next vector */
vec++;
}
}
return (TRUE);
}
/*
* rdc_start_server
* Starts the kRPC server for rdc. Uses tli file descriptor passed down
* from user level rdc server.
*
* Returns: 0 or errno (NOT unistat!).
*/
int
{
int ret;
cred = ddi_get_cred();
return (EPERM);
#ifdef DEBUG
(void *) fp);
#endif
return (EBADF);
}
return (ret);
}
/*
* Allocate a new sleepq element.
*/
/*
 * NOTE(review): the declarator line (presumably rdc_newsleepq(void),
 * matching the prototype at the top of the file) and the allocation
 * that initializes 'sq' are missing from this extraction; only the
 * DEBUG accounting and the return survive here.
 */
static rdc_sleepq_t *
{
#ifdef DEBUG
rdc_sleepcnt++;	/* balanced by the decrement in rdc_delsleepq() */
#endif
return (sq);
}
/*
*/
/*
 * Free a sleepq element.
 *
 * NOTE(review): declarator line lost -- per the forward declaration
 * above this is rdc_delsleepq(rdc_sleepq_t *). The surviving "if
 * (dset)" suggests it releases an attached net dataset before freeing
 * the element, but most body lines are missing from this extraction.
 */
static void
{
if (dset) {
}
}
#ifdef DEBUG
rdc_sleepcnt--;	/* balances the increment in rdc_newsleepq() */
#endif
}
/*
* skip down the sleep q and insert the sleep request
* in ascending order. Return 0 on success, 1 on failure.
*/
static int
{
} else {
return (1);
}
} else {
"Attempt to add duplicate "
return (1);
}
break;
}
}
}
}
return (0);
}
/*
* run down the sleep q and discard all the sleepq elements.
*/
/*
 * Run down the sleep queue and discard all sleepq elements
 * (see the comment block immediately above).
 *
 * NOTE(review): declarator line and loop body are missing from this
 * extraction -- presumably each iteration unlinks the head element
 * and hands it to rdc_delsleepq(); confirm against the original file.
 */
void
{
while (sq) {
}
}
}
/*
* split any write requests down to maxfba sized chunks.
*/
/*ARGSUSED*/
static int
{
int len;
int ret;
int reserved;
int rtype;
int eintr_count;
unsigned char *daddr;
int kstat_len;
ret = 0;
reserved = 0;
eintr_count = 0;
do {
++eintr_count;
delay(2);
}
if (ret != 0) {
#ifdef DEBUG
"failed %d", ret);
#endif
goto out;
}
reserved = 1;
/*
* Perhaps we should cache mfba.
*/
if (ret != 0) {
#ifdef DEBUG
ret);
#endif
goto out;
}
/* should never happen */
/*
* also need to trim down the vector
* sizes.
*/
}
while (len > 0) {
if (handle) {
(void) nsc_free_buf(handle);
}
if (ret != 0) {
#ifdef DEBUG
"nsc_alloc (d1) buf failed %d at "
#endif
goto out;
}
if (ret != 0) {
#ifdef DEBUG
#endif
goto out;
}
}
out:
if (!RDC_SUCCESS(ret)) {
RDC_PRIMARY));
"svc write failed");
}
} else {
/* success */
#ifdef DEBUG
if (rdc_netwrite6) {
/*
* This string is used in the ZatoIchi MASNDR
* tests, if you change this, update the test.
*/
"sequence %u", seq);
}
#endif
}
}
if (handle)
(void) nsc_free_buf(handle);
if (reserved)
return (ret);
}
static int
{
int rsync;
int ret;
int multiret;
rsync = -1;
ret = 0;
/* Handle multihop I/O even on error */
/*
* Find a target primary that is enabled,
* taking account of the fact that this
* could be a multihop secondary
* connected to a 1-to-many primary.
*/
goto multi_done;
}
do {
/* CSTYLED */
&& IS_ENABLED(utmp))
break;
!IS_ENABLED(utmp)) {
goto multi_done;
}
if (!rsync) {
/* normal case - local io first */
0);
}
if (!RDC_SUCCESS(multiret)) {
#ifdef DEBUG
"rdc_multi_write failed "
"status %d ret %d",
#endif
if (!(rdc_get_vflags(utmp) &
RDC_VOL_FAILED)) {
if (rdc_get_vflags(utmp) &
RDC_PRIMARY) {
} else {
}
}
}
}
if (rsync != 0) {
/*
* Either:
* reverse sync in progress and so we
* need to do the local io after the
* (multihop) secondary io.
* Or:
* no multihop and this is the only io
* required.
*/
}
return (ret);
}
/*
* set the pos and len values in the piggyback reply.
*/
static void
{
int pc;
}
/*
* Enters with group->ra_queue.net_qlock held.
* Tries to construct the return status data for
* all the pending requests in the sleepq that it can
* satisfy.
*/
static void
{
int pendcnt;
int ret;
int pendsz;
pendcnt = 0;
/*
* now look at the Q of pending tasks, attempt
* to write any that have been waiting for
* me to complete my write, and piggyback
* their results in my reply, by setting pendcnt
* to the number of extra requests successfully
* processed.
*/
}
#ifdef DEBUG
#endif
/*
* as we failed to get the pointer, there
* is no point expecting the cleanup
* code in rdc_delsleepq() to get it
* either.
*/
goto cleansq;
}
if (RDC_SUCCESS(ret)) {
} else {
"asynchronous task failed, with "
"sequence number %u for SNDR set %s:%s",
}
}
if (pendcnt) {
int vecsz;
#ifdef DEBUG
if (rdc_netwrite6) {
}
#endif
}
}
/*
* Take the dset and allocate and fill in the vector.
*/
static nsc_vec_t *
{
int i;
return (NULL);
}
}
/*
* Null terminate.
*/
/*
* Check the list and count matches.
*/
return (vecret);
}
/*
* Split the local read into maxfba sized chunks.
* Returns 0 on an error, or a valid idx on success.
*/
static int
{
int idx;
int rtype;
int ret;
int reserved;
int eintr_count;
idx = 0; /* error status */
reserved = 0;
ret = 0;
mfba = 0;
eintr_count = 0;
do {
++eintr_count;
delay(2);
}
if (ret != 0) {
#ifdef DEBUG
ret);
#endif
goto out;
}
reserved = 1;
/*
* create a dataset that we can hang all the buffers from.
*/
#ifdef DEBUG
#endif
goto out;
}
if (ret != 0) {
#ifdef DEBUG
#endif
goto out;
}
while (fbaleft > 0) {
if (handle) {
(void) nsc_free_buf(handle);
}
if (ret != 0) {
#ifdef DEBUG
#endif
goto out;
}
goto out;
}
RDC_DSMEMUSE(sizeof (rdc_net_dataitem_t));
/*
* construct a vector list
*/
if (ret != 0) {
goto out;
}
/*
* place on linked list.
*/
} else {
}
/*
* now its linked, clear this so its not freed twice.
*/
}
/*
* all the reads have worked, store the results.
*/
out:
if (handle)
(void) nsc_free_buf(handle);
if (reserved)
if (dset)
if (ditem) {
RDC_DSMEMUSE(-sizeof (*ditem));
}
return (idx);
}
/*
* perform both a local read, and if multihop, a remote read.
* return 0 on success, or errno on failure.
*/
static int
{
int ret;
/*
* read it.
*/
}
}
if (ret != 0) {
#ifdef DEBUG
#endif
"comby read failed");
}
goto out;
}
IS_ENABLED(utmp) &&
/*
* Set NSC_MIXED so
* that the cache will throw away this
* buffer when we free it since we have
* combined data from multiple sources
* into a single buffer.
* Currently we don't use the cache for
* data volumes, so comment this out.
* handle->sb_flag |= NSC_MIXED;
*/
if (ret != 0) {
#ifdef DEBUG
#endif
goto out;
}
}
}
out:
return (ret);
}
/*
* remove and free all the collected dsets for this set.
*/
void
{
/*
* for the dset to be in use, the
* service routine r_net_write6() must
* be active with it. It will free
* it eventually.
*/
delay(5);
goto tloop;
}
/*
* free it.
*/
}
}
#ifdef DEBUG
/*
 * DEBUG-only stall gate; callers elsewhere in this file invoke it as
 * rdc_stallzero(2) (see r_net_write6), so the lost declarator line was
 * presumably rdc_stallzero(int flag).
 *   flag 0: clear the stall flag
 *   flag 1: set the stall flag
 *   flag 2: block while the stall flag is set
 * NOTE(review): the one-time init block and the wait inside the
 * "while (rdc_stall0 == 1)" loop are missing from this extraction --
 * likely a mutex/cv declaration, mutex_init(), and a cv_wait(); the
 * surviving mutex_enter/mutex_exit reference an unseen 'mu'. Confirm
 * against the original source.
 */
void
{
static int init = 0;
if (init == 0) {
init = 1;
}
mutex_enter(&mu);
switch (flag) {
case 0:
rdc_stall0 = 0;
break;
case 1:
rdc_stall0 = 1;
break;
case 2:
while (rdc_stall0 == 1)
break;
default:
break;
}
mutex_exit(&mu);
}
#endif
/*
* RDC protocol version 5
*/
/*
 * kRPC procedure dispatch tables, one entry per RPC procedure number
 * (proc 0 is the null procedure; rdcsrv_noproc fills unused slots).
 * Each entry pairs a service routine with an idempotency flag -- only
 * the write procedures (r_net_write5 / r_net_write6) are marked TRUE.
 *
 * NOTE(review): the array declarator lines (e.g. something like
 * "static rdcsrv_t rdc_srv_v5[] = {") were lost in extraction, and the
 * trailing second "};" at the end suggests these three tables were
 * themselves wrapped in an enclosing per-version dispatch array.
 */
{
/* PROC Idempotent */
{ r_net_null, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_getsize, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_write5, TRUE },
{ r_net_read, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_state4, FALSE },
{ r_net_ping4, FALSE },
{ r_net_bmap, FALSE },
{ r_net_bdata, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_getstate4, FALSE }
};
/*
 * RDC protocol version 6
 */
{
/* PROC Idempotent */
{ r_net_null, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_getsize6, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_write6, TRUE },
{ r_net_read6, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_state4, FALSE },
{ r_net_ping4, FALSE },
{ r_net_bmap6, FALSE },
{ r_net_bdata6, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_getstate4, FALSE }
};
/*
 * RDC protocol version 7
 * (v7 swaps in the state/ping/getstate routines without the "4" suffix.)
 */
{
/* PROC Idempotent */
{ r_net_null, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_getsize6, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_write6, TRUE },
{ r_net_read6, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_state, FALSE },
{ r_net_ping7, FALSE },
{ r_net_bmap6, FALSE },
{ r_net_bdata6, FALSE },
{ rdcsrv_noproc, FALSE },
{ r_net_getstate7, FALSE }
};
};
/*
 * NOTE(review): fragment of an XDR routine whose declarator line was
 * lost in extraction. The surviving comment indicates it wraps
 * xdr_bytes(), discarding the caller's maxlen when decoding into a
 * pre-allocated buffer; the decode/encode logic between the two
 * FALSE returns is missing -- recover from the original rdc_svc.c.
 */
{
/*
 * If we're decoding and the caller has already allocated a buffer,
 * throw away maxlen, since it doesn't apply to the caller's
 * buffer. xdr_bytes will return an error if the buffer isn't big
 * enough.
 */
return (FALSE);
} else {
return (FALSE);
}
}