/* clnt_vc.c revision 7c478bd95313f5f23a4c958a745db2134aa03244 */
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2005 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
/*
* Portions of this source code were derived from Berkeley
* 4.3 BSD under license from the Regents of the University of
* California.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
*
* Implements a connectionful client side RPC.
*
* Connectionful RPC supports 'batched calls'.
* A sequence of calls may be batched-up in a send buffer. The rpc call
* returns immediately to the client even though the call was not necessarily
* sent. The batching occurs if the results' xdr routine is NULL (0) AND
* the rpc timeout value is zero (see clnt.h, rpc).
*
* Clients should NOT casually batch calls that in fact return results; that
* is the server side should be aware that a call is batched and not produce
* any return message. Batched calls that produce many result messages can
* deadlock (netlock) the client and the server....
*/
#include "mt.h"
#include "rpc_mt.h"
#include <assert.h>
#include <errno.h>
#include <sys/byteorder.h>
#include <syslog.h>
#include <stdlib.h>
#include <unistd.h>
#define MCALL_MSG_SIZE 24
#define SECS_TO_MS 1000
#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
extern int __rpc_timeval_to_msec();
extern bool_t xdr_opaque_auth();
extern bool_t __rpc_gss_wrap();
extern bool_t __rpc_gss_unwrap();
static struct clnt_ops *clnt_vc_ops();
#ifdef __STDC__
#else
static int read_vc();
static int write_vc();
#endif
static int t_rcvall();
static bool_t time_not_ok();
static bool_t set_up_connection();
struct ct_data;
/*
* Lock table handle used by various MT sync. routines
*/
static const char clnt_vc_errstr[] = "%s : %s";
static const char clnt_vc_str[] = "clnt_vc_create";
static const char clnt_read_vc_str[] = "read_vc";
static const char __no_mem_str[] = "out of memory";
static const char no_fcntl_getfl_str[] = "could not get status flags and modes";
static const char no_nonblock_str[] = "could not set transport blocking mode";
/*
* Private data structure
*/
struct ct_data {
int ct_fd; /* connection's fd */
int ct_tsdu; /* size of tsdu */
int ct_wait; /* wait interval in milliseconds */
/* NON STANDARD INFO - 00-08-31 */
char *ct_buffer; /* Pointer to the buffer. */
char *ct_bufferWritePtr; /* Ptr to the first free byte. */
char *ct_bufferReadPtr; /* Ptr to the first byte of data. */
};
struct nb_reg_node {
struct nb_reg_node *next;
};
/* Define some macros to manage the linked list. */
#define LIST_ISEMPTY(l) (l == (struct nb_reg_node *)&l)
#define LIST_CLR(l) (l = (struct nb_reg_node *)&l)
#define LIST_FOR_EACH(l, node) \
/* Default size of the IO buffer used in non blocking mode */
unsigned int nbytes);
int P_mode);
/*
* Change the mode of the underlying fd.
*/
static bool_t
{
int flag;
/*
* If the underlying fd is already in the required mode,
* avoid the syscall.
*/
return (TRUE);
return (FALSE);
}
return (FALSE);
}
return (TRUE);
}
/*
* Create a client handle for a connection.
* Default options are set, which the user can change using clnt_control()'s.
* must pick send and receive buffer sizes, 0 => use the default.
* NB: fd is copied into a private area.
* NB: The rpch->cl_auth is set null authentication. Caller may wish to
* set this something more useful.
*
* fd should be open and bound.
*/
CLIENT *
int fd; /* open file descriptor */
{
}
/*
* This has the same definition as clnt_vc_create(), except it
* takes an additional parameter - a pointer to a timeval structure.
*
* Not a public interface. This is for clnt_create_timed,
* clnt_create_vers_timed, clnt_tp_create_timed to pass down the timeout
* value to control a tcp connection attempt.
* (for bug 4049792: clnt_create_timed does not time out)
*
* If tp is NULL, use default timeout to set up the connection.
*/
CLIENT *
int fd; /* open file descriptor */
{
int flag;
goto err;
}
goto err;
}
ct->ct_bufferPendingSize = 0;
/* Check the current state of the fd. */
goto err;
}
goto err;
}
/*
* Set up other members of private data struct
*/
/*
* The actual value will be set by clnt_call or clnt_control
*/
/*
* By default, closeit is always FALSE. It is users responsibility
* to do a t_close on it, else the user may use clnt_control
*/
/*
* Initialize call message
*/
/*
* pre-serialize the static part of the call msg and stash it away
*/
goto err;
}
goto err;
}
/*
* Find the receive and the send size
*/
goto err;
}
/*
* Create a client handle which uses xdrrec for serialization
* and authnone for authentication.
*/
goto err;
}
return (cl);
err:
if (cl) {
if (ct) {
}
}
}
#define TCPOPT_BUFSIZE 128
/*
* Set tcp connection timeout value.
* Return 0 for success, -1 for failure.
*/
static int
{
int *ip;
char buf[TCPOPT_BUFSIZE];
return (-1);
}
return (0);
}
/*
* Get current tcp connection timeout value.
* Return 0 for success, -1 for failure.
*/
static int
_get_tcp_conntime(int fd)
{
char buf[TCPOPT_BUFSIZE];
*ip = 0;
return (-1);
}
return (retval);
}
static bool_t
int fd;
{
int state;
int nconnect;
int curr_time = 0;
if (state == -1) {
return (FALSE);
}
#ifdef DEBUG
#endif
switch (state) {
case T_IDLE:
return (FALSE);
}
/*
* Connect only if state is IDLE and svcaddr known
*/
/* LINTED pointer alignment */
return (FALSE);
}
/*
* Even NULL could have sufficed for rcvcall, because
* the address returned is same for all cases except
* for the gateway case, and hence required.
*/
/*
* If there is a timeout value specified, we will try to
* reset the tcp connection timeout. If the transport does
* not support the TCP_CONN_ABORT_THRESHOLD option or fails
* for other reason, default timeout will be used.
*/
int ms;
/* TCP_CONN_ABORT_THRESHOLD takes int value in millisecs */
#ifdef DEBUG
#endif
}
}
break;
}
break;
}
break;
}
break;
}
}
if (do_rcv_connect) {
do {
break;
}
}
/*
* Set the connection timeout back to its old value.
*/
if (curr_time) {
}
if (!connected) {
#ifdef DEBUG
#endif
return (FALSE);
}
/* Free old area if allocated */
/* So that address buf does not get freed */
break;
case T_DATAXFER:
case T_OUTCON:
/*
* svcaddr could also be NULL in cases where the
* client is already bound and connected.
*/
} else {
return (FALSE);
}
}
break;
default:
return (FALSE);
}
return (TRUE);
}
static enum clnt_stat
{
/* LINTED pointer alignment */
/* LINTED pointer alignment */
int refreshes = 2;
return (RPC_FAILED);
}
return (RPC_FAILED); /* XXX */
}
}
if (!ct->ct_waitset) {
/* If time is not within limits, we ignore it. */
} else {
}
/*
* Due to little endian byte order, it is necessary to convert to host
* format before decrementing xid.
*/
return (rpc_callerr.re_status);
}
} else {
/* LINTED pointer alignment */
IXDR_PUT_U_INT32(u, proc);
return (rpc_callerr.re_status);
}
}
}
if (! shipnow) {
return (RPC_SUCCESS);
}
/*
* Hack to provide rpc-based message passing
*/
}
/*
* Keep receiving until we get a valid transaction id
*/
/*CONSTANTCONDITION*/
while (TRUE) {
if (! xdrrec_skiprecord(xdrs)) {
return (rpc_callerr.re_status);
}
/* now decode and validate the response header */
continue;
return (rpc_callerr.re_status);
}
break;
}
/*
* process header
*/
else
}
results_ptr)) {
}
} /* end successful completion */
/*
* If unsuccessful AND error is an authentication error
* then refresh credentials and try again, else break
*/
/* maybe our credentials need to be refreshed ... */
goto call_again;
else
/*
* We are setting rpc_callerr here given that libnsl
* is not reentrant thereby reinitializing the TSD.
* If not set here then success could be returned even
* though refresh failed.
*/
} /* end of unsuccessful completion */
/* free verifier ... */
}
return (rpc_callerr.re_status);
}
static enum clnt_stat
{
/* LINTED pointer alignment */
/* LINTED pointer alignment */
return (RPC_FAILED);
}
/*
* Due to little endian byte order, it is necessary to convert to host
* format before decrementing xid.
*/
return (rpc_callerr.re_status);
}
} else {
/* LINTED pointer alignment */
IXDR_PUT_U_INT32(u, proc);
return (rpc_callerr.re_status);
}
}
/*
* Do not need to check errors, as the following code does
* not depend on the successful completion of the call.
* An error, if any occurs, is reported through
* rpc_callerr.re_status.
*/
return (rpc_callerr.re_status);
}
static void
{
*errp = rpc_callerr;
}
static bool_t
{
/* LINTED pointer alignment */
return (dummy);
}
/*
 * clnt_vc_abort() -- the "abort" operation in the VC client ops vector.
 * The visible body only records a trace event and returns; no client or
 * transport state is touched here.
 * NOTE(review): trace1/TR_clnt_vc_abort appear to be tracing macros from a
 * project header not visible in this chunk -- confirm they have no side
 * effects beyond trace emission.
 */
static void
clnt_vc_abort(void)
{
trace1(TR_clnt_vc_abort, 0);
}
/*ARGSUSED*/
static bool_t
int request;
char *info;
{
/* LINTED pointer alignment */
return (RPC_FAILED);
}
switch (request) {
case CLSET_FD_CLOSE:
return (TRUE);
case CLSET_FD_NCLOSE:
return (TRUE);
case CLFLUSH:
int res;
*(int *)info == RPC_CL_DEFAULT_FLUSH)?
}
return (ret);
}
/* for other requests which use info */
return (FALSE);
}
switch (request) {
case CLSET_TIMEOUT:
/* LINTED pointer alignment */
return (FALSE);
}
/* LINTED pointer alignment */
break;
case CLGET_TIMEOUT:
/* LINTED pointer alignment */
/* LINTED pointer alignment */
break;
case CLGET_SERVER_ADDR: /* For compatibility only */
break;
case CLGET_FD:
/* LINTED pointer alignment */
break;
case CLGET_SVC_ADDR:
/* The caller should not free this memory area */
/* LINTED pointer alignment */
break;
case CLSET_SVC_ADDR: /* set to new address */
#ifdef undef
/*
* XXX: once the t_snddis(), followed by t_connect() starts to
* work, this ifdef should be removed. CLIENT handle reuse
* would then be possible for COTS as well.
*/
return (FALSE);
}
return (ret);
#else
return (FALSE);
#endif
case CLGET_XID:
/*
* use the knowledge that xid is the
* first element in the call structure
* This will get the xid of the PREVIOUS call
*/
/* LINTED pointer alignment */
break;
case CLSET_XID:
/* This will set the xid of the NEXT call */
/* LINTED pointer alignment */
/* increment by 1 as clnt_vc_call() decrements once */
break;
case CLGET_VERS:
/*
* This RELIES on the information that, in the call body,
* the version number field is the fifth field from the
* beginning of the RPC header. MUST be changed if the
* call_struct is changed
*/
/* LINTED pointer alignment */
4 * BYTES_PER_XDR_UNIT));
break;
case CLSET_VERS:
/* LINTED pointer alignment */
/* LINTED pointer alignment */
break;
case CLGET_PROG:
/*
* This RELIES on the information that, in the call body,
* the program number field is the fourth field from the
* beginning of the RPC header. MUST be changed if the
* call_struct is changed
*/
/* LINTED pointer alignment */
3 * BYTES_PER_XDR_UNIT));
break;
case CLSET_PROG:
/* LINTED pointer alignment */
/* LINTED pointer alignment */
break;
case CLSET_IO_MODE:
return (FALSE);
}
break;
case CLSET_FLUSH_MODE:
/* Set a specific FLUSH_MODE */
return (FALSE);
}
break;
case CLGET_FLUSH_MODE:
break;
case CLGET_IO_MODE:
break;
case CLGET_CURRENT_REC_SIZE:
/*
* Returns the current amount of memory allocated
* to pending requests
*/
break;
case CLSET_CONNMAXREC_SIZE:
/* Cannot resize the buffer if it is used. */
if (ct->ct_bufferPendingSize != 0) {
return (FALSE);
}
/*
* If the new size is equal to the current size,
* there is nothing to do.
*/
break;
}
break;
case CLGET_CONNMAXREC_SIZE:
/*
* Returns the size of buffer allocated
* to pending requests
*/
break;
default:
return (FALSE);
}
return (TRUE);
}
static void
{
/* LINTED pointer alignment */
}
if (ct->ct_closeit)
}
/*
* Interface between xdr serializer and vc connection.
* Behaves like the system calls, read & write, but keeps some error state
* around for the rpc level.
*/
static int
{
static pthread_key_t pfdp_key;
int npfd; /* total number of pfdp allocated */
struct timeval time_waited;
int poll_time;
int delta;
if (len == 0) {
return (0);
}
/*
* Allocate just one the first time. thr_get_storage() may
* return a larger buffer, left over from the last time we were
* here, but that's OK. realloc() will deal with it properly.
*/
npfd = 1;
rpc_callerr.re_terrno = 0;
return (-1);
}
/*
* N.B.: slot 0 in the pollfd array is reserved for the file
* descriptor we're really interested in (as opposed to the
* callback descriptors).
*/
return (-1);
}
for (;;) {
extern void (*_svc_getreqset_proc)();
extern pollfd_t *svc_pollfd;
extern int svc_max_pollfd;
int fds;
/* VARIABLES PROTECTED BY svc_fd_lock: svc_pollfd */
if (_svc_getreqset_proc) {
/* reallocate pfdp to svc_max_pollfd +1 */
sizeof (struct pollfd) *
(svc_max_pollfd + 1));
rpc_callerr.re_terrno = 0;
return (-1);
}
}
if (npfd > 1)
} else {
}
case 0:
return (-1);
case -1:
continue;
else {
/*
* interrupted by another signal,
* update time_waited
*/
if (gettimeofday(&curtime,
"Unable to get time of day: %m");
errno = 0;
continue;
};
if (poll_time < 0) {
errno = 0;
return (-1);
} else {
errno = 0; /* reset it */
continue;
}
}
}
/* must be for server side of the house */
continue; /* do poll again */
}
/*
* Note: we're faking errno here because we
* previously would have expected select() to
* return -1 with errno EBADF. Poll(BA_OS)
* returns 0 and sets the POLLNVAL revents flag
* instead.
*/
return (-1);
}
return (-1);
}
break;
}
case 0:
/* premature eof */
rpc_callerr.re_terrno = 0;
break;
case -1:
rpc_callerr.re_errno = 0;
break;
}
return (len);
}
static int
void *ct_tmp;
int len;
{
int i, cnt;
int flag;
int maxsz;
/* Handle the non-blocking mode */
/*
* Test a special case here. If the length of the current
* write is greater than the transport data unit, and the
* mode is non blocking, we return RPC_CANTSEND.
* XXX this is not very clean.
*/
rpc_callerr.re_errno = 0;
return (-1);
}
if (len == -1) {
rpc_callerr.re_errno = 0;
} else if (len == -2) {
rpc_callerr.re_terrno = 0;
rpc_callerr.re_errno = 0;
}
return (len);
}
/*
* T_snd may return -1 for error on connection (connection
* handling error (no operation to do, just wait and call
* T_Flush()).
*/
rpc_callerr.re_errno = 0;
}
return (len);
}
/*
* This for those transports which have a max size for data.
*/
flag)) == -1) {
rpc_callerr.re_errno = 0;
return (-1);
}
}
return (len);
}
/*
* Receive the required bytes of data, even if it is fragmented.
*/
static int
int fd;
char *buf;
int len;
{
int moreflag;
int final = 0;
int res;
do {
moreflag = 0;
if (res == -1) {
case T_DISCONNECT:
return (-1);
case T_ORDREL:
/* Received orderly release indication */
/* Send orderly release indicator */
return (-1);
default:
return (-1);
}
} else if (res == 0) {
return (0);
}
return (final);
}
static struct clnt_ops *
clnt_vc_ops(void)
{
/* VARIABLES PROTECTED BY ops_lock: ops */
trace1(TR_clnt_vc_ops, 0);
}
return (&ops);
}
/*
* Make sure that the time is not garbage. -1 value is disallowed.
* Note this is different from time_not_ok in clnt_dg.c
*/
static bool_t
time_not_ok(t)
struct timeval *t;
{
trace1(TR_time_not_ok, 0);
}
/* Compute the # of bytes that remains until the end of the buffer */
static int
{
/* Buffer not allocated yet. */
char *buffer;
return (-1);
}
} else {
/*
* For an already allocated buffer, two mem copies
* might be needed, depending on the current
* writing position.
*/
/* Compute the length of the first copy. */
if (0 == nBytes) {
/* One memcopy needed. */
/*
* If the write pointer is at the end of the buffer,
* wrap it now.
*/
if (ct->ct_bufferWritePtr ==
}
} else {
/* Two memcopy needed. */
/*
* Copy the remaining data to the beginning of the
* buffer
*/
}
}
return (0);
}
static void
{
}
static void
{
if (ct->ct_bufferPendingSize == 0) {
/*
* If the buffer contains no data, we set the two pointers at
* the beginning of the buffer (to minimize buffer wraps).
*/
} else {
if (ct->ct_bufferReadPtr >
}
}
}
static int
{
int l;
if (ct->ct_bufferPendingSize == 0)
return (0);
l = REMAIN_BYTES(bufferReadPtr);
if (l < ct->ct_bufferPendingSize) {
/* Buffer in two fragments. */
return (2);
} else {
/* Buffer in one fragment. */
return (1);
}
}
static bool_t
{
switch (mode) {
case RPC_CL_BLOCKING_FLUSH:
/* flush as most as possible without blocking */
case RPC_CL_BESTEFFORT_FLUSH:
/* flush the buffer completely (possibly blocking) */
case RPC_CL_DEFAULT_FLUSH:
/* flush according to the currently defined policy */
return (TRUE);
default:
return (FALSE);
}
}
static bool_t
{
switch (ioMode) {
case RPC_CL_BLOCKING:
/*
* If a buffer was allocated for this
* connection, flush it now, and free it.
*/
}
}
break;
case RPC_CL_NONBLOCKING:
return (FALSE);
}
}
break;
default:
return (FALSE);
}
return (TRUE);
}
static int
{
int result;
if (ct->ct_bufferPendingSize == 0) {
return (0);
}
switch (flush_mode) {
case RPC_CL_BLOCKING_FLUSH:
return (-1);
}
while (ct->ct_bufferPendingSize > 0) {
if (REMAIN_BYTES(bufferReadPtr) <
} else {
ct->ct_bufferPendingSize, 0);
}
if (result < 0) {
return (-1);
}
}
break;
case RPC_CL_BESTEFFORT_FLUSH:
} else {
ct->ct_bufferPendingSize, 0);
}
if (result < 0) {
if (errno != EWOULDBLOCK) {
perror("flush");
return (-1);
}
return (0);
}
if (result > 0)
break;
}
return (0);
}
/*
* Non blocking send.
*/
static int
{
int result;
return (-1);
}
/*
* Check to see if the current message can be stored fully in the
* buffer. We have to check this now because it may be impossible
* to send any data, so the message must be stored in the buffer.
*/
/* Try to flush (to free some space). */
/* Can we store the message now ? */
return (-2);
}
/*
* If there is no data pending, we can simply try
* to send our data.
*/
if (ct->ct_bufferPendingSize == 0) {
if (result == -1) {
if (errno == EWOULDBLOCK) {
result = 0;
} else {
perror("send");
return (-1);
}
}
/*
* If we have not sent all data, we must store them
* in the buffer.
*/
return (-1);
}
}
} else {
/*
* Some data pending in the buffer. We try to send
* both buffer data and current message in one shot.
*/
if (result == -1) {
if (errno == EWOULDBLOCK) {
/* No bytes sent */
result = 0;
} else {
return (-1);
}
}
/*
* Add the bytes from the message
* that we have not sent.
*/
/* No bytes from the message sent */
return (-1);
}
} else {
/*
* Some bytes of the message are sent.
* Compute the length of the message that has
* been sent.
*/
/* So, empty the buffer. */
ct->ct_bufferPendingSize = 0;
/* And add the remaining part of the message. */
return (-1);
}
}
}
}
return (nBytes);
}
static void
{
struct nb_reg_node *node;
if (LIST_ISEMPTY(nb_first)) {
return;
}
}
}
static int
{
#define CHUNK_SIZE 16
struct nb_reg_node *n;
int i;
return (-1);
}
n = chk;
for (i = 0; i < CHUNK_SIZE-1; ++i) {
n[i].next = &(n[i+1]);
}
return (0);
}
static int
{
struct nb_reg_node *node;
return (-1);
}
if (!exit_handler_set) {
}
/* Get the first free node */
return (0);
}
static int
{
struct nb_reg_node *node;
/* Get the node to unregister. */
break;
}
}
return (0);
}