/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
*/
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */
#include "mt.h"
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <stropts.h>
#include <xti.h>
#include <fcntl.h>
#include <signal.h>
#include <assert.h>
#include <syslog.h>
#include <limits.h>
#include <ucred.h>
#include "tx.h"
/*
* The following used to be in tiuser.h, but was causing too much namespace
* pollution.
*/
static struct _ti_user *find_tilink(int s);
static struct _ti_user *add_tilink(int s);
struct T_info_ack *tsap);
/*
* Checkfd - checks validity of file descriptor
*/
/*
* NOTE(review): this definition is visibly incomplete in this view --
* the function name/parameter line and many statements are missing and
* the braces do not balance.  Recover the full body from source control
* before modifying; comments below only annotate what survives.
*/
struct _ti_user *
{
if (fd < 0) {
return (NULL);
}
if (!force_sync) {
return (tiptr);
}
}
/*
* Not found or a forced sync is required.
*/
timodpushed = 0;
do {
/*
* not a stream or a TLI endpoint with no timod
* XXX Note: If it is a XTI call, we push "timod" and
* try to convert it into a transport endpoint later.
* We do not do it for TLI and "retain" the old buggy
* behavior because ypbind and a lot of other daemons seem
* to use a buggy logic test of the form
* "(t_getstate(0) != -1 || t_errno != TBADF)" to see if
* they were ever invoked with a request on stdin and drop into
* untested code. This test is in code generated by rpcgen
* which is why the test is replicated in many daemons too.
* We will need to fix that test too with "IsaTLIendpoint"
* test if we ever fix this for TLI
*/
return (NULL);
}
if (retval == 0) {
/*
* "timod" not already on stream, then push it
*/
do {
/*
* Assumes (correctly) that I_PUSH is
* atomic w.r.t signals (EINTR error)
*/
if (retval < 0) {
return (NULL);
}
timodpushed = 1;
}
/*
* Try to (re)constitute the info at user level from state
* in the kernel. This could be information that was lost due
* to an exec or due to being instantiated at a new descriptor
* via open(), dup2() etc.
*
* _t_create() requires that all signals be blocked.
* Note that sig_mutex_lock() only defers signals, it does not
* block them, so interruptible syscalls could still get EINTR.
*/
/*
* restore to stream before timod pushed. It may
* not have been a network transport stream.
*/
if (timodpushed)
return (NULL);
}
return (tiptr);
}
/*
* copy data to output buffer making sure the output buffer is 32 bit
* aligned, even though the input buffer may not be.
*/
/*
* NOTE(review): truncated definition -- the function name, remaining
* parameter list and the actual copy logic are missing from this view.
* Only the overflow guard and the return paths survive.
*/
int
int len,
int init_offset,
char *datap,
{
/*
* Aligned copy will overflow buffer
*/
return (-1);
}
return (0);
}
/*
* append data and control info in look buffer (list in the MT case)
*
* The only thing that can be in look buffer is a T_DISCON_IND,
* T_ORDREL_IND or a T_UDERROR_IND.
*
* It also enforces priority of T_DISCONDs over any T_ORDREL_IND
* already in the buffer. It assumes no T_ORDREL_IND is appended
* when there is already something on the looklist (error case) and
* that a T_ORDREL_IND if present will always be the first on the
* list.
*
* This also assumes ti_lock is held via sig_mutex_lock(),
* so signals are deferred here.
*/
/*
* NOTE(review): truncated definition -- function name, leading
* parameters and most control-flow lines are missing (braces do not
* balance).  Recover the full body before modifying.
*/
int
int dsize,
int csize)
{
/* can't fit - return error */
return (-1); /* error */
}
/*
* Enforce priority of T_DISCON_IND over T_ORDREL_IND
* queued earlier.
* Note: Since there can be only at most one T_ORDREL_IND
* queued (more than one is error case), and we look for it
* on each append of T_DISCON_IND, it can only be at the
* head of the list if it is there.
*/
/* LINTED pointer cast */
/* appending discon ind */
/* LINTED pointer cast */
T_ORDREL_IND) { /* T_ORDREL_IND is on list */
/*
* Blow away T_ORDREL_IND
*/
}
}
}
if (tiptr->ti_lookcnt > 0) {
int listcount = 0;
/*
* Allocate and append a new lookbuf to the
* existing list. (Should only happen in MT case)
*/
listcount++;
}
/*
* signals are deferred, calls to malloc() are safe.
*/
NULL)
return (-1); /* error */
/*
* Allocate the buffers. The sizes are derived from the
* sizes of other related buffers. See _t_alloc_bufs()
* for details.
*/
/* giving up - free other memory chunks */
return (-1); /* error */
}
if ((dsize > 0) &&
/* giving up - free other memory chunks */
return (-1); /* error */
}
}
if (dsize > 0)
tiptr->ti_lookcnt++;
return (0); /* ok return */
}
/*
* Is there something that needs attention?
* Assumes tiptr->ti_lock held and this threads signals blocked
* in MT case.
*/
/*
* NOTE(review): truncated definition -- the function name, parameters
* and the conditions guarding each return are missing from this view.
*/
int
{
return (-1);
}
return (-1);
}
return (0);
}
/*
* wait for T_OK_ACK
* assumes tiptr->ti_lock held in MT case
*/
/*
* NOTE(review): truncated definition -- the function name, parameters,
* the getmsg/ioctl calls and the switch header are missing from this
* view (braces do not balance).  Recover the full body before
* modifying; only the case labels and cleanup paths survive.
*/
int
{
int size;
int flags = 0;
/*
* of the message.
*/
return (-1);
/*
*/
if (didalloc)
else
return (-1);
}
/*
* Temporarily convert a non blocking endpoint to a
* blocking one and restore status later
*/
continue;
goto err_out;
}
/* did I get entire message */
if (retval > 0) {
goto err_out;
}
/*
* is ctl part large enough to determine type?
*/
goto err_out;
}
/* LINTED pointer cast */
case T_OK_ACK:
goto err_out;
}
if (didalloc)
else
if (didralloc)
else
return (0);
case T_ERROR_ACK:
goto err_out;
}
/*
* if error is out of state and there is something
* on read queue, then indicate to user that
* there is something that needs attention
*/
goto err_out;
}
if (retval > 0)
else
} else {
}
goto err_out;
default:
/* fallthru to err_out: */
}
if (didalloc)
else
if (didralloc)
else
return (-1);
}
/*
* timod ioctl
*/
/*
* NOTE(review): truncated definition -- the function name, parameters
* and the ioctl invocation itself are missing from this view.
*/
int
{
int retval;
return (-1);
}
if (retval > 0) {
return (-1);
}
if (retlenp)
return (0);
}
/*
* alloc scratch buffers and look buffers
*/
/* ARGSUSED */
/*
* NOTE(review): truncated definition -- the function name, parameters,
* the size computations and the malloc calls are missing from this
* view.  Only the size-summation comment block and error paths survive.
*/
static int
{
if (size1 != 0) {
return (-1);
return (-1);
}
} else {
}
/* compensate for XTI level options */
else
/*
* We compute the largest buffer size needed for this provider by
* adding the components. [ An extra sizeof (t_scalar_t) is added to
* take care of rounding off for alignment) for each buffer ]
* The goal here is compute the size of largest possible buffer that
* might be needed to hold a TPI message for the transport provider
* on this endpoint.
* Note: T_ADDR_ACK contains potentially two address buffers.
*/
+ asize + (unsigned int)sizeof (t_scalar_t) +
/* first addr buffer plus alignment */
asize + (unsigned int)sizeof (t_scalar_t) +
/* second addr buffer plus alignment */
osize + (unsigned int)sizeof (t_scalar_t);
/* option buffer plus alignment */
if (size1 != 0) {
}
return (-1);
}
if (size1 != 0) {
}
return (-1);
}
/*
* Note: The head of the lookbuffers list (and associated buffers)
* is allocated here on initialization.
* More allocated on demand.
*/
return (0);
}
/*
* set sizes of buffers
*/
/*
* NOTE(review): the function name/parameter line is missing from this
* view.  Maps a provider-reported info size to a usable buffer size:
* T_INFINITE (-1) and T_INVALID (-2) get special handling, otherwise
* the value is returned as an unsigned int.
*/
static unsigned int
{
switch (infosize) {
case T_INFINITE /* -1 */:
if (option) {
/* NOTE(review): assignments inside this branch appear lost */
if (optinfsize == 0) {
else
}
return ((unsigned int)optinfsize);
}
return (DEFSIZE);
case T_INVALID /* -2 */:
return (0);
default:
return ((unsigned int) infosize);
}
}
/*
* NOTE(review): the function name/parameter line is missing from this
* view; body resets per-instance fields of a struct _ti_user for
* reinitialization (fd/lock/list links deliberately preserved, per the
* original comment).
*/
static void
{
/*
* Note: This routine is designed for a "reinitialization"
* Following fields are not modified here and preserved.
* - ti_fd field
* - ti_lock
* - ti_next
* - ti_prev
* The above fields have to be separately initialized if this
* is used for a fresh initialization.
*/
tiptr->ti_rcvsize = 0;
tiptr->ti_ctlsize = 0;
tiptr->ti_tsdusize = 0;
tiptr->ti_etsdusize = 0;
tiptr->ti_cdatasize = 0;
tiptr->ti_ddatasize = 0;
tiptr->ti_servtype = 0;
tiptr->ti_lookcnt = 0;
tiptr->ti_prov_flag = 0;
}
/*
* Link manipulation routines.
*
* NBUCKETS hash buckets are used to give fast
* access. The number is derived the file descriptor softlimit
* number (64).
*/
/*
* Allocates a new link and returns a pointer to it.
* Assumes that the caller is holding _ti_userlock via sig_mutex_lock(),
* so signals are deferred here.
*/
/*
* NOTE(review): body is incomplete in this view -- the bucket-walk loop
* header, the allocation calls and several conditions are missing.  The
* surviving skeleton hashes descriptor s into hash_bucket[], reuses a
* duplicate entry if found, otherwise links in a new node.
*/
static struct _ti_user *
add_tilink(int s)
{
int x;
return (NULL);
x = s % NBUCKETS;
if (hash_bucket[x] != NULL) {
/*
* Walk along the bucket looking for
* duplicate entry or the end.
*/
/*
* This can happen when the user has close(2)'ed
* a descriptor and then been allocated it again
* via t_open().
*
* We will re-use the existing _ti_user struct
* in this case rather than using the one
* we allocated above. If there are buffers
* associated with the existing _ti_user
* struct, they may not be the correct size,
* so we can not use it. We free them
* here and re-allocate new ones
* later on.
*/
return (curptr);
}
}
/*
* Allocate and link in a new one.
*/
return (NULL);
/*
* First initialize fields common with reinitialization and
* then other fields too
*/
} else {
/*
* First entry.
*/
return (NULL);
hash_bucket[x] = tiptr;
}
return (tiptr);
}
/*
* Find a link by descriptor
* Assumes that the caller is holding _ti_userlock.
*/
/*
* NOTE(review): body is incomplete in this view -- the guard on s, the
* bucket-walk loop header and the match condition are missing.  The
* surviving skeleton hashes s into a bucket and returns the matching
* entry, deleting a stale link on some path, else NULL.
*/
static struct _ti_user *
find_tilink(int s)
{
int x;
return (NULL);
x = s % NBUCKETS;
/*
* Walk along the bucket looking for the descriptor.
*/
return (curptr);
(void) _t_delete_tilink(s);
}
}
return (NULL);
}
/*
* Assumes that the caller is holding _ti_userlock.
* Also assumes that all signals are blocked.
*/
/*
* NOTE(review): body is incomplete in this view -- the bucket-walk loop
* header, the match test and the resource-freeing calls are missing.
* The surviving skeleton unlinks the matching node from its hash bucket
* and returns 0, or -1 when s is negative or not found.
*/
int
_t_delete_tilink(int s)
{
int x;
/*
* Find the link.
*/
if (s < 0)
return (-1);
x = s % NBUCKETS;
/*
* Walk along the bucket looking for
* the descriptor.
*/
if (prevptr)
else
hash_bucket[x] = nextptr;
if (nextptr)
/*
* free resource associated with the curptr
*/
return (0);
}
}
return (-1);
}
/*
* Allocate a TLI state structure and synch it with the kernel
* *tiptr is returned
* Assumes that the caller is holding the _ti_userlock and has blocked signals.
*
* This function may fail the first time it is called with given transport if it
* doesn't support T_CAPABILITY_REQ TPI message.
*/
/*
* NOTE(review): this definition is heavily truncated in this view --
* the name/parameter line, local declarations, the TI_CAPABILITY and
* TI_SYNC ioctl invocations, and the per-state bodies of the switch are
* missing (braces do not balance).  Recover the full body from source
* control before modifying; comments below only annotate what
* survives.
*/
struct _ti_user *
{
/*
* Aligned data buffer for ioctl.
*/
union {
} ioctl_data;
/* preferred location first local variable */
/* see note below */
/*
* Note: We use "ioctlbuf" allocated on stack above with
* room to grow since (struct ti_sync_ack) can grow in size
* on future kernels. (We do not use malloc'd "ti_ctlbuf" as that
* part of instance structure which may not exist yet)
* Its preferred declaration location is first local variable in this
* procedure as bugs causing overruns will be detectable on
* platforms where procedure calling conventions place return
* address on stack (such as x86) instead of causing silent
* memory corruption.
*/
int expected_acksize;
/*
* Use ioctl required for sync'ing state with kernel.
* We use two ioctls. TI_CAPABILITY is used to get TPI information and
* TI_SYNC is used to synchronise state with timod. Statically linked
* TLI applications will no longer work on older releases where there
* are no TI_SYNC and TI_CAPABILITY.
*/
/*
* Request info about transport.
* Assumes that TC1_INFO should always be implemented.
* For TI_CAPABILITY size argument to ioctl specifies maximum buffer
* size.
*/
expected_acksize = (int)sizeof (struct T_capability_ack);
if (rval < 0) {
/*
* TI_CAPABILITY may fail when transport provider doesn't
* support T_CAPABILITY_REQ message type. In this case file
* descriptor may be unusable (when transport provider sent
* M_ERROR in response to T_CAPABILITY_REQ). This should only
* happen once during system lifetime for given transport
* provider since timod will emulate TI_CAPABILITY after it
* detected the failure.
*/
if (t_capreq_failed != NULL)
*t_capreq_failed = 1;
return (NULL);
}
if (retlen != expected_acksize) {
return (NULL);
}
return (NULL);
}
return (NULL);
}
if (_T_IS_XTI(api_semantics)) {
/*
* XTI ONLY - TLI "struct t_info" does not
* have "flags"
*/
/*
* Some day there MAY be a NEW bit in T_info_ack
* PROVIDER_flag namespace exposed by TPI header
* role played by T_ORDRELDATA in info->flags namespace
* When that bit exists, we can add a test to see if
* it is set and set T_ORDRELDATA.
* Note: Currently only mOSI ("minimal OSI") provider
* is specified to use T_ORDRELDATA so probability of
* needing it is minimal.
*/
}
}
/*
* then create and initialize data structure
* and allocate buffers
*/
return (NULL);
}
/*
* Allocate buffers for the new descriptor
*/
(void) _t_delete_tilink(fd);
return (NULL);
}
/* Fill instance structure */
ntiptr->ti_lookcnt = 0;
}
else
/*
* Restore state from kernel (caveat some heuristics)
*/
switch (tiap->CURRENT_state) {
case TS_UNBND:
break;
case TS_IDLE:
(void) _t_delete_tilink(fd);
return (NULL);
}
break;
case TS_WRES_CIND:
break;
case TS_WCON_CREQ:
break;
case TS_DATA_XFER:
(void) _t_delete_tilink(fd);
return (NULL);
}
break;
case TS_WIND_ORDREL:
break;
case TS_WREQ_ORDREL:
(void) _t_delete_tilink(fd);
return (NULL);
}
break;
default:
(void) _t_delete_tilink(fd);
return (NULL);
}
/*
* Sync information with timod.
*/
expected_acksize = (int)sizeof (struct ti_sync_ack);
if (rval < 0) {
(void) _t_delete_tilink(fd);
return (NULL);
}
/*
* This is a "less than" check as "struct ti_sync_ack" may grow in
* size on future kernels; if a statically linked application is run
* on a future kernel, it should not fail.
*/
if (retlen < expected_acksize) {
(void) _t_delete_tilink(fd);
return (NULL);
}
if (_T_IS_TLI(api_semantics))
return (ntiptr);
}
/*
* NOTE(review): truncated definition -- the function name, parameters,
* the stream-head peek call and the per-case adjustment statements are
* missing from this view.  The surviving heuristics adjust "outstate"
* based on what is waiting at the stream head.
*/
static int
{
/*
* Peek at message on stream head (if any)
* and see if it is data
*/
return (-1);
}
/*
* If peek shows something at stream head, then
* Adjust "outstate" based on some heuristics.
*/
if (retval > 0) {
switch (instate) {
case T_IDLE:
/*
* The following heuristic is to handle data
* ahead of T_DISCON_IND indications that might
* be at the stream head waiting to be
* read (T_DATA_IND or M_DATA)
*/
/* LINTED pointer cast */
}
break;
case T_DATAXFER:
/*
* The following heuristic is to handle
* the case where the connection is established
* and in data transfer state at the provider
* but the T_CONN_CON has not yet been read
* from the stream head.
*/
/* LINTED pointer cast */
break;
case T_INREL:
/*
* The following heuristic is to handle data
* ahead of T_ORDREL_IND indications that might
* be at the stream head waiting to be
* read (T_DATA_IND or M_DATA)
*/
/* LINTED pointer cast */
break;
default:
break;
}
}
return (outstate);
}
/*
* Assumes caller has blocked signals at least in this thread (for safe
* malloc/free behavior)
*/
/*
* NOTE(review): truncated definition -- the function name, parameters
* and the allocation/assignment logic are missing; only a size local,
* an error path and the size return survive.
*/
static int
{
unsigned size2;
return (-1);
}
return (size2);
}
/*
* Assumes caller has blocked signals at least in this thread (for safe
* malloc/free behavior)
*/
/*
* NOTE(review): truncated definition -- the function name, parameters
* and the allocation/assignment logic are missing; only a size local,
* an error path and the size return survive.
*/
int
{
unsigned size1;
return (-1);
}
return (size1);
}
/*
* Free lookbuffer structures and associated resources
* Assumes ti_lock held for MT case.
*/
/*
* NOTE(review): truncated definition -- the function name, parameters,
* the free() calls and the list-walk loop are missing from this view.
*/
static void
{
/*
* Assertion:
* The structure lock should be held or the global list
* manipulation lock. The assumption is that nothing
* else can access the descriptor since global list manipulation
* lock is held so it is OK to manipulate fields without the
* structure lock
*/
/*
* Free only the buffers in the first lookbuf
*/
}
/*
* Free the node and the buffers in the rest of the
* list
*/
}
}
/*
* Free lookbuffer event list head.
* Consume current lookbuffer event
* Assumes ti_lock held for MT case.
* Note: The head of this list is part of the instance
* structure so the code is a little unorthodox.
*/
/*
* NOTE(review): truncated definition -- the function name, parameters,
* the free()/pointer-shuffle statements and the branch condition are
* missing from this view; only the lookcnt bookkeeping survives.
*/
void
{
/*
* Free the control and data buffers
*/
/*
* Replace with next lookbuf event contents
*/
/*
* Decrement the flag - should never get to zero.
* in this path
*/
tiptr->ti_lookcnt--;
} else {
/*
* No more look buffer events - just clear the flag
* and leave the buffers alone
*/
tiptr->ti_lookcnt = 0;
}
}
/*
* Discard lookbuffer events.
* Assumes ti_lock held for MT case.
*/
/*
* NOTE(review): truncated definition -- the function name, parameters
* and the list-teardown loop are missing from this view.
*/
void
{
/*
* Leave the first nodes buffers alone (i.e. allocated)
* but reset the flag.
*/
tiptr->ti_lookcnt = 0;
/*
* Blow away the rest of the list
*/
}
}
/*
* This routine checks if the receive buffer in the instance structure
* is available (non-null). If it is, the buffer is acquired and marked busy
* (null). If it is busy (possible in MT programs), it allocates a new
* buffer and sets a flag indicating new memory was allocated and the caller
* has to free it.
*/
/*
* NOTE(review): truncated definition -- the function name, leading
* parameters, the availability test and the malloc call are missing
* from this view; only the didallocp bookkeeping and returns survive.
*/
int
int *didallocp)
{
*didallocp = 0;
} else {
/*
* tiptr->ti_ctlbuf is in use
* allocate new buffer and free after use.
*/
return (-1);
}
*didallocp = 1;
}
return (0);
}
/*
* This routine checks if the receive buffer in the instance structure
* is available (non-null). If it is, the buffer is acquired and marked busy
* (null). If it is busy (possible in MT programs), it allocates a new
* buffer and sets a flag indicating new memory was allocated and the caller
* has to free it.
* Note: The receive buffer pointer can also be null if the transport
* provider does not support connect/disconnect data, (e.g. TCP) - not
* just when it is "busy". In that case, ti_rcvsize will be 0 and that is
* used to instantiate the databuf which points to a null buffer of
* length 0 which is the right thing to do for that case.
*/
/*
* NOTE(review): truncated definition -- the function name, leading
* parameters, the availability test and the malloc call are missing
* from this view; only the didallocp bookkeeping and returns survive.
*/
int
int *didallocp)
{
*didallocp = 0;
} else if (tiptr->ti_rcvsize == 0) {
} else {
/*
* tiptr->ti_rcvbuf is in use
* allocate new buffer and free after use.
*/
return (-1);
}
*didallocp = 1;
}
return (0);
}
/*
* This routine requests timod to look for any expedited data
* queued in the "receive buffers" in the kernel. Used for XTI
* t_look() semantics for transports that send expedited data
* data inline (e.g TCP).
* Returns -1 for failure
* Returns 0 for success
* On a successful return, the location pointed by "expedited_queuedp"
* contains
* 0 if no expedited data is found queued in "receive buffers"
* 1 if expedited data is found queued in "receive buffers"
*/
/*
* NOTE(review): truncated definition -- the function name, parameters,
* the TI_SYNC ioctl invocation and the loop condition are missing from
* this view.
*/
int
{
union {
} ioctl_data;
/* preferred location first local variable */
/* see note in _t_create above */
*expedited_queuedp = 0;
/* request info on rq expinds  */
do {
if (rval < 0)
return (-1);
/*
* This is a "less than" check as "struct ti_sync_ack" may grow in
* size on future kernels; if a statically linked application is run
* on a future kernel, it should not fail.
*/
if (retlen < (int)sizeof (struct ti_sync_ack)) {
return (-1);
}
*expedited_queuedp = 1;
return (0);
}
/*
* Helper routines used by t_sndv(), t_rcvv() etc. follow below.
*/
/*
* _t_bytecount_upto_intmax() :
* Sum of the lengths of the individual buffers in
* the t_iovec array. If the sum exceeds INT_MAX
* it is truncated to INT_MAX.
*/
/*
* NOTE(review): truncated definition -- the function name, parameters,
* the loop header and the summation/overflow test are missing; only
* the accumulator init and the final return survive.
*/
unsigned int
{
int i;
nbytes = 0;
break;
}
}
return ((unsigned int)nbytes);
}
/*
* Gather the data in the t_iovec buffers, into a single linear buffer
* starting at dataptr. Caller must have allocated sufficient space
* starting at dataptr. The total amount of data that is gathered is
* limited to INT_MAX. Any remaining data in the t_iovec buffers is
* not copied.
*/
/*
* NOTE(review): truncated definition -- the function name, parameters
* and the per-iteration copy statements are missing; only the locals
* and the loop header survive.
*/
void
{
char *curptr;
unsigned int cur_count;
unsigned int nbytes_remaining;
int i;
cur_count = 0;
for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
else
}
}
/*
* Scatter the data from the single linear buffer at pdatabuf->buf into
* the t_iovec buffers.
*/
/*
* NOTE(review): truncated definition -- the function name, parameters
* and the per-iteration copy statements are missing; only the locals
* and the loop header survive.
*/
void
{
char *curptr;
unsigned int nbytes_remaining;
unsigned int curlen;
int i;
/*
* There cannot be any uncopied data leftover in pdatabuf
* at the conclusion of this function. (asserted below)
*/
for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
else
}
}
/*
* Adjust the iovec array, for subsequent use. Examine each element in the
* iovec array,and zero out the iov_len if the buffer was sent fully.
* otherwise the buffer was only partially sent, so adjust both iov_len and
* iov_base.
*
*/
/*
* NOTE(review): truncated definition -- the function name, parameters
* and the per-element adjustment statements are missing; only the loop
* header and bare control keywords survive.
*/
void
{
int i;
for (i = 0; i < *iovcountp && bytes_sent; i++) {
continue;
break;
else {
}
}
}
/*
* Copy the t_iovec array to the iovec array while taking care to see
* that the sum of the buffer lengths in the result is not more than
* INT_MAX. This function requires that T_IOV_MAX is no larger than
* IOV_MAX. Otherwise the resulting array is not a suitable input to
* writev(). If the sum of the lengths in t_iovec is zero, so is the
* resulting iovec.
*/
/*
* NOTE(review): truncated definition -- the function name, parameters
* and the per-element copy/clamp statements are missing; only the loop
* skeleton and the output count assignment survive.
*/
void
{
int i;
unsigned int nbytes_remaining;
i = 0;
do {
else
i++;
} while (nbytes_remaining != 0 && i < tiovcount);
*iovcountp = i;
}
/*
* Routine called after connection establishment on transports where
* connection establishment changes certain transport attributes such as
* TIDU_size
*/
/*
* NOTE(review): truncated definition -- the function name, parameters,
* the T_CAPABILITY_REQ ioctl invocation and the attribute-update
* statements are missing from this view.
*/
int
{
union {
} ioctl_data;
int expected_acksize;
/*
* This T_CAPABILITY_REQ should not fail, even if it is unsupported
* by the transport provider. timod will emulate it in that case.
*/
expected_acksize = (int)sizeof (struct T_capability_ack);
if (rval < 0)
return (-1);
/*
* T_capability TPI messages are extensible and can grow in future.
* However timod will take care of returning no more information
* than what was requested, and truncating the "extended"
* information towards the end of the T_capability_ack, if necessary.
*/
if (retlen != expected_acksize) {
return (-1);
}
/*
* The T_info_ack part of the T_capability_ack is guaranteed to be
* present only if the corresponding TC1_INFO bit is set
*/
return (-1);
}
return (-1);
}
/*
* Note: Sync with latest information returned in "struct T_info_ack"
* but we deliberately do not sync the state here as user level state
* construction here is not required, only update of attributes which
* may have changed because of negotiations during connection
* establishment
*/
return (0);
}