2N/A/*
2N/A * CDDL HEADER START
2N/A *
2N/A * The contents of this file are subject to the terms of the
2N/A * Common Development and Distribution License (the "License").
2N/A * You may not use this file except in compliance with the License.
2N/A *
2N/A * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
2N/A * or http://www.opensolaris.org/os/licensing.
2N/A * See the License for the specific language governing permissions
2N/A * and limitations under the License.
2N/A *
2N/A * When distributing Covered Code, include this CDDL HEADER in each
2N/A * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
2N/A * If applicable, add the following below this CDDL HEADER, with the
2N/A * fields enclosed by brackets "[]" replaced with your own identifying
2N/A * information: Portions Copyright [yyyy] [name of copyright owner]
2N/A *
2N/A * CDDL HEADER END
2N/A */
2N/A
2N/A/*
2N/A * Copyright (c) 1988, 2011, Oracle and/or its affiliates. All rights reserved.
2N/A */
2N/A
2N/A/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
2N/A/* All Rights Reserved */
2N/A
2N/A#include "mt.h"
2N/A#include <stdlib.h>
2N/A#include <string.h>
2N/A#include <strings.h>
2N/A#include <unistd.h>
2N/A#include <errno.h>
2N/A#include <stropts.h>
2N/A#include <sys/stream.h>
2N/A#define _SUN_TPI_VERSION 2
2N/A#include <sys/tihdr.h>
2N/A#include <sys/timod.h>
2N/A#include <sys/stat.h>
2N/A#include <xti.h>
2N/A#include <fcntl.h>
2N/A#include <signal.h>
2N/A#include <assert.h>
2N/A#include <syslog.h>
2N/A#include <limits.h>
2N/A#include <ucred.h>
2N/A#include "tx.h"
2N/A
2N/A#define DEFSIZE 2048
2N/A
2N/A/*
2N/A * The following used to be in tiuser.h, but was causing too much namespace
2N/A * pollution.
2N/A */
2N/A#define ROUNDUP32(X) ((X + 0x03)&~0x03)
2N/A
2N/Astatic struct _ti_user *find_tilink(int s);
2N/Astatic struct _ti_user *add_tilink(int s);
2N/Astatic void _t_free_lookbufs(struct _ti_user *tiptr);
2N/Astatic unsigned int _t_setsize(t_scalar_t infosize, boolean_t option);
2N/Astatic int _t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf);
2N/Astatic int _t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf);
2N/Astatic int _t_adjust_state(int fd, int instate);
2N/Astatic int _t_alloc_bufs(int fd, struct _ti_user *tiptr,
2N/A struct T_info_ack *tsap);
2N/A
2N/Amutex_t _ti_userlock = DEFAULTMUTEX; /* Protects hash_bucket[] */
2N/A
2N/A/*
2N/A * Checkfd - checks validity of file descriptor
2N/A */
/*
 * Validate fd as a TLI/XTI endpoint and return its per-endpoint instance
 * structure, creating (or re-syncing) the user-level state from the kernel
 * when necessary.
 *
 *	fd		descriptor to validate
 *	force_sync	if non-zero, re-sync with the kernel even when an
 *			instance structure for fd already exists
 *	api_semantics	TLI vs. XTI behavior selector (_T_IS_TLI/_T_IS_XTI)
 *
 * Returns the instance pointer, or NULL with t_errno (and possibly errno)
 * set on failure.
 */
struct _ti_user *
_t_checkfd(int fd, int force_sync, int api_semantics)
{
	sigset_t mask;
	struct _ti_user *tiptr;
	int retval, timodpushed;

	if (fd < 0) {
		t_errno = TBADF;
		return (NULL);
	}
	tiptr = NULL;
	/* Fast path: an existing instance is returned unless a sync is forced */
	sig_mutex_lock(&_ti_userlock);
	if ((tiptr = find_tilink(fd)) != NULL) {
		if (!force_sync) {
			sig_mutex_unlock(&_ti_userlock);
			return (tiptr);
		}
	}
	sig_mutex_unlock(&_ti_userlock);

	/*
	 * Not found or a forced sync is required.
	 * check if this is a valid TLI/XTI descriptor.
	 */
	timodpushed = 0;
	do {
		retval = ioctl(fd, I_FIND, "timod");
	} while (retval < 0 && errno == EINTR);

	if (retval < 0 || (retval == 0 && _T_IS_TLI(api_semantics))) {
		/*
		 * not a stream or a TLI endpoint with no timod
		 * XXX Note: If it is a XTI call, we push "timod" and
		 * try to convert it into a transport endpoint later.
		 * We do not do it for TLI and "retain" the old buggy
		 * behavior because ypbind and a lot of other daemons seem
		 * to use a buggy logic test of the form
		 * "(t_getstate(0) != -1 || t_errno != TBADF)" to see if
		 * they were ever invoked with a request on stdin and drop
		 * into untested code. This test is in code generated by
		 * rpcgen which is why the test is replicated in many
		 * daemons too. We will need to fix that test too with
		 * "IsaTLIendpoint" test if we ever fix this for TLI
		 */
		t_errno = TBADF;
		return (NULL);
	}

	if (retval == 0) {
		/*
		 * "timod" not already on stream, then push it
		 */
		do {
			/*
			 * Assumes (correctly) that I_PUSH is
			 * atomic w.r.t signals (EINTR error)
			 */
			retval = ioctl(fd, I_PUSH, "timod");
		} while (retval < 0 && errno == EINTR);

		if (retval < 0) {
			t_errno = TSYSERR;
			return (NULL);
		}
		timodpushed = 1;
	}
	/*
	 * Try to (re)constitute the info at user level from state
	 * in the kernel. This could be information that was lost due
	 * to an exec, or state being instantiated at a new descriptor
	 * due to open(), dup2(), etc.
	 *
	 * _t_create() requires that all signals be blocked.
	 * Note that sig_mutex_lock() only defers signals, it does not
	 * block them, so interruptible syscalls could still get EINTR.
	 */
	(void) thr_sigsetmask(SIG_SETMASK, &fillset, &mask);
	sig_mutex_lock(&_ti_userlock);
	tiptr = _t_create(fd, NULL, api_semantics, NULL);
	if (tiptr == NULL) {
		int sv_errno = errno;
		sig_mutex_unlock(&_ti_userlock);
		(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
		/*
		 * restore to stream before timod pushed. It may
		 * not have been a network transport stream.
		 */
		if (timodpushed)
			(void) ioctl(fd, I_POP, 0);
		errno = sv_errno;
		return (NULL);
	}
	sig_mutex_unlock(&_ti_userlock);
	(void) thr_sigsetmask(SIG_SETMASK, &mask, NULL);
	return (tiptr);
}
2N/A
2N/A/*
2N/A * copy data to output buffer making sure the output buffer is 32 bit
2N/A * aligned, even though the input buffer may not be.
2N/A */
2N/Aint
2N/A_t_aligned_copy(
2N/A struct strbuf *strbufp,
2N/A int len,
2N/A int init_offset,
2N/A char *datap,
2N/A t_scalar_t *rtn_offset)
2N/A{
2N/A *rtn_offset = ROUNDUP32(init_offset);
2N/A if ((*rtn_offset + len) > strbufp->maxlen) {
2N/A /*
2N/A * Aligned copy will overflow buffer
2N/A */
2N/A return (-1);
2N/A }
2N/A (void) memcpy(strbufp->buf + *rtn_offset, datap, (size_t)len);
2N/A
2N/A return (0);
2N/A}
2N/A
2N/A
2N/A/*
2N/A * append data and control info in look buffer (list in the MT case)
2N/A *
2N/A * The only thing that can be in look buffer is a T_DISCON_IND,
2N/A * T_ORDREL_IND or a T_UDERROR_IND.
2N/A *
2N/A * It also enforces priority of T_DISCONDs over any T_ORDREL_IND
2N/A * already in the buffer. It assumes no T_ORDREL_IND is appended
2N/A * when there is already something on the looklist (error case) and
2N/A * that a T_ORDREL_IND if present will always be the first on the
2N/A * list.
2N/A *
2N/A * This also assumes ti_lock is held via sig_mutex_lock(),
2N/A * so signals are deferred here.
2N/A */
2N/Aint
2N/A_t_register_lookevent(
2N/A struct _ti_user *tiptr,
2N/A caddr_t dptr,
2N/A int dsize,
2N/A caddr_t cptr,
2N/A int csize)
2N/A{
2N/A struct _ti_lookbufs *tlbs;
2N/A int cbuf_size, dbuf_size;
2N/A
2N/A assert(MUTEX_HELD(&tiptr->ti_lock));
2N/A
2N/A cbuf_size = tiptr->ti_ctlsize;
2N/A dbuf_size = tiptr->ti_rcvsize;
2N/A
2N/A if ((csize > cbuf_size) || dsize > dbuf_size) {
2N/A /* can't fit - return error */
2N/A return (-1); /* error */
2N/A }
2N/A /*
2N/A * Enforce priority of T_DISCON_IND over T_ORDREL_IND
2N/A * queued earlier.
2N/A * Note: Since there can be only at most one T_ORDREL_IND
2N/A * queued (more than one is error case), and we look for it
2N/A * on each append of T_DISCON_IND, it can only be at the
2N/A * head of the list if it is there.
2N/A */
2N/A if (tiptr->ti_lookcnt > 0) { /* something already on looklist */
2N/A if (cptr && csize >= (int)sizeof (struct T_discon_ind) &&
2N/A /* LINTED pointer cast */
2N/A *(t_scalar_t *)cptr == T_DISCON_IND) {
2N/A /* appending discon ind */
2N/A assert(tiptr->ti_servtype != T_CLTS);
2N/A /* LINTED pointer cast */
2N/A if (*(t_scalar_t *)tiptr->ti_lookbufs.tl_lookcbuf ==
2N/A T_ORDREL_IND) { /* T_ORDREL_IND is on list */
2N/A /*
2N/A * Blow away T_ORDREL_IND
2N/A */
2N/A _t_free_looklist_head(tiptr);
2N/A }
2N/A }
2N/A }
2N/A tlbs = &tiptr->ti_lookbufs;
2N/A if (tiptr->ti_lookcnt > 0) {
2N/A int listcount = 0;
2N/A /*
2N/A * Allocate and append a new lookbuf to the
2N/A * existing list. (Should only happen in MT case)
2N/A */
2N/A while (tlbs->tl_next != NULL) {
2N/A listcount++;
2N/A tlbs = tlbs->tl_next;
2N/A }
2N/A assert(tiptr->ti_lookcnt == listcount);
2N/A
2N/A /*
2N/A * signals are deferred, calls to malloc() are safe.
2N/A */
2N/A if ((tlbs->tl_next = malloc(sizeof (struct _ti_lookbufs))) ==
2N/A NULL)
2N/A return (-1); /* error */
2N/A tlbs = tlbs->tl_next;
2N/A /*
2N/A * Allocate the buffers. The sizes derived from the
2N/A * sizes of other related buffers. See _t_alloc_bufs()
2N/A * for details.
2N/A */
2N/A if ((tlbs->tl_lookcbuf = malloc(cbuf_size)) == NULL) {
2N/A /* giving up - free other memory chunks */
2N/A free(tlbs);
2N/A return (-1); /* error */
2N/A }
2N/A if ((dsize > 0) &&
2N/A ((tlbs->tl_lookdbuf = malloc(dbuf_size)) == NULL)) {
2N/A /* giving up - free other memory chunks */
2N/A free(tlbs->tl_lookcbuf);
2N/A free(tlbs);
2N/A return (-1); /* error */
2N/A }
2N/A }
2N/A
2N/A (void) memcpy(tlbs->tl_lookcbuf, cptr, csize);
2N/A if (dsize > 0)
2N/A (void) memcpy(tlbs->tl_lookdbuf, dptr, dsize);
2N/A tlbs->tl_lookdlen = dsize;
2N/A tlbs->tl_lookclen = csize;
2N/A tlbs->tl_next = NULL;
2N/A tiptr->ti_lookcnt++;
2N/A return (0); /* ok return */
2N/A}
2N/A
2N/A/*
2N/A * Is there something that needs attention?
2N/A * Assumes tiptr->ti_lock held and this threads signals blocked
2N/A * in MT case.
2N/A */
2N/Aint
2N/A_t_is_event(int fd, struct _ti_user *tiptr)
2N/A{
2N/A int size, retval;
2N/A
2N/A assert(MUTEX_HELD(&tiptr->ti_lock));
2N/A if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
2N/A t_errno = TSYSERR;
2N/A return (-1);
2N/A }
2N/A
2N/A if ((retval > 0) || (tiptr->ti_lookcnt > 0)) {
2N/A t_errno = TLOOK;
2N/A return (-1);
2N/A }
2N/A return (0);
2N/A}
2N/A
2N/A/*
2N/A * wait for T_OK_ACK
2N/A * assumes tiptr->ti_lock held in MT case
2N/A */
/*
 * Wait for and consume the T_OK_ACK (or T_ERROR_ACK) acknowledging the
 * TPI primitive "type" that was just sent down the stream.
 *
 *	fd	endpoint descriptor
 *	tiptr	endpoint instance structure (ti_lock assumed held)
 *	type	the TPI primitive being acknowledged (e.g. T_BIND_REQ)
 *
 * Returns 0 when a matching T_OK_ACK is received; -1 with t_errno/errno
 * set otherwise. The endpoint is temporarily switched to blocking mode
 * for the getmsg() and its O_NDELAY/O_NONBLOCK status is restored before
 * returning.
 */
int
_t_is_ok(int fd, struct _ti_user *tiptr, t_scalar_t type)
{
	struct strbuf ctlbuf;
	struct strbuf databuf;
	union T_primitives *pptr;
	int retval, cntlflag;
	int size;
	int didalloc, didralloc;	/* 1 if ctl/data buf was malloc'd here */
	int flags = 0;

	assert(MUTEX_HELD(&tiptr->ti_lock));
	/*
	 * Acquire ctlbuf for use in sending/receiving control part
	 * of the message.
	 */
	if (_t_acquire_ctlbuf(tiptr, &ctlbuf, &didalloc) < 0)
		return (-1);
	/*
	 * Acquire databuf for use in sending/receiving data part
	 */
	if (_t_acquire_databuf(tiptr, &databuf, &didralloc) < 0) {
		/* return the ctl buffer: free it or give it back to tiptr */
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		return (-1);
	}

	/*
	 * Temporarily convert a non blocking endpoint to a
	 * blocking one and restore status later
	 */
	cntlflag = fcntl(fd, F_GETFL, 0);
	if (cntlflag & (O_NDELAY | O_NONBLOCK))
		(void) fcntl(fd, F_SETFL, cntlflag & ~(O_NDELAY | O_NONBLOCK));

	flags = RS_HIPRI;	/* acks arrive as high-priority messages */

	while ((retval = getmsg(fd, &ctlbuf, &databuf, &flags)) < 0) {
		if (errno == EINTR)
			continue;	/* retry interrupted getmsg() */
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		goto err_out;
	}

	/* did I get entire message: retval > 0 means MORECTL/MOREDATA left */
	if (retval > 0) {
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		errno = EIO;
		goto err_out;
	}

	/*
	 * is ctl part large enough to determine type?
	 */
	if (ctlbuf.len < (int)sizeof (t_scalar_t)) {
		if (cntlflag & (O_NDELAY | O_NONBLOCK))
			(void) fcntl(fd, F_SETFL, cntlflag);
		t_errno = TSYSERR;
		errno = EPROTO;
		goto err_out;
	}

	/* message received intact - restore original blocking status now */
	if (cntlflag & (O_NDELAY | O_NONBLOCK))
		(void) fcntl(fd, F_SETFL, cntlflag);

	/* LINTED pointer cast */
	pptr = (union T_primitives *)ctlbuf.buf;

	switch (pptr->type) {
	case T_OK_ACK:
		/* must be complete and must acknowledge the expected prim */
		if ((ctlbuf.len < (int)sizeof (struct T_ok_ack)) ||
		    (pptr->ok_ack.CORRECT_prim != type)) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		/* success: return the buffers before returning 0 */
		if (didalloc)
			free(ctlbuf.buf);
		else
			tiptr->ti_ctlbuf = ctlbuf.buf;
		if (didralloc)
			free(databuf.buf);
		else
			tiptr->ti_rcvbuf = databuf.buf;
		return (0);

	case T_ERROR_ACK:
		if ((ctlbuf.len < (int)sizeof (struct T_error_ack)) ||
		    (pptr->error_ack.ERROR_prim != type)) {
			t_errno = TSYSERR;
			errno = EPROTO;
			goto err_out;
		}
		/*
		 * if error is out of state and there is something
		 * on read queue, then indicate to user that
		 * there is something that needs attention
		 */
		if (pptr->error_ack.TLI_error == TOUTSTATE) {
			if ((retval = ioctl(fd, I_NREAD, &size)) < 0) {
				t_errno = TSYSERR;
				goto err_out;
			}
			if (retval > 0)
				t_errno = TLOOK;
			else
				t_errno = TOUTSTATE;
		} else {
			t_errno = pptr->error_ack.TLI_error;
			if (t_errno == TSYSERR)
				errno = pptr->error_ack.UNIX_error;
		}
		goto err_out;
	default:
		/* unexpected primitive type in the acknowledgement */
		t_errno = TSYSERR;
		errno = EPROTO;
		/* fallthru to err_out: */
	}
err_out:
	/* common failure exit: return or free both working buffers */
	if (didalloc)
		free(ctlbuf.buf);
	else
		tiptr->ti_ctlbuf = ctlbuf.buf;
	if (didralloc)
		free(databuf.buf);
	else
		tiptr->ti_rcvbuf = databuf.buf;
	return (-1);
}
2N/A
2N/A/*
2N/A * timod ioctl
2N/A */
2N/Aint
2N/A_t_do_ioctl(int fd, char *buf, int size, int cmd, int *retlenp)
2N/A{
2N/A int retval;
2N/A struct strioctl strioc;
2N/A
2N/A strioc.ic_cmd = cmd;
2N/A strioc.ic_timout = -1;
2N/A strioc.ic_len = size;
2N/A strioc.ic_dp = buf;
2N/A
2N/A if ((retval = ioctl(fd, I_STR, &strioc)) < 0) {
2N/A t_errno = TSYSERR;
2N/A return (-1);
2N/A }
2N/A
2N/A if (retval > 0) {
2N/A t_errno = retval&0xff;
2N/A if (t_errno == TSYSERR)
2N/A errno = (retval >> 8)&0xff;
2N/A return (-1);
2N/A }
2N/A if (retlenp)
2N/A *retlenp = strioc.ic_len;
2N/A return (0);
2N/A}
2N/A
2N/A/*
2N/A * alloc scratch buffers and look buffers
2N/A */
2N/A/* ARGSUSED */
2N/Astatic int
2N/A_t_alloc_bufs(int fd, struct _ti_user *tiptr, struct T_info_ack *tsap)
2N/A{
2N/A unsigned int size1, size2;
2N/A t_scalar_t optsize;
2N/A unsigned int csize, dsize, asize, osize;
2N/A char *ctlbuf, *rcvbuf;
2N/A char *lookdbuf, *lookcbuf;
2N/A
2N/A csize = _t_setsize(tsap->CDATA_size, B_FALSE);
2N/A dsize = _t_setsize(tsap->DDATA_size, B_FALSE);
2N/A
2N/A size1 = _T_MAX(csize, dsize);
2N/A
2N/A if (size1 != 0) {
2N/A if ((rcvbuf = malloc(size1)) == NULL)
2N/A return (-1);
2N/A if ((lookdbuf = malloc(size1)) == NULL) {
2N/A free(rcvbuf);
2N/A return (-1);
2N/A }
2N/A } else {
2N/A rcvbuf = NULL;
2N/A lookdbuf = NULL;
2N/A }
2N/A
2N/A asize = _t_setsize(tsap->ADDR_size, B_FALSE);
2N/A if (tsap->OPT_size >= 0)
2N/A /* compensate for XTI level options */
2N/A optsize = tsap->OPT_size + TX_XTI_LEVEL_MAX_OPTBUF;
2N/A else
2N/A optsize = tsap->OPT_size;
2N/A osize = _t_setsize(optsize, B_TRUE);
2N/A
2N/A /*
2N/A * We compute the largest buffer size needed for this provider by
2N/A * adding the components. [ An extra sizeof (t_scalar_t) is added to
2N/A * take care of rounding off for alignment) for each buffer ]
2N/A * The goal here is compute the size of largest possible buffer that
2N/A * might be needed to hold a TPI message for the transport provider
2N/A * on this endpoint.
2N/A * Note: T_ADDR_ACK contains potentially two address buffers.
2N/A */
2N/A
2N/A size2 = (unsigned int)sizeof (union T_primitives) /* TPI struct */
2N/A + asize + (unsigned int)sizeof (t_scalar_t) +
2N/A /* first addr buffer plus alignment */
2N/A asize + (unsigned int)sizeof (t_scalar_t) +
2N/A /* second addr buffer plus ailignment */
2N/A osize + (unsigned int)sizeof (t_scalar_t);
2N/A /* option buffer plus alignment */
2N/A
2N/A if ((ctlbuf = malloc(size2)) == NULL) {
2N/A if (size1 != 0) {
2N/A free(rcvbuf);
2N/A free(lookdbuf);
2N/A }
2N/A return (-1);
2N/A }
2N/A
2N/A if ((lookcbuf = malloc(size2)) == NULL) {
2N/A if (size1 != 0) {
2N/A free(rcvbuf);
2N/A free(lookdbuf);
2N/A }
2N/A free(ctlbuf);
2N/A return (-1);
2N/A }
2N/A
2N/A tiptr->ti_rcvsize = size1;
2N/A tiptr->ti_rcvbuf = rcvbuf;
2N/A tiptr->ti_ctlsize = size2;
2N/A tiptr->ti_ctlbuf = ctlbuf;
2N/A
2N/A /*
2N/A * Note: The head of the lookbuffers list (and associated buffers)
2N/A * is allocated here on initialization.
2N/A * More allocated on demand.
2N/A */
2N/A tiptr->ti_lookbufs.tl_lookclen = 0;
2N/A tiptr->ti_lookbufs.tl_lookcbuf = lookcbuf;
2N/A tiptr->ti_lookbufs.tl_lookdlen = 0;
2N/A tiptr->ti_lookbufs.tl_lookdbuf = lookdbuf;
2N/A
2N/A return (0);
2N/A}
2N/A
2N/A
2N/A/*
2N/A * set sizes of buffers
2N/A */
2N/Astatic unsigned int
2N/A_t_setsize(t_scalar_t infosize, boolean_t option)
2N/A{
2N/A static size_t optinfsize;
2N/A
2N/A switch (infosize) {
2N/A case T_INFINITE /* -1 */:
2N/A if (option) {
2N/A if (optinfsize == 0) {
2N/A size_t uc = ucred_size();
2N/A if (uc < DEFSIZE/2)
2N/A optinfsize = DEFSIZE;
2N/A else
2N/A optinfsize = ucred_size() + DEFSIZE/2;
2N/A }
2N/A return ((unsigned int)optinfsize);
2N/A }
2N/A return (DEFSIZE);
2N/A case T_INVALID /* -2 */:
2N/A return (0);
2N/A default:
2N/A return ((unsigned int) infosize);
2N/A }
2N/A}
2N/A
2N/Astatic void
2N/A_t_reinit_tiptr(struct _ti_user *tiptr)
2N/A{
2N/A /*
2N/A * Note: This routine is designed for a "reinitialization"
2N/A * Following fields are not modified here and preserved.
2N/A * - ti_fd field
2N/A * - ti_lock
2N/A * - ti_next
2N/A * - ti_prev
2N/A * The above fields have to be separately initialized if this
2N/A * is used for a fresh initialization.
2N/A */
2N/A
2N/A tiptr->ti_flags = 0;
2N/A tiptr->ti_rcvsize = 0;
2N/A tiptr->ti_rcvbuf = NULL;
2N/A tiptr->ti_ctlsize = 0;
2N/A tiptr->ti_ctlbuf = NULL;
2N/A tiptr->ti_lookbufs.tl_lookdbuf = NULL;
2N/A tiptr->ti_lookbufs.tl_lookcbuf = NULL;
2N/A tiptr->ti_lookbufs.tl_lookdlen = 0;
2N/A tiptr->ti_lookbufs.tl_lookclen = 0;
2N/A tiptr->ti_lookbufs.tl_next = NULL;
2N/A tiptr->ti_maxpsz = 0;
2N/A tiptr->ti_tsdusize = 0;
2N/A tiptr->ti_etsdusize = 0;
2N/A tiptr->ti_cdatasize = 0;
2N/A tiptr->ti_ddatasize = 0;
2N/A tiptr->ti_servtype = 0;
2N/A tiptr->ti_lookcnt = 0;
2N/A tiptr->ti_state = 0;
2N/A tiptr->ti_ocnt = 0;
2N/A tiptr->ti_prov_flag = 0;
2N/A tiptr->ti_qlen = 0;
2N/A}
2N/A
/*
 * Link manipulation routines.
 *
 * NBUCKETS hash buckets are used to give fast
 * access. The number is derived from the file descriptor
 * softlimit number (64).
 */
2N/A
2N/A#define NBUCKETS 64
2N/Astatic struct _ti_user *hash_bucket[NBUCKETS];
2N/A
2N/A/*
2N/A * Allocates a new link and returns a pointer to it.
2N/A * Assumes that the caller is holding _ti_userlock via sig_mutex_lock(),
2N/A * so signals are deferred here.
2N/A */
/*
 * Allocate (or recycle) the instance structure for descriptor s and link
 * it into its hash bucket; returns the structure, or NULL on bad fd /
 * out of memory.
 * Assumes that the caller is holding _ti_userlock via sig_mutex_lock(),
 * so signals are deferred here.
 */
static struct _ti_user *
add_tilink(int s)
{
	struct _ti_user *tiptr;
	struct _ti_user *prevptr;
	struct _ti_user *curptr;
	int x;
	struct stat stbuf;

	assert(MUTEX_HELD(&_ti_userlock));

	if (s < 0 || fstat(s, &stbuf) != 0)
		return (NULL);

	x = s % NBUCKETS;
	if (hash_bucket[x] != NULL) {
		/*
		 * Walk along the bucket looking for
		 * duplicate entry or the end.
		 * (The bucket is non-empty, so the loop body runs at least
		 * once and prevptr is always set before use below.)
		 */
		for (curptr = hash_bucket[x]; curptr != NULL;
		    curptr = curptr->ti_next) {
			if (curptr->ti_fd == s) {
				/*
				 * This can happen when the user has close(2)'ed
				 * a descriptor and then been allocated it again
				 * via t_open().
				 *
				 * We re-use the existing _ti_user struct
				 * in this case rather than allocating a
				 * fresh one. If there are buffers
				 * associated with the existing _ti_user
				 * struct, they may not be the correct size,
				 * so we can not use them. We free them
				 * here; new ones of the right size are
				 * allocated later on.
				 */
				if (curptr->ti_rcvbuf != NULL)
					free(curptr->ti_rcvbuf);
				free(curptr->ti_ctlbuf);
				_t_free_lookbufs(curptr);
				_t_reinit_tiptr(curptr);
				curptr->ti_rdev = stbuf.st_rdev;
				curptr->ti_ino = stbuf.st_ino;
				return (curptr);
			}
			prevptr = curptr;
		}
		/*
		 * Allocate and link in a new one.
		 */
		if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
			return (NULL);
		/*
		 * First initialize fields common with reinitialization and
		 * then other fields too
		 */
		_t_reinit_tiptr(tiptr);
		prevptr->ti_next = tiptr;
		tiptr->ti_prev = prevptr;
	} else {
		/*
		 * First entry.
		 */
		if ((tiptr = malloc(sizeof (*tiptr))) == NULL)
			return (NULL);
		_t_reinit_tiptr(tiptr);
		hash_bucket[x] = tiptr;
		tiptr->ti_prev = NULL;
	}
	/* common tail for both fresh-allocation paths */
	tiptr->ti_next = NULL;
	tiptr->ti_fd = s;
	tiptr->ti_rdev = stbuf.st_rdev;
	tiptr->ti_ino = stbuf.st_ino;
	(void) mutex_init(&tiptr->ti_lock, USYNC_THREAD, NULL);
	return (tiptr);
}
2N/A
2N/A/*
2N/A * Find a link by descriptor
2N/A * Assumes that the caller is holding _ti_userlock.
2N/A */
2N/Astatic struct _ti_user *
2N/Afind_tilink(int s)
2N/A{
2N/A struct _ti_user *curptr;
2N/A int x;
2N/A struct stat stbuf;
2N/A
2N/A assert(MUTEX_HELD(&_ti_userlock));
2N/A
2N/A if (s < 0 || fstat(s, &stbuf) != 0)
2N/A return (NULL);
2N/A
2N/A x = s % NBUCKETS;
2N/A /*
2N/A * Walk along the bucket looking for the descriptor.
2N/A */
2N/A for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
2N/A if (curptr->ti_fd == s) {
2N/A if (curptr->ti_rdev == stbuf.st_rdev &&
2N/A curptr->ti_ino == stbuf.st_ino)
2N/A return (curptr);
2N/A (void) _t_delete_tilink(s);
2N/A }
2N/A }
2N/A return (NULL);
2N/A}
2N/A
2N/A/*
2N/A * Assumes that the caller is holding _ti_userlock.
2N/A * Also assumes that all signals are blocked.
2N/A */
2N/Aint
2N/A_t_delete_tilink(int s)
2N/A{
2N/A struct _ti_user *curptr;
2N/A int x;
2N/A
2N/A /*
2N/A * Find the link.
2N/A */
2N/A assert(MUTEX_HELD(&_ti_userlock));
2N/A if (s < 0)
2N/A return (-1);
2N/A x = s % NBUCKETS;
2N/A /*
2N/A * Walk along the bucket looking for
2N/A * the descriptor.
2N/A */
2N/A for (curptr = hash_bucket[x]; curptr; curptr = curptr->ti_next) {
2N/A if (curptr->ti_fd == s) {
2N/A struct _ti_user *nextptr;
2N/A struct _ti_user *prevptr;
2N/A
2N/A nextptr = curptr->ti_next;
2N/A prevptr = curptr->ti_prev;
2N/A if (prevptr)
2N/A prevptr->ti_next = nextptr;
2N/A else
2N/A hash_bucket[x] = nextptr;
2N/A if (nextptr)
2N/A nextptr->ti_prev = prevptr;
2N/A
2N/A /*
2N/A * free resource associated with the curptr
2N/A */
2N/A if (curptr->ti_rcvbuf != NULL)
2N/A free(curptr->ti_rcvbuf);
2N/A free(curptr->ti_ctlbuf);
2N/A _t_free_lookbufs(curptr);
2N/A (void) mutex_destroy(&curptr->ti_lock);
2N/A free(curptr);
2N/A return (0);
2N/A }
2N/A }
2N/A return (-1);
2N/A}
2N/A
2N/A/*
2N/A * Allocate a TLI state structure and synch it with the kernel
2N/A * *tiptr is returned
2N/A * Assumes that the caller is holding the _ti_userlock and has blocked signals.
2N/A *
2N/A * This function may fail the first time it is called with given transport if it
2N/A * doesn't support T_CAPABILITY_REQ TPI message.
2N/A */
/*
 * Build the user-level instance structure for fd by querying the kernel
 * (TI_CAPABILITY for transport info, TI_SYNC to synchronize with timod),
 * allocate its buffers, and reconstruct the TLI/XTI state from the
 * kernel's TPI state. Optionally fills *info with transport parameters.
 * Returns the new instance, or NULL with t_errno/errno set.
 */
struct _ti_user *
_t_create(int fd, struct t_info *info, int api_semantics, int *t_capreq_failed)
{
	/*
	 * Aligned data buffer for ioctl.
	 */
	union {
		struct ti_sync_req ti_req;
		struct ti_sync_ack ti_ack;
		union T_primitives t_prim;
		char pad[128];
	} ioctl_data;
	void *ioctlbuf = &ioctl_data; /* TI_SYNC/GETINFO with room to grow */
			    /* preferred location first local variable */
			    /* see note below */
	/*
	 * Note: We use "ioctlbuf" allocated on stack above with
	 * room to grow since (struct ti_sync_ack) can grow in size
	 * on future kernels. (We do not use malloc'd "ti_ctlbuf" as that
	 * part of instance structure which may not exist yet)
	 * Its preferred declaration location is first local variable in this
	 * procedure as bugs causing overruns will be detectable on
	 * platforms where procedure calling conventions place return
	 * address on stack (such as x86) instead of causing silent
	 * memory corruption.
	 */
	struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
	struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
	struct T_capability_req *tcrp = (struct T_capability_req *)ioctlbuf;
	struct T_capability_ack *tcap = (struct T_capability_ack *)ioctlbuf;
	struct T_info_ack *tiap = &tcap->INFO_ack;
	struct _ti_user *ntiptr;
	int expected_acksize;
	int retlen, rstate, sv_errno, rval;

	assert(MUTEX_HELD(&_ti_userlock));

	/*
	 * Use ioctl required for sync'ing state with kernel.
	 * We use two ioctls. TI_CAPABILITY is used to get TPI information and
	 * TI_SYNC is used to synchronise state with timod. Statically linked
	 * TLI applications will no longer work on older releases where there
	 * are no TI_SYNC and TI_CAPABILITY.
	 */

	/*
	 * Request info about transport.
	 * Assumes that TC1_INFO should always be implemented.
	 * For TI_CAPABILITY size argument to ioctl specifies maximum buffer
	 * size.
	 */
	tcrp->PRIM_type = T_CAPABILITY_REQ;
	tcrp->CAP_bits1 = TC1_INFO | TC1_ACCEPTOR_ID;
	rval = _t_do_ioctl(fd, (char *)ioctlbuf,
	    (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
	expected_acksize = (int)sizeof (struct T_capability_ack);

	if (rval < 0) {
		/*
		 * TI_CAPABILITY may fail when transport provider doesn't
		 * support T_CAPABILITY_REQ message type. In this case file
		 * descriptor may be unusable (when transport provider sent
		 * M_ERROR in response to T_CAPABILITY_REQ). This should only
		 * happen once during system lifetime for given transport
		 * provider since timod will emulate TI_CAPABILITY after it
		 * detected the failure.
		 */
		if (t_capreq_failed != NULL)
			*t_capreq_failed = 1;
		return (NULL);
	}

	if (retlen != expected_acksize) {
		t_errno = TSYSERR;
		errno = EIO;
		return (NULL);
	}

	if ((tcap->CAP_bits1 & TC1_INFO) == 0) {
		t_errno = TSYSERR;
		errno = EPROTO;
		return (NULL);
	}
	/* optionally export transport parameters to the caller */
	if (info != NULL) {
		if (tiap->PRIM_type != T_INFO_ACK) {
			t_errno = TSYSERR;
			errno = EPROTO;
			return (NULL);
		}
		info->addr = tiap->ADDR_size;
		info->options = tiap->OPT_size;
		info->tsdu = tiap->TSDU_size;
		info->etsdu = tiap->ETSDU_size;
		info->connect = tiap->CDATA_size;
		info->discon = tiap->DDATA_size;
		info->servtype = tiap->SERV_type;
		if (_T_IS_XTI(api_semantics)) {
			/*
			 * XTI ONLY - TLI "struct t_info" does not
			 * have "flags"
			 */
			info->flags = 0;
			if (tiap->PROVIDER_flag & (SENDZERO|OLD_SENDZERO))
				info->flags |= T_SENDZERO;
			/*
			 * Some day there MAY be a NEW bit in T_info_ack
			 * PROVIDER_flag namespace exposed by TPI header
			 * <sys/tihdr.h> which will functionally correspond to
			 * role played by T_ORDRELDATA in info->flags namespace
			 * When that bit exists, we can add a test to see if
			 * it is set and set T_ORDRELDATA.
			 * Note: Currently only mOSI ("minimal OSI") provider
			 * is specified to use T_ORDRELDATA so probability of
			 * needing it is minimal.
			 */
		}
	}

	/*
	 * if first time or no instance (after fork/exec, dup etc,
	 * then create initialize data structure
	 * and allocate buffers
	 */
	ntiptr = add_tilink(fd);
	if (ntiptr == NULL) {
		t_errno = TSYSERR;
		errno = ENOMEM;
		return (NULL);
	}
	sig_mutex_lock(&ntiptr->ti_lock);

	/*
	 * Allocate buffers for the new descriptor
	 */
	if (_t_alloc_bufs(fd, ntiptr, tiap) < 0) {
		sv_errno = errno;
		(void) _t_delete_tilink(fd);
		t_errno = TSYSERR;
		sig_mutex_unlock(&ntiptr->ti_lock);
		errno = sv_errno;
		return (NULL);
	}

	/* Fill instance structure */

	ntiptr->ti_lookcnt = 0;
	ntiptr->ti_flags = USED;
	ntiptr->ti_state = T_UNINIT;
	ntiptr->ti_ocnt = 0;

	/* cache transport characteristics from the T_info_ack */
	assert(tiap->TIDU_size > 0);
	ntiptr->ti_maxpsz = tiap->TIDU_size;
	assert(tiap->TSDU_size >= -2);
	ntiptr->ti_tsdusize = tiap->TSDU_size;
	assert(tiap->ETSDU_size >= -2);
	ntiptr->ti_etsdusize = tiap->ETSDU_size;
	assert(tiap->CDATA_size >= -2);
	ntiptr->ti_cdatasize = tiap->CDATA_size;
	assert(tiap->DDATA_size >= -2);
	ntiptr->ti_ddatasize = tiap->DDATA_size;
	ntiptr->ti_servtype = tiap->SERV_type;
	ntiptr->ti_prov_flag = tiap->PROVIDER_flag;

	if ((tcap->CAP_bits1 & TC1_ACCEPTOR_ID) != 0) {
		ntiptr->acceptor_id = tcap->ACCEPTOR_id;
		ntiptr->ti_flags |= V_ACCEPTOR_ID;
	}
	else
		ntiptr->ti_flags &= ~V_ACCEPTOR_ID;

	/*
	 * Restore state from kernel (caveat some heuristics)
	 * Map the TPI state reported by the provider to the corresponding
	 * TLI/XTI state; ambiguous TPI states are refined by peeking at
	 * the stream head in _t_adjust_state().
	 */
	switch (tiap->CURRENT_state) {

	case TS_UNBND:
		ntiptr->ti_state = T_UNBND;
		break;

	case TS_IDLE:
		if ((rstate = _t_adjust_state(fd, T_IDLE)) < 0) {
			sv_errno = errno;
			(void) _t_delete_tilink(fd);
			sig_mutex_unlock(&ntiptr->ti_lock);
			errno = sv_errno;
			return (NULL);
		}
		ntiptr->ti_state = rstate;
		break;

	case TS_WRES_CIND:
		ntiptr->ti_state = T_INCON;
		break;

	case TS_WCON_CREQ:
		ntiptr->ti_state = T_OUTCON;
		break;

	case TS_DATA_XFER:
		if ((rstate = _t_adjust_state(fd, T_DATAXFER)) < 0) {
			sv_errno = errno;
			(void) _t_delete_tilink(fd);
			sig_mutex_unlock(&ntiptr->ti_lock);
			errno = sv_errno;
			return (NULL);
		}
		ntiptr->ti_state = rstate;
		break;

	case TS_WIND_ORDREL:
		ntiptr->ti_state = T_OUTREL;
		break;

	case TS_WREQ_ORDREL:
		if ((rstate = _t_adjust_state(fd, T_INREL)) < 0) {
			sv_errno = errno;
			(void) _t_delete_tilink(fd);
			sig_mutex_unlock(&ntiptr->ti_lock);
			errno = sv_errno;
			return (NULL);
		}
		ntiptr->ti_state = rstate;
		break;
	default:
		t_errno = TSTATECHNG;
		(void) _t_delete_tilink(fd);
		sig_mutex_unlock(&ntiptr->ti_lock);
		return (NULL);
	}

	/*
	 * Sync information with timod.
	 */
	tsrp->tsr_flags = TSRF_QLEN_REQ;

	rval = _t_do_ioctl(fd, ioctlbuf,
	    (int)sizeof (struct ti_sync_req), TI_SYNC, &retlen);
	expected_acksize = (int)sizeof (struct ti_sync_ack);

	if (rval < 0) {
		sv_errno = errno;
		(void) _t_delete_tilink(fd);
		t_errno = TSYSERR;
		sig_mutex_unlock(&ntiptr->ti_lock);
		errno = sv_errno;
		return (NULL);
	}

	/*
	 * This is a "less than" check as "struct ti_sync_ack" returned by
	 * TI_SYNC can grow in size in future kernels. If/when a statically
	 * linked application is run on a future kernel, it should not fail.
	 */
	if (retlen < expected_acksize) {
		sv_errno = errno;
		(void) _t_delete_tilink(fd);
		t_errno = TSYSERR;
		sig_mutex_unlock(&ntiptr->ti_lock);
		errno = sv_errno;
		return (NULL);
	}

	if (_T_IS_TLI(api_semantics))
		tsap->tsa_qlen = 0; /* not needed for TLI */

	ntiptr->ti_qlen = tsap->tsa_qlen;
	sig_mutex_unlock(&ntiptr->ti_lock);
	return (ntiptr);
}
2N/A
2N/A
/*
 * Refine an ambiguous TLI/XTI state (instate) by peeking (I_PEEK) at the
 * message, if any, waiting at the stream head. Returns the possibly
 * adjusted state, or -1 with t_errno = TSYSERR on ioctl failure.
 */
static int
_t_adjust_state(int fd, int instate)
{
	char ctlbuf[sizeof (t_scalar_t)];
	char databuf[sizeof (int)]; /* size unimportant - anything > 0 */
	struct strpeek arg;
	int outstate, retval;

	/*
	 * Peek at message on stream head (if any)
	 * and see if it is data
	 */
	arg.ctlbuf.buf = ctlbuf;
	arg.ctlbuf.maxlen = (int)sizeof (ctlbuf);
	arg.ctlbuf.len = 0;

	arg.databuf.buf = databuf;
	arg.databuf.maxlen = (int)sizeof (databuf);
	arg.databuf.len = 0;

	arg.flags = 0;	/* peek at the first message, regardless of priority */

	if ((retval = ioctl(fd, I_PEEK, &arg)) < 0) {
		t_errno = TSYSERR;
		return (-1);
	}
	outstate = instate;
	/*
	 * If peek shows something at stream head, then
	 * Adjust "outstate" based on some heuristics.
	 * (retval > 0 means a message was available to peek at.)
	 */
	if (retval > 0) {
		switch (instate) {
		case T_IDLE:
			/*
			 * The following heuristic is to handle data
			 * ahead of T_DISCON_IND indications that might
			 * be at the stream head waiting to be
			 * read (T_DATA_IND or M_DATA)
			 */
			if (((arg.ctlbuf.len == 4) &&
			    /* LINTED pointer cast */
			    ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
			    ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
				outstate = T_DATAXFER;
			}
			break;
		case T_DATAXFER:
			/*
			 * The following heuristic is to handle
			 * the case where the connection is established
			 * and in data transfer state at the provider
			 * but the T_CONN_CON has not yet been read
			 * from the stream head.
			 */
			if ((arg.ctlbuf.len == 4) &&
			    /* LINTED pointer cast */
			    ((*(int32_t *)arg.ctlbuf.buf) == T_CONN_CON))
				outstate = T_OUTCON;
			break;
		case T_INREL:
			/*
			 * The following heuristic is to handle data
			 * ahead of T_ORDREL_IND indications that might
			 * be at the stream head waiting to be
			 * read (T_DATA_IND or M_DATA)
			 */
			if (((arg.ctlbuf.len == 4) &&
			    /* LINTED pointer cast */
			    ((*(int32_t *)arg.ctlbuf.buf) == T_DATA_IND)) ||
			    ((arg.ctlbuf.len == 0) && arg.databuf.len)) {
				outstate = T_DATAXFER;
			}
			break;
		default:
			/* other states need no adjustment */
			break;
		}
	}
	return (outstate);
}
2N/A
2N/A/*
2N/A * Assumes caller has blocked signals at least in this thread (for safe
2N/A * malloc/free operations)
2N/A */
2N/Astatic int
2N/A_t_cbuf_alloc(struct _ti_user *tiptr, char **retbuf)
2N/A{
2N/A unsigned size2;
2N/A
2N/A assert(MUTEX_HELD(&tiptr->ti_lock));
2N/A size2 = tiptr->ti_ctlsize; /* same size as default ctlbuf */
2N/A
2N/A if ((*retbuf = malloc(size2)) == NULL) {
2N/A return (-1);
2N/A }
2N/A return (size2);
2N/A}
2N/A
2N/A
2N/A/*
2N/A * Assumes caller has blocked signals at least in this thread (for safe
2N/A * malloc/free operations)
2N/A */
2N/Aint
2N/A_t_rbuf_alloc(struct _ti_user *tiptr, char **retbuf)
2N/A{
2N/A unsigned size1;
2N/A
2N/A assert(MUTEX_HELD(&tiptr->ti_lock));
2N/A size1 = tiptr->ti_rcvsize; /* same size as default rcvbuf */
2N/A
2N/A if ((*retbuf = malloc(size1)) == NULL) {
2N/A return (-1);
2N/A }
2N/A return (size1);
2N/A}
2N/A
2N/A/*
2N/A * Free lookbuffer structures and associated resources
2N/A * Assumes ti_lock held for MT case.
2N/A */
2N/Astatic void
2N/A_t_free_lookbufs(struct _ti_user *tiptr)
2N/A{
2N/A struct _ti_lookbufs *tlbs, *prev_tlbs, *head_tlbs;
2N/A
2N/A /*
2N/A * Assertion:
2N/A * The structure lock should be held or the global list
2N/A * manipulation lock. The assumption is that nothing
2N/A * else can access the descriptor since global list manipulation
2N/A * lock is held so it is OK to manipulate fields without the
2N/A * structure lock
2N/A */
2N/A assert(MUTEX_HELD(&tiptr->ti_lock) || MUTEX_HELD(&_ti_userlock));
2N/A
2N/A /*
2N/A * Free only the buffers in the first lookbuf
2N/A */
2N/A head_tlbs = &tiptr->ti_lookbufs;
2N/A if (head_tlbs->tl_lookdbuf != NULL) {
2N/A free(head_tlbs->tl_lookdbuf);
2N/A head_tlbs->tl_lookdbuf = NULL;
2N/A }
2N/A free(head_tlbs->tl_lookcbuf);
2N/A head_tlbs->tl_lookcbuf = NULL;
2N/A /*
2N/A * Free the node and the buffers in the rest of the
2N/A * list
2N/A */
2N/A
2N/A tlbs = head_tlbs->tl_next;
2N/A head_tlbs->tl_next = NULL;
2N/A
2N/A while (tlbs != NULL) {
2N/A if (tlbs->tl_lookdbuf != NULL)
2N/A free(tlbs->tl_lookdbuf);
2N/A free(tlbs->tl_lookcbuf);
2N/A prev_tlbs = tlbs;
2N/A tlbs = tlbs->tl_next;
2N/A free(prev_tlbs);
2N/A }
2N/A}
2N/A
2N/A/*
2N/A * Free lookbuffer event list head.
2N/A * Consume current lookbuffer event
2N/A * Assumes ti_lock held for MT case.
2N/A * Note: The head of this list is part of the instance
2N/A * structure so the code is a little unorthodox.
2N/A */
2N/Avoid
2N/A_t_free_looklist_head(struct _ti_user *tiptr)
2N/A{
2N/A struct _ti_lookbufs *tlbs, *next_tlbs;
2N/A
2N/A tlbs = &tiptr->ti_lookbufs;
2N/A
2N/A if (tlbs->tl_next) {
2N/A /*
2N/A * Free the control and data buffers
2N/A */
2N/A if (tlbs->tl_lookdbuf != NULL)
2N/A free(tlbs->tl_lookdbuf);
2N/A free(tlbs->tl_lookcbuf);
2N/A /*
2N/A * Replace with next lookbuf event contents
2N/A */
2N/A next_tlbs = tlbs->tl_next;
2N/A tlbs->tl_next = next_tlbs->tl_next;
2N/A tlbs->tl_lookcbuf = next_tlbs->tl_lookcbuf;
2N/A tlbs->tl_lookclen = next_tlbs->tl_lookclen;
2N/A tlbs->tl_lookdbuf = next_tlbs->tl_lookdbuf;
2N/A tlbs->tl_lookdlen = next_tlbs->tl_lookdlen;
2N/A free(next_tlbs);
2N/A /*
2N/A * Decrement the flag - should never get to zero.
2N/A * in this path
2N/A */
2N/A tiptr->ti_lookcnt--;
2N/A assert(tiptr->ti_lookcnt > 0);
2N/A } else {
2N/A /*
2N/A * No more look buffer events - just clear the flag
2N/A * and leave the buffers alone
2N/A */
2N/A assert(tiptr->ti_lookcnt == 1);
2N/A tiptr->ti_lookcnt = 0;
2N/A }
2N/A}
2N/A
2N/A/*
2N/A * Discard lookbuffer events.
2N/A * Assumes ti_lock held for MT case.
2N/A */
2N/Avoid
2N/A_t_flush_lookevents(struct _ti_user *tiptr)
2N/A{
2N/A struct _ti_lookbufs *tlbs, *prev_tlbs;
2N/A
2N/A /*
2N/A * Leave the first nodes buffers alone (i.e. allocated)
2N/A * but reset the flag.
2N/A */
2N/A assert(MUTEX_HELD(&tiptr->ti_lock));
2N/A tiptr->ti_lookcnt = 0;
2N/A /*
2N/A * Blow away the rest of the list
2N/A */
2N/A tlbs = tiptr->ti_lookbufs.tl_next;
2N/A tiptr->ti_lookbufs.tl_next = NULL;
2N/A while (tlbs != NULL) {
2N/A if (tlbs->tl_lookdbuf != NULL)
2N/A free(tlbs->tl_lookdbuf);
2N/A free(tlbs->tl_lookcbuf);
2N/A prev_tlbs = tlbs;
2N/A tlbs = tlbs->tl_next;
2N/A free(prev_tlbs);
2N/A }
2N/A}
2N/A
2N/A
2N/A/*
2N/A * This routine checks if the receive. buffer in the instance structure
2N/A * is available (non-null). If it is, the buffer is acquired and marked busy
2N/A * (null). If it is busy (possible in MT programs), it allocates a new
2N/A * buffer and sets a flag indicating new memory was allocated and the caller
2N/A * has to free it.
2N/A */
2N/Aint
2N/A_t_acquire_ctlbuf(
2N/A struct _ti_user *tiptr,
2N/A struct strbuf *ctlbufp,
2N/A int *didallocp)
2N/A{
2N/A *didallocp = 0;
2N/A
2N/A ctlbufp->len = 0;
2N/A if (tiptr->ti_ctlbuf) {
2N/A ctlbufp->buf = tiptr->ti_ctlbuf;
2N/A tiptr->ti_ctlbuf = NULL;
2N/A ctlbufp->maxlen = tiptr->ti_ctlsize;
2N/A } else {
2N/A /*
2N/A * tiptr->ti_ctlbuf is in use
2N/A * allocate new buffer and free after use.
2N/A */
2N/A if ((ctlbufp->maxlen = _t_cbuf_alloc(tiptr,
2N/A &ctlbufp->buf)) < 0) {
2N/A t_errno = TSYSERR;
2N/A return (-1);
2N/A }
2N/A *didallocp = 1;
2N/A }
2N/A return (0);
2N/A}
2N/A
2N/A/*
2N/A * This routine checks if the receive buffer in the instance structure
2N/A * is available (non-null). If it is, the buffer is acquired and marked busy
2N/A * (null). If it is busy (possible in MT programs), it allocates a new
2N/A * buffer and sets a flag indicating new memory was allocated and the caller
2N/A * has to free it.
2N/A * Note: The receive buffer pointer can also be null if the transport
2N/A * provider does not support connect/disconnect data, (e.g. TCP) - not
2N/A * just when it is "busy". In that case, ti_rcvsize will be 0 and that is
2N/A * used to instantiate the databuf which points to a null buffer of
2N/A * length 0 which is the right thing to do for that case.
2N/A */
2N/Aint
2N/A_t_acquire_databuf(
2N/A struct _ti_user *tiptr,
2N/A struct strbuf *databufp,
2N/A int *didallocp)
2N/A{
2N/A *didallocp = 0;
2N/A
2N/A databufp->len = 0;
2N/A if (tiptr->ti_rcvbuf) {
2N/A assert(tiptr->ti_rcvsize != 0);
2N/A databufp->buf = tiptr->ti_rcvbuf;
2N/A tiptr->ti_rcvbuf = NULL;
2N/A databufp->maxlen = tiptr->ti_rcvsize;
2N/A } else if (tiptr->ti_rcvsize == 0) {
2N/A databufp->buf = NULL;
2N/A databufp->maxlen = 0;
2N/A } else {
2N/A /*
2N/A * tiptr->ti_rcvbuf is in use
2N/A * allocate new buffer and free after use.
2N/A */
2N/A if ((databufp->maxlen = _t_rbuf_alloc(tiptr,
2N/A &databufp->buf)) < 0) {
2N/A t_errno = TSYSERR;
2N/A return (-1);
2N/A }
2N/A *didallocp = 1;
2N/A }
2N/A return (0);
2N/A}
2N/A
2N/A/*
2N/A * This routine requests timod to look for any expedited data
2N/A * queued in the "receive buffers" in the kernel. Used for XTI
2N/A * t_look() semantics for transports that send expedited data
2N/A * data inline (e.g TCP).
2N/A * Returns -1 for failure
2N/A * Returns 0 for success
2N/A * On a successful return, the location pointed by "expedited_queuedp"
2N/A * contains
2N/A * 0 if no expedited data is found queued in "receive buffers"
2N/A * 1 if expedited data is found queued in "receive buffers"
2N/A */
2N/A
2N/Aint
2N/A_t_expinline_queued(int fd, int *expedited_queuedp)
2N/A{
2N/A union {
2N/A struct ti_sync_req ti_req;
2N/A struct ti_sync_ack ti_ack;
2N/A char pad[128];
2N/A } ioctl_data;
2N/A void *ioctlbuf = &ioctl_data; /* for TI_SYNC with room to grow */
2N/A /* preferred location first local variable */
2N/A /* see note in _t_create above */
2N/A struct ti_sync_req *tsrp = (struct ti_sync_req *)ioctlbuf;
2N/A struct ti_sync_ack *tsap = (struct ti_sync_ack *)ioctlbuf;
2N/A int rval, retlen;
2N/A
2N/A *expedited_queuedp = 0;
2N/A /* request info on rq expinds */
2N/A tsrp->tsr_flags = TSRF_IS_EXP_IN_RCVBUF;
2N/A do {
2N/A rval = _t_do_ioctl(fd, ioctlbuf,
2N/A (int)sizeof (struct T_info_req), TI_SYNC, &retlen);
2N/A } while (rval < 0 && errno == EINTR);
2N/A
2N/A if (rval < 0)
2N/A return (-1);
2N/A
2N/A /*
2N/A * This is a "less than" check as "struct ti_sync_ack" returned by
2N/A * TI_SYNC can grow in size in future kernels. If/when a statically
2N/A * linked application is run on a future kernel, it should not fail.
2N/A */
2N/A if (retlen < (int)sizeof (struct ti_sync_ack)) {
2N/A t_errno = TSYSERR;
2N/A errno = EIO;
2N/A return (-1);
2N/A }
2N/A if (tsap->tsa_flags & TSAF_EXP_QUEUED)
2N/A *expedited_queuedp = 1;
2N/A return (0);
2N/A}
2N/A
2N/A/*
2N/A * Support functions for use by functions that do scatter/gather
2N/A * like t_sndv(), t_rcvv() etc..follow below.
2N/A */
2N/A
2N/A/*
2N/A * _t_bytecount_upto_intmax() :
2N/A * Sum of the lengths of the individual buffers in
2N/A * the t_iovec array. If the sum exceeds INT_MAX
2N/A * it is truncated to INT_MAX.
2N/A */
2N/Aunsigned int
2N/A_t_bytecount_upto_intmax(const struct t_iovec *tiov, unsigned int tiovcount)
2N/A{
2N/A size_t nbytes;
2N/A int i;
2N/A
2N/A nbytes = 0;
2N/A for (i = 0; i < tiovcount && nbytes < INT_MAX; i++) {
2N/A if (tiov[i].iov_len >= INT_MAX) {
2N/A nbytes = INT_MAX;
2N/A break;
2N/A }
2N/A nbytes += tiov[i].iov_len;
2N/A }
2N/A
2N/A if (nbytes > INT_MAX)
2N/A nbytes = INT_MAX;
2N/A
2N/A return ((unsigned int)nbytes);
2N/A}
2N/A
2N/A/*
2N/A * Gather the data in the t_iovec buffers, into a single linear buffer
2N/A * starting at dataptr. Caller must have allocated sufficient space
2N/A * starting at dataptr. The total amount of data that is gathered is
2N/A * limited to INT_MAX. Any remaining data in the t_iovec buffers is
2N/A * not copied.
2N/A */
2N/Avoid
2N/A_t_gather(char *dataptr, const struct t_iovec *tiov, unsigned int tiovcount)
2N/A{
2N/A char *curptr;
2N/A unsigned int cur_count;
2N/A unsigned int nbytes_remaining;
2N/A int i;
2N/A
2N/A curptr = dataptr;
2N/A cur_count = 0;
2N/A
2N/A nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
2N/A for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
2N/A if (tiov[i].iov_len <= nbytes_remaining)
2N/A cur_count = (int)tiov[i].iov_len;
2N/A else
2N/A cur_count = nbytes_remaining;
2N/A (void) memcpy(curptr, tiov[i].iov_base, cur_count);
2N/A curptr += cur_count;
2N/A nbytes_remaining -= cur_count;
2N/A }
2N/A}
2N/A
2N/A/*
2N/A * Scatter the data from the single linear buffer at pdatabuf->buf into
2N/A * the t_iovec buffers.
2N/A */
2N/Avoid
2N/A_t_scatter(struct strbuf *pdatabuf, struct t_iovec *tiov, int tiovcount)
2N/A{
2N/A char *curptr;
2N/A unsigned int nbytes_remaining;
2N/A unsigned int curlen;
2N/A int i;
2N/A
2N/A /*
2N/A * There cannot be any uncopied data leftover in pdatabuf
2N/A * at the conclusion of this function. (asserted below)
2N/A */
2N/A assert(pdatabuf->len <= _t_bytecount_upto_intmax(tiov, tiovcount));
2N/A curptr = pdatabuf->buf;
2N/A nbytes_remaining = pdatabuf->len;
2N/A for (i = 0; i < tiovcount && nbytes_remaining != 0; i++) {
2N/A if (tiov[i].iov_len < nbytes_remaining)
2N/A curlen = (unsigned int)tiov[i].iov_len;
2N/A else
2N/A curlen = nbytes_remaining;
2N/A (void) memcpy(tiov[i].iov_base, curptr, curlen);
2N/A curptr += curlen;
2N/A nbytes_remaining -= curlen;
2N/A }
2N/A}
2N/A
2N/A/*
2N/A * Adjust the iovec array, for subsequent use. Examine each element in the
2N/A * iovec array,and zero out the iov_len if the buffer was sent fully.
2N/A * otherwise the buffer was only partially sent, so adjust both iov_len and
2N/A * iov_base.
2N/A *
2N/A */
2N/Avoid
2N/A_t_adjust_iov(int bytes_sent, struct iovec *iov, int *iovcountp)
2N/A{
2N/A
2N/A int i;
2N/A
2N/A for (i = 0; i < *iovcountp && bytes_sent; i++) {
2N/A if (iov[i].iov_len == 0)
2N/A continue;
2N/A if (bytes_sent < iov[i].iov_len)
2N/A break;
2N/A else {
2N/A bytes_sent -= iov[i].iov_len;
2N/A iov[i].iov_len = 0;
2N/A }
2N/A }
2N/A iov[i].iov_len -= bytes_sent;
2N/A iov[i].iov_base = (caddr_t)iov[i].iov_base + bytes_sent;
2N/A}
2N/A
2N/A/*
2N/A * Copy the t_iovec array to the iovec array while taking care to see
2N/A * that the sum of the buffer lengths in the result is not more than
2N/A * INT_MAX. This function requires that T_IOV_MAX is no larger than
2N/A * IOV_MAX. Otherwise the resulting array is not a suitable input to
2N/A * writev(). If the sum of the lengths in t_iovec is zero, so is the
2N/A * resulting iovec.
2N/A */
2N/Avoid
2N/A_t_copy_tiov_to_iov(const struct t_iovec *tiov, int tiovcount,
2N/A struct iovec *iov, int *iovcountp)
2N/A{
2N/A int i;
2N/A unsigned int nbytes_remaining;
2N/A
2N/A nbytes_remaining = _t_bytecount_upto_intmax(tiov, tiovcount);
2N/A i = 0;
2N/A do {
2N/A iov[i].iov_base = tiov[i].iov_base;
2N/A if (tiov[i].iov_len > nbytes_remaining)
2N/A iov[i].iov_len = nbytes_remaining;
2N/A else
2N/A iov[i].iov_len = tiov[i].iov_len;
2N/A nbytes_remaining -= iov[i].iov_len;
2N/A i++;
2N/A } while (nbytes_remaining != 0 && i < tiovcount);
2N/A
2N/A *iovcountp = i;
2N/A}
2N/A
2N/A/*
2N/A * Routine called after connection establishment on transports where
2N/A * connection establishment changes certain transport attributes such as
2N/A * TIDU_size
2N/A */
2N/Aint
2N/A_t_do_postconn_sync(int fd, struct _ti_user *tiptr)
2N/A{
2N/A union {
2N/A struct T_capability_req tc_req;
2N/A struct T_capability_ack tc_ack;
2N/A } ioctl_data;
2N/A
2N/A void *ioctlbuf = &ioctl_data;
2N/A int expected_acksize;
2N/A int retlen, rval;
2N/A struct T_capability_req *tc_reqp = (struct T_capability_req *)ioctlbuf;
2N/A struct T_capability_ack *tc_ackp = (struct T_capability_ack *)ioctlbuf;
2N/A struct T_info_ack *tiap;
2N/A
2N/A /*
2N/A * This T_CAPABILITY_REQ should not fail, even if it is unsupported
2N/A * by the transport provider. timod will emulate it in that case.
2N/A */
2N/A tc_reqp->PRIM_type = T_CAPABILITY_REQ;
2N/A tc_reqp->CAP_bits1 = TC1_INFO;
2N/A rval = _t_do_ioctl(fd, (char *)ioctlbuf,
2N/A (int)sizeof (struct T_capability_ack), TI_CAPABILITY, &retlen);
2N/A expected_acksize = (int)sizeof (struct T_capability_ack);
2N/A
2N/A if (rval < 0)
2N/A return (-1);
2N/A
2N/A /*
2N/A * T_capability TPI messages are extensible and can grow in future.
2N/A * However timod will take care of returning no more information
2N/A * than what was requested, and truncating the "extended"
2N/A * information towards the end of the T_capability_ack, if necessary.
2N/A */
2N/A if (retlen != expected_acksize) {
2N/A t_errno = TSYSERR;
2N/A errno = EIO;
2N/A return (-1);
2N/A }
2N/A
2N/A /*
2N/A * The T_info_ack part of the T_capability_ack is guaranteed to be
2N/A * present only if the corresponding TC1_INFO bit is set
2N/A */
2N/A if ((tc_ackp->CAP_bits1 & TC1_INFO) == 0) {
2N/A t_errno = TSYSERR;
2N/A errno = EPROTO;
2N/A return (-1);
2N/A }
2N/A
2N/A tiap = &tc_ackp->INFO_ack;
2N/A if (tiap->PRIM_type != T_INFO_ACK) {
2N/A t_errno = TSYSERR;
2N/A errno = EPROTO;
2N/A return (-1);
2N/A }
2N/A
2N/A /*
2N/A * Note: Sync with latest information returned in "struct T_info_ack
2N/A * but we deliberately not sync the state here as user level state
2N/A * construction here is not required, only update of attributes which
2N/A * may have changed because of negotations during connection
2N/A * establsihment
2N/A */
2N/A assert(tiap->TIDU_size > 0);
2N/A tiptr->ti_maxpsz = tiap->TIDU_size;
2N/A assert(tiap->TSDU_size >= T_INVALID);
2N/A tiptr->ti_tsdusize = tiap->TSDU_size;
2N/A assert(tiap->ETSDU_size >= T_INVALID);
2N/A tiptr->ti_etsdusize = tiap->ETSDU_size;
2N/A assert(tiap->CDATA_size >= T_INVALID);
2N/A tiptr->ti_cdatasize = tiap->CDATA_size;
2N/A assert(tiap->DDATA_size >= T_INVALID);
2N/A tiptr->ti_ddatasize = tiap->DDATA_size;
2N/A tiptr->ti_prov_flag = tiap->PROVIDER_flag;
2N/A
2N/A return (0);
2N/A}