2N/A/*
2N/A * CDDL HEADER START
2N/A *
2N/A * The contents of this file are subject to the terms of the
2N/A * Common Development and Distribution License (the "License").
2N/A * You may not use this file except in compliance with the License.
2N/A *
2N/A * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
2N/A * or http://www.opensolaris.org/os/licensing.
2N/A * See the License for the specific language governing permissions
2N/A * and limitations under the License.
2N/A *
2N/A * When distributing Covered Code, include this CDDL HEADER in each
2N/A * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
2N/A * If applicable, add the following below this CDDL HEADER, with the
2N/A * fields enclosed by brackets "[]" replaced with your own identifying
2N/A * information: Portions Copyright [yyyy] [name of copyright owner]
2N/A *
2N/A * CDDL HEADER END
2N/A */
2N/A
2N/A/*
2N/A * Copyright (c) 1989, 2011, Oracle and/or its affiliates. All rights reserved.
2N/A */
2N/A/*
2N/A * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
2N/A */
2N/A/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
2N/A/* All Rights Reserved */
2N/A/*
2N/A * Portions of this source code were derived from Berkeley
2N/A * 4.3 BSD under license from the Regents of the University of
2N/A * California.
2N/A */
2N/A
2N/A/*
2N/A * svc.c, Server-side remote procedure call interface.
2N/A *
2N/A * There are two sets of procedures here. The xprt routines are
2N/A * for handling transport handles. The svc routines handle the
2N/A * list of service routines.
2N/A *
2N/A */
2N/A
2N/A
2N/A#include "mt.h"
2N/A#include "rpc_mt.h"
2N/A#include <assert.h>
2N/A#include <errno.h>
2N/A#include <sys/types.h>
2N/A#include <stropts.h>
2N/A#include <sys/conf.h>
2N/A#include <rpc/rpc.h>
2N/A#ifdef PORTMAP
2N/A#include <rpc/pmap_clnt.h>
2N/A#endif
2N/A#include <sys/poll.h>
2N/A#include <netconfig.h>
2N/A#include <syslog.h>
2N/A#include <stdlib.h>
2N/A#include <unistd.h>
2N/A#include <string.h>
2N/A#include <limits.h>
2N/A
2N/Aextern bool_t __svc_get_door_cred();
2N/Aextern bool_t __rpc_get_local_cred();
2N/A
2N/ASVCXPRT **svc_xports;
2N/Astatic int nsvc_xports; /* total number of svc_xports allocated */
2N/A
2N/AXDR **svc_xdrs; /* common XDR receive area */
2N/Aint nsvc_xdrs; /* total number of svc_xdrs allocated */
2N/A
2N/Aint __rpc_use_pollfd_done; /* to unlimit the number of connections */
2N/A
2N/A#define NULL_SVC ((struct svc_callout *)0)
2N/A
2N/A#define RQCRED_SIZE (NGRPS_LOOPBACK * sizeof (gid_t) + \
2N/A MAX_MACHINE_NAME + 1 + \
2N/A sizeof (struct authsys_parms))
2N/A
2N/A/*
2N/A * The services list
2N/A * Each entry represents a set of procedures (an rpc program).
2N/A * The dispatch routine takes request structs and runs the
2N/A * appropriate procedure.
2N/A */
2N/Astatic struct svc_callout {
2N/A struct svc_callout *sc_next;
2N/A rpcprog_t sc_prog;
2N/A rpcvers_t sc_vers;
2N/A char *sc_netid;
2N/A void (*sc_dispatch)();
2N/A} *svc_head;
2N/Aextern rwlock_t svc_lock;
2N/A
2N/Astatic struct svc_callout *svc_find();
2N/Aint _svc_prog_dispatch();
2N/Avoid svc_getreq_common();
2N/Achar *strdup();
2N/A
2N/Aextern mutex_t svc_door_mutex;
2N/Aextern cond_t svc_door_waitcv;
2N/Aextern int svc_ndoorfds;
2N/Aextern SVCXPRT_LIST *_svc_xprtlist;
2N/Aextern mutex_t xprtlist_lock;
2N/Aextern void __svc_rm_from_xlist();
2N/A
2N/Aextern fd_set _new_svc_fdset;
2N/A
2N/A/*
2N/A * If the allocated array of reactor is too small, this value is used as a
2N/A * margin. This reduces the number of allocations.
2N/A */
2N/A#define USER_FD_INCREMENT 5
2N/A
2N/Astatic void add_pollfd(int fd, short events);
2N/Astatic void remove_pollfd(int fd);
2N/Astatic void __svc_remove_input_of_fd(int fd);
2N/A
2N/A
2N/A/*
2N/A * Data used to handle reactor:
2N/A * - one file descriptor we listen to,
2N/A * - one callback we call if the fd pops,
2N/A * - and a cookie passed as a parameter to the callback.
2N/A *
2N/A * The structure is an array indexed on the file descriptor. Each entry is
2N/A * pointing to the first element of a double-linked list of callback.
2N/A * only one callback may be associated to a couple (fd, event).
2N/A */
2N/A
2N/Astruct _svc_user_fd_head;
2N/A
2N/Atypedef struct {
2N/A struct _svc_user_fd_node *next;
2N/A struct _svc_user_fd_node *previous;
2N/A} _svc_user_link;
2N/A
2N/Atypedef struct _svc_user_fd_node {
2N/A /* The lnk field must be the first field. */
2N/A _svc_user_link lnk;
2N/A svc_input_id_t id;
2N/A int fd;
2N/A unsigned int events;
2N/A svc_callback_t callback;
2N/A void* cookie;
2N/A} _svc_user_fd_node;
2N/A
2N/Atypedef struct _svc_user_fd_head {
2N/A /* The lnk field must be the first field. */
2N/A _svc_user_link lnk;
2N/A unsigned int mask; /* logical OR of all sub-masks */
2N/A} _svc_user_fd_head;
2N/A
2N/A
2N/A/* Define some macros to manage the linked list. */
2N/A#define LIST_ISEMPTY(l) ((_svc_user_fd_node *) &(l.lnk) == l.lnk.next)
2N/A#define LIST_CLR(l) \
2N/A (l.lnk.previous = l.lnk.next = (_svc_user_fd_node *) &(l.lnk))
2N/A
2N/A/* Array of defined reactor - indexed on file descriptor */
2N/Astatic _svc_user_fd_head *svc_userfds = NULL;
2N/A
2N/A/* current size of file descriptor */
2N/Astatic int svc_nuserfds = 0;
2N/A
2N/A/* Mutex to ensure MT safe operations for user fds callbacks. */
2N/Astatic mutex_t svc_userfds_lock = DEFAULTMUTEX;
2N/A
2N/A
2N/A/*
2N/A * This structure is used to have constant time alogrithms. There is an array
2N/A * of this structure as large as svc_nuserfds. When the user is registering a
2N/A * new callback, the address of the created structure is stored in a cell of
2N/A * this array. The address of this cell is the returned unique identifier.
2N/A *
2N/A * On removing, the id is given by the user, then we know if this cell is
2N/A * filled or not (with free). If it is free, we return an error. Otherwise,
2N/A * we can free the structure pointed by fd_node.
2N/A *
2N/A * On insertion, we use the linked list created by (first_free,
2N/A * next_free). In this way with a constant time computation, we can give a
2N/A * correct index to the user.
2N/A */
2N/A
2N/Atypedef struct _svc_management_user_fd {
2N/A bool_t free;
2N/A union {
2N/A svc_input_id_t next_free;
2N/A _svc_user_fd_node *fd_node;
2N/A } data;
2N/A} _svc_management_user_fd;
2N/A
2N/A/* index to the first free elem */
2N/Astatic svc_input_id_t first_free = (svc_input_id_t)-1;
2N/A/* the size of this array is the same as svc_nuserfds */
2N/Astatic _svc_management_user_fd* user_fd_mgt_array = NULL;
2N/A
2N/A/* current size of user_fd_mgt_array */
2N/Astatic int svc_nmgtuserfds = 0;
2N/A
2N/A
2N/A/* Define some macros to access data associated to registration ids. */
2N/A#define node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
2N/A#define is_free_id(id) (user_fd_mgt_array[(int)id].free)
2N/A
2N/A#ifndef POLLSTANDARD
2N/A#define POLLSTANDARD \
2N/A (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
2N/A POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
2N/A#endif
2N/A
2N/A/*
2N/A * To free an Id, we set the cell as free and insert its address in the list
2N/A * of free cell.
2N/A */
2N/A
2N/Astatic void
2N/A_svc_free_id(const svc_input_id_t id)
2N/A{
2N/A assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
2N/A user_fd_mgt_array[(int)id].free = TRUE;
2N/A user_fd_mgt_array[(int)id].data.next_free = first_free;
2N/A first_free = id;
2N/A}
2N/A
2N/A/*
2N/A * To get a free cell, we just have to take it from the free linked list and
2N/A * set the flag to "not free". This function also allocates new memory if
2N/A * necessary
2N/A */
2N/Astatic svc_input_id_t
2N/A_svc_attribute_new_id(_svc_user_fd_node *node)
2N/A{
2N/A int selected_index = (int)first_free;
2N/A assert(node != NULL);
2N/A
2N/A if (selected_index == -1) {
2N/A /* Allocate new entries */
2N/A int L_inOldSize = svc_nmgtuserfds;
2N/A int i;
2N/A
2N/A svc_nmgtuserfds += USER_FD_INCREMENT;
2N/A
2N/A user_fd_mgt_array = (_svc_management_user_fd *)
2N/A realloc(user_fd_mgt_array, svc_nmgtuserfds
2N/A * sizeof (_svc_management_user_fd));
2N/A
2N/A if (user_fd_mgt_array == NULL) {
2N/A syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
2N/A errno = ENOMEM;
2N/A return ((svc_input_id_t)-1);
2N/A }
2N/A
2N/A for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
2N/A _svc_free_id((svc_input_id_t)i);
2N/A selected_index = (int)first_free;
2N/A }
2N/A
2N/A node->id = (svc_input_id_t)selected_index;
2N/A first_free = user_fd_mgt_array[selected_index].data.next_free;
2N/A
2N/A user_fd_mgt_array[selected_index].data.fd_node = node;
2N/A user_fd_mgt_array[selected_index].free = FALSE;
2N/A
2N/A return ((svc_input_id_t)selected_index);
2N/A}
2N/A
2N/A/*
2N/A * Access to a pollfd treatment. Scan all the associated callbacks that have
2N/A * at least one bit in their mask that masks a received event.
2N/A *
2N/A * If event POLLNVAL is received, we check that one callback processes it, if
2N/A * not, then remove the file descriptor from the poll. If there is one, let
2N/A * the user do the work.
2N/A */
2N/Avoid
2N/A__svc_getreq_user(struct pollfd *pfd)
2N/A{
2N/A int fd = pfd->fd;
2N/A short revents = pfd->revents;
2N/A bool_t invalHandled = FALSE;
2N/A _svc_user_fd_node *node;
2N/A
2N/A (void) mutex_lock(&svc_userfds_lock);
2N/A
2N/A if ((fd < 0) || (fd >= svc_nuserfds)) {
2N/A (void) mutex_unlock(&svc_userfds_lock);
2N/A return;
2N/A }
2N/A
2N/A node = svc_userfds[fd].lnk.next;
2N/A
2N/A /* check if at least one mask fits */
2N/A if (0 == (revents & svc_userfds[fd].mask)) {
2N/A (void) mutex_unlock(&svc_userfds_lock);
2N/A return;
2N/A }
2N/A
2N/A while ((svc_userfds[fd].mask != 0) &&
2N/A ((_svc_user_link *)node != &(svc_userfds[fd].lnk))) {
2N/A /*
2N/A * If one of the received events maps the ones the node listens
2N/A * to
2N/A */
2N/A _svc_user_fd_node *next = node->lnk.next;
2N/A
2N/A if (node->callback != NULL) {
2N/A if (node->events & revents) {
2N/A if (revents & POLLNVAL) {
2N/A invalHandled = TRUE;
2N/A }
2N/A
2N/A /*
2N/A * The lock must be released before calling the
2N/A * user function, as this function can call
2N/A * svc_remove_input() for example.
2N/A */
2N/A (void) mutex_unlock(&svc_userfds_lock);
2N/A node->callback(node->id, node->fd,
2N/A node->events & revents, node->cookie);
2N/A /*
2N/A * Do not use the node structure anymore, as it
2N/A * could have been deallocated by the previous
2N/A * callback.
2N/A */
2N/A (void) mutex_lock(&svc_userfds_lock);
2N/A }
2N/A }
2N/A node = next;
2N/A }
2N/A
2N/A if ((revents & POLLNVAL) && !invalHandled)
2N/A __svc_remove_input_of_fd(fd);
2N/A (void) mutex_unlock(&svc_userfds_lock);
2N/A}
2N/A
2N/A
2N/A/*
2N/A * Check if a file descriptor is associated with a user reactor.
2N/A * To do this, just check that the array indexed on fd has a non-void linked
2N/A * list (ie. first element is not NULL)
2N/A */
2N/Abool_t
2N/A__is_a_userfd(int fd)
2N/A{
2N/A /* Checks argument */
2N/A if ((fd < 0) || (fd >= svc_nuserfds))
2N/A return (FALSE);
2N/A return ((svc_userfds[fd].mask == 0x0000)? FALSE:TRUE);
2N/A}
2N/A
2N/A/* free everything concerning user fd */
2N/A/* used in svc_run.c => no static */
2N/A
2N/Avoid
2N/A__destroy_userfd(void)
2N/A{
2N/A int one_fd;
2N/A /* Clean user fd */
2N/A if (svc_userfds != NULL) {
2N/A for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
2N/A _svc_user_fd_node *node;
2N/A
2N/A node = svc_userfds[one_fd].lnk.next;
2N/A while ((_svc_user_link *) node
2N/A != (_svc_user_link *) &(svc_userfds[one_fd])) {
2N/A _svc_free_id(node->id);
2N/A node = node->lnk.next;
2N/A free(node->lnk.previous);
2N/A }
2N/A }
2N/A
2N/A free(user_fd_mgt_array);
2N/A user_fd_mgt_array = NULL;
2N/A first_free = (svc_input_id_t)-1;
2N/A
2N/A free(svc_userfds);
2N/A svc_userfds = NULL;
2N/A svc_nuserfds = 0;
2N/A }
2N/A}
2N/A
2N/A/*
2N/A * Remove all the callback associated with a fd => useful when the fd is
2N/A * closed for instance
2N/A */
2N/Astatic void
2N/A__svc_remove_input_of_fd(int fd)
2N/A{
2N/A _svc_user_fd_node *one_node;
2N/A
2N/A if ((fd < 0) || (fd >= svc_nuserfds))
2N/A return;
2N/A
2N/A one_node = svc_userfds[fd].lnk.next;
2N/A while ((_svc_user_link *) one_node
2N/A != (_svc_user_link *) &(svc_userfds[fd].lnk)) {
2N/A _svc_free_id(one_node->id);
2N/A one_node = one_node->lnk.next;
2N/A free(one_node->lnk.previous);
2N/A }
2N/A
2N/A LIST_CLR(svc_userfds[fd]);
2N/A svc_userfds[fd].mask = 0;
2N/A}
2N/A
2N/A/*
2N/A * Allow user to add an fd in the poll list. If it does not succeed, return
2N/A * -1. Otherwise, return a svc_id
2N/A */
2N/A
2N/Asvc_input_id_t
2N/Asvc_add_input(int user_fd, unsigned int events,
2N/A svc_callback_t user_callback, void *cookie)
2N/A{
2N/A _svc_user_fd_node *new_node;
2N/A
2N/A if (user_fd < 0) {
2N/A errno = EINVAL;
2N/A return ((svc_input_id_t)-1);
2N/A }
2N/A
2N/A if ((events == 0x0000) ||
2N/A (events & ~(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\
2N/A POLLWRBAND|POLLERR|POLLHUP|POLLNVAL))) {
2N/A errno = EINVAL;
2N/A return ((svc_input_id_t)-1);
2N/A }
2N/A
2N/A (void) mutex_lock(&svc_userfds_lock);
2N/A
2N/A if ((user_fd < svc_nuserfds) &&
2N/A (svc_userfds[user_fd].mask & events) != 0) {
2N/A /* Already registrated call-back */
2N/A errno = EEXIST;
2N/A (void) mutex_unlock(&svc_userfds_lock);
2N/A return ((svc_input_id_t)-1);
2N/A }
2N/A
2N/A /* Handle memory allocation. */
2N/A if (user_fd >= svc_nuserfds) {
2N/A int oldSize = svc_nuserfds;
2N/A int i;
2N/A
2N/A svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
2N/A
2N/A svc_userfds = (_svc_user_fd_head *)
2N/A realloc(svc_userfds,
2N/A svc_nuserfds * sizeof (_svc_user_fd_head));
2N/A
2N/A if (svc_userfds == NULL) {
2N/A syslog(LOG_ERR, "svc_add_input: out of memory");
2N/A errno = ENOMEM;
2N/A (void) mutex_unlock(&svc_userfds_lock);
2N/A return ((svc_input_id_t)-1);
2N/A }
2N/A
2N/A for (i = oldSize; i < svc_nuserfds; i++) {
2N/A LIST_CLR(svc_userfds[i]);
2N/A svc_userfds[i].mask = 0;
2N/A }
2N/A }
2N/A
2N/A new_node = malloc(sizeof (_svc_user_fd_node));
2N/A if (new_node == NULL) {
2N/A syslog(LOG_ERR, "svc_add_input: out of memory");
2N/A errno = ENOMEM;
2N/A (void) mutex_unlock(&svc_userfds_lock);
2N/A return ((svc_input_id_t)-1);
2N/A }
2N/A
2N/A /* create a new node */
2N/A new_node->fd = user_fd;
2N/A new_node->events = events;
2N/A new_node->callback = user_callback;
2N/A new_node->cookie = cookie;
2N/A
2N/A (void) _svc_attribute_new_id(new_node);
2N/A
2N/A /* Add the new element at the beginning of the list. */
2N/A if (LIST_ISEMPTY(svc_userfds[user_fd])) {
2N/A svc_userfds[user_fd].lnk.previous = new_node;
2N/A }
2N/A new_node->lnk.next = svc_userfds[user_fd].lnk.next;
2N/A new_node->lnk.previous = (_svc_user_fd_node *)&(svc_userfds[user_fd]);
2N/A
2N/A svc_userfds[user_fd].lnk.next = new_node;
2N/A
2N/A /* refresh global mask for this file desciptor */
2N/A svc_userfds[user_fd].mask |= events;
2N/A
2N/A /* refresh mask for the poll */
2N/A add_pollfd(user_fd, (svc_userfds[user_fd].mask));
2N/A
2N/A (void) mutex_unlock(&svc_userfds_lock);
2N/A return (new_node->id);
2N/A}
2N/A
2N/A
2N/Aint
2N/Asvc_remove_input(svc_input_id_t id)
2N/A{
2N/A _svc_user_fd_node* node;
2N/A _svc_user_fd_node* next;
2N/A _svc_user_fd_node* previous;
2N/A int fd; /* caching optim */
2N/A
2N/A (void) mutex_lock(&svc_userfds_lock);
2N/A
2N/A /* Immediately update data for id management */
2N/A if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
2N/A is_free_id(id)) {
2N/A errno = EINVAL;
2N/A (void) mutex_unlock(&svc_userfds_lock);
2N/A return (-1);
2N/A }
2N/A
2N/A node = node_from_id(id);
2N/A assert(node != NULL);
2N/A
2N/A _svc_free_id(id);
2N/A next = node->lnk.next;
2N/A previous = node->lnk.previous;
2N/A fd = node->fd; /* caching optim */
2N/A
2N/A /* Remove this node from the list. */
2N/A previous->lnk.next = next;
2N/A next->lnk.previous = previous;
2N/A
2N/A /* Remove the node flags from the global mask */
2N/A svc_userfds[fd].mask ^= node->events;
2N/A
2N/A free(node);
2N/A if (svc_userfds[fd].mask == 0) {
2N/A LIST_CLR(svc_userfds[fd]);
2N/A assert(LIST_ISEMPTY(svc_userfds[fd]));
2N/A remove_pollfd(fd);
2N/A }
2N/A /* <=> CLEAN NEEDED TO SHRINK MEMORY USAGE */
2N/A
2N/A (void) mutex_unlock(&svc_userfds_lock);
2N/A return (0);
2N/A}
2N/A
2N/A
2N/A/*
2N/A * Provides default service-side functions for authentication flavors
2N/A * that do not use all the fields in struct svc_auth_ops.
2N/A */
2N/A
2N/A/*ARGSUSED*/
2N/Astatic int
2N/Aauthany_wrap(AUTH *auth, XDR *xdrs, xdrproc_t xfunc, caddr_t xwhere)
2N/A{
2N/A return (*xfunc)(xdrs, xwhere);
2N/A}
2N/A
2N/Astruct svc_auth_ops svc_auth_any_ops = {
2N/A authany_wrap,
2N/A authany_wrap,
2N/A};
2N/A
2N/A/*
2N/A * Return pointer to server authentication structure.
2N/A */
2N/ASVCAUTH *
2N/A__svc_get_svcauth(SVCXPRT *xprt)
2N/A{
2N/A/* LINTED pointer alignment */
2N/A return (&SVC_XP_AUTH(xprt));
2N/A}
2N/A
2N/A/*
2N/A * A callback routine to cleanup after a procedure is executed.
2N/A */
2N/Avoid (*__proc_cleanup_cb)() = NULL;
2N/A
2N/Avoid *
2N/A__svc_set_proc_cleanup_cb(void *cb)
2N/A{
2N/A void *tmp = (void *)__proc_cleanup_cb;
2N/A
2N/A __proc_cleanup_cb = (void (*)())cb;
2N/A return (tmp);
2N/A}
2N/A
2N/A/* *************** SVCXPRT related stuff **************** */
2N/A
2N/A
2N/Astatic int pollfd_shrinking = 1;
2N/A
2N/A
2N/A/*
2N/A * Add fd to svc_pollfd
2N/A */
2N/Astatic void
2N/Aadd_pollfd(int fd, short events)
2N/A{
2N/A if (fd < FD_SETSIZE) {
2N/A FD_SET(fd, &svc_fdset);
2N/A#if !defined(_LP64)
2N/A FD_SET(fd, &_new_svc_fdset);
2N/A#endif
2N/A svc_nfds++;
2N/A svc_nfds_set++;
2N/A if (fd >= svc_max_fd)
2N/A svc_max_fd = fd + 1;
2N/A }
2N/A if (fd >= svc_max_pollfd)
2N/A svc_max_pollfd = fd + 1;
2N/A if (svc_max_pollfd > svc_pollfd_allocd) {
2N/A int i = svc_pollfd_allocd;
2N/A pollfd_t *tmp;
2N/A do {
2N/A svc_pollfd_allocd += POLLFD_EXTEND;
2N/A } while (svc_max_pollfd > svc_pollfd_allocd);
2N/A tmp = realloc(svc_pollfd,
2N/A sizeof (pollfd_t) * svc_pollfd_allocd);
2N/A if (tmp != NULL) {
2N/A svc_pollfd = tmp;
2N/A for (; i < svc_pollfd_allocd; i++)
2N/A POLLFD_CLR(i, tmp);
2N/A } else {
2N/A /*
2N/A * give an error message; undo fdset setting
2N/A * above; reset the pollfd_shrinking flag.
2N/A * because of this poll will not be done
2N/A * on these fds.
2N/A */
2N/A if (fd < FD_SETSIZE) {
2N/A FD_CLR(fd, &svc_fdset);
2N/A#if !defined(_LP64)
2N/A FD_CLR(fd, &_new_svc_fdset);
2N/A#endif
2N/A svc_nfds--;
2N/A svc_nfds_set--;
2N/A if (fd == (svc_max_fd - 1))
2N/A svc_max_fd--;
2N/A }
2N/A if (fd == (svc_max_pollfd - 1))
2N/A svc_max_pollfd--;
2N/A pollfd_shrinking = 0;
2N/A syslog(LOG_ERR, "add_pollfd: out of memory");
2N/A _exit(1);
2N/A }
2N/A }
2N/A svc_pollfd[fd].fd = fd;
2N/A svc_pollfd[fd].events = events;
2N/A svc_npollfds++;
2N/A svc_npollfds_set++;
2N/A}
2N/A
2N/A/*
2N/A * the fd is still active but only the bit in fdset is cleared.
2N/A * do not subtract svc_nfds or svc_npollfds
2N/A */
2N/Avoid
2N/Aclear_pollfd(int fd)
2N/A{
2N/A if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
2N/A FD_CLR(fd, &svc_fdset);
2N/A#if !defined(_LP64)
2N/A FD_CLR(fd, &_new_svc_fdset);
2N/A#endif
2N/A svc_nfds_set--;
2N/A }
2N/A if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
2N/A POLLFD_CLR(fd, svc_pollfd);
2N/A svc_npollfds_set--;
2N/A }
2N/A}
2N/A
2N/A/*
2N/A * sets the bit in fdset for an active fd so that poll() is done for that
2N/A */
2N/Avoid
2N/Aset_pollfd(int fd, short events)
2N/A{
2N/A if (fd < FD_SETSIZE) {
2N/A FD_SET(fd, &svc_fdset);
2N/A#if !defined(_LP64)
2N/A FD_SET(fd, &_new_svc_fdset);
2N/A#endif
2N/A svc_nfds_set++;
2N/A }
2N/A if (fd < svc_pollfd_allocd) {
2N/A svc_pollfd[fd].fd = fd;
2N/A svc_pollfd[fd].events = events;
2N/A svc_npollfds_set++;
2N/A }
2N/A}
2N/A
2N/A/*
2N/A * remove a svc_pollfd entry; it does not shrink the memory
2N/A */
2N/Astatic void
2N/Aremove_pollfd(int fd)
2N/A{
2N/A clear_pollfd(fd);
2N/A if (fd == (svc_max_fd - 1))
2N/A svc_max_fd--;
2N/A svc_nfds--;
2N/A if (fd == (svc_max_pollfd - 1))
2N/A svc_max_pollfd--;
2N/A svc_npollfds--;
2N/A}
2N/A
2N/A/*
2N/A * delete a svc_pollfd entry; it shrinks the memory
2N/A * use remove_pollfd if you do not want to shrink
2N/A */
2N/Astatic void
2N/Adelete_pollfd(int fd)
2N/A{
2N/A remove_pollfd(fd);
2N/A if (pollfd_shrinking && svc_max_pollfd <
2N/A (svc_pollfd_allocd - POLLFD_SHRINK)) {
2N/A do {
2N/A svc_pollfd_allocd -= POLLFD_SHRINK;
2N/A } while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
2N/A svc_pollfd = realloc(svc_pollfd,
2N/A sizeof (pollfd_t) * svc_pollfd_allocd);
2N/A if (svc_pollfd == NULL) {
2N/A syslog(LOG_ERR, "delete_pollfd: out of memory");
2N/A _exit(1);
2N/A }
2N/A }
2N/A}
2N/A
2N/A
2N/A/*
2N/A * Activate a transport handle.
2N/A */
2N/Avoid
2N/Axprt_register(const SVCXPRT *xprt)
2N/A{
2N/A int fd = xprt->xp_fd;
2N/A#ifdef CALLBACK
2N/A extern void (*_svc_getreqset_proc)();
2N/A#endif
2N/A/* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
2N/A
2N/A (void) rw_wrlock(&svc_fd_lock);
2N/A if (svc_xports == NULL) {
2N/A /* allocate some small amount first */
2N/A svc_xports = calloc(FD_INCREMENT, sizeof (SVCXPRT *));
2N/A if (svc_xports == NULL) {
2N/A syslog(LOG_ERR, "xprt_register: out of memory");
2N/A _exit(1);
2N/A }
2N/A nsvc_xports = FD_INCREMENT;
2N/A
2N/A#ifdef CALLBACK
2N/A /*
2N/A * XXX: This code does not keep track of the server state.
2N/A *
2N/A * This provides for callback support. When a client
2N/A * recv's a call from another client on the server fd's,
2N/A * it calls _svc_getreqset_proc() which would return
2N/A * after serving all the server requests. Also look under
2N/A * clnt_dg.c and clnt_vc.c (clnt_call part of it)
2N/A */
2N/A _svc_getreqset_proc = svc_getreq_poll;
2N/A#endif
2N/A }
2N/A
2N/A while (fd >= nsvc_xports) {
2N/A SVCXPRT **tmp_xprts = svc_xports;
2N/A
2N/A /* time to expand svc_xprts */
2N/A tmp_xprts = realloc(svc_xports,
2N/A sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
2N/A if (tmp_xprts == NULL) {
2N/A syslog(LOG_ERR, "xprt_register : out of memory.");
2N/A _exit(1);
2N/A }
2N/A
2N/A svc_xports = tmp_xprts;
2N/A (void) memset(&svc_xports[nsvc_xports], 0,
2N/A sizeof (SVCXPRT *) * FD_INCREMENT);
2N/A nsvc_xports += FD_INCREMENT;
2N/A }
2N/A
2N/A svc_xports[fd] = (SVCXPRT *)xprt;
2N/A
2N/A add_pollfd(fd, MASKVAL);
2N/A
2N/A if (svc_polling) {
2N/A char dummy;
2N/A
2N/A /*
2N/A * This happens only in one of the MT modes.
2N/A * Wake up poller.
2N/A */
2N/A (void) write(svc_pipe[1], &dummy, sizeof (dummy));
2N/A }
2N/A /*
2N/A * If already dispatching door based services, start
2N/A * dispatching TLI based services now.
2N/A */
2N/A (void) mutex_lock(&svc_door_mutex);
2N/A if (svc_ndoorfds > 0)
2N/A (void) cond_signal(&svc_door_waitcv);
2N/A (void) mutex_unlock(&svc_door_mutex);
2N/A
2N/A if (svc_xdrs == NULL) {
2N/A /* allocate initial chunk */
2N/A svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
2N/A if (svc_xdrs != NULL)
2N/A nsvc_xdrs = FD_INCREMENT;
2N/A else {
2N/A syslog(LOG_ERR, "xprt_register : out of memory.");
2N/A _exit(1);
2N/A }
2N/A }
2N/A (void) rw_unlock(&svc_fd_lock);
2N/A}
2N/A
2N/A/*
2N/A * De-activate a transport handle.
2N/A */
2N/Avoid
2N/A__xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
2N/A{
2N/A int fd = xprt->xp_fd;
2N/A
2N/A if (lock_not_held)
2N/A (void) rw_wrlock(&svc_fd_lock);
2N/A if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
2N/A svc_xports[fd] = NULL;
2N/A delete_pollfd(fd);
2N/A }
2N/A if (lock_not_held)
2N/A (void) rw_unlock(&svc_fd_lock);
2N/A __svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
2N/A}
2N/A
2N/Avoid
2N/Axprt_unregister(const SVCXPRT *xprt)
2N/A{
2N/A __xprt_unregister_private(xprt, TRUE);
2N/A}
2N/A
2N/A/* ********************** CALLOUT list related stuff ************* */
2N/A
2N/A/*
2N/A * Add a service program to the callout list.
2N/A * The dispatch routine will be called when a rpc request for this
2N/A * program number comes in.
2N/A */
2N/Abool_t
2N/Asvc_reg(const SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
2N/A void (*dispatch)(), const struct netconfig *nconf)
2N/A{
2N/A struct svc_callout *prev;
2N/A struct svc_callout *s, **s2;
2N/A struct netconfig *tnconf;
2N/A char *netid = NULL;
2N/A int flag = 0;
2N/A
2N/A/* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
2N/A
2N/A if (xprt->xp_netid) {
2N/A netid = strdup(xprt->xp_netid);
2N/A flag = 1;
2N/A } else if (nconf && nconf->nc_netid) {
2N/A netid = strdup(nconf->nc_netid);
2N/A flag = 1;
2N/A } else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
2N/A != NULL) {
2N/A netid = strdup(tnconf->nc_netid);
2N/A flag = 1;
2N/A freenetconfigent(tnconf);
2N/A } /* must have been created with svc_raw_create */
2N/A if ((netid == NULL) && (flag == 1))
2N/A return (FALSE);
2N/A
2N/A (void) rw_wrlock(&svc_lock);
2N/A if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
2N/A if (netid)
2N/A free(netid);
2N/A if (s->sc_dispatch == dispatch)
2N/A goto rpcb_it; /* he is registering another xptr */
2N/A (void) rw_unlock(&svc_lock);
2N/A return (FALSE);
2N/A }
2N/A s = malloc(sizeof (struct svc_callout));
2N/A if (s == NULL) {
2N/A if (netid)
2N/A free(netid);
2N/A (void) rw_unlock(&svc_lock);
2N/A return (FALSE);
2N/A }
2N/A
2N/A s->sc_prog = prog;
2N/A s->sc_vers = vers;
2N/A s->sc_dispatch = dispatch;
2N/A s->sc_netid = netid;
2N/A s->sc_next = NULL;
2N/A
2N/A /*
2N/A * The ordering of transports is such that the most frequently used
2N/A * one appears first. So add the new entry to the end of the list.
2N/A */
2N/A for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
2N/A ;
2N/A *s2 = s;
2N/A
2N/A if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
2N/A if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
2N/A syslog(LOG_ERR, "svc_reg : strdup failed.");
2N/A free(netid);
2N/A free(s);
2N/A *s2 = NULL;
2N/A (void) rw_unlock(&svc_lock);
2N/A return (FALSE);
2N/A }
2N/A
2N/Arpcb_it:
2N/A (void) rw_unlock(&svc_lock);
2N/A
2N/A /* now register the information with the local binder service */
2N/A if (nconf)
2N/A return (rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr));
2N/A return (TRUE);
2N/A /*NOTREACHED*/
2N/A}
2N/A
2N/A/*
2N/A * Remove a service program from the callout list.
2N/A */
2N/Avoid
2N/Asvc_unreg(const rpcprog_t prog, const rpcvers_t vers)
2N/A{
2N/A struct svc_callout *prev;
2N/A struct svc_callout *s;
2N/A
2N/A /* unregister the information anyway */
2N/A (void) rpcb_unset(prog, vers, NULL);
2N/A
2N/A (void) rw_wrlock(&svc_lock);
2N/A while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
2N/A if (prev == NULL_SVC) {
2N/A svc_head = s->sc_next;
2N/A } else {
2N/A prev->sc_next = s->sc_next;
2N/A }
2N/A s->sc_next = NULL_SVC;
2N/A if (s->sc_netid)
2N/A free(s->sc_netid);
2N/A free(s);
2N/A }
2N/A (void) rw_unlock(&svc_lock);
2N/A}
2N/A
2N/A#ifdef PORTMAP
2N/A/*
2N/A * Add a service program to the callout list.
2N/A * The dispatch routine will be called when a rpc request for this
2N/A * program number comes in.
2N/A * For version 2 portmappers.
2N/A */
2N/Abool_t
2N/Asvc_register(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
2N/A void (*dispatch)(), int protocol)
2N/A{
2N/A struct svc_callout *prev;
2N/A struct svc_callout *s;
2N/A struct netconfig *nconf;
2N/A char *netid = NULL;
2N/A int flag = 0;
2N/A
2N/A if (xprt->xp_netid) {
2N/A netid = strdup(xprt->xp_netid);
2N/A flag = 1;
2N/A } else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
2N/A __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
2N/A /* fill in missing netid field in SVCXPRT */
2N/A netid = strdup(nconf->nc_netid);
2N/A flag = 1;
2N/A freenetconfigent(nconf);
2N/A } /* must be svc_raw_create */
2N/A
2N/A if ((netid == NULL) && (flag == 1))
2N/A return (FALSE);
2N/A
2N/A (void) rw_wrlock(&svc_lock);
2N/A if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
2N/A if (netid)
2N/A free(netid);
2N/A if (s->sc_dispatch == dispatch)
2N/A goto pmap_it; /* he is registering another xptr */
2N/A (void) rw_unlock(&svc_lock);
2N/A return (FALSE);
2N/A }
2N/A s = malloc(sizeof (struct svc_callout));
2N/A if (s == (struct svc_callout *)0) {
2N/A if (netid)
2N/A free(netid);
2N/A (void) rw_unlock(&svc_lock);
2N/A return (FALSE);
2N/A }
2N/A s->sc_prog = prog;
2N/A s->sc_vers = vers;
2N/A s->sc_dispatch = dispatch;
2N/A s->sc_netid = netid;
2N/A s->sc_next = svc_head;
2N/A svc_head = s;
2N/A
2N/A if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
2N/A if ((xprt->xp_netid = strdup(netid)) == NULL) {
2N/A syslog(LOG_ERR, "svc_register : strdup failed.");
2N/A free(netid);
2N/A svc_head = s->sc_next;
2N/A free(s);
2N/A (void) rw_unlock(&svc_lock);
2N/A return (FALSE);
2N/A }
2N/A
2N/Apmap_it:
2N/A (void) rw_unlock(&svc_lock);
2N/A /* now register the information with the local binder service */
2N/A if (protocol)
2N/A return (pmap_set(prog, vers, protocol, xprt->xp_port));
2N/A return (TRUE);
2N/A}
2N/A
2N/A/*
2N/A * Remove a service program from the callout list.
2N/A * For version 2 portmappers.
2N/A */
2N/Avoid
2N/Asvc_unregister(rpcprog_t prog, rpcvers_t vers)
2N/A{
2N/A struct svc_callout *prev;
2N/A struct svc_callout *s;
2N/A
2N/A (void) rw_wrlock(&svc_lock);
2N/A while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
2N/A if (prev == NULL_SVC) {
2N/A svc_head = s->sc_next;
2N/A } else {
2N/A prev->sc_next = s->sc_next;
2N/A }
2N/A s->sc_next = NULL_SVC;
2N/A if (s->sc_netid)
2N/A free(s->sc_netid);
2N/A free(s);
2N/A /* unregister the information with the local binder service */
2N/A (void) pmap_unset(prog, vers);
2N/A }
2N/A (void) rw_unlock(&svc_lock);
2N/A}
2N/A#endif /* PORTMAP */
2N/A
2N/A/*
2N/A * Search the callout list for a program number, return the callout
2N/A * struct.
2N/A * Also check for transport as well. Many routines such as svc_unreg
2N/A * dont give any corresponding transport, so dont check for transport if
2N/A * netid == NULL
2N/A */
2N/Astatic struct svc_callout *
2N/Asvc_find(rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev, char *netid)
2N/A{
2N/A struct svc_callout *s, *p;
2N/A
2N/A/* WRITE LOCK HELD ON ENTRY: svc_lock */
2N/A
2N/A/* assert(RW_WRITE_HELD(&svc_lock)); */
2N/A p = NULL_SVC;
2N/A for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
2N/A if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
2N/A ((netid == NULL) || (s->sc_netid == NULL) ||
2N/A (strcmp(netid, s->sc_netid) == 0)))
2N/A break;
2N/A p = s;
2N/A }
2N/A *prev = p;
2N/A return (s);
2N/A}
2N/A
2N/A
2N/A/* ******************* REPLY GENERATION ROUTINES ************ */
2N/A
2N/A/*
2N/A * Send a reply to an rpc request
2N/A */
2N/Abool_t
2N/Asvc_sendreply(const SVCXPRT *xprt, const xdrproc_t xdr_results,
2N/A const caddr_t xdr_location)
2N/A{
2N/A struct rpc_msg rply;
2N/A
2N/A rply.rm_direction = REPLY;
2N/A rply.rm_reply.rp_stat = MSG_ACCEPTED;
2N/A rply.acpted_rply.ar_verf = xprt->xp_verf;
2N/A rply.acpted_rply.ar_stat = SUCCESS;
2N/A rply.acpted_rply.ar_results.where = xdr_location;
2N/A rply.acpted_rply.ar_results.proc = xdr_results;
2N/A return (SVC_REPLY((SVCXPRT *)xprt, &rply));
2N/A}
2N/A
2N/A/*
2N/A * No procedure error reply
2N/A */
2N/Avoid
2N/Asvcerr_noproc(const SVCXPRT *xprt)
2N/A{
2N/A struct rpc_msg rply;
2N/A
2N/A rply.rm_direction = REPLY;
2N/A rply.rm_reply.rp_stat = MSG_ACCEPTED;
2N/A rply.acpted_rply.ar_verf = xprt->xp_verf;
2N/A rply.acpted_rply.ar_stat = PROC_UNAVAIL;
2N/A SVC_REPLY((SVCXPRT *)xprt, &rply);
2N/A}
2N/A
2N/A/*
2N/A * Can't decode args error reply
2N/A */
2N/Avoid
2N/Asvcerr_decode(const SVCXPRT *xprt)
2N/A{
2N/A struct rpc_msg rply;
2N/A
2N/A rply.rm_direction = REPLY;
2N/A rply.rm_reply.rp_stat = MSG_ACCEPTED;
2N/A rply.acpted_rply.ar_verf = xprt->xp_verf;
2N/A rply.acpted_rply.ar_stat = GARBAGE_ARGS;
2N/A SVC_REPLY((SVCXPRT *)xprt, &rply);
2N/A}
2N/A
2N/A/*
2N/A * Some system error
2N/A */
2N/Avoid
2N/Asvcerr_systemerr(const SVCXPRT *xprt)
2N/A{
2N/A struct rpc_msg rply;
2N/A
2N/A rply.rm_direction = REPLY;
2N/A rply.rm_reply.rp_stat = MSG_ACCEPTED;
2N/A rply.acpted_rply.ar_verf = xprt->xp_verf;
2N/A rply.acpted_rply.ar_stat = SYSTEM_ERR;
2N/A SVC_REPLY((SVCXPRT *)xprt, &rply);
2N/A}
2N/A
2N/A/*
2N/A * Tell RPC package to not complain about version errors to the client. This
2N/A * is useful when revving broadcast protocols that sit on a fixed address.
2N/A * There is really one (or should be only one) example of this kind of
2N/A * protocol: the portmapper (or rpc binder).
2N/A */
2N/Avoid
2N/A__svc_versquiet_on(const SVCXPRT *xprt)
2N/A{
2N/A/* LINTED pointer alignment */
2N/A svc_flags(xprt) |= SVC_VERSQUIET;
2N/A}
2N/A
/*
 * __svc_versquiet_off() - clear SVC_VERSQUIET, re-enabling version
 * mismatch replies on this transport.
 */
void
__svc_versquiet_off(const SVCXPRT *xprt)
{
/* LINTED pointer alignment */
	svc_flags(xprt) &= ~SVC_VERSQUIET;
}
2N/A
/*
 * svc_versquiet() - public alias for __svc_versquiet_on().
 */
void
svc_versquiet(const SVCXPRT *xprt)
{
	__svc_versquiet_on(xprt);
}
2N/A
/*
 * __svc_versquiet_get() - return non-zero if version-quiet mode is set
 * on this transport, zero otherwise.
 */
int
__svc_versquiet_get(const SVCXPRT *xprt)
{
/* LINTED pointer alignment */
	return (svc_flags(xprt) & SVC_VERSQUIET);
}
2N/A
2N/A/*
2N/A * Authentication error reply
2N/A */
2N/Avoid
2N/Asvcerr_auth(const SVCXPRT *xprt, const enum auth_stat why)
2N/A{
2N/A struct rpc_msg rply;
2N/A
2N/A rply.rm_direction = REPLY;
2N/A rply.rm_reply.rp_stat = MSG_DENIED;
2N/A rply.rjcted_rply.rj_stat = AUTH_ERROR;
2N/A rply.rjcted_rply.rj_why = why;
2N/A SVC_REPLY((SVCXPRT *)xprt, &rply);
2N/A}
2N/A
/*
 * svcerr_weakauth() - convenience wrapper around svcerr_auth() that
 * rejects the call with the AUTH_TOOWEAK reason.
 */
void
svcerr_weakauth(const SVCXPRT *xprt)
{
	svcerr_auth(xprt, AUTH_TOOWEAK);
}
2N/A
2N/A/*
2N/A * Program unavailable error reply
2N/A */
2N/Avoid
2N/Asvcerr_noprog(const SVCXPRT *xprt)
2N/A{
2N/A struct rpc_msg rply;
2N/A
2N/A rply.rm_direction = REPLY;
2N/A rply.rm_reply.rp_stat = MSG_ACCEPTED;
2N/A rply.acpted_rply.ar_verf = xprt->xp_verf;
2N/A rply.acpted_rply.ar_stat = PROG_UNAVAIL;
2N/A SVC_REPLY((SVCXPRT *)xprt, &rply);
2N/A}
2N/A
2N/A/*
2N/A * Program version mismatch error reply
2N/A */
2N/Avoid
2N/Asvcerr_progvers(const SVCXPRT *xprt, const rpcvers_t low_vers,
2N/A const rpcvers_t high_vers)
2N/A{
2N/A struct rpc_msg rply;
2N/A
2N/A rply.rm_direction = REPLY;
2N/A rply.rm_reply.rp_stat = MSG_ACCEPTED;
2N/A rply.acpted_rply.ar_verf = xprt->xp_verf;
2N/A rply.acpted_rply.ar_stat = PROG_MISMATCH;
2N/A rply.acpted_rply.ar_vers.low = low_vers;
2N/A rply.acpted_rply.ar_vers.high = high_vers;
2N/A SVC_REPLY((SVCXPRT *)xprt, &rply);
2N/A}
2N/A
2N/A/* ******************* SERVER INPUT STUFF ******************* */
2N/A
2N/A/*
2N/A * Get server side input from some transport.
2N/A *
2N/A * Statement of authentication parameters management:
2N/A * This function owns and manages all authentication parameters, specifically
2N/A * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
2N/A * the "cooked" credentials (rqst->rq_clntcred).
 * However, this function does not know the structure of the cooked
 * credentials, so it makes the following assumptions:
2N/A * a) the structure is contiguous (no pointers), and
2N/A * b) the cred structure size does not exceed RQCRED_SIZE bytes.
2N/A * In all events, all three parameters are freed upon exit from this routine.
 * The storage is trivially managed on the call stack in user land, but
 * is allocated with malloc() in kernel land.
2N/A */
2N/A
/*
 * svc_getreq() - historical entry point taking a bit mask of ready
 * descriptors.  Only fds that fit in the first word of an fd_set
 * (fds 0-31) can be expressed; the mask is converted to an fd_set and
 * forwarded to svc_getreqset().
 */
void
svc_getreq(int rdfds)
{
	fd_set readfds;

	FD_ZERO(&readfds);
	readfds.fds_bits[0] = rdfds;	/* only the first mask word is set */
	svc_getreqset(&readfds);
}
2N/A
2N/Avoid
2N/Asvc_getreqset(fd_set *readfds)
2N/A{
2N/A int i;
2N/A
2N/A for (i = 0; i < svc_max_fd; i++) {
2N/A /* fd has input waiting */
2N/A if (FD_ISSET(i, readfds))
2N/A svc_getreq_common(i);
2N/A }
2N/A}
2N/A
/*
 * svc_getreq_poll() - service every pfdp[] entry that poll() flagged.
 * Scanning stops after pollretval flagged entries have been handled, so
 * trailing quiescent entries are never examined; assumes pfdp[] really
 * contains at least pollretval entries with non-zero revents.
 */
void
svc_getreq_poll(struct pollfd *pfdp, const int pollretval)
{
	int i;
	int fds_found;

	for (i = fds_found = 0; fds_found < pollretval; i++) {
		struct pollfd *p = &pfdp[i];

		if (p->revents) {
			/* fd has input waiting */
			fds_found++;
			/*
			 * We assume that this function is only called
			 * via someone select()ing from svc_fdset or
			 * poll()ing from svc_pollset[]. Thus it's safe
			 * to handle the POLLNVAL event by simply turning
			 * the corresponding bit off in svc_fdset. The
			 * svc_pollset[] array is derived from svc_fdset
			 * and so will also be updated eventually.
			 *
			 * XXX Should we do an xprt_unregister() instead?
			 */
			/* Handle user callback */
			if (__is_a_userfd(p->fd) == TRUE) {
				/* read lock: callback only reads fd state */
				(void) rw_rdlock(&svc_fd_lock);
				__svc_getreq_user(p);
				(void) rw_unlock(&svc_fd_lock);
			} else {
				if (p->revents & POLLNVAL) {
					/* stale fd: write lock to drop it */
					(void) rw_wrlock(&svc_fd_lock);
					remove_pollfd(p->fd); /* XXX */
					(void) rw_unlock(&svc_fd_lock);
				} else {
					svc_getreq_common(p->fd);
				}
			}
		}
	}
}
2N/A
/*
 * svc_getreq_common() - pull one or more requests (batched calls) off
 * the transport registered for fd and run each through
 * _svc_prog_dispatch().  User-registered fds are routed to the user
 * callback instead.  The transport is destroyed here if it reports
 * XPRT_DIED after a receive.
 */
void
svc_getreq_common(const int fd)
{
	SVCXPRT *xprt;
	enum xprt_stat stat;
	struct rpc_msg *msg;
	struct svc_req *r;
	char *cred_area;

	(void) rw_rdlock(&svc_fd_lock);

	/* HANDLE USER CALLBACK */
	if (__is_a_userfd(fd) == TRUE) {
		struct pollfd virtual_fd;

		/* synthesize a pollfd with every event bit set */
		virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
		virtual_fd.fd = fd;
		__svc_getreq_user(&virtual_fd);
		(void) rw_unlock(&svc_fd_lock);
		return;
	}

	/*
	 * The transport associated with this fd could have been
	 * removed from svc_timeout_nonblock_xprt_and_LRU, for instance.
	 * This can happen if two or more fds get read events and are
	 * passed to svc_getreq_poll/set, the first fd is serviced by
	 * the dispatch routine and cleans up any dead transports. If
	 * one of the dead transports removed is the other fd that
	 * had a read event then svc_getreq_common() will be called with no
	 * xprt associated with the fd that had the original read event.
	 */
	if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
		(void) rw_unlock(&svc_fd_lock);
		return;
	}
	(void) rw_unlock(&svc_fd_lock);
/* LINTED pointer alignment */
	msg = SVCEXT(xprt)->msg;
/* LINTED pointer alignment */
	r = SVCEXT(xprt)->req;
/* LINTED pointer alignment */
	cred_area = SVCEXT(xprt)->cred_area;
	/* raw cred, raw verf and cooked cred share one scratch buffer */
	msg->rm_call.cb_cred.oa_base = cred_area;
	msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
	r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);

	/* receive msgs from xprtprt (support batch calls) */
	do {
		bool_t dispatch;

		if (dispatch = SVC_RECV(xprt, msg))
			(void) _svc_prog_dispatch(xprt, msg, r);
		/*
		 * Check if the xprt has been disconnected in a recursive call
		 * in the service dispatch routine. If so, then break
		 */
		(void) rw_rdlock(&svc_fd_lock);
		if (xprt != svc_xports[fd]) {
			(void) rw_unlock(&svc_fd_lock);
			break;
		}
		(void) rw_unlock(&svc_fd_lock);

		/*
		 * Call cleanup procedure if set.
		 */
		if (__proc_cleanup_cb != NULL && dispatch)
			(*__proc_cleanup_cb)(xprt);

		if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
			SVC_DESTROY(xprt);
			break;
		}
	} while (stat == XPRT_MOREREQS);
}
2N/A
/*
 * _svc_prog_dispatch() - authenticate the decoded call message and
 * route it to the registered (prog, vers, netid) service routine.
 * Returns 1 if a dispatch routine was invoked; 0 if the call was
 * rejected (auth failure, unknown program, version mismatch) or the
 * authentication layer requested no dispatch.
 */
int
_svc_prog_dispatch(SVCXPRT *xprt, struct rpc_msg *msg, struct svc_req *r)
{
	struct svc_callout *s;
	enum auth_stat why;
	int prog_found;
	rpcvers_t low_vers;
	rpcvers_t high_vers;
	void (*disp_fn)();

	/* fill in the request from the decoded call body */
	r->rq_xprt = xprt;
	r->rq_prog = msg->rm_call.cb_prog;
	r->rq_vers = msg->rm_call.cb_vers;
	r->rq_proc = msg->rm_call.cb_proc;
	r->rq_cred = msg->rm_call.cb_cred;
/* LINTED pointer alignment */
	SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
/* LINTED pointer alignment */
	SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;

	/* first authenticate the message */
	/* Check for null flavor and bypass these calls if possible */

	if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
		r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
		r->rq_xprt->xp_verf.oa_length = 0;
	} else {
		bool_t no_dispatch;

		if ((why = __gss_authenticate(r, msg,
		    &no_dispatch)) != AUTH_OK) {
			svcerr_auth(xprt, why);
			return (0);
		}
		if (no_dispatch)
			return (0);
	}
	/* match message with a registered service */
	prog_found = FALSE;
	/* start at the maximum rpcvers_t value (unsigned wrap of -1) */
	low_vers = (rpcvers_t)(0 - 1);
	high_vers = 0;
	(void) rw_rdlock(&svc_lock);
	for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
		if (s->sc_prog == r->rq_prog) {
			prog_found = TRUE;
			if (s->sc_vers == r->rq_vers) {
				if ((xprt->xp_netid == NULL) ||
				    (s->sc_netid == NULL) ||
				    (strcmp(xprt->xp_netid,
				    s->sc_netid) == 0)) {
					/*
					 * copy the function pointer so the
					 * lock can be dropped before the
					 * (possibly long) dispatch call
					 */
					disp_fn = (*s->sc_dispatch);
					(void) rw_unlock(&svc_lock);
					disp_fn(r, xprt);
					return (1);
				}
				prog_found = FALSE;
			}
			/* track served version range for PROG_MISMATCH */
			if (s->sc_vers < low_vers)
				low_vers = s->sc_vers;
			if (s->sc_vers > high_vers)
				high_vers = s->sc_vers;
		} /* found correct program */
	}
	(void) rw_unlock(&svc_lock);

	/*
	 * if we got here, the program or version
	 * is not served ...
	 */
	if (prog_found) {
/* LINTED pointer alignment */
		if (!version_keepquiet(xprt))
			svcerr_progvers(xprt, low_vers, high_vers);
	} else {
		svcerr_noprog(xprt);
	}
	return (0);
}
2N/A
2N/A/* ******************* SVCXPRT allocation and deallocation ***************** */
2N/A
2N/A/*
2N/A * svc_xprt_alloc() - allocate a service transport handle
2N/A */
2N/ASVCXPRT *
2N/Asvc_xprt_alloc(void)
2N/A{
2N/A SVCXPRT *xprt = NULL;
2N/A SVCXPRT_EXT *xt = NULL;
2N/A SVCXPRT_LIST *xlist = NULL;
2N/A struct rpc_msg *msg = NULL;
2N/A struct svc_req *req = NULL;
2N/A char *cred_area = NULL;
2N/A
2N/A if ((xprt = calloc(1, sizeof (SVCXPRT))) == NULL)
2N/A goto err_exit;
2N/A
2N/A if ((xt = calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
2N/A goto err_exit;
2N/A xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
2N/A
2N/A if ((xlist = calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
2N/A goto err_exit;
2N/A xt->my_xlist = xlist;
2N/A xlist->xprt = xprt;
2N/A
2N/A if ((msg = malloc(sizeof (struct rpc_msg))) == NULL)
2N/A goto err_exit;
2N/A xt->msg = msg;
2N/A
2N/A if ((req = malloc(sizeof (struct svc_req))) == NULL)
2N/A goto err_exit;
2N/A xt->req = req;
2N/A
2N/A if ((cred_area = malloc(2*MAX_AUTH_BYTES + RQCRED_SIZE)) == NULL)
2N/A goto err_exit;
2N/A xt->cred_area = cred_area;
2N/A
2N/A/* LINTED pointer alignment */
2N/A (void) mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
2N/A return (xprt);
2N/A
2N/Aerr_exit:
2N/A svc_xprt_free(xprt);
2N/A return (NULL);
2N/A}
2N/A
2N/A
2N/A/*
2N/A * svc_xprt_free() - free a service handle
2N/A */
2N/Avoid
2N/Asvc_xprt_free(SVCXPRT *xprt)
2N/A{
2N/A/* LINTED pointer alignment */
2N/A SVCXPRT_EXT *xt = xprt ? SVCEXT(xprt) : NULL;
2N/A SVCXPRT_LIST *my_xlist = xt ? xt->my_xlist: NULL;
2N/A struct rpc_msg *msg = xt ? xt->msg : NULL;
2N/A struct svc_req *req = xt ? xt->req : NULL;
2N/A char *cred_area = xt ? xt->cred_area : NULL;
2N/A
2N/A if (xprt)
2N/A free(xprt);
2N/A if (xt)
2N/A free(xt);
2N/A if (my_xlist)
2N/A free(my_xlist);
2N/A if (msg)
2N/A free(msg);
2N/A if (req)
2N/A free(req);
2N/A if (cred_area)
2N/A free(cred_area);
2N/A}
2N/A
2N/A
2N/A/*
2N/A * svc_xprt_destroy() - free parent and child xprt list
2N/A */
2N/Avoid
2N/Asvc_xprt_destroy(SVCXPRT *xprt)
2N/A{
2N/A SVCXPRT_LIST *xlist, *xnext = NULL;
2N/A int type;
2N/A
2N/A/* LINTED pointer alignment */
2N/A if (SVCEXT(xprt)->parent)
2N/A/* LINTED pointer alignment */
2N/A xprt = SVCEXT(xprt)->parent;
2N/A/* LINTED pointer alignment */
2N/A type = svc_type(xprt);
2N/A/* LINTED pointer alignment */
2N/A for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
2N/A xnext = xlist->next;
2N/A xprt = xlist->xprt;
2N/A switch (type) {
2N/A case SVC_DGRAM:
2N/A svc_dg_xprtfree(xprt);
2N/A break;
2N/A case SVC_RENDEZVOUS:
2N/A svc_vc_xprtfree(xprt);
2N/A break;
2N/A case SVC_CONNECTION:
2N/A svc_fd_xprtfree(xprt);
2N/A break;
2N/A case SVC_DOOR:
2N/A svc_door_xprtfree(xprt);
2N/A break;
2N/A }
2N/A }
2N/A}
2N/A
2N/A
2N/A/*
2N/A * svc_copy() - make a copy of parent
2N/A */
2N/ASVCXPRT *
2N/Asvc_copy(SVCXPRT *xprt)
2N/A{
2N/A/* LINTED pointer alignment */
2N/A switch (svc_type(xprt)) {
2N/A case SVC_DGRAM:
2N/A return (svc_dg_xprtcopy(xprt));
2N/A case SVC_RENDEZVOUS:
2N/A return (svc_vc_xprtcopy(xprt));
2N/A case SVC_CONNECTION:
2N/A return (svc_fd_xprtcopy(xprt));
2N/A }
2N/A return (NULL);
2N/A}
2N/A
2N/A
2N/A/*
2N/A * _svc_destroy_private() - private SVC_DESTROY interface
2N/A */
2N/Avoid
2N/A_svc_destroy_private(SVCXPRT *xprt)
2N/A{
2N/A/* LINTED pointer alignment */
2N/A switch (svc_type(xprt)) {
2N/A case SVC_DGRAM:
2N/A _svc_dg_destroy_private(xprt);
2N/A break;
2N/A case SVC_RENDEZVOUS:
2N/A case SVC_CONNECTION:
2N/A _svc_vc_destroy_private(xprt, TRUE);
2N/A break;
2N/A }
2N/A}
2N/A
2N/A/*
2N/A * svc_get_local_cred() - fetch local user credentials. This always
2N/A * works over doors based transports. For local transports, this
2N/A * does not yield correct results unless the __rpc_negotiate_uid()
2N/A * call has been invoked to enable this feature.
2N/A */
2N/Abool_t
2N/Asvc_get_local_cred(SVCXPRT *xprt, svc_local_cred_t *lcred)
2N/A{
2N/A /* LINTED pointer alignment */
2N/A if (svc_type(xprt) == SVC_DOOR)
2N/A return (__svc_get_door_cred(xprt, lcred));
2N/A return (__rpc_get_local_cred(xprt, lcred));
2N/A}
2N/A
2N/A
2N/A/* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
2N/A
2N/A/*
2N/A * the dup cacheing routines below provide a cache of received
2N/A * transactions. rpc service routines can use this to detect
2N/A * retransmissions and re-send a non-failure response. Uses a
2N/A * lru scheme to find entries to get rid of entries in the cache,
2N/A * though only DUP_DONE entries are placed on the lru list.
2N/A * the routines were written towards development of a generic
2N/A * SVC_DUP() interface, which can be expanded to encompass the
2N/A * svc_dg_enablecache() routines as well. the cache is currently
2N/A * private to the automounter.
2N/A */
2N/A
2N/A
/* dupcache header contains xprt specific information */
struct dupcache {
	rwlock_t dc_lock;		/* protects all fields below */
	time_t dc_time;			/* entry time-to-live, seconds */
	int dc_buckets;			/* number of hash buckets */
	int dc_maxsz;			/* size bound (DUPCACHE_MAXSZ) */
	int dc_basis;			/* retirement policy (DUPCACHE_*) */
	struct dupreq *dc_mru;		/* head of circular LRU list */
	struct dupreq **dc_hashtbl;	/* xid-hashed chains of entries */
};
2N/A
2N/A/*
2N/A * private duplicate cache request routines
2N/A */
2N/Astatic int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
2N/A struct dupcache *, uint32_t, uint32_t);
2N/Astatic struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
2N/Astatic int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
2N/A struct dupcache *, uint32_t, uint32_t, time_t);
2N/Astatic int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
2N/A struct dupcache *, uint32_t, uint32_t);
2N/A#ifdef DUP_DEBUG
2N/Astatic void __svc_dupcache_debug(struct dupcache *);
2N/A#endif /* DUP_DEBUG */
2N/A
2N/A/* default parameters for the dupcache */
2N/A#define DUPCACHE_BUCKETS 257
2N/A#define DUPCACHE_TIME 900
2N/A#define DUPCACHE_MAXSZ INT_MAX
2N/A
2N/A/*
2N/A * __svc_dupcache_init(void *condition, int basis, char *xprt_cache)
2N/A * initialize the duprequest cache and assign it to the xprt_cache
2N/A * Use default values depending on the cache condition and basis.
2N/A * return TRUE on success and FALSE on failure
2N/A */
2N/Abool_t
2N/A__svc_dupcache_init(void *condition, int basis, char **xprt_cache)
2N/A{
2N/A static mutex_t initdc_lock = DEFAULTMUTEX;
2N/A int i;
2N/A struct dupcache *dc;
2N/A
2N/A (void) mutex_lock(&initdc_lock);
2N/A if (*xprt_cache != NULL) { /* do only once per xprt */
2N/A (void) mutex_unlock(&initdc_lock);
2N/A syslog(LOG_ERR,
2N/A "__svc_dupcache_init: multiply defined dup cache");
2N/A return (FALSE);
2N/A }
2N/A
2N/A switch (basis) {
2N/A case DUPCACHE_FIXEDTIME:
2N/A dc = malloc(sizeof (struct dupcache));
2N/A if (dc == NULL) {
2N/A (void) mutex_unlock(&initdc_lock);
2N/A syslog(LOG_ERR,
2N/A "__svc_dupcache_init: memory alloc failed");
2N/A return (FALSE);
2N/A }
2N/A (void) rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
2N/A if (condition != NULL)
2N/A dc->dc_time = *((time_t *)condition);
2N/A else
2N/A dc->dc_time = DUPCACHE_TIME;
2N/A dc->dc_buckets = DUPCACHE_BUCKETS;
2N/A dc->dc_maxsz = DUPCACHE_MAXSZ;
2N/A dc->dc_basis = basis;
2N/A dc->dc_mru = NULL;
2N/A dc->dc_hashtbl = malloc(dc->dc_buckets *
2N/A sizeof (struct dupreq *));
2N/A if (dc->dc_hashtbl == NULL) {
2N/A free(dc);
2N/A (void) mutex_unlock(&initdc_lock);
2N/A syslog(LOG_ERR,
2N/A "__svc_dupcache_init: memory alloc failed");
2N/A return (FALSE);
2N/A }
2N/A for (i = 0; i < DUPCACHE_BUCKETS; i++)
2N/A dc->dc_hashtbl[i] = NULL;
2N/A *xprt_cache = (char *)dc;
2N/A break;
2N/A default:
2N/A (void) mutex_unlock(&initdc_lock);
2N/A syslog(LOG_ERR,
2N/A "__svc_dupcache_init: undefined dup cache basis");
2N/A return (FALSE);
2N/A }
2N/A
2N/A (void) mutex_unlock(&initdc_lock);
2N/A
2N/A return (TRUE);
2N/A}
2N/A
/*
 * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
 *	char *xprt_cache)
 * searches the request cache. Creates an entry and returns DUP_NEW if
 * the request is not found in the cache. If it is found, then it
 * returns the state of the request (in progress, drop, or done) and
 * also allocates, and passes back results to the user (if any) in
 * resp_buf, and its length in resp_bufsz. DUP_ERROR is returned on error.
 */
int
__svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
	char *xprt_cache)
{
	uint32_t drxid, drhash;
	int rc;
	struct dupreq *dr = NULL;
	time_t timenow = time(NULL);

	/* LINTED pointer alignment */
	struct dupcache *dc = (struct dupcache *)xprt_cache;

	if (dc == NULL) {
		syslog(LOG_ERR, "__svc_dup: undefined cache");
		return (DUP_ERROR);
	}

	/* get the xid of the request */
	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
		syslog(LOG_ERR, "__svc_dup: xid error");
		return (DUP_ERROR);
	}
	drhash = drxid % dc->dc_buckets;

	/* cache hit: return the recorded state (and results, if any) */
	if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
	    drhash)) != DUP_NEW)
		return (rc);

	/* cache miss: obtain a fresh (possibly recycled) entry ... */
	if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
		return (DUP_ERROR);

	/* ... and record this request as DUP_INPROGRESS */
	if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
	    == DUP_ERROR)
		return (rc);

	return (DUP_NEW);
}
2N/A
2N/A
2N/A
/*
 * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
 *	uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
 *	uint32_t drhash)
 * Checks to see whether an entry already exists in the cache. If it does
 * copy back into the resp_buf, if appropriate. Return the status of
 * the request, or DUP_NEW if the entry is not in the cache
 */
static int
__svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
	struct dupcache *dc, uint32_t drxid, uint32_t drhash)
{
	struct dupreq *dr = NULL;

	(void) rw_rdlock(&(dc->dc_lock));
	dr = dc->dc_hashtbl[drhash];
	while (dr != NULL) {
		/* match on xid, proc/prog/vers and caller's address */
		if (dr->dr_xid == drxid &&
		    dr->dr_proc == req->rq_proc &&
		    dr->dr_prog == req->rq_prog &&
		    dr->dr_vers == req->rq_vers &&
		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
		    memcmp(dr->dr_addr.buf,
		    req->rq_xprt->xp_rtaddr.buf,
		    dr->dr_addr.len) == 0) { /* entry found */
			if (dr->dr_hash != drhash) {
				/* sanity check */
				(void) rw_unlock((&dc->dc_lock));
				/*
				 * NOTE(review): message names
				 * __svc_dupdone but we are in
				 * __svc_dupcache_check.
				 */
				syslog(LOG_ERR,
				    "\n__svc_dupdone: hashing error");
				return (DUP_ERROR);
			}

			/*
			 * return results for requests on lru list, if
			 * appropriate requests must be DUP_DROP or DUP_DONE
			 * to have a result. A NULL buffer in the cache
			 * implies no results were sent during dupdone.
			 * A NULL buffer in the call implies not interested
			 * in results.
			 */
			if (((dr->dr_status == DUP_DONE) ||
			    (dr->dr_status == DUP_DROP)) &&
			    resp_buf != NULL &&
			    dr->dr_resp.buf != NULL) {
				/* hand the caller its own copy */
				*resp_buf = malloc(dr->dr_resp.len);
				if (*resp_buf == NULL) {
					syslog(LOG_ERR,
					"__svc_dupcache_check: malloc failed");
					(void) rw_unlock(&(dc->dc_lock));
					return (DUP_ERROR);
				}
				(void) memset(*resp_buf, 0, dr->dr_resp.len);
				(void) memcpy(*resp_buf, dr->dr_resp.buf,
				    dr->dr_resp.len);
				*resp_bufsz = dr->dr_resp.len;
			} else {
				/* no result */
				if (resp_buf)
					*resp_buf = NULL;
				if (resp_bufsz)
					*resp_bufsz = 0;
			}
			(void) rw_unlock(&(dc->dc_lock));
			return (dr->dr_status);
		}
		dr = dr->dr_chain;
	}
	(void) rw_unlock(&(dc->dc_lock));
	return (DUP_NEW);
}
2N/A
/*
 * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
 * Return a victim dupreq entry to the caller, depending on cache policy.
 * Expired entries on the LRU list are unhashed and freed first; a
 * freshly zeroed entry is then allocated and returned.  Returns NULL
 * on allocation failure, corrupt LRU state, or unknown cache basis.
 */
static struct dupreq *
__svc_dupcache_victim(struct dupcache *dc, time_t timenow)
{
	struct dupreq *dr = NULL;

	switch (dc->dc_basis) {
	case DUPCACHE_FIXEDTIME:
		/*
		 * The hash policy is to free up a bit of the hash
		 * table before allocating a new entry as the victim.
		 * Freeing up the hash table each time should split
		 * the cost of keeping the hash table clean among threads.
		 * Note that only DONE or DROPPED entries are on the lru
		 * list but we do a sanity check anyway.
		 */
		(void) rw_wrlock(&(dc->dc_lock));
		/* dc_mru->dr_next is the least-recently-used entry */
		while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
		    ((timenow - dr->dr_time) > dc->dc_time)) {
			/* clean and then free the entry */
			if (dr->dr_status != DUP_DONE &&
			    dr->dr_status != DUP_DROP) {
				/*
				 * The LRU list can't contain an
				 * entry where the status is other than
				 * DUP_DONE or DUP_DROP.
				 */
				syslog(LOG_ERR,
				    "__svc_dupcache_victim: bad victim");
#ifdef DUP_DEBUG
				/*
				 * Need to hold the reader/writers lock to
				 * print the cache info, since we already
				 * hold the writers lock, we shall continue
				 * calling __svc_dupcache_debug()
				 */
				__svc_dupcache_debug(dc);
#endif /* DUP_DEBUG */
				(void) rw_unlock(&(dc->dc_lock));
				return (NULL);
			}
			/* free buffers */
			if (dr->dr_resp.buf) {
				free(dr->dr_resp.buf);
				dr->dr_resp.buf = NULL;
			}
			if (dr->dr_addr.buf) {
				free(dr->dr_addr.buf);
				dr->dr_addr.buf = NULL;
			}

			/* unhash the entry */
			if (dr->dr_chain)
				dr->dr_chain->dr_prevchain = dr->dr_prevchain;
			if (dr->dr_prevchain)
				dr->dr_prevchain->dr_chain = dr->dr_chain;
			if (dc->dc_hashtbl[dr->dr_hash] == dr)
				dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;

			/* modify the lru pointers */
			if (dc->dc_mru == dr) {
				/* last entry on the list */
				dc->dc_mru = NULL;
			} else {
				dc->dc_mru->dr_next = dr->dr_next;
				dr->dr_next->dr_prev = dc->dc_mru;
			}
			free(dr);
			dr = NULL;
		}
		(void) rw_unlock(&(dc->dc_lock));

		/*
		 * Allocate and return new clean entry as victim
		 */
		if ((dr = malloc(sizeof (*dr))) == NULL) {
			syslog(LOG_ERR,
			    "__svc_dupcache_victim: malloc failed");
			return (NULL);
		}
		(void) memset(dr, 0, sizeof (*dr));
		return (dr);
	default:
		syslog(LOG_ERR,
		    "__svc_dupcache_victim: undefined dup cache_basis");
		return (NULL);
	}
}
2N/A
2N/A/*
2N/A * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
2N/A * struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
2N/A * build new duprequest entry and then insert into the cache
2N/A */
2N/Astatic int
2N/A__svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
2N/A struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
2N/A{
2N/A dr->dr_xid = drxid;
2N/A dr->dr_prog = req->rq_prog;
2N/A dr->dr_vers = req->rq_vers;
2N/A dr->dr_proc = req->rq_proc;
2N/A dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
2N/A dr->dr_addr.len = dr->dr_addr.maxlen;
2N/A if ((dr->dr_addr.buf = malloc(dr->dr_addr.maxlen)) == NULL) {
2N/A syslog(LOG_ERR, "__svc_dupcache_enter: malloc failed");
2N/A free(dr);
2N/A return (DUP_ERROR);
2N/A }
2N/A (void) memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
2N/A (void) memcpy(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
2N/A dr->dr_addr.len);
2N/A dr->dr_resp.buf = NULL;
2N/A dr->dr_resp.maxlen = 0;
2N/A dr->dr_resp.len = 0;
2N/A dr->dr_status = DUP_INPROGRESS;
2N/A dr->dr_time = timenow;
2N/A dr->dr_hash = drhash; /* needed for efficient victim cleanup */
2N/A
2N/A /* place entry at head of hash table */
2N/A (void) rw_wrlock(&(dc->dc_lock));
2N/A dr->dr_chain = dc->dc_hashtbl[drhash];
2N/A dr->dr_prevchain = NULL;
2N/A if (dc->dc_hashtbl[drhash] != NULL)
2N/A dc->dc_hashtbl[drhash]->dr_prevchain = dr;
2N/A dc->dc_hashtbl[drhash] = dr;
2N/A (void) rw_unlock(&(dc->dc_lock));
2N/A return (DUP_NEW);
2N/A}
2N/A
2N/A/*
2N/A * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2N/A * int status, char *xprt_cache)
2N/A * Marks the request done (DUP_DONE or DUP_DROP) and stores the response.
2N/A * Only DONE and DROP requests can be marked as done. Sets the lru pointers
2N/A * to make the entry the most recently used. Returns DUP_ERROR or status.
2N/A */
2N/Aint
2N/A__svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
2N/A int status, char *xprt_cache)
2N/A{
2N/A uint32_t drxid, drhash;
2N/A int rc;
2N/A
2N/A /* LINTED pointer alignment */
2N/A struct dupcache *dc = (struct dupcache *)xprt_cache;
2N/A
2N/A if (dc == NULL) {
2N/A syslog(LOG_ERR, "__svc_dupdone: undefined cache");
2N/A return (DUP_ERROR);
2N/A }
2N/A
2N/A if (status != DUP_DONE && status != DUP_DROP) {
2N/A syslog(LOG_ERR, "__svc_dupdone: invalid dupdone status");
2N/A syslog(LOG_ERR, " must be DUP_DONE or DUP_DROP");
2N/A return (DUP_ERROR);
2N/A }
2N/A
2N/A /* find the xid of the entry in the cache */
2N/A if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
2N/A syslog(LOG_ERR, "__svc_dup: xid error");
2N/A return (DUP_ERROR);
2N/A }
2N/A drhash = drxid % dc->dc_buckets;
2N/A
2N/A /* update the status of the entry and result buffers, if required */
2N/A if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
2N/A dc, drxid, drhash)) == DUP_ERROR) {
2N/A syslog(LOG_ERR, "__svc_dupdone: cache entry error");
2N/A return (DUP_ERROR);
2N/A }
2N/A
2N/A return (rc);
2N/A}
2N/A
/*
 * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
 *	uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
 *	uint32_t drhash)
 * Check if entry exists in the dupcacache. If it does, update its status
 * and time and also its buffer, if appropriate. Its possible, but unlikely
 * for DONE requests to not exist in the cache. Return DUP_ERROR or status.
 */
static int
__svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
	int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
{
	struct dupreq *dr = NULL;
	time_t timenow = time(NULL);

	(void) rw_wrlock(&(dc->dc_lock));
	dr = dc->dc_hashtbl[drhash];
	while (dr != NULL) {
		/* match on xid, proc/prog/vers and caller's address */
		if (dr->dr_xid == drxid &&
		    dr->dr_proc == req->rq_proc &&
		    dr->dr_prog == req->rq_prog &&
		    dr->dr_vers == req->rq_vers &&
		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
		    memcmp(dr->dr_addr.buf,
		    req->rq_xprt->xp_rtaddr.buf,
		    dr->dr_addr.len) == 0) { /* entry found */
			if (dr->dr_hash != drhash) {
				/* sanity check */
				(void) rw_unlock(&(dc->dc_lock));
				syslog(LOG_ERR,
				"\n__svc_dupdone: hashing error");
				return (DUP_ERROR);
			}

			/* store the results if bufer is not NULL */
			/*
			 * NOTE(review): any previous dr_resp.buf is
			 * overwritten without being freed; appears to
			 * assume an entry is completed only once —
			 * confirm against callers.
			 */
			if (resp_buf != NULL) {
				if ((dr->dr_resp.buf =
				    malloc(resp_bufsz)) == NULL) {
					(void) rw_unlock(&(dc->dc_lock));
					syslog(LOG_ERR,
					    "__svc_dupdone: malloc failed");
					return (DUP_ERROR);
				}
				(void) memset(dr->dr_resp.buf, 0, resp_bufsz);
				(void) memcpy(dr->dr_resp.buf, resp_buf,
				    (uint_t)resp_bufsz);
				dr->dr_resp.len = resp_bufsz;
			}

			/* update status and done time */
			dr->dr_status = status;
			dr->dr_time = timenow;

			/* move the entry to the mru position */
			if (dc->dc_mru == NULL) {
				/* list was empty: dr becomes sole entry */
				dr->dr_next = dr;
				dr->dr_prev = dr;
			} else {
				dr->dr_next = dc->dc_mru->dr_next;
				dc->dc_mru->dr_next->dr_prev = dr;
				dr->dr_prev = dc->dc_mru;
				dc->dc_mru->dr_next = dr;
			}
			dc->dc_mru = dr;

			(void) rw_unlock(&(dc->dc_lock));
			return (status);
		}
		dr = dr->dr_chain;
	}
	(void) rw_unlock(&(dc->dc_lock));
	syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
	return (DUP_ERROR);
}
2N/A
2N/A#ifdef DUP_DEBUG
2N/A/*
2N/A * __svc_dupcache_debug(struct dupcache *dc)
2N/A * print out the hash table stuff
2N/A *
2N/A * This function requires the caller to hold the reader
2N/A * or writer version of the duplicate request cache lock (dc_lock).
2N/A */
2N/Astatic void
2N/A__svc_dupcache_debug(struct dupcache *dc)
2N/A{
2N/A struct dupreq *dr = NULL;
2N/A int i;
2N/A bool_t bval;
2N/A
2N/A fprintf(stderr, " HASHTABLE\n");
2N/A for (i = 0; i < dc->dc_buckets; i++) {
2N/A bval = FALSE;
2N/A dr = dc->dc_hashtbl[i];
2N/A while (dr != NULL) {
2N/A if (!bval) { /* ensures bucket printed only once */
2N/A fprintf(stderr, " bucket : %d\n", i);
2N/A bval = TRUE;
2N/A }
2N/A fprintf(stderr, "\txid: %u status: %d time: %ld",
2N/A dr->dr_xid, dr->dr_status, dr->dr_time);
2N/A fprintf(stderr, " dr: %x chain: %x prevchain: %x\n",
2N/A dr, dr->dr_chain, dr->dr_prevchain);
2N/A dr = dr->dr_chain;
2N/A }
2N/A }
2N/A
2N/A fprintf(stderr, " LRU\n");
2N/A if (dc->dc_mru) {
2N/A dr = dc->dc_mru->dr_next; /* lru */
2N/A while (dr != dc->dc_mru) {
2N/A fprintf(stderr, "\txid: %u status : %d time : %ld",
2N/A dr->dr_xid, dr->dr_status, dr->dr_time);
2N/A fprintf(stderr, " dr: %x next: %x prev: %x\n",
2N/A dr, dr->dr_next, dr->dr_prev);
2N/A dr = dr->dr_next;
2N/A }
2N/A fprintf(stderr, "\txid: %u status: %d time: %ld",
2N/A dr->dr_xid, dr->dr_status, dr->dr_time);
2N/A fprintf(stderr, " dr: %x next: %x prev: %x\n", dr,
2N/A dr->dr_next, dr->dr_prev);
2N/A }
2N/A}
2N/A#endif /* DUP_DEBUG */