inetd.c revision 3ad28c1e51ef5773481ccea340f94723d9d7b1aa
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* NOTES: To be expanded.
*
* The SMF inetd.
*
* Below are some high level notes of the operation of the SMF inetd. The
* notes don't go into any real detail, and the viewer of this file is
* encouraged to look at the code and its associated comments to better
* understand inetd's operation. This saves the potential for the code
* and these notes diverging over time.
*
* Inetd's major work is done from the context of event_loop(). Within this
* loop, inetd polls for events arriving from a number of different file
* descriptors, representing the following event types, and initiates
* any necessary event processing:
* - incoming network connections/datagrams.
* - notification of terminated processes (discovered via contract events).
* - instance specific events originating from the SMF master restarter.
* - stop/refresh requests from the inetd method processes (coming in on a
* Unix Domain socket).
* There's also a timeout set for the poll, which is set to the nearest
* scheduled timer in a timer queue that inetd uses to perform delayed
* processing, such as bind retries.
* The SIGHUP and SIGINT signals can also interrupt the poll, and will
* result in inetd being refreshed or stopped respectively, as was the
* behavior with the old inetd.
*
* Inetd implements a state machine for each instance. The states within the
* machine are: offline, online, disabled, maintenance, uninitialized and
* specializations of the offline state for when an instance exceeds one of
* its DOS limits. The state of an instance can be changed as a
* result/side-effect of one of the above events occurring, or inetd being
* started up. The ongoing state of an instance is stored in the SMF
* repository, as required of SMF restarters. This enables an administrator
* to view the state of each instance, and, if inetd was to terminate
* unexpectedly, it could use the stored state to re-commence where it left off.
*
* Within the state machine a number of methods are run (if provided) as part
* of a state transition to aid/effect a change in an instance's state. The
* supported methods are: offline, online, disable, refresh and start. The
* latter of these is the equivalent of the server program and its arguments
* in the old inetd.
*
* Events from the SMF master restarter come in on a number of threads
* created in the registration routine of librestart, the delegated restarter
* library. These threads call into the restart_event_proxy() function
* when an event arrives. To serialize the processing of instances, these events
* are then written down a pipe to the process's main thread, which listens
* for these events via a poll call, with the file descriptor of the other
* end of the pipe in its read set, and processes the event appropriately.
* When the event has been processed (which may be delayed if the instance
* for which the event is for is in the process of executing one of its methods
* as part of a state transition) it writes an acknowledgement back down the
* pipe the event was received on. The thread in restart_event_proxy() that
* wrote the event will read the acknowledgement it was blocked upon, and will
* then be able to return to its caller, thus implicitly acknowledging the
* event, and allowing another event to be written down the pipe for the main
* thread to process.
*/
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <fcntl.h>
#include <signal.h>
#include <errno.h>
#include <locale.h>
#include <syslog.h>
#include <libintl.h>
#include <librestart.h>
#include <pthread.h>
#include <time.h>
#include <limits.h>
#include <libgen.h>
#include <tcpd.h>
#include <libscf.h>
#include <libuutil.h>
#include <stddef.h>
#include <bsm/adt_event.h>
#include "inetd_impl.h"
/* path to inetd's binary */
#define INETD_PATH "/usr/lib/inet/inetd"
/*
* inetd's default configuration file paths. /etc/inet/inetd.conf is set
* to be the primary file, so it is checked before /etc/inetd.conf.
*/
#define PRIMARY_DEFAULT_CONF_FILE "/etc/inet/inetd.conf"
#define SECONDARY_DEFAULT_CONF_FILE "/etc/inetd.conf"
/* Arguments passed to this binary to request which method to execute. */
#define START_METHOD_ARG "start"
#define STOP_METHOD_ARG "stop"
#define REFRESH_METHOD_ARG "refresh"
/* connection backlog for unix domain socket */
#define UDS_BACKLOG 2
/* number of retries to recv() a request on the UDS socket before giving up */
#define UDS_RECV_RETRIES 10
/* enumeration of the different ends of a pipe */
enum pipe_end {
};
/*
 * Per-state information record; the states[] table below is indexed by
 * internal_inst_state_t, so entries must stay in that order.
 * NOTE(review): only the display name member is visible in this chunk;
 * the original struct may carry additional fields — confirm against the
 * full source.
 */
typedef struct {
const char *name; /* human-readable name of the state */
} state_info_t;
/*
* Collection of information for each state.
* NOTE: This table is indexed into using the internal_inst_state_t
* enumeration, so the ordering needs to be kept in synch.
*/
static state_info_t states[] = {
IM_NONE},
IM_NONE},
};
/*
* Pipe used to send events from the threads created by restarter_bind_handle()
* to the main thread of control.
*/
/*
* Used to protect the critical section of code in restarter_event_proxy() that
* involves writing an event down the event pipe and reading an acknowledgement.
*/
/* handle used in communication with the master restarter */
/* set to indicate a refresh of inetd is requested */
/* set by the SIGTERM handler to flag we got a SIGTERM */
/*
* Timer queue used to store timers for delayed event processing, such as
* bind retries.
*/
/*
* fd of Unix Domain socket used to communicate stop and refresh requests
* to the inetd start method process.
*/
static int uds_fd = -1;
/*
* List of inetd's currently managed instances; each containing its state,
* and in certain states its configuration.
*/
/* set to indicate we're being stopped */
/* TCP wrappers syslog globals. Consumed by libwrap. */
int allow_severity = LOG_INFO;
int deny_severity = LOG_WARNING;
/* path of the configuration file being monitored by check_conf_file() */
/* Auditing session handle */
static adt_session_data_t *audit_handle;
static void uds_fini(void);
static int uds_init(void);
static void create_bound_fds(instance_t *);
static void destroy_bound_fds(instance_t *);
static void destroy_instance(instance_t *);
static void inetd_stop(void);
static void
/*
* The following two functions are callbacks that libumem uses to determine
* inetd's desired debugging and logging behavior. The interface they consume
* is exported by FMA and is consolidation private. The comments in the two
* functions give the environment variable that will effectively be set to
* their returned value, and thus whose behavior for this value, described in
* umem_debug(3MALLOC), will be followed.
*/
/*
 * libumem debugging callback; the returned string is consumed as if it
 * were the UMEM_DEBUG environment variable (see umem_debug(3MALLOC)).
 */
const char *
_umem_debug_init(void)
{
	static const char *const debug_setting = "default,verbose";

	/* UMEM_DEBUG setting */
	return (debug_setting);
}
/*
 * libumem logging callback; the returned string is consumed as if it
 * were the UMEM_LOGGING environment variable (see umem_debug(3MALLOC)).
 */
const char *
_umem_logging_init(void)
{
	static const char *const logging_setting = "fail,contents";

	/* UMEM_LOGGING setting */
	return (logging_setting);
}
static void
log_invalid_cfg(const char *fmri)
{
"Invalid configuration for instance %s, placing in maintenance"),
fmri);
}
/*
* Returns B_TRUE if the instance is in a suitable state for inetd to stop.
*/
static boolean_t
{
}
/*
* Updates the current and next repository states of instance 'inst'. If
* any errors occur an error message is output.
*/
static void
{
int ret;
debug_msg("Entering update_instance_states: oldcur: %s, newcur: %s "
/* update the repository/cached internal state */
PR_NAME_CUR_INT_STATE)) != 0) ||
PR_NAME_NEXT_INT_STATE)) != 0))
/* update the repository SMF state */
err, 0)) != 0)
}
void
{
}
/*
* Sends a refresh event to the inetd start method process and returns
* SMF_EXIT_OK if it managed to send it. If it fails to send the request for
* some reason it returns SMF_EXIT_ERR_OTHER.
*/
/*
 * NOTE(review): this function body appears truncated in this copy of the
 * file — the write of the refresh request (presumably a safe_write() of a
 * UDS_EVENT_REFRESH-style byte, guarded by an if whose opening brace is
 * missing before the error message) has been lost, leaving a dangling
 * gettext() fragment and a brace mismatch. Restore from the original
 * source before building; comments below annotate the surviving lines.
 */
static int
refresh_method(void)
{
/* fd of the Unix Domain socket connection to the running inetd */
int fd;
debug_msg("Entering refresh_method");
/* connect to the inetd start-method process over its UDS socket */
if ((fd = connect_to_inetd()) < 0) {
return (SMF_EXIT_ERR_OTHER);
}
/* write the request and return success */
gettext("Failed to send refresh request to inetd: %s"),
return (SMF_EXIT_ERR_OTHER);
}
return (SMF_EXIT_OK);
}
/*
* Sends a stop event to the inetd start method process and wait till it goes
* away. If inetd is determined to have stopped SMF_EXIT_OK is returned, else
* SMF_EXIT_ERR_OTHER is returned.
*/
static int
stop_method(void)
{
int fd;
char c;
debug_msg("Entering stop_method");
/*
* Assume connect_to_inetd() failed because inetd was already
* stopped, and return success.
*/
return (SMF_EXIT_OK);
}
/*
* This is safe to do since we're fired off in a separate process
* than inetd and in the case we get wedged, the stop method timeout
* will occur and we'd be killed by our restarter.
*/
/* write the stop request to inetd and wait till it goes away */
return (SMF_EXIT_ERR_OTHER);
}
/* wait until remote end of socket is closed */
;
if (ret != 0) {
return (SMF_EXIT_ERR_OTHER);
}
return (SMF_EXIT_OK);
}
/*
* This function is called to handle restarter events coming in from the
* master restarter. It is registered with the master restarter via
* restarter_bind_handle() and simply passes a pointer to the event down
* the event pipe, which will be discovered by the poll in the event loop
* and processed there. It waits for an acknowledgement to be written back down
* the pipe before returning.
* Writing a pointer to the function's 'event' parameter down the pipe will
* be safe, as the thread in restarter_event_proxy() doesn't return until
* the main thread has finished its processing of the passed event, thus
* the referenced event will remain around until the function returns.
* To impose the limit of only one event being in the pipe and processed
* at once, a lock is taken on entry to this function and returned on exit.
* Always returns 0.
*/
static int
{
debug_msg("Entering restarter_event_proxy");
(void) pthread_mutex_lock(&rst_event_pipe_mtx);
/* write the event to the main worker thread down the pipe */
sizeof (event)) != 0)
goto pipe_error;
/*
* Wait for an acknowledgement that the event has been processed from
* the same pipe. In the case that inetd is stopping, any thread in
* this function will simply block on this read until inetd eventually
* exits. This will result in this function not returning success to
* its caller, and the event that was being processed when the
* function exited will be re-sent when inetd is next started.
*/
sizeof (processed)) != 0)
goto pipe_error;
(void) pthread_mutex_unlock(&rst_event_pipe_mtx);
/*
* Something's seriously wrong with the event pipe. Notify the
* worker thread by closing this end of the event pipe and pause till
* inetd exits.
*/
for (;;)
(void) pause();
/* NOTREACHED */
}
/*
* Let restarter_event_proxy() know we're finished with the event it's blocked
* upon. The 'processed' argument denotes whether we successfully processed the
* event.
*/
static void
{
debug_msg("Entering ack_restarter_event");
/*
* If safe_write returns -1 something's seriously wrong with the event
* pipe, so start the shutdown proceedings.
*/
sizeof (processed)) == -1)
inetd_stop();
}
/*
* Switch the syslog identification string to 'ident'.
*/
static void
change_syslog_ident(const char *ident)
{
debug_msg("Entering change_syslog_ident: ident: %s", ident);
closelog();
}
/*
* Perform TCP wrappers checks on this instance. Due to the fact that the
* current wrappers code used in Solaris is taken untouched from the open
* source version, we're stuck with using the daemon name for the checks, as
* opposed to making use of instance FMRIs. Sigh.
* Returns B_TRUE if the check passed, else B_FALSE.
*/
static boolean_t
{
char *daemon_name;
struct request_info req;
/*
* Wrap the service using libwrap functions. The code below implements
* the functionality of tcpd. This is done only for stream,nowait
* changing the test below.
*/
if (*daemon_name == '/')
/*
* Change the syslog message identity to the name of the
* daemon being wrapped, as opposed to "inetd".
*/
eval_client(&req));
} else if (!hosts_access(&req)) {
"refused connect from %s (access denied)",
eval_client(&req));
} else {
eval_client(&req));
}
/* Revert syslog identity back to "inetd". */
}
return (rval);
}
/*
* Handler registered with the timer queue code to remove an instance from
* the connection rate offline state when it has been there for its allotted
* time.
*/
/* ARGSUSED */
static void
{
debug_msg("Entering conn_rate_online, instance: %s",
}
/*
* Check whether this instance in the offline state is in transition to
* another state and do the work to continue this transition.
*/
void
{
debug_msg("Entering process_offline_inst");
if (inst->disable_req) {
} else if (inst->maintenance_req) {
/*
* If inetd is in the process of stopping, we don't want to enter
* any states but offline, disabled and maintenance.
*/
} else if (!inetd_stopping) {
if (inst->conn_rate_exceeded) {
/*
* Schedule a timer to bring the instance out of the
* connection rate offline state.
*/
inst);
"won't be brought on line after %d "
}
} else if (copies_limit_exceeded(inst)) {
}
}
}
/*
* Create a socket bound to the instance's configured address. If the
* bind fails, returns -1, else the fd of the bound socket.
*/
static int
{
int fd;
int on = 1;
debug_msg("Entering create_bound_socket");
if (fd < 0) {
"Socket creation failure for instance %s, proto %s: %s"),
return (-1);
}
return (-1);
}
/* restrict socket to IPv6 communications only */
sizeof (on)) == -1) {
return (-1);
}
}
"Failed to bind to the port of service instance %s, "
return (-1);
}
/*
* Retrieve and store the address bound to for RPC services.
*/
struct sockaddr_storage ss;
return (-1);
}
sizeof (struct sockaddr_storage));
}
return (fd);
}
/*
* Handler registered with the timer queue code to retry the creation
* of a bound fd.
*/
/* ARGSUSED */
static void
{
switch (instance->cur_istate) {
case IIS_OFFLINE_BIND:
case IIS_ONLINE:
case IIS_DEGRADED:
case IIS_IN_ONLINE_METHOD:
case IIS_IN_REFRESH_METHOD:
break;
default:
#ifndef NDEBUG
#endif
abort();
}
}
/*
* For each of the fds for the given instance that are bound, if 'listen' is
* set add them to the poll set, else remove them from it. If any additions
* fail, returns -1, else 0 on success.
*/
int
{
int ret = 0;
debug_msg("Entering poll_bound_fds: instance: %s, on: %d",
if (!listen) {
ret = -1;
}
}
}
return (ret);
}
/*
* Handle the case were we either fail to create a bound fd or we fail
* to add a bound fd to the poll set for the given instance.
*/
static void
{
/*
* We must be being called as a result of a failed poll_bound_fds()
* as a bind retry is already scheduled. Just return and let it do
* the work.
*/
return;
/*
* Check if the rebind retries limit is operative and if so,
* if it has been reached.
*/
instance->bind_fail_count = 0;
switch (instance->cur_istate) {
case IIS_DEGRADED:
case IIS_ONLINE:
/* check if any of the fds are being poll'd upon */
break;
}
"all protocols for instance %s, "
"transitioning to degraded"),
break;
}
/*
* In the case we failed the 'bind' because set_pollfd()
* failed on all bound fds, use the offline handling.
*/
/* FALLTHROUGH */
case IIS_OFFLINE:
case IIS_OFFLINE_BIND:
break;
case IIS_IN_ONLINE_METHOD:
case IIS_IN_REFRESH_METHOD:
"protocols for instance %s, instance will go to "
/*
* Set the retries exceeded flag so when the method
* completes the instance goes to the degraded state.
*/
break;
default:
#ifndef NDEBUG
"%s:%d: Unknown instance state %d.\n",
#endif
abort();
}
/*
* bind re-scheduled, so if we're offline reflect this in the
* state.
*/
}
}
/*
* Check if two transport protocols for RPC conflict.
*/
return (B_TRUE);
return (B_TRUE);
return (B_FALSE);
}
return (B_TRUE);
return (B_TRUE);
return (B_TRUE);
return (B_FALSE);
}
return (B_TRUE);
return (B_TRUE);
return (B_FALSE);
}
return (B_TRUE);
return (B_TRUE);
return (B_FALSE);
}
return (B_TRUE);
return (B_TRUE);
return (B_TRUE);
return (B_FALSE);
}
return (B_TRUE);
return (B_TRUE);
return (0);
}
/*
* port namespace and that conflicts can be detected by literal string
* comparison.
*/
return (FALSE);
return (B_TRUE);
}
/*
* Check if inetd thinks this RPC program number is already registered.
*
* An RPC protocol conflict occurs if
* a) the program numbers are the same and,
* b) the version numbers overlap,
* c) the protocols (TCP vs UDP vs tic*) are the same.
*/
instance_t *i;
i = uu_list_next(instance_list, i)) {
if (i->cur_istate != IIS_ONLINE)
continue;
continue;
continue;
continue;
continue;
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Independent of the transport, for each of the entries in the instance's
* proto list this function first attempts to create an associated network fd;
* for RPC services these are then bound to a kernel chosen port and the
* fd is registered with rpcbind; for non-RPC services the fds are bound
* to the port associated with the instance's service name. On any successful
* binds the instance is taken online. Failed binds are handled by
* handle_bind_failure().
*/
void
{
/*
* Loop through and try and bind any unbound protos.
*/
continue;
(tlx_info_t *)pi);
} else {
/*
* We cast pi to a void so we can then go on to cast
* it to a socket_info_t without lint complaining
* about alignment. This is done because the x86
* version of lint thinks a lint suppression directive
* is unnecessary and flags it as such, yet the sparc
* version complains if it's absent.
*/
void *p = pi;
(socket_info_t *)p);
}
continue;
}
/*
* Don't register the same RPC program number twice.
* Doing so silently discards the old service
* without causing an error.
*/
continue;
}
-1) {
continue;
}
}
}
switch (instance->cur_istate) {
case IIS_OFFLINE:
case IIS_OFFLINE_BIND:
/*
* If we've managed to bind at least one proto lets run the
* online method, so we can start listening for it.
*/
return; /* instance gone to maintenance */
break;
case IIS_ONLINE:
case IIS_IN_REFRESH_METHOD:
/*
* We're 'online', so start polling on any bound fds we're
* currently not.
*/
} else if (!failure) {
/*
* We've successfully bound and poll'd upon all protos,
* so reset the failure count.
*/
instance->bind_fail_count = 0;
}
break;
case IIS_IN_ONLINE_METHOD:
/*
* Nothing to do here as the method completion code will start
* listening for any successfully bound fds.
*/
break;
default:
#ifndef NDEBUG
#endif
abort();
}
if (failure)
}
/*
* Counter to create_bound_fds(), for each of the bound network fds this
* function unregisters the instance from rpcbind if it's an RPC service,
* stops listening for new connections for it and then closes the listening fd.
*/
static void
{
}
}
/* cancel any bind retries */
}
/*
* Perform %A address expansion and return a pointer to a static string
* array containing crafted arguments. This expansion is provided for
* compatibility with 4.2BSD daemons, and as such we've copied the logic of
* the legacy inetd to maintain this compatibility as much as possible. This
* logic is a bit scatty, but it dates back at least as far as SunOS 4.x.
*/
static char **
{
static char addrbuf[sizeof ("ffffffff.65536")];
static char *ret[3];
/*
* We cast pi to a void so we can then go on to cast it to a
* socket_info_t without lint complaining about alignment. This
* is done because the x86 version of lint thinks a lint suppression
* directive is unnecessary and flags it as such, yet the sparc
* version complains if it's absent.
*/
const void *p = pi;
debug_msg("Entering expand_address");
/* set ret[0] to the basename of exec path */
!= NULL) {
ret[0]++;
} else {
}
} else {
addrbuf[0] = '\0';
struct sockaddr_in *sp;
}
}
return (ret);
}
/*
* Returns the state associated with the supplied method being run for an
* instance.
*/
static internal_inst_state_t
{
break;
}
}
/*
* Store the method's PID and CID in the repository. If the store fails
* we ignore it and just drive on.
*/
static void
{
debug_msg("Entering add_method_ids");
if (cid != -1)
}
} else {
}
}
}
/*
* Remove the method's PID and CID from the repository. If the removal
* fails we ignore it and drive on.
*/
void
{
debug_msg("Entering remove_method_ids");
if (cid != -1)
} else {
}
}
static instance_t *
create_instance(const char *fmri)
{
goto alloc_fail;
ret->conn_rate_count = 0;
ret->fail_rate_count = 0;
ret->bind_fail_count = 0;
goto alloc_fail;
goto alloc_fail;
return (ret);
return (NULL);
}
static void
{
debug_msg("Entering destroy_instance");
return;
}
/*
* Retrieves the current and next states internal states. Returns 0 on success,
* else returns one of the following on error:
* SCF_ERROR_NO_MEMORY if memory allocation failed.
* SCF_ERROR_CONNECTION_BROKEN if the connection to the repository was broken.
* SCF_ERROR_TYPE_MISMATCH if the property was of an unexpected type.
* SCF_ERROR_NO_RESOURCES if the server doesn't have adequate resources.
* SCF_ERROR_NO_SERVER if the server isn't running.
*/
static scf_error_t
{
debug_msg("Entering retrieve_instance_state: instance: %s",
/* retrieve internal states */
PR_NAME_CUR_INT_STATE)) != 0) ||
PR_NAME_NEXT_INT_STATE)) != 0)) {
if (ret != SCF_ERROR_NOT_FOUND) {
"Failed to read state of instance %s: %s"),
return (ret);
}
debug_msg("instance with no previous int state - "
"setting state to uninitialized");
return (SCF_ERROR_NO_MEMORY);
}
}
/* update convenience states */
inst->next_istate);
return (0);
}
/*
* Retrieve stored process ids and register each of them so we process their
* termination.
*/
static int
{
debug_msg("Entering remove_method_pids");
case 0:
break;
case SCF_ERROR_NOT_FOUND:
return (0);
default:
scf_strerror(scf_error()));
return (-1);
}
IM_START) == 0) {
/*
* The process must have already terminated. Remove
* it from the list.
*/
} else {
"of %s method of instance %s"), START_METHOD_NAME,
}
}
/* synch the repository pid list to remove any terminated pids */
return (0);
}
/*
* Remove the passed instance from inetd control.
*/
static void
{
debug_msg("Entering remove_instance");
switch (instance->cur_istate) {
case IIS_ONLINE:
case IIS_DEGRADED:
/* stop listening for network connections */
break;
case IIS_OFFLINE_BIND:
break;
case IIS_OFFLINE_CONRATE:
break;
}
/* stop listening for terminated methods */
}
/*
* Refresh the configuration of instance 'inst'. This method gets called as
* a result of a refresh event for the instance from the master restarter, so
* we can rely upon the instance's running snapshot having been updated from
* its configuration snapshot.
*/
void
{
switch (inst->cur_istate) {
case IIS_MAINTENANCE:
case IIS_DISABLED:
case IIS_UNINITIALIZED:
/*
* Ignore any possible changes, we'll re-read the configuration
* automatically when we exit these states.
*/
break;
case IIS_OFFLINE_COPIES:
case IIS_OFFLINE_BIND:
case IIS_OFFLINE:
case IIS_OFFLINE_CONRATE:
}
} else {
switch (inst->cur_istate) {
case IIS_OFFLINE_BIND:
if (copies_limit_exceeded(inst)) {
/* Cancel scheduled bind retries. */
/*
* Take the instance to the copies
* offline state, via the offline
* state.
*/
}
break;
case IIS_OFFLINE:
break;
case IIS_OFFLINE_CONRATE:
/*
* Since we're already in a DOS state,
* don't bother evaluating the copies
* limit. This will be evaluated when
* we leave this state in
* process_offline_inst().
*/
break;
case IIS_OFFLINE_COPIES:
/*
* Check if the copies limit has been increased
* above the current count.
*/
if (!copies_limit_exceeded(inst)) {
}
break;
default:
assert(0);
}
}
break;
case IIS_DEGRADED:
case IIS_ONLINE:
/*
* Try to avoid the overhead of taking an instance
* offline and back on again. We do this by limiting
* this behavior to two eventualities:
* - there needs to be a re-bind to listen on behalf
* of the instance with its new configuration. This
* could be because for example its service has been
* associated with a different port, or because the
* v6only protocol option has been newly applied to
* the instance.
* - one or both of the start or online methods of the
* instance have changed in the new configuration.
* Without taking the instance offline when the
* start method changed the instance may be running
* with unwanted parameters (or even an unwanted
* binary); and without taking the instance offline
* if its online method was to change, some part of
* its running environment may have changed and would
* not be picked up until the instance next goes
* offline for another reason.
*/
} else { /* no bind config / method changes */
/*
* swap the proto list over from the old
* configuration to the new, so we retain
* our set of network fds.
*/
/* re-evaluate copies limits based on new cfg */
if (copies_limit_exceeded(inst)) {
NULL);
} else {
/*
* Since the instance isn't being
* taken offline, where we assume it
* would pick-up any configuration
* changes automatically when it goes
* back online, run its refresh method
* to allow it to pick-up any changes
* whilst still online.
*/
NULL);
}
}
} else {
}
break;
default:
debug_msg("Unhandled current state %d for instance in "
assert(0);
}
}
/*
* Called by process_restarter_event() to handle a restarter event for an
* instance.
*/
static void
{
debug_msg("Entering handle_restarter_event: inst: %s, event: %d, "
switch (event) {
goto done;
goto done;
switch (instance->cur_istate) {
case IIS_OFFLINE_CONRATE:
case IIS_OFFLINE_BIND:
case IIS_OFFLINE_COPIES:
/*
* inetd must be closing down as we wouldn't get this
* event in one of these states from the master
* restarter. Take the instance to the offline resting
* state.
*/
} else if (instance->cur_istate ==
}
goto done;
}
break;
/*
* We've got a restart event, so if the instance is online
* in any way initiate taking it offline, and rely upon
* our restarter to send us an online event to bring
* it back online.
*/
switch (instance->cur_istate) {
case IIS_ONLINE:
case IIS_DEGRADED:
}
goto done;
}
switch (instance->cur_istate) {
case IIS_OFFLINE:
switch (event) {
/*
* Dependencies are met, let's take the service online.
* Only try and bind for a wait type service if
* no process is running on its behalf. Otherwise, just
* mark the service online and binding will be attempted
* when the process exits.
*/
} else {
}
break;
/*
* The instance should be disabled, so run the
* instance's disabled method that will do the work
* to take it there.
*/
break;
/*
* The master restarter has requested the instance
* go to maintenance; since we're already offline
* just update the state to the maintenance state.
*/
break;
}
break;
case IIS_OFFLINE_BIND:
switch (event) {
/*
* The instance should be disabled. Firstly, as for
* the above dependencies unmet comment, cancel
* the bind retry timer and update the state to
* offline. Then, run the disable method to do the
* work to take the instance from offline to
* disabled.
*/
break;
/*
* The master restarter has requested the instance
* be placed in the maintenance state. Cancel the
* outstanding retry timer, and since we're already
* offline, update the state to maintenance.
*/
break;
}
break;
case IIS_DEGRADED:
case IIS_ONLINE:
switch (event) {
/*
* The instance needs to be disabled. Do the same work
* as for the dependencies unmet event below to
* take the instance offline.
*/
/*
* Indicate that the offline method is being run
* as part of going to the disabled state, and to
* carry on this transition.
*/
break;
/*
* The master restarter has requested the instance be
* placed in the maintenance state. This involves
* firstly taking the service offline, so do the
* same work as for the dependencies unmet event
* below. We set the maintenance_req flag to
* indicate that when we get to the offline state
* we should be placed directly into the maintenance
* state.
*/
/* FALLTHROUGH */
/*
* Dependencies have become unmet. Close and
* stop listening on the instance's network file
* descriptor, and run the offline method to do
* any work required to take us to the offline state.
*/
}
break;
case IIS_UNINITIALIZED:
if (event == RESTARTER_EVENT_TYPE_DISABLE ||
break;
} else if (event != RESTARTER_EVENT_TYPE_ENABLE) {
/*
* Ignore other events until we know whether we're
* enabled or not.
*/
break;
}
/*
* We've got an enabled event; make use of the handling in the
* disable case.
*/
/* FALLTHROUGH */
case IIS_DISABLED:
switch (event) {
/*
* The instance needs enabling. Commence reading its
* configuration and if successful place the instance
* in the offline state and let process_offline_inst()
* take it from there.
*/
} else {
}
break;
/*
* The master restarter has requested the instance be
* placed in the maintenance state, so just update its
* state to maintenance.
*/
break;
}
break;
case IIS_MAINTENANCE:
switch (event) {
/*
* The master restarter has requested that the instance
* be taken out of maintenance. Read its configuration,
* and if successful place the instance in the offline
* state and call process_offline_inst() to take it
* from there.
*/
} else {
/*
* The configuration was invalid. If the
* service has disabled requested, let's
* just place the instance in disabled even
* though we haven't been able to run its
* disable method, as the slightly incorrect
* state is likely to be less of an issue to
* an administrator than refusing to move an
* instance to disabled. If disable isn't
* requested, re-mark the service's state
* as maintenance, so the administrator can
* see the request was processed.
*/
} else {
}
}
break;
}
break;
case IIS_OFFLINE_CONRATE:
switch (event) {
/*
* The instance wants disabling. Take the instance
* offline as for the dependencies unmet event above,
* and then from there run the disable method to do
* the work to take the instance to the disabled state.
*/
break;
/*
* The master restarter has requested the instance
* be taken to maintenance. Cancel the timer setup
* when we entered this state, and go directly to
* maintenance.
*/
break;
}
break;
case IIS_OFFLINE_COPIES:
switch (event) {
/*
* The instance wants disabling. Update the state
* to offline, and run the disable method to do the
* work to take it to the disabled state.
*/
break;
/*
* The master restarter has requested the instance be
* placed in maintenance. Since it's already offline
* simply update the state.
*/
break;
}
break;
default:
debug_msg("handle_restarter_event: instance in an "
"unexpected state");
assert(0);
}
done:
if (send_ack)
}
/*
 * Tries to read and process an event from the event pipe. If there isn't one
 * or an error occurred processing the event it returns -1. Else, if the event
 * is for an instance we're not already managing we read its state, add it to
 * our list to manage, and if appropriate read its configuration. Whether it's
 * new to us or not, we then handle the specific event.
 * Returns 0 if an event was read and processed successfully, else -1.
 *
 * NOTE(review): this body is incomplete in this chunk -- many statement
 * lines (including the head of the switch on the pipe read result and the
 * conditionals guarding the goto/assert fragments below) appear to have
 * been elided during extraction. Comments below describe only the
 * surviving lines; verify against the complete file.
 */
static int
process_restarter_event(void)
{
	char *fmri;

	debug_msg("Entering process_restarter_event");
	/*
	 * Try to read an event pointer from the event pipe.
	 */
	errno = 0;
	/* NOTE(review): the read call that heads this switch is missing. */
	sizeof (event))) {
	case 0:
		break;
	case 1:
		return (-1);
	/* other end of pipe closed */
	/* FALLTHROUGH */
	default:	/* unexpected read error */
		/*
		 * There's something wrong with the event pipe. Let's
		 * shutdown and be restarted.
		 */
		inetd_stop();
		return (-1);
	}
	/*
	 * Check if we're currently managing the instance which the event
	 * pertains to. If not, read its complete state and add it to our
	 * list to manage.
	 */
		goto fail;
	}
		assert(0);
		break;
	}
		int err;

		/* NOTE(review): head of this condition was elided. */
		(retrieve_instance_state(instance) != 0) ||
		(retrieve_method_pids(instance) != 0)) {
			goto fail;
		}
		"Failed to adopt contracts of instance %s: %s"),
			goto fail;
		}
		/*
		 * Only read configuration for instances that aren't in any of
		 * the disabled, maintenance or uninitialized states, since
		 * they'll read it on state exit.
		 */
		}
	}
	}
	/*
	 * If the instance is currently running a method, don't process the
	 * event now, but attach it to the instance for processing when
	 * the instance finishes its transition.
	 */
	if (INST_IN_TRANSITION(instance)) {
	} else {
	}
	return (0);
fail:
	return (-1);
}
/*
 * Do the state machine processing associated with the termination of instance
 * 'inst''s start method.
 *
 * NOTE(review): this definition is incomplete in this chunk -- the line
 * carrying the function name and parameter list (between "void" and "{")
 * and most body statements were elided during extraction; presumably the
 * signature is of the form (instance_t *inst) given the uses of
 * inst->cur_istate below -- TODO confirm against the complete file.
 */
void
{
	/* do any further processing/checks when we exit these states */
		return;
	}
	switch (inst->cur_istate) {
	case IIS_ONLINE:
	case IIS_DEGRADED:
	case IIS_IN_REFRESH_METHOD:
		/*
		 * A wait type service's start method has exited.
		 * Check if the method was fired off in this inetd's
		 * lifetime, or a previous one; if the former,
		 * re-commence listening on the service's behalf; if
		 * the latter, mark the service offline and let bind
		 * attempts commence.
		 */
			/*
			 * If a bound fd exists, the method was fired
			 * off during this inetd's lifetime.
			 */
				break;
			}
		} else {
		}
	}
	} else {
		/*
		 * Check if a nowait service should be brought back online
		 * after exceeding its copies limit.
		 */
	}
	}
}
/*
 * If the instance has a pending event process it and initiate the
 * acknowledgement.
 *
 * NOTE(review): the line carrying this function's name and parameters
 * (between "static void" and "{") and most of the body were elided during
 * extraction; only the debug trace below survives.
 */
static void
{
		debug_msg("Injecting pending event %d for instance %s",
	}
}
/*
 * Do the state machine processing associated with the termination
 * of the specified instance's non-start method with the specified status.
 * Once the processing of the termination is done, the function also picks up
 * any processing that was blocked on the method running.
 *
 * NOTE(review): this body is incomplete in this chunk -- the signature line
 * (between "void" and "{") and many statements were elided during
 * extraction. From the debug trace the function is process_non_start_term,
 * and from the uses below it takes at least an instance ('inst') and a
 * method return 'status' -- TODO confirm against the complete file.
 */
void
{
	debug_msg("Entering process_non_start_term: inst: %s, method: %s",
	if (status == IMRET_FAILURE) {
		"transitioning to maintenance"),
		}
		if (!inetd_stopping)
		return;
	}
	/* non-failure method return */
	if (status != IMRET_SUCCESS) {
		/*
		 * An instance method never returned a supported return code.
		 * We'll assume this means the method succeeded for now whilst
		 * non-GL-cognizant methods are used - eg. pkill.
		 */
		debug_msg("The %s method of instance %s returned "
		    "non-compliant exit code: %d, assuming success",
	}
	/*
	 * Update the state from the in-transition state.
	 */
	switch (inst->cur_istate) {
	case IIS_IN_ONLINE_METHOD:
	/* FALLTHROUGH */
	case IIS_IN_REFRESH_METHOD:
		/*
		 * If we've exhausted the bind retries, flag that by setting
		 * the instance's state to degraded.
		 */
		if (inst->bind_retries_exceeded) {
			break;
		}
	/* FALLTHROUGH */
	default:
	}
		/*
		 * This instance was found during refresh to need
		 * taking offline because its newly read configuration
		 * was sufficiently different. Now we're offline,
		 * activate this new configuration.
		 */
		}
	} else if (ran_online_method) {
		/*
		 * We've just successfully executed the online method. We have
		 * a set of bound network fds that were created before running
		 * this method, so now we're online start listening for
		 * connections on them.
		 */
	}
	/*
	 * If we're now out of transition (process_offline_inst() could have
	 * fired off another method), carry out any jobs that were blocked by
	 * us being in transition.
	 */
	if (!INST_IN_TRANSITION(inst)) {
		if (inetd_stopping) {
			if (!instance_stopped(inst)) {
				/*
				 * inetd is stopping, and this instance hasn't
				 * been stopped. Inject a stop event.
				 */
			}
		} else {
		}
	}
}
/*
 * Check if configuration file specified is readable. If not return B_FALSE,
 * else return B_TRUE.
 *
 * NOTE(review): the body is incomplete in this chunk -- the open/access
 * call inside the do-loop and the error-message call that the dangling
 * string fragment below belongs to were elided during extraction.
 */
static boolean_t
can_read_file(const char *path)
{
	int	ret;
	int	serrno;

	debug_msg("Entering can_read_file");
	do {
	if (ret < 0) {
		"file %s for performing modification checks: %s"),
		}
		return (B_FALSE);
	}
	return (B_TRUE);
}
/*
 * Check whether the configuration file has changed contents since inetd
 * inetconv needs to be run.
 *
 * NOTE(review): the header comment above and the body below are incomplete
 * in this chunk -- the hash-computation/comparison calls, the 'ret'
 * declaration, and the error_msg calls the dangling string fragments
 * belong to were elided during extraction.
 */
static void
check_conf_file(void)
{
	char		*new_hash;
	const char	*file;

	debug_msg("Entering check_conf_file");
		/*
		 * No explicit config file specified, so see if one of the
		 * default two are readable, checking the primary one first
		 * followed by the secondary.
		 */
		} else {
			return;
		}
	} else {
		if (!can_read_file(file))
			return;
	}
		if (((ret == SCF_ERROR_NONE) &&
			/* modified config file */
			"Configuration file %s has been modified since "
			"inetconv was last run. \"inetconv -i %s\" must be "
		} else if ((ret != SCF_ERROR_NOT_FOUND) &&
		    (ret != SCF_ERROR_NONE)) {
			/* No message if hash not yet computed */
			"configuration file %s has been modified: %s"),
		}
	} else {
	}
}
/*
 * Refresh all inetd's managed instances and check the configuration file
 * for any updates since inetconv was last run, logging a message if there
 * are. We call the SMF refresh function to refresh each instance so that
 * the refresh request goes through the framework, and thus results in the
 * running snapshot of each instance being updated from the configuration
 * snapshot.
 *
 * NOTE(review): the body is incomplete in this chunk -- the per-instance
 * refresh loop and the check_conf_file() call implied by the trailing
 * comment were elided during extraction.
 */
static void
inetd_refresh(void)
{
	debug_msg("Entering inetd_refresh");

	/* call libscf to send refresh requests for all managed instances */
		}
	}

	/*
	 * Log a message if the configuration file has changed since inetconv
	 * was last run.
	 */
}
/*
 * Initiate inetd's shutdown.
 *
 * NOTE(review): the body is incomplete in this chunk -- the statements
 * implementing the signal blocking, the stopping flag, the restarter-event
 * poll removal, and the per-instance stop loop were elided during
 * extraction; only their explanatory comments survive.
 */
static void
inetd_stop(void)
{
	debug_msg("Entering inetd_stop");

	/* Block handling signals for stop and refresh */

	/* Indicate inetd is coming down */

	/* Stop polling on restarter events. */

	/*
	 * Send a stop event to all currently unstopped instances that
	 * aren't in transition. For those that are in transition, the
	 * event will get sent when the transition completes.
	 */
	}
}
/*
 * Sets up the intra-inetd-process Unix Domain Socket.
 * Returns -1 on error, else 0.
 *
 * NOTE(review): the body is incomplete in this chunk -- the socket(),
 * bind() and listen() calls whose failure paths the dangling
 * "return (-1);" fragments belong to were elided during extraction. The
 * unlink(INETD_UDS_PATH) below appears to be error-path cleanup of the
 * bound socket node.
 */
static int
uds_init(void)
{
	struct sockaddr_un addr;

	debug_msg("Entering uds_init");
		return (-1);
	}

	/* CONSTCOND */
		return (-1);
	}
		(void) unlink(INETD_UDS_PATH);
		return (-1);
	}
	return (0);
}
/*
 * Tear down the intra-inetd-process Unix Domain Socket set up by
 * uds_init(): close the socket if it was ever opened, and unconditionally
 * remove its filesystem node so a subsequent uds_init() can bind afresh.
 *
 * Fix: as shipped in this chunk the descriptor was never closed (fd leak)
 * and the unlink() was gated on the fd test; close the fd under the guard
 * and always attempt the unlink (a no-op if the node doesn't exist).
 */
static void
uds_fini(void)
{
	if (uds_fd != -1)
		(void) close(uds_fd);
	(void) unlink(INETD_UDS_PATH);
}
/*
 * Handle an incoming request on the Unix Domain Socket. Returns -1 if there
 * was an error handling the event, else 0.
 *
 * NOTE(review): the body is incomplete in this chunk -- the accept() call
 * in the do-loop, the request read that sets 'ret'/'req', and the
 * refresh-flag assignment under UR_REFRESH_INETD were elided during
 * extraction.
 */
static int
process_uds_event(void)
{
	int			fd;
	struct sockaddr_un	addr;
	int			ret;

	debug_msg("Entering process_uds_event");
	do {
	if (fd < 0) {
		if (errno != EWOULDBLOCK)
		return (-1);
	}
		break;
	}
	if (ret != 0) {
		return (-1);
	}
	switch (req) {
	case UR_REFRESH_INETD:
		/* flag the request for event_loop() to process */
		break;
	case UR_STOP_INETD:
		inetd_stop();
		break;
	default:
		error_msg("unexpected UDS request");
		return (-1);
	}
	return (0);
}
/*
 * Perform checks for common exec string errors. We limit the checks to
 * whether the file exists, is a regular file, and has at least one execute
 * bit set. We leave the core security checks to exec() so as not to duplicate
 * and thus incur the associated drawbacks, but hope to catch the common
 * errors here.
 *
 * NOTE(review): the definition is incomplete in this chunk -- the first
 * parameter(s) before "const char *path" and the stat() call plus mode
 * tests that the fragments below belong to were elided during extraction.
 */
static boolean_t
    const char *path)
{
	debug_msg("Entering passes_basic_exec_checks");

	/* check the file exists */
			"Can't stat the %s method of instance %s: %s"),
			return (B_FALSE);
		}
	}

	/*
	 * Check if the file is a regular file and has at least one execute
	 * bit set.
	 */
		"The %s method of instance %s isn't a regular file"),
		return (B_FALSE);
		return (B_FALSE);
	}
	return (B_TRUE);
}
/*
 * NOTE(review): this is the child-side method execution routine (it builds
 * exec arguments/environment, generates audit records, sets the method
 * context, arranges the standard fds, and reports exec failure), but the
 * definition is incomplete in this chunk -- the line carrying the function
 * name and parameters (between "static void" and "{"), and most statements
 * (the exec call itself, the condition heads for the error branches, and
 * the calls that the dangling string fragments belong to) were elided
 * during extraction. Treat the surviving structure as indicative only and
 * consult the complete file.
 */
static void
{
	char		**args;
	char		**env;
	const char	*errf;
	int		serrno;

	/*
	 * If wrappers checks fail, pretend the method was exec'd and
	 * failed.
	 */
	if (!tcp_wrappers_ok(instance))
	}

	/*
	 * Revert the disposition of handled signals and ignored signals to
	 * their defaults, unblocking any blocked ones as a side effect.
	 */

	/*
	 * Setup exec arguments. Do this before the fd setup below, so our
	 * logging related file fd doesn't get taken over before we call
	 * expand_address().
	 */
	} else {
	}

	/* Generate audit trail for start operations */
		struct sockaddr_storage ss;

		    == NULL) {
			"the %s method of instance %s"),
		}
		/*
		 * The inetd_connect audit record consists of:
		 *	Service name
		 *	Execution path
		 *	Remote address and port
		 *	Local port
		 *	Process privileges
		 */
		} else {
			int i;

			for (i = 0; i < 4; ++i)
		}
		    &sslen) == 0)
			privset = priv_allocset();
			}
		}
	}

	/*
	 * Set method context before the fd setup below so we can output an
	 * error message if it fails.
	 */
		const char *msg;

		if (errno == -1) {
				"for the %s method of instance %s");
				"control for the %s method of instance %s");
				"instance %s to a pool due to a system "
				"error");
			} else {
				assert(0);
				abort();
			}
		}
			switch (errno) {
			case ENOENT:
				"for the %s method of instance %s");
				break;
			case EBADF:
				"instance %s to a pool due to invalid "
				"configuration");
				break;
			case EINVAL:
				"instance %s to a pool due to invalid "
				"pool name");
				break;
			default:
				assert(0);
				abort();
			}
		}
			"%s method of instance %s (%s: %s)"),
		}
			switch (errno) {
			case ENOMEM:
				"method of instance %s (out of memory)");
				break;
			case ENOENT:
				"method of instance %s (no passwd or shadow "
				"entry for user)");
				break;
			default:
				assert(0);
				abort();
			}
		}

	/* let exec() free mthd_ctxt */

	/* setup standard fds */
	} else {
		(void) close(STDIN_FILENO);
	}
	do {
	}

	/* start up logging again to report the error */
	msg_init();
	gettext("Failed to exec %s method of instance %s: %s"),

	/*
	 * We couldn't exec the start method for a wait type service.
	 * Eat up data from the endpoint, so that hopefully the
	 * service's fd won't wake poll up on the next time round
	 * event_loop(). This behavior is carried over from the old
	 * inetd, and it seems somewhat arbitrary that it isn't
	 * also done in the case of fork failures; but I guess
	 * it assumes an exec failure is less likely to be the result
	 * of a resource shortage, and is thus not worth retrying.
	 */
	}
}
/*
 * NOTE(review): maps an instance method to the restarter_error_t passed to
 * the master restarter: IM_REFRESH yields RERR_REFRESH, every other method
 * yields RERR_RESTART; an unknown method aborts. The line carrying this
 * function's name and parameter (between "static restarter_error_t" and
 * "{") was elided during extraction -- from the switch it takes a 'method'
 * of the IM_* enum type; confirm the name against the complete file.
 */
static restarter_error_t
{
	switch (method) {
	case IM_OFFLINE:
		return (RERR_RESTART);
	case IM_ONLINE:
		return (RERR_RESTART);
	case IM_DISABLE:
		return (RERR_RESTART);
	case IM_REFRESH:
		return (RERR_REFRESH);
	case IM_START:
		return (RERR_RESTART);
	}
	abort();
	/* NOTREACHED */
}
/*
 * Runs the specified method of the specified service instance.
 * If the method was never specified, we handle it the same as if the
 * method was called and returned success, carrying on any transition the
 * instance may be in the midst of.
 * If the method isn't executable in its specified profile or an error occurs
 * forking a process to run the method in the function returns -1.
 * If a method binary is successfully executed, the function switches the
 * instance's cur state to the method's associated 'run' state and the next
 * state to the methods associated next state.
 * Returns -1 if there's an error before forking, else 0.
 *
 * NOTE(review): the body is incomplete in this chunk -- the parameters
 * before "const proto_info_t *start_info", the fork() call that sets
 * child_pid, the prefork_failure label, and the condition heads for most
 * branches were elided during extraction. Annotations cover only the
 * surviving lines.
 */
int
    const proto_info_t *start_info)
{
	const char	*errstr;
	int		sig;
	int		ret;
	int		serrno;

	debug_msg("Entering run_method, instance: %s, method: %s",

	/*
	 * Don't bother updating the instance's state for the start method
	 * as there isn't a separate start method state.
	 */
		/*
		 * An unspecified method. Since the absence of this method
		 * must be valid (otherwise it would have been caught
		 * during configuration validation), simply pretend the method
		 * ran and returned success.
		 */
		return (0);
	}

	/* Handle special method tokens, not allowed on start */
		/* :true means nothing should be done */
			return (0);
		}

		/* Carry out contract assassination */
			/* ENOENT means we didn't find any contracts */
				"to contracts of instance %s: %s"), sig,
				goto prefork_failure;
			} else {
				return (0);
			}
		}

		/* Carry out process assassination */
				ret = IMRET_SUCCESS;
				ret = IMRET_FAILURE;
					"start process of instance %s: %s"),
				}
			}
			return (0);
		}
	}

	/*
	 * Get the associated method context before the fork so we can
	 * modify the instances state if things go wrong.
	 */
		goto prefork_failure;
	}

	/*
	 * Perform some basic checks before we fork to limit the possibility
	 * of exec failures, so we can modify the instance state if necessary.
	 */
		goto prefork_failure;
	}

	if (contract_prefork() == -1)
		goto prefork_failure;
	switch (child_pid) {
	case -1:
		"Unable to fork %s method of instance %s: %s"),
		goto prefork_failure;
	case 0:			/* child */
		/* NOTREACHED */
	default:		/* parent */
		if (get_latest_contract(&cid) < 0)
			cid = -1;

		/*
		 * Register this method so its termination is noticed and
		 * the state transition this method participates in is
		 * continued.
		 */
			/*
			 * Since we will never find out about the termination
			 * of this method, if it's a non-start method treat
			 * is as a failure so we don't block restarter event
			 * processing on it whilst it languishes in a method
			 * running state.
			 */
		}

		/* do tcp tracing for those nowait instances that request it */
			char buf[INET6_ADDRSTRLEN];

			    sizeof (buf)),
			}
		}
	return (0);
	}

	/*
	 * Only place a start method in maintenance if we're sure
	 * that the failure was non-transient.
	 */
		if (!trans_failure) {
		}
	} else {
		/* treat the failure as if the method ran and failed */
	}
	return (-1);
}
/*
 * NOTE(review): accepts an incoming connection for an instance, recording
 * the peer in instance->remote_addr (per the fragment below), and returns
 * the new fd or a negative value on failure. The line carrying this
 * function's name and parameters (between "static int" and "{") and the
 * accept()/tlx accept calls were elided during extraction; the name is
 * taken from the debug trace.
 */
static int
{
	int		fd;

	debug_msg("Entering accept_connection");
		    &(instance->remote_addr));
	} else {
		if (fd < 0)
	}
	return (fd);
}
/*
 * Handle an incoming connection request for a nowait service.
 * This involves accepting the incoming connection on a new fd. Connection
 * rate checks are then performed, transitioning the service to the
 * conrate offline state if these fail. Otherwise, the service's start method
 * is run (performing TCP wrappers checks if applicable as we do), and on
 * success concurrent copies checking is done, transitioning the service to the
 * copies offline state if this fails.
 *
 * NOTE(review): the body is incomplete in this chunk -- the parameter list,
 * the accept_connection() call, the run_method() invocation, the audit
 * calls that the dangling fragments belong to, and several condition heads
 * were elided during extraction.
 */
static void
{
	int		ret;

	debug_msg("Entering process_nowait_req");

	/* accept nowait service connections on a new fd */
		/*
		 * Failed accept. Return and allow the event loop to initiate
		 * another attempt later if the request is still present.
		 */
		return;
	}

	/*
	 * Limit connection rate of nowait services. If either conn_rate_max
	 * or conn_rate_offline are <= 0, no connection rate limit checking
	 * is done. If the configured rate is exceeded, the instance is taken
	 * to the connrate_offline state and a timer scheduled to try and
	 * bring the instance back online after the configured offline time.
	 */
		if (instance->conn_rate_count++ == 0) {
		} else if (instance->conn_rate_count >
		    cfg->conn_rate_max) {
		} else {
				/* Generate audit record */
				    ADT_inetd_ratelimit)) == NULL) {
					"rate limit audit event"));
				} else {
					/*
					 * The inetd_ratelimit audit
					 * record consists of:
					 *	Service name
					 *	Connection rate limit
					 */
				}
				"Instance %s has exceeded its configured "
				"connection rate, additional connections "
				"will not be accepted for %d seconds"),
				instance->conn_rate_count = 0;
				return;
			}
		}
	}

		return;

	/*
	 * Limit concurrent connections of nowait services.
	 */
	if (copies_limit_exceeded(instance)) {
		/* Generate audit record */
		    == NULL) {
			"audit event"));
		} else {
			/*
			 * The inetd_copylimit audit record consists of:
			 *	Service name
			 *	Copy limit
			 */
			    cfg->max_copies);
		}
		"configured copies, no new connections will be accepted"),
	}
}
/*
 * Handle an incoming request for a wait type service.
 * Failure rate checking is done first, taking the service to the maintenance
 * state if the checks fail. Following this, the service's start method is run,
 * and on success, we stop listening for new requests for this service.
 *
 * NOTE(review): the body is incomplete in this chunk -- the parameter list,
 * the run_method() call that sets 'ret', the audit calls the dangling
 * fragments belong to, and several condition heads were elided during
 * extraction.
 */
static void
{
	int			ret;

	debug_msg("Entering process_wait_request");

	/*
	 * Detect broken servers and transition them to maintenance. If a
	 * wait type service exits without accepting the connection or
	 * consuming (reading) the datagram, that service's descriptor will
	 * select readable again, and inetd will fork another instance of
	 * the server. If either wait_fail_cnt or wait_fail_interval are <= 0,
	 * no failure rate detection is done.
	 */
		if (instance->fail_rate_count++ == 0) {
		} else {
				/* Generate audit record */
				    ADT_inetd_failrate)) == NULL) {
					"failure rate audit event"));
				} else {
					/*
					 * The inetd_failrate audit record
					 * consists of:
					 *	Service name
					 *	Failure rate
					 *	Interval
					 * Last two are expressed as k=v pairs
					 * in the values field.
					 */
					    "limit=%lld,interval=%d",
				}
				"Instance %s has exceeded its configured "
				"failure rate, transitioning to "
				instance->fail_rate_count = 0;
				return;
			}
		}
	}

	if (ret == 0) {
		/*
		 * Stop listening for connections now we've fired off the
		 * server for a wait type instance.
		 */
	}
}
/*
 * Process any networks requests for each proto for each instance.
 *
 * NOTE(review): the body is incomplete in this chunk -- the loops over
 * instances and their protos, the poll-revents checks, and the dispatch
 * to the wait/nowait request handlers were elided during extraction; only
 * the state filter below survives.
 */
void
process_network_events(void)
{
	debug_msg("Entering process_network_events");

		/*
		 * Ignore instances in states that definitely don't have any
		 * listening fds.
		 */
		switch (instance->cur_istate) {
		case IIS_ONLINE:
		case IIS_DEGRADED:
		case IIS_IN_REFRESH_METHOD:
			break;
		default:
			continue;
		}
				} else {
				}
			}
		}
	}
}
/* ARGSUSED0 */
static void
sigterm_handler(int sig)
{
debug_msg("Entering sigterm_handler");
}
/* ARGSUSED0 */
static void
sighup_handler(int sig)
{
debug_msg("Entering sighup_handler");
}
/*
 * inetd's major work loop. This function sits in poll waiting for events
 * to occur, processing them when they do. The possible events are
 * network events.
 * The loop is exited when a stop request is received and processed, and
 * all the instances have reached a suitable 'stopping' state.
 *
 * NOTE(review): the body is incomplete in this chunk -- the poll() call
 * that sets 'pret', the timeout calculation, the fd checks guarding the
 * UDS/restarter processing, the 'check_if_stopped' label targeted by the
 * goto below, and the instance-iteration heads were elided during
 * extraction.
 */
static void
event_loop(void)
{
	int	timeout;

	debug_msg("Entering event_loop");

	for (;;) {
		int	pret = -1;

		if (!got_sigterm && !refresh_inetd_requested) {
				continue;
			}
		}

		if (got_sigterm) {
			msg_fini();
			inetd_stop();
			goto check_if_stopped;
		}

		/*
		 * Socket.
		 */
			while (process_uds_event() == 0)
				;
		}

		/*
		 * Process refresh request. We do this check after the UDS
		 * event check above, as it would be wasted processing if we
		 * started refreshing inetd based on a SIGHUP, and then were
		 * told to shut-down via a UDS event.
		 */
		if (refresh_inetd_requested) {
			if (!inetd_stopping)
		}

		/*
		 * We were interrupted by a signal. Don't waste any more
		 * time processing a potentially inaccurate poll return.
		 */
		if (pret == -1)
			continue;

		/*
		 * Process any instance restarter events.
		 */
			while (process_restarter_event() == 0)
				;
		}

		/*
		 * Process any expired timers (bind retry, con-rate offline,
		 * method timeouts).
		 */
		(void) iu_expire_timers(timer_queue);

		/*
		 * If inetd is stopping, check whether all our managed
		 * instances have been stopped and we can return.
		 */
		if (inetd_stopping) {
				if (!instance_stopped(instance)) {
					debug_msg("%s not yet stopped",
					break;
				}
			}
			/* if all instances are stopped, return */
				return;
		}
	}
}
/*
 * Tear down inetd's subsystems in preparation for exit: methods, the UDS,
 * the timer queue, the instance list/pool, TLX, config, repository values,
 * poll state, and the audit session.
 *
 * NOTE(review): the body is incomplete in this chunk -- the
 * iu_tq_destroy-style call under the timer_queue check, the instance-list
 * destruction statements, and the condition heads for the NULL checks were
 * elided during extraction.
 */
static void
fini(void)
{
	debug_msg("Entering fini");

	method_fini();
	uds_fini();
	if (timer_queue != NULL)

	/*
	 * We don't bother to undo the restarter interface at all.
	 * Because of quirks in the interface, there is no way to
	 * disconnect from the channel and cause any new events to be
	 * queued. However, any events which are received and not
	 * acknowledged will be re-sent when inetd restarts as long as inetd
	 * uses the same subscriber ID, which it does.
	 *
	 * By keeping the event pipe open but ignoring it, any events which
	 * occur will cause restarter_event_proxy to hang without breaking
	 * anything.
	 */

	if (instance_list != NULL) {
		    NULL)
	}
	if (instance_pool != NULL)
	tlx_fini();
	config_fini();
	repval_fini();
	poll_fini();

	/* Close audit session */
	(void) adt_end_session(audit_handle);
}
/*
 * Initialize inetd's subsystems: repository values, configuration, TLX,
 * the instance pool/list, the restarter event pipe and registration,
 * contracts, the UDS, methods, auditing, and signal dispositions.
 * Returns 0 on success; on any failure runs fini() and returns -1.
 *
 * NOTE(review): the body is incomplete in this chunk -- the
 * uu_list_pool_create/uu_list_create calls whose error branches survive,
 * the fcntl non-blocking setup implied by the pipe comment, the
 * restarter_bind_handle call ending at "&rst_event_handle", the 'failed'
 * label preceding the fini() call, and several condition heads were elided
 * during extraction.
 */
static int
init(void)
{
	int err;

	debug_msg("Entering init");

	if (repval_init() < 0)
		goto failed;

	if (config_init() < 0)
		goto failed;

	if (tlx_init() < 0)
		goto failed;

	/* Setup instance list. */
	    UU_LIST_POOL_DEBUG)) == NULL) {
		error_msg("%s: %s",
		    gettext("Failed to create instance pool"),
		    uu_strerror(uu_error()));
		goto failed;
	}
		error_msg("%s: %s",
		    gettext("Failed to create instance list"),
		    uu_strerror(uu_error()));
		goto failed;
	}

	/*
	 * Create event pipe to communicate events with the main event
	 * loop and add it to the event loop's fdset.
	 */
	if (pipe(rst_event_pipe) < 0) {
		goto failed;
	}
	/*
	 * can't afford to block in the main thread, yet need to in
	 * the restarter event thread, so it can sit and wait for an
	 * acknowledgement to be written to the pipe.
	 */
		goto failed;

	/*
	 * Register with master restarter for managed service events. This
	 * will fail, amongst other reasons, if inetd is already running.
	 */
	    &rst_event_handle)) != 0) {
		"Failed to register for restarter events: %s"),
		goto failed;
	}

	if (contract_init() < 0)
		goto failed;
		goto failed;
	}

	if (uds_init() < 0)
		goto failed;

	if (method_init() < 0)
		goto failed;

	/* Initialize auditing session */
	}

	/*
	 * Initialize signal dispositions/masks
	 */
	return (0);

	fini();
	return (-1);
}
/*
 * SMF start method: fork a daemon child which runs init() and the event
 * loop; the parent waits on a pipe for the child to report whether
 * initialization succeeded and maps that onto an SMF exit code
 * (SMF_EXIT_OK / SMF_EXIT_ERR_OTHER).
 *
 * NOTE(review): the body is incomplete in this chunk -- the pipe() and
 * fork() calls whose error branches survive, the parent's read from the
 * pipe that sets 'i', and the child's write of the init result were elided
 * during extraction.
 */
static int
start_method(void)
{
	int	i;
	int	pipe_fds[2];
	int	child;

	debug_msg("ENTERING START_METHOD:");

	/* Create pipe for child to notify parent of initialization success. */
		return (SMF_EXIT_ERR_OTHER);
	}

		return (SMF_EXIT_ERR_OTHER);
	} else if (child > 0) {			/* parent */

		/* Wait on child to return success of initialization. */
		    (i < 0)) {
			"Initialization failed, unable to start"));
			/*
			 * Batch all initialization errors as 'other' errors,
			 * resulting in retries being attempted.
			 */
			return (SMF_EXIT_ERR_OTHER);
		} else {
			return (SMF_EXIT_OK);
		}
	} else {				/* child */
		/*
		 * Perform initialization and return success code down
		 * the pipe.
		 */
		i = init();
		    (i < 0)) {
			exit(1);
		}

		(void) setsid();

		/*
		 * Log a message if the configuration file has changed since
		 * inetconv was last run.
		 */

		event_loop();

		fini();
		debug_msg("inetd stopped");
		msg_fini();
		exit(0);
	}
	/* NOTREACHED */
}
/*
 * When inetd is run from outside the SMF, this message is output to provide
 * the person invoking inetd with further information that will help them
 * understand how to start and stop inetd, and to achieve the other
 * behaviors achievable with the legacy inetd command line interface, if
 * it is possible.
 *
 * NOTE(review): the fprintf/printf call that carries the format string
 * below, and its arguments, were elided during extraction; the string
 * literals are preserved verbatim.
 */
static void
legacy_usage(void)
{
	    "inetd is now an smf(5) managed service and can no longer be run "
	    "from the\n"
	    "command line. To enable or disable inetd refer to svcadm(1M) on\n"
	    "how to enable \"%s\", the inetd instance.\n"
	    "\n"
	    "The traditional inetd command line option mappings are:\n"
	    "\t-d : there is no supported debug output\n"
	    "\t-s : inetd is only runnable from within the SMF\n"
	    "\t-t : See inetadm(1M) on how to enable TCP tracing\n"
	    "\t-r : See inetadm(1M) on how to set a failure rate\n"
	    "\n"
	    "To specify an alternative configuration file see svccfg(1M)\n"
	    "for how to modify the \"%s/%s\" string type property of\n"
	    "the inetd instance, and modify it according to the syntax:\n"
	    "\"%s [alt_config_file] %%m\".\n"
	    "\n"
	    "For further information on inetd see inetd(1M).\n",
}
/*
 * Usage message printed out for usage errors when running under the SMF.
 *
 * NOTE(review): the line carrying this function's name and parameters
 * (between "static void" and "{") and the body's output statement were
 * elided during extraction.
 */
static void
{
}
/*
 * Returns B_TRUE if we're being run from within the SMF, else B_FALSE.
 *
 * NOTE(review): the body is incomplete in this chunk -- the getenv() of
 * the instance FMRI variable implied by the comment, and the return
 * statement(s), were elided during extraction.
 */
static boolean_t
run_through_smf(void)
{
	char *fmri;

	/*
	 * check if the instance fmri environment variable has been set by
	 * our restarter.
	 */
}
/*
 * NOTE(review): this is the program entry point, but the line carrying the
 * name and parameter list (between "int" and "{") was elided during
 * extraction, along with the setlocale() call implied by textdomain(),
 * the argv handling in the argc switch, and the strcmp()-style dispatch
 * that selects start/stop/refresh below. The surviving lines show: reject
 * non-SMF invocation with legacy_usage()/SMF_EXIT_ERR_NOSMF, accept 2 or
 * 3 arguments (optional alternate config file plus method name), dispatch
 * to the method handler, and return its SMF exit code.
 */
int
{
	char		*method;
	int		ret;

#if !defined(TEXT_DOMAIN)
#define	TEXT_DOMAIN "SYS_TEST"
#endif
	(void) textdomain(TEXT_DOMAIN);

	if (!run_through_smf()) {
		legacy_usage();
		return (SMF_EXIT_ERR_NOSMF);
	}

	msg_init();	/* setup logging */

	/* inetd invocation syntax is inetd [alt_conf_file] method_name */
	switch (argc) {
	case 2:
		break;
	case 3:
		break;
	default:
		return (SMF_EXIT_ERR_CONFIG);
	}

		ret = start_method();
		ret = stop_method();
		ret = refresh_method();
	} else {
		return (SMF_EXIT_ERR_CONFIG);
	}

	return (ret);
}