/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* syseventd - The system event daemon
*
* This daemon dispatches event buffers received from the
* kernel to all interested SLM clients. SLMs in turn
* deliver the buffers to their particular application
* clients.
*/
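/*
 * Rough data flow through the daemon (all names below are defined in
 * this file):
 *
 *   kernel door upcall -> door_upcall() -> eventbuf[] ring
 *     -> dispatch_message() -> dispatch() -> per-client eventq
 *     -> client_deliver_event_thr() -> SLM deliver_event()
 *     -> event_completion_thr() frees the event in the kernel
 */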
#include <stdio.h>
#include <dirent.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <door.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <strings.h>
#include <unistd.h>
#include <synch.h>
#include <syslog.h>
#include <thread.h>
#include <libsysevent.h>
#include <limits.h>
#include <locale.h>
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>
#include <sys/modctl.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/wait.h>
#include "sysevent_signal.h"
#include "syseventd.h"
#include "message.h"
extern int insert_client(void *client, int client_type, int retry_limit);
extern void delete_client(int id);
extern void initialize_client_tbl(void);
extern struct sysevent_client *sysevent_client_tbl[];
extern mutex_t client_tbl_lock;
#define	DEBUG_LEVEL_FORK	9	/* will not fork when debug_level */
					/* >= DEBUG_LEVEL_FORK */

int debug_level = 0;
char *root_dir = "";			/* relative root for lock and door */

/* Maximum number of outstanding events dispatched */
#define	SE_EVENT_DISPATCH_CNT	100

static int upcall_door;			/* kernel event door */
static int door_upcall_retval;		/* kernel event posting return value */
static int fini_pending = 0;		/* fini pending flag */
static int deliver_buf = 0;		/* current event buffer from kernel */
static int dispatch_buf = 0;		/* current event buffer dispatched */
static sysevent_t **eventbuf;		/* global array of event buffers */
static struct ev_completion *event_compq;	/* event completion queue */
static mutex_t ev_comp_lock;		/* event completion queue lock */
static mutex_t err_mutex;		/* error logging lock */
static mutex_t door_lock;		/* sync door return access */
static rwlock_t mod_unload_lock;	/* sync module unloading */

/* declarations and definitions for avoiding multiple daemons running */
char local_lock_file[PATH_MAX + 1];
static int hold_daemon_lock;
static int daemon_lock_fd;
/*
* sema_eventbuf - guards against the global buffer eventbuf
* being written to before it has been dispatched to clients
*
* sema_dispatch - synchronizes between the kernel uploading thread
* (producer) and the userland dispatch_message thread (consumer).
*
* sema_resource - throttles outstanding event consumption.
*
* event_comp_cv - synchronizes threads waiting for the event completion queue
* to empty or become active.
*/
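/*
 * A sketch of how the primitives below interact (producer/consumer
 * view; all names are defined in this file):
 *
 *   door_upcall()           sema_trywait(&sema_eventbuf) -> copy into
 *                           eventbuf[] -> sema_post(&sema_dispatch)
 *   dispatch_message()      sema_wait(&sema_dispatch) ->
 *                           sema_wait(&sema_resource) -> dispatch()
 *   dispatch()              sema_post(&sema_eventbuf) once the buffer
 *                           has been copied for delivery
 *   event_completion_thr()  sema_post(&sema_resource) when an event is
 *                           fully delivered and freed
 */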
static sema_t sema_eventbuf;
static sema_t sema_dispatch;
static sema_t sema_resource;
static cond_t event_comp_cv;

/* Self-tuning concurrency level */
#define	MIN_CONCURRENCY_LEVEL	4
static int concurrency_level = MIN_CONCURRENCY_LEVEL;

/* SLM defines */
#define	MODULE_SUFFIX	".so"
#define	EVENT_INIT	"slm_init"
#define	EVENT_FINI	"slm_fini"

#define	SE_TIMEOUT	60	/* Client dispatch timeout (seconds) */

/* syslog message related */
static int logflag = 0;
static char *prog;
/* function prototypes */
static void dispatch_message(void);
static int dispatch(void);
static void event_completion_thr(void);
static void usage(void);
static void syseventd_init(void);
static void syseventd_fini(int sig);
static pid_t enter_daemon_lock(void);
static void exit_daemon_lock(void);
static void
usage() {
	(void) fprintf(stderr, "usage: syseventd [-d <debug_level>] "
	    "[-r <root_dir>]\n");
	exit(2);
}
/* common exit function which ensures releasing locks */
void
syseventd_exit(int status)
{
	if (hold_daemon_lock) {
		exit_daemon_lock();
	}

	exit(status);
}
/*
* hup_handler - SIGHUP handler. SIGHUP is used to force a reload of
* all SLMs. During fini, events are drained from all
* client event queues. The events that have been consumed
* by all clients are freed from the kernel event queue.
*
* Events that have not yet been delivered to all clients
* are not freed and will be replayed after all SLMs have
* been (re)loaded.
*
* After all client event queues have been drained, each
* SLM client is unloaded. The init phase will (re)load
* each SLM and initiate event replay and delivery from
* the kernel.
*
*/
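/*
 * For example, a module reload can be forced from a root shell with
 *	# pkill -HUP syseventd
 * which runs syseventd_fini() followed by syseventd_init() below.
 */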
/*ARGSUSED*/
static void
hup_handler(int sig)
{
	syseventd_err_print(SIGHUP_CAUGHT);
	(void) fflush(0);
	syseventd_fini(sig);
	syseventd_init();
	syseventd_err_print(DAEMON_RESTARTED);
	(void) fflush(0);
}
/*
* Fault handler for other signals caught
*/
/*ARGSUSED*/
static void
flt_handler(int sig)
{
	char signame[SIG2STR_MAX];

	if (sig2str(sig, signame) == -1) {
		syseventd_err_print(UNKNOWN_SIGNAL_CAUGHT, sig);
	}

	(void) se_signal_sethandler(sig, SIG_DFL, NULL);

	switch (sig) {
case SIGINT:
case SIGSTOP:
case SIGTERM:
/* Close kernel door */
(void) door_revoke(upcall_door);
		/* Gracefully exit current event delivery threads */
		syseventd_fini(sig);

		(void) fflush(0);
(void) se_signal_unblockall();
syseventd_exit(1);
/*NOTREACHED*/
case SIGCLD:
case SIGPWR:
case SIGWINCH:
case SIGURG:
case SIGCONT:
case SIGWAITING:
case SIGLWP:
case SIGFREEZE:
case SIGTHAW:
case SIGCANCEL:
case SIGXRES:
case SIGJVM1:
case SIGJVM2:
case SIGINFO:
/* No need to abort */
break;
default:
abort();
}
}
/*
* Daemon parent process only.
* Child process signal to indicate successful daemon initialization.
* This is the normal and expected exit path of the daemon parent.
*/
/*ARGSUSED*/
static void
sigusr1(int sig)
{
syseventd_exit(0);
}
static void
sigwait_thr()
{
	int	sig;
	int	err;
	sigset_t signal_set;

	for (;;) {
		(void) sigfillset(&signal_set);
		err = sigwait(&signal_set, &sig);
		if (err) {
			syseventd_exit(2);
		}

		/*
		 * Block all signals until the signal handler completes
		 */
		(void) se_signal_blockall();
		if (sig == SIGHUP) {
			hup_handler(sig);
		} else {
			flt_handler(sig);
		}
		(void) se_signal_unblockall();
	}
	/* NOTREACHED */
}
static void
set_root_dir(char *dir)
{
	root_dir = malloc(strlen(dir) + 1);
	if (root_dir == NULL) {
		syseventd_err_print("unable to set root dir - %s\n",
		    strerror(errno));
		syseventd_exit(2);
	}
	(void) strcpy(root_dir, dir);
}
int
main(int argc, char **argv)
{
	int i, c;
	int fd;
	pid_t pid;
	int has_forked = 0;
	extern char *optarg;

	(void) setlocale(LC_ALL, "");
	(void) textdomain(TEXT_DOMAIN);

	if (getuid() != 0) {
		(void) fprintf(stderr, "Must be root to run syseventd\n");
		syseventd_exit(1);
	}

	if (argc > 5) {
		usage();
	}

	if ((prog = strrchr(argv[0], '/')) == NULL) {
		prog = argv[0];
	} else {
		prog++;
	}
	while ((c = getopt(argc, argv, "d:r:")) != EOF) {
		switch (c) {
		case 'd':
			debug_level = atoi(optarg);
			break;
		case 'r':
			/*
			 * Private flag for suninstall to run
			 * daemon during install.
			 */
			set_root_dir(optarg);
			break;
case '?':
default:
usage();
}
}
	/* daemonize ourselves */
	if (debug_level < DEBUG_LEVEL_FORK) {
		sigset_t mask;

		(void) sigset(SIGUSR1, sigusr1);
		(void) sigemptyset(&mask);
		(void) sigaddset(&mask, SIGUSR1);
		(void) sigprocmask(SIG_BLOCK, &mask, NULL);

		if ((pid = fork()) == (pid_t)-1) {
			(void) fprintf(stderr,
			    "syseventd: fork failed - %s\n", strerror(errno));
			syseventd_exit(1);
		}
if (pid != 0) {
/*
* parent
* handshake with the daemon so that dependents
* of the syseventd service don't start up until
* the service is actually functional
*/
			int status;

			(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);

			if (waitpid(pid, &status, 0) != pid) {
				/*
* child process signal indicating
* successful daemon initialization
*/
syseventd_exit(0);
}
/* child exited implying unsuccessful startup */
syseventd_exit(1);
}
/* child */
		has_forked = 1;
		(void) sigset(SIGUSR1, SIG_DFL);
		(void) sigprocmask(SIG_UNBLOCK, &mask, NULL);
(void) chdir("/");
(void) setsid();
if (debug_level <= 1) {
			closefrom(0);
			fd = open("/dev/null", 0);
			(void) dup2(fd, 1);
			(void) dup2(fd, 2);
			logflag = 1;
}
}
"syseventd started, debug level = %d\n", debug_level);
/* only one instance of syseventd can run at a time */
"event daemon pid %ld already running\n", pid);
exit(3);
}
/* initialize semaphores and eventbuf */
	(void) sema_init(&sema_eventbuf, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) sema_init(&sema_dispatch, 0, USYNC_THREAD, NULL);
	(void) sema_init(&sema_resource, SE_EVENT_DISPATCH_CNT,
	    USYNC_THREAD, NULL);
	(void) cond_init(&event_comp_cv, USYNC_THREAD, NULL);
	eventbuf = (sysevent_t **)calloc(SE_EVENT_DISPATCH_CNT,
	    sizeof (sysevent_t *));
	if (eventbuf == NULL) {
		syseventd_print(1, "Unable to allocate event buffer array\n");
		exit(2);
	}
for (i = 0; i < SE_EVENT_DISPATCH_CNT; ++i) {
"buffers\n");
exit(2);
}
}
	(void) mutex_init(&client_tbl_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&ev_comp_lock, USYNC_THREAD, NULL);
	(void) mutex_init(&door_lock, USYNC_THREAD, NULL);
	(void) rwlock_init(&mod_unload_lock, USYNC_THREAD, NULL);

	event_compq = NULL;
/*
 * Block all signals to all threads, including the main thread.
* The sigwait_thr thread will process any signals and initiate
* a graceful recovery if possible.
*/
if (se_signal_blockall() < 0) {
syseventd_exit(2);
}
	if (thr_create(NULL, NULL, (void *(*)(void *))dispatch_message,
	    (void *)0, 0, NULL) < 0) {
syseventd_exit(2);
}
	if (thr_create(NULL, NULL,
	    (void *(*)(void *))event_completion_thr, NULL,
	    THR_BOUND, NULL) != 0) {
syseventd_exit(2);
}
	/* Create signal catching thread */
	if (thr_create(NULL, NULL, (void *(*)(void *))sigwait_thr,
	    NULL, 0, NULL) < 0) {
		syseventd_exit(2);
}
	/* Initialize and load SLM clients */
	initialize_client_tbl();
	syseventd_init();
/* signal parent to indicate successful daemon initialization */
if (has_forked) {
"signal to the parent failed - %s\n",
syseventd_exit(2);
}
}
for (;;) {
(void) pause();
}
/* NOTREACHED */
return (0);
}
/*
* door_upcall - called from the kernel via kernel sysevent door
* to upload event(s).
*
* This routine should never block. If resources are
* not available to immediately accept the event buffer
* EAGAIN is returned to the kernel.
*
* Once resources are available, the kernel is notified
* via a modctl interface to resume event delivery to
* syseventd.
*
*/
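/*
 * Busy-case sketch: when no eventbuf slot is free, EAGAIN is returned
 * and remembered in door_upcall_retval; once dispatch_message() frees
 * a slot it resumes kernel delivery with
 *	modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0);
 */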
/*ARGSUSED*/
static void
door_upcall(void *cookie, char *args, size_t alen,
    door_desc_t *ddp, uint_t ndid)
{
	sysevent_t *ev;
int rval;
(void) mutex_lock(&door_lock);
	if (args == NULL) {
		rval = EINVAL;
	} else if (sema_trywait(&sema_eventbuf)) {
		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;
		syseventd_print(2, "door_upcall: busy event %llx "
		    "retry\n", sysevent_get_seq(ev));
		rval = door_upcall_retval = EAGAIN;
} else {
/*
* Copy received message to local buffer.
*/
		size_t size;

		ev = (sysevent_t *)
		    &((log_event_upcall_arg_t *)(void *)args)->buf;
		size = sysevent_get_size(ev) > LOGEVENT_BUFSIZE ?
		    LOGEVENT_BUFSIZE : sysevent_get_size(ev);
		(void) bcopy(ev, eventbuf[deliver_buf], size);
		deliver_buf = (deliver_buf + 1) % SE_EVENT_DISPATCH_CNT;
		rval = 0;
(void) sema_post(&sema_dispatch);
}
(void) mutex_unlock(&door_lock);
	/*
	 * Filling in return values for door_return
	 */
	(void) door_return((void *)&rval, sizeof (rval), NULL, 0);
	(void) door_return(NULL, 0, NULL, 0);
}
/*
* dispatch_message - dispatch message thread
 * This thread spins until an event buffer is
 * delivered from the kernel.
*
* It will wait to dispatch an event to any clients
* until adequate resources are available to process
* the event buffer.
*/
static void
dispatch_message(void)
{
int error;
for (;;) {
/*
* Spin till a message comes
*/
while (sema_wait(&sema_dispatch) != 0) {
"dispatch_message: sema_wait failed\n");
(void) sleep(1);
}
/*
* Wait for available resources
*/
while (sema_wait(&sema_resource) != 0) {
"failed\n");
(void) sleep(1);
}
/*
* Client dispatch
*/
		do {
			error = dispatch();
		} while (error == EAGAIN);

		dispatch_buf = (dispatch_buf + 1) % SE_EVENT_DISPATCH_CNT;
/*
* kernel received a busy signal -
* kickstart the kernel delivery thread
* door_lock blocks the kernel so we hold it for the
* shortest time possible.
*/
(void) mutex_lock(&door_lock);
"door_upcall_retval = %d\n",
door_upcall_retval = 0;
}
(void) mutex_unlock(&door_lock);
}
/* NOTREACHED */
}
/*
* drain_eventq - Called to drain all pending events from the client's
* event queue.
*/
static void
drain_eventq(struct sysevent_client *scp, int status)
{
	struct event_dispatch_pkg *d_pkg;
	struct event_dispatchq *eventq, *eventq_next;

	syseventd_print(3, "Draining eventq for client %d\n",
	    scp->client_num);

	eventq = scp->eventq;
	while (eventq) {
		/*
		 * Mark all dispatched events as completed, but indicate the
		 * error status
		 */
		d_pkg = eventq->d_pkg;
		d_pkg->completion_status = status;
		d_pkg->completion_state = SE_COMPLETE;
		(void) sema_post(d_pkg->completion_sema);

		eventq_next = eventq->next;
		free(eventq);
		eventq = eventq_next;
		scp->eventq = eventq;
	}
}
/*
* client_deliver_event_thr - Client delivery thread
* This thread will process any events on this
* client's eventq.
*/
static void
client_deliver_event_thr(void *arg)
{
	int i, error = 0;
	int flag;
	sysevent_t *ev;
	hrtime_t now;
	module_t *mod;
	struct event_dispatchq *eventq;
	struct sysevent_client *scp;
	struct event_dispatch_pkg *d_pkg;

	scp = (struct sysevent_client *)arg;
	mod = (module_t *)scp->client_data;

	(void) mutex_lock(&scp->client_lock);
	for (;;) {
		while (scp->eventq == NULL) {
			/*
			 * Client has been suspended or unloaded, go no
			 * further.
			 */
			if (fini_pending) {
				scp->client_flags &= ~SE_CLIENT_THR_RUNNING;
				syseventd_print(3, "Client %d delivery thread "
				    "exiting flags: 0X%x\n",
				    scp->client_num, scp->client_flags);
				(void) mutex_unlock(&scp->client_lock);
				return;
			}

			(void) cond_wait(&scp->client_cv, &scp->client_lock);
		}

		/*
		 * Process events from the head of the eventq, eventq is locked
		 * going into the processing.
		 */
		eventq = scp->eventq;
		while (eventq != NULL) {
			d_pkg = eventq->d_pkg;
			d_pkg->completion_state = SE_OUTSTANDING;
			scp->eventq = eventq->next;
			free(eventq);
			(void) mutex_unlock(&scp->client_lock);

			flag = error = 0;
			ev = d_pkg->ev;

			syseventd_print(3, "Start delivery for client %d "
			    "with retry count %d\n",
			    scp->client_num, d_pkg->retry_count);

			/*
			 * Retry limit has been reached by this client,
			 * indicate that no further retries are allowed
			 */
			for (i = 0; i <= scp->retry_limit; ++i) {
				if (i == scp->retry_limit)
					flag = SE_NO_RETRY;

				/* Start the clock for the event delivery */
				d_pkg->start_time = gethrtime();

				error = mod->deliver_event(ev, flag);

				/* Can not allow another retry */
				if (i == scp->retry_limit)
					error = 0;

				/* Stop the clock */
				now = gethrtime();

				/*
				 * Suspend event processing and drain the
				 * event q for latent clients
				 */
				if (now - d_pkg->start_time >
				    ((hrtime_t)SE_TIMEOUT * NANOSEC)) {
					syseventd_print(1, "Unresponsive "
					    "client %d: Draining eventq and "
					    "suspending event delivery\n",
					    scp->client_num);
					(void) mutex_lock(&scp->client_lock);
					scp->client_flags &=
					    ~SE_CLIENT_THR_RUNNING;
					scp->client_flags |=
					    SE_CLIENT_SUSPENDED;

					/* Cleanup current event */
					d_pkg->completion_status = EFAULT;
					d_pkg->completion_state = SE_COMPLETE;
					(void) sema_post(
					    d_pkg->completion_sema);

					/*
					 * Drain the remaining events from the
					 * queue.
					 */
					drain_eventq(scp, EINVAL);
					(void) mutex_unlock(&scp->client_lock);
					return;
				}

				/* Event delivery retry requested */
				if (fini_pending || error != EAGAIN) {
					break;
				} else {
					(void) sleep(SE_RETRY_TIME);
				}
			}

			(void) mutex_lock(&scp->client_lock);
			d_pkg->completion_status = error;
			d_pkg->completion_state = SE_COMPLETE;
			(void) sema_post(d_pkg->completion_sema);
			syseventd_print(3, "Completed delivery with "
			    "error %d\n", error);
			eventq = scp->eventq;
		}

		syseventd_print(3, "No more events to process for client %d\n",
		    scp->client_num);

		/* Return if this was a synchronous delivery */
		if (!SE_CLIENT_IS_THR_RUNNING(scp)) {
			(void) mutex_unlock(&scp->client_lock);
			return;
		}
	}
}
/*
* client_deliver_event - Client specific event delivery
* This routine will allocate and initialize the
 * necessary per-client dispatch data.
*
* If the eventq is not empty, it may be assumed that
* a delivery thread exists for this client and the
* dispatch data is appended to the eventq.
*
* The dispatch package is freed by the event completion
* thread (event_completion_thr) and the eventq entry
* is freed by the event delivery thread.
*/
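/*
 * Allocation/ownership summary: dispatch() allocates the dispatch
 * queue entry, client_deliver_event() allocates the dispatch package;
 * the per-client eventq entry is freed in client_deliver_event_thr()
 * and the package in event_completion_thr().
 */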
static struct event_dispatch_pkg *
client_deliver_event(struct sysevent_client *scp, sysevent_t *ev,
    sema_t *completion_sema)
{
	struct event_dispatchq *newq, *tmp;
	struct event_dispatch_pkg *d_pkg;

	if (debug_level == 9) {
		se_print(ev);
	}

	/*
	 * Check for suspended client
	 */
	(void) mutex_lock(&scp->client_lock);
	if (SE_CLIENT_IS_SUSPENDED(scp) || !SE_CLIENT_IS_THR_RUNNING(scp)) {
		(void) mutex_unlock(&scp->client_lock);
		return (NULL);
	}

	/*
	 * Allocate a new dispatch package and eventq entry
	 */
	newq = (struct event_dispatchq *)malloc(
	    sizeof (struct event_dispatchq));
	if (newq == NULL) {
		(void) mutex_unlock(&scp->client_lock);
		return (NULL);
	}
	d_pkg = (struct event_dispatch_pkg *)malloc(
	    sizeof (struct event_dispatch_pkg));
	if (d_pkg == NULL) {
		free(newq);
		(void) mutex_unlock(&scp->client_lock);
		return (NULL);
	}

	/* Initialize the dispatch package */
	d_pkg->scp = scp;
	d_pkg->retry_count = 0;
	d_pkg->completion_status = 0;
	d_pkg->completion_state = SE_NOT_DISPATCHED;
	d_pkg->completion_sema = completion_sema;
	d_pkg->ev = ev;
	newq->d_pkg = d_pkg;
	newq->next = NULL;

	if (scp->eventq != NULL) {
		/* Add entry to the end of the eventq */
		tmp = scp->eventq;
		while (tmp->next != NULL)
			tmp = tmp->next;
		tmp->next = newq;
	} else {
		/* event queue empty, wakeup delivery thread */
		scp->eventq = newq;
		(void) cond_signal(&scp->client_cv);
	}
	(void) mutex_unlock(&scp->client_lock);

	return (d_pkg);
}
/*
* event_completion_thr - Event completion thread. This thread routine
 * waits for all client delivery threads to complete
* delivery of a particular event.
*/
static void
event_completion_thr()
{
	int ret, i, client_count, ok_to_free;
	sysevent_id_t eid;
	struct sysevent_client *scp;
	struct ev_completion *ev_comp;
	struct event_dispatchq *dispatchq;
	struct event_dispatch_pkg *d_pkg;

	(void) mutex_lock(&ev_comp_lock);
	for (;;) {
		while (event_compq == NULL) {
			(void) cond_wait(&event_comp_cv, &ev_comp_lock);
		}

		/*
		 * Process event completions from the head of the
		 * completion queue
		 */
		ev_comp = event_compq;
		while (ev_comp) {
			(void) mutex_unlock(&ev_comp_lock);
			eid.eid_seq = sysevent_get_seq(ev_comp->ev);
			sysevent_get_time(ev_comp->ev, &eid.eid_ts);
			client_count = ev_comp->client_count;
			ok_to_free = 1;

			syseventd_print(3, "Wait for event completion of "
			    "event 0X%llx on %d clients\n",
			    eid.eid_seq, client_count);

			while (client_count) {
				syseventd_print(9, "Waiting for %d clients on "
				    "event id 0X%llx\n", client_count,
				    eid.eid_seq);
				(void) sema_wait(&ev_comp->client_sema);
				--client_count;
			}

			dispatchq = ev_comp->dispatch_list;
			while (dispatchq != NULL) {
				struct event_dispatchq *next;

				d_pkg = dispatchq->d_pkg;
				scp = d_pkg->scp;

				/* Don't free event buffers still in use */
				if (d_pkg->completion_status == EAGAIN)
					ok_to_free = 0;

				syseventd_print(4, "Delivery of 0X%llx "
				    "complete for client %d retry count %d "
				    "status %d\n", eid.eid_seq,
				    scp->client_num, d_pkg->retry_count,
				    d_pkg->completion_status);

				next = dispatchq->next;
				free(d_pkg);
				free(dispatchq);
				dispatchq = next;
			}

			if (ok_to_free) {
				for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
					if ((ret = modctl(MODEVENTS,
					    (uintptr_t)MODEVENTS_FREEDATA,
					    (uintptr_t)&eid, sizeof (eid),
					    NULL, 0)) != 0) {
						syseventd_print(1, "attempting "
						    "to free event 0X%llx\n",
						    eid.eid_seq);

						/*
						 * Kernel may need time to
						 * move this event buffer to
						 * the sysevent sent queue
						 */
						(void) sleep(1);
					} else {
						break;
					}
				}
				if (ret) {
					syseventd_print(1, "Unable to free "
					    "event 0X%llx from the "
					    "kernel\n", eid.eid_seq);
				}
			} else {
				syseventd_print(1, "Not freeing event "
				    "0X%llx\n", eid.eid_seq);
			}

			(void) mutex_lock(&ev_comp_lock);
			event_compq = ev_comp->next;
			free(ev_comp->ev);
			free(ev_comp);
			ev_comp = event_compq;
			(void) sema_post(&sema_resource);
		}

		/*
		 * Event completion queue is empty, signal possible unload
		 * operation
		 */
		(void) cond_signal(&event_comp_cv);
	}
}
/*
* dispatch - Dispatch the current event buffer to all valid SLM clients.
*/
static int
dispatch(void)
{
	int ev_sz, i, client_count = 0;
	sysevent_t *new_ev;
	sysevent_id_t eid;
	struct ev_completion *ev_comp, *tmp;
	struct event_dispatchq *dispatchq, *client_list;
	struct event_dispatch_pkg *d_pkg;
/* Check for module unload operation */
if (rw_tryrdlock(&mod_unload_lock) != 0) {
(void) sema_post(&sema_eventbuf);
(void) sema_post(&sema_resource);
return (0);
}
	eid.eid_seq = sysevent_get_seq(eventbuf[dispatch_buf]);
	sysevent_get_time(eventbuf[dispatch_buf], &eid.eid_ts);

	/*
	 * ev_comp is used to hold event completion data.  It is freed
	 * by the event completion thread (event_completion_thr).
	 */
	ev_comp = (struct ev_completion *)
	    malloc(sizeof (struct ev_completion));
	if (ev_comp == NULL) {
		(void) rw_unlock(&mod_unload_lock);
		return (EAGAIN);
	}
	ev_comp->dispatch_list = NULL;
	ev_comp->next = NULL;
	(void) sema_init(&ev_comp->client_sema, 0, USYNC_THREAD, NULL);
	ev_sz = sysevent_get_size(eventbuf[dispatch_buf]);
	new_ev = calloc(1, ev_sz);
	if (new_ev == NULL) {
		free(ev_comp);
		(void) rw_unlock(&mod_unload_lock);
		return (EAGAIN);
	}
/*
* For long messages, copy additional data from kernel
*/
if (ev_sz > LOGEVENT_BUFSIZE) {
int ret = 0;
/* Ok to release eventbuf for next event buffer from kernel */
(void) sema_post(&sema_eventbuf);
for (i = 0; i < MAX_MODCTL_RETRY; ++i) {
			if ((ret = modctl(MODEVENTS,
			    (uintptr_t)MODEVENTS_GETDATA,
			    (uintptr_t)&eid, (uintptr_t)ev_sz,
			    (uintptr_t)new_ev, 0)) == 0)
break;
else
(void) sleep(1);
}
		if (ret) {
			free(new_ev);
			free(ev_comp);
			(void) rw_unlock(&mod_unload_lock);
return (EAGAIN);
}
} else {
		(void) bcopy(eventbuf[dispatch_buf], new_ev, ev_sz);
		/* Ok to release eventbuf for next event buffer from kernel */
(void) sema_post(&sema_eventbuf);
}
/*
* Deliver a copy of eventbuf to clients so
* eventbuf can be used for the next message
 */
	ev_comp->ev = new_ev;
for (i = 0; i < MAX_SLM; ++i) {
/* Don't bother for suspended or unloaded clients */
if (!SE_CLIENT_IS_LOADED(sysevent_client_tbl[i]) ||
		    SE_CLIENT_IS_SUSPENDED(sysevent_client_tbl[i]))
			continue;
/*
* Allocate event dispatch queue entry. All queue entries
* are freed by the event completion thread as client
* delivery completes.
*/
		dispatchq = (struct event_dispatchq *)malloc(
		    sizeof (struct event_dispatchq));
		if (dispatchq == NULL) {
			syseventd_print(1, "Can not allocate dispatch q "
			    "for event id 0X%llx client %d\n", eid.eid_seq, i);
			continue;
}
		dispatchq->next = NULL;

		/* Initiate client delivery */
		d_pkg = client_deliver_event(sysevent_client_tbl[i],
		    new_ev, &ev_comp->client_sema);
		if (d_pkg == NULL) {
			syseventd_print(1, "Can not allocate dispatch "
			    "package for event id 0X%llx client %d\n",
			    eid.eid_seq, i);
			free(dispatchq);
			continue;
}
		dispatchq->d_pkg = d_pkg;
		++client_count;

		if (ev_comp->dispatch_list == NULL) {
			ev_comp->dispatch_list = dispatchq;
			client_list = dispatchq;
		} else {
			client_list->next = dispatchq;
			client_list = client_list->next;
		}
	}

	ev_comp->client_count = client_count;
(void) mutex_lock(&ev_comp_lock);
	if (event_compq == NULL) {
		event_compq = ev_comp;
		(void) cond_signal(&event_comp_cv);
	} else {
		/* Add entry to the end of the event completion queue */
		tmp = event_compq;
		while (tmp->next != NULL)
			tmp = tmp->next;
		tmp->next = ev_comp;
}
(void) mutex_unlock(&ev_comp_lock);
(void) rw_unlock(&mod_unload_lock);
return (0);
}
#define	MOD_DIR_NUM	3
static char dirname[MOD_DIR_NUM][PATH_MAX];

static char *
dir_num2name(int dirnum)
{
	char infobuf[PATH_MAX];

	if (dirnum >= MOD_DIR_NUM)
		return (NULL);

	if (dirname[0][0] == '\0') {
		if (sysinfo(SI_PLATFORM, infobuf, PATH_MAX) == -1)
			return (NULL);
		if (snprintf(dirname[0], sizeof (dirname[0]),
		    "%s/usr/platform/%s/lib/sysevent/modules/",
		    root_dir, infobuf) >= sizeof (dirname[0])) {
			syseventd_print(1, "dir_num2name: "
			    "platform name too long: %s\n",
			    infobuf);
			return (NULL);
		}
		if (sysinfo(SI_MACHINE, infobuf, PATH_MAX) == -1)
			return (NULL);
		if (snprintf(dirname[1], sizeof (dirname[1]),
		    "%s/usr/platform/%s/lib/sysevent/modules/",
		    root_dir, infobuf) >= sizeof (dirname[1])) {
			syseventd_print(1, "dir_num2name: "
			    "machine name too long: %s\n",
			    infobuf);
			return (NULL);
		}
		(void) snprintf(dirname[2], sizeof (dirname[2]), "%s%s",
		    root_dir, "/usr/lib/sysevent/modules/");
	}

	return (dirname[dirnum]);
}
/*
* load_modules - Load modules found in the common syseventd module directories
* Modules that do not provide valid interfaces are rejected.
*/
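/*
 * Illustrative SLM skeleton (a sketch, not part of the daemon; it
 * assumes the slm_mod_ops layout from libsysevent):
 *
 *	static int
 *	my_deliver_event(sysevent_t *ev, int flag)
 *	{
 *		return (0);
 *	}
 *	static struct slm_mod_ops my_mod_ops = {
 *		SE_MAJOR_VERSION, SE_MINOR_VERSION, SE_MAX_RETRY_LIMIT,
 *		my_deliver_event
 *	};
 *	struct slm_mod_ops *
 *	slm_init()
 *	{
 *		return (&my_mod_ops);
 *	}
 *	void
 *	slm_fini()
 *	{
 *	}
 */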
static void
load_modules(char *dirname)
{
	int client_id;
	DIR *mod_dir;
	module_t *mod;
	struct dirent *entp;
	struct slm_mod_ops *mod_ops;
	struct sysevent_client *scp;

	if (dirname == NULL)
		return;

	/* Return silently if module directory does not exist */
	if ((mod_dir = opendir(dirname)) == NULL) {
		return;
	}

	/*
	 * Go through directory, looking for files ending with .so
	 */
	while ((entp = readdir(mod_dir)) != NULL) {
		void *dlh, *f;
		char *tmp, modpath[PATH_MAX];

		if (((tmp = strstr(entp->d_name, MODULE_SUFFIX)) == NULL) ||
		    (tmp[strlen(MODULE_SUFFIX)] != '\0')) {
			continue;
		}
		if (snprintf(modpath, sizeof (modpath), "%s%s",
		    dirname, entp->d_name) >= sizeof (modpath)) {
			continue;
		}
		if ((dlh = dlopen(modpath, RTLD_LAZY)) == NULL) {
			syseventd_err_print("dlopen of %s failed: %s\n",
			    modpath, dlerror());
			continue;
		}
		if ((f = dlsym(dlh, EVENT_INIT)) == NULL ||
		    (mod = malloc(sizeof (*mod))) == NULL) {
			(void) dlclose(dlh);
			continue;
		}
		mod->name = strdup(entp->d_name);
		mod->dlhandle = dlh;
		mod->event_mod_init = (struct slm_mod_ops *(*)())f;

		/* load in other module functions */
		mod->event_mod_fini = (void (*)())dlsym(dlh, EVENT_FINI);
		if (mod->event_mod_fini == NULL) {
			syseventd_err_print("no %s in module %s: %s\n",
			    EVENT_FINI, modpath, dlerror());
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}

		/* Call module init routine */
		if ((mod_ops = mod->event_mod_init()) == NULL ||
		    mod_ops->major_version != SE_MAJOR_VERSION) {
			if (mod_ops != NULL)
				mod->event_mod_fini();
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}
		mod->deliver_event = mod_ops->deliver_event;

		/* Add module entry to client list */
		if ((client_id = insert_client((void *)mod, SLM_CLIENT,
		    (mod_ops->retry_limit <= SE_MAX_RETRY_LIMIT ?
		    mod_ops->retry_limit : SE_MAX_RETRY_LIMIT))) < 0) {
			mod->event_mod_fini();
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}

		scp = sysevent_client_tbl[client_id];
		++concurrency_level;
		(void) thr_setconcurrency(concurrency_level);
		if (thr_create(NULL, 0,
		    (void *(*)(void *))client_deliver_event_thr,
		    (void *)scp, THR_BOUND, &scp->tid) != 0) {
			mod->event_mod_fini();
			free(mod->name);
			free(mod);
			(void) dlclose(dlh);
			continue;
		}
		scp->client_flags |= SE_CLIENT_THR_RUNNING;
	}

	(void) closedir(mod_dir);
}
/*
* unload_modules - modules are unloaded prior to graceful shutdown or
* before restarting the daemon upon receipt of
* SIGHUP.
*/
static void
unload_modules(int sig)
{
	int i, count, done;
	module_t *mod;
	struct sysevent_client *scp;

	/*
	 * unload modules that are ready, skip those that have not
	 * drained their event queues.
	 */
	count = done = 0;
	while (done < MAX_SLM) {
		/* Don't wait indefinitely for unresponsive clients */
		if (sig != SIGHUP && count > SE_TIMEOUT) {
			break;
		}

		done = 0;

		/* Shutdown clients */
		for (i = 0; i < MAX_SLM; ++i) {
			scp = sysevent_client_tbl[i];
			if (mutex_trylock(&scp->client_lock) == 0) {
				if (scp->client_type != SLM_CLIENT ||
				    scp->client_data == NULL) {
					(void) mutex_unlock(&scp->client_lock);
					done++;
					continue;
				}
			} else {
				syseventd_print(3, "Skipping unload of "
				    "client %d: client locked\n",
				    scp->client_num);
				continue;
			}

			/*
			 * Drain the eventq and wait for delivery thread to
			 * cleanly exit
			 */
			drain_eventq(scp, EAGAIN);
			(void) cond_signal(&scp->client_cv);
			(void) mutex_unlock(&scp->client_lock);
			(void) thr_join(scp->tid, NULL, NULL);

			/*
			 * It is now safe to unload the module
			 */
			mod = (module_t *)scp->client_data;
			mod->event_mod_fini();
			(void) dlclose(mod->dlhandle);
			free(mod->name);
			(void) mutex_lock(&client_tbl_lock);
			delete_client(i);
			(void) mutex_unlock(&client_tbl_lock);
			++done;
		}
		++count;
		(void) sleep(1);
	}

	/*
	 * Wait for event completions
	 */
	(void) mutex_lock(&ev_comp_lock);
	while (event_compq != NULL) {
		(void) cond_wait(&event_comp_cv, &ev_comp_lock);
	}
	(void) mutex_unlock(&ev_comp_lock);
}
/*
* syseventd_init - Called at daemon (re)start-up time to load modules
* and kickstart the kernel delivery engine.
*/
static void
syseventd_init()
{
	int i, fd;
	char local_door_file[PATH_MAX + 1];

	fini_pending = 0;

	concurrency_level = MIN_CONCURRENCY_LEVEL;
	(void) thr_setconcurrency(concurrency_level);

	/*
	 * Load client modules for event delivering
	 */
	for (i = 0; i < MOD_DIR_NUM; ++i) {
		load_modules(dir_num2name(i));
	}

	/*
	 * Create kernel delivery door service
	 */
	if (snprintf(local_door_file, sizeof (local_door_file), "%s%s",
	    root_dir, LOGEVENT_DOOR_UPCALL) >= sizeof (local_door_file)) {
		syseventd_err_print("door file path too long\n");
		syseventd_exit(5);
	}

	/*
	 * Remove door file for robustness.
	 */
	if (unlink(local_door_file) != 0)
		syseventd_print(8, "Unlink of %s failed.\n", local_door_file);

	fd = open(local_door_file, O_CREAT|O_RDWR, S_IREAD|S_IWRITE);
	if ((fd == -1) && (errno != EEXIST)) {
		syseventd_err_print("open of door file failed - %s\n",
		    strerror(errno));
		syseventd_exit(5);
	}
	(void) close(fd);

	upcall_door = door_create(door_upcall, NULL,
	    DOOR_REFUSE_DESC | DOOR_NO_CANCEL);
	if (upcall_door == -1) {
		syseventd_err_print("door create failed - %s\n",
		    strerror(errno));
		syseventd_exit(5);
	}

	(void) fdetach(local_door_file);
retry:
	if (fattach(upcall_door, local_door_file) != 0) {
		if (errno == EBUSY)
			goto retry;
		syseventd_err_print("door attach failed - %s\n",
		    strerror(errno));
		(void) door_revoke(upcall_door);
		syseventd_exit(5);
	}

	/*
	 * Tell kernel the door name and start delivery
	 */
	syseventd_print(2,
	    "local_door_file = %s\n", local_door_file);
	if (modctl(MODEVENTS,
	    (uintptr_t)MODEVENTS_SET_DOOR_UPCALL_FILENAME,
	    (uintptr_t)local_door_file, NULL, NULL, 0) < 0) {
		syseventd_err_print("door name to kernel failed - %s\n",
		    strerror(errno));
		syseventd_exit(6);
	}

	door_upcall_retval = 0;

	if (modctl(MODEVENTS, (uintptr_t)MODEVENTS_FLUSH, NULL, NULL, NULL, 0)
	    < 0) {
		syseventd_err_print("kernel event replay failed - %s\n",
		    strerror(errno));
		syseventd_exit(7);
	}
}
/*
* syseventd_fini - shut down daemon, but do not exit
*/
static void
syseventd_fini(int sig)
{
/*
* Indicate that event queues should be drained and no
* additional events be accepted
*/
fini_pending = 1;
/* Close the kernel event door to halt delivery */
(void) door_revoke(upcall_door);
	(void) rw_wrlock(&mod_unload_lock);
	unload_modules(sig);
	(void) rw_unlock(&mod_unload_lock);
}
/*
* enter_daemon_lock - lock the daemon file lock
*
* Use an advisory lock to ensure that only one daemon process is active
* in the system at any point in time. If the lock is held by another
* process, do not block but return the pid owner of the lock to the
* caller immediately. The lock is cleared if the holding daemon process
* exits for any reason even if the lock file remains, so the daemon can
* be restarted if necessary. The lock file is DAEMON_LOCK_FILE.
*/
static pid_t
enter_daemon_lock(void)
{
	struct flock lock;

	if (snprintf(local_lock_file, sizeof (local_lock_file), "%s%s",
	    root_dir, DAEMON_LOCK_FILE) >= sizeof (local_lock_file)) {
		syseventd_exit(8);
	}

	daemon_lock_fd = open(local_lock_file, O_CREAT|O_RDWR, 0644);
	if (daemon_lock_fd < 0) {
		syseventd_err_print("open of lock file %s failed - %s\n",
		    local_lock_file, strerror(errno));
		syseventd_exit(8);
	}

	lock.l_type = F_WRLCK;
	lock.l_whence = SEEK_SET;
	lock.l_start = 0;
	lock.l_len = 0;

	if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
		if (errno == EAGAIN || errno == EDEADLK) {
			if (fcntl(daemon_lock_fd, F_GETLK, &lock) == -1) {
				exit(2);
			}
			return (lock.l_pid);
		}
	}
	hold_daemon_lock = 1;
	return (getpid());
}
/*
* exit_daemon_lock - release the daemon file lock
*/
static void
exit_daemon_lock(void)
{
	struct flock lock;

	lock.l_type = F_UNLCK;
	lock.l_whence = SEEK_SET;
	lock.l_start = 0;
	lock.l_len = 0;

	if (fcntl(daemon_lock_fd, F_SETLK, &lock) == -1) {
		syseventd_err_print("unlock of %s failed - %s\n",
		    local_lock_file, strerror(errno));
		exit(-1);
	}

	(void) close(daemon_lock_fd);
}
/*
 * syseventd_err_print - print error messages to the terminal if not
 *			yet daemonized, otherwise to syslog.
*/
/*PRINTFLIKE1*/
void
syseventd_err_print(char *fmt, ...)
{
	va_list ap;

	(void) mutex_lock(&err_mutex);
	va_start(ap, fmt);

	if (logflag) {
		(void) vsyslog(LOG_ERR, fmt, ap);
	} else {
		(void) fprintf(stderr, "%s: ", prog);
		(void) vfprintf(stderr, fmt, ap);
	}
	va_end(ap);
(void) mutex_unlock(&err_mutex);
}
/*
* syseventd_print - print messages to the terminal or to syslog
* the following levels are implemented:
*
 * 1 - transient errors that do not affect normal program flow
* 3 - program flow trace as each message goes through the daemon
 * 8 - all the nitty-gritty details of startup and shutdown
* 9 - very verbose event flow tracing (no daemonization of syseventd)
*
*/
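/*
 * For example, a trace such as
 *	syseventd_print(3, "deliver message thread woke up\n");
 * is emitted only when the daemon was started with -d 3 or higher.
 */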
/*PRINTFLIKE2*/
void
syseventd_print(int level, char *fmt, ...)
{
	va_list ap;
	static int newline = 1;
if (level > debug_level) {
return;
}
(void) mutex_lock(&err_mutex);
	va_start(ap, fmt);
	if (logflag) {
		(void) syslog(LOG_DEBUG, "%s[%ld]: ", prog, getpid());
		(void) vsyslog(LOG_DEBUG, fmt, ap);
	} else {
		if (newline) {
			(void) fprintf(stdout, "%s[%ld]: ", prog, getpid());
			(void) vfprintf(stdout, fmt, ap);
		} else {
			(void) vfprintf(stdout, fmt, ap);
		}
	}
	if (fmt[strlen(fmt) - 1] == '\n') {
newline = 1;
} else {
newline = 0;
}
	va_end(ap);
	(void) mutex_unlock(&err_mutex);
}