/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2013 Nexenta Systems, Inc. All rights reserved.
* Copyright 2016 Toomas Soome <tsoome@me.com>
*/
#include <sys/sysevent.h>
#include <sys/sysevent_impl.h>
#include <sys/lofi_impl.h>
#include <sys/sysmacros.h>
#include <sys/autoconf.h>
/* for doors */
#include <sys/pathname.h>
/*
* log_sysevent.c - Provides the interfaces for kernel event publication
* to the sysevent event daemon (syseventd).
*/
/*
* Debug stuff
*/
/* Runtime debug toggle; read by debug-print macros (definitions not visible here). */
static int log_event_debug = 0;
/* NOTE(review): the DEBUG/non-DEBUG macro bodies were lost from this chunk. */
#ifdef DEBUG
#else
#endif
/*
* Local static vars
*/
/* queue of event buffers sent to syseventd */
/*
* Count of event buffers in the queue
*/
/* Number of event buffers currently queued awaiting delivery to syseventd. */
int log_eventq_cnt = 0;
/* queue of event buffers awaiting delivery to syseventd */
/* log event delivery flag */
/*
* Tunable maximum event buffer queue size. Size depends on how many events
* the queue must hold when syseventd is not available, for example during
* system startup. Experience showed that more than 2000 events could be posted
* due to correctable memory errors.
*/
/*
* async thread-related variables
*
* eventq_head_mutex - synchronizes access to the kernel event queue
*
* eventq_sent_mutex - synchronizes access to the queue of events sent to
* userlevel
*
* log_event_cv - condition variable signaled when an event has arrived or
* userlevel ready to process event buffers
*
* async_thread - asynchronous event delivery thread to userlevel daemon.
*
* sysevent_upcall_status - status of the door upcall link
*/
/* Count of threads blocked waiting for space in a full event queue. */
static int event_qfull_blocked = 0;
/*
* Indicates the syseventd daemon has begun taking events
*/
int sysevent_daemon_init = 0;
/*
* Back-off delay when door_ki_upcall returns EAGAIN. Typically
* caused by the server process doing a forkall(). Since all threads
* but the thread actually doing the forkall() need to be quiesced,
* the fork may take some time; the pause duration is in units
* of clock ticks. (NOTE(review): sentence reconstructed - confirm upstream.)
*/
/* Non-zero while a log_event_pause() back-off is in progress. */
static int event_pause_state = 0;
/* Cached device links for lofi. */
/*ARGSUSED*/
/*
* NOTE(review): the function name/parameter lines are missing from this chunk.
* This appears to be the timeout callback that clears the pause flag
* (presumably also signals a CV - the lines are gone; confirm upstream).
*/
static void
{
event_pause_state = 0;
}
/*
* NOTE(review): signature line missing; per the comment below this is
* log_event_pause(nticks) - block the caller for a bounded number of ticks.
*/
static void
{
/*
* Only one use of log_event_pause at a time
*/
ASSERT(event_pause_state == 0);
event_pause_state = 1;
/* id is presumably the timeout(9F) id; the call that sets it is missing. */
if (id != 0) {
/* Wait until the timeout callback resets event_pause_state. */
while (event_pause_state)
}
event_pause_state = 0;
}
/*
* log_event_upcall - Perform the upcall to syseventd for event buffer delivery.
* Check for rebinding errors
* This buffer is reused by the syseventd door_return
* to hold the result code
*/
/*
* NOTE(review): name/parameter lines missing; per the header comment above
* this is log_event_upcall() - deliver one event buffer to syseventd via a
* door upcall, retrying on EINTR/EAGAIN. Many statements (door_arg setup,
* the retry loop header, debug prints) were lost from this chunk.
*/
static int
{
int error;
int retry;
int neagain = 0;	/* number of EAGAIN retries observed */
int neintr = 0;		/* number of EINTR retries observed */
/* Initialize door args */
/* No door bound yet: syseventd has not registered its upcall door. */
if (event_door == NULL) {
return (EBADF);
}
SIZE_MAX, 0)) == 0) {
break;
}
/*
* EBADF is handled outside the switch below because we need to
* hold event_door_mutex a bit longer
*/
/* Server died */
event_door = NULL;
return (error);
}
/*
* The EBADF case is already handled above with event_door_mutex
* held
*/
switch (error) {
case EINTR:
neintr++;
log_event_pause(2);
break;
case EAGAIN:
/* cannot deliver upcall - process may be forking */
neagain++;
/* Exponential back-off, capped at LOG_EVENT_MAX_PAUSE ticks. */
nticks <<= 1;
if (nticks > LOG_EVENT_MAX_PAUSE)
break;
default:
"log_event_upcall: door_ki_upcall error %d\n",
error);
return (error);
}
}
}
"error=%d rptr1=%p rptr2=%p dptr2=%p ret1=%x ret2=%x\n",
if (!error) {
/*
* upcall was successfully executed. Check return code.
*/
}
return (error);
}
/*
* log_event_deliver - event delivery thread
* Deliver all events on the event queue to syseventd.
* If the daemon can not process events, stop event
* delivery and wait for an indication from the
* daemon to resume delivery.
*
* Once all event buffers have been delivered, wait
* until there are more to deliver.
*/
/*
* NOTE(review): signature line missing; per the header comment above this is
* log_event_deliver() - the asynchronous delivery thread that drains
* log_eventq_head to syseventd. Lock acquire/release, CV waits, and the
* upcall invocation lines were lost from this chunk.
*/
static void
{
log_eventq_t *q;
int upcall_err;
"logevent");
/*
* eventq_head_mutex is exited (released) when there are no more
* events to process from the eventq in cv_wait().
*/
for (;;) {
(void *)log_eventq_head));
upcall_err = 0;
q = log_eventq_head;
/* Drain the queue until empty, delivery hold, or an upcall error. */
while (q) {
if (log_event_delivery == LOGEVENT_DELIVERY_HOLD) {
upcall_err = EAGAIN;
break;
}
/*
* Release event queue lock during upcall to
* syseventd
*/
break;
}
/*
* We may be able to add entries to
* the queue now.
*/
if (event_qfull_blocked > 0 &&
if (event_qfull_blocked > 0) {
}
}
/*
* Daemon restart can cause entries to be moved from
* the sent queue and put back on the event queue.
* If this has occurred, replay event queue
* processing from the new queue head.
*/
if (q != log_eventq_head) {
q = log_eventq_head;
} else {
/*
* Move the event to the sent queue when a
* successful delivery has been made.
*/
q->next = log_eventq_sent;
log_eventq_sent = q;
q = next;
log_eventq_head = q;
if (q == NULL) {
ASSERT(log_eventq_cnt == 0);
}
}
}
/* Decide whether to wait, continue, or mark transport down. */
switch (upcall_err) {
case 0:
/*
* Success. The queue is empty.
*/
break;
case EAGAIN:
/*
* Delivery is on hold (but functional).
*/
/*
* If the user has already signaled for delivery
* resumption, continue. Otherwise, we wait until
* we are signaled to continue.
*/
continue;
break;
default:
"upcall err %d\n", upcall_err));
/*
* Signal everyone waiting that transport is down
*/
if (event_qfull_blocked > 0) {
if (event_qfull_blocked > 0) {
}
}
break;
}
}
/* NOTREACHED */
}
/*
* Set up the nvlist based data cache. Used by lofi to find
* device name for mapped file.
*/
/*
* NOTE(review): name line missing; per the comment above this sets up the
* nvlist-based devlink cache used by lofi. Body statements were lost.
*/
static void
{
}
/*
* log_event_init - Allocate and initialize log_event data structures.
*/
/* NOTE(review): name line missing; mutex/CV initializations were lost. */
void
{
/* Set up devlink cache for lofi. */
}
/*
* The following routines are used by kernel event publishers to
* allocate, append and free event buffers
*/
/*
* sysevent_alloc - Allocate new eventq struct. This element contains
* an event buffer that will be used in a subsequent
* call to log_sysevent.
*/
/*
* NOTE(review): the entire signature of sysevent_alloc() (per the comment
* above) is missing; size computations and the allocation call were lost.
*/
{
int payload_sz;
log_eventq_t *q;
/*
* Calculate and reserve space for the class, subclass and
* publisher strings in the event buffer
*/
/* String sizes must be 64-bit aligned in the event buffer */
(aligned_subclass_sz - sizeof (uint64_t)) +
/*
* Allocate event buffer plus additional sysevent queue
* and payload overhead.
*/
/* Allocation failed (non-sleeping allocation presumably). */
if (q == NULL) {
return (NULL);
}
/* Initialize the event buffer data */
return (ev);
}
/*
* sysevent_free - Free event buffer and any attribute data.
*/
/*
* NOTE(review): name line of sysevent_free() missing (see comment above);
* the attribute-list free and kmem_free calls were lost from this chunk.
*/
void
{
log_eventq_t *q;
}
}
/*
* free_packed_event - Free packed event buffer
*/
/* NOTE(review): name line missing; body statements lost. */
static void
{
log_eventq_t *q;
}
/*
* sysevent_add_attr - Add new attribute element to an event attribute list
* If attribute list is NULL, start a new list.
*/
/*
* NOTE(review): name line of sysevent_add_attr() missing (see comment
* above). The per-type nvlist_add_* calls inside the switch were lost,
* leaving only the case labels.
*/
int
{
int error;
/* Invalid argument(s) - the guarding condition line is missing. */
return (SE_EINVAL);
}
/*
* attr_sz is composed of the value data size + the name data size +
* any header data. 64-bit aligned.
*/
return (SE_EINVAL);
}
/*
* Allocate nvlist
*/
return (SE_ENOMEM);
/* add the attribute */
/* Dispatch on the caller-supplied attribute data type. */
switch (se_value->value_type) {
case SE_DATA_TYPE_BYTE:
break;
case SE_DATA_TYPE_INT16:
break;
case SE_DATA_TYPE_UINT16:
break;
case SE_DATA_TYPE_INT32:
break;
case SE_DATA_TYPE_UINT32:
break;
case SE_DATA_TYPE_INT64:
break;
case SE_DATA_TYPE_UINT64:
break;
case SE_DATA_TYPE_STRING:
return (SE_EINVAL);
break;
case SE_DATA_TYPE_BYTES:
return (SE_EINVAL);
break;
case SE_DATA_TYPE_TIME:
break;
default:
return (SE_EINVAL);
}
}
/*
* sysevent_free_attr - Free an attribute list not associated with an
* event buffer.
*/
/*
* NOTE(review): name line of sysevent_free_attr() missing (see comment
* above); the nvlist_free call was lost.
*/
void
{
}
/*
* sysevent_attach_attributes - Attach an attribute list to an event buffer.
*
* This data will be re-packed into contiguous memory when the event
* buffer is posted to log_sysevent.
*/
/* NOTE(review): signature and most body statements missing. */
int
{
return (SE_EINVAL);
}
return (0);
}
/*
* sysevent_detach_attributes - Detach but don't free attribute list from the
* event buffer.
*/
/* NOTE(review): signature and most body statements missing. */
void
{
return;
}
}
/*
* sysevent_attr_name - Get name of attribute
*/
/* NOTE(review): signature line missing; returns the nvpair name or NULL. */
char *
{
return (NULL);
}
return (nvpair_name(attr));
}
/*
* sysevent_attr_type - Get type of attribute
*/
/*
* NOTE(review): name line of sysevent_attr_type() missing (see comment
* above); the return statement was lost.
*/
int
{
/*
* The SE_DATA_TYPE_* are typedef'ed to be the
* same value as DATA_TYPE_*
*/
}
/*
* Repack event buffer into contiguous memory
*/
/*
* NOTE(review): name line missing (likely the internal "se_repack"-style
* helper); allocation, bcopy, and nvlist_pack lines were lost.
*/
static sysevent_t *
{
return (NULL);
}
/*
* Copy event header, class, subclass and publisher names
* Set the attribute offset (in number of bytes) to contiguous
* memory after the header.
*/
/* Check if attribute list exists */
/* No attributes: the header-only copy is complete. */
return (copy);
}
/*
* Copy attribute data to contiguous memory
*/
return (NULL);
}
return (copy);
}
/*
* The sysevent registration provides a persistent and reliable database
* for channel information for sysevent channel publishers and
* subscribers.
*
* A channel is created and maintained by the kernel upon the first
* SE_OPEN_REGISTRATION operation to log_sysevent_register(). Channel
* event subscription information is updated as publishers or subscribers
* perform subsequent operations (SE_BIND_REGISTRATION, SE_REGISTER,
* SE_UNREGISTER and SE_UNBIND_REGISTRATION).
*
* For consistency, id's are assigned for every publisher or subscriber
* bound to a particular channel. The id's are used to constrain resources
* and perform subscription lookup.
*
* Associated with each channel is a hashed list of the current subscriptions
* based upon event class and subclasses. A subscription contains a class name,
* list of possible subclasses and an array of subscriber ids. Subscriptions
* are updated for every SE_REGISTER or SE_UNREGISTER operation.
*
* Channels are closed once the last subscriber or publisher performs a
* SE_CLOSE_REGISTRATION operation. All resources associated with the named
* channel are freed upon last close.
*
* Locking:
* Every operation to log_sysevent() is protected by a single lock,
* registered_channel_mutex. It is expected that the granularity of
* a single lock is sufficient given the frequency that updates will
* occur.
*
* If this locking strategy proves to be too contentious, a per-hash
* or per-channel locking strategy may be implemented.
*/
/*
* NOTE(review): residue of a CHAN_HASH(name) macro definition - the first
* line(s) of the #define were lost; presumably hash_func(name) % CHAN_HASH_SZ.
*/
% CHAN_HASH_SZ)
/* Number of currently open sysevent registration channels. */
static int channel_cnt;
/*
 * hash_func - classic ELF-style string hash over the channel/class name,
 * used to index the registration hash tables.
 *
 * NOTE(review): the visible original had lost the lines declaring and
 * initializing `result` and advancing `s` (leaving an undeclared variable
 * and an infinite loop); restored here per the standard ELF hash algorithm.
 */
static uint32_t
hash_func(const char *s)
{
	uint32_t result = 0;
	uint32_t g;

	while (*s != '\0') {
		result <<= 4;
		result += (uint32_t)*s++;
		g = result & 0xf0000000;
		if (g != 0) {
			/* Fold the high nibble back in and clear it. */
			result ^= g >> 24;
			result ^= g;
		}
	}
	return (result);
}
/*
* NOTE(review): name line missing - returns the channel descriptor for a
* named channel (hash lookup); the hash computation and list-walk lines
* were lost from this chunk.
*/
static sysevent_channel_descriptor_t *
{
int hash_index;
if (channel_name == NULL)
return (NULL);
/* Find channel descriptor */
break;
} else {
}
}
return (chan_list);
}
/*
* NOTE(review): first signature line missing - allocates a class_lst_t for
* event_class at the given hash index; most allocation lines were lost.
*/
static class_lst_t *
char *event_class, int index)
{
sizeof (EC_SUB_ALL));
return (c_list);
}
/*
* NOTE(review): name line missing - frees a channel's per-class
* registration lists; walks every bucket of the class hash table.
*/
static void
{
int i;
/* Table has CLASS_HASH_SZ + 1 buckets, hence the inclusive bound. */
for (i = 0; i <= CLASS_HASH_SZ; ++i) {
}
clist = next_clist;
}
}
}
/*
* NOTE(review): name line missing - this is open_channel() per the caller
* at SE_OPEN_REGISTRATION; creates or references a named channel.
* Allocation and hash-insert lines were lost from this chunk.
*/
static int
{
int hash_index;
/* Refuse to create more than MAX_CHAN channels. */
if (channel_cnt > MAX_CHAN) {
return (-1);
}
/* Find channel descriptor */
/* Channel already exists: bump the reference count and succeed. */
chan_list->scd_ref_cnt++;
return (0);
} else {
}
}
/* New channel descriptor */
/*
* Create subscriber ids in the range [1, MAX_SUBSCRIBERS).
* Subscriber id 0 is never allocated, but is used as a reserved id
* by libsysevent
*/
return (-1);
}
return (-1);
}
++channel_cnt;
return (0);
}
/*
* NOTE(review): name line missing - drops a reference on a named channel
* and frees all channel resources on last close; the lookup and free
* lines were lost from this chunk.
*/
static void
{
int hash_index;
/* Find channel descriptor */
break;
} else {
}
}
/* Channel not found: nothing to close. */
return;
chan->scd_ref_cnt--;
/* Other publishers/subscribers still hold the channel open. */
if (chan->scd_ref_cnt > 0)
return;
else
--channel_cnt;
}
/*
* NOTE(review): name line missing - allocates a subscriber or publisher id
* from the channel's id space (vmem, non-sleeping next-fit); the vmem_alloc
* target expressions were lost from this chunk.
*/
static id_t
{
if (type == SUBSCRIBER) {
VM_NOSLEEP | VM_NEXTFIT);
/* Allocation failed: 0 is the reserved "no id" value. */
return (0);
} else {
VM_NOSLEEP | VM_NEXTFIT);
return (0);
}
return (id);
}
/*
* NOTE(review): name line missing - validates/releases a subscriber or
* publisher id; returns 1 on success, 0 on invalid id. The range-check
* condition lines were lost from this chunk.
*/
static int
{
if (type == SUBSCRIBER) {
return (0);
return (0);
} else {
return (0);
return (0);
}
return (1);
}
/*
* NOTE(review): name line missing - presumably releases an id back to the
* channel's vmem arena; body statements were lost.
*/
static void
{
}
/*
* NOTE(review): name line missing - looks up a subclass entry in a class's
* subclass list by name; the list-walk and strcmp lines were lost.
*/
static subclass_lst_t *
{
return (NULL);
return (sc_list);
}
}
return (NULL);
}
/*
* NOTE(review): name line missing - inserts subclass_num subclass names
* into a class's subscription list, setting the subscriber id bit on
* existing entries or allocating new ones; most statements were lost.
*/
static void
{
int i, subclass_sz;
for (i = 0; i < subclass_num; ++i) {
!= NULL) {
} else {
KM_SLEEP);
}
}
}
/*
* NOTE(review): name line missing - finds (or creates) the class_lst_t for
* a class name in the channel's class hash table; lookup lines were lost.
*/
static class_lst_t *
{
break;
}
return (c_list);
}
/*
* NOTE(review): name line missing - clears a subscriber's id bit from
* every subclass entry across all class hash buckets.
*/
static void
{
int i;
/* Table has CLASS_HASH_SZ + 1 buckets, hence the inclusive bound. */
for (i = 0; i <= CLASS_HASH_SZ; ++i) {
}
}
}
}
/*
* NOTE(review): first signature line missing - removes a subscriber's
* registration for one named class; early returns when the class or
* subclass list is absent.
*/
static void
char *class_name)
{
return;
}
return;
}
}
}
/*
* NOTE(review): name line missing - registers a subscriber for a class
* (creating the class entry if new) then updates the subclass list;
* returns 0 on success, -1 on failure. Most statements were lost.
*/
static int
{
return (0);
}
return (-1);
/* New class, add to the registration cache */
}
/* Update the subclass list */
return (0);
}
/*
* NOTE(review): name line missing - unpacks a subscriber's registration
* nvlist (class name + subclass string array) from the door buffer and
* applies it; returns -1 on any unpack/lookup failure.
*/
static int
{
char *event_class;
char **event_list;
return (-1);
return (-1);
}
return (-1);
}
&num_elem) != 0) {
return (-1);
}
return (-1);
}
return (0);
}
/*
* get_registration - Return the requested class hash chain
*/
/*
* NOTE(review): name line missing - this is get_registration() per the
* comment above: packs the requested class hash chain into an nvlist for
* copyout to the subscriber. EAGAIN signals a too-small user buffer.
* The nvlist_add and pack/copyout lines were lost from this chunk.
*/
static int
{
int num_classes = 0;
return (EINVAL);
/* Requested hash chain is empty. */
return (ENOENT);
}
if (nvlist_alloc(&nvl, 0, 0) != 0) {
return (EFAULT);
}
!= 0) {
return (EFAULT);
}
return (EFAULT);
}
}
num_classes++;
}
/* Nothing registered on this chain. */
if (num_classes == 0) {
return (ENOENT);
}
!= 0) {
return (EFAULT);
}
/* Packed data larger than the caller's buffer: ask it to retry. */
return (EAGAIN);
}
return (0);
}
/*
* log_sysevent_register - Register event subscriber for a particular
* event channel.
*/
/*
* NOTE(review): name line of log_sysevent_register() missing (see comment
* above). Dispatches registration operations (open/close/bind/unbind/
* register/unregister/cleanup/get) for a sysevent channel. Copyin calls,
* the switch header, and per-case bodies were largely lost.
*/
int
{
int error = 0;
/* copyin of the pubsub args failed. */
return (EFAULT);
}
if (kdata.ps_channel_name_len == 0) {
return (EINVAL);
}
return (EFAULT);
}
if (bufsz > 0) {
return (EFAULT);
}
}
if (bufsz > 0)
return (ENOENT);
}
}
case SE_OPEN_REGISTRATION:
if (open_channel(kchannel) != 0) {
if (bufsz > 0)
}
return (error);
case SE_CLOSE_REGISTRATION:
break;
case SE_BIND_REGISTRATION:
break;
case SE_UNBIND_REGISTRATION:
break;
case SE_REGISTER:
/* A registration op with no payload is invalid. */
if (bufsz == 0) {
break;
}
break;
case SE_UNREGISTER:
if (bufsz == 0) {
break;
}
break;
case SE_CLEANUP:
/* Cleanup the indicated subscriber or publisher */
break;
case SE_GET_REGISTRATION:
break;
default:
}
if (bufsz > 0) {
}
return (EFAULT);
return (error);
}
/*
* log_sysevent_copyout_data - Copyout event data to userland.
* This is called from modctl(MODEVENTS, MODEVENTS_GETDATA)
* The buffer size is always sufficient.
*/
/*
* NOTE(review): name line of log_sysevent_copyout_data() missing (see
* comment above). Finds the sent-queue event matching the caller's eid
* and copies it out; the copyin/copyout and id-compare lines were lost.
*/
int
{
log_eventq_t *q;
/*
* Copy eid
*/
return (EFAULT);
}
q = log_eventq_sent;
/*
* Search for event buffer on the sent queue with matching
* event identifier
*/
while (q) {
/* Not a match: advance to the next sent event. */
q = q->next;
continue;
}
break;
}
} else {
error = 0;
}
break;
}
return (error);
}
/*
* log_sysevent_free_data - Free kernel copy of the event buffer identified
* by eid (must have already been sent). Called from
* modctl(MODEVENTS, MODEVENTS_FREEDATA).
*/
/*
* NOTE(review): name line of log_sysevent_free_data() missing (see comment
* above). Unlinks the sent-queue event matching eid and frees it; the
* copyin, id-compare, unlink, and free lines were partially lost.
*/
int
{
/*
* Copy eid
*/
return (EFAULT);
}
q = log_eventq_sent;
/*
* Look for the event to be freed on the sent queue. Due to delayed
* processing of the event, it may not be on the sent queue yet.
* It is up to the user to retry the free operation to ensure that the
* event is properly freed.
*/
while (q) {
/* Remember the predecessor so we can unlink q below. */
prev = q;
q = q->next;
continue;
}
/*
* Take it out of log_eventq_sent and free it
*/
if (prev) {
} else {
/* q was the list head. */
log_eventq_sent = q->next;
}
error = 0;
break;
}
return (error);
}
/*
* log_sysevent_flushq - Begin or resume event buffer delivery. If necessary,
* create log_event_deliver thread or wake it up
*/
/*ARGSUSED*/
/*
* NOTE(review): name line of log_sysevent_flushq() missing (see comment
* above). Starts (or wakes) the delivery thread and marks the daemon as
* initialized; thread-create and CV-signal lines were lost.
*/
void
{
/*
* Start the event delivery thread
* Mark the upcall status as active since we should
* now be able to begin emptying the queue normally.
*/
/* Only create the delivery thread once. */
if (!async_thread) {
sysevent_daemon_init = 1;
}
}
/*
* log_sysevent_filename - Called by syseventd via
* modctl(MODEVENTS, MODEVENTS_SET_DOOR_UPCALL_FILENAME)
* to subsequently bind the event_door.
*
* This routine is called every time syseventd (re)starts
* and must therefore replay any events buffers that have
* been sent but not freed.
*
* Event buffer delivery begins after a call to
* log_sysevent_flushq().
*/
/*
* NOTE(review): name line of log_sysevent_filename() missing (see comment
* above). Records the door file name, rebinds the event door, and replays
* sent-but-unfreed events back onto the head queue in id order. The strcpy,
* door release/lookup, and queue-splice lines were lost from this chunk.
*/
int
{
sizeof (logevent_door_upcall_filename));
/* Unbind old event door */
if (event_door != NULL)
/* Establish door connection with user event daemon (syseventd) */
event_door = NULL;
/*
* We are called when syseventd restarts. Move all sent, but
* not committed events from log_eventq_sent to log_eventq_head.
* Do it in proper order to maintain increasing event id.
*/
while (log_eventq_sent) {
if (log_eventq_head == NULL) {
ASSERT(log_eventq_cnt == 0);
} else if (log_eventq_head == log_eventq_tail) {
}
}
return (0);
}
/*
* queue_sysevent - queue an event buffer
*/
/*
* NOTE(review): name line of queue_sysevent() missing (see comment above).
* Stamps and enqueues one event buffer on log_eventq_head, handling the
* queue-full case (SE_EQSIZE for SE_NOSLEEP callers, block-and-retry
* otherwise). The restart label, mutex calls, and CV signal were lost.
*/
static int
{
log_eventq_t *q;
/* Max Q size exceeded */
/*
* If queue full and transport down, return no transport
*/
if (sysevent_upcall_status != 0) {
return (SE_NO_TRANSPORT);
}
/* Caller cannot block: report the full queue. */
if (flag == SE_NOSLEEP) {
return (SE_EQSIZE);
}
/* Blocked until space freed up; re-evaluate from the top. */
goto restart;
}
/* Time stamp and assign ID */
(uint64_t)1);
/*
* Put event on eventq
*/
if (log_eventq_head == NULL) {
ASSERT(log_eventq_cnt == 0);
log_eventq_head = q;
log_eventq_tail = q;
} else {
if (log_eventq_head == log_eventq_tail) {
}
log_eventq_tail->next = q;
log_eventq_tail = q;
}
/* Signal event delivery thread */
/* Only the empty->non-empty transition needs a wakeup. */
if (log_eventq_cnt == 1) {
}
return (0);
}
/*
* log_sysevent - kernel system event logger.
*
* Returns SE_ENOMEM if buf allocation failed or SE_EQSIZE if the
* maximum event queue size will be exceeded
* Returns 0 for successfully queued event buffer
*/
/*
* NOTE(review): name line of log_sysevent() missing (see comment above).
* Packs the event into contiguous memory and queues it; the repack call
* and ASSERT expression head were lost from this chunk.
*/
int
{
int rval;
/* Repack/allocation of the contiguous event buffer failed. */
return (SE_ENOMEM);
}
rval == SE_NO_TRANSPORT);
return (rval);
}
/*
* Publish EC_DEV_ADD and EC_DEV_REMOVE events from devfsadm to lofi.
* This interface is needed to pass device link names to the lofi driver,
* to be returned via ioctl() to the lofiadm command.
* The problem is, if lofiadm is executed in local zone, there is no
* mechanism to announce the device name from the /dev tree back to lofiadm,
* as sysevents are not accessible from local zone and devfsadmd is only
* running in global zone.
*
* The data passed to lofiadm is for information only and can be re-queried
* mappings with lofiadm command.
*
* Once we have a better method, this interface should be reworked.
*/
/*
* NOTE(review): name line missing - per the comment above this is the
* handler that caches EC_DEV_ADD/EC_DEV_REMOVE devlink data for lofi.
* The class/subclass checks and nvlist insert/remove calls were lost.
*/
static void
{
return;
}
/* We are only interested in lofi. */
return;
}
/*
* insert or remove device info, then announce the change
* via cv_broadcast.
*/
} else {
/* Cannot use fnvlist_remove() as we can get ENOENT. */
}
}
/*
* log_usr_sysevent - user system event logger
* Private to devfsadm and accessible only via
* modctl(MODEVENTS, MODEVENTS_POST_EVENT)
*/
/*
* NOTE(review): name line of log_usr_sysevent() missing (see comment
* above). Copies a user event in, queues it, and copies the assigned
* event id back out; copyin/copyout and the queueing call were lost.
*/
int
{
/*
* Copy event
*/
return (EFAULT);
}
/* Queue full: ask the caller to retry. */
return (EAGAIN);
else
return (EIO);
}
/* copyout of the event id failed. */
return (EFAULT);
}
return (0);
}
/*
* NOTE(review): the first signature line (return type, name, and leading
* parameters) is missing - this appears to be ddi_log_sysevent(): builds
* a publisher string from the driver name, allocates an event, attaches
* the attribute list, and queues it. Many statements were lost.
*/
int
char *vendor,
char *class,
char *subclass,
int sleep_flag)
{
const char *drvname;
char *publisher;
int se_flag;
int rval;
int n;
/* Sleeping allocation requested from interrupt context is illegal. */
"event from interrupt context with sleep semantics\n",
return (DDI_ECONTEXT);
}
/* Publisher string fits in the stack buffer; else allocate. */
if (n < sizeof (pubstr)) {
} else {
publisher = kmem_alloc(n,
return (DDI_ENOMEM);
}
}
}
return (DDI_ENOMEM);
}
if (list) {
}
if (list) {
}
if (rval == 0) {
/* Return the assigned event id to the caller if requested. */
if (eidp) {
}
return (DDI_SUCCESS);
}
if (rval == SE_NO_TRANSPORT)
return (DDI_ETRANSPORT);
}
/*
* NOTE(review): the return-type line and body statement are missing -
* presumably returns the next kernel event id (atomic increment);
* confirm against upstream.
*/
log_sysevent_new_id(void)
{
}