/*
* Copyright (C) 1998-2017 Internet Systems Consortium, Inc. ("ISC")
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*! \file
* \author Principal Author: Bob Halley
*/
/*
* XXXRTH Need to document the states a task can be in, and the rules
* for changing states.
*/
#include <config.h>
#include <isc/condition.h>
#include <isc/platform.h>
#ifdef OPENSSL_LEAKS
#include <openssl/err.h>	/* for ERR_remove_state(), used in run() below */
#endif
/*%
* For BIND9 internal applications:
* when built with threads we use multiple worker threads shared by the whole
* application.
* when built without threads we share a single global task manager and use
* an integrated event loop for socket, timer, and other generic task events.
* For the generic library:
* we don't use either of them: an application can have multiple task managers
* whether or not it's threaded, and if the application is threaded each thread
* is expected to have a separate manager; no "worker threads" are shared by
* the application threads.
*/
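/*
 * Editorial sketch (not in the original source) of the two internal modes
 * described above:
 *
 * with threads (USE_WORKER_THREADS): isc_taskmgr_create(mctx, workers,
 * default_quantum, &taskmgr) starts 'workers' threads which all pull tasks
 * from one shared ready queue;
 *
 * without threads (USE_SHARED_MANAGER): there is a single global manager,
 * and the application's integrated event loop runs ready tasks inline via
 * the dispatch entry point (isc__taskmgr_dispatch() below).
 */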
#ifdef ISC_PLATFORM_USETHREADS
#define USE_WORKER_THREADS
#else
#define USE_SHARED_MANAGER
#endif /* ISC_PLATFORM_USETHREADS */
#include "task_p.h"
#ifdef ISC_TASK_TRACE
#define XTRACE(m)		fprintf(stderr, "task %p thread %lu: %s\n", \
				       task, isc_thread_self(), (m))
#define XTTRACE(t, m)		fprintf(stderr, "task %p thread %lu: %s\n", \
				       (t), isc_thread_self(), (m))
#define XTHREADTRACE(m)		fprintf(stderr, "thread %lu: %s\n", \
				       isc_thread_self(), (m))
#else
#define XTRACE(m)
#define XTTRACE(t, m)
#define XTHREADTRACE(m)
#endif
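/*
 * Usage note (editorial, not in the original source): defining
 * ISC_TASK_TRACE at build time, e.g. by adding -DISC_TASK_TRACE to CFLAGS,
 * turns the XTRACE()/XTTRACE()/XTHREADTRACE() calls in this file into
 * fprintf()s on stderr identifying the task, the calling thread, and a
 * message; when it is not defined they expand to nothing and cost nothing
 * at run time.
 */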
/***
*** Types.
***/
typedef enum {
	task_state_idle, task_state_ready, task_state_running,
	task_state_done
} task_state_t;
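/*
 * Editorial summary of the state transitions, inferred from the comments
 * in this file (see the XXXRTH note above asking for proper documentation):
 *
 *	idle    -> ready     an event (or a shutdown) is posted to an idle
 *	                     task and the task is put on the manager's
 *	                     ready queue
 *	ready   -> running   a worker thread (or the integrated event loop)
 *	                     dequeues the task and starts dispatching its
 *	                     events
 *	running -> ready     the task's quantum expires while events are
 *	                     still pending, so it is requeued
 *	running -> idle      the event queue drains but references remain
 *	running -> done      the event queue drains, there are no references,
 *	                     and the task is shutting down
 */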
#if defined(HAVE_LIBXML2) || defined(HAVE_JSON)
static const char *statenames[] = {
"idle", "ready", "running", "done",
};
#endif
struct isc__task {
/* Not locked. */
/* Locked by task lock. */
unsigned int references;	/*%< reference count */
unsigned int nevents;		/*%< events currently on the task's queue */
unsigned int quantum;		/*%< max events dispatched per run (see run()) */
unsigned int flags;		/*%< flag bits, e.g. shutting-down (see TASK_SHUTTINGDOWN()) */
void * tag;			/*%< opaque tag settable by the owner */
/* Locked by task manager lock. */
};
#define TASK_F_SHUTTINGDOWN		0x01

#define TASK_SHUTTINGDOWN(t)		(((t)->flags & TASK_F_SHUTTINGDOWN) \
					 != 0)
struct isc__taskmgr {
/* Not locked. */
#ifdef ISC_PLATFORM_USETHREADS
unsigned int workers;
#endif /* ISC_PLATFORM_USETHREADS */
/* Locked by task manager lock. */
unsigned int default_quantum;
#ifdef ISC_PLATFORM_USETHREADS
#endif /* ISC_PLATFORM_USETHREADS */
unsigned int tasks_running;
unsigned int tasks_ready;
/*
 * The exclusive task ('excl') can be touched from several threads, so a
 * separate lock is needed to protect the access.  We can't use 'lock'
 * since isc_task_detach() will try to acquire it.
 */
#ifdef USE_SHARED_MANAGER
unsigned int refs;
#endif /* USE_SHARED_MANAGER */
};
#ifdef USE_SHARED_MANAGER
#endif /* USE_SHARED_MANAGER */
/*%
* The following are intended for internal use (indicated by "isc__"
* prefix) but are not declared as static, allowing direct access from
* unit tests etc.
*/
isc_task_t **taskp);
void
void
void
void
unsigned int
unsigned int
void *tag);
unsigned int
unsigned int
void *arg);
void
void
void
const char *
void *
void
void
void
void
void
void
void
static inline isc_boolean_t
static inline isc__task_t *
static inline void
static struct isc__taskmethods {
/*%
 * The following are defined just to avoid warnings about otherwise
 * unused static functions.
 */
} taskmethods = {
{
},
(void *)isc_task_purgeevent,
(void *)isc__task_unsendrange,
(void *)isc__task_getname,
(void *)isc__task_gettag,
(void *)isc__task_getcurrenttime,
(void *)isc__task_getcurrenttimex
};
};
/***
*** Tasks.
***/
static void
XTRACE("task_finished");
#ifdef USE_WORKER_THREADS
/*
* All tasks have completed and the
* task manager is exiting. Wake up
* any idle worker threads so they
* can exit.
*/
}
#endif /* USE_WORKER_THREADS */
}
isc_task_t **taskp)
{
return (ISC_R_NOMEMORY);
XTRACE("isc_task_create");
if (result != ISC_R_SUCCESS) {
return (result);
}
} else
if (exiting) {
return (ISC_R_SHUTTINGDOWN);
}
return (ISC_R_SUCCESS);
}
void
/*
* Attach *targetp to source.
*/
source->references++;
}
static inline isc_boolean_t
/*
* Caller must be holding the task's lock.
*/
XTRACE("task_shutdown");
if (! TASK_SHUTTINGDOWN(task)) {
ISC_MSG_SHUTTINGDOWN, "shutting down"));
}
/*
* Note that we post shutdown events LIFO.
*/
}
}
return (was_idle);
}
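/*
 * Illustration (editorial, not part of the original code): because shutdown
 * events are posted LIFO, the action registered last is delivered first.
 * Assuming hypothetical actions close_files() and stop_timers() and a
 * hypothetical argument 'ctx':
 *
 *	isc_task_onshutdown(task, close_files, ctx);	registered first
 *	isc_task_onshutdown(task, stop_timers, ctx);	registered second
 *	isc_task_shutdown(task);
 *
 * stop_timers() runs before close_files().
 */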
/*
* Moves a task onto the appropriate run queue.
*
* Caller must NOT hold manager lock.
*/
static inline void
#ifdef USE_WORKER_THREADS
#endif /* USE_WORKER_THREADS */
XTRACE("task_ready");
#ifdef USE_WORKER_THREADS
#endif /* USE_WORKER_THREADS */
}
static inline isc_boolean_t
/*
* Caller must be holding the task lock.
*/
XTRACE("detach");
task->references--;
/*
* There are no references to this task, and no
* pending events. We could try to optimize and
* either initiate shutdown or clean up the task,
* depending on its state, but it's easier to just
* make the task ready and allow run() or the event
* loop to deal with shutting down and termination.
*/
return (ISC_TRUE);
}
return (ISC_FALSE);
}
void
/*
* Detach *taskp from its task.
*/
XTRACE("isc_task_detach");
if (was_idle)
}
static inline isc_boolean_t
/*
* Caller must be holding the task lock.
*/
XTRACE("task_send");
}
return (was_idle);
}
void
/*
* Send '*event' to 'task'.
*/
XTRACE("isc_task_send");
/*
* We're trying hard to hold locks for as short a time as possible.
* We're also trying to hold as few locks as possible. This is why
* some processing is deferred until after the lock is released.
*/
if (was_idle) {
/*
* We need to add this task to the ready queue.
*
* We've waited until now to do it because making a task
* ready requires locking the manager. If we tried to do
* this while holding the task lock, we could deadlock.
*
* We've changed the state to ready, so no one else will
* be trying to add this task to the ready queue. The
* only way to leave the ready state is by executing the
* task. It thus doesn't matter if events are added,
* removed, or a shutdown is started in the interval
* between the time we released the task lock, and the time
* we add the task to the ready queue.
*/
}
}
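/*
 * Sketch of the deferred-enqueue pattern described above (editorial,
 * simplified; the helper names are the ones used elsewhere in this file):
 *
 *	LOCK(&task->lock);
 *	was_idle = task_send(task, &event);	queue the event, note whether
 *						the task was idle
 *	UNLOCK(&task->lock);
 *
 *	if (was_idle)
 *		task_ready(task);		only now is the manager lock
 *						taken and the task put on
 *						the ready queue
 *
 * Taking the manager lock only after the task lock has been released is
 * what avoids the lock-order deadlock mentioned above.
 */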
void
/*
* Send '*event' to '*taskp' and then detach '*taskp' from its
* task.
*/
XTRACE("isc_task_sendanddetach");
/*
* If idle1 is true, then idle2 shouldn't also be true, since we're holding
* the task lock and thus the task cannot switch from ready back to
* idle.
*/
}
static unsigned int
{
unsigned int count = 0;
XTRACE("dequeue_events");
/*
* Events matching 'sender', whose type is >= first and <= last, and
* whose tag is 'tag' will be dequeued. If 'purging', matching events
* which are marked as unpurgeable will not be dequeued.
*
* sender == NULL means "any sender", and tag == NULL means "any tag".
*/
count++;
}
}
return (count);
}
unsigned int
{
unsigned int count;
/*
* Purge events from a task's event queue.
*/
XTRACE("isc_task_purgerange");
ISC_TRUE);
}
/*
* Note that purging never changes the state of the task.
*/
return (count);
}
unsigned int
void *tag)
{
/*
* Purge events from a task's event queue.
*/
XTRACE("isc_task_purge");
}
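/*
 * Usage sketch (editorial): given the matching rules documented in
 * dequeue_events() above, a caller can drop every pending event of one
 * type that it posted to a task with something like
 *
 *	n = isc_task_purge(task, mysender, MYAPP_EVENT_TIMEOUT, NULL);
 *
 * where NULL for the tag (or the sender) means "match any" and the return
 * value is the number of events purged.  'mysender' and
 * MYAPP_EVENT_TIMEOUT are hypothetical names used only for illustration.
 */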
/*
* Purge 'event' from a task's event queue.
*
* XXXRTH: WARNING: This method may be removed before beta.
*/
/*
* If 'event' is on the task's event queue, it will be purged,
* unless it is marked as unpurgeable. 'event' does not have to be
* on the task's event queue; in fact, it can even be an invalid
* pointer. Purging only occurs if the event is actually on the task's
* event queue.
*
* Purging never changes the state of the task.
*/
curr_event != NULL;
curr_event = next_event) {
break;
}
}
if (curr_event == NULL)
return (ISC_FALSE);
return (ISC_TRUE);
}
unsigned int
{
/*
* Remove events from a task's event queue.
*/
XTRACE("isc_task_unsendrange");
}
unsigned int
{
/*
* Remove events from a task's event queue.
*/
XTRACE("isc_task_unsend");
}
void *arg)
{
/*
* Send a shutdown event with action 'action' and argument 'arg' when
* 'task' is shut down.
*/
NULL,
arg,
sizeof(*event));
return (ISC_R_NOMEMORY);
if (TASK_SHUTTINGDOWN(task)) {
} else
if (disallowed)
return (result);
}
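/*
 * Usage sketch (editorial): a shutdown action registered here is an
 * ordinary event action.  A minimal one, using hypothetical names, could
 * look like
 *
 *	static void
 *	my_shutdown(isc_task_t *task, isc_event_t *event) {
 *		my_ctx_t *ctx = event->ev_arg;
 *
 *		UNUSED(task);
 *		cleanup(ctx);
 *		isc_event_free(&event);
 *	}
 *
 *	result = isc_task_onshutdown(task, my_shutdown, ctx);
 *
 * If the task is already shutting down, the registration is disallowed
 * and ISC_R_SHUTTINGDOWN is returned (see above).
 */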
void
/*
* Shut down 'task'.
*/
if (was_idle)
}
void
/*
* Destroy '*taskp'.
*/
}
void
/*
* Name 'task'.
*/
}
const char *
}
void *
}
void
}
void
}
/***
*** Task Manager.
***/
/*
 * Return ISC_TRUE if the current ready list for the manager is empty.
 * The current list is either ready_tasks or ready_priority_tasks,
 * depending on whether the manager is currently in normal or privileged
 * execution mode.
 *
 * Caller must hold the task manager lock.
 */
static inline isc_boolean_t
else
}
/*
* Dequeue and return a pointer to the first task on the current ready
* list for the manager.
* If the task is privileged, dequeue it from the other ready list
* as well.
*
* Caller must hold the task manager lock.
*/
static inline isc__task_t *
else
}
return (task);
}
/*
* Push 'task' onto the ready_tasks queue. If 'task' has the privilege
* flag set, then also push it onto the ready_priority_tasks queue.
*
* Caller must hold the task manager lock.
*/
static inline void
manager->tasks_ready++;
}
static void
#ifndef USE_WORKER_THREADS
unsigned int total_dispatch_count = 0;
unsigned int tasks_ready = 0;
#endif /* USE_WORKER_THREADS */
/*
* Again we're trying to hold the lock for as short a time as possible
* and to do as little locking and unlocking as possible.
*
* In both while loops, the appropriate lock must be held before the
* while body starts. Code which acquired the lock at the top of
* the loop would be more readable, but would result in a lot of
* extra locking. Compare:
*
* Straightforward:
*
* LOCK();
* ...
* UNLOCK();
* while (expression) {
* LOCK();
* ...
* UNLOCK();
*
* Unlocked part here...
*
* LOCK();
* ...
* UNLOCK();
* }
*
* Note how if the loop continues we unlock and then immediately lock.
* For N iterations of the loop, this code does 2N+1 locks and 2N+1
* unlocks. Also note that the lock is not held when the while
* condition is tested, which may or may not be important, depending
* on the expression.
*
* As written:
*
* LOCK();
* while (expression) {
* ...
* UNLOCK();
*
* Unlocked part here...
*
* LOCK();
* ...
* }
* UNLOCK();
*
* For N iterations of the loop, this code does N+1 locks and N+1
* unlocks. The while expression is always protected by the lock.
*/
#ifndef USE_WORKER_THREADS
#endif
#ifdef USE_WORKER_THREADS
/*
* For reasons similar to those given in the comment in
* isc_task_send() above, it is safe for us to dequeue
* the task while only holding the manager lock, and then
* change the task to running state while only holding the
* task lock.
*
* If a pause has been requested, don't do any work
* until it's been released.
*/
{
ISC_MSG_WAIT, "wait"));
ISC_MSG_AWAKE, "awake"));
}
#else /* USE_WORKER_THREADS */
if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
break;
#endif /* USE_WORKER_THREADS */
ISC_MSG_WORKING, "working"));
unsigned int dispatch_count = 0;
/*
* Note we only unlock the manager lock if we actually
* have a task to do. We must reacquire the manager
* lock before exiting the 'if (task != NULL)' block.
*/
manager->tasks_ready--;
manager->tasks_running++;
ISC_MSG_RUNNING, "running"));
do {
/*
* Execute the event action.
*/
"execute action"));
(isc_task_t *)task,
event);
}
#ifndef USE_WORKER_THREADS
#endif /* USE_WORKER_THREADS */
}
if (task->references == 0 &&
!TASK_SHUTTINGDOWN(task)) {
/*
* There are no references and no
* pending events for this task,
* which means it will not become
* runnable again via an external
* action (such as sending an event
* or detaching).
*
* We initiate shutdown to prevent
* it from becoming a zombie.
*
* We do this here instead of in
* the "if EMPTY(task->events)" block
* below because:
*
* If we post no shutdown events,
* we want the task to finish.
*
* If we did post shutdown events,
* we still want the task's
* quantum to be applied.
*/
}
/*
* Nothing else to do for this task
* right now.
*/
"empty"));
if (task->references == 0 &&
/*
* The task is done.
*/
"done"));
} else
/*
* Our quantum has expired, but
* there is more work to be done.
* We'll requeue it to the ready
* queue later.
*
* We don't check quantum until
* dispatching at least one event,
* so the minimum quantum is one.
*/
"quantum"));
}
} while (!done);
if (finished)
manager->tasks_running--;
#ifdef USE_WORKER_THREADS
if (manager->exclusive_requested &&
} else if (manager->pause_requested &&
manager->tasks_running == 0) {
}
#endif /* USE_WORKER_THREADS */
if (requeue) {
/*
* We know we're awake, so we don't have
* to wakeup any sleeping threads if the
* ready queue is empty before we requeue.
*
* A possible optimization if the queue is
* empty is to 'goto' the 'if (task != NULL)'
* block, avoiding the ENQUEUE of the task
* and the subsequent immediate DEQUEUE
* (since it is the only executable task).
* We don't do this because then we'd be
* skipping the exit_requested check. The
* cost of ENQUEUE is low anyway, especially
* when you consider that we'd have to do
* an extra EMPTY check to see if we could
* do the optimization. If the ready queue
* were usually nonempty, the 'optimization'
* might even hurt rather than help.
*/
#ifdef USE_WORKER_THREADS
#else
tasks_ready++;
#endif
}
}
#ifdef USE_WORKER_THREADS
/*
* If we are in privileged execution mode and there are no
* tasks remaining on the current ready queue, then
* we're stuck. Automatically drop privileges at that
* point and continue with the regular ready queue.
*/
if (!empty_readyq(manager))
}
#endif
}
#ifndef USE_WORKER_THREADS
if (empty_readyq(manager))
#endif
}
#ifdef USE_WORKER_THREADS
static isc_threadresult_t
#ifdef _WIN32
#endif
ISC_MSG_STARTING, "starting"));
ISC_MSG_EXITING, "exiting"));
#ifdef OPENSSL_LEAKS
ERR_remove_state(0);
#endif
return ((isc_threadresult_t)0);
}
#endif /* USE_WORKER_THREADS */
static void
#ifdef USE_WORKER_THREADS
#endif /* USE_WORKER_THREADS */
#ifdef USE_SHARED_MANAGER
#endif /* USE_SHARED_MANAGER */
}
{
unsigned int i, started = 0;
/*
* Create a new task manager.
*/
#ifndef USE_WORKER_THREADS
UNUSED(i);
#endif
#ifdef USE_SHARED_MANAGER
return (ISC_R_SHUTTINGDOWN);
return (ISC_R_SUCCESS);
}
#endif /* USE_SHARED_MANAGER */
return (ISC_R_NOMEMORY);
if (result != ISC_R_SUCCESS)
goto cleanup_mgr;
if (result != ISC_R_SUCCESS) {
goto cleanup_mgr;
}
#ifdef USE_WORKER_THREADS
workers * sizeof(isc_thread_t));
goto cleanup_lock;
}
"isc_condition_init() %s",
ISC_MSG_FAILED, "failed"));
goto cleanup_threads;
}
"isc_condition_init() %s",
ISC_MSG_FAILED, "failed"));
goto cleanup_workavailable;
}
"isc_condition_init() %s",
ISC_MSG_FAILED, "failed"));
goto cleanup_exclusivegranted;
}
#endif /* USE_WORKER_THREADS */
if (default_quantum == 0)
manager->tasks_running = 0;
manager->tasks_ready = 0;
#ifdef USE_WORKER_THREADS
/*
* Start workers.
*/
for (i = 0; i < workers; i++) {
name);
started++;
}
}
if (started == 0) {
return (ISC_R_NOTHREADS);
}
#endif /* USE_WORKER_THREADS */
#ifdef USE_SHARED_MANAGER
#endif /* USE_SHARED_MANAGER */
return (ISC_R_SUCCESS);
#ifdef USE_WORKER_THREADS
#endif
return (result);
}
void
unsigned int i;
/*
* Destroy '*managerp'.
*/
#ifndef USE_WORKER_THREADS
UNUSED(i);
#endif /* USE_WORKER_THREADS */
#ifdef USE_SHARED_MANAGER
return;
}
#endif
XTHREADTRACE("isc_taskmgr_destroy");
/*
* Only one non-worker thread may ever call this routine.
* If a worker thread wants to initiate shutdown of the
* task manager, it should ask some non-worker thread to call
* isc_taskmgr_destroy(), e.g. by signalling a condition variable
* that the startup thread is sleeping on.
*/
/*
* Detach the exclusive task before acquiring the manager lock
*/
/*
* Unlike elsewhere, we're going to hold this lock a long time.
* We need to do so, because otherwise the list of tasks could
* change while we were traversing it.
*
* This is also the only function where we will hold both the
* task manager lock and a task lock at the same time.
*/
/*
* Make sure we only get called once.
*/
/*
* If privileged mode was on, turn it off.
*/
/*
* Post shutdown event(s) to every task (if they haven't already been
* posted).
*/
if (task_shutdown(task))
}
#ifdef USE_WORKER_THREADS
/*
* Wake up any sleeping workers. This ensures we get work done if
* there's work left to do, and if there are already no tasks left
* it will cause the workers to see manager->exiting.
*/
/*
* Wait for all the worker threads to exit.
*/
#else /* USE_WORKER_THREADS */
/*
* Dispatch the shutdown events.
*/
#ifdef USE_SHARED_MANAGER
#endif
#endif /* USE_WORKER_THREADS */
}
void
}
return (mode);
}
#ifndef USE_WORKER_THREADS
#ifdef USE_SHARED_MANAGER
#endif
return (ISC_FALSE);
return (is_ready);
}
#ifdef USE_SHARED_MANAGER
#endif
return (ISC_R_NOTFOUND);
return (ISC_R_SUCCESS);
}
#else
void
while (manager->tasks_running > 0) {
}
}
void
if (manager->pause_requested) {
}
}
#endif /* USE_WORKER_THREADS */
void
}
else
return (result);
}
#ifdef USE_WORKER_THREADS
/*
 * TODO: REQUIRE(task == task->manager->excl);
 * This check should be here, but it currently fails at shutdown for
 * server->task.
 */
if (manager->exclusive_requested) {
return (ISC_R_LOCKBUSY);
}
}
#else
#endif
return (ISC_R_SUCCESS);
}
void
#ifdef USE_WORKER_THREADS
#else
#endif
}
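/*
 * Usage sketch (editorial): exclusive mode brackets a critical section
 * during which no other task in the manager is dispatching events, e.g.
 *
 *	result = isc_task_beginexclusive(task);
 *	if (result == ISC_R_SUCCESS) {
 *		... modify state shared with other tasks ...
 *		isc_task_endexclusive(task);
 *	}
 *
 * Only one exclusive request can be outstanding at a time; a second one
 * fails with ISC_R_LOCKBUSY (see above).
 */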
void
if (priv)
else
return;
}
return (priv);
}
isc__task_register(void) {
return (isc_task_register(isc__taskmgr_create));
}
return (TASK_SHUTTINGDOWN(task));
}
#ifdef HAVE_LIBXML2
int
int xmlrc;
/*
 * Write out the thread model, and some details that depend on which
 * model is enabled.
 */
#ifdef ISC_PLATFORM_USETHREADS
#else /* ISC_PLATFORM_USETHREADS */
#endif /* ISC_PLATFORM_USETHREADS */
mgr->default_quantum));
ISC_XMLCHAR "name"));
}
ISC_XMLCHAR "references"));
task->references));
}
return (xmlrc);
}
#endif /* HAVE_LIBXML2 */
#ifdef HAVE_JSON
#define CHECKMEM(m) do { \
	if (m == NULL) { \
		result = ISC_R_NOMEMORY; \
		goto error; \
	} \
} while (0)
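/*
 * Usage sketch (editorial): CHECKMEM() assumes a local isc_result_t
 * 'result' and an 'error:' cleanup label in the calling function, e.g.
 *
 *	json_object *obj = json_object_new_object();
 *	CHECKMEM(obj);
 *	...
 *    error:
 *	...
 *	return (result);
 *
 * (json_object_new_object() is a json-c allocator, shown here only as an
 * example of an allocation whose result needs checking.)
 */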
/*
 * Write out the thread model, and some details that depend on which
 * model is enabled.
 */
#ifdef ISC_PLATFORM_USETHREADS
#else /* ISC_PLATFORM_USETHREADS */
#endif /* ISC_PLATFORM_USETHREADS */
{
}
}
return (result);
}
#endif
static void
initialize(void) {
}
LOCK(&createlock);
if (taskmgr_createfunc == NULL)
else
UNLOCK(&createlock);
return (result);
}
unsigned int workers, unsigned int default_quantum,
{
LOCK(&createlock);
managerp);
UNLOCK(&createlock);
if (result == ISC_R_SUCCESS)
return (result);
}
{
if (isc_bind9)
LOCK(&createlock);
managerp);
UNLOCK(&createlock);
return (result);
}
void
if (isc_bind9)
else
}
void
if (isc_bind9)
else
}
if (isc_bind9)
return (isc__taskmgr_mode(manager));
}
isc_task_t **taskp)
{
if (isc_bind9)
}
void
if (isc_bind9)
else
}
void
if (isc_bind9)
else
}
void
if (isc_bind9)
else {
}
}
void
if (isc_bind9)
else {
}
}
unsigned int
{
if (isc_bind9)
}
{
if (isc_bind9)
}
void
if (isc_bind9)
else
}
void
if (!isc_bind9)
return;
}
void
if (isc_bind9)
else
}
unsigned int
{
if (isc_bind9)
}
if (isc_bind9)
return (isc__task_beginexclusive(task));
}
void
if (isc_bind9)
else
}
void
if (isc_bind9)
else
}
if (isc_bind9)
return (isc__task_privilege(task));
}
void
if (!isc_bind9)
return;
}
void
if (!isc_bind9)
return;
}
/*%
 * This is necessary for libisc's internal timer implementation.  Other
 * implementations might skip implementing this.
 */
unsigned int
{
if (isc_bind9)
}