event.c revision 219c1ab259be52daf74b530ee9f80e02c723b7a8
2d2eda71267231c2526be701fe655db125852c1ffielding/* Licensed to the Apache Software Foundation (ASF) under one or more
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * contributor license agreements. See the NOTICE file distributed with
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * this work for additional information regarding copyright ownership.
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * The ASF licenses this file to You under the Apache License, Version 2.0
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * (the "License"); you may not use this file except in compliance with
2d2eda71267231c2526be701fe655db125852c1ffielding * the License. You may obtain a copy of the License at
2d2eda71267231c2526be701fe655db125852c1ffielding *
2d2eda71267231c2526be701fe655db125852c1ffielding * http://www.apache.org/licenses/LICENSE-2.0
2d2eda71267231c2526be701fe655db125852c1ffielding *
2d2eda71267231c2526be701fe655db125852c1ffielding * Unless required by applicable law or agreed to in writing, software
2d2eda71267231c2526be701fe655db125852c1ffielding * distributed under the License is distributed on an "AS IS" BASIS,
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2d2eda71267231c2526be701fe655db125852c1ffielding * See the License for the specific language governing permissions and
2d2eda71267231c2526be701fe655db125852c1ffielding * limitations under the License.
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/**
2d2eda71267231c2526be701fe655db125852c1ffielding * This MPM tries to fix the 'keep alive problem' in HTTP.
f062ed7bd262a37a909dd77ce5fc23b446818823fielding *
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * After a client completes the first request, the client can keep the
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * connection open to send more requests with the same socket. This can save
 * significant overhead in creating TCP connections. However, the major
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * disadvantage is that Apache traditionally keeps an entire child
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * process/thread waiting for data from the client. To solve this problem,
 * this MPM has a dedicated thread for handling both the Listening sockets,
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * and all sockets that are in a Keep Alive status.
f062ed7bd262a37a909dd77ce5fc23b446818823fielding *
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * The MPM assumes the underlying apr_pollset implementation is somewhat
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * threadsafe. This currently is only compatible with KQueue and EPoll. This
2d2eda71267231c2526be701fe655db125852c1ffielding * enables the MPM to avoid extra high level locking or having to wake up the
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * listener thread when a keep-alive socket needs to be sent to it.
f062ed7bd262a37a909dd77ce5fc23b446818823fielding *
 * This MPM does not perform well on older platforms that do not have very good
2d2eda71267231c2526be701fe655db125852c1ffielding * threading, like Linux with a 2.4 kernel, but this does not matter, since we
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * require EPoll or KQueue.
f062ed7bd262a37a909dd77ce5fc23b446818823fielding *
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * For FreeBSD, use 5.3. It is possible to run this MPM on FreeBSD 5.2.1, if
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * you use libkse (see `man libmap.conf`).
2d2eda71267231c2526be701fe655db125852c1ffielding *
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * For NetBSD, use at least 2.0.
f062ed7bd262a37a909dd77ce5fc23b446818823fielding *
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * For Linux, you should use a 2.6 kernel, and make sure your glibc has epoll
f062ed7bd262a37a909dd77ce5fc23b446818823fielding * support compiled in.
f062ed7bd262a37a909dd77ce5fc23b446818823fielding *
f062ed7bd262a37a909dd77ce5fc23b446818823fielding */
f062ed7bd262a37a909dd77ce5fc23b446818823fielding
2d2eda71267231c2526be701fe655db125852c1ffielding#include "apr.h"
2d2eda71267231c2526be701fe655db125852c1ffielding#include "apr_portable.h"
2d2eda71267231c2526be701fe655db125852c1ffielding#include "apr_strings.h"
f062ed7bd262a37a909dd77ce5fc23b446818823fielding#include "apr_file_io.h"
f062ed7bd262a37a909dd77ce5fc23b446818823fielding#include "apr_thread_proc.h"
f062ed7bd262a37a909dd77ce5fc23b446818823fielding#include "apr_signal.h"
2d2eda71267231c2526be701fe655db125852c1ffielding#include "apr_thread_mutex.h"
f062ed7bd262a37a909dd77ce5fc23b446818823fielding#include "apr_poll.h"
f062ed7bd262a37a909dd77ce5fc23b446818823fielding#include "apr_ring.h"
f062ed7bd262a37a909dd77ce5fc23b446818823fielding#include "apr_queue.h"
2d2eda71267231c2526be701fe655db125852c1ffielding#define APR_WANT_STRFUNC
2d2eda71267231c2526be701fe655db125852c1ffielding#include "apr_want.h"
2d2eda71267231c2526be701fe655db125852c1ffielding#include "apr_version.h"
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding#if APR_HAVE_UNISTD_H
2d2eda71267231c2526be701fe655db125852c1ffielding#include <unistd.h>
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
2d2eda71267231c2526be701fe655db125852c1ffielding#if APR_HAVE_SYS_SOCKET_H
2d2eda71267231c2526be701fe655db125852c1ffielding#include <sys/socket.h>
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
2d2eda71267231c2526be701fe655db125852c1ffielding#if APR_HAVE_SYS_WAIT_H
2d2eda71267231c2526be701fe655db125852c1ffielding#include <sys/wait.h>
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#ifdef HAVE_SYS_PROCESSOR_H
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#include <sys/processor.h> /* for bindprocessor() */
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#endif
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
952908500d5f99f35afc5ed510391b9bdc3833farbb#if !APR_HAS_THREADS
952908500d5f99f35afc5ed510391b9bdc3833farbb#error The Event MPM requires APR threads, but they are unavailable.
952908500d5f99f35afc5ed510391b9bdc3833farbb#endif
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
2d2eda71267231c2526be701fe655db125852c1ffielding#include "ap_config.h"
30c289e6bc6d28d210b21edd800ab2cfc78a8381wrowe#include "httpd.h"
bd53cb2bf4d77574fd502e1c02d8c3c0d5431967stoddard#include "http_main.h"
b4c8a80f7dbfc9b56dbe03bdc28f0b5eb5f23697rbb#include "http_log.h"
b4c8a80f7dbfc9b56dbe03bdc28f0b5eb5f23697rbb#include "http_config.h" /* for read_config */
44c46ef733836b32585d135d2d90856e7cfd9929rbb#include "http_core.h" /* for get_remote_host */
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#include "http_connection.h"
2d2eda71267231c2526be701fe655db125852c1ffielding#include "ap_mpm.h"
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#include "pod.h"
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#include "mpm_common.h"
14bea4ba98aabaf554e37165a07123bb05d6736bstoddard#include "ap_listen.h"
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#include "scoreboard.h"
9731f9232bddd7dbac757c780b2b1a2a6931dce7stoddard#include "fdqueue.h"
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#include "mpm_default.h"
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#include "http_vhost.h"
2d2eda71267231c2526be701fe655db125852c1ffielding#include "unixd.h"
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding#include <signal.h>
2d2eda71267231c2526be701fe655db125852c1ffielding#include <limits.h> /* for INT_MAX */
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding#if HAVE_SERF
2d2eda71267231c2526be701fe655db125852c1ffielding#include "mod_serf.h"
2d2eda71267231c2526be701fe655db125852c1ffielding#include "serf.h"
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/* Limit on the total --- clients will be locked out if more servers than
2d2eda71267231c2526be701fe655db125852c1ffielding * this are needed. It is intended solely to keep the server from crashing
2e123e8beedc9f921448c113e2d6823a92fd5261fielding * when things get out of hand.
2e123e8beedc9f921448c113e2d6823a92fd5261fielding *
2d2eda71267231c2526be701fe655db125852c1ffielding * We keep a hard maximum number of servers, for two reasons --- first off,
2d2eda71267231c2526be701fe655db125852c1ffielding * in case something goes seriously wrong, we want to stop the fork bomb
2d2eda71267231c2526be701fe655db125852c1ffielding * short of actually crashing the machine we're running on by filling some
2d2eda71267231c2526be701fe655db125852c1ffielding * kernel table. Secondly, it keeps the size of the scoreboard file small
2d2eda71267231c2526be701fe655db125852c1ffielding * enough that we can read the whole thing without worrying too much about
2d2eda71267231c2526be701fe655db125852c1ffielding * the overhead.
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb */
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#ifndef DEFAULT_SERVER_LIMIT
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#define DEFAULT_SERVER_LIMIT 16
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#endif
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb/* Admin can't tune ServerLimit beyond MAX_SERVER_LIMIT. We want
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb * some sort of compile-time limit to help catch typos.
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb */
2d2eda71267231c2526be701fe655db125852c1ffielding#ifndef MAX_SERVER_LIMIT
2d2eda71267231c2526be701fe655db125852c1ffielding#define MAX_SERVER_LIMIT 20000
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/* Limit on the threads per process. Clients will be locked out if more than
2d2eda71267231c2526be701fe655db125852c1ffielding * this are needed.
2d2eda71267231c2526be701fe655db125852c1ffielding *
 * We keep this for one reason: it keeps the size of the scoreboard file small
2d2eda71267231c2526be701fe655db125852c1ffielding * enough that we can read the whole thing without worrying too much about
2d2eda71267231c2526be701fe655db125852c1ffielding * the overhead.
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding#ifndef DEFAULT_THREAD_LIMIT
2d2eda71267231c2526be701fe655db125852c1ffielding#define DEFAULT_THREAD_LIMIT 64
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/* Admin can't tune ThreadLimit beyond MAX_THREAD_LIMIT. We want
2d2eda71267231c2526be701fe655db125852c1ffielding * some sort of compile-time limit to help catch typos.
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding#ifndef MAX_THREAD_LIMIT
2d2eda71267231c2526be701fe655db125852c1ffielding#define MAX_THREAD_LIMIT 100000
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding#define MPM_CHILD_PID(i) (ap_scoreboard_image->parent[i].pid)
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding#if !APR_VERSION_AT_LEAST(1,4,0)
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#define apr_time_from_msec(x) (x * 1000)
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/*
2d2eda71267231c2526be701fe655db125852c1ffielding * Actual definitions of config globals
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding
static int threads_per_child = 0;   /* Worker threads per child */
static int ap_daemons_to_start = 0;     /* StartServers directive */
static int min_spare_threads = 0;       /* MinSpareThreads directive */
static int max_spare_threads = 0;       /* MaxSpareThreads directive */
static int ap_daemons_limit = 0;        /* MaxClients / threads_per_child */
static int max_clients = 0;             /* MaxClients directive */
static int server_limit = 0;            /* ServerLimit directive */
static int thread_limit = 0;            /* ThreadLimit directive */
static int dying = 0;                   /* set when the child is shutting down */
static int workers_may_exit = 0;        /* workers should stop picking up work */
static int start_thread_may_exit = 0;   /* the thread-starter thread may exit */
static int listener_may_exit = 0;       /* listener should stop accepting */
static int requests_this_child;         /* countdown for MaxRequestsPerChild */
static int num_listensocks = 0;         /* number of listening sockets */
static int resource_shortage = 0;       /* set on thread-creation failure */
static fd_queue_t *worker_queue;        /* accepted sockets awaiting a worker */
static fd_queue_info_t *worker_queue_info;  /* idle-worker accounting */
static int mpm_state = AP_MPMQ_STARTING;    /* reported via AP_MPMQ_MPM_STATE */
static int sick_child_detected;         /* a child exited with a fatal error */
static ap_generation_t volatile my_generation = 0;  /* restart generation */

/* Guards the two timeout rings below; they are shared between the
 * listener thread and the worker threads. */
static apr_thread_mutex_t *timeout_mutex;
APR_RING_HEAD(timeout_head_t, conn_state_t);
static struct timeout_head_t timeout_head, keepalive_timeout_head;

/* Single pollset watched by the listener thread (listeners + keepalive). */
static apr_pollset_t *event_pollset;
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding#if HAVE_SERF
2d2eda71267231c2526be701fe655db125852c1ffieldingtypedef struct {
2d2eda71267231c2526be701fe655db125852c1ffielding apr_pollset_t *pollset;
2d2eda71267231c2526be701fe655db125852c1ffielding apr_pool_t *pool;
2d2eda71267231c2526be701fe655db125852c1ffielding} s_baton_t;
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic serf_context_t *g_serf;
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
2d2eda71267231c2526be701fe655db125852c1ffielding
/* The structure used to pass unique initialization info to each thread */
typedef struct
{
    int pid;                    /* process id of this child */
    int tid;                    /* thread slot within the child */
    int sd;                     /* socket descriptor (unused by workers) */
} proc_info;

/* Structure used to pass information to the thread responsible for
 * creating the rest of the threads.
 */
typedef struct
{
    apr_thread_t **threads;     /* array of worker thread handles */
    apr_thread_t *listener;     /* the listener thread handle */
    int child_num_arg;          /* this child's scoreboard slot */
    apr_threadattr_t *threadattr;   /* attributes used to spawn workers */
} thread_starter;

/* Discriminator for the baton attached to each pollset descriptor. */
typedef enum
{
    PT_CSD,                     /* a client connection socket */
    PT_ACCEPT                   /* a listening socket */
#if HAVE_SERF
    , PT_SERF                   /* a descriptor managed by serf */
#endif
} poll_type_e;

/* Per-descriptor context stored in apr_pollfd_t.client_data. */
typedef struct
{
    poll_type_e type;           /* what kind of descriptor this is */
    int bypass_push;            /* skip pushing back onto the pollset */
    void *baton;                /* type-specific payload */
} listener_poll_type;

/* data retained by event across load/unload of the module
 * allocated on first call to pre-config hook; located on
 * subsequent calls to pre-config hook
 */
typedef struct event_retained_data {
    int first_server_limit;     /* ServerLimit at first startup */
    int first_thread_limit;     /* ThreadLimit at first startup */
    int module_loads;           /* how many times the module was loaded */
} event_retained_data;
static event_retained_data *retained;
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding#define ID_FROM_CHILD_THREAD(c, t) ((c * thread_limit) + t)
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/*
2d2eda71267231c2526be701fe655db125852c1ffielding * The max child slot ever assigned, preserved across restarts. Necessary
2d2eda71267231c2526be701fe655db125852c1ffielding * to deal with MaxClients changes across AP_SIG_GRACEFUL restarts. We
2d2eda71267231c2526be701fe655db125852c1ffielding * use this value to optimize routines that have to scan the entire
2d2eda71267231c2526be701fe655db125852c1ffielding * scoreboard.
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic int max_daemons_limit = -1;
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic ap_event_pod_t *pod;
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/* The event MPM respects a couple of runtime flags that can aid
2d2eda71267231c2526be701fe655db125852c1ffielding * in debugging. Setting the -DNO_DETACH flag will prevent the root process
2d2eda71267231c2526be701fe655db125852c1ffielding * from detaching from its controlling terminal. Additionally, setting
2d2eda71267231c2526be701fe655db125852c1ffielding * the -DONE_PROCESS flag (which implies -DNO_DETACH) will get you the
2d2eda71267231c2526be701fe655db125852c1ffielding * child_main loop running in the process which originally started up.
2d2eda71267231c2526be701fe655db125852c1ffielding * This gives you a pretty nice debugging environment. (You'll get a SIGHUP
2d2eda71267231c2526be701fe655db125852c1ffielding * early in standalone_main; just continue through. This is the server
2d2eda71267231c2526be701fe655db125852c1ffielding * trying to kill off any child processes which it might have lying
2d2eda71267231c2526be701fe655db125852c1ffielding * around --- Apache doesn't keep track of their pids, it just sends
2d2eda71267231c2526be701fe655db125852c1ffielding * SIGHUP to the process group, ignoring it in the root process.
2efb935ae8fe12d5192a3bf2c52c28461b6c68afdgaudet * Continue through and you'll be fine.).
2efb935ae8fe12d5192a3bf2c52c28461b6c68afdgaudet */
2efb935ae8fe12d5192a3bf2c52c28461b6c68afdgaudet
2efb935ae8fe12d5192a3bf2c52c28461b6c68afdgaudetstatic int one_process = 0;
2efb935ae8fe12d5192a3bf2c52c28461b6c68afdgaudet
2efb935ae8fe12d5192a3bf2c52c28461b6c68afdgaudet#ifdef DEBUG_SIGSTOP
2efb935ae8fe12d5192a3bf2c52c28461b6c68afdgaudetint raise_sigstop_flags;
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic apr_pool_t *pconf; /* Pool for config stuff */
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic apr_pool_t *pchild; /* Pool for httpd child stuff */
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic pid_t ap_my_pid; /* Linux getpid() doesn't work except in main
2d2eda71267231c2526be701fe655db125852c1ffielding thread. Use this instead */
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic pid_t parent_pid;
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic apr_os_thread_t *listener_os_thread;
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/* The LISTENER_SIGNAL signal will be sent from the main thread to the
2d2eda71267231c2526be701fe655db125852c1ffielding * listener thread to wake it up for graceful termination (what a child
2d2eda71267231c2526be701fe655db125852c1ffielding * process from an old generation does when the admin does "apachectl
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb * graceful"). This signal will be blocked in all threads of a child
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb * process except for the listener thread.
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb */
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#define LISTENER_SIGNAL SIGHUP
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb/* An array of socket descriptors in use by each thread used to
32644678e889a3253f71bde0b3d6daea6d9dc21awrowe * perform a non-graceful (forced) shutdown of the server.
32644678e889a3253f71bde0b3d6daea6d9dc21awrowe */
8da41ac411143966d8c280c6ae54207eef0293b6rbbstatic apr_socket_t **worker_sockets;
32644678e889a3253f71bde0b3d6daea6d9dc21awrowe
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbbstatic void close_worker_sockets(void)
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb{
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb int i;
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb for (i = 0; i < threads_per_child; i++) {
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb if (worker_sockets[i]) {
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb apr_socket_close(worker_sockets[i]);
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb worker_sockets[i] = NULL;
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb }
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb }
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb}
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
/* Wake the listener thread so it notices listener_may_exit and can begin
 * shutting down.  Safe to call from any thread of the child process.
 */
static void wakeup_listener(void)
{
    listener_may_exit = 1;
    if (!listener_os_thread) {
        /* XXX there is an obscure path that this doesn't handle perfectly:
         * right after listener thread is created but before
         * listener_os_thread is set, the first worker thread hits an
         * error and starts graceful termination
         */
        return;
    }

    /* unblock the listener if it's waiting for a worker */
    ap_queue_info_term(worker_queue_info);

    /*
     * we should just be able to "kill(ap_my_pid, LISTENER_SIGNAL)" on all
     * platforms and wake up the listener thread since it is the only thread
     * with SIGHUP unblocked, but that doesn't work on Linux
     */
#ifdef HAVE_PTHREAD_KILL
    pthread_kill(*listener_os_thread, LISTENER_SIGNAL);
#else
    kill(ap_my_pid, LISTENER_SIGNAL);
#endif
}
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#define ST_INIT 0
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#define ST_GRACEFUL 1
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb#define ST_UNGRACEFUL 2
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbbstatic int terminate_mode = ST_INIT;
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
/* Initiate child shutdown: mode is ST_GRACEFUL or ST_UNGRACEFUL.
 * Idempotent for a repeated mode; a second call with a different mode
 * (e.g. escalating graceful -> ungraceful) is processed again.
 */
static void signal_threads(int mode)
{
    if (terminate_mode == mode) {
        return;
    }
    terminate_mode = mode;
    mpm_state = AP_MPMQ_STOPPING;

    /* in case we weren't called from the listener thread, wake up the
     * listener thread
     */
    wakeup_listener();

    /* for ungraceful termination, let the workers exit now;
     * for graceful termination, the listener thread will notify the
     * workers to exit once it has stopped accepting new connections
     */
    if (mode == ST_UNGRACEFUL) {
        workers_may_exit = 1;
        ap_queue_interrupt_all(worker_queue);
        close_worker_sockets(); /* forcefully kill all current connections */
    }
}
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
/* MPM query hook: answer ap_mpm_query() requests about this MPM's
 * compile-time limits and runtime state.  On a recognized query_code,
 * *result receives the answer and *rv is APR_SUCCESS; an unrecognized
 * code leaves *result untouched and sets *rv to APR_ENOTIMPL.
 * Always returns OK, per the mpm_query hook convention.
 */
static int event_query(int query_code, int *result, apr_status_t *rv)
{
    *rv = APR_SUCCESS;
    switch (query_code) {
    case AP_MPMQ_MAX_DAEMON_USED:
        *result = max_daemons_limit;
        break;
    case AP_MPMQ_IS_THREADED:
        *result = AP_MPMQ_STATIC;
        break;
    case AP_MPMQ_IS_FORKED:
        *result = AP_MPMQ_DYNAMIC;
        break;
    case AP_MPMQ_IS_ASYNC:
        *result = 1;
        break;
    case AP_MPMQ_HAS_SERF:
        /* Only report serf support when it was compiled in; the previous
         * unconditional 1 misreported builds without HAVE_SERF. */
#if HAVE_SERF
        *result = 1;
#else
        *result = 0;
#endif
        break;
    case AP_MPMQ_HARD_LIMIT_DAEMONS:
        *result = server_limit;
        break;
    case AP_MPMQ_HARD_LIMIT_THREADS:
        *result = thread_limit;
        break;
    case AP_MPMQ_MAX_THREADS:
        *result = threads_per_child;
        break;
    case AP_MPMQ_MIN_SPARE_DAEMONS:
        *result = 0;            /* spares are managed per-thread, not per-daemon */
        break;
    case AP_MPMQ_MIN_SPARE_THREADS:
        *result = min_spare_threads;
        break;
    case AP_MPMQ_MAX_SPARE_DAEMONS:
        *result = 0;            /* see AP_MPMQ_MIN_SPARE_DAEMONS */
        break;
    case AP_MPMQ_MAX_SPARE_THREADS:
        *result = max_spare_threads;
        break;
    case AP_MPMQ_MAX_REQUESTS_DAEMON:
        *result = ap_max_requests_per_child;
        break;
    case AP_MPMQ_MAX_DAEMONS:
        *result = ap_daemons_limit;
        break;
    case AP_MPMQ_MPM_STATE:
        *result = mpm_state;
        break;
    case AP_MPMQ_GENERATION:
        *result = my_generation;
        break;
    default:
        *rv = APR_ENOTIMPL;
        break;
    }
    return OK;
}
2d2eda71267231c2526be701fe655db125852c1ffielding
/* Record in the scoreboard that child slot childnum no longer has a
 * live process (pid 0 marks the slot as free).  Always succeeds.
 */
static apr_status_t event_note_child_killed(int childnum)
{
    ap_scoreboard_image->parent[childnum].pid = 0;
    return APR_SUCCESS;
}
2d2eda71267231c2526be701fe655db125852c1ffielding
/* Return the canonical name of this MPM, as reported to the core. */
static const char *event_get_name(void)
{
    static const char mpm_name[] = "event";

    return mpm_name;
}
2d2eda71267231c2526be701fe655db125852c1ffielding
/* a clean exit from a child with proper cleanup */
static void clean_child_exit(int code) __attribute__ ((noreturn));
static void clean_child_exit(int code)
{
    /* Flag shutdown so any AP_MPMQ_MPM_STATE query reports STOPPING. */
    mpm_state = AP_MPMQ_STOPPING;
    if (pchild) {
        /* Destroying the child pool runs all registered cleanups
         * (sockets, files, subpools) before the process exits. */
        apr_pool_destroy(pchild);
    }
    exit(code);
}
2d2eda71267231c2526be701fe655db125852c1ffielding
/* Signal handler: exit the child immediately with a zero (clean) status. */
static void just_die(int sig)
{
    (void)sig;                  /* which signal arrived doesn't matter */
    clean_child_exit(0);
}
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/*****************************************************************
2d2eda71267231c2526be701fe655db125852c1ffielding * Connection structures and accounting...
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding
/* volatile just in case */
static int volatile shutdown_pending;   /* set by ap_start_shutdown() */
static int volatile restart_pending;    /* set by ap_start_restart() */
static int volatile is_graceful;        /* nonzero: stop/restart is graceful */
static volatile int child_fatal;        /* presumably set when a child hits a
                                         * fatal startup error — used outside
                                         * this chunk, confirm against callers */
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/*
2d2eda71267231c2526be701fe655db125852c1ffielding * ap_start_shutdown() and ap_start_restart(), below, are a first stab at
2d2eda71267231c2526be701fe655db125852c1ffielding * functions to initiate shutdown or restart without relying on signals.
2d2eda71267231c2526be701fe655db125852c1ffielding * Previously this was initiated in sig_term() and restart() signal handlers,
2d2eda71267231c2526be701fe655db125852c1ffielding * but we want to be able to start a shutdown/restart from other sources --
2d2eda71267231c2526be701fe655db125852c1ffielding * e.g. on Win32, from the service manager. Now the service manager can
 * call ap_start_shutdown() or ap_start_restart() as appropriate. Note that
2d2eda71267231c2526be701fe655db125852c1ffielding * these functions can also be called by the child processes, since global
2d2eda71267231c2526be701fe655db125852c1ffielding * variables are no longer used to pass on the required action to the parent.
2d2eda71267231c2526be701fe655db125852c1ffielding *
2d2eda71267231c2526be701fe655db125852c1ffielding * These should only be called from the parent process itself, since the
2d2eda71267231c2526be701fe655db125852c1ffielding * parent process will use the shutdown_pending and restart_pending variables
2d2eda71267231c2526be701fe655db125852c1ffielding * to determine whether to shutdown or restart. The child process should
763f7b125b6d3dd1e4992a3822005efa2616f983coar * call signal_parent() directly to tell the parent to die -- this will
 * cause neither of those variables to be set, which the parent will
2d2eda71267231c2526be701fe655db125852c1ffielding * assume means something serious is wrong (which it will be, for the
2d2eda71267231c2526be701fe655db125852c1ffielding * child to force an exit) and so do an exit anyway.
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic void ap_start_shutdown(int graceful)
2d2eda71267231c2526be701fe655db125852c1ffielding{
2d2eda71267231c2526be701fe655db125852c1ffielding mpm_state = AP_MPMQ_STOPPING;
2d2eda71267231c2526be701fe655db125852c1ffielding if (shutdown_pending == 1) {
2d2eda71267231c2526be701fe655db125852c1ffielding /* Um, is this _probably_ not an error, if the user has
2d2eda71267231c2526be701fe655db125852c1ffielding * tried to do a shutdown twice quickly, so we won't
2d2eda71267231c2526be701fe655db125852c1ffielding * worry about reporting it.
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding return;
2d2eda71267231c2526be701fe655db125852c1ffielding }
2d2eda71267231c2526be701fe655db125852c1ffielding shutdown_pending = 1;
2d2eda71267231c2526be701fe655db125852c1ffielding is_graceful = graceful;
2d2eda71267231c2526be701fe655db125852c1ffielding}
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding/* do a graceful restart if graceful == 1 */
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic void ap_start_restart(int graceful)
2d2eda71267231c2526be701fe655db125852c1ffielding{
2d2eda71267231c2526be701fe655db125852c1ffielding mpm_state = AP_MPMQ_STOPPING;
2d2eda71267231c2526be701fe655db125852c1ffielding if (restart_pending == 1) {
2d2eda71267231c2526be701fe655db125852c1ffielding /* Probably not an error - don't bother reporting it */
54e94821097724bf413d2d4cc70711760f7494e1trawick return;
54e94821097724bf413d2d4cc70711760f7494e1trawick }
54e94821097724bf413d2d4cc70711760f7494e1trawick restart_pending = 1;
2d2eda71267231c2526be701fe655db125852c1ffielding is_graceful = graceful;
54e94821097724bf413d2d4cc70711760f7494e1trawick}
54e94821097724bf413d2d4cc70711760f7494e1trawick
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic void sig_term(int sig)
2d2eda71267231c2526be701fe655db125852c1ffielding{
54e94821097724bf413d2d4cc70711760f7494e1trawick ap_start_shutdown(sig == AP_SIG_GRACEFUL_STOP);
54e94821097724bf413d2d4cc70711760f7494e1trawick}
54e94821097724bf413d2d4cc70711760f7494e1trawick
54e94821097724bf413d2d4cc70711760f7494e1trawickstatic void restart(int sig)
54e94821097724bf413d2d4cc70711760f7494e1trawick{
54e94821097724bf413d2d4cc70711760f7494e1trawick ap_start_restart(sig == AP_SIG_GRACEFUL);
54e94821097724bf413d2d4cc70711760f7494e1trawick}
54e94821097724bf413d2d4cc70711760f7494e1trawick
/* Install the parent process's signal handlers: termination and restart
 * handlers (graceful and ungraceful variants), default dispositions for
 * resource-limit signals, and SIG_IGN for SIGPIPE.  Uses sigaction() when
 * available, falling back to apr_signal() otherwise.
 */
static void set_signals(void)
{
#ifndef NO_USE_SIGACTION
    struct sigaction sa;
#endif

    if (!one_process) {
        /* Install crash handlers (SIGSEGV etc.) for real server runs. */
        ap_fatal_signal_setup(ap_server_conf, pconf);
    }

#ifndef NO_USE_SIGACTION
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;

    /* Shutdown signals: SIGTERM (ungraceful), AP_SIG_GRACEFUL_STOP and
     * SIGINT share the same handler; the handler distinguishes them.
     */
    sa.sa_handler = sig_term;
    if (sigaction(SIGTERM, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                     "sigaction(SIGTERM)");
#ifdef AP_SIG_GRACEFUL_STOP
    if (sigaction(AP_SIG_GRACEFUL_STOP, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                     "sigaction(" AP_SIG_GRACEFUL_STOP_STRING ")");
#endif
#ifdef SIGINT
    if (sigaction(SIGINT, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                     "sigaction(SIGINT)");
#endif
    /* Reset resource-limit signals to their default (fatal) disposition.
     * NOTE(review): the apr_signal() fallback below does this only when
     * !one_process; this branch does it unconditionally — confirm whether
     * the asymmetry is intentional.
     */
#ifdef SIGXCPU
    sa.sa_handler = SIG_DFL;
    if (sigaction(SIGXCPU, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                     "sigaction(SIGXCPU)");
#endif
#ifdef SIGXFSZ
    sa.sa_handler = SIG_DFL;
    if (sigaction(SIGXFSZ, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                     "sigaction(SIGXFSZ)");
#endif
#ifdef SIGPIPE
    /* Writes to closed client sockets must not kill the server. */
    sa.sa_handler = SIG_IGN;
    if (sigaction(SIGPIPE, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                     "sigaction(SIGPIPE)");
#endif

    /* we want to ignore HUPs and AP_SIG_GRACEFUL while we're busy
     * processing one */
    sigaddset(&sa.sa_mask, SIGHUP);
    sigaddset(&sa.sa_mask, AP_SIG_GRACEFUL);
    sa.sa_handler = restart;
    if (sigaction(SIGHUP, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                     "sigaction(SIGHUP)");
    if (sigaction(AP_SIG_GRACEFUL, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf,
                     "sigaction(" AP_SIG_GRACEFUL_STRING ")");
#else
    /* Fallback path for platforms without a usable sigaction(). */
    if (!one_process) {
#ifdef SIGXCPU
        apr_signal(SIGXCPU, SIG_DFL);
#endif /* SIGXCPU */
#ifdef SIGXFSZ
        apr_signal(SIGXFSZ, SIG_DFL);
#endif /* SIGXFSZ */
    }

    apr_signal(SIGTERM, sig_term);
#ifdef SIGHUP
    apr_signal(SIGHUP, restart);
#endif /* SIGHUP */
#ifdef AP_SIG_GRACEFUL
    apr_signal(AP_SIG_GRACEFUL, restart);
#endif /* AP_SIG_GRACEFUL */
#ifdef AP_SIG_GRACEFUL_STOP
    apr_signal(AP_SIG_GRACEFUL_STOP, sig_term);
#endif /* AP_SIG_GRACEFUL_STOP */
#ifdef SIGPIPE
    apr_signal(SIGPIPE, SIG_IGN);
#endif /* SIGPIPE */

#endif
}
3887202241db08986e94b252fbd06a55e55d4b2dbhyde
3887202241db08986e94b252fbd06a55e55d4b2dbhyde/*****************************************************************
3887202241db08986e94b252fbd06a55e55d4b2dbhyde * Child process main loop.
952908500d5f99f35afc5ed510391b9bdc3833farbb */
2d2eda71267231c2526be701fe655db125852c1ffielding
952908500d5f99f35afc5ed510391b9bdc3833farbbstatic int process_socket(apr_thread_t *thd, apr_pool_t * p, apr_socket_t * sock,
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm conn_state_t * cs, int my_child_num,
952908500d5f99f35afc5ed510391b9bdc3833farbb int my_thread_num)
2d2eda71267231c2526be701fe655db125852c1ffielding{
952908500d5f99f35afc5ed510391b9bdc3833farbb conn_rec *c;
2d2eda71267231c2526be701fe655db125852c1ffielding listener_poll_type *pt;
2d2eda71267231c2526be701fe655db125852c1ffielding long conn_id = ID_FROM_CHILD_THREAD(my_child_num, my_thread_num);
952908500d5f99f35afc5ed510391b9bdc3833farbb int rc;
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_sb_handle_t *sbh;
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_create_sb_handle(&sbh, p, my_child_num, my_thread_num);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (cs == NULL) { /* This is a new connection */
2d2eda71267231c2526be701fe655db125852c1ffielding
952908500d5f99f35afc5ed510391b9bdc3833farbb cs = apr_pcalloc(p, sizeof(conn_state_t));
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb pt = apr_pcalloc(p, sizeof(*pt));
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding cs->bucket_alloc = apr_bucket_alloc_create(p);
2d2eda71267231c2526be701fe655db125852c1ffielding c = ap_run_create_connection(p, ap_server_conf, sock,
2d2eda71267231c2526be701fe655db125852c1ffielding conn_id, sbh, cs->bucket_alloc);
2d2eda71267231c2526be701fe655db125852c1ffielding c->current_thread = thd;
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->c = c;
952908500d5f99f35afc5ed510391b9bdc3833farbb c->cs = cs;
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->p = p;
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->pfd.desc_type = APR_POLL_SOCKET;
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->pfd.reqevents = APR_POLLIN;
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->pfd.desc.s = sock;
952908500d5f99f35afc5ed510391b9bdc3833farbb pt->type = PT_CSD;
952908500d5f99f35afc5ed510391b9bdc3833farbb pt->bypass_push = 1;
952908500d5f99f35afc5ed510391b9bdc3833farbb pt->baton = cs;
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->pfd.client_data = pt;
952908500d5f99f35afc5ed510391b9bdc3833farbb APR_RING_ELEM_INIT(cs, timeout_list);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_update_vhost_given_ip(c);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb rc = ap_run_pre_connection(c, sock);
952908500d5f99f35afc5ed510391b9bdc3833farbb if (rc != OK && rc != DONE) {
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
952908500d5f99f35afc5ed510391b9bdc3833farbb "process_socket: connection aborted");
952908500d5f99f35afc5ed510391b9bdc3833farbb c->aborted = 1;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb /**
2d2eda71267231c2526be701fe655db125852c1ffielding * XXX If the platform does not have a usable way of bundling
2d2eda71267231c2526be701fe655db125852c1ffielding * accept() with a socket readability check, like Win32,
2d2eda71267231c2526be701fe655db125852c1ffielding * and there are measurable delays before the
2d2eda71267231c2526be701fe655db125852c1ffielding * socket is readable due to the first data packet arriving,
2d2eda71267231c2526be701fe655db125852c1ffielding * it might be better to create the cs on the listener thread
952908500d5f99f35afc5ed510391b9bdc3833farbb * with the state set to CONN_STATE_CHECK_REQUEST_LINE_READABLE
952908500d5f99f35afc5ed510391b9bdc3833farbb *
952908500d5f99f35afc5ed510391b9bdc3833farbb * FreeBSD users will want to enable the HTTP accept filter
952908500d5f99f35afc5ed510391b9bdc3833farbb * module in their kernel for the highest performance
952908500d5f99f35afc5ed510391b9bdc3833farbb * When the accept filter is active, sockets are kept in the
952908500d5f99f35afc5ed510391b9bdc3833farbb * kernel until a HTTP request is received.
952908500d5f99f35afc5ed510391b9bdc3833farbb */
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->state = CONN_STATE_READ_REQUEST_LINE;
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb else {
952908500d5f99f35afc5ed510391b9bdc3833farbb c = cs->c;
952908500d5f99f35afc5ed510391b9bdc3833farbb c->sbh = sbh;
952908500d5f99f35afc5ed510391b9bdc3833farbb pt = cs->pfd.client_data;
952908500d5f99f35afc5ed510391b9bdc3833farbb c->current_thread = thd;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (c->clogging_input_filters && !c->aborted) {
952908500d5f99f35afc5ed510391b9bdc3833farbb /* Since we have an input filter which 'cloggs' the input stream,
952908500d5f99f35afc5ed510391b9bdc3833farbb * like mod_ssl, lets just do the normal read from input filters,
952908500d5f99f35afc5ed510391b9bdc3833farbb * like the Worker MPM does.
952908500d5f99f35afc5ed510391b9bdc3833farbb */
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_run_process_connection(c);
952908500d5f99f35afc5ed510391b9bdc3833farbb if (cs->state != CONN_STATE_SUSPENDED) {
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->state = CONN_STATE_LINGER;
2d2eda71267231c2526be701fe655db125852c1ffielding }
5d54ba1fdf6f8d7167fafcd93bef30df3906b1aecoar }
2d2eda71267231c2526be701fe655db125852c1ffielding
952908500d5f99f35afc5ed510391b9bdc3833farbbread_request:
952908500d5f99f35afc5ed510391b9bdc3833farbb if (cs->state == CONN_STATE_READ_REQUEST_LINE) {
952908500d5f99f35afc5ed510391b9bdc3833farbb if (!c->aborted) {
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_run_process_connection(c);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb /* state will be updated upon return
2d2eda71267231c2526be701fe655db125852c1ffielding * fall thru to either wait for readability/timeout or
2d2eda71267231c2526be701fe655db125852c1ffielding * do lingering close
2d2eda71267231c2526be701fe655db125852c1ffielding */
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb else {
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->state = CONN_STATE_LINGER;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (cs->state == CONN_STATE_WRITE_COMPLETION) {
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_filter_t *output_filter = c->output_filters;
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_status_t rv;
952908500d5f99f35afc5ed510391b9bdc3833farbb while (output_filter->next != NULL) {
952908500d5f99f35afc5ed510391b9bdc3833farbb output_filter = output_filter->next;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb rv = output_filter->frec->filter_func.out_func(output_filter, NULL);
952908500d5f99f35afc5ed510391b9bdc3833farbb if (rv != APR_SUCCESS) {
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf,
952908500d5f99f35afc5ed510391b9bdc3833farbb "network write failure in core output filter");
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->state = CONN_STATE_LINGER;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb else if (c->data_in_output_filters) {
952908500d5f99f35afc5ed510391b9bdc3833farbb /* Still in WRITE_COMPLETION_STATE:
952908500d5f99f35afc5ed510391b9bdc3833farbb * Set a write timeout for this connection, and let the
2d2eda71267231c2526be701fe655db125852c1ffielding * event thread poll for writeability.
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding cs->expiration_time = ap_server_conf->timeout + apr_time_now();
2d2eda71267231c2526be701fe655db125852c1ffielding apr_thread_mutex_lock(timeout_mutex);
2d2eda71267231c2526be701fe655db125852c1ffielding APR_RING_INSERT_TAIL(&timeout_head, cs, conn_state_t, timeout_list);
2d2eda71267231c2526be701fe655db125852c1ffielding apr_thread_mutex_unlock(timeout_mutex);
2d2eda71267231c2526be701fe655db125852c1ffielding pt->bypass_push = 0;
2d2eda71267231c2526be701fe655db125852c1ffielding cs->pfd.reqevents = APR_POLLOUT | APR_POLLHUP | APR_POLLERR;
2d2eda71267231c2526be701fe655db125852c1ffielding rc = apr_pollset_add(event_pollset, &cs->pfd);
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm return 1;
2d2eda71267231c2526be701fe655db125852c1ffielding }
2d2eda71267231c2526be701fe655db125852c1ffielding else if (c->keepalive != AP_CONN_KEEPALIVE || c->aborted ||
2d2eda71267231c2526be701fe655db125852c1ffielding listener_may_exit) {
952908500d5f99f35afc5ed510391b9bdc3833farbb c->cs->state = CONN_STATE_LINGER;
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm }
952908500d5f99f35afc5ed510391b9bdc3833farbb else if (c->data_in_input_filters) {
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm cs->state = CONN_STATE_READ_REQUEST_LINE;
952908500d5f99f35afc5ed510391b9bdc3833farbb goto read_request;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm else {
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->state = CONN_STATE_CHECK_REQUEST_LINE_READABLE;
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm }
952908500d5f99f35afc5ed510391b9bdc3833farbb }
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm
2d2eda71267231c2526be701fe655db125852c1ffielding if (cs->state == CONN_STATE_LINGER) {
2d2eda71267231c2526be701fe655db125852c1ffielding ap_lingering_close(c);
2d2eda71267231c2526be701fe655db125852c1ffielding apr_pool_clear(p);
2d2eda71267231c2526be701fe655db125852c1ffielding ap_push_pool(worker_queue_info, p);
2d2eda71267231c2526be701fe655db125852c1ffielding return 0;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
2d2eda71267231c2526be701fe655db125852c1ffielding else if (cs->state == CONN_STATE_CHECK_REQUEST_LINE_READABLE) {
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_status_t rc;
2d2eda71267231c2526be701fe655db125852c1ffielding listener_poll_type *pt = (listener_poll_type *) cs->pfd.client_data;
2d2eda71267231c2526be701fe655db125852c1ffielding
952908500d5f99f35afc5ed510391b9bdc3833farbb /* It greatly simplifies the logic to use a single timeout value here
2d2eda71267231c2526be701fe655db125852c1ffielding * because the new element can just be added to the end of the list and
952908500d5f99f35afc5ed510391b9bdc3833farbb * it will stay sorted in expiration time sequence. If brand new
952908500d5f99f35afc5ed510391b9bdc3833farbb * sockets are sent to the event thread for a readability check, this
952908500d5f99f35afc5ed510391b9bdc3833farbb * will be a slight behavior change - they use the non-keepalive
952908500d5f99f35afc5ed510391b9bdc3833farbb * timeout today. With a normal client, the socket will be readable in
2d2eda71267231c2526be701fe655db125852c1ffielding * a few milliseconds anyway.
952908500d5f99f35afc5ed510391b9bdc3833farbb */
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->expiration_time = ap_server_conf->keep_alive_timeout +
2e123e8beedc9f921448c113e2d6823a92fd5261fielding apr_time_now();
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_lock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb APR_RING_INSERT_TAIL(&keepalive_timeout_head, cs, conn_state_t, timeout_list);
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_unlock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb
2d2eda71267231c2526be701fe655db125852c1ffielding pt->bypass_push = 0;
952908500d5f99f35afc5ed510391b9bdc3833farbb /* Add work to pollset. */
2d2eda71267231c2526be701fe655db125852c1ffielding cs->pfd.reqevents = APR_POLLIN;
952908500d5f99f35afc5ed510391b9bdc3833farbb rc = apr_pollset_add(event_pollset, &cs->pfd);
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding if (rc != APR_SUCCESS) {
2d2eda71267231c2526be701fe655db125852c1ffielding ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
2d2eda71267231c2526be701fe655db125852c1ffielding "process_socket: apr_pollset_add failure");
2d2eda71267231c2526be701fe655db125852c1ffielding AP_DEBUG_ASSERT(rc == APR_SUCCESS);
2d2eda71267231c2526be701fe655db125852c1ffielding }
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb return 1;
952908500d5f99f35afc5ed510391b9bdc3833farbb}
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb/* requests_this_child has gone to zero or below. See if the admin coded
2d2eda71267231c2526be701fe655db125852c1ffielding "MaxConnectionsPerChild 0", and keep going in that case. Doing it this way
952908500d5f99f35afc5ed510391b9bdc3833farbb simplifies the hot path in worker_thread */
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic void check_infinite_requests(void)
952908500d5f99f35afc5ed510391b9bdc3833farbb{
952908500d5f99f35afc5ed510391b9bdc3833farbb if (ap_max_requests_per_child) {
952908500d5f99f35afc5ed510391b9bdc3833farbb signal_threads(ST_GRACEFUL);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb else {
952908500d5f99f35afc5ed510391b9bdc3833farbb requests_this_child = INT_MAX; /* keep going */
2d2eda71267231c2526be701fe655db125852c1ffielding }
2d2eda71267231c2526be701fe655db125852c1ffielding}
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic void unblock_signal(int sig)
2d2eda71267231c2526be701fe655db125852c1ffielding{
2d2eda71267231c2526be701fe655db125852c1ffielding sigset_t sig_mask;
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb sigemptyset(&sig_mask);
952908500d5f99f35afc5ed510391b9bdc3833farbb sigaddset(&sig_mask, sig);
952908500d5f99f35afc5ed510391b9bdc3833farbb#if defined(SIGPROCMASK_SETS_THREAD_MASK)
2d2eda71267231c2526be701fe655db125852c1ffielding sigprocmask(SIG_UNBLOCK, &sig_mask, NULL);
952908500d5f99f35afc5ed510391b9bdc3833farbb#else
2d2eda71267231c2526be701fe655db125852c1ffielding pthread_sigmask(SIG_UNBLOCK, &sig_mask, NULL);
2d2eda71267231c2526be701fe655db125852c1ffielding#endif
2d2eda71267231c2526be701fe655db125852c1ffielding}
2d2eda71267231c2526be701fe655db125852c1ffielding
/* Deliberately empty signal handler.
 * XXX If specifying SIG_IGN were guaranteed to interrupt a blocked
 * syscall, this goofy function would be unnecessary.
 */
static void dummy_signal_handler(int sig)
{
    (void)sig;
}
952908500d5f99f35afc5ed510391b9bdc3833farbb
3a50e4da8a0db4515ab45678e5b39ff7e7594320trawick
#if HAVE_SERF
/* serf callback: register pfd with our pollset, tagging it as a serf
 * descriptor so the listener can route its events back to serf.
 */
static apr_status_t s_socket_add(void *user_baton,
                                 apr_pollfd_t *pfd,
                                 void *serf_baton)
{
    s_baton_t *s = (s_baton_t*)user_baton;
    /* XXXXX: recycle listener_poll_types */
    listener_poll_type *pt = malloc(sizeof(*pt));
    /* Fix: malloc() was previously dereferenced without a check,
     * crashing on OOM instead of reporting it.
     */
    if (pt == NULL) {
        return APR_ENOMEM;
    }
    pt->type = PT_SERF;
    pt->baton = serf_baton;
    pfd->client_data = pt;
    return apr_pollset_add(s->pollset, pfd);
}

/* serf callback: withdraw pfd from the pollset and free its tag
 * (allocated in s_socket_add above).
 */
static apr_status_t s_socket_remove(void *user_baton,
                                    apr_pollfd_t *pfd,
                                    void *serf_baton)
{
    s_baton_t *s = (s_baton_t*)user_baton;
    listener_poll_type *pt = pfd->client_data;
    free(pt);
    return apr_pollset_remove(s->pollset, pfd);
}
#endif
2d2eda71267231c2526be701fe655db125852c1ffielding
/* Build the listener's pollset: initialize the timeout rings, register
 * every listening socket (non-blocking, tagged PT_ACCEPT), and — when
 * built with serf support — create and publish the serf context.
 */
static apr_status_t init_pollset(apr_pool_t *p)
{
#if HAVE_SERF
    s_baton_t *baton = NULL;
#endif
    ap_listen_rec *lr;
    listener_poll_type *pt;

    /* Empty rings for write-completion and keepalive timeout tracking. */
    APR_RING_INIT(&timeout_head, conn_state_t, timeout_list);
    APR_RING_INIT(&keepalive_timeout_head, conn_state_t, timeout_list);

    for (lr = ap_listeners; lr != NULL; lr = lr->next) {
        apr_pollfd_t *pfd = apr_palloc(p, sizeof(*pfd));
        pt = apr_pcalloc(p, sizeof(*pt));
        pfd->desc_type = APR_POLL_SOCKET;
        pfd->desc.s = lr->sd;
        pfd->reqevents = APR_POLLIN;

        /* PT_ACCEPT tells the listener this fd is a listening socket. */
        pt->type = PT_ACCEPT;
        pt->baton = lr;

        pfd->client_data = pt;

        apr_socket_opt_set(pfd->desc.s, APR_SO_NONBLOCK, 1);
        /* NOTE(review): this apr_pollset_add() return value is ignored,
         * unlike the checked calls in process_socket — confirm whether a
         * failure here should abort startup.
         */
        apr_pollset_add(event_pollset, pfd);

        lr->accept_func = ap_unixd_accept;
    }

#if HAVE_SERF
    baton = apr_pcalloc(p, sizeof(*baton));
    baton->pollset = event_pollset;
    /* TODO: subpools, threads, reuse, etc.  -- currently use malloc() inside :( */
    baton->pool = p;

    g_serf = serf_context_create_ex(baton,
                                    s_socket_add,
                                    s_socket_remove, p);

    /* Expose the serf context so other modules can find it by provider. */
    ap_register_provider(p, "mpm_serf",
                         "instance", "0", g_serf);

#endif

    return APR_SUCCESS;
}
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic apr_status_t push_timer2worker(timer_event_t* te)
2d2eda71267231c2526be701fe655db125852c1ffielding{
2d2eda71267231c2526be701fe655db125852c1ffielding return ap_queue_push_timer(worker_queue, te);
2d2eda71267231c2526be701fe655db125852c1ffielding}
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic apr_status_t push2worker(const apr_pollfd_t * pfd,
2d2eda71267231c2526be701fe655db125852c1ffielding apr_pollset_t * pollset)
952908500d5f99f35afc5ed510391b9bdc3833farbb{
952908500d5f99f35afc5ed510391b9bdc3833farbb listener_poll_type *pt = (listener_poll_type *) pfd->client_data;
2d2eda71267231c2526be701fe655db125852c1ffielding conn_state_t *cs = (conn_state_t *) pt->baton;
169f62b04de69074b561b4e6dcf6f82572a5e367trawick apr_status_t rc;
2d2eda71267231c2526be701fe655db125852c1ffielding
952908500d5f99f35afc5ed510391b9bdc3833farbb if (pt->bypass_push) {
952908500d5f99f35afc5ed510391b9bdc3833farbb return APR_SUCCESS;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb pt->bypass_push = 1;
952908500d5f99f35afc5ed510391b9bdc3833farbb
2d2eda71267231c2526be701fe655db125852c1ffielding rc = apr_pollset_remove(pollset, pfd);
2d2eda71267231c2526be701fe655db125852c1ffielding
952908500d5f99f35afc5ed510391b9bdc3833farbb /*
2d2eda71267231c2526be701fe655db125852c1ffielding * Some of the pollset backends, like KQueue or Epoll
952908500d5f99f35afc5ed510391b9bdc3833farbb * automagically remove the FD if the socket is closed,
3887202241db08986e94b252fbd06a55e55d4b2dbhyde * therefore, we can accept _SUCCESS or _NOTFOUND,
952908500d5f99f35afc5ed510391b9bdc3833farbb * and we still want to keep going
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding if (rc != APR_SUCCESS && !APR_STATUS_IS_NOTFOUND(rc)) {
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->state = CONN_STATE_LINGER;
2d2eda71267231c2526be701fe655db125852c1ffielding }
952908500d5f99f35afc5ed510391b9bdc3833farbb
2d2eda71267231c2526be701fe655db125852c1ffielding rc = ap_queue_push(worker_queue, cs->pfd.desc.s, cs, cs->p);
2d2eda71267231c2526be701fe655db125852c1ffielding if (rc != APR_SUCCESS) {
2d2eda71267231c2526be701fe655db125852c1ffielding /* trash the connection; we couldn't queue the connected
2d2eda71267231c2526be701fe655db125852c1ffielding * socket to a worker
952908500d5f99f35afc5ed510391b9bdc3833farbb */
2d2eda71267231c2526be701fe655db125852c1ffielding apr_bucket_alloc_destroy(cs->bucket_alloc);
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_socket_close(cs->pfd.desc.s);
2d2eda71267231c2526be701fe655db125852c1ffielding ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_server_conf, "push2worker: ap_queue_push failed");
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_pool_clear(cs->p);
2d2eda71267231c2526be701fe655db125852c1ffielding ap_push_pool(worker_queue_info, cs->p);
2d2eda71267231c2526be701fe655db125852c1ffielding }
2d2eda71267231c2526be701fe655db125852c1ffielding
952908500d5f99f35afc5ed510391b9bdc3833farbb return rc;
2d2eda71267231c2526be701fe655db125852c1ffielding}
952908500d5f99f35afc5ed510391b9bdc3833farbb
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm/* get_worker:
952908500d5f99f35afc5ed510391b9bdc3833farbb * reserve a worker thread, block if all are currently busy.
2d2eda71267231c2526be701fe655db125852c1ffielding * this prevents the worker queue from overflowing and lets
2d2eda71267231c2526be701fe655db125852c1ffielding * other processes accept new connections in the mean time.
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffieldingstatic int get_worker(int *have_idle_worker_p)
952908500d5f99f35afc5ed510391b9bdc3833farbb{
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_status_t rc;
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (!*have_idle_worker_p) {
169f62b04de69074b561b4e6dcf6f82572a5e367trawick rc = ap_queue_info_wait_for_idler(worker_queue_info);
169f62b04de69074b561b4e6dcf6f82572a5e367trawick
169f62b04de69074b561b4e6dcf6f82572a5e367trawick if (rc == APR_SUCCESS) {
952908500d5f99f35afc5ed510391b9bdc3833farbb *have_idle_worker_p = 1;
2d2eda71267231c2526be701fe655db125852c1ffielding return 1;
2d2eda71267231c2526be701fe655db125852c1ffielding }
952908500d5f99f35afc5ed510391b9bdc3833farbb else {
2d2eda71267231c2526be701fe655db125852c1ffielding if (!APR_STATUS_IS_EOF(rc)) {
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
952908500d5f99f35afc5ed510391b9bdc3833farbb "ap_queue_info_wait_for_idler failed. "
952908500d5f99f35afc5ed510391b9bdc3833farbb "Attempting to shutdown process gracefully");
952908500d5f99f35afc5ed510391b9bdc3833farbb signal_threads(ST_GRACEFUL);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb return 0;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb else {
952908500d5f99f35afc5ed510391b9bdc3833farbb /* already reserved a worker thread - must have hit a
952908500d5f99f35afc5ed510391b9bdc3833farbb * transient error on a previous pass
952908500d5f99f35afc5ed510391b9bdc3833farbb */
952908500d5f99f35afc5ed510391b9bdc3833farbb return 1;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb}
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb/* XXXXXX: Convert to skiplist or other better data structure
952908500d5f99f35afc5ed510391b9bdc3833farbb * (yes, this is VERY VERY VERY VERY BAD)
952908500d5f99f35afc5ed510391b9bdc3833farbb */
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb/* Structures to reuse */
952908500d5f99f35afc5ed510391b9bdc3833farbbstatic APR_RING_HEAD(timer_free_ring_t, timer_event_t) timer_free_ring;
952908500d5f99f35afc5ed510391b9bdc3833farbb/* Active timers */
952908500d5f99f35afc5ed510391b9bdc3833farbbstatic APR_RING_HEAD(timer_ring_t, timer_event_t) timer_ring;
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbbstatic apr_thread_mutex_t *g_timer_ring_mtx;
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbbstatic apr_status_t event_register_timed_callback(apr_time_t t,
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_mpm_callback_fn_t *cbfn,
952908500d5f99f35afc5ed510391b9bdc3833farbb void *baton)
2d2eda71267231c2526be701fe655db125852c1ffielding{
2d2eda71267231c2526be701fe655db125852c1ffielding int inserted = 0;
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb timer_event_t *ep;
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb timer_event_t *te;
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb /* oh yeah, and make locking smarter/fine grained. */
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb apr_thread_mutex_lock(g_timer_ring_mtx);
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb if (!APR_RING_EMPTY(&timer_free_ring, timer_event_t, link)) {
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb te = APR_RING_FIRST(&timer_free_ring);
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb APR_RING_REMOVE(te, link);
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb }
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb else {
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb /* XXXXX: lol, pool allocation without a context from any thread.Yeah. Right. MPMs Suck. */
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb te = malloc(sizeof(timer_event_t));
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb APR_RING_ELEM_INIT(te, link);
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb }
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb te->cbfunc = cbfn;
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb te->baton = baton;
e68becff3c3ddc18723c9799b8cc2e6e9c3dbd66wrowe /* XXXXX: optimize */
e68becff3c3ddc18723c9799b8cc2e6e9c3dbd66wrowe te->when = t + apr_time_now();
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
e68becff3c3ddc18723c9799b8cc2e6e9c3dbd66wrowe /* Okay, insert sorted by when.. */
e68becff3c3ddc18723c9799b8cc2e6e9c3dbd66wrowe for (ep = APR_RING_FIRST(&timer_ring);
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb ep != APR_RING_SENTINEL(&timer_ring,
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb timer_event_t, link);
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb ep = APR_RING_NEXT(ep, link))
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb {
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb if (ep->when > te->when) {
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb inserted = 1;
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb APR_RING_INSERT_BEFORE(ep, te, link);
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb break;
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb }
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb }
2d2eda71267231c2526be701fe655db125852c1ffielding
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb if (!inserted) {
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb APR_RING_INSERT_TAIL(&timer_ring, te, timer_event_t, link);
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb }
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
2d2eda71267231c2526be701fe655db125852c1ffielding apr_thread_mutex_unlock(g_timer_ring_mtx);
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb return APR_SUCCESS;
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb}
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbbstatic void * APR_THREAD_FUNC listener_thread(apr_thread_t * thd, void *dummy)
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb{
b0f20a4a26bcfa85724b1c2e5ec6a077f12ef44crbb timer_event_t *ep;
2d2eda71267231c2526be701fe655db125852c1ffielding timer_event_t *te;
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_status_t rc;
952908500d5f99f35afc5ed510391b9bdc3833farbb proc_info *ti = dummy;
952908500d5f99f35afc5ed510391b9bdc3833farbb int process_slot = ti->pid;
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_pool_t *tpool = apr_thread_pool_get(thd);
952908500d5f99f35afc5ed510391b9bdc3833farbb void *csd = NULL;
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_pool_t *ptrans; /* Pool for per-transaction stuff */
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_listen_rec *lr;
952908500d5f99f35afc5ed510391b9bdc3833farbb int have_idle_worker = 0;
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm conn_state_t *cs;
952908500d5f99f35afc5ed510391b9bdc3833farbb const apr_pollfd_t *out_pfd;
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_int32_t num = 0;
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_time_t time_now = 0;
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_interval_time_t timeout_interval;
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_time_t timeout_time;
952908500d5f99f35afc5ed510391b9bdc3833farbb listener_poll_type *pt;
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb free(ti);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb /* the following times out events that are really close in the future
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm * to prevent extra poll calls
2d2eda71267231c2526be701fe655db125852c1ffielding *
2d2eda71267231c2526be701fe655db125852c1ffielding * current value is .1 second
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding#define TIMEOUT_FUDGE_FACTOR 100000
2d2eda71267231c2526be701fe655db125852c1ffielding#define EVENT_FUDGE_FACTOR 10000
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb rc = init_pollset(tpool);
952908500d5f99f35afc5ed510391b9bdc3833farbb if (rc != APR_SUCCESS) {
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
952908500d5f99f35afc5ed510391b9bdc3833farbb "failed to initialize pollset, "
952908500d5f99f35afc5ed510391b9bdc3833farbb "attempting to shutdown process gracefully");
952908500d5f99f35afc5ed510391b9bdc3833farbb signal_threads(ST_GRACEFUL);
952908500d5f99f35afc5ed510391b9bdc3833farbb return NULL;
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb /* Unblock the signal used to wake this thread up, and set a handler for
952908500d5f99f35afc5ed510391b9bdc3833farbb * it.
952908500d5f99f35afc5ed510391b9bdc3833farbb */
952908500d5f99f35afc5ed510391b9bdc3833farbb unblock_signal(LISTENER_SIGNAL);
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_signal(LISTENER_SIGNAL, dummy_signal_handler);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb while (!listener_may_exit) {
952908500d5f99f35afc5ed510391b9bdc3833farbb
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm if (requests_this_child <= 0) {
952908500d5f99f35afc5ed510391b9bdc3833farbb check_infinite_requests();
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb {
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_time_t now = apr_time_now();
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_lock(g_timer_ring_mtx);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (!APR_RING_EMPTY(&timer_ring, timer_event_t, link)) {
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm te = APR_RING_FIRST(&timer_ring);
952908500d5f99f35afc5ed510391b9bdc3833farbb if (te->when > now) {
952908500d5f99f35afc5ed510391b9bdc3833farbb timeout_interval = te->when - now;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb else {
952908500d5f99f35afc5ed510391b9bdc3833farbb timeout_interval = 1;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb else {
952908500d5f99f35afc5ed510391b9bdc3833farbb timeout_interval = apr_time_from_msec(100);
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm }
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_unlock(g_timer_ring_mtx);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb#if HAVE_SERF
952908500d5f99f35afc5ed510391b9bdc3833farbb rc = serf_context_prerun(g_serf);
952908500d5f99f35afc5ed510391b9bdc3833farbb if (rc != APR_SUCCESS) {
952908500d5f99f35afc5ed510391b9bdc3833farbb /* TOOD: what should do here? ugh. */
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb#endif
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm rc = apr_pollset_poll(event_pollset, timeout_interval, &num,
952908500d5f99f35afc5ed510391b9bdc3833farbb &out_pfd);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (rc != APR_SUCCESS) {
952908500d5f99f35afc5ed510391b9bdc3833farbb if (APR_STATUS_IS_EINTR(rc)) {
952908500d5f99f35afc5ed510391b9bdc3833farbb continue;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb if (!APR_STATUS_IS_TIMEUP(rc)) {
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_log_error(APLOG_MARK, APLOG_ERR, rc, ap_server_conf,
952908500d5f99f35afc5ed510391b9bdc3833farbb "apr_pollset_poll failed. Attempting to "
952908500d5f99f35afc5ed510391b9bdc3833farbb "shutdown process gracefully");
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm signal_threads(ST_GRACEFUL);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (listener_may_exit)
952908500d5f99f35afc5ed510391b9bdc3833farbb break;
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb {
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_time_t now = apr_time_now();
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm apr_thread_mutex_lock(g_timer_ring_mtx);
952908500d5f99f35afc5ed510391b9bdc3833farbb for (ep = APR_RING_FIRST(&timer_ring);
952908500d5f99f35afc5ed510391b9bdc3833farbb ep != APR_RING_SENTINEL(&timer_ring,
952908500d5f99f35afc5ed510391b9bdc3833farbb timer_event_t, link);
952908500d5f99f35afc5ed510391b9bdc3833farbb ep = APR_RING_FIRST(&timer_ring))
952908500d5f99f35afc5ed510391b9bdc3833farbb {
952908500d5f99f35afc5ed510391b9bdc3833farbb if (ep->when < now + EVENT_FUDGE_FACTOR) {
952908500d5f99f35afc5ed510391b9bdc3833farbb APR_RING_REMOVE(ep, link);
952908500d5f99f35afc5ed510391b9bdc3833farbb push_timer2worker(ep);
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm }
952908500d5f99f35afc5ed510391b9bdc3833farbb else {
952908500d5f99f35afc5ed510391b9bdc3833farbb break;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_unlock(g_timer_ring_mtx);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb while (num && get_worker(&have_idle_worker)) {
952908500d5f99f35afc5ed510391b9bdc3833farbb pt = (listener_poll_type *) out_pfd->client_data;
952908500d5f99f35afc5ed510391b9bdc3833farbb if (pt->type == PT_CSD) {
952908500d5f99f35afc5ed510391b9bdc3833farbb /* one of the sockets is readable */
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm cs = (conn_state_t *) pt->baton;
2d2eda71267231c2526be701fe655db125852c1ffielding switch (cs->state) {
952908500d5f99f35afc5ed510391b9bdc3833farbb case CONN_STATE_CHECK_REQUEST_LINE_READABLE:
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->state = CONN_STATE_READ_REQUEST_LINE;
952908500d5f99f35afc5ed510391b9bdc3833farbb break;
952908500d5f99f35afc5ed510391b9bdc3833farbb case CONN_STATE_WRITE_COMPLETION:
952908500d5f99f35afc5ed510391b9bdc3833farbb break;
952908500d5f99f35afc5ed510391b9bdc3833farbb default:
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_log_error(APLOG_MARK, APLOG_ERR, rc,
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_server_conf,
952908500d5f99f35afc5ed510391b9bdc3833farbb "event_loop: unexpected state %d",
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->state);
2d2eda71267231c2526be701fe655db125852c1ffielding AP_DEBUG_ASSERT(0);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_lock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb APR_RING_REMOVE(cs, timeout_list);
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_unlock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb APR_RING_ELEM_INIT(cs, timeout_list);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb rc = push2worker(out_pfd, event_pollset);
952908500d5f99f35afc5ed510391b9bdc3833farbb if (rc != APR_SUCCESS) {
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_server_conf, "push2worker failed");
952908500d5f99f35afc5ed510391b9bdc3833farbb }
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm else {
952908500d5f99f35afc5ed510391b9bdc3833farbb have_idle_worker = 0;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb else if (pt->type == PT_ACCEPT) {
952908500d5f99f35afc5ed510391b9bdc3833farbb /* A Listener Socket is ready for an accept() */
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb lr = (ap_listen_rec *) pt->baton;
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_pop_pool(&ptrans, worker_queue_info);
952908500d5f99f35afc5ed510391b9bdc3833farbb
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm if (ptrans == NULL) {
2d2eda71267231c2526be701fe655db125852c1ffielding /* create a new transaction pool for each accepted socket */
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_allocator_t *allocator;
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_allocator_create(&allocator);
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_allocator_max_free_set(allocator,
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_max_mem_free);
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_pool_create_ex(&ptrans, pconf, NULL, allocator);
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_allocator_owner_set(allocator, ptrans);
952908500d5f99f35afc5ed510391b9bdc3833farbb if (ptrans == NULL) {
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_server_conf,
952908500d5f99f35afc5ed510391b9bdc3833farbb "Failed to create transaction pool");
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm signal_threads(ST_GRACEFUL);
952908500d5f99f35afc5ed510391b9bdc3833farbb return NULL;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_pool_tag(ptrans, "transaction");
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb rc = lr->accept_func(&csd, lr, ptrans);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb /* later we trash rv and rely on csd to indicate
952908500d5f99f35afc5ed510391b9bdc3833farbb * success/failure
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm */
952908500d5f99f35afc5ed510391b9bdc3833farbb AP_DEBUG_ASSERT(rc == APR_SUCCESS || !csd);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (rc == APR_EGENERAL) {
952908500d5f99f35afc5ed510391b9bdc3833farbb /* E[NM]FILE, ENOMEM, etc */
952908500d5f99f35afc5ed510391b9bdc3833farbb resource_shortage = 1;
952908500d5f99f35afc5ed510391b9bdc3833farbb signal_threads(ST_GRACEFUL);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (csd != NULL) {
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm rc = ap_queue_push(worker_queue, csd, NULL, ptrans);
2d2eda71267231c2526be701fe655db125852c1ffielding if (rc != APR_SUCCESS) {
952908500d5f99f35afc5ed510391b9bdc3833farbb /* trash the connection; we couldn't queue the connected
952908500d5f99f35afc5ed510391b9bdc3833farbb * socket to a worker
952908500d5f99f35afc5ed510391b9bdc3833farbb */
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_socket_close(csd);
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_log_error(APLOG_MARK, APLOG_CRIT, rc,
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_server_conf,
2d2eda71267231c2526be701fe655db125852c1ffielding "ap_queue_push failed");
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_pool_clear(ptrans);
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_push_pool(worker_queue_info, ptrans);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb else {
952908500d5f99f35afc5ed510391b9bdc3833farbb have_idle_worker = 0;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb }
2d2eda71267231c2526be701fe655db125852c1ffielding else {
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_pool_clear(ptrans);
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_push_pool(worker_queue_info, ptrans);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb } /* if:else on pt->type */
952908500d5f99f35afc5ed510391b9bdc3833farbb#if HAVE_SERF
2d2eda71267231c2526be701fe655db125852c1ffielding else if (pt->type == PT_SERF) {
952908500d5f99f35afc5ed510391b9bdc3833farbb /* send socket to serf. */
952908500d5f99f35afc5ed510391b9bdc3833farbb /* XXXX: this doesn't require get_worker(&have_idle_worker) */
952908500d5f99f35afc5ed510391b9bdc3833farbb serf_event_trigger(g_serf, pt->baton, out_pfd);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb#endif
952908500d5f99f35afc5ed510391b9bdc3833farbb out_pfd++;
2d2eda71267231c2526be701fe655db125852c1ffielding num--;
952908500d5f99f35afc5ed510391b9bdc3833farbb } /* while for processing poll */
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb /* XXX possible optimization: stash the current time for use as
952908500d5f99f35afc5ed510391b9bdc3833farbb * r->request_time for new requests
952908500d5f99f35afc5ed510391b9bdc3833farbb */
952908500d5f99f35afc5ed510391b9bdc3833farbb time_now = apr_time_now();
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb /* handle timed out sockets */
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm apr_thread_mutex_lock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb /* Step 1: keepalive timeouts */
952908500d5f99f35afc5ed510391b9bdc3833farbb cs = APR_RING_FIRST(&keepalive_timeout_head);
952908500d5f99f35afc5ed510391b9bdc3833farbb timeout_time = time_now + TIMEOUT_FUDGE_FACTOR;
952908500d5f99f35afc5ed510391b9bdc3833farbb while (!APR_RING_EMPTY(&keepalive_timeout_head, conn_state_t, timeout_list)
952908500d5f99f35afc5ed510391b9bdc3833farbb && cs->expiration_time < timeout_time) {
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb cs->state = CONN_STATE_LINGER;
952908500d5f99f35afc5ed510391b9bdc3833farbb
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm APR_RING_REMOVE(cs, timeout_list);
2d2eda71267231c2526be701fe655db125852c1ffielding apr_thread_mutex_unlock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (!get_worker(&have_idle_worker)) {
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_lock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb APR_RING_INSERT_HEAD(&keepalive_timeout_head, cs,
952908500d5f99f35afc5ed510391b9bdc3833farbb conn_state_t, timeout_list);
952908500d5f99f35afc5ed510391b9bdc3833farbb break;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm rc = push2worker(&cs->pfd, event_pollset);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (rc != APR_SUCCESS) {
952908500d5f99f35afc5ed510391b9bdc3833farbb return NULL;
952908500d5f99f35afc5ed510391b9bdc3833farbb /* XXX return NULL looks wrong - not an init failure
952908500d5f99f35afc5ed510391b9bdc3833farbb * that bypasses all the cleanup outside the main loop
952908500d5f99f35afc5ed510391b9bdc3833farbb * break seems more like it
952908500d5f99f35afc5ed510391b9bdc3833farbb * need to evaluate seriousness of push2worker failures
952908500d5f99f35afc5ed510391b9bdc3833farbb */
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb have_idle_worker = 0;
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm apr_thread_mutex_lock(timeout_mutex);
2d2eda71267231c2526be701fe655db125852c1ffielding cs = APR_RING_FIRST(&keepalive_timeout_head);
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb /* Step 2: write completion timeouts */
952908500d5f99f35afc5ed510391b9bdc3833farbb cs = APR_RING_FIRST(&timeout_head);
952908500d5f99f35afc5ed510391b9bdc3833farbb while (!APR_RING_EMPTY(&timeout_head, conn_state_t, timeout_list)
952908500d5f99f35afc5ed510391b9bdc3833farbb && cs->expiration_time < timeout_time) {
952908500d5f99f35afc5ed510391b9bdc3833farbb
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm cs->state = CONN_STATE_LINGER;
2d2eda71267231c2526be701fe655db125852c1ffielding APR_RING_REMOVE(cs, timeout_list);
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_unlock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb if (!get_worker(&have_idle_worker)) {
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_lock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb APR_RING_INSERT_HEAD(&timeout_head, cs,
952908500d5f99f35afc5ed510391b9bdc3833farbb conn_state_t, timeout_list);
2d2eda71267231c2526be701fe655db125852c1ffielding break;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb rc = push2worker(&cs->pfd, event_pollset);
952908500d5f99f35afc5ed510391b9bdc3833farbb if (rc != APR_SUCCESS) {
952908500d5f99f35afc5ed510391b9bdc3833farbb return NULL;
952908500d5f99f35afc5ed510391b9bdc3833farbb }
952908500d5f99f35afc5ed510391b9bdc3833farbb have_idle_worker = 0;
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_lock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb cs = APR_RING_FIRST(&timeout_head);
2d2eda71267231c2526be701fe655db125852c1ffielding }
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb apr_thread_mutex_unlock(timeout_mutex);
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb } /* listener main loop */
952908500d5f99f35afc5ed510391b9bdc3833farbb
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_close_listeners();
952908500d5f99f35afc5ed510391b9bdc3833farbb ap_queue_term(worker_queue);
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm dying = 1;
2d2eda71267231c2526be701fe655db125852c1ffielding ap_scoreboard_image->parent[process_slot].quiescing = 1;
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm /* wake up the main thread */
2d2eda71267231c2526be701fe655db125852c1ffielding kill(ap_my_pid, SIGTERM);
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding apr_thread_exit(thd, APR_SUCCESS);
2d2eda71267231c2526be701fe655db125852c1ffielding return NULL;
ef7331e9f2c6fb4f671b13db21e0ac27a6dcf4f3dgaudet}
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm/* XXX For ungraceful termination/restart, we definitely don't want to
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm * wait for active connections to finish but we may want to wait
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm * for idle workers to get out of the queue code and release mutexes,
2d2eda71267231c2526be701fe655db125852c1ffielding * since those mutexes are cleaned up pretty soon and some systems
06924437019f9871bc4ee49748511130548b7d35rbb * may not react favorably (i.e., segfault) if operations are attempted
2d2eda71267231c2526be701fe655db125852c1ffielding * on cleaned-up mutexes.
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm */
ffba30a2a49b298bfa65151bdf61ce3e3d4636d1manojstatic void *APR_THREAD_FUNC worker_thread(apr_thread_t * thd, void *dummy)
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm{
06924437019f9871bc4ee49748511130548b7d35rbb proc_info *ti = dummy;
2d2eda71267231c2526be701fe655db125852c1ffielding int process_slot = ti->pid;
2d2eda71267231c2526be701fe655db125852c1ffielding int thread_slot = ti->tid;
2d2eda71267231c2526be701fe655db125852c1ffielding apr_socket_t *csd = NULL;
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm conn_state_t *cs;
2d2eda71267231c2526be701fe655db125852c1ffielding apr_pool_t *ptrans; /* Pool for per-transaction stuff */
2d2eda71267231c2526be701fe655db125852c1ffielding apr_status_t rv;
2d2eda71267231c2526be701fe655db125852c1ffielding int is_idle = 0;
2d2eda71267231c2526be701fe655db125852c1ffielding timer_event_t *te = NULL;
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding free(ti);
2d2eda71267231c2526be701fe655db125852c1ffielding
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm ap_scoreboard_image->servers[process_slot][thread_slot].pid = ap_my_pid;
2d2eda71267231c2526be701fe655db125852c1ffielding ap_scoreboard_image->servers[process_slot][thread_slot].tid = apr_os_thread_current();
2d2eda71267231c2526be701fe655db125852c1ffielding ap_scoreboard_image->servers[process_slot][thread_slot].generation = my_generation;
2d2eda71267231c2526be701fe655db125852c1ffielding ap_update_child_status_from_indexes(process_slot, thread_slot,
2d2eda71267231c2526be701fe655db125852c1ffielding SERVER_STARTING, NULL);
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding while (!workers_may_exit) {
2d2eda71267231c2526be701fe655db125852c1ffielding if (!is_idle) {
50486fceda867c0bb0223d9548c7c1c9b194259dfielding rv = ap_queue_info_set_idle(worker_queue_info, NULL);
2d2eda71267231c2526be701fe655db125852c1ffielding if (rv != APR_SUCCESS) {
2d2eda71267231c2526be701fe655db125852c1ffielding ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm "ap_queue_info_set_idle failed. Attempting to "
2d2eda71267231c2526be701fe655db125852c1ffielding "shutdown process gracefully.");
2d2eda71267231c2526be701fe655db125852c1ffielding signal_threads(ST_GRACEFUL);
2d2eda71267231c2526be701fe655db125852c1ffielding break;
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm }
2d2eda71267231c2526be701fe655db125852c1ffielding is_idle = 1;
2d2eda71267231c2526be701fe655db125852c1ffielding }
1ccd992d37d62c8cb2056126f2234f64ec189bfddougm
2d2eda71267231c2526be701fe655db125852c1ffielding ap_update_child_status_from_indexes(process_slot, thread_slot,
2d2eda71267231c2526be701fe655db125852c1ffielding SERVER_READY, NULL);
2d2eda71267231c2526be701fe655db125852c1ffielding worker_pop:
2d2eda71267231c2526be701fe655db125852c1ffielding if (workers_may_exit) {
2d2eda71267231c2526be701fe655db125852c1ffielding break;
2d2eda71267231c2526be701fe655db125852c1ffielding }
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding te = NULL;
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding rv = ap_queue_pop_something(worker_queue, &csd, &cs, &ptrans, &te);
2d2eda71267231c2526be701fe655db125852c1ffielding
2d2eda71267231c2526be701fe655db125852c1ffielding if (rv != APR_SUCCESS) {
2d2eda71267231c2526be701fe655db125852c1ffielding /* We get APR_EOF during a graceful shutdown once all the
2d2eda71267231c2526be701fe655db125852c1ffielding * connections accepted by this server process have been handled.
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding if (APR_STATUS_IS_EOF(rv)) {
2d2eda71267231c2526be701fe655db125852c1ffielding break;
2d2eda71267231c2526be701fe655db125852c1ffielding }
2d2eda71267231c2526be701fe655db125852c1ffielding /* We get APR_EINTR whenever ap_queue_pop() has been interrupted
2d2eda71267231c2526be701fe655db125852c1ffielding * from an explicit call to ap_queue_interrupt_all(). This allows
2d2eda71267231c2526be701fe655db125852c1ffielding * us to unblock threads stuck in ap_queue_pop() when a shutdown
2d2eda71267231c2526be701fe655db125852c1ffielding * is pending.
2d2eda71267231c2526be701fe655db125852c1ffielding *
2d2eda71267231c2526be701fe655db125852c1ffielding * If workers_may_exit is set and this is ungraceful termination/
2d2eda71267231c2526be701fe655db125852c1ffielding * restart, we are bound to get an error on some systems (e.g.,
2d2eda71267231c2526be701fe655db125852c1ffielding * AIX, which sanity-checks mutex operations) since the queue
2d2eda71267231c2526be701fe655db125852c1ffielding * may have already been cleaned up. Don't log the "error" if
2d2eda71267231c2526be701fe655db125852c1ffielding * workers_may_exit is set.
2d2eda71267231c2526be701fe655db125852c1ffielding */
2d2eda71267231c2526be701fe655db125852c1ffielding else if (APR_STATUS_IS_EINTR(rv)) {
2d2eda71267231c2526be701fe655db125852c1ffielding goto worker_pop;
2d2eda71267231c2526be701fe655db125852c1ffielding }
2d2eda71267231c2526be701fe655db125852c1ffielding /* We got some other error. */
2d2eda71267231c2526be701fe655db125852c1ffielding else if (!workers_may_exit) {
2d2eda71267231c2526be701fe655db125852c1ffielding ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
2d2eda71267231c2526be701fe655db125852c1ffielding "ap_queue_pop failed");
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben }
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben continue;
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben }
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben if (te != NULL) {
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben te->cbfunc(te->baton);
5f7c351eb2a69d8cef6c3e98e27ce6158a0b1780rbb
66d349e02d1a5a599a01c977d2c5b0009181f7deben {
5f7c351eb2a69d8cef6c3e98e27ce6158a0b1780rbb apr_thread_mutex_lock(g_timer_ring_mtx);
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben APR_RING_INSERT_TAIL(&timer_free_ring, te, timer_event_t, link);
dca927eafb338b9de9d0214818136c16d436e3fdrbb apr_thread_mutex_unlock(g_timer_ring_mtx);
dca927eafb338b9de9d0214818136c16d436e3fdrbb }
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben }
66d349e02d1a5a599a01c977d2c5b0009181f7deben else {
66d349e02d1a5a599a01c977d2c5b0009181f7deben is_idle = 0;
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben worker_sockets[thread_slot] = csd;
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben rv = process_socket(thd, ptrans, csd, cs, process_slot, thread_slot);
dca927eafb338b9de9d0214818136c16d436e3fdrbb if (!rv) {
dca927eafb338b9de9d0214818136c16d436e3fdrbb requests_this_child--;
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben }
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben worker_sockets[thread_slot] = NULL;
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben }
66d349e02d1a5a599a01c977d2c5b0009181f7deben }
66d349e02d1a5a599a01c977d2c5b0009181f7deben
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben ap_update_child_status_from_indexes(process_slot, thread_slot,
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben (dying) ? SERVER_DEAD :
dca927eafb338b9de9d0214818136c16d436e3fdrbb SERVER_GRACEFUL,
dca927eafb338b9de9d0214818136c16d436e3fdrbb (request_rec *) NULL);
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben
0e6e93183d91142d7cf9ffbf502114ff77bd9e19ben apr_thread_exit(thd, APR_SUCCESS);
0a09a4a642f7c0d367598394411dbdd4a6d8cd09fielding return NULL;
2d2eda71267231c2526be701fe655db125852c1ffielding}
2d2eda71267231c2526be701fe655db125852c1ffielding
/* Callback for apr_signal_thread(): return non-zero for signals that
 * should terminate the (one-process mode) server.
 */
static int check_signal(int signum)
{
    return (signum == SIGTERM || signum == SIGINT) ? 1 : 0;
}
/* Spawn the single listener thread for this child process.  Exits the
 * child (CHILDFATAL/CHILDSICK) on failure; on success, stores the native
 * thread handle in listener_os_thread so it can later be signalled.
 */
static void create_listener_thread(thread_starter * ts)
{
    int my_child_num = ts->child_num_arg;
    apr_threadattr_t *thread_attr = ts->threadattr;
    proc_info *my_info;
    apr_status_t rv;

    my_info = (proc_info *) malloc(sizeof(proc_info));
    if (my_info == NULL) {
        /* Fix: the result of malloc() was previously dereferenced without
         * a NULL check; mirror the out-of-memory handling used in
         * start_threads().
         */
        ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
                     "malloc: out of memory");
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    my_info->pid = my_child_num;
    my_info->tid = -1;          /* listener thread doesn't have a thread slot */
    my_info->sd = 0;
    rv = apr_thread_create(&ts->listener, thread_attr, listener_thread,
                           my_info, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                     "apr_thread_create: unable to create listener thread");
        /* let the parent decide how bad this really is */
        clean_child_exit(APEXIT_CHILDSICK);
    }
    apr_os_thread_get(&listener_os_thread, ts->listener);
}
/* XXX under some circumstances not understood, children can get stuck
 * in start_threads forever trying to take over slots which will
 * never be cleaned up; for now there is an APLOG_DEBUG message issued
 * every so often when this condition occurs
 */
/* Thread entry point that bootstraps a child process: creates the worker
 * queue, queue-info, timeout mutex and main pollset, then repeatedly scans
 * the scoreboard creating one worker thread per free slot until all
 * threads_per_child slots are filled (or start_thread_may_exit is set).
 * The listener thread is started as soon as at least one worker exists.
 */
static void *APR_THREAD_FUNC start_threads(apr_thread_t * thd, void *dummy)
{
    thread_starter *ts = dummy;
    apr_thread_t **threads = ts->threads;
    apr_threadattr_t *thread_attr = ts->threadattr;
    int child_num_arg = ts->child_num_arg;
    int my_child_num = child_num_arg;
    proc_info *my_info;
    apr_status_t rv;
    int i;
    int threads_created = 0;
    int listener_started = 0;
    int loops;
    int prev_threads_created;

    /* We must create the fd queues before we start up the listener
     * and worker threads. */
    worker_queue = apr_pcalloc(pchild, sizeof(*worker_queue));
    rv = ap_queue_init(worker_queue, threads_per_child, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                     "ap_queue_init() failed");
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    rv = ap_queue_info_create(&worker_queue_info, pchild,
                              threads_per_child);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                     "ap_queue_info_create() failed");
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    /* Create the timeout mutex and main pollset before the listener
     * thread starts.
     */
    rv = apr_thread_mutex_create(&timeout_mutex, APR_THREAD_MUTEX_DEFAULT,
                                 pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                     "creation of the timeout mutex failed.");
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    /* Create the main pollset */
    rv = apr_pollset_create(&event_pollset,
                            threads_per_child,
                            pchild, APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                     "apr_pollset_create with Thread Safety failed.");
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    /* One slot per worker thread, used by worker_thread() to expose the
     * socket it is currently processing. */
    worker_sockets = apr_pcalloc(pchild, threads_per_child
                                 * sizeof(apr_socket_t *));
    loops = prev_threads_created = 0;
    while (1) {
        /* threads_per_child does not include the listener thread */
        for (i = 0; i < threads_per_child; i++) {
            int status =
                ap_scoreboard_image->servers[child_num_arg][i].status;
            /* Skip slots still owned by a previous-generation thread that
             * has not yet reached GRACEFUL or DEAD. */
            if (status != SERVER_GRACEFUL && status != SERVER_DEAD) {
                continue;
            }
            my_info = (proc_info *) malloc(sizeof(proc_info));
            if (my_info == NULL) {
                ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
                             "malloc: out of memory");
                clean_child_exit(APEXIT_CHILDFATAL);
            }
            my_info->pid = my_child_num;
            my_info->tid = i;
            my_info->sd = 0;
            /* We are creating threads right now */
            ap_update_child_status_from_indexes(my_child_num, i,
                                                SERVER_STARTING, NULL);
            /* We let each thread update its own scoreboard entry. This is
             * done because it lets us deal with tid better.
             */
            rv = apr_thread_create(&threads[i], thread_attr,
                                   worker_thread, my_info, pchild);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                             "apr_thread_create: unable to create worker thread");
                /* let the parent decide how bad this really is */
                clean_child_exit(APEXIT_CHILDSICK);
            }
            threads_created++;
        }
        /* Start the listener only when there are workers available */
        if (!listener_started && threads_created) {
            create_listener_thread(ts);
            listener_started = 1;
        }
        if (start_thread_may_exit || threads_created == threads_per_child) {
            break;
        }
        /* wait for previous generation to clean up an entry */
        apr_sleep(apr_time_from_sec(1));
        ++loops;
        if (loops % 120 == 0) { /* every couple of minutes */
            if (prev_threads_created == threads_created) {
                /* No progress since the last check; see XXX note above. */
                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
                             "child %" APR_PID_T_FMT " isn't taking over "
                             "slots very quickly (%d of %d)",
                             ap_my_pid, threads_created,
                             threads_per_child);
            }
            prev_threads_created = threads_created;
        }
    }
    /* What state should this child_main process be listed as in the
     * scoreboard...?
     * ap_update_child_status_from_indexes(my_child_num, i, SERVER_STARTING,
     * (request_rec *) NULL);
     *
     * This state should be listed separately in the scoreboard, in some kind
     * of process_status, not mixed in with the worker threads' status.
     * "life_status" is almost right, but it's in the worker's structure, and
     * the name could be clearer. gla
     */
    apr_thread_exit(thd, APR_SUCCESS);
    return NULL;
}
/* Join the listener thread (after making sure it is actually dead or at
 * least repeatedly woken) and then join every worker thread that was ever
 * created, logging any join failures.
 */
static void join_workers(apr_thread_t * listener, apr_thread_t ** threads)
{
    int slot;
    apr_status_t rv, thread_rv;

    if (listener) {
        int attempts = 0;

        /* deal with a rare timing window which affects waking up the
         * listener thread... if the signal sent to the listener thread
         * is delivered between the time it verifies that the
         * listener_may_exit flag is clear and the time it enters a
         * blocking syscall, the signal didn't do any good... work around
         * that by sleeping briefly and sending it again
         */
        while (attempts < 10 &&
#ifdef HAVE_PTHREAD_KILL
               pthread_kill(*listener_os_thread, 0)
#else
               kill(ap_my_pid, 0)
#endif
               == 0) {
            /* listener not dead yet */
            apr_sleep(apr_time_make(0, 500000));
            wakeup_listener();
            ++attempts;
        }

        if (attempts < 10) {
            rv = apr_thread_join(&thread_rv, listener);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
                             "apr_thread_join: unable to join listener thread");
            }
        }
        else {
            /* Gave up waiting for the listener to die. */
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
                         "the listener thread didn't exit");
        }
    }

    for (slot = 0; slot < threads_per_child; slot++) {
        if (threads[slot]) {    /* if we ever created this thread */
            rv = apr_thread_join(&thread_rv, threads[slot]);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
                             "apr_thread_join: unable to join worker "
                             "thread %d", slot);
            }
        }
    }
}
/* Join the start_threads() thread, first raising start_thread_may_exit so
 * it stops waiting to take over slots from a previous generation.
 */
static void join_start_thread(apr_thread_t * start_thread_id)
{
    apr_status_t join_rv;
    apr_status_t thread_rv;

    /* tell it to give up in case it is still trying to take over slots
     * from a previous generation
     */
    start_thread_may_exit = 1;

    join_rv = apr_thread_join(&thread_rv, start_thread_id);
    if (join_rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, join_rv, ap_server_conf,
                     "apr_thread_join: unable to join the start " "thread");
    }
}
/* Main function of a child process: performs per-child initialization
 * (scoreboard, privileges, timer rings, signal thread), spawns the
 * start_threads() bootstrap thread, then waits for a termination event
 * (a signal in one_process mode, otherwise the pipe-of-death), joins all
 * threads, and exits via clean_child_exit().  Never returns.
 */
static void child_main(int child_num_arg)
{
    apr_thread_t **threads;
    apr_status_t rv;
    thread_starter *ts;
    apr_threadattr_t *thread_attr;
    apr_thread_t *start_thread_id;

    mpm_state = AP_MPMQ_STARTING;       /* for benefit of any hooks that run as this
                                         * child initializes
                                         */
    ap_my_pid = getpid();
    ap_fatal_signal_child_setup(ap_server_conf);
    apr_pool_create(&pchild, pconf);
    /*stuff to do before we switch id's, so we have permissions. */
    ap_reopen_scoreboard(pchild, NULL, 0);
    if (ap_run_drop_privileges(pchild, ap_server_conf)) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    /* Initialize the timer-event machinery used by worker/listener threads. */
    apr_thread_mutex_create(&g_timer_ring_mtx, APR_THREAD_MUTEX_DEFAULT, pchild);
    APR_RING_INIT(&timer_free_ring, timer_event_t, link);
    APR_RING_INIT(&timer_ring, timer_event_t, link);
    ap_run_child_init(pchild, ap_server_conf);
    /* done with init critical section */
    /* Just use the standard apr_setup_signal_thread to block all signals
     * from being received. The child processes no longer use signals for
     * any communication with the parent process.
     */
    rv = apr_setup_signal_thread();
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                     "Couldn't initialize signal thread");
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    if (ap_max_requests_per_child) {
        requests_this_child = ap_max_requests_per_child;
    }
    else {
        /* coding a value of zero means infinity */
        requests_this_child = INT_MAX;
    }
    /* Setup worker threads */
    /* clear the storage; we may not create all our threads immediately,
     * and we want a 0 entry to indicate a thread which was not created
     */
    threads = (apr_thread_t **) calloc(1,
                                       sizeof(apr_thread_t *) *
                                       threads_per_child);
    if (threads == NULL) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, errno, ap_server_conf,
                     "malloc: out of memory");
        clean_child_exit(APEXIT_CHILDFATAL);
    }
    ts = (thread_starter *) apr_palloc(pchild, sizeof(*ts));
    apr_threadattr_create(&thread_attr, pchild);
    /* 0 means PTHREAD_CREATE_JOINABLE */
    apr_threadattr_detach_set(thread_attr, 0);
    if (ap_thread_stacksize != 0) {
        apr_threadattr_stacksize_set(thread_attr, ap_thread_stacksize);
    }
    ts->threads = threads;
    ts->listener = NULL;
    ts->child_num_arg = child_num_arg;
    ts->threadattr = thread_attr;
    rv = apr_thread_create(&start_thread_id, thread_attr, start_threads,
                           ts, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                     "apr_thread_create: unable to create worker thread");
        /* let the parent decide how bad this really is */
        clean_child_exit(APEXIT_CHILDSICK);
    }
    mpm_state = AP_MPMQ_RUNNING;
    /* If we are only running in one_process mode, we will want to
     * still handle signals. */
    if (one_process) {
        /* Block until we get a terminating signal. */
        apr_signal_thread(check_signal);
        /* make sure the start thread has finished; signal_threads()
         * and join_workers() depend on that
         */
        /* XXX join_start_thread() won't be awakened if one of our
         * threads encounters a critical error and attempts to
         * shutdown this child
         */
        join_start_thread(start_thread_id);
        /* helps us terminate a little more quickly than the dispatch of the
         * signal thread; beats the Pipe of Death and the browsers
         */
        signal_threads(ST_UNGRACEFUL);
        /* A terminating signal was received. Now join each of the
         * workers to clean them up.
         * If the worker already exited, then the join frees
         * their resources and returns.
         * If the worker hasn't exited, then this blocks until
         * they have (then cleans up).
         */
        join_workers(ts->listener, threads);
    }
    else {                      /* !one_process */
        /* remove SIGTERM from the set of blocked signals... if one of
         * the other threads in the process needs to take us down
         * (e.g., for MaxConnectionsPerChild) it will send us SIGTERM
         */
        unblock_signal(SIGTERM);
        apr_signal(SIGTERM, dummy_signal_handler);
        /* Watch for any messages from the parent over the POD */
        while (1) {
            rv = ap_event_pod_check(pod);
            if (rv == AP_NORESTART) {
                /* see if termination was triggered while we slept */
                switch (terminate_mode) {
                case ST_GRACEFUL:
                    rv = AP_GRACEFUL;
                    break;
                case ST_UNGRACEFUL:
                    rv = AP_RESTART;
                    break;
                }
            }
            if (rv == AP_GRACEFUL || rv == AP_RESTART) {
                /* make sure the start thread has finished;
                 * signal_threads() and join_workers depend on that
                 */
                join_start_thread(start_thread_id);
                signal_threads(rv ==
                               AP_GRACEFUL ? ST_GRACEFUL : ST_UNGRACEFUL);
                break;
            }
        }
        /* A terminating signal was received. Now join each of the
         * workers to clean them up.
         * If the worker already exited, then the join frees
         * their resources and returns.
         * If the worker hasn't exited, then this blocks until
         * they have (then cleans up).
         */
        join_workers(ts->listener, threads);
    }
    free(threads);
    clean_child_exit(resource_shortage ? APEXIT_CHILDSICK : 0);
}
/* Fork a new child process for scoreboard slot `slot` and record its pid.
 * In one_process mode, runs child_main() directly (never returns).
 * Returns 0 on success, -1 if fork() failed (after a 10s back-off sleep).
 */
static int make_child(server_rec * s, int slot)
{
    int pid;

    if (slot + 1 > max_daemons_limit) {
        max_daemons_limit = slot + 1;
    }
    if (one_process) {
        set_signals();
        ap_scoreboard_image->parent[slot].pid = getpid();
        child_main(slot);
        /* NOTREACHED */
    }
    if ((pid = fork()) == -1) {
        ap_log_error(APLOG_MARK, APLOG_ERR, errno, s,
                     "fork: Unable to fork new process");
        /* fork didn't succeed. There's no need to touch the scoreboard;
         * if we were trying to replace a failed child process, then
         * server_main_loop() marked its workers SERVER_DEAD, and if
         * we were trying to replace a child process that exited normally,
         * its worker_thread()s left SERVER_DEAD or SERVER_GRACEFUL behind.
         */
        /* In case system resources are maxxed out, we don't want
           Apache running away with the CPU trying to fork over and
           over and over again. */
        apr_sleep(apr_time_from_sec(10));
        return -1;
    }
    if (!pid) {
        /* We are the new child. */
#ifdef HAVE_BINDPROCESSOR
        /* By default, AIX binds to a single processor. This bit unbinds
         * children which will then bind to another CPU.
         */
        int status = bindprocessor(BINDPROCESS, (int) getpid(),
                                   PROCESSOR_CLASS_ANY);
        if (status != OK)
            ap_log_error(APLOG_MARK, APLOG_DEBUG, errno,
                         ap_server_conf,
                         "processor unbind failed");
#endif
        RAISE_SIGSTOP(MAKE_CHILD);
        apr_signal(SIGTERM, just_die);
        child_main(slot);
        /* NOTREACHED */
    }
    /* else */
    if (ap_scoreboard_image->parent[slot].pid != 0) {
        /* This new child process is squatting on the scoreboard
         * entry owned by an exiting child process, which cannot
         * exit until all active requests complete.
         * Don't forget about this exiting child process, or we
         * won't be able to kill it if it doesn't exit by the
         * time the server is shut down.
         */
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
                     "taking over scoreboard slot from %" APR_PID_T_FMT "%s",
                     ap_scoreboard_image->parent[slot].pid,
                     ap_scoreboard_image->parent[slot].quiescing ?
                     " (quiescing)" : "");
        ap_register_extra_mpm_process(ap_scoreboard_image->parent[slot].pid);
    }
    ap_scoreboard_image->parent[slot].quiescing = 0;
    ap_scoreboard_image->parent[slot].pid = pid;
    return 0;
}
/* start up a bunch of children */
static void startup_children(int number_to_start)
{
    int slot;

    for (slot = 0; number_to_start && slot < ap_daemons_limit; ++slot) {
        if (ap_scoreboard_image->parent[slot].pid != 0) {
            /* slot already occupied by a live child */
            continue;
        }
        if (make_child(ap_server_conf, slot) < 0) {
            /* fork failed; stop trying for now */
            break;
        }
        --number_to_start;
    }
}
/*
 * idle_spawn_rate is the number of children that will be spawned on the
 * next maintenance cycle if there aren't enough idle servers. It is
 * doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by
 * without the need to spawn.
 */
static int idle_spawn_rate = 1;
#ifndef MAX_SPAWN_RATE
#define MAX_SPAWN_RATE        (32)
#endif
/* Count of maintenance cycles during which exponential spawn-rate growth
 * is suppressed; set to a positive value after a graceful restart and
 * decremented in perform_idle_server_maintenance(). */
static int hold_off_on_exponential_spawning;
/* Periodic (once per second, from server_main_loop) scoreboard scan.
 * Counts idle and active worker threads across all children, collects
 * free/quiescing scoreboard slots, and then either kills one child (too
 * many idle threads), spawns up to idle_spawn_rate new children (too few),
 * or resets the spawn rate.  Also turns a detected "sick child" into a
 * full shutdown when no active workers remain.
 */
static void perform_idle_server_maintenance(void)
{
    int i, j;
    int idle_thread_count;
    worker_score *ws;
    process_score *ps;
    int free_length;
    int totally_free_length = 0;
    int free_slots[MAX_SPAWN_RATE];
    int last_non_dead;
    int total_non_dead;
    int active_thread_count = 0;

    /* initialize the free_list */
    free_length = 0;
    idle_thread_count = 0;
    last_non_dead = -1;
    total_non_dead = 0;
    for (i = 0; i < ap_daemons_limit; ++i) {
        /* Initialization to satisfy the compiler. It doesn't know
         * that threads_per_child is always > 0 */
        int status = SERVER_DEAD;
        int any_dying_threads = 0;
        int any_dead_threads = 0;
        int all_dead_threads = 1;

        if (i >= max_daemons_limit
            && totally_free_length == idle_spawn_rate)
            /* short cut if all active processes have been examined and
             * enough empty scoreboard slots have been found
             */
            break;
        ps = &ap_scoreboard_image->parent[i];
        for (j = 0; j < threads_per_child; j++) {
            ws = &ap_scoreboard_image->servers[i][j];
            status = ws->status;
            /* XXX any_dying_threads is probably no longer needed GLA */
            any_dying_threads = any_dying_threads ||
                (status == SERVER_GRACEFUL);
            any_dead_threads = any_dead_threads || (status == SERVER_DEAD);
            all_dead_threads = all_dead_threads &&
                (status == SERVER_DEAD || status == SERVER_GRACEFUL);
            /* We consider a starting server as idle because we started it
             * at least a cycle ago, and if it still hasn't finished starting
             * then we're just going to swamp things worse by forking more.
             * So we hopefully won't need to fork more if we count it.
             * This depends on the ordering of SERVER_READY and SERVER_STARTING.
             */
            if (ps->pid != 0) { /* XXX just set all_dead_threads in outer
                                   for loop if no pid? not much else matters */
                if (status <= SERVER_READY &&
                    !ps->quiescing && ps->generation == my_generation) {
                    ++idle_thread_count;
                }
                if (status >= SERVER_READY && status < SERVER_GRACEFUL) {
                    ++active_thread_count;
                }
            }
        }
        if (any_dead_threads
            && totally_free_length < idle_spawn_rate
            && free_length < MAX_SPAWN_RATE
            && (!ps->pid        /* no process in the slot */
                || ps->quiescing)) {    /* or at least one is going away */
            if (all_dead_threads) {
                /* great! we prefer these, because the new process can
                 * start more threads sooner. So prioritize this slot
                 * by putting it ahead of any slots with active threads.
                 *
                 * first, make room by moving a slot that's potentially still
                 * in use to the end of the array
                 */
                free_slots[free_length] = free_slots[totally_free_length];
                free_slots[totally_free_length++] = i;
            }
            else {
                /* slot is still in use - back of the bus
                 */
                free_slots[free_length] = i;
            }
            ++free_length;
        }
        /* XXX if (!ps->quiescing) is probably more reliable GLA */
        if (!any_dying_threads) {
            last_non_dead = i;
            ++total_non_dead;
        }
    }

    if (sick_child_detected) {
        if (active_thread_count > 0) {
            /* some child processes appear to be working. don't kill the
             * whole server.
             */
            sick_child_detected = 0;
        }
        else {
            /* looks like a basket case. give up.
             */
            shutdown_pending = 1;
            child_fatal = 1;
            ap_log_error(APLOG_MARK, APLOG_ALERT, 0,
                         ap_server_conf,
                         "No active workers found..."
                         " Apache is exiting!");
            /* the child already logged the failure details */
            return;
        }
    }

    max_daemons_limit = last_non_dead + 1;

    if (idle_thread_count > max_spare_threads) {
        /* Kill off one child */
        ap_event_pod_signal(pod, TRUE);
        idle_spawn_rate = 1;
    }
    else if (idle_thread_count < min_spare_threads) {
        /* terminate the free list */
        if (free_length == 0) { /* scoreboard is full, can't fork */
            if (active_thread_count >= ap_daemons_limit * threads_per_child) {
                static int reported = 0;
                if (!reported) {
                    /* only report this condition once */
                    ap_log_error(APLOG_MARK, APLOG_ERR, 0,
                                 ap_server_conf,
                                 "server reached MaxClients setting, consider"
                                 " raising the MaxClients setting");
                    reported = 1;
                }
            }
            else {
                ap_log_error(APLOG_MARK, APLOG_ERR, 0,
                             ap_server_conf,
                             "scoreboard is full, not at MaxClients");
            }
            idle_spawn_rate = 1;
        }
        else {
            if (free_length > idle_spawn_rate) {
                free_length = idle_spawn_rate;
            }
            if (idle_spawn_rate >= 8) {
                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
                             ap_server_conf,
                             "server seems busy, (you may need "
                             "to increase StartServers, ThreadsPerChild "
                             "or Min/MaxSpareThreads), "
                             "spawning %d children, there are around %d idle "
                             "threads, and %d total children", free_length,
                             idle_thread_count, total_non_dead);
            }
            for (i = 0; i < free_length; ++i) {
                make_child(ap_server_conf, free_slots[i]);
            }
            /* the next time around we want to spawn twice as many if this
             * wasn't good enough, but not if we've just done a graceful
             */
            if (hold_off_on_exponential_spawning) {
                --hold_off_on_exponential_spawning;
            }
            else if (idle_spawn_rate < MAX_SPAWN_RATE) {
                idle_spawn_rate *= 2;
            }
        }
    }
    else {
        idle_spawn_rate = 1;
    }
}
/* Parent-process main loop: reap dead children, replace them (either
 * 1-for-1 while remaining_children_to_start > 0, or via idle maintenance),
 * and escalate CHILDFATAL exits into a full shutdown.  Runs until
 * restart_pending or shutdown_pending is set.
 */
static void server_main_loop(int remaining_children_to_start)
{
    int child_slot;
    apr_exit_why_e exitwhy;
    int status, processed_status;
    apr_proc_t pid;
    int i;

    while (!restart_pending && !shutdown_pending) {
        /* Blocks for up to ~1 second waiting for a child to exit. */
        ap_wait_or_timeout(&exitwhy, &status, &pid, pconf, ap_server_conf);
        if (pid.pid != -1) {
            processed_status = ap_process_child_status(&pid, exitwhy, status);
            if (processed_status == APEXIT_CHILDFATAL) {
                shutdown_pending = 1;
                child_fatal = 1;
                return;
            }
            else if (processed_status == APEXIT_CHILDSICK) {
                /* tell perform_idle_server_maintenance to check into this
                 * on the next timer pop
                 */
                sick_child_detected = 1;
            }
            /* non-fatal death... note that it's gone in the scoreboard. */
            child_slot = ap_find_child_by_pid(&pid);
            if (child_slot >= 0) {
                for (i = 0; i < threads_per_child; i++)
                    ap_update_child_status_from_indexes(child_slot, i,
                                                        SERVER_DEAD,
                                                        (request_rec *) NULL);
                ap_scoreboard_image->parent[child_slot].pid = 0;
                ap_scoreboard_image->parent[child_slot].quiescing = 0;
                if (processed_status == APEXIT_CHILDSICK) {
                    /* resource shortage, minimize the fork rate */
                    idle_spawn_rate = 1;
                }
                else if (remaining_children_to_start
                         && child_slot < ap_daemons_limit) {
                    /* we're still doing a 1-for-1 replacement of dead
                     * children with new children
                     */
                    make_child(ap_server_conf, child_slot);
                    --remaining_children_to_start;
                }
            }
            else if (ap_unregister_extra_mpm_process(pid.pid) == 1) {
                /* handled */
#if APR_HAS_OTHER_CHILD
            }
            else if (apr_proc_other_child_alert(&pid, APR_OC_REASON_DEATH,
                                                status) == 0) {
                /* handled */
#endif
            }
            else if (is_graceful) {
                /* Great, we've probably just lost a slot in the
                 * scoreboard. Somehow we don't know about this child.
                 */
                ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
                             ap_server_conf,
                             "long lost child came home! (pid %ld)",
                             (long) pid.pid);
            }
            /* Don't perform idle maintenance when a child dies,
             * only do it when there's a timeout. Remember only a
             * finite number of children can die, and it's pretty
             * pathological for a lot to die suddenly.
             */
            continue;
        }
        else if (remaining_children_to_start) {
            /* we hit a 1 second timeout in which none of the previous
             * generation of children needed to be reaped... so assume
             * they're all done, and pick up the slack if any is left.
             */
            startup_children(remaining_children_to_start);
            remaining_children_to_start = 0;
            /* In any event we really shouldn't do the code below because
             * few of the servers we just started are in the IDLE state
             * yet, so we'd mistakenly create an extra server.
             */
            continue;
        }

        perform_idle_server_maintenance();
    }
}
/* MPM run hook for the parent process: set up the scoreboard and signals,
 * start the initial children, run server_main_loop(), and then perform
 * the requested shutdown (ungraceful or graceful) or prepare for restart.
 * Returns DONE to stop the server, OK to restart with a new generation.
 */
static int event_run(apr_pool_t * _pconf, apr_pool_t * plog, server_rec * s)
{
    int remaining_children_to_start;

    ap_log_pid(pconf, ap_pid_fname);

    if (!is_graceful) {
        if (ap_run_pre_mpm(s->process->pool, SB_SHARED) != OK) {
            mpm_state = AP_MPMQ_STOPPING;
            return DONE;
        }
        /* fix the generation number in the global score; we just got a new,
         * cleared scoreboard
         */
        ap_scoreboard_image->global->running_generation = my_generation;
    }

    set_signals();
    /* Don't thrash... */
    if (max_spare_threads < min_spare_threads + threads_per_child)
        max_spare_threads = min_spare_threads + threads_per_child;

    /* If we're doing a graceful_restart then we're going to see a lot
     * of children exiting immediately when we get into the main loop
     * below (because we just sent them AP_SIG_GRACEFUL). This happens pretty
     * rapidly... and for each one that exits we may start a new one, until
     * there are at least min_spare_threads idle threads, counting across
     * all children. But we may be permitted to start more children than
     * that, so we'll just keep track of how many we're
     * supposed to start up without the 1 second penalty between each fork.
     */
    remaining_children_to_start = ap_daemons_to_start;
    if (remaining_children_to_start > ap_daemons_limit) {
        remaining_children_to_start = ap_daemons_limit;
    }
    if (!is_graceful) {
        startup_children(remaining_children_to_start);
        remaining_children_to_start = 0;
    }
    else {
        /* give the system some time to recover before kicking into
         * exponential mode */
        hold_off_on_exponential_spawning = 10;
    }

    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                 "%s configured -- resuming normal operations",
                 ap_get_server_description());
    ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
                 "Server built: %s", ap_get_server_built());
    ap_log_command_line(plog, s);

    restart_pending = shutdown_pending = 0;
    mpm_state = AP_MPMQ_RUNNING;

    server_main_loop(remaining_children_to_start);
    mpm_state = AP_MPMQ_STOPPING;

    if (shutdown_pending && !is_graceful) {
        /* Time to shut down:
         * Kill child processes, tell them to call child_exit, etc...
         */
        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
        ap_reclaim_child_processes(1);  /* Start with SIGTERM */

        if (!child_fatal) {
            /* cleanup pid file on normal shutdown */
            const char *pidfile = NULL;
            pidfile = ap_server_root_relative(pconf, ap_pid_fname);
            if (pidfile != NULL && unlink(pidfile) == 0)
                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
                             ap_server_conf,
                             "removed PID file %s (pid=%ld)",
                             pidfile, (long) getpid());
            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0,
                         ap_server_conf, "caught SIGTERM, shutting down");
        }
        return DONE;
    } else if (shutdown_pending) {
        /* Time to gracefully shut down:
         * Kill child processes, tell them to call child_exit, etc...
         */
        int active_children;
        int index;
        apr_time_t cutoff = 0;

        /* Close our listeners, and then ask our children to do same */
        ap_close_listeners();
        ap_event_pod_killpg(pod, ap_daemons_limit, TRUE);
        ap_relieve_child_processes();

        if (!child_fatal) {
            /* cleanup pid file on normal shutdown */
            const char *pidfile = NULL;
            pidfile = ap_server_root_relative(pconf, ap_pid_fname);
            if (pidfile != NULL && unlink(pidfile) == 0)
                ap_log_error(APLOG_MARK, APLOG_INFO, 0,
                             ap_server_conf,
                             "removed PID file %s (pid=%ld)",
                             pidfile, (long) getpid());
            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                         "caught " AP_SIG_GRACEFUL_STOP_STRING
                         ", shutting down gracefully");
        }

        if (ap_graceful_shutdown_timeout) {
            cutoff = apr_time_now() +
                     apr_time_from_sec(ap_graceful_shutdown_timeout);
        }

        /* Don't really exit until each child has finished */
        shutdown_pending = 0;
        do {
            /* Pause for a second */
            apr_sleep(apr_time_from_sec(1));

            /* Relieve any children which have now exited */
            ap_relieve_child_processes();

            active_children = 0;
            for (index = 0; index < ap_daemons_limit; ++index) {
                if (ap_mpm_safe_kill(MPM_CHILD_PID(index), 0) == APR_SUCCESS) {
                    active_children = 1;
                    /* Having just one child is enough to stay around */
                    break;
                }
            }
        } while (!shutdown_pending && active_children &&
                 (!ap_graceful_shutdown_timeout || apr_time_now() < cutoff));

        /* We might be here because we received SIGTERM, either
         * way, try and make sure that all of our processes are
         * really dead.
         */
        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
        ap_reclaim_child_processes(1);

        return DONE;
    }

    /* we've been told to restart */
    apr_signal(SIGHUP, SIG_IGN);

    if (one_process) {
        /* not worth thinking about */
        return DONE;
    }

    /* advance to the next generation */
    /* XXX: we really need to make sure this new generation number isn't in
     * use by any of the children.
     */
    ++my_generation;
    ap_scoreboard_image->global->running_generation = my_generation;

    if (is_graceful) {
        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                     AP_SIG_GRACEFUL_STRING
                     " received. Doing graceful restart");
        /* wake up the children...time to die. But we'll have more soon */
        ap_event_pod_killpg(pod, ap_daemons_limit, TRUE);
        /* This is mostly for debugging... so that we know what is still
         * gracefully dealing with existing request.
         */
    }
    else {
        /* Kill 'em all. Since the child acts the same on the parents SIGTERM
         * and a SIGHUP, we may as well use the same signal, because some user
         * pthreads are stealing signals from us left and right.
         */
        ap_event_pod_killpg(pod, ap_daemons_limit, FALSE);
        ap_reclaim_child_processes(1);  /* Start with SIGTERM */
        ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                     "SIGHUP received. Attempting to restart");
    }
    return OK;
}
/* This really should be a post_config hook, but the error log is already
 * redirected by that point, so we need to do this in the open_logs phase.
 */
static int event_open_logs(apr_pool_t * p, apr_pool_t * plog,
                           apr_pool_t * ptemp, server_rec * s)
{
    int startup = 0;
    int level_flags = 0;
    apr_status_t rv;

    pconf = p;

    /* the reverse of pre_config, we want this only the first time around */
    if (retained->module_loads == 1) {
        startup = 1;
        level_flags |= APLOG_STARTUP;
    }

    num_listensocks = ap_setup_listeners(ap_server_conf);
    if (num_listensocks < 1) {
        ap_log_error(APLOG_MARK, APLOG_ALERT | level_flags, 0,
                     (startup ? NULL : s),
                     "no listening sockets available, shutting down");
        return DONE;
    }

    if (!one_process) {
        rv = ap_event_pod_open(pconf, &pod);
        if (rv) {
            ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv,
                         (startup ? NULL : s),
                         "could not open pipe-of-death");
            return DONE;
        }
    }
    return OK;
}
/* pre_config hook: runs before the configuration file is parsed on every
 * start and restart.  Determines the process model (-X / ONE_PROCESS /
 * NO_DETACH / FOREGROUND defines), maintains the retained-data block that
 * survives restarts, probes once for a threadsafe pollset implementation,
 * optionally daemonizes, and resets every tunable to its compiled-in
 * default so the directive handlers start from a clean slate.
 *
 * Returns OK, or HTTP_INTERNAL_SERVER_ERROR if the platform lacks a
 * threadsafe pollset or detaching from the terminal fails.
 */
static int event_pre_config(apr_pool_t * pconf, apr_pool_t * plog,
                            apr_pool_t * ptemp)
{
    int no_detach, debug, foreground;
    apr_status_t rv;
    const char *userdata_key = "mpm_event_module";

    mpm_state = AP_MPMQ_STARTING;

    debug = ap_exists_config_define("DEBUG");

    if (debug) {
        /* -X style debugging: single process, stays in the foreground */
        foreground = one_process = 1;
        no_detach = 0;
    }
    else {
        one_process = ap_exists_config_define("ONE_PROCESS");
        no_detach = ap_exists_config_define("NO_DETACH");
        foreground = ap_exists_config_define("FOREGROUND");
    }

    /* sigh, want this only the second time around */
    retained = ap_retained_data_get(userdata_key);
    if (!retained) {
        retained = ap_retained_data_create(userdata_key, sizeof(*retained));
    }
    ++retained->module_loads;
    if (retained->module_loads == 2) {
        is_graceful = 0;

        /* Feature probe: create (and immediately destroy below) a
         * threadsafe pollset to verify the platform supports one before
         * committing to this MPM.
         */
        rv = apr_pollset_create(&event_pollset, 1, plog,
                                APR_POLLSET_THREADSAFE | APR_POLLSET_NOCOPY);
        if (rv != APR_SUCCESS) {
            /* Fixed missing space between the concatenated literals; the
             * message used to read "...platform?Also check...".
             */
            ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL,
                         "Couldn't create a Thread Safe Pollset. "
                         "Is it supported on your platform? "
                         "Also check system or user limits!");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        apr_pollset_destroy(event_pollset);

        if (!one_process && !foreground) {
            /* Daemonize (or just dissociate from the controlling terminal
             * when NO_DETACH was defined).
             */
            rv = apr_proc_detach(no_detach ? APR_PROC_DETACH_FOREGROUND
                                 : APR_PROC_DETACH_DAEMONIZE);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL,
                             "apr_proc_detach failed");
                return HTTP_INTERNAL_SERVER_ERROR;
            }
        }
        parent_pid = ap_my_pid = getpid();
    }

    ap_listen_pre_config();

    /* Reset all tunables to the compiled-in defaults; the directive
     * handlers below overwrite these while the config file is parsed.
     */
    ap_daemons_to_start = DEFAULT_START_DAEMON;
    min_spare_threads = DEFAULT_MIN_FREE_DAEMON * DEFAULT_THREADS_PER_CHILD;
    max_spare_threads = DEFAULT_MAX_FREE_DAEMON * DEFAULT_THREADS_PER_CHILD;
    server_limit = DEFAULT_SERVER_LIMIT;
    thread_limit = DEFAULT_THREAD_LIMIT;
    ap_daemons_limit = server_limit;
    threads_per_child = DEFAULT_THREADS_PER_CHILD;
    max_clients = ap_daemons_limit * threads_per_child;
    ap_pid_fname = DEFAULT_PIDLOG;
    ap_max_requests_per_child = DEFAULT_MAX_REQUESTS_PER_CHILD;
    ap_extended_status = 0;
    ap_max_mem_free = APR_ALLOCATOR_MAX_FREE_UNLIMITED;

    apr_cpystrn(ap_coredump_dir, ap_server_root, sizeof(ap_coredump_dir));

    return OK;
}
/* check_config hook: validate and reconcile the MPM tunables after the
 * configuration has been parsed.  Clamps ServerLimit, ThreadLimit,
 * ThreadsPerChild, MaxClients and StartServers into consistent ranges,
 * logging a warning for every adjustment (to the console during startup,
 * to the error log on restart).  ServerLimit and ThreadLimit size the
 * scoreboard and therefore cannot change across a restart; attempts to do
 * so are ignored.  Always returns OK — bad values are corrected, never
 * fatal.
 */
static int event_check_config(apr_pool_t *p, apr_pool_t *plog,
                              apr_pool_t *ptemp, server_rec *s)
{
    int startup = 0;

    /* the reverse of pre_config, we want this only the first time around */
    if (retained->module_loads == 1) {
        startup = 1;
    }

    /* Clamp ServerLimit into [1, MAX_SERVER_LIMIT]. */
    if (server_limit > MAX_SERVER_LIMIT) {
        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: ServerLimit of %d exceeds compile-time "
                         "limit of", server_limit);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " %d servers, decreasing to %d.",
                         MAX_SERVER_LIMIT, MAX_SERVER_LIMIT);
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "ServerLimit of %d exceeds compile-time limit "
                         "of %d, decreasing to match",
                         server_limit, MAX_SERVER_LIMIT);
        }
        server_limit = MAX_SERVER_LIMIT;
    }
    else if (server_limit < 1) {
        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: ServerLimit of %d not allowed, "
                         "increasing to 1.", server_limit);
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "ServerLimit of %d not allowed, increasing to 1",
                         server_limit);
        }
        server_limit = 1;
    }

    /* you cannot change ServerLimit across a restart; ignore
     * any such attempts
     */
    if (!retained->first_server_limit) {
        retained->first_server_limit = server_limit;
    }
    else if (server_limit != retained->first_server_limit) {
        /* don't need a startup console version here */
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                     "changing ServerLimit to %d from original value of %d "
                     "not allowed during restart",
                     server_limit, retained->first_server_limit);
        server_limit = retained->first_server_limit;
    }

    /* Clamp ThreadLimit into [1, MAX_THREAD_LIMIT]. */
    if (thread_limit > MAX_THREAD_LIMIT) {
        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: ThreadLimit of %d exceeds compile-time "
                         "limit of", thread_limit);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " %d threads, decreasing to %d.",
                         MAX_THREAD_LIMIT, MAX_THREAD_LIMIT);
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "ThreadLimit of %d exceeds compile-time limit "
                         "of %d, decreasing to match",
                         thread_limit, MAX_THREAD_LIMIT);
        }
        thread_limit = MAX_THREAD_LIMIT;
    }
    else if (thread_limit < 1) {
        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: ThreadLimit of %d not allowed, "
                         "increasing to 1.", thread_limit);
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "ThreadLimit of %d not allowed, increasing to 1",
                         thread_limit);
        }
        thread_limit = 1;
    }

    /* you cannot change ThreadLimit across a restart; ignore
     * any such attempts
     */
    if (!retained->first_thread_limit) {
        retained->first_thread_limit = thread_limit;
    }
    else if (thread_limit != retained->first_thread_limit) {
        /* don't need a startup console version here */
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                     "changing ThreadLimit to %d from original value of %d "
                     "not allowed during restart",
                     thread_limit, retained->first_thread_limit);
        thread_limit = retained->first_thread_limit;
    }

    /* ThreadsPerChild must fit inside [1, ThreadLimit]. */
    if (threads_per_child > thread_limit) {
        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: ThreadsPerChild of %d exceeds ThreadLimit "
                         "of", threads_per_child);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " %d threads, decreasing to %d.",
                         thread_limit, thread_limit);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " To increase, please see the ThreadLimit "
                         "directive.");
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "ThreadsPerChild of %d exceeds ThreadLimit "
                         "of %d, decreasing to match",
                         threads_per_child, thread_limit);
        }
        threads_per_child = thread_limit;
    }
    else if (threads_per_child < 1) {
        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: ThreadsPerChild of %d not allowed, "
                         "increasing to 1.", threads_per_child);
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "ThreadsPerChild of %d not allowed, increasing to 1",
                         threads_per_child);
        }
        threads_per_child = 1;
    }

    /* MaxClients must cover at least one full child's worth of threads. */
    if (max_clients < threads_per_child) {
        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: MaxClients of %d is less than "
                         "ThreadsPerChild of", max_clients);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " %d, increasing to %d. MaxClients must be at "
                         "least as large",
                         threads_per_child, threads_per_child);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " as the number of threads in a single server.");
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "MaxClients of %d is less than ThreadsPerChild "
                         "of %d, increasing to match",
                         max_clients, threads_per_child);
        }
        max_clients = threads_per_child;
    }

    /* Derive the process count; round MaxClients down to a whole multiple
     * of ThreadsPerChild if it doesn't divide evenly.
     */
    ap_daemons_limit = max_clients / threads_per_child;

    if (max_clients % threads_per_child) {
        int tmp_max_clients = ap_daemons_limit * threads_per_child;

        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: MaxClients of %d is not an integer "
                         "multiple of", max_clients);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " ThreadsPerChild of %d, decreasing to nearest "
                         "multiple %d,", threads_per_child,
                         tmp_max_clients);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " for a maximum of %d servers.",
                         ap_daemons_limit);
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "MaxClients of %d is not an integer multiple of "
                         "ThreadsPerChild of %d, decreasing to nearest "
                         "multiple %d", max_clients, threads_per_child,
                         tmp_max_clients);
        }
        max_clients = tmp_max_clients;
    }

    /* The derived process count may not exceed ServerLimit. */
    if (ap_daemons_limit > server_limit) {
        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: MaxClients of %d would require %d "
                         "servers and ", max_clients, ap_daemons_limit);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " would exceed ServerLimit of %d, decreasing to %d.",
                         server_limit, server_limit * threads_per_child);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " To increase, please see the ServerLimit "
                         "directive.");
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "MaxClients of %d would require %d servers and "
                         "exceed ServerLimit of %d, decreasing to %d",
                         max_clients, ap_daemons_limit, server_limit,
                         server_limit * threads_per_child);
        }
        ap_daemons_limit = server_limit;
    }

    /* ap_daemons_to_start > ap_daemons_limit checked in ap_mpm_run() */
    /* NOTE(review): the guard below only fires for negative values, so a
     * StartServers of 0 passes through silently — presumably intentional
     * given the ap_mpm_run() comment above, but worth confirming.
     */
    if (ap_daemons_to_start < 0) {
        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: StartServers of %d not allowed, "
                         "increasing to 1.", ap_daemons_to_start);
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "StartServers of %d not allowed, increasing to 1",
                         ap_daemons_to_start);
        }
        ap_daemons_to_start = 1;
    }

    /* At least one spare thread must be kept, or the server cannot react
     * to load at all.
     */
    if (min_spare_threads < 1) {
        if (startup) {
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         "WARNING: MinSpareThreads of %d not allowed, "
                         "increasing to 1", min_spare_threads);
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " to avoid almost certain server failure.");
            ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
                         " Please read the documentation.");
        } else {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s,
                         "MinSpareThreads of %d not allowed, increasing to 1",
                         min_spare_threads);
        }
        min_spare_threads = 1;
    }

    /* max_spare_threads < min_spare_threads + threads_per_child
     * checked in ap_mpm_run()
     */

    return OK;
}
/* Register this MPM's hook functions with the core.  Called once per
 * module load; registration order matters for hooks at the same priority.
 */
static void event_hooks(apr_pool_t * p)
{
    /* Our open_logs hook function must run before the core's, or stderr
     * will be redirected to a file, and the messages won't print to the
     * console.
     */
    static const char *const aszSucc[] = { "core.c", NULL };
    /* Reset on each (re)load; event_pre_config recomputes it from the
     * DEBUG/ONE_PROCESS config defines.
     */
    one_process = 0;

    ap_hook_open_logs(event_open_logs, NULL, aszSucc, APR_HOOK_REALLY_FIRST);
    /* we need to set the MPM state before other pre-config hooks use MPM query
     * to retrieve it, so register as REALLY_FIRST
     */
    ap_hook_pre_config(event_pre_config, NULL, NULL, APR_HOOK_REALLY_FIRST);
    ap_hook_check_config(event_check_config, NULL, NULL, APR_HOOK_MIDDLE);
    ap_hook_mpm(event_run, NULL, NULL, APR_HOOK_MIDDLE);
    ap_hook_mpm_query(event_query, NULL, NULL, APR_HOOK_MIDDLE);
    ap_hook_mpm_note_child_killed(event_note_child_killed, NULL, NULL, APR_HOOK_MIDDLE);
    ap_hook_mpm_register_timed_callback(event_register_timed_callback, NULL, NULL,
                                        APR_HOOK_MIDDLE);
    ap_hook_mpm_get_name(event_get_name, NULL, NULL, APR_HOOK_MIDDLE);
}
/* Directive handler for "StartServers": number of child processes to
 * fork at startup.  Only valid at the top level of the server config.
 */
static const char *set_daemons_to_start(cmd_parms *cmd, void *dummy,
                                        const char *arg)
{
    const char *err;

    err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
    if (err) {
        return err;
    }

    ap_daemons_to_start = atoi(arg);
    return NULL;
}
/* Directive handler for "MinSpareThreads": lower bound on idle threads
 * kept available to absorb request spikes.  Global-only directive.
 */
static const char *set_min_spare_threads(cmd_parms * cmd, void *dummy,
                                         const char *arg)
{
    const char *err;

    err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
    if (err) {
        return err;
    }

    min_spare_threads = atoi(arg);
    return NULL;
}
/* Directive handler for "MaxSpareThreads": upper bound on idle threads
 * before surplus children are told to exit.  Global-only directive.
 */
static const char *set_max_spare_threads(cmd_parms * cmd, void *dummy,
                                         const char *arg)
{
    const char *err;

    err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
    if (err) {
        return err;
    }

    max_spare_threads = atoi(arg);
    return NULL;
}
/* Directive handler for "MaxClients": total worker threads alive at
 * once; reconciled against the other limits in event_check_config().
 * Global-only directive.
 */
static const char *set_max_clients(cmd_parms * cmd, void *dummy,
                                   const char *arg)
{
    const char *err;

    err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
    if (err) {
        return err;
    }

    max_clients = atoi(arg);
    return NULL;
}
/* Directive handler for "ThreadsPerChild": worker threads created by
 * each child process.  Global-only directive.
 */
static const char *set_threads_per_child(cmd_parms * cmd, void *dummy,
                                         const char *arg)
{
    const char *err;

    err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
    if (err) {
        return err;
    }

    threads_per_child = atoi(arg);
    return NULL;
}
/* Directive handler for "ServerLimit": hard cap on child processes for
 * this run; sizes the scoreboard, so it cannot change across restarts
 * (enforced in event_check_config()).  Global-only directive.
 */
static const char *set_server_limit (cmd_parms *cmd, void *dummy, const char *arg)
{
    const char *err;

    err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
    if (err) {
        return err;
    }

    server_limit = atoi(arg);
    return NULL;
}
/* Directive handler for "ThreadLimit": hard cap on ThreadsPerChild for
 * this run; like ServerLimit it is frozen at first startup.  Global-only
 * directive.
 */
static const char *set_thread_limit(cmd_parms * cmd, void *dummy,
                                    const char *arg)
{
    const char *err;

    err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
    if (err) {
        return err;
    }

    thread_limit = atoi(arg);
    return NULL;
}
/* Configuration directives understood by this MPM.  Every handler above
 * restricts its directive to the global config context via
 * ap_check_cmd_context(cmd, GLOBAL_ONLY).
 */
static const command_rec event_cmds[] = {
    LISTEN_COMMANDS,            /* shared listener directives (macro from
                                 * the listen API — expansion not visible
                                 * here) */
    AP_INIT_TAKE1("StartServers", set_daemons_to_start, NULL, RSRC_CONF,
                  "Number of child processes launched at server startup"),
    AP_INIT_TAKE1("ServerLimit", set_server_limit, NULL, RSRC_CONF,
                  "Maximum number of child processes for this run of Apache"),
    AP_INIT_TAKE1("MinSpareThreads", set_min_spare_threads, NULL, RSRC_CONF,
                  "Minimum number of idle threads, to handle request spikes"),
    AP_INIT_TAKE1("MaxSpareThreads", set_max_spare_threads, NULL, RSRC_CONF,
                  "Maximum number of idle threads"),
    AP_INIT_TAKE1("MaxClients", set_max_clients, NULL, RSRC_CONF,
                  "Maximum number of threads alive at the same time"),
    AP_INIT_TAKE1("ThreadsPerChild", set_threads_per_child, NULL, RSRC_CONF,
                  "Number of threads each child creates"),
    AP_INIT_TAKE1("ThreadLimit", set_thread_limit, NULL, RSRC_CONF,
                  "Maximum number of worker threads per child process for this "
                  "run of Apache - Upper limit for ThreadsPerChild"),
    AP_GRACEFUL_SHUTDOWN_TIMEOUT_COMMAND,
    {NULL}                      /* sentinel terminating the table */
};
/* Module record for the event MPM.  MPMs keep no per-directory or
 * per-server config; all state lives in file-scope globals and the
 * retained-data block managed in event_pre_config().
 */
AP_DECLARE_MODULE(mpm_event) = {
    MPM20_MODULE_STUFF,
    NULL,                       /* hook to run before apache parses args */
    NULL,                       /* create per-directory config structure */
    NULL,                       /* merge per-directory config structures */
    NULL,                       /* create per-server config structure */
    NULL,                       /* merge per-server config structures */
    event_cmds,                 /* command apr_table_t */
    event_hooks                 /* register_hooks */
};