/* worker.c -- revision a94a0f8604bc182459825c18b5574236ea2673c7 */
/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* The purpose of this MPM is to fix the design flaws in the threaded
 * model.  Because of the way that pthreads and mutex locks interact,
 * it is basically impossible to cleanly and gracefully shut down a child
 * process if multiple threads are all blocked in accept.  This model
 * fixes those problems.
 */

#include "apr.h"
#include "apr_portable.h"
#include "apr_strings.h"
#include "apr_file_io.h"
#include "apr_thread_proc.h"
#include "apr_signal.h"
#include "apr_thread_mutex.h"
#include "apr_proc_mutex.h"
#include "apr_poll.h"

#include <stdlib.h>

#define APR_WANT_STRFUNC
#include "apr_want.h"

#if APR_HAVE_UNISTD_H
#include <unistd.h>
#endif
#if APR_HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#if APR_HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif
#ifdef HAVE_SYS_PROCESSOR_H
#include <sys/processor.h> /* for bindprocessor() */
#endif

#if !APR_HAS_THREADS
#error The Worker MPM requires APR threads, but they are unavailable.
#endif

#include "ap_config.h"
#include "httpd.h"
#include "http_main.h"
#include "http_log.h"
#include "http_config.h"        /* for read_config */
#include "http_core.h"          /* for get_remote_host */
#include "http_connection.h"
#include "ap_mpm.h"
#include "mpm_common.h"
#include "ap_listen.h"
#include "scoreboard.h"
#include "fdqueue.h"
#include "mpm_default.h"
#include "util_mutex.h"
#include "unixd.h"
#include "util_time.h"

#include <signal.h>
#include <limits.h>             /* for INT_MAX */

/* Limit on the total --- clients will be locked out if more servers than
 * this are needed.  It is intended solely to keep the server from crashing
 * when things get out of hand.
 *
 * We keep a hard maximum number of servers, for two reasons --- first off,
 * in case something goes seriously wrong, we want to stop the fork bomb
 * short of actually crashing the machine we're running on by filling some
 * kernel table.  Secondly, it keeps the size of the scoreboard file small
 * enough that we can read the whole thing without worrying too much about
 * the overhead.
 */
#ifndef DEFAULT_SERVER_LIMIT
#define DEFAULT_SERVER_LIMIT 16
#endif

/* Admin can't tune ServerLimit beyond MAX_SERVER_LIMIT.  We want
 * some sort of compile-time limit to help catch typos.
 */
#ifndef MAX_SERVER_LIMIT
#define MAX_SERVER_LIMIT 20000
#endif

/* Limit on the threads per process.  Clients will be locked out if more than
 * this * server_limit are needed.
 *
 * We keep this for one reason: it keeps the size of the scoreboard file small
 * enough that we can read the whole thing without worrying too much about
 * the overhead.
 */
#ifndef DEFAULT_THREAD_LIMIT
#define DEFAULT_THREAD_LIMIT 64
#endif

/* Admin can't tune ThreadLimit beyond MAX_THREAD_LIMIT.  We want
 * some sort of compile-time limit to help catch typos.
 */
#ifndef MAX_THREAD_LIMIT
#define MAX_THREAD_LIMIT 20000
#endif

/*
 * Actual definitions of config globals
 */

static int threads_per_child = 0;       /* Worker threads per child */
static int ap_daemons_to_start = 0;
static int min_spare_threads = 0;
static int max_spare_threads = 0;
static int ap_daemons_limit = 0;
static int max_workers = 0;
static int server_limit = 0;
static int thread_limit = 0;
static int had_healthy_child = 0;
static int dying = 0;
static int workers_may_exit = 0;
static int start_thread_may_exit = 0;
static int listener_may_exit = 0;
static int requests_this_child;
static int num_listensocks = 0;
static int resource_shortage = 0;
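
/* The listener thread hands accepted connections to the worker threads
 * through worker_queue; worker_queue_info tracks idle workers (and recycles
 * their transaction pools) so the listener only accepts a new connection
 * when a worker is available to take it.
 */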
static fd_queue_t *worker_queue;
static fd_queue_info_t *worker_queue_info;
static int mpm_state = AP_MPMQ_STARTING;

/* data retained by worker across load/unload of the module
 * allocated on first call to pre-config hook; located on
 * subsequent calls to pre-config hook
 */
typedef struct worker_retained_data {
    int first_server_limit;
    int first_thread_limit;
    int module_loads;
    int sick_child_detected;
    ap_generation_t my_generation;
    int volatile is_graceful; /* set from signal handler */
    int maxclients_reported;
    int near_maxclients_reported;
    /*
     * The max child slot ever assigned, preserved across restarts.  Necessary
     * to deal with MaxRequestWorkers changes across AP_SIG_GRACEFUL restarts.
     * We use this value to optimize routines that have to scan the entire
     * scoreboard.
     */
    int max_daemons_limit;
    /*
     * idle_spawn_rate is the number of children that will be spawned on the
     * next maintenance cycle if there aren't enough idle servers.  It is
     * doubled up to MAX_SPAWN_RATE, and reset only when a cycle goes by
     * without the need to spawn.
     */
    int *idle_spawn_rate;
#ifndef MAX_SPAWN_RATE
#define MAX_SPAWN_RATE        (32)
#endif
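    /*
     * Counter used to suppress the exponential doubling of idle_spawn_rate
     * for a while after start-up or a restart, so the parent doesn't ramp up
     * forking while the initial children are still coming up.
     */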
    int hold_off_on_exponential_spawning;
} worker_retained_data;
static worker_retained_data *retained;

#define MPM_CHILD_PID(i) (ap_scoreboard_image->parent[i].pid)

/* The structure used to pass unique initialization info to each thread */
typedef struct {
    int pid;
    int tid;
    int sd;
} proc_info;

/* Structure used to pass information to the thread responsible for
 * creating the rest of the threads.
 */
typedef struct {
    apr_thread_t **threads;
    apr_thread_t *listener;
    int child_num_arg;
    apr_threadattr_t *threadattr;
} thread_starter;

#define ID_FROM_CHILD_THREAD(c, t)    ((c * thread_limit) + t)
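/* e.g. with ThreadLimit 64, child slot 2 / thread 5 maps to connection id
 * 2 * 64 + 5 = 133, which is unique across the whole server.
 */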

static ap_pod_t **pod;
static ap_pod_t *child_pod;

/* The worker MPM respects a couple of runtime flags that can aid
 * in debugging.  Setting the -DNO_DETACH flag will prevent the root process
 * from detaching from its controlling terminal.  Additionally, setting
 * the -DONE_PROCESS flag (which implies -DNO_DETACH) will get you the
 * child_main loop running in the process which originally started up.
 * This gives you a pretty nice debugging environment.  (You'll get a SIGHUP
 * early in standalone_main; just continue through.  This is the server
 * trying to kill off any child processes which it might have lying
 * around --- Apache doesn't keep track of their pids, it just sends
 * SIGHUP to the process group, ignoring it in the root process.
 * Continue through and you'll be fine.).
 */
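/* For example, running "httpd -X" (debug mode, which behaves like setting
 * both flags) or "httpd -DONE_PROCESS -DNO_DETACH" makes it easy to attach
 * a debugger such as gdb to the one remaining process.
 */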

static int one_process = 0;

#ifdef DEBUG_SIGSTOP
int raise_sigstop_flags;
#endif

static apr_pool_t *pconf;               /* Pool for config stuff */
static apr_pool_t *pchild;              /* Pool for httpd child stuff */

static pid_t ap_my_pid; /* Linux getpid() doesn't work except in main
                           thread. Use this instead */
static pid_t parent_pid;
static apr_os_thread_t *listener_os_thread;

/* Locks for accept serialization */
static apr_proc_mutex_t **accept_mutex;
static apr_proc_mutex_t *child_mutex;
static ap_listen_rec *child_listen;

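/* SAFE_ACCEPT() wraps the accept-mutex lock/unlock calls: with more than one
 * listening socket the wrapped statement runs as-is, but with a single
 * listener (and SINGLE_LISTEN_UNSERIALIZED_ACCEPT defined) cross-process
 * serialization is unnecessary and the call collapses to APR_SUCCESS.
 */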
#ifdef SINGLE_LISTEN_UNSERIALIZED_ACCEPT
#define SAFE_ACCEPT(stmt) (child_listen->next ? (stmt) : APR_SUCCESS)
#else
#define SAFE_ACCEPT(stmt) (stmt)
#endif

/* The LISTENER_SIGNAL signal will be sent from the main thread to the
 * listener thread to wake it up for graceful termination (what a child
 * process from an old generation does when the admin does "apachectl
 * graceful").  This signal will be blocked in all threads of a child
 * process except for the listener thread.
 */
#define LISTENER_SIGNAL     SIGHUP

/* The WORKER_SIGNAL signal will be sent from the main thread to the
 * worker threads during an ungraceful restart or shutdown.
 * This ensures that on systems (e.g., Linux) where closing the worker
 * socket doesn't awaken the worker thread when it is polling on the socket
 * (especially in apr_wait_for_io_or_timeout() when handling
 * Keep-Alive connections), close_worker_sockets() and join_workers()
 * still function in a timely manner and allow ungraceful shutdowns to
 * proceed to completion.  Otherwise join_workers() doesn't return
 * before the main process decides the child process is non-responsive
 * and sends a SIGKILL.
 */
#define WORKER_SIGNAL       AP_SIG_GRACEFUL
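/* (AP_SIG_GRACEFUL is normally SIGUSR1; see mpm_common.h.) */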

/* An array of socket descriptors in use by each thread used to
 * perform a non-graceful (forced) shutdown of the server. */
static apr_socket_t **worker_sockets;

static void close_worker_sockets(void)
{
    int i;
    for (i = 0; i < threads_per_child; i++) {
        if (worker_sockets[i]) {
            apr_socket_close(worker_sockets[i]);
            worker_sockets[i] = NULL;
        }
    }
}

static void wakeup_listener(void)
{
    listener_may_exit = 1;
    if (!listener_os_thread) {
        /* XXX there is an obscure path that this doesn't handle perfectly:
         *     right after listener thread is created but before
         *     listener_os_thread is set, the first worker thread hits an
         *     error and starts graceful termination
         */
        return;
    }

    /* unblock the listener if it's waiting for a worker */
    ap_queue_info_term(worker_queue_info);

    /*
     * we should just be able to "kill(ap_my_pid, LISTENER_SIGNAL)" on all
     * platforms and wake up the listener thread since it is the only thread
     * with SIGHUP unblocked, but that doesn't work on Linux
     */
#ifdef HAVE_PTHREAD_KILL
    pthread_kill(*listener_os_thread, LISTENER_SIGNAL);
#else
    kill(ap_my_pid, LISTENER_SIGNAL);
#endif
}

#define ST_INIT              0
#define ST_GRACEFUL          1
#define ST_UNGRACEFUL        2

static int terminate_mode = ST_INIT;

static void signal_threads(int mode)
{
    if (terminate_mode == mode) {
        return;
    }
    terminate_mode = mode;
    mpm_state = AP_MPMQ_STOPPING;

    /* in case we weren't called from the listener thread, wake up the
     * listener thread
     */
    wakeup_listener();

    /* for ungraceful termination, let the workers exit now;
     * for graceful termination, the listener thread will notify the
     * workers to exit once it has stopped accepting new connections
     */
    if (mode == ST_UNGRACEFUL) {
        workers_may_exit = 1;
        ap_queue_interrupt_all(worker_queue);
        close_worker_sockets(); /* forcefully kill all current connections */
    }
}

static int worker_query(int query_code, int *result, apr_status_t *rv)
{
    *rv = APR_SUCCESS;
    switch (query_code) {
    case AP_MPMQ_MAX_DAEMON_USED:
        *result = retained->max_daemons_limit;
        break;
    case AP_MPMQ_IS_THREADED:
        *result = AP_MPMQ_STATIC;
        break;
    case AP_MPMQ_IS_FORKED:
        *result = AP_MPMQ_DYNAMIC;
        break;
    case AP_MPMQ_HARD_LIMIT_DAEMONS:
        *result = server_limit;
        break;
    case AP_MPMQ_HARD_LIMIT_THREADS:
        *result = thread_limit;
        break;
    case AP_MPMQ_MAX_THREADS:
        *result = threads_per_child;
        break;
    case AP_MPMQ_MIN_SPARE_DAEMONS:
        *result = 0;
        break;
    case AP_MPMQ_MIN_SPARE_THREADS:
        *result = min_spare_threads;
        break;
    case AP_MPMQ_MAX_SPARE_DAEMONS:
        *result = 0;
        break;
    case AP_MPMQ_MAX_SPARE_THREADS:
        *result = max_spare_threads;
        break;
    case AP_MPMQ_MAX_REQUESTS_DAEMON:
        *result = ap_max_requests_per_child;
        break;
    case AP_MPMQ_MAX_DAEMONS:
        *result = ap_daemons_limit;
        break;
    case AP_MPMQ_MPM_STATE:
        *result = mpm_state;
        break;
    case AP_MPMQ_GENERATION:
        *result = retained->my_generation;
        break;
    default:
        *rv = APR_ENOTIMPL;
        break;
    }
    return OK;
}
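
/* Other modules reach this hook through ap_mpm_query(); for example, a
 * module sizing a per-thread cache might do
 *
 *     int max_threads = 0;
 *     ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads);
 *
 * which lands in the AP_MPMQ_MAX_THREADS case above and yields
 * ThreadsPerChild.
 */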

static void worker_note_child_killed(int childnum, pid_t pid, ap_generation_t gen)
{
    if (childnum != -1) { /* child had a scoreboard slot? */
        ap_run_child_status(ap_server_conf,
                            ap_scoreboard_image->parent[childnum].pid,
                            ap_scoreboard_image->parent[childnum].generation,
                            childnum, MPM_CHILD_EXITED);
        ap_scoreboard_image->parent[childnum].pid = 0;
    }
    else {
        ap_run_child_status(ap_server_conf, pid, gen, -1, MPM_CHILD_EXITED);
    }
}

static void worker_note_child_started(int slot, pid_t pid)
{
    ap_scoreboard_image->parent[slot].pid = pid;
    ap_run_child_status(ap_server_conf,
                        ap_scoreboard_image->parent[slot].pid,
                        retained->my_generation, slot, MPM_CHILD_STARTED);
}

static void worker_note_child_lost_slot(int slot, pid_t newpid)
{
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00263)
                 "pid %" APR_PID_T_FMT " taking over scoreboard slot from "
                 "%" APR_PID_T_FMT "%s",
                 newpid,
                 ap_scoreboard_image->parent[slot].pid,
                 ap_scoreboard_image->parent[slot].quiescing ?
                     " (quiescing)" : "");
    ap_run_child_status(ap_server_conf,
                        ap_scoreboard_image->parent[slot].pid,
                        ap_scoreboard_image->parent[slot].generation,
                        slot, MPM_CHILD_LOST_SLOT);
    /* Don't forget about this exiting child process, or we
     * won't be able to kill it if it doesn't exit by the
     * time the server is shut down.
     */
    ap_register_extra_mpm_process(ap_scoreboard_image->parent[slot].pid,
                                  ap_scoreboard_image->parent[slot].generation);
}

static const char *worker_get_name(void)
{
    return "worker";
}

/* a clean exit from a child with proper cleanup */
static void clean_child_exit(int code) __attribute__ ((noreturn));
static void clean_child_exit(int code)
{
    mpm_state = AP_MPMQ_STOPPING;
    if (pchild) {
        apr_pool_destroy(pchild);
    }

    if (one_process) {
        worker_note_child_killed(/* slot */ 0, 0, 0);
    }

    exit(code);
}

static void just_die(int sig)
{
    clean_child_exit(0);
}

/*****************************************************************
 * Connection structures and accounting...
 */

static int child_fatal;

/* volatile because they're updated from a signal handler */
static int volatile shutdown_pending;
static int volatile restart_pending;

/*
 * ap_start_shutdown() and ap_start_restart(), below, are a first stab at
 * functions to initiate shutdown or restart without relying on signals.
 * Previously this was initiated in sig_term() and restart() signal handlers,
 * but we want to be able to start a shutdown/restart from other sources --
 * e.g. on Win32, from the service manager.  Now the service manager can
 * call ap_start_shutdown() or ap_start_restart() as appropriate.  Note that
 * these functions can also be called by the child processes, since global
 * variables are no longer used to pass on the required action to the parent.
 *
 * These should only be called from the parent process itself, since the
 * parent process will use the shutdown_pending and restart_pending variables
 * to determine whether to shut down or restart.  The child process should
 * call signal_parent() directly to tell the parent to die -- this will
 * cause neither of those variables to be set, which the parent will
 * assume means something serious is wrong (which it will be, for the
 * child to force an exit) and so do an exit anyway.
 */

static void ap_start_shutdown(int graceful)
{
    mpm_state = AP_MPMQ_STOPPING;
    if (shutdown_pending == 1) {
        /* Probably not an error -- the user may simply have tried to shut
         * down twice in quick succession, so don't bother reporting it.
         */
        return;
    }
    shutdown_pending = 1;
    retained->is_graceful = graceful;
}

/* do a graceful restart if graceful == 1 */
static void ap_start_restart(int graceful)
{
    mpm_state = AP_MPMQ_STOPPING;
    if (restart_pending == 1) {
        /* Probably not an error - don't bother reporting it */
        return;
    }
    restart_pending = 1;
    retained->is_graceful = graceful;
}

static void sig_term(int sig)
{
    ap_start_shutdown(sig == AP_SIG_GRACEFUL_STOP);
}

static void restart(int sig)
{
    ap_start_restart(sig == AP_SIG_GRACEFUL);
}

static void set_signals(void)
{
#ifndef NO_USE_SIGACTION
    struct sigaction sa;
#endif

    if (!one_process) {
        ap_fatal_signal_setup(ap_server_conf, pconf);
    }

#ifndef NO_USE_SIGACTION
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;

    sa.sa_handler = sig_term;
    if (sigaction(SIGTERM, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00264)
                     "sigaction(SIGTERM)");
#ifdef AP_SIG_GRACEFUL_STOP
    if (sigaction(AP_SIG_GRACEFUL_STOP, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00265)
                     "sigaction(" AP_SIG_GRACEFUL_STOP_STRING ")");
#endif
#ifdef SIGINT
    if (sigaction(SIGINT, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00266)
                     "sigaction(SIGINT)");
#endif
#ifdef SIGXCPU
    sa.sa_handler = SIG_DFL;
    if (sigaction(SIGXCPU, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00267)
                     "sigaction(SIGXCPU)");
#endif
#ifdef SIGXFSZ
    /* For systems following the LFS standard, ignoring SIGXFSZ allows
     * a write() beyond the 2GB limit to fail gracefully with E2BIG
     * rather than terminate the process. */
    sa.sa_handler = SIG_IGN;
    if (sigaction(SIGXFSZ, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00268)
                     "sigaction(SIGXFSZ)");
#endif
#ifdef SIGPIPE
    sa.sa_handler = SIG_IGN;
    if (sigaction(SIGPIPE, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00269)
                     "sigaction(SIGPIPE)");
#endif

    /* we want to ignore HUPs and AP_SIG_GRACEFUL while we're busy
     * processing one */
    sigaddset(&sa.sa_mask, SIGHUP);
    sigaddset(&sa.sa_mask, AP_SIG_GRACEFUL);
    sa.sa_handler = restart;
    if (sigaction(SIGHUP, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00270)
                     "sigaction(SIGHUP)");
    if (sigaction(AP_SIG_GRACEFUL, &sa, NULL) < 0)
        ap_log_error(APLOG_MARK, APLOG_WARNING, errno, ap_server_conf, APLOGNO(00271)
                     "sigaction(" AP_SIG_GRACEFUL_STRING ")");
#else
    if (!one_process) {
#ifdef SIGXCPU
        apr_signal(SIGXCPU, SIG_DFL);
#endif /* SIGXCPU */
#ifdef SIGXFSZ
        apr_signal(SIGXFSZ, SIG_IGN);
#endif /* SIGXFSZ */
    }

    apr_signal(SIGTERM, sig_term);
#ifdef SIGHUP
    apr_signal(SIGHUP, restart);
#endif /* SIGHUP */
#ifdef AP_SIG_GRACEFUL
    apr_signal(AP_SIG_GRACEFUL, restart);
#endif /* AP_SIG_GRACEFUL */
#ifdef AP_SIG_GRACEFUL_STOP
    apr_signal(AP_SIG_GRACEFUL_STOP, sig_term);
#endif /* AP_SIG_GRACEFUL_STOP */
#ifdef SIGPIPE
    apr_signal(SIGPIPE, SIG_IGN);
#endif /* SIGPIPE */

#endif
}

/*****************************************************************
 * Here follows a long bunch of generic server bookkeeping stuff...
 */

/*****************************************************************
 * Child process main loop.
 */

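/* Handle a single connection: called by each worker thread for every socket
 * it pops off worker_queue (which the listener thread filled via accept()).
 */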
static void process_socket(apr_thread_t *thd, apr_pool_t *p, apr_socket_t *sock,
                           int my_child_num,
                           int my_thread_num, apr_bucket_alloc_t *bucket_alloc)
{
    conn_rec *current_conn;
    long conn_id = ID_FROM_CHILD_THREAD(my_child_num, my_thread_num);
    ap_sb_handle_t *sbh;

    ap_create_sb_handle(&sbh, p, my_child_num, my_thread_num);

    current_conn = ap_run_create_connection(p, ap_server_conf, sock,
                                            conn_id, sbh, bucket_alloc);
    if (current_conn) {
        current_conn->current_thread = thd;
        ap_process_connection(current_conn, sock);
        ap_lingering_close(current_conn);
    }
}

/* requests_this_child has gone to zero or below.  See if the admin coded
   "MaxConnectionsPerChild 0", and keep going in that case.  Doing it this way
   simplifies the hot path in worker_thread */
static void check_infinite_requests(void)
{
    if (ap_max_requests_per_child) {
        signal_threads(ST_GRACEFUL);
    }
    else {
        requests_this_child = INT_MAX;      /* keep going */
    }
}

static void unblock_signal(int sig)
{
    sigset_t sig_mask;

    sigemptyset(&sig_mask);
    sigaddset(&sig_mask, sig);
#if defined(SIGPROCMASK_SETS_THREAD_MASK)
    sigprocmask(SIG_UNBLOCK, &sig_mask, NULL);
#else
    pthread_sigmask(SIG_UNBLOCK, &sig_mask, NULL);
#endif
}

static void dummy_signal_handler(int sig)
{
    /* XXX If specifying SIG_IGN is guaranteed to unblock a syscall,
     *     then we don't need this goofy function.
     */
}

static void accept_mutex_error(const char *func, apr_status_t rv, int process_slot)
{
    int level = APLOG_EMERG;

    if (ap_scoreboard_image->parent[process_slot].generation !=
        ap_scoreboard_image->global->running_generation) {
        level = APLOG_DEBUG; /* common to get these at restart time */
    }
    else if (requests_this_child == INT_MAX
             || ((requests_this_child == ap_max_requests_per_child)
                 && ap_max_requests_per_child)) {
        ap_log_error(APLOG_MARK, level, rv, ap_server_conf, APLOGNO(00272)
                     "apr_proc_mutex_%s failed "
                     "before this child process served any requests.",
                     func);
        clean_child_exit(APEXIT_CHILDSICK);
    }
    ap_log_error(APLOG_MARK, level, rv, ap_server_conf, APLOGNO(00273)
                 "apr_proc_mutex_%s failed.  Attempting to "
                 "shutdown process gracefully.", func);
    signal_threads(ST_GRACEFUL);
}

static void * APR_THREAD_FUNC listener_thread(apr_thread_t *thd, void * dummy)
{
    proc_info * ti = dummy;
    int process_slot = ti->pid;
    apr_pool_t *tpool = apr_thread_pool_get(thd);
    void *csd = NULL;
    apr_pool_t *ptrans = NULL;          /* Pool for per-transaction stuff */
    apr_pollset_t *pollset;
    apr_status_t rv;
    ap_listen_rec *lr;
    int have_idle_worker = 0;
    int last_poll_idx = 0;

    free(ti);

    rv = apr_pollset_create(&pollset, num_listensocks, tpool, 0);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                     "Couldn't create pollset in thread;"
                     " check system or user limits");
        /* let the parent decide how bad this really is */
        clean_child_exit(APEXIT_CHILDSICK);
    }

    for (lr = child_listen; lr != NULL; lr = lr->next) {
        apr_pollfd_t pfd = { 0 };

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;

        rv = apr_pollset_add(pollset, &pfd);
        if (rv != APR_SUCCESS) {
            ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers "Couldn't create add listener to pollset;"
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers " check system or user limits");
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers /* let the parent decide how bad this really is */
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers clean_child_exit(APEXIT_CHILDSICK);
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers }
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers lr->accept_func = ap_unixd_accept;
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers }
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers /* Unblock the signal used to wake this thread up, and set a handler for
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers * it.
0c959b39175b126fdb70ae00de37ca6d9c8ca3a1Kay Sievers */
19c98efe17155734698c12482cd40834a89f0e48Lennart Poettering unblock_signal(LISTENER_SIGNAL);
51c0c2869845a058268d54c3111d55d0dd485704Peter Hutterer apr_signal(LISTENER_SIGNAL, dummy_signal_handler);
19c98efe17155734698c12482cd40834a89f0e48Lennart Poettering
19c98efe17155734698c12482cd40834a89f0e48Lennart Poettering /* TODO: Switch to a system where threads reuse the results from earlier
19c98efe17155734698c12482cd40834a89f0e48Lennart Poettering poll calls - manoj */
19c98efe17155734698c12482cd40834a89f0e48Lennart Poettering while (1) {
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt /* TODO: requests_this_child should be synchronized - aaron */
51c0c2869845a058268d54c3111d55d0dd485704Peter Hutterer if (requests_this_child <= 0) {
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt check_infinite_requests();
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt }
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt if (listener_may_exit) break;
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt if (!have_idle_worker) {
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt /* the following pops a recycled ptrans pool off a stack
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt * if there is one, in addition to reserving a worker thread
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt */
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt rv = ap_queue_info_wait_for_idler(worker_queue_info,
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt &ptrans);
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt if (APR_STATUS_IS_EOF(rv)) {
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt break; /* we've been signaled to die now */
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt }
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt else if (rv != APR_SUCCESS) {
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt "apr_queue_info_wait failed. Attempting to "
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt " shutdown process gracefully.");
792d616391159f4fa992341bf264c9407e480c6dMartin Pitt signal_threads(ST_GRACEFUL);
0c959b39175b126fdb70ae00de37ca6d9c8ca3a1Kay Sievers break;
aedc2eddd16e48d468e6ad0aea2caf00c7d37365Kay Sievers }
0c959b39175b126fdb70ae00de37ca6d9c8ca3a1Kay Sievers have_idle_worker = 1;
0c959b39175b126fdb70ae00de37ca6d9c8ca3a1Kay Sievers }

        /* We've already decremented the idle worker count inside
         * ap_queue_info_wait_for_idler. */

        if ((rv = SAFE_ACCEPT(apr_proc_mutex_lock(child_mutex)))
            != APR_SUCCESS) {

            if (!listener_may_exit) {
                accept_mutex_error("lock", rv, process_slot);
            }
            break;                    /* skip the lock release */
        }

        if (!child_listen->next) {
            /* Only one listener, so skip the poll */
            lr = child_listen;
        }
        else {
            while (!listener_may_exit) {
                apr_int32_t numdesc;
                const apr_pollfd_t *pdesc;

                rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc);
                if (rv != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(rv)) {
                        continue;
                    }

                    /* apr_pollset_poll() will only return errors in catastrophic
                     * circumstances. Let's try exiting gracefully, for now. */
                    ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                                 "apr_pollset_poll: (listen)");
                    signal_threads(ST_GRACEFUL);
                }

                if (listener_may_exit) break;

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server. Therefore, we round-robin across the
                 * returned set of descriptors. While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets. Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = pdesc[last_poll_idx++].client_data;
                break;

            } /* while */

        } /* if/else */

        if (!listener_may_exit) {
            if (ptrans == NULL) {
                /* we can't use a recycled transaction pool this time.
                 * create a new transaction pool */
                apr_allocator_t *allocator;

                apr_allocator_create(&allocator);
                apr_allocator_max_free_set(allocator, ap_max_mem_free);
                apr_pool_create_ex(&ptrans, pconf, NULL, allocator);
                apr_allocator_owner_set(allocator, ptrans);
            }
            apr_pool_tag(ptrans, "transaction");
            rv = lr->accept_func(&csd, lr, ptrans);
            /* later we trash rv and rely on csd to indicate success/failure */
            AP_DEBUG_ASSERT(rv == APR_SUCCESS || !csd);

            if (rv == APR_EGENERAL) {
                /* E[NM]FILE, ENOMEM, etc */
                resource_shortage = 1;
                signal_threads(ST_GRACEFUL);
            }
            if ((rv = SAFE_ACCEPT(apr_proc_mutex_unlock(child_mutex)))
                != APR_SUCCESS) {

                if (listener_may_exit) {
                    break;
                }
                accept_mutex_error("unlock", rv, process_slot);
            }
            if (csd != NULL) {
                rv = ap_queue_push(worker_queue, csd, ptrans);
                if (rv) {
                    /* trash the connection; we couldn't queue the connected
                     * socket to a worker
                     */
                    apr_socket_close(csd);
                    ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
                                 "ap_queue_push failed");
                }
                else {
                    have_idle_worker = 0;
                }
            }
        }
        else {
            if ((rv = SAFE_ACCEPT(apr_proc_mutex_unlock(child_mutex)))
                != APR_SUCCESS) {
                int level = APLOG_EMERG;

                if (ap_scoreboard_image->parent[process_slot].generation !=
                    ap_scoreboard_image->global->running_generation) {
                    level = APLOG_DEBUG; /* common to get these at restart time */
                }
                ap_log_error(APLOG_MARK, level, rv, ap_server_conf, APLOGNO(00274)
                             "apr_proc_mutex_unlock failed. Attempting to "
                             "shutdown process gracefully.");
                signal_threads(ST_GRACEFUL);
            }
            break;
        }
    }

    ap_close_listeners();
    ap_queue_term(worker_queue);
    dying = 1;
    ap_scoreboard_image->parent[process_slot].quiescing = 1;

    /* wake up the main thread */
    kill(ap_my_pid, SIGTERM);

    apr_thread_exit(thd, APR_SUCCESS);
    return NULL;
}

/* XXX For ungraceful termination/restart, we definitely don't want to
 * wait for active connections to finish but we may want to wait
 * for idle workers to get out of the queue code and release mutexes,
 * since those mutexes are cleaned up pretty soon and some systems
 * may not react favorably (i.e., segfault) if operations are attempted
 * on cleaned-up mutexes.
 */
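/* The worker thread: ThreadsPerChild of these per child process.  Each
 * iteration marks the thread idle (recycling the previous transaction pool),
 * blocks in ap_queue_pop() until the listener hands it an accepted socket,
 * processes the connection, and then clears the pool for reuse.
 */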
static void * APR_THREAD_FUNC worker_thread(apr_thread_t *thd, void * dummy)
{
    proc_info * ti = dummy;
    int process_slot = ti->pid;
    int thread_slot = ti->tid;
    apr_socket_t *csd = NULL;
    apr_bucket_alloc_t *bucket_alloc;
    apr_pool_t *last_ptrans = NULL;
    apr_pool_t *ptrans;        /* Pool for per-transaction stuff */
    apr_status_t rv;
    int is_idle = 0;

    free(ti);

    ap_scoreboard_image->servers[process_slot][thread_slot].pid = ap_my_pid;
    ap_scoreboard_image->servers[process_slot][thread_slot].tid = apr_os_thread_current();
    ap_scoreboard_image->servers[process_slot][thread_slot].generation = retained->my_generation;
    ap_update_child_status_from_indexes(process_slot, thread_slot, SERVER_STARTING, NULL);

#ifdef HAVE_PTHREAD_KILL
    unblock_signal(WORKER_SIGNAL);
    apr_signal(WORKER_SIGNAL, dummy_signal_handler);
#endif

    while (!workers_may_exit) {
        if (!is_idle) {
            rv = ap_queue_info_set_idle(worker_queue_info, last_ptrans);
            last_ptrans = NULL;
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                             "ap_queue_info_set_idle failed. Attempting to "
                             "shutdown process gracefully.");
                signal_threads(ST_GRACEFUL);
                break;
            }
            is_idle = 1;
        }

        ap_update_child_status_from_indexes(process_slot, thread_slot, SERVER_READY, NULL);
worker_pop:
        if (workers_may_exit) {
            break;
        }
        rv = ap_queue_pop(worker_queue, &csd, &ptrans);

        if (rv != APR_SUCCESS) {
            /* We get APR_EOF during a graceful shutdown once all the connections
             * accepted by this server process have been handled.
             */
            if (APR_STATUS_IS_EOF(rv)) {
                break;
            }
            /* We get APR_EINTR whenever ap_queue_pop() has been interrupted
             * from an explicit call to ap_queue_interrupt_all(). This allows
             * us to unblock threads stuck in ap_queue_pop() when a shutdown
             * is pending.
             *
             * If workers_may_exit is set and this is ungraceful termination/
             * restart, we are bound to get an error on some systems (e.g.,
             * AIX, which sanity-checks mutex operations) since the queue
             * may have already been cleaned up. Don't log the "error" if
             * workers_may_exit is set.
             */
            else if (APR_STATUS_IS_EINTR(rv)) {
                goto worker_pop;
            }
            /* We got some other error. */
            else if (!workers_may_exit) {
                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf,
                             "ap_queue_pop failed");
            }
            continue;
        }
        is_idle = 0;
        worker_sockets[thread_slot] = csd;
        bucket_alloc = apr_bucket_alloc_create(ptrans);
        process_socket(thd, ptrans, csd, process_slot, thread_slot, bucket_alloc);
        worker_sockets[thread_slot] = NULL;
        requests_this_child--;
        apr_pool_clear(ptrans);
        last_ptrans = ptrans;
    }

    ap_update_child_status_from_indexes(process_slot, thread_slot,
        (dying) ? SERVER_DEAD : SERVER_GRACEFUL, (request_rec *) NULL);

    apr_thread_exit(thd, APR_SUCCESS);
    return NULL;
}

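/* Callback handed to apr_signal_thread() in one_process mode.  Returning 1
 * for a terminating signal (SIGTERM/SIGINT) is the callback's way of telling
 * apr_signal_thread() to return, which lets child_main() proceed with
 * shutdown.
 */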
static int check_signal(int signum)
{
    switch (signum) {
    case SIGTERM:
    case SIGINT:
        return 1;
    }
    return 0;
}

static void create_listener_thread(thread_starter *ts)
{
    int my_child_num = ts->child_num_arg;
    apr_threadattr_t *thread_attr = ts->threadattr;
    proc_info *my_info;
    apr_status_t rv;

    my_info = (proc_info *)ap_malloc(sizeof(proc_info));
    my_info->pid = my_child_num;
    my_info->tid = -1; /* listener thread doesn't have a thread slot */
    my_info->sd = 0;
    rv = apr_thread_create(&ts->listener, thread_attr, listener_thread,
                           my_info, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00275)
                     "apr_thread_create: unable to create listener thread");
        /* let the parent decide how bad this really is */
        clean_child_exit(APEXIT_CHILDSICK);
    }
    apr_os_thread_get(&listener_os_thread, ts->listener);
}

/* XXX under some circumstances not understood, children can get stuck
 * in start_threads forever trying to take over slots which will
 * never be cleaned up; for now there is an APLOG_DEBUG message issued
 * every so often when this condition occurs
 */
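/* start_threads() runs on its own thread so that child_main() can go on to
 * watch for signals or pipe-of-death messages while the worker threads are
 * created.  Threads are only started in scoreboard slots that are
 * SERVER_DEAD or SERVER_GRACEFUL, so a new generation ramps up as the old
 * generation's threads release their slots; the listener is started as soon
 * as the first worker exists.
 */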
static void * APR_THREAD_FUNC start_threads(apr_thread_t *thd, void *dummy)
{
    thread_starter *ts = dummy;
    apr_thread_t **threads = ts->threads;
    apr_threadattr_t *thread_attr = ts->threadattr;
    int child_num_arg = ts->child_num_arg;
    int my_child_num = child_num_arg;
    proc_info *my_info;
    apr_status_t rv;
    int i;
    int threads_created = 0;
    int listener_started = 0;
    int loops;
    int prev_threads_created;

    /* We must create the fd queues before we start up the listener
     * and worker threads. */
    worker_queue = apr_pcalloc(pchild, sizeof(*worker_queue));
    rv = ap_queue_init(worker_queue, threads_per_child, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                     "ap_queue_init() failed");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    rv = ap_queue_info_create(&worker_queue_info, pchild,
                              threads_per_child);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                     "ap_queue_info_create() failed");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    worker_sockets = apr_pcalloc(pchild, threads_per_child
                                 * sizeof(apr_socket_t *));

    loops = prev_threads_created = 0;
    while (1) {
        /* threads_per_child does not include the listener thread */
        for (i = 0; i < threads_per_child; i++) {
            int status = ap_scoreboard_image->servers[child_num_arg][i].status;

            if (status != SERVER_GRACEFUL && status != SERVER_DEAD) {
                continue;
            }

            my_info = (proc_info *)ap_malloc(sizeof(proc_info));
            my_info->pid = my_child_num;
            my_info->tid = i;
            my_info->sd = 0;

            /* We are creating threads right now */
            ap_update_child_status_from_indexes(my_child_num, i,
                                                SERVER_STARTING, NULL);
            /* We let each thread update its own scoreboard entry. This is
             * done because it lets us deal with tid better.
             */
            rv = apr_thread_create(&threads[i], thread_attr,
                                   worker_thread, my_info, pchild);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                             "apr_thread_create: unable to create worker thread");
                /* let the parent decide how bad this really is */
                clean_child_exit(APEXIT_CHILDSICK);
            }
            threads_created++;
        }
        /* Start the listener only when there are workers available */
        if (!listener_started && threads_created) {
            create_listener_thread(ts);
            listener_started = 1;
        }
        if (start_thread_may_exit || threads_created == threads_per_child) {
            break;
        }
        /* wait for previous generation to clean up an entry */
        apr_sleep(apr_time_from_sec(1));
        ++loops;
        if (loops % 120 == 0) { /* every couple of minutes */
            if (prev_threads_created == threads_created) {
                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
                             "child %" APR_PID_T_FMT " isn't taking over "
                             "slots very quickly (%d of %d)",
                             ap_my_pid, threads_created, threads_per_child);
            }
            prev_threads_created = threads_created;
        }
    }

    /* What state should this child_main process be listed as in the
     * scoreboard...?
     *  ap_update_child_status_from_indexes(my_child_num, i, SERVER_STARTING,
     *                                      (request_rec *) NULL);
     *
     * This state should be listed separately in the scoreboard, in some kind
     * of process_status, not mixed in with the worker threads' status.
     * "life_status" is almost right, but it's in the worker's structure, and
     * the name could be clearer.   gla
     */
    apr_thread_exit(thd, APR_SUCCESS);
    return NULL;
}

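/* Reap the listener and worker threads at shutdown/restart time.  The
 * listener is prodded repeatedly because the wakeup signal can race with it
 * entering a blocking syscall (see the comment below); workers are
 * additionally kicked with WORKER_SIGNAL when the stop is ungraceful so they
 * abandon any blocking operation.
 */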
static void join_workers(apr_thread_t *listener, apr_thread_t **threads,
                         int mode)
{
    int i;
    apr_status_t rv, thread_rv;

    if (listener) {
        int iter;

        /* deal with a rare timing window which affects waking up the
         * listener thread...  if the signal sent to the listener thread
         * is delivered between the time it verifies that the
         * listener_may_exit flag is clear and the time it enters a
         * blocking syscall, the signal didn't do any good...  work around
         * that by sleeping briefly and sending it again
         */

        iter = 0;
        while (iter < 10 &&
#ifdef HAVE_PTHREAD_KILL
               pthread_kill(*listener_os_thread, 0)
#else
               kill(ap_my_pid, 0)
#endif
               == 0) {
            /* listener not dead yet */
            apr_sleep(apr_time_make(0, 500000));
            wakeup_listener();
            ++iter;
        }
        if (iter >= 10) {
            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00276)
                         "the listener thread didn't exit");
        }
        else {
            rv = apr_thread_join(&thread_rv, listener);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00277)
                             "apr_thread_join: unable to join listener thread");
            }
        }
    }

    for (i = 0; i < threads_per_child; i++) {
        if (threads[i]) { /* if we ever created this thread */
            if (mode != ST_GRACEFUL) {
#ifdef HAVE_PTHREAD_KILL
                apr_os_thread_t *worker_os_thread;

                apr_os_thread_get(&worker_os_thread, threads[i]);
                pthread_kill(*worker_os_thread, WORKER_SIGNAL);
#endif
            }

            rv = apr_thread_join(&thread_rv, threads[i]);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00278)
                             "apr_thread_join: unable to join worker "
                             "thread %d",
                             i);
            }
        }
    }
}

static void join_start_thread(apr_thread_t *start_thread_id)
{
    apr_status_t rv, thread_rv;

    start_thread_may_exit = 1; /* tell it to give up in case it is still
                                * trying to take over slots from a
                                * previous generation
                                */
    rv = apr_thread_join(&thread_rv, start_thread_id);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_CRIT, rv, ap_server_conf, APLOGNO(00279)
                     "apr_thread_join: unable to join the start "
                     "thread");
    }
}
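
/* Per-child setup and main loop.  Each child binds to one listener bucket,
 * initializes its copy of the accept mutex, drops privileges, spawns the
 * start_threads thread, and then either waits on the signal thread
 * (one_process mode) or watches the pipe of death for graceful/ungraceful
 * stop requests from the parent before joining all of its threads.
 */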
static void child_main(int child_num_arg, int child_bucket)
{
apr_thread_t **threads;
apr_status_t rv;
thread_starter *ts;
apr_threadattr_t *thread_attr;
apr_thread_t *start_thread_id;
int i;
ap_listen_rec *lr;
mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
* child initializes
*/
ap_my_pid = getpid();
ap_fatal_signal_child_setup(ap_server_conf);
apr_pool_create(&pchild, pconf);
child_listen = mpm_listen[child_bucket];
child_mutex = accept_mutex[child_bucket];
child_pod = pod[child_bucket];
/* close unused listeners and pods */
for (i = 0; i < num_buckets; i++) {
if (i != child_bucket) {
lr = mpm_listen[i];
while(lr) {
apr_socket_close(lr->sd);
lr->active = 0;
lr = lr->next;
}
ap_mpm_podx_close(pod[i]);
}
}
/*stuff to do before we switch id's, so we have permissions.*/
ap_reopen_scoreboard(pchild, NULL, 0);
rv = SAFE_ACCEPT(apr_proc_mutex_child_init(&child_mutex,
apr_proc_mutex_lockfile(child_mutex),
pchild));
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, APLOGNO(00280)
"Couldn't initialize cross-process lock in child");
clean_child_exit(APEXIT_CHILDFATAL);
}
if (ap_run_drop_privileges(pchild, ap_server_conf)) {
clean_child_exit(APEXIT_CHILDFATAL);
}
ap_run_child_init(pchild, ap_server_conf);
/* done with init critical section */
/* Just use the standard apr_setup_signal_thread to block all signals
* from being received. The child processes no longer use signals for
* any communication with the parent process.
*/
rv = apr_setup_signal_thread();
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf, APLOGNO(00281)
"Couldn't initialize signal thread");
clean_child_exit(APEXIT_CHILDFATAL);
}
if (ap_max_requests_per_child) {
requests_this_child = ap_max_requests_per_child;
}
else {
/* coding a value of zero means infinity */
requests_this_child = INT_MAX;
}
/* Setup worker threads */
/* clear the storage; we may not create all our threads immediately,
* and we want a 0 entry to indicate a thread which was not created
*/
threads = (apr_thread_t **)ap_calloc(1,
sizeof(apr_thread_t *) * threads_per_child);
ts = (thread_starter *)apr_palloc(pchild, sizeof(*ts));
apr_threadattr_create(&thread_attr, pchild);
/* 0 means PTHREAD_CREATE_JOINABLE */
apr_threadattr_detach_set(thread_attr, 0);
if (ap_thread_stacksize != 0) {
rv = apr_threadattr_stacksize_set(thread_attr, ap_thread_stacksize);
if (rv != APR_SUCCESS && rv != APR_ENOTIMPL) {
ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf, APLOGNO(02435)
"WARNING: ThreadStackSize of %" APR_SIZE_T_FMT " is "
"inappropriate, using default",
ap_thread_stacksize);
}
}
ts->threads = threads;
ts->listener = NULL;
ts->child_num_arg = child_num_arg;
ts->threadattr = thread_attr;
rv = apr_thread_create(&start_thread_id, thread_attr, start_threads,
ts, pchild);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf, APLOGNO(00282)
"apr_thread_create: unable to create worker thread");
/* let the parent decide how bad this really is */
clean_child_exit(APEXIT_CHILDSICK);
}
mpm_state = AP_MPMQ_RUNNING;
/* If we are only running in one_process mode, we will want to
* still handle signals. */
if (one_process) {
/* Block until we get a terminating signal. */
apr_signal_thread(check_signal);
/* make sure the start thread has finished; signal_threads()
* and join_workers() depend on that
*/
/* XXX join_start_thread() won't be awakened if one of our
* threads encounters a critical error and attempts to
* shutdown this child
*/
join_start_thread(start_thread_id);
signal_threads(ST_UNGRACEFUL); /* helps us terminate a little more
* quickly than the dispatch of the signal thread
* beats the Pipe of Death and the browsers
*/
/* A terminating signal was received. Now join each of the
* workers to clean them up.
* If the worker already exited, then the join frees
* their resources and returns.
* If the worker hasn't exited, then this blocks until
* they have (then cleans up).
*/
join_workers(ts->listener, threads, ST_UNGRACEFUL);
}
else { /* !one_process */
/* remove SIGTERM from the set of blocked signals... if one of
* the other threads in the process needs to take us down
* (e.g., for MaxConnectionsPerChild) it will send us SIGTERM
*/
unblock_signal(SIGTERM);
apr_signal(SIGTERM, dummy_signal_handler);
/* Watch for any messages from the parent over the POD */
while (1) {
rv = ap_mpm_podx_check(child_pod);
if (rv == AP_MPM_PODX_NORESTART) {
/* see if termination was triggered while we slept */
switch(terminate_mode) {
case ST_GRACEFUL:
rv = AP_MPM_PODX_GRACEFUL;
break;
case ST_UNGRACEFUL:
rv = AP_MPM_PODX_RESTART;
break;
}
}
if (rv == AP_MPM_PODX_GRACEFUL || rv == AP_MPM_PODX_RESTART) {
/* make sure the start thread has finished;
* signal_threads() and join_workers depend on that
*/
join_start_thread(start_thread_id);
signal_threads(rv == AP_MPM_PODX_GRACEFUL ? ST_GRACEFUL : ST_UNGRACEFUL);
break;
}
}
/* A terminating signal was received. Now join each of the
* workers to clean them up.
* If the worker already exited, then the join frees
* their resources and returns.
* If the worker hasn't exited, then this blocks until
* they have (then cleans up).
*/
join_workers(ts->listener, threads,
rv == AP_MPM_PODX_GRACEFUL ? ST_GRACEFUL : ST_UNGRACEFUL);
}
free(threads);
clean_child_exit(resource_shortage ? APEXIT_CHILDSICK : 0);
}
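/* Fork one child process for the given scoreboard slot and listener bucket.
 * In one_process mode no fork happens and child_main() is called directly.
 * If the fork fails we back off for ten seconds so a resource-starved system
 * is not hammered with repeated fork attempts.
 */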
static int make_child(server_rec *s, int slot, int bucket)
{
int pid;
if (slot + 1 > retained->max_daemons_limit) {
retained->max_daemons_limit = slot + 1;
}
if (one_process) {
set_signals();
worker_note_child_started(slot, getpid());
child_main(0, 0);
/* NOTREACHED */
ap_assert(0);
return -1;
}
if ((pid = fork()) == -1) {
ap_log_error(APLOG_MARK, APLOG_ERR, errno, s, APLOGNO(00283)
"fork: Unable to fork new process");
/* fork didn't succeed. There's no need to touch the scoreboard;
* if we were trying to replace a failed child process, then
* server_main_loop() marked its workers SERVER_DEAD, and if
* we were trying to replace a child process that exited normally,
* its worker_thread()s left SERVER_DEAD or SERVER_GRACEFUL behind.
*/
/* In case system resources are maxed out, we don't want
Apache running away with the CPU trying to fork over and
over and over again. */
apr_sleep(apr_time_from_sec(10));
return -1;
}
if (!pid) {
#ifdef HAVE_BINDPROCESSOR
/* By default, AIX binds to a single processor. This bit unbinds
* children which will then bind to another CPU.
*/
int status = bindprocessor(BINDPROCESS, (int)getpid(),
PROCESSOR_CLASS_ANY);
if (status != OK)
ap_log_error(APLOG_MARK, APLOG_DEBUG, errno,
ap_server_conf, APLOGNO(00284)
"processor unbind failed");
#endif
RAISE_SIGSTOP(MAKE_CHILD);
apr_signal(SIGTERM, just_die);
child_main(slot, bucket);
/* NOTREACHED */
ap_assert(0);
return -1;
}
if (ap_scoreboard_image->parent[slot].pid != 0) {
/* This new child process is squatting on the scoreboard
* entry owned by an exiting child process, which cannot
* exit until all active requests complete.
*/
worker_note_child_lost_slot(slot, pid);
}
ap_scoreboard_image->parent[slot].quiescing = 0;
ap_scoreboard_image->parent[slot].bucket = bucket;
worker_note_child_started(slot, pid);
return 0;
}
/* start up a bunch of children */
static void startup_children(int number_to_start)
{
int i;
for (i = 0; number_to_start && i < ap_daemons_limit; ++i) {
if (ap_scoreboard_image->parent[i].pid != 0) {
continue;
}
if (make_child(ap_server_conf, i, i % num_buckets) < 0) {
break;
}
--number_to_start;
}
}
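/* Called once per second for each listener bucket: count the idle worker
 * threads belonging to this bucket's children.  If there are more idle
 * threads than MaxSpareThreads / num_buckets, ask one child to exit
 * gracefully via the pipe of death; if there are fewer than MinSpareThreads,
 * fork up to idle_spawn_rate new children, doubling the rate (capped at
 * MAX_SPAWN_RATE / num_buckets) while the shortage persists.
 */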
static void perform_idle_server_maintenance(int child_bucket)
{
int i, j;
int idle_thread_count;
worker_score *ws;
process_score *ps;
int free_length;
int totally_free_length = 0;
int free_slots[MAX_SPAWN_RATE];
int last_non_dead;
int total_non_dead;
int active_thread_count = 0;
/* initialize the free_list */
free_length = 0;
idle_thread_count = 0;
last_non_dead = -1;
total_non_dead = 0;
for (i = 0; i < ap_daemons_limit; ++i) {
/* Initialization to satisfy the compiler. It doesn't know
* that threads_per_child is always > 0 */
int status = SERVER_DEAD;
int any_dying_threads = 0;
int any_dead_threads = 0;
int all_dead_threads = 1;
int child_threads_active = 0;
if (i >= retained->max_daemons_limit && totally_free_length == retained->idle_spawn_rate[child_bucket])
/* short cut if all active processes have been examined and
* enough empty scoreboard slots have been found
*/
break;
ps = &ap_scoreboard_image->parent[i];
for (j = 0; j < threads_per_child; j++) {
ws = &ap_scoreboard_image->servers[i][j];
status = ws->status;
/* XXX any_dying_threads is probably no longer needed GLA */
any_dying_threads = any_dying_threads ||
(status == SERVER_GRACEFUL);
any_dead_threads = any_dead_threads || (status == SERVER_DEAD);
all_dead_threads = all_dead_threads &&
(status == SERVER_DEAD ||
status == SERVER_GRACEFUL);
/* We consider a starting server as idle because we started it
* at least a cycle ago, and if it still hasn't finished starting
* then we're just going to swamp things worse by forking more.
* So we hopefully won't need to fork more if we count it.
* This depends on the ordering of SERVER_READY and SERVER_STARTING.
*/
if (ps->pid != 0) { /* XXX just set all_dead_threads in outer for
loop if no pid? not much else matters */
if (status <= SERVER_READY &&
!ps->quiescing &&
ps->generation == retained->my_generation &&
ps->bucket == child_bucket) {
++idle_thread_count;
}
if (status >= SERVER_READY && status < SERVER_GRACEFUL) {
++child_threads_active;
}
}
}
active_thread_count += child_threads_active;
if (any_dead_threads && totally_free_length < retained->idle_spawn_rate[child_bucket]
&& free_length < MAX_SPAWN_RATE / num_buckets
&& (!ps->pid /* no process in the slot */
|| ps->quiescing)) { /* or at least one is going away */
if (all_dead_threads) {
/* great! we prefer these, because the new process can
* start more threads sooner. So prioritize this slot
* by putting it ahead of any slots with active threads.
*
* first, make room by moving a slot that's potentially still
* in use to the end of the array
*/
free_slots[free_length] = free_slots[totally_free_length];
free_slots[totally_free_length++] = i;
}
else {
/* slot is still in use - back of the bus
*/
free_slots[free_length] = i;
}
++free_length;
}
else if (child_threads_active == threads_per_child) {
had_healthy_child = 1;
}
/* XXX if (!ps->quiescing) is probably more reliable GLA */
if (!any_dying_threads) {
last_non_dead = i;
++total_non_dead;
}
}
if (retained->sick_child_detected) {
if (had_healthy_child) {
/* Assume this is a transient error, even though it may not be. Leave
* the server up in case it is able to serve some requests or the
* problem will be resolved.
*/
retained->sick_child_detected = 0;
}
else {
/* looks like a basket case, as no child ever fully initialized; give up.
*/
shutdown_pending = 1;
child_fatal = 1;
ap_log_error(APLOG_MARK, APLOG_ALERT, 0,
ap_server_conf, APLOGNO(02325)
"A resource shortage or other unrecoverable failure "
"was encountered before any child process initialized "
"successfully... httpd is exiting!");
/* the child already logged the failure details */
return;
}
}
retained->max_daemons_limit = last_non_dead + 1;
if (idle_thread_count > max_spare_threads / num_buckets) {
/* Kill off one child */
ap_mpm_podx_signal(pod[child_bucket], AP_MPM_PODX_GRACEFUL);
retained->idle_spawn_rate[child_bucket] = 1;
}
else if (idle_thread_count < min_spare_threads) {
/* terminate the free list */
if (free_length == 0) { /* scoreboard is full, can't fork */
if (active_thread_count >= ap_daemons_limit * threads_per_child) {
/* no threads are "inactive" - starting, stopping, etc. */
/* have we reached MaxRequestWorkers, or just getting close? */
if (0 == idle_thread_count) {
if (!retained->maxclients_reported) {
/* only report this condition once */
ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00286)
"server reached MaxRequestWorkers "
"setting, consider raising the "
"MaxRequestWorkers setting");
retained->maxclients_reported = 1;
}
} else {
if (!retained->near_maxclients_reported) {
ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00287)
"server is within MinSpareThreads of "
"MaxRequestWorkers, consider raising the "
"MaxRequestWorkers setting");
retained->near_maxclients_reported = 1;
}
}
}
else {
ap_log_error(APLOG_MARK, APLOG_ERR, 0,
ap_server_conf, APLOGNO(00288)
"scoreboard is full, not at MaxRequestWorkers");
}
retained->idle_spawn_rate[child_bucket] = 1;
}
else {
if (free_length > retained->idle_spawn_rate[child_bucket]) {
free_length = retained->idle_spawn_rate[child_bucket];
}
if (retained->idle_spawn_rate[child_bucket] >= 8) {
ap_log_error(APLOG_MARK, APLOG_INFO, 0,
ap_server_conf, APLOGNO(00289)
"server seems busy, (you may need "
"to increase StartServers, ThreadsPerChild "
"or Min/MaxSpareThreads), "
"spawning %d children, there are around %d idle "
"threads, and %d total children", free_length,
idle_thread_count, total_non_dead);
}
for (i = 0; i < free_length; ++i) {
make_child(ap_server_conf, free_slots[i], child_bucket);
}
/* the next time around we want to spawn twice as many if this
* wasn't good enough, but not if we've just done a graceful
*/
if (retained->hold_off_on_exponential_spawning) {
--retained->hold_off_on_exponential_spawning;
}
else if (retained->idle_spawn_rate[child_bucket]
< MAX_SPAWN_RATE / num_buckets) {
retained->idle_spawn_rate[child_bucket] *= 2;
}
}
}
else {
retained->idle_spawn_rate[child_bucket] = 1;
}
}
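/* Parent process main loop: reap exited children with ap_wait_or_timeout(),
 * escalate fatal child errors to a shutdown, do 1-for-1 replacement while
 * the initial StartServers children are still being brought up, and
 * otherwise run idle-server maintenance for every bucket on each one-second
 * timeout.
 */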
static void server_main_loop(int remaining_children_to_start)
{
ap_generation_t old_gen;
int child_slot;
apr_exit_why_e exitwhy;
int status, processed_status;
apr_proc_t pid;
int i;
while (!restart_pending && !shutdown_pending) {
ap_wait_or_timeout(&exitwhy, &status, &pid, pconf, ap_server_conf);
if (pid.pid != -1) {
processed_status = ap_process_child_status(&pid, exitwhy, status);
child_slot = ap_find_child_by_pid(&pid);
if (processed_status == APEXIT_CHILDFATAL) {
/* fix race condition found in PR 39311
* A child created at the same time as a graceful happens
* can find the lock missing and create a fatal error.
* It is not fatal for the last generation to be in this state.
*/
if (child_slot < 0
|| ap_get_scoreboard_process(child_slot)->generation
== retained->my_generation) {
shutdown_pending = 1;
child_fatal = 1;
return;
}
else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf, APLOGNO(00290)
"Ignoring fatal error in child of previous "
"generation (pid %ld).",
(long)pid.pid);
retained->sick_child_detected = 1;
}
}
else if (processed_status == APEXIT_CHILDSICK) {
/* tell perform_idle_server_maintenance to check into this
* on the next timer pop
*/
retained->sick_child_detected = 1;
}
/* non-fatal death... note that it's gone in the scoreboard. */
if (child_slot >= 0) {
process_score *ps;
for (i = 0; i < threads_per_child; i++)
ap_update_child_status_from_indexes(child_slot, i, SERVER_DEAD,
(request_rec *) NULL);
worker_note_child_killed(child_slot, 0, 0);
ps = &ap_scoreboard_image->parent[child_slot];
ps->quiescing = 0;
if (processed_status == APEXIT_CHILDSICK) {
/* resource shortage, minimize the fork rate */
retained->idle_spawn_rate[ps->bucket] = 1;
}
else if (remaining_children_to_start
&& child_slot < ap_daemons_limit) {
/* we're still doing a 1-for-1 replacement of dead
* children with new children
*/
make_child(ap_server_conf, child_slot, ps->bucket);
--remaining_children_to_start;
}
}
else if (ap_unregister_extra_mpm_process(pid.pid, &old_gen) == 1) {
worker_note_child_killed(-1, /* already out of the scoreboard */
pid.pid, old_gen);
if (processed_status == APEXIT_CHILDSICK
&& old_gen == retained->my_generation) {
/* resource shortage, minimize the fork rate */
for (i = 0; i < num_buckets; i++) {
retained->idle_spawn_rate[i] = 1;
}
}
#if APR_HAS_OTHER_CHILD
}
else if (apr_proc_other_child_alert(&pid, APR_OC_REASON_DEATH,
status) == 0) {
/* handled */
#endif
}
else if (retained->is_graceful) {
/* Great, we've probably just lost a slot in the
* scoreboard. Somehow we don't know about this child.
*/
ap_log_error(APLOG_MARK, APLOG_WARNING, 0,
ap_server_conf, APLOGNO(00291)
"long lost child came home! (pid %ld)",
(long)pid.pid);
}
/* Don't perform idle maintenance when a child dies,
* only do it when there's a timeout. Remember only a
* finite number of children can die, and it's pretty
* pathological for a lot to die suddenly.
*/
continue;
}
else if (remaining_children_to_start) {
/* we hit a 1 second timeout in which none of the previous
* generation of children needed to be reaped... so assume
* they're all done, and pick up the slack if any is left.
*/
startup_children(remaining_children_to_start);
remaining_children_to_start = 0;
            /* In any event we really shouldn't run the maintenance code
             * below, because few of the servers we just started are in the
             * IDLE state yet, so we'd mistakenly create an extra server.
             */
continue;
}
for (i = 0; i < num_buckets; i++) {
perform_idle_server_maintenance(i);
}
}
}
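/* The MPM "run" hook: create the per-bucket accept mutexes and, unless this
 * is a graceful restart, a fresh scoreboard; fork the initial children; then
 * stay in server_main_loop() until a shutdown, graceful stop, or restart is
 * requested and perform the matching teardown of the child processes.
 */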
static int worker_run(apr_pool_t *_pconf, apr_pool_t *plog, server_rec *s)
{
int remaining_children_to_start;
apr_status_t rv;
int i;
ap_log_pid(pconf, ap_pid_fname);
    /* Initialize the cross-process accept locks (one per bucket) */
accept_mutex = apr_palloc(_pconf, sizeof(apr_proc_mutex_t *) * num_buckets);
for (i = 0; i < num_buckets; i++) {
rv = ap_proc_mutex_create(&accept_mutex[i], NULL, AP_ACCEPT_MUTEX_TYPE, NULL,
s, _pconf, 0);
if (rv != APR_SUCCESS) {
mpm_state = AP_MPMQ_STOPPING;
return DONE;
}
}
if (!retained->is_graceful) {
if (ap_run_pre_mpm(s->process->pool, SB_SHARED) != OK) {
mpm_state = AP_MPMQ_STOPPING;
return DONE;
}
/* fix the generation number in the global score; we just got a new,
* cleared scoreboard
*/
ap_scoreboard_image->global->running_generation = retained->my_generation;
}
restart_pending = shutdown_pending = 0;
set_signals();
    /* Don't thrash: keep MaxSpareThreads at least ThreadsPerChild (times the
     * number of buckets) above MinSpareThreads, so maintenance doesn't
     * oscillate between spawning and reaping children.
     */
if (max_spare_threads < min_spare_threads + threads_per_child * num_buckets)
max_spare_threads = min_spare_threads + threads_per_child * num_buckets;
/* If we're doing a graceful_restart then we're going to see a lot
* of children exiting immediately when we get into the main loop
* below (because we just sent them AP_SIG_GRACEFUL). This happens pretty
* rapidly... and for each one that exits we may start a new one, until
* there are at least min_spare_threads idle threads, counting across
* all children. But we may be permitted to start more children than
* that, so we'll just keep track of how many we're
* supposed to start up without the 1 second penalty between each fork.
*/
remaining_children_to_start = ap_daemons_to_start;
if (remaining_children_to_start > ap_daemons_limit) {
remaining_children_to_start = ap_daemons_limit;
}
if (!retained->is_graceful) {
startup_children(remaining_children_to_start);
remaining_children_to_start = 0;
}
else {
/* give the system some time to recover before kicking into
* exponential mode */
retained->hold_off_on_exponential_spawning = 10;
}
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00292)
"%s configured -- resuming normal operations",
ap_get_server_description());
ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf, APLOGNO(00293)
"Server built: %s", ap_get_server_built());
ap_log_command_line(plog, s);
ap_log_common(s);
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00294)
"Accept mutex: %s (default: %s)",
apr_proc_mutex_name(accept_mutex[0]),
apr_proc_mutex_defname());
mpm_state = AP_MPMQ_RUNNING;
server_main_loop(remaining_children_to_start);
mpm_state = AP_MPMQ_STOPPING;
if (shutdown_pending && !retained->is_graceful) {
/* Time to shut down:
* Kill child processes, tell them to call child_exit, etc...
*/
for (i = 0; i < num_buckets; i++) {
ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_RESTART);
}
ap_reclaim_child_processes(1, /* Start with SIGTERM */
worker_note_child_killed);
if (!child_fatal) {
/* cleanup pid file on normal shutdown */
ap_remove_pid(pconf, ap_pid_fname);
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0,
ap_server_conf, APLOGNO(00295) "caught SIGTERM, shutting down");
}
return DONE;
} else if (shutdown_pending) {
/* Time to gracefully shut down:
* Kill child processes, tell them to call child_exit, etc...
*/
int active_children;
int index;
apr_time_t cutoff = 0;
        /* Close our listeners, and then ask our children to do the same */
ap_close_listeners();
for (i = 0; i < num_buckets; i++) {
ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_GRACEFUL);
}
ap_relieve_child_processes(worker_note_child_killed);
if (!child_fatal) {
/* cleanup pid file on normal shutdown */
ap_remove_pid(pconf, ap_pid_fname);
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00296)
"caught " AP_SIG_GRACEFUL_STOP_STRING
", shutting down gracefully");
}
if (ap_graceful_shutdown_timeout) {
cutoff = apr_time_now() +
apr_time_from_sec(ap_graceful_shutdown_timeout);
}
/* Don't really exit until each child has finished */
shutdown_pending = 0;
do {
/* Pause for a second */
apr_sleep(apr_time_from_sec(1));
/* Relieve any children which have now exited */
ap_relieve_child_processes(worker_note_child_killed);
active_children = 0;
for (index = 0; index < ap_daemons_limit; ++index) {
if (ap_mpm_safe_kill(MPM_CHILD_PID(index), 0) == APR_SUCCESS) {
active_children = 1;
/* Having just one child is enough to stay around */
break;
}
}
} while (!shutdown_pending && active_children &&
(!ap_graceful_shutdown_timeout || apr_time_now() < cutoff));
        /* We might be here because we received SIGTERM; either way,
         * try to make sure that all of our processes are really dead.
         */
for (i = 0; i < num_buckets; i++) {
ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_RESTART);
}
ap_reclaim_child_processes(1, worker_note_child_killed);
return DONE;
}
/* we've been told to restart */
apr_signal(SIGHUP, SIG_IGN);
if (one_process) {
/* not worth thinking about */
return DONE;
}
/* advance to the next generation */
/* XXX: we really need to make sure this new generation number isn't in
* use by any of the children.
*/
++retained->my_generation;
ap_scoreboard_image->global->running_generation = retained->my_generation;
if (retained->is_graceful) {
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00297)
AP_SIG_GRACEFUL_STRING " received. Doing graceful restart");
/* wake up the children...time to die. But we'll have more soon */
for (i = 0; i < num_buckets; i++) {
ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_GRACEFUL);
}
        /* This is mostly for debugging... so that we know what is still
         * gracefully dealing with existing requests.
         */
}
else {
        /* Kill 'em all.  Since the child acts the same on the parent's SIGTERM
         * and a SIGHUP, we may as well use the same signal, because some user
         * pthreads are stealing signals from us left and right.
         */
for (i = 0; i < num_buckets; i++) {
ap_mpm_podx_killpg(pod[i], ap_daemons_limit, AP_MPM_PODX_RESTART);
}
ap_reclaim_child_processes(1, /* Start with SIGTERM */
worker_note_child_killed);
ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00298)
"SIGHUP received. Attempting to restart");
}
return OK;
}
/* This really should be a post_config hook, but the error log is already
* redirected by that point, so we need to do this in the open_logs phase.
*/
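/* Here we set up the listeners, duplicate them across the listener buckets,
 * and open one pipe-of-death per bucket (skipped in ONE_PROCESS mode).
 */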
static int worker_open_logs(apr_pool_t *p, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
{
int startup = 0;
int level_flags = 0;
apr_status_t rv;
int i;
pconf = p;
/* the reverse of pre_config, we want this only the first time around */
if (retained->module_loads == 1) {
startup = 1;
level_flags |= APLOG_STARTUP;
}
enable_default_listener = 0;
if ((num_listensocks = ap_setup_listeners(ap_server_conf)) < 1) {
ap_log_error(APLOG_MARK, APLOG_ALERT | level_flags, 0,
(startup ? NULL : s),
"no listening sockets available, shutting down");
return DONE;
}
enable_default_listener = 1;
ap_duplicate_listeners(ap_server_conf, pconf, num_buckets);
pod = apr_palloc(pconf, sizeof(ap_pod_t *) * num_buckets);
if (!one_process) {
for (i = 0; i < num_buckets; i++) {
if ((rv = ap_mpm_podx_open(pconf, &pod[i]))) {
ap_log_error(APLOG_MARK, APLOG_CRIT | level_flags, rv,
(startup ? NULL : s),
"could not open pipe-of-death");
return DONE;
}
}
}
return OK;
}
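/* pre_config hook: pick the run mode from the -D defines, allocate the
 * retained state that survives restarts, size num_buckets from the number
 * of online CPU cores when SO_REUSEPORT is available (one bucket per eight
 * cores), detach from the terminal on the second configuration pass, and
 * reset every directive to its compiled-in default.
 */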
static int worker_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
apr_pool_t *ptemp)
{
int no_detach, debug, foreground;
apr_status_t rv;
const char *userdata_key = "mpm_worker_module";
int i;
mpm_state = AP_MPMQ_STARTING;
debug = ap_exists_config_define("DEBUG");
if (debug) {
foreground = one_process = 1;
no_detach = 0;
}
else {
one_process = ap_exists_config_define("ONE_PROCESS");
no_detach = ap_exists_config_define("NO_DETACH");
foreground = ap_exists_config_define("FOREGROUND");
}
ap_mutex_register(pconf, AP_ACCEPT_MUTEX_TYPE, NULL, APR_LOCK_DEFAULT, 0);
/* sigh, want this only the second time around */
retained = ap_retained_data_get(userdata_key);
if (!retained) {
retained = ap_retained_data_create(userdata_key, sizeof(*retained));
retained->max_daemons_limit = -1;
}
if (!retained->is_graceful) {
num_buckets = 1;
#ifdef _SC_NPROCESSORS_ONLN
if (have_so_reuseport) {
int num_online_cores = sysconf(_SC_NPROCESSORS_ONLN);
if (num_online_cores > 8) {
num_buckets = num_online_cores / 8;
}
}
#endif
}
++retained->module_loads;
if (retained->module_loads == 2) {
if (!one_process && !foreground) {
            /* before we detach, set up crash handlers to log to the error log */
ap_fatal_signal_setup(ap_server_conf, pconf);
rv = apr_proc_detach(no_detach ? APR_PROC_DETACH_FOREGROUND
: APR_PROC_DETACH_DAEMONIZE);
if (rv != APR_SUCCESS) {
ap_log_error(APLOG_MARK, APLOG_CRIT, rv, NULL, APLOGNO(00299)
"apr_proc_detach failed");
return HTTP_INTERNAL_SERVER_ERROR;
}
}
retained->idle_spawn_rate = apr_palloc(pconf, sizeof(int) * num_buckets);
        for (i = 0; i < num_buckets; i++) {
retained->idle_spawn_rate[i] = 1;
}
}
parent_pid = ap_my_pid = getpid();
ap_listen_pre_config();
ap_daemons_to_start = DEFAULT_START_DAEMON;
min_spare_threads = DEFAULT_MIN_FREE_DAEMON * DEFAULT_THREADS_PER_CHILD;
max_spare_threads = DEFAULT_MAX_FREE_DAEMON * DEFAULT_THREADS_PER_CHILD;
server_limit = DEFAULT_SERVER_LIMIT;
thread_limit = DEFAULT_THREAD_LIMIT;
ap_daemons_limit = server_limit;
threads_per_child = DEFAULT_THREADS_PER_CHILD;
max_workers = ap_daemons_limit * threads_per_child;
had_healthy_child = 0;
ap_extended_status = 0;
return OK;
}
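/* check_config hook: clamp ServerLimit, ThreadLimit, ThreadsPerChild,
 * MaxRequestWorkers, StartServers and MinSpareThreads into a consistent
 * range, warning on the console during startup and in the error log on a
 * restart; ServerLimit and ThreadLimit may not change across restarts.
 */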
static int worker_check_config(apr_pool_t *p, apr_pool_t *plog,
apr_pool_t *ptemp, server_rec *s)
{
int startup = 0;
/* the reverse of pre_config, we want this only the first time around */
if (retained->module_loads == 1) {
startup = 1;
}
if (server_limit > MAX_SERVER_LIMIT) {
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00300)
"WARNING: ServerLimit of %d exceeds compile-time "
"limit of", server_limit);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" %d servers, decreasing to %d.",
MAX_SERVER_LIMIT, MAX_SERVER_LIMIT);
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00301)
"ServerLimit of %d exceeds compile-time limit "
"of %d, decreasing to match",
server_limit, MAX_SERVER_LIMIT);
}
server_limit = MAX_SERVER_LIMIT;
}
else if (server_limit < 1) {
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00302)
"WARNING: ServerLimit of %d not allowed, "
"increasing to 1.", server_limit);
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00303)
"ServerLimit of %d not allowed, increasing to 1",
server_limit);
}
server_limit = 1;
}
/* you cannot change ServerLimit across a restart; ignore
* any such attempts
*/
if (!retained->first_server_limit) {
retained->first_server_limit = server_limit;
}
else if (server_limit != retained->first_server_limit) {
/* don't need a startup console version here */
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00304)
"changing ServerLimit to %d from original value of %d "
"not allowed during restart",
server_limit, retained->first_server_limit);
server_limit = retained->first_server_limit;
}
if (thread_limit > MAX_THREAD_LIMIT) {
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00305)
"WARNING: ThreadLimit of %d exceeds compile-time "
"limit of", thread_limit);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" %d threads, decreasing to %d.",
MAX_THREAD_LIMIT, MAX_THREAD_LIMIT);
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00306)
"ThreadLimit of %d exceeds compile-time limit "
"of %d, decreasing to match",
thread_limit, MAX_THREAD_LIMIT);
}
thread_limit = MAX_THREAD_LIMIT;
}
else if (thread_limit < 1) {
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00307)
"WARNING: ThreadLimit of %d not allowed, "
"increasing to 1.", thread_limit);
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00308)
"ThreadLimit of %d not allowed, increasing to 1",
thread_limit);
}
thread_limit = 1;
}
/* you cannot change ThreadLimit across a restart; ignore
* any such attempts
*/
if (!retained->first_thread_limit) {
retained->first_thread_limit = thread_limit;
}
else if (thread_limit != retained->first_thread_limit) {
/* don't need a startup console version here */
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00309)
"changing ThreadLimit to %d from original value of %d "
"not allowed during restart",
thread_limit, retained->first_thread_limit);
thread_limit = retained->first_thread_limit;
}
if (threads_per_child > thread_limit) {
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00310)
"WARNING: ThreadsPerChild of %d exceeds ThreadLimit "
"of", threads_per_child);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" %d threads, decreasing to %d.",
thread_limit, thread_limit);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" To increase, please see the ThreadLimit "
"directive.");
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00311)
"ThreadsPerChild of %d exceeds ThreadLimit "
"of %d, decreasing to match",
threads_per_child, thread_limit);
}
threads_per_child = thread_limit;
}
else if (threads_per_child < 1) {
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00312)
"WARNING: ThreadsPerChild of %d not allowed, "
"increasing to 1.", threads_per_child);
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00313)
"ThreadsPerChild of %d not allowed, increasing to 1",
threads_per_child);
}
threads_per_child = 1;
}
if (max_workers < threads_per_child) {
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00314)
"WARNING: MaxRequestWorkers of %d is less than "
"ThreadsPerChild of", max_workers);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" %d, increasing to %d. MaxRequestWorkers must be at "
"least as large",
threads_per_child, threads_per_child);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" as the number of threads in a single server.");
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00315)
"MaxRequestWorkers of %d is less than ThreadsPerChild "
"of %d, increasing to match",
max_workers, threads_per_child);
}
max_workers = threads_per_child;
}
ap_daemons_limit = max_workers / threads_per_child;
if (max_workers % threads_per_child) {
int tmp_max_workers = ap_daemons_limit * threads_per_child;
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00316)
"WARNING: MaxRequestWorkers of %d is not an integer "
"multiple of", max_workers);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" ThreadsPerChild of %d, decreasing to nearest "
"multiple %d,", threads_per_child,
tmp_max_workers);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" for a maximum of %d servers.",
ap_daemons_limit);
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00317)
"MaxRequestWorkers of %d is not an integer multiple of "
"ThreadsPerChild of %d, decreasing to nearest "
"multiple %d", max_workers, threads_per_child,
tmp_max_workers);
}
max_workers = tmp_max_workers;
}
if (ap_daemons_limit > server_limit) {
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00318)
"WARNING: MaxRequestWorkers of %d would require %d "
"servers and ", max_workers, ap_daemons_limit);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" would exceed ServerLimit of %d, decreasing to %d.",
server_limit, server_limit * threads_per_child);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" To increase, please see the ServerLimit "
"directive.");
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00319)
"MaxRequestWorkers of %d would require %d servers and "
"exceed ServerLimit of %d, decreasing to %d",
max_workers, ap_daemons_limit, server_limit,
server_limit * threads_per_child);
}
ap_daemons_limit = server_limit;
}
else if (ap_daemons_limit < num_buckets) {
/* Don't thrash since num_buckets depends on
* the system and the number of CPU cores.
*/
ap_daemons_limit = num_buckets;
}
/* ap_daemons_to_start > ap_daemons_limit checked in worker_run() */
if (ap_daemons_to_start < 1) {
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00320)
"WARNING: StartServers of %d not allowed, "
"increasing to 1.", ap_daemons_to_start);
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00321)
"StartServers of %d not allowed, increasing to 1",
ap_daemons_to_start);
}
ap_daemons_to_start = 1;
}
if (ap_daemons_to_start < num_buckets) {
/* Don't thrash since num_buckets depends on
* the system and the number of CPU cores.
*/
ap_daemons_to_start = num_buckets;
}
if (min_spare_threads < 1) {
if (startup) {
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL, APLOGNO(00322)
"WARNING: MinSpareThreads of %d not allowed, "
"increasing to 1", min_spare_threads);
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" to avoid almost certain server failure.");
ap_log_error(APLOG_MARK, APLOG_WARNING | APLOG_STARTUP, 0, NULL,
" Please read the documentation.");
} else {
ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(00323)
"MinSpareThreads of %d not allowed, increasing to 1",
min_spare_threads);
}
min_spare_threads = 1;
}
if (min_spare_threads < num_buckets) {
/* Don't thrash since num_buckets depends on
* the system and the number of CPU cores.
*/
min_spare_threads = num_buckets;
}
/* max_spare_threads < min_spare_threads + threads_per_child
* checked in worker_run()
*/
return OK;
}
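/* Hook registration; open_logs and pre_config are registered REALLY_FIRST
 * for the reasons given in the comments below.
 */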
static void worker_hooks(apr_pool_t *p)
{
/* Our open_logs hook function must run before the core's, or stderr
* will be redirected to a file, and the messages won't print to the
* console.
*/
static const char *const aszSucc[] = {"core.c", NULL};
one_process = 0;
ap_force_set_tz(p);
ap_hook_open_logs(worker_open_logs, NULL, aszSucc, APR_HOOK_REALLY_FIRST);
/* we need to set the MPM state before other pre-config hooks use MPM query
* to retrieve it, so register as REALLY_FIRST
*/
ap_hook_pre_config(worker_pre_config, NULL, NULL, APR_HOOK_REALLY_FIRST);
ap_hook_check_config(worker_check_config, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_mpm(worker_run, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_mpm_query(worker_query, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_mpm_get_name(worker_get_name, NULL, NULL, APR_HOOK_MIDDLE);
}
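/* Directive handlers: each simply parses its integer argument in the global
 * configuration context; range checking and cross-directive consistency are
 * deferred to worker_check_config().
 */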
static const char *set_daemons_to_start(cmd_parms *cmd, void *dummy,
const char *arg)
{
const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
if (err != NULL) {
return err;
}
ap_daemons_to_start = atoi(arg);
return NULL;
}
static const char *set_min_spare_threads(cmd_parms *cmd, void *dummy,
const char *arg)
{
const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
if (err != NULL) {
return err;
}
min_spare_threads = atoi(arg);
return NULL;
}
static const char *set_max_spare_threads(cmd_parms *cmd, void *dummy,
const char *arg)
{
const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
if (err != NULL) {
return err;
}
max_spare_threads = atoi(arg);
return NULL;
}
static const char *set_max_workers (cmd_parms *cmd, void *dummy,
const char *arg)
{
const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
if (err != NULL) {
return err;
}
if (!strcasecmp(cmd->cmd->name, "MaxClients")) {
ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, APLOGNO(00324)
"MaxClients is deprecated, use MaxRequestWorkers "
"instead.");
}
max_workers = atoi(arg);
return NULL;
}
static const char *set_threads_per_child (cmd_parms *cmd, void *dummy,
const char *arg)
{
const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
if (err != NULL) {
return err;
}
threads_per_child = atoi(arg);
return NULL;
}
static const char *set_server_limit (cmd_parms *cmd, void *dummy, const char *arg)
{
const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
if (err != NULL) {
return err;
}
server_limit = atoi(arg);
return NULL;
}
static const char *set_thread_limit (cmd_parms *cmd, void *dummy, const char *arg)
{
const char *err = ap_check_cmd_context(cmd, GLOBAL_ONLY);
if (err != NULL) {
return err;
}
thread_limit = atoi(arg);
return NULL;
}
static const command_rec worker_cmds[] = {
LISTEN_COMMANDS,
AP_INIT_TAKE1("StartServers", set_daemons_to_start, NULL, RSRC_CONF,
"Number of child processes launched at server startup"),
AP_INIT_TAKE1("MinSpareThreads", set_min_spare_threads, NULL, RSRC_CONF,
"Minimum number of idle threads, to handle request spikes"),
AP_INIT_TAKE1("MaxSpareThreads", set_max_spare_threads, NULL, RSRC_CONF,
"Maximum number of idle threads"),
AP_INIT_TAKE1("MaxRequestWorkers", set_max_workers, NULL, RSRC_CONF,
"Maximum number of threads alive at the same time"),
AP_INIT_TAKE1("MaxClients", set_max_workers, NULL, RSRC_CONF,
"Deprecated name of MaxRequestWorkers"),
AP_INIT_TAKE1("ThreadsPerChild", set_threads_per_child, NULL, RSRC_CONF,
"Number of threads each child creates"),
AP_INIT_TAKE1("ServerLimit", set_server_limit, NULL, RSRC_CONF,
"Maximum number of child processes for this run of Apache"),
AP_INIT_TAKE1("ThreadLimit", set_thread_limit, NULL, RSRC_CONF,
"Maximum number of worker threads per child process for this run of Apache - Upper limit for ThreadsPerChild"),
AP_GRACEFUL_SHUTDOWN_TIMEOUT_COMMAND,
{ NULL }
};
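/* The module definition: this MPM has no per-directory or per-server config
 * structures, only the directive table above and worker_hooks() to register
 * its hooks.
 */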
AP_DECLARE_MODULE(mpm_worker) = {
MPM20_MODULE_STUFF,
NULL, /* hook to run before apache parses args */
NULL, /* create per-directory config structure */
NULL, /* merge per-directory config structures */
NULL, /* create per-server config structure */
NULL, /* merge per-server config structures */
worker_cmds, /* command apr_table_t */
worker_hooks /* register_hooks */
};