mod_proxy.h revision ac7de32d845d22f83ce5b6a4c584110772fe1c1b
#else /*APR_CHARSET_EBCDIC*/
#endif /*APR_CHARSET_EBCDIC*/

/* default Max-Forwards header setting */
/* Set this to -1, which complies with RFC2616 by not setting
 * max-forwards if the client didn't send it to us.
 */

/* static information about a remote proxy */
    const char *scheme;     /* the schemes handled by this proxy, or '*' */
    const char *protocol;   /* the scheme used to talk to this proxy */
    const char *hostname;   /* the hostname of this proxy */
    int use_regex;          /* simple boolean. True if we have a regex pattern */

    const char *domain;     /* domain name to use in absence of a domain name in the request */
    int req;                /* true if proxy requests are enabled */
    } viaopt;               /* how to deal with proxy Via: headers */
    } badopt;               /* how to deal with bad headers */

    const char *p;          /* The path */

    /* ProxyPassReverse and friends are documented as working inside
     * <Location>.  But in fact they never have done in the case of
     * more than one <Location>, because the server_conf can't see it.
     * We need to move them to the per-dir config.
     * Discussed in February 2005:
     */
    signed char p_is_fnmatch;   /* Is the path an fnmatch candidate? */

    /* the following setting masks the error page
     * returned from the 'proxied server' and just
     * forwards the status code upwards.
     * This allows the main server (us) to generate
     * the error page, (so it will look like an error
     * returned from the rest of the system)
     */

    /* if we interpolate env vars per-request, we'll need a per-request
     * copy of the reverse proxy config
     */
    request_rec     *r;         /* Request record of the backend request
                                 * that is used over the backend connection. */
    void            *data;      /* per scheme connection data */
    void            *forward;   /* opaque forward proxy data */
    int             close:1;    /* Close 'this' connection */
    int             need_flush:1; /* Flag to decide whether we need to flush
                                 * the filter chain or not */
    int             inreslist:1; /* connection in apr_reslist? */
    apr_pool_t      *pool;      /* The pool used in constructor and
                                 * destructor calls */
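/*
 * Illustrative sketch (not part of this header): how a scheme handler can
 * keep per-scheme state on the pooled backend connection, using only the
 * data and pool members shown above.  The my_scheme_state type and the
 * get_scheme_state() helper are hypothetical names introduced here, and the
 * usual mod_proxy/apr includes are assumed.
 */
typedef struct {
    int handshake_done;             /* example per-connection flag */
} my_scheme_state;

static my_scheme_state *get_scheme_state(proxy_conn_rec *conn)
{
    if (!conn->data) {
        /* Allocate from conn->pool so the state lives exactly as long as
         * the pooled connection does. */
        conn->data = apr_pcalloc(conn->pool, sizeof(my_scheme_state));
    }
    return conn->data;
}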
/* worker status flags */
/* NOTE: these check the shared status */

/* default worker retry timeout in seconds */

/* Some max char string sizes, for shm fields */

/* Runtime worker status information. Shared in scoreboard */
    int             lbset;      /* load balancer cluster set */
    int             retries;    /* number of retries on this worker */
    int             min;        /* Desired minimum number of available connections */
    int             smax;       /* Soft maximum on the total number of connections */
    int             hmax;       /* Hard maximum on the total number of connections */
    int             flush_wait; /* poll wait time in microseconds if flush_auto */
    int             index;      /* shm array index */
    unsigned int    hash;       /* hash of worker name */
    unsigned int    status;     /* worker status bitfield */
                                /* ... may be available while exceeding the
                                 * soft limit */
    void            *context;   /* general purpose storage */
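/*
 * Illustrative sketch (not part of this header): reading the shared status
 * bitfield noted above.  It assumes the worker->s pointer to the shared
 * struct and the PROXY_WORKER_IS_USABLE() convenience macro as found in
 * current mod_proxy.h; treat both as assumptions for this revision.
 */
static int worker_can_be_elected(proxy_worker *worker)
{
    /* "these check the shared status", i.e. the scoreboard copy of the
     * bitfield, not the per-process local_status below */
    return PROXY_WORKER_IS_USABLE(worker);
}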
/* Worker configuration */
    unsigned int    hash;         /* hash of worker name */
    unsigned int    local_status; /* status of per-process worker */
    void            *context;     /* general purpose storage */

    /* Time to wait (in microseconds) to find out if more data is currently
     * available at the backend.
     */
    int             index;        /* shm array index */

    unsigned int    sticky_force:1; /* Disable failover for sticky sessions */
    unsigned int    scolonsep:1;  /* true if ';' seps sticky session paths */
    int             growth;       /* number of workers that can be added post-config */
    int             max_workers;  /* maximum number of allowed workers */
    const char      *name;        /* name of the load balancer */
    const char      *sname;       /* filesystem safe balancer name */
    void            *context;     /* general purpose storage */

    const char      *name;        /* name of the load balancer method */
    void            *context;     /* general purpose storage */
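/*
 * Illustrative sketch (not part of this header): registering a load balancer
 * method as a provider.  Only the name and context members appear in the
 * fragment above; the election callback ("finder") in second position and
 * the PROXY_LBMETHOD provider group follow current mod_proxy conventions and
 * should be treated as assumptions for this revision.
 */
static proxy_worker *pick_first_worker(proxy_balancer *balancer, request_rec *r)
{
    /* election logic elided; return the chosen worker or NULL */
    return NULL;
}

static const proxy_balancer_method my_lbmethod = {
    "myfirst",              /* name of the load balancer method */
    pick_first_worker,      /* assumed election callback ("finder") */
    NULL                    /* context: general purpose storage */
};

static void my_lbmethod_register_hooks(apr_pool_t *p)
{
    ap_register_provider(p, PROXY_LBMETHOD, "myfirst", "0", &my_lbmethod);
}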
/* Create a set of PROXY_DECLARE(type), PROXY_DECLARE_NONSTD(type) and
 * PROXY_DECLARE_DATA with appropriate export and import tags for the platform
 */

/**
 * Hook an optional proxy hook.  Unlike static hooks, this uses a macro
 * instead of a function.
 */

/**
 * It will return the most suitable worker at the moment
 * and corresponding balancer.
 * and then the scheme_handler is called.
 */

/**
 * It is called after request for updating runtime balancer status.
 */

/**
 * It is called after all proxy processing has been done.  This gives other
 * modules a chance to create default content on failure, for example
 */

/* DEPRECATED (will be replaced with ap_proxy_connect_backend) */

/* Header mapping functions, and a typedef of their signature */

/* Connection pool API */

/**
 * Get the worker from proxy configuration
 * @param p        memory pool used for finding worker
 * @param balancer the balancer that the worker belongs to
 * @param conf     current proxy server configuration
 * @param url      url to find the worker from
 * @return         proxy_worker or NULL if not found
 */

/**
 * Define and Allocate space for the worker to proxy configuration
 * @param p         memory pool to allocate worker from
 * @param worker    the new worker
 * @param balancer  the balancer that the worker belongs to
 * @param conf      current proxy server configuration
 * @param url       url containing worker name
 * @param do_malloc true if shared struct should be malloced
 * @return          error message or NULL if successful (*worker is new worker)
 */

/**
 * Share a defined proxy worker via shm
 * @param worker worker to be shared
 * @param shm    location of shared info
 * @param i      index into shm
 * @return       APR_SUCCESS or error code
 */

/**
 * Initialize the worker by setting up worker connection pool and mutex
 * @param worker worker to initialize
 * @param s      current server record
 * @param p      memory pool used for mutex and connection pool
 * @return       APR_SUCCESS or error code
 */

/**
 * Verifies valid balancer name (eg: balancer://foo)
 * @param name name to test
 * @param i    number of chars to test; 0 for all.
 */

/**
 * Get the balancer from proxy configuration
 * @param p    memory pool used for temporary storage while finding balancer
 * @param conf current proxy server configuration
 * @param url  url to find the worker from; must have balancer:// prefix
 * @return     proxy_balancer or NULL if not found
 */

/**
 * Define and Allocate space for the balancer to proxy configuration
 * @param p         memory pool to allocate balancer from
 * @param balancer  the new balancer
 * @param conf      current proxy server configuration
 * @param url       url containing balancer name
 * @param do_malloc true if shared struct should be malloced
 * @return          error message or NULL if successful
 */

/**
 * Share a defined proxy balancer via shm
 * @param balancer balancer to be shared
 * @param shm      location of shared info
 * @param i        index into shm
 * @return         APR_SUCCESS or error code
 */

/**
 * Initialize the balancer as needed
 * @param balancer balancer to initialize
 * @param s        current server record
 * @param p        memory pool used for mutex and connection pool
 * @return         APR_SUCCESS or error code
 */
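/*
 * Illustrative sketch (not part of this header): the usual "reuse or define"
 * pattern for a worker at configuration time, following the parameter lists
 * documented above.  The function names and exact signatures
 * (ap_proxy_get_worker, ap_proxy_define_worker) are taken from httpd's
 * mod_proxy and are an assumption for this particular revision.
 */
static const char *add_backend_worker(cmd_parms *cmd, proxy_server_conf *conf,
                                      const char *url)
{
    proxy_worker *worker;

    /* Reuse an already defined worker for this url, if any. */
    worker = ap_proxy_get_worker(cmd->temp_pool, NULL, conf, url);
    if (worker == NULL) {
        /* Otherwise define a new one; a non-NULL return is an error message. */
        const char *err = ap_proxy_define_worker(cmd->pool, &worker, NULL,
                                                 conf, url, 0 /* do_malloc */);
        if (err != NULL) {
            return apr_pstrcat(cmd->temp_pool, "ProxyPass ", err, NULL);
        }
    }
    return NULL;
}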
/**
 * Get the most suitable worker and/or balancer for the request
 * @param worker   worker used for processing request
 * @param balancer balancer used for processing request
 * @param r        current request
 * @param conf     current proxy server configuration
 * @param url      request url that balancer can rewrite.
 * @return         OK or HTTP_XXX error
 * @note It calls balancer pre_request hook if the url starts with balancer://
 *       The balancer then rewrites the url to particular worker, like http://host:port
 */

/**
 * Post request worker and balancer cleanup
 * @param worker   worker used for processing request
 * @param balancer balancer used for processing request
 * @param r        current request
 * @param conf     current proxy server configuration
 * @return         OK or HTTP_XXX error
 * @note Whenever the pre_request is called, the post_request has to be
 *       called too.
 */

/**
 * Request status function
 * @param status status of proxy request (result)
 * @param r      the request to obtain the status for
 */

/**
 * Determine backend hostname and port
 * @param p       memory pool used for processing
 * @param r       current request
 * @param conf    current proxy server configuration
 * @param worker  worker used for processing request
 * @param conn    proxy connection struct
 * @param uri     processed uri
 * @param proxyname are we connecting directly or via a proxy
 * @param proxyport proxy host port
 * @param server_portstr      Via headers server port
 * @param server_portstr_size size of the server_portstr buffer
 * @return        OK or HTTP_XXX error
 */

/**
 * Mark a worker for retry
 * @param proxy_function calling proxy scheme (http, ajp, ...)
 * @param worker         worker used for retrying
 * @param s              current server record
 * @return               OK if marked for retry, DECLINED otherwise
 * @note The error status of the worker will be cleared if the retry interval
 *       has elapsed since the last error.
 */

/**
 * Acquire a connection from worker connection pool
 * @param proxy_function calling proxy scheme (http, ajp, ...)
 * @param conn           acquired connection
 * @param worker         worker used for obtaining connection
 * @param s              current server record
 * @return               OK or HTTP_XXX error
 * @note If the connection limit has been reached, the function will
 *       block until a connection becomes available or the timeout has
 *       expired.
 */

/**
 * Release a connection back to worker connection pool
 * @param proxy_function calling proxy scheme (http, ajp, ...)
 * @param conn           acquired connection
 * @param s              current server record
 * @return               OK or HTTP_XXX error
 * @note The connection will be closed if conn->close_on_release is set
 */

/**
 * Make a connection to the backend
 * @param proxy_function calling proxy scheme (http, ajp, ...)
 * @param conn           acquired connection
 * @param worker         connection worker
 * @param s              current server record
 * @return               OK or HTTP_XXX error
 * @note In case the socket already exists for conn, just check the link
 *       status.
 */

/**
 * Make a connection record for backend connection
 * @param proxy_function calling proxy scheme (http, ajp, ...)
 * @param conn           acquired connection
 * @param c              client connection record
 * @param s              current server record
 * @return               OK or HTTP_XXX error
 * @note The function will return immediately if conn->connection
 *       is already created.
 */

/**
 * Signal the upstream chain that the connection to the backend broke in the
 * middle of the response.  This is done by sending an error bucket with
 * status HTTP_BAD_GATEWAY and an EOS bucket up the filter chain.
 * @param r       current request record of client request
 * @param brigade The brigade that is sent through the output filter chain
 */
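/*
 * Illustrative sketch (not part of this header): the request-time flow a
 * scheme handler typically follows with the connection pool API documented
 * above.  Function names and signatures (ap_proxy_acquire_connection,
 * ap_proxy_determine_connection, ap_proxy_connect_backend,
 * ap_proxy_connection_create, ap_proxy_release_connection) are taken from
 * httpd's mod_proxy and should be treated as assumptions for this revision.
 */
static int my_scheme_handler(request_rec *r, proxy_worker *worker,
                             proxy_server_conf *conf, char *url,
                             const char *proxyname, apr_port_t proxyport)
{
    int status;
    char server_portstr[32];
    apr_uri_t uri;
    proxy_conn_rec *backend = NULL;
    const char *scheme = "my";      /* calling proxy scheme, used in logs */

    /* 1. Take a connection from the worker's pool; this may block until a
     *    connection is free or the acquire timeout is hit. */
    status = ap_proxy_acquire_connection(scheme, &backend, worker, r->server);
    if (status != OK) {
        return status;
    }

    /* 2. Work out the backend host/port, directly or via a forward proxy. */
    status = ap_proxy_determine_connection(r->pool, r, conf, worker, backend,
                                           &uri, &url, proxyname, proxyport,
                                           server_portstr,
                                           sizeof(server_portstr));
    if (status == OK) {
        /* 3. Open (or reuse) the socket, then wrap it in a conn_rec. */
        if (ap_proxy_connect_backend(scheme, backend, worker, r->server)) {
            status = HTTP_SERVICE_UNAVAILABLE;
        }
        else {
            status = ap_proxy_connection_create(scheme, backend,
                                                r->connection, r->server);
        }
    }

    /* ... exchange the request and response with the backend here ... */

    /* 4. Always hand the connection back to the pool; set the close bit
     *    first if the protocol does not allow reuse. */
    backend->close = 1;
    ap_proxy_release_connection(scheme, backend, r->server);
    return status;
}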
/**
 * Transform buckets from one bucket allocator to another one by creating a
 * transient bucket for each data bucket and let it use the data read from
 * the old bucket.  Metabuckets are transformed by just recreating them.
 * Attention: Currently only the following bucket types are handled:
 * If another bucket type is found its type is logged as a debug message
 * and APR_EGENERAL is returned.
 * @param r    current request record of client request.  Only used for logging
 * @param from the brigade that contains the buckets to transform
 * @param to   the brigade that will receive the transformed buckets
 * @return     APR_SUCCESS if all buckets could be transformed,
 *             APR_EGENERAL otherwise
 */

/**
 * Return a hash based on the passed string
 * @param str    string to produce hash from
 * @param method hashing method to use
 * @return       hash as unsigned int
 */

/**
 * Set/unset the worker status bitfield depending on flag
 * @param set set or unset bit
 * @return    APR_SUCCESS if valid flag
 */

/**
 * Create readable representation of worker status bitfield
 * @return string representation of status
 */

/* The number of dynamic workers that can be added when reconfiguring.
 * If this limit is reached you must stop and restart the server.
 */

/**
 * Calculate maximum number of workers in scoreboard.
 * @return number of workers to allocate in the scoreboard
 */
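/*
 * Illustrative sketch (not part of this header): producing the worker name
 * hash stored in the shared "hash" field above.  The ap_proxy_hashfunc()
 * name and the PROXY_HASHFUNC_DEFAULT method constant follow httpd's
 * mod_proxy and are assumptions for this revision.
 */
static unsigned int worker_name_hash(const char *name)
{
    return ap_proxy_hashfunc(name, PROXY_HASHFUNC_DEFAULT);
}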