http_protocol.c revision 33bdcae1f7a1a65e351dda2a766a0cf28b1e695d
/* ====================================================================
* The Apache Software License, Version 1.1
*
* Copyright (c) 2000-2003 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Apache" and "Apache Software Foundation" must
* not be used to endorse or promote products derived from this
* software without prior written permission. For written
* permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* nor may "Apache" appear in their name, without prior written
* permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
 * <http://www.apache.org/>.
 *
* Portions of this software are based upon public domain software
* originally written at the National Center for Supercomputing Applications,
* University of Illinois, Urbana-Champaign.
*/
/*
* http_protocol.c --- routines which directly communicate with the client.
*
* Code originally by Rob McCool; much redone by Robert S. Thau
* and the Apache Software Foundation.
*/
#include "apr.h"
#include "apr_strings.h"
#include "apr_buckets.h"
#include "apr_lib.h"
#include "apr_signal.h"
#define APR_WANT_STDIO /* for sscanf */
#define APR_WANT_STRFUNC
#define APR_WANT_MEMFUNC
#include "apr_want.h"
#define CORE_PRIVATE
#include "util_filter.h"
#include "ap_config.h"
#include "httpd.h"
#include "http_config.h"
#include "http_core.h"
#include "http_protocol.h"
#include "http_main.h"
#include "http_request.h"
#include "http_vhost.h"
#include "http_log.h" /* For errors detected in basic auth common
* support code... */
#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
#include "util_charset.h"
#include "util_ebcdic.h"
#include "util_time.h"
#include "mod_core.h"
#if APR_HAVE_STDARG_H
#include <stdarg.h>
#endif
#if APR_HAVE_UNISTD_H
#include <unistd.h>
#endif
/* New Apache routine to map status codes into array indices
* e.g. 100 -> 0, 101 -> 1, 200 -> 2 ...
* The number of status lines must equal the value of RESPONSE_CODES (httpd.h)
* and must be listed in order.
*/
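/* For illustration (not part of the original source): with the table and
 * LEVEL_* offsets below, ap_index_of_response(404) should resolve to
 * LEVEL_400 + 4 == 23, i.e. status_lines[23] == "404 Not Found", while a
 * code that falls in a gap, such as 419, lands on its "unused" slot.
 */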
#ifdef UTS21
/* The second const triggers an assembler bug on UTS 2.1.
* Another workaround is to move some code out of this file into another,
* but this is easier. Dave Dykstra, 3/31/99
*/
static const char * status_lines[RESPONSE_CODES] =
#else
static const char * const status_lines[RESPONSE_CODES] =
#endif
{
"100 Continue",
"101 Switching Protocols",
"102 Processing",
#define LEVEL_200 3
"200 OK",
"201 Created",
"202 Accepted",
"203 Non-Authoritative Information",
"204 No Content",
"205 Reset Content",
"206 Partial Content",
"207 Multi-Status",
#define LEVEL_300 11
"300 Multiple Choices",
"301 Moved Permanently",
"302 Found",
"303 See Other",
"304 Not Modified",
"305 Use Proxy",
"306 unused",
"307 Temporary Redirect",
#define LEVEL_400 19
"400 Bad Request",
"401 Authorization Required",
"402 Payment Required",
"403 Forbidden",
"404 Not Found",
"405 Method Not Allowed",
"406 Not Acceptable",
"407 Proxy Authentication Required",
"408 Request Time-out",
"409 Conflict",
"410 Gone",
"411 Length Required",
"412 Precondition Failed",
"413 Request Entity Too Large",
"414 Request-URI Too Large",
"415 Unsupported Media Type",
"416 Requested Range Not Satisfiable",
"417 Expectation Failed",
"418 unused",
"419 unused",
"420 unused",
"421 unused",
"422 Unprocessable Entity",
"423 Locked",
"424 Failed Dependency",
/* This is a hack, but it is required for ap_index_of_response
* to work with 426.
*/
"425 No code",
"426 Upgrade Required",
#define LEVEL_500 46
"500 Internal Server Error",
"501 Method Not Implemented",
"502 Bad Gateway",
"503 Service Temporarily Unavailable",
"504 Gateway Time-out",
"505 HTTP Version Not Supported",
"506 Variant Also Negotiates",
"507 Insufficient Storage",
"508 unused",
"509 unused",
"510 Not Extended"
};
/* The index of the first bit field that is used to index into a limit
* bitmask. M_INVALID + 1 to METHOD_NUMBER_LAST.
 */
#define METHOD_NUMBER_FIRST (M_INVALID + 1)
/* The max method number. Method numbers are used to shift bitmasks,
 * so this cannot exceed 63; a mask with all 64 bits set equals -1, which is
 * reserved as a special flag, so the last usable bit has index 62.
*/
#define METHOD_NUMBER_LAST 62
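/* Illustrative sketch (not part of the original source): a method number
 * is used as a shift count into a 64-bit limit bitmask, assuming the
 * AP_METHOD_BIT constant from httpd.h:
 *
 *     apr_int64_t limited = 0;
 *     limited |= (AP_METHOD_BIT << methnum);             limit this method
 *     if (limited & (AP_METHOD_BIT << methnum)) { ... }  is it limited?
 *
 * which is why the highest usable method number is 62.
 */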
{
int ka_sent = 0;
"close");
/* The following convoluted conditional determines whether or not
* the current connection should remain persistent after this response
* (a.k.a. HTTP Keep-Alive) and whether or not the output message
* body should use the HTTP/1.1 chunked transfer-coding. In English,
*
* IF we have not marked this connection as errored;
* and the response body has a defined length due to the status code
* being 304 or 204, the request method being HEAD, already
* having defined Content-Length or Transfer-Encoding: chunked, or
* the request version being HTTP/1.1 and thus capable of being set
* as chunked [we know the (r->chunked = 1) side-effect is ugly];
* and the server configuration enables keep-alive;
* and the server configuration has a reasonable inter-request timeout;
* and there is no maximum # requests or the max hasn't been reached;
* and the response status does not require a close;
* and the response generator has not already indicated close;
* and the client did not request non-persistence (Connection: close);
* and we haven't been configured to ignore the buggy twit
* or they're a buggy twit coming through a HTTP/1.1 proxy
* and the client is requesting an HTTP/1.0-style keep-alive
* or the client claims to be HTTP/1.1 compliant (perhaps a proxy);
* THEN we can be persistent, which requires more headers be output.
*
* Note that the condition evaluation order is extremely important.
*/
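/* A condensed sketch of that decision (illustrative only, not the exact
 * expression below; "wimpy" is the client's Connection: close indication
 * and ka_sent records whether a Keep-Alive token was received):
 *
 *     if (!connection_errored
 *         && response_body_length_is_determinate
 *         && r->server->keep_alive
 *         && r->server->keep_alive_timeout > 0
 *         && (r->server->keep_alive_max == 0
 *             || r->connection->keepalives < r->server->keep_alive_max)
 *         && !ap_status_drops_connection(r->status)
 *         && !wimpy
 *         && (ka_sent || r->proto_num >= HTTP_VERSION(1,1))) {
 *         ... emit Keep-Alive / Connection headers and return 1 ...
 *     }
 *     ... otherwise emit "Connection: close" and return 0 ...
 */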
&& ((r->status == HTTP_NOT_MODIFIED)
|| (r->status == HTTP_NO_CONTENT)
|| r->header_only
|| ap_find_last_token(r->pool,
"Transfer-Encoding"),
"chunked")
&& r->server->keep_alive
&& (r->server->keep_alive_timeout > 0)
&& ((r->server->keep_alive_max == 0)
&& !ap_status_drops_connection(r->status)
&& !wimpy
r->connection->keepalives++;
/* If they sent a Keep-Alive token, send one back */
if (ka_sent) {
if (r->server->keep_alive_max) {
left));
}
else {
}
}
return 1;
}
/* Otherwise, we need to indicate that we will be closing this
* connection immediately after the current response.
*
* We only really need to send "close" to HTTP/1.1 clients, but we
* always send it anyway, because a broken proxy may identify itself
* as HTTP/1.0, but pass our request along with our HTTP/1.1 tag
* to a HTTP/1.1 client. Better safe than sorry.
*/
if (!wimpy) {
}
return 0;
}
{
const char *etag;
/* Check for conditional requests --- note that we only want to do
* this if we are successful so far and we are not processing a
* subrequest or an ErrorDocument.
*
* The order of the checks is important, since ETag checks are supposed
* to be more accurate than checks relative to the modification time.
* However, not all documents are guaranteed to *have* ETags, and some
* might have Last-Modified values w/o ETags, so this gets a little
* complicated.
*/
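/* For example (illustrative request headers, not taken from this file),
 * a revalidating client might send
 *
 *     If-None-Match: "686897696a7c876b7e"
 *     If-Modified-Since: Sat, 29 Oct 1994 19:43:31 GMT
 *
 * and the checks below choose between 200, 304 and 412 responses.
 */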
return OK;
}
/* All of our comparisons must be in seconds, because that's the
* highest time resolution the HTTP specification allows.
*/
/* XXX: we should define a "time unset" constant */
/* If an If-Match request-header field was given
* AND the field value is not "*" (meaning match anything)
* AND if our strong ETag does not match any entity tag in that field,
* respond with a status of 412 (Precondition Failed).
*/
if (if_match[0] != '*'
return HTTP_PRECONDITION_FAILED;
}
}
else {
/* Else if a valid If-Unmodified-Since request-header field was given
* AND the requested resource has been modified since the time
* specified in this field, then the server MUST
* respond with a status of 412 (Precondition Failed).
*/
if (if_unmodified != NULL) {
return HTTP_PRECONDITION_FAILED;
}
}
}
/* If an If-None-Match request-header field was given
* AND the field value is "*" (meaning match anything)
* OR our ETag matches any of the entity tags in that field, fail.
*
* If the request method was GET or HEAD, failure means the server
* SHOULD respond with a 304 (Not Modified) response.
* For all other request methods, failure means the server MUST
* respond with a status of 412 (Precondition Failed).
*
* GET or HEAD allow weak etag comparison, all other methods require
* strong comparison. We can only use weak if it's not a range request.
*/
if (if_nonematch != NULL) {
if (r->method_number == M_GET) {
if (if_nonematch[0] == '*') {
return HTTP_NOT_MODIFIED;
}
if (etag[0] != 'W'
return HTTP_NOT_MODIFIED;
}
}
return HTTP_NOT_MODIFIED;
}
}
}
else if (if_nonematch[0] == '*'
return HTTP_PRECONDITION_FAILED;
}
}
/* Else if a valid If-Modified-Since request-header field was given
* AND it is a GET or HEAD request
* AND the requested resource has not been modified since the time
* specified in this field, then the server MUST
* respond with a status of 304 (Not Modified).
* A date later than the server's current request time is invalid.
*/
else if ((r->method_number == M_GET)
&& ((if_modified_since =
"If-Modified-Since")) != NULL)) {
return HTTP_NOT_MODIFIED;
}
}
return OK;
}
/**
* Singleton registry of additional methods. This maps new method names
* such as "MYGET" to methnums, which are int offsets into bitmasks.
*
* This follows the same technique as standard M_GET, M_POST, etc. These
* are dynamically assigned when modules are loaded and <Limit GET MYGET>
* directives are processed.
*/
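/* Usage sketch (an assumed call site, not shown in this file): a module
 * registers its extension method once and then compares the returned
 * number against r->method_number:
 *
 *     int my_methnum = ap_method_register(p, "MYGET");
 *     ...
 *     if (r->method_number == my_methnum) { ... }
 */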
static int cur_method_number = METHOD_NUMBER_FIRST;
int methnum)
{
}
/* This internal function is used to clear the method registry
* and reset the cur_method_number counter.
*/
{
return APR_SUCCESS;
}
{
/* put all the standard methods into the registry hash to ease the
mapping operations between name and number */
}
{
int *methnum;
if (methods_registry == NULL) {
}
return M_INVALID;
}
/* Check if the method was previously registered. If it was
* return the associated method number.
*/
return *methnum;
if (cur_method_number > METHOD_NUMBER_LAST) {
/* The method registry has run out of dynamically
* assignable method numbers. Log this and return M_INVALID.
*/
"Maximum new request methods %d reached while "
"registering method %s.",
return M_INVALID;
}
return cur_method_number++;
}
#define UNKNOWN_METHOD (-1)
{
/* Note: the following code was generated by the "shilka" tool from
the "cocom" parsing/compilation toolkit. It is an optimized lookup
based on analysis of the input keywords. Postprocessing was done
on the shilka output, but the basic structure and analysis is
from there. Should new HTTP methods be added, then manual insertion
into this code is fine, or simply re-running the shilka tool on
the appropriate input. */
/* Note: it is also quite reasonable to just use our method_registry,
but I'm assuming (probably incorrectly) we want more speed here
(based on the optimizations the previous code was doing). */
switch (len)
{
case 3:
switch (method[0])
{
case 'P':
? M_PUT : UNKNOWN_METHOD);
case 'G':
? M_GET : UNKNOWN_METHOD);
default:
return UNKNOWN_METHOD;
}
case 4:
switch (method[0])
{
case 'H':
? M_GET : UNKNOWN_METHOD);
case 'P':
? M_POST : UNKNOWN_METHOD);
case 'M':
? M_MOVE : UNKNOWN_METHOD);
case 'L':
? M_LOCK : UNKNOWN_METHOD);
case 'C':
? M_COPY : UNKNOWN_METHOD);
default:
return UNKNOWN_METHOD;
}
case 5:
switch (method[2])
{
case 'T':
? M_PATCH : UNKNOWN_METHOD);
case 'R':
? M_MERGE : UNKNOWN_METHOD);
case 'C':
? M_MKCOL : UNKNOWN_METHOD);
case 'B':
? M_LABEL : UNKNOWN_METHOD);
case 'A':
? M_TRACE : UNKNOWN_METHOD);
default:
return UNKNOWN_METHOD;
}
case 6:
switch (method[0])
{
case 'U':
switch (method[5])
{
case 'K':
? M_UNLOCK : UNKNOWN_METHOD);
case 'E':
? M_UPDATE : UNKNOWN_METHOD);
default:
return UNKNOWN_METHOD;
}
case 'R':
? M_REPORT : UNKNOWN_METHOD);
case 'D':
? M_DELETE : UNKNOWN_METHOD);
default:
return UNKNOWN_METHOD;
}
case 7:
switch (method[1])
{
case 'P':
? M_OPTIONS : UNKNOWN_METHOD);
case 'O':
? M_CONNECT : UNKNOWN_METHOD);
case 'H':
? M_CHECKIN : UNKNOWN_METHOD);
default:
return UNKNOWN_METHOD;
}
case 8:
switch (method[0])
{
case 'P':
? M_PROPFIND : UNKNOWN_METHOD);
case 'C':
? M_CHECKOUT : UNKNOWN_METHOD);
default:
return UNKNOWN_METHOD;
}
case 9:
? M_PROPPATCH : UNKNOWN_METHOD);
case 10:
switch (method[0])
{
case 'U':
? M_UNCHECKOUT : UNKNOWN_METHOD);
case 'M':
? M_MKACTIVITY : UNKNOWN_METHOD);
default:
return UNKNOWN_METHOD;
}
case 11:
? M_MKWORKSPACE : UNKNOWN_METHOD);
case 15:
case 16:
default:
return UNKNOWN_METHOD;
}
/* NOTREACHED */
}
/* Get the method number associated with the given string, assumed to
* contain an HTTP method. Returns M_INVALID if not recognized.
*
* This is the first step toward placing method names in a configurable
* list. Hopefully it (and other routines) can eventually be moved to
* something like a mod_http_methods.c, complete with config stuff.
*/
{
if (which != UNKNOWN_METHOD)
return which;
/* check if the method has been dynamically registered */
if (methods_registry != NULL) {
return *methnum;
}
}
return M_INVALID;
}
/*
* Turn a known method number into a name.
*/
{
/* scan through the hash table, looking for a value that matches
the provided method number. */
const void *key;
void *val;
return key;
}
/* it wasn't found in the hash */
return NULL;
}
static long get_chunk_size(char *);
typedef struct http_filter_ctx {
enum {
} state;
int eos_sent;
} http_ctx_t;
/* This is the HTTP_INPUT filter for HTTP requests and responses from
 * proxied servers (mod_proxy). It handles chunked and content-length
 * bodies. This can only be inserted/used after the headers
 * are successfully parsed.
*/
{
apr_bucket *e;
/* just get out of the way of things we don't want. */
}
if (!ctx) {
ctx->limit_used = 0;
/* LimitRequestBody does not apply to proxied responses.
* Consider implementing this check in its own filter.
* Would adding a directive to limit the size of proxied
* responses be useful?
*/
if (!f->r->proxyreq) {
}
else {
}
if (tenc) {
}
}
else if (lenp) {
int conversion_error = 0;
char *endstr;
errno = 0;
/* These checks protect us from over/underflow (the errno check),
 * non-digit chars in the string (excluding leading space)
* (the endstr checks) and a negative number. Depending
* on the strtol implementation, the errno check may also
* trigger on an all whitespace string */
conversion_error = 1;
}
if (conversion_error) {
"Invalid Content-Length");
f->r->pool, f->c->bucket_alloc);
e = apr_bucket_eos_create(f->c->bucket_alloc);
}
/* If we have a limit in effect and we know the C-L ahead of
* time, stop it here if it is invalid.
*/
"Requested content-length of %" APR_OFF_T_FMT
" is larger than the configured limit"
f->r->pool, f->c->bucket_alloc);
e = apr_bucket_eos_create(f->c->bucket_alloc);
}
}
/* If we don't have a request entity indicated by the headers, EOS.
* (BODY_NONE is a valid intermediate state due to trailers,
* but it isn't a valid starting state.)
*
* RFC 2616 Section 4.4 note 5 states that connection-close
* is invalid for a request entity - request bodies must be
* denoted by C-L or T-E: chunked.
*
* Note that since the proxy uses this filter to handle the
* proxied *response*, proxy responses MUST be exempt.
*/
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
}
/* Since we're about to read data, send 100-Continue if needed.
* Only valid on chunked and C-L bodies where the C-L is > 0. */
char *tmp;
f->c->bucket_alloc);
e = apr_bucket_flush_create(f->c->bucket_alloc);
}
/* We can't read the chunk until after sending 100 if required. */
char line[30];
APR_BLOCK_READ, 0);
if (rv == APR_SUCCESS) {
if (rv == APR_SUCCESS) {
}
}
/* Detect chunksize error (such as overflow); the remaining count is
 * reset so that we can come back here later */
f->r->pool,
f->c->bucket_alloc);
e = apr_bucket_eos_create(f->c->bucket_alloc);
}
/* Handle trailers by calling ap_get_mime_headers again! */
ap_get_mime_headers(f->r);
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
}
}
}
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
}
case BODY_NONE:
break;
case BODY_LENGTH:
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
case BODY_CHUNK:
{
char line[30];
/* We need to read the CRLF after the chunk. */
APR_BLOCK_READ, 0);
if (rv == APR_SUCCESS) {
/* Read the real chunk line. */
APR_BLOCK_READ, 0);
if (rv == APR_SUCCESS) {
if (rv == APR_SUCCESS) {
}
}
}
/* Detect chunksize error (such as overflow); the remaining count is
 * reset so that we can come back here later */
f->c->bucket_alloc);
e = apr_bucket_eos_create(f->c->bucket_alloc);
}
/* Handle trailers by calling ap_get_mime_headers again! */
ap_get_mime_headers(f->r);
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
}
}
break;
}
}
/* Ensure that the caller can not go over our boundary point. */
}
AP_DEBUG_ASSERT(readbytes > 0);
}
if (rv != APR_SUCCESS) {
return rv;
}
/* How many bytes did we just read? */
apr_brigade_length(b, 0, &totalread);
/* If this happens, we have a bucket of unknown length. Die because
* it means our assumptions have changed. */
AP_DEBUG_ASSERT(totalread >= 0);
}
/* If we have no more bytes remaining on a C-L request,
 * save the caller a roundtrip to discover EOS.
*/
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
}
/* We have a limit in effect. */
/* FIXME: Note that we might get slightly confused on chunked inputs
* as we'd need to compensate for the chunk lengths which may not
* really count. This seems to be up for interpretation. */
"Read content-length of %" APR_OFF_T_FMT
" is larger than the configured limit"
f->r->pool,
f->c->bucket_alloc);
e = apr_bucket_eos_create(f->c->bucket_alloc);
}
}
return APR_SUCCESS;
}
/* The index is found by its offset from the x00 code of each level.
* Although this is fast, it will need to be replaced if some nutcase
* decides to define a high-numbered code before the lower numbers.
* If that sad event occurs, replace the code below with a linear search
* from status_lines[shortcut[i]] to status_lines[shortcut[i+1]-1];
*/
{
int i, pos;
return LEVEL_500;
}
for (i = 0; i < 5; i++) {
status -= 100;
if (status < 100) {
return pos;
}
else {
return LEVEL_500; /* status unknown (falls in gap) */
}
}
}
return LEVEL_500; /* 600 or above is also illegal */
}
{
}
typedef struct header_struct {
/* Send a single HTTP header field to the client. Note that this function
* is used in calls to table_do(), so their interfaces are co-dependent.
* In other words, don't change this one without checking table_do in alloc.c.
* It returns true unless there was a write error of some kind.
*/
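/* The co-dependence is on the apr_table_do() callback shape; the calls
 * elsewhere in this file look roughly like this (illustrative):
 *
 *     apr_table_do((int (*) (void *, const char *, const char *))
 *                      form_header_field, (void *) &h, r->headers_out, NULL);
 */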
static int form_header_field(header_struct *h,
{
char *headfield;
char *next;
*next++ = ':';
*next++ = ' ';
*next = 0;
#else
v++;
v->iov_base = ": ";
v++;
v++;
#endif /* !APR_CHARSET_EBCDIC */
return 1;
}
/* Send a request's HTTP response headers to the client.
*/
const request_rec *r)
{
const apr_array_header_t *elts;
const apr_table_entry_t *t_elt;
const apr_table_entry_t *t_end;
return APR_SUCCESS;
}
sizeof(struct iovec));
/* For each field, generate
* name ": " value CRLF
*/
do {
vec_next++;
vec_next++;
vec_next++;
vec_next++;
t_elt++;
}
/*
* Determine the protocol to use for the response. Potentially downgrade
*
* also prepare r->status_line.
*/
static void basic_http_header_check(request_rec *r,
const char **protocol)
{
if (r->assbackwards) {
/* no such thing as a response protocol */
return;
}
if (!r->status_line) {
}
/* Note that we must downgrade before checking for force responses. */
}
/* kludge around broken browsers when indicated by force-response-1.0
*/
*protocol = "HTTP/1.0";
}
else {
}
}
const char *protocol)
{
char *date;
const char *server;
if (r->assbackwards) {
/* there are no headers to send */
return;
}
/* Output the HTTP/1.x Status-Line and the Date and Server fields */
{
char *tmp;
}
#else
#endif
/* keep a previously set server header (possibly from proxy), otherwise
* generate a new server header */
}
else {
}
/* unset so we don't send them again */
}
{
const char *protocol;
}
/* Navigator versions 2.x, 3.x and 4.0 betas up to and including 4.0b2
 * have a header parsing bug. If the terminating \r\n occurs starting
* at offset 256, 257 or 258 of output then it will not properly parse
* the headers. Curiously it doesn't exhibit this problem at 512, 513.
* We are guessing that this is because their initial read of a new request
* uses a 256 byte buffer, and subsequent reads use a larger buffer.
* So the problem might exist at different offsets as well.
*
* This should also work on keepalive connections assuming they use the
* same small buffer for the first read of each new request.
*
* At any rate, we check the bytes written so far and, if we are about to
* tickle the bug, we instead insert a bogus padding header. Since the bug
* manifests as a broken image in Navigator, users blame the server. :(
* It is more expensive to check the User-Agent than it is to just add the
* bytes, so we haven't used the BrowserMatch feature here.
*/
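/* Sketch of the workaround described above (the "X-Pad" header name and
 * the 255-257 byte window follow traditional Apache behaviour, but treat
 * the details here as illustrative, with "bb" and "len" assumed):
 *
 *     if (len >= 255 && len <= 257) {
 *         const char pad[] = "X-Pad: avoid browser bug" CRLF;
 *         apr_brigade_write(bb, NULL, NULL, pad, strlen(pad));
 *     }
 */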
{
}
}
/* Build the Allow field-value from the request handler method mask.
* Note that we always allow TRACE, since it is handled below.
*/
static char *make_allow(request_rec *r)
{
char *list;
const void *key;
void *val;
/* the M_GET method actually refers to two methods */
}
}
/* TRACE is always allowed */
/* ### this is rather annoying. we should enforce registration of
### these methods */
int i;
/*
* Append all of the elements of r->allowed_methods->method_list
*/
}
}
return list;
}
{
int rv;
if (r->method_number != M_TRACE) {
return DECLINED;
}
/* Get the original request */
while (r->prev) {
r = r->prev;
}
return rv;
}
ap_set_content_type(r, "message/http");
/* Now we recreate the request, and echo it back */
h.bb = b;
apr_table_do((int (*) (void *, const char *, const char *))
ap_pass_brigade(r->output_filters, b);
return DONE;
}
{
if (r->assbackwards) {
return DECLINED;
}
/* the request finalization will send an EOS, which will flush all
* the headers out (including the Allow header)
*/
return OK;
}
/* This routine is called by apr_table_do and merges all instances of
* the passed field values into a single array that will be further
* processed by some later routine. Originally intended to help split
* and recombine multiple Vary fields, though it is generic to any field
* consisting of comma/space-separated tokens.
*/
{
char *start;
char *e;
char **strpp;
int i;
values = (apr_array_header_t *)d;
do {
/* Find a non-empty fieldname */
while (*e == ',' || apr_isspace(*e)) {
++e;
}
if (*e == '\0') {
break;
}
start = e;
++e;
}
if (*e != '\0') {
*e++ = '\0';
}
/* Now add it to values if it isn't already represented.
 * Could be replaced by an ap_array_strcasecmp() if we had one.
*/
++i, ++strpp) {
break;
}
}
}
} while (*e != '\0');
return 1;
}
/*
* Since some clients choke violently on multiple Vary fields, or
* Vary fields with duplicate tokens, combine any multiples and remove
* any duplicates.
*/
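/* For example (illustrative header values): the two response fields
 *
 *     Vary: negotiate, accept-language
 *     Vary: Accept-Language, accept-charset
 *
 * should collapse into a single field equivalent to
 *
 *     Vary: negotiate, accept-language, accept-charset
 */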
static void fixup_vary(request_rec *r)
{
/* Extract all Vary fields from the headers_out, separate each into
* its comma-separated fieldname values, and then add them to varies
* if not already present in the array.
*/
apr_table_do((int (*)(void *, const char *, const char *))uniq_field_values,
/* If we found any, replace old Vary fields with unique-ified value */
}
}
{
if (!ct) {
r->content_type = NULL;
}
r->content_type = ct;
/* Insert filters requested by the AddOutputFiltersByType
* configuration directive. Content-type filters must be
* inserted after the content handlers have run because
 * only then do we reliably know the content-type.
*/
}
}
typedef struct header_filter_ctx {
int headers_sent;
{
request_rec *r = f->r;
conn_rec *c = r->connection;
const char *clheader;
const char *protocol;
apr_bucket *e;
AP_DEBUG_ASSERT(!r->main);
if (r->header_only) {
if (!ctx) {
}
else if (ctx->headers_sent) {
return OK;
}
}
APR_BRIGADE_FOREACH(e, b) {
if (e->type == &ap_bucket_type_error) {
return AP_FILTER_ERROR;
}
}
if (r->assbackwards) {
r->sent_bodyct = 1;
return ap_pass_brigade(f->next, b);
}
/*
* Now that we are ready to send a response, we need to combine the two
* header field tables into a single table. If we don't do this, our
* later attempts to set or unset a given fieldname might be bypassed.
*/
if (!apr_is_empty_table(r->err_headers_out)) {
r->headers_out);
}
/*
* Remove the 'Vary' header field if the client can't handle it.
* Since this will have nasty effects on HTTP/1.1 caches, force
* the response into HTTP/1.0 mode.
*
* Note: the force-response-1.0 should come before the call to
* basic_http_header_check()
*/
}
else {
fixup_vary(r);
}
/*
* Now remove any ETag response header field if earlier processing
* says so (such as a 'FileETag None' directive).
*/
}
/* determine the protocol and whether we should use keepalives. */
ap_set_keepalive(r);
if (r->chunked) {
}
ap_make_content_type(r, r->content_type));
if (r->content_encoding) {
r->content_encoding);
}
if (!apr_is_empty_array(r->content_languages)) {
int i;
for (i = 0; i < r->content_languages->nelts; ++i) {
}
}
/*
* Control cachability for non-cachable responses if not already set by
* some other part of the server configuration.
*/
}
/* This is a hack, but I can't find any way around it. The idea is that
* we don't want to send out 0 Content-Lengths if it is a head request.
* This happens when modules try to outsmart the server, and return
* if they see a HEAD request. Apache 1.3 handlers were supposed to
* just return in that situation, and the core handled the HEAD. In
* 2.0, if a handler returns, then the core sends an EOS bucket down
* the filter stack, and the content-length filter computes a C-L of
* zero and that gets put in the headers, and we end up sending a
* zero C-L to the client. We can't just remove the C-L filter,
* because well behaved 2.0 handlers will send their data down the stack,
* and we will compute a real C-L for the head request. RBB
*/
if (r->header_only
}
if (r->status == HTTP_NOT_MODIFIED) {
apr_table_do((int (*)(void *, const char *, const char *)) form_header_field,
(void *) &h, r->headers_out,
"Connection",
"Keep-Alive",
"ETag",
"Content-Location",
"Expires",
"Cache-Control",
"Vary",
"Warning",
"WWW-Authenticate",
"Proxy-Authenticate",
NULL);
}
else {
send_all_header_fields(&h, r);
}
if (r->header_only) {
return OK;
}
if (r->chunked) {
/* We can't add this filter until we have already sent the headers.
* If we add it before this point, then the headers will be chunked
* as well, and that is just wrong.
*/
}
/* Don't remove this filter until after we have added the CHUNK filter.
* Otherwise, f->next won't be the CHUNK filter and thus the first
* brigade won't be chunked properly.
*/
return ap_pass_brigade(f->next, b);
}
/* Here we deal with getting the request message body from the client.
* Whether or not the request contains a body is signaled by the presence
* of a non-zero Content-Length or by a Transfer-Encoding: chunked.
*
* Note that this is more complicated than it was in Apache 1.1 and prior
* versions, because chunked support means that the module does less.
*
* The proper procedure is this:
*
* 1. Call setup_client_block() near the beginning of the request
* handler. This will set up all the necessary properties, and will
* return either OK, or an error code. If the latter, the module should
* return that error code. The second parameter selects the policy to
* apply if the request message indicates a body, and how a chunked
* transfer-coding should be interpreted. Choose one of
*
* REQUEST_NO_BODY Send 413 error if message has any body
* REQUEST_CHUNKED_ERROR Send 411 error if body without Content-Length
* REQUEST_CHUNKED_DECHUNK If chunked, remove the chunks for me.
*
* In order to use the last two options, the caller MUST provide a buffer
* large enough to hold a chunk-size line, including any extensions.
*
* 2. When you are ready to read a body (if any), call should_client_block().
* This will tell the module whether or not to read input. If it is 0,
* the module should assume that there is no message body to read.
* This step also sends a 100 Continue response to HTTP/1.1 clients,
* so should not be called until the module is *definitely* ready to
* read content. (otherwise, the point of the 100 response is defeated).
* Never call this function more than once.
*
* 3. Finally, call get_client_block in a loop. Pass it a buffer and its size.
* It will put data into the buffer (not necessarily a full buffer), and
* return the length of the input block. When it is done reading, it will
* return 0 if EOF, or -1 if there was an error.
* If an error occurs on input, we force an end to keepalive.
*/
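/* A minimal usage sketch of the three steps above (buffer name and size
 * are illustrative):
 *
 *     char buf[8192];
 *     long len;
 *
 *     if ((retval = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK)) != OK) {
 *         return retval;
 *     }
 *     if (ap_should_client_block(r)) {
 *         while ((len = ap_get_client_block(r, buf, sizeof(buf))) > 0) {
 *             ... consume len bytes ...
 *         }
 *         if (len < 0) {
 *             ... input error; the connection will not be kept alive ...
 *         }
 *     }
 */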
{
r->read_body = read_policy;
r->read_chunked = 0;
r->remaining = 0;
if (tenc) {
"Unknown Transfer-Encoding %s", tenc);
return HTTP_NOT_IMPLEMENTED;
}
if (r->read_body == REQUEST_CHUNKED_ERROR) {
"chunked Transfer-Encoding forbidden: %s", r->uri);
}
r->read_chunked = 1;
}
else if (lenp) {
int conversion_error = 0;
char *endstr;
errno = 0;
/* See comments in ap_http_filter() */
conversion_error = 1;
}
if (conversion_error) {
"Invalid Content-Length");
return HTTP_BAD_REQUEST;
}
}
if ((r->read_body == REQUEST_NO_BODY)
&& (r->read_chunked || (r->remaining > 0))) {
return HTTP_REQUEST_ENTITY_TOO_LARGE;
}
#ifdef AP_DEBUG
{
/* Make sure ap_getline() didn't leave any droppings. */
&core_module);
}
#endif
return OK;
}
{
/* First check if we have already read the request body */
return 0;
}
return 1;
}
/**
* Parse a chunk extension, detect overflow.
* There are two error cases:
* 1) If the conversion would require too many bits, a -1 is returned.
* 2) If the conversion used the correct number of bits, but an overflow
* caused only the sign bit to flip, then that negative number is
* returned.
* In general, any negative number can be considered an overflow error.
*/
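/* For example (illustrative values): get_chunk_size("1a3") yields 0x1a3
 * (419), get_chunk_size("0000ff") yields 255 because leading zeros are
 * skipped, and a chunk-size line with more significant hex digits than a
 * long can hold produces a negative result, which callers treat as an
 * overflow error.
 */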
static long get_chunk_size(char *b)
{
long chunksize = 0;
/* Skip leading zeros */
while (*b == '0') {
++b;
}
while (apr_isxdigit(*b) && (chunkbits > 0)) {
int xvalue = 0;
if (*b >= '0' && *b <= '9') {
xvalue = *b - '0';
}
else if (*b >= 'A' && *b <= 'F') {
}
else if (*b >= 'a' && *b <= 'f') {
}
chunkbits -= 4;
++b;
}
if (apr_isxdigit(*b) && (chunkbits <= 0)) {
/* overflow */
return -1;
}
return chunksize;
}
/* get_client_block is called in a loop to get the request message body.
* This is quite simple if the client includes a content-length
* (the normal case), but gets messy if the body is chunked. Note that
* r->remaining is used to maintain state across calls and that
* r->read_length is the total number of bytes given to the caller
* across all invocations. It is messy because we have to be careful not
* to read past the data provided by the client, since these reads block.
* Returns 0 on End-of-body, -1 on error or premature chunk end.
*
*/
{
return -1;
}
/* We lose the failure code here. This is why ap_get_client_block should
* not be used.
*/
if (rv != APR_SUCCESS) {
/* if we actually fail here, we want to just return and
* stop trying to read data from the client.
*/
return -1;
}
/* If this fails, it means that a filter is written incorrectly and that
* it needs to learn how to properly handle APR_BLOCK_READ requests by
* returning data when requested.
*/
if (rv != APR_SUCCESS) {
return -1;
}
/* XXX yank me? */
r->read_length += bufsiz;
return bufsiz;
}
/* In HTTP/1.1, any method can have a body. However, most GET handlers
* wouldn't know what to do with a request body if they received one.
* This helper routine tests for and reads any message body in the request,
* simply discarding whatever it receives. We need to do this because
* failing to read the request body would cause it to be interpreted
* as the next request on a persistent connection.
*
* Since we return an error status if the request is malformed, this
* routine should be called at the beginning of a no-body handler, e.g.,
*
* if ((retval = ap_discard_request_body(r)) != OK) {
* return retval;
* }
*/
{
/* Sometimes we'll get in a state where the input handling has
* detected an error where we want to drop the connection, so if
* that's the case, don't read the data as that is what we're trying
* to avoid.
*
* This function is also a no-op on a subrequest.
*/
ap_status_drops_connection(r->status)) {
return OK;
}
seen_eos = 0;
do {
if (rv != APR_SUCCESS) {
/* FIXME: If we ever have a mapping from filters (apr_status_t)
* to HTTP error codes, this would be a good place for them.
*
* If we received the special case AP_FILTER_ERROR, it means
* that the filters have already handled this error.
* Otherwise, we should assume we have a bad request.
*/
if (rv == AP_FILTER_ERROR) {
return rv;
}
else {
return HTTP_BAD_REQUEST;
}
}
const char *data;
if (APR_BUCKET_IS_EOS(bucket)) {
seen_eos = 1;
break;
}
/* These are metadata buckets. */
continue;
}
/* We MUST read because in case we have an unknown-length
* bucket or one that morphs, we want to exhaust it.
*/
if (rv != APR_SUCCESS) {
return HTTP_BAD_REQUEST;
}
}
} while (!seen_eos);
return OK;
}
static const char *add_optional_notes(request_rec *r,
const char *prefix,
const char *key,
const char *suffix)
{
}
else {
}
return result;
}
/* construct and return the default error message for a given
* HTTP defined error code
*/
static const char *get_canned_error_string(int status,
request_rec *r,
const char *location)
{
apr_pool_t *p = r->pool;
switch (status) {
case HTTP_MOVED_PERMANENTLY:
case HTTP_MOVED_TEMPORARILY:
case HTTP_TEMPORARY_REDIRECT:
return(apr_pstrcat(p,
"<p>The document has moved <a href=\"",
"\">here</a>.</p>\n",
NULL));
case HTTP_SEE_OTHER:
return(apr_pstrcat(p,
"<p>The answer to your request is located "
"<a href=\"",
"\">here</a>.</p>\n",
NULL));
case HTTP_USE_PROXY:
return(apr_pstrcat(p,
"<p>This resource is only accessible "
"through the proxy\n",
"<br />\nYou will need to configure "
"your client to use that proxy.</p>\n",
NULL));
case HTTP_UNAUTHORIZED:
return("<p>This server could not verify that you\n"
"are authorized to access the document\n"
"requested. Either you supplied the wrong\n"
"credentials (e.g., bad password), or your\n"
"browser doesn't understand how to supply\n"
"the credentials required.</p>\n");
case HTTP_BAD_REQUEST:
return(add_optional_notes(r,
"<p>Your browser sent a request that "
"this server could not understand.<br />\n",
"error-notes",
"</p>\n"));
case HTTP_FORBIDDEN:
return(apr_pstrcat(p,
"<p>You don't have permission to access ",
"\non this server.</p>\n",
NULL));
case HTTP_NOT_FOUND:
return(apr_pstrcat(p,
"<p>The requested URL ",
" was not found on this server.</p>\n",
NULL));
case HTTP_METHOD_NOT_ALLOWED:
return(apr_pstrcat(p,
"<p>The requested method ", r->method,
" is not allowed for the URL ",
".</p>\n",
NULL));
case HTTP_NOT_ACCEPTABLE:
s1 = apr_pstrcat(p,
"<p>An appropriate representation of the "
"requested resource ",
" could not be found on this server.</p>\n",
NULL);
case HTTP_MULTIPLE_CHOICES:
case HTTP_LENGTH_REQUIRED:
s1 = apr_pstrcat(p,
"<p>A request of the requested method ",
r->method,
" requires a valid Content-length.<br />\n",
NULL);
case HTTP_PRECONDITION_FAILED:
return(apr_pstrcat(p,
"<p>The precondition on the request "
"for the URL ",
" evaluated to false.</p>\n",
NULL));
case HTTP_NOT_IMPLEMENTED:
s1 = apr_pstrcat(p,
"<p>",
" not supported.<br />\n",
NULL);
case HTTP_BAD_GATEWAY:
"response from an upstream server.<br />" CRLF;
case HTTP_VARIANT_ALSO_VARIES:
return(apr_pstrcat(p,
"<p>A variant for the requested "
"resource\n<pre>\n",
"\n</pre>\nis itself a negotiable resource. "
"This indicates a configuration error.</p>\n",
NULL));
case HTTP_REQUEST_TIME_OUT:
return("<p>Server timeout waiting for the HTTP request from the client.</p>\n");
case HTTP_GONE:
return(apr_pstrcat(p,
"<p>The requested resource<br />",
"<br />\nis no longer available on this server "
"and there is no forwarding address.\n"
"Please remove all references to this "
"resource.</p>\n",
NULL));
return(apr_pstrcat(p,
"The requested resource<br />",
"does not allow request data with ",
r->method,
" requests, or the amount of data provided in\n"
"the request exceeds the capacity limit.\n",
NULL));
s1 = "<p>The requested URL's length exceeds the capacity\n"
"limit for this server.<br />\n";
return("<p>The supplied request data is not in a format\n"
"acceptable for processing by this resource.</p>\n");
return("<p>None of the range-specifier values in the Range\n"
"request-header field overlap the current extent\n"
"of the selected resource.</p>\n");
case HTTP_EXPECTATION_FAILED:
return(apr_pstrcat(p,
"<p>The expectation given in the Expect "
"request-header"
"\nfield could not be met by this server.</p>\n"
"<p>The client sent<pre>\n Expect: ",
"\n</pre>\n"
"but we only allow the 100-continue "
"expectation.</p>\n",
NULL));
return("<p>The server understands the media type of the\n"
"request entity, but was unable to process the\n"
"contained instructions.</p>\n");
case HTTP_LOCKED:
return("<p>The requested resource is currently locked.\n"
"The lock must be released or proper identification\n"
"given before the method can be applied.</p>\n");
case HTTP_FAILED_DEPENDENCY:
return("<p>The method could not be performed on the resource\n"
"because the requested action depended on another\n"
"action and that other action failed.</p>\n");
case HTTP_UPGRADE_REQUIRED:
return("<p>The requested resource can only be retrieved\n"
"using SSL. The server is willing to upgrade the current\n"
"connection to SSL, but your client doesn't support it.\n"
"Either upgrade your client, or try requesting the page\n"
"using https://\n");
return("<p>The method could not be performed on the resource\n"
"because the server is unable to store the\n"
"representation needed to successfully complete the\n"
"request. There is insufficient free space left in\n"
"your storage allocation.</p>\n");
case HTTP_SERVICE_UNAVAILABLE:
return("<p>The server is temporarily unable to service your\n"
"request due to maintenance downtime or capacity\n"
"problems. Please try again later.</p>\n");
case HTTP_GATEWAY_TIME_OUT:
return("<p>The proxy server did not receive a timely response\n"
"from the upstream server.</p>\n");
case HTTP_NOT_EXTENDED:
return("<p>A mandatory extension policy in the request is not\n"
"accepted by the server for this resource.</p>\n");
default: /* HTTP_INTERNAL_SERVER_ERROR */
/*
* This comparison to expose error-notes could be modified to
* use a configuration directive and export based on that
* directive. For now "*" is used to designate an error-notes
* that is totally safe for any user to see (ie lacks paths,
* database passwords, etc.)
*/
"error-notes")) != NULL)
}
else {
return(apr_pstrcat(p,
"<p>The server encountered an internal "
"error or\n"
"misconfiguration and was unable to complete\n"
"your request.</p>\n"
"<p>Please contact the server "
"administrator,\n ",
ap_escape_html(r->pool,
r->server->server_admin),
" and inform them of the time the "
"error occurred,\n"
"and anything you might have done that "
"may have\n"
"caused the error.</p>\n"
"<p>More information about this error "
"may be available\n"
"in the server error log.</p>\n",
NULL));
}
/*
* It would be nice to give the user the information they need to
* fix the problem directly since many users don't have access to
* the error_log (think University sites) even though they can easily
* get this error by misconfiguring an htaccess file. However, the
 * error notes tend to include the real file pathname in this case,
* which some people consider to be a breach of privacy. Until we
* can figure out a way to remove the pathname, leave this commented.
*
* if ((error_notes = apr_table_get(r->notes,
* "error-notes")) != NULL) {
* return(apr_pstrcat(p, error_notes, "<p />\n", NULL);
* }
* else {
* return "";
* }
*/
}
}
/* We should have named this send_canned_response, since it is used for any
* response that can be generated by the server from the request record.
* This includes all 204 (no content), 3xx (redirect), 4xx (client error),
* and 5xx (server error) messages that have not been redirected to another
* handler via the ErrorDocument feature.
*/
{
char *custom_response;
/* At this point, we are starting the response over, so we have to reset
* this value.
*/
r->eos_sent = 0;
/* and we need to get rid of any RESOURCE filters that might be lurking
* around, thinking they are in the middle of the original request
*/
r->output_filters = r->proto_output_filters;
/*
* It's possible that the Location field might be in r->err_headers_out
* instead of r->headers_out; use the latter if possible, else the
* former.
*/
}
/* We need to special-case the handling of 204 and 304 responses,
* since they have specific HTTP requirements and do not include a
* message body. Note that being assbackwards here is not an option.
*/
if (status == HTTP_NOT_MODIFIED) {
return;
}
if (status == HTTP_NO_CONTENT) {
return;
}
if (!r->assbackwards) {
/* For all HTTP/1.x responses for which we generate the message,
* we need to avoid inheriting the "normal status" header fields
* that may have been set by the request handler before the
* error or redirect, except for Location on external redirects.
*/
r->headers_out = r->err_headers_out;
r->err_headers_out = tmp;
}
else {
}
}
r->content_languages = NULL;
r->content_encoding = NULL;
r->clength = 0;
ap_set_content_type(r, "text/html; charset=iso-8859-1");
if ((status == HTTP_METHOD_NOT_ALLOWED)
|| (status == HTTP_NOT_IMPLEMENTED)) {
}
if (r->header_only) {
return;
}
}
/*
* We have a custom response output. This should only be
* a text-string to write back. But if the ErrorDocument
* was a local redirect and the requested resource failed
* for any reason, the custom_response will still hold the
* redirect URL. We don't really want to output this URL
* as a text message, so first check the custom response
* string to ensure that it is a text-string (using the
* same test used in ap_die(), i.e. does it start with a ").
*
* If it's not a text string, we've got a recursive error or
* an external redirect. If it's a recursive error, ap_die passes
* us the second error code so we can write both, and has already
* backed up to the original error. If it's an external redirect,
* it hasn't happened yet; we may never know if it fails.
*/
if (custom_response[0] == '\"') {
return;
}
}
{
const char *h1;
/* Accept a status_line set by a module, but only if it begins
* with the 3 digit status code
*/
if (r->status_line != NULL
&& apr_isdigit(r->status_line[0])
title = r->status_line;
}
/* folks decided they didn't want the error code in the H1 text */
/* can't count on a charset filter being in place here,
* so do ebcdic->ascii translation explicitly (if needed)
*/
"<html><head>\n<title>", title,
NULL);
NULL);
if (recursive_error) {
ap_rvputs_proto_in_ascii(r, "<p>Additionally, a ",
"\nerror was encountered while trying to use an "
"ErrorDocument to handle the request.</p>\n", NULL);
}
}
}
/*
* Create a new method list with the specified number of preallocated
* extension slots.
*/
{
ml->method_mask = 0;
return ml;
}
/*
* Make a copy of a method list (primarily for subrequests that may
* subsequently change it; don't want them changing the parent's, too!).
*/
{
int i;
char **imethods;
char **omethods;
}
}
/*
* Invoke a callback routine for each method in the specified list.
*/
const char *mname,
int mnum),
void *rec,
const ap_method_list_t *ml, ...)
{
}
const char *mname,
int mnum),
{
}
/*
* Return true if the specified HTTP method is in the provided
* method list.
*/
{
int methnum;
int i;
char **methods;
/*
* If it's one of our known methods, use the shortcut and check the
* bitmask.
*/
}
/*
* Otherwise, see if the method name is in the array or string names
*/
return 0;
}
for (i = 0; i < l->method_list->nelts; ++i) {
return 1;
}
}
return 0;
}
/*
* Add the specified method to a method list (if it isn't already there).
*/
{
int methnum;
int i;
const char **xmethod;
char **methods;
/*
* If it's one of our known methods, use the shortcut and use the
* bitmask.
*/
return;
}
/*
* Otherwise, see if the method name is in the array of string names.
*/
if (l->method_list->nelts != 0) {
for (i = 0; i < l->method_list->nelts; ++i) {
return;
}
}
}
}
/*
* Remove the specified method from a method list.
*/
const char *method)
{
int methnum;
char **methods;
/*
 * If it's a known method, either builtin or registered
* by a module, use the bitmask.
*/
return;
}
/*
* Otherwise, see if the method name is in the array of string names.
*/
if (l->method_list->nelts != 0) {
register int i, j, k;
for (i = 0; i < l->method_list->nelts; ) {
}
--l->method_list->nelts;
}
else {
++i;
}
}
}
}
/*
* Reset a method list to be completely empty.
*/
{
l->method_mask = 0;
l->method_list->nelts = 0;
}
/* Generate the human-readable hex representation of an unsigned long
* (basically a faster version of 'sprintf("%lx")')
*/
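/* For example (illustrative): with u == 0x1a2bUL the routine writes the
 * four characters "1a2b" starting at next (no leading zeros and no
 * terminating NUL) and returns a pointer just past the last digit written.
 */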
#define HEX_DIGITS "0123456789abcdef"
static char *etag_ulong_to_hex(char *next, unsigned long u)
{
int printing = 0;
do {
if (next_digit) {
printing = 1;
}
else if (printing) {
}
shift -= 4;
} while (shift);
return next;
}
#define ETAG_WEAK "W/"
#define CHARS_PER_UNSIGNED_LONG (sizeof(unsigned long) * 2)
/*
* Construct an entity tag (ETag) from resource information. If it's a real
* file, build in some of the file characteristics. If the modification time
* is newer than (request-time minus 1 second), mark the ETag as weak - it
* could be modified again in as short an interval. We rationalize the
* modification time we're given to keep it from being in the future.
*/
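/* Illustrative resulting header values (the exact fields depend on the
 * FileETag configuration; the hex digits are made up):
 *
 *     ETag: "1f02-1c7d-3e9564c23b600"        strong: inode-size-mtime
 *     ETag: W/"1f02-1c7d-3e9564c23b600"      weak: mtime within a second
 *                                            of the request time
 */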
{
char *weak;
char *etag;
char *next;
&core_module);
/*
* If it's a file (or we wouldn't be here) and no ETags
* should be set for files, return an empty string and
* note it for the header-sender to ignore.
*/
return "";
}
if (etag_bits == ETAG_UNSET) {
}
/*
* Make an ETag header out of various pieces of information. We use
* the last-modified date and, if we have a real file, the
* length and inode number - note that this doesn't have to match
* the content-length (i.e. includes), it just has to be unique
* for the file.
*
* If the request was made within a second of the last-modified date,
* we send a weak tag instead of a strong one, since it could
* be modified again later in the second, and the validation
* would be incorrect.
*/
!force_weak) {
weak_len = 0;
}
else {
}
/*
* ETag gets set to [W/]"inode-size-mtime", modulo any
* FileETag keywords.
*/
if (weak) {
while (*weak) {
}
}
*next++ = '"';
bits_added = 0;
if (etag_bits & ETAG_INODE) {
bits_added |= ETAG_INODE;
}
if (bits_added != 0) {
*next++ = '-';
}
bits_added |= ETAG_SIZE;
}
if (etag_bits & ETAG_MTIME) {
if (bits_added != 0) {
*next++ = '-';
}
}
*next++ = '"';
*next = '\0';
}
else {
/*
* Not a file document, so just use the mtime: [W/]"mtime"
*/
CHARS_PER_UNSIGNED_LONG + 1);
if (weak) {
while (*weak) {
}
}
*next++ = '"';
*next++ = '"';
*next = '\0';
}
return etag;
}
{
char *etag;
char *variant_etag, *vlv;
int vlv_weak;
if (!r->vlist_validator) {
etag = ap_make_etag(r, 0);
/* If we get a blank etag back, don't set the header. */
if (!etag[0]) {
return;
}
}
else {
/* If we have a variant list validator (vlv) due to the
* response being negotiated, then we create a structured
* entity tag which merges the variant etag with the variant
* list validator (vlv). This merging makes revalidation
* somewhat safer, ensures that caches which can deal with
* Vary will (eventually) be updated if the set of variants is
* changed, and is also a protocol requirement for transparent
* content negotiation.
*/
/* if the variant list validator is weak, we make the whole
* structured etag weak. If we would not, then clients could
* have problems merging range responses if we have different
* variants with the same non-globally-unique strong etag.
*/
vlv = r->vlist_validator;
/* If we get a blank etag back, don't append vlv and stop now. */
if (!variant_etag[0]) {
return;
}
/* merge variant_etag and vlv into a structured etag */
if (vlv_weak) {
vlv += 3;
}
else {
vlv++;
}
}
}
{
if (!dash) {
return 0;
}
/* In the form "-5" */
}
else {
*dash = '\0';
dash++;
if (*dash) {
}
else { /* "5-" */
}
}
if (*start < 0) {
*start = 0;
}
}
return -1;
}
}
static int ap_set_byterange(request_rec *r);
typedef struct byterange_ctx {
int num_ranges;
char *boundary;
char *bound_head;
/*
* Here we try to be compatible with clients that want multipart/x-byteranges
* instead of multipart/byteranges (also see above), as per HTTP/1.1. We
* look for the Request-Range header (e.g. Netscape 2 and 3) as an indication
* that the browser supports an older protocol. We also check User-Agent
* for Microsoft Internet Explorer 3, which needs this as well.
*/
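/* For example (illustrative request header): such a client sends
 *
 *     Request-Range: bytes=0-499,1000-1499
 *
 * instead of (or alongside) the HTTP/1.1 Range field and expects a
 * multipart/x-byteranges response.
 */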
static int use_range_x(request_rec *r)
{
const char *ua;
}
#define PARTITION_ERR_FMT "apr_brigade_partition() failed " \
{
request_rec *r = f->r;
conn_rec *c = r->connection;
apr_bucket *e;
char *current;
int found = 0;
if (!ctx) {
int num_ranges = ap_set_byterange(r);
/* We have nothing to do, get out of the way. */
if (num_ranges == 0) {
}
/* create a brigade in case we never call ap_save_brigade() */
/* Is ap_make_content_type required here? */
/* need APR_TIME_T_FMT_HEX */
r->request_time, (long) getpid());
"byteranges; boundary=",
CRLF "Content-type: ",
CRLF "Content-range: bytes ",
NULL);
}
}
/* We can't actually deal with byte-ranges until we have the whole brigade
* because the byte-ranges can be in any order, and according to the RFC,
* we SHOULD return the data in the same order it was requested.
*
* XXX: We really need to dump all bytes prior to the start of the earliest
* range, and only slurp up to the end of the latest range. By this we
* mean that we should peek-ahead at the lowest first byte of any range,
* and the highest last byte of any range.
*/
return APR_SUCCESS;
}
/* Prepend any earlier saved brigades. */
/* It is possible that we won't have a content length yet, so we have to
* compute the length before we can actually do the byterange work.
*/
/* this brigade holds what we will be sending */
&range_end))) {
apr_bucket *e2;
apr_bucket *ec;
if (rv == -1) {
continue;
}
/* these calls to apr_brigade_partition() should theoretically
* never fail because of the above call to apr_brigade_length(),
* but what the heck, we'll check for an error anyway */
continue;
}
continue;
}
found = 1;
/* For single range requests, we must produce Content-Range header.
* Otherwise, we need to produce the multipart boundaries.
*/
}
else {
char *ts;
r->pool, c->bucket_alloc);
c->bucket_alloc);
}
do {
const char *str;
/* this shouldn't ever happen due to the call to
* apr_brigade_length() above which normalizes
* indeterminate-length buckets. just to be sure,
* though, this takes care of uncopyable buckets that
* do somehow manage to slip through.
*/
/* XXX: check for failure? */
}
}
if (found == 0) {
/* bsend is assumed to be empty if we get here. */
r->pool, c->bucket_alloc);
e = apr_bucket_eos_create(c->bucket_alloc);
}
char *end;
/* add the final boundary */
}
e = apr_bucket_eos_create(c->bucket_alloc);
/* we're done with the original content - all of our data is in bsend. */
/* send our multipart output */
}
static int ap_set_byterange(request_rec *r)
{
const char *range;
const char *if_range;
const char *match;
const char *ct;
int num_ranges;
if (r->assbackwards) {
return 0;
}
/* Check for Range request-header (HTTP/1.1) or Request-Range for
* byte-ranges (e.g. Netscape Navigator 2-3).
*
* We support this form, with Request-Range, and (farther down) we
* send multipart/x-byteranges instead of multipart/byteranges for
* Request-Range based requests to work around a bug in Netscape
* Navigator 2-3 and MSIE 3.
*/
}
return 0;
}
/* is content already a single range? */
return 0;
}
/* is content already a multiple range? */
return 0;
}
/* Check the If-Range header for Etag or Date.
* Note that this check will return false (as required) if either
 * of the two etags is weak.
*/
if (if_range[0] == '"') {
return 0;
}
}
return 0;
}
}
/* a single range */
num_ranges = 1;
}
else {
/* a multiple range */
num_ranges = 2;
}
r->status = HTTP_PARTIAL_CONTENT;
return num_ranges;
}