/* http_filters.c revision 7184de27ec1d62a83c41cdeac0953ca9fd661e8c */
/* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* http_filter.c --- HTTP routines which either filters or deal with filters.
*/
#include "apr.h"
#include "apr_strings.h"
#include "apr_buckets.h"
#include "apr_lib.h"
#include "apr_signal.h"
#define APR_WANT_STDIO /* for sscanf */
#define APR_WANT_STRFUNC
#define APR_WANT_MEMFUNC
#include "apr_want.h"
#include "util_filter.h"
#include "ap_config.h"
#include "httpd.h"
#include "http_config.h"
#include "http_core.h"
#include "http_protocol.h"
#include "http_main.h"
#include "http_request.h"
#include "http_vhost.h"
#include "http_connection.h"
#include "http_log.h" /* For errors detected in basic auth common
* support code... */
#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
#include "util_charset.h"
#include "util_ebcdic.h"
#include "util_time.h"
#include "mod_core.h"
#include <stdarg.h>
#endif
#include <unistd.h>
#endif
#define INVALID_CHAR -2
static long get_chunk_size(char *);
/* Per-request parsing state for the HTTP_IN body filter.
 * NOTE(review): the body-state enumerators (e.g. BODY_NONE / BODY_LENGTH /
 * BODY_CHUNK, referenced by the switch cases below) have been elided in
 * this snapshot -- confirm against upstream http_filters.c. */
typedef struct http_filter_ctx {
enum {
} state;          /* current body-parsing state (see cases at L261-268) */
int eos_sent;     /* presumably nonzero once an EOS bucket was sent -- verify */
char chunk_ln[32];/* buffer accumulating the chunk-size line (see ctx->chunk_ln use) */
char *pos;        /* current write position inside chunk_ln (reset per line) */
} http_ctx_t;
/* bail out if some error in the HTTP input filter happens */
ap_filter_t *f,
int http_error)
{
apr_bucket *e;
f->c->bucket_alloc);
e = apr_bucket_eos_create(f->c->bucket_alloc);
/* If chunked encoding / content-length are corrupt, we may treat parts
* of this request's body as the next one's headers.
* To be safe, disable keep-alive.
*/
}
int linelimit)
{
apr_bucket *e;
const char *lineend;
apr_size_t len = 0;
/*
* As the brigade b should have been requested in mode AP_MODE_GETLINE
* all buckets in this brigade are already some type of memory
* buckets (due to the needed scanning for LF in mode AP_MODE_GETLINE)
* or META buckets.
*/
if (rv != APR_SUCCESS) {
return rv;
}
/* Sanity check. Should never happen. See above. */
if (brigade_length == -1) {
return APR_EGENERAL;
}
if (!brigade_length) {
return APR_EAGAIN;
}
return APR_ENOSPC;
}
/*
* As all buckets are already some type of memory buckets or META buckets
* (see above), we only need to check the last byte in the last data bucket.
*/
for (e = APR_BRIGADE_LAST(b);
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_PREV(e)) {
if (APR_BUCKET_IS_METADATA(e)) {
continue;
}
if (rv != APR_SUCCESS) {
return rv;
}
if (len > 0) {
break; /* we got the data we want */
}
/* If we got a zero-length data bucket, we try the next one */
}
/* We had no data in this brigade */
if (!len || e == APR_BRIGADE_SENTINEL(b)) {
return APR_EAGAIN;
}
return APR_EAGAIN;
}
/* Line is complete. So reset ctx for next round. */
return APR_SUCCESS;
}
int linelimit)
{
int tmp_len;
/* Saveguard ourselves against underflows */
if (tmp_len < 0) {
len = 0;
}
else {
}
/*
* Check if there is space left in ctx->chunk_ln. If not, then either
* the chunk size is insane or we have chunk-extensions. Ignore both
* by discarding the remaining part of the line via
* get_remaining_chunk_line. Only bail out if the line is too long.
*/
if (len > 0) {
if (rv != APR_SUCCESS) {
return rv;
}
/*
* Check if we really got a full line. If yes the
* last char in the just read buffer must be LF.
* If not advance the buffer and return APR_EAGAIN.
* We do not start processing until we have the
* full line.
*/
/* Check if the remaining data in the brigade has the LF */
}
/* Line is complete. So reset ctx->pos for next round. */
return APR_SUCCESS;
}
}
/* This is the HTTP_INPUT filter for HTTP requests and responses from
* proxied servers (mod_proxy). It handles chunked and content-length
* are successfully parsed.
*/
{
apr_bucket *e;
/* just get out of the way of things we don't want. */
}
if (!ctx) {
/* LimitRequestBody does not apply to proxied responses.
* Consider implementing this check in its own filter.
* Would adding a directive to limit the size of proxied
* responses be useful?
*/
if (!f->r->proxyreq) {
}
else {
}
if (tenc) {
}
/* test lenp, because it gives another case we can handle */
else if (!lenp) {
/* Something that isn't in HTTP, unless some future
* edition defines new transfer ecodings, is unsupported.
*/
"Unknown Transfer-Encoding: %s", tenc);
}
else {
"Unknown Transfer-Encoding: %s; using Content-Length", tenc);
}
}
char *endstr;
* string (excluding leading space) (the endstr checks)
* and a negative number. */
"Invalid Content-Length");
}
/* If we have a limit in effect and we know the C-L ahead of
* time, stop it here if it is invalid.
*/
"Requested content-length of %" APR_OFF_T_FMT
" is larger than the configured limit"
}
}
/* If we don't have a request entity indicated by the headers, EOS.
* (BODY_NONE is a valid intermediate state due to trailers,
* but it isn't a valid starting state.)
*
* RFC 2616 Section 4.4 note 5 states that connection-close
* is invalid for a request entity - request bodies must be
* denoted by C-L or T-E: chunked.
*
* Note that since the proxy uses this filter to handle the
* proxied *response*, proxy responses MUST be exempt.
*/
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
}
/* Since we're about to read data, send 100-Continue if needed.
* Only valid on chunked and C-L bodies where the C-L is > 0. */
!(f->r->eos_sent || f->r->bytes_sent)) {
if (!ap_is_HTTP_SUCCESS(f->r->status)) {
} else {
char *tmp;
int len;
/* if we send an interim response, we're no longer
* in a state of expecting one.
*/
f->r->expecting_100 = 0;
NULL);
f->c->bucket_alloc);
e = apr_bucket_flush_create(f->c->bucket_alloc);
}
}
/* We can't read the chunk until after sending 100 if required. */
block, 0);
/* for timeout */
if (block == APR_NONBLOCK_READ &&
(APR_STATUS_IS_EAGAIN(rv)) )) {
return APR_EAGAIN;
}
if (rv == APR_SUCCESS) {
if (APR_STATUS_IS_EAGAIN(rv)) {
return rv;
}
if (rv == APR_SUCCESS) {
rv = APR_EGENERAL;
}
}
}
/* Detect chunksize error (such as overflow) */
* come back here later */
if (APR_STATUS_IS_TIMEUP(rv)) {
}
}
/* Handle trailers by calling ap_get_mime_headers again! */
ap_get_mime_headers(f->r);
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
}
}
}
else {
}
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
}
case BODY_NONE:
break;
case BODY_LENGTH:
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
case BODY_CHUNK:
case BODY_CHUNK_PART:
{
/* We need to read the CRLF after the chunk. */
block, 0);
if (block == APR_NONBLOCK_READ &&
(APR_STATUS_IS_EAGAIN(rv)) )) {
return APR_EAGAIN;
}
/* If we get an error, then leave */
if (rv != APR_SUCCESS) {
return rv;
}
/*
* We really don't care whats on this line. If it is RFC
* compliant it should be only \r\n. If there is more
* before we just ignore it as long as we do not get over
* the limit for request lines.
*/
f->r->server->limit_req_line);
if (APR_STATUS_IS_EAGAIN(rv)) {
return rv;
}
} else {
rv = APR_SUCCESS;
}
if (rv == APR_SUCCESS) {
/* Read the real chunk line. */
block, 0);
/* Test timeout */
if (block == APR_NONBLOCK_READ &&
(APR_STATUS_IS_EAGAIN(rv)) )) {
return APR_EAGAIN;
}
if (rv == APR_SUCCESS) {
if (APR_STATUS_IS_EAGAIN(rv)) {
return rv;
}
if (rv == APR_SUCCESS) {
rv = APR_EGENERAL;
}
}
}
}
/* Detect chunksize error (such as overflow) */
* come back here later */
if (APR_STATUS_IS_TIMEUP(rv)) {
}
}
/* Handle trailers by calling ap_get_mime_headers again! */
ap_get_mime_headers(f->r);
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
}
}
break;
}
}
/* Ensure that the caller can not go over our boundary point. */
}
AP_DEBUG_ASSERT(readbytes > 0);
}
if (rv != APR_SUCCESS) {
return rv;
}
/* How many bytes did we just read? */
apr_brigade_length(b, 0, &totalread);
/* If this happens, we have a bucket of unknown length. Die because
* it means our assumptions have changed. */
AP_DEBUG_ASSERT(totalread >= 0);
e = APR_BRIGADE_LAST(b);
if (APR_BUCKET_IS_EOS(e))
return APR_EOF;
}
}
/* If we have no more bytes remaining on a C-L request,
* save the callter a roundtrip to discover EOS.
*/
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
}
/* We have a limit in effect. */
/* FIXME: Note that we might get slightly confused on chunked inputs
* as we'd need to compensate for the chunk lengths which may not
* really count. This seems to be up for interpretation. */
"Read content-length of %" APR_OFF_T_FMT
" is larger than the configured limit"
f->r->pool,
f->c->bucket_alloc);
e = apr_bucket_eos_create(f->c->bucket_alloc);
}
}
return APR_SUCCESS;
}
/**
* Parse a chunk extension, detect overflow.
* There are two error cases:
* 1) If the conversion would require too many bits, a -1 is returned.
* 2) If the conversion used the correct number of bits, but an overflow
* caused only the sign bit to flip, then that negative number is
* returned.
* In general, any negative number can be considered an overflow error.
*/
/* Parse the hexadecimal chunk-size at the start of a chunk-size line.
 * Returns the parsed size, INVALID_CHAR if the line does not begin with
 * a hex digit (also covers empty lines), or a negative value on overflow
 * (see the contract documented above this function).
 *
 * Fixes vs. the snapshot: declares the previously-undeclared bit budget
 * 'chunkbits', assigns xvalue for the alphabetic hex digits, and actually
 * folds each digit into 'chunksize' (the accumulation was missing).
 */
static long get_chunk_size(char *b)
{
    long chunksize = 0;
    /* How many value bits are still available in a long; each hex digit
     * consumes four. Used to detect overflow without UB from shifting. */
    long chunkbits = sizeof(long) * 8;

    ap_xlate_proto_from_ascii(b, strlen(b));

    if (!apr_isxdigit(*b)) {
        /*
         * Detect invalid character at beginning. This also works for empty
         * chunk size lines.
         */
        return INVALID_CHAR;
    }
    /* Skip leading zeros */
    while (*b == '0') {
        ++b;
    }

    while (apr_isxdigit(*b) && (chunkbits > 0)) {
        int xvalue = 0;

        if (*b >= '0' && *b <= '9') {
            xvalue = *b - '0';
        }
        else if (*b >= 'A' && *b <= 'F') {
            xvalue = *b - 'A' + 0xa;
        }
        else if (*b >= 'a' && *b <= 'f') {
            xvalue = *b - 'a' + 0xa;
        }

        chunksize = (chunksize << 4) | xvalue;
        chunkbits -= 4;
        ++b;
    }
    if (apr_isxdigit(*b) && (chunkbits <= 0)) {
        /* overflow: more hex digits remain than fit in a long */
        return -1;
    }

    return chunksize;
}
typedef struct header_struct {
/* Send a single HTTP header field to the client. Note that this function
* is used in calls to table_do(), so their interfaces are co-dependent.
* In other words, don't change this one without checking table_do in alloc.c.
* It returns true unless there was a write error of some kind.
*/
static int form_header_field(header_struct *h,
{
char *headfield;
char *next;
*next++ = ':';
*next++ = ' ';
*next = 0;
#else
v++;
v->iov_base = ": ";
v++;
v++;
#endif /* !APR_CHARSET_EBCDIC */
return 1;
}
/* This routine is called by apr_table_do and merges all instances of
* the passed field values into a single array that will be further
* processed by some later routine. Originally intended to help split
* and recombine multiple Vary fields, though it is generic to any field
* consisting of comma/space-separated tokens.
*/
{
char *start;
char *e;
char **strpp;
int i;
values = (apr_array_header_t *)d;
do {
/* Find a non-empty fieldname */
while (*e == ',' || apr_isspace(*e)) {
++e;
}
if (*e == '\0') {
break;
}
start = e;
++e;
}
if (*e != '\0') {
*e++ = '\0';
}
/* Now add it to values if it isn't already represented.
* Could be replaced by a ap_array_strcasecmp() if we had one.
*/
++i, ++strpp) {
break;
}
}
}
} while (*e != '\0');
return 1;
}
/*
* Since some clients choke violently on multiple Vary fields, or
* Vary fields with duplicate tokens, combine any multiples and remove
* any duplicates.
*/
/* Combine multiple Vary response header fields into one and drop duplicate
 * tokens, since some clients choke on multiples/duplicates (see comment
 * above). Uses uniq_field_values() via apr_table_do() to collect the
 * unique tokens, then rewrites headers_out if any were found.
 *
 * Fixes vs. the snapshot: the 'varies' accumulator array and the tail of
 * the apr_table_do() call were elided, leaving a syntax error; restored
 * per the documented intent.
 */
static void fixup_vary(request_rec *r)
{
    apr_array_header_t *varies;

    varies = apr_array_make(r->pool, 5, sizeof(char *));

    /* Extract all Vary fields from the headers_out, separate each into
     * its comma-separated fieldname values, and then add them to varies
     * if not already present in the array.
     */
    apr_table_do((int (*)(void *, const char *, const char *))uniq_field_values,
                 (void *) varies, r->headers_out, "Vary", NULL);

    /* If we found any, replace old Vary fields with unique-ified value */
    if (varies->nelts > 0) {
        apr_table_setn(r->headers_out, "Vary",
                       apr_array_pstrcat(r->pool, varies, ','));
    }
}
/* Send a request's HTTP response headers to the client.
*/
const request_rec *r)
{
const apr_array_header_t *elts;
const apr_table_entry_t *t_elt;
const apr_table_entry_t *t_end;
return APR_SUCCESS;
}
sizeof(struct iovec));
/* For each field, generate
* name ": " value CRLF
*/
do {
vec_next++;
vec_next++;
vec_next++;
vec_next++;
t_elt++;
if (APLOGrtrace4(r)) {
do {
t_elt++;
}
{
}
#else
#endif
}
/* Confirm that the status line is well-formed and matches r->status.
* If they don't match, a filter may have negated the status line set by a
* handler.
* Zap r->status_line if bad.
*/
/* Confirm that r->status_line is well-formed and matches r->status
 * (a filter may have negated the status line set by a handler); zap
 * r->status_line to NULL if bad so a correct default is regenerated.
 *
 * Fixes vs. the snapshot: 'len' was used but never declared and the
 * validation expression was elided; restored. NOTE(review): reconstructed
 * from the documented contract -- confirm the exact checks against
 * upstream http_filters.c.
 */
static void validate_status_line(request_rec *r)
{
    char *end;

    if (r->status_line) {
        apr_size_t len = strlen(r->status_line);

        /* Must be at least "NNN", the digits must parse back to
         * r->status, and any 4th char must be the mandated space. */
        if (len < 3
            || apr_strtoi64(r->status_line, &end, 10) != r->status
            || (end - 3) != r->status_line
            || (len >= 4 && !apr_isspace(r->status_line[3]))) {
            r->status_line = NULL;
        }
        /* Since we passed the above check, we know that length three
         * is equivalent to only a 3 digit numeric http status.
         * RFC2616 mandates a trailing space, let's add it.
         */
        else if (len == 3) {
            r->status_line = apr_pstrcat(r->pool, r->status_line, " ", NULL);
        }
    }
}
/*
* Determine the protocol to use for the response. Potentially downgrade
*
* also prepare r->status_line.
*/
/* Determine the protocol string to use for the response status line,
 * potentially downgrading to HTTP/1.0 for broken clients, and ensure
 * r->status_line is set.
 *
 * NOTE(review): the status-line defaulting, downgrade-1.0 and
 * force-response-1.0 condition expressions were elided in this snapshot;
 * reconstructed from the surviving comments and string literals --
 * confirm against upstream http_filters.c.
 */
static void basic_http_header_check(request_rec *r,
                                    const char **protocol)
{
    if (r->assbackwards) {
        /* no such thing as a response protocol */
        return;
    }

    if (!r->status_line) {
        r->status_line = ap_get_status_line(r->status);
    }
    else {
        validate_status_line(r);
        if (!r->status_line) {
            r->status_line = ap_get_status_line(r->status);
        }
    }

    /* Note that we must downgrade before checking for force responses. */
    if (r->proto_num > HTTP_VERSION(1, 0)
        && apr_table_get(r->subprocess_env, "downgrade-1.0")) {
        /* need to downgrade, because of our clients */
        r->proto_num = HTTP_VERSION(1, 0);
    }

    /* kludge around broken browsers when indicated by force-response-1.0
     */
    if (r->proto_num == HTTP_VERSION(1, 0)
        && apr_table_get(r->subprocess_env, "force-response-1.0")) {
        *protocol = "HTTP/1.0";
    }
    else {
        *protocol = AP_SERVER_PROTOCOL;
    }
}
const char *protocol)
{
const char *proxy_date = NULL;
const char *us = ap_get_server_banner();
if (r->assbackwards) {
/* there are no headers to send */
return;
}
/* Output the HTTP/1.x Status-Line and the Date and Server fields */
{
char *tmp;
}
#else
#endif
/*
* keep the set-by-proxy server and date headers, otherwise
* generate a new server header / date header
*/
if (r->proxyreq != PROXYREQ_NONE) {
if (!proxy_date) {
/*
* proxy_date needs to be const. So use date for the creation of
* our own Date header and pass it over to proxy_date later to
* avoid a compiler warning.
*/
}
}
else {
}
if (server)
if (APLOGrtrace3(r)) {
"Response sent with status %d%s",
r->status,
/*
* Date and Server are less interesting, use TRACE5 for them while
* using TRACE4 for the other headers.
*/
if (server)
server);
}
/* unset so we don't send them again */
if (server) {
}
}
{
const char *protocol;
}
{
}
{
int rv;
apr_bucket *b;
int body;
apr_size_t bodylen = 0;
if (r->method_number != M_TRACE) {
return DECLINED;
}
/* Get the original request */
while (r->prev) {
r = r->prev;
}
"TRACE denied by server configuration");
return HTTP_METHOD_NOT_ALLOWED;
}
/* XXX: should be = REQUEST_CHUNKED_PASS */
else
if (rv == HTTP_REQUEST_ENTITY_TOO_LARGE)
"TRACE with a request body is not allowed");
return rv;
}
if (ap_should_client_block(r)) {
if (r->remaining > 0) {
if (r->remaining > 65536) {
"Extended TRACE request bodies cannot exceed 64k\n");
return HTTP_REQUEST_ENTITY_TOO_LARGE;
}
/* always 32 extra bytes to catch chunk header exceptions */
}
else {
/* Add an extra 8192 for chunk headers */
bodybuf = 73730;
}
/* only while we have enough for a chunked header */
}
/* discard_rest_of_request_body into our buffer */
;
"Extended TRACE request bodies cannot exceed 64k\n");
return HTTP_REQUEST_ENTITY_TOO_LARGE;
}
if (res < 0) {
return HTTP_BAD_REQUEST;
}
}
ap_set_content_type(r, "message/http");
/* Now we recreate the request, and echo it back */
{
char *tmp;
}
#else
#endif
apr_table_do((int (*) (void *, const char *, const char *))
/* If configured to accept a body, echo the body */
if (bodylen) {
}
return DONE;
}
typedef struct header_filter_ctx {
int headers_sent;
{
request_rec *r = f->r;
conn_rec *c = r->connection;
const char *clheader;
const char *protocol;
apr_bucket *e;
const char *ctype;
AP_DEBUG_ASSERT(!r->main);
if (r->header_only) {
if (!ctx) {
}
else if (ctx->headers_sent) {
return OK;
}
}
for (e = APR_BRIGADE_FIRST(b);
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_NEXT(e))
{
if (AP_BUCKET_IS_ERROR(e) && !eb) {
continue;
}
/*
* If we see an EOC bucket it is a signal that we should get out
* of the way doing nothing.
*/
if (AP_BUCKET_IS_EOC(e)) {
return ap_pass_brigade(f->next, b);
}
}
if (eb) {
int status;
return AP_FILTER_ERROR;
}
if (r->assbackwards) {
r->sent_bodyct = 1;
return ap_pass_brigade(f->next, b);
}
/*
* Now that we are ready to send a response, we need to combine the two
* header field tables into a single table. If we don't do this, our
* later attempts to set or unset a given fieldname might be bypassed.
*/
if (!apr_is_empty_table(r->err_headers_out)) {
r->headers_out);
}
/*
* Remove the 'Vary' header field if the client can't handle it.
* Since this will have nasty effects on HTTP/1.1 caches, force
* the response into HTTP/1.0 mode.
*
* Note: the force-response-1.0 should come before the call to
* basic_http_header_check()
*/
}
else {
fixup_vary(r);
}
/*
* Now remove any ETag response header field if earlier processing
* says so (such as a 'FileETag None' directive).
*/
}
/* determine the protocol and whether we should use keepalives. */
ap_set_keepalive(r);
if (r->chunked) {
}
if (ctype) {
}
if (r->content_encoding) {
r->content_encoding);
}
if (!apr_is_empty_array(r->content_languages)) {
int i;
char *token;
for (i = 0; i < r->content_languages->nelts; ++i) {
break;
}
if (i == r->content_languages->nelts) {
}
}
}
/*
* Control cachability for non-cachable responses if not already set by
* some other part of the server configuration.
*/
}
/* This is a hack, but I can't find anyway around it. The idea is that
* we don't want to send out 0 Content-Lengths if it is a head request.
* This happens when modules try to outsmart the server, and return
* if they see a HEAD request. Apache 1.3 handlers were supposed to
* just return in that situation, and the core handled the HEAD. In
* 2.0, if a handler returns, then the core sends an EOS bucket down
* the filter stack, and the content-length filter computes a C-L of
* zero and that gets put in the headers, and we end up sending a
* zero C-L to the client. We can't just remove the C-L filter,
* because well behaved 2.0 handlers will send their data down the stack,
* and we will compute a real C-L for the head request. RBB
*/
if (r->header_only
}
if (r->status == HTTP_NOT_MODIFIED) {
apr_table_do((int (*)(void *, const char *, const char *)) form_header_field,
(void *) &h, r->headers_out,
"Connection",
"Keep-Alive",
"ETag",
"Content-Location",
"Expires",
"Cache-Control",
"Vary",
"Warning",
"WWW-Authenticate",
"Proxy-Authenticate",
"Set-Cookie",
"Set-Cookie2",
NULL);
}
else {
send_all_header_fields(&h, r);
}
if (r->header_only) {
return OK;
}
if (r->chunked) {
/* We can't add this filter until we have already sent the headers.
* If we add it before this point, then the headers will be chunked
* as well, and that is just wrong.
*/
}
/* Don't remove this filter until after we have added the CHUNK filter.
* Otherwise, f->next won't be the CHUNK filter and thus the first
* brigade won't be chunked properly.
*/
return ap_pass_brigade(f->next, b);
}
/* In HTTP/1.1, any method can have a body. However, most GET handlers
* wouldn't know what to do with a request body if they received one.
* This helper routine tests for and reads any message body in the request,
* simply discarding whatever it receives. We need to do this because
* failing to read the request body would cause it to be interpreted
* as the next request on a persistent connection.
*
* Since we return an error status if the request is malformed, this
* routine should be called at the beginning of a no-body handler, e.g.,
*
* if ((retval = ap_discard_request_body(r)) != OK) {
* return retval;
* }
*/
{
/* Sometimes we'll get in a state where the input handling has
* detected an error where we want to drop the connection, so if
* that's the case, don't read the data as that is what we're trying
* to avoid.
*
* This function is also a no-op on a subrequest.
*/
ap_status_drops_connection(r->status)) {
return OK;
}
seen_eos = 0;
do {
if (rv != APR_SUCCESS) {
/* FIXME: If we ever have a mapping from filters (apr_status_t)
* to HTTP error codes, this would be a good place for them.
*
* If we received the special case AP_FILTER_ERROR, it means
* that the filters have already handled this error.
* Otherwise, we should assume we have a bad request.
*/
if (rv == AP_FILTER_ERROR) {
return rv;
}
else {
return HTTP_BAD_REQUEST;
}
}
{
const char *data;
if (APR_BUCKET_IS_EOS(bucket)) {
seen_eos = 1;
break;
}
/* These are metadata buckets. */
continue;
}
/* We MUST read because in case we have an unknown-length
* bucket or one that morphs, we want to exhaust it.
*/
if (rv != APR_SUCCESS) {
return HTTP_BAD_REQUEST;
}
}
} while (!seen_eos);
return OK;
}
/* Here we deal with getting the request message body from the client.
* Whether or not the request contains a body is signaled by the presence
* of a non-zero Content-Length or by a Transfer-Encoding: chunked.
*
* Note that this is more complicated than it was in Apache 1.1 and prior
* versions, because chunked support means that the module does less.
*
* The proper procedure is this:
*
* 1. Call ap_setup_client_block() near the beginning of the request
* handler. This will set up all the necessary properties, and will
* return either OK, or an error code. If the latter, the module should
* return that error code. The second parameter selects the policy to
* apply if the request message indicates a body, and how a chunked
* transfer-coding should be interpreted. Choose one of
*
* REQUEST_NO_BODY Send 413 error if message has any body
* REQUEST_CHUNKED_ERROR Send 411 error if body without Content-Length
* REQUEST_CHUNKED_DECHUNK If chunked, remove the chunks for me.
* REQUEST_CHUNKED_PASS If chunked, pass the chunk headers with body.
*
* In order to use the last two options, the caller MUST provide a buffer
* large enough to hold a chunk-size line, including any extensions.
*
* 2. When you are ready to read a body (if any), call ap_should_client_block().
* This will tell the module whether or not to read input. If it is 0,
* the module should assume that there is no message body to read.
*
* 3. Finally, call ap_get_client_block in a loop. Pass it a buffer and its size.
* It will put data into the buffer (not necessarily a full buffer), and
* return the length of the input block. When it is done reading, it will
* return 0 if EOF, or -1 if there was an error.
* If an error occurs on input, we force an end to keepalive.
*
* This step also sends a 100 Continue response to HTTP/1.1 clients if appropriate.
*/
{
r->read_body = read_policy;
r->read_chunked = 0;
r->remaining = 0;
if (tenc) {
"Unknown Transfer-Encoding %s", tenc);
return HTTP_NOT_IMPLEMENTED;
}
if (r->read_body == REQUEST_CHUNKED_ERROR) {
"chunked Transfer-Encoding forbidden: %s", r->uri);
}
r->read_chunked = 1;
}
else if (lenp) {
char *endstr;
r->remaining = 0;
"Invalid Content-Length");
return HTTP_BAD_REQUEST;
}
}
if ((r->read_body == REQUEST_NO_BODY)
&& (r->read_chunked || (r->remaining > 0))) {
return HTTP_REQUEST_ENTITY_TOO_LARGE;
}
#ifdef AP_DEBUG
{
/* Make sure ap_getline() didn't leave any droppings. */
}
#endif
return OK;
}
{
/* First check if we have already read the request body */
return 0;
}
return 1;
}
/* get_client_block is called in a loop to get the request message body.
* This is quite simple if the client includes a content-length
* (the normal case), but gets messy if the body is chunked. Note that
* r->remaining is used to maintain state across calls and that
* r->read_length is the total number of bytes given to the caller
* across all invocations. It is messy because we have to be careful not
* to read past the data provided by the client, since these reads block.
* Returns 0 on End-of-body, -1 on error or premature chunk end.
*
*/
{
return 0;
}
return -1;
}
/* We lose the failure code here. This is why ap_get_client_block should
* not be used.
*/
if (rv != APR_SUCCESS) {
/* if we actually fail here, we want to just return and
* stop trying to read data from the client.
*/
return -1;
}
/* If this fails, it means that a filter is written incorrectly and that
* it needs to learn how to properly handle APR_BLOCK_READ requests by
* returning data when requested.
*/
/* Check to see if EOS in the brigade.
*
* If so, we have to leave a nugget for the *next* ap_get_client_block
* call to return 0.
*/
if (r->read_chunked) {
r->remaining = -1;
}
else {
r->remaining = 0;
}
}
if (rv != APR_SUCCESS) {
return -1;
}
/* XXX yank me? */
r->read_length += bufsiz;
return bufsiz;
}
/* Context struct for ap_http_outerror_filter */
typedef struct {
int seen_eoc;
/* Filter to handle any error buckets on output */
{
request_rec *r = f->r;
apr_bucket *e;
/* Create context if none is present */
if (!ctx) {
}
for (e = APR_BRIGADE_FIRST(b);
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_NEXT(e))
{
if (AP_BUCKET_IS_ERROR(e)) {
/*
* Start of error handling state tree. Just one condition
* right now :)
*/
/* stream aborted and we have not ended it yet */
}
continue;
}
/* Detect EOC buckets and memorize this in the context. */
if (AP_BUCKET_IS_EOC(e)) {
}
}
/*
* Remove all data buckets that are in a brigade after an EOC bucket
* was seen, as an EOC bucket tells us that no (further) resource
* and protocol data should go out to the client. OTOH meta buckets
* are still welcome as they might trigger needed actions down in
* the chain (e.g. in network filters like SSL).
* Remark 1: It is needed to dump ALL data buckets in the brigade
* since an filter in between might have inserted data
* buckets BEFORE the EOC bucket sent by the original
* sender and we do NOT want this data to be sent.
* Remark 2: Dumping all data buckets here does not necessarily mean
* that no further data is send to the client as:
* 1. Network filters like SSL can still be triggered via
* meta buckets to talk with the client e.g. for a
* clean shutdown.
* 2. There could be still data that was buffered before
* down in the chain that gets flushed by a FLUSH or an
* EOS bucket.
*/
for (e = APR_BRIGADE_FIRST(b);
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_NEXT(e))
{
if (!APR_BUCKET_IS_METADATA(e)) {
}
}
}
return ap_pass_brigade(f->next, b);
}