/* mod_include.c revision f4c472b8dce3c2e559232dbb5b27ed2466922ea4 */
/* ====================================================================
* The Apache Software License, Version 1.1
*
* Copyright (c) 2000-2002 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Apache" and "Apache Software Foundation" must
* not be used to endorse or promote products derived from this
* software without prior written permission. For written
* permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* nor may "Apache" appear in their name, without prior written
* permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
* Portions of this software are based upon public domain software
* originally written at the National Center for Supercomputing Applications,
* University of Illinois, Urbana-Champaign.
*/
/*
* http_include.c: Handles the server-parsed HTML documents
*
* Original by Rob McCool; substantial fixups by David Robinson;
* incorporated into the Apache module framework by rst.
*
*/
#include "apr.h"
#include "apr_strings.h"
#include "apr_thread_proc.h"
#include "apr_hash.h"
#include "apr_user.h"
#include "apr_lib.h"
#include "apr_optional.h"
#define APR_WANT_STRFUNC
#include "apr_want.h"
#define CORE_PRIVATE
#include "ap_config.h"
#include "util_filter.h"
#include "httpd.h"
#include "http_config.h"
#include "http_core.h"
#include "http_request.h"
#include "http_core.h"
#include "http_protocol.h"
#include "http_log.h"
#include "http_main.h"
#include "util_script.h"
#include "http_core.h"
#include "mod_include.h"
#include "util_ebcdic.h"
static apr_hash_t *include_hash;
/*****************************************************************
*
* XBITHACK. Sigh... NB it's configurable per-directory; the compile-time
* option only changes the default.
*/
enum xbithack {
/* NOTE(review): the enumerator list is missing from this excerpt.
 * The DEFAULT_XBITHACK definitions below reference xbithack_off and
 * xbithack_full, so at least those enumerators belong here -- confirm
 * against the full source before relying on this type. */
};
/* Precomputed state for the BNDM (Backward Nondeterministic Dawg
 * Matching) substring search used by the SSI parser.  Built once per
 * pattern (see the precompile routine below, which fills T by OR-ing a
 * shifted bit per pattern byte and sets x from the shift mask) and then
 * consulted on every search probe.
 */
struct bndm_t {
unsigned int T[256];
/* T[b]: bit mask of the positions at which byte value b occurs in the
 * pattern (one bit per pattern position). */
unsigned int x;
/* x: mask covering the pattern positions; derived as (shift - 1) by the
 * precompile step and used to (re)seed the match state 'd'. */
} ;
typedef struct {
char *default_error_msg;
char *default_time_fmt;
typedef struct {
char *default_start_tag;
char *default_end_tag;
int start_tag_len;
char *undefinedEcho;
int undefinedEchoLen;
#ifdef XBITHACK
#define DEFAULT_XBITHACK xbithack_full
#else
#define DEFAULT_XBITHACK xbithack_off
#endif
/* ------------------------ Environment function -------------------------- */
/* Sentinel value to store in subprocess_env for items that
*/
static const char lazy_eval_sentinel;
#define LAZY_VALUE (&lazy_eval_sentinel)
{
apr_table_t *e = r->subprocess_env;
char *t;
}
apr_table_setn(e, "DOCUMENT_NAME", ++t);
}
else {
}
if (r->args) {
apr_table_setn(e, "QUERY_STRING_UNESCAPED",
}
}
{
char *val;
}
}
}
val = "<unknown>";
}
}
else {
}
if (val) {
}
return val;
}
const char *var)
{
const char *val;
/* Handle $0 .. $9 from the last regex evaluated.
* The choice of returning NULL strings on not-found,
* v.s. empty strings on an empty match is deliberate.
*/
return NULL;
}
else {
return NULL;
}
}
}
else {
if (val == LAZY_VALUE)
}
return val;
}
/* --------------------------- Parser functions --------------------------- */
/* This is an implementation of the BNDM search algorithm.
*
* Fast and Flexible String Matching by Combining Bit-parallelism and
* Suffix Automata (2001)
* Gonzalo Navarro, Mathieu Raffinot
*
*
* Initial code submitted by Sascha Schumann.
*/
/* Precompile the bndm_t data structure. */
{
unsigned int x;
memset(t->T, 0, sizeof(unsigned int) * 256);
t->T[(unsigned char) *n++] |= x;
t->x = x - 1;
}
/* Implements the BNDM search algorithm (as described above).
*
* n - the pattern to search for
* nl - length of the pattern to search for
* h - the string to look in
* hl - length of the string to look for
* t - precompiled bndm structure against the pattern
*
* Returns the index of the character that starts the first match, or hl
* if no match is found.
*/
{
const char *skip;
unsigned int *T, x, d;
T = t->T;
x = t->x;
while (p < he) {
skip = p;
d = x;
do {
d &= T[(unsigned char) *p--];
if (!d) {
break;
}
if ((d & 1)) {
if (p != pi)
skip = p;
else
return p - h + 1;
}
d >>= 1;
} while (d);
}
return hl;
}
/* We've now found a start sequence tag... */
{
/* We want to split the bucket at the '<'. */
ctx->tag_length = 0;
/* If tagStart indexes the end of the bucket, then tag_start_bucket
* should be the next bucket
*/
}
else {
ctx->tag_start_index = 0;
}
if (ctx->head_start_index > 0) {
/* Split the bucket with the start of the tag in it */
/* If it was a one bucket match */
}
ctx->head_start_index = 0;
}
return ctx->head_start_bucket;
}
/* This function returns either a pointer to the split bucket containing the
* first byte of the BEGINNING_SEQUENCE (after finding a complete match) or it
* returns NULL if no match found.
*/
{
const char *c;
const char *buf;
*do_cleanup = 0;
do {
apr_status_t rv = 0;
int read_done = 0;
if (APR_BUCKET_IS_EOS(dptr)) {
break;
}
#if 0
/* XXX the bucket flush support is commented out for now
* because it was causing a segfault */
if (APR_BUCKET_IS_FLUSH(dptr)) {
}
else
#endif /* 0 */
}
else if (ctx->bytes_parsed > 0) {
read_done = 1;
if (APR_STATUS_IS_EAGAIN(rv)) {
}
}
if (ctx->output_now) {
if (ctx->head_start_index > 0) {
ctx->head_start_index = 0;
}
else {
start_bucket = dptr;
}
return start_bucket;
}
if (!read_done) {
}
if (!APR_STATUS_IS_SUCCESS(rv)) {
return NULL;
}
if (len == 0) { /* end of pipe? */
break;
}
/* Set our buffer to use. */
c = buf;
/* The last bucket had a left over partial match that we need to
* complete.
*/
{
{
c++;
}
{
}
continue;
}
/* False alarm...
*/
/* We know we are at the beginning of this bucket so
* we can just prepend the saved bytes from the
* ssi_tag_brigade (which empties the ssi_tag_brigade)
* and continue processing.
* We do not need to set do_cleanup because the
* prepend takes care of that.
*/
ctx->head_start_index = 0;
}
if (len)
{
{
}
}
/* Consider the case where we have <!-- at the end of the bucket. */
}
else {
c = buf;
}
{
}
c++;
ctx->bytes_parsed++;
}
{
/* DO NOT INCREMENT c IN THIS BLOCK!
* Don't increment bytes_parsed either.
* This block is just to reset the indexes and
* pointers related to parsing the tag start_sequence.
* The value c needs to be checked again to handle
* the case where we find "<<!--#". We are now
* looking at the second "<" and need to restart
* the start_sequence checking from parse_pos = 0.
* do_cleanup causes the stored bytes in ssi_tag_brigade
* to be forwarded on and cleaned up. We may not be
* able to just prepend the ssi_tag_brigade because
* we may have advanced too far before we noticed this
* case, so just flag it and clean it up later.
*/
*do_cleanup = 1;
ctx->head_start_index = 0;
}
else {
c++;
ctx->bytes_parsed++;
}
}
return NULL;
}
{
const char *c;
const char *buf;
const char *start;
do {
apr_status_t rv = 0;
int read_done = 0;
if (APR_BUCKET_IS_EOS(dptr)) {
break;
}
#if 0
/* XXX the bucket flush support is commented out for now
* because it was causing a segfault */
if (APR_BUCKET_IS_FLUSH(dptr)) {
}
else
#endif /* 0 */
}
else if (ctx->bytes_parsed > 0) {
read_done = 1;
if (APR_STATUS_IS_EAGAIN(rv)) {
}
}
if (ctx->output_now) {
/* gonna start over parsing the directive next time through */
ctx->directive_length = 0;
ctx->tag_length = 0;
}
return dptr;
}
if (!read_done) {
}
if (!APR_STATUS_IS_SUCCESS(rv)) {
return NULL;
}
if (len == 0) { /* end of pipe? */
break;
}
}
else {
c = buf;
}
start = c;
}
/* We want to split the bucket at the '>'. The
* end of the END_SEQUENCE is in the current bucket.
* The beginning might be in a previous bucket.
*/
c++;
return (tmp_buck);
}
}
else {
if (ctx->tag_length == 0) {
if (!apr_isspace(*c)) {
const char *tmp = c;
do {
c++;
*c != *str);
continue;
}
}
else {
if (!apr_isspace(*c)) {
ctx->directive_length++;
}
else {
}
ctx->tag_length++;
}
}
const char *tmp = c;
do {
c++;
continue;
}
else {
/* The reason for this, is that we need to make sure
* that we catch cases like --->. This makes the
* second check after the original check fails.
* If parse_pos was already 0 then we already checked
* this.
*/
if (*c == str[0]) {
}
else {
ctx->tag_length++;
}
else {
}
ctx->tail_start_index = 0;
}
}
}
}
c++;
}
return NULL;
}
/* This function culls through the buckets that have been set aside in the
* ssi_tag_brigade and copies just the directive part of the SSI tag (none
* of the start and end delimiter bytes are copied).
*/
request_rec *r,
char *tmp_buf,
{
int done = 0;
const char *tmp_from;
/* If the tag length is longer than the tmp buffer, allocate space. */
return (APR_ENOMEM);
}
} /* Else, just use the temp buffer. */
else {
}
/* Prime the pump. Start at the beginning of the tag... */
/* Read the bucket... */
/* Adjust the pointer to start at the tag within the bucket... */
}
/* Loop through the buckets from the tag_start_bucket until before
* the tail_start_bucket copying the contents into the buffer.
*/
do {
done = 1;
}
else {
/* Adjust the count to stop at the beginning of the tail. */
}
}
} while ((!done) &&
return (APR_SUCCESS);
}
/*
* decodes a string containing html entities or numeric character references.
* 's' is overwritten with the decoded string.
* If 's' is syntactically incorrect, then the followed fixups will be made:
* unknown entities will be left undecoded;
* references to unused numeric characters will be deleted.
* In particular, � will not be decoded, but will be deleted.
*
* drtr
*/
/* maximum length of any ISO-LATIN-1 HTML entity name. */
#define MAXENTLEN (6)
/* The following is a shrinking transformation, therefore safe. */
/* Decode HTML entities and numeric character references in 's',
 * in place.  Decoding only ever shrinks the string (see the comment
 * above), so the write pointer never overtakes the read pointer.
 */
static void decodehtml(char *s)
{
int val, i, j;
char *p;
const char *ents;
/* NOTE(review): the declarator for the initializer below is missing
 * from this excerpt.  It is a table of entity-name/replacement-byte
 * strings indexed by entity-name length (0..MAXENTLEN, per the macro
 * above); each entry concatenates "name" + one octal replacement byte
 * pairs -- confirm against the full source. */
{
NULL, /* 0 */
NULL, /* 1 */
"lt\074gt\076", /* 2 */
"amp\046ETH\320eth\360", /* 3 */
"quot\042Auml\304Euml\313Iuml\317Ouml\326Uuml\334auml\344euml\353\
iuml\357ouml\366uuml\374yuml\377", /* 4 */
"Acirc\302Aring\305AElig\306Ecirc\312Icirc\316Ocirc\324Ucirc\333\
THORN\336szlig\337acirc\342aring\345aelig\346ecirc\352icirc\356ocirc\364\
ucirc\373thorn\376", /* 5 */
"Agrave\300Aacute\301Atilde\303Ccedil\307Egrave\310Eacute\311\
Igrave\314Iacute\315Ntilde\321Ograve\322Oacute\323Otilde\325Oslash\330\
Ugrave\331Uacute\332Yacute\335agrave\340aacute\341atilde\343ccedil\347\
egrave\350eacute\351igrave\354iacute\355ntilde\361ograve\362oacute\363\
otilde\365oslash\370ugrave\371uacute\372yacute\375" /* 6 */
};
/* Do a fast scan through the string until we find anything
 * that needs more complicated handling
 */
for (; *s != '&'; s++) {
if (*s == '\0') {
return; /* no '&' at all: nothing to decode */
}
}
/* Copy-and-decode loop: p trails s and receives the decoded bytes. */
for (p = s; *s != '\0'; s++, p++) {
if (*s != '&') {
*p = *s;
continue;
}
/* find end of entity */
for (i = 1; s[i] != ';' && s[i] != '\0'; i++) {
continue;
}
if (s[i] == '\0') { /* treat as normal data */
*p = *s;
continue;
}
/* is it numeric ? */
if (s[1] == '#') {
}
/* NOTE(review): the numeric-reference parsing that computes 'val'
 * and range-checks it is missing from this excerpt; the fragments
 * below are its surviving tail (unused code points are deleted,
 * valid ones emit the raw byte). */
s += i;
p--; /* no data to output */
}
else {
*p = RAW_ASCII_CHAR(val);
}
}
else {
/* Named entity: j indexes the last byte of the name.
 * NOTE(review): the table-scan loop over 'ents' is missing from
 * this excerpt; only its length check and exit paths remain. */
j = i - 1;
/* wrong length */
*p = '&';
continue; /* skip it */
}
break;
}
}
if (*ents == '\0') {
*p = '&'; /* unknown */
}
else {
/* Matched: the replacement byte sits right after the name. */
*p = RAW_ASCII_CHAR(((const unsigned char *) ents)[j]);
s += i;
}
}
}
*p = '\0'; /* terminate the (possibly shortened) result */
}
/*
* Extract the next tag name and value.
* If there are no more tags, set the tag name to NULL.
* The tag value is html decoded if dodecode is non-zero.
* The tag value may be NULL if there is no tag value..
* format:
* [WS]<Tag>[WS]=[WS]['|"|`]<Value>[['|"|`|]|WS]
*/
{
char *c = ctx->curr_tag_pos;
int shift_val = 0;
char term = '\0';
return;
}
*tag = c; /* First non-whitespace character (could be NULL). */
while (apr_islower(*c)) {
c++; /* Optimization for the common case where the tag */
} /* is already lowercase */
*c = apr_tolower(*c); /* find end of tag, lowercasing as we go... */
c++;
}
}
ctx->curr_tag_pos = c;
return; /* We have found the end of the buffer. */
} /* We might have a tag, but definitely no value. */
if (*c == '=') {
*c++ = '\0'; /* Overwrite the '=' with a terminating byte after tag. */
}
else { /* Try skipping WS to find the '='. */
*c++ = '\0'; /* Terminate the tag... */
/* There needs to be an equal sign if there's a value. */
if (*c != '=') {
ctx->curr_tag_pos = c;
return; /* There apparently was no value. */
}
else {
c++; /* Skip the equals sign. */
}
}
if (*c == '"' || *c == '\'' || *c == '`') {
/* Allow quoted values for space inclusion.
* NOTE: This does not pass the quotes on return.
*/
term = *c++;
}
*tag_val = c;
if (!term) {
while (!apr_isspace(*c) && (*c != '\0')) {
c++;
}
}
else {
/* Quickly scan past the string until we reach
* either the end of the tag or a backslash. If
* we find a backslash, we have to switch to the
* more complicated parser loop that follows.
*/
c++;
}
if (*c == '\\') {
do {
/* Accept \" (or ' or `) as valid quotation of string.
*/
if (*c == '\\') {
/* Overwrite the "\" during the embedded
* escape sequence of '"'. "\'" or '`'.
* Shift bytes from here to next delimiter.
*/
c++;
if (*c == term) {
shift_val++;
}
if (shift_val > 0) {
*(c-shift_val) = *c;
}
if (*c == '\0') {
break;
}
}
c++;
if (shift_val > 0) {
*(c-shift_val) = *c;
}
} while ((*c != term) && (*c != '\0'));
}
}
ctx->curr_tag_pos = ++c;
if (dodecode) {
}
return;
}
/* initial buffer size for power-of-two allocator in ap_ssi_parse_string */
#define PARSE_STRING_INITIAL_SIZE 64
/*
* Do variable substitution on strings
* (Note: If out==NULL, this function allocs a buffer for the resulting
* string from r->pool. The return value is the parsed string)
*/
{
char ch;
char *next;
char *end_out;
/* allocate an output buffer if needed */
if (!out) {
}
}
else {
}
/* leave room for nul terminator */
switch (ch) {
case '\\':
/* double the buffer size */
char *new_out;
if (new_out_size > length) {
}
}
else {
/* truncated */
*next = '\0';
return out;
}
}
if (*in == '$') {
}
else {
}
break;
case '$':
{
const char *start_of_var_name;
char *end_of_var_name; /* end of var name + 1 */
char tmp_store;
apr_size_t l;
/* guess that the expansion won't happen */
if (*in == '{') {
++in;
0, r, "Missing '}' on variable \"%s\"",
*next = '\0';
return out;
}
end_of_var_name = (char *)temp_end;
++in;
}
else {
++in;
}
end_of_var_name = (char *)temp_end;
}
/* what a pain, too bad there's no table_getn where you can
* pass a non-nul terminated string */
l = end_of_var_name - start_of_var_name;
if (l != 0) {
*end_of_var_name = '\0';
if (val) {
}
else if (leave_name) {
}
else {
/* no expansion to be done */
break;
}
}
else {
/* zero-length variable name causes just the $ to be
* copied */
l = 1;
}
/* increase the buffer size to accommodate l more chars */
char *new_out;
do {
new_out_size *= 2;
} while (new_out_size < current_length + l);
if (new_out_size > length) {
}
}
next += l;
break;
}
default:
/* double the buffer size */
char *new_out;
if (new_out_size > length) {
}
}
else {
/* truncated */
*next = '\0';
return out;
}
}
break;
}
}
*next = '\0';
return out;
}
/* --------------------------- Action handlers ---------------------------- */
/* ensure that path is relative, and does not contain ".." elements
* essentially ensure that it does not match the regex:
* (^/|(^|/)\.\.(/|$))
* XXX: Simply replace with apr_filepath_merge
*/
static int is_only_below(const char *path)
{
/* Returns 1 if 'path' is relative and cannot escape the current
 * directory; 0 as soon as any absolute prefix or ".." escape is seen. */
#ifdef HAVE_DRIVE_LETTERS
/* NOTE(review): the condition guarding this return is missing from
 * this excerpt (presumably a drive-letter "x:" check -- confirm). */
return 0;
#endif
#ifdef NETWARE
/* NOTE(review): guard condition missing here as well -- confirm
 * against the full source. */
return 0;
#endif
if (path[0] == '/') {
return 0; /* absolute paths are never "only below" */
}
while (*path) {
int dots = 0;
/* NOTE(review): the dot-counting loop and the checks that reject
 * ".." (and, per the comment below, Windows "..." forms) are
 * incomplete in this excerpt; only fragments remain. */
++dots;
#if defined(WIN32)
/* If the name is canonical this is redundant
 * but in security, redundancy is worthwhile.
 * Does OS2 belong here (accepts ... for ..)?
 */
return 0;
#else
return 0;
#endif
/* Advance to either the null byte at the end of the
 * string or the character right after the next slash,
 * whichever comes first
 */
continue;
}
}
return 1; /* no absolute prefix and no ".." escape found */
}
{
char *parsed_string;
*inserted_head = NULL;
while (1) {
return (0);
}
else {
return (1);
}
}
if (rc != APR_SUCCESS) {
return rc;
}
MAX_STRING_LEN, 0);
if (tag[0] == 'f') {
/* XXX: Port to apr_filepath_merge
* be safe; only files in this directory or below allowed
*/
if (!is_only_below(parsed_string)) {
error_fmt = "unable to include file \"%s\" "
"in parsed file %s";
}
else {
}
}
else {
}
error_fmt = "unable to include \"%s\" in parsed file %s";
}
rr->content_type &&
error_fmt = "unable to include potential exec \"%s\" "
"in parsed file %s";
}
/* try to avoid recursive includes. We do this by walking
* up the r->main list of subrequests, and at each level
* walking back through any internal redirects. At each
* step, we compare the filenames and the URIs.
*
* The filename comparison catches a recursive include
* with an ever-changing URL, eg.
* <!--#include virtual=
* "$REQUEST_URI/$QUERY_STRING?$QUERY_STRING/x" -->
* which, although they would eventually be caught because
* we have a limit on the length of files, etc., can
* recurse for a while.
*
* The URI comparison catches the case where the filename
* is changed while processing the request, so the
* current name is never the same as any previous one.
* This can happen with "DocumentRoot /foo" when you
* request "/" on the server and it includes "/".
* This only applies to modules such as mod_dir that
* (somewhat improperly) mess with r->filename outside
* of a filename translation phase.
*/
int founddupe = 0;
request_rec *p;
request_rec *q;
((*q->uri == '/') &&
{
founddupe = 1;
break;
}
}
}
if (p != NULL) {
error_fmt = "Recursive include of \"%s\" "
"in parsed file %s";
}
}
/* See the Kludge in send_parsed_file for why */
/* Basically, it puts a bread crumb in here, then looks */
/* for the crumb later to see if its been here. */
if (rr)
&include_module, r);
error_fmt = "unable to include \"%s\" in parsed file %s";
}
if (error_fmt) {
}
/* destroy the sub request */
}
}
else {
"unknown parameter \"%s\" to tag include in %s",
}
}
}
return 0;
}
{
*inserted_head = NULL;
while (1) {
return 1;
}
else {
return 0;
}
}
conn_rec *c = r->connection;
const char *val =
get_include_var(r, ctx,
MAX_STRING_LEN, 0));
if (val) {
switch(encode) {
case E_NONE:
break;
case E_URL:
break;
case E_ENTITY:
break;
}
r->pool, c->bucket_alloc);
}
else {
r->pool, c->bucket_alloc);
}
if (*inserted_head == NULL) {
}
}
else {
"unknown value \"%s\" to parameter \"encoding\" of "
}
}
else {
"unknown parameter \"%s\" in tag echo of %s",
}
}
}
return 0;
}
/* error and tf must point to a string with room for at
* least MAX_STRING_LEN characters
*/
{
char *parsed_string;
*inserted_head = NULL;
while (1) {
return 0; /* Reached the end of the string. */
}
else {
return 1; /* tags must have values. */
}
}
}
MAX_STRING_LEN, 0);
}
}
MAX_STRING_LEN, 0);
}
MAX_STRING_LEN, 0);
}
}
}
else {
"unknown parameter \"%s\" to tag config in %s",
}
}
}
return 0;
}
{
int ret=0;
/* XXX: Port to apr_filepath_merge
* be safe; only files in this directory or below allowed
*/
if (!is_only_below(tag_val)) {
error_fmt = "unable to access file \"%s\" "
"in parsed file %s";
}
else {
/* note: it is okay to pass NULL for the "next filter" since
we never attempt to "run" this sub request. */
&& rv != APR_INCOMPLETE) {
error_fmt = "unable to get information about \"%s\" "
"in parsed file %s";
}
}
else {
error_fmt = "unable to lookup information about \"%s\" "
"in parsed file %s";
}
}
if (error_fmt) {
ret = -1;
}
return ret;
}
/* note: it is okay to pass NULL for the "next filter" since
we never attempt to "run" this sub request. */
return 0;
}
else {
"unable to get information about \"%s\" "
"in parsed file %s",
return -1;
}
}
else {
"unknown parameter \"%s\" to tag %s in %s",
return -1;
}
}
{
char *parsed_string;
*inserted_head = NULL;
while (1) {
return 0;
}
else {
return 1;
}
}
else {
MAX_STRING_LEN, 0);
/* XXX: if we *know* we're going to have to copy the
* thing off of the stack anyway, why not palloc buff
* instead of sticking it on the stack; then we can just
* use a pool bucket and skip the copy
*/
char buff[50];
}
else {
int l, x, pos = 0;
char tmp_buff[50];
for (x = 0; x < l; x++) {
if (x && (!((l - x) % 3))) {
}
}
}
r->connection->bucket_alloc);
if (*inserted_head == NULL) {
}
}
else {
}
}
}
}
return 0;
}
request_rec *r, ap_filter_t *f,
{
char *parsed_string;
*inserted_head = NULL;
while (1) {
return 0;
}
else {
return 1;
}
}
else {
MAX_STRING_LEN, 0);
char *t_val;
r->connection->bucket_alloc);
if (*inserted_head == NULL) {
}
}
else {
}
}
}
}
return 0;
}
{
int regex_error;
"unable to compile pattern \"%s\"", rexp);
return -1;
}
}
return (!regex_error);
}
enum token_type {
};
/* One lexical token produced while tokenizing an SSI conditional
 * expression (used by the expression parser below). */
struct token {
enum token_type type;
/* Token category; the parser switches on values such as token_string,
 * token_re, token_eq, token_and, token_lbrace, etc. */
char* value;
/* Token text.  NOTE(review): buffer ownership/lifetime is not visible
 * in this excerpt -- confirm against the tokenizer before freeing. */
};
{
char ch;
int next = 0;
char qs = 0;
int tkn_fnd = 0;
/* Skip leading white space */
return (char *) NULL;
}
if (!apr_isspace(ch)) {
break;
}
}
if (ch == '\0') {
return (char *) NULL;
}
switch (ch) {
case '(':
return (string);
case ')':
return (string);
case '=':
return (string);
case '!':
if (*string == '=') {
return (string + 1);
}
else {
return (string);
}
case '\'':
/* already token->type == token_string */
qs = '\'';
break;
case '/':
qs = '/';
break;
case '|':
if (*string == '|') {
return (string + 1);
}
break;
case '&':
if (*string == '&') {
return (string + 1);
}
break;
case '>':
if (*string == '=') {
return (string + 1);
}
else {
return (string);
}
case '<':
if (*string == '=') {
return (string + 1);
}
else {
return (string);
}
default:
/* already token->type == token_string */
break;
}
/* We should only be here if we are in a string */
trailing null */
if (!qs) {
}
/*
* I used the ++string throughout this section so that string
* ends up pointing to the next token and I can just return it
*/
if (ch == '\\') {
tkn_fnd = 1;
}
else {
}
}
else {
if (!qs) {
if (apr_isspace(ch)) {
tkn_fnd = 1;
}
else {
switch (ch) {
case '(':
case ')':
case '=':
case '!':
case '<':
case '>':
tkn_fnd = 1;
break;
case '|':
tkn_fnd = 1;
}
break;
case '&':
tkn_fnd = 1;
}
break;
}
if (!tkn_fnd) {
}
}
}
else {
qs = 0;
tkn_fnd = 1;
string++;
}
else {
}
}
}
if (tkn_fnd) {
break;
}
}
/* If qs is still set, we have an unmatched quote */
if (qs) {
*unmatched = 1;
next = 0;
}
return (string);
}
/* there is an implicit assumption here that expr is at most MAX_STRING_LEN-1
* characters long...
*/
{
struct parse_node {
const char *parse;
char* buffer;
int retval = 0;
apr_size_t debug_pos = 0;
*was_error = 0;
*was_unmatched = 0;
return (0);
}
/* Create Parse Tree */
while (1) {
sizeof(struct parse_node));
(char *) NULL) {
break;
}
case token_string:
#ifdef DEBUG_INCLUDE
" Token: string (%s)\n",
#endif
break;
}
case token_string:
NULL);
break;
case token_eq:
case token_ne:
case token_and:
case token_or:
case token_lbrace:
case token_not:
case token_ge:
case token_gt:
case token_le:
case token_lt:
break;
default:
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
break;
case token_re:
#ifdef DEBUG_INCLUDE
" Token: regex (%s)\n",
#endif
break;
}
case token_eq:
case token_ne:
case token_and:
case token_or:
case token_lbrace:
case token_not:
break;
default:
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
break;
case token_and:
case token_or:
#ifdef DEBUG_INCLUDE
#endif
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
/* Percolate upwards */
case token_string:
case token_re:
case token_group:
case token_not:
case token_eq:
case token_ne:
case token_and:
case token_or:
case token_ge:
case token_gt:
case token_le:
case token_lt:
continue;
case token_lbrace:
break;
default:
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
break;
}
}
else {
}
break;
case token_not:
#ifdef DEBUG_INCLUDE
sizeof(" Token: not\n"));
debug_pos += sizeof(" Token: not\n");
#endif
break;
}
/* Percolate upwards */
case token_not:
case token_eq:
case token_ne:
case token_and:
case token_or:
case token_lbrace:
case token_ge:
case token_gt:
case token_le:
case token_lt:
break;
default:
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
}
}
else {
}
break;
case token_eq:
case token_ne:
case token_ge:
case token_gt:
case token_le:
case token_lt:
#ifdef DEBUG_INCLUDE
#endif
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
/* Percolate upwards */
case token_string:
case token_re:
case token_group:
continue;
case token_lbrace:
case token_and:
case token_or:
break;
case token_not:
case token_eq:
case token_ne:
case token_ge:
case token_gt:
case token_le:
case token_lt:
default:
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
break;
}
}
else {
}
break;
case token_rbrace:
#ifdef DEBUG_INCLUDE
sizeof (" Token: rbrace\n"));
debug_pos += sizeof (" Token: rbrace\n");
#endif
break;
}
}
"Unmatched ')' in \"%s\" in file %s",
*was_error = 1;
return retval;
}
break;
case token_lbrace:
#ifdef DEBUG_INCLUDE
sizeof (" Token: lbrace\n"));
debug_pos += sizeof (" Token: lbrace\n");
#endif
break;
}
/* Percolate upwards */
case token_not:
case token_eq:
case token_ne:
case token_and:
case token_or:
case token_lbrace:
case token_ge:
case token_gt:
case token_le:
case token_lt:
break;
case token_string:
case token_re:
case token_group:
default:
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
}
}
else {
}
break;
default:
break;
}
}
/* Evaluate Parse Tree */
case token_string:
#ifdef DEBUG_INCLUDE
sizeof (" Evaluate string\n"));
debug_pos += sizeof (" Evaluate string\n");
#endif
MAX_STRING_LEN, 0);
break;
case token_re:
"No operator before regex of expr \"%s\" in file %s",
*was_error = 1;
return retval;
case token_and:
case token_or:
#ifdef DEBUG_INCLUDE
#endif
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
case token_string:
NULL, MAX_STRING_LEN, 0);
break;
default:
continue;
}
}
case token_string:
NULL, MAX_STRING_LEN, 0);
break;
default:
continue;
}
}
#ifdef DEBUG_INCLUDE
#endif
}
else {
}
#ifdef DEBUG_INCLUDE
#endif
break;
case token_eq:
case token_ne:
#ifdef DEBUG_INCLUDE
#endif
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
NULL, MAX_STRING_LEN, 0);
NULL, MAX_STRING_LEN, 0);
#ifdef DEBUG_INCLUDE
" Re Compare (%s) with /%s/\n",
#endif
}
else {
#ifdef DEBUG_INCLUDE
" Compare (%s) with (%s)\n",
#endif
}
}
#ifdef DEBUG_INCLUDE
#endif
break;
case token_ge:
case token_gt:
case token_le:
case token_lt:
#ifdef DEBUG_INCLUDE
#endif
"Invalid expression \"%s\" in file %s",
*was_error = 1;
return retval;
}
NULL, MAX_STRING_LEN, 0);
NULL, MAX_STRING_LEN, 0);
#ifdef DEBUG_INCLUDE
" Compare (%s) with (%s)\n",
#endif
}
}
}
}
else {
}
#ifdef DEBUG_INCLUDE
#endif
break;
case token_not:
continue;
}
}
else {
}
#ifdef DEBUG_INCLUDE
#endif
break;
case token_group:
continue;
}
}
else {
}
#ifdef DEBUG_INCLUDE
#endif
break;
case token_lbrace:
"Unmatched '(' in \"%s\" in file %s",
*was_error = 1;
return retval;
case token_rbrace:
"Unmatched ')' in \"%s\" in file %s",
*was_error = 1;
return retval;
default:
"bad token type");
*was_error = 1;
return retval;
}
}
return (retval);
}
/*-------------------------------------------------------------------------*/
#ifdef DEBUG_INCLUDE
#define MAX_DEBUG_SIZE MAX_STRING_LEN
{ \
char cond_txt[] = "**** X conditional_status=\"0\"\n"; \
\
} \
\
} \
}
{ \
if (d_buf[0] != '\0') { \
\
} \
} \
}
#else
#define MAX_DEBUG_SIZE 10
#endif
/*-------------------------------------------------------------------------*/
/* pjr - These seem to allow expr="fred" expr="joe" where joe overwrites fred. */
{
char debug_buf[MAX_DEBUG_SIZE];
*inserted_head = NULL;
ctx->if_nesting_level++;
}
else {
while (1) {
"missing expr in if statement: %s",
r->filename);
return 1;
}
if (was_error) {
return 1;
}
if (was_unmatched) {
"\nUnmatched '\n", *inserted_head);
}
if (expr_ret) {
}
else {
}
" if");
ctx->if_nesting_level = 0;
return 0;
}
#ifdef DEBUG_INCLUDE
if (1) {
apr_size_t d_len = 0;
r->connection->bucket_alloc);
if (*inserted_head == NULL) {
}
}
#endif
}
else {
"unknown parameter \"%s\" to tag if in %s", tag,
r->filename);
}
}
}
return 0;
}
{
char debug_buf[MAX_DEBUG_SIZE];
*inserted_head = NULL;
if (!ctx->if_nesting_level) {
while (1) {
if (tag == '\0') {
" elif");
return (0);
}
"missing expr in elif statement: %s",
r->filename);
return (1);
}
if (was_error) {
return 1;
}
if (was_unmatched) {
"\nUnmatched '\n", *inserted_head);
}
if (expr_ret) {
}
else {
}
" elif");
return (0);
}
#ifdef DEBUG_INCLUDE
if (1) {
apr_size_t d_len = 0;
r->connection->bucket_alloc);
if (*inserted_head == NULL) {
}
}
#endif
}
else {
"unknown parameter \"%s\" to tag if in %s", tag,
r->filename);
}
}
}
return 0;
}
{
*inserted_head = NULL;
if (!ctx->if_nesting_level) {
"else directive does not take tags in %s", r->filename);
}
return -1;
}
else {
}
else {
}
return 0;
}
}
return 0;
}
{
*inserted_head = NULL;
if (!ctx->if_nesting_level) {
"endif directive does not take tags in %s", r->filename);
return -1;
}
else {
return 0;
}
}
else {
ctx->if_nesting_level--;
return 0;
}
}
{
char *parsed_string;
apr_pool_t *p = r->pool;
/* we need to use the 'main' request pool to set notes as that is
* a notes lifetime
*/
while (sub) {
}
*inserted_head = NULL;
while (1) {
return 0;
}
return 1;
}
MAX_STRING_LEN, 0);
}
"variable must precede value in set directive in %s",
r->filename);
return (-1);
}
MAX_STRING_LEN, 0);
apr_pstrdup(p, parsed_string));
}
else {
"Invalid tag for set directive in %s", r->filename);
return -1;
}
}
}
return 0;
}
request_rec *r, ap_filter_t *f,
{
int i;
*inserted_head = NULL;
if (val_text == LAZY_VALUE) {
}
*next++ = '=';
*next++ = '\n';
*next = 0;
r->pool,
r->connection->bucket_alloc);
if (*inserted_head == NULL) {
}
}
return 0;
}
else {
"printenv directive does not take tags in %s",
r->filename);
return -1;
}
}
return 0;
}
/* -------------------------- The main function --------------------------- */
request_rec *r, ap_filter_t *f)
{
if (r->args) { /* add QUERY stuff to env cause it ain't yet */
}
/* State to check for the STARTING_SEQUENCE. */
int do_cleanup = 0;
}
/* The few bytes stored in the ssi_tag_brigade turned out not to
* be a tag after all. This can only happen if the starting
* tag actually spans brigades. This should be very rare.
*/
r->connection->bucket_alloc);
}
/* If I am inside a conditional (if, elif, else) that is false
* then I need to throw away anything contained in it.
*/
}
}
/* Adjust the current bucket position based on what was found... */
}
else {
}
}
(ctx->output_now ||
/* Send the large chunk of pre-tag bytes... */
if (ctx->output_flush) {
}
if (rv != APR_SUCCESS) {
return rv;
}
*bb = tag_and_after;
ctx->output_flush = 0;
ctx->bytes_parsed = 0;
ctx->output_now = 0;
}
/* There was no possible SSI tag in the
* remainder of this brigade... */
}
}
/* State to check for the ENDING_SEQUENCE. */
}
/* If some of the tag has already been set aside then set
* aside remainder of tag. Now the full tag is in
* ssi_tag_brigade.
* If none has yet been set aside, then leave it all where it
* is.
* In any event after this the entire set of tag buckets will
* be in one place or another.
*/
*bb = tag_and_after;
}
else if (ctx->output_now ||
if (rv != APR_SUCCESS) {
return rv;
}
ctx->output_flush = 0;
ctx->output_now = 0;
}
}
else {
/* remainder of this brigade... */
}
}
/* State to processed the directive... */
char tmp_buf[TMP_BUF_SIZE];
apr_bucket **);
/* By now the full tag (all buckets) should either be set aside into
* ssi_tag_brigade or contained within the current bb. All tag
* processing from here on can assume that.
*/
/* At this point, everything between ctx->head_start_bucket and
* ctx->tail_start_bucket is an SSI
* directive, we just have to deal with it now.
*/
TMP_BUF_SIZE) != APR_SUCCESS) {
"mod_include: error copying directive in %s",
r->filename);
/* DO CLEANUP HERE!!!!! */
}
else {
do {
}
return APR_SUCCESS;
}
/* Can't destroy the tag buckets until I'm done processing
* because the combined_tag might just be pointing to
* the contents of a single bucket!
*/
/* Retrieve the handler function to be called for this directive
* from the functions registered in the hash table.
* Need to lower case the directive for proper matching. Also need
* to have it NULL terminated for proper hash matching.
*/
}
if (handle_func != NULL) {
return (rv);
}
}
else {
"unknown directive \"%s\" in parsed doc %s",
}
/* This chunk of code starts at the first bucket in the chain
* of tag buckets (assuming that by this point the bucket for
* the STARTING_SEQUENCE has been split) and loops through to
* the end of the tag buckets freeing them all.
*
* Remember that some part of this may have been set aside
* into the ssi_tag_brigade and the remainder (possibly as
* little as one byte) will be in the current brigade.
*
* The value of dptr should have been set during the
* PARSE_TAIL state to the first bucket after the
* ENDING_SEQUENCE.
*
* The value of content_head may have been set during processing
* of the directive. If so, the content was inserted in front
* of the dptr bucket. The inserted buckets should not be thrown
* away here, but they should also not be parsed later.
*/
if (content_head == NULL) {
content_head = dptr;
}
}
else {
do {
} while ((tmp_dptr != content_head) &&
}
}
/* Don't reset the flags or the nesting level!!! */
ctx->head_start_index = 0;
ctx->tag_start_index = 0;
ctx->tail_start_index = 0;
ctx->tag_length = 0;
ctx->directive_length = 0;
}
}
}
/* We have nothing more to send, stop now. */
/* We might have something saved that we never completed, but send
* down unparsed. This allows for <!-- at the end of files to be
* sent correctly. */
}
}
/* If I am in the middle of parsing an SSI tag then I need to set aside
* the pertinent trailing buckets and pass on the initial part of the
* brigade. The pertinent parts of the next brigades will be added to
* these set aside buckets to form the whole tag and will be processed
* once the whole tag has been found.
*/
/* Inside a false conditional (if, elif, else), so toss it all... */
do {
free_bucket = dptr;
}
else {
/* Otherwise pass it along...
* No SSI tags in this brigade... */
if (rv != APR_SUCCESS) {
return rv;
}
ctx->bytes_parsed = 0;
}
}
"Invalid mod_include state during file %s", r->filename);
}
else { /* Entire brigade is middle chunk of SSI tag... */
}
else { /* End of brigade contains part of SSI tag... */
if (ctx->head_start_index > 0) {
ctx->head_start_index = 0;
}
/* Set aside tag, pass pre-tag... */
if (rv != APR_SUCCESS) {
return rv;
}
/* Set aside the partial tag
* Exception: if there's an EOS at the end of this brigade,
* the tag will never be completed, so send an error and EOS
*/
if (APR_BUCKET_IS_EOS(last)) {
/* Remove everything before the EOS (i.e., the partial tag)
* and replace it with an error msg */
apr_bucket *b;
for (b = APR_BRIGADE_FIRST(tag_and_after);
!APR_BUCKET_IS_EOS(b);
b = APR_BRIGADE_FIRST(tag_and_after)) {
}
}
else {
&tag_and_after, r->pool);
}
if (rv != APR_SUCCESS) {
return rv;
}
ctx->bytes_parsed = 0;
}
}
return APR_SUCCESS;
}
{
*xbh = DEFAULT_XBITHACK;
return result;
}
{
/* compile the pattern used by find_start_sequence */
return result;
}
{
}
}
}
else {
return "XBitHack must be set to Off, On, or Full";
}
return NULL;
}
static int includes_setup(ap_filter_t *f)
{
/* When our xbithack value isn't set to full or our platform isn't
* providing group-level protection bits or our group-level bits do not
* have group-execite on, we will set the no_local_copy value to 1 so
* that we will not send 304s.
*/
f->r->no_local_copy = 1;
}
return OK;
}
{
request_rec *r = f->r;
if (!(ap_allow_options(r) & OPT_INCLUDES)) {
return ap_pass_brigade(f->next, b);
}
if (!f->ctx) {
if (ap_allow_options(r) & OPT_INCNOEXEC) {
}
f->c->bucket_alloc);
}
else {
ctx->bytes_parsed = 0;
}
/* Kludge --- for nested includes, we want to keep the subprocess
* environment of the base document (for compatibility); that means
* torquing our own last_modified date as well so that the
* LAST_MODIFIED variable gets reset to the proper value if the
* nested document resets <!--#config timefmt -->.
*/
}
else {
/* we're not a nested include, so we create an initial
* environment */
ap_add_cgi_vars(r);
}
/* Always unset the content-length. There is no way to know if
* the content will be modified at some point by send_parsed_content.
* It is very possible for us to not find any content in the first
* 9k of the file, but still have to modify the content of the file.
* If we are going to pass the file through send_parsed_content, then
* the content-length should just be unset.
*/
/* Always unset the ETag/Last-Modified fields - see RFC2616 - 13.3.4.
* We don't know if we are going to be including a file or executing
* a program which may change the Last-Modified header or make the
* content completely dynamic. Therefore, we can't support these
* headers.
* Exception: XBitHack full means we *should* set the Last-Modified field.
*/
/* Assure the platform supports Group protections */
}
else {
}
return send_parsed_content(&b, r, f);
}
{
}
{
include_hash = apr_hash_make(p);
if(ssi_pfn_register) {
}
return OK;
}
{
return NULL;
}
{
return NULL;
}
{
return NULL;
}
{
return NULL;
}
{
return NULL;
}
/*
* Module definition and configuration data structs...
*/
static const command_rec includes_cmds[] =
{
"Off, On, or Full"),
"a string"),
"a strftime(3) formatted string"),
"SSI Start String Tag"),
"SSI End String Tag"),
"SSI Start String Tag"),
{NULL}
};
static int include_fixup(request_rec *r)
{
{
if (!r->content_type || !*r->content_type) {
ap_set_content_type(r, "text/html");
}
r->handler = "default-handler";
}
else
/* These OS's don't support xbithack. This is being worked on. */
{
return DECLINED;
}
#else
{
return DECLINED;
}
return DECLINED;
}
return DECLINED;
}
}
#endif
/* We always return declined, because the default handler actually
* serves the file. All we have to do is add the filter.
*/
return DECLINED;
}
static void register_hooks(apr_pool_t *p)
{
}
{
create_includes_dir_config, /* dir config creater */
NULL, /* dir merger --- default is to override */
create_includes_server_config,/* server config */
NULL, /* merge server config */
includes_cmds, /* command apr_table_t */
register_hooks /* register hooks */
};