mod_disk_cache.c revision 38c37050250a232748df499cb74e2bec82cd791a
/* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "apr_file_io.h"
#include "apr_strings.h"
#include "mod_cache.h"
#include "mod_disk_cache.h"
#include "ap_provider.h"
#include "util_filter.h"
#include "util_script.h"
#include "util_charset.h"
/*
* mod_disk_cache: Disk Based HTTP 1.1 Cache.
*
* Flow to Find the .data file:
* Open <hash>.header
* Read in <hash>.header file (may contain Format #1 or Format #2)
* If format #1 (Contains a list of Vary Headers):
* Use each header name (from .header) with our request values (headers_in) to
* re-read in <hash>.header (must be format #2)
* read in <hash>.data
*
* Always first in the header file:
* disk_cache_format_t format;
*
* VARY_FORMAT_VERSION:
* apr_time_t expire;
* apr_array_t vary_headers (delimited by CRLF)
*
* DISK_FORMAT_VERSION:
* disk_cache_info_t
* entity name (dobj->name) [length is in disk_cache_info_t->name_len]
* r->headers_out (delimited by CRLF)
* CRLF
* r->headers_in (delimited by CRLF)
* CRLF
*/
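/*
 * Illustrative sketch (not part of the original module) of how a reader
 * dispatches on the leading format tag described above. The helper name
 * read_disk_cache_format() is hypothetical; only the on-disk layout (a
 * disk_cache_format_t first, then version-specific data) comes from the
 * comment above.
 */
#if 0
static apr_status_t read_disk_cache_format(apr_file_t *fd,
                                           disk_cache_format_t *format)
{
    apr_size_t len = sizeof(*format);
    apr_status_t rv;

    rv = apr_file_read_full(fd, format, len, &len);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    /* The caller then branches: VARY_FORMAT_VERSION means an expiry time
     * and a CRLF-delimited list of Vary header names follow;
     * DISK_FORMAT_VERSION means disk_cache_info_t, the entity name and
     * the header tables follow. */
    return APR_SUCCESS;
}
#endif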
/* Forward declarations */
static int remove_entity(cache_handle_t *h);
static apr_status_t read_array(request_rec *r, apr_array_header_t *arr,
                               apr_file_t *file);
/*
* Modified file bucket implementation to be able to deliver files
* while caching.
*/
/* Derived from apr_buckets_file.c */
static void diskcache_bucket_destroy(void *data)
{
diskcache_bucket_data *f = data;
if (apr_bucket_shared_destroy(f)) {
/* no need to close files here; it will get
* done automatically when the pool gets cleaned up */
apr_bucket_free(f);
}
}
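/*
 * A minimal sketch of the private data these bucket functions share.
 * The fields fd, readpool and updtimeout are taken from their uses in
 * the code below; the actual layout lives in mod_disk_cache.h and may
 * differ.
 */
#if 0
typedef struct diskcache_bucket_data {
    apr_bucket_refcount refcount;   /* required by apr_bucket_shared_*() */
    apr_file_t *fd;                 /* cache file being read while cached */
    apr_pool_t *readpool;           /* pool the fd may be reopened into */
    apr_interval_time_t updtimeout; /* how long to wait for file growth */
} diskcache_bucket_data;
#endif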
/* The idea here is to convert diskcache buckets to regular file buckets
as data becomes available */
/* FIXME: Maybe we should care about the block argument, right now we're
always blocking */
static apr_status_t diskcache_bucket_read(apr_bucket *e, const char **str,
                                          apr_size_t *len,
                                          apr_read_type_e block)
{
diskcache_bucket_data *a = e->data;
apr_file_t *f = a->fd;
apr_bucket *b = NULL;
char *buf;
#if APR_HAS_THREADS && !APR_HAS_XTHREAD_FILES
    apr_int32_t flags;
#endif

#if APR_HAS_THREADS && !APR_HAS_XTHREAD_FILES
    if ((flags = apr_file_flags_get(f)) & APR_XTHREAD) {
/* this file descriptor is shared across multiple threads and
* this OS doesn't support that natively, so as a workaround
* we must reopen the file into a->readpool */
const char *fname;
        apr_file_name_get(&fname, f);

        rv = apr_file_open(&f, fname, (flags & ~APR_XTHREAD), 0, a->readpool);
        if (rv != APR_SUCCESS)
return rv;
a->fd = f;
}
#endif
/* in case we die prematurely */
*len = 0;
while(1) {
/* Figure out how big the file is right now, sit here until
it's grown enough or we get bored */
fileend = 0;
if(rv != APR_SUCCESS) {
return rv;
}
break;
}
if(rv != APR_SUCCESS ||
{
return APR_EGENERAL;
}
}
/* Convert this bucket to a zero-length heap bucket so we won't be called
again */
/* Wrap as much as possible into a regular file bucket */
APR_BUCKET_INSERT_AFTER(e, b);
/* Put any remains in yet another bucket */
if(available < filelength) {
e=b;
/* for efficiency, we can just build a new apr_bucket struct
* to wrap around the existing bucket */
b = apr_bucket_alloc(sizeof(*b), e->list);
b->data = a;
b->type = &bucket_type_diskcache;
b->free = apr_bucket_free;
APR_BUCKET_INSERT_AFTER(e, b);
}
else {
}
return APR_SUCCESS;
}
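/*
 * Sketch of the morphing step performed above, using the usual APR bucket
 * idiom: the bytes already on disk are exposed as an ordinary file bucket,
 * and the diskcache bucket itself becomes a zero-length heap bucket so
 * this read function is never called on it again. The variables mirror
 * the surrounding fragment; this is not the verbatim original.
 */
#if 0
    b = apr_bucket_file_create(f, e->start, (apr_size_t) available,
                               a->readpool, e->list);
    buf = apr_bucket_alloc(1, e->list);  /* dummy storage for the morph */
    apr_bucket_heap_make(e, buf, 0, apr_bucket_free);
    APR_BUCKET_INSERT_AFTER(e, b);
#endif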
apr_file_t *fd,
apr_pool_t *p)
{
f = apr_bucket_alloc(sizeof(*f), b->list);
f->readpool = p;
f->updtimeout = timeout;
b->type = &bucket_type_diskcache;
return b;
}
apr_pool_t *p,
{
APR_BUCKET_INIT(b);
b->free = apr_bucket_free;
}
/* FIXME: This is probably only correct for the first case, that seems
to be the one that occurs all the time... */
{
apr_file_t *f = a->fd;
return APR_SUCCESS;
}
/* FIXME: Figure out what needs to be done here */
"disk_cache: diskcache_bucket_setaside: FIXME1");
}
/* FIXME: Figure out what needs to be done here */
"disk_cache: diskcache_bucket_setaside: FIXME2");
return APR_SUCCESS;
}
};
/* From apr_brigade.c */
/* A "safe" maximum bucket size, 1Gb */
#define MAX_BUCKET_SIZE (0x40000000)
static apr_bucket * diskcache_brigade_insert(apr_bucket_brigade *bb,
                                             apr_file_t *f, apr_off_t start,
                                             apr_off_t length,
                                             apr_interval_time_t timeout,
                                             apr_pool_t *p)
{
apr_bucket *e;
if (length < MAX_BUCKET_SIZE) {
bb->bucket_alloc);
}
else {
/* Several buckets are needed. */
bb->bucket_alloc);
while (length > MAX_BUCKET_SIZE) {
apr_bucket *ce;
apr_bucket_copy(e, &ce);
e->start += MAX_BUCKET_SIZE;
}
}
return e;
}
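/*
 * Fuller sketch of the splitting logic above, modeled on
 * apr_brigade_insert_file() in apr_brigade.c (which the comment credits):
 * lengths beyond MAX_BUCKET_SIZE are covered by copying the first file
 * bucket and advancing its start offset, then trimming the final copy.
 * An approximation, not the verbatim code.
 */
#if 0
    e = apr_bucket_file_create(f, start, MAX_BUCKET_SIZE, p,
                               bb->bucket_alloc);
    while (length > MAX_BUCKET_SIZE) {
        apr_bucket *ce;
        apr_bucket_copy(e, &ce);
        APR_BRIGADE_INSERT_TAIL(bb, ce);
        e->start += MAX_BUCKET_SIZE;
        length -= MAX_BUCKET_SIZE;
    }
    e->length = (apr_size_t) length;  /* trim the last bucket */
    APR_BRIGADE_INSERT_TAIL(bb, e);
#endif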
/* --------------------------------------------------------------- */
/*
* Local static functions
*/
{
}
}
else {
}
}
{
}
}
else {
}
}
{
char *p;
p = strchr(p, '/');
if (!p)
break;
*p = '\0';
*p = '/';
break;
}
++p;
}
return rv;
}
return APR_SUCCESS;
}
/* htcacheclean may remove directories underneath us.
* So, we'll try renaming three times at a cost of 0.002 seconds.
*/
{
if (rv != APR_SUCCESS) {
int i;
/* 1000 micro-seconds aka 0.001 seconds. */
apr_sleep(1000);
}
}
return rv;
}
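/*
 * Hedged sketch of the retry pattern described above: if the rename fails
 * because htcacheclean removed a parent directory, recreate the directory
 * tree and try again. mkdir_structure() stands in for this file's
 * directory-creating helper; the bounds follow the "three times at a cost
 * of 0.002 seconds" note.
 */
#if 0
    rv = apr_file_rename(src, dest, pool);
    if (rv != APR_SUCCESS) {
        int i;

        for (i = 0; i < 2 && rv != APR_SUCCESS; i++) {
            /* 1000 micro-seconds aka 0.001 seconds. */
            apr_sleep(1000);

            if (mkdir_structure(conf, dest, pool) != APR_SUCCESS) {
                continue;
            }
            rv = apr_file_rename(src, dest, pool);
        }
    }
    return rv;
#endif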
{
/* Remove the header file and the body file. */
/* If we opened the temporary data file, close and remove it. */
}
return APR_SUCCESS;
}
{
int i, k;
int nvec;
const char *header;
const char **elts;
/* TODO:
* - Handle multiple-value headers better. (sort them?)
 * - Handle case-insensitive values better.
* This isn't the end of the world, since it just lowers the cache
* hit rate, but it would be nice to fix.
*
 * The majority are case insensitive if they are values (encoding etc).
* Most of rfc2616 is case insensitive on header contents.
*
* So the better solution may be to identify headers which should be
* treated case-sensitive?
 * HTTP URIs (3.2.3) [host and scheme are insensitive]
* HTTP method (5.1.1)
* HTTP-date values (3.3.1)
 * 3.7 Media Types [excerpt]
* The type, subtype, and parameter attribute names are case-
* insensitive. Parameter values might or might not be case-sensitive,
* depending on the semantics of the parameter name.
 * 14.20 Expect [excerpt]
* Comparison of expectation values is case-insensitive for unquoted
* tokens (including the 100-continue token), and is case-sensitive for
* quoted-string expectation-extensions.
*/
if (!header) {
header = "";
}
k++;
k++;
}
k++;
}
{
}
{
char *token;
}
/* Sort it so that "Vary: A, B" and "Vary: B, A" are stored the same. */
sizeof(char *), array_alphasort);
}
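/*
 * Sketch of the normalization the comment above describes: split the Vary
 * value on commas into an array, then sort it so that header order does
 * not produce distinct cache entries. array_alphasort() is the comparator
 * referenced above; vary and p stand in for the function's arguments.
 */
#if 0
    apr_array_header_t *varray = apr_array_make(p, 6, sizeof(char *));
    const char *token;

    while ((token = ap_get_list_item(p, &vary)) != NULL) {
        *(const char **) apr_array_push(varray) = token;
    }
    qsort(varray->elts, varray->nelts, sizeof(char *), array_alphasort);
#endif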
/*
* Hook and mod_cache callback functions
*/
static int create_entity(cache_handle_t *h, request_rec *r, const char *key,
                         apr_off_t len)
{
return DECLINED;
}
/* Note, len is -1 if unknown so don't trust it too hard */
"disk_cache: URL %s failed the size check "
return DECLINED;
}
"disk_cache: URL %s failed the size check "
return DECLINED;
}
/* Allocate and initialize cache_object_t and disk_cache_object_t */
/* Save the cache root */
return OK;
}
{
done = 0;
while(1) {
if (rc == APR_SUCCESS) {
break;
}
if(!APR_STATUS_IS_EOF(rc)) {
return rc;
}
if(rc != APR_SUCCESS) {
return rc;
}
return APR_ETIMEDOUT;
}
}
return APR_SUCCESS;
}
{
int flags;
if (rc != APR_SUCCESS) {
return CACHE_EDECLINED;
}
/* read the format from the cache file */
if(APR_STATUS_IS_EOF(rc)) {
return CACHE_ENODATA;
}
else if(rc != APR_SUCCESS) {
return rc;
}
/* Vary-files are being written to tmpfile and moved in place, so
   they should always be complete */
if (format == VARY_FORMAT_VERSION) {
if(rc != APR_SUCCESS) {
return rc;
}
if (expire < r->request_time) {
return CACHE_EDECLINED;
}
if (rc != APR_SUCCESS) {
"disk_cache: Cannot parse vary header file: %s",
return CACHE_EDECLINED;
}
if (rc != APR_SUCCESS) {
return CACHE_EDECLINED;
}
if(APR_STATUS_IS_EOF(rc)) {
return CACHE_ENODATA;
}
else if(rc != APR_SUCCESS) {
return rc;
}
}
if(format != DISK_FORMAT_VERSION) {
"disk_cache: File '%s' had a version mismatch. File had "
return CACHE_EDECLINED;
}
/* read the data from the header file */
if(APR_STATUS_IS_EOF(rc)) {
return CACHE_ENODATA;
}
else if(rc != APR_SUCCESS) {
return rc;
}
/* Store it away so we can get it later. */
return APR_SUCCESS;
}
{
while(1) {
}
if(rc != CACHE_EDECLINED) {
"disk_cache: Cannot load header file: %s",
}
return rc;
}
/* Objects with unknown body size will have file_size == -1 until the
entire body is written and the header updated with the actual size.
And since we depend on knowing the body size we wait until the size
is written */
break;
}
if(rc != APR_SUCCESS) {
return rc;
}
"disk_cache: Timed out waiting for header for URL %s"
" - caching the body failed?", key);
return CACHE_EDECLINED;
}
}
return APR_SUCCESS;
}
{
int flags;
#if APR_HAS_SENDFILE
? 0 : APR_SENDFILE_ENABLED);
#endif
/* Wait here until we get a body cachefile, data in it, and do quick sanity
* check */
while(1) {
if(rc != APR_SUCCESS) {
"disk_cache: Timed out waiting for body for "
"URL %s - caching failed?", key);
return CACHE_EDECLINED;
}
continue;
}
}
if(rc != APR_SUCCESS) {
return rc;
}
"disk_cache: Bad cached body for URL %s, size %"
return CACHE_EDECLINED;
}
/* Still caching or failed? */
if(rc != APR_SUCCESS ||
{
"disk_cache: Body for URL %s is too small - "
return CACHE_EDECLINED;
}
}
break;
}
}
/* Go back to the beginning */
    off = 0;
    rv = apr_file_seek(dobj->fd, APR_SET, &off);
if(rc != APR_SUCCESS) {
return rc;
}
return APR_SUCCESS;
}
static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
{
static int error_logged = 0;
char urlbuff[MAX_STRING_LEN];
/* Look up entity keyed to 'url' */
if (!error_logged) {
error_logged = 1;
"disk_cache: Cannot cache files to disk without a "
"CacheRoot specified.");
}
return DECLINED;
}
/* Create and init the cache object */
/* Save the cache root */
/* Open header and read basic info, wait until header contains
valid size information for the body */
if(rc != APR_SUCCESS) {
return DECLINED;
}
/* TODO: We have the ability to serve partially cached requests,
 * however, in order to avoid some sticky what-if conditions
* should the content turn out to be too large to be cached,
* we must only allow partial cache serving if the cached
* entry has a content length known in advance.
*/
if(len > 0) {
if (rc == APR_ETIMEDOUT) {
"disk_cache: Timed out waiting for urlbuff for "
"URL %s - caching failed?", key);
return DECLINED;
}
else if(rc != APR_SUCCESS) {
"disk_cache: Error reading urlbuff for URL %s",
key);
return DECLINED;
}
}
/* check that we have the same URL */
"disk_cache: Cached URL %s didn't match requested "
return DECLINED;
}
/* Only need body cachefile if we have a body */
if(dobj->initial_size > 0) {
if(rc != APR_SUCCESS) {
return DECLINED;
}
}
else {
}
return OK;
}
static int remove_entity(cache_handle_t *h)
{
/* Null out the cache object pointer so next time we start from scratch */
return OK;
}
static int remove_url(cache_handle_t *h, apr_pool_t *p)
{
/* Get disk cache object from cache handle */
if (!dobj) {
return DECLINED;
}
/* Delete headers file */
/* Will only result in an output if httpd is started with -e debug.
     * For the reason, see log_error_core for the case s == NULL.
*/
"disk_cache: Failed to delete headers file %s from cache.",
return DECLINED;
}
}
/* Delete data file */
/* Will only result in an output if httpd is started with -e debug.
     * For the reason, see log_error_core for the case s == NULL.
*/
"disk_cache: Failed to delete data file %s from cache.",
return DECLINED;
}
}
/* now delete directories as far as possible up to our cache root */
const char *str_to_copy;
if (str_to_copy) {
/* remove filename */
*slash = '\0';
/*
* now walk our way back to the cache root, delete everything
* in the way as far as possible
*
* Note: due to the way we constructed the file names in
* header_file and data_file, we are guaranteed that the
* cache_root is suffixed by at least one '/' which will be
* turned into a terminating null by this loop. Therefore,
* we won't either delete or go above our cache root.
*/
"disk_cache: Deleting directory %s from cache",
dir);
break;
}
*slash = '\0';
}
}
}
return OK;
}
static apr_status_t read_array(request_rec *r, apr_array_header_t *arr,
                               apr_file_t *file)
{
char w[MAX_STRING_LEN];
int p;
while (1) {
if (rv != APR_SUCCESS) {
"Premature end of vary array.");
return rv;
}
p = strlen(w);
        if (p > 0 && w[p - 1] == '\n') {
            if (p > 1 && w[p - 2] == CR) {
                w[p - 2] = '\0';
            }
            else {
                w[p - 1] = '\0';
            }
}
/* If we've finished reading the array, break out of the loop. */
if (w[0] == '\0') {
break;
}
}
return APR_SUCCESS;
}
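/*
 * For symmetry with read_array() above, a sketch of the writer side,
 * assuming the CRLF-delimited format described at the top of this file:
 * one element per line, with an empty line terminating the array. The
 * iovec write mirrors the fragments visible in store_array() below;
 * names and error handling are approximations.
 */
#if 0
    const char **elts = (const char **) arr->elts;
    for (i = 0; i < arr->nelts; i++) {
        struct iovec iov[2];
        apr_size_t amt;

        iov[0].iov_base = (char *) elts[i];
        iov[0].iov_len = strlen(elts[i]);
        iov[1].iov_base = CRLF;
        iov[1].iov_len = sizeof(CRLF) - 1;

        rv = apr_file_writev(fd, iov, 2, &amt);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }
    /* terminating empty line */
    rv = apr_file_puts(CRLF, fd);
#endif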
{
int i;
const char **elts;
&amt);
if (rv != APR_SUCCESS) {
return rv;
}
}
&amt);
}
static apr_status_t read_table(cache_handle_t *handle, request_rec *r,
                               apr_table_t *table, apr_file_t *file)
{
char w[MAX_STRING_LEN];
char *l;
int p;
while (1) {
/* ### What about APR_EOF? */
if (rv != APR_SUCCESS) {
return rv;
}
/* Delete terminal (CR?)LF */
p = strlen(w);
/* Indeed, the host's '\n':
'\012' for UNIX; '\015' for MacOS; '\025' for OS/390
-- whatever the script generates.
*/
        if (p > 0 && w[p - 1] == '\n') {
            if (p > 1 && w[p - 2] == CR) {
                w[p - 2] = '\0';
            }
            else {
                w[p - 1] = '\0';
            }
}
/* If we've finished reading the headers, break out of the loop. */
if (w[0] == '\0') {
break;
}
#if APR_CHARSET_EBCDIC
        /* Chances are that we received an ASCII header text instead of
         * the expected EBCDIC header lines. Try to auto-detect:
         */
        if (!(l = strchr(w, ':'))) {
            int maybeASCII = 0, maybeEBCDIC = 0;
            unsigned char *cp, native;
            apr_size_t inbytes_left, outbytes_left;
            for (cp = (unsigned char *)w; *cp != '\0'; ++cp) {
                native = apr_xlate_conv_byte(ap_hdrs_from_ascii, *cp);
                if (apr_isprint(*cp) && !apr_isprint(native))
                    ++maybeEBCDIC;
                if (!apr_isprint(*cp) && apr_isprint(native))
                    ++maybeASCII;
            }
            if (maybeASCII > maybeEBCDIC) {
                ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server,
                             "disk_cache: CGI Interface Error: Script headers apparently ASCII: (CGI = %s)",
                             r->filename);
                inbytes_left = outbytes_left = cp - (unsigned char *)w;
                apr_xlate_conv_buffer(ap_hdrs_from_ascii,
                                      w, &inbytes_left, w, &outbytes_left);
            }
        }
}
#endif /*APR_CHARSET_EBCDIC*/
        /* If we see a bogus header, don't ignore it. Shout and scream. */
if (!(l = strchr(w, ':'))) {
return APR_EGENERAL;
}
*l++ = '\0';
while (*l && apr_isspace(*l)) {
++l;
}
apr_table_add(table, w, l);
}
return APR_SUCCESS;
}
{
off = 0;
if(rv != APR_SUCCESS) {
return rv;
}
while(1) {
if(rv == APR_SUCCESS) {
break;
}
if(rv != APR_SUCCESS) {
return rv;
}
if(rv != APR_SUCCESS ||
{
"disk_cache: Timed out waiting for cache headers "
return APR_EGENERAL;
}
}
return APR_SUCCESS;
}
/*
 * Reads headers from a buffer and returns an array of headers.
 * Returns an error status on file error.
 * This routine tries to deal with overly long lines and continuation lines.
 * @@@: XXX: FIXME: currently the headers are passed through un-merged.
* Is that okay, or should they be collapsed where possible?
*/
{
/* This case should not happen... */
/* XXX log message */
return APR_NOTFOUND;
}
if(rv != APR_SUCCESS) {
"disk_cache: Timed out waiting for response headers "
return rv;
}
if(rv != APR_SUCCESS) {
"disk_cache: Timed out waiting for request headers "
return rv;
}
return APR_SUCCESS;
}
{
apr_bucket *e;
    /* Insert as much as possible as a regular file bucket (i.e. sendfile()-able) */
{
return APR_ENOMEM;
}
}
/* Insert any remainder as read-while-caching bucket */
dobj->updtimeout, p
) == NULL)
{
return APR_ENOMEM;
}
}
return APR_SUCCESS;
}
{
int i;
&amt);
if (rv != APR_SUCCESS) {
return rv;
}
}
}
&amt);
return rv;
}
{
int flags;
#if APR_HAS_SENDFILE
? 0 : APR_SENDFILE_ENABLED);
#endif
while(1) {
"disk_cache: open_new_file: Opening %s", filename);
if(APR_STATUS_IS_EEXIST(rv)) {
if(APR_STATUS_IS_ENOENT(rv)) {
/* Someone else has already removed it, try again */
continue;
}
else if(rv != APR_SUCCESS) {
return rv;
}
/* Something stale that's left around */
"disk_cache: open_new_file: Failed to "
"remove old %s", filename);
return rv;
}
continue;
}
else {
            /* Someone else has just created the file; return an
               identifiable status so the calling function can do
               the right thing */
return CACHE_EEXIST;
}
}
else if(APR_STATUS_IS_ENOENT(rv)) {
/* The directory for the file didn't exist */
if(rv != APR_SUCCESS) {
"disk_cache: open_new_file: Failed to make "
"directory for %s", filename);
return rv;
}
continue;
}
else if(rv == APR_SUCCESS) {
return APR_SUCCESS;
}
else {
"disk_cache: open_new_file: Failed to open %s",
filename);
return rv;
}
}
    /* We should never get here, so return an error just in case */
return APR_EGENERAL;
}
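/*
 * Sketch of the create step inside the loop above, under the usual APR
 * idiom: opening with APR_FOPEN_CREATE|APR_FOPEN_EXCL fails with EEXIST
 * when another worker got there first, which is what lets this function
 * return CACHE_EEXIST. Flag spellings follow current APR; the exact
 * flags and permissions used here are assumptions.
 */
#if 0
    rv = apr_file_open(&fd, filename,
                       flags | APR_FOPEN_CREATE | APR_FOPEN_EXCL,
                       APR_FPROT_UREAD | APR_FPROT_UWRITE, pool);
#endif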
const char *varyhdr)
{
const char *vfile;
int flags;
}
else {
}
if (rv != APR_SUCCESS) {
return rv;
}
if (rv != APR_SUCCESS) {
return rv;
}
if (rv != APR_SUCCESS) {
return rv;
}
if (rv != APR_SUCCESS) {
return rv;
}
if (rv != APR_SUCCESS) {
"disk_cache: rename tempfile to varyfile failed: "
return rv;
}
}
return APR_SUCCESS;
}
{
int niov;
niov = 0;
if (rv != APR_SUCCESS) {
return rv;
}
if (r->headers_out) {
r->server);
&& r->content_type) {
ap_make_content_type(r, r->content_type));
}
r->err_headers_out);
if (rv != APR_SUCCESS) {
return rv;
}
}
/* Parse the vary header and dump those fields from the headers_in. */
/* FIXME: Make call to the same thing cache_select calls to crack Vary. */
if (r->headers_in) {
r->server);
if (rv != APR_SUCCESS) {
return rv;
}
}
return APR_SUCCESS;
}
{
/* This is flaky... we need to manage the cache_info differently */
}
else {
}
if (r->headers_out) {
const char *tmp;
if (tmp) {
if(rv != APR_SUCCESS) {
return rv;
}
}
}
if(rewriting) {
/* Assume we are just rewriting the header if we have an fd. The
fd might be readonly though, in that case reopen it for writes.
Something equivalent to fdopen would have been handy. */
if (rv != APR_SUCCESS) {
return rv;
}
}
else {
/* We can write here, so let's just move to the right place */
if (rv != APR_SUCCESS) {
return rv;
}
}
}
else {
if(rv == CACHE_EEXIST) {
}
else if(rv != APR_SUCCESS) {
return rv;
}
}
"disk_cache: Skipping store for URL %s: Someone else "
return APR_SUCCESS;
}
if(rv != APR_SUCCESS) {
return rv;
}
/* If the body size is unknown, the header file will be rewritten later
so we can't close it */
if(dobj->initial_size < 0) {
}
else {
}
if(rv != APR_SUCCESS) {
return rv;
}
return APR_SUCCESS;
}
/**
* Store the body of the response in the disk cache.
*
* As the data is written to the cache, it is also written to
* the filter provided. On network write failure, the full body
* will still be cached.
*/
static apr_status_t store_body(cache_handle_t *h, ap_filter_t *f,
                               apr_bucket_brigade *bb)
{
apr_bucket *e, *b;
request_rec *r = f->r;
if(r->no_cache) {
"disk_cache: store_body called for URL %s even though"
}
if(dobj->initial_size == 0) {
/* Don't waste a body cachefile on a 0 length body */
}
if (rv == CACHE_EEXIST) {
/* Someone else beat us to storing this */
}
else if (rv != APR_SUCCESS) {
"disk_cache: store_body tried to open cached file "
}
else {
}
}
/* Someone else beat us to storing this object.
* We are too late to take advantage of this storage :( */
}
/* set up our temporary brigade */
}
else {
}
/* start caching the brigade */
e = APR_BRIGADE_FIRST(bb);
while (e != APR_BRIGADE_SENTINEL(bb)) {
const char *str;
        /* try to write all data buckets to the cache, except for metadata buckets */
if(!APR_BUCKET_IS_METADATA(e)) {
/* read in a bucket fragment */
if (rv != APR_SUCCESS) {
"disk_cache: Error when reading bucket for URL %s, aborting request",
/* not being able to read the bucket is fatal,
* return this up the filter stack
*/
return rv;
}
            /* try to write the bucket fragment to the cache */
/* if the cache write was successful, swap the original bucket
* with a file bucket pointing to the same data in the cache.
*
* This is done because:
*
* - The ap_core_output_filter can take advantage of its ability
* to do non blocking writes on file buckets.
*
             *  - We avoid having to read the original bucket a second
             *    time inside ap_core_output_filter, which could be
             *    expensive or memory consuming.
*
* - The cache, in theory, should be faster than the backend,
* otherwise there would be little point in caching in the first
* place.
*/
if (APR_SUCCESS == rv) {
/* remove and destroy the original bucket from the brigade */
b = e;
e = APR_BUCKET_NEXT(e);
/* Is our network connection still alive?
* If not, we must continue caching the file, so keep looping.
* We will return the error at the end when caching is done.
*/
                /* insert a file bucket pointing to the cache into our temporary brigade */
return APR_ENOMEM;
}
/* TODO: If we are not able to guarantee that
* apr_core_output_filter() will not block on our
* file buckets, then the check for whether the
* socket will block must go here.
*/
/* send our new brigade to the network */
}
/* update the write counter, and sanity check the size */
"disk_cache: URL %s failed the size check "
}
}
/*
* If the cache write failed, continue to loop and pass data to
* the network. Remove the cache filter from the output filters
* so we don't inadvertently try to cache write again, leaving
* a hole in the cached data.
*/
else {
/* mark the write as having failed */
"disk_cache: Error when writing cache file for "
/* step away gracefully */
/* write the rest of the brigade to the network, and leave */
}
}
/* write metadata buckets direct to the output filter */
else {
/* move the metadata bucket to our temporary brigade */
b = e;
e = APR_BUCKET_NEXT(e);
/* Is our network connection still alive?
* If not, we must continue looping, but stop writing to the network.
*/
/* TODO: If we are not able to guarantee that
* apr_core_output_filter() will not block on our
* file buckets, then the check for whether the
* socket will block must go here.
*/
/* send our new brigade to the network */
}
}
}
/* Drop out here if this wasn't the end */
return APR_SUCCESS;
}
"disk_cache: Done caching URL %s, len %" APR_OFF_T_FMT,
"disk_cache: An error occurred while writing to the "
"network for URL %s.",
}
"disk_cache: URL %s failed the size check "
/* Remove the intermediate cache file and return filter status */
}
if (dobj->initial_size < 0) {
/* Update header information now that we know the size */
if (rv != APR_SUCCESS) {
}
}
"disk_cache: URL %s - body size mismatch: suggested %"
}
/* All checks were fine, close output file */
if (rv != APR_SUCCESS) {
"disk_cache: While trying to close the cache file for "
}
}
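/*
 * Condensed sketch of the tee pattern store_body() implements, per its
 * doc comment: each data bucket is read, appended to the cache file and,
 * while the connection is alive, also sent down the filter chain, so a
 * network failure does not abort caching. dobj->fd and the surrounding
 * names are schematic, not the verbatim code above.
 */
#if 0
    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e))
    {
        const char *str;
        apr_size_t length, written;

        if (APR_BUCKET_IS_METADATA(e)) {
            continue;               /* metadata is passed through unchanged */
        }
        rv = apr_bucket_read(e, &str, &length, APR_BLOCK_READ);
        if (rv != APR_SUCCESS) {
            return rv;              /* an unreadable bucket is fatal */
        }
        rv = apr_file_write_full(dobj->fd, str, length, &written);
        /* On success, the bucket can be swapped for a file bucket that
         * points at the newly written cache data; on failure we keep
         * serving the network but stop writing to the cache. */
    }
#endif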
{
/* XXX: Set default values */
conf->cache_root_len = 0;
return conf;
}
/*
* mod_disk_cache configuration directives handlers.
*/
static const char
{
/* TODO: canonicalize cache_root and strip off any trailing slashes */
return NULL;
}
/*
* Consider eliminating the next two directives in favor of
* Ian's prime number hash...
 * key = hash_fn(r->uri)
*/
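/*
 * Hypothetical sketch of the "key = hash_fn(r->uri)" idea floated above:
 * a single fixed-width digest of the URI would replace the
 * CacheDirLevels/CacheDirLength pair. Nothing below exists in the module;
 * it only illustrates the suggestion (ap_md5() is from util_md5.h).
 */
#if 0
static const char *cache_key(request_rec *r)
{
    /* ap_md5() returns a 32-character hex digest allocated from r->pool */
    return ap_md5(r->pool, (const unsigned char *) r->uri);
}
#endif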
static const char
{
if (val < 1)
return "CacheDirLevels value must be an integer greater than 0";
return "CacheDirLevels*CacheDirLength value must not be higher than 20";
return NULL;
}
static const char
{
if (val < 1)
return "CacheDirLength value must be an integer greater than 0";
return "CacheDirLevels*CacheDirLength value must not be higher than 20";
return NULL;
}
static const char
{
{
return "CacheMinFileSize argument must be a non-negative integer representing the min size of a file to cache in bytes.";
}
return NULL;
}
static const char
{
{
return "CacheMaxFileSize argument must be a non-negative integer representing the max size of a file to cache in bytes.";
}
return NULL;
}
static const char
{
{
return "CacheUpdateTimeout argument must be a non-negative integer representing the timeout in milliseconds for cache update operations";
}
return NULL;
}
static const command_rec disk_cache_cmds[] =
{
"The directory to store cache files"),
"The number of levels of subdirectories in the cache"),
"The number of characters in subdirectory names"),
"The minimum file size to cache a document"),
"The maximum file size to cache a document"),
"Timeout in ms for cache updates"),
{NULL}
};
static const cache_provider cache_disk_provider =
{
    &remove_entity,
    &store_headers,
    &store_body,
    &recall_headers,
    &recall_body,
    &create_entity,
    &open_entity,
    &remove_url
};
static void disk_cache_register_hook(apr_pool_t *p)
{
    /* cache initializer */
    ap_register_provider(p, CACHE_PROVIDER_GROUP, "disk", "0",
                         &cache_disk_provider);
}
module AP_MODULE_DECLARE_DATA disk_cache_module = {
    STANDARD20_MODULE_STUFF,
    NULL,                       /* create per-directory config structure */
NULL, /* merge per-directory config structures */
create_config, /* create per-server config structure */
NULL, /* merge per-server config structures */
disk_cache_cmds, /* command apr_table_t */
disk_cache_register_hook /* register hooks */
};