mod_mem_cache.c revision 7ad8e71125ec66a8ebfaf9e52bd680ce6beca327
/* ====================================================================
* The Apache Software License, Version 1.1
*
* Copyright (c) 2000-2003 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Apache" and "Apache Software Foundation" must
* not be used to endorse or promote products derived from this
* software without prior written permission. For written
* permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* nor may "Apache" appear in their name, without prior written
* permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
* Portions of this software are based upon public domain software
* originally written at the National Center for Supercomputing Applications,
* University of Illinois, Urbana-Champaign.
*/
#define CORE_PRIVATE
#include "mod_cache.h"
#include "cache_pqueue.h"
#include "cache_cache.h"
#include "ap_mpm.h"
#include "apr_thread_mutex.h"
#if APR_HAVE_UNISTD_H
#include <unistd.h>
#endif

#if !APR_HAS_THREADS
#error This module does not currently compile unless you have a thread-capable APR. Sorry!
#endif
typedef enum {
    CACHE_TYPE_FILE = 1,
    CACHE_TYPE_HEAP
} cache_type_e;
typedef struct {
    char* hdr;
    char* val;
} cache_header_tbl_t;
typedef struct mem_cache_object {
void *m;
long priority; /**< the priority of this entry */
long total_refs; /**< total number of references this entry has had */
#ifdef USE_ATOMICS
#else
#endif
typedef struct {
/* Fields set by config directives */
/* maximum amount of data to buffer on a streamed response where
* we haven't yet seen EOS */
static mem_cache_conf *sconf;
#define DEFAULT_MIN_CACHE_OBJECT_SIZE 0
#define DEFAULT_MAX_CACHE_OBJECT_SIZE 10000
#define DEFAULT_MAX_OBJECT_CNT 1009
#define DEFAULT_MAX_STREAMING_BUFFER_SIZE 100000
#define CACHEFILE_LEN 20
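/*
 * For reference, a minimal server configuration exercising these knobs might
 * look like the sketch below (assumptions: CacheEnable comes from mod_cache,
 * and MCacheRemovalAlgorithm is taken to be the directive behind the
 * removal-algorithm setting described in cache_cmds[]; the other directive
 * names appear in the error strings later in this file):
 *
 *   <IfModule mod_mem_cache.c>
 *       CacheEnable mem /
 *       MCacheSize 4096
 *       MCacheMaxObjectCount 1009
 *       MCacheMinObjectSize 1
 *       MCacheMaxObjectSize 10000
 *       MCacheMaxStreamingBuffer 10000
 *       MCacheRemovalAlgorithm GDSF
 *   </IfModule>
 */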
/* Forward declarations */
static int remove_entity(cache_handle_t *h);
static long memcache_get_priority(void*a)
{
}
static void memcache_inc_frequency(void*a)
{
mobj->total_refs++;
}
{
#ifdef USE_ATOMICS
#else
#endif
}
static apr_ssize_t memcache_get_pos(void *a)
{
#ifdef USE_ATOMICS
#else
#endif
}
static apr_size_t memcache_cache_get_size(void*a)
{
}
/** callback to get the key of an item */
static const char* memcache_cache_get_key(void*a)
{
}
/**
* callback to free an entry
* There is way too much magic in this code. Right now, this callback
* is only called out of cache_insert() which is called under protection
* of the sconf->lock, which means that we do not (and should not)
* attempt to obtain the lock recursively.
*/
static void memcache_cache_free(void*a)
{
/* Cleanup the cache object. Object should be removed from the cache
* now. Increment the refcount before setting cleanup to avoid a race
* condition. A similar pattern is used in remove_url()
*/
#ifdef USE_ATOMICS
#else
#endif
#ifdef USE_ATOMICS
}
#else
}
#endif
}
/*
* functions return a 'negative' score since priority queues
* dequeue the object with the highest value first
*/
static long memcache_lru_algorithm(long queue_clock, void *a)
{
/*
* a 'proper' LRU function would just be
* mobj->priority = mobj->total_refs;
*/
}
static long memcache_gdsf_algorithm(long queue_clock, void *a)
{
}
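/*
 * Illustrative sketch only (not the original code): the general shape of a
 * removal-algorithm callback under the convention described above.  It
 * assumes the callback receives a cache_object_t whose vobj member points at
 * the mem_cache_object_t, and that the entry's priority is reset to zero
 * whenever it is referenced; the function name is hypothetical.
 */
static long memcache_example_lru_score(long queue_clock, void *a)
{
    cache_object_t *obj = (cache_object_t *)a;
    mem_cache_object_t *mobj = obj->vobj;

    /* Re-stamp the entry with the current queue clock the first time it is
     * scored after a reference. */
    if (mobj->priority == 0) {
        mobj->priority = queue_clock;
    }

    /* The queue dequeues the highest value first, so return the negated
     * stamp: the least recently stamped entry scores highest and becomes the
     * first eviction candidate. */
    return -1 * mobj->priority;
}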
{
/* TODO:
* We desperately need a more efficient way of allocating objects. We're
* making way too many malloc calls to create a fully populated
* cache object...
*/
/* Cleanup the cache_object_t */
}
}
}
}
}
/* Cleanup the mem_cache_object_t */
if (mobj) {
}
#ifdef WIN32
#else
#endif
}
if (mobj->header_out) {
}
if (mobj->err_header_out) {
}
if (mobj->subprocess_env) {
}
}
}
}
}
{
/* If obj->complete is not set, the cache update failed and the
* object needs to be removed from the cache and then cleaned up.
*/
}
/* Remember, objects marked for cleanup are, by design, already
* removed from the cache. remove_url() could have already
* removed the object from the cache (and set obj->cleanup)
*/
sconf->object_cnt--;
}
}
}
/* Cleanup the cache object */
#ifdef USE_ATOMICS
}
}
#else
}
/* If the object is marked for cleanup and the refcount
* has dropped to zero, cleanup the object
*/
}
}
#endif
return APR_SUCCESS;
}
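/*
 * Illustrative sketch only (hypothetical helper; the refcount and cleanup
 * field names follow mod_cache.h of this era and are assumptions here): the
 * non-atomic variant of the "drop a reference, free only when marked for
 * cleanup and unreferenced" rule described above, with the count manipulated
 * under sconf->lock.
 */
static void memcache_example_release(cache_object_t *obj)
{
    int do_free = 0;

    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    obj->refcount--;
    /* Objects marked for cleanup are already out of the cache, so no new
     * references can appear; once the count hits zero it is safe to free. */
    if (obj->cleanup && !obj->refcount) {
        do_free = 1;
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }
    if (do_free) {
        cleanup_cache_object(obj);  /* hypothetical cleanup routine */
    }
}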
{
if (!co) {
return APR_SUCCESS;
}
if (!co->cache_cache) {
return APR_SUCCESS;
}
}
while (obj) {
/* Iterate over the cache and clean up each entry */
/* Free the object if the refcount == 0 */
#ifdef USE_ATOMICS
#else
#endif
}
}
/* Cache is empty, free the cache table */
}
return APR_SUCCESS;
}
/*
* TODO: enable directives to be overridden in various containers
*/
{
/* Number of objects in the cache */
sconf->object_cnt = 0;
/* Size of the cache in bytes */
sconf->cache_size = 0;
return sconf;
}
const char *type,
const char *key,
{
}
}
else {
return DECLINED;
}
if (len == -1) {
/* Caching a streaming response. Assume the response is
* less than or equal to max_streaming_buffer_size. We will
* correct all the cache size counters in write_body once
* we know exactly how much we are caching.
*/
}
/* Note: cache_insert() will automatically garbage collect
* objects from the cache if the max_cache_size threshold is
* exceeded. This means mod_mem_cache does not need to implement
* max_cache_size checks.
*/
"mem_cache: URL %s failed the size check and will not be cached.",
key);
return DECLINED;
}
if (type_e == CACHE_TYPE_FILE) {
/* CACHE_TYPE_FILE is only valid for local content handled by the
* default handler. Need a better way to check if the file is
* local or not.
*/
if (!r->filename) {
return DECLINED;
}
}
/* Allocate and initialize cache_object_t */
if (!obj) {
return DECLINED;
}
return DECLINED;
}
/* Safe cast: We tested < sconf->max_cache_object_size above */
/* Allocate and init mem_cache_object_t */
if (!mobj) {
return DECLINED;
}
/* Finish initing the cache object */
#ifdef USE_ATOMICS
#else
#endif
/* Safe cast: We tested < sconf->max_cache_object_size above */
/* Place the cache_object_t into the hash table.
* Note: Perhaps we should wait to put the object in the
* hash table until the object is complete? I add the object here to
* avoid multiple threads attempting to cache the same content only
* to discover at the very end that only one of them will succeed.
* Furthermore, adding the cache object to the table at the end could
* open up a subtle but easy to exploit DoS hole: someone could request
* a very large file with multiple requests. Better to detect this here
* rather than after the cache object has been completely built and
* initialized...
* XXX Need a way to insert into the cache w/o such coarse grained locking
*/
}
if (!tmp_obj) {
sconf->object_cnt++;
/* Safe cast: Must fit in cache_size or alloc would have failed */
}
}
if (tmp_obj) {
/* This thread collided with another thread loading the same object
* into the cache at the same time. Defer to the other thread which
* is further along.
*/
return DECLINED;
}
/* Populate the cache handle */
h->read_headers = &read_headers;
h->write_body = &write_body;
h->write_headers = &write_headers;
h->remove_entity = &remove_entity;
return OK;
}
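/*
 * Illustrative sketch only (hypothetical helper; cache_find() is assumed to
 * be the lookup counterpart of cache_insert() in cache_cache.h): the
 * insert-early pattern described above.  The key is claimed in the table
 * under sconf->lock before the body is cached; if another thread already
 * claimed it, the caller backs off and DECLINEs.
 */
static int memcache_example_claim_key(cache_object_t *obj)
{
    cache_object_t *tmp_obj;

    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    tmp_obj = (cache_object_t *)cache_find(sconf->cache_cache, obj->key);
    if (!tmp_obj) {
        cache_insert(sconf->cache_cache, obj);
        sconf->object_cnt++;
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }

    /* Non-zero: we own the entry.  Zero: another thread got there first. */
    return (tmp_obj == NULL);
}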
{
/* Look up entity keyed to 'url' */
return DECLINED;
}
}
if (obj) {
#ifdef USE_ATOMICS
#else
#endif
/* cache is worried about overall counts, not 'open' ones */
/* If this is a subrequest, register the cleanup against
* the main request. This will prevent the cache object
* from being cleaned up from under the request after the
* subrequest is destroyed.
*/
rtmp = r;
while (rtmp) {
}
}
else {
}
}
}
if (!obj) {
return DECLINED;
}
/* Initialize the cache_handle */
h->read_headers = &read_headers;
h->write_body = &write_body;
h->write_headers = &write_headers;
h->remove_entity = &remove_entity;
return OK;
}
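/*
 * Illustrative sketch only (hypothetical helper): registering the
 * reference-dropping cleanup against the outermost request, as described
 * above, so that destroying a subrequest cannot pull the cache object out
 * from under the main request.  The cleanup function pointer is passed in
 * because its real name is not shown here; apr_pool_cleanup_register() and
 * request_rec->main are standard httpd/APR facilities.
 */
static void memcache_example_register_cleanup(request_rec *r,
                                              cache_object_t *obj,
                                              apr_status_t (*cleanup)(void *))
{
    request_rec *rmain = r;

    /* Walk up the subrequest chain to the main request */
    while (rmain->main) {
        rmain = rmain->main;
    }
    apr_pool_cleanup_register(rmain->pool, obj, cleanup,
                              apr_pool_cleanup_null);
}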
static int remove_entity(cache_handle_t *h)
{
/* Remove the cache object from the cache under protection */
}
/* If the object is not already marked for cleanup, remove
* it from the cache and mark it for cleanup. Remember,
* an object marked for cleanup is by design not in the
* hash table.
*/
sconf->object_cnt--;
}
}
return OK;
}
{
apr_ssize_t i;
apr_size_t len = 0;
apr_size_t idx = 0;
char *buf;
if (*nelts == 0 ) {
return APR_SUCCESS;
}
return APR_ENOMEM;
}
}
/* Transfer the headers into a contiguous memory block */
if (!buf) {
return APR_ENOMEM;
}
for (i = 0; i < *nelts; ++i) {
}
return APR_SUCCESS;
}
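/*
 * Illustrative sketch only (hypothetical helper): one way to build the
 * "contiguous memory block" described above.  Every header name and value is
 * copied into a single malloc'd buffer and the cache_header_tbl_t entries
 * point into it, so the whole set can later be released by freeing
 * tbl[0].hdr and tbl.  malloc() rather than pool allocation is used because
 * the cached headers must outlive the request.
 */
static apr_status_t memcache_example_pack_headers(apr_table_t *t,
                                                  cache_header_tbl_t **ptbl,
                                                  apr_ssize_t *nelts)
{
    const apr_array_header_t *arr = apr_table_elts(t);
    const apr_table_entry_t *elts = (const apr_table_entry_t *)arr->elts;
    cache_header_tbl_t *tbl;
    char *buf;
    apr_size_t len = 0;
    apr_size_t idx = 0;
    int i;

    *nelts = arr->nelts;
    if (*nelts == 0) {
        *ptbl = NULL;
        return APR_SUCCESS;
    }
    tbl = malloc(arr->nelts * sizeof(cache_header_tbl_t));
    if (!tbl) {
        return APR_ENOMEM;
    }
    for (i = 0; i < arr->nelts; i++) {
        len += strlen(elts[i].key) + strlen(elts[i].val) + 2;
    }
    buf = malloc(len);
    if (!buf) {
        free(tbl);
        return APR_ENOMEM;
    }
    /* Transfer the headers into the contiguous block */
    for (i = 0; i < arr->nelts; i++) {
        apr_size_t klen = strlen(elts[i].key) + 1;
        apr_size_t vlen = strlen(elts[i].val) + 1;

        tbl[i].hdr = memcpy(&buf[idx], elts[i].key, klen);
        idx += klen;
        tbl[i].val = memcpy(&buf[idx], elts[i].val, vlen);
        idx += vlen;
    }
    *ptbl = tbl;
    return APR_SUCCESS;
}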
int num_headers,
apr_table_t *t )
{
int i;
for (i = 0; i < num_headers; ++i) {
}
return APR_SUCCESS;
}
/* Define request processing hook handlers */
{
return DECLINED;
}
/* Order of the operations is important to avoid race conditions.
* First, remove the object from the cache. Remember, all additions
* and deletions from the cache are protected by sconf->lock.
* Increment the ref count on the object to indicate our thread
* is accessing the object. Then set the cleanup flag on the
* object. Remember, the cleanup flag is NEVER set on an
* object in the hash table. If an object has the cleanup
* flag set, it is guaranteed to NOT be in the hash table.
*/
}
if (obj) {
sconf->object_cnt--;
#ifdef USE_ATOMICS
/* Refcount increment in this case MUST be made under
* protection of the lock
*/
#else
}
#endif
if (obj) {
}
}
}
#ifdef USE_ATOMICS
if (obj) {
}
}
#endif
return OK;
}
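/*
 * Illustrative sketch only (hypothetical helper; cache_remove() is assumed
 * to be the removal counterpart of cache_insert() in cache_cache.h): the
 * ordering described above.  The object leaves the hash table, then gains a
 * reference on behalf of this thread, and only then is flagged for cleanup,
 * all while sconf->lock is held, so the cleanup flag is never observed on an
 * object that is still reachable through the table.
 */
static void memcache_example_evict(cache_object_t *obj)
{
    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    cache_remove(sconf->cache_cache, obj);
    sconf->object_cnt--;
    obj->refcount++;   /* this thread now holds a reference */
    obj->cleanup = 1;  /* safe: the object is no longer in the table */
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }
}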
{
int rc;
h->req_hdrs);
r->headers_out);
r->err_headers_out);
r->subprocess_env);
r->notes);
/* Content-Type: header may not be set if content is local since
* CACHE_IN runs before header filters....
*/
return rc;
}
{
apr_bucket *b;
/* CACHE_TYPE_FILE */
}
else {
/* CACHE_TYPE_HEAP */
}
return APR_SUCCESS;
}
{
int rc;
/*
* The cache needs to keep track of the following information:
* - Date, LastMod, Version, ReqTime, RespTime, ContentLength
* - The original request headers (for Vary)
* - The original response headers (for returning with a cached response)
* - The body of the message
*/
&mobj->num_req_hdrs,
r->headers_in);
if (rc != APR_SUCCESS) {
return rc;
}
/* Precompute how much storage we need to hold the headers */
if (rc != APR_SUCCESS) {
return rc;
}
if (rc != APR_SUCCESS) {
return rc;
}
r->subprocess_env );
if (rc != APR_SUCCESS) {
return rc;
}
if (rc != APR_SUCCESS) {
return rc;
}
/* Init the info struct */
}
}
if (info->response_time) {
}
if (info->request_time) {
}
}
if (info->content_type) {
return APR_ENOMEM;
}
}
return APR_ENOMEM;
}
}
return APR_ENOMEM;
}
}
return APR_ENOMEM;
}
}
return APR_SUCCESS;
}
{
apr_bucket *e;
char *cur;
int eos = 0;
int fd = 0;
int other = 0;
/* We can cache an open file descriptor if:
* - the brigade contains one and only one file_bucket &&
* - the brigade is complete &&
* - the file_bucket is the last data bucket in the brigade
*/
for (e = APR_BRIGADE_FIRST(b);
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_NEXT(e))
{
if (APR_BUCKET_IS_EOS(e)) {
eos = 1;
}
else if (APR_BUCKET_IS_FILE(e)) {
apr_bucket_file *a = e->data;
fd++;
}
else {
other++;
}
}
const char *name;
/* Open a new XTHREAD handle to the file */
APR_OS_DEFAULT, r->pool);
if (rv != APR_SUCCESS) {
return rv;
}
/* Open for business */
return APR_SUCCESS;
}
/* Content not suitable for fd caching. Cache in-memory instead. */
}
/*
* FD caching is not enabled or the content was not
* suitable for fd caching.
*/
return APR_ENOMEM;
}
}
/* Iterate across the brigade and populate the cache storage */
for (e = APR_BRIGADE_FIRST(b);
e != APR_BRIGADE_SENTINEL(b);
e = APR_BUCKET_NEXT(e))
{
const char *s;
if (APR_BUCKET_IS_EOS(e)) {
/* Caching a streamed response. Reallocate a buffer of the
* correct size and copy the streamed response into that
* buffer */
if (!buf) {
return APR_ENOMEM;
}
/* Now comes the crufty part... there is no way to tell the
* cache that the size of the object has changed. We need
* to remove the object, update the size and re-add the
* object, all under protection of the lock.
*/
}
/* If obj->cleanup is set, the object has been prematurely
* ejected from the cache by the garbage collector. Add the
* object back to the cache. If an object with the same key is
* found in the cache, eject it in favor of the completed obj.
*/
if (tmp_obj) {
sconf->object_cnt--;
}
}
}
else {
}
}
}
/* Open for business */
break;
}
if (rv != APR_SUCCESS) {
return rv;
}
if (len) {
/* Check for buffer overflow */
return APR_ENOMEM;
}
else {
}
}
/* This should not fail, but if it does, we are in BIG trouble
* because we just stomped all over the heap.
*/
}
return APR_SUCCESS;
}
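/*
 * Illustrative sketch only (hypothetical helper): the eligibility test
 * implied by the bucket-counting loop above -- an open file descriptor is
 * only worth caching when the brigade was complete and consisted of exactly
 * one file bucket and no other data buckets.  It could be called as
 * memcache_example_can_cache_fd(fd, other, eos) with the counters gathered
 * in that loop.
 */
static int memcache_example_can_cache_fd(int file_buckets, int other_buckets,
                                         int saw_eos)
{
    return (file_buckets == 1) && (other_buckets == 0) && saw_eos;
}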
/**
* Configuration and start-up
*/
{
int threaded_mpm;
/* Sanity check the cache configuration */
"MCacheMaxObjectSize must be greater than MCacheMinObjectSize");
return DONE;
}
"MCacheSize must be greater than MCacheMaxObjectSize");
return DONE;
}
/* Issue a notice only if something other than the default config
* is being used */
"MCacheMaxStreamingBuffer must be less than or equal to MCacheMaxObjectSize. "
"Resetting MCacheMaxStreamingBuffer to MCacheMaxObjectSize.");
}
}
"MCacheMaxStreamingBuffer must be greater than or equal to MCacheMinObjectSize. "
"Resetting MCacheMaxStreamingBuffer to MCacheMinObjectSize.");
}
if (threaded_mpm) {
}
if (sconf->cache_cache)
return OK;
return -1;
}
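/*
 * Illustrative sketch only (hypothetical helper): the clamping performed by
 * the sanity checks above -- MCacheMaxStreamingBuffer is kept within
 * [MCacheMinObjectSize, MCacheMaxObjectSize].
 */
static apr_size_t memcache_example_clamp_streaming_buffer(apr_size_t streaming,
                                                          apr_size_t min_obj,
                                                          apr_size_t max_obj)
{
    if (streaming > max_obj) {
        streaming = max_obj;  /* reset to MCacheMaxObjectSize */
    }
    if (streaming < min_obj) {
        streaming = min_obj;  /* reset to MCacheMinObjectSize */
    }
    return streaming;
}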
static const char
{
return "MCacheSize argument must be an integer representing the max cache size in KBytes.";
}
return NULL;
}
static const char
{
return "MCacheMinObjectSize value must be an integer (bytes)";
}
return NULL;
}
static const char
{
return "MCacheMaxObjectSize value must be an integer (bytes)";
}
return NULL;
}
static const char
{
return "MCacheMaxObjectCount value must be an integer";
}
return NULL;
}
static const char
{
}
else {
}
else {
return "currently implemented algorithms are LRU and GDSF";
}
}
return NULL;
}
const char *arg)
{
char *err;
if (*err != 0) {
return "MCacheMaxStreamingBuffer value must be a number";
}
return NULL;
}
static const command_rec cache_cmds[] =
{
"The maximum amount of memory used by the cache in KBytes"),
"The maximum number of objects allowed to be placed in the cache"),
"The minimum size (in bytes) of an object to be placed in the cache"),
"The maximum size (in bytes) of an object to be placed in the cache"),
"The algorithm used to remove entries from the cache (default: GDSF)"),
"Maximum number of bytes of content to buffer for a streamed response"),
{NULL}
};
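/*
 * For reference, each entry in cache_cmds[] would normally be declared with
 * the standard command macros; one entry might look roughly like this (the
 * handler name is an assumption, the directive and description appear above):
 *
 *   AP_INIT_TAKE1("MCacheSize", set_max_cache_size, NULL, RSRC_CONF,
 *                 "The maximum amount of memory used by the cache in KBytes"),
 */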
static void register_hooks(apr_pool_t *p)
{
/* cache initializer */
/* cache_hook_init(cache_mem_init, NULL, NULL, APR_HOOK_MIDDLE); */
}
module AP_MODULE_DECLARE_DATA mem_cache_module =
{
    STANDARD20_MODULE_STUFF,
    NULL,                    /* create per-directory config structure */
    NULL,                    /* merge per-directory config structures */
    create_cache_config,     /* create per-server config structure */
    NULL,                    /* merge per-server config structures */
    cache_cmds,              /* command apr_table_t */
    register_hooks
};