/* mod_mem_cache.c revision 12901074f5d6b36d08be84d8637b6f2c21e0da26 */
/* ====================================================================
* The Apache Software License, Version 1.1
*
* Copyright (c) 2000-2002 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Apache" and "Apache Software Foundation" must
* not be used to endorse or promote products derived from this
* software without prior written permission. For written
* permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* nor may "Apache" appear in their name, without prior written
* permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*
* This software consists of voluntary contributions made by many
* individuals on behalf of the Apache Software Foundation. For more
* information on the Apache Software Foundation, please see
* <http://www.apache.org/>.
*
* Portions of this software are based upon public domain software
* originally written at the National Center for Supercomputing Applications,
* University of Illinois, Urbana-Champaign.
*/
#define CORE_PRIVATE
#include "mod_cache.h"
#include "cache_hash.h"
#include "ap_mpm.h"
#include "apr_thread_mutex.h"
#if APR_HAVE_UNISTD_H
#include <unistd.h>
#endif
#if !APR_HAS_THREADS
#error This module does not currently compile unless you have a thread-capable APR. Sorry!
#endif
module AP_MODULE_DECLARE_DATA mem_cache_module;

/* How a cached response body is stored */
typedef enum {
    CACHE_TYPE_FILE = 1,   /* cached open file descriptor */
    CACHE_TYPE_HEAP,       /* body copied into a malloc()ed buffer */
    CACHE_TYPE_MMAP        /* declared but not implemented in this file */
} cache_type_e;

/* One serialized header entry. Both pointers reference positions inside a
 * single contiguous buffer built by serialize_table(); only entry[0].hdr
 * is ever passed to free(). */
typedef struct {
    char* hdr;
    char* val;
} cache_header_tbl_t;

/* Per-object private data hung off cache_object_t->vobj */
typedef struct mem_cache_object {
    cache_type_e type;
    apr_ssize_t num_header_out;        /* entry counts for the tables below */
    apr_ssize_t num_subprocess_env;
    apr_ssize_t num_notes;
    cache_header_tbl_t *header_out;
    cache_header_tbl_t *subprocess_env;
    cache_header_tbl_t *notes;
    apr_size_t m_len;                  /* body length in bytes */
    void *m;                           /* heap body (CACHE_TYPE_HEAP only) */
    apr_os_file_t fd;                  /* cached descriptor (CACHE_TYPE_FILE only) */
} mem_cache_object_t;

/* Module-wide cache state plus configured limits */
typedef struct {
    apr_thread_mutex_t *lock;          /* NULL when the MPM is not threaded */
    cache_hash_t *cacheht;             /* key string -> cache_object_t */
    apr_size_t cache_size;             /* bytes currently accounted to the cache */
    apr_size_t object_cnt;             /* objects currently in the hash table */
    /* Fields set by config directives */
    apr_size_t min_cache_object_size;  /* in bytes */
    apr_size_t max_cache_object_size;  /* in bytes */
    apr_size_t max_cache_size;         /* in bytes */
    apr_size_t max_object_cnt;
} mem_cache_conf;

/* Single server-wide config; set once in create_cache_config() */
static mem_cache_conf *sconf;

#define DEFAULT_MAX_CACHE_SIZE 100*1024
#define DEFAULT_MIN_CACHE_OBJECT_SIZE 0
#define DEFAULT_MAX_CACHE_OBJECT_SIZE 10000
#define DEFAULT_MAX_OBJECT_CNT 1000
#define CACHEFILE_LEN 20

/* Forward declarations */
static int remove_entity(cache_handle_t *h);
static apr_status_t write_headers(cache_handle_t *h, request_rec *r, cache_info *i);
static apr_status_t write_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b);
static apr_status_t read_headers(cache_handle_t *h, request_rec *r);
static apr_status_t read_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb);
/* Free a cache object and all storage hanging off it. The caller must
 * guarantee the object is no longer reachable from the hash table and
 * that no other thread holds a reference (refcount has reached zero).
 */
static void cleanup_cache_object(cache_object_t *obj)
{
    mem_cache_object_t *mobj = obj->vobj;

    /* TODO:
     * We desperately need a more efficient way of allocating objects. We're
     * making way too many malloc calls to create a fully populated
     * cache object...
     */

    /* Cleanup the cache_object_t. free(NULL) is a no-op, so the fields
     * that may never have been populated need no individual guards. */
    free(obj->key);
    free(obj->info.content_type);
    free(obj->info.etag);
    free(obj->info.lastmods);
    free(obj->info.filename);
    free(obj);

    /* Cleanup the mem_cache_object_t */
    if (mobj) {
        if (mobj->type == CACHE_TYPE_HEAP && mobj->m) {
            free(mobj->m);
        }
        if (mobj->type == CACHE_TYPE_FILE && mobj->fd) {
#ifdef WIN32
            CloseHandle(mobj->fd);
#else
            close(mobj->fd);
#endif
        }
        /* Each serialized table is exactly two allocations: the entry
         * array itself, and one contiguous string buffer whose start is
         * entry[0].hdr (see serialize_table()). */
        if (mobj->header_out) {
            free(mobj->header_out[0].hdr);
            free(mobj->header_out);
        }
        if (mobj->subprocess_env) {
            free(mobj->subprocess_env[0].hdr);
            free(mobj->subprocess_env);
        }
        if (mobj->notes) {
            free(mobj->notes[0].hdr);
            free(mobj->notes);
        }
        free(mobj);
    }
    return;
}
/* Pool cleanup callback: release one reference to a cache object.
 * If the object never completed (the cache load failed mid-stream), it is
 * first evicted from the hash table and flagged for cleanup; the storage
 * itself is freed only when the last reference is dropped.
 * Always returns APR_SUCCESS (pool cleanups ignore the value anyway).
 */
static apr_status_t decrement_refcount(void *arg)
{
    cache_object_t *obj = (cache_object_t *) arg;

    /* If obj->complete is not set, the cache update failed and the
     * object needs to be removed from the cache then cleaned up.
     */
    if (!obj->complete) {
        mem_cache_object_t *mobj = (mem_cache_object_t *) obj->vobj;
        if (sconf->lock) {
            apr_thread_mutex_lock(sconf->lock);
        }
        /* Remember, objects marked for cleanup are, by design, already
         * removed from the cache. remove_url() could have already
         * removed the object from the cache (and set obj->cleanup)
         */
        if (!obj->cleanup) {
            cache_hash_set(sconf->cacheht, obj->key, strlen(obj->key), NULL);
            sconf->object_cnt--;
            sconf->cache_size -= mobj->m_len;
            obj->cleanup = 1;
        }
        if (sconf->lock) {
            apr_thread_mutex_unlock(sconf->lock);
        }
    }

    /* Cleanup the cache object */
#ifdef USE_ATOMICS
    /* apr_atomic_dec returns zero only when this was the last reference */
    if (!apr_atomic_dec(&obj->refcount)) {
        if (obj->cleanup) {
            cleanup_cache_object(obj);
        }
    }
#else
    /* Non-atomic build: the decrement and the cleanup test must both
     * happen under the cache lock to avoid racing other releasers. */
    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    obj->refcount--;
    /* If the object is marked for cleanup and the refcount
     * has dropped to zero, cleanup the object
     */
    if ((obj->cleanup) && (!obj->refcount)) {
        cleanup_cache_object(obj);
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }
#endif
    return APR_SUCCESS;
}
static apr_status_t cleanup_cache_mem(void *sconfv)
{
cache_object_t *obj;
cache_hash_index_t *hi;
mem_cache_conf *co = (mem_cache_conf*) sconfv;
if (!co) {
return APR_SUCCESS;
}
if (sconf->lock) {
apr_thread_mutex_lock(sconf->lock);
}
/* Iterate over the cache and clean up each entry */
while ((hi = cache_hash_first(co->cacheht)) != NULL) {
/* Fetch the object from the cache */
cache_hash_this(hi, NULL, NULL, (void **)&obj);
if (obj) {
/* Remove the object from the cache */
cache_hash_set(sconf->cacheht, obj->key, strlen(obj->key), NULL);
/* Free the object if the recount == 0 */
#ifdef USE_ATOMICS
apr_atomic_inc(&obj->refcount);
obj->cleanup = 1;
if (!apr_atomic_dec(&obj->refcount)) {
#else
obj->cleanup = 1;
if (!obj->refcount) {
#endif
cleanup_cache_object(obj);
}
}
}
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
}
return APR_SUCCESS;
}
/*
* TODO: enable directives to be overridden in various containers
*/
/* Per-server config constructor. Builds the single module-wide cache
 * structure, seeds the tunables with their defaults, and registers the
 * teardown cleanup on the config pool.
 */
static void *create_cache_config(apr_pool_t *p, server_rec *s)
{
    int is_threaded;

    sconf = apr_pcalloc(p, sizeof(mem_cache_conf));

    /* A mutex is only needed (or created) under a threaded MPM;
     * elsewhere sconf->lock stays NULL and locking is skipped. */
    ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded);
    if (is_threaded) {
        apr_thread_mutex_create(&sconf->lock, APR_THREAD_MUTEX_DEFAULT, p);
    }

    /* Todo: determine hash table size from max_cache_object_cnt */
    sconf->cacheht = cache_hash_make(512);

    /* Directive-tunable limits start at their compiled-in defaults */
    sconf->min_cache_object_size = DEFAULT_MIN_CACHE_OBJECT_SIZE;
    sconf->max_cache_object_size = DEFAULT_MAX_CACHE_OBJECT_SIZE;
    sconf->max_object_cnt = DEFAULT_MAX_OBJECT_CNT;   /* number of objects */
    sconf->max_cache_size = DEFAULT_MAX_CACHE_SIZE;   /* bytes */

    /* Nothing cached yet */
    sconf->object_cnt = 0;
    sconf->cache_size = 0;

    apr_pool_cleanup_register(p, sconf, cleanup_cache_mem,
                              apr_pool_cleanup_null);
    return sconf;
}
/* mod_cache "create entity" hook: allocate a new cache object for `key`,
 * insert it into the hash table immediately (before the body is written),
 * and populate the handle's function pointers.
 * Returns OK on success, DECLINED when the type is not ours, a configured
 * limit would be exceeded, allocation fails, or another thread is already
 * caching the same key.
 */
static int create_entity(cache_handle_t *h, request_rec *r,
                         const char *type,
                         const char *key,
                         apr_size_t len)
{
    cache_object_t *obj, *tmp_obj;
    mem_cache_object_t *mobj;
    cache_type_e type_e;

    /* Map the provider type string to the internal storage type */
    if (!strcasecmp(type, "mem")) {
        type_e = CACHE_TYPE_HEAP;
    }
    else if (!strcasecmp(type, "fd")) {
        type_e = CACHE_TYPE_FILE;
    }
    else {
        return DECLINED;
    }

    /*
     * TODO: Get smarter about managing the cache size. If the cache is
     * full, we need to garbage collect stale/infrequently referenced
     * objects.
     */
    if (sconf->object_cnt >= sconf->max_object_cnt) {
        return DECLINED;
    }

    if (type_e == CACHE_TYPE_HEAP) {
        /* We can safely ignore these measures when caching open fds */
        if (len < sconf->min_cache_object_size ||
            len > sconf->max_cache_object_size) {
            return DECLINED;
        }
        if ((sconf->cache_size + len) > sconf->max_cache_size) {
            return DECLINED;
        }
    } else {
        /* CACHE_TYPE_FILE is only valid for local content
         * handled by the default handler?
         * This is not the right check...
         */
        if (!r->filename) {
            return DECLINED;
        }
    }

    /* Allocate and initialize cache_object_t.
     * malloc (not pool) storage: the object must outlive this request. */
    obj = calloc(1, sizeof(*obj));
    if (!obj) {
        return DECLINED;
    }
    obj->key = calloc(1, strlen(key) + 1);
    if (!obj->key) {
        cleanup_cache_object(obj);
        return DECLINED;
    }
    /* length includes the NUL terminator, so the copy is terminated */
    strncpy(obj->key, key, strlen(key) + 1);
    obj->info.len = len;

    /* Allocate and init mem_cache_object_t */
    mobj = calloc(1, sizeof(*mobj));
    if (!mobj) {
        cleanup_cache_object(obj);
        return DECLINED;
    }

    /* Finish initing the cache object */
    obj->refcount = 1;      /* this request holds the first reference */
    obj->complete = 0;      /* body not yet written */
    obj->cleanup = 0;
    obj->vobj = mobj;
    mobj->m_len = len;
    mobj->type = type_e;

    /* Place the cache_object_t into the hash table.
     * Note: Perhaps we should wait to put the object in the
     * hash table when the object is complete? I add the object here to
     * avoid multiple threads attempting to cache the same content only
     * to discover at the very end that only one of them will succeed.
     * Furthermore, adding the cache object to the table at the end could
     * open up a subtle but easy to exploit DoS hole: someone could request
     * a very large file with multiple requests. Better to detect this here
     * rather than after the cache object has been completely built and
     * initialized...
     * XXX Need a way to insert into the cache w/o such coarse grained locking
     */
    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    tmp_obj = (cache_object_t *) cache_hash_get(sconf->cacheht,
                                                key,
                                                CACHE_HASH_KEY_STRING);
    if (!tmp_obj) {
        cache_hash_set(sconf->cacheht, obj->key, strlen(obj->key), obj);
        sconf->object_cnt++;
        sconf->cache_size += len;
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }

    if (tmp_obj) {
        /* This thread collided with another thread loading the same object
         * into the cache at the same time. Defer to the other thread which
         * is further along.
         */
        cleanup_cache_object(obj);
        return DECLINED;
    }

    /* Drop our reference automatically when the request pool dies */
    apr_pool_cleanup_register(r->pool, obj, decrement_refcount,
                              apr_pool_cleanup_null);

    /* Populate the cache handle */
    h->cache_obj = obj;
    h->read_body = &read_body;
    h->read_headers = &read_headers;
    h->write_body = &write_body;
    h->write_headers = &write_headers;
    h->remove_entity = &remove_entity;

    return OK;
}
/* mod_cache "open entity" hook: look up `key` in the cache and, if a
 * completed object exists, take a reference on it and populate the handle.
 * Returns OK on a hit, DECLINED on a miss, an incomplete entry, or a type
 * string this provider does not handle.
 */
static int open_entity(cache_handle_t *h, request_rec *r, const char *type, const char *key)
{
    cache_object_t *obj;

    /* Look up entity keyed to 'url' */
    if (strcasecmp(type, "mem") && strcasecmp(type, "fd")) {
        return DECLINED;
    }
    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    obj = (cache_object_t *) cache_hash_get(sconf->cacheht, key,
                                            CACHE_HASH_KEY_STRING);
    if (obj) {
        if (obj->complete) {
            request_rec *rmain=r, *rtmp;
            /* Take a reference while still under the lock so the object
             * cannot be freed between lookup and use. */
#ifdef USE_ATOMICS
            apr_atomic_inc(&obj->refcount);
#else
            obj->refcount++;
#endif
            /* If this is a subrequest, register the cleanup against
             * the main request. This will prevent the cache object
             * from being cleaned up from under the request after the
             * subrequest is destroyed.
             */
            rtmp = r;
            while (rtmp) {
                rmain = rtmp;
                rtmp = rmain->main;
            }
            apr_pool_cleanup_register(rmain->pool, obj, decrement_refcount,
                                      apr_pool_cleanup_null);
        }
        else {
            /* Another thread is still writing this entry; treat as a miss */
            obj = NULL;
        }
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }

    if (!obj) {
        return DECLINED;
    }

    /* Initialize the cache_handle */
    h->read_body = &read_body;
    h->read_headers = &read_headers;
    h->write_body = &write_body;
    h->write_headers = &write_headers;
    h->remove_entity = &remove_entity;
    h->cache_obj = obj;

    return OK;
}
/* mod_cache "remove entity" hook: evict the handle's object from the hash
 * table and mark it for cleanup. Storage is freed later, when the last
 * reference is released (see decrement_refcount()). Always returns OK.
 */
static int remove_entity(cache_handle_t *h)
{
    cache_object_t *obj = h->cache_obj;

    /* Remove the cache object from the cache under protection */
    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    /* If the object is not already marked for cleanup, remove
     * it from the cache and mark it for cleanup. Remember,
     * an object marked for cleanup is by design not in the
     * hash table.
     */
    if (!obj->cleanup) {
        mem_cache_object_t *mobj = (mem_cache_object_t *) obj->vobj;
        cache_hash_set(sconf->cacheht, obj->key, strlen(obj->key), NULL);
        sconf->object_cnt--;
        sconf->cache_size -= mobj->m_len;
        obj->cleanup = 1;
        ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, "gcing a cache entry");
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }
    return OK;
}
/* Flatten an apr_table_t into malloc()ed storage that survives the request:
 * one array of cache_header_tbl_t entries plus one contiguous buffer holding
 * every key and value as NUL-terminated strings. entry[0].hdr points at the
 * start of that buffer, which is what cleanup_cache_object() frees.
 *
 * On success *obj/*nelts describe the serialized table (*obj is NULL when
 * the table is empty). Returns APR_ENOMEM on allocation failure; fix: the
 * entry array is now freed when the string-buffer allocation fails (the
 * original leaked it).
 */
static apr_status_t serialize_table(cache_header_tbl_t **obj,
                                    apr_ssize_t *nelts,
                                    apr_table_t *table)
{
    apr_table_entry_t *elts = (apr_table_entry_t *) table->a.elts;
    apr_ssize_t i;
    apr_size_t len = 0;
    apr_size_t idx = 0;
    char *buf;

    *nelts = table->a.nelts;
    if (*nelts == 0) {
        *obj = NULL;
        return APR_SUCCESS;
    }
    *obj = calloc(1, sizeof(cache_header_tbl_t) * table->a.nelts);
    if (NULL == *obj) {
        return APR_ENOMEM;
    }

    /* Precompute the total buffer size, including a NUL per string */
    for (i = 0; i < table->a.nelts; ++i) {
        len += strlen(elts[i].key);
        len += strlen(elts[i].val);
        len += 2; /* Extra space for NULL string terminator for key and val */
    }

    /* Transfer the headers into a contiguous memory block */
    buf = calloc(1, len);
    if (!buf) {
        free(*obj);      /* was leaked in the original on this path */
        *obj = NULL;
        return APR_ENOMEM;
    }

    for (i = 0; i < *nelts; ++i) {
        /* Lengths include the NUL terminator; memcpy of a known length
         * avoids the strncpy padding/termination pitfalls. */
        apr_size_t klen = strlen(elts[i].key) + 1;
        apr_size_t vlen = strlen(elts[i].val) + 1;

        (*obj)[i].hdr = &buf[idx];
        memcpy(&buf[idx], elts[i].key, klen);
        idx += klen;

        (*obj)[i].val = &buf[idx];
        memcpy(&buf[idx], elts[i].val, vlen);
        idx += vlen;
    }
    return APR_SUCCESS;
}
/* Replay a serialized header table into an apr_table_t. apr_table_setn is
 * used so the table references the cache object's strings rather than
 * copying them. Always returns APR_SUCCESS.
 */
static int unserialize_table(cache_header_tbl_t *ctbl,
                             int num_headers,
                             apr_table_t *t)
{
    int idx;

    for (idx = 0; idx < num_headers; idx++) {
        apr_table_setn(t, ctbl[idx].hdr, ctbl[idx].val);
    }
    return APR_SUCCESS;
}
/* Define request processing hook handlers */
/* mod_cache "remove url" hook: evict `key` from the cache. The object is
 * freed immediately when nothing references it; otherwise it is flagged
 * for cleanup and freed by the last holder. Returns OK, or DECLINED for a
 * type this provider does not handle.
 */
static int remove_url(const char *type, const char *key)
{
    cache_object_t *obj;

    if (strcasecmp(type, "mem") && strcasecmp(type, "fd")) {
        return DECLINED;
    }

    /* Order of the operations is important to avoid race conditions.
     * First, remove the object from the cache. Remember, all additions
     * deletions from the cache are protected by sconf->lock.
     * Increment the ref count on the object to indicate our thread
     * is accessing the object. Then set the cleanup flag on the
     * object. Remember, the cleanup flag is NEVER set on an
     * object in the hash table. If an object has the cleanup
     * flag set, it is guaranteed to NOT be in the hash table.
     */
    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    /* This call will return a pointer to the cache_object just removed */
    obj = cache_hash_set(sconf->cacheht, key, CACHE_HASH_KEY_STRING, NULL);
    if (obj) {
        /* obj has been removed from the cache */
        mem_cache_object_t *mobj = (mem_cache_object_t *) obj->vobj;
        sconf->object_cnt--;
        sconf->cache_size -= mobj->m_len;
#ifdef USE_ATOMICS
        /* Refcount increment in this case MUST be made under
         * protection of the lock
         */
        apr_atomic_inc(&obj->refcount);
#else
        /* Non-atomic build: refcount is only touched under the lock, so
         * a zero count here means we can free immediately. */
        if (!obj->refcount) {
            cleanup_cache_object(obj);
            obj = NULL;
        }
#endif
        if (obj) {
            obj->cleanup = 1;
        }
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }
#ifdef USE_ATOMICS
    /* Drop the reference taken above; free if it was the last one */
    if (obj) {
        if (!apr_atomic_dec(&obj->refcount)) {
            cleanup_cache_object(obj);
        }
    }
#endif
    return OK;
}
/* Handle callback: rebuild r->headers_out, r->subprocess_env and r->notes
 * from the serialized copies stored in the cache object.
 * Fix: the original discarded the return codes of the first two
 * unserialize_table() calls; every call is now checked.
 */
static apr_status_t read_headers(cache_handle_t *h, request_rec *r)
{
    int rc;
    mem_cache_object_t *mobj = (mem_cache_object_t *) h->cache_obj->vobj;

    /* Size the fresh tables from the cached entry counts */
    r->headers_out = apr_table_make(r->pool, mobj->num_header_out);
    r->subprocess_env = apr_table_make(r->pool, mobj->num_subprocess_env);
    r->notes = apr_table_make(r->pool, mobj->num_notes);

    rc = unserialize_table(mobj->header_out,
                           mobj->num_header_out,
                           r->headers_out);
    if (rc != APR_SUCCESS) {
        return rc;
    }
    rc = unserialize_table(mobj->subprocess_env,
                           mobj->num_subprocess_env,
                           r->subprocess_env);
    if (rc != APR_SUCCESS) {
        return rc;
    }
    rc = unserialize_table(mobj->notes,
                           mobj->num_notes,
                           r->notes);
    return rc;
}
/* Handle callback: deliver the cached body into the brigade, followed by
 * an EOS bucket. File-backed objects are wrapped in a file bucket; heap
 * objects are exposed directly (no copy) via an immortal bucket.
 */
static apr_status_t read_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_brigade *bb)
{
    mem_cache_object_t *mobj = (mem_cache_object_t *) h->cache_obj->vobj;
    apr_bucket *bkt;

    if (mobj->type != CACHE_TYPE_FILE) {
        /* CACHE_TYPE_HEAP: hand out the cached buffer without copying */
        bkt = apr_bucket_immortal_create(mobj->m, mobj->m_len,
                                         bb->bucket_alloc);
    }
    else {
        /* CACHE_TYPE_FILE: rewrap the cached OS descriptor for this pool */
        apr_file_t *file;
        apr_os_file_put(&file, &mobj->fd, APR_READ | APR_XTHREAD, p);
        bkt = apr_bucket_file_create(file, 0, mobj->m_len, p,
                                     bb->bucket_alloc);
    }
    APR_BRIGADE_INSERT_TAIL(bb, bkt);

    /* Terminate the brigade */
    bkt = apr_bucket_eos_create(bb->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, bkt);

    return APR_SUCCESS;
}
static apr_status_t write_headers(cache_handle_t *h, request_rec *r, cache_info *info)
{
cache_object_t *obj = h->cache_obj;
mem_cache_object_t *mobj = (mem_cache_object_t*) obj->vobj;
int rc;
/* Precompute how much storage we need to hold the headers */
rc = serialize_table(&mobj->header_out,
&mobj->num_header_out,
r->headers_out);
if (rc != APR_SUCCESS) {
return rc;
}
rc = serialize_table(&mobj->subprocess_env,
&mobj->num_subprocess_env,
r->subprocess_env );
if (rc != APR_SUCCESS) {
return rc;
}
rc = serialize_table(&mobj->notes, &mobj->num_notes, r->notes);
if (rc != APR_SUCCESS) {
return rc;
}
/* Init the info struct */
if (info->date) {
obj->info.date = info->date;
}
if (info->lastmod) {
obj->info.lastmod = info->lastmod;
}
if (info->expire) {
obj->info.expire = info->expire;
}
if (info->content_type) {
obj->info.content_type = (char*) calloc(1, strlen(info->content_type) + 1);
if (!obj->info.content_type) {
return APR_ENOMEM;
}
strcpy(obj->info.content_type, info->content_type);
}
if ( info->filename) {
obj->info.filename = (char*) calloc(1, strlen(info->filename) + 1);
if (!obj->info.filename ) {
return APR_ENOMEM;
}
strcpy(obj->info.filename, info->filename );
}
return APR_SUCCESS;
}
/* Handle callback: store the response body. For "fd" objects, cache an
 * open file descriptor when the brigade is exactly one file bucket plus
 * EOS; otherwise fall back to heap caching. May be called repeatedly with
 * partial brigades; obj->count tracks how much has been copied so far,
 * and obj->complete is set once EOS is seen.
 * Fix: the debug assertion was inverted (it asserted the overflow
 * condition, firing on every normal write); it now asserts the invariant.
 */
static apr_status_t write_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b)
{
    apr_status_t rv;
    cache_object_t *obj = h->cache_obj;
    mem_cache_object_t *mobj = (mem_cache_object_t *) obj->vobj;
    apr_read_type_e eblock = APR_BLOCK_READ;
    apr_bucket *e;
    char *cur;
    int eos = 0;

    if (mobj->type == CACHE_TYPE_FILE) {
        apr_file_t *file = NULL;
        int fd = 0;
        int other = 0;

        /* We can cache an open file descriptor if:
         * - the brigade contains one and only one file_bucket &&
         * - the brigade is complete &&
         * - the file_bucket is the last data bucket in the brigade
         */
        APR_BRIGADE_FOREACH(e, b) {
            if (APR_BUCKET_IS_EOS(e)) {
                eos = 1;
            }
            else if (APR_BUCKET_IS_FILE(e)) {
                apr_bucket_file *a = e->data;
                fd++;
                file = a->fd;
            }
            else {
                other++;
            }
        }
        if (fd == 1 && !other && eos) {
            apr_file_t *tmpfile;
            const char *name;
            /* Open a new XTHREAD handle to the file so the cached fd is
             * independent of the request's handle and its cleanup. */
            apr_file_name_get(&name, file);
            rv = apr_file_open(&tmpfile, name,
                               APR_READ | APR_BINARY | APR_XTHREAD | APR_FILE_NOCLEANUP,
                               APR_OS_DEFAULT, r->pool);
            if (rv != APR_SUCCESS) {
                return rv;
            }
            apr_file_unset_inherit(tmpfile);
            apr_os_file_get(&(mobj->fd), tmpfile);

            /* Open for business */
            obj->complete = 1;
            return APR_SUCCESS;
        }

        /* Content not suitable for fd caching. Cache in-memory instead. */
        mobj->type = CACHE_TYPE_HEAP;

        /* Check to make sure the object will not exceed configured thresholds */
        if (mobj->m_len < sconf->min_cache_object_size ||
            mobj->m_len > sconf->max_cache_object_size) {
            return APR_ENOMEM; /* ?? DECLINED; */
        }
        if ((sconf->cache_size + mobj->m_len) > sconf->max_cache_size) {
            return APR_ENOMEM; /* ?? DECLINED; */
        }
        /* NOTE(review): this update is not made under sconf->lock, unlike
         * the other cache_size updates in this file — confirm whether a
         * race is possible here. */
        sconf->cache_size += mobj->m_len;
    }

    /*
     * FD caching is not enabled or the content was not
     * suitable for fd caching.
     */
    if (mobj->m == NULL) {
        mobj->m = malloc(mobj->m_len);
        if (mobj->m == NULL) {
            return APR_ENOMEM;
        }
        obj->count = 0;
    }
    cur = (char *) mobj->m + obj->count;

    /* Iterate across the brigade and populate the cache storage */
    APR_BRIGADE_FOREACH(e, b) {
        const char *s;
        apr_size_t len;

        if (APR_BUCKET_IS_EOS(e)) {
            /* Open for business */
            obj->complete = 1;
            break;
        }
        rv = apr_bucket_read(e, &s, &len, eblock);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        if (len) {
            /* Check for buffer overflow */
            if ((obj->count + len) > mobj->m_len) {
                return APR_ENOMEM;
            }
            else {
                memcpy(cur, s, len);
                cur += len;
                obj->count += len;
            }
        }
        /* Invariant: we never write past the allocated buffer. The
         * original asserted the opposite (count > m_len), which fired
         * on every normal iteration in debug builds. */
        AP_DEBUG_ASSERT(obj->count <= mobj->m_len);
    }
    return APR_SUCCESS;
}
/* MCacheSize directive handler: max total cache size, argument in KBytes.
 * Fix: sscanf("%d") into an apr_size_t* is a format/type mismatch
 * (undefined behavior); parse into an unsigned long with "%lu" instead.
 */
static const char
*set_max_cache_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
    unsigned long val;

    if (sscanf(arg, "%lu", &val) != 1) {
        return "MCacheSize argument must be an integer representing the max cache size in KBytes.";
    }
    sconf->max_cache_size = val * 1024;   /* directive value is in KBytes */
    return NULL;
}
/* MCacheMinObjectSize directive handler: minimum cacheable size in bytes.
 * Fix: sscanf("%d") into an apr_size_t* is a format/type mismatch
 * (undefined behavior); parse into an unsigned long with "%lu" instead.
 */
static const char
*set_min_cache_object_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
    unsigned long val;

    if (sscanf(arg, "%lu", &val) != 1) {
        return "MCacheMinObjectSize value must be an integer (bytes)";
    }
    sconf->min_cache_object_size = val;
    return NULL;
}
/* MCacheMaxObjectSize directive handler: maximum cacheable size in bytes.
 * Fix: sscanf("%d") into an apr_size_t* is a format/type mismatch
 * (undefined behavior); parse into an unsigned long with "%lu" instead.
 */
static const char
*set_max_cache_object_size(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
    unsigned long val;

    if (sscanf(arg, "%lu", &val) != 1) {
        return "MCacheMaxObjectSize value must be an integer (bytes)";
    }
    sconf->max_cache_object_size = val;
    return NULL;
}
/* MCacheMaxObjectCount directive handler: maximum number of cached objects.
 * Fix: sscanf("%d") into an apr_size_t* is a format/type mismatch
 * (undefined behavior); parse into an unsigned long with "%lu" instead.
 */
static const char
*set_max_object_count(cmd_parms *parms, void *in_struct_ptr, const char *arg)
{
    unsigned long val;

    if (sscanf(arg, "%lu", &val) != 1) {
        return "MCacheMaxObjectCount value must be an integer";
    }
    sconf->max_object_cnt = val;
    return NULL;
}
/* Configuration directives exposed by this module (server scope only) */
static const command_rec cache_cmds[] =
{
    AP_INIT_TAKE1("MCacheSize", set_max_cache_size, NULL, RSRC_CONF,
                  "The maximum amount of memory used by the cache in KBytes"),
    AP_INIT_TAKE1("MCacheMaxObjectCount", set_max_object_count, NULL, RSRC_CONF,
                  "The maximum number of objects allowed to be placed in the cache"),
    AP_INIT_TAKE1("MCacheMinObjectSize", set_min_cache_object_size, NULL, RSRC_CONF,
                  "The minimum size (in bytes) of an object to be placed in the cache"),
    AP_INIT_TAKE1("MCacheMaxObjectSize", set_max_cache_object_size, NULL, RSRC_CONF,
                  "The maximum size (in bytes) of an object to be placed in the cache"),
    {NULL}
};
/* Register this provider's implementations with the mod_cache hooks */
static void register_hooks(apr_pool_t *p)
{
    /* cache initializer */
    /* cache_hook_cache_init(cache_init, NULL, NULL, AP_HOOK_FIRST); */
    cache_hook_create_entity(create_entity, NULL, NULL, APR_HOOK_MIDDLE);
    cache_hook_open_entity(open_entity, NULL, NULL, APR_HOOK_MIDDLE);
    cache_hook_remove_url(remove_url, NULL, NULL, APR_HOOK_MIDDLE);
}
/* Module dispatch record: only server config and hooks are used */
module AP_MODULE_DECLARE_DATA mem_cache_module =
{
    STANDARD20_MODULE_STUFF,
    NULL,                    /* create per-directory config structure */
    NULL,                    /* merge per-directory config structures */
    create_cache_config,     /* create per-server config structure */
    NULL,                    /* merge per-server config structures */
    cache_cmds,              /* command apr_table_t */
    register_hooks
};