/* ssl_scache_shmcb.c revision d86ef5503dcbc38e87c0e03cd3e1f16458cb6323 */
/* _ _
** _ __ ___ ___ __| | ___ ___| | mod_ssl
** | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL
** | | | | | | (_) | (_| | \__ \__ \ | www.modssl.org
** |_| |_| |_|\___/ \__,_|___|___/___/_| ftp.modssl.org
** |_____|
** Session Cache via Shared Memory (Cyclic Buffer Variant)
*/
/* ====================================================================
* The Apache Software License, Version 1.1
*
* Copyright (c) 2000-2001 The Apache Software Foundation. All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The end-user documentation included with the redistribution,
* if any, must include the following acknowledgment:
* "This product includes software developed by the
* Apache Software Foundation (http://www.apache.org/)."
* Alternately, this acknowledgment may appear in the software itself,
* if and wherever such third-party acknowledgments normally appear.
*
* 4. The names "Apache" and "Apache Software Foundation" must
* not be used to endorse or promote products derived from this
* software without prior written permission. For written
* permission, please contact apache@apache.org.
*
* 5. Products derived from this software may not be called "Apache",
* nor may "Apache" appear in their name, without prior written
* permission of the Apache Software Foundation.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE APACHE SOFTWARE FOUNDATION OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* ====================================================================
*/
#include "mod_ssl.h"
/*
* This shared memory based SSL session cache implementation was
* originally written by Geoff Thorpe <geoff@eu.c2.net> for C2Net Europe
* and as a contribution to Ralf Engelschall's mod_ssl project.
*/
/*
* The shared-memory segment header can be cast to and from the
* SHMCBHeader type, all other structures need to be initialised by
* utility functions.
*
* The "header" looks like this;
*
* data applying to the overall structure:
* - division_offset (unsigned int):
* how far into the shared memory segment the first division is.
* - division_size (unsigned int):
* how many bytes each division occupies.
* (NB: This includes the queue and the cache)
* - division_mask (unsigned char):
* the "mask" in the next line. Add one to this,
* and that's the number of divisions.
*
* data applying to within each division:
* - queue_size (unsigned int):
* how big each "queue" is. NB: The queue is the first block in each
* division and is followed immediately by the cache itself so so
* there's no cache_offset value.
*
* data applying to within each queue:
* - index_num (unsigned char):
* how many indexes in each cache's queue
* - index_offset (unsigned char):
* how far into the queue the first index is.
* - index_size:
* how big each index is.
*
* data applying to within each cache:
* - cache_data_offset (unsigned int):
* how far into the cache the session-data array is stored.
* - cache_data_size (unsigned int):
* how big each cache's data block is.
*
* statistics data (this will eventually be per-division but right now
* there's only one mutex):
* - stores (unsigned long):
* how many stores have been performed in the cache.
* - expiries (unsigned long):
* how many session have been expired from the cache.
* - scrolled (unsigned long):
* how many sessions have been scrolled out of full cache during a
* "store" operation. This is different to the "removes" stats as
* cache logistics. (NB: Also, this value should be deducible from
* the others if my code has no bugs, but I count it anyway - plus
* it helps debugging :-).
* - retrieves_hit (unsigned long):
* how many session-retrieves have succeeded.
* - retrieves_miss (unsigned long):
* how many session-retrieves have failed.
* - removes_hit (unsigned long):
* - removes_miss (unsigned long):
*
* Following immediately after the header is an array of "divisions".
* Each division is simply a "queue" immediately followed by its
* corresponding "cache". Each division handles some pre-defined band
* of sessions by using the "division_mask" in the header. Eg. if
* division_mask=0x1f then there are 32 divisions, the first of which
* will store sessions whose least-significant 5 bits are 0, the second
* stores session whose LS 5 bits equal 1, etc. A queue is an indexing
* structure referring to its corresponding cache.
*
* A "queue" looks like this;
*
* - first_pos (unsigned int):
* the location within the array of indexes where the virtual
* "left-hand-edge" of the cyclic buffer is.
* - pos_count (unsigned int):
* the number of indexes occupied from first_pos onwards.
*
* ...followed by an array of indexes, each of which can be
* memcpy'd to and from an SHMCBIndex, and look like this;
*
* - expires (time_t):
* the time() value at which this session expires.
* - offset (unsigned int):
* the offset within the cache data block where the corresponding
* session is stored.
* - s_id2 (unsigned char):
* the second byte of the session_id, stored as an optimisation to
* reduce the number of d2i_SSL_SESSION calls that are made when doing
* a lookup.
* - removed (unsigned char):
* a byte used to indicate whether a session has been "passively"
* removed. Ie. it is still in the cache but is to be disregarded by
* any "retrieve" operation.
*
* A "cache" looks like this;
*
* - first_pos (unsigned int):
* the location within the data block where the virtual
* "left-hand-edge" of the cyclic buffer is.
* - pos_count (unsigned int):
* the number of bytes used in the data block from first_pos onwards.
*
* ...followed by the data block in which actual DER-encoded SSL
* sessions are stored.
*/
/*
* Header - can be memcpy'd to and from the front of the shared
* memory segment. NB: The first copy (commented out) has the
* elements in a meaningful order, but due to data-alignment
* braindeadness, the second (uncommented) copy has the types grouped
* so as to decrease "struct-bloat". sigh.
*/
/*
 * Shared-memory segment header; cast directly onto the front of the
 * segment (see the layout description above for field semantics).
 * NB: this is a shared-memory ABI — do not reorder or resize fields
 * without reinitialising every segment that uses this layout.
 */
typedef struct {
#if 0
/* Reference ordering only (compiled out): fields grouped by meaning. */
unsigned char division_mask;
unsigned int division_offset;
unsigned int division_size;
unsigned int queue_size;
unsigned char index_num;
unsigned char index_offset;
unsigned char index_size;
unsigned int cache_data_offset;
unsigned int cache_data_size;
unsigned long num_stores;
unsigned long num_expiries;
unsigned long num_scrolled;
unsigned long num_retrieves_hit;
unsigned long num_retrieves_miss;
unsigned long num_removes_hit;
unsigned long num_removes_miss;
#else
/* Live layout: types grouped largest-first to minimise struct padding
 * ("struct-bloat") on alignment-picky platforms. */
/* Statistics counters (cache-wide, one mutex for now). */
unsigned long num_stores;
unsigned long num_expiries;
unsigned long num_scrolled;
unsigned long num_retrieves_hit;
unsigned long num_retrieves_miss;
unsigned long num_removes_hit;
unsigned long num_removes_miss;
/* Geometry of the segment: divisions, queues and caches. */
unsigned int division_offset;
unsigned int division_size;
unsigned int queue_size;
unsigned int cache_data_offset;
unsigned int cache_data_size;
/* division_mask + 1 == number of divisions (see layout comment). */
unsigned char division_mask;
unsigned char index_num;
unsigned char index_offset;
unsigned char index_size;
#endif
} SHMCBHeader;
/*
* Index - can be memcpy'd to and from an index inside each
* queue's index array.
*/
/*
 * NOTE(review): the file-top documentation (see "A queue looks like
 * this") describes each index as carrying an "expires (time_t)" value,
 * and shmcb_expire_division() works from per-index expiry times; the
 * member was missing from this truncated copy and has been restored
 * (matching upstream mod_ssl) — confirm against revision d86ef550.
 */
typedef struct {
    time_t expires;        /* time() value at which this session expires */
    unsigned int offset;   /* offset of the session within the cache data block */
    unsigned char s_id2;   /* 2nd byte of session_id, cheap pre-filter on lookup */
    unsigned char removed; /* non-zero => passively removed, skip on retrieve */
} SHMCBIndex;
/*
* Queue - must be populated by a call to shmcb_get_division
* and the structure's pointers are used for updating (ie.
* the structure doesn't need any "set" to update values).
*/
typedef struct {
unsigned int *first_pos; /* -> (in shm) index-array slot at the cyclic buffer's left edge */
unsigned int *pos_count; /* -> (in shm) number of index slots used from first_pos onwards */
} SHMCBQueue;
/*
* Cache - same comment as for Queue. 'Queue's are in a 1-1
* correspondance with 'Cache's and are usually carried round
* in a pair, they are only seperated for clarity.
*/
typedef struct {
unsigned int *first_pos; /* -> (in shm) byte offset of the cyclic data block's left edge */
unsigned int *pos_count; /* -> (in shm) number of data bytes used from first_pos onwards */
unsigned char *data;     /* -> (in shm) start of the DER-encoded session data block */
} SHMCBCache;
/*
* Forward function prototypes.
*/
/* Functions for working around data-alignment-picky systems (sparcs,
   Irix, etc). These use "memcpy" as a way of foxing these systems into
   treating the composite types as byte-arrays rather than higher-level
   primitives that it prefers to have 4-(or 8-)byte aligned. I don't
   envisage this being a performance issue as a couple of 2 or 4 byte
   memcpys can hardly make a dent on the massive memmove operations this
   cache technique depends on, nor the overheads of ASN en/decoding. */
/* Alignment-safe scalar accessors (see block comment below). */
static unsigned int shmcb_get_safe_uint(unsigned int *);
static void shmcb_set_safe_uint(unsigned int *, unsigned int);
#if 0 /* Unused so far */
static unsigned long shmcb_get_safe_ulong(unsigned long *);
static void shmcb_set_safe_ulong(unsigned long *, unsigned long);
#endif
/* Underlying functions for session-caching */
/* Utility functions for manipulating the structures */
/* NOTE(review): this prototype list appears truncated — several statics
 * defined later (shmcb_init_memory, shmcb_store_session,
 * shmcb_remove_session, shmcb_get_division, shmcb_get_index,
 * shmcb_expire_division, the shmcb_cyclic_* helpers, ...) have no
 * declaration here; confirm against upstream revision d86ef550. */
static void shmcb_get_header(void *, SHMCBHeader **);
static BOOL shmcb_insert_encoded_session(server_rec *, SHMCBQueue *, SHMCBCache *, unsigned char *, unsigned int, unsigned char *, time_t);
static SSL_SESSION *shmcb_lookup_session_id(server_rec *, SHMCBQueue *, SHMCBCache *, UCHAR *, int);
/*
* Data-alignment functions (a.k.a. avoidance tactics)
*
* NB: On HPUX (and possibly others) there is a *very* mischievous little
* "optimisation" in the compilers where it will convert the following;
* memcpy(dest_ptr, &source, sizeof(unsigned int));
* (where dest_ptr is of type (unsigned int *) and source is (unsigned int))
* into;
* *dest_ptr = source; (or *dest_ptr = *(&source), not sure).
* Either way, it completely destroys the whole point of these _safe_
* functions, because the assignment operation will fall victim to the
* architecture's byte-alignment dictations, whereas the memcpy (as a
* byte-by-byte copy) should not. sigh. So, if you're wondering about the
* apparently unnecessary conversions to (unsigned char *) in these
* functions, you now have an explanation. Don't just revert them back and
* say "ooh look, it still works" - if you try it on HPUX (well, 32-bit
* HPUX 11.00 at least) you may find it fails with a SIGBUS. :-(
*/
/*
 * Alignment-safe read of an unsigned int: copy byte-by-byte via memcpy
 * so no word-aligned load is ever issued.  The (unsigned char *) detour
 * must not be "simplified" away — see the HPUX note above.
 * NOTE(review): the truncated original declared 'ret' but returned it
 * without ever copying from 'ptr' (undefined behaviour); the memcpy
 * restores the documented behaviour.
 */
static unsigned int shmcb_get_safe_uint(unsigned int *ptr)
{
    unsigned char *from;
    unsigned int ret;

    from = (unsigned char *)ptr;
    memcpy(&ret, from, sizeof(unsigned int));
    return ret;
}
/*
 * Alignment-safe store counterpart to shmcb_get_safe_uint().
 * NOTE(review): only orphaned braces survived in this copy; the
 * signature is restored from the forward prototype above and the body
 * mirrors the "get" variant — confirm against revision d86ef550.
 */
static void shmcb_set_safe_uint(unsigned int *ptr, unsigned int val)
{
    unsigned char *dest;

    dest = (unsigned char *)ptr;
    memcpy(dest, &val, sizeof(unsigned int));
}
#if 0 /* Unused so far */
/*
 * Alignment-safe read of an unsigned long (currently compiled out by
 * the surrounding "#if 0" — kept for future use).
 * NOTE(review): the truncated original returned 'ret' uninitialised;
 * the memcpy restores the intended byte-wise copy.
 */
static unsigned long shmcb_get_safe_ulong(unsigned long *ptr)
{
    unsigned char *from;
    unsigned long ret;

    from = (unsigned char *)ptr;
    memcpy(&ret, from, sizeof(unsigned long));
    return ret;
}
/*
 * Alignment-safe store counterpart to shmcb_get_safe_ulong() (also
 * compiled out by the surrounding "#if 0").
 * NOTE(review): restored from orphaned braces using the forward
 * prototype above — confirm against revision d86ef550.
 */
static void shmcb_set_safe_ulong(unsigned long *ptr, unsigned long val)
{
    unsigned char *dest;

    dest = (unsigned char *)ptr;
    memcpy(dest, &val, sizeof(unsigned long));
}
#endif
/*
 * Alignment-safe read of a time_t (used for per-index expiry values).
 * NOTE(review): this copy had lost the signature line and the copy
 * statement (it declared 'from' and returned an undeclared 'ret');
 * the definition is reconstructed to match the other _safe_ accessors
 * and upstream mod_ssl — confirm against revision d86ef550.
 */
static time_t shmcb_get_safe_time(time_t *ptr)
{
    unsigned char *from;
    time_t ret;

    from = (unsigned char *)ptr;
    memcpy(&ret, from, sizeof(time_t));
    return ret;
}
/*
 * Alignment-safe store counterpart to shmcb_get_safe_time().
 * NOTE(review): reconstructed from orphaned braces to match the other
 * _safe_ setters and upstream mod_ssl — confirm against d86ef550.
 */
static void shmcb_set_safe_time(time_t *ptr, time_t val)
{
    unsigned char *dest;

    dest = (unsigned char *)ptr;
    memcpy(dest, &val, sizeof(time_t));
}
/*
**
** High-Level "handlers" as per ssl_scache.c
**
*/
/* NOTE(review): orphaned braces — the signature and body of whatever
 * definition lived here have been lost from this copy; recover it from
 * upstream mod_ssl ssl_scache_shmcb.c (revision d86ef550) before this
 * file can compile. */
{
}
/* NOTE(review): this appears to be the body of ssl_scache_shmcb_init()
 * (it allocates the MM shared-memory segment, trial-and-error
 * shmcb_malloc's the largest obtainable block, then initialises the
 * cyclic-buffer cache inside it) but the signature line and many
 * statements — the ap_mm_create call, permission fixup, the allocation
 * loop header, the shmcb_init_memory call and the final pointer stash —
 * are missing from this copy.  Recover from revision d86ef550. */
{
void *shm_segment = NULL;
int avail, avail_orig;
/*
* Create shared memory segment
*/
ssl_die();
}
ssl_log(s, SSL_LOG_ERROR,
"Cannot allocate shared memory: %s", ap_mm_error());
ssl_die();
}
/*
* Make sure the child processes have access to the underlying files
*/
/*
* Create cache inside the shared memory segment
*/
avail);
/*
* For some reason to do with MM's internal management, I can't
* allocate the full amount. Implement a reasonable form of trial
* and error and output trace information.
*/
if (shm_segment == NULL) {
ssl_log(s, SSL_LOG_TRACE,
"shmcb_malloc attempt for %u bytes failed", avail);
avail -= 2;
}
}
if (shm_segment == NULL) {
ssl_log(s, SSL_LOG_ERROR,
"Cannot allocate memory for the 'shmcb' session cache\n");
ssl_die();
}
"memory", avail);
ssl_log(s, SSL_LOG_ERROR,
"Failure initialising 'shmcb' shared memory");
ssl_die();
}
/*
* Success ... we hack the memory block into place by cheating for
* now and stealing a member variable the original shared memory
* cache was using. :-)
*/
return;
}
/*
 * Tear down the 'shmcb' session cache: destroy the MM shared-memory
 * pool created by ssl_scache_shmcb_init(), if any.
 * NOTE(review): only the signature and orphaned braces survived in
 * this copy; the body is restored from upstream mod_ssl — confirm the
 * member name (pSessionCacheDataMM) against revision d86ef550.
 */
void ssl_scache_shmcb_kill(server_rec *s)
{
    SSLModConfigRec *mc = myModConfig();

    if (mc->pSessionCacheDataMM != NULL) {
        ap_mm_destroy(mc->pSessionCacheDataMM);
        mc->pSessionCacheDataMM = NULL;
    }
    return;
}
/* NOTE(review): body of ssl_scache_shmcb_store() — takes the global
 * mutex, delegates to shmcb_store_session() and logs on failure — but
 * the signature, the shm_segment fetch, the store call and the
 * 'to_return' declaration are missing from this copy.  Recover from
 * revision d86ef550. */
{
void *shm_segment;
/* We've kludged our pointer into the other cache's member variable. */
ssl_mutex_on(s);
/* in this cache engine, "stores" should never fail. */
"session in the cache.");
else {
}
ssl_mutex_off(s);
return to_return;
}
/* NOTE(review): body of ssl_scache_shmcb_retrieve() — takes the mutex,
 * delegates to shmcb_retrieve_session() and returns the SSL_SESSION (or
 * NULL) — but the signature, the shm_segment fetch, the retrieve call
 * and the success-branch logging are missing from this copy.  Recover
 * from revision d86ef550. */
{
void *shm_segment;
/* We've kludged our pointer into the other cache's member variable. */
ssl_mutex_on(s);
ssl_mutex_off(s);
if (pSession)
else {
"we have no such session.");
}
return pSession;
}
/* NOTE(review): body of ssl_scache_shmcb_remove() — almost entirely
 * missing from this copy (only the shm_segment declaration survives);
 * upstream takes the mutex and delegates to shmcb_remove_session().
 * Recover from revision d86ef550. */
{
void *shm_segment;
/* We've kludged our pointer into the other cache's member variable. */
}
/*
 * Periodic-expiry hook for the 'shmcb' cache.  Deliberately a no-op:
 * stale entries are ditched inline by the division-level logic (see
 * shmcb_expire_division and the "ditch them first thing" comments in
 * the insert/lookup paths), so there is nothing to do here.
 */
void ssl_scache_shmcb_expire(server_rec *s)
{
}
/* NOTE(review): body of ssl_scache_shmcb_status() — walks every
 * division accumulating usage and expiry statistics, then emits an
 * HTML summary via the 'func(..., arg)' output callback — but the
 * signature and most statements (the per-division loop, the counters
 * such as min_expiry/non_empty_divisions/average_expiry, and the
 * ap_snprintf/func output calls) are missing from this copy.  Recover
 * from revision d86ef550. */
{
void *shm_segment;
double expiry_total;
/* We've kludged our pointer into the other cache's member variable. */
/* Get the header structure. */
expiry_total = 0;
/* It may seem strange to grab "now" at this point, but in theory
* we should never have a negative threshold but grabbing "now" after
* the loop (which performs expiries) could allow that chance. */
expiry_total += (double) idxexpiry;
if (min_expiry == 0)
else
}
}
}
"bytes, current sessions: <b>%d</b><br>",
if (non_empty_divisions != 0) {
if (now < average_expiry)
else
"<br>"), arg);
}
return;
}
/*
**
** Memory manipulation and low-level cache operations
**
*/
/*
 * Carve the raw shared-memory block into the header + divisions layout
 * described at the top of the file, and initialise every queue/cache
 * to empty.  Returns FALSE only for absurdly small segments.
 * NOTE(review): many statements are missing from this copy (the
 * declarations of 'temp'/'granularity', the size bailout condition,
 * the header-field assignments and the per-division reset loop).
 * Also note the comment below says "divide temp by 160" while the
 * surviving code does 'temp /= 120' — reconcile against d86ef550.
 */
static BOOL shmcb_init_memory(
server_rec *s, void *shm_mem,
unsigned int shm_mem_size)
{
/* Calculate some sizes... */
temp = sizeof(SHMCBHeader);
/* If the segment is ridiculously too small, bail out */
return FALSE;
}
/* Make temp the amount of memory without the header */
/* Work on the basis that you need 10 bytes index for each session
* (approx 150 bytes), which is to divide temp by 160 - and then
* make sure we err on having too index space to burn even when
* the cache is full, which is a lot less stupid than having
* having not enough index space to utilise the whole cache!. */
temp /= 120;
shm_mem_size, temp);
/* We should divide these indexes evenly amongst the queues. Try
* to get it so that there are roughly half the number of divisions
* as there are indexes in each division. */
granularity = 256;
granularity /= 2;
/* So we have 'granularity' divisions, set 'temp' equal to the
* number of indexes in each division. */
temp /= granularity;
/* Too small? Bail ... */
if (temp < 5) {
return FALSE;
}
/* OK, we're sorted - from here on in, the return should be TRUE */
/* Now calculate the space for each division */
/* Calculate the space left in each division for the cache */
/* Output trace info */
/* The header is done, make the caches empty */
}
return TRUE;
}
/*
 * Serialise (i2d) an SSL_SESSION and insert it into the division
 * selected by masking the session id; bumps header->num_stores on
 * success.
 * NOTE(review): the parameter list and several statements (the header
 * fetch, division selection, i2d_SSL_SESSION call and the insert call)
 * are missing from this copy.  Recover from revision d86ef550.
 */
static BOOL shmcb_store_session(
{
unsigned char masked_index;
unsigned char encoded[SSL_SESSION_MAX_DER];
unsigned char *ptr_encoded;
unsigned int len_encoded;
/* Get the header structure, which division this session will fall into etc. */
return FALSE;
}
/* Serialise the session, work out how much we're dealing
* with. NB: This check could be removed if we're not paranoid
* or we find some assurance that it will never be necessary. */
if (len_encoded > SSL_SESSION_MAX_DER) {
return FALSE;
}
expiry_time)) {
return FALSE;
}
header->num_stores++;
return TRUE;
}
/*
 * Find and deserialise the cached session matching 'id', or NULL.
 * NOTE(review): the parameter list and several statements are missing
 * from this copy.  Also note the surviving 'return FALSE' statements in
 * a function returning SSL_SESSION* — upstream should return NULL;
 * confirm against revision d86ef550.
 */
static SSL_SESSION *shmcb_retrieve_session(
server_rec *s, void *shm_segment,
{
unsigned char masked_index;
if (idlen < 2) {
"(%u bytes)", idlen);
return FALSE;
}
/* Get the header structure, which division this session lookup
* will come from etc. */
return FALSE;
}
/* Get the session corresponding to the session_id or NULL if it
* doesn't exist (or is flagged as "removed"). */
if (pSession)
else
return pSession;
}
/*
 * Remove (flag as removed) the cached session matching the given id,
 * updating the hit/miss removal statistics.
 * NOTE(review): the parameter list and several statements (the id
 * length check, header/division fetch, the remove call and the 'res'
 * declaration) are missing from this copy.  Recover from d86ef550.
 */
static BOOL shmcb_remove_session(
server_rec *s, void *shm_segment,
{
unsigned char masked_index;
return FALSE;
}
/* Get the header structure, which division this session remove
* will happen in etc. */
id[0], masked_index);
return FALSE;
}
if (res)
else
return res;
}
/*
**
** Weirdo cyclic buffer functions
**
*/
/* This gets used in the cyclic "index array" (in the 'Queue's) and
* in the cyclic 'Cache's too ... you provide the "width" of the
* cyclic store, the starting position and how far to move (with
* wrapping if necessary). Basically it's addition modulo buf_size. */
/*
 * Advance 'start_pos' by 'to_add' within a cyclic buffer of
 * 'buf_size' slots, wrapping as needed — i.e. addition modulo
 * buf_size, exactly as the comment above specifies.
 * NOTE(review): the truncated original returned start_pos without ever
 * adding to_add; the arithmetic is restored here.
 */
static unsigned int shmcb_cyclic_increment(
    unsigned int buf_size,
    unsigned int start_pos,
    unsigned int to_add)
{
    start_pos += to_add;
    while (start_pos >= buf_size)
        start_pos -= buf_size;
    return start_pos;
}
/* Given two positions in a cyclic buffer, calculate the "distance".
* This is to cover the case ("non-trivial") where the 'next' offset
* is to the left of the 'start' offset. NB: This calculates the
* space inclusive of one end-point but not the other. There is an
* ambiguous case (which is why we use the <start_pos,offset>
* coordinate system rather than <start_pos,end_pos> one) when 'start'
* is the same as 'next'. It could indicate the buffer is full or it
* can indicate the buffer is empty ... I choose the latter as it's
* easier and usually necessary to check if the buffer is full anyway
* before doing incremental logic (which is this useful for), but we
* definitely need the empty case handled - in fact it's our starting
* state!! */
/*
 * Distance from 'start_offset' to 'next_offset' within a cyclic buffer
 * of 'buf_size' (inclusive of one end-point, see the comment above).
 * start == next means "empty" (returns 0), per the stated convention.
 * NOTE(review): the truncated original had lost both return
 * expressions; they are restored to match the documented contract.
 */
static unsigned int shmcb_cyclic_space(
    unsigned int buf_size,
    unsigned int start_offset,
    unsigned int next_offset)
{
    /* Is it the trivial case? */
    if (start_offset <= next_offset)
        return next_offset - start_offset;              /* yes */
    else
        return (buf_size - start_offset) + next_offset; /* no: wrapped */
}
/* A "normal-to-cyclic" memcpy ... this takes a linear block of
* memory and copies it onto a cyclic buffer. The purpose and
* function of this is pretty obvious, you need to cover the case
* that the destination (cyclic) buffer has to wrap round. */
/*
 * "Normal-to-cyclic" memcpy: copy 'src_size' linear bytes from 'src'
 * into the cyclic buffer 'data' (of 'buf_size' bytes) starting at
 * 'dest_offset', wrapping to the front when necessary.
 * NOTE(review): the truncated original had lost the trailing
 * parameters (src pointer and length) and both memcpy calls; restored
 * to match upstream — confirm against revision d86ef550.
 */
static void shmcb_cyclic_ntoc_memcpy(
    unsigned int buf_size,
    unsigned char *data,
    unsigned int dest_offset,
    unsigned char *src,
    unsigned int src_size)
{
    /* Can it be copied all in one go? */
    if (dest_offset + src_size < buf_size)
        /* yes */
        memcpy(data + dest_offset, src, src_size);
    else {
        /* no: fill to the physical end, then wrap to the front */
        memcpy(data + dest_offset, src, buf_size - dest_offset);
        memcpy(data, src + buf_size - dest_offset,
               src_size + dest_offset - buf_size);
    }
    return;
}
/* A "cyclic-to-normal" memcpy ... given the last function, this
* one's purpose is clear, it copies out of a cyclic buffer handling
* wrapping. */
/*
 * "Cyclic-to-normal" memcpy: copy 'src_len' bytes out of the cyclic
 * buffer 'data' (of 'buf_size' bytes), starting at 'src_offset' and
 * wrapping to the front when necessary, into the linear buffer 'dest'.
 * NOTE(review): the truncated original had lost both memcpy calls;
 * restored as the mirror image of shmcb_cyclic_ntoc_memcpy.
 */
static void shmcb_cyclic_cton_memcpy(
    unsigned int buf_size,
    unsigned char *dest,
    unsigned char *data,
    unsigned int src_offset,
    unsigned int src_len)
{
    /* Can it be copied all in one go? */
    if (src_offset + src_len < buf_size)
        /* yes */
        memcpy(dest, data + src_offset, src_len);
    else {
        /* no: read to the physical end, then wrap to the front */
        memcpy(dest, data + src_offset, buf_size - src_offset);
        memcpy(dest + buf_size - src_offset, data,
               src_len + src_offset - buf_size);
    }
    return;
}
/* Here's the cool hack that makes it all work ... by simply
* making the first collection of bytes *be* our header structure
* (casting it into the C structure), we have the perfect way to
* maintain state in a shared-memory session cache from one call
* (and process) to the next, use the shared memory itself! The
* original mod_ssl shared-memory session cache uses variables
* inside the context, but we simply use that for storing the
* pointer to the shared memory itself. And don't forget, after
* initialisation the header's layout fields are effectively constant,
* so we can read it outside any locking.
* <grin> - sometimes I just *love* coding y'know?! */
{
return;
}
/* This is what populates our "interesting" structures. Given a
* pointer to the header, and an index into the appropriate
* division (this must have already been masked using the
* division_mask by the caller!), we can populate the provided
* SHMCBQueue and SHMCBCache structures with values and
* pointers to the underlying shared memory. Upon returning
* (if not FALSE), the caller can meddle with the pointer
* values and they will map into the shared-memory directly,
* as such there's no need to "free" or "set" the Queue or
* Cache values, they were themselves references to the *real*
* data. */
/*
 * Populate the caller's SHMCBQueue/SHMCBCache with pointers straight
 * into the shared-memory division selected by the (pre-masked) index.
 * NOTE(review): the parameter list and the pointer-arithmetic
 * statements (bounds test, pQueue/pCache computation from the header's
 * division_offset/division_size/queue_size, and the structure-member
 * assignments) are missing from this copy.  Recover from d86ef550.
 */
static BOOL shmcb_get_division(
{
unsigned char *pQueue;
unsigned char *pCache;
/* bounds check */
return FALSE;
/* Locate the blocks of memory storing the corresponding data */
/* Populate the structures with appropriate pointers */
/* Our structures stay packed, no matter what the system's
* data-alignment regime is. */
return TRUE;
}
/* This returns a pointer to the piece of shared memory containing
* a specified 'Index'. SHMCBIndex, like SHMCBHeader, is a fixed
* width non-referencing structure of primitive types that can be
* cast onto the corresponding block of shared memory. Thus, by
* returning a cast pointer to that section of shared memory, the
* caller can read and write values to and from the "structure" and
* they are actually reading and writing the underlying shared
* memory. */
/*
 * Return a pointer to the idx'th SHMCBIndex within a queue's shared
 * memory (NULL if out of bounds); the caller reads/writes shared
 * memory directly through the returned structure pointer.
 * NOTE(review): the parameter list and most of the return expression
 * are missing from this copy — only the '(idx * sizeof(SHMCBIndex))'
 * tail of the pointer arithmetic survives.  Recover from d86ef550.
 */
static SHMCBIndex *shmcb_get_index(
{
/* bounds check */
return NULL;
/* Return a pointer to the index. NB: I am being horribly pendantic
* here so as to avoid any potential data-alignment assumptions being
* placed on the pointer arithmetic by the compiler (sigh). */
(idx * sizeof(SHMCBIndex)));
}
/* This functions rolls expired cache (and index) entries off the front
* of the cyclic buffers in a division. The function returns the number
* of expired sessions. */
/*
 * Roll expired entries off the front of a division's cyclic queue and
 * cache; returns the number of sessions expired ('loop').
 * NOTE(review): the parameter list, the iteration condition comparing
 * per-index expiry times against "now", and the queue/cache pointer
 * adjustments are missing from this copy.  Recover from d86ef550.
 */
static unsigned int shmcb_expire_division(
{
/* We must calculate num and space ourselves based on expiry times. */
loop = 0;
/* Cache useful values */
/* it hasn't expired yet, we're done iterating */
break;
/* This one should be expired too. Shift to the next entry. */
loop++;
}
/* Find the new_offset and make the expiries happen. */
if (loop > 0) {
/* We calculate the new_offset by "peeking" (or in the
* case it's the last entry, "sneaking" ;-). */
/* We are expiring everything! This is easy to do... */
}
else {
/* The Queue is easy to adjust */
/* peek to the start of the next session */
/* We can use shmcb_cyclic_space because we've guaranteed
}
}
return loop;
}
/* Insert a DER-encoded session into a division, scrolling out
* (early or otherwise) any leading sessions as necessary to ensure
* there is room. An error return (FALSE) should only happen in the
* event of surreal values being passed on, or ridiculously small
* cache sizes. NB: For tracing purposes, this function is also given
* the server_rec to allow "ssl_log()". */
/*
 * Append an encoded session to the end of a division's cyclic cache,
 * first expiring stale entries, then scrolling out as many leading
 * sessions ('loop' of them, counted into the "scrolled" stats) as
 * needed to make room; finally writes the data, fills in the new
 * SHMCBIndex and bumps the queue/cache pos_counts.
 * NOTE(review): the parameter list is truncated (the server_rec,
 * queue pointer and expiry-time parameters are missing) and many
 * statements — the gap/need calculations, the scroll loop condition,
 * the cyclic memcpy, the index memset mentioned below and the trace
 * output — are missing from this copy.  Recover from d86ef550.
 */
static BOOL shmcb_insert_encoded_session(
SHMCBCache * cache,
unsigned char *encoded,
unsigned int encoded_len,
unsigned char *session_id,
{
int need;
/* If there's entries to expire, ditch them first thing. */
if (gap < encoded_len) {
loop = 0;
loop += 1;
}
if (loop > 0) {
/* We are removing "loop" items from the cache. */
/* Update the stats!!! */
}
}
/* probably unecessary checks, but I'll leave them until this code
* is verified. */
"internal error");
return FALSE;
}
"internal error");
return FALSE;
}
/* HERE WE ASSUME THAT THE NEW SESSION SHOULD GO ON THE END! I'M NOT
* CHECKING WHETHER IT SHOULD BE GENUINELY "INSERTED" SOMEWHERE.
*
* We either fix that, or find out at a "higher" (read "mod_ssl")
* level whether it is possible to have distinct session caches for
* any attempted tomfoolery to do with different session timeouts.
* Knowing in advance that we can have a cache-wide constant timeout
* would make this stuff *MUCH* more efficient. Mind you, it's very
* efficient right now because I'm ignoring this problem!!!
*/
/* Increment to the first unused byte */
/* Copy the DER-encoded session into place */
/* Get the new index that this session is stored in. */
"internal error");
return FALSE;
}
/* idx->removed = (unsigned char)0; */ /* Not needed given the memset above. */
/* All that remains is to adjust the cache's and queue's "pos_count"s. */
/* And just for good debugging measure ... */
return TRUE;
}
/* Scan a division's queue for an entry matching the given
* session_id. If found, the session is deserialised
* and returned, otherwise NULL. */
/*
 * Walk the division's index queue looking for a live, unexpired entry
 * whose session matches 'id'; deserialise (d2i) and return it, or
 * NULL.  Expired entries are ditched up-front.
 * NOTE(review): the parameter list is truncated (only 'idlen'
 * survives) and the scan loop, the s_id2/removed/expiry filter
 * condition, the cyclic copy into 'tempasn' and the d2i call are
 * missing from this copy.  Recover from d86ef550.
 */
static SSL_SESSION *shmcb_lookup_session_id(
int idlen)
{
unsigned char tempasn[SSL_SESSION_MAX_DER];
unsigned char *ptr;
/* If there are entries to expire, ditch them first thing. */
/* Only look into the session further if;
* (a) the second byte of the session_id matches,
* (b) the "removed" flag isn't set,
* (c) the session hasn't expired yet.
* We do (c) like this so that it saves us having to
* do natural expiries ... naturally expired sessions
* scroll off the front anyway when the cache is full and
* "rotating", the only real issue that remains is the
* removal or disabling of forcibly killed sessions. */
"session match", curr_pos);
"session_id, internal error");
return NULL;
}
return pSession;
}
}
}
return NULL;
}
/*
 * Scan the division's queue for a session matching 'id' and flag it
 * "removed" (a passive scrub — the entry stays in the cyclic buffer
 * but retrieves will skip it); expiry is deliberately run *after* the
 * remove, as the comment below explains.
 * NOTE(review): the parameter list, the scan loop, the match
 * condition, the scrub assignment and the 'to_return' declaration are
 * missing from this copy.  Recover from d86ef550.
 */
static BOOL shmcb_remove_session_id(
{
unsigned char tempasn[SSL_SESSION_MAX_DER];
unsigned char *ptr;
/* If there's entries to expire, ditch them first thing. */
/* shmcb_expire_division(s, queue, cache); */
/* Regarding the above ... hmmm ... I know my expiry code is slightly
* "faster" than all this remove stuff ... but if the higher level
* code calls a "remove" operation (and this *only* seems to happen
* when it has spotted an expired session before we had a chance to)
* then it should get credit for a remove (stats-wise). Also, in the
* off-chance that the server *requests* a renegotiate and wants to
* wipe the session clean we should give that priority over our own
* routine expiry handling. So I've moved the expiry check to *after*
* this general remove stuff. */
id[1]);
/* Only look into the session further if the second byte of the
* session_id matches. */
"session match", curr_pos);
"internal error");
goto end;
}
/* Scrub out this session "quietly" */
goto end;
}
}
}
/* If there's entries to expire, ditch them now. */
end:
return to_return;
}