/* mail-cache.c, revision b5369a59b5b620421ae63a2f88e2178bb4b796fc */
/* Copyright (C) 2003 Timo Sirainen */
#include "lib.h"
#include "buffer.h"
#include "byteorder.h"
#include "file-lock.h"
#include "file-set-size.h"
#include "ioloop.h"
#include "mmap-util.h"
#include "write-full.h"
#include "mail-index.h"
#include "mail-index-util.h"
#include "mail-cache.h"
#include <stddef.h>
#include <unistd.h>
/* Never compress the file if it's smaller than this */
/* Compress the file when deleted space reaches n% of total size */
#define COMPRESS_PERCENTAGE 20
/* Compress the file when n% of rows contain continued rows.
   200% means that there are two continued rows per record. */
#define COMPRESS_CONTINUED_PERCENTAGE 200
/* Initial size for the file */
/* When more space is needed, grow the file n% larger than the previous size */
#define MAIL_CACHE_GROW_PERCENTAGE 10
#define MAIL_CACHE_LOCK_TIMEOUT 120
#define MAIL_CACHE_LOCK_CHANGE_TIMEOUT 60
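/* Illustrative sketch of how the two compression thresholds above could be
   combined into one decision. The helper name and parameters are
   placeholders, not this file's API; it assumes uint32_t/uint64_t from the
   headers included above, and the minimum-size check mentioned in the first
   comment (whose constant isn't shown here) would also apply. */
static int sketch_cache_want_compress(uint32_t used_file_size,
				      uint32_t deleted_space,
				      uint32_t continued_record_count,
				      uint32_t record_count)
{
	/* deleted space as a percentage of the whole file */
	if (used_file_size != 0 &&
	    (uint64_t)deleted_space * 100 / used_file_size >=
	    COMPRESS_PERCENTAGE)
		return 1;

	/* continued rows per record as a percentage: 200 means two
	   continued rows for every record */
	if (record_count != 0 &&
	    (uint64_t)continued_record_count * 100 / record_count >=
	    COMPRESS_CONTINUED_PERCENTAGE)
		return 1;

	return 0;
}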
struct mail_cache_header {
};
struct mail_cache_record {
};
struct mail_cache {
struct mail_index *index;
char *filepath;
int fd;
void *mmap_base;
struct mail_cache_header *header;
const char *const *split_headers[MAIL_CACHE_HEADERS_COUNT];
struct mail_cache_transaction_ctx *trans_ctx;
unsigned int locks;
unsigned int anon_mmap:1;
unsigned int mmap_refresh:1;
unsigned int silent:1;
};
struct mail_cache_transaction_ctx {
struct mail_cache *cache;
unsigned int next_unused_header_lowwater;
unsigned int last_idx;
struct mail_cache_record cache_rec;
enum mail_cache_field prev_fields;
};
unsigned int mail_cache_field_sizes[32] = {
sizeof(enum mail_index_record_flag),
sizeof(uoff_t),
16,
sizeof(struct mail_sent_date),
sizeof(time_t),
sizeof(uoff_t),
sizeof(uoff_t),
0, 0, 0, 0, 0,
/* variable sized */
(unsigned int)-1, (unsigned int)-1, (unsigned int)-1, (unsigned int)-1,
(unsigned int)-1, (unsigned int)-1, (unsigned int)-1, (unsigned int)-1,
(unsigned int)-1, (unsigned int)-1, (unsigned int)-1, (unsigned int)-1,
(unsigned int)-1, (unsigned int)-1, (unsigned int)-1, (unsigned int)-1,
(unsigned int)-1, (unsigned int)-1, (unsigned int)-1, (unsigned int)-1
};
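/* Illustrative sketch of consulting the size table above for a single field
   bit: the index is the field's bit position, and (unsigned int)-1 marks a
   variable-sized field. The helper name is a placeholder, not this file's
   API. */
static unsigned int sketch_field_fixed_size(unsigned int field_mask)
{
	unsigned int i;

	for (i = 0; i < 32; i++) {
		if ((field_mask & (1U << i)) != 0)
			return mail_cache_field_sizes[i];
	}
	return 0;
}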
static const unsigned char *null4[] = { 0, 0, 0, 0 };
static const char *
static struct mail_cache_record *
const struct mail_index_record *rec,
enum mail_cache_field fields);
{
unsigned char buf[4];
offset >>= 2;
}
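/* Illustrative sketch of the offset packing implied by the fragment above:
   cache offsets are kept 4-byte aligned (see the "(size & 3)" padding
   elsewhere in this file), so the two low bits are always zero and the value
   can be stored shifted right by two; this is also consistent with offsets
   of 0x40000000 and above being rejected later in the file. The exact
   byte-level encoding used by the real code is not reproduced here; names
   are placeholders. */
static uint32_t sketch_offset_pack(uint32_t offset)
{
	/* only aligned offsets below 1 GB can be represented */
	if ((offset & 3) != 0 || offset >= 0x40000000)
		return 0;
	return offset >> 2;
}

static uint32_t sketch_offset_unpack(uint32_t packed)
{
	return packed << 2;
}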
{
return 0;
}
const char *function)
{
return FALSE;
}
return FALSE;
}
struct mail_cache_header *hdr)
{
cache->mmap_length);
return FALSE;
}
return TRUE;
}
{
}
cache->mmap_length = 0;
}
}
{
int fd;
/* cache was set corrupted, we'll have to quit */
return FALSE;
}
if (fd == -1)
return TRUE;
}
{
struct mail_cache_header *hdr;
/* check that the header is still ok */
/* index id changed */
return FALSE;
}
/* we've updated used_file_size, do nothing */
return TRUE;
}
/* only check the header if we're locked */
return TRUE;
return FALSE;
}
return FALSE;
}
/* maybe a crash truncated the file - just fix it */
}
return TRUE;
}
{
/* if the sync id has changed, the file has to be reopened.
   note that if the main index isn't locked, it may change again */
if (!mail_cache_file_reopen(cache))
return -1;
}
!cache->mmap_refresh) {
/* already mapped */
return 1;
/* requesting the whole file - see if we need to
re-mmap */
return -1;
}
return 1;
}
return 1;
/* in the middle of transaction - write the changes */
MS_SYNC) < 0) {
return -1;
}
}
}
/* map the whole file */
cache->mmap_length = 0;
return -1;
}
/* re-mmaped, check header */
return 0;
}
{
if (ret > 0)
return TRUE;
if (ret < 0)
return FALSE;
if (!mmap_verify_header(cache))
return FALSE;
/* see if cache file was rebuilt - do it only once to avoid
infinite looping */
break;
if (!mail_cache_file_reopen(cache))
return FALSE;
}
return TRUE;
}
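/* Illustrative sketch of the retry pattern above: map and verify the header,
   and if verification fails, reopen the file and try exactly once more so a
   concurrent rebuild can't cause infinite looping. The callbacks stand in
   for this file's own mapping, verification and reopen steps; names are
   placeholders. */
static int sketch_map_verify_with_one_retry(int (*do_map)(void *ctx),
					    int (*verify)(void *ctx),
					    int (*reopen)(void *ctx),
					    void *ctx)
{
	int retried = FALSE;

	for (;;) {
		if (!do_map(ctx))
			return FALSE;
		if (verify(ctx))
			return TRUE;
		if (retried || !reopen(ctx))
			return FALSE;
		retried = TRUE;
	}
}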
{
return 0;
return -1;
}
return -1;
}
return 0;
return -1;
/* verify that this really is the cache for wanted index */
if (!mmap_verify_header(cache)) {
return 0;
}
return 1;
}
{
struct mail_index_record *rec;
index->sync_stamp = 0;
rec->cache_offset = 0;
}
}
struct mail_cache_header *hdr)
{
if (ret != 0)
return ret > 0;
/* we'll have to clear cache_offsets, which requires an exclusive lock */
return FALSE;
/* maybe a rebuild.. */
if (fd == -1) {
return FALSE;
}
/* see if someone else just created the cache file */
if (ret != 0) {
return ret > 0;
}
/* rebuild then */
return FALSE;
}
return FALSE;
}
return FALSE;
}
return FALSE;
return TRUE;
}
{
struct mail_cache_header hdr;
struct mail_cache *cache;
/* we'll do anon-mmapping only if initially requested. if we fail
   because we're out of disk space, we'll just let the main index code
   know it and fail. */
if (INDEX_IS_IN_MEMORY(index)) {
return FALSE;
}
} else {
return FALSE;
}
}
/* unset inconsistency - we already rebuilt the cache file */
return TRUE;
}
{
}
{
}
static const struct mail_cache_record *
{
struct mail_cache_record cache_rec;
const void *data;
int i;
if ((cached_fields & field) == 0)
continue;
cached_fields &= ~field;
continue;
}
if ((field & MAIL_CACHE_FIXED_MASK) == 0)
if ((size & 3) != 0)
}
/* now merge all the headers if we have them all */
nb_size = 0;
for (i = 0; i <= header_idx; i++) {
size--; /* terminating \0 */
}
}
nb_size++;
if ((nb_size & 3) != 0)
}
return data;
}
{
struct mail_cache_header *hdr;
const struct mail_cache_record *cache_rec;
struct mail_index_record *rec;
enum mail_cache_field used_fields;
unsigned char *mmap_base;
const char *str;
int i, header_idx;
/* pick some reasonably good file size */
MAP_SHARED, fd, 0);
if (mmap_base == MAP_FAILED)
/* skip file's header */
/* merge all the header pieces into one. if some message doesn't have
all the required pieces, we'll just have to drop them all. */
for (i = MAIL_CACHE_HEADERS_COUNT-1; i >= 0; i--) {
break;
}
header_idx = -1;
else {
header_idx = i;
}
used_fields = 0;
rec->cache_offset = 0;
/* just one unmodified block, copy it */
} else {
/* multiple blocks, sort them into buffer */
t_push();
&size);
t_pop();
}
}
/* update header */
/* write everything to disk */
return TRUE;
}
{
return TRUE;
return FALSE;
return FALSE;
#ifdef DEBUG
#endif
if (fd == -1) {
return FALSE;
}
/* now we'll begin the actual moving. keep rebuild-flag on
while doing it. */
return FALSE;
} else {
"file_dotlock_replace()");
}
if (!mmap_update(cache, 0, 0))
}
/* headers could have changed, reread them */
if (ret) {
}
if (!mail_cache_unlock(cache))
return ret;
}
{
struct mail_cache_header hdr;
return TRUE;
}
if (ret != 0)
return ret > 0;
if (fd == -1) {
return FALSE;
}
return FALSE;
}
return FALSE;
}
return FALSE;
}
return FALSE;
return TRUE;
}
{
else {
}
return TRUE;
}
{
int ret;
return TRUE;
return TRUE;
if (nonblock) {
if (ret < 0)
} else {
if (ret <= 0)
}
if (ret > 0) {
if (!mmap_update(cache, 0, 0)) {
(void)mail_cache_unlock(cache);
return -1;
}
/* we have the cache file locked and sync_id still
   doesn't match. it means we crashed between updating the
   cache file and updating sync_id in the index header.
   just update the sync_ids so they match. */
i_warning("Updating broken sync_id in cache file %s",
}
}
return ret;
}
{
return TRUE;
return TRUE;
return FALSE;
}
return TRUE;
}
{
}
{
}
struct mail_cache_transaction_ctx **ctx_r)
{
int ret;
if (ret <= 0)
return ret;
(*ctx_r)->cache_data =
return 1;
}
{
(void)mail_cache_transaction_rollback(ctx);
return ret;
}
{
ctx->prev_fields = 0;
}
{
/* data is in big endian, we want to update only the lowest byte */
}
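/* Illustrative sketch of the byte-level update described in the comment
   above: when a 32-bit value is stored big endian, its least significant
   byte is the last of the four bytes, so only that byte has to be rewritten
   in place. Placeholder helper. */
static void sketch_update_lowest_byte(unsigned char *be32_data,
				      unsigned char new_low_byte)
{
	/* bytes are ordered most significant first, so index 3 holds the
	   lowest byte */
	be32_data[3] = new_low_byte;
}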
{
return FALSE;
}
data += 2;
}
return TRUE;
}
{
}
}
{
cache->mmap_length);
}
}
}
{
return TRUE;
}
/* write everything to disk */
/* now that we're sure it's there, set all the used-bits */
return FALSE;
/* update continued records count */
(sizeof(uint32_t) * 2);
/* too many continued rows, compress */
}
}
/* write index last */
return FALSE;
return FALSE;
}
return TRUE;
}
{
if (!mail_cache_write(ctx))
return FALSE;
}
if (!commit_all_changes(ctx))
/* they're all used - compress the cache to get more */
}
return ret;
}
{
unsigned int i;
/* no need to actually modify the file - we just didn't update
used_file_size */
/* make sure we don't cache the headers */
for (i = 0; i < ctx->next_unused_header_lowwater; i++) {
}
return TRUE;
}
{
void *base;
if (grow_size < 16384)
grow_size = 16384;
new_fsize &= ~1023;
if (base == MAP_FAILED) {
return FALSE;
}
return TRUE;
}
/* no need to grow, just update mmap */
if (!mmap_update(cache, 0, 0))
return FALSE;
return TRUE;
}
return mmap_update(cache, 0, 0);
}
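/* Illustrative sketch of the growth calculation visible above: grow the file
   by roughly MAIL_CACHE_GROW_PERCENTAGE of its current size, but by at least
   16384 bytes, and round the result down to a 1024-byte boundary before
   extending the file. Placeholder name. */
static uoff_t sketch_grown_file_size(uoff_t old_fsize)
{
	uoff_t grow_size, new_fsize;

	grow_size = old_fsize / 100 * MAIL_CACHE_GROW_PERCENTAGE;
	if (grow_size < 16384)
		grow_size = 16384;

	new_fsize = old_fsize + grow_size;
	new_fsize &= ~1023;
	return new_fsize;
}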
{
/* NOTE: must be done within transaction or rollback would break it */
if (offset >= 0x40000000) {
return 0;
}
return 0;
}
return offset;
}
static const char *
{
unsigned char *buf;
if (offset == 0)
return NULL;
return NULL;
idx);
return NULL;
}
if (data_size == 0) {
"Header %u points to empty string", idx);
return NULL;
}
return NULL;
idx);
return NULL;
}
"Header %u points to invalid string", idx);
return NULL;
}
}
static const char *const *
{
char *str;
return NULL;
}
}
unsigned int idx)
{
const char *str;
int i;
/* t_strsplit() is a bit slow, so we cache it */
t_push();
for (i = 0; i < MAIL_CACHE_HEADERS_COUNT; i++) {
cache->split_offsets[i] =
}
t_pop();
}
}
static const char *write_header_string(const char *const headers[],
{
if (buffer_get_used_size(buffer) != 0)
headers++;
}
if ((size & 3) != 0) {
}
}
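/* Illustrative sketch of the 4-byte alignment used throughout this file (the
   "(size & 3)" checks): sizes written to the cache are rounded up to the next
   multiple of four, with the unused tail bytes left zero. For example, a
   5-byte value occupies 8 bytes on disk. Placeholder name. */
static size_t sketch_pad_to_4(size_t size)
{
	return (size & 3) == 0 ? size : size + (4 - (size & 3));
}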
{
const char *header_str, *prev_str;
t_push();
if (idx != 0) {
t_pop();
return FALSE;
}
}
if (offset != 0) {
header_str, size);
/* update cached headers */
/* mark used-bit to be updated later. not really needed for
   read-safety, but if the transaction gets rolled back we can't let
   this point to an invalid location. */
/* make sure get_header_fields() still works for this header
while the transaction isn't yet committed. */
}
t_pop();
return offset > 0;
}
static struct mail_cache_record *
{
#define CACHE_PREFETCH 1024
struct mail_cache_record *cache_rec;
if (offset == 0)
return NULL;
return NULL;
return NULL;
}
return NULL;
}
if (size > CACHE_PREFETCH) {
return NULL;
}
return NULL;
}
return cache_rec;
}
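/* Illustrative sketch of the prefetch idea behind CACHE_PREFETCH above: make
   sure CACHE_PREFETCH bytes from the record offset are mapped first, read
   the record size from that window, and only ask for a second, exact-sized
   mapping when the record turns out to be larger than the prefetch window.
   The callback stands in for this file's mmap updating; in the real code the
   record size is read from the mapped record header. Placeholder names. */
static int sketch_prefetch_record(uint32_t offset, uint32_t rec_size,
				  int (*map_range)(uint32_t offset,
						   uint32_t size))
{
	if (!map_range(offset, CACHE_PREFETCH))
		return FALSE;
	if (rec_size > CACHE_PREFETCH)
		return map_range(offset, rec_size);
	return TRUE;
}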
static struct mail_cache_record *
{
struct mail_cache_record *next;
return NULL;
}
return next;
}
{
struct mail_index_record *rec;
const void *buf;
if (write_offset == 0)
return FALSE;
/* first cache record - update offset in index file */
/* mark cache_offset to be updated later */
} else {
/* find the last cache record */
/* mark next_offset to be updated later */
}
/* reset the write context */
return TRUE;
}
static struct mail_cache_record *
enum mail_cache_field fields)
{
struct mail_cache_record *cache_rec;
unsigned int idx;
/* we have to auto-commit since we're not capable of looking
   into uncommitted records. it would be possible by checking
   index_marks and cache_marks, but it's just more trouble
   than it's worth. */
return NULL;
}
return NULL;
}
return NULL;
return cache_rec;
}
{
unsigned int mask;
int i;
return i;
}
return -1;
}
enum mail_cache_field field)
{
const unsigned char *buf;
unsigned int mask;
int i;
return offset;
if ((mask & MAIL_CACHE_FIXED_MASK) != 0)
else {
sizeof(data_size));
}
}
}
i_unreached();
return offset;
}
{
unsigned char *buf;
unsigned int idx;
int field_num;
if ((field & MAIL_CACHE_FIXED_MASK) != 0) {
} else if ((field & MAIL_CACHE_STRING_MASK) != 0) {
}
/* NOTE: we use index because the record pointer might not last. */
if (!mail_cache_write(ctx))
return FALSE;
}
if ((field & MAIL_CACHE_FIXED_MASK) == 0)
full_size += sizeof(nb_data_size);
/* fields must be ordered. find where to insert it. */
else {
}
/* @UNSAFE */
if ((field & MAIL_CACHE_FIXED_MASK) == 0) {
buf += sizeof(nb_data_size);
}
if ((data_size & 3) != 0)
/* remember the transaction uid range */
ctx->prev_fields = 0;
}
return TRUE;
}
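/* Illustrative sketch of the "fields must be ordered" rule noted above:
   within a record the cached fields are laid out in ascending bit order, so
   the insertion point for a new field is found by skipping over every
   already-present field whose bit is lower than the new one. Only fixed-size
   fields are handled here; a variable-sized field would additionally read
   its 32-bit size prefix. Placeholder name. */
static uint32_t sketch_field_insert_offset(uint32_t cached_fields,
					   unsigned int new_field_bit)
{
	uint32_t offset = 0;
	unsigned int i;

	for (i = 0; i < new_field_bit && i < 32; i++) {
		if ((cached_fields & (1U << i)) == 0)
			continue;
		if (mail_cache_field_sizes[i] == (unsigned int)-1) {
			/* variable-sized field: its stored size would have
			   to be read here; not covered by this sketch */
			return (uint32_t)-1;
		}
		offset += (mail_cache_field_sizes[i] + 3) & ~3U;
	}
	return offset;
}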
struct mail_index_record *rec)
{
struct mail_cache_record *cache_rec;
return TRUE;
/* NOTE: it would be nice to erase the cached data for the record,
but some other processes might still be using them. So, we just
update the deleted_space in header */
do {
/* see if we've reached the max. deleted space in file */
if (deleted_space >= max_del_space &&
return TRUE;
}
enum mail_cache_field
const struct mail_index_record *rec)
{
struct mail_cache_record *cache_rec;
enum mail_cache_field fields = 0;
}
return fields;
}
struct mail_cache_record *cache_rec,
enum mail_cache_field field,
{
unsigned char *buf;
unsigned int mask;
int i;
continue;
/* all records are at least 32bit. we have to check this
before getting data_size. */
"Record continues outside it's allocated size");
return FALSE;
}
if ((mask & MAIL_CACHE_FIXED_MASK) != 0)
else {
}
if (next_offset > rec_size) {
"Record continues outside it's allocated size");
return FALSE;
}
if (data_size == 0) {
"Field size is 0");
return FALSE;
}
return TRUE;
}
}
i_unreached();
return FALSE;
}
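/* Illustrative sketch of the bounds checking described in the messages above
   when walking a record's variable-sized fields: there must be room for the
   32-bit size prefix before it's read, the field data must not run past the
   record's allocated size, and a zero-sized field is treated as corruption.
   Operates on a raw byte range; assumes memcpy is available (string.h,
   typically pulled in via lib.h). Placeholder name. */
static int sketch_walk_variable_fields(const unsigned char *data,
				       uint32_t rec_size)
{
	uint32_t offset = 0, data_size, next_offset;

	while (offset < rec_size) {
		/* records are handled in 32-bit units, so there must be
		   room for the size prefix itself */
		if (rec_size - offset < sizeof(uint32_t))
			return FALSE; /* continues outside allocated size */

		memcpy(&data_size, data + offset, sizeof(data_size));
		offset += sizeof(data_size);

		if (data_size == 0)
			return FALSE; /* field size is 0 */

		next_offset = offset + data_size;
		if (next_offset > rec_size || next_offset < offset)
			return FALSE; /* continues outside allocated size */

		/* field data is padded to a 4-byte boundary in the file */
		offset = (next_offset + 3) & ~3U;
	}
	return TRUE;
}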
const struct mail_index_record *rec,
enum mail_cache_field field,
{
struct mail_cache_record *cache_rec;
}
}
return FALSE;
}
const struct mail_index_record *rec,
enum mail_cache_field field,
{
void *data;
return FALSE;
return TRUE;
}
const struct mail_index_record *rec,
enum mail_cache_field field)
{
const void *data;
return NULL;
"String field %x doesn't end with NUL", field);
return NULL;
}
return data;
}
const struct mail_index_record *rec,
enum mail_cache_field field,
{
const void *data;
return FALSE;
if (buffer_size != size) {
i_panic("cache: fixed field %x wrong size "
}
return TRUE;
}
enum mail_cache_field fields)
{
// FIXME: count these
}
const struct mail_index_record *rec)
{
enum mail_index_record_flag flags;
return 0;
return flags;
}
struct mail_index_record *rec,
enum mail_index_record_flag flags)
{
void *data;
return FALSE;
}
return TRUE;
}
struct mail_index_record *rec,
{
void *data;
return FALSE;
}
return TRUE;
}
{
if (!mmap_update(cache, 0, 0))
return NULL;
}
{
return FALSE;
t_push();
t_pop();
return FALSE;
}