/* mail-cache-private.h revision ab70f55bb8d824ca1ed7c74196f2f502edd29cc7 */
#ifndef MAIL_CACHE_PRIVATE_H
#define MAIL_CACHE_PRIVATE_H
#include "file-dotlock.h"
#include "mail-index-private.h"
#include "mail-cache.h"
/* On-disk cache file format version. Major is bumped only on
   backwards-incompatible changes. */
#define MAIL_CACHE_MAJOR_VERSION 1
#define MAIL_CACHE_MINOR_VERSION 1
/* Drop fields that haven't been accessed for n seconds */
/* NOTE(review): the #define belonging to the comment above (field-drop
   timeout in seconds) is missing from this copy — restore from upstream. */
/* Never compress the file if it's smaller than this */
/* NOTE(review): the minimum-compress-size #define belonging to the comment
   above is also missing from this copy. */
/* Compress the file when n% of records are deleted */
#define MAIL_CACHE_COMPRESS_DELETE_PERCENTAGE 20
/* Compress the file when n% of rows contain continued rows.
200% means that there's 2 continued rows per record. */
#define MAIL_CACHE_COMPRESS_CONTINUED_PERCENTAGE 200
/* Compress the file when we need to follow more than n next_offsets to find
the latest cache header. */
#define MAIL_CACHE_HEADER_FIELD_CONTINUE_COUNT 4
/* If cache record becomes larger than this, don't add it. */
/* NOTE(review): the max-record-size #define belonging to the comment above
   is missing from this copy. The two values below are lock wait limits,
   presumably in seconds — confirm against the locking code. */
#define MAIL_CACHE_LOCK_TIMEOUT 10
#define MAIL_CACHE_LOCK_CHANGE_TIMEOUT 300
/* TRUE if the cache file can't currently be used: hdr is NULL whenever the
   header couldn't be mapped/validated (see the hdr comment in struct
   mail_cache below). Restored body — the original line ended in a lone
   backslash-continuation, which swallowed the following struct declaration
   into the macro. */
#define MAIL_CACHE_IS_UNUSABLE(cache) \
	((cache)->hdr == NULL)
/* Header at the beginning of the cache file.
   NOTE(review): all actual members (version fields, file sequence,
   size/offset/space counters, field-header offset) are missing from this
   copy of the struct — restore from upstream before use. */
struct mail_cache_header {
/* version is increased only when you can't have backwards
compatibility. */
/* NOTE: old versions used this for hole offset, so we can't fully
rely on it */
};
/* Header-fields block inside the cache file. The #if 0 section documents
   the variable-length on-disk layout that follows the fixed part; it is
   intentionally disabled code, kept as format documentation.
   NOTE(review): the fixed members before #if 0 and the per-field arrays
   (last_used[], size[], type[], decision[]) inside #if 0 are missing from
   this copy — only their explanatory comments remain. The offset macros
   below imply a fixed part of 3 x uint32_t followed by those arrays. */
struct mail_cache_header_fields {
#if 0
/* last time the field was accessed. not updated more often than
once a day. */
/* (uint32_t)-1 for variable sized fields */
/* enum mail_cache_field_type */
/* enum mail_cache_decision_type */
/* NUL-separated list of field names */
char name[fields_count][];
#endif
};
/* Byte offsets of the per-field arrays inside a mail_cache_header_fields
   block, for a given field count: a fixed part of 3 x uint32_t, then the
   last_used[] and size[] arrays (uint32_t each), then the type[] and
   decision[] arrays (uint8_t each), then the NUL-separated name list.
   Restored bodies — the original SIZE/TYPE/DECISION/NAMES macros each ended
   in a lone backslash-continuation that swallowed the next #define. */
#define MAIL_CACHE_FIELD_LAST_USED() \
	(sizeof(uint32_t) * 3)
#define MAIL_CACHE_FIELD_SIZE(count) \
	(MAIL_CACHE_FIELD_LAST_USED() + sizeof(uint32_t) * (count))
#define MAIL_CACHE_FIELD_TYPE(count) \
	(MAIL_CACHE_FIELD_SIZE(count) + sizeof(uint32_t) * (count))
#define MAIL_CACHE_FIELD_DECISION(count) \
	(MAIL_CACHE_FIELD_TYPE(count) + sizeof(uint8_t) * (count))
#define MAIL_CACHE_FIELD_NAMES(count) \
	(MAIL_CACHE_FIELD_DECISION(count) + sizeof(uint8_t) * (count))
/* A single cache record in the file; fields follow as variable-length data.
   NOTE(review): the fixed record header members (previous-record offset and
   total record size, per the surviving comments elsewhere) are missing from
   this copy — restore from upstream. */
struct mail_cache_record {
/* array of { uint32_t field; [ uint32_t size; ] { .. } } */
};
/* Per-field runtime state wrapped around the public mail_cache_field. */
struct mail_cache_field_private {
struct mail_cache_field field;
/* Unused fields aren't written to cache file */
unsigned int used:1;
/* field registration/add is in progress — NOTE(review): inferred from the
   name, confirm against the field-adding code */
unsigned int adding:1;
/* caching decision changed in memory but not yet flushed to the file —
   NOTE(review): inferred from the name, confirm against decision code */
unsigned int decision_dirty:1;
};
/* Per-index cache file state. The file can be accessed in one of three
   ways (a/b/c below) depending on configuration. */
struct mail_cache {
struct mail_index *index;
/* path to the cache file */
char *filepath;
int fd;
/* a) mmaping the whole file */
void *mmap_base;
/* b) using file cache */
struct file_cache *file_cache;
/* c) using small read() calls with MAIL_INDEX_OPEN_FLAG_SAVEONLY */
/* NOTE(review): the member(s) belonging to case (c), e.g. a read buffer,
   appear to be missing from this copy. */
/* mail_cache_map() increases this always. */
unsigned int remap_counter;
struct dotlock_settings dotlock_settings;
/* NOTE(review): a dotlock pointer member likely accompanied the settings
   here — missing from this copy, confirm against upstream. */
/* mmap_disable=no: hdr points to data / NULL when cache is invalid.
mmap_disable=yes: hdr points to hdr_ro_copy. this is needed because
cache invalidation can zero the data any time */
const struct mail_cache_header *hdr;
struct mail_cache_header hdr_ro_copy;
/* hdr_copy gets updated when cache is locked and written when
unlocking and hdr_modified=TRUE */
struct mail_cache_header hdr_copy;
/* registered fields and their runtime state */
struct mail_cache_field_private *fields;
unsigned int fields_count;
/* 0 is no need for compression, otherwise the file sequence number
which we want compressed. */
/* NOTE(review): the member described by the comment above (a file seq
   number) is missing from this copy; file_field_map below maps file field
   indexes to registered field indexes — confirm against upstream. */
unsigned int *file_field_map;
unsigned int file_fields_count;
/* state flags */
unsigned int opened:1;
unsigned int locked:1;
unsigned int last_lock_failed:1;
unsigned int hdr_modified:1;
unsigned int field_header_write_pending:1;
unsigned int compressing:1;
unsigned int map_with_read:1;
};
/* Tracks offsets followed through a chain of cache records so that loops in
   a corrupted file can be detected.
   NOTE(review): the tracked members (offset range and size sum, per the
   surviving comment) are missing from this copy — restore from upstream. */
struct mail_cache_loop_track {
/* we're looping if size_sum > (max_offset-min_offset) */
};
/* A view into the cache, tied to an index view, used for per-message
   field lookups. */
struct mail_cache_view {
struct mail_cache *cache;
/* NOTE(review): the mail_index_view pointer(s) this view wraps appear to
   be missing from this copy. */
/* transaction doing in-memory cache additions for this view, if any */
struct mail_cache_transaction_ctx *transaction;
/* loop detection while following record chains */
struct mail_cache_loop_track loop_track;
/* if cached_exists_buf[field] == cached_exists_value, it's cached.
this allows us to avoid constantly clearing the whole buffer.
it needs to be cleared only when cached_exists_value is wrapped. */
/* NOTE(review): the cached_exists_buf/cached_exists_value members the
   comment above describes are missing from this copy. */
/* don't update caching decisions from lookups done through this view —
   NOTE(review): inferred from the name, confirm against decision code */
unsigned int no_decision_updates:1;
};
/* One cached field as produced by the field iterator. */
struct mail_cache_iterate_field {
/* registered cache field index */
unsigned int field_idx;
/* size of data in bytes */
unsigned int size;
/* field payload; points into mapped/buffered cache data, so it is only
   valid until the next map — NOTE(review): lifetime inferred from
   remap_counter usage, confirm */
const void *data;
};
/* State for iterating over all cached fields of one message, covering both
   in-memory (transaction) appends and records already in the file. */
struct mail_cache_lookup_iterate_ctx {
struct mail_cache_view *view;
/* snapshot of cache->remap_counter; a mismatch means rec may point into
   remapped memory — NOTE(review): inferred, confirm against iterator code */
unsigned int remap_counter;
/* current record being iterated */
const struct mail_cache_record *rec;
/* NOTE(review): members that tracked position/size within rec appear to
   be missing from this copy — restore from upstream. */
unsigned int trans_next_idx;
/* iteration state flags */
unsigned int stop:1;
unsigned int failed:1;
unsigned int memory_appends_checked:1;
unsigned int disk_appends_checked:1;
};
/* NOTE(review): the function prototypes below are truncated — the leading
   lines carrying return types, function names, and most parameters were
   lost from this copy; only trailing parameter lines and some doc comments
   remain. The comment opened on the "Explicitly lock" line is also cut off
   mid-sentence. Restore the full prototypes from upstream before this
   header can compile. */
/* Explicitly lock the cache file. Returns -1 if error / timed out,
/* Returns -1 if cache is / just got corrupted, 0 if ok. */
const struct mail_cache_record **rec_r);
/* Returns TRUE if offset..size area has been tracked before.
Returns FALSE if the area may or may not have been tracked before,
but we don't know for sure yet. */
/* Iterate through a message's cached fields. */
struct mail_cache_lookup_iterate_ctx *ctx_r);
/* Returns 1 if field was returned, 0 if end of fields, or -1 if error */
struct mail_cache_iterate_field *field_r);
const struct mail_cache_record *
unsigned int seq,
unsigned int *trans_next_idx);
const void **data_r);
/* Notify the decision handling code that field was looked up for seq.
This should be called even for fields that aren't currently in cache file */
unsigned int field);
void **sync_context, void *context);
const char *function);
#endif