/* mail-cache-private.h revision 763f83d3cc47bce05cbc396419c4db2b71dd8e68 */
132N/A#ifndef MAIL_CACHE_PRIVATE_H
132N/A#define MAIL_CACHE_PRIVATE_H
132N/A
132N/A#include "file-dotlock.h"
132N/A#include "mail-index-private.h"
132N/A#include "mail-cache.h"
132N/A
#define MAIL_CACHE_VERSION 1

/* Drop fields that haven't been accessed for n seconds */
#define MAIL_CACHE_FIELD_DROP_SECS (3600*24*30)

/* Never compress the file if it's smaller than this (bytes) */
#define MAIL_CACHE_COMPRESS_MIN_SIZE (1024*50)

/* Don't bother remembering holes smaller than this (bytes; see
   struct mail_cache_hole_header) */
#define MAIL_CACHE_MIN_HOLE_SIZE 1024

/* Compress the file when deleted space reaches n% of total size */
#define MAIL_CACHE_COMPRESS_PERCENTAGE 20

/* Compress the file when n% of rows contain continued rows.
   200% means that there's 2 continued rows per record. */
#define MAIL_CACHE_COMPRESS_CONTINUED_PERCENTAGE 200

/* Compress the file when we need to follow more than n next_offsets to find
   the latest cache header. */
#define MAIL_CACHE_HEADER_FIELD_CONTINUE_COUNT 4

/* Initial size for the file (bytes) */
#define MAIL_CACHE_INITIAL_SIZE (sizeof(struct mail_cache_header) + 10240)

/* When more space is needed, grow the file n% larger than the previous size */
#define MAIL_CACHE_GROW_PERCENTAGE 10

/* When allocating space for transactions, don't use blocks larger than this. */
#define MAIL_CACHE_MAX_RESERVED_BLOCK_SIZE (1024*512)

/* Locking timeouts; presumably in seconds — TODO confirm against the
   dotlock/file_lock call sites in mail-cache.c */
#define MAIL_CACHE_LOCK_TIMEOUT 10
#define MAIL_CACHE_LOCK_CHANGE_TIMEOUT 300
132N/A
/* Return a pointer to the cache record at the given byte offset within the
   mapped cache data. The caller must ensure the offset is within the mapped
   area (see mail_cache_map()). The offset argument is parenthesized so that
   expression arguments (e.g. "a ? b : c") cannot alter the pointer
   arithmetic. */
#define CACHE_RECORD(cache, offset) \
	((const struct mail_cache_record *) \
	 ((const char *) (cache)->data + (offset)))

/* The cache can't be used while its header isn't mapped (hdr == NULL). */
#define MAIL_CACHE_IS_UNUSABLE(cache) \
	((cache)->hdr == NULL)
132N/A
/* On-disk header at the start of the cache file. */
struct mail_cache_header {
	/* version is increased only when you can't have backwards
	   compatibility. */
	uint8_t version;
	/* sizeof(uoff_t) of the writer; presumably used to reject files
	   written by an incompatible binary — TODO confirm */
	uint8_t compat_sizeof_uoff_t;
	uint8_t unused[2];

	/* NOTE(review): presumably must match the owning mail index's
	   indexid — verify against mail-cache.c */
	uint32_t indexid;
	/* sequence number of this cache file (see need_compress_file_seq
	   in struct mail_cache) */
	uint32_t file_seq;

	/* see MAIL_CACHE_COMPRESS_CONTINUED_PERCENTAGE */
	uint32_t continued_record_count;

	/* offset to the first hole (struct mail_cache_hole_header),
	   0 if none */
	uint32_t hole_offset;
	uint32_t used_file_size;
	/* bytes of deleted space; see MAIL_CACHE_COMPRESS_PERCENTAGE */
	uint32_t deleted_space;

	/* offset to the first struct mail_cache_header_fields */
	uint32_t field_header_offset;
};
132N/A
/* On-disk header for a block of field definitions. Blocks are chained via
   next_offset (see mail_cache_header_fields_get_next_offset()). The fixed
   part below is followed on disk by the arrays sketched in the #if 0
   section; the MAIL_CACHE_FIELD_*() macros give their byte offsets. */
struct mail_cache_header_fields {
	/* offset to the next fields block; presumably 0 when this is the
	   last one — TODO confirm */
	uint32_t next_offset;
	/* total size of this block in bytes */
	uint32_t size;
	/* number of fields described by the arrays below */
	uint32_t fields_count;

#if 0
	/* last time the field was accessed. not updated more often than
	   once a day. */
	uint32_t last_used[fields_count];
	/* (uint32_t)-1 for variable sized fields */
	uint32_t size[fields_count];
	/* enum mail_cache_field_type */
	uint8_t type[fields_count];
	/* enum mail_cache_decision_type */
	uint8_t decision[fields_count];
	/* NUL-separated list of field names */
	char name[fields_count][];
#endif
};
132N/A
/* Byte offsets of the per-field arrays that follow the fixed part of
   struct mail_cache_header_fields on disk (see the #if 0 layout sketch in
   the struct). Each macro adds the size of the previous array for `count'
   fields: the fixed part is 3 uint32_t's, then last_used[] (uint32_t),
   size[] (uint32_t), type[] (uint8_t), decision[] (uint8_t), names. */
#define MAIL_CACHE_FIELD_LAST_USED() \
	(sizeof(uint32_t) * 3)
#define MAIL_CACHE_FIELD_SIZE(count) \
	(MAIL_CACHE_FIELD_LAST_USED() + sizeof(uint32_t) * (count))
#define MAIL_CACHE_FIELD_TYPE(count) \
	(MAIL_CACHE_FIELD_SIZE(count) + sizeof(uint32_t) * (count))
#define MAIL_CACHE_FIELD_DECISION(count) \
	(MAIL_CACHE_FIELD_TYPE(count) + sizeof(uint8_t) * (count))
#define MAIL_CACHE_FIELD_NAMES(count) \
	(MAIL_CACHE_FIELD_DECISION(count) + sizeof(uint8_t) * (count))
706N/A
/* On-disk header of one cache record. Records for the same mail are
   chained backwards through prev_offset (see mail_cache_link()). */
struct mail_cache_record {
	/* offset of the previous record for this mail; presumably 0 when
	   there is none — TODO confirm */
	uint32_t prev_offset;
	uint32_t size; /* full record size, including this header */
	/* array of { uint32_t field; [ uint32_t size; ] { .. } } */
};
706N/A
/* On-disk header of a deleted (hole) area. Holes form a singly linked
   list starting from mail_cache_header.hole_offset. */
struct mail_cache_hole_header {
	uint32_t next_offset; /* 0 if no holes left */
	uint32_t size; /* including this header */

	/* make sure we notice if we're treating hole as mail_cache_record.
	   magic is a large number so if it's treated as size field, it'll
	   point outside the file */
#define MAIL_CACHE_HOLE_HEADER_MAGIC 0xffeedeff
	uint32_t magic;
};
336N/A
/* In-memory state for one cache field. */
struct mail_cache_field_private {
	/* public field definition (see mail-cache.h) */
	struct mail_cache_field field;

	/* presumably the highest UID this field has been cached for —
	   TODO confirm against decision-handling code */
	uint32_t uid_highwater;
	/* last access timestamp (see MAIL_CACHE_FIELD_DROP_SECS) */
	uint32_t last_used;

	/* Unused fields aren't written to cache file */
	unsigned int used:1;
	/* NOTE(review): presumably set while the field is being added but
	   not yet written — confirm */
	unsigned int adding:1;
	/* caching decision changed in memory; presumably needs writing
	   back — confirm */
	unsigned int decision_dirty:1;
};
379N/A
/* In-memory state for one open cache file. */
struct mail_cache {
	/* the mail index this cache belongs to */
	struct mail_index *index;
	/* the cache's extension ID within the index */
	uint32_t ext_id;

	char *filepath;
	int fd;

	/* inode/device of the opened file; presumably used to detect when
	   the file has been replaced on disk — TODO confirm */
	ino_t st_ino;
	dev_t st_dev;

	/* mmap_base: mapping of the file when mmaping is used.
	   data: what CACHE_RECORD() dereferences; presumably points to
	   mmap_base or to file_cache's buffer depending on mmap_disable —
	   TODO confirm */
	void *mmap_base;
	const void *data;
	size_t mmap_length;
	struct file_cache *file_cache;
	/* mail_cache_map() increases this always. */
	unsigned int remap_counter;

	/* locking state (see mail_cache_lock()/mail_cache_unlock()) */
	struct dotlock_settings dotlock_settings;
	struct dotlock *dotlock;
	struct file_lock *file_lock;

	/* mmap_disable=no: hdr points to data / NULL when cache is invalid.
	   mmap_disable=yes: hdr points to hdr_ro_copy. this is needed because
	   cache invalidation can zero the data any time */
	const struct mail_cache_header *hdr;
	struct mail_cache_header hdr_ro_copy;
	/* hdr_copy gets updated when cache is locked and written when
	   unlocking and hdr_modified=TRUE */
	struct mail_cache_header hdr_copy;

	/* per-field state; field_name_hash maps name -> index into
	   fields[] */
	pool_t field_pool;
	struct mail_cache_field_private *fields;
	/* presumably maps in-memory field index -> field index in the
	   file — TODO confirm */
	uint32_t *field_file_map;
	unsigned int fields_count;
	struct hash_table *field_name_hash; /* name -> idx */
	/* offset of the most recently written fields header — TODO
	   confirm */
	uint32_t last_field_header_offset;

	/* 0 is no need for compression, otherwise the file sequence number
	   which we want compressed. */
	uint32_t need_compress_file_seq;

	/* presumably the reverse of field_file_map: file field index ->
	   in-memory field index — TODO confirm */
	unsigned int *file_field_map;
	unsigned int file_fields_count;

	unsigned int opened:1;
	unsigned int locked:1;
	/* the previous lock attempt failed — TODO confirm usage */
	unsigned int last_lock_failed:1;
	/* hdr_copy was modified and must be written when unlocking */
	unsigned int hdr_modified:1;
	/* fields header has pending changes to write — TODO confirm */
	unsigned int field_header_write_pending:1;
	/* a compression is currently in progress */
	unsigned int compressing:1;
};
230N/A
/* State for detecting infinite loops while following record offset chains
   (see mail_cache_track_loops()). */
struct mail_cache_loop_track {
	/* we're looping if size_sum > (max_offset-min_offset) */
	uoff_t min_offset, max_offset;
	uoff_t size_sum;
};
230N/A
/* A view into the cache, tied to mail index view(s). */
struct mail_cache_view {
	struct mail_cache *cache;
	struct mail_index_view *view, *trans_view;

	/* attached transaction and the sequence range it covers;
	   presumably trans_seq1..trans_seq2 inclusive — TODO confirm */
	struct mail_cache_transaction_ctx *transaction;
	uint32_t trans_seq1, trans_seq2;

	/* loop detection while following record chains */
	struct mail_cache_loop_track loop_track;

	/* if cached_exists_buf[field] == cached_exists_value, it's cached.
	   this allows us to avoid constantly clearing the whole buffer.
	   it needs to be cleared only when cached_exists_value is wrapped. */
	buffer_t *cached_exists_buf;
	uint8_t cached_exists_value;
	/* sequence for which cached_exists_buf is valid — TODO confirm */
	uint32_t cached_exists_seq;
};
230N/A
/* One cached field as returned by mail_cache_lookup_iter_next(). */
struct mail_cache_iterate_field {
	unsigned int field_idx;
	/* field value; data points into the mapped cache file — TODO
	   confirm lifetime (likely invalidated by remapping) */
	const void *data;
	unsigned int size;
};
301N/A
/* State for iterating a message's cached fields (see
   mail_cache_lookup_iter_init()/mail_cache_lookup_iter_next()). */
struct mail_cache_lookup_iterate_ctx {
	struct mail_cache_view *view;
	/* copy of cache->remap_counter; presumably used to notice remaps
	   during iteration — TODO confirm */
	unsigned int remap_counter;
	/* message sequence being iterated */
	uint32_t seq;

	/* current record, read position within it, and its size */
	const struct mail_cache_record *rec;
	unsigned int pos, rec_size;
	/* file offset of the current record */
	uint32_t offset;

	unsigned int stop:1;
	unsigned int failed:1;
	/* TODO confirm: presumably set once newly appended records have
	   been checked */
	unsigned int appends_checked:1;
};
213N/A
/* Open and validate the cache file. Return value semantics aren't visible
   here; presumably follows the lock functions' convention (-1 error,
   0 broken/missing, 1 ok) — TODO confirm in mail-cache.c */
int mail_cache_open_and_verify(struct mail_cache *cache);

/* Explicitly lock the cache file. Returns -1 if error / timed out,
   1 if ok, 0 if cache is broken/doesn't exist */
int mail_cache_lock(struct mail_cache *cache, bool require_same_reset_id);
int mail_cache_try_lock(struct mail_cache *cache);
/* Returns -1 if cache is / just got corrupted, 0 if ok. */
int mail_cache_unlock(struct mail_cache *cache);

/* Write data to the cache file at the given offset; presumably returns
   0 on success, -1 on error — TODO confirm */
int mail_cache_write(struct mail_cache *cache, const void *data, size_t size,
		     uoff_t offset);

/* Read/update/serialize the field definitions stored in the file
   (struct mail_cache_header_fields). */
int mail_cache_header_fields_read(struct mail_cache *cache);
int mail_cache_header_fields_update(struct mail_cache *cache);
void mail_cache_header_fields_get(struct mail_cache *cache, buffer_t *dest);
/* Get the offset of the latest fields header (follows next_offset chain;
   see MAIL_CACHE_HEADER_FIELD_CONTINUE_COUNT) — TODO confirm */
int mail_cache_header_fields_get_next_offset(struct mail_cache *cache,
					     uint32_t *offset_r);

/* Look up the cache offset for seq in the view — TODO confirm
   reset_id_r semantics */
uint32_t mail_cache_lookup_cur_offset(struct mail_index_view *view,
				      uint32_t seq, uint32_t *reset_id_r);
int mail_cache_get_record(struct mail_cache *cache, uint32_t offset,
			  const struct mail_cache_record **rec_r);
uint32_t mail_cache_get_first_new_seq(struct mail_index_view *view);

/* Returns TRUE if offset..size area has been tracked before.
   Returns FALSE if the area may or may not have been tracked before,
   but we don't know for sure yet. */
bool mail_cache_track_loops(struct mail_cache_loop_track *loop_track,
			    uoff_t offset, uoff_t size);

/* Iterate through a message's cached fields. */
void mail_cache_lookup_iter_init(struct mail_cache_view *view, uint32_t seq,
				 struct mail_cache_lookup_iterate_ctx *ctx_r);
/* Returns 1 if field was returned, 0 if end of fields, or -1 if error */
int mail_cache_lookup_iter_next(struct mail_cache_lookup_iterate_ctx *ctx,
				struct mail_cache_iterate_field *field_r);

/* Make offset..offset+size of the cache file accessible through
   cache->data (increments remap_counter) — TODO confirm return
   convention */
int mail_cache_map(struct mail_cache *cache, size_t offset, size_t size);
void mail_cache_file_close(struct mail_cache *cache);
int mail_cache_reopen(struct mail_cache *cache);

/* Update new_offset's prev_offset field to old_offset. */
int mail_cache_link(struct mail_cache *cache, uint32_t old_offset,
		    uint32_t new_offset);
/* Mark record in given offset to be deleted. */
int mail_cache_delete(struct mail_cache *cache, uint32_t offset);

/* Notify the decision handling code that field was looked up for seq.
   This should be called even for fields that aren't currently in cache file */
void mail_cache_decision_state_update(struct mail_cache_view *view,
				      uint32_t seq, unsigned int field);
void mail_cache_decision_add(struct mail_cache_view *view, uint32_t seq,
			     unsigned int field);

/* Handlers hooked into mail index syncing (expunge/sync/lost events). */
int mail_cache_expunge_handler(struct mail_index_sync_map_ctx *sync_ctx,
			       uint32_t seq, const void *data,
			       void **sync_context, void *context);
int mail_cache_sync_handler(struct mail_index_sync_map_ctx *sync_ctx,
			    uint32_t seq, void *old_data, const void *new_data,
			    void **context);
void mail_cache_sync_lost_handler(struct mail_index *index);

/* Log a syscall failure for the given function name against this cache
   file. */
void mail_cache_set_syscall_error(struct mail_cache *cache,
				  const char *function);

#endif