#ifndef MAIL_CACHE_PRIVATE_H
#define MAIL_CACHE_PRIVATE_H
#include "file-dotlock.h"
#include "mail-index-private.h"
#include "mail-cache.h"
/* On-disk header at the start of the cache file.
   NOTE(review): every numeric member of this struct appears to have been
   stripped from this copy (upstream carries version/indexid/file_seq and
   offset fields here). An empty struct is not valid ISO C -- restore the
   members from the original header before compiling. */
struct mail_cache_header {
/* version is increased only when you can't have backwards
compatibility. */
/* NOTE: old versions used this for hole offset, so we can't fully
rely on it */
};
/* Header-fields block stored inside the cache file, describing the
   registered cache fields. The "#if 0" region below is an illustrative
   sketch of the variable-sized trailing layout, not compiled code.
   NOTE(review): the fixed-size members (counts/offsets preceding the
   variable part) appear to have been stripped from this copy -- confirm
   against the original header. */
struct mail_cache_header_fields {
#if 0
/* last time the field was accessed. not updated more often than
once a day. */
/* (uint32_t)-1 for variable sized fields */
/* enum mail_cache_field_type */
/* enum mail_cache_decision_type */
/* NUL-separated list of field names */
char name[fields_count][];
#endif
};
/* Byte offset of the last_used[] data within the header-fields block:
   it sits immediately after three leading uint32_t values.
   NOTE(review): presumably indexes into struct mail_cache_header_fields'
   variable part -- confirm against the original layout. */
#define MAIL_CACHE_FIELD_LAST_USED() \
(3 * sizeof(uint32_t))
/* A single per-message cache record in the file. Fixed header fields are
   followed by a packed sequence of cached field values, as sketched in
   the comment below.
   NOTE(review): the fixed members (e.g. prev_offset/size) appear to have
   been stripped from this copy; an empty struct is not valid ISO C. */
struct mail_cache_record {
/* array of { uint32_t field; [ uint32_t size; ] { .. } } */
};
/* In-memory bookkeeping for one registered cache field.
   NOTE(review): all members appear to have been stripped from this copy
   (upstream wraps a public struct mail_cache_field plus usage flags);
   an empty struct is not valid ISO C. */
struct mail_cache_field_private {
/* Unused fields aren't written to cache file */
};
/* Top-level in-memory state for one cache file. The file contents can be
   accessed in one of three ways, enumerated in the a)/b)/c) comments
   below (mmap of the whole file, a file-cache layer, or plain read()s).
   NOTE(review): several members have clearly been stripped from this
   copy (e.g. the hdr/hdr_copy pointers that the comments below refer to,
   the file-cache handle, dotlock state) -- confirm against the original
   header; member order is part of the ABI, do not reorder. */
struct mail_cache {
/* path to the cache file on disk */
char *filepath;
/* open fd for filepath, or presumably -1 when closed -- TODO confirm */
int fd;
/* a) mmaping the whole file */
void *mmap_base;
/* b) using file cache */
/* c) using small read() calls with MAIL_INDEX_OPEN_FLAG_SAVEONLY */
/* mail_cache_map() increases this always. */
unsigned int remap_counter;
/* mmap_disable=no: hdr points to data / NULL when cache is invalid.
mmap_disable=yes: hdr points to hdr_ro_copy. this is needed because
cache invalidation can zero the data any time */
/* hdr_copy gets updated when cache is locked and written when
unlocking and hdr_modified=TRUE */
/* number of fields registered in memory */
unsigned int fields_count;
/* 0 is no need for compression, otherwise the file sequence number
which we want compressed. */
/* maps field indexes as stored in the file to in-memory field
indexes -- NOTE(review): presumably; confirm direction of mapping */
unsigned int *file_field_map;
unsigned int file_fields_count;
};
/* State for detecting loops while following record offset chains.
   NOTE(review): the members (min_offset/max_offset/size_sum, per the
   comment below) appear to have been stripped from this copy; an empty
   struct is not valid ISO C. */
struct mail_cache_loop_track {
/* we're looping if size_sum > (max_offset-min_offset) */
};
/* Cached explanation of why a lookup found nothing, to avoid recomputing
   the reason repeatedly.
   NOTE(review): all members have been stripped from this copy; an empty
   struct is not valid ISO C -- restore from the original header. */
struct mail_cache_missing_reason_cache {
};
/* A view into the cache, tied to an index view; tracks per-field
   "is it cached" state using the generation-counter trick described
   in the comment below.
   NOTE(review): the members (cache/view pointers, cached_exists_buf,
   cached_exists_value, etc.) have been stripped from this copy; an
   empty struct is not valid ISO C. */
struct mail_cache_view {
/* if cached_exists_buf[field] == cached_exists_value, it's cached.
this allows us to avoid constantly clearing the whole buffer.
it needs to be cleared only when cached_exists_value is wrapped. */
};
/* One cached field yielded by the field-iteration API: which field it
   is, and where its raw value lives. */
struct mail_cache_iterate_field {
/* in-memory index of the field -- NOTE(review): presumably indexes the
   registered-fields array; confirm against iteration code */
unsigned int field_idx;
/* size of the value pointed to by data, in bytes */
unsigned int size;
/* raw field value; NOTE(review): presumably points into mapped cache
   memory, so it may be invalidated by a remap -- confirm */
const void *data;
};
/* Context carried across calls of the cached-field lookup iterator.
   NOTE(review): several members (view pointer, current record/offset/pos)
   appear to have been stripped from this copy. */
struct mail_cache_lookup_iterate_ctx {
/* snapshot of mail_cache.remap_counter -- presumably used to detect
   that the file was remapped mid-iteration; TODO confirm */
unsigned int remap_counter;
/* next index within in-transaction (not yet committed) cached fields --
   NOTE(review): inferred from the name; confirm against caller */
unsigned int trans_next_idx;
};
/* NOTE(review): everything from here to the end of the header is a set of
   FRAGMENTS. The function names, return types and leading parameters of
   these prototypes have been lost from this copy -- each orphaned
   "...);" line below is the tail of a declaration, and some comments are
   truncated mid-sentence. This region will not compile as-is; restore
   the full declarations from the original mail-cache-private.h. */
/* Explicitly lock the cache file. Returns -1 if error / timed out,
/* Returns -1 if cache is / just got corrupted, 0 if ok. */
const struct mail_cache_record **rec_r);
/* Returns TRUE if offset..size area has been tracked before.
Returns FALSE if the area may or may not have been tracked before,
but we don't know for sure yet. */
/* Iterate through a message's cached fields. */
struct mail_cache_lookup_iterate_ctx *ctx_r);
/* Returns 1 if field was returned, 0 if end of fields, or -1 if error */
struct mail_cache_iterate_field *field_r);
const struct mail_cache_record *
unsigned int seq,
unsigned int *trans_next_idx);
const void **data_r);
/* Notify the decision handling code that field was looked up for seq.
This should be called even for fields that aren't currently in cache file.
This is used to update caching decisions for fields that already exist
in the cache file. */
/* Notify the decision handling code when field is committed to cache.
If this is the first time the field is added to cache, its caching decision
is updated to TEMP. */
unsigned int field);
void **sync_context, void *context);
const char *function);
#endif