/* mail-index-sync-update.c revision d051664df497582e1eb75a9f238d04b65e858db8 */
/* Copyright (C) 2004 Timo Sirainen */
#include "lib.h"
#include "ioloop.h"
#include "array.h"
#include "mmap-util.h"
#include "mail-index-view-private.h"
#include "mail-index-sync-private.h"
#include "mail-transaction-log.h"
#include "mail-transaction-log-private.h"
/* If we have less than this many bytes to sync from log file, don't bother
reading the main index */
#define MAIL_INDEX_SYNC_MIN_READ_INDEX_SIZE 2048
static void
{
&prev_seq, &prev_offset);
if (!eol) {
/* previous transaction was an extension introduction.
we probably came here from
mail_index_sync_ext_reset(). if there are any more
views which want to continue syncing it needs the
intro. so back up a bit more.
don't do this in case the last transaction in the
log is the extension intro, so we don't keep trying
to sync it over and over again. */
}
} else {
}
}
struct mail_index_map *map)
{
}
static void
{
}
}
struct mail_index_map *
{
}
static int
const char **error_r)
{
/* different seen-flag */
if (hdr->seen_messages_count == 0) {
*error_r = "Seen counter wrong";
return -1;
}
} else {
*error_r = "Seen counter wrong";
return -1;
}
}
}
/* different deleted-flag */
if ((old_flags & MAIL_DELETED) == 0) {
*error_r = "Deleted counter wrong";
return -1;
}
} else {
if (hdr->deleted_messages_count == 0 ||
*error_r = "Deleted counter wrong";
return -1;
}
if (--hdr->deleted_messages_count == 0)
}
}
return 0;
}
static void
{
struct mail_index_map *const *maps;
const char *error;
unsigned int i, count;
for (i = 0; i < count; i++) {
continue;
&error) < 0)
}
}
static void
{
const char *error;
if (all) {
} else {
&error) < 0)
}
}
static void
{
struct mail_index_map *const *maps;
unsigned int i, count;
for (i = 0; i < count; i++) {
if ((flags & MAIL_DELETED) != 0 &&
}
}
static int
{
const struct mail_index_expunge_handler *eh;
struct mail_index_record *rec;
unsigned int i, count;
/* call expunge handlers only when syncing index file */
return 0;
if (!ctx->expunge_handlers_set)
return 0;
return -1;
}
}
return 0;
}
static int
struct mail_index_sync_map_ctx *ctx)
{
struct mail_index_record *rec;
unsigned int i;
for (i = 0; i < count; i++, e++) {
if (seq1 == 0) {
/* everything expunged already */
continue;
}
FALSE);
}
return -1;
/* @UNSAFE */
}
return 1;
}
{
}
struct mail_index_sync_map_ctx *ctx)
{
enum mail_flags new_flags;
void *dest;
"Append with UID %u, but next_uid = %u",
return -1;
}
/* move to memory. the mapping is written when unlocking so we don't
waste time re-mmap()ing multiple times or waste space growing index
file too large */
/* the flags may have changed since it was added to map.
use the updated flags already, so flag counters won't get
broken. */
} else {
/* don't rely on buffer->used being at the correct position.
at least expunges can move it */
}
if ((new_flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0)
return 1;
}
static int sync_flag_update(const struct mail_transaction_flag_update *u,
struct mail_index_sync_map_ctx *ctx)
{
struct mail_index_record *rec;
if (seq1 == 0)
return 1;
if ((u->add_flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0)
flag_mask = ~u->remove_flags;
if (((u->add_flags | u->remove_flags) &
(MAIL_SEEN | MAIL_DELETED)) == 0) {
/* we're not modifying any counted/lowwatered flags */
}
} else {
}
}
return 1;
}
static int sync_header_update(const struct mail_transaction_header_update *u,
struct mail_index_sync_map_ctx *ctx)
{
"Header update outside range: %u + %u > %u",
return -1;
}
/* @UNSAFE */
u + 1, u->size);
}
/* the tail offset updates are intended for internal transaction
log handling. we'll update the offset in the header only when
the sync is finished. */
return 1;
}
const struct mail_transaction_header *hdr,
const void *data)
{
int ret = 0;
t_push();
case MAIL_TRANSACTION_APPEND: {
if (ret <= 0)
break;
}
break;
}
case MAIL_TRANSACTION_EXPUNGE:
/* this is simply a request for expunge */
break;
}
break;
}
case MAIL_TRANSACTION_FLAG_UPDATE: {
if (ret <= 0)
break;
}
break;
}
case MAIL_TRANSACTION_HEADER_UPDATE: {
const struct mail_transaction_header_update *rec;
unsigned int i;
if (ret <= 0)
break;
if ((i % 4) != 0)
i += 4 - (i % 4);
}
break;
}
case MAIL_TRANSACTION_EXT_INTRO: {
unsigned int i;
&prev_seq, &prev_offset);
/* should be just extra padding */
break;
}
"ext intro: name_size too large");
ret = -1;
break;
}
if (ret <= 0)
break;
if ((i % 4) != 0)
i += 4 - (i % 4);
}
break;
}
case MAIL_TRANSACTION_EXT_RESET: {
"ext reset: invalid record size");
ret = -1;
break;
}
break;
}
case MAIL_TRANSACTION_EXT_HDR_UPDATE: {
unsigned int i;
"ext hdr update: invalid record size");
ret = -1;
break;
}
if (ret <= 0)
break;
if ((i % 4) != 0)
i += 4 - (i % 4);
}
break;
}
case MAIL_TRANSACTION_EXT_REC_UPDATE: {
const struct mail_transaction_ext_rec_update *rec;
const struct mail_index_ext *ext;
unsigned int i, record_size;
"Extension record updated "
"without intro prefix");
ret = -1;
break;
}
if (ctx->cur_ext_ignore) {
ret = 1;
break;
}
/* the record is padded to 32bits in the transaction log */
"ext rec update: invalid record size");
ret = -1;
break;
}
if (ret <= 0)
break;
}
break;
}
case MAIL_TRANSACTION_KEYWORD_UPDATE: {
break;
}
case MAIL_TRANSACTION_KEYWORD_RESET: {
break;
}
default:
i_unreached();
}
t_pop();
return ret;
}
struct mail_index_view *view,
{
}
{
}
{
const struct mail_index_record *rec;
unsigned int i;
return;
/* do we have dirty flags anymore? */
break;
}
}
}
#ifdef DEBUG
{
for (i = 0; i < hdr->messages_count; i++) {
const struct mail_index_record *rec;
del++;
}
seen++;
else
}
}
#endif
{
struct mail_index_view *view;
struct mail_index_sync_map_ctx sync_map_ctx;
const struct mail_transaction_header *thdr;
const void *tdata;
int ret;
if (!force) {
/* see if we'd prefer to reopen the index file instead of
syncing the current map from the transaction log */
return 0;
/* we don't know the index's size, so use the
smallest index size we're willing to read */
} else {
}
/* this isn't necessary correct currently, but it should be
close enough */
return 0;
}
if (ret <= 0) {
(void)mail_index_fsck(index);
}
/* can't use it. sync by re-reading index. */
return 0;
}
/* view referenced the map. avoid unnecessary map cloning by
unreferencing the map while view exists. */
if (had_dirty) {
}
/* if syncing updates the header, it updates hdr_copy_buf
and updates hdr_base to hdr_copy_buf. so the buffer must
initially contain a valid header or we'll break it when
writing it. */
}
if (reset) {
/* Reset the entire index. Leave only indexid and
log_file_seq. */
&prev_seq, &prev_offset);
}
/* FIXME: when transaction sync lock is removed, we'll need to handle
the case when a transaction is committed while mailbox is being
synced ([synced transactions][new transaction][ext transaction]).
this means int_offset contains [synced] and ext_offset contains
all */
&tdata)) > 0) {
&prev_seq, &prev_offset);
/* this has been synced already. we're here only to call
expunge handlers and extension update handlers. */
continue;
continue;
}
/* we'll just skip over broken entries */
}
if (had_dirty)
#ifdef DEBUG
#endif
/* transaction log tracks internally the current tail offset.
besides using header updates, it also updates the offset to skip
over following external transactions to avoid extra unneeded log
reading. */
if (!MAIL_INDEX_MAP_IS_IN_MEMORY(map)) {
}
if (sync_map_ctx.errors) {
/* avoid the same syncing errors the next time */
}
/* restore refcount before closing the view. this is necessary also
if map got cloned, because view closing would otherwise destroy it */
if (mail_index_map_check_header(map) <= 0) {
"Synchronization corrupted index header: %s",
(void)mail_index_fsck(index);
}
}