/* mail-index-sync.c revision c91de2744f8c1e61e91082ff5e214450f28a0e7c */
/* Copyright (C) 2003-2004 Timo Sirainen */
#include "lib.h"
#include "array.h"
#include "write-full.h"
#include "mail-index-view-private.h"
#include "mail-index-sync-private.h"
#include "mail-index-transaction-private.h"
#include "mail-transaction-log-private.h"
#include "mail-transaction-util.h"
#include "mail-cache.h"
#include <stdio.h>
#include <stdlib.h>
/* Per-sync state carried from sync begin through commit/rollback.
   NOTE(review): much of this file is not visible here; field roles below
   are grounded where possible in the visible usages and hedged otherwise. */
struct mail_index_sync_ctx {
struct mail_index *index;	/* index being synchronized */
struct mail_index_view *view;	/* sync view over the index */
/* current transaction log record header — presumably set while iterating
   the log during read_and_sort; TODO confirm against missing code */
const struct mail_transaction_header *hdr;
const void *data;	/* payload of the current transaction record */
unsigned int lock_id;	/* index lock handle; released at sync end — verify */
/* TRUE while pending appends still need to be reported by sync_next */
unsigned int sync_appends:1;
/* if set, scan for \Recent messages and emit them as flag updates */
unsigned int sync_recent:1;
/* if set, report dirty-flagged messages as flag updates */
unsigned int sync_dirty:1;
};
{
for (i = 0; i < size; i++) {
}
}
{
for (i = 0; i < size; i++) {
if (u[i].add_flags != 0) {
u[i].add_flags);
}
if (u[i].remove_flags != 0) {
u[i].remove_flags);
}
}
}
{
const char *keyword_names[2];
struct mail_keywords *keywords;
uidset_offset = sizeof(*u) + u->name_size;
if ((uidset_offset % 4) != 0)
t_push();
for (i = 0; i < size; i += 2) {
/* FIXME: mail_index_update_keywords_range() */
u->modify_type, keywords);
}
}
t_pop();
}
{
struct mail_keywords *keywords;
for (i = 0; i < size; i++) {
}
}
}
{
}
{
case MAIL_TRANSACTION_EXPUNGE:
break;
break;
break;
break;
case MAIL_TRANSACTION_APPEND:
break;
default:
return FALSE;
}
return TRUE;
}
{
struct mail_transaction_flag_update update;
const struct mail_index_record *rec;
return -1;
continue;
}
return 0;
}
{
const struct mail_index_record *rec;
bool seen_recent = FALSE;
return -1;
seen_recent = TRUE;
}
}
if (!seen_recent) {
/* no recent messages, drop the sync_recent flag so we
don't scan through the message again */
}
return 0;
}
static void
{
}
static int
{
struct mail_index_sync_list *synclist;
const struct mail_index_transaction_keyword_update *keyword_updates;
unsigned int i, keyword_count;
int ret;
ctx->sync_dirty) {
/* show dirty flags as flag updates */
if (mail_index_sync_add_dirty_updates(ctx) < 0)
return -1;
}
if (ctx->sync_recent) {
if (mail_index_sync_add_recent_updates(ctx) < 0)
return -1;
}
/* read all transactions from log into a transaction in memory.
skip the external ones, they're already synced to mailbox and
included in our view */
continue;
}
/* create an array containing all expunge, flag and keyword update
arrays so we can easily go through all of the changes. */
}
}
/* we must return resets before keyword additions or they get lost */
}
for (i = 0; i < keyword_count; i++) {
synclist->keyword_idx = i;
}
(void *)&keyword_updates[i].remove_seq;
synclist->keyword_idx = i;
}
}
return ret;
}
static bool
{
// FIXME: how's this recent syncing supposed to work?
return TRUE;
return TRUE;
/* already synced */
}
static int
{
int ret;
if (ret <= 0) {
/* either corrupted or the file was deleted for
some reason. either way, we can't go forward */
"Unexpected transaction log desync with index %s",
return -1;
}
return 0;
}
struct mail_index_sync_ctx **ctx_r,
struct mail_index_view **view_r,
struct mail_index_transaction **trans_r,
bool sync_recent, bool sync_dirty)
{
const struct mail_index_header *hdr;
struct mail_index_sync_ctx *ctx;
struct mail_index_view *sync_view;
unsigned int lock_id = 0;
return -1;
/* The view must contain what we expect the mailbox to look like
currently. That allows the backend to update external flag
changes (etc.) if the view doesn't match the mailbox.
We'll update the view to contain everything that exist in the
transaction log except for expunges. They're synced in
mail_index_sync_commit(). */
&lock_id) <= 0) {
// FIXME: handle ret=0 specially?
return -1;
}
return 0;
}
/* broken sync positions. fix them. */
"broken sync positions in index file %s",
if (mail_index_fsck(index) <= 0) {
return -1;
}
}
/* we wish to see all the changes from last mailbox sync position to
the end of the transaction log */
hdr->log_file_mailbox_offset) < 0) {
return -1;
}
/* we need to have all the transactions sorted to optimize
caller's mailbox access patterns */
if (mail_index_sync_read_and_sort(ctx) < 0) {
return -1;
}
/* create the transaction after the view has been updated with
external transactions and marked as sync view */
return 1;
}
static void
const struct mail_transaction_expunge *exp)
{
}
static void
const struct mail_transaction_flag_update *update)
{
}
static void
struct mail_index_sync_list *sync_list)
{
}
{
}
struct mail_index_sync_rec *sync_rec)
{
struct mail_index_sync_list *sync_list;
next_i = (unsigned int)-1;
/* FIXME: replace with a priority queue so we don't have to go
through the whole list constantly. and remember to make sure that
keyword resets are sent before adds! */
for (i = 0; i < count; i++) {
continue;
/* use this one. */
break;
}
next_i = i;
}
}
if (i == count) {
if (next_i == (unsigned int)-1) {
/* nothing left in sync_list */
if (ctx->sync_appends) {
return 1;
}
return 0;
}
i = next_i;
}
(const struct mail_transaction_expunge *)uid_range);
(const struct mail_transaction_flag_update *)uid_range);
} else {
&sync_list[i]);
}
return 1;
}
{
const struct mail_index_sync_list *sync_list;
unsigned int i, count;
if (ctx->sync_appends)
return TRUE;
for (i = 0; i < count; i++) {
return TRUE;
}
return FALSE;
}
{
struct mail_index_sync_list *sync_list;
unsigned int i, count;
for (i = 0; i < count; i++)
}
{
}
{
unsigned int base_size;
const char *path;
if (fd == -1)
return -1;
/* write base header */
if (ret == 0) {
/* write extended headers */
}
if (ret == 0) {
}
if (ret < 0)
ret = -1;
}
ret = -1;
}
ret = -1;
}
if (ret < 0) {
path);
}
}
return ret;
}
{
unsigned int base_size;
if (MAIL_INDEX_IS_IN_MEMORY(index))
return 0;
/* write records. */
if (map->write_seq_first != 0) {
(map->write_seq_last -
return -1;
}
/* write base header */
if (map->write_base_header) {
return -1;
}
/* write extended headers */
if (map->write_ext_header) {
base_size) < 0)
return -1;
}
return 0;
}
#define mail_index_map_has_changed(map) \
(map)->write_seq_first != 0)
{
unsigned int lock_id;
if (!mail_index_map_has_changed(map))
return;
/* header size growed. we can't update this file anymore. */
}
/* index file doesn't exist, it's corrupted or we haven't
opened it for some reason */
}
if (!map->write_atomic) {
/* locking failed, rewrite */
}
}
if (map->write_atomic) {
if (!MAIL_INDEX_IS_IN_MEMORY(index)) {
if (mail_index_recreate(index) < 0) {
return;
}
}
} else {
if (mail_index_write_map_over(index) < 0) {
}
}
if (want_rotate &&
}
static void
{
/* This sync may have seen only external transactions, in which case
it's not required to write the mailbox sync offset. Otherwise we
must update the offset even if nothing else is going to be
written. */
}
{
unsigned int lock_id;
bool want_rotate;
int ret = 0;
/* if cache compression fails, we don't really care.
the cache offsets are updated only if the compression was
successful. */
}
return -1;
}
/* refresh the mapping with newly committed external transactions
and the synced expunges. sync using file handler here so that the
expunge handlers get called. */
&lock_id) <= 0) {
// FIXME: handle ret=0 specially?
// FIXME: do we really need to return failure?
ret = -1;
}
/* FIXME: create a better rule? */
return ret;
}
{
}
{
}
{
const unsigned int *keyword_indexes;
unsigned int i, count;
for (i = 0; i < count; i++) {
if (keyword_indexes[i] == idx)
return FALSE;
}
return TRUE;
for (i = 0; i < count; i++) {
if (keyword_indexes[i] == idx) {
return TRUE;
}
}
return FALSE;
if (array_count(keywords) == 0)
return FALSE;
return TRUE;
default:
i_unreached();
return FALSE;
}
}
const char *fmt, ...)
{
const char *error;
t_push();
"%s", error);
} else {
"View synchronization from transaction log "
error);
}
t_pop();
}