/* mail-index-sync-update.c, revision 89b7d6ce9266288c156e3513f5798680f1e33572 */
/* Copyright (c) 2004-2014 Dovecot authors, see the included COPYING file */

#include "lib.h"
#include "ioloop.h"
#include "array.h"
#include "mmap-util.h"
#include "mail-index-modseq.h"
#include "mail-index-view-private.h"
#include "mail-index-sync-private.h"
#include "mail-transaction-log.h"
#include "mail-transaction-log-private.h"

/* If we have less than this many bytes to sync from the log file, don't
   bother reading the main index */
#define MAIL_INDEX_SYNC_MIN_READ_INDEX_SIZE 2048

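/* Record the current transaction log position (as seen by the view's log
   view) into the map's header. With eol=FALSE this may back up to an
   extension intro that still needs syncing; with eol=TRUE it also resets
   the tail offset if the log file sequence changed. */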
static void
mail_index_sync_update_log_offset(struct mail_index_sync_map_ctx *ctx,
                                  struct mail_index_map *map, bool eol)
{
        uint32_t prev_seq;
        uoff_t prev_offset;

        mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
                                               &prev_seq, &prev_offset);
        if (prev_seq == 0) {
                /* handling lost changes in view syncing */
                return;
        }

        if (!eol) {
                if (prev_offset == ctx->ext_intro_end_offset &&
                    prev_seq == ctx->ext_intro_seq) {
                        /* the previous transaction was an extension
                           introduction. we probably came here from
                           mail_index_sync_ext_reset(). if any other views
                           still want to continue syncing, they need the
                           intro, so back up a bit more.

                           don't do this if the extension intro is the last
                           transaction in the log, so we don't keep trying
                           to sync it over and over again. */
                        prev_offset = ctx->ext_intro_offset;
                }
                map->hdr.log_file_seq = prev_seq;
        } else {
                i_assert(ctx->view->index->log->head->hdr.file_seq == prev_seq);
                if (map->hdr.log_file_seq != prev_seq) {
                        map->hdr.log_file_seq = prev_seq;
                        map->hdr.log_file_tail_offset = 0;
                }
        }
        map->hdr.log_file_head_offset = prev_offset;
}

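/* Replace the view's map (and the index's map, unless this is a view-only
   sync) with the given map. The old map's header is first updated with the
   current log position, then the old map is unreferenced. */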
static void mail_index_sync_replace_map(struct mail_index_sync_map_ctx *ctx,
                                        struct mail_index_map *map)
{
        struct mail_index_view *view = ctx->view;

        i_assert(view->map != map);

        mail_index_sync_update_log_offset(ctx, view->map, FALSE);
        mail_index_unmap(&view->map);
        view->map = map;

        if (ctx->type != MAIL_INDEX_SYNC_HANDLER_VIEW)
                view->index->map = map;

        mail_index_modseq_sync_map_replaced(ctx->modseq_ctx);
}

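/* Make sure the view's map can be modified without affecting other users:
   clone it if it's shared and copy it into memory if it's mmap()ed.
   Returns the (possibly new) map. */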
static struct mail_index_map *
mail_index_sync_move_to_private_memory(struct mail_index_sync_map_ctx *ctx)
{
        struct mail_index_map *map = ctx->view->map;

        if (map->refcount > 1) {
                map = mail_index_map_clone(map);
                mail_index_sync_replace_map(ctx, map);
        }

        if (!MAIL_INDEX_MAP_IS_IN_MEMORY(ctx->view->map))
                mail_index_map_move_to_memory(ctx->view->map);
        mail_index_modseq_sync_map_replaced(ctx->modseq_ctx);
        return map;
}

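/* Returns a map that is private to this sync: in memory and with its record
   map no longer shared with any other map, so records can be moved around
   safely (e.g. for expunges). */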
struct mail_index_map *
mail_index_sync_get_atomic_map(struct mail_index_sync_map_ctx *ctx)
{
        (void)mail_index_sync_move_to_private_memory(ctx);
        mail_index_record_map_move_to_private(ctx->view->map);
        mail_index_modseq_sync_map_replaced(ctx->modseq_ctx);
        return ctx->view->map;
}

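/* Adjust the header's seen/deleted message counters for a single message
   whose flags changed from old_flags to new_flags. Returns 0 on success,
   or -1 with error_r set if a counter would become inconsistent. */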
static int
mail_index_header_update_counts(struct mail_index_header *hdr,
                                uint8_t old_flags, uint8_t new_flags,
                                const char **error_r)
{
        if (((old_flags ^ new_flags) & MAIL_SEEN) != 0) {
                /* different seen-flag */
                if ((old_flags & MAIL_SEEN) != 0) {
                        if (hdr->seen_messages_count == 0) {
                                *error_r = "Seen counter wrong";
                                return -1;
                        }
                        hdr->seen_messages_count--;
                } else {
                        if (hdr->seen_messages_count >= hdr->messages_count) {
                                *error_r = "Seen counter wrong";
                                return -1;
                        }

                        if (++hdr->seen_messages_count == hdr->messages_count)
                                hdr->first_unseen_uid_lowwater = hdr->next_uid;
                }
        }

        if (((old_flags ^ new_flags) & MAIL_DELETED) != 0) {
                /* different deleted-flag */
                if ((old_flags & MAIL_DELETED) == 0) {
                        hdr->deleted_messages_count++;
                        if (hdr->deleted_messages_count > hdr->messages_count) {
                                *error_r = "Deleted counter wrong";
                                return -1;
                        }
                } else {
                        if (hdr->deleted_messages_count == 0 ||
                            hdr->deleted_messages_count > hdr->messages_count) {
                                *error_r = "Deleted counter wrong";
                                return -1;
                        }

                        if (--hdr->deleted_messages_count == 0)
                                hdr->first_deleted_uid_lowwater = hdr->next_uid;
                }
        }
        return 0;
}

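/* Update flag counters in all maps that share this view's record map,
   skipping maps that don't contain the UID yet. */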
static void
mail_index_sync_header_update_counts_all(struct mail_index_sync_map_ctx *ctx,
                                         uint32_t uid,
                                         uint8_t old_flags, uint8_t new_flags)
{
        struct mail_index_map *const *maps;
        const char *error;
        unsigned int i, count;

        maps = array_get(&ctx->view->map->rec_map->maps, &count);
        for (i = 0; i < count; i++) {
                if (uid >= maps[i]->hdr.next_uid)
                        continue;

                if (mail_index_header_update_counts(&maps[i]->hdr,
                                                    old_flags, new_flags,
                                                    &error) < 0)
                        mail_index_sync_set_corrupted(ctx, "%s", error);
        }
}

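/* Update flag counters in the view's current map only. Marks the index
   corrupted if the UID is outside the map. */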
static void
mail_index_sync_header_update_counts(struct mail_index_sync_map_ctx *ctx,
                                     uint32_t uid, uint8_t old_flags,
                                     uint8_t new_flags)
{
        const char *error;

        if (uid >= ctx->view->map->hdr.next_uid) {
                mail_index_sync_set_corrupted(ctx, "uid %u >= next_uid %u",
                                              uid, ctx->view->map->hdr.next_uid);
        } else {
                if (mail_index_header_update_counts(&ctx->view->map->hdr,
                                                    old_flags, new_flags,
                                                    &error) < 0)
                        mail_index_sync_set_corrupted(ctx, "%s", error);
        }
}
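/* Lower the first_unseen_uid_lowwater / first_deleted_uid_lowwater header
   fields in all maps sharing this record map, if this message's flags
   require it. */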
static void
mail_index_header_update_lowwaters(struct mail_index_sync_map_ctx *ctx,
uint32_t uid, enum mail_flags flags)
{
struct mail_index_map *const *maps;
unsigned int i, count;
maps = array_get(&ctx->view->map->rec_map->maps, &count);
for (i = 0; i < count; i++) {
if ((flags & MAIL_SEEN) == 0 &&
uid < maps[i]->hdr.first_unseen_uid_lowwater)
maps[i]->hdr.first_unseen_uid_lowwater = uid;
if ((flags & MAIL_DELETED) != 0 &&
uid < maps[i]->hdr.first_deleted_uid_lowwater)
maps[i]->hdr.first_deleted_uid_lowwater = uid;
}
}
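/* Call every registered expunge handler for each record in the given
   sequence range. */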
static void
sync_expunge_call_handlers(struct mail_index_sync_map_ctx *ctx,
uint32_t seq1, uint32_t seq2)
{
const struct mail_index_expunge_handler *eh;
struct mail_index_record *rec;
uint32_t seq;
array_foreach(&ctx->expunge_handlers, eh) {
for (seq = seq1; seq <= seq2; seq++) {
rec = MAIL_INDEX_MAP_IDX(ctx->view->map, seq-1);
/* FIXME: does expunge handler's return value matter?
we probably shouldn't disallow expunges if the
handler returns failure.. should it be just changed
to return void? */
(void)eh->handler(ctx, seq,
PTR_OFFSET(rec, eh->record_offset),
eh->sync_context, eh->context);
}
}
}
static bool
sync_expunge_handlers_init(struct mail_index_sync_map_ctx *ctx)
{
/* call expunge handlers only when syncing index file */
if (ctx->type != MAIL_INDEX_SYNC_HANDLER_FILE)
return FALSE;
if (!ctx->expunge_handlers_set)
mail_index_sync_init_expunge_handlers(ctx);
if (!array_is_created(&ctx->expunge_handlers))
return FALSE;
return TRUE;
}
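/* Expunge all records in the given sequence ranges: call the expunge
   handlers, update flag counters and remove the records from the map.
   The ranges are processed in reverse so the memmove()s stay small. */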
static void
sync_expunge_range(struct mail_index_sync_map_ctx *ctx, const ARRAY_TYPE(seq_range) *seqs)
{
const struct seq_range *range;
unsigned int i, count;
/* call the expunge handlers first */
range = array_get(seqs, &count);
i_assert(count > 0);
if (sync_expunge_handlers_init(ctx)) {
for (i = 0; i < count; i++) {
sync_expunge_call_handlers(ctx,
range[i].seq1, range[i].seq2);
}
}
/* do this in reverse so the memmove()s are smaller */
for (i = count; i > 0; i--) {
uint32_t seq1 = range[i-1].seq1;
uint32_t seq2 = range[i-1].seq2;
struct mail_index_map *map;
struct mail_index_record *rec;
uint32_t seq_count, seq;
map = mail_index_sync_get_atomic_map(ctx);
for (seq = seq1; seq <= seq2; seq++) {
rec = MAIL_INDEX_MAP_IDX(map, seq-1);
mail_index_sync_header_update_counts(ctx, rec->uid, rec->flags, 0);
}
/* @UNSAFE */
memmove(MAIL_INDEX_MAP_IDX(map, seq1-1),
MAIL_INDEX_MAP_IDX(map, seq2),
(map->rec_map->records_count - seq2) * map->hdr.record_size);
seq_count = seq2 - seq1 + 1;
map->rec_map->records_count -= seq_count;
map->hdr.messages_count -= seq_count;
mail_index_modseq_expunge(ctx->modseq_ctx, seq1, seq2);
}
}
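/* Reserve space for one more record at the end of the map's record buffer
   and return a pointer to it. */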
static void *sync_append_record(struct mail_index_map *map)
{
size_t append_pos;
void *ret;
append_pos = map->rec_map->records_count * map->hdr.record_size;
ret = buffer_get_space_unsafe(map->rec_map->buffer, append_pos,
map->hdr.record_size);
map->rec_map->records =
buffer_get_modifiable_data(map->rec_map->buffer, NULL);
return ret;
}
static bool sync_update_ignored_change(struct mail_index_sync_map_ctx *ctx)
{
struct mail_index_transaction_commit_result *result =
ctx->view->index->sync_commit_result;
uint32_t prev_log_seq;
uoff_t prev_log_offset, trans_start_offset, trans_end_offset;
if (result == NULL)
return FALSE;
/* we'll return TRUE if this modseq change was written within the
transaction that was just committed */
mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
&prev_log_seq, &prev_log_offset);
if (prev_log_seq != result->log_file_seq)
return FALSE;
trans_end_offset = result->log_file_offset;
trans_start_offset = trans_end_offset - result->commit_size;
if (prev_log_offset < trans_start_offset ||
prev_log_offset >= trans_end_offset)
return FALSE;
return TRUE;
}
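/* Apply a modseq update transaction. Each entry gives a minimum modseq for
   a message (an entry with uid=0 only affects the highest known modseq),
   and the highest modseq is raised accordingly. */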
static int
sync_modseq_update(struct mail_index_sync_map_ctx *ctx,
const struct mail_transaction_modseq_update *u,
unsigned int size)
{
struct mail_index_view *view = ctx->view;
const struct mail_transaction_modseq_update *end;
uint32_t seq;
uint64_t min_modseq, highest_modseq = 0;
int ret;
end = CONST_PTR_OFFSET(u, size);
for (; u < end; u++) {
if (u->uid == 0)
seq = 0;
else if (!mail_index_lookup_seq(view, u->uid, &seq))
continue;
min_modseq = ((uint64_t)u->modseq_high32 << 32) |
u->modseq_low32;
if (highest_modseq < min_modseq)
highest_modseq = min_modseq;
ret = seq == 0 ? 1 :
mail_index_modseq_set(view, seq, min_modseq);
if (ret < 0) {
mail_index_sync_set_corrupted(ctx,
"modseqs updated before they were enabled");
return -1;
}
if (ret == 0 && sync_update_ignored_change(ctx))
view->index->sync_commit_result->ignored_modseq_changes++;
}
mail_index_modseq_update_highest(ctx->modseq_ctx, highest_modseq);
return 1;
}
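/* Append a new record from an append transaction to the map (moving the map
   to private memory first) and update the header counters and lowwaters.
   If the record was already appended by this same sync, only the counters
   are updated. */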
static int sync_append(const struct mail_index_record *rec,
struct mail_index_sync_map_ctx *ctx)
{
struct mail_index_view *view = ctx->view;
struct mail_index_map *map = view->map;
const struct mail_index_record *old_rec;
enum mail_flags new_flags;
void *dest;
if (rec->uid < map->hdr.next_uid) {
mail_index_sync_set_corrupted(ctx,
"Append with UID %u, but next_uid = %u",
rec->uid, map->hdr.next_uid);
return -1;
}
/* move to memory. the mapping is written when unlocking so we don't
waste time re-mmap()ing multiple times or waste space growing index
file too large */
map = mail_index_sync_move_to_private_memory(ctx);
if (rec->uid <= map->rec_map->last_appended_uid) {
i_assert(map->hdr.messages_count < map->rec_map->records_count);
/* the flags may have changed since it was added to map.
use the updated flags already, so flag counters won't get
broken. */
old_rec = MAIL_INDEX_MAP_IDX(map, map->hdr.messages_count);
i_assert(old_rec->uid == rec->uid);
new_flags = old_rec->flags;
} else {
/* don't rely on buffer->used being at the correct position.
at least expunges can move it */
dest = sync_append_record(map);
memcpy(dest, rec, sizeof(*rec));
memset(PTR_OFFSET(dest, sizeof(*rec)), 0,
map->hdr.record_size - sizeof(*rec));
map->rec_map->records_count++;
map->rec_map->last_appended_uid = rec->uid;
new_flags = rec->flags;
mail_index_modseq_append(ctx->modseq_ctx,
map->rec_map->records_count);
}
map->hdr.messages_count++;
map->hdr.next_uid = rec->uid+1;
if ((new_flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0)
map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;
mail_index_header_update_lowwaters(ctx, rec->uid, new_flags);
mail_index_sync_header_update_counts(ctx, rec->uid, 0, new_flags);
return 1;
}
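/* Apply a flag update transaction to all existing records in the UID range. */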
static int sync_flag_update(const struct mail_transaction_flag_update *u,
struct mail_index_sync_map_ctx *ctx)
{
struct mail_index_view *view = ctx->view;
struct mail_index_record *rec;
uint8_t flag_mask, old_flags;
uint32_t idx, seq1, seq2;
if (!mail_index_lookup_seq_range(view, u->uid1, u->uid2, &seq1, &seq2))
return 1;
if (!MAIL_TRANSACTION_FLAG_UPDATE_IS_INTERNAL(u)) {
mail_index_modseq_update_flags(ctx->modseq_ctx,
u->add_flags | u->remove_flags,
seq1, seq2);
}
if ((u->add_flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0)
view->map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;
flag_mask = ~u->remove_flags;
if (((u->add_flags | u->remove_flags) &
(MAIL_SEEN | MAIL_DELETED)) == 0) {
/* we're not modifying any counted/lowwatered flags */
for (idx = seq1-1; idx < seq2; idx++) {
rec = MAIL_INDEX_MAP_IDX(view->map, idx);
rec->flags = (rec->flags & flag_mask) | u->add_flags;
}
} else {
for (idx = seq1-1; idx < seq2; idx++) {
rec = MAIL_INDEX_MAP_IDX(view->map, idx);
old_flags = rec->flags;
rec->flags = (rec->flags & flag_mask) | u->add_flags;
mail_index_header_update_lowwaters(ctx, rec->uid,
rec->flags);
mail_index_sync_header_update_counts_all(ctx, rec->uid,
old_flags,
rec->flags);
}
}
return 1;
}
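/* Apply a base header update to the map's header copy. next_uid is never
   allowed to shrink, and log_file_tail_offset changes are deferred until
   the sync finishes. */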
static int sync_header_update(const struct mail_transaction_header_update *u,
struct mail_index_sync_map_ctx *ctx)
{
#define MAIL_INDEX_HEADER_UPDATE_FIELD_IN_RANGE(u, field) \
((u)->offset <= offsetof(struct mail_index_header, field) && \
(u)->offset + (u)->size > offsetof(struct mail_index_header, field))
struct mail_index_map *map = ctx->view->map;
uint32_t orig_log_file_tail_offset = map->hdr.log_file_tail_offset;
uint32_t orig_next_uid = map->hdr.next_uid;
if (u->offset >= map->hdr.base_header_size ||
u->offset + u->size > map->hdr.base_header_size) {
mail_index_sync_set_corrupted(ctx,
"Header update outside range: %u + %u > %u",
u->offset, u->size, map->hdr.base_header_size);
return -1;
}
buffer_write(map->hdr_copy_buf, u->offset, u + 1, u->size);
map->hdr_base = map->hdr_copy_buf->data;
/* @UNSAFE */
if ((uint32_t)(u->offset + u->size) <= sizeof(map->hdr)) {
memcpy(PTR_OFFSET(&map->hdr, u->offset),
u + 1, u->size);
} else if (u->offset < sizeof(map->hdr)) {
memcpy(PTR_OFFSET(&map->hdr, u->offset),
u + 1, sizeof(map->hdr) - u->offset);
}
if (map->hdr.next_uid < orig_next_uid) {
/* next_uid update tried to shrink its value. this can happen
in some race conditions, e.g. with dsync, so just
silently ignore it. */
map->hdr.next_uid = orig_next_uid;
}
/* the tail offset updates are intended for internal transaction
log handling. we'll update the offset in the header only when
the sync is finished. */
map->hdr.log_file_tail_offset = orig_log_file_tail_offset;
return 1;
}
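/* Apply a single transaction log record to the map, dispatching on the
   record type. A negative return value means the record was broken and the
   index was marked corrupted. */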
static int
mail_index_sync_record_real(struct mail_index_sync_map_ctx *ctx,
const struct mail_transaction_header *hdr,
const void *data)
{
uint64_t modseq;
int ret = 0;
switch (hdr->type & MAIL_TRANSACTION_TYPE_MASK) {
case MAIL_TRANSACTION_APPEND: {
const struct mail_index_record *rec, *end;
end = CONST_PTR_OFFSET(data, hdr->size);
for (rec = data; rec < end; rec++) {
ret = sync_append(rec, ctx);
if (ret <= 0)
break;
}
break;
}
case MAIL_TRANSACTION_EXPUNGE:
case MAIL_TRANSACTION_EXPUNGE|MAIL_TRANSACTION_EXPUNGE_PROT: {
const struct mail_transaction_expunge *rec = data, *end;
ARRAY_TYPE(seq_range) seqs;
uint32_t seq1, seq2;
if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
/* this is simply a request for expunge */
break;
}
t_array_init(&seqs, 64);
end = CONST_PTR_OFFSET(data, hdr->size);
for (; rec != end; rec++) {
if (mail_index_lookup_seq_range(ctx->view,
rec->uid1, rec->uid2, &seq1, &seq2))
seq_range_array_add_range(&seqs, seq1, seq2);
}
sync_expunge_range(ctx, &seqs);
break;
}
case MAIL_TRANSACTION_EXPUNGE_GUID:
case MAIL_TRANSACTION_EXPUNGE_GUID|MAIL_TRANSACTION_EXPUNGE_PROT: {
const struct mail_transaction_expunge_guid *rec = data, *end;
ARRAY_TYPE(seq_range) seqs;
uint32_t seq;
if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
/* this is simply a request for expunge */
break;
}
t_array_init(&seqs, 64);
end = CONST_PTR_OFFSET(data, hdr->size);
for (; rec != end; rec++) {
i_assert(rec->uid != 0);
if (mail_index_lookup_seq(ctx->view, rec->uid, &seq))
seq_range_array_add(&seqs, seq);
}
sync_expunge_range(ctx, &seqs);
break;
}
case MAIL_TRANSACTION_FLAG_UPDATE: {
const struct mail_transaction_flag_update *rec, *end;
end = CONST_PTR_OFFSET(data, hdr->size);
for (rec = data; rec < end; rec++) {
ret = sync_flag_update(rec, ctx);
if (ret <= 0)
break;
}
break;
}
case MAIL_TRANSACTION_HEADER_UPDATE: {
const struct mail_transaction_header_update *rec;
unsigned int i;
for (i = 0; i < hdr->size; ) {
rec = CONST_PTR_OFFSET(data, i);
ret = sync_header_update(rec, ctx);
if (ret <= 0)
break;
i += sizeof(*rec) + rec->size;
if ((i % 4) != 0)
i += 4 - (i % 4);
}
break;
}
case MAIL_TRANSACTION_EXT_INTRO: {
const struct mail_transaction_ext_intro *rec = data;
unsigned int i;
uint32_t prev_seq;
uoff_t prev_offset;
mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
&prev_seq, &prev_offset);
ctx->ext_intro_seq = prev_seq;
ctx->ext_intro_offset = prev_offset;
ctx->ext_intro_end_offset =
prev_offset + hdr->size + sizeof(*hdr);
for (i = 0; i < hdr->size; ) {
if (i + sizeof(*rec) > hdr->size) {
/* should be just extra padding */
break;
}
rec = CONST_PTR_OFFSET(data, i);
/* name_size checked by _log_view_next() */
i_assert(i + sizeof(*rec) + rec->name_size <= hdr->size);
ret = mail_index_sync_ext_intro(ctx, rec);
if (ret <= 0)
break;
i += sizeof(*rec) + rec->name_size;
if ((i % 4) != 0)
i += 4 - (i % 4);
}
break;
}
case MAIL_TRANSACTION_EXT_RESET: {
struct mail_transaction_ext_reset rec;
/* old versions have only new_reset_id */
if (hdr->size < sizeof(uint32_t)) {
mail_index_sync_set_corrupted(ctx,
"ext reset: invalid record size");
ret = -1;
break;
}
memset(&rec, 0, sizeof(rec));
memcpy(&rec, data, I_MIN(hdr->size, sizeof(rec)));
ret = mail_index_sync_ext_reset(ctx, &rec);
break;
}
case MAIL_TRANSACTION_EXT_HDR_UPDATE: {
const struct mail_transaction_ext_hdr_update *rec;
unsigned int i;
for (i = 0; i < hdr->size; ) {
rec = CONST_PTR_OFFSET(data, i);
if (i + sizeof(*rec) > hdr->size ||
i + sizeof(*rec) + rec->size > hdr->size) {
mail_index_sync_set_corrupted(ctx,
"ext hdr update: invalid record size");
ret = -1;
break;
}
ret = mail_index_sync_ext_hdr_update(ctx, rec->offset,
rec->size, rec + 1);
if (ret <= 0)
break;
i += sizeof(*rec) + rec->size;
if ((i % 4) != 0)
i += 4 - (i % 4);
}
break;
}
case MAIL_TRANSACTION_EXT_HDR_UPDATE32: {
const struct mail_transaction_ext_hdr_update32 *rec;
unsigned int i;
for (i = 0; i < hdr->size; ) {
rec = CONST_PTR_OFFSET(data, i);
if (i + sizeof(*rec) > hdr->size ||
i + sizeof(*rec) + rec->size > hdr->size) {
mail_index_sync_set_corrupted(ctx,
"ext hdr update: invalid record size");
ret = -1;
break;
}
ret = mail_index_sync_ext_hdr_update(ctx, rec->offset,
rec->size, rec + 1);
if (ret <= 0)
break;
i += sizeof(*rec) + rec->size;
if ((i % 4) != 0)
i += 4 - (i % 4);
}
break;
}
case MAIL_TRANSACTION_EXT_REC_UPDATE: {
const struct mail_transaction_ext_rec_update *rec;
const struct mail_index_ext *ext;
unsigned int i, record_size;
if (ctx->cur_ext_map_idx == (uint32_t)-1) {
mail_index_sync_set_corrupted(ctx,
"Extension record updated "
"without intro prefix");
ret = -1;
break;
}
if (ctx->cur_ext_ignore) {
ret = 1;
break;
}
ext = array_idx(&ctx->view->map->extensions,
ctx->cur_ext_map_idx);
/* the record is padded to 32bits in the transaction log */
record_size = (sizeof(*rec) + ext->record_size + 3) & ~3;
for (i = 0; i < hdr->size; i += record_size) {
rec = CONST_PTR_OFFSET(data, i);
if (i + record_size > hdr->size) {
mail_index_sync_set_corrupted(ctx,
"ext rec update: invalid record size");
ret = -1;
break;
}
ret = mail_index_sync_ext_rec_update(ctx, rec);
if (ret <= 0)
break;
}
break;
}
case MAIL_TRANSACTION_EXT_ATOMIC_INC: {
const struct mail_transaction_ext_atomic_inc *rec, *end;
if (ctx->cur_ext_map_idx == (uint32_t)-1) {
mail_index_sync_set_corrupted(ctx,
"Extension record updated "
"without intro prefix");
ret = -1;
break;
}
if (ctx->cur_ext_ignore) {
ret = 1;
break;
}
end = CONST_PTR_OFFSET(data, hdr->size);
for (rec = data; rec < end; rec++) {
ret = mail_index_sync_ext_atomic_inc(ctx, rec);
if (ret <= 0)
break;
}
break;
}
case MAIL_TRANSACTION_KEYWORD_UPDATE: {
const struct mail_transaction_keyword_update *rec = data;
ret = mail_index_sync_keywords(ctx, hdr, rec);
break;
}
case MAIL_TRANSACTION_KEYWORD_RESET: {
const struct mail_transaction_keyword_reset *rec = data;
ret = mail_index_sync_keywords_reset(ctx, hdr, rec);
break;
}
case MAIL_TRANSACTION_MODSEQ_UPDATE: {
const struct mail_transaction_modseq_update *rec = data;
ret = sync_modseq_update(ctx, rec, hdr->size);
break;
}
case MAIL_TRANSACTION_INDEX_DELETED:
if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
/* next sync finishes the deletion */
ctx->view->index->index_delete_requested = TRUE;
} else {
/* transaction log reading handles this */
}
break;
case MAIL_TRANSACTION_INDEX_UNDELETED:
ctx->view->index->index_delete_requested = FALSE;
break;
case MAIL_TRANSACTION_BOUNDARY:
break;
case MAIL_TRANSACTION_ATTRIBUTE_UPDATE:
modseq = mail_transaction_log_view_get_prev_modseq(ctx->view->log_view);
mail_index_modseq_update_highest(ctx->modseq_ctx, modseq);
break;
default:
mail_index_sync_set_corrupted(ctx,
"Unknown transaction record type 0x%x",
(hdr->type & MAIL_TRANSACTION_TYPE_MASK));
ret = -1;
break;
}
return ret;
}
int mail_index_sync_record(struct mail_index_sync_map_ctx *ctx,
const struct mail_transaction_header *hdr,
const void *data)
{
int ret;
T_BEGIN {
ret = mail_index_sync_record_real(ctx, hdr, data);
} T_END;
return ret;
}
void mail_index_sync_map_init(struct mail_index_sync_map_ctx *sync_map_ctx,
struct mail_index_view *view,
enum mail_index_sync_handler_type type)
{
memset(sync_map_ctx, 0, sizeof(*sync_map_ctx));
sync_map_ctx->view = view;
sync_map_ctx->cur_ext_map_idx = (uint32_t)-1;
sync_map_ctx->type = type;
sync_map_ctx->modseq_ctx = mail_index_modseq_sync_begin(sync_map_ctx);
mail_index_sync_init_handlers(sync_map_ctx);
}
void mail_index_sync_map_deinit(struct mail_index_sync_map_ctx *sync_map_ctx)
{
i_assert(sync_map_ctx->modseq_ctx == NULL);
if (sync_map_ctx->unknown_extensions != NULL)
buffer_free(&sync_map_ctx->unknown_extensions);
if (sync_map_ctx->expunge_handlers_used)
mail_index_sync_deinit_expunge_handlers(sync_map_ctx);
mail_index_sync_deinit_handlers(sync_map_ctx);
}
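/* Re-add the "have dirty" header flag if any record still has the dirty
   flag set. Called at the end of syncing after the flag was cleared. */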
static void mail_index_sync_update_hdr_dirty_flag(struct mail_index_map *map)
{
const struct mail_index_record *rec;
unsigned int i;
if ((map->hdr.flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0)
return;
/* do we have dirty flags anymore? */
for (i = 0; i < map->rec_map->records_count; i++) {
rec = MAIL_INDEX_MAP_IDX(map, i);
if ((rec->flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0) {
map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;
break;
}
}
}
#ifdef DEBUG
void mail_index_map_check(struct mail_index_map *map)
{
const struct mail_index_header *hdr = &map->hdr;
unsigned int i, del = 0, seen = 0;
uint32_t prev_uid = 0;
i_assert(hdr->messages_count <= map->rec_map->records_count);
for (i = 0; i < hdr->messages_count; i++) {
const struct mail_index_record *rec;
rec = MAIL_INDEX_MAP_IDX(map, i);
i_assert(rec->uid > prev_uid);
prev_uid = rec->uid;
if (rec->flags & MAIL_DELETED) {
i_assert(rec->uid >= hdr->first_deleted_uid_lowwater);
del++;
}
if (rec->flags & MAIL_SEEN)
seen++;
else
i_assert(rec->uid >= hdr->first_unseen_uid_lowwater);
}
i_assert(del == hdr->deleted_messages_count);
i_assert(seen == hdr->seen_messages_count);
}
#endif
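/* Sync the map by applying transaction log records starting from its
   current log position. *_map may be replaced. Returns 1 on success, 0 if
   syncing from the log wasn't possible or worthwhile (the index file should
   be re-read instead), and -1 on error. */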
int mail_index_sync_map(struct mail_index_map **_map,
enum mail_index_sync_handler_type type, bool force)
{
struct mail_index_map *map = *_map;
struct mail_index *index = map->index;
struct mail_index_view *view;
struct mail_index_sync_map_ctx sync_map_ctx;
const struct mail_transaction_header *thdr;
const void *tdata;
uint32_t prev_seq;
uoff_t start_offset, prev_offset;
int ret;
bool had_dirty, reset;
i_assert(index->map == map || type == MAIL_INDEX_SYNC_HANDLER_VIEW);
if (index->log->head == NULL) {
i_assert(!force);
return 0;
}
start_offset = type == MAIL_INDEX_SYNC_HANDLER_FILE ?
map->hdr.log_file_tail_offset : map->hdr.log_file_head_offset;
if (!force && (index->flags & MAIL_INDEX_OPEN_FLAG_MMAP_DISABLE) == 0) {
/* see if we'd prefer to reopen the index file instead of
syncing the current map from the transaction log.
don't check this if mmap is disabled, because reopening
index causes sync to get lost. */
uoff_t log_size, index_size;
if (index->fd == -1 &&
index->log->head->hdr.prev_file_seq != 0) {
/* we don't know the index's size, so use the
smallest index size we're willing to read */
index_size = MAIL_INDEX_SYNC_MIN_READ_INDEX_SIZE;
} else {
index_size = map->hdr.header_size +
map->rec_map->records_count *
map->hdr.record_size;
}
/* this isn't necessarily correct currently, but it should be
close enough */
log_size = index->log->head->last_size;
if (log_size > start_offset &&
log_size - start_offset > index_size)
return 0;
}
view = mail_index_view_open_with_map(index, map);
ret = mail_transaction_log_view_set(view->log_view,
map->hdr.log_file_seq, start_offset,
(uint32_t)-1, (uoff_t)-1, &reset);
if (ret <= 0) {
mail_index_view_close(&view);
if (force && ret == 0) {
/* the seq/offset is probably broken */
mail_index_set_error(index, "Index %s: Lost log for "
"seq=%u offset=%"PRIuUOFF_T, index->filepath,
map->hdr.log_file_seq, start_offset);
(void)mail_index_fsck(index);
}
/* can't use it. sync by re-reading index. */
return 0;
}
mail_transaction_log_get_head(index->log, &prev_seq, &prev_offset);
if (prev_seq != map->hdr.log_file_seq ||
prev_offset - map->hdr.log_file_tail_offset >
MAIL_INDEX_MIN_WRITE_BYTES) {
/* we're reading more from log than we would have preferred.
remember that we probably want to rewrite index soon. */
index->index_min_write = TRUE;
}
/* view referenced the map. avoid unnecessary map cloning by
unreferencing the map while view exists. */
map->refcount--;
had_dirty = (map->hdr.flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0;
if (had_dirty)
map->hdr.flags &= ~MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;
if (map->hdr_base != map->hdr_copy_buf->data) {
/* if syncing updates the header, it updates hdr_copy_buf
and updates hdr_base to hdr_copy_buf. so the buffer must
initially contain a valid header or we'll break it when
writing it. */
buffer_reset(map->hdr_copy_buf);
buffer_append(map->hdr_copy_buf, map->hdr_base,
map->hdr.header_size);
map->hdr_base = map->hdr_copy_buf->data;
}
mail_transaction_log_view_get_prev_pos(view->log_view,
&prev_seq, &prev_offset);
mail_index_sync_map_init(&sync_map_ctx, view, type);
if (reset) {
/* Reset the entire index. Leave only indexid and
log_file_seq. */
mail_transaction_log_view_get_prev_pos(view->log_view,
&prev_seq, &prev_offset);
map = mail_index_map_alloc(index);
map->hdr.log_file_seq = prev_seq;
map->hdr.log_file_tail_offset = 0;
mail_index_sync_replace_map(&sync_map_ctx, map);
}
map = NULL;
/* FIXME: when transaction sync lock is removed, we'll need to handle
the case when a transaction is committed while mailbox is being
synced ([synced transactions][new transaction][ext transaction]).
this means int_offset contains [synced] and ext_offset contains
all */
while ((ret = mail_transaction_log_view_next(view->log_view, &thdr,
&tdata)) > 0) {
mail_transaction_log_view_get_prev_pos(view->log_view,
&prev_seq, &prev_offset);
if (LOG_IS_BEFORE(prev_seq, prev_offset,
view->map->hdr.log_file_seq,
view->map->hdr.log_file_head_offset)) {
/* this has been synced already. we're here only to call
expunge handlers and extension update handlers. */
i_assert(type == MAIL_INDEX_SYNC_HANDLER_FILE);
if ((thdr->type & MAIL_TRANSACTION_EXTERNAL) != 0)
continue;
if ((thdr->type & MAIL_TRANSACTION_EXT_MASK) == 0)
continue;
}
/* we'll just skip over broken entries */
(void)mail_index_sync_record(&sync_map_ctx, thdr, tdata);
}
map = view->map;
if (had_dirty)
mail_index_sync_update_hdr_dirty_flag(map);
mail_index_modseq_sync_end(&sync_map_ctx.modseq_ctx);
mail_index_sync_update_log_offset(&sync_map_ctx, view->map, TRUE);
#ifdef DEBUG
mail_index_map_check(map);
#endif
i_assert(map->hdr.indexid == index->indexid || map->hdr.indexid == 0);
/* the transaction log internally tracks the current tail offset.
besides using header updates, it also advances the offset to skip
over the following external transactions, to avoid extra unneeded
log reading. */
i_assert(map->hdr.log_file_seq == index->log->head->hdr.file_seq);
if (map->hdr.log_file_tail_offset < index->log->head->max_tail_offset) {
map->hdr.log_file_tail_offset =
index->log->head->max_tail_offset;
}
buffer_write(map->hdr_copy_buf, 0, &map->hdr, sizeof(map->hdr));
if (!MAIL_INDEX_MAP_IS_IN_MEMORY(map)) {
memcpy(map->rec_map->mmap_base, map->hdr_copy_buf->data,
map->hdr_copy_buf->used);
}
/* restore refcount before closing the view. this is necessary also
if map got cloned, because view closing would otherwise destroy it */
map->refcount++;
mail_index_sync_map_deinit(&sync_map_ctx);
mail_index_view_close(&view);
i_assert(index->map == map || type == MAIL_INDEX_SYNC_HANDLER_VIEW);
if (mail_index_map_check_header(map) <= 0) {
mail_index_set_error(index,
"Synchronization corrupted index header: %s",
index->filepath);
(void)mail_index_fsck(index);
map = index->map;
} else if (sync_map_ctx.errors) {
/* make sure the index looks valid now */
(void)mail_index_fsck(index);
map = index->map;
}
*_map = map;
return ret < 0 ? -1 : 1;
}