/* Copyright (C) 2004 Timo Sirainen */

#include "lib.h"
#include "ioloop.h"
#include "array.h"
#include "mmap-util.h"
#include "mail-index-view-private.h"
#include "mail-index-sync-private.h"
#include "mail-transaction-log.h"
#include "mail-transaction-log-private.h"
d8702d15ee7721ed1fcfc8f00a589970bd6b3598Timo Sirainenstatic void
d8702d15ee7721ed1fcfc8f00a589970bd6b3598Timo Sirainenmail_index_sync_update_log_offset(struct mail_index_sync_map_ctx *ctx,
38505846b6d083e19f0a7d1373761bdda5d9a5a9Timo Sirainen struct mail_index_map *map, bool eol)
38505846b6d083e19f0a7d1373761bdda5d9a5a9Timo Sirainen{
38505846b6d083e19f0a7d1373761bdda5d9a5a9Timo Sirainen uint32_t prev_seq;
73bfdbe28c2ce6d143eadf0bab8ccfbe4cab0faeTimo Sirainen uoff_t prev_offset;
73bfdbe28c2ce6d143eadf0bab8ccfbe4cab0faeTimo Sirainen
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
7c849dbc7be089175c1a83a84ee7249ed695810dTimo Sirainen &prev_seq, &prev_offset);
7c849dbc7be089175c1a83a84ee7249ed695810dTimo Sirainen
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen if (!eol) {
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen if (prev_offset == ctx->ext_intro_end_offset &&
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen prev_seq == ctx->ext_intro_seq) {
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen /* previous transaction was an extension introduction.
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen we probably came here from
d1414c09cf0d58ac983054e2f4e1a1f329272dcfTimo Sirainen mail_index_sync_ext_reset(). if there are any more
d1414c09cf0d58ac983054e2f4e1a1f329272dcfTimo Sirainen views which want to continue syncing it needs the
73bfdbe28c2ce6d143eadf0bab8ccfbe4cab0faeTimo Sirainen intro. so back up a bit more.
d1414c09cf0d58ac983054e2f4e1a1f329272dcfTimo Sirainen
d1414c09cf0d58ac983054e2f4e1a1f329272dcfTimo Sirainen don't do this in case the last transaction in the
8eea67470c1bd8562a62e7445d930bb2079b1a43Timo Sirainen log is the extension intro, so we don't keep trying
8eea67470c1bd8562a62e7445d930bb2079b1a43Timo Sirainen to sync it over and over again. */
8eea67470c1bd8562a62e7445d930bb2079b1a43Timo Sirainen prev_offset = ctx->ext_intro_offset;
8eea67470c1bd8562a62e7445d930bb2079b1a43Timo Sirainen }
8eea67470c1bd8562a62e7445d930bb2079b1a43Timo Sirainen } else {
64e244defe74f513ce94f33d000a048ddbe2ea23Timo Sirainen i_assert(ctx->view->index->log->head->hdr.file_seq == prev_seq);
64e244defe74f513ce94f33d000a048ddbe2ea23Timo Sirainen map->hdr.log_file_seq = prev_seq;
87cc5e9025e7fb6408f0de64c48d2d2897773ba5Timo Sirainen }
f016dec9837e6a41867708e4b89ca5308dedab05Timo Sirainen map->hdr.log_file_head_offset = prev_offset;
939451389b8e0ad529277b84fe51dab38a8cf77cTimo Sirainen}
73bfdbe28c2ce6d143eadf0bab8ccfbe4cab0faeTimo Sirainen
#if 0 // FIXME: can we / do we want to support this?
/* Flush the map's in-memory header (and records) back to its backing
   store: for in-memory maps just refresh hdr_copy_buf, for mmaped maps
   copy the header into the mapping and msync() it. */
static int
mail_index_map_msync(struct mail_index *index, struct mail_index_map *map)
{
	if (MAIL_INDEX_MAP_IS_IN_MEMORY(map)) {
		buffer_write(map->hdr_copy_buf, 0, &map->hdr,
			     sizeof(map->hdr));
		return 0;
	}

	map->mmap_used_size = map->hdr.header_size +
		map->records_count * map->hdr.record_size;

	/* base header first, then whatever extension header data follows
	   the base header */
	memcpy(map->mmap_base, &map->hdr,
	       I_MIN(map->hdr.base_header_size, sizeof(map->hdr)));
	memcpy(PTR_OFFSET(map->mmap_base, map->hdr.base_header_size),
	       CONST_PTR_OFFSET(map->hdr_base, map->hdr.base_header_size),
	       map->hdr.header_size - map->hdr.base_header_size);
	if (msync(map->mmap_base, map->mmap_used_size, MS_SYNC) < 0) {
		mail_index_set_syscall_error(index, "msync()");
		return -1;
	}
	return 0;
}
#endif
05817ffe09295892e1aa5c4a7f91d060e249563cTimo Sirainen
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainenstatic void mail_index_sync_replace_map(struct mail_index_sync_map_ctx *ctx,
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainen struct mail_index_map *map)
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainen{
d1414c09cf0d58ac983054e2f4e1a1f329272dcfTimo Sirainen struct mail_index_view *view = ctx->view;
d1414c09cf0d58ac983054e2f4e1a1f329272dcfTimo Sirainen
d1414c09cf0d58ac983054e2f4e1a1f329272dcfTimo Sirainen i_assert(view->map != map);
d1414c09cf0d58ac983054e2f4e1a1f329272dcfTimo Sirainen
d1414c09cf0d58ac983054e2f4e1a1f329272dcfTimo Sirainen mail_index_sync_update_log_offset(ctx, view->map, FALSE);
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainen#if 0 // FIXME
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen /* we could have already updated some of the records, so make sure
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen that other views (in possibly other processes) will see this map's
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen header in a valid state. */
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen (void)mail_index_map_msync(view->index, view->map);
8eea67470c1bd8562a62e7445d930bb2079b1a43Timo Sirainen#endif
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainen
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen mail_index_unmap(view->index, &view->map);
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen view->map = map;
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen
64e244defe74f513ce94f33d000a048ddbe2ea23Timo Sirainen if (ctx->type != MAIL_INDEX_SYNC_HANDLER_VIEW)
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen view->index->map = map;
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainen}
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainen
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainenvoid mail_index_sync_move_to_private(struct mail_index_sync_map_ctx *ctx)
73bfdbe28c2ce6d143eadf0bab8ccfbe4cab0faeTimo Sirainen{
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainen struct mail_index_map *map = ctx->view->map;
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainen if (map->refcount == 1) {
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen if (!MAIL_INDEX_MAP_IS_IN_MEMORY(map))
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainen mail_index_map_move_to_memory(map);
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen } else {
9137c55411aa39d41c1e705ddc34d5bd26c65021Timo Sirainen map = mail_index_map_clone(map);
66c3f635f2f33905af527d49b27f95322aa7dfa7Timo Sirainen mail_index_sync_replace_map(ctx, map);
66c3f635f2f33905af527d49b27f95322aa7dfa7Timo Sirainen }
acf3b7bf3a8891b118a71c45e6c48d17bc90b259Timo Sirainen}
3b8d05391336c0e4d24c8ddcc962f350409ffbd3Timo Sirainen
3b8d05391336c0e4d24c8ddcc962f350409ffbd3Timo Sirainenstruct mail_index_map *
3b8d05391336c0e4d24c8ddcc962f350409ffbd3Timo Sirainenmail_index_sync_get_atomic_map(struct mail_index_sync_map_ctx *ctx)
2a90d8a14b0e7cc1508814bc87d3dfa598ef46a8Timo Sirainen{
mail_index_sync_move_to_private(ctx);
ctx->view->map->write_atomic = TRUE;
return ctx->view->map;
}
/* Adjust the header's recent/seen/deleted message counters (and the
   matching UID lowwater marks) after a record's flags changed from
   old_flags to new_flags. Returns 0 on success, -1 with *error_r set if a
   counter would become inconsistent with messages_count. */
static int
mail_index_header_update_counts(struct mail_index_header *hdr,
				uint8_t old_flags, uint8_t new_flags,
				const char **error_r)
{
	const uint8_t changed = old_flags ^ new_flags;

	if ((changed & MAIL_RECENT) != 0) {
		/* recent-flag flipped */
		if ((new_flags & MAIL_RECENT) != 0) {
			hdr->recent_messages_count++;
			if (hdr->recent_messages_count > hdr->messages_count) {
				*error_r = "Recent counter wrong";
				return -1;
			}
		} else {
			if (hdr->recent_messages_count == 0 ||
			    hdr->recent_messages_count > hdr->messages_count) {
				*error_r = "Recent counter wrong";
				return -1;
			}
			if (--hdr->recent_messages_count == 0)
				hdr->first_recent_uid_lowwater = hdr->next_uid;
		}
	}

	if ((changed & MAIL_SEEN) != 0) {
		/* seen-flag flipped */
		if ((new_flags & MAIL_SEEN) == 0) {
			if (hdr->seen_messages_count == 0) {
				*error_r = "Seen counter wrong";
				return -1;
			}
			hdr->seen_messages_count--;
		} else {
			if (hdr->seen_messages_count >= hdr->messages_count) {
				*error_r = "Seen counter wrong";
				return -1;
			}
			if (++hdr->seen_messages_count == hdr->messages_count)
				hdr->first_unseen_uid_lowwater = hdr->next_uid;
		}
	}

	if ((changed & MAIL_DELETED) != 0) {
		/* deleted-flag flipped */
		if ((new_flags & MAIL_DELETED) != 0) {
			hdr->deleted_messages_count++;
			if (hdr->deleted_messages_count > hdr->messages_count) {
				*error_r = "Deleted counter wrong";
				return -1;
			}
		} else {
			if (hdr->deleted_messages_count == 0 ||
			    hdr->deleted_messages_count > hdr->messages_count) {
				*error_r = "Deleted counter wrong";
				return -1;
			}
			if (--hdr->deleted_messages_count == 0)
				hdr->first_deleted_uid_lowwater = hdr->next_uid;
		}
	}
	return 0;
}
/* Wrapper around mail_index_header_update_counts() that operates on the
   view's map header and turns a counter inconsistency into an
   index-corrupted error. */
static void
mail_index_sync_header_update_counts(struct mail_index_sync_map_ctx *ctx,
				     uint8_t old_flags, uint8_t new_flags)
{
	struct mail_index_header *hdr = &ctx->view->map->hdr;
	const char *reason;

	if (mail_index_header_update_counts(hdr, old_flags, new_flags,
					    &reason) < 0)
		mail_index_sync_set_corrupted(ctx, "%s", reason);
}
static void
mail_index_header_update_lowwaters(struct mail_index_header *hdr,
const struct mail_index_record *rec)
{
if ((rec->flags & MAIL_RECENT) != 0 &&
rec->uid < hdr->first_recent_uid_lowwater)
hdr->first_recent_uid_lowwater = rec->uid;
if ((rec->flags & MAIL_SEEN) == 0 &&
rec->uid < hdr->first_unseen_uid_lowwater)
hdr->first_unseen_uid_lowwater = rec->uid;
if ((rec->flags & MAIL_DELETED) != 0 &&
rec->uid < hdr->first_deleted_uid_lowwater)
hdr->first_deleted_uid_lowwater = rec->uid;
}
/* Call every registered expunge handler for each record in [seq1, seq2].
   Handlers are invoked only when syncing the index file itself. Returns 0
   on success, -1 if a handler failed.

   Fix: the old code reused the seq1 parameter itself as the inner loop
   counter, so after the first handler finished, seq1 was already past
   seq2 and every remaining handler was silently skipped. Each handler now
   iterates with its own sequence counter. */
static int
sync_expunge_call_handlers(struct mail_index_sync_map_ctx *ctx,
			   uint32_t seq1, uint32_t seq2)
{
	const struct mail_index_expunge_handler *eh;
	struct mail_index_record *rec;
	uint32_t seq;
	unsigned int i, count;

	/* call expunge handlers only when syncing index file */
	if (ctx->type != MAIL_INDEX_SYNC_HANDLER_FILE)
		return 0;

	/* expunge handlers are set up lazily on first use */
	if (!ctx->expunge_handlers_set)
		mail_index_sync_init_expunge_handlers(ctx);

	if (!array_is_created(&ctx->expunge_handlers))
		return 0;

	eh = array_get(&ctx->expunge_handlers, &count);
	for (i = 0; i < count; i++, eh++) {
		for (seq = seq1; seq <= seq2; seq++) {
			rec = MAIL_INDEX_MAP_IDX(ctx->view->map, seq-1);
			if (eh->handler(ctx, seq,
					PTR_OFFSET(rec, eh->record_offset),
					eh->sync_context, eh->context) < 0)
				return -1;
		}
	}
	return 0;
}
/* Remove the records covered by the expunge transactions from the view's
   map, updating flag counters and running expunge handlers first.
   Returns 1 on success, -1 on error (UID range lookup or handler
   failure). */
static int
sync_expunge(const struct mail_transaction_expunge *e, unsigned int count,
	     struct mail_index_sync_map_ctx *ctx)
{
	struct mail_index_map *map = ctx->view->map;
	struct mail_index_record *rec;
	uint32_t seq_count, seq, seq1, seq2;
	unsigned int i;

	/* we don't ever want to move around data inside a memory mapped file.
	   it gets corrupted too easily if we crash in the middle. */

	// FIXME: it's necessary for current view code that we get atomic
	// map even if these messages are already expunged, because the
	// view code doesn't check that and our index_int_offset goes wrong
	map = mail_index_sync_get_atomic_map(ctx);

	for (i = 0; i < count; i++, e++) {
		/* translate the UID range to a sequence range in this map */
		if (mail_index_lookup_uid_range(ctx->view, e->uid1, e->uid2,
						&seq1, &seq2) < 0)
			return -1;

		if (seq1 == 0) {
			/* everything expunged already */
			continue;
		}

		/* drop the expunged records' flags from the header counters */
		for (seq = seq1; seq <= seq2; seq++) {
			rec = MAIL_INDEX_MAP_IDX(map, seq-1);
			mail_index_sync_header_update_counts(ctx,
							     rec->flags, 0);
		}

		if (sync_expunge_call_handlers(ctx, seq1, seq2) < 0)
			return -1;

		/* compact the record array over the expunged range.
		   @UNSAFE */
		memmove(MAIL_INDEX_MAP_IDX(map, seq1-1),
			MAIL_INDEX_MAP_IDX(map, seq2),
			(map->records_count - seq2) * map->hdr.record_size);

		seq_count = seq2 - seq1 + 1;
		map->records_count -= seq_count;
		map->hdr.messages_count -= seq_count;
		/* lookup_uid_range() relies on this */
		ctx->view->hdr.messages_count -= seq_count;
	}
	return 1;
}
/* Widen the map's dirty sequence range [write_seq_first, write_seq_last]
   to cover [seq1, seq2], so the changed records get written out later.
   write_seq_first == 0 means no range has been recorded yet. */
void mail_index_sync_write_seq_update(struct mail_index_sync_map_ctx *ctx,
				      uint32_t seq1, uint32_t seq2)
{
	struct mail_index_map *map = ctx->view->map;

	i_assert(MAIL_INDEX_MAP_IS_IN_MEMORY(map));

	if (map->write_seq_first > seq1 ||
	    map->write_seq_first == 0)
		map->write_seq_first = seq1;
	if (seq2 > map->write_seq_last)
		map->write_seq_last = seq2;
}
/* Append one new record to the end of the view's map, updating header
   counters. Returns 1 on success, -1 if the record's UID is below the
   map's next_uid (corruption). */
static int sync_append(const struct mail_index_record *rec,
		       struct mail_index_sync_map_ctx *ctx)
{
	struct mail_index_view *view = ctx->view;
	struct mail_index_map *map = view->map;
	size_t offset;
	void *p;

	if (rec->uid < map->hdr.next_uid) {
		mail_index_sync_set_corrupted(ctx,
			"Append with UID %u, but next_uid = %u",
			rec->uid, map->hdr.next_uid);
		return -1;
	}

	/* move to memory. the mapping is written when unlocking so we don't
	   waste time re-mmap()ing multiple times or waste space growing index
	   file too large */
	mail_index_sync_move_to_private(ctx);
	map = view->map;

	/* don't rely on buffer->used being at the correct position.
	   at least expunges can move it */
	offset = map->records_count * map->hdr.record_size;
	p = buffer_get_space_unsafe(map->buffer, offset,
				    map->hdr.record_size);
	map->records = buffer_get_modifiable_data(map->buffer, NULL);

	/* copy the base record, zero-fill any extension record space */
	memcpy(p, rec, sizeof(*rec));
	memset(PTR_OFFSET(p, sizeof(*rec)), 0,
	       map->hdr.record_size - sizeof(*rec));

	map->records_count++;
	map->hdr.messages_count++;
	map->hdr.next_uid = rec->uid + 1;
	mail_index_sync_write_seq_update(ctx, map->hdr.messages_count,
					 map->hdr.messages_count);
	map->write_base_header = TRUE;

	if ((rec->flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0)
		map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;

	mail_index_header_update_lowwaters(&map->hdr, rec);
	mail_index_sync_header_update_counts(ctx, 0, rec->flags);
	return 1;
}
/* Apply a flag add/remove transaction to all records in the UID range.
   Returns 1 on success (also when the range is already expunged), -1 if
   the UID range lookup fails. */
static int sync_flag_update(const struct mail_transaction_flag_update *u,
			    struct mail_index_sync_map_ctx *ctx)
{
	struct mail_index_view *view = ctx->view;
	struct mail_index_header *hdr;
	struct mail_index_record *rec;
	uint8_t flag_mask, old_flags;
	uint32_t i, seq1, seq2;

	if (mail_index_lookup_uid_range(view, u->uid1, u->uid2,
					&seq1, &seq2) < 0)
		return -1;

	if (seq1 == 0) {
		/* all of the messages are already expunged */
		return 1;
	}

	mail_index_sync_move_to_private(ctx);
	mail_index_sync_write_seq_update(ctx, seq1, seq2);
	view->map->write_base_header = TRUE;

	hdr = &view->map->hdr;
	if ((u->add_flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0)
		hdr->flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;

	flag_mask = ~u->remove_flags;

	if (((u->add_flags | u->remove_flags) &
	     (MAIL_SEEN | MAIL_DELETED | MAIL_RECENT)) != 0) {
		/* counted/lowwatered flags are touched - keep the header
		   counters in sync while updating */
		for (i = seq1-1; i < seq2; i++) {
			rec = MAIL_INDEX_MAP_IDX(view->map, i);
			old_flags = rec->flags;
			rec->flags = (rec->flags & flag_mask) | u->add_flags;

			mail_index_header_update_lowwaters(hdr, rec);
			mail_index_sync_header_update_counts(ctx, old_flags,
							     rec->flags);
		}
	} else {
		/* plain flag change - no counter updates needed */
		for (i = seq1-1; i < seq2; i++) {
			rec = MAIL_INDEX_MAP_IDX(view->map, i);
			rec->flags = (rec->flags & flag_mask) | u->add_flags;
		}
	}
	return 1;
}
/* Apply a base-header update transaction: write the data into the map's
   header copy buffer and mirror it into the parsed map->hdr. Returns 1 on
   success, -1 if the update falls outside the base header. */
static int sync_header_update(const struct mail_transaction_header_update *u,
			      struct mail_index_sync_map_ctx *ctx)
{
	struct mail_index_map *map = ctx->view->map;
	uint32_t orig_log_file_tail_offset = map->hdr.log_file_tail_offset;
	uint32_t end;

	if (u->offset >= map->hdr.base_header_size ||
	    u->offset + u->size > map->hdr.base_header_size) {
		mail_index_sync_set_corrupted(ctx,
			"Header update outside range: %u + %u > %u",
			u->offset, u->size, map->hdr.base_header_size);
		return -1;
	}

	buffer_write(map->hdr_copy_buf, u->offset, u + 1, u->size);
	map->hdr_base = map->hdr_copy_buf->data;
	map->write_base_header = TRUE;

	/* mirror the change into the parsed header struct, clipping to its
	   size. @UNSAFE */
	end = u->offset + u->size;
	if (end <= sizeof(map->hdr)) {
		memcpy(PTR_OFFSET(&map->hdr, u->offset),
		       u + 1, u->size);
	} else if (u->offset < sizeof(map->hdr)) {
		memcpy(PTR_OFFSET(&map->hdr, u->offset),
		       u + 1, sizeof(map->hdr) - u->offset);
	}

	/* the tail offset updates are intended for internal transaction
	   log handling. we'll update the offset in the header only when
	   the sync is finished. */
	map->hdr.log_file_tail_offset = orig_log_file_tail_offset;
	return 1;
}
/* Apply one transaction log record to the view's map, dispatching on the
   transaction type. Returns the handler's result: 1 on success, 0 if a
   handler stopped, -1 if the record was found to be corrupted (broken
   records are skipped by the caller). */
int mail_index_sync_record(struct mail_index_sync_map_ctx *ctx,
			   const struct mail_transaction_header *hdr,
			   const void *data)
{
	int ret = 0;

	t_push();
	switch (hdr->type & MAIL_TRANSACTION_TYPE_MASK) {
	case MAIL_TRANSACTION_APPEND: {
		const struct mail_index_record *rec, *end;

		/* data is an array of base records */
		end = CONST_PTR_OFFSET(data, hdr->size);
		for (rec = data; rec < end; rec++) {
			ret = sync_append(rec, ctx);
			if (ret <= 0)
				break;
		}
		break;
	}
	case MAIL_TRANSACTION_EXPUNGE:
	case MAIL_TRANSACTION_EXPUNGE|MAIL_TRANSACTION_EXPUNGE_PROT: {
		const struct mail_transaction_expunge *rec = data, *end;

		if ((hdr->type & MAIL_TRANSACTION_EXTERNAL) == 0) {
			/* this is simply a request for expunge */
			break;
		}
		end = CONST_PTR_OFFSET(data, hdr->size);
		ret = sync_expunge(rec, end - rec, ctx);
		break;
	}
	case MAIL_TRANSACTION_FLAG_UPDATE: {
		const struct mail_transaction_flag_update *rec, *end;

		end = CONST_PTR_OFFSET(data, hdr->size);
		for (rec = data; rec < end; rec++) {
			ret = sync_flag_update(rec, ctx);
			if (ret <= 0)
				break;
		}
		break;
	}
	case MAIL_TRANSACTION_HEADER_UPDATE: {
		const struct mail_transaction_header_update *rec;
		unsigned int i;

		/* variable-sized records, each padded to 32 bits */
		for (i = 0; i < hdr->size; ) {
			rec = CONST_PTR_OFFSET(data, i);
			ret = sync_header_update(rec, ctx);
			if (ret <= 0)
				break;

			i += sizeof(*rec) + rec->size;
			if ((i % 4) != 0)
				i += 4 - (i % 4);
		}
		break;
	}
	case MAIL_TRANSACTION_EXT_INTRO: {
		const struct mail_transaction_ext_intro *rec = data;
		unsigned int i;
		uint32_t prev_seq;
		uoff_t prev_offset;

		/* remember where this intro lives in the log, so
		   mail_index_sync_update_log_offset() can back up to it */
		mail_transaction_log_view_get_prev_pos(ctx->view->log_view,
						       &prev_seq, &prev_offset);
		ctx->ext_intro_seq = prev_seq;
		ctx->ext_intro_offset = prev_offset;
		ctx->ext_intro_end_offset =
			prev_offset + hdr->size + sizeof(*hdr);

		for (i = 0; i < hdr->size; ) {
			if (i + sizeof(*rec) > hdr->size) {
				/* should be just extra padding */
				break;
			}

			rec = CONST_PTR_OFFSET(data, i);
			/* name_size is untrusted - validate before use */
			if (i + sizeof(*rec) + rec->name_size > hdr->size) {
				mail_index_sync_set_corrupted(ctx,
					"ext intro: name_size too large");
				ret = -1;
				break;
			}

			ret = mail_index_sync_ext_intro(ctx, rec);
			if (ret <= 0)
				break;

			i += sizeof(*rec) + rec->name_size;
			if ((i % 4) != 0)
				i += 4 - (i % 4);
		}
		break;
	}
	case MAIL_TRANSACTION_EXT_RESET: {
		const struct mail_transaction_ext_reset *rec = data;

		/* fixed-size record */
		if (hdr->size != sizeof(*rec)) {
			mail_index_sync_set_corrupted(ctx,
				"ext reset: invalid record size");
			ret = -1;
			break;
		}
		ret = mail_index_sync_ext_reset(ctx, rec);
		break;
	}
	case MAIL_TRANSACTION_EXT_HDR_UPDATE: {
		const struct mail_transaction_ext_hdr_update *rec = data;
		unsigned int i;

		/* variable-sized records, each padded to 32 bits */
		for (i = 0; i < hdr->size; ) {
			rec = CONST_PTR_OFFSET(data, i);

			if (i + sizeof(*rec) > hdr->size ||
			    i + sizeof(*rec) + rec->size > hdr->size) {
				mail_index_sync_set_corrupted(ctx,
					"ext hdr update: invalid record size");
				ret = -1;
				break;
			}

			ret = mail_index_sync_ext_hdr_update(ctx, rec);
			if (ret <= 0)
				break;

			i += sizeof(*rec) + rec->size;
			if ((i % 4) != 0)
				i += 4 - (i % 4);
		}
		break;
	}
	case MAIL_TRANSACTION_EXT_REC_UPDATE: {
		const struct mail_transaction_ext_rec_update *rec;
		const struct mail_index_ext *ext;
		unsigned int i, record_size;

		/* an ext intro must have set the current extension */
		if (ctx->cur_ext_id == (uint32_t)-1) {
			mail_index_sync_set_corrupted(ctx,
				"Extension record updated "
				"without intro prefix");
			ret = -1;
			break;
		}

		if (ctx->cur_ext_ignore) {
			ret = 1;
			break;
		}

		ext = array_idx(&ctx->view->map->extensions, ctx->cur_ext_id);
		/* the record is padded to 32bits in the transaction log */
		record_size = (sizeof(*rec) + ext->record_size + 3) & ~3;

		for (i = 0; i < hdr->size; i += record_size) {
			rec = CONST_PTR_OFFSET(data, i);

			if (i + record_size > hdr->size) {
				mail_index_sync_set_corrupted(ctx,
					"ext rec update: invalid record size");
				ret = -1;
				break;
			}

			ret = mail_index_sync_ext_rec_update(ctx, rec);
			if (ret <= 0)
				break;
		}
		break;
	}
	case MAIL_TRANSACTION_KEYWORD_UPDATE: {
		const struct mail_transaction_keyword_update *rec = data;

		ret = mail_index_sync_keywords(ctx, hdr, rec);
		break;
	}
	case MAIL_TRANSACTION_KEYWORD_RESET: {
		const struct mail_transaction_keyword_reset *rec = data;

		ret = mail_index_sync_keywords_reset(ctx, hdr, rec);
		break;
	}
	default:
		i_unreached();
	}
	t_pop();

	/* header must stay consistent with the actual record array */
	i_assert(ctx->view->map->records_count ==
		 ctx->view->map->hdr.messages_count);

	ctx->view->hdr = ctx->view->map->hdr;
	return ret;
}
void mail_index_sync_map_init(struct mail_index_sync_map_ctx *sync_map_ctx,
struct mail_index_view *view,
enum mail_index_sync_handler_type type)
{
memset(sync_map_ctx, 0, sizeof(*sync_map_ctx));
sync_map_ctx->view = view;
sync_map_ctx->cur_ext_id = (uint32_t)-1;
sync_map_ctx->type = type;
/* make sure we re-read it in case it has changed */
sync_map_ctx->view->map->keywords_read = FALSE;
mail_index_sync_init_handlers(sync_map_ctx);
}
/* Tear down a sync context initialized with mail_index_sync_map_init(). */
void mail_index_sync_map_deinit(struct mail_index_sync_map_ctx *sync_map_ctx)
{
	/* expunge handlers are initialized lazily; deinit only if used */
	if (sync_map_ctx->expunge_handlers_used)
		mail_index_sync_deinit_expunge_handlers(sync_map_ctx);
	mail_index_sync_deinit_handlers(sync_map_ctx);
}
/* Clear the MAIL_RECENT flag from every record in the view's map and
   reset the header's recent counter and lowwater mark. */
static void mail_index_sync_remove_recent(struct mail_index_sync_map_ctx *ctx)
{
	struct mail_index_map *map = ctx->view->map;
	struct mail_index_record *rec;
	unsigned int i;

	for (i = 0; i < map->records_count; i++) {
		rec = MAIL_INDEX_MAP_IDX(map, i);
		if ((rec->flags & MAIL_RECENT) == 0)
			continue;

		rec->flags &= ~MAIL_RECENT;
		/* mark the changed record dirty for writing */
		mail_index_sync_write_seq_update(ctx, i + 1, i + 1);
	}

	map->hdr.recent_messages_count = 0;
	map->hdr.first_recent_uid_lowwater = map->hdr.next_uid;
	map->write_base_header = TRUE;
}
/* Re-set MAIL_INDEX_HDR_FLAG_HAVE_DIRTY in the header if any record still
   carries the dirty flag. Does nothing if the header flag is already
   set. */
static void mail_index_sync_update_hdr_dirty_flag(struct mail_index_map *map)
{
	unsigned int i;

	if ((map->hdr.flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0)
		return;

	/* do we have dirty flags anymore? */
	for (i = 0; i < map->records_count; i++) {
		const struct mail_index_record *rec =
			MAIL_INDEX_MAP_IDX(map, i);

		if ((rec->flags & MAIL_INDEX_MAIL_FLAG_DIRTY) != 0) {
			map->hdr.flags |= MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;
			return;
		}
	}
}
/* Bring *_map up to date by replaying transaction log records. Returns 1
   if the map was synced (possibly replaced - *_map is updated), 0 if the
   caller should re-read the index file instead, -1 on error. With
   force=FALSE the log is only used when it looks cheaper than
   re-reading the index. */
int mail_index_sync_map(struct mail_index *index, struct mail_index_map **_map,
			enum mail_index_sync_handler_type type, bool force)
{
	struct mail_index_map *map = *_map;
	struct mail_index_view *view;
	struct mail_index_sync_map_ctx sync_map_ctx;
	const struct mail_transaction_header *thdr;
	const void *tdata;
	uint32_t prev_seq, mailbox_sync_seq;
	uoff_t start_offset, prev_offset, mailbox_sync_offset;
	int ret;
	bool had_dirty;

	i_assert(index->map == map || type == MAIL_INDEX_SYNC_HANDLER_VIEW);

	if (!force) {
		/* see if we'd prefer to reopen the index file instead of
		   syncing the current map from the transaction log */
		uoff_t log_size, index_size;

		if (index->log->head == NULL || index->fd == -1)
			return 0;

		index_size = map->hdr.header_size +
			map->records_count * map->hdr.record_size;

		/* this isn't necessary correct currently, but it should be
		   close enough */
		log_size = index->log->head->last_size;
		if (log_size > map->hdr.log_file_tail_offset &&
		    log_size - map->hdr.log_file_tail_offset > index_size)
			return 0;
	}

	/* when syncing the index file itself, start from the tail offset;
	   for views continue from the head offset */
	start_offset = type == MAIL_INDEX_SYNC_HANDLER_FILE ?
		map->hdr.log_file_tail_offset : map->hdr.log_file_head_offset;
	view = mail_index_view_open_with_map(index, map);
	ret = mail_transaction_log_view_set(view->log_view,
					    map->hdr.log_file_seq, start_offset,
					    (uint32_t)-1, (uoff_t)-1);
	if (ret <= 0) {
		if (force && ret == 0) {
			/* the seq/offset is probably broken */
			(void)mail_index_fsck(index);
		}
		/* can't use it. sync by re-reading index. */
		mail_index_view_close(&view);
		return 0;
	}
	mail_transaction_log_get_mailbox_sync_pos(index->log, &mailbox_sync_seq,
						  &mailbox_sync_offset);

	/* view referenced the map. avoid unnecessary map cloning by
	   unreferencing the map while view exists. */
	map->refcount--;

	had_dirty = (map->hdr.flags & MAIL_INDEX_HDR_FLAG_HAVE_DIRTY) != 0;
	if (had_dirty) {
		/* cleared now; re-checked after syncing via
		   mail_index_sync_update_hdr_dirty_flag() */
		map->hdr.flags &= ~MAIL_INDEX_HDR_FLAG_HAVE_DIRTY;
		map->write_base_header = TRUE;
	}

	if (map->hdr_base != map->hdr_copy_buf->data) {
		/* if syncing updates the header, it updates hdr_copy_buf
		   and updates hdr_base to hdr_copy_buf. so the buffer must
		   initially contain a valid header or we'll break it when
		   writing it. */
		buffer_reset(map->hdr_copy_buf);
		buffer_append(map->hdr_copy_buf, map->hdr_base,
			      map->hdr.header_size);
		map->hdr_base = map->hdr_copy_buf->data;
	}

	mail_index_sync_map_init(&sync_map_ctx, view, type);

	/* syncing may replace view->map; don't use the stale pointer */
	map = NULL;

	/* FIXME: when transaction sync lock is removed, we'll need to handle
	   the case when a transaction is committed while mailbox is being
	   synced ([synced transactions][new transaction][ext transaction]).
	   this means int_offset contains [synced] and ext_offset contains
	   all */
	while ((ret = mail_transaction_log_view_next(view->log_view, &thdr,
						     &tdata)) > 0) {
		mail_transaction_log_view_get_prev_pos(view->log_view,
						       &prev_seq, &prev_offset);
		if (LOG_IS_BEFORE(prev_seq, prev_offset,
				  view->map->hdr.log_file_seq,
				  view->map->hdr.log_file_head_offset)) {
			/* this has been synced already. we're here only to call
			   expunge handlers and extension update handlers. */
			i_assert(type == MAIL_INDEX_SYNC_HANDLER_FILE);

			if ((thdr->type & MAIL_TRANSACTION_EXTERNAL) != 0)
				continue;
			if ((thdr->type & MAIL_TRANSACTION_EXT_MASK) == 0)
				continue;
		}

		/* we'll just skip over broken entries */
		(void)mail_index_sync_record(&sync_map_ctx, thdr, tdata);
	}
	map = view->map;

	if (had_dirty)
		mail_index_sync_update_hdr_dirty_flag(map);

	mail_index_sync_update_log_offset(&sync_map_ctx, view->map, TRUE);

	/* transaction log tracks internally the current tail offset.
	   besides using header updates, it also updates the offset to skip
	   over following external transactions to avoid extra unneeded log
	   reading. */
	map->hdr.log_file_tail_offset = index->log->head->max_tail_offset;

	if (map->write_base_header) {
		i_assert(MAIL_INDEX_MAP_IS_IN_MEMORY(map));
		buffer_write(map->hdr_copy_buf, 0, &map->hdr, sizeof(map->hdr));
	}
	/*FIXME:if (mail_index_map_msync(index, map) < 0)
		ret = -1;*/

	/* restore refcount before closing the view. this is necessary also
	   if map got cloned, because view closing would otherwise destroy it */
	map->refcount++;
	mail_index_view_close(&view);
	mail_index_sync_map_deinit(&sync_map_ctx);

	i_assert(index->map == map || type == MAIL_INDEX_SYNC_HANDLER_VIEW);

	*_map = map;
	return ret < 0 ? -1 : 1;
}