/* mail-transaction-log-append.c revision 6f90ce01176bd920609d9d12e6419b9ba27c1359 */
/* Copyright (c) 2003-2009 Dovecot authors, see the included COPYING file */
#include "lib.h"
#include "array.h"
#include "buffer.h"
#include "write-full.h"
#include "mail-index-private.h"
#include "mail-index-view-private.h"
#include "mail-index-modseq.h"
#include "mail-index-transaction-private.h"
#include "mail-transaction-log-private.h"
/* State carried through one transaction-log append operation.
   NOTE(review): this file appears to be a partial/garbled extraction —
   the upstream struct declares additional fields (output buffer, first
   append size, offsets) that later fragments reference (e.g.
   ctx->first_append_size); restore from upstream before compiling. */
struct log_append_context {
struct mail_transaction_log_file *file;
struct mail_index_transaction *trans;
bool sync_includes_this;
};
/* NOTE(review): fragment of a static append helper — the first line(s) of
   the signature and most of the body are missing from this extraction.
   Code below is kept verbatim; it is not compilable as-is. */
enum mail_transaction_type type)
{
struct mail_transaction_header hdr;
/* presumably an early-return guard (e.g. empty buffer) whose condition
   line is missing — TODO confirm against upstream */
return;
if (type == MAIL_TRANSACTION_EXPUNGE)
/* update the size */
ctx->first_append_size == 0) {
/* size will be written later once everything
is in disk */
} else {
}
}
/* NOTE(review): fragment — looks like the fallback path that moves the
   log to in-memory indexes after a write failure; the signature and most
   statements are missing. The ftruncate() error string suggests the
   partial write is truncated first so log syncing doesn't break. */
{
/* first we need to truncate this latest write so that log syncing
doesn't break */
"ftruncate()");
}
return -1;
file->sync_offset);
return 0;
}
/* NOTE(review): fragment of the buffer-flush routine. The visible error
   strings show it pwrite_full()s the transaction body, then rewrites the
   first record's size to make the transaction visible, optionally
   fdatasync()s, and on any failure falls back via
   log_buffer_move_to_memory(). Signature and most statements missing. */
{
}
return 0;
}
file->sync_offset) < 0) {
/* write failure, fallback to in-memory indexes. */
"pwrite_full()");
return log_buffer_move_to_memory(ctx);
}
/* now that the whole transaction has been written, rewrite the first
record's size so the transaction becomes visible */
"pwrite_full()");
return log_buffer_move_to_memory(ctx);
}
"fdatasync()");
return log_buffer_move_to_memory(ctx);
}
}
/* FIXME: when we're relying on O_APPEND and someone else wrote a
transaction, we'll need to wait for it to commit its transaction.
if it crashes before doing that, we'll need to overwrite it with
a dummy record */
return 0;
}
/* NOTE(review): fragment — builds a buffer of
   mail_transaction_header_update records, using a small state machine
   (state==0 -> start a run, state>0 -> flush the accumulated update).
   The signature line after "static const buffer_t *" and the loop header
   are missing from this extraction. */
static const buffer_t *
{
struct mail_transaction_header_update u;
int state = 0;
memset(&u, 0, sizeof(u));
if (state == 0) {
state++;
}
} else {
if (state > 0) {
/* flush the run accumulated so far */
buffer_append(buf, &u, sizeof(u));
state = 0;
}
}
}
return buf;
}
/* NOTE(review): fragment of ext_reset_update_atomic() — validates an
   extension's reset_id against the expected one and either drops the
   update (mail_index_ext_set_reset_id(t, ext_id, 0)) or bumps a zero
   reset_id to 1. Parameter list and several statements are missing. */
static void
ext_reset_update_atomic(struct mail_index_transaction *t,
{
const struct mail_index_ext *map_ext;
struct mail_transaction_ext_reset *reset;
/* new extension */
reset_id = 1;
} else {
}
if (reset_id != expected_reset_id) {
/* ignore this extension update */
mail_index_ext_set_reset_id(t, ext_id, 0);
return;
}
/* reset_id=0 is reserved; advance to the first usable id */
if (reset_id == 0)
reset_id++;
/* reseting existing data is optional */
if (array_is_created(&t->ext_resets)) {
}
}
/* NOTE(review): fragment — iterates t->ext_reset_atomic and (presumably)
   calls ext_reset_update_atomic() for each nonzero expected reset id.
   Signature, loop header, and the call itself are missing. */
static void
{
const uint32_t *expected_reset_ids;
/* nothing to do unless atomic resets were requested */
if (!array_is_created(&t->ext_reset_atomic))
return;
if (expected_reset_ids[ext_id] != 0) {
}
}
}
/* NOTE(review): fragment — appears to build and append an extension
   intro record, preferring the resize struct when the extension is being
   resized, and choosing a reset_id (existing map reset_id vs. 0 for a
   new extension). Signature and most statements are missing. */
{
const struct mail_index_registered_ext *rext;
unsigned int count;
if (t->reset ||
/* new extension */
}
if (!array_is_created(&t->ext_resizes)) {
count = 0;
} else {
}
/* we're resizing the extension. use the resize struct. */
} else {
/* generate a new intro structure */
}
if (reset_id != 0) {
/* we're going to reset this extension in this transaction */
/* use the existing reset_id */
const struct mail_index_ext *map_ext =
} else {
/* new extension, reset_id defaults to 0 */
}
/* modseq tracking started */
}
}
/* NOTE(review): fragment — serializes extension-header updates into
   mail_transaction_ext_hdr_update records, coalescing contiguous changed
   bytes into runs (started/not-started state machine). Parameter list
   and loop header are missing. */
static void
const struct mail_index_transaction_ext_hdr_update *hdr)
{
struct mail_transaction_ext_hdr_update u;
memset(&u, 0, sizeof(u));
if (!started) {
}
} else {
if (started) {
/* flush the finished run */
buffer_append(buf, &u, sizeof(u));
}
}
}
}
/* NOTE(review): fragment of the ext-intros walker — gathers counts from
   t->ext_resizes / ext_reset_ids / ext_resets / ext_hdr_updates, then per
   extension emits an intro (and, when new_reset_id != 0, a reset record
   immediately after the intro). Many lines are missing; array_get()
   calls and the per-extension loop header are absent. */
static void
{
const struct mail_transaction_ext_intro *resize;
const struct mail_index_transaction_ext_hdr_update *hdrs;
struct mail_transaction_ext_reset ext_reset;
unsigned int resize_count, ext_count = 0;
const struct mail_transaction_ext_reset *reset;
if (!array_is_created(&t->ext_resizes)) {
resize_count = 0;
} else {
if (ext_count < resize_count)
}
if (!array_is_created(&t->ext_reset_ids)) {
reset_id_count = 0;
} else {
}
if (!array_is_created(&t->ext_resets)) {
reset_count = 0;
} else {
if (ext_count < reset_count)
}
if (!array_is_created(&t->ext_hdr_updates)) {
hdrs_count = 0;
} else {
if (ext_count < hdrs_count)
}
if (ext_id < reset_count)
else
ext_reset.new_reset_id = 0;
ext_reset.new_reset_id != 0 ||
if (ext_reset.new_reset_id != 0) {
/* we're going to reset this extension
immediately after the intro */
reset_id = 0;
} else {
}
}
if (ext_reset.new_reset_id != 0) {
}
T_BEGIN {
} T_END;
}
}
}
/* NOTE(review): fragment — per-extension record appender; reads
   t->ext_reset_ids when present and skips extensions via "continue".
   The first signature line and the loop body are missing. */
enum mail_transaction_type type)
{
if (!array_is_created(&t->ext_reset_ids)) {
reset_id_count = 0;
} else {
}
continue;
}
}
/* NOTE(review): fragment — only the "static void" storage class, an
   opening and a closing brace survive; the name, parameters, and entire
   body are missing from this extraction. */
static void
{
}
/* NOTE(review): fragment — appends keyword add/remove updates and
   accumulates a mail_index_sync_type change mask that it returns. The
   signature line, array_get() calls, and the MODIFY_ADD/MODIFY_REMOVE
   call sites are incomplete. */
static enum mail_index_sync_type
{
const struct mail_index_transaction_keyword_update *updates;
const char *const *keywords;
enum mail_index_sync_type change_mask = 0;
unsigned int i, count, keywords_count;
for (i = 0; i < count; i++) {
MODIFY_ADD, keywords[i],
}
MODIFY_REMOVE, keywords[i],
}
}
return change_mask;
}
/* NOTE(review): fragment — writes the log tail offset as a header-update
   record, but only when committing the sync transaction (other
   transactions might not know the latest tail offset and could shrink
   it). Signature and the offset computation lines are missing. */
{
struct mail_transaction_header_update *u;
/* Update the tail offsets only when committing the sync transaction.
Other transactions may not know the latest tail offset and might
end up shrinking it. (Alternatively the shrinking tail offsets could
just be ignored, which would probably work fine too.) */
return;
/* FIXME: when we remove exclusive log locking, we
can't rely on this. then write non-changed offset + check
real offset + rewrite the new offset if other transactions
weren't written in the middle */
sizeof(struct mail_transaction_header) +
sizeof(*u) + sizeof(offset);
}
return;
sizeof(*u) + sizeof(offset));
u = buffer_append_space_unsafe(buf, sizeof(*u));
}
/* True if the transaction carries any loggable changes.
   NOTE(review): macro is truncated — the continuation line(s) after the
   trailing backslash are missing from this extraction; restore the full
   condition from upstream. */
#define TRANSACTION_HAS_CHANGES(t) \
((t)->log_updates || (t)->log_ext_updates || \
/* NOTE(review): fragment of the locked append path — handles t->reset
   (log rotation), sync-offset update, conflict handling, then appends
   each change category in a fixed order (ext intros/resizes first, then
   appends, flag updates, ext records, keyword resets before updates,
   expunges, post-header), and finally the mailbox sync offset, which
   must be last because it may need this transaction's size. Large parts
   of the body (function name, parameter list, most call sites) are
   missing from this extraction. */
static int
{
enum mail_index_sync_type change_mask = 0;
struct mail_index *index;
struct mail_transaction_log *log;
struct mail_transaction_log_file *file;
struct log_append_context ctx;
bool want_fsync;
if (t->reset) {
/* Reset the whole index, preserving only indexid. Begin by
rotating the log. We don't care if we skip some non-synced
transactions. */
return -1;
if (!TRANSACTION_HAS_CHANGES(t)) {
/* we only wanted to reset */
return 0;
}
}
if (!index->log_locked) {
/* update sync_offset */
(uoff_t)-1) <= 0)
return -1;
}
MAIL_INDEX_SYNC_HANDLER_HEAD) <= 0)
return -1;
}
if (array_is_created(&t->ext_reset_atomic))
if (t->max_modseq != 0)
if (!TRANSACTION_HAS_CHANGES(t)) {
/* we aborted all changes, nothing else to do */
return 0;
}
/* finally convert all sequences to UIDs before we write them,
but after we've checked and removed conflicts */
/* send all extension introductions and resizes before appends
to avoid resize overhead as much as possible */
if (t->pre_hdr_changed) {
}
if (array_is_created(&t->appends)) {
}
if (array_is_created(&t->updates)) {
}
if (array_is_created(&t->ext_rec_updates)) {
}
if (array_is_created(&t->ext_rec_atomics)) {
}
/* keyword resets before updates */
if (array_is_created(&t->keyword_resets)) {
}
if (array_is_created(&t->keyword_updates))
if (array_is_created(&t->expunges)) {
/* non-external expunges are only requests, ignore them when
checking fsync_mask */
if ((t->flags & MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL) != 0)
}
if (t->post_hdr_changed) {
}
/* NOTE: mailbox sync offset update must be the last change.
it may update the sync offset to include this transaction, so it
needs to know this transaction's size */
if ((t->flags & MAIL_INDEX_TRANSACTION_FLAG_EXTERNAL) != 0)
/* there is some garbage at the end of the transaction log
(eg. previous write failed). remove it so reader doesn't
break because of it. */
if (!MAIL_TRANSACTION_LOG_FILE_IN_MEMORY(file)) {
}
}
}
(t->flags & MAIL_INDEX_TRANSACTION_FLAG_FSYNC) != 0;
return -1;
}
if ((t->flags & MAIL_INDEX_TRANSACTION_FLAG_HIDE) != 0) {
/* mark the area covered by this transaction hidden */
}
return 0;
}
/* Public entry point: append transaction t to the log.
   NOTE(review): fragment — the remaining parameters (log_file_seq_r /
   log_file_offset_r out-params), the locking calls, and the call into
   the locked append path are missing from this extraction. Returns 0 on
   success / nothing-to-do, -1 on failure, per the visible paths. */
int mail_transaction_log_append(struct mail_index_transaction *t,
{
struct mail_index *index;
int ret;
/* default the out-params before any early return */
*log_file_seq_r = 0;
*log_file_offset_r = 0;
if (!TRANSACTION_HAS_CHANGES(t) && !t->reset) {
/* nothing to append */
return 0;
}
if (!index->log_locked) {
return -1;
}
if (!index->log_locked)
return ret;
}