/* mdbox-map.c revision 836e57b1e7817d008f8ae05cd4b506f420fed80d */
/* Copyright (c) 2007-2010 Dovecot authors, see the included COPYING file */
#include "lib.h"
#include "array.h"
#include "hash.h"
#include "ostream.h"
#include "mkdir-parents.h"
#include "unlink-old-files.h"
#include "mailbox-list-private.h"
#include "mdbox-storage.h"
#include "mdbox-file.h"
#include "mdbox-map-private.h"
#include <stdlib.h>
#include <dirent.h>
#define MAX_BACKWARDS_LOOKUPS 10
#define DBOX_FORCE_PURGE_MIN_RATIO 0.5
/* State for a single map-index transaction. Wraps a mail_index
   transaction opened on the map's atomic (sync-locked) context. */
struct mdbox_map_transaction_context {
	/* atomic context providing the sync lock for this transaction */
	struct mdbox_map_atomic_context *atomic;
	/* the underlying mail index transaction; NOTE(review): presumably
	   NULL when opening the transaction failed — confirm against the
	   (not visible here) begin/commit functions */
	struct mail_index_transaction *trans;
	/* TRUE once any change has been made through this transaction */
	unsigned int changed:1;
	/* TRUE once the transaction has been committed */
	unsigned int committed:1;
};
{
"mdbox map %s corrupted: %s",
}
struct mdbox_map *
const char *path)
{
sizeof(struct mdbox_map_mail_index_header),
sizeof(struct mdbox_map_mail_index_record),
sizeof(uint32_t));
return map;
}
{
}
}
{
return 0;
MAILBOX_LIST_PATH_TYPE_DIR) < 0) {
return -1;
}
return 0;
}
{
return;
/* check once in a while if there are temp files to clean up */
/* there haven't been any changes to this directory since we
last checked it. */
/* time to scan */
}
}
{
int ret;
/* already opened */
return 1;
}
if (create_missing) {
if (mdbox_map_mkdir_storage(map) < 0)
return -1;
}
if (ret < 0) {
return -1;
}
if (ret == 0) {
/* index not found - for now just return failure */
return 0;
}
if (mdbox_map_generate_uid_validity(map) < 0) {
return -1;
}
}
return 1;
}
{
}
{
}
{
struct mail_index_view_sync_ctx *ctx;
bool delayed_expunges;
/* some open files may have read partially written mails. now that
map syncing makes the new mails visible, we need to make sure the
partial data is flushed out of memory */
return -1;
}
/* can't sync when there are transactions */
return 0;
}
return -1;
}
return 0;
}
static void
struct mdbox_map_mail_index_header *hdr_r)
{
const void *data;
}
{
struct mdbox_map_mail_index_header hdr;
return hdr.rebuild_count;
}
static int
const struct mdbox_map_mail_index_record **rec_r)
{
const struct mdbox_map_mail_index_record *rec;
const void *data;
bool expunged;
return -1;
}
return 0;
}
static int
{
/* not found - try again after a refresh */
if (mdbox_map_refresh(map) < 0)
return -1;
return 0;
}
return 1;
}
{
const struct mdbox_map_mail_index_record *rec;
int ret;
if (mdbox_map_open_or_create(map) < 0)
return -1;
return ret;
return -1;
return 1;
}
struct mdbox_map_mail_index_record *rec_r,
{
const struct mdbox_map_mail_index_record *rec;
const void *data;
bool expunged;
int ret;
if (mdbox_map_open_or_create(map) < 0)
return -1;
return ret;
return -1;
return -1;
}
*refcount_r = *ref16_p;
return 1;
}
struct dbox_mail_lookup_rec *rec_r)
{
const void *data;
bool expunged;
return -1;
}
return -1;
}
return 0;
}
{
const struct mail_index_header *hdr;
struct dbox_mail_lookup_rec rec;
struct mdbox_map_file_msg msg;
if (mdbox_map_refresh(map) < 0)
return -1;
return -1;
}
}
return 0;
}
{
const struct mail_index_header *hdr;
const struct mdbox_map_mail_index_record *rec;
const void *data;
bool expunged;
int ret;
/* no map / internal error */
return ret;
}
if (mdbox_map_refresh(map) < 0)
return -1;
if (*ref16_p != 0)
continue;
}
}
}
return 0;
}
{
struct mdbox_map_atomic_context *atomic;
return atomic;
}
static void
struct mail_index_sync_ctx *sync_ctx)
{
struct mail_index_sync_rec sync_rec;
/* something had crashed. need a full resync. */
i_warning("mdbox %s: Inconsistency in map index "
} else {
}
}
{
int ret;
return 0;
return -1;
/* use syncing to lock the transaction log, so that we always see
log's head_offset = tail_offset */
if (ret <= 0) {
return -1;
}
/* reset refresh state so that if it's wanted to be done locked,
it gets the latest changes */
return 0;
}
{
}
{
}
{
int ret = 0;
/* not locked */
ret = -1;
}
} else {
}
return ret;
}
struct mdbox_map_transaction_context *
bool external)
{
struct mdbox_map_transaction_context *ctx;
bool success;
if (external)
/* already refreshed within a lock, don't do it again */
} else {
}
if (success) {
flags);
}
return ctx;
}
{
return 0;
return -1;
return -1;
}
return 0;
}
{
}
{
const void *data;
bool expunged;
return -1;
/* we can't refresh map here since view has a
transaction open. */
map_uid);
return -1;
}
map_uid);
return -1;
}
/* we're getting close to the 64k limit. fail early
to make it less likely that two processes increase
the refcount enough times to cross the limit */
t_strdup_printf("Message has been copied too many times (%d + %d)",
return -1;
}
return 0;
}
{
unsigned int i, count;
return -1;
for (i = 0; i < count; i++) {
return -1;
}
return 0;
}
{
struct mdbox_map_atomic_context *atomic;
struct mdbox_map_transaction_context *map_trans;
const struct mail_index_header *hdr;
const struct mdbox_map_mail_index_record *rec;
const void *data;
bool expunged;
int ret = 0;
/* make sure the map is refreshed, otherwise we might be expunging
messages that have already been moved to other files. */
/* we need a per-file transaction, otherwise we can't refresh the map */
ret = -1;
break;
}
}
}
if (ret == 0)
if (mdbox_map_atomic_finish(&atomic) < 0)
ret = -1;
return ret;
}
struct mdbox_map_append_context *
{
struct mdbox_map_append_context *ctx;
else {
/* refresh the map so we can try appending to the
latest files */
else
}
return ctx;
}
{
unsigned int unit = 1;
if (interval == 0)
return 0;
the interval is */
if (interval >= 60) {
unit = 60;
if (interval >= 3600) {
unit = 3600;
}
}
}
i_panic("mktime(today) failed");
}
{
bool notfound;
if (want_altpath) {
return FALSE;
} else {
return FALSE;
}
if (notfound)
return FALSE;
/* already locked, we're possibly in the middle of purging it
in which case we really don't want to write there. */
return FALSE;
}
/* different alt location than what we want, can't use it */
return FALSE;
}
return TRUE;
}
static bool
bool want_altpath,
struct dbox_file_append_context **file_append_r,
{
struct dbox_file_append_context *file_append;
bool file_too_old = FALSE;
int ret;
*file_append_r = NULL;
*retry_later_r = FALSE;
return TRUE;
}
file_too_old = TRUE;
/* locking failed */
*retry_later_r = ret == 0;
/* the file was unlinked between opening and locking it. */
} else {
/* couldn't append to this file */
/* file was too large after all */
} else {
/* success */
return TRUE;
}
}
/* failure */
return !file_too_old;
}
static bool
{
struct dbox_file_append_context *const *file_appends;
unsigned int i, count;
/* there shouldn't be many files open, don't bother with anything
faster. */
for (i = 0; i < count; i++) {
struct mdbox_file *mfile =
return TRUE;
}
return FALSE;
}
static struct dbox_file_append_context *
{
struct mdbox_file *mfile;
unsigned int i, count;
return NULL;
/* first try to use files already used in this append */
continue;
return append;
/* can't append to this file anymore. if we created this file,
close it so we don't waste fds. if we didn't, we can't close
it without also losing our lock too early. */
}
return NULL;
}
static int
{
struct dirent *d;
const struct mail_index_header *hdr;
const struct mdbox_map_mail_index_record *rec;
int ret = 0;
/* we want to quickly find the latest alt file, but we also want to
avoid accessing the alt storage as much as possible. so we'll do
this by finding the lowest numbered file (n) from primary storage.
hopefully one of n-[1..m] is appendable in alt storage. */
return -1;
}
strlen(MDBOX_MAIL_FILE_PREFIX)) != 0)
continue;
&file_id) < 0)
continue;
if (min_file_id > file_id)
}
if (errno != 0) {
ret = -1;
}
ret = -1;
}
if (ret < 0)
return -1;
/* find the newest message in alt storage from map view */
return -1;
break;
}
return 0;
}
static int
struct dbox_file_append_context **file_append_r,
{
const struct mail_index_header *hdr;
const struct mdbox_map_mail_index_record *rec;
unsigned int backwards_lookup_count;
bool retry_later;
return 0;
/* try to find an existing appendable file */
if (!want_altpath)
else {
/* we want to save to alt storage. */
return -1;
}
return -1;
continue;
if (++backwards_lookup_count > MAX_BACKWARDS_LOOKUPS) {
/* we've wasted enough time here */
break;
}
/* first lookup: this should be enough usually, but we can't
be sure until after locking. also if messages were recently
moved, this message might not be the last one in the file. */
continue;
/* already checked this */
continue;
}
output_r, &retry_later)) {
/* file is too old. the rest of the files are too. */
break;
}
/* NOTE: we've now refreshed map view. there are no guarantees
about sequences anymore. */
if (*file_append_r != NULL)
return 1;
/* FIXME: use retry_later somehow */
if (uid == 1 ||
break;
seq++;
}
return 0;
}
struct dbox_file_append_context **file_append_ctx_r,
{
struct mdbox_map_append *append;
struct dbox_file_append_context *file_append;
bool existing, want_altpath;
int ret;
return -1;
if (file_append != NULL) {
ret = 1;
} else {
&file_append, output_r);
}
if (ret > 0)
else if (ret < 0)
return -1;
else {
/* create a new file */
if (ret <= 0) {
return -1;
}
}
if (!existing) {
}
return 0;
}
{
struct mdbox_map_append *appends;
unsigned int count;
}
{
struct mdbox_map_append *appends;
unsigned int count;
}
bool separate_transaction)
{
struct dbox_file_append_context *const *file_appends;
unsigned int i, count;
struct mdbox_map_mail_index_header hdr;
/* start the syncing. we'll need it even if there are no file ids to
be assigned. */
return -1;
/* assign file_ids for newly created files */
for (i = 0; i < count; i++) {
struct mdbox_file *mfile =
if (dbox_file_append_flush(file_appends[i]) < 0)
return -1;
return -1;
}
}
/* update the highest used file_id */
if (first_file_id != file_id) {
file_id--;
}
return 0;
}
{
const struct mdbox_map_append *appends;
const struct mail_index_header *hdr;
struct mdbox_map_mail_index_record rec;
unsigned int i, count;
int ret = 0;
*first_map_uid_r = 0;
*last_map_uid_r = 0;
return 0;
}
return -1;
/* append map records to index */
ref16 = 1;
for (i = 0; i < count; i++) {
struct mdbox_file *mfile =
}
/* assign map UIDs for appended records */
if (hdr->uid_validity == 0) {
/* we don't really care about uidvalidity, but it can't be 0 */
}
return -1;
}
return ret;
}
{
const struct mdbox_map_append *appends;
struct mdbox_map_mail_index_record rec;
struct seq_range_iter iter;
unsigned int i, j, map_uids_count, appends_count;
return -1;
for (i = j = 0; i < map_uids_count; i++) {
struct mdbox_file *mfile =
i_assert(j < appends_count);
j++;
i_unreached();
}
i_unreached();
}
return 0;
}
{
struct dbox_file_append_context **file_appends;
unsigned int i, count;
for (i = 0; i < count; i++) {
if (dbox_file_append_commit(&file_appends[i]) < 0)
return -1;
}
return 0;
}
{
struct dbox_file_append_context **file_appends;
unsigned int i, count;
for (i = 0; i < count; i++) {
if (file_appends[i] != NULL)
}
for (i = 0; i < count; i++) {
dbox_file_unlock(files[i]);
dbox_file_unref(&files[i]);
}
}
{
const struct mail_index_header *hdr;
struct mail_index_sync_ctx *sync_ctx;
struct mail_index_view *view;
struct mail_index_transaction *trans;
int ret;
/* do this inside syncing, so that we're locked and there are no
race conditions */
if (ret <= 0) {
return -1;
}
if (hdr->uid_validity != 0) {
/* someone else beat us to it */
} else {
}
return mail_index_sync_commit(&sync_ctx);
}
{
if (uid_validity == 0)
return uid_validity;
}