/* mail-cache-transaction.c revision 959a66f100887ce0acf442a397cfaec89570436a */
/* Copyright (c) 2003-2007 Dovecot authors, see the included COPYING file */
#include "lib.h"
#include "ioloop.h"
#include "array.h"
#include "buffer.h"
#include "file-cache.h"
#include "file-set-size.h"
#include "read-full.h"
#include "write-full.h"
#include "mail-cache-private.h"
#include "mail-index-transaction-private.h"
#include <stddef.h>
#define MAIL_CACHE_WRITE_BUFFER 32768
/* A reserved region of the cache file for this transaction's writes.
   NOTE(review): the member list is missing from this copy -- the file
   appears truncated; restore the fields from the original revision
   before use. */
struct mail_cache_reservation {
};
/* Per-index-transaction cache write context: ties together the cache,
   the view it operates on, and the owning index transaction, plus state
   flags. NOTE(review): this definition looks truncated -- upstream has
   additional members (write buffers, reservation array, offsets);
   confirm against the original revision. */
struct mail_cache_transaction_ctx {
struct mail_cache *cache;
struct mail_cache_view *view;
struct mail_index_transaction *trans;
/* we already attempted cache compression during this transaction */
unsigned int tried_compression:1;
/* cached data was modified in this transaction */
unsigned int changes:1;
};
/* Return the cache transaction context attached to index transaction
   't', reusing an existing one when present.
   NOTE(review): truncated -- the function-name line and the allocation
   of 'ctx' are missing from this copy, so 'ctx' is stored and returned
   uninitialized as shown; restore from the original revision. */
struct mail_cache_transaction_ctx *
struct mail_index_transaction *t)
{
struct mail_cache_transaction_ctx *ctx;
if (t->cache_trans_ctx != NULL)
return t->cache_trans_ctx;
t->cache_trans_ctx = ctx;
return ctx;
}
/* NOTE(review): orphaned function body -- the signature line is missing
   from this copy. Visible effect: clears the transaction's
   reserved-space bookkeeping (offset, size, last grow step). */
{
ctx->reserved_space_offset = 0;
ctx->reserved_space = 0;
ctx->last_grow_size = 0;
}
/* NOTE(review): truncated static helper -- its name, parameters and all
   body statements are missing from this copy. */
static void
{
}
/* NOTE(review): truncated static helper (name/parameter lines missing).
   Visible logic: open-and-verify the cache file, bail out early when the
   cache is unusable, and otherwise possibly reopen the cache file. Both
   the open and reopen results are deliberately ignored via (void)
   casts. */
static void
{
const struct mail_index_ext *ext;
(void)mail_cache_open_and_verify(cache);
return;
}
if (MAIL_CACHE_IS_UNUSABLE(cache))
return;
/* see if we should try to reopen the cache file */
return;
(void)mail_cache_reopen(cache);
}
/* NOTE(review): orphaned body with the signature missing; the recursive
   call suggests this is mail_cache_transaction_lock(). Visible logic:
   -1 on errors, a retry path that calls mail_cache_transaction_lock()
   again, a path that unlocks and returns 0, and a final return of 1
   (apparently the success case). */
{
int ret;
if (ret < 0)
return -1;
return -1;
return mail_cache_transaction_lock(ctx);
} else {
return 0;
}
}
if (mail_cache_unlock(cache) < 0)
return -1;
return 0;
}
return 1;
}
/* NOTE(review): orphaned body (signature missing) that grows the cache
   file. Visible logic: enforce a 16384-byte minimum grow step, round the
   new file size down to a 1 KiB boundary (new_fsize &= ~1023), return -1
   on failure paths and 0 on success. */
{
/* grow the file */
if (grow_size < 16384)
grow_size = 16384;
new_fsize &= ~1023;
return -1;
}
return -1;
}
}
return 0;
}
/* NOTE(review): truncated hole-unlink helper; only the trailing
   'hole_r' parameter of the signature survives. Visible logic: walk the
   hole chain starting at 'offset', validating each hole header's magic
   ("Invalid magic in hole header" on mismatch) and stopping when a
   suitable hole is found; afterwards unlink it from the chain,
   distinguishing the head (prev_offset == 0) from interior links.
   Returns TRUE on success, FALSE on any failure or an exhausted chain. */
struct mail_cache_hole_header *hole_r)
{
struct mail_cache_hole_header hole;
while (offset != 0) {
return FALSE;
}
"Invalid magic in hole header");
return FALSE;
}
break;
}
if (offset == 0)
return FALSE;
if (prev_offset == 0)
else {
return FALSE;
}
return TRUE;
}
/* NOTE(review): truncated static helper -- name and nearly all body
   statements are missing; only a local mail_cache_reservation
   remains. */
static void
{
struct mail_cache_reservation res;
}
/* NOTE(review): truncated static helper (name missing). Visible logic:
   clears the context's reserved-space offset, then scans the
   reservation array ('i' over 'count'), breaking out of the loop once a
   matching entry has been handled. The per-entry work is missing from
   this copy. */
static void
{
struct mail_cache_reservation *res;
unsigned int i, count;
ctx->reserved_space_offset = 0;
}
for (i = 0; i < count; i++) {
} else {
}
break;
}
}
}
/* NOTE(review): truncated space-allocation helper (signature missing).
   Visible logic: first try to reuse a large-enough hole in the cache
   file (returning 0); abort with -1 if the cache became unusable
   (hole unlinking can detect corruption); otherwise over-allocate,
   growing the file, and when the requested range ends at
   hdr->used_file_size simply extend the file and grow the most recent
   matching reservation (searched from the end, since holes may have
   been used in between). Returns 0 on success. */
static int
{
struct mail_cache_hole_header hole;
struct mail_cache_reservation *reservations;
unsigned int count;
/* found a large enough hole. */
return 0;
}
if (MAIL_CACHE_IS_UNUSABLE(cache)) {
/* mail_cache_unlink_hole() could have noticed corruption */
return -1;
}
return -1;
}
/* allocate some more space than we need */
}
}
return -1;
hdr->used_file_size) {
/* we can simply grow it */
/* grow reservation. it's probably the last one in the buffer,
but it's not guaranteed because we might have used holes
as well */
do {
count--;
} else {
}
return 0;
}
/* NOTE(review): truncated space-release helper (signature missing).
   Visible logic: no-op when the cache is unusable; one branch shrinks
   used_file_size back (freed range apparently at the end of the file);
   otherwise, when the range is at least MAIL_CACHE_MIN_HOLE_SIZE bytes,
   it is recorded as a reusable hole. */
static void
{
struct mail_cache_hole_header hole;
if (MAIL_CACHE_IS_UNUSABLE(cache))
return;
/* we can just set used_file_size back */
} else if (size >= MAIL_CACHE_MIN_HOLE_SIZE) {
/* set it up as a hole */
return;
}
}
/* NOTE(review): truncated helper releasing the transaction's unused
   reserved space (name line missing). Visible logic: nothing to do when
   reserved_space == 0; takes the cache lock when the caller does not
   already hold it ('locked'), silently giving up if locking fails;
   re-checks reserved_space afterwards because locking might have
   reopened the cache file; clears the reservation bookkeeping; on the
   !locked path an unlock failure yields -1, otherwise 0. */
static int
{
if (ctx->reserved_space == 0)
return 0;
if (!locked) {
if (mail_cache_transaction_lock(ctx) <= 0)
return 0;
}
/* check again - locking might have reopened the cache file */
if (ctx->reserved_space != 0) {
ctx->reserved_space_offset = 0;
ctx->reserved_space = 0;
}
if (!locked) {
return -1;
}
return 0;
}
/* NOTE(review): truncated space-acquisition helper; only the trailing
   'bool commit' parameter of the signature survives. Visible logic:
   reserve more space when the transaction's preallocation ran out,
   handling lock/unlock for callers that don't already hold the lock;
   returns 0 when the cache file was reopened (caller must abort) and -1
   on errors; stores the available space via 'available_space_r' when
   requested; on the final commit also frees the remaining reserved
   space; returns 1 on success. */
static int
bool commit)
{
int ret;
/* not enough preallocated space in transaction, get more */
if (!locked) {
return ret;
}
commit);
if (!locked) {
return -1;
}
if (ret < 0)
return -1;
/* cache file reopened - need to abort */
return 0;
}
} else {
}
if (available_space_r != NULL)
/* final commit - see if we can free the rest of the
reserved space */
if (mail_cache_transaction_free_space(ctx) < 0)
return -1;
}
return 1;
}
/* NOTE(review): truncated index-update helper; only the 'rec' parameter
   line of the signature survives. Visible logic: write the records'
   cache offsets into the index; when the same message had records added
   multiple times inside this uncommitted transaction, only the newest
   goes into the transaction log, so the linking between records is done
   here -- always keeping the smallest offset in the index, which the
   inline comment says non-file-mmaped cache access requires. Stores the
   stopping sequence position in *seq_idx and returns 0 (or -1 on link
   failures). */
static int
const struct mail_cache_record *rec,
{
/* write the cache_offsets to index file. records' prev_offset
is updated to point to old cache record when index is being
synced. */
&write_offset, &old_offset);
if (old_offset != 0) {
/* we added records for this message multiple
times in this same uncommitted transaction.
only the new one will be written to
transaction log, we need to do the linking
ourself here. */
if (old_offset > write_offset) {
write_offset) < 0)
return -1;
} else {
/* if we're combining multiple transactions,
make sure the one with the smallest offset
is written into index. this is required for
non-file-mmaped cache to work properly. */
&old_offset, NULL);
old_offset) < 0)
return -1;
}
}
}
*seq_idx = i;
return 0;
}
/* NOTE(review): truncated buffer-flush helper (signature missing).
   Visible logic: -1 when the cache is unusable; on commit the trailing
   dummy record is removed first; returns 0 when the cache file was
   reopened (caller must abort); reserves write space (propagating
   errors / reopen via ret <= 0), counts how many buffered records fit
   (seq_limit), writes them to the file in chunks advancing rec_pos by
   write_size, drops the written data from the buffer, and returns 1 on
   success. */
static int
{
unsigned int seq_count;
int ret;
bool commit;
if (MAIL_CACHE_IS_UNUSABLE(cache))
return -1;
if (commit) {
/* committing, remove the last dummy record */
}
/* cache file reopened - need to abort */
return 0;
}
seq_limit = 0;
if (ret <= 0) {
/* error / couldn't lock / cache file reopened */
return ret;
}
/* see how much we can really write there */
seq_limit++;
}
} else {
}
/* write it to file */
return -1;
&write_size) < 0)
return -1;
rec_pos += write_size;
}
/* drop the written data from buffer */
return 1;
}
/* NOTE(review): truncated static helper (name and most statements
   missing). Visible intent per the surviving comment: patch the size
   field of the buffered cache record ("fix record size") and update
   ctx->cache_data. */
static void
{
void *data;
/* fix record size */
ctx->cache_data =
}
}
/* NOTE(review): orphaned body, signature missing -- by its shape this is
   the transaction commit path. Visible logic: early-return 0 for a
   no-op case, take the cache lock (failure -> -1), flush the buffered
   records, then unlock; a failure in flush or unlock turns the result
   into -1. fdatasync() is intentionally skipped (see inline comment). */
{
int ret = 0;
return 0;
}
if (mail_cache_transaction_lock(ctx) <= 0) {
return -1;
}
if (mail_cache_transaction_flush(ctx) < 0)
ret = -1;
/* Here would be a good place to do fdatasync() to make sure
everything is written before offsets are updated to index.
However it slows down I/O unnecessarily and we're pretty good at
catching and fixing cache corruption, so we no longer do it. */
if (mail_cache_unlock(cache) < 0)
ret = -1;
return ret;
}
/* NOTE(review): orphaned body, signature missing -- by its shape this is
   the transaction rollback path. Visible logic: under the cache lock,
   walk the reservation array from the end toward the beginning (better
   chance of shrinking used_file_size instead of punching holes, per the
   inline comment) freeing each entry, then unlock with the result
   deliberately ignored. */
{
const struct mail_cache_reservation *reservations;
unsigned int count;
if (mail_cache_transaction_lock(ctx) > 0) {
/* free flushed data as well. do it from end to
beginning so we have a better chance of
updating used_file_size instead of adding
holes */
while (count > 0) {
count--;
}
(void)mail_cache_unlock(cache);
}
}
}
/* NOTE(review): truncated header-field write helper (signature and most
   statements missing). Visible logic per the surviving comments: write
   the new header data in a way that a rollback can never overwrite the
   already-committed area; update the header offset only after the data
   is guaranteed to be on disk; when adding the first field, keep
   hdr_copy in sync so unlocking won't overwrite it. Error paths return
   -1; success returns 0. */
static int
{
return -1;
return -1;
return -1;
}
}
return -1;
/* if we rollback the transaction, we must not overwrite this
area because it's already committed after updating the
header offset */
/* after it's guaranteed to be in disk, update header offset */
return -1;
/* we're adding the first field. hdr_copy needs to be kept
in sync so unlocking won't overwrite it. */
}
return 0;
}
/* NOTE(review): truncated field-registration function; only the trailing
   'unsigned int field_idx' parameter of the signature survives. Visible
   logic: register all known fields at once (unused ones are dropped
   later at compression time); if compression/reopen already added the
   field, return 0 early; otherwise re-read the header fields under the
   lock so none are lost, write the updated headers inside a
   t_push()/t_pop() frame, and flag an error ("Newly added field got"
   lost -- rest of the message is missing from this copy). Unlock
   failures turn the result into -1. */
unsigned int field_idx)
{
unsigned int i;
int ret;
/* we want to avoid adding all the fields one by one to the cache file,
so just add all of them at once in here. the unused ones get dropped
later when compressing. */
for (i = 0; i < cache->fields_count; i++)
if (MAIL_CACHE_IS_UNUSABLE(cache))
return -1;
/* if we compressed the cache, the field should be there now.
it's however possible that someone else just compressed it
and we only reopened the cache file. */
return 0;
/* need to add it */
return -1;
}
/* re-read header to make sure we don't lose any fields. */
if (mail_cache_header_fields_read(cache) < 0) {
(void)mail_cache_unlock(cache);
return -1;
}
/* it was already added */
if (mail_cache_unlock(cache) < 0)
return -1;
return 0;
}
t_push();
t_pop();
if (ret == 0) {
/* we wrote all the headers, so there are no pending changes */
}
"Cache file %s: Newly added field got "
ret = -1;
}
if (mail_cache_unlock(cache) < 0)
ret = -1;
return ret;
}
/* NOTE(review): orphaned body, signature missing -- by its shape this is
   the "add field value to cache" path. Visible logic: special-cases a
   cache compressed within this transaction (cache_file_seq == 0); queues
   the field for header addition when needed; records roughly which
   fields were modified so lookups can consult the transaction; prefixes
   variable-sized values (fixed_size == (unsigned int)-1) with a 32-bit
   length (data_size32); flushes the write buffer when full, resetting
   the transaction and returning if the flush failed because the file
   was compressed/reopened (cache_data no longer holds the record); and
   pads data to a 4-byte boundary ((data_size & 3) != 0). */
{
unsigned int fixed_size;
return;
if (ctx->cache_file_seq == 0) {
/* cache was compressed within this transaction */
}
/* we'll have to add this field to headers */
return;
if (ctx->cache_file_seq == 0)
}
/* remember roughly what we have modified, so cache lookups can
look into transactions to see changes. */
}
/* remember that this value exists, in case we try to look it up */
if (fixed_size == (unsigned int)-1)
full_size += sizeof(data_size32);
/* time to flush our buffer. if flushing fails because the
cache file had been compressed and was reopened, return
without adding the cached data since cache_data buffer
doesn't contain the cache_rec anymore. */
if (mail_cache_transaction_flush(ctx) <= 0) {
/* make sure the transaction is reset, so we don't
constantly try to flush for each call to this
function */
return;
}
}
if (fixed_size == (unsigned int)-1) {
sizeof(data_size32));
}
if ((data_size & 3) != 0)
}
/* NOTE(review): three orphaned function bodies with all signatures
   missing. The first two only return FALSE; the third is empty. Their
   identities cannot be confirmed from this copy -- restore from the
   original revision. */
{
return FALSE;
}
{
return FALSE;
}
{
}
/* NOTE(review): orphaned body (signature missing) that links a cache
   record at 'new_offset'. Visible logic: fail when the cache is
   unusable; reject a new_offset whose mail_cache_record would extend
   past the end of the file ("Cache record offset %u points outside
   file"); return 0 on success, -1 on any error. */
{
if (MAIL_CACHE_IS_UNUSABLE(cache))
return -1;
if (new_offset + sizeof(struct mail_cache_record) >
"Cache record offset %u points outside file",
return -1;
}
return -1;
return 0;
}
/* NOTE(review): orphaned body (signature missing) implementing record
   deletion. Visible logic: only the header's deleted_space is updated --
   the data itself stays in place because other processes may still be
   using it, and index views can still serve cached data for
   already-expunged messages (see inline comment); walks the record list
   inside a t_push()/t_pop() frame, succeeding (ret = 0) upon reaching
   offset 0 and aborting with a "record list is circular" error when a
   cycle is detected. */
{
const struct mail_cache_record *rec;
int ret = -1;
/* we'll only update the deleted_space in header. we can't really
do any actual deleting as other processes might still be using
the data. also it's actually useful as some index views are still
able to ask cached data from messages that have already been
expunged. */
t_push();
if (offset == 0) {
/* successfully got to the end of the list */
ret = 0;
break;
}
"record list is circular");
break;
}
}
t_pop();
return ret;
}