mail-cache.c revision 620b5ed41650da63b0ba15c489f9f312231d5d9b
/* Copyright (c) 2003-2014 Dovecot authors, see the included COPYING file */

#include "lib.h"
#include "array.h"
#include "buffer.h"
#include "hash.h"
#include "nfs-workarounds.h"
#include "file-cache.h"
#include "mmap-util.h"
#include "read-full.h"
#include "write-full.h"
#include "mail-cache-private.h"

#include <unistd.h>

#define MAIL_CACHE_MIN_HEADER_READ_SIZE 4096

void mail_cache_set_syscall_error(struct mail_cache *cache,
				  const char *function)
{
	mail_index_file_set_syscall_error(cache->index, cache->filepath,
					  function);
}

static void mail_cache_unlink(struct mail_cache *cache)
{
	if (!cache->index->readonly)
		(void)unlink(cache->filepath);
}

void mail_cache_reset(struct mail_cache *cache)
{
	mail_cache_unlink(cache);
	/* mark the cache as unusable */
	cache->hdr = NULL;
}

void mail_cache_set_corrupted(struct mail_cache *cache, const char *fmt, ...)
{
	va_list va;

	mail_cache_reset(cache);

	va_start(va, fmt);
	T_BEGIN {
		mail_index_set_error(cache->index,
				     "Corrupted index cache file %s: %s",
				     cache->filepath,
				     t_strdup_vprintf(fmt, va));
	} T_END;
	va_end(va);
}

void mail_cache_file_close(struct mail_cache *cache)
{
	if (cache->mmap_base != NULL) {
		if (munmap(cache->mmap_base, cache->mmap_length) < 0)
			mail_cache_set_syscall_error(cache, "munmap()");
	}

	if (cache->file_cache != NULL)
		file_cache_set_fd(cache->file_cache, -1);

	cache->mmap_base = NULL;
	cache->hdr = NULL;
	cache->mmap_length = 0;
	cache->last_field_header_offset = 0;

	if (cache->file_lock != NULL)
		file_lock_free(&cache->file_lock);
	cache->locked = FALSE;

	if (cache->fd != -1) {
		if (close(cache->fd) < 0)
			mail_cache_set_syscall_error(cache, "close()");
		cache->fd = -1;
	}
}

static void mail_cache_init_file_cache(struct mail_cache *cache)
{
	struct stat st;

	if (cache->file_cache == NULL)
		return;

	file_cache_set_fd(cache->file_cache, cache->fd);

	if (fstat(cache->fd, &st) == 0)
		(void)file_cache_set_size(cache->file_cache, st.st_size);
	else if (!ESTALE_FSTAT(errno))
		mail_cache_set_syscall_error(cache, "fstat()");

	cache->st_ino = st.st_ino;
	cache->st_dev = st.st_dev;
}
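
/* Open the existing cache file and map its header. Returns 1 if the file
   was opened, 0 if it doesn't exist (or the index is only in memory),
   -1 if opening or mapping failed. */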
static int mail_cache_try_open(struct mail_cache *cache)
{
	const void *data;

	cache->opened = TRUE;

	if (MAIL_INDEX_IS_IN_MEMORY(cache->index))
		return 0;

	cache->fd = nfs_safe_open(cache->filepath,
				  cache->index->readonly ? O_RDONLY : O_RDWR);
	if (cache->fd == -1) {
		if (errno == ENOENT) {
			cache->need_compress_file_seq = 0;
			return 0;
		}

		mail_cache_set_syscall_error(cache, "open()");
		return -1;
	}

	mail_cache_init_file_cache(cache);

	if (mail_cache_map(cache, 0, 0, &data) < 0)
		return -1;
	return 1;
}
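
/* Check whether the cache file has been replaced or gone stale since it was
   last opened, i.e. whether reopening it could help. */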
static bool mail_cache_need_reopen(struct mail_cache *cache)
{
	struct stat st;

	if (MAIL_CACHE_IS_UNUSABLE(cache)) {
		if (cache->need_compress_file_seq != 0) {
			/* we're waiting for compression */
			return FALSE;
		}
		if (MAIL_INDEX_IS_IN_MEMORY(cache->index)) {
			/* disabled */
			return FALSE;
		}
	}

	if (cache->fd == -1)
		return TRUE;

	/* see if the file has changed */
	if ((cache->index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
		i_assert(!cache->locked);
		nfs_flush_file_handle_cache(cache->filepath);
	}
	if (nfs_safe_stat(cache->filepath, &st) < 0) {
		mail_cache_set_syscall_error(cache, "stat()");
		return TRUE;
	}

	if (st.st_ino != cache->st_ino ||
	    !CMP_DEV_T(st.st_dev, cache->st_dev)) {
		/* file changed */
		return TRUE;
	}

	if ((cache->index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
		/* if the old file has been deleted, the new file may have
		   the same inode as the old one. we'll catch this here by
		   checking if fstat() fails with ESTALE */
		if (fstat(cache->fd, &st) < 0) {
			if (ESTALE_FSTAT(errno))
				return TRUE;
			mail_cache_set_syscall_error(cache, "fstat()");
			return FALSE;
		}
	}
	return FALSE;
}
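
/* Close and reopen the cache file unconditionally. Returns 1 if the reopened
   file's file_seq matches the index's reset_id, 0 if it still differs (a
   later compression is requested instead), -1 on error. */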
static int mail_cache_reopen_now(struct mail_cache *cache)
{
	struct mail_index_view *view;
	const struct mail_index_ext *ext;

	mail_cache_file_close(cache);

	if (mail_cache_try_open(cache) <= 0)
		return -1;

	if (mail_cache_header_fields_read(cache) < 0)
		return -1;

	view = mail_index_view_open(cache->index);
	ext = mail_index_view_get_ext(view, cache->ext_id);
	if (ext == NULL || cache->hdr->file_seq != ext->reset_id) {
		/* still different - maybe a race condition or maybe the
		   file_seq really is corrupted. either way, this shouldn't
		   happen often so we'll just mark cache to be compressed
		   later which fixes this. */
		cache->need_compress_file_seq = cache->hdr->file_seq;
		mail_index_view_close(&view);
		return 0;
	}

	mail_index_view_close(&view);
	i_assert(!MAIL_CACHE_IS_UNUSABLE(cache));
	return 1;
}

int mail_cache_reopen(struct mail_cache *cache)
{
	i_assert(!cache->locked);

	if (!mail_cache_need_reopen(cache)) {
		/* reopening does no good */
		return 0;
	}
	return mail_cache_reopen_now(cache);
}
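
/* Decide from the header counters whether the cache file should be
   compressed, and if so remember its file_seq in need_compress_file_seq. */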
static void mail_cache_update_need_compress(struct mail_cache *cache)
{
	const struct mail_cache_header *hdr = cache->hdr;
	struct stat st;
	unsigned int msg_count;
	unsigned int records_count, cont_percentage, delete_percentage;
	bool want_compress = FALSE;

	if (hdr->minor_version == 0) {
		/* compress to get ourself into the new header version */
		cache->need_compress_file_seq = hdr->file_seq;
		return;
	}

	msg_count = cache->index->map->rec_map->records_count;
	if (msg_count == 0)
		records_count = 1;
	else if (hdr->record_count == 0 || hdr->record_count > msg_count*2) {
		/* probably not the real record_count, but the hole offset
		   that Dovecot <=v2.1 versions used to store in this
		   position. we already checked that minor_version>0, but
		   this could happen if an old Dovecot was used to access
		   the mailbox after it had been updated. */
		records_count = I_MAX(msg_count, 1);
		cache->hdr_copy.record_count = msg_count;
		cache->hdr_modified = TRUE;
	} else {
		records_count = hdr->record_count;
	}

	cont_percentage = hdr->continued_record_count * 100 / records_count;
	if (cont_percentage >= MAIL_CACHE_COMPRESS_CONTINUED_PERCENTAGE) {
		/* too many continued rows, compress */
		want_compress = TRUE;
	}

	delete_percentage = hdr->deleted_record_count * 100 /
		(records_count + hdr->deleted_record_count);
	if (delete_percentage >= MAIL_CACHE_COMPRESS_DELETE_PERCENTAGE) {
		/* too many deleted records, compress */
		want_compress = TRUE;
	}

	if (want_compress) {
		if (fstat(cache->fd, &st) < 0) {
			if (!ESTALE_FSTAT(errno))
				mail_cache_set_syscall_error(cache, "fstat()");
			return;
		}
		if (st.st_size >= MAIL_CACHE_COMPRESS_MIN_SIZE)
			cache->need_compress_file_seq = hdr->file_seq;
	}
}
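
/* Sanity-check the mapped cache header. Returns TRUE if it looks usable.
   Returns FALSE if the file is too small or corrupted, or if it was created
   by a different version/architecture/index (in which case it is silently
   unlinked). */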
static bool mail_cache_verify_header(struct mail_cache *cache,
				     const struct mail_cache_header *hdr)
{
	/* check that the header is still ok */
	if (cache->mmap_length < sizeof(struct mail_cache_header)) {
		mail_cache_set_corrupted(cache, "File too small");
		return FALSE;
	}

	if (hdr->major_version != MAIL_CACHE_MAJOR_VERSION) {
		/* version changed - upgrade silently */
		mail_cache_unlink(cache);
		return FALSE;
	}
	if (hdr->compat_sizeof_uoff_t != sizeof(uoff_t)) {
		/* architecture change - handle silently(?) */
		mail_cache_unlink(cache);
		return FALSE;
	}

	if (hdr->indexid != cache->index->indexid) {
		/* index id changed - handle silently */
		mail_cache_unlink(cache);
		return FALSE;
	}
	if (hdr->file_seq == 0) {
		mail_cache_set_corrupted(cache, "file_seq is 0");
		return FALSE;
	}
	return TRUE;
}
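
/* Finish a map: verify the header when mapping from offset 0 and update the
   cached header pointer/copy and the compression hints. Returns 1 if the
   whole requested range is mapped, 0 if the range goes past the mapped
   area, -1 if the header turned out to be invalid. */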
static int
mail_cache_map_finish(struct mail_cache *cache, uoff_t offset, size_t size,
		      const void *hdr_data, bool copy_hdr)
{
	const struct mail_cache_header *hdr = hdr_data;

	if (offset == 0) {
		/* verify the header validity only with offset=0. this way
		   we won't waste time re-verifying it all the time */
		if (!mail_cache_verify_header(cache, hdr)) {
			cache->need_compress_file_seq =
				!MAIL_CACHE_IS_UNUSABLE(cache) &&
				cache->hdr->file_seq != 0 ?
				cache->hdr->file_seq : 0;
			cache->hdr = NULL;
			return -1;
		}
	}
	if (hdr_data != NULL) {
		if (!copy_hdr)
			cache->hdr = hdr;
		else {
			memcpy(&cache->hdr_ro_copy, hdr,
			       sizeof(cache->hdr_ro_copy));
			cache->hdr = &cache->hdr_ro_copy;
		}
		mail_cache_update_need_compress(cache);
	} else {
		i_assert(cache->hdr != NULL);
	}
	i_assert(cache->hdr->file_seq != 0);

	if (offset + size > cache->mmap_length)
		return 0;
	return 1;
}
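
/* Map a range of the cache file by pread()ing it into read_buf instead of
   mmap()ing it or going through file_cache. Used when map_with_read=TRUE,
   i.e. the index was opened with MAIL_INDEX_OPEN_FLAG_SAVEONLY. */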
static int
mail_cache_map_with_read(struct mail_cache *cache, size_t offset, size_t size,
			 const void **data_r)
{
	const void *hdr_data;
	void *data;
	ssize_t ret;

	if (cache->read_buf == NULL) {
		cache->read_buf =
			buffer_create_dynamic(default_pool, size);
	} else if (cache->read_offset <= offset &&
		   cache->read_offset + cache->read_buf->used >= offset+size) {
		/* already mapped */
		*data_r = CONST_PTR_OFFSET(cache->read_buf->data,
					   offset - cache->read_offset);
		hdr_data = offset == 0 ? *data_r : NULL;
		return mail_cache_map_finish(cache, offset, size,
					     hdr_data, TRUE);
	} else {
		buffer_set_used_size(cache->read_buf, 0);
	}
	if (offset == 0 && size < MAIL_CACHE_MIN_HEADER_READ_SIZE) {
		/* we can usually read the fields header after the cache
		   header. we need them both, so try to read them all with one
		   pread() call. */
		size = MAIL_CACHE_MIN_HEADER_READ_SIZE;
	}

	data = buffer_append_space_unsafe(cache->read_buf, size);
	ret = pread(cache->fd, data, size, offset);
	if (ret < 0) {
		if (errno != ESTALE)
			mail_cache_set_syscall_error(cache, "read()");

		buffer_set_used_size(cache->read_buf, 0);
		cache->hdr = NULL;
		cache->mmap_length = 0;
		return -1;
	}
	buffer_set_used_size(cache->read_buf, ret);

	cache->read_offset = offset;
	cache->mmap_length = offset + cache->read_buf->used;

	*data_r = data;
	hdr_data = offset == 0 ? *data_r : NULL;
	return mail_cache_map_finish(cache, offset,
				     cache->read_buf->used, hdr_data, TRUE);
}
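
/* Map the given range of the cache file and return a pointer to it in
   *data_r. Depending on the index flags this uses pread() into a buffer,
   file_cache, or a read-only mmap() of the whole file. Returns 1 if the
   range was mapped, 0 if it lies beyond the end of the file, -1 on error. */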
int mail_cache_map(struct mail_cache *cache, size_t offset, size_t size,
		   const void **data_r)
{
	struct stat st;
	const void *data;
	ssize_t ret;

	if (size == 0)
		size = sizeof(struct mail_cache_header);

	/* verify offset + size before trying to allocate a huge amount of
	   memory due to them. note that we may be prefetching more than we
	   actually need, so don't fail too early. */
	if ((size > cache->mmap_length || offset + size > cache->mmap_length) &&
	    (offset > 0 || size > sizeof(struct mail_cache_header))) {
		if (fstat(cache->fd, &st) < 0) {
			i_error("fstat(%s) failed: %m", cache->filepath);
			return -1;
		}
		if (offset >= (uoff_t)st.st_size) {
			*data_r = NULL;
			return 0;
		}
		if (offset + size > (uoff_t)st.st_size)
			size = st.st_size - offset;
	}

	cache->remap_counter++;
	if (cache->map_with_read)
		return mail_cache_map_with_read(cache, offset, size, data_r);

	if (cache->file_cache != NULL) {
		ret = file_cache_read(cache->file_cache, offset, size);
		if (ret < 0) {
			/* In case of ESTALE we'll simply fail without error
			   messages. The caller will then just have to
			   fall back to generating the value itself.

			   We can't simply reopen the cache file, because
			   using it requires also having updated file
			   offsets. */
			if (errno != ESTALE)
				mail_cache_set_syscall_error(cache, "read()");
			cache->hdr = NULL;
			return -1;
		}

		data = file_cache_get_map(cache->file_cache,
					  &cache->mmap_length);
		*data_r = offset > cache->mmap_length ? NULL :
			CONST_PTR_OFFSET(data, offset);
		return mail_cache_map_finish(cache, offset, size,
					     offset == 0 ? data : NULL, TRUE);
	}

	if (offset < cache->mmap_length &&
	    size <= cache->mmap_length - offset) {
		/* already mapped */
		i_assert(cache->mmap_base != NULL);
		*data_r = CONST_PTR_OFFSET(cache->mmap_base, offset);
		return 1;
	}

	if (cache->mmap_base != NULL) {
		if (munmap(cache->mmap_base, cache->mmap_length) < 0)
			mail_cache_set_syscall_error(cache, "munmap()");
	} else {
		if (cache->fd == -1) {
			/* unusable, waiting for compression or
			   index is in memory */
			i_assert(cache->need_compress_file_seq != 0 ||
				 MAIL_INDEX_IS_IN_MEMORY(cache->index));
			return -1;
		}
	}

	/* map the whole file */
	cache->hdr = NULL;
	cache->mmap_length = 0;

	cache->mmap_base = mmap_ro_file(cache->fd, &cache->mmap_length);
	if (cache->mmap_base == MAP_FAILED) {
		cache->mmap_base = NULL;
		cache->mmap_length = 0;
		mail_cache_set_syscall_error(cache, "mmap()");
		return -1;
	}
	*data_r = offset > cache->mmap_length ? NULL :
		CONST_PTR_OFFSET(cache->mmap_base, offset);
	return mail_cache_map_finish(cache, offset, size,
				     cache->mmap_base, FALSE);
}
int mail_cache_open_and_verify(struct mail_cache *cache)
{
	int ret;

	ret = mail_cache_try_open(cache);
	if (ret > 0)
		ret = mail_cache_header_fields_read(cache);
	if (ret < 0) {
		/* failed for some reason - doesn't really matter,
		   it's disabled for now. */
		mail_cache_file_close(cache);
	}
	return ret;
}

static struct mail_cache *mail_cache_alloc(struct mail_index *index)
{
	struct mail_cache *cache;

	cache = i_new(struct mail_cache, 1);
	cache->index = index;
	cache->fd = -1;
	cache->filepath =
		i_strconcat(index->filepath, MAIL_CACHE_FILE_SUFFIX, NULL);
	cache->field_pool = pool_alloconly_create("Cache fields", 2048);
	hash_table_create(&cache->field_name_hash, cache->field_pool, 0,
			  strcase_hash, strcasecmp);

	cache->dotlock_settings.use_excl_lock =
		(index->flags & MAIL_INDEX_OPEN_FLAG_DOTLOCK_USE_EXCL) != 0;
	cache->dotlock_settings.nfs_flush =
		(index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0;
	cache->dotlock_settings.timeout =
		I_MIN(MAIL_CACHE_LOCK_TIMEOUT, index->max_lock_timeout_secs);
	cache->dotlock_settings.stale_timeout = MAIL_CACHE_LOCK_CHANGE_TIMEOUT;

	if (!MAIL_INDEX_IS_IN_MEMORY(index) &&
	    (index->flags & MAIL_INDEX_OPEN_FLAG_MMAP_DISABLE) != 0)
		cache->file_cache = file_cache_new(-1);
	cache->map_with_read =
		(cache->index->flags & MAIL_INDEX_OPEN_FLAG_SAVEONLY) != 0;

	cache->ext_id =
		mail_index_ext_register(index, "cache", 0,
					sizeof(uint32_t), sizeof(uint32_t));
	mail_index_register_expunge_handler(index, cache->ext_id, FALSE,
					    mail_cache_expunge_handler, cache);
	return cache;
}

struct mail_cache *mail_cache_open_or_create(struct mail_index *index)
{
	struct mail_cache *cache;

	cache = mail_cache_alloc(index);
	return cache;
}

void mail_cache_free(struct mail_cache **_cache)
{
	struct mail_cache *cache = *_cache;

	*_cache = NULL;
	if (cache->file_cache != NULL)
		file_cache_free(&cache->file_cache);

	mail_index_unregister_expunge_handler(cache->index, cache->ext_id);
	mail_cache_file_close(cache);

	if (cache->read_buf != NULL)
		buffer_free(&cache->read_buf);
	hash_table_destroy(&cache->field_name_hash);
	pool_unref(&cache->field_pool);
	i_free(cache->field_file_map);
	i_free(cache->file_field_map);
	i_free(cache->fields);
	i_free(cache->filepath);
	i_free(cache);
}
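
/* Take a write lock on the cache file, using either mail_index_lock_fd() or
   a dotlock depending on the index's lock_method. Returns 1 if the lock was
   acquired, 0 if locking timed out, -1 on error. */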
static int mail_cache_lock_file(struct mail_cache *cache, bool nonblock)
{
	unsigned int timeout_secs;
	int ret;

	if (cache->last_lock_failed) {
		/* previous locking failed. don't waste time waiting on it
		   again, just try once to see if it's available now. */
		nonblock = TRUE;
	}

	if (cache->index->lock_method != FILE_LOCK_METHOD_DOTLOCK) {
		i_assert(cache->file_lock == NULL);
		timeout_secs = I_MIN(MAIL_CACHE_LOCK_TIMEOUT,
				     cache->index->max_lock_timeout_secs);

		ret = mail_index_lock_fd(cache->index, cache->filepath,
					 cache->fd, F_WRLCK,
					 nonblock ? 0 : timeout_secs,
					 &cache->file_lock);
	} else {
		enum dotlock_create_flags flags =
			nonblock ? DOTLOCK_CREATE_FLAG_NONBLOCK : 0;

		i_assert(cache->dotlock == NULL);
		ret = file_dotlock_create(&cache->dotlock_settings,
					  cache->filepath, flags,
					  &cache->dotlock);
		if (ret < 0) {
			mail_cache_set_syscall_error(cache,
						     "file_dotlock_create()");
		}
	}
	cache->last_lock_failed = ret <= 0;

	/* don't bother warning if locking failed due to a timeout. since cache
	   updating isn't all that important we're using a very short timeout
	   so it can be triggered sometimes on heavy load */
	if (ret <= 0)
		return ret;

	mail_index_flush_read_cache(cache->index, cache->filepath, cache->fd,
				    TRUE);
	return 1;
}

static void mail_cache_unlock_file(struct mail_cache *cache)
{
	if (cache->index->lock_method != FILE_LOCK_METHOD_DOTLOCK)
		file_unlock(&cache->file_lock);
	else
		file_dotlock_delete(&cache->dotlock);
}
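
/* Lock the cache file and make sure the mapped header is up to date. With
   require_same_reset_id, the cache file's file_seq must match the index's
   reset_id for the cache extension. Returns 1 if locked, 0 if the cache
   can't be used right now, -1 on error. */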
static int
mail_cache_lock_full(struct mail_cache *cache, bool require_same_reset_id,
		     bool nonblock)
{
	const struct mail_index_ext *ext;
	const void *data;
	struct mail_index_view *iview;
	uint32_t reset_id;
	int i, ret;

	i_assert(!cache->locked);

	if (!cache->opened)
		(void)mail_cache_open_and_verify(cache);

	if (MAIL_CACHE_IS_UNUSABLE(cache) ||
	    MAIL_INDEX_IS_IN_MEMORY(cache->index) ||
	    cache->index->readonly)
		return 0;

	iview = mail_index_view_open(cache->index);
	ext = mail_index_view_get_ext(iview, cache->ext_id);
	reset_id = ext == NULL ? 0 : ext->reset_id;
	mail_index_view_close(&iview);

	if (ext == NULL && require_same_reset_id) {
		/* cache not used */
		return 0;
	}

	for (i = 0; i < 3; i++) {
		if (cache->hdr->file_seq != reset_id &&
		    (require_same_reset_id || i == 0)) {
			/* we want the latest cache file */
			if (reset_id < cache->hdr->file_seq) {
				/* either we're still waiting for index to
				   catch up with a cache compression, or
				   that catching up is never going to happen */
				ret = 0;
				break;
			}
			ret = mail_cache_reopen(cache);
			if (ret < 0 || (ret == 0 && require_same_reset_id))
				break;
		}

		if ((ret = mail_cache_lock_file(cache, nonblock)) <= 0) {
			ret = -1;
			break;
		}
		cache->locked = TRUE;

		if (cache->hdr->file_seq == reset_id ||
		    !require_same_reset_id) {
			/* got it */
			break;
		}

		/* okay, so it was just compressed. try again. */
		(void)mail_cache_unlock(cache);
		ret = 0;
	}

	if (ret > 0) {
		/* make sure our header is up to date */
		if (cache->file_cache != NULL) {
			file_cache_invalidate(cache->file_cache, 0,
					      sizeof(struct mail_cache_header));
		}
		if (cache->read_buf != NULL)
			buffer_set_used_size(cache->read_buf, 0);
		if (mail_cache_map(cache, 0, 0, &data) > 0)
			cache->hdr_copy = *cache->hdr;
		else {
			(void)mail_cache_unlock(cache);
			ret = -1;
		}
	}

	i_assert((ret <= 0 && !cache->locked) || (ret > 0 && cache->locked));
	return ret;
}

int mail_cache_lock(struct mail_cache *cache, bool require_same_reset_id)
{
	return mail_cache_lock_full(cache, require_same_reset_id, FALSE);
}

int mail_cache_try_lock(struct mail_cache *cache)
{
	return mail_cache_lock_full(cache, FALSE, TRUE);
}
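
/* Write back any pending header changes and release the cache file lock.
   Returns 0 on success, -1 if the cache turned out to be unusable or if
   writing the header failed. */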
int mail_cache_unlock(struct mail_cache *cache)
{
	int ret = 0;

	i_assert(cache->locked);

	if (cache->field_header_write_pending)
		ret = mail_cache_header_fields_update(cache);

	if (MAIL_CACHE_IS_UNUSABLE(cache)) {
		/* we found it to be broken during the lock. just clean up. */
		cache->hdr_modified = FALSE;
		cache->locked = FALSE;
		return -1;
	}

	if (cache->hdr_modified) {
		cache->hdr_modified = FALSE;
		if (mail_cache_write(cache, &cache->hdr_copy,
				     sizeof(cache->hdr_copy), 0) < 0)
			ret = -1;
		cache->hdr_ro_copy = cache->hdr_copy;
		mail_cache_update_need_compress(cache);
	}

	if (cache->index->fsync_mode == FSYNC_MODE_ALWAYS) {
		if (fdatasync(cache->fd) < 0)
			mail_cache_set_syscall_error(cache, "fdatasync()");
	}

	cache->locked = FALSE;
	mail_cache_unlock_file(cache);
	return ret;
}

int mail_cache_write(struct mail_cache *cache, const void *data, size_t size,
		     uoff_t offset)
{
	i_assert(cache->locked);

	if (pwrite_full(cache->fd, data, size, offset) < 0) {
		mail_cache_set_syscall_error(cache, "pwrite_full()");
		return -1;
	}

	if (cache->file_cache != NULL)
		file_cache_write(cache->file_cache, data, size, offset);
	if (cache->read_buf != NULL)
		buffer_set_used_size(cache->read_buf, 0);
	return 0;
}
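
/* Append data to the end of the cache file. If *offset is 0 it's first set
   to the current file size, and the data is then written at *offset.
   Returns 0 on success, -1 on error (e.g. the file would grow past what a
   32-bit offset can address). */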
int mail_cache_append(struct mail_cache *cache, const void *data, size_t size,
		      uint32_t *offset)
{
	struct stat st;

	if (*offset == 0) {
		if (fstat(cache->fd, &st) < 0) {
			if (!ESTALE_FSTAT(errno))
				mail_cache_set_syscall_error(cache, "fstat()");
			return -1;
		}
		*offset = st.st_size;
	}
	if (*offset > (uint32_t)-1 || (uint32_t)-1 - *offset < size) {
		mail_cache_set_corrupted(cache, "Cache file too large");
		return -1;
	}
	if (mail_cache_write(cache, data, size, *offset) < 0)
		return -1;

	/* FIXME: this is updated only so that older Dovecot versions (<=v2.1)
	   can read this file. we can remove this later. */
	cache->hdr_modified = TRUE;
	cache->hdr_copy.backwards_compat_used_file_size = *offset + size;
	return 0;
}

bool mail_cache_exists(struct mail_cache *cache)
{
	return !MAIL_CACHE_IS_UNUSABLE(cache);
}

struct mail_cache_view *
mail_cache_view_open(struct mail_cache *cache, struct mail_index_view *iview)
{
	struct mail_cache_view *view;

	view = i_new(struct mail_cache_view, 1);
	view->cache = cache;
	view->view = iview;
	view->cached_exists_buf =
		buffer_create_dynamic(default_pool,
				      cache->file_fields_count + 10);
	return view;
}

void mail_cache_view_close(struct mail_cache_view **_view)
{
	struct mail_cache_view *view = *_view;

	i_assert(view->trans_view == NULL);

	*_view = NULL;
	if (view->cache->field_header_write_pending &&
	    !view->cache->compressing)
		(void)mail_cache_header_fields_update(view->cache);

	buffer_free(&view->cached_exists_buf);
	i_free(view);
}

void mail_cache_view_update_cache_decisions(struct mail_cache_view *view,
					    bool update)
{
	view->no_decision_updates = !update;
}
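
/* Return the sequence of the first message whose UID is at least
   day_first_uid[7] from the index header. Returns 1 if day_first_uid[7] is
   unset, or message_count+1 if all messages are older. */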
uint32_t mail_cache_get_first_new_seq(struct mail_index_view *view)
{
	const struct mail_index_header *idx_hdr;
	uint32_t first_new_seq, message_count;

	idx_hdr = mail_index_get_header(view);
	if (idx_hdr->day_first_uid[7] == 0)
		return 1;

	if (!mail_index_lookup_seq_range(view, idx_hdr->day_first_uid[7],
					 (uint32_t)-1, &first_new_seq,
					 &message_count)) {
		/* all messages are too old */
		return message_count+1;
	}
	return first_new_seq;
}