/* squat-uidlist.c, revision e2a88d59c0d47d63ce1ad5b1fd95e487124a3fd4 */
/* Copyright (c) 2007-2012 Dovecot authors, see the included COPYING file */
#include "lib.h"
#include "array.h"
#include "bsearch-insert-pos.h"
#include "file-cache.h"
#include "file-lock.h"
#include "read-full.h"
#include "write-full.h"
#include "ostream.h"
#include "mmap-util.h"
#include "squat-trie-private.h"
#include "squat-uidlist.h"
#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#define UIDLIST_LIST_SIZE 31
#define UIDLIST_BLOCK_LIST_COUNT 100
#define UID_LIST_MASK_RANGE 0x80000000
/* set = points to uidlist index number, unset = points to uidlist offset */
#define UID_LIST_POINTER_MASK_LIST_IDX 0x80000000
#define UIDLIST_PACKED_FLAG_BITMASK 1
#define UIDLIST_PACKED_FLAG_BEGINS_WITH_POINTER 2
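/* Illustrative helpers showing how the tag bits above are meant to be read
   (the code below tests the masks inline; these names are hypothetical):
   the high bit of a uid_list entry marks the start of a range, and the high
   bit of a continued list's first entry tells whether it points to a uidlist
   index or to a file offset. */
static inline bool uid_entry_is_range_start(uint32_t entry)
{
	return (entry & UID_LIST_MASK_RANGE) != 0;
}
static inline bool uid_pointer_is_list_idx(uint32_t entry)
{
	return (entry & UID_LIST_POINTER_MASK_LIST_IDX) != 0;
}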
struct uidlist_list {
	unsigned int uid_count:31;
	unsigned int uid_begins_with_pointer:1;
	uint32_t uid_list[UIDLIST_LIST_SIZE];
};
struct squat_uidlist {
	struct squat_trie *trie;

	char *path;
	int fd;
	struct file_cache *file_cache;

	struct file_lock *file_lock;
	struct dotlock *dotlock;
	uoff_t locked_file_size;

	void *mmap_base;
	size_t mmap_size;
	struct squat_uidlist_file_header hdr;

	const void *data;
	size_t data_size;

	unsigned int cur_block_count;
	const uint32_t *cur_block_offsets;
	const uint32_t *cur_block_end_indexes;

	unsigned int corrupted:1;
	unsigned int building:1;
};
struct squat_uidlist_build_context {
	struct squat_uidlist *uidlist;
	struct ostream *output;

	ARRAY_DEFINE(lists, struct uidlist_list);
	ARRAY_TYPE(uint32_t) block_offsets;
	ARRAY_TYPE(uint32_t) block_end_indexes;
	struct squat_uidlist_file_header build_hdr;
	unsigned int need_reopen:1;
};
struct squat_uidlist_rebuild_context {
	struct squat_uidlist *uidlist;
	struct squat_uidlist_build_context *build_ctx;

	struct ostream *output;
	int fd;

	ARRAY_TYPE(uint32_t) new_block_offsets;
	ARRAY_TYPE(uint32_t) new_block_end_indexes;
	uoff_t cur_block_start_offset;
	unsigned int list_idx;
	unsigned int new_count;
};
static void squat_uidlist_set_corrupted(struct squat_uidlist *uidlist,
					const char *reason)
{
	if (uidlist->corrupted)
		return;
	uidlist->corrupted = TRUE;

	i_error("Corrupted squat uidlist file %s: %s", uidlist->path, reason);
}
static int
uidlist_write_array(struct ostream *output, const uint32_t *uid_list,
		    unsigned int uid_count, uint32_t packed_flags,
		    uint32_t offset, bool write_size, uint32_t *size_r)
{
unsigned int i, bitmask_len, uid_list_len;
bool datastack;
int num;
if ((packed_flags & UIDLIST_PACKED_FLAG_BEGINS_WITH_POINTER) != 0)
/* @UNSAFE */
if (datastack)
else
if (bitmask_len < uid_count) {
if ((uid_list[0] & UID_LIST_MASK_RANGE) == 0) {
i = 1;
} else {
i = 0;
}
base_uid++;
for (; i < uid_count; i++) {
if ((uid & UID_LIST_MASK_RANGE) == 0) {
} else {
uid &= ~UID_LIST_MASK_RANGE;
i++;
}
} else {
/* first byte */
if (num != 0) {
}
/* middle bytes */
/* last byte */
}
}
} else {
prev = 0;
for (i = 0; i < uid_count; i++) {
return -1;
if ((uid & UID_LIST_MASK_RANGE) == 0) {
} else {
uid &= ~UID_LIST_MASK_RANGE;
i++;
}
}
if (uid_list_len > bitmask_len) {
goto bitmask_build;
}
}
size_value = ((uid_list_len +
if (write_size) {
}
if (!datastack)
*size_r = size_value;
return 0;
}
static int
uidlist_write(struct ostream *output, const struct uidlist_list *list,
	      bool write_size, uint32_t *size_r)
{
	const uint32_t *uid_list = list->uid_list;
	unsigned int uid_count = list->uid_count;
	uint32_t packed_flags = 0;
	uint32_t offset = 0;
	int ret;
if (list->uid_begins_with_pointer) {
/* continued UID list */
if ((uid_list[0] & UID_LIST_POINTER_MASK_LIST_IDX) != 0) {
return 0;
}
return -1;
} else {
}
uid_list++;
uid_count--;
}
	T_BEGIN {
		ret = uidlist_write_array(output, uid_list, uid_count,
					  packed_flags, offset,
					  write_size, size_r);
	} T_END;
return ret;
}
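/* Sketch of the "continued list" pointer written by uidlist_write() above.
   The reader side checks the lowest bit of the unpacked number to see
   whether it points to another uidlist index or to a file offset, so the
   writer packs (value << 1) | is_list_idx. The helper name is hypothetical;
   the real code does this inline. */
static void
example_write_continued_pointer(struct ostream *output, uint32_t first_entry)
{
	uint8_t buf[SQUAT_PACK_MAX_SIZE], *bufp = buf;

	if ((first_entry & UID_LIST_POINTER_MASK_LIST_IDX) != 0) {
		/* pointer to an earlier uidlist, by index */
		squat_pack_num(&bufp, ((first_entry &
			~UID_LIST_POINTER_MASK_LIST_IDX) << 1) | 1);
	} else {
		/* pointer to an earlier uidlist, by file offset */
		squat_pack_num(&bufp, first_entry << 1);
	}
	o_stream_send(output, buf, bufp - buf);
}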
static void squat_uidlist_map_blocks_set_pointers(struct squat_uidlist *uidlist)
{
	const void *base;
	size_t end_size;

	base = CONST_PTR_OFFSET(uidlist->data, uidlist->hdr.block_list_offset +
				sizeof(uint32_t));
	end_size = uidlist->hdr.block_list_offset + sizeof(uint32_t) +
		uidlist->cur_block_count * sizeof(uint32_t) * 2;
	if (end_size <= uidlist->data_size) {
		uidlist->cur_block_end_indexes = base;
		uidlist->cur_block_offsets = (const uint32_t *)base +
			uidlist->cur_block_count;
	} else {
		uidlist->cur_block_end_indexes = NULL;
		uidlist->cur_block_offsets = NULL;
	}
}
static int
uidlist_file_cache_read(struct squat_uidlist *uidlist, size_t offset,
			size_t size)
{
	if (uidlist->file_cache == NULL)
		return 0;
	if (file_cache_read(uidlist->file_cache, offset, size) < 0) {
		i_error("read(%s) failed: %m", uidlist->path);
		return -1;
	}
	uidlist->data = file_cache_get_map(uidlist->file_cache,
					   &uidlist->data_size);
	return 0;
}
static int squat_uidlist_map_blocks(struct squat_uidlist *uidlist)
{
	const struct squat_uidlist_file_header *hdr = &uidlist->hdr;
	const void *base;
	uint32_t block_count, i, verify_count;
if (hdr->block_list_offset == 0) {
/* empty file */
uidlist->cur_block_count = 0;
return 1;
}
	/* get number of blocks */
	if (uidlist_file_cache_read(uidlist, hdr->block_list_offset,
				    sizeof(block_count)) < 0)
		return -1;
	if (hdr->block_list_offset + sizeof(block_count) > uidlist->data_size) {
		squat_uidlist_set_corrupted(uidlist, "block list outside file");
		return 0;
	}
	base = CONST_PTR_OFFSET(uidlist->data, hdr->block_list_offset);
	memcpy(&block_count, base, sizeof(block_count));

	/* map the blocks */
	if (uidlist_file_cache_read(uidlist, hdr->block_list_offset,
				    sizeof(block_count) +
				    block_count * sizeof(uint32_t) * 2) < 0)
		return -1;
	uidlist->cur_block_count = block_count;
	squat_uidlist_map_blocks_set_pointers(uidlist);
	if (uidlist->cur_block_end_indexes == NULL) {
		squat_uidlist_set_corrupted(uidlist, "block list outside file");
		return 0;
	}
	/* verify just a couple of the end indexes to make sure they
	   look correct */
	verify_count = I_MIN(block_count, 8);
	for (i = 1; i < verify_count; i++) {
		if (unlikely(uidlist->cur_block_end_indexes[i-1] >=
			     uidlist->cur_block_end_indexes[i])) {
			squat_uidlist_set_corrupted(uidlist,
						    "block list corrupted");
			return 0;
		}
	}
return 1;
}
static int squat_uidlist_map_header(struct squat_uidlist *uidlist)
{
	if (uidlist->hdr.indexid == 0) {
		/* still being built */
		return 1;
	}
	if (uidlist->hdr.indexid != uidlist->trie->hdr.indexid) {
		/* see if trie was recreated */
		squat_uidlist_set_corrupted(uidlist, "wrong indexid");
		return 0;
	}
	if (uidlist->hdr.used_file_size < sizeof(uidlist->hdr) ||
	    (uidlist->hdr.used_file_size > uidlist->mmap_size &&
	     uidlist->mmap_base != NULL)) {
		squat_uidlist_set_corrupted(uidlist, "broken used_file_size");
		return 0;
	}
return squat_uidlist_map_blocks(uidlist);
}
static void squat_uidlist_unmap(struct squat_uidlist *uidlist)
{
	if (uidlist->mmap_base != NULL &&
	    munmap(uidlist->mmap_base, uidlist->mmap_size) < 0)
		i_error("munmap(%s) failed: %m", uidlist->path);
	uidlist->mmap_base = NULL;
	uidlist->mmap_size = 0;
	uidlist->cur_block_count = 0;
}
static int squat_uidlist_mmap(struct squat_uidlist *uidlist)
{
	struct stat st;

	if (fstat(uidlist->fd, &st) < 0) {
		i_error("fstat(%s) failed: %m", uidlist->path);
		return -1;
	}
	if (st.st_size < (off_t)sizeof(uidlist->hdr)) {
		squat_uidlist_set_corrupted(uidlist, "file too small");
		return -1;
	}
	uidlist->mmap_size = st.st_size;
	uidlist->mmap_base = mmap(NULL, uidlist->mmap_size, PROT_READ,
				  MAP_SHARED, uidlist->fd, 0);
	if (uidlist->mmap_base == MAP_FAILED) {
		uidlist->mmap_base = NULL;
		i_error("mmap(%s) failed: %m", uidlist->path);
		return -1;
	}
	uidlist->data = uidlist->mmap_base;
	uidlist->data_size = uidlist->mmap_size;
	return 0;
}
static int squat_uidlist_map(struct squat_uidlist *uidlist)
{
	const struct squat_uidlist_file_header *mmap_hdr = uidlist->mmap_base;
	int ret;

	if (mmap_hdr != NULL && !uidlist->building &&
	    mmap_hdr->used_file_size == uidlist->hdr.used_file_size) {
		/* file hasn't changed */
		return 1;
	}

	if ((uidlist->trie->flags & SQUAT_INDEX_FLAG_MMAP_DISABLE) == 0) {
		if (mmap_hdr == NULL || uidlist->building ||
		    uidlist->mmap_size < mmap_hdr->used_file_size) {
			if (squat_uidlist_mmap(uidlist) < 0)
				return -1;
		}
		if (!uidlist->building) {
			memcpy(&uidlist->hdr, uidlist->mmap_base,
			       sizeof(uidlist->hdr));
		}
	} else if (uidlist->building) {
		/* we want to update blocks mapping, but using the header
		   in memory */
	} else {
		ret = pread_full(uidlist->fd, &uidlist->hdr,
				 sizeof(uidlist->hdr), 0);
		if (ret <= 0) {
			if (ret < 0) {
				i_error("pread(%s) failed: %m", uidlist->path);
				return -1;
			}
			return 0;
		}
	}
return squat_uidlist_map_header(uidlist);
}
static int squat_uidlist_read_to_memory(struct squat_uidlist *uidlist)
{
	size_t i, page_size = mmap_get_page_size();

	if (uidlist->file_cache != NULL) {
		return uidlist_file_cache_read(uidlist, 0,
					       uidlist->hdr.used_file_size);
	}
	/* Tell the kernel we're going to use the uidlist data, so it loads
	   it into memory and keeps it there. */
	(void)madvise(uidlist->mmap_base, uidlist->mmap_size, MADV_WILLNEED);
	/* It also speeds up a bit for us to sequentially load everything
	   into memory, although at least Linux catches up quite fast even
	   without this code. Compiler can quite easily optimize away this
	   entire for loop, but volatile seems to help with gcc 4.2. */
	for (i = 0; i < uidlist->mmap_size; i += page_size)
		((const volatile char *)uidlist->data)[i];
	return 0;
}
static void squat_uidlist_free_from_memory(struct squat_uidlist *uidlist)
{
	if (uidlist->file_cache != NULL) {
		file_cache_invalidate(uidlist->file_cache, 0, (uoff_t)-1);
	} else {
		(void)madvise(uidlist->mmap_base, uidlist->mmap_size,
			      MADV_DONTNEED);
	}
}
struct squat_uidlist *squat_uidlist_init(struct squat_trie *trie)
{
	struct squat_uidlist *uidlist;

	uidlist = i_new(struct squat_uidlist, 1);
	uidlist->trie = trie;
	uidlist->path = i_strconcat(trie->path, ".uids", NULL);
	uidlist->fd = -1;
	return uidlist;
}
void squat_uidlist_deinit(struct squat_uidlist *uidlist)
{
	i_free(uidlist->path);
	i_free(uidlist);
}
static int squat_uidlist_open(struct squat_uidlist *uidlist)
{
	uidlist->fd = open(uidlist->path, O_RDWR);
	if (uidlist->fd == -1) {
		if (errno == ENOENT)
			return 0;
		i_error("open(%s) failed: %m", uidlist->path);
		return -1;
	}
	return squat_uidlist_map(uidlist) <= 0 ? -1 : 0;
}
static void squat_uidlist_close(struct squat_uidlist *uidlist)
{
	squat_uidlist_unmap(uidlist);
	if (uidlist->fd != -1 && close(uidlist->fd) < 0)
		i_error("close(%s) failed: %m", uidlist->path);
	uidlist->fd = -1;
}
int squat_uidlist_refresh(struct squat_uidlist *uidlist)
{
	/* we assume here that trie is locked, so that we don't need to worry
	   about it when reading the header */
	if (uidlist->fd == -1) {
		if (squat_uidlist_open(uidlist) < 0)
			return -1;
	} else {
if (squat_uidlist_map(uidlist) <= 0)
return -1;
}
return 0;
}
static int squat_uidlist_is_file_stale(struct squat_uidlist *uidlist)
{
	struct stat st, st2;

	if (stat(uidlist->path, &st) < 0) {
		if (errno == ENOENT)
			return 1;
		i_error("stat(%s) failed: %m", uidlist->path);
		return -1;
	}
	if (fstat(uidlist->fd, &st2) < 0) {
		i_error("fstat(%s) failed: %m", uidlist->path);
		return -1;
	}
	uidlist->locked_file_size = st2.st_size;
	return st.st_ino == st2.st_ino && st.st_dev == st2.st_dev ? 0 : 1;
}
static int squat_uidlist_lock(struct squat_uidlist *uidlist)
{
	int ret;
for (;;) {
} else {
}
if (ret == 0) {
i_error("squat uidlist %s: Locking timed out",
return 0;
}
if (ret < 0)
return -1;
if (ret == 0)
break;
else
if (ret < 0)
return -1;
return -1;
}
return 1;
}
static int squat_uidlist_open_or_create(struct squat_uidlist *uidlist)
{
	int ret;

	if (uidlist->fd == -1) {
		uidlist->fd = open(uidlist->path, O_RDWR | O_CREAT, 0600);
		if (uidlist->fd == -1) {
			i_error("creat(%s) failed: %m", uidlist->path);
			return -1;
		}
	}
	if (squat_uidlist_lock(uidlist) <= 0)
		return -1;

	if (uidlist->locked_file_size != 0) {
		if ((ret = squat_uidlist_map(uidlist)) < 0)
			return -1;
		if (ret == 0) {
			/* broken file, truncate */
			if (ftruncate(uidlist->fd, 0) < 0) {
				i_error("ftruncate(%s) failed: %m",
					uidlist->path);
				return -1;
			}
			uidlist->locked_file_size = 0;
		}
	}
	if (uidlist->locked_file_size == 0) {
		struct squat_uidlist_file_header hdr;

		/* write using 0 until we're finished */
		memset(&hdr, 0, sizeof(hdr));
		if (write_full(uidlist->fd, &hdr, sizeof(hdr)) < 0) {
			i_error("write(%s) failed: %m", uidlist->path);
			return -1;
		}
	}
	return 0;
}
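/* The "write using 0 until we're finished" trick above leaves a zeroed header
   marking the file as unfinished; the real header is written over it only
   after the build completes. A minimal sketch of that finalize step, using a
   hypothetical helper name and the pwrite_full() call from write-full.h: */
static int example_finalize_header(int fd,
				   const struct squat_uidlist_file_header *hdr)
{
	/* overwrite the zero header at offset 0 once all data is on disk */
	if (pwrite_full(fd, hdr, sizeof(*hdr), 0) < 0)
		return -1;
	return 0;
}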
int squat_uidlist_build_init(struct squat_uidlist *uidlist,
			     struct squat_uidlist_build_context **ctx_r)
{
	struct squat_uidlist_build_context *ctx;
	int ret;

	ret = squat_uidlist_open_or_create(uidlist);
	if (ret == 0 &&
	    lseek(uidlist->fd, uidlist->hdr.used_file_size, SEEK_SET) < 0) {
		i_error("lseek(%s) failed: %m", uidlist->path);
		ret = -1;
	}
	if (ret < 0) {
		squat_uidlist_close(uidlist);
		return -1;
	}
	ctx = i_new(struct squat_uidlist_build_context, 1);
	ctx->uidlist = uidlist;
	ctx->output = o_stream_create_fd(uidlist->fd, 0, FALSE);
	if (ctx->output->offset == 0) {
		struct squat_uidlist_file_header hdr;

		/* reserve space for the header; it's rewritten at the end */
		memset(&hdr, 0, sizeof(hdr));
		o_stream_send(ctx->output, &hdr, sizeof(hdr));
	}
	i_array_init(&ctx->lists, 1024);
	i_array_init(&ctx->block_offsets, 128);
	i_array_init(&ctx->block_end_indexes, 128);
	ctx->build_hdr = uidlist->hdr;
	*ctx_r = ctx;
	return 0;
}
static void
uidlist_write_block_list_and_header(struct squat_uidlist_build_context *ctx,
				    struct ostream *output,
				    ARRAY_TYPE(uint32_t) *block_offsets,
				    ARRAY_TYPE(uint32_t) *block_end_indexes,
				    bool write_old_blocks)
{
	struct squat_uidlist *uidlist = ctx->uidlist;
	unsigned int align, old_block_count, new_block_count;
	uint32_t block_offset_count;
	uoff_t block_list_offset;

	if (array_count(block_end_indexes) == 0) {
		ctx->build_hdr.block_list_offset = 0;
		return;
	}
	old_block_count = write_old_blocks ? uidlist->cur_block_count : 0;
	new_block_count = array_count(block_offsets);

	/* keep the block list 32bit aligned */
	align = (output->offset + sizeof(block_offset_count)) % sizeof(uint32_t);
	if (align != 0) {
		static const char null[3] = { 0, };

		o_stream_send(output, null, sizeof(uint32_t) - align);
	}
	block_list_offset = output->offset;
	block_offset_count = old_block_count + new_block_count;
	o_stream_send(output, &block_offset_count, sizeof(block_offset_count));
	/* write end indexes */
	o_stream_send(output, uidlist->cur_block_end_indexes,
		      old_block_count * sizeof(uint32_t));
	o_stream_send(output, array_idx(block_end_indexes, 0),
		      new_block_count * sizeof(uint32_t));
	/* write offsets */
	o_stream_send(output, uidlist->cur_block_offsets,
		      old_block_count * sizeof(uint32_t));
	o_stream_send(output, array_idx(block_offsets, 0),
		      new_block_count * sizeof(uint32_t));
	(void)o_stream_flush(output);

	/* update header - it's written later when trie is locked */
	ctx->build_hdr.block_list_offset = block_list_offset;
	ctx->build_hdr.used_file_size = output->offset;
}
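/* On-disk layout produced above (count first, then end indexes, then
   offsets), shown as a plain-C reader sketch. This assumes a fully mapped
   buffer and omits bounds checks for brevity; the struct and helper are
   hypothetical illustrations, not part of the file format code. */
struct example_block_list {
	uint32_t count;
	const uint32_t *end_indexes;	/* count entries */
	const uint32_t *offsets;	/* count entries */
};
static void example_parse_block_list(const void *data,
				     struct example_block_list *bl)
{
	const uint32_t *p = data;

	bl->count = p[0];
	bl->end_indexes = p + 1;
	bl->offsets = p + 1 + bl->count;
}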
static void squat_uidlist_build_flush(struct squat_uidlist_build_context *ctx)
{
	struct uidlist_list *lists;
	uint8_t buf[SQUAT_PACK_MAX_SIZE], *bufp;
	uint32_t list_sizes[UIDLIST_BLOCK_LIST_COUNT];
	uint32_t block_offset, block_end_idx;
	unsigned int i, j, count, max;

	if (ctx->uidlist->corrupted)
		return;
	lists = array_get_modifiable(&ctx->lists, &count);
	if (count == 0)
		return;

	/* write the lists and save the written sizes to uid_list[0] */
	for (i = 0; i < count; i += UIDLIST_BLOCK_LIST_COUNT) {
		max = I_MIN(count - i, UIDLIST_BLOCK_LIST_COUNT);
		for (j = 0; j < max; j++) {
			if (uidlist_write(ctx->output, &lists[i+j],
					  FALSE, &list_sizes[j]) < 0) {
				squat_uidlist_set_corrupted(ctx->uidlist,
							    "Broken uidlists");
				return;
			}
		}
		block_offset = ctx->output->offset;
		block_end_idx = i + max;
		array_append(&ctx->block_offsets, &block_offset, 1);
		array_append(&ctx->block_end_indexes, &block_end_idx, 1);
		/* write the full size of the uidlists */
		for (j = 0; j < max; j++) {
			bufp = buf;
			squat_pack_num(&bufp, list_sizes[j]);
			o_stream_send(ctx->output, buf, bufp - buf);
		}
	}
	array_clear(&ctx->lists);
	uidlist_write_block_list_and_header(ctx, ctx->output,
					    &ctx->block_offsets,
					    &ctx->block_end_indexes, TRUE);
}
int squat_uidlist_build_finish(struct squat_uidlist_build_context *ctx)
{
	if (ctx->uidlist->corrupted)
		return -1;

	squat_uidlist_build_flush(ctx);
	if (o_stream_flush(ctx->output) < 0) {
		i_error("write(%s) failed: %m", ctx->uidlist->path);
		return -1;
	}
	return 0;
}
void squat_uidlist_build_deinit(struct squat_uidlist_build_context **_ctx)
{
	struct squat_uidlist_build_context *ctx = *_ctx;

	*_ctx = NULL;
	o_stream_destroy(&ctx->output);
	if (ctx->need_reopen)
		(void)squat_uidlist_refresh(ctx->uidlist);
	i_free(ctx);
}
int squat_uidlist_rebuild_init(struct squat_uidlist_build_context *build_ctx,
			       bool compress,
			       struct squat_uidlist_rebuild_context **ctx_r)
{
	struct squat_uidlist_rebuild_context *ctx;
	struct squat_uidlist_file_header hdr;
	const char *temp_path;
	int fd;

	if (build_ctx->build_hdr.link_count == 0)
		return 0;
	if (!compress) {
		if (build_ctx->build_hdr.link_count <
		    build_ctx->build_hdr.count*2/3)
			return 0;
	}
	/* make sure the entire uidlist is in memory before beginning,
	   otherwise the pages are faulted to memory in random order which
	   takes forever. */
	if (squat_uidlist_read_to_memory(build_ctx->uidlist) < 0)
		return -1;

	temp_path = t_strconcat(build_ctx->uidlist->path, ".tmp", NULL);
	fd = open(temp_path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd == -1)
		return -1;

	ctx = i_new(struct squat_uidlist_rebuild_context, 1);
	ctx->uidlist = build_ctx->uidlist;
	ctx->build_ctx = build_ctx;
	ctx->fd = fd;
	ctx->output = o_stream_create_fd(ctx->fd, 0, FALSE);
	memset(&hdr, 0, sizeof(hdr));
	o_stream_send(ctx->output, &hdr, sizeof(hdr));
	ctx->cur_block_start_offset = ctx->output->offset;
	i_array_init(&ctx->new_block_offsets, 128);
	i_array_init(&ctx->new_block_end_indexes, 128);
	*ctx_r = ctx;
	return 1;
}
static void
uidlist_rebuild_flush_block(struct squat_uidlist_rebuild_context *ctx)
{
	uint8_t buf[SQUAT_PACK_MAX_SIZE], *bufp = buf;
	uint32_t block_offset, block_end_idx;

	/* this block's contents started from cur_block_start_offset and
	   ended to current offset. write the size of this area. */
	squat_pack_num(&bufp, (uint32_t)(ctx->output->offset -
					 ctx->cur_block_start_offset));
	o_stream_send(ctx->output, buf, bufp - buf);

	block_offset = ctx->output->offset;
	block_end_idx = ctx->list_idx;
	array_append(&ctx->new_block_offsets, &block_offset, 1);
	array_append(&ctx->new_block_end_indexes, &block_end_idx, 1);
	ctx->cur_block_start_offset = ctx->output->offset;
}
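/* The sizes above are stored with squat_pack_num(), a variable-length
   integer encoding. A standalone sketch of the same general idea, 7 bits
   per byte with a continuation bit; this is a hypothetical helper and not
   necessarily the exact bit order the squat-trie implementation uses: */
static unsigned int example_pack_num(uint8_t *buf, uint32_t num)
{
	unsigned int len = 0;

	while (num >= 0x80) {
		buf[len++] = (num & 0x7f) | 0x80;
		num >>= 7;
	}
	buf[len++] = num;
	return len;
}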
uint32_t squat_uidlist_rebuild_next(struct squat_uidlist_rebuild_context *ctx,
				    const ARRAY_TYPE(uint32_t) *uids)
{
	int ret;

	T_BEGIN {
		ret = uidlist_write_array(ctx->output, array_idx(uids, 0),
					  array_count(uids), 0, 0,
					  FALSE, NULL);
	} T_END;
	if (ret < 0)
		squat_uidlist_set_corrupted(ctx->uidlist, "Broken uidlists");
	ctx->list_idx++;
	if (ctx->list_idx % UIDLIST_BLOCK_LIST_COUNT == 0)
		uidlist_rebuild_flush_block(ctx);
	return ctx->list_idx;
}
uint32_t squat_uidlist_rebuild_nextu(struct squat_uidlist_rebuild_context *ctx,
				     const ARRAY_TYPE(seq_range) *uids)
{
	const struct seq_range *range;
	ARRAY_TYPE(uint32_t) tmp_uids;
	uint32_t ret, seq;
	unsigned int i, count;

	range = array_get(uids, &count);
	if (count == 0)
		return 0;

	if (range[count-1].seq2 < 8) {
		/* we can use a singleton bitmask */
		ret = 0;
		for (i = 0; i < count; i++) {
			for (seq = range[i].seq1; seq <= range[i].seq2; seq++)
				ret |= 1 << (seq + 1);
		}
		return ret;
	}
	if (count == 1 && range[0].seq1 == range[0].seq2) {
		/* single UID */
		return (range[0].seq1 << 1) | 1;
	}

	/* convert seq range to our internal representation and use the
	   normal _rebuild_next() to write it */
	t_array_init(&tmp_uids, count*2);
	for (i = 0; i < count; i++) {
		if (range[i].seq1 == range[i].seq2)
			array_append(&tmp_uids, &range[i].seq1, 1);
		else {
			seq = range[i].seq1 | UID_LIST_MASK_RANGE;
			array_append(&tmp_uids, &seq, 1);
			array_append(&tmp_uids, &range[i].seq2, 1);
		}
	}
	ret = squat_uidlist_rebuild_next(ctx, &tmp_uids);
	return ret;
}
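/* Reverse of the conversion done above: expanding the internal uid_list
   entries back into (seq1, seq2) pairs. An entry with UID_LIST_MASK_RANGE
   set starts a range whose end is the following entry; anything else is a
   single UID. Hypothetical helper for illustration: */
static void example_expand_uid_list(const uint32_t *uid_list,
				    unsigned int count,
				    void (*emit)(uint32_t seq1, uint32_t seq2,
						 void *context),
				    void *context)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if ((uid_list[i] & UID_LIST_MASK_RANGE) == 0)
			emit(uid_list[i], uid_list[i], context);
		else {
			emit(uid_list[i] & ~UID_LIST_MASK_RANGE,
			     uid_list[i+1], context);
			i++;
		}
	}
}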
int squat_uidlist_rebuild_finish(struct squat_uidlist_rebuild_context *ctx,
				 bool cancel)
{
	const char *temp_path;
	int ret = 1;

	temp_path = t_strconcat(ctx->uidlist->path, ".tmp", NULL);
	if (ctx->uidlist->corrupted || cancel)
		ret = 0;
	if (ret > 0) {
		uidlist_rebuild_flush_block(ctx);
		uidlist_write_block_list_and_header(ctx->build_ctx,
						    ctx->output,
						    &ctx->new_block_offsets,
						    &ctx->new_block_end_indexes,
						    FALSE);
		if (o_stream_flush(ctx->output) < 0)
			ret = -1;
		if (ret > 0 && fsync(ctx->fd) < 0)
			ret = -1;
		if (ret > 0 && rename(temp_path, ctx->uidlist->path) < 0) {
			i_error("rename(%s, %s) failed: %m",
				temp_path, ctx->uidlist->path);
			ret = -1;
		}
	}
	/* we no longer require the entire uidlist to be in memory,
	   let it be used for something more useful. */
	squat_uidlist_free_from_memory(ctx->uidlist);

	o_stream_destroy(&ctx->output);
	if (ret <= 0) {
		(void)unlink(temp_path);
	}
	array_free(&ctx->new_block_offsets);
	array_free(&ctx->new_block_end_indexes);
	i_free(ctx);
	return ret < 0 ? -1 : 0;
}
static void
{
}
static struct uidlist_list *
uidlist_add_new(struct squat_uidlist_build_context *ctx, unsigned int count,
		uint32_t *idx_r)
{
	struct uidlist_list *list;

	*idx_r = ctx->build_hdr.count++;
	list = array_append_space(&ctx->lists);
	list->uid_count = count;
	return list;
}
uint32_t squat_uidlist_build_add_uid(struct squat_uidlist_build_context *ctx,
				     uint32_t uid_list_idx, uint32_t uid)
{
	unsigned int idx, mask;
	uint32_t old_list_idx;
struct uidlist_list *list;
uint32_t *p;
if ((uid_list_idx & 1) != 0) {
/* adding second UID */
return uid_list_idx;
		if (uid < 8) {
			/* UID lists containing only UIDs 0-7 are saved as
			   uidlist values 2..511. think of it as a bitmask. */
			uid_list_idx = (1 << (uid + 1)) |
				(1 << ((uid_list_idx >> 1) + 1));
			return uid_list_idx;
		}
	if (uid_list_idx == 0) {
		/* first UID */
		return (uid << 1) | 1;
	}
/* create a new list */
/* add the first UID ourself */
idx = 0;
if ((old_list_idx & mask) != 0) {
break;
}
}
if ((old_list_idx & mask) != 0) {
uid_list_idx, idx);
}
}
}
/* add to existing list */
return uid_list_idx;
}
return 0;
}
if (uid == *p + 1 &&
/* use a range */
/* increase the existing range */
*p += 1;
return uid_list_idx;
}
return uid_list_idx;
}
/* create a new range */
*p |= UID_LIST_MASK_RANGE;
} else {
return uid_list_idx;
}
}
p++;
*p = uid;
return uid_list_idx;
}
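/* The singleton encodings used by the build code above: odd values store a
   single UID shifted left by one, and even values 2..511 are a bitmask of
   UIDs 0-7 where UID u maps to bit (u+1). Illustrative encode helpers with
   hypothetical names: */
static uint32_t example_singleton_encode_uid(uint32_t uid)
{
	return (uid << 1) | 1;
}
static uint32_t example_bitmask_encode_uid(uint32_t uid)
{
	i_assert(uid < 8);
	return 1 << (uid + 1);
}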
static void uidlist_array_append(ARRAY_TYPE(uint32_t) *uids, uint32_t uid)
{
	uint32_t *p;
	unsigned int count;

	p = array_get_modifiable(uids, &count);
	if (count == 0) {
		array_append(uids, &uid, 1);
		return;
	}
	p += count - 1;
	if (*p + 1 == uid && count > 1 &&
	    (p[-1] & UID_LIST_MASK_RANGE) != 0) {
		/* extend the last range */
		*p += 1;
		return;
	}
	array_append(uids, &uid, 1);
}
static void uidlist_array_append_range(ARRAY_TYPE(uint32_t) *uids,
				       uint32_t uid1, uint32_t uid2)
{
	uint32_t *p;
	unsigned int count;

	p = array_get_modifiable(uids, &count);
	if (count == 0) {
		uid1 |= UID_LIST_MASK_RANGE;
		array_append(uids, &uid1, 1);
		array_append(uids, &uid2, 1);
		return;
	}
	p += count - 1;
	if (*p + 1 == uid1 && count > 1 &&
	    (p[-1] & UID_LIST_MASK_RANGE) != 0) {
		/* extend the last range */
		*p = uid2;
	} else {
		uid1 |= UID_LIST_MASK_RANGE;
		array_append(uids, &uid1, 1);
		array_append(uids, &uid2, 1);
	}
}
static int
squat_uidlist_get_at_offset(struct squat_uidlist *uidlist, uoff_t offset,
			    uint32_t num, ARRAY_TYPE(uint32_t) *uids)
{
	const uint8_t *p, *end;
	uint32_t size, base_uid, next_uid, flags, prev;
	unsigned int i, j, count;
if (num != 0)
else {
/* not given, read it */
SQUAT_PACK_MAX_SIZE) < 0)
return -1;
}
return -1;
"size points outside file");
return -1;
}
if ((flags & UIDLIST_PACKED_FLAG_BEGINS_WITH_POINTER) != 0) {
/* link to the file */
if ((prev & 1) != 0) {
/* pointer to uidlist */
return -1;
} else {
0, uids) < 0)
return -1;
}
} else {
next_uid = 0;
}
if ((flags & UIDLIST_PACKED_FLAG_BITMASK) == 0)
base_uid >>= 1;
"broken continued uidlist");
return -1;
}
if ((flags & UIDLIST_PACKED_FLAG_BITMASK) != 0) {
/* bitmask */
for (i = 0; i < size; i++) {
for (j = 0; j < 8; j++, base_uid++) {
				if ((p[i] & (1 << j)) != 0)
					uidlist_array_append(uids, base_uid);
}
}
} else {
/* range */
		for (;;) {
			num = squat_unpack_num(&p, end);
			base_uid += num >> 1;
			if ((num & 1) == 0) {
				uidlist_array_append(uids, base_uid);
			} else {
				/* range */
				num = squat_unpack_num(&p, end);
				uidlist_array_append_range(uids, base_uid,
							   base_uid + num);
				base_uid += num;
			}
if (p == end)
break;
}
}
return 0;
}
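/* Encoder counterpart of the bitmask decoding loop above: UIDs relative to
   base_uid are stored 8 per byte, lowest bit first. Hypothetical helper for
   illustration only: */
static void example_encode_bitmask(uint8_t *data, size_t size,
				   uint32_t base_uid,
				   const uint32_t *uids, unsigned int count)
{
	unsigned int i;

	memset(data, 0, size);
	for (i = 0; i < count; i++) {
		uint32_t rel = uids[i] - base_uid;

		if (rel / 8 < size)
			data[rel / 8] |= 1 << (rel % 8);
	}
}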
{
}
static int
squat_uidlist_get_offset(struct squat_uidlist *uidlist, uint32_t uid_list_idx,
			 uint32_t *offset_r, uint32_t *num_r)
{
	const uint8_t *p, *end;
	unsigned int idx;
return -1;
}
idx++;
return -1;
}
return -1;
}
/* make sure everything is mapped */
max_map_size) < 0)
return -1;
/* find the uidlist inside the block */
squat_unpack_num(&p, end);
}
return -1;
}
return -1;
}
return 0;
}
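/* Locating the block that contains a given uidlist index, as in the "find
   the uidlist inside the block" step above: cur_block_end_indexes is sorted,
   so a binary search finds the first block whose end index covers the wanted
   list index. A plain-C sketch with a hypothetical name (the real code can
   use bsearch_insert_pos() from bsearch-insert-pos.h): */
static unsigned int
example_find_block(const uint32_t *end_indexes, unsigned int count,
		   uint32_t list_idx)
{
	unsigned int lo = 0, hi = count;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (end_indexes[mid] <= list_idx)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;	/* == count if not found */
}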
int squat_uidlist_get(struct squat_uidlist *uidlist, uint32_t uid_list_idx,
		      ARRAY_TYPE(uint32_t) *uids)
{
	unsigned int mask;
	uint32_t uid, offset, num = 0;

	if ((uid_list_idx & 1) != 0) {
		/* single UID */
		uidlist_array_append(uids, uid_list_idx >> 1);
		return 0;
	} else if (uid_list_idx < (1 << 9)) {
		/* bitmask */
		for (uid = 0, mask = 2; uid < 8; mask <<= 1, uid++) {
			if ((uid_list_idx & mask) != 0)
				uidlist_array_append(uids, uid);
		}
		return 0;
	}

	if (squat_uidlist_get_offset(uidlist, uid_list_idx, &offset, &num) < 0)
		return -1;
	return squat_uidlist_get_at_offset(uidlist, offset, num, uids);
}
uint32_t squat_uidlist_singleton_last_uid(uint32_t uid_list_idx)
{
	unsigned int idx, mask;

	if ((uid_list_idx & 1) != 0) {
		/* single UID */
		return uid_list_idx >> 1;
	} else if (uid_list_idx < (1 << 9)) {
		/* bitmask */
		if (uid_list_idx == 2) {
			/* just a quick optimization */
			return 0;
		}
		for (idx = 7, mask = 1 << 8; mask > 1; mask >>= 1, idx--) {
			if ((uid_list_idx & mask) != 0)
				return idx;
		}
	}
	i_unreached();
	return 0;
}
int squat_uidlist_get_seqrange(struct squat_uidlist *uidlist,
			       uint32_t uid_list_idx,
			       ARRAY_TYPE(seq_range) *seq_range_arr)
{
	ARRAY_TYPE(uint32_t) tmp_uid_arr;
	const uint32_t *tmp_uids;
	unsigned int i, count;
	int ret;

	t_array_init(&tmp_uid_arr, 128);
	ret = squat_uidlist_get(uidlist, uid_list_idx, &tmp_uid_arr);
	if (ret == 0) {
		tmp_uids = array_get(&tmp_uid_arr, &count);
		for (i = 0; i < count; i++) {
			if ((tmp_uids[i] & UID_LIST_MASK_RANGE) == 0)
				seq_range_array_add_range(seq_range_arr,
					tmp_uids[i], tmp_uids[i]);
			else {
				seq_range_array_add_range(seq_range_arr,
					tmp_uids[i] & ~UID_LIST_MASK_RANGE,
					tmp_uids[i+1]);
				i++;
			}
		}
	}
	return ret;
}
{
const struct seq_range *parent_range;
int ret = 0;
if (parent_count == 0)
return 0;
parent_idx = 0;
for (i = 0; i < rel_count; i++) {
i_error("broken UID ranges");
ret = -1;
break;
}
if ((rel_range[i] & UID_LIST_MASK_RANGE) == 0)
else {
}
while (diff > 0) {
i_error("broken UID ranges");
ret = -1;
break;
}
continue;
else
parent_uid++;
break;
}
diff--;
}
while (diff > 0) {
i_error("broken UID ranges");
ret = -1;
break;
}
continue;
else
parent_uid++;
break;
}
diff--;
}
}
return ret;
}
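/* The loops above map "relative" UIDs to real UIDs by walking the parent's
   seq_range list: a relative value N means the N'th UID contained in the
   parent ranges. A compact sketch of that walk, using a hypothetical helper
   that returns (uint32_t)-1 when the parent list is too short ("broken UID
   ranges" in the errors above): */
static uint32_t example_rel_to_parent_uid(const struct seq_range *parent_range,
					  unsigned int parent_count,
					  uint32_t rel_idx)
{
	unsigned int i;

	for (i = 0; i < parent_count; i++) {
		uint32_t len = parent_range[i].seq2 - parent_range[i].seq1 + 1;

		if (rel_idx < len)
			return parent_range[i].seq1 + rel_idx;
		rel_idx -= len;
	}
	return (uint32_t)-1;	/* broken UID ranges */
}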
size_t squat_uidlist_mem_used(struct squat_uidlist *uidlist,
			      unsigned int *count_r)
{
	*count_r = uidlist->hdr.count;
	return uidlist->hdr.used_file_size;
}