/* hashmap.c, revision 3d4db144b073f5a03e9c679af3c90433f096649b */
/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
/***
This file is part of systemd.
Copyright 2010 Lennart Poettering
Copyright 2014 Michal Schmidt
systemd is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
systemd is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "alloc-util.h"
#include "hashmap.h"
#include "macro.h"
#include "mempool.h"
#include "process-util.h"
#include "random-util.h"
#include "set.h"
#include "siphash24.h"
#include "strv.h"
#include "util.h"
#ifdef ENABLE_DEBUG_HASHMAP
#include <pthread.h>
#include "list.h"
#endif
/*
 * Implementation of hashmaps.
 * Addressing: open
 *   - uses less RAM compared to closed addressing (chaining), because
 *     our entries are small (especially in Sets, which tend to contain
 *     the majority of entries in systemd).
 * Collision resolution: Robin Hood
 *   - tends to equalize displacement of entries from their optimal buckets.
 * Probe sequence: linear
 *   - though theoretically worse than random probing/uniform hashing/double
 *     hashing, it is good for cache locality.
 *
 * References:
 * Celis, P. 1986. Robin Hood Hashing.
 * Ph.D. Dissertation. University of Waterloo, Waterloo, Ont., Canada.
 * - The results are derived for random probing. Suggests deletion with
 *   tombstones and two mean-centered search methods. None of that works
 *   well for linear probing.
 *
 * Janson, S. 2005. Individual displacements for linear probing hashing with different insertion policies.
 * ACM Trans. Algorithms 1, 2 (October 2005), 177-213.
 * DOI=10.1145/1103963.1103964 http://doi.acm.org/10.1145/1103963.1103964
 * - Applies to Robin Hood with linear probing. Contains remarks on
 *   the unsuitability of mean-centered search with linear probing.
 *
 * Viola, A. 2005. Exact distribution of individual displacements in linear probing hashing.
 * ACM Trans. Algorithms 1, 2 (October 2005), 214-242.
 * DOI=10.1145/1103963.1103965 http://doi.acm.org/10.1145/1103963.1103965
 * - Similar to Janson. Note that Viola writes about C_{m,n} (number of probes
 *   in a successful search), and Janson writes about displacement. C = d + 1.
 *
 * Goossaert, E. 2013. Robin Hood hashing: backward shift deletion.
 * - Explanation of backward shift deletion with pictures.
 *
 * Khuong, P. 2013. The Other Robin Hood Hashing.
 * - Short summary of random vs. linear probing, and tombstones vs. backward shift.
 */
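/*
 * Minimal, self-contained sketch of the Robin Hood rule with linear probing,
 * as described above. This is illustration only and intentionally independent
 * of the real data structures in this file; all names (rh_table, rh_insert,
 * RH_BUCKETS, ...) are hypothetical. The key idea: on every probe, compare the
 * displacement (DIB) of the entry being inserted with the displacement of the
 * entry already occupying the bucket, and let the "richer" entry (smaller DIB)
 * move on. Duplicate-key handling and resizing are omitted.
 */
#if 0 /* illustration only, not compiled */
#include <stdbool.h>

#define RH_BUCKETS 8U  /* toy table size; must exceed the number of stored entries */

struct rh_entry {
        const char *key;  /* NULL means the bucket is free */
        unsigned dib;     /* distance from the key's optimal ("initial") bucket */
};

struct rh_table {
        struct rh_entry buckets[RH_BUCKETS];
};

static unsigned rh_hash(const char *key) {
        unsigned h = 5381;  /* simple djb2-style hash, standing in for siphash24 */
        for (; *key; key++)
                h = h * 33 + (unsigned char) *key;
        return h % RH_BUCKETS;
}

static bool rh_insert(struct rh_table *t, const char *key) {
        struct rh_entry insert = { .key = key, .dib = 0 };
        unsigned idx = rh_hash(key);
        unsigned attempts;

        for (attempts = 0; attempts < RH_BUCKETS;
             attempts++, idx = (idx + 1) % RH_BUCKETS, insert.dib++) {
                struct rh_entry *e = &t->buckets[idx];

                if (!e->key) {
                        /* Free bucket: place whatever entry we are currently carrying. */
                        *e = insert;
                        return true;
                }
                if (e->dib < insert.dib) {
                        /* Found a wealthier entry: go Robin Hood! Swap, then keep
                         * inserting the displaced entry further to the right. */
                        struct rh_entry tmp = *e;
                        *e = insert;
                        insert = tmp;
                }
        }
        return false;  /* table is full */
}
#endif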
/*
* XXX Ideas for improvement:
* For unordered hashmaps, randomize iteration order, similarly to Perl:
*/
/* INV_KEEP_FREE = 1 / (1 - max_load_factor)
* e.g. 1 / (1 - 0.8) = 5 ... keep one fifth of the buckets free. */
#define INV_KEEP_FREE 5U
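/*
 * Worked example of the relation above (hypothetical helper, not part of this
 * file's API): with INV_KEEP_FREE == 5 the maximum load factor is 1 - 1/5 = 0.8,
 * so holding n entries wants at least m = n + n/(INV_KEEP_FREE - 1) buckets,
 * e.g. n = 100 entries -> m = 100 + 25 = 125 buckets. The real resize code
 * additionally rounds the bucket count up to a power of two and checks for
 * overflow.
 */
#if 0 /* illustration only, not compiled */
static unsigned min_buckets_for(unsigned n_entries_wanted) {
        /* m = n + n/(INV_KEEP_FREE - 1), i.e. keep 1/INV_KEEP_FREE of the buckets free */
        return n_entries_wanted + n_entries_wanted / (INV_KEEP_FREE - 1U);
}
#endif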
struct hashmap_base_entry {
const void *key;
};
/* hashmap_base_entry must be at the beginning of each entry struct. */
struct plain_hashmap_entry {
struct hashmap_base_entry b;
void *value;
};
struct ordered_hashmap_entry {
struct plain_hashmap_entry p;
unsigned iterate_next, iterate_previous;
};
struct set_entry {
struct hashmap_base_entry b;
};
/* In several functions it is advantageous to have the hash table extended
* virtually by a couple of additional buckets. We reserve special index values
* for these "swap" buckets. */
#define _IDX_SWAP_BEGIN     (UINT_MAX - 3)
#define IDX_PUT             (_IDX_SWAP_BEGIN + 0)
#define IDX_TMP             (_IDX_SWAP_BEGIN + 1)
#define _IDX_SWAP_END       (_IDX_SWAP_BEGIN + 2)

#define IDX_FIRST           (UINT_MAX - 1) /* special index for freshly initialized iterators */
#define IDX_NIL             UINT_MAX       /* special index value meaning "none" or "end" */
/* Storage space for the "swap" buckets.
 * All entry types can fit into an ordered_hashmap_entry. */
struct swap_entries {
        struct ordered_hashmap_entry e[_IDX_SWAP_END - _IDX_SWAP_BEGIN];
};
/* Distance from Initial Bucket */
typedef uint8_t dib_raw_t;
#define DIB_RAW_OVERFLOW ((dib_raw_t)0xfdU)   /* indicates DIB value is greater than representable */
#define DIB_RAW_REHASH   ((dib_raw_t)0xfeU)   /* entry yet to be rehashed during in-place resizing */
#define DIB_RAW_FREE     ((dib_raw_t)0xffU)   /* a free bucket */
#define DIB_RAW_INIT     ((char)DIB_RAW_FREE) /* a byte to memset a DIB store with when initializing */

#define DIB_FREE UINT_MAX
#ifdef ENABLE_DEBUG_HASHMAP
struct hashmap_debug_info {
unsigned max_entries; /* high watermark of n_entries */
/* who allocated this hashmap */
int line;
const char *file;
const char *func;
/* fields to detect modification while iterating */
unsigned put_count; /* counts puts into the hashmap */
unsigned rem_count; /* counts removals from hashmap */
unsigned last_rem_idx; /* remembers last removal index */
};
/* Tracks all existing hashmaps. Get at it from gdb. See sd_dump_hashmaps.py */

#define HASHMAP_DEBUG_FIELDS struct hashmap_debug_info debug;
#else /* !ENABLE_DEBUG_HASHMAP */
#define HASHMAP_DEBUG_FIELDS
#endif /* ENABLE_DEBUG_HASHMAP */
enum HashmapType {
        HASHMAP_TYPE_PLAIN,
        HASHMAP_TYPE_ORDERED,
        HASHMAP_TYPE_SET,
        _HASHMAP_TYPE_MAX
};
struct _packed_ indirect_storage {
        char *storage;                     /* where buckets and DIBs are stored */
        uint8_t  hash_key[HASH_KEY_SIZE];  /* hash key; changes during resize */
        unsigned n_entries;                /* number of stored entries */
        unsigned n_buckets;                /* number of buckets */
        unsigned idx_lowest_entry;         /* Index below which all buckets are free.
                                              Makes "while(hashmap_steal_first())" loops
                                              O(n) instead of O(n^2) for unordered hashmaps. */
        uint8_t  _pad[3];                  /* padding for the whole HashmapBase */
        /* The bitfields in HashmapBase complete the alignment of the whole thing. */
};
struct direct_storage {
/* This gives us 39 bytes on 64bit, or 35 bytes on 32bit.
* That's room for 4 set_entries + 4 DIB bytes + 3 unused bytes on 64bit,
* or 7 set_entries + 7 DIB bytes + 0 unused bytes on 32bit. */
char storage[sizeof(struct indirect_storage)];
};
#define DIRECT_BUCKETS(entry_t) \
        (sizeof(struct direct_storage) / (sizeof(entry_t) + sizeof(dib_raw_t)))

/* We should be able to store at least one entry directly. */
assert_cc(DIRECT_BUCKETS(struct ordered_hashmap_entry) >= 1);

/* We have 3 bits for n_direct_entries. */
assert_cc(DIRECT_BUCKETS(struct set_entry) < (1 << 3));
/* Hashmaps with directly stored entries all use this shared hash key.
* It's no big deal if the key is guessed, because there can be only
* a handful of directly stored entries in a hashmap. When a hashmap
* outgrows direct storage, it gets its own key for indirect storage. */
static uint8_t shared_hash_key[HASH_KEY_SIZE];
static bool shared_hash_key_initialized;
struct HashmapBase {
        const struct hash_ops *hash_ops;  /* hash and compare ops to use */
        union _packed_ {
                struct indirect_storage indirect; /* if  has_indirect */
                struct direct_storage direct;     /* if !has_indirect */
        };
        enum HashmapType type:2;     /* HASHMAP_TYPE_* */
        bool has_indirect:1;         /* whether indirect storage is used */
        unsigned n_direct_entries:3; /* Number of entries in direct storage.
                                      * Only valid if !has_indirect. */
        bool from_pool:1;            /* whether was allocated from a mempool */
        HASHMAP_DEBUG_FIELDS         /* optional hashmap_debug_info */
};
/* Specific hash types
* HashmapBase must be at the beginning of each hashmap struct. */
struct Hashmap {
struct HashmapBase b;
};
struct OrderedHashmap {
struct HashmapBase b;
unsigned iterate_list_head, iterate_list_tail;
};
struct Set {
struct HashmapBase b;
};
/* No need for a separate Set pool */
struct hashmap_type_info {
        size_t head_size;
        size_t entry_size;
        struct mempool *mempool;
        unsigned n_direct_buckets;
};
static const struct hashmap_type_info hashmap_type_info[_HASHMAP_TYPE_MAX] = {
        [HASHMAP_TYPE_PLAIN] = {
.entry_size = sizeof(struct plain_hashmap_entry),
.mempool = &hashmap_pool,
},
[HASHMAP_TYPE_ORDERED] = {
.head_size = sizeof(OrderedHashmap),
.entry_size = sizeof(struct ordered_hashmap_entry),
},
[HASHMAP_TYPE_SET] = {
.entry_size = sizeof(struct set_entry),
.mempool = &hashmap_pool,
},
};
void string_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, strlen(p) + 1, state);
}
int string_compare_func(const void *a, const void *b) {
return strcmp(a, b);
}
const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};
void trivial_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(&p, sizeof(p), state);
}
int trivial_compare_func(const void *a, const void *b) {
return a < b ? -1 : (a > b ? 1 : 0);
}
const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};
void uint64_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(uint64_t), state);
}

int uint64_compare_func(const void *_a, const void *_b) {
        uint64_t a, b;
        a = *(const uint64_t*) _a;
        b = *(const uint64_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}
const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};
#if SIZEOF_DEV_T != 8
void devt_hash_func(const void *p, struct siphash *state) {
        siphash24_compress(p, sizeof(dev_t), state);
}

int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}
const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif
static unsigned n_buckets(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_buckets
                               : hashmap_type_info[h->type].n_direct_buckets;
}

static unsigned n_entries(HashmapBase *h) {
        return h->has_indirect ? h->indirect.n_entries
                               : h->n_direct_entries;
}
static void n_entries_inc(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries++;
        else
                h->n_direct_entries++;
}

static void n_entries_dec(HashmapBase *h) {
        if (h->has_indirect)
                h->indirect.n_entries--;
        else
                h->n_direct_entries--;
}
static char *storage_ptr(HashmapBase *h) {
        return h->has_indirect ? h->indirect.storage
                               : h->direct.storage;
}

static uint8_t *hash_key(HashmapBase *h) {
        return h->has_indirect ? h->indirect.hash_key
                               : shared_hash_key;
}
static unsigned base_bucket_hash(HashmapBase *h, const void *p) {
}
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;
/* Returns a hash function key to use. In order to keep things
* fast we will not generate a new key each time we allocate a
* new hash table. Instead, we'll just reuse the most recently
* generated one, except if we never generated one or when we
* are rehashing an entire hash table because we reached a
* fill level */
        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
static struct hashmap_base_entry *bucket_at(HashmapBase *h, unsigned idx) {
        return (struct hashmap_base_entry*)
                (storage_ptr(h) + idx * hashmap_type_info[h->type].entry_size);
}
}
}
}
}
/* Returns a pointer to the bucket at index idx.
* Understands real indexes and swap indexes, hence "_virtual". */
static struct hashmap_base_entry *bucket_at_virtual(HashmapBase *h, struct swap_entries *swap,
                                                    unsigned idx) {
        if (idx < _IDX_SWAP_BEGIN)
                return bucket_at(h, idx);
        if (idx < _IDX_SWAP_END)
                return &swap->e[idx - _IDX_SWAP_BEGIN].p.b;
        assert_not_reached("Invalid index");
}
static dib_raw_t *dib_raw_ptr(HashmapBase *h) {
        return (dib_raw_t*)
                (storage_ptr(h) + hashmap_type_info[h->type].entry_size * n_buckets(h));
}
}
unsigned initial_bucket;
if (raw_dib == DIB_RAW_FREE)
return DIB_FREE;
return raw_dib;
/*
* Having an overflow DIB value is very unlikely. The hash function
* would have to be bad. For example, in a table of size 2^24 filled
* to load factor 0.9 the maximum observed DIB is only about 60.
* In theory (assuming I used Maxima correctly), for an infinite size
* hash table with load factor 0.8 the probability of a given entry
* having DIB > 40 is 1.9e-8.
* This returns the correct DIB value by recomputing the hash value in
* the unlikely case. XXX Hitting this case could be a hint to rehash.
*/
}
}
dibs = dib_raw_ptr(h);
return idx;
return IDX_NIL;
}
}
if (h->type == HASHMAP_TYPE_ORDERED) {
le = (struct ordered_hashmap_entry*)
}
le = (struct ordered_hashmap_entry*)
}
}
}
}
}
static void *entry_value(HashmapBase *h, struct hashmap_base_entry *e) {
        switch (h->type) {
case HASHMAP_TYPE_PLAIN:
case HASHMAP_TYPE_ORDERED:
return ((struct plain_hashmap_entry*)e)->value;
case HASHMAP_TYPE_SET:
return (void*) e->key;
default:
assert_not_reached("Unknown hashmap type");
}
}
dibs = dib_raw_ptr(h);
#ifdef ENABLE_DEBUG_HASHMAP
#endif
/* Find the stop bucket ("right"). It is either free or has DIB == 0. */
break;
/* The buckets are not supposed to be all occupied and with DIB > 0.
* That would mean we could make everyone better off by shifting them
* backward. This scenario is impossible. */
}
if (h->type == HASHMAP_TYPE_ORDERED) {
else
else
}
/* Now shift all buckets in the interval (left, right) one step backwards */
}
bucket_mark_free(h, prev);
n_entries_dec(h);
}
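/*
 * Minimal standalone sketch of the backward shift deletion performed above,
 * reusing the toy rh_table/rh_entry layout from the sketch near the top of
 * this file (illustration only, hypothetical names). Entries following the
 * deleted one are shifted one bucket to the left until we reach a bucket that
 * is free or whose entry already sits in its optimal position (DIB == 0);
 * that entry must not be moved.
 */
#if 0 /* illustration only, not compiled */
static void rh_delete_at(struct rh_table *t, unsigned idx) {
        for (;;) {
                unsigned next = (idx + 1) % RH_BUCKETS;
                struct rh_entry *n = &t->buckets[next];

                /* Stop at a free bucket or an entry with DIB == 0. A table where
                 * every bucket is occupied with DIB > 0 cannot occur, as noted
                 * in the function above, so this loop terminates. */
                if (!n->key || n->dib == 0)
                        break;

                t->buckets[idx] = *n;
                t->buckets[idx].dib--;  /* now one step closer to its optimal bucket */
                idx = next;
        }

        /* The last vacated bucket becomes free. */
        t->buckets[idx] = (struct rh_entry) { .key = NULL, .dib = 0 };
}
#endif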
struct ordered_hashmap_entry *e;
unsigned idx;
assert(h);
assert(i);
goto at_end;
goto at_end;
idx = h->iterate_list_head;
e = ordered_bucket_at(h, idx);
} else {
e = ordered_bucket_at(h, idx);
/*
* We allow removing the current entry while iterating, but removal may cause
* a backward shift. The next entry may thus move one bucket to the left.
* To detect when it happens, we remember the key pointer of the entry we were
* going to iterate next. If it does not match, there was a backward shift.
*/
e = ordered_bucket_at(h, idx);
}
}
#ifdef ENABLE_DEBUG_HASHMAP
#endif
if (e->iterate_next != IDX_NIL) {
struct ordered_hashmap_entry *n;
i->idx = e->iterate_next;
n = ordered_bucket_at(h, i->idx);
} else
return idx;
return IDX_NIL;
}
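/*
 * Caller-side illustration of the guarantee documented above: removing the
 * entry the iterator currently points at is allowed. A hypothetical example
 * using the public HASHMAP_FOREACH_KEY macro from hashmap.h, assuming the
 * values are strings owned by the map (error handling omitted):
 */
#if 0 /* illustration only, not compiled */
static void drop_empty_values(Hashmap *h) {
        Iterator i;
        const char *key;
        char *value;

        HASHMAP_FOREACH_KEY(value, key, h, i)
                if (value[0] == '\0')
                        /* Removing the current entry is fine; the iterator
                         * detects a possible backward shift via the remembered
                         * key pointer and compensates for it. */
                        free(hashmap_remove(h, key));
}
#endif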
unsigned idx;
assert(h);
assert(i);
goto at_end;
/* fast forward to the first occupied bucket */
if (h->has_indirect) {
} else
i->idx = skip_free_buckets(h, 0);
goto at_end;
} else {
struct hashmap_base_entry *e;
/*
* We allow removing the current entry while iterating, but removal may cause
* a backward shift. The next entry may thus move one bucket to the left.
* To detect when it happens, we remember the key pointer of the entry we were
* going to iterate next. If it does not match, there was a backward shift.
*/
}
#ifdef ENABLE_DEBUG_HASHMAP
#endif
else
return idx;
return IDX_NIL;
}
if (!h) {
return IDX_NIL;
}
#ifdef ENABLE_DEBUG_HASHMAP
} else {
/* While iterating, must not add any new entries */
/* ... or remove entries other than the current one */
/* Reset our removals counter */
}
#endif
: hashmap_iterate_in_internal_order(h, i);
}
struct hashmap_base_entry *e;
void *data;
unsigned idx;
idx = hashmap_iterate_entry(h, i);
if (value)
if (key)
return false;
}
data = entry_value(h, e);
if (value)
if (key)
return true;
}
}
#define HASHMAP_FOREACH_IDX(idx, h, i) \
        for ((i) = ITERATOR_FIRST, (idx) = hashmap_iterate_entry((h), &(i)); \
             (idx) != IDX_NIL; \
             (idx) = hashmap_iterate_entry((h), &(i)))
static void reset_direct_storage(HashmapBase *h) {
void *p;
assert(!h->has_indirect);
}
static struct HashmapBase *hashmap_base_new(const struct hash_ops *hash_ops, enum HashmapType type HASHMAP_DEBUG_PARAMS) {
HashmapBase *h;
bool use_pool;
use_pool = is_main_thread();
if (!h)
return NULL;
if (type == HASHMAP_TYPE_ORDERED) {
}
if (!shared_hash_key_initialized) {
shared_hash_key_initialized = true;
}
#ifdef ENABLE_DEBUG_HASHMAP
#endif
return h;
}
}
OrderedHashmap *internal_ordered_hashmap_new(const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
}
}
HashmapBase *q;
assert(h);
if (*h)
return 0;
if (!q)
return -ENOMEM;
*h = q;
return 0;
}
int internal_hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_PLAIN HASHMAP_DEBUG_PASS_ARGS);
}
int internal_ordered_hashmap_ensure_allocated(OrderedHashmap **h, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
return hashmap_base_ensure_allocated((HashmapBase**)h, hash_ops, HASHMAP_TYPE_ORDERED HASHMAP_DEBUG_PASS_ARGS);
}
int internal_set_ensure_allocated(Set **s, const struct hash_ops *hash_ops HASHMAP_DEBUG_PARAMS) {
        return hashmap_base_ensure_allocated((HashmapBase**)s, hash_ops, HASHMAP_TYPE_SET HASHMAP_DEBUG_PASS_ARGS);
}
static void hashmap_free_no_clear(HashmapBase *h) {
assert(!h->has_indirect);
assert(!h->n_direct_entries);
#ifdef ENABLE_DEBUG_HASHMAP
#endif
if (h->from_pool)
else
free(h);
}
/* Free the hashmap, but nothing in it */
if (h) {
}
return NULL;
}
/* Free the hashmap and all data objects in it, but not the
* keys */
if (h) {
}
return NULL;
}
/* Free the hashmap and all data and key objects in it */
if (h) {
}
return NULL;
}
void internal_hashmap_clear(HashmapBase *h) {
if (!h)
return;
if (h->has_indirect) {
h->has_indirect = false;
}
h->n_direct_entries = 0;
if (h->type == HASHMAP_TYPE_ORDERED) {
}
}
void internal_hashmap_clear_free(HashmapBase *h) {
unsigned idx;
if (!h)
return;
}
void hashmap_clear_free_free(Hashmap *h) {
unsigned idx;
if (!h)
return;
}
}
/*
* Finds an empty bucket to put an entry into, starting the scan at 'idx'.
* Performs Robin Hood swaps as it goes. The entry to put must be placed
* by the caller into swap slot IDX_PUT.
* If used for in-place resizing, may leave a displaced entry in swap slot
* IDX_PUT. Caller must rehash it next.
* Returns: true if it left a displaced entry to rehash next in IDX_PUT,
* false otherwise.
*/
static bool hashmap_put_robin_hood(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap) {
#ifdef ENABLE_DEBUG_HASHMAP
#endif
dibs = dib_raw_ptr(h);
if (raw_dib == DIB_RAW_REHASH)
if (raw_dib == DIB_RAW_REHASH) {
return true;
}
return false;
}
/* Found a wealthier entry. Go Robin Hood! */
/* swap the entries */
}
}
}
/*
* Puts an entry into a hashmap, boldly - no check whether key already exists.
* The caller must place the entry (only its key and value, not link indexes)
* in swap slot IDX_PUT.
* Caller must ensure: the key does not exist yet in the hashmap.
* that resize is not needed if !may_resize.
* Returns: 1 if entry was put successfully.
* -ENOMEM if may_resize==true and resize failed with -ENOMEM.
* Cannot return -ENOMEM if !may_resize.
*/
static int hashmap_base_put_boldly(HashmapBase *h, unsigned idx,
                                   struct swap_entries *swap, bool may_resize) {
        struct ordered_hashmap_entry *new_entry;
int r;
if (may_resize) {
r = resize_buckets(h, 1);
if (r < 0)
return r;
if (r > 0)
}
if (h->type == HASHMAP_TYPE_ORDERED) {
struct ordered_hashmap_entry *old_tail;
}
}
n_entries_inc(h);
#ifdef ENABLE_DEBUG_HASHMAP
#endif
return 1;
}
/*
* Returns 0 if resize is not needed.
* 1 if successfully resized.
* -ENOMEM on allocation failure.
*/
static int resize_buckets(HashmapBase *h, unsigned entries_add) {
        struct swap_entries swap;
char *new_storage;
const struct hashmap_type_info *hi;
unsigned idx, optimal_idx;
bool rehash_next;
assert(h);
/* overflow? */
return -ENOMEM;
/* For direct storage we allow 100% load, because it's tiny. */
return 0;
/*
* Load factor = n/m = 1 - (1/INV_KEEP_FREE).
* From it follows: m = n + n/(INV_KEEP_FREE - 1)
*/
/* overflow? */
return -ENOMEM;
return -ENOMEM;
old_n_buckets = n_buckets(h);
return 0;
2 * sizeof(struct direct_storage)));
/* Realloc storage (buckets and DIB array). */
1U << new_shift);
if (!new_storage)
return -ENOMEM;
/* Must upgrade direct to indirect storage. */
if (!h->has_indirect) {
h->indirect.idx_lowest_entry = 0;
h->n_direct_entries = 0;
}
/* Get a new hash key. If we've just upgraded to indirect storage,
* allow reusing a previously generated key. It's still a different key
* from the shared one that we used for direct storage. */
h->has_indirect = true;
new_dibs = dib_raw_ptr(h);
/*
* Move the DIB array to the new place, replacing valid DIB values with
* DIB_RAW_REHASH to indicate all of the used buckets need rehashing.
* Note: Overlap is not possible, because we have at least doubled the
* number of buckets and dib_raw_t is smaller than any entry type.
*/
}
/* Zero the area of newly added entries (including the old DIB area) */
/* The upper half of the new DIB array needs initialization */
/* Rehash entries that need it */
n_rehashed = 0;
continue;
/*
* Not much to do if by luck the entry hashes to its current
* location. Just set its DIB.
*/
if (optimal_idx == idx) {
n_rehashed++;
continue;
}
/* bucket_move_entry does not clear the source */
do {
/*
* Find the new bucket for the current entry. This may make
* another entry homeless and load it into IDX_PUT.
*/
n_rehashed++;
/* Did the current entry displace another one? */
if (rehash_next)
} while (rehash_next);
}
return 1;
}
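/*
 * Sketch of the DIB-array move described in resize_buckets() above
 * (hypothetical helper, illustration only): every bucket that was in use is
 * marked DIB_RAW_REHASH so the rehash loop knows it still has to find its new
 * position; free buckets stay free, and the newly added buckets start out
 * free. The real code moves the DIBs within the reallocated storage; separate
 * arrays are used here for simplicity.
 */
#if 0 /* illustration only, not compiled */
static void mark_dibs_for_rehash(dib_raw_t *new_dibs, const dib_raw_t *old_dibs,
                                 unsigned old_n_buckets, unsigned new_n_buckets) {
        unsigned idx;

        for (idx = 0; idx < old_n_buckets; idx++)
                new_dibs[idx] = old_dibs[idx] == DIB_RAW_FREE ? DIB_RAW_FREE
                                                              : DIB_RAW_REHASH;

        for (; idx < new_n_buckets; idx++)
                new_dibs[idx] = DIB_RAW_FREE;
}
#endif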
/*
* Finds an entry with a matching key
* Returns: index of the found entry, or IDX_NIL if not found.
*/
struct hashmap_base_entry *e;
return IDX_NIL;
return IDX_NIL;
return idx;
}
}
}
struct swap_entries swap;
struct plain_hashmap_entry *e;
assert(h);
e = plain_bucket_at(h, idx);
return 0;
return -EEXIST;
}
}
struct swap_entries swap;
struct hashmap_base_entry *e;
assert(s);
return 0;
}
struct swap_entries swap;
struct plain_hashmap_entry *e;
assert(h);
e = plain_bucket_at(h, idx);
#ifdef ENABLE_DEBUG_HASHMAP
/* Although the key is equal, the key pointer may have changed,
* and this would break our assumption for iterating. So count
* this operation as incompatible with iteration. */
}
#endif
return 0;
}
}
struct plain_hashmap_entry *e;
assert(h);
return -ENOENT;
e = plain_bucket_at(h, idx);
return 0;
}
struct hashmap_base_entry *e;
if (!h)
return NULL;
return NULL;
return entry_value(h, e);
}
struct plain_hashmap_entry *e;
if (!h)
return NULL;
return NULL;
e = plain_bucket_at(h, idx);
if (key2)
return e->value;
}
unsigned hash;
if (!h)
return false;
}
struct hashmap_base_entry *e;
void *data;
if (!h)
return NULL;
return NULL;
data = entry_value(h, e);
remove_entry(h, idx);
return data;
}
struct plain_hashmap_entry *e;
void *data;
if (!h) {
if (rkey)
return NULL;
}
if (rkey)
return NULL;
}
e = plain_bucket_at(h, idx);
if (rkey)
remove_entry(h, idx);
return data;
}
struct swap_entries swap;
struct plain_hashmap_entry *e;
if (!h)
return -ENOENT;
return -ENOENT;
return -EEXIST;
remove_entry(h, idx);
return 0;
}
struct swap_entries swap;
struct hashmap_base_entry *e;
if (!s)
return -ENOENT;
return -ENOENT;
return -EEXIST;
remove_entry(s, idx);
return 0;
}
struct swap_entries swap;
struct plain_hashmap_entry *e;
if (!h)
return -ENOENT;
return -ENOENT;
remove_entry(h, idx_new);
/* Compensate for a possible backward shift. */
}
remove_entry(h, idx_old);
return 0;
}
struct plain_hashmap_entry *e;
if (!h)
return NULL;
return NULL;
e = plain_bucket_at(h, idx);
return NULL;
remove_entry(h, idx);
return value;
}
static unsigned find_first_entry(HashmapBase *h) {
Iterator i = ITERATOR_FIRST;
if (!h || !n_entries(h))
return IDX_NIL;
return hashmap_iterate_entry(h, &i);
}
void *internal_hashmap_first(HashmapBase *h) {
unsigned idx;
idx = find_first_entry(h);
return NULL;
}
void *internal_hashmap_first_key(HashmapBase *h) {
struct hashmap_base_entry *e;
unsigned idx;
idx = find_first_entry(h);
return NULL;
return (void*) e->key;
}
void *internal_hashmap_steal_first(HashmapBase *h) {
struct hashmap_base_entry *e;
void *data;
unsigned idx;
idx = find_first_entry(h);
return NULL;
data = entry_value(h, e);
remove_entry(h, idx);
return data;
}
void *internal_hashmap_steal_first_key(HashmapBase *h) {
struct hashmap_base_entry *e;
void *key;
unsigned idx;
idx = find_first_entry(h);
return NULL;
remove_entry(h, idx);
return key;
}
unsigned internal_hashmap_size(HashmapBase *h) {
if (!h)
return 0;
return n_entries(h);
}
unsigned internal_hashmap_buckets(HashmapBase *h) {
if (!h)
return 0;
return n_buckets(h);
}
Iterator i;
unsigned idx;
assert(h);
int r;
if (r < 0 && r != -EEXIST)
return r;
}
return 0;
}
Iterator i;
unsigned idx;
assert(s);
int r;
if (r < 0)
return r;
}
return 0;
}
int r;
assert(h);
r = resize_buckets(h, entries_add);
if (r < 0)
return r;
return 0;
}
/*
* The same as hashmap_merge(), but every new item from other is moved to h.
* Keys already in h are skipped and stay in other.
* Returns: 0 on success.
* -ENOMEM on alloc failure, in which case no move has been done.
*/
struct swap_entries swap;
struct hashmap_base_entry *e, *n;
Iterator i;
unsigned idx;
int r;
assert(h);
if (!other)
return 0;
/*
* This reserves buckets for the worst case, where none of other's
* entries are yet present in h. This is preferable to risking
* an allocation failure in the middle of the moving and having to
* rollback or return a partial result.
*/
if (r < 0)
return r;
unsigned h_hash;
continue;
if (h->type != HASHMAP_TYPE_SET)
((struct plain_hashmap_entry*) n)->value =
((struct plain_hashmap_entry*) e)->value;
}
return 0;
}
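/*
 * Usage illustration of the move semantics documented above (hypothetical
 * example, illustration only; allocation failures are ignored): "b" is moved
 * into h, while "a" stays behind in other because h already contains that key.
 */
#if 0 /* illustration only, not compiled */
static void hashmap_move_example(void) {
        Hashmap *h = hashmap_new(&string_hash_ops);
        Hashmap *other = hashmap_new(&string_hash_ops);

        hashmap_put(h, "a", (void*) "kept");
        hashmap_put(other, "a", (void*) "skipped");  /* key already present in h */
        hashmap_put(other, "b", (void*) "moved");

        assert_se(hashmap_move(h, other) == 0);
        assert_se(streq(hashmap_get(h, "a"), "kept"));
        assert_se(streq(hashmap_get(h, "b"), "moved"));
        assert_se(hashmap_size(other) == 1);         /* "a" was skipped, not moved */

        hashmap_free(h);
        hashmap_free(other);
}
#endif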
struct swap_entries swap;
struct hashmap_base_entry *e, *n;
int r;
assert(h);
return -EEXIST;
if (!other)
return -ENOENT;
return -ENOENT;
if (h->type != HASHMAP_TYPE_SET)
((struct plain_hashmap_entry*) n)->value =
((struct plain_hashmap_entry*) e)->value;
if (r < 0)
return r;
return 0;
}
int r;
assert(h);
if (!copy)
return NULL;
switch (h->type) {
case HASHMAP_TYPE_PLAIN:
case HASHMAP_TYPE_ORDERED:
break;
case HASHMAP_TYPE_SET:
break;
default:
assert_not_reached("Unknown hashmap type");
}
if (r < 0) {
return NULL;
}
return copy;
}
char **internal_hashmap_get_strv(HashmapBase *h) {
char **sv;
Iterator i;
unsigned idx, n;
if (!sv)
return NULL;
n = 0;
HASHMAP_FOREACH_IDX(idx, h, i)
return sv;
}
struct ordered_hashmap_entry *e;
if (!h)
return NULL;
return NULL;
e = ordered_bucket_at(h, idx);
if (e->iterate_next == IDX_NIL)
return NULL;
}
int r;
if (r <= 0)
return r;
}
int set_put_strdup(Set *s, const char *p) {
char *c;
int r;
assert(s);
assert(p);
c = strdup(p);
if (!c)
return -ENOMEM;
r = set_consume(s, c);
if (r == -EEXIST)
return 0;
return r;
}
int set_put_strdupv(Set *s, char **l) {
int n = 0, r;
char **i;
STRV_FOREACH(i, l) {
r = set_put_strdup(s, *i);
if (r < 0)
return r;
n += r;
}
return n;
}
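/*
 * Usage illustration for set_put_strdup()/set_put_strdupv() above (hypothetical
 * example, illustration only): duplicate strings are stored only once, because
 * -EEXIST from set_consume() is mapped to 0, and the set owns copies of the
 * strings, so set_free_free() releases them.
 */
#if 0 /* illustration only, not compiled */
static void set_put_strdup_example(void) {
        Set *s = set_new(&string_hash_ops);

        assert_se(set_put_strdupv(s, STRV_MAKE("one", "two", "one")) >= 0);
        assert_se(set_size(s) == 2);        /* "one" was deduplicated */
        assert_se(set_contains(s, "two"));

        set_free_free(s);                   /* frees the strdup()ed strings, too */
}
#endif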