Diffstat (limited to 'hashmap.c')
-rw-r--r--	hashmap.c	46
1 file changed, 40 insertions(+), 6 deletions(-)
diff --git a/hashmap.c b/hashmap.c
index b10b642229..9b6a12859b 100644
--- a/hashmap.c
+++ b/hashmap.c
@@ -50,6 +50,23 @@ unsigned int memihash(const void *buf, size_t len)
return hash;
}
+/*
+ * Incorporate another chunk of data into a memihash
+ * computation.
+ */
+unsigned int memihash_cont(unsigned int hash_seed, const void *buf, size_t len)
+{
+ unsigned int hash = hash_seed;
+ unsigned char *ucbuf = (unsigned char *) buf;
+ while (len--) {
+ unsigned int c = *ucbuf++;
+ if (c >= 'a' && c <= 'z')
+ c -= 'a' - 'A';
+ hash = (hash * FNV32_PRIME) ^ c;
+ }
+ return hash;
+}
+
#define HASHMAP_INITIAL_SIZE 64
/* grow / shrink by 2^2 */
#define HASHMAP_RESIZE_BITS 2
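The new memihash_cont() lets the case-insensitive FNV-style hash be built up over several buffers instead of one contiguous block: seed it with the hash of the first chunk and keep folding in further chunks. A minimal sketch of the intended equivalence, using only memihash() and memihash_cont() from this file (the helper name is hypothetical):

#include <string.h>

/* Hypothetical helper: hash a path given as directory prefix + file name. */
static unsigned int hash_dir_and_name(const char *dir, const char *name)
{
	/* Hash the directory prefix in one shot... */
	unsigned int seed = memihash(dir, strlen(dir));
	/* ...then continue the same computation over the file name. */
	return memihash_cont(seed, name, strlen(name));
}

/*
 * For "dir/" and "file" this yields the same value as
 * memihash("dir/file", strlen("dir/file")).
 */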
@@ -78,7 +95,9 @@ static inline int entry_equals(const struct hashmap *map,
const struct hashmap_entry *e1, const struct hashmap_entry *e2,
const void *keydata)
{
- return (e1 == e2) || (e1->hash == e2->hash && !map->cmpfn(e1, e2, keydata));
+ return (e1 == e2) ||
+ (e1->hash == e2->hash &&
+ !map->cmpfn(map->cmpfn_data, e1, e2, keydata));
}
static inline unsigned int bucket(const struct hashmap *map,
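With the extra cmpfn_data argument, the comparison callback now receives a caller-supplied pointer on every call, as the updated entry_equals() above shows (the callback still returns 0 for "equal"). A rough sketch of a callback that makes use of it; the struct and option names here are illustrative, not part of this patch:

#include <string.h>
#include <strings.h>

struct my_entry {
	struct hashmap_entry ent;	/* from hashmap.h, must be first */
	const char *key;
};

/* Hypothetical option block handed to hashmap_init() as cmpfn_data. */
struct my_opts {
	int ignore_case;
};

static int my_entry_cmp(const void *cmp_data,
			const void *entry, const void *entry_or_key,
			const void *keydata)
{
	const struct my_opts *opts = cmp_data;
	const struct my_entry *a = entry;
	const struct my_entry *b = entry_or_key;
	const char *key = keydata ? keydata : b->key;

	return opts->ignore_case ? strcasecmp(a->key, key)
				 : strcmp(a->key, key);
}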
@@ -87,11 +106,19 @@ static inline unsigned int bucket(const struct hashmap *map,
return key->hash & (map->tablesize - 1);
}
+int hashmap_bucket(const struct hashmap *map, unsigned int hash)
+{
+ return hash & (map->tablesize - 1);
+}
+
static void rehash(struct hashmap *map, unsigned int newsize)
{
unsigned int i, oldsize = map->tablesize;
struct hashmap_entry **oldtable = map->table;
+ if (map->disallow_rehash)
+ return;
+
alloc_table(map, newsize);
for (i = 0; i < oldsize; i++) {
struct hashmap_entry *e = oldtable[i];
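hashmap_bucket() exposes the bucket index for a given hash, and rehash() now becomes a no-op while disallow_rehash is set, so the table size (and with it every bucket index) stays stable. One plausible use, assuming the caller sets map->disallow_rehash before handing the map to several workers (the helper below is illustrative only):

/*
 * Illustrative only: with rehashing frozen, worker i can claim exactly
 * the entries whose bucket index satisfies bucket % nr_workers == i,
 * since no concurrent insert can move entries between buckets.
 */
static int entry_belongs_to_worker(const struct hashmap *map,
				   unsigned int hash,
				   int worker, int nr_workers)
{
	return hashmap_bucket(map, hash) % nr_workers == worker;
}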
@@ -115,17 +142,23 @@ static inline struct hashmap_entry **find_entry_ptr(const struct hashmap *map,
return e;
}
-static int always_equal(const void *unused1, const void *unused2, const void *unused3)
+static int always_equal(const void *unused_cmp_data,
+ const void *unused1,
+ const void *unused2,
+ const void *unused_keydata)
{
return 0;
}
void hashmap_init(struct hashmap *map, hashmap_cmp_fn equals_function,
- size_t initial_size)
+ const void *cmpfn_data, size_t initial_size)
{
unsigned int size = HASHMAP_INITIAL_SIZE;
- map->size = 0;
+
+ memset(map, 0, sizeof(*map));
+
map->cmpfn = equals_function ? equals_function : always_equal;
+ map->cmpfn_data = cmpfn_data;
/* calculate initial table size and allocate the table */
initial_size = (unsigned int) ((uint64_t) initial_size * 100
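hashmap_init() now zero-fills the whole structure before setting its fields, so newly added members such as cmpfn_data and disallow_rehash start out cleared, and it takes the comparison-function data as an extra parameter. Continuing the hypothetical my_entry_cmp/my_opts sketch from above:

static struct hashmap map;
static struct my_opts opts = { 1 };	/* ignore_case = 1 */

static void setup(void)
{
	/* Callbacks that ignore cmpfn_data can simply pass NULL, as memintern() does below. */
	hashmap_init(&map, my_entry_cmp, &opts, 0);
}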
@@ -233,7 +266,8 @@ struct pool_entry {
unsigned char data[FLEX_ARRAY];
};
-static int pool_entry_cmp(const struct pool_entry *e1,
+static int pool_entry_cmp(const void *unused_cmp_data,
+ const struct pool_entry *e1,
const struct pool_entry *e2,
const unsigned char *keydata)
{
@@ -248,7 +282,7 @@ const void *memintern(const void *data, size_t len)
/* initialize string pool hashmap */
if (!map.tablesize)
- hashmap_init(&map, (hashmap_cmp_fn) pool_entry_cmp, 0);
+ hashmap_init(&map, (hashmap_cmp_fn) pool_entry_cmp, NULL, 0);
/* lookup interned string in pool */
hashmap_entry_init(&key, memhash(data, len));