@@ -4,7 +4,7 @@
 #include <limits.h>
 #include <assert.h>
 
-#include "include/htable.h"
+#include "htable.h"
 
 
 #define BUCKET_EMPTY 1 << 0
@@ -21,42 +21,41 @@
 #define APPROX_85_PERCENT(x) (((x) * 870) >> 10)
 #define APPROX_40_PERCENT(x) (((x) * 409) >> 10)
 
+#define MIN_HT_SIZE 1 << 3
+
 typedef struct {
-    void * key;
+    LSUP_TripleKey key;          // TODO Make configurable but
+                                 // statically allocated via macros
     void * val;
-    uint64_t hash : 32;
-    uint64_t psl : 16;
+    uint64_t hash;
+    uint16_t psl;
 } bucket_t;
 
 typedef struct htable_t {
-    unsigned size;
-    unsigned nitems;
+    htsize_t size;
+    htsize_t nitems;
     unsigned flags;
     uint64_t divinfo;
-    bucket_t * buckets;
-    uint64_t hashkey;
+    bucket_t * buckets;
+    uint64_t seed;
 
     key_hash_fn_t key_hash_fn;
     key_eq_fn_t key_eq_fn;
 
-    void * key_data;
     ksize_t ksize;
-    void * val_data;
     vsize_t vsize;
 
-    /*
-     * Small optimisation for a single element case: allocate one
-     * bucket together with the hashmap structure -- it will generally
-     * fit within the same cache-line.
-     */
-    bucket_t init_bucket;
+    void * del_marker;           // Used to fill deleted buckets.
 } HTable;
 
 
 /* * * GENERIC UTILITIES * * */
 
+static inline bool is_empty_bucket(const HTable *ht, const bucket_t *bucket)
+{ return memcmp(bucket->key, ht->del_marker, ht->ksize) == 0; }
+
 /*
  * Find first bit.
  */
@@ -113,38 +112,16 @@ fast_rem32(uint32_t v, uint32_t div, uint64_t divinfo)
 //static int __attribute__((__unused__))
 static int
-validate_psl_p(HTable *ht, const bucket_t *bucket, unsigned i)
+validate_psl_p(const HTable *ht, const bucket_t *bucket, unsigned i)
 {
     unsigned base_i = fast_rem32(bucket->hash, ht->size, ht->divinfo);
     unsigned diff = (base_i > i) ? ht->size - base_i + i : i - base_i;
-    return bucket->key == NULL || diff == bucket->psl;
+    return is_empty_bucket(ht, bucket) || diff == bucket->psl;
 }
 
 
 /* * * PUBLIC API * * */
 
-/*
- * Construct a new hash table.
- *
- * => If size is non-zero, then pre-allocate the given number of buckets;
- * => If size is zero, then a default minimum is used.
- */
-int LSUP_htable_init(
-        HTable *ht, htsize_t size, ksize_t ksize, vsize_t vsize,
-        key_hash_fn_t key_hash_fn, key_eq_fn_t key_eq_fn, unsigned flags)
-{
-    ht->flags = flags;
-    if (LSUP_htable_resize(ht, size) != 0) {
-        free(ht);
-        return -1;
-    }
-    ASSERT(ht->buckets);
-    ASSERT(ht->size);
-
-    return LSUP_OK;
-}
-
-
 HTable *LSUP_htable_new(
         htsize_t size, ksize_t ksize, vsize_t vsize,
         key_hash_fn_t key_hash_fn, key_eq_fn_t key_eq_fn, unsigned flags)
@@ -152,7 +129,16 @@ HTable *LSUP_htable_new(
     HTable *ht;
     CRITICAL(ht = malloc(sizeof(HTable)));
 
-    LSUP_htable_init(ht, size, ksize, vsize, key_hash_fn, key_eq_fn, flags);
+    ht->ksize = ksize;
+    ht->vsize = vsize;
+    ht->key_hash_fn = key_hash_fn;
+    ht->key_eq_fn = key_eq_fn;
+    ht->flags = flags;
+    ht->size = 0;
+
+    CRITICAL(ht->del_marker = calloc(1, ksize));
+
+    LSUP_htable_resize(ht, size);
 
     return ht;
 }
@@ -164,49 +150,30 @@ HTable *LSUP_htable_new(
 int LSUP_htable_resize(HTable *ht, htsize_t newsize)
 {
     bucket_t *oldbuckets = ht->buckets;
-    const size_t oldsize = ht->size;
-    bucket_t *newbuckets;
-
-    ASSERT(newsize > 0);
-    ASSERT(newsize > ht->nitems);
+    const htsize_t oldsize = ht->size;
 
-    /*
-     * Check for an overflow and allocate buckets. Also, generate
-     * a new hash key/seed every time we resize the hash table.
-     */
-    if (newsize == 1) {
-        memset(&ht->init_bucket, 0, sizeof(bucket_t));
-        newbuckets = &ht->init_bucket;
-    } else if (newsize > UINT_MAX) {
-        return -1;
-    }
+    // Clip size to min & max limits.
+    if (newsize < MIN_HT_SIZE) newsize = MIN_HT_SIZE;
+    if (newsize > HTSIZE_MAX) newsize = HTSIZE_MAX;
 
-    CRITICAL(ht->buckets = calloc(1, newsize * sizeof(bucket_t)));
-    CRITICAL(ht->key_data = realloc(ht->key_data, newsize * ht->ksize));
-    CRITICAL(ht->val_data = realloc(ht->key_data, newsize * ht->vsize));
+    CRITICAL(ht->buckets = calloc(newsize, sizeof(bucket_t)));
 
     ht->size = newsize;
     ht->nitems = 0;
 
     ht->divinfo = fast_div32_init(newsize);
-    ht->hashkey ^= random() | (random() << 32);
+    ht->seed ^= random() | (random() << 32);
 
     for (unsigned i = 0; i < oldsize; i++) {
         const bucket_t *bucket = &oldbuckets[i];
 
         /* Skip the empty buckets. */
-        if (!bucket->key) {
-            continue;
-        }
-        LSUP_htable_insert(ht, bucket->key, bucket->val);
-        if ((ht->flags & HTABLE_NOCOPY) == 0) {
-            free(bucket->key);
-        }
+        if (!is_empty_bucket(ht, bucket))
+            LSUP_htable_insert(ht, bucket->key, bucket->val);
     }
-    if (oldbuckets && oldbuckets != &ht->init_bucket) {
-        free(oldbuckets);
-    }
-    return 0;
+    if (oldbuckets != NULL) free(oldbuckets);
+
+    return LSUP_OK;
 }
@@ -223,23 +190,17 @@ htsize_t LSUP_htable_size(LSUP_HTable *ht)
  */
 int LSUP_htable_insert(HTable *ht, const void *key, void *val)
 {
-    const uint32_t hash = ht->key_hash_fn(key, ht->ksize, ht->hashkey);
     bucket_t *bucket, entry;
-    unsigned i;
 
     ASSERT(key != NULL);
 
     /*
      * Setup the bucket entry.
      */
-    if (ht->flags & HTABLE_NOCOPY) {
-        entry.key = (void *)(uintptr_t)key;
-    } else {
-        CRITICAL(entry.key = malloc(ht->ksize))
-        memcpy(entry.key, key, ht->ksize);
-    }
-    entry.hash = hash;
+    memcpy(entry.key, key, ht->ksize);
+    //memcpy(entry.val, val, ht->vsize);
     entry.val = val;
+    entry.hash = ht->key_hash_fn(entry.key, ht->ksize, ht->seed);
     entry.psl = 0;
 
     /*
@@ -252,49 +213,53 @@ int LSUP_htable_insert(HTable *ht, const void *key, void *val)
      * being inserted is greater than PSL of the element in the bucket,
      * then swap them and continue.
      */
-    i = fast_rem32(hash, ht->size, ht->divinfo);
+    htsize_t i = fast_rem32(entry.hash, ht->size, ht->divinfo);
 
-    while(1) {
-        bucket = &ht->buckets[i];
-        if (bucket->key) {
-            ASSERT(validate_psl_p(ht, bucket, i));
-
-            // There is a key in the bucket.
-            if (bucket->hash == hash && ht->key_eq_fn(
-                    bucket->key, key, ht->ksize)) {
-                // Duplicate key: do nothing.
-                if ((ht->flags & HTABLE_NOCOPY) == 0) {
-                    free(entry.key);
-                }
-                return LSUP_NOACTION;
-            }
+    for(;;) {
+        bucket = ht->buckets + i;
+
+        if(is_empty_bucket(ht, ht->buckets + i)) break;
+
+        ASSERT(validate_psl_p(ht, bucket, i));
+
+        // There is a key in the bucket.
+        TRACE("Entry key: {%lu, %lu, %lu}; bucket key: {%lu, %lu, %lu}", entry.key[0], entry.key[1], entry.key[2], bucket->key[0], bucket->key[1], bucket->key[2]);
+        if (ht->key_eq_fn(bucket->key, entry.key, ht->ksize)) {
+            // Duplicate key: do nothing.
+            TRACE(STR, "Duplicate key.");
+            return LSUP_NOACTION;
+        }
+
+        /*
+         * We found a "rich" bucket. Capture its location.
+         */
+        if (entry.psl > bucket->psl) {
+            //TRACE("Entry PSL: %d; Bucket PSL: %d", entry.psl, bucket->psl);
+            bucket_t tmp;
+
+            TRACE(STR, "SWAP");
             /*
-             * We found a "rich" bucket. Capture its location.
+             * Place our key-value pair by swapping the "rich"
+             * bucket with our entry. Copy the structures.
              */
-            if (entry.psl > bucket->psl) {
-                bucket_t tmp;
-
-                /*
-                 * Place our key-value pair by swapping the "rich"
-                 * bucket with our entry. Copy the structures.
-                 */
-                tmp = entry;
-                entry = *bucket;
-                *bucket = tmp;
-            }
-            entry.psl++;
-
-            /* Continue to the next bucket. */
-            ASSERT(validate_psl_p(ht, bucket, i));
-            i = fast_rem32(i + 1, ht->size, ht->divinfo);
+            tmp = entry;
+            entry = *bucket;
+            *bucket = tmp;
        }
+
+        entry.psl++;
+
+        /* Continue to the next bucket. */
+        ASSERT(validate_psl_p(ht, bucket, i));
+        i = fast_rem32(i + 1, ht->size, ht->divinfo);
     }
 
     /*
      * Found a free bucket: insert the entry.
      */
+    TRACE("Inserting {%lu, %lu, %lu} in bucket #%d", entry.key[0], entry.key[1], entry.key[2], i);
     *bucket = entry; // copy
+    //memcpy(bucket, &entry, sizeof(bucket_t)); // copy
     ht->nitems++;
 
     ASSERT(validate_psl_p(ht, bucket, i));
@@ -332,7 +297,7 @@ int LSUP_htable_put(HTable *ht, const void *key, void *val)
 int LSUP_htable_get(const HTable *ht, const void *key, void **valp)
 {
-    const uint64_t hash = ht->key_hash_fn(key, ht->ksize, ht->hashkey);
+    const uint64_t hash = ht->key_hash_fn(key, ht->ksize, ht->seed);
     htsize_t n = 0, i = fast_rem32(hash, ht->size, ht->divinfo);
 
     if (key == NULL) return LSUP_VALUE_ERR;
@@ -340,16 +305,16 @@ int LSUP_htable_get(const HTable *ht, const void *key, void **valp)
     /*
      * Lookup is a linear probe.
      */
-    while (1) {
-        bucket_t *bucket = &ht->buckets[i];
+    for(;;) {
+        bucket_t *bucket = ht->buckets + i;
         ASSERT(validate_psl_p(ht, bucket, i));
 
-        if (bucket->hash == hash && ht->key_eq_fn(
-                bucket->key, key, ht->ksize))
-            if (valp != NULL)
-                *valp = ht->vsize == 0 ? NULL : bucket->val;
+        if (ht->key_eq_fn(bucket->key, key, ht->ksize)) {
+            // Key found within max probe length.
+            if (valp != NULL) *valp = bucket->val;
 
             return LSUP_OK;
+        }
 
         /*
          * Stop probing if we hit an empty bucket; also, if we hit a
@@ -358,10 +323,12 @@ int LSUP_htable_get(const HTable *ht, const void *key, void **valp)
          * have been captured, if the key was inserted -- see the central
          * point of the algorithm in the insertion function.
          */
-        if (!bucket->key || n > bucket->psl) {
+        if (is_empty_bucket(ht, bucket) || n > bucket->psl) {
             valp = NULL;
+
             return LSUP_NORESULT;
         }
+
         n++;
 
         /* Continue to the next bucket. */
@@ -373,59 +340,51 @@ int LSUP_htable_get(const HTable *ht, const void *key, void **valp)
 int LSUP_htable_del(HTable *ht, const void *key)
 {
     const size_t threshold = APPROX_40_PERCENT(ht->size);
-    const uint32_t hash = ht->key_hash_fn(key, ht->ksize, ht->hashkey);
+    const uint32_t hash = ht->key_hash_fn(key, ht->ksize, ht->seed);
     unsigned n = 0, i = fast_rem32(hash, ht->size, ht->divinfo);
     bucket_t *bucket;
 
     ASSERT(key != NULL);
 
-    while(1) {
+    for(;;) {
         /*
          * The same probing logic as in the lookup function.
         */
-        bucket = &ht->buckets[i];
-        if (!bucket->key || n > bucket->psl) {
+        bucket = ht->buckets + i;
+        if (is_empty_bucket(ht, bucket) || n > bucket->psl)
             return LSUP_NOACTION;
-        }
+
         ASSERT(validate_psl_p(ht, bucket, i));
 
-        if (
-                bucket->hash != hash ||
-                ht->key_eq_fn(bucket->key, key, ht->ksize)) {
+        if (!ht->key_eq_fn(bucket->key, key, ht->ksize)) {
             /* Continue to the next bucket. */
             i = fast_rem32(i + 1, ht->size, ht->divinfo);
             n++;
         }
     }
 
-    /*
-     * Free the bucket.
-     */
-    if ((ht->flags & HTABLE_NOCOPY) == 0) {
-        free(bucket->key);
-    }
     ht->nitems--;
 
     /*
     * The probe sequence must be preserved in the deletion case.
     * Use the backwards-shifting method to maintain low variance.
     */
+
     while(1) {
         bucket_t *nbucket;
 
-        bucket->key = NULL;
+        memcpy(bucket->key, ht->del_marker, ht->ksize);
 
         i = fast_rem32(i + 1, ht->size, ht->divinfo);
-        nbucket = &ht->buckets[i];
+        nbucket = ht->buckets + i;
         ASSERT(validate_psl_p(ht, nbucket, i));
 
         /*
          * Stop if we reach an empty bucket or hit a key which
          * is in its base (original) location.
         */
-        if (!nbucket->key || nbucket->psl == 0) {
+        if (is_empty_bucket(ht, nbucket) || nbucket->psl == 0)
             break;
-        }
 
         nbucket->psl--;
         *bucket = *nbucket;
@@ -446,16 +405,20 @@ int LSUP_htable_del(HTable *ht, const void *key)
 
 
 extern int LSUP_htable_iter(
-        LSUP_HTable *ht, htsize_t *cur, void *key, void **valp)
+        LSUP_HTable *ht, htsize_t *cur, void **keyp, void **valp)
 {
     while (*cur < ht->size) {
-        bucket_t *bucket = &ht->buckets[*cur];
+        bucket_t *bucket = ht->buckets + *cur;
 
-        *cur++;
+        (*cur)++;
 
-        if (!bucket->key) continue;
+        if (is_empty_bucket(ht, bucket)) {
+            TRACE("Empty bucket: %d. Skipping.", (*cur) - 1);
+            continue;
+        }
 
-        memcpy(key, bucket->key, ht->ksize);
+        // Copy key, and if relevant, value.
+        *keyp = bucket->key;
         if (valp != NULL && ht->vsize > 0) *valp = bucket->val;
 
         return LSUP_OK;
@@ -466,18 +429,8 @@ extern int LSUP_htable_iter(
 
 
 void LSUP_htable_done(HTable *ht)
 {
-    if ((ht->flags & HTABLE_NOCOPY) == 0) {
-        for (htsize_t i = 0; i < ht->size; i++) {
-            const bucket_t *bucket = &ht->buckets[i];
-
-            if (bucket->key) {
-                free(bucket->key);
-            }
-        }
-    }
-    if (ht->buckets != &ht->init_bucket) {
-        free(ht->buckets);
-    }
+    if(LIKELY(ht->buckets != NULL)) free(ht->buckets);
+    free(ht->del_marker);
 }
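
The sketch below is not part of the patch; it is a minimal illustration of how the API touched above fits together after the change. It assumes LSUP_TripleKey is an array of three 64-bit integers (as the TRACE format strings suggest), that the key_hash_fn_t and key_eq_fn_t typedefs match the call sites ht->key_hash_fn(key, ksize, seed) and ht->key_eq_fn(a, b, ksize) seen in the diff, and that the handle returned by LSUP_htable_new() is released with plain free(), since LSUP_htable_done() only frees the buckets and the deletion marker. demo_hash, demo_eq and demo() are hypothetical names.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "htable.h"

/* Hypothetical FNV-1a style hash matching the (key, ksize, seed) call site. */
static uint64_t demo_hash(const void *key, ksize_t ksize, uint64_t seed)
{
    const unsigned char *p = key;
    uint64_t h = seed ^ 0xcbf29ce484222325ULL;
    for (ksize_t i = 0; i < ksize; i++) {
        h ^= p[i];
        h *= 0x100000001b3ULL;
    }
    return h;
}

/* Hypothetical byte-wise equality matching the (a, b, ksize) call site. */
static bool demo_eq(const void *a, const void *b, ksize_t ksize)
{ return memcmp(a, b, ksize) == 0; }

int demo(void)
{
    LSUP_TripleKey key = {3, 5, 7};
    char *val = "value";

    // A size of 0 is clipped up to MIN_HT_SIZE inside LSUP_htable_resize().
    HTable *ht = LSUP_htable_new(
            0, sizeof(LSUP_TripleKey), sizeof(char *),
            demo_hash, demo_eq, 0);

    if (LSUP_htable_insert(ht, key, val) != LSUP_OK) return -1;

    void *found = NULL;
    if (LSUP_htable_get(ht, key, &found) == LSUP_OK) {
        // found now points to the stored value.
    }

    // Iterate over occupied buckets; keyp/valp point at bucket storage.
    htsize_t cur = 0;
    void *keyp, *valp;
    while (LSUP_htable_iter(ht, &cur, &keyp, &valp) == LSUP_OK)
        ;

    LSUP_htable_del(ht, key);
    LSUP_htable_done(ht);
    free(ht);   // Assumed: _done() releases internals only, not the handle.
    return 0;
}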