Diffstat (limited to 'rts')
-rw-r--r--  rts/Hash.c  128
1 file changed, 64 insertions(+), 64 deletions(-)

(Every removed/added pair below differs only in leading whitespace: this is a whitespace-only reindentation of rts/Hash.c.)
diff --git a/rts/Hash.c b/rts/Hash.c
index 9ab8ffb53e..b91d70c219 100644
--- a/rts/Hash.c
+++ b/rts/Hash.c
@@ -17,13 +17,13 @@
#include <string.h>
#define HSEGSIZE 1024 /* Size of a single hash table segment */
- /* Also the minimum size of a hash table */
+ /* Also the minimum size of a hash table */
#define HDIRSIZE 1024 /* Size of the segment directory */
- /* Maximum hash table size is HSEGSIZE * HDIRSIZE */
-#define HLOAD 5 /* Maximum average load of a single hash bucket */
+ /* Maximum hash table size is HSEGSIZE * HDIRSIZE */
+#define HLOAD 5 /* Maximum average load of a single hash bucket */
-#define HCHUNK (1024 * sizeof(W_) / sizeof(HashList))
- /* Number of HashList cells to allocate in one go */
+#define HCHUNK (1024 * sizeof(W_) / sizeof(HashList))
+ /* Number of HashList cells to allocate in one go */
/* Linked list of (key, data) pairs for separate chaining */
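For orientation (not part of the diff): a standalone check of what the constants above imply about the table's limits.

    #include <stdio.h>

    #define HSEGSIZE 1024   /* buckets per segment */
    #define HDIRSIZE 1024   /* segments in the directory */

    int main(void)
    {
        /* The table tops out at HSEGSIZE * HDIRSIZE = 1,048,576 buckets,
         * which is exactly the bound expand() checks before growing. */
        printf("max buckets: %d\n", HSEGSIZE * HDIRSIZE);
        return 0;
    }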
@@ -39,13 +39,13 @@ typedef struct chunklist {
} HashListChunk;
struct hashtable {
- int split; /* Next bucket to split when expanding */
- int max; /* Max bucket of smaller table */
- int mask1; /* Mask for doing the mod of h_1 (smaller table) */
- int mask2; /* Mask for doing the mod of h_2 (larger table) */
- int kcount; /* Number of keys */
- int bcount; /* Number of buckets */
- HashList **dir[HDIRSIZE]; /* Directory of segments */
+ int split; /* Next bucket to split when expanding */
+ int max; /* Max bucket of smaller table */
+ int mask1; /* Mask for doing the mod of h_1 (smaller table) */
+ int mask2; /* Mask for doing the mod of h_2 (larger table) */
+ int kcount; /* Number of keys */
+ int bcount; /* Number of buckets */
+ HashList **dir[HDIRSIZE]; /* Directory of segments */
HashList *freeList; /* free list of HashLists */
HashListChunk *chunks;
HashFunction *hash; /* hash function */
@@ -69,8 +69,8 @@ hashWord(HashTable *table, StgWord key)
bucket = key & table->mask1;
if (bucket < table->split) {
- /* Mod the size of the expanded hash table (also a power of 2) */
- bucket = key & table->mask2;
+ /* Mod the size of the expanded hash table (also a power of 2) */
+ bucket = key & table->mask2;
}
return bucket;
}
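For readers new to linear hashing, here is a standalone sketch (not the RTS code) of the bucket selection hashWord performs: buckets below the split pointer have already been split, so they must be addressed with the wider mask.

    #include <stdio.h>

    static unsigned pick_bucket(unsigned key, unsigned mask1,
                                unsigned mask2, unsigned split)
    {
        unsigned bucket = key & mask1;   /* mod the smaller table size */
        if (bucket < split)
            bucket = key & mask2;        /* already split: mod the doubled size */
        return bucket;
    }

    int main(void)
    {
        /* 8 original buckets (mask1 = 7) growing towards 16 (mask2 = 15),
         * with the first 3 buckets already split. */
        for (unsigned key = 0; key < 16; key++)
            printf("key %2u -> bucket %2u\n", key,
                   pick_bucket(key, 7, 15, 3));
        return 0;
    }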
@@ -83,17 +83,17 @@ hashStr(HashTable *table, char *key)
s = key;
for (h=0; *s; s++) {
- h *= 128;
- h += *s;
- h = h % 1048583; /* some random large prime */
+ h *= 128;
+ h += *s;
+ h = h % 1048583; /* some random large prime */
}
/* Mod the size of the hash table (a power of 2) */
bucket = h & table->mask1;
if (bucket < table->split) {
- /* Mod the size of the expanded hash table (also a power of 2) */
- bucket = h & table->mask2;
+ /* Mod the size of the expanded hash table (also a power of 2) */
+ bucket = h & table->mask2;
}
return bucket;
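The string hash above is a base-128 polynomial hash, reduced modulo the prime 1048583 at every step so h never overflows a machine int; a standalone equivalent:

    #include <stdio.h>

    static int hash_str(const char *s)
    {
        int h = 0;
        for (; *s; s++)
            h = (h * 128 + *s) % 1048583;   /* same "random large prime" */
        return h;
    }

    int main(void)
    {
        printf("%d\n", hash_str("Hash.c"));
        return 0;
    }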
@@ -119,8 +119,8 @@ compareStr(StgWord key1, StgWord key2)
static void
allocSegment(HashTable *table, int segment)
{
- table->dir[segment] = stgMallocBytes(HSEGSIZE * sizeof(HashList *),
- "allocSegment");
+ table->dir[segment] = stgMallocBytes(HSEGSIZE * sizeof(HashList *),
+ "allocSegment");
}
@@ -143,8 +143,8 @@ expand(HashTable *table)
HashList *old, *new;
if (table->split + table->max >= HDIRSIZE * HSEGSIZE)
- /* Wow! That's big. Too big, so don't expand. */
- return;
+ /* Wow! That's big. Too big, so don't expand. */
+ return;
/* Calculate indices of bucket to split */
oldsegment = table->split / HSEGSIZE;
@@ -157,13 +157,13 @@ expand(HashTable *table)
newindex = newbucket % HSEGSIZE;
if (newindex == 0)
- allocSegment(table, newsegment);
+ allocSegment(table, newsegment);
if (++table->split == table->max) {
- table->split = 0;
- table->max *= 2;
- table->mask1 = table->mask2;
- table->mask2 = table->mask2 << 1 | 1;
+ table->split = 0;
+ table->max *= 2;
+ table->mask1 = table->mask2;
+ table->mask2 = table->mask2 << 1 | 1;
}
table->bcount++;
@@ -171,14 +171,14 @@ expand(HashTable *table)
old = new = NULL;
for (hl = table->dir[oldsegment][oldindex]; hl != NULL; hl = next) {
- next = hl->next;
- if (table->hash(table, hl->key) == newbucket) {
- hl->next = new;
- new = hl;
- } else {
- hl->next = old;
- old = hl;
- }
+ next = hl->next;
+ if (table->hash(table, hl->key) == newbucket) {
+ hl->next = new;
+ new = hl;
+ } else {
+ hl->next = old;
+ old = hl;
+ }
}
table->dir[oldsegment][oldindex] = old;
table->dir[newsegment][newindex] = new;
@@ -199,8 +199,8 @@ lookupHashTable(HashTable *table, StgWord key)
index = bucket % HSEGSIZE;
for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next)
- if (table->compare(hl->key, key))
- return hl->data;
+ if (table->compare(hl->key, key))
+ return hl->data;
/* It's not there */
return NULL;
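lookupHashTable turns a flat bucket number into a two-level address: which 1024-entry segment, and which slot within it. Because growth only ever appends segments to the directory, existing buckets never move. A standalone illustration:

    #include <stdio.h>

    #define HSEGSIZE 1024

    int main(void)
    {
        int bucket  = 2500;
        int segment = bucket / HSEGSIZE;   /* directory slot: 2 */
        int index   = bucket % HSEGSIZE;   /* slot within segment: 452 */
        printf("bucket %d -> dir[%d][%d]\n", bucket, segment, index);
        return 0;
    }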
@@ -222,15 +222,15 @@ allocHashList (HashTable *table)
table->freeList = hl->next;
} else {
hl = stgMallocBytes(HCHUNK * sizeof(HashList), "allocHashList");
- cl = stgMallocBytes(sizeof (*cl), "allocHashList: chunkList");
+ cl = stgMallocBytes(sizeof (*cl), "allocHashList: chunkList");
cl->chunk = hl;
cl->next = table->chunks;
table->chunks = cl;
table->freeList = hl + 1;
for (p = table->freeList; p < hl + HCHUNK - 1; p++)
- p->next = p + 1;
- p->next = NULL;
+ p->next = p + 1;
+ p->next = NULL;
}
return hl;
}
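allocHashList amortises allocation: cells are carved out of one large chunk, threaded onto a free list in a single pass, and then handed out individually, so the common path never calls the allocator. A standalone sketch (error handling omitted):

    #include <stdio.h>
    #include <stdlib.h>

    #define CHUNK 128   /* cells per chunk, in the spirit of HCHUNK */

    typedef struct cell { struct cell *next; } Cell;

    static Cell *free_list = NULL;

    static Cell *alloc_cell(void)
    {
        Cell *c = free_list;
        if (c != NULL) {
            free_list = c->next;
            return c;
        }
        /* Free list empty: grab a whole chunk, return its first cell,
         * and thread the remaining cells onto the free list. */
        Cell *block = malloc(CHUNK * sizeof(Cell)), *p;
        free_list = block + 1;
        for (p = block + 1; p < block + CHUNK - 1; p++)
            p->next = p + 1;
        p->next = NULL;
        return block;
    }

    int main(void)
    {
        printf("%p %p\n", (void *)alloc_cell(), (void *)alloc_cell());
        return 0;
    }

The RTS additionally records each chunk on table->chunks so freeHashTable can release them wholesale rather than cell by cell.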
@@ -256,7 +256,7 @@ insertHashTable(HashTable *table, StgWord key, void *data)
/* When the average load gets too high, we expand the table */
if (++table->kcount >= HLOAD * table->bcount)
- expand(table);
+ expand(table);
bucket = table->hash(table, key);
segment = bucket / HSEGSIZE;
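The expansion trigger keeps the average chain length bounded: one new bucket is added as soon as the key count reaches HLOAD keys per bucket. A standalone check:

    #include <stdio.h>

    #define HLOAD 5   /* maximum average load, as defined above */

    static int should_expand(int kcount, int bcount)
    {
        return kcount >= HLOAD * bcount;
    }

    int main(void)
    {
        /* With 1024 buckets, inserting the 5120th key triggers growth. */
        printf("%d %d\n", should_expand(5119, 1024), should_expand(5120, 1024));
        return 0;
    }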
@@ -285,16 +285,16 @@ removeHashTable(HashTable *table, StgWord key, void *data)
index = bucket % HSEGSIZE;
for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next) {
- if (table->compare(hl->key,key) && (data == NULL || hl->data == data)) {
- if (prev == NULL)
- table->dir[segment][index] = hl->next;
- else
- prev->next = hl->next;
+ if (table->compare(hl->key,key) && (data == NULL || hl->data == data)) {
+ if (prev == NULL)
+ table->dir[segment][index] = hl->next;
+ else
+ prev->next = hl->next;
freeHashList(table,hl);
- table->kcount--;
- return hl->data;
- }
- prev = hl;
+ table->kcount--;
+ return hl->data;
+ }
+ prev = hl;
}
/* It's not there */
@@ -322,17 +322,17 @@ freeHashTable(HashTable *table, void (*freeDataFun)(void *) )
index = (table->max + table->split - 1) % HSEGSIZE;
while (segment >= 0) {
- while (index >= 0) {
- for (hl = table->dir[segment][index]; hl != NULL; hl = next) {
- next = hl->next;
- if (freeDataFun != NULL)
- (*freeDataFun)(hl->data);
+ while (index >= 0) {
+ for (hl = table->dir[segment][index]; hl != NULL; hl = next) {
+ next = hl->next;
+ if (freeDataFun != NULL)
+ (*freeDataFun)(hl->data);
}
- index--;
- }
- stgFree(table->dir[segment]);
- segment--;
- index = HSEGSIZE - 1;
+ index--;
+ }
+ stgFree(table->dir[segment]);
+ segment--;
+ index = HSEGSIZE - 1;
}
for (cl = table->chunks; cl != NULL; cl = cl_next) {
cl_next = cl->next;
@@ -358,7 +358,7 @@ allocHashTable_(HashFunction *hash, CompareFunction *compare)
allocSegment(table, 0);
for (hb = table->dir[0]; hb < table->dir[0] + HSEGSIZE; hb++)
- *hb = NULL;
+ *hb = NULL;
table->split = 0;
table->max = HSEGSIZE;
@@ -383,8 +383,8 @@ allocHashTable(void)
HashTable *
allocStrHashTable(void)
{
- return allocHashTable_((HashFunction *)hashStr,
- (CompareFunction *)compareStr);
+ return allocHashTable_((HashFunction *)hashStr,
+ (CompareFunction *)compareStr);
}
void
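To close, a hypothetical usage sketch of the API this diff touches. The include path is an assumption; the signatures follow the definitions above (lookupHashTable returns the stored void*, a NULL data argument to removeHashTable matches any value, and a NULL freeDataFun leaves the stored values alone).

    #include "Hash.h"   /* assumed name of the RTS header declaring these */

    static void demo(void)
    {
        HashTable *t = allocStrHashTable();               /* keys are C strings */
        insertHashTable(t, (StgWord)"answer", "42");
        char *v = (char *)lookupHashTable(t, (StgWord)"answer");
        (void)v;                                          /* "42", or NULL if absent */
        removeHashTable(t, (StgWord)"answer", NULL);      /* NULL data: match any */
        freeHashTable(t, NULL);                           /* NULL: don't free values */
    }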