rts: Add missing `const` from HashTable API
rts/Hash.c
/*-----------------------------------------------------------------------------
 *
 * (c) The AQUA Project, Glasgow University, 1995-1998
 * (c) The GHC Team, 1999
 *
 * Dynamically expanding linear hash tables, as described in
 * Per-Åke Larson, ``Dynamic Hash Tables,'' CACM 31(4), April 1988,
 * pp. 446 -- 457.
 * -------------------------------------------------------------------------- */
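
/* A minimal usage sketch (illustrative only; `someData` stands in for any
 * pointer the caller owns, everything else is defined in this file and
 * declared in Hash.h):
 *
 *     HashTable *t = allocHashTable();              // word-keyed table
 *     insertHashTable(t, (StgWord)42, someData);
 *     void *d = lookupHashTable(t, (StgWord)42);    // NULL if absent
 *     removeHashTable(t, (StgWord)42, NULL);        // NULL: match any data
 *     freeHashTable(t, NULL);                       // NULL: don't free data
 */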

#include "PosixSource.h"
#include "Rts.h"

#include "Hash.h"
#include "RtsUtils.h"

#include <string.h>

#define HSEGSIZE    1024    /* Size of a single hash table segment */
                            /* Also the minimum size of a hash table */
#define HDIRSIZE    1024    /* Size of the segment directory */
                            /* Maximum hash table size is HSEGSIZE * HDIRSIZE */
#define HLOAD       5       /* Maximum average load of a single hash bucket */

#define HCHUNK      (1024 * sizeof(W_) / sizeof(HashList))
                            /* Number of HashList cells to allocate in one go */


/* Linked list of (key, data) pairs for separate chaining */
typedef struct hashlist {
    StgWord key;
    const void *data;
    struct hashlist *next;  /* Next cell in bucket chain (same hash value) */
} HashList;

typedef struct chunklist {
    HashList *chunk;
    struct chunklist *next;
} HashListChunk;

struct hashtable {
    int split;                  /* Next bucket to split when expanding */
    int max;                    /* Max bucket of smaller table */
    int mask1;                  /* Mask for doing the mod of h_1 (smaller table) */
    int mask2;                  /* Mask for doing the mod of h_2 (larger table) */
    int kcount;                 /* Number of keys */
    int bcount;                 /* Number of buckets */
    HashList **dir[HDIRSIZE];   /* Directory of segments */
    HashList *freeList;         /* Free list of HashLists */
    HashListChunk *chunks;
    HashFunction *hash;         /* Hash function */
    CompareFunction *compare;   /* Key comparison function */
};

/* -----------------------------------------------------------------------------
 * Hash first using the smaller table. If the bucket is less than the
 * next bucket to be split, re-hash using the larger table.
 * -------------------------------------------------------------------------- */

int
hashWord(const HashTable *table, StgWord key)
{
    int bucket;

    /* Strip the boring zero bits */
    key /= sizeof(StgWord);

    /* Mod the size of the hash table (a power of 2) */
    bucket = key & table->mask1;

    if (bucket < table->split) {
        /* Mod the size of the expanded hash table (also a power of 2) */
        bucket = key & table->mask2;
    }
    return bucket;
}
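
/* Worked example of the scheme above (numbers chosen purely for illustration):
 * suppose max = 1024 (so mask1 = 0x3ff, mask2 = 0x7ff) and split = 4.  A key
 * whose masked value is 700 stays in bucket 700, because that bucket has not
 * been split yet.  A key whose masked value is 2 is re-hashed with mask2 and
 * lands in bucket 2 or bucket 1026, since bucket 2 has already been split into
 * those two buckets.
 */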

int
hashStr(const HashTable *table, char *key)
{
    int h, bucket;
    char *s;

    s = key;
    for (h = 0; *s; s++) {
        h *= 128;
        h += *s;
        h = h % 1048583;    /* some random large prime */
    }

    /* Mod the size of the hash table (a power of 2) */
    bucket = h & table->mask1;

    if (bucket < table->split) {
        /* Mod the size of the expanded hash table (also a power of 2) */
        bucket = h & table->mask2;
    }

    return bucket;
}

static int
compareWord(StgWord key1, StgWord key2)
{
    return (key1 == key2);
}

static int
compareStr(StgWord key1, StgWord key2)
{
    return (strcmp((char *)key1, (char *)key2) == 0);
}


/* -----------------------------------------------------------------------------
 * Allocate a new segment of the dynamically growing hash table.
 * -------------------------------------------------------------------------- */

static void
allocSegment(HashTable *table, int segment)
{
    table->dir[segment] = stgMallocBytes(HSEGSIZE * sizeof(HashList *),
                                         "allocSegment");
}


/* -----------------------------------------------------------------------------
 * Expand the larger hash table by one bucket, and split one bucket
 * from the smaller table into two parts.  Only the bucket referenced
 * by @table->split@ is affected by the expansion.
 * -------------------------------------------------------------------------- */

static void
expand(HashTable *table)
{
    int oldsegment;
    int oldindex;
    int newbucket;
    int newsegment;
    int newindex;
    HashList *hl;
    HashList *next;
    HashList *old, *new;

    if (table->split + table->max >= HDIRSIZE * HSEGSIZE)
        /* Wow! That's big. Too big, so don't expand. */
        return;

    /* Calculate indices of bucket to split */
    oldsegment = table->split / HSEGSIZE;
    oldindex = table->split % HSEGSIZE;

    newbucket = table->max + table->split;

    /* And the indices of the new bucket */
    newsegment = newbucket / HSEGSIZE;
    newindex = newbucket % HSEGSIZE;

    if (newindex == 0)
        allocSegment(table, newsegment);

    if (++table->split == table->max) {
        table->split = 0;
        table->max *= 2;
        table->mask1 = table->mask2;
        table->mask2 = table->mask2 << 1 | 1;
    }
    table->bcount++;

    /* Split the bucket, paying no attention to the original order */

    old = new = NULL;
    for (hl = table->dir[oldsegment][oldindex]; hl != NULL; hl = next) {
        next = hl->next;
        if (table->hash(table, hl->key) == newbucket) {
            hl->next = new;
            new = hl;
        } else {
            hl->next = old;
            old = hl;
        }
    }
    table->dir[oldsegment][oldindex] = old;
    table->dir[newsegment][newindex] = new;

    return;
}
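
/* Worked example (numbers chosen purely for illustration): with max = 1024 and
 * split = 3, expand() rehashes the chain in bucket 3, distributing its cells
 * between bucket 3 and the new bucket 1027 (= max + split), and then advances
 * split to 4.  Once split reaches max, the table doubles: max becomes 2048,
 * mask1 takes the old value of mask2, and splitting starts again from bucket 0.
 */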

void *
lookupHashTable(const HashTable *table, StgWord key)
{
    int bucket;
    int segment;
    int index;
    HashList *hl;

    bucket = table->hash(table, key);
    segment = bucket / HSEGSIZE;
    index = bucket % HSEGSIZE;

    for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next)
        if (table->compare(hl->key, key))
            return (void *) hl->data;

    /* It's not there */
    return NULL;
}

// Puts up to szKeys keys of the hash table into the given array.  Returns the
// number of keys actually retrieved.
//
// If the table is modified concurrently, the behaviour of this function is
// undefined.
//
int keysHashTable(HashTable *table, StgWord keys[], int szKeys) {
    int segment, index;
    int k = 0;
    HashList *hl;

    /* The last bucket with something in it is table->max + table->split - 1 */
    segment = (table->max + table->split - 1) / HSEGSIZE;
    index = (table->max + table->split - 1) % HSEGSIZE;

    while (segment >= 0 && k < szKeys) {
        while (index >= 0 && k < szKeys) {
            hl = table->dir[segment][index];
            while (hl && k < szKeys) {
                keys[k] = hl->key;
                k += 1;
                hl = hl->next;
            }
            index--;
        }
        segment--;
        index = HSEGSIZE - 1;
    }
    return k;
}
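
/* Illustrative call (a sketch only; the buffer size and names are assumed):
 *
 *     StgWord buf[64];
 *     int n = keysHashTable(t, buf, 64);    // copies at most 64 keys into buf
 */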

/* -----------------------------------------------------------------------------
 * We allocate the hashlist cells in large chunks to cut down on malloc
 * overhead.  Although we keep a free list of hashlist cells, we make
 * no effort to actually return the space to the malloc arena.
 * -------------------------------------------------------------------------- */
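
/* For instance, on a typical 64-bit platform a HashList cell is three words
 * (key, data, next) = 24 bytes, so HCHUNK = 1024 * 8 / 24 = 341 cells per
 * chunk, i.e. roughly 8 KB per allocation (the exact figures depend on the
 * platform's word size and any struct padding).
 */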

static HashList *
allocHashList (HashTable *table)
{
    HashList *hl, *p;
    HashListChunk *cl;

    if ((hl = table->freeList) != NULL) {
        table->freeList = hl->next;
    } else {
        hl = stgMallocBytes(HCHUNK * sizeof(HashList), "allocHashList");
        cl = stgMallocBytes(sizeof (*cl), "allocHashList: chunkList");
        cl->chunk = hl;
        cl->next = table->chunks;
        table->chunks = cl;

        table->freeList = hl + 1;
        for (p = table->freeList; p < hl + HCHUNK - 1; p++)
            p->next = p + 1;
        p->next = NULL;
    }
    return hl;
}

static void
freeHashList (HashTable *table, HashList *hl)
{
    hl->next = table->freeList;
    table->freeList = hl;
}

void
insertHashTable(HashTable *table, StgWord key, const void *data)
{
    int bucket;
    int segment;
    int index;
    HashList *hl;

    // Disable this assert; sometimes it's useful to be able to
    // overwrite entries in the hash table.
    // ASSERT(lookupHashTable(table, key) == NULL);

    /* When the average load gets too high, we expand the table */
    if (++table->kcount >= HLOAD * table->bcount)
        expand(table);

    bucket = table->hash(table, key);
    segment = bucket / HSEGSIZE;
    index = bucket % HSEGSIZE;

    hl = allocHashList(table);

    hl->key = key;
    hl->data = data;
    hl->next = table->dir[segment][index];
    table->dir[segment][index] = hl;
}

void *
removeHashTable(HashTable *table, StgWord key, const void *data)
{
    int bucket;
    int segment;
    int index;
    HashList *hl;
    HashList *prev = NULL;

    bucket = table->hash(table, key);
    segment = bucket / HSEGSIZE;
    index = bucket % HSEGSIZE;

    for (hl = table->dir[segment][index]; hl != NULL; hl = hl->next) {
        if (table->compare(hl->key, key) && (data == NULL || hl->data == data)) {
            if (prev == NULL)
                table->dir[segment][index] = hl->next;
            else
                prev->next = hl->next;
            freeHashList(table, hl);
            table->kcount--;
            return (void *) hl->data;
        }
        prev = hl;
    }

    /* It's not there */
    ASSERT(data == NULL);
    return NULL;
}

/* -----------------------------------------------------------------------------
 * When we free a hash table, we are also good enough to free the
 * data part of each (key, data) pair, as long as our caller can tell
 * us how to do it.
 * -------------------------------------------------------------------------- */
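
/* For example, a table whose data pointers were obtained from stgMallocBytes()
 * can be torn down with
 *
 *     freeHashTable(table, stgFree);
 *
 * while freeHashTable(table, NULL) frees only the table's own storage and
 * leaves the data untouched.
 */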

void
freeHashTable(HashTable *table, void (*freeDataFun)(void *))
{
    long segment;
    long index;
    HashList *hl;
    HashList *next;
    HashListChunk *cl, *cl_next;

    /* The last bucket with something in it is table->max + table->split - 1 */
    segment = (table->max + table->split - 1) / HSEGSIZE;
    index = (table->max + table->split - 1) % HSEGSIZE;

    while (segment >= 0) {
        while (index >= 0) {
            for (hl = table->dir[segment][index]; hl != NULL; hl = next) {
                next = hl->next;
                if (freeDataFun != NULL)
                    (*freeDataFun)((void *) hl->data);
            }
            index--;
        }
        stgFree(table->dir[segment]);
        segment--;
        index = HSEGSIZE - 1;
    }
    for (cl = table->chunks; cl != NULL; cl = cl_next) {
        cl_next = cl->next;
        stgFree(cl->chunk);
        stgFree(cl);
    }
    stgFree(table);
}

/* -----------------------------------------------------------------------------
 * When we initialize a hash table, we set up the first segment as well,
 * initializing all of the first segment's hash buckets to NULL.
 * -------------------------------------------------------------------------- */

HashTable *
allocHashTable_(HashFunction *hash, CompareFunction *compare)
{
    HashTable *table;
    HashList **hb;

    table = stgMallocBytes(sizeof(HashTable), "allocHashTable");

    allocSegment(table, 0);

    for (hb = table->dir[0]; hb < table->dir[0] + HSEGSIZE; hb++)
        *hb = NULL;

    table->split = 0;
    table->max = HSEGSIZE;
    table->mask1 = HSEGSIZE - 1;
    table->mask2 = 2 * HSEGSIZE - 1;
    table->kcount = 0;
    table->bcount = HSEGSIZE;
    table->freeList = NULL;
    table->chunks = NULL;
    table->hash = hash;
    table->compare = compare;

    return table;
}

HashTable *
allocHashTable(void)
{
    return allocHashTable_(hashWord, compareWord);
}

HashTable *
allocStrHashTable(void)
{
    return allocHashTable_((HashFunction *)hashStr,
                           (CompareFunction *)compareStr);
}
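
/* String-keyed tables store the key as a char pointer cast to StgWord and
 * compare with strcmp; the table does not copy the string, so the caller must
 * keep it alive for the table's lifetime.  Illustrative use (names assumed):
 *
 *     HashTable *st = allocStrHashTable();
 *     insertHashTable(st, (StgWord)"hello", someData);
 *     void *d = lookupHashTable(st, (StgWord)"hello");
 */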

void
exitHashTable(void)
{
    /* nothing to do */
}

int keyCountHashTable (HashTable *table)
{
    return table->kcount;
}