2 * Various storage structures (pool allocation, vector, hash table)
4 * Copyright (C) 1993, Eric Youngdale.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
26 #include "wine/debug.h"
28 #include "dbghelp_private.h"
33 WINE_DEFAULT_DEBUG_CHANNEL(dbghelp);
/* One arena of a pool: a single heap block whose payload is carved out
 * sequentially.  'current' points at the first free byte inside the block
 * (see pool_alloc/pool_destroy which both rely on these two fields). */
struct pool_arena
{
    struct pool_arena*  next;       /* singly-linked list of arenas in the pool */
    char*               current;    /* first unused byte in this arena */
};
41 void pool_init(struct pool* a, unsigned arena_size)
43 a->arena_size = arena_size;
47 void pool_destroy(struct pool* pool)
49 struct pool_arena* arena;
50 struct pool_arena* next;
53 unsigned alloc, used, num;
55 for (alloc = used = num = 0, arena = pool->first; arena; arena = arena->next)
57 alloc += pool->arena_size;
58 used += arena->current - (char*)arena;
61 FIXME("STATS: pool %p has allocated %u kbytes, used %u kbytes in %u arenas,\n"
62 "\t\t\t\tnon-allocation ratio: %.2f%%\n",
63 pool, alloc >> 10, used >> 10, num, 100.0 - (float)used / (float)alloc * 100.0);
66 for (arena = pool->first; arena; arena = next)
69 HeapFree(GetProcessHeap(), 0, arena);
74 void* pool_alloc(struct pool* pool, unsigned len)
76 struct pool_arena* arena;
79 len = (len + 3) & ~3; /* round up size on DWORD boundary */
80 assert(sizeof(struct pool_arena) + len <= pool->arena_size && len);
82 for (arena = pool->first; arena; arena = arena->next)
84 if ((char*)arena + pool->arena_size - arena->current >= len)
87 arena->current += len;
92 arena = HeapAlloc(GetProcessHeap(), 0, pool->arena_size);
93 if (!arena) {FIXME("OOM\n");return NULL;}
95 ret = (char*)arena + sizeof(*arena);
96 arena->next = pool->first;
98 arena->current = (char*)ret + len;
/* Duplicate 'str' into pool-owned storage.  Returns NULL on OOM. */
char* pool_strdup(struct pool* pool, const char* str)
{
    char* ret;

    if ((ret = pool_alloc(pool, strlen(str) + 1))) strcpy(ret, str);
    return ret;
}
109 void vector_init(struct vector* v, unsigned esz, unsigned bucket_sz)
112 /* align size on DWORD boundaries */
113 v->elt_size = (esz + 3) & ~3;
116 case 2: v->shift = 1; break;
117 case 4: v->shift = 2; break;
118 case 8: v->shift = 3; break;
119 case 16: v->shift = 4; break;
120 case 32: v->shift = 5; break;
121 case 64: v->shift = 6; break;
122 case 128: v->shift = 7; break;
123 case 256: v->shift = 8; break;
124 case 512: v->shift = 9; break;
125 case 1024: v->shift = 10; break;
129 v->buckets_allocated = 0;
133 unsigned vector_length(const struct vector* v)
138 void* vector_at(const struct vector* v, unsigned pos)
142 if (pos >= v->num_elts) return NULL;
143 o = pos & ((1 << v->shift) - 1);
144 return (char*)v->buckets[pos >> v->shift] + o * v->elt_size;
147 void* vector_add(struct vector* v, struct pool* pool)
149 unsigned ncurr = v->num_elts++;
151 /* check that we don't wrap around */
152 assert(v->num_elts > ncurr);
153 if (ncurr == (v->num_buckets << v->shift))
155 if(v->num_buckets == v->buckets_allocated)
157 /* Double the bucket cache, so it scales well with big vectors.*/
158 unsigned new_reserved;
161 new_reserved = 2*v->buckets_allocated;
162 if(new_reserved == 0) new_reserved = 1;
164 /* Don't even try to resize memory.
165 Pool datastructure is very inefficient with reallocs. */
166 new = pool_alloc(pool, new_reserved * sizeof(void*));
167 memcpy(new, v->buckets, v->buckets_allocated * sizeof(void*));
169 v->buckets_allocated = new_reserved;
171 v->buckets[v->num_buckets] = pool_alloc(pool, v->elt_size << v->shift);
172 return v->buckets[v->num_buckets++];
174 return vector_at(v, ncurr);
177 static unsigned vector_position(const struct vector* v, const void* elt)
181 for (i = 0; i < v->num_buckets; i++)
183 if (v->buckets[i] <= elt &&
184 (const char*)elt < (const char*)v->buckets[i] + (v->elt_size << v->shift))
186 return (i << v->shift) +
187 ((const char*)elt - (const char*)v->buckets[i]) / v->elt_size;
194 void* vector_iter_up(const struct vector* v, const void* elt)
198 if (!elt) return vector_at(v, 0);
199 pos = vector_position(v, elt) + 1;
200 if (pos >= vector_length(v)) return NULL;
201 return vector_at(v, pos);
204 void* vector_iter_down(const struct vector* v, const void* elt)
207 if (!elt) return vector_at(v, vector_length(v) - 1);
208 pos = vector_position(v, elt);
209 if (pos == 0) return NULL;
210 return vector_at(v, pos - 1);
213 /* We construct the sparse array as two vectors (of equal size)
214 * The first vector (key2index) is the lookup table between the key and
215 * an index in the second vector (elements)
216 * When inserting an element, it's always appended in second vector (and
217 * never moved in memory later on), only the first vector is reordered
225 void sparse_array_init(struct sparse_array* sa, unsigned elt_sz, unsigned bucket_sz)
227 vector_init(&sa->key2index, sizeof(struct key2index), bucket_sz);
228 vector_init(&sa->elements, elt_sz, bucket_sz);
231 /******************************************************************
232 * sparse_array_lookup
234 * Returns the first index which key is >= at passed key
236 static struct key2index* sparse_array_lookup(const struct sparse_array* sa,
237 unsigned long key, unsigned* idx)
239 struct key2index* pk2i;
242 if (!sa->elements.num_elts)
247 high = sa->elements.num_elts;
248 pk2i = vector_at(&sa->key2index, high - 1);
254 if (pk2i->key == key)
260 pk2i = vector_at(&sa->key2index, low);
261 if (pk2i->key >= key)
266 /* now we have: sa(lowest key) < key < sa(highest key) */
269 *idx = (low + high) / 2;
270 pk2i = vector_at(&sa->key2index, *idx);
271 if (pk2i->key > key) high = *idx;
272 else if (pk2i->key < key) low = *idx + 1;
275 /* binary search could return exact item, we search for highest one
279 pk2i = vector_at(&sa->key2index, ++(*idx));
283 void* sparse_array_find(const struct sparse_array* sa, unsigned long key)
286 struct key2index* pk2i;
288 if ((pk2i = sparse_array_lookup(sa, key, &idx)) && pk2i->key == key)
289 return vector_at(&sa->elements, pk2i->index);
293 void* sparse_array_add(struct sparse_array* sa, unsigned long key,
297 struct key2index* pk2i;
298 struct key2index* to;
300 pk2i = sparse_array_lookup(sa, key, &idx);
301 if (pk2i && pk2i->key == key)
303 FIXME("re adding an existing key\n");
306 to = vector_add(&sa->key2index, pool);
309 /* we need to shift vector's content... */
310 /* let's do it brute force... (FIXME) */
311 assert(sa->key2index.num_elts >= 2);
312 for (i = sa->key2index.num_elts - 1; i > idx; i--)
314 pk2i = vector_at(&sa->key2index, i - 1);
321 to->index = sa->elements.num_elts;
323 return vector_add(&sa->elements, pool);
326 unsigned sparse_array_length(const struct sparse_array* sa)
328 return sa->elements.num_elts;
/* Hash a NUL-terminated name into [0, num_buckets) using Jenkins'
 * one-at-a-time hash (good avalanche behavior for short symbol names). */
unsigned hash_table_hash(const char* name, unsigned num_buckets)
{
    unsigned    hash = 0;

    while (*name)
    {
        hash += *name++;
        hash += (hash << 10);
        hash ^= (hash >> 6);
    }
    hash += (hash << 3);
    hash ^= (hash >> 11);
    hash += (hash << 15);
    return hash % num_buckets;
}
346 void hash_table_init(struct pool* pool, struct hash_table* ht, unsigned num_buckets)
349 ht->buckets = pool_alloc(pool, num_buckets * sizeof(struct hash_table_elt*));
351 ht->num_buckets = num_buckets;
352 memset(ht->buckets, 0, num_buckets * sizeof(struct hash_table_elt*));
/* Tear down the hash table.  Storage lives in the owning pool, so there
 * is nothing to free here; with USE_STATS this dumps bucket statistics. */
void hash_table_destroy(struct hash_table* ht)
{
#if defined(USE_STATS)
    int                     i;
    unsigned                len;
    unsigned                min = 0xffffffff, max = 0, sq = 0;
    struct hash_table_elt*  elt;
    double                  mean, variance;

    for (i = 0; i < ht->num_buckets; i++)
    {
        for (len = 0, elt = ht->buckets[i]; elt; elt = elt->next) len++;
        if (len < min) min = len;
        if (len > max) max = len;
        sq += len * len;
    }
    mean = (double)ht->num_elts / ht->num_buckets;
    variance = (double)sq / ht->num_buckets - mean * mean;
    FIXME("STATS: elts[num:%-4u size:%u mean:%f] buckets[min:%-4u variance:%+f max:%-4u]\n",
          ht->num_elts, ht->num_buckets, mean, min, variance, max);

    /* dump the contents of (one of) the longest chain(s) */
    for (i = 0; i < ht->num_buckets; i++)
    {
        for (len = 0, elt = ht->buckets[i]; elt; elt = elt->next) len++;
        if (len == max)
        {
            FIXME("Longuest bucket:\n");
            for (elt = ht->buckets[i]; elt; elt = elt->next)
                FIXME("\t%s\n", elt->name);
            break;
        }
    }
#endif
}
392 void hash_table_add(struct hash_table* ht, struct hash_table_elt* elt)
394 unsigned hash = hash_table_hash(elt->name, ht->num_buckets);
395 struct hash_table_elt** p;
397 /* in some cases, we need to get back the symbols of same name in the order
398 * in which they've been inserted. So insert new elements at the end of the list.
400 for (p = &ht->buckets[hash]; *p; p = &((*p)->next));
406 void* hash_table_find(const struct hash_table* ht, const char* name)
408 unsigned hash = hash_table_hash(name, ht->num_buckets);
409 struct hash_table_elt* elt;
411 for (elt = ht->buckets[hash]; elt; elt = elt->next)
412 if (!strcmp(name, elt->name)) return elt;
416 void hash_table_iter_init(const struct hash_table* ht,
417 struct hash_table_iter* hti, const char* name)
422 hti->last = hash_table_hash(name, ht->num_buckets);
423 hti->index = hti->last - 1;
427 hti->last = ht->num_buckets - 1;
433 void* hash_table_iter_up(struct hash_table_iter* hti)
435 if (hti->element) hti->element = hti->element->next;
436 while (!hti->element && hti->index < hti->last)
437 hti->element = hti->ht->buckets[++hti->index];