/* This file is included by symbol.c */

#include "id_table.h"

#ifndef ID_TABLE_DEBUG
#define ID_TABLE_DEBUG 0
#endif

#if ID_TABLE_DEBUG == 0
#undef NDEBUG
#define NDEBUG
#endif
#include "ruby_assert.h"

typedef rb_id_serial_t id_key_t;

static inline ID
key2id(id_key_t key)
{
    return rb_id_serial_to_id(key);
}

static inline id_key_t
id2key(ID id)
{
    return rb_id_to_serial(id);
}

/* Simple open addressing with quadratic probing.
   Collisions are tracked with a mark bit, which needs one extra bit per
   slot; on builds where VALUE is not 8 bytes the bit is stolen from the
   key word, which is safe because ID is strictly 3 bits larger than
   rb_id_serial_t. (A compiled-out sketch after the accessor macros below
   illustrates the packing.) */

typedef struct rb_id_item {
    id_key_t key;
#if SIZEOF_VALUE == 8
    int collision;
#endif
    VALUE val;
} item_t;

struct rb_id_table {
    int capa;
    int num;
    int used;
    item_t *items;
};

#if SIZEOF_VALUE == 8
#define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key)
#define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].collision)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].collision = 1)
static inline void
ITEM_SET_KEY(struct rb_id_table *tbl, int i, id_key_t key)
{
    tbl->items[i].key = key;
}
#else
#define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key >> 1)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key > 1)
#define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].key & 1)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].key |= 1)
static inline void
ITEM_SET_KEY(struct rb_id_table *tbl, int i, id_key_t key)
{
    tbl->items[i].key = (key << 1) | ITEM_COLLIDED(tbl, i);
}
#endif
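
/* Illustrative sketch, compiled out: how the non-8-byte-VALUE branch packs
   the collision mark into the low bit of the key word. The helper name and
   the table/slot arguments are hypothetical; the assertions hold on both
   branches. */
#if 0
static void
item_packing_sketch(struct rb_id_table *t, int i)
{
    ITEM_SET_KEY(t, i, 5);                 /* 32-bit word: (5 << 1) | 0 == 10 */
    ITEM_SET_COLLIDED(t, i);               /* 32-bit word: 10 | 1 == 11 */
    RUBY_ASSERT(ITEM_GET_KEY(t, i) == 5);  /* 11 >> 1 recovers the key */
    RUBY_ASSERT(ITEM_COLLIDED(t, i));      /* low bit is the collision mark */
}
#endif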

static inline int
round_capa(int capa)
{
    /* minsize is 4 */
    capa >>= 2;
    capa |= capa >> 1;
    capa |= capa >> 2;
    capa |= capa >> 4;
    capa |= capa >> 8;
    capa |= capa >> 16;
    return (capa + 1) << 2;
}
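
/* Editorial note, worked values: round_capa() returns the smallest power of
   two strictly above `capa`, never less than the minimum size of 4:
   round_capa(0) == 4, round_capa(5) == 8, round_capa(17) == 32. */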

static struct rb_id_table *
rb_id_table_init(struct rb_id_table *tbl, int capa)
{
    MEMZERO(tbl, struct rb_id_table, 1);
    if (capa > 0) {
        capa = round_capa(capa);
        tbl->capa = (int)capa;
        tbl->items = ZALLOC_N(item_t, capa);
    }
    return tbl;
}

struct rb_id_table *
rb_id_table_create(size_t capa)
{
    struct rb_id_table *tbl = ALLOC(struct rb_id_table);
    return rb_id_table_init(tbl, (int)capa);
}

void
rb_id_table_free(struct rb_id_table *tbl)
{
    xfree(tbl->items);
    xfree(tbl);
}

void
rb_id_table_clear(struct rb_id_table *tbl)
{
    tbl->num = 0;
    tbl->used = 0;
    MEMZERO(tbl->items, item_t, tbl->capa);
}

size_t
rb_id_table_size(const struct rb_id_table *tbl)
{
    return (size_t)tbl->num;
}

size_t
rb_id_table_memsize(const struct rb_id_table *tbl)
{
    return sizeof(item_t) * tbl->capa + sizeof(struct rb_id_table);
}

static int
hash_table_index(struct rb_id_table* tbl, id_key_t key)
{
    if (tbl->capa > 0) {
        int mask = tbl->capa - 1;
        int ix = key & mask;
        int d = 1;
        while (key != ITEM_GET_KEY(tbl, ix)) {
            if (!ITEM_COLLIDED(tbl, ix))
                return -1;
            ix = (ix + d) & mask;
            d++;
        }
        return ix;
    }
    return -1;
}
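
/* Editorial note: with the growing step `d`, the probe sequence from home
   slot h is h, h+1, h+3, h+6, ... (mod capa), i.e. triangular-number
   offsets; for a power-of-two capacity this visits every slot. Lookup can
   stop early at any probed slot whose collision mark is unset, because no
   insertion has ever been bumped past it. */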

static void
hash_table_raw_insert(struct rb_id_table *tbl, id_key_t key, VALUE val)
{
    int mask = tbl->capa - 1;
    int ix = key & mask;
    int d = 1;
    RUBY_ASSERT(key != 0);
    while (ITEM_KEY_ISSET(tbl, ix)) {
        ITEM_SET_COLLIDED(tbl, ix);
        ix = (ix + d) & mask;
        d++;
    }
    tbl->num++;
    if (!ITEM_COLLIDED(tbl, ix)) {
        tbl->used++;
    }
    ITEM_SET_KEY(tbl, ix, key);
    tbl->items[ix].val = val;
}

static int
hash_delete_index(struct rb_id_table *tbl, int ix)
{
    if (ix >= 0) {
        if (!ITEM_COLLIDED(tbl, ix)) {
            tbl->used--;
        }
        tbl->num--;
        ITEM_SET_KEY(tbl, ix, 0);
        tbl->items[ix].val = 0;
        return TRUE;
    }
    else {
        return FALSE;
    }
}
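
/* Editorial note: deletion clears the key and value but deliberately leaves
   the collision mark in place, so probe chains passing through the freed
   slot stay intact. `num` (live entries) drops on every delete, while
   `used` only drops for unmarked slots; the gap between the two is what
   later pushes hash_table_extend() into a rebuild. */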

static void
hash_table_extend(struct rb_id_table* tbl)
{
    if (tbl->used + (tbl->used >> 1) >= tbl->capa) {
        int new_cap = round_capa(tbl->num + (tbl->num >> 1));
        int i;
        item_t* old;
        struct rb_id_table tmp_tbl = {0, 0, 0};
        if (new_cap < tbl->capa) {
            new_cap = round_capa(tbl->used + (tbl->used >> 1));
        }
        tmp_tbl.capa = new_cap;
        tmp_tbl.items = ZALLOC_N(item_t, new_cap);
        for (i = 0; i < tbl->capa; i++) {
            id_key_t key = ITEM_GET_KEY(tbl, i);
            if (key != 0) {
                hash_table_raw_insert(&tmp_tbl, key, tbl->items[i].val);
            }
        }
        old = tbl->items;
        *tbl = tmp_tbl;
        xfree(old);
    }
}
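
/* Editorial note: the guard `used + (used >> 1) >= capa` fires once used
   slots (live or tombstoned) reach roughly two thirds of capacity. The new
   capacity is derived from `num`, the live-entry count, falling back to a
   `used`-based size when that would undershoot the current capacity; either
   way the rebuilt table starts with every collision mark clear. */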

#if ID_TABLE_DEBUG && 0
static void
hash_table_show(struct rb_id_table *tbl)
{
    const int capa = tbl->capa;
    int i;

    fprintf(stderr, "tbl: %p (capa: %d, num: %d, used: %d)\n", (void *)tbl, tbl->capa, tbl->num, tbl->used);
    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            const id_key_t key = ITEM_GET_KEY(tbl, i);
            fprintf(stderr, " -> [%d] %s %d\n", i, rb_id2name(key2id(key)), (int)key);
        }
    }
}
#endif

int
rb_id_table_lookup(struct rb_id_table *tbl, ID id, VALUE *valp)
{
    id_key_t key = id2key(id);
    int index = hash_table_index(tbl, key);

    if (index >= 0) {
        *valp = tbl->items[index].val;
        return TRUE;
    }
    else {
        return FALSE;
    }
}
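
/* Editorial note: on a miss, rb_id_table_lookup() returns FALSE and leaves
   *valp untouched, so callers must check the return value before reading it. */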

static int
rb_id_table_insert_key(struct rb_id_table *tbl, const id_key_t key, const VALUE val)
{
    const int index = hash_table_index(tbl, key);

    if (index >= 0) {
        tbl->items[index].val = val;
    }
    else {
        hash_table_extend(tbl);
        hash_table_raw_insert(tbl, key, val);
    }
    return TRUE;
}

int
rb_id_table_insert(struct rb_id_table *tbl, ID id, VALUE val)
{
    return rb_id_table_insert_key(tbl, id2key(id), val);
}

int
rb_id_table_delete(struct rb_id_table *tbl, ID id)
{
    const id_key_t key = id2key(id);
    int index = hash_table_index(tbl, key);
    return hash_delete_index(tbl, index);
}

void
rb_id_table_foreach(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, void *data)
{
    int i, capa = tbl->capa;

    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            const id_key_t key = ITEM_GET_KEY(tbl, i);
            enum rb_id_table_iterator_result ret = (*func)(key2id(key), tbl->items[i].val, data);
            RUBY_ASSERT(key != 0);

            if (ret == ID_TABLE_DELETE)
                hash_delete_index(tbl, i);
            else if (ret == ID_TABLE_STOP)
                return;
        }
    }
}
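
/* Illustrative sketch, compiled out: a foreach callback following the
   iterator protocol above. The function name and the `long` counter passed
   through `data` are hypothetical, not part of the API. */
#if 0
static enum rb_id_table_iterator_result
count_entries_i(ID id, VALUE val, void *data)
{
    long *count = (long *)data;
    (*count)++;
    return ID_TABLE_CONTINUE;  /* or ID_TABLE_STOP / ID_TABLE_DELETE */
}
#endif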

void
rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
{
    int i, capa = tbl->capa;

    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            enum rb_id_table_iterator_result ret = (*func)(tbl->items[i].val, data);

            if (ret == ID_TABLE_DELETE)
                hash_delete_index(tbl, i);
            else if (ret == ID_TABLE_STOP)
                return;
        }
    }
}

void
rb_id_table_foreach_values_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, rb_id_table_update_value_callback_func_t *replace, void *data)
{
    int i, capa = tbl->capa;

    for (i = 0; i < capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            enum rb_id_table_iterator_result ret = (*func)(tbl->items[i].val, data);

            if (ret == ID_TABLE_REPLACE) {
                VALUE val = tbl->items[i].val;
                ret = (*replace)(&val, data, TRUE);
                tbl->items[i].val = val;
            }

            if (ret == ID_TABLE_STOP)
                return;
        }
    }
}
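
/* Illustrative sketch, compiled out: the table's lifecycle end to end.
   `some_id` stands for any interned ID (e.g. from rb_intern()); the helper
   name is hypothetical. */
#if 0
static void
id_table_usage_sketch(ID some_id)
{
    struct rb_id_table *tbl = rb_id_table_create(0); /* storage grows on demand */
    VALUE found;

    rb_id_table_insert(tbl, some_id, Qtrue);
    if (rb_id_table_lookup(tbl, some_id, &found)) {
        RUBY_ASSERT(found == Qtrue);
    }
    RUBY_ASSERT(rb_id_table_size(tbl) == 1);

    rb_id_table_delete(tbl, some_id);
    rb_id_table_free(tbl);
}
#endif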