/src/binutils-gdb/libctf/ctf-create.c
Line | Count | Source |
1 | | /* CTF dict creation. |
2 | | Copyright (C) 2019-2023 Free Software Foundation, Inc. |
3 | | |
4 | | This file is part of libctf. |
5 | | |
6 | | libctf is free software; you can redistribute it and/or modify it under |
7 | | the terms of the GNU General Public License as published by the Free |
8 | | Software Foundation; either version 3, or (at your option) any later |
9 | | version. |
10 | | |
11 | | This program is distributed in the hope that it will be useful, but |
12 | | WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
14 | | See the GNU General Public License for more details. |
15 | | |
16 | | You should have received a copy of the GNU General Public License |
17 | | along with this program; see the file COPYING. If not see |
18 | | <http://www.gnu.org/licenses/>. */ |
19 | | |
20 | | #include <ctf-impl.h> |
21 | | #include <sys/param.h> |
22 | | #include <string.h> |
23 | | #include <unistd.h> |
24 | | |
25 | | #ifndef EOVERFLOW |
26 | | #define EOVERFLOW ERANGE |
27 | | #endif |
28 | | |
29 | | #ifndef roundup |
30 | | #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) |
31 | | #endif |
32 | | |
33 | | /* The initial size of a dynamic type's vlen in members. Arbitrary: the bigger |
34 | | this is, the less allocation needs to be done for small structure |
35 | | initialization, and the more memory is wasted for small structures during CTF |
36 | | construction. No effect on generated CTF or ctf_open()ed CTF. */ |
37 | 0 | #define INITIAL_VLEN 16 |
38 | | |
39 | | /* Make sure the ptrtab has enough space for at least one more type. |
40 | | |
41 | | We start with 4KiB of ptrtab, enough for a thousand types, then grow it 25% |
42 | | at a time. */ |
43 | | |
44 | | static int |
45 | | ctf_grow_ptrtab (ctf_dict_t *fp) |
46 | 0 | { |
47 | 0 | size_t new_ptrtab_len = fp->ctf_ptrtab_len; |
48 | | |
49 | | /* We allocate one more ptrtab entry than we need, for the initial zero, |
50 | | plus one because the caller will probably allocate a new type. */ |
51 | |
|
52 | 0 | if (fp->ctf_ptrtab == NULL) |
53 | 0 | new_ptrtab_len = 1024; |
54 | 0 | else if ((fp->ctf_typemax + 2) > fp->ctf_ptrtab_len) |
55 | 0 | new_ptrtab_len = fp->ctf_ptrtab_len * 1.25; |
56 | |
|
57 | 0 | if (new_ptrtab_len != fp->ctf_ptrtab_len) |
58 | 0 | { |
59 | 0 | uint32_t *new_ptrtab; |
60 | |
|
61 | 0 | if ((new_ptrtab = realloc (fp->ctf_ptrtab, |
62 | 0 | new_ptrtab_len * sizeof (uint32_t))) == NULL) |
63 | 0 | return (ctf_set_errno (fp, ENOMEM)); |
64 | | |
65 | 0 | fp->ctf_ptrtab = new_ptrtab; |
66 | 0 | memset (fp->ctf_ptrtab + fp->ctf_ptrtab_len, 0, |
67 | 0 | (new_ptrtab_len - fp->ctf_ptrtab_len) * sizeof (uint32_t)); |
68 | 0 | fp->ctf_ptrtab_len = new_ptrtab_len; |
69 | 0 | } |
70 | 0 | return 0; |
71 | 0 | } |
72 | | |
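To make the growth policy above concrete (an illustrative note, not part of the source): the table starts at 1024 entries (4 KiB of uint32_t), and once ctf_typemax + 2 exceeds the current length it grows to 1280, 1600, 2000, 2500, ... entries, so only O(log n) reallocations are needed as types are added.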
73 | | /* Make sure a vlen has enough space: expand it otherwise. Unlike the ptrtab, |
74 | | which grows quite slowly, the vlen grows in big jumps because it is quite |
75 | | expensive to expand: the caller has to scan the old vlen for string refs |
76 | | first and remove them, then re-add them afterwards. The initial size is |
77 | | more or less arbitrary. */ |
78 | | static int |
79 | | ctf_grow_vlen (ctf_dict_t *fp, ctf_dtdef_t *dtd, size_t vlen) |
80 | 0 | { |
81 | 0 | unsigned char *old = dtd->dtd_vlen; |
82 | |
|
83 | 0 | if (dtd->dtd_vlen_alloc > vlen) |
84 | 0 | return 0; |
85 | | |
86 | 0 | if ((dtd->dtd_vlen = realloc (dtd->dtd_vlen, |
87 | 0 | dtd->dtd_vlen_alloc * 2)) == NULL) |
88 | 0 | { |
89 | 0 | dtd->dtd_vlen = old; |
90 | 0 | return (ctf_set_errno (fp, ENOMEM)); |
91 | 0 | } |
92 | 0 | memset (dtd->dtd_vlen + dtd->dtd_vlen_alloc, 0, dtd->dtd_vlen_alloc); |
93 | 0 | dtd->dtd_vlen_alloc *= 2; |
94 | 0 | return 0; |
95 | 0 | } |
96 | | |
97 | | /* To create an empty CTF dict, we just declare a zeroed header and call |
98 | | ctf_bufopen() on it. If ctf_bufopen succeeds, we mark the new dict r/w and |
99 | | initialize the dynamic members. We start assigning type IDs at 1 because |
100 | | type ID 0 is used as a sentinel and a not-found indicator. */ |
101 | | |
102 | | ctf_dict_t * |
103 | | ctf_create (int *errp) |
104 | 0 | { |
105 | 0 | static const ctf_header_t hdr = { .cth_preamble = { CTF_MAGIC, CTF_VERSION, 0 } }; |
106 | |
|
107 | 0 | ctf_dynhash_t *dthash; |
108 | 0 | ctf_dynhash_t *dvhash; |
109 | 0 | ctf_dynhash_t *structs = NULL, *unions = NULL, *enums = NULL, *names = NULL; |
110 | 0 | ctf_dynhash_t *objthash = NULL, *funchash = NULL; |
111 | 0 | ctf_sect_t cts; |
112 | 0 | ctf_dict_t *fp; |
113 | |
|
114 | 0 | libctf_init_debug(); |
115 | 0 | dthash = ctf_dynhash_create (ctf_hash_integer, ctf_hash_eq_integer, |
116 | 0 | NULL, NULL); |
117 | 0 | if (dthash == NULL) |
118 | 0 | { |
119 | 0 | ctf_set_open_errno (errp, EAGAIN); |
120 | 0 | goto err; |
121 | 0 | } |
122 | | |
123 | 0 | dvhash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string, |
124 | 0 | NULL, NULL); |
125 | 0 | if (dvhash == NULL) |
126 | 0 | { |
127 | 0 | ctf_set_open_errno (errp, EAGAIN); |
128 | 0 | goto err_dt; |
129 | 0 | } |
130 | | |
131 | 0 | structs = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string, |
132 | 0 | NULL, NULL); |
133 | 0 | unions = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string, |
134 | 0 | NULL, NULL); |
135 | 0 | enums = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string, |
136 | 0 | NULL, NULL); |
137 | 0 | names = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string, |
138 | 0 | NULL, NULL); |
139 | 0 | objthash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string, |
140 | 0 | free, NULL); |
141 | 0 | funchash = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string, |
142 | 0 | free, NULL); |
143 | 0 | if (!structs || !unions || !enums || !names) |
144 | 0 | { |
145 | 0 | ctf_set_open_errno (errp, EAGAIN); |
146 | 0 | goto err_dv; |
147 | 0 | } |
148 | | |
149 | 0 | cts.cts_name = _CTF_SECTION; |
150 | 0 | cts.cts_data = &hdr; |
151 | 0 | cts.cts_size = sizeof (hdr); |
152 | 0 | cts.cts_entsize = 1; |
153 | |
|
154 | 0 | if ((fp = ctf_bufopen_internal (&cts, NULL, NULL, NULL, 1, errp)) == NULL) |
155 | 0 | goto err_dv; |
156 | | |
157 | 0 | fp->ctf_structs.ctn_writable = structs; |
158 | 0 | fp->ctf_unions.ctn_writable = unions; |
159 | 0 | fp->ctf_enums.ctn_writable = enums; |
160 | 0 | fp->ctf_names.ctn_writable = names; |
161 | 0 | fp->ctf_objthash = objthash; |
162 | 0 | fp->ctf_funchash = funchash; |
163 | 0 | fp->ctf_dthash = dthash; |
164 | 0 | fp->ctf_dvhash = dvhash; |
165 | 0 | fp->ctf_dtoldid = 0; |
166 | 0 | fp->ctf_snapshots = 1; |
167 | 0 | fp->ctf_snapshot_lu = 0; |
168 | 0 | fp->ctf_flags |= LCTF_DIRTY; |
169 | |
|
170 | 0 | ctf_set_ctl_hashes (fp); |
171 | 0 | ctf_setmodel (fp, CTF_MODEL_NATIVE); |
172 | 0 | if (ctf_grow_ptrtab (fp) < 0) |
173 | 0 | { |
174 | 0 | ctf_set_open_errno (errp, ctf_errno (fp)); |
175 | 0 | ctf_dict_close (fp); |
176 | 0 | return NULL; |
177 | 0 | } |
178 | | |
179 | 0 | return fp; |
180 | | |
181 | 0 | err_dv: |
182 | 0 | ctf_dynhash_destroy (structs); |
183 | 0 | ctf_dynhash_destroy (unions); |
184 | 0 | ctf_dynhash_destroy (enums); |
185 | 0 | ctf_dynhash_destroy (names); |
186 | 0 | ctf_dynhash_destroy (objthash); |
187 | 0 | ctf_dynhash_destroy (funchash); |
188 | 0 | ctf_dynhash_destroy (dvhash); |
189 | 0 | err_dt: |
190 | 0 | ctf_dynhash_destroy (dthash); |
191 | 0 | err: |
192 | 0 | return NULL; |
193 | 0 | } |
194 | | |
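As a usage illustration of ctf_create() (a minimal sketch against the public ctf-api.h interface; the helper name make_int_dict and the error handling are examples, not part of libctf):

  #include <ctf-api.h>
  #include <stdio.h>

  static int
  make_int_dict (void)
  {
    int err;
    ctf_dict_t *fp = ctf_create (&err);         /* Empty, writable dict.  */

    if (fp == NULL)
      {
        fprintf (stderr, "ctf_create: %s\n", ctf_errmsg (err));
        return -1;
      }

    ctf_encoding_t enc = { .cte_format = CTF_INT_SIGNED, .cte_offset = 0,
                           .cte_bits = 32 };
    if (ctf_add_integer (fp, CTF_ADD_ROOT, "int", &enc) == CTF_ERR)
      fprintf (stderr, "ctf_add_integer: %s\n", ctf_errmsg (ctf_errno (fp)));

    ctf_dict_close (fp);
    return 0;
  }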
195 | | /* Compatibility: just update the threshold for ctf_discard. */ |
196 | | int |
197 | | ctf_update (ctf_dict_t *fp) |
198 | 0 | { |
199 | 0 | if (!(fp->ctf_flags & LCTF_RDWR)) |
200 | 0 | return (ctf_set_errno (fp, ECTF_RDONLY)); |
201 | | |
202 | 0 | fp->ctf_dtoldid = fp->ctf_typemax; |
203 | 0 | return 0; |
204 | 0 | } |
205 | | |
206 | | ctf_names_t * |
207 | | ctf_name_table (ctf_dict_t *fp, int kind) |
208 | 0 | { |
209 | 0 | switch (kind) |
210 | 0 | { |
211 | 0 | case CTF_K_STRUCT: |
212 | 0 | return &fp->ctf_structs; |
213 | 0 | case CTF_K_UNION: |
214 | 0 | return &fp->ctf_unions; |
215 | 0 | case CTF_K_ENUM: |
216 | 0 | return &fp->ctf_enums; |
217 | 0 | default: |
218 | 0 | return &fp->ctf_names; |
219 | 0 | } |
220 | 0 | } |
221 | | |
222 | | int |
223 | | ctf_dtd_insert (ctf_dict_t *fp, ctf_dtdef_t *dtd, int flag, int kind) |
224 | 0 | { |
225 | 0 | const char *name; |
226 | 0 | if (ctf_dynhash_insert (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type, |
227 | 0 | dtd) < 0) |
228 | 0 | { |
229 | 0 | ctf_set_errno (fp, ENOMEM); |
230 | 0 | return -1; |
231 | 0 | } |
232 | | |
233 | 0 | if (flag == CTF_ADD_ROOT && dtd->dtd_data.ctt_name |
234 | 0 | && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL) |
235 | 0 | { |
236 | 0 | if (ctf_dynhash_insert (ctf_name_table (fp, kind)->ctn_writable, |
237 | 0 | (char *) name, (void *) (uintptr_t) |
238 | 0 | dtd->dtd_type) < 0) |
239 | 0 | { |
240 | 0 | ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) |
241 | 0 | dtd->dtd_type); |
242 | 0 | ctf_set_errno (fp, ENOMEM); |
243 | 0 | return -1; |
244 | 0 | } |
245 | 0 | } |
246 | 0 | ctf_list_append (&fp->ctf_dtdefs, dtd); |
247 | 0 | return 0; |
248 | 0 | } |
249 | | |
250 | | void |
251 | | ctf_dtd_delete (ctf_dict_t *fp, ctf_dtdef_t *dtd) |
252 | 0 | { |
253 | 0 | int kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info); |
254 | 0 | size_t vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info); |
255 | 0 | int name_kind = kind; |
256 | 0 | const char *name; |
257 | |
|
258 | 0 | ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type); |
259 | |
|
260 | 0 | switch (kind) |
261 | 0 | { |
262 | 0 | case CTF_K_STRUCT: |
263 | 0 | case CTF_K_UNION: |
264 | 0 | { |
265 | 0 | ctf_lmember_t *memb = (ctf_lmember_t *) dtd->dtd_vlen; |
266 | 0 | size_t i; |
267 | |
|
268 | 0 | for (i = 0; i < vlen; i++) |
269 | 0 | ctf_str_remove_ref (fp, ctf_strraw (fp, memb[i].ctlm_name), |
270 | 0 | &memb[i].ctlm_name); |
271 | 0 | } |
272 | 0 | break; |
273 | 0 | case CTF_K_ENUM: |
274 | 0 | { |
275 | 0 | ctf_enum_t *en = (ctf_enum_t *) dtd->dtd_vlen; |
276 | 0 | size_t i; |
277 | |
|
278 | 0 | for (i = 0; i < vlen; i++) |
279 | 0 | ctf_str_remove_ref (fp, ctf_strraw (fp, en[i].cte_name), |
280 | 0 | &en[i].cte_name); |
281 | 0 | } |
282 | 0 | break; |
283 | 0 | case CTF_K_FORWARD: |
284 | 0 | name_kind = dtd->dtd_data.ctt_type; |
285 | 0 | break; |
286 | 0 | } |
287 | 0 | free (dtd->dtd_vlen); |
288 | 0 | dtd->dtd_vlen_alloc = 0; |
289 | |
|
290 | 0 | if (dtd->dtd_data.ctt_name |
291 | 0 | && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL |
292 | 0 | && LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info)) |
293 | 0 | { |
294 | 0 | ctf_dynhash_remove (ctf_name_table (fp, name_kind)->ctn_writable, |
295 | 0 | name); |
296 | 0 | ctf_str_remove_ref (fp, name, &dtd->dtd_data.ctt_name); |
297 | 0 | } |
298 | |
|
299 | 0 | ctf_list_delete (&fp->ctf_dtdefs, dtd); |
300 | 0 | free (dtd); |
301 | 0 | } |
302 | | |
303 | | ctf_dtdef_t * |
304 | | ctf_dtd_lookup (const ctf_dict_t *fp, ctf_id_t type) |
305 | 0 | { |
306 | 0 | return (ctf_dtdef_t *) |
307 | 0 | ctf_dynhash_lookup (fp->ctf_dthash, (void *) (uintptr_t) type); |
308 | 0 | } |
309 | | |
310 | | ctf_dtdef_t * |
311 | | ctf_dynamic_type (const ctf_dict_t *fp, ctf_id_t id) |
312 | 0 | { |
313 | 0 | ctf_id_t idx; |
314 | |
|
315 | 0 | if (!(fp->ctf_flags & LCTF_RDWR)) |
316 | 0 | return NULL; |
317 | | |
318 | 0 | if ((fp->ctf_flags & LCTF_CHILD) && LCTF_TYPE_ISPARENT (fp, id)) |
319 | 0 | fp = fp->ctf_parent; |
320 | |
|
321 | 0 | idx = LCTF_TYPE_TO_INDEX(fp, id); |
322 | |
|
323 | 0 | if ((unsigned long) idx <= fp->ctf_typemax) |
324 | 0 | return ctf_dtd_lookup (fp, id); |
325 | 0 | return NULL; |
326 | 0 | } |
327 | | |
328 | | int |
329 | | ctf_dvd_insert (ctf_dict_t *fp, ctf_dvdef_t *dvd) |
330 | 0 | { |
331 | 0 | if (ctf_dynhash_insert (fp->ctf_dvhash, dvd->dvd_name, dvd) < 0) |
332 | 0 | { |
333 | 0 | ctf_set_errno (fp, ENOMEM); |
334 | 0 | return -1; |
335 | 0 | } |
336 | 0 | ctf_list_append (&fp->ctf_dvdefs, dvd); |
337 | 0 | return 0; |
338 | 0 | } |
339 | | |
340 | | void |
341 | | ctf_dvd_delete (ctf_dict_t *fp, ctf_dvdef_t *dvd) |
342 | 0 | { |
343 | 0 | ctf_dynhash_remove (fp->ctf_dvhash, dvd->dvd_name); |
344 | 0 | free (dvd->dvd_name); |
345 | |
|
346 | 0 | ctf_list_delete (&fp->ctf_dvdefs, dvd); |
347 | 0 | free (dvd); |
348 | 0 | } |
349 | | |
350 | | ctf_dvdef_t * |
351 | | ctf_dvd_lookup (const ctf_dict_t *fp, const char *name) |
352 | 0 | { |
353 | 0 | return (ctf_dvdef_t *) ctf_dynhash_lookup (fp->ctf_dvhash, name); |
354 | 0 | } |
355 | | |
356 | | /* Discard all of the dynamic type definitions and variable definitions that |
357 | | have been added to the dict since the last call to ctf_update(). We locate |
358 | | such types by scanning the dtd list and deleting elements that have type IDs |
359 | | greater than ctf_dtoldid, which is set by ctf_update(), above, and by |
360 | | scanning the variable list and deleting elements that have update IDs equal |
361 | | to the current value of the last-update snapshot count (indicating that they |
362 | | were added after the most recent call to ctf_update()). */ |
363 | | int |
364 | | ctf_discard (ctf_dict_t *fp) |
365 | 0 | { |
366 | 0 | ctf_snapshot_id_t last_update = |
367 | 0 | { fp->ctf_dtoldid, |
368 | 0 | fp->ctf_snapshot_lu + 1 }; |
369 | | |
370 | | /* Update required? */ |
371 | 0 | if (!(fp->ctf_flags & LCTF_DIRTY)) |
372 | 0 | return 0; |
373 | | |
374 | 0 | return (ctf_rollback (fp, last_update)); |
375 | 0 | } |
376 | | |
377 | | ctf_snapshot_id_t |
378 | | ctf_snapshot (ctf_dict_t *fp) |
379 | 0 | { |
380 | 0 | ctf_snapshot_id_t snapid; |
381 | 0 | snapid.dtd_id = fp->ctf_typemax; |
382 | 0 | snapid.snapshot_id = fp->ctf_snapshots++; |
383 | 0 | return snapid; |
384 | 0 | } |
385 | | |
386 | | /* Like ctf_discard(), only discards everything after a particular ID. */ |
387 | | int |
388 | | ctf_rollback (ctf_dict_t *fp, ctf_snapshot_id_t id) |
389 | 0 | { |
390 | 0 | ctf_dtdef_t *dtd, *ntd; |
391 | 0 | ctf_dvdef_t *dvd, *nvd; |
392 | |
|
393 | 0 | if (!(fp->ctf_flags & LCTF_RDWR)) |
394 | 0 | return (ctf_set_errno (fp, ECTF_RDONLY)); |
395 | | |
396 | 0 | if (fp->ctf_snapshot_lu >= id.snapshot_id) |
397 | 0 | return (ctf_set_errno (fp, ECTF_OVERROLLBACK)); |
398 | | |
399 | 0 | for (dtd = ctf_list_next (&fp->ctf_dtdefs); dtd != NULL; dtd = ntd) |
400 | 0 | { |
401 | 0 | int kind; |
402 | 0 | const char *name; |
403 | |
|
404 | 0 | ntd = ctf_list_next (dtd); |
405 | |
|
406 | 0 | if (LCTF_TYPE_TO_INDEX (fp, dtd->dtd_type) <= id.dtd_id) |
407 | 0 | continue; |
408 | | |
409 | 0 | kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info); |
410 | 0 | if (kind == CTF_K_FORWARD) |
411 | 0 | kind = dtd->dtd_data.ctt_type; |
412 | |
|
413 | 0 | if (dtd->dtd_data.ctt_name |
414 | 0 | && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL |
415 | 0 | && LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info)) |
416 | 0 | { |
417 | 0 | ctf_dynhash_remove (ctf_name_table (fp, kind)->ctn_writable, |
418 | 0 | name); |
419 | 0 | ctf_str_remove_ref (fp, name, &dtd->dtd_data.ctt_name); |
420 | 0 | } |
421 | |
|
422 | 0 | ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type); |
423 | 0 | ctf_dtd_delete (fp, dtd); |
424 | 0 | } |
425 | |
|
426 | 0 | for (dvd = ctf_list_next (&fp->ctf_dvdefs); dvd != NULL; dvd = nvd) |
427 | 0 | { |
428 | 0 | nvd = ctf_list_next (dvd); |
429 | |
|
430 | 0 | if (dvd->dvd_snapshots <= id.snapshot_id) |
431 | 0 | continue; |
432 | | |
433 | 0 | ctf_dvd_delete (fp, dvd); |
434 | 0 | } |
435 | |
|
436 | 0 | fp->ctf_typemax = id.dtd_id; |
437 | 0 | fp->ctf_snapshots = id.snapshot_id; |
438 | |
|
439 | 0 | if (fp->ctf_snapshots == fp->ctf_snapshot_lu) |
440 | 0 | fp->ctf_flags &= ~LCTF_DIRTY; |
441 | |
|
442 | 0 | return 0; |
443 | 0 | } |
444 | | |
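A short sketch of how the snapshot functions above combine (illustrative only; fp is assumed to be a writable dict):

  ctf_snapshot_id_t snap = ctf_snapshot (fp);   /* Remember the current state.  */

  if (ctf_add_struct (fp, CTF_ADD_ROOT, "scratch") == CTF_ERR)
    return -1;                                  /* errno is set for us.  */

  if (ctf_rollback (fp, snap) < 0)              /* "scratch" is discarded again.  */
    return -1;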
445 | | /* Note: vlen is the amount of space *allocated* for the vlen. It may well not |
446 | | be the amount of space used (yet): the space used is declared in per-kind |
447 | | fashion in the dtd_data's info word. */ |
448 | | static ctf_id_t |
449 | | ctf_add_generic (ctf_dict_t *fp, uint32_t flag, const char *name, int kind, |
450 | | size_t vlen, ctf_dtdef_t **rp) |
451 | 0 | { |
452 | 0 | ctf_dtdef_t *dtd; |
453 | 0 | ctf_id_t type; |
454 | |
|
455 | 0 | if (flag != CTF_ADD_NONROOT && flag != CTF_ADD_ROOT) |
456 | 0 | return (ctf_set_errno (fp, EINVAL)); |
457 | | |
458 | 0 | if (!(fp->ctf_flags & LCTF_RDWR)) |
459 | 0 | return (ctf_set_errno (fp, ECTF_RDONLY)); |
460 | | |
461 | 0 | if (LCTF_INDEX_TO_TYPE (fp, fp->ctf_typemax, 1) >= CTF_MAX_TYPE) |
462 | 0 | return (ctf_set_errno (fp, ECTF_FULL)); |
463 | | |
464 | 0 | if (LCTF_INDEX_TO_TYPE (fp, fp->ctf_typemax, 1) == (CTF_MAX_PTYPE - 1)) |
465 | 0 | return (ctf_set_errno (fp, ECTF_FULL)); |
466 | | |
467 | | /* Make sure ptrtab always grows to be big enough for all types. */ |
468 | 0 | if (ctf_grow_ptrtab (fp) < 0) |
469 | 0 | return CTF_ERR; /* errno is set for us. */ |
470 | | |
471 | 0 | if ((dtd = calloc (1, sizeof (ctf_dtdef_t))) == NULL) |
472 | 0 | return (ctf_set_errno (fp, EAGAIN)); |
473 | | |
474 | 0 | dtd->dtd_vlen_alloc = vlen; |
475 | 0 | if (vlen > 0) |
476 | 0 | { |
477 | 0 | if ((dtd->dtd_vlen = calloc (1, vlen)) == NULL) |
478 | 0 | goto oom; |
479 | 0 | } |
480 | 0 | else |
481 | 0 | dtd->dtd_vlen = NULL; |
482 | | |
483 | 0 | type = ++fp->ctf_typemax; |
484 | 0 | type = LCTF_INDEX_TO_TYPE (fp, type, (fp->ctf_flags & LCTF_CHILD)); |
485 | |
|
486 | 0 | dtd->dtd_data.ctt_name = ctf_str_add_pending (fp, name, |
487 | 0 | &dtd->dtd_data.ctt_name); |
488 | 0 | dtd->dtd_type = type; |
489 | |
|
490 | 0 | if (dtd->dtd_data.ctt_name == 0 && name != NULL && name[0] != '\0') |
491 | 0 | goto oom; |
492 | | |
493 | 0 | if (ctf_dtd_insert (fp, dtd, flag, kind) < 0) |
494 | 0 | goto err; /* errno is set for us. */ |
495 | | |
496 | 0 | fp->ctf_flags |= LCTF_DIRTY; |
497 | |
|
498 | 0 | *rp = dtd; |
499 | 0 | return type; |
500 | | |
501 | 0 | oom: |
502 | 0 | ctf_set_errno (fp, EAGAIN); |
503 | 0 | err: |
504 | 0 | free (dtd->dtd_vlen); |
505 | 0 | free (dtd); |
506 | 0 | return CTF_ERR; |
507 | 0 | } |
508 | | |
509 | | /* When encoding integer sizes, we want to convert a byte count in the range |
510 | | 1-8 to the closest power of 2 (e.g. 3->4, 5->8, etc). The clp2() function |
511 | | is a clever implementation from "Hacker's Delight" by Henry Warren, Jr. */ |
512 | | static size_t |
513 | | clp2 (size_t x) |
514 | 0 | { |
515 | 0 | x--; |
516 | |
|
517 | 0 | x |= (x >> 1); |
518 | 0 | x |= (x >> 2); |
519 | 0 | x |= (x >> 4); |
520 | 0 | x |= (x >> 8); |
521 | 0 | x |= (x >> 16); |
522 | |
|
523 | 0 | return (x + 1); |
524 | 0 | } |
525 | | |
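For example, clp2 (1) == 1, clp2 (3) == 4, clp2 (5) == 8 and clp2 (8) == 8, so a 40-bit encoding (5 bytes after rounding up to whole bytes) is recorded below with an 8-byte ctt_size.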
526 | | ctf_id_t |
527 | | ctf_add_encoded (ctf_dict_t *fp, uint32_t flag, |
528 | | const char *name, const ctf_encoding_t *ep, uint32_t kind) |
529 | 0 | { |
530 | 0 | ctf_dtdef_t *dtd; |
531 | 0 | ctf_id_t type; |
532 | 0 | uint32_t encoding; |
533 | |
|
534 | 0 | if (ep == NULL) |
535 | 0 | return (ctf_set_errno (fp, EINVAL)); |
536 | | |
537 | 0 | if (name == NULL || name[0] == '\0') |
538 | 0 | return (ctf_set_errno (fp, ECTF_NONAME)); |
539 | | |
540 | 0 | if (!ctf_assert (fp, kind == CTF_K_INTEGER || kind == CTF_K_FLOAT)) |
541 | 0 | return -1; /* errno is set for us. */ |
542 | | |
543 | 0 | if ((type = ctf_add_generic (fp, flag, name, kind, sizeof (uint32_t), |
544 | 0 | &dtd)) == CTF_ERR) |
545 | 0 | return CTF_ERR; /* errno is set for us. */ |
546 | | |
547 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, 0); |
548 | 0 | dtd->dtd_data.ctt_size = clp2 (P2ROUNDUP (ep->cte_bits, CHAR_BIT) |
549 | 0 | / CHAR_BIT); |
550 | 0 | switch (kind) |
551 | 0 | { |
552 | 0 | case CTF_K_INTEGER: |
553 | 0 | encoding = CTF_INT_DATA (ep->cte_format, ep->cte_offset, ep->cte_bits); |
554 | 0 | break; |
555 | 0 | case CTF_K_FLOAT: |
556 | 0 | encoding = CTF_FP_DATA (ep->cte_format, ep->cte_offset, ep->cte_bits); |
557 | 0 | break; |
558 | 0 | } |
559 | 0 | memcpy (dtd->dtd_vlen, &encoding, sizeof (encoding)); |
560 | |
|
561 | 0 | return type; |
562 | 0 | } |
563 | | |
564 | | ctf_id_t |
565 | | ctf_add_reftype (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref, uint32_t kind) |
566 | 0 | { |
567 | 0 | ctf_dtdef_t *dtd; |
568 | 0 | ctf_id_t type; |
569 | 0 | ctf_dict_t *tmp = fp; |
570 | 0 | int child = fp->ctf_flags & LCTF_CHILD; |
571 | |
|
572 | 0 | if (ref == CTF_ERR || ref > CTF_MAX_TYPE) |
573 | 0 | return (ctf_set_errno (fp, EINVAL)); |
574 | | |
575 | 0 | if (ref != 0 && ctf_lookup_by_id (&tmp, ref) == NULL) |
576 | 0 | return CTF_ERR; /* errno is set for us. */ |
577 | | |
578 | 0 | if ((type = ctf_add_generic (fp, flag, NULL, kind, 0, &dtd)) == CTF_ERR) |
579 | 0 | return CTF_ERR; /* errno is set for us. */ |
580 | | |
581 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, 0); |
582 | 0 | dtd->dtd_data.ctt_type = (uint32_t) ref; |
583 | |
|
584 | 0 | if (kind != CTF_K_POINTER) |
585 | 0 | return type; |
586 | | |
587 | | /* If we are adding a pointer, update the ptrtab, pointing at this type from |
588 | | the type it points to. Note that ctf_typemax is at this point one higher |
589 | | than we want to check against, because it's just been incremented for the |
590 | | addition of this type. The pptrtab is lazily-updated as needed, so is not |
591 | | touched here. */ |
592 | | |
593 | 0 | uint32_t type_idx = LCTF_TYPE_TO_INDEX (fp, type); |
594 | 0 | uint32_t ref_idx = LCTF_TYPE_TO_INDEX (fp, ref); |
595 | |
|
596 | 0 | if (LCTF_TYPE_ISCHILD (fp, ref) == child |
597 | 0 | && ref_idx < fp->ctf_typemax) |
598 | 0 | fp->ctf_ptrtab[ref_idx] = type_idx; |
599 | |
|
600 | 0 | return type; |
601 | 0 | } |
602 | | |
603 | | ctf_id_t |
604 | | ctf_add_slice (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref, |
605 | | const ctf_encoding_t *ep) |
606 | 0 | { |
607 | 0 | ctf_dtdef_t *dtd; |
608 | 0 | ctf_slice_t slice; |
609 | 0 | ctf_id_t resolved_ref = ref; |
610 | 0 | ctf_id_t type; |
611 | 0 | int kind; |
612 | 0 | const ctf_type_t *tp; |
613 | 0 | ctf_dict_t *tmp = fp; |
614 | |
|
615 | 0 | if (ep == NULL) |
616 | 0 | return (ctf_set_errno (fp, EINVAL)); |
617 | | |
618 | 0 | if ((ep->cte_bits > 255) || (ep->cte_offset > 255)) |
619 | 0 | return (ctf_set_errno (fp, ECTF_SLICEOVERFLOW)); |
620 | | |
621 | 0 | if (ref == CTF_ERR || ref > CTF_MAX_TYPE) |
622 | 0 | return (ctf_set_errno (fp, EINVAL)); |
623 | | |
624 | 0 | if (ref != 0 && ((tp = ctf_lookup_by_id (&tmp, ref)) == NULL)) |
625 | 0 | return CTF_ERR; /* errno is set for us. */ |
626 | | |
627 | | /* Make sure we ultimately point to an integral type. We also allow slices to |
628 | | point to the unimplemented type, for now, because the compiler can emit |
629 | | such slices, though they're not very much use. */ |
630 | | |
631 | 0 | resolved_ref = ctf_type_resolve_unsliced (fp, ref); |
632 | 0 | kind = ctf_type_kind_unsliced (fp, resolved_ref); |
633 | |
|
634 | 0 | if ((kind != CTF_K_INTEGER) && (kind != CTF_K_FLOAT) && |
635 | 0 | (kind != CTF_K_ENUM) |
636 | 0 | && (ref != 0)) |
637 | 0 | return (ctf_set_errno (fp, ECTF_NOTINTFP)); |
638 | | |
639 | 0 | if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_SLICE, |
640 | 0 | sizeof (ctf_slice_t), &dtd)) == CTF_ERR) |
641 | 0 | return CTF_ERR; /* errno is set for us. */ |
642 | | |
643 | 0 | memset (&slice, 0, sizeof (ctf_slice_t)); |
644 | |
|
645 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_SLICE, flag, 0); |
646 | 0 | dtd->dtd_data.ctt_size = clp2 (P2ROUNDUP (ep->cte_bits, CHAR_BIT) |
647 | 0 | / CHAR_BIT); |
648 | 0 | slice.cts_type = (uint32_t) ref; |
649 | 0 | slice.cts_bits = ep->cte_bits; |
650 | 0 | slice.cts_offset = ep->cte_offset; |
651 | 0 | memcpy (dtd->dtd_vlen, &slice, sizeof (ctf_slice_t)); |
652 | |
|
653 | 0 | return type; |
654 | 0 | } |
655 | | |
656 | | ctf_id_t |
657 | | ctf_add_integer (ctf_dict_t *fp, uint32_t flag, |
658 | | const char *name, const ctf_encoding_t *ep) |
659 | 0 | { |
660 | 0 | return (ctf_add_encoded (fp, flag, name, ep, CTF_K_INTEGER)); |
661 | 0 | } |
662 | | |
663 | | ctf_id_t |
664 | | ctf_add_float (ctf_dict_t *fp, uint32_t flag, |
665 | | const char *name, const ctf_encoding_t *ep) |
666 | 0 | { |
667 | 0 | return (ctf_add_encoded (fp, flag, name, ep, CTF_K_FLOAT)); |
668 | 0 | } |
669 | | |
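An illustrative call to the wrapper above (a sketch: fp is assumed to be a writable dict, and CTF_FP_DOUBLE is the ctf-api.h double-precision format code):

  ctf_encoding_t dbl = { .cte_format = CTF_FP_DOUBLE, .cte_offset = 0,
                         .cte_bits = 64 };
  ctf_id_t dbl_id = ctf_add_float (fp, CTF_ADD_ROOT, "double", &dbl);
  /* ctt_size is derived from the encoding: clp2 (64 / CHAR_BIT) == 8 bytes.  */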
670 | | ctf_id_t |
671 | | ctf_add_pointer (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref) |
672 | 0 | { |
673 | 0 | return (ctf_add_reftype (fp, flag, ref, CTF_K_POINTER)); |
674 | 0 | } |
675 | | |
676 | | ctf_id_t |
677 | | ctf_add_array (ctf_dict_t *fp, uint32_t flag, const ctf_arinfo_t *arp) |
678 | 0 | { |
679 | 0 | ctf_dtdef_t *dtd; |
680 | 0 | ctf_array_t cta; |
681 | 0 | ctf_id_t type; |
682 | 0 | ctf_dict_t *tmp = fp; |
683 | |
|
684 | 0 | if (arp == NULL) |
685 | 0 | return (ctf_set_errno (fp, EINVAL)); |
686 | | |
687 | 0 | if (arp->ctr_contents != 0 |
688 | 0 | && ctf_lookup_by_id (&tmp, arp->ctr_contents) == NULL) |
689 | 0 | return CTF_ERR; /* errno is set for us. */ |
690 | | |
691 | 0 | tmp = fp; |
692 | 0 | if (ctf_lookup_by_id (&tmp, arp->ctr_index) == NULL) |
693 | 0 | return CTF_ERR; /* errno is set for us. */ |
694 | | |
695 | 0 | if (ctf_type_kind (fp, arp->ctr_index) == CTF_K_FORWARD) |
696 | 0 | { |
697 | 0 | ctf_err_warn (fp, 1, ECTF_INCOMPLETE, |
698 | 0 | _("ctf_add_array: index type %lx is incomplete"), |
699 | 0 |                     arp->ctr_index); |
700 | 0 | return (ctf_set_errno (fp, ECTF_INCOMPLETE)); |
701 | 0 | } |
702 | | |
703 | 0 | if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_ARRAY, |
704 | 0 | sizeof (ctf_array_t), &dtd)) == CTF_ERR) |
705 | 0 | return CTF_ERR; /* errno is set for us. */ |
706 | | |
707 | 0 | memset (&cta, 0, sizeof (ctf_array_t)); |
708 | |
|
709 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_ARRAY, flag, 0); |
710 | 0 | dtd->dtd_data.ctt_size = 0; |
711 | 0 | cta.cta_contents = (uint32_t) arp->ctr_contents; |
712 | 0 | cta.cta_index = (uint32_t) arp->ctr_index; |
713 | 0 | cta.cta_nelems = arp->ctr_nelems; |
714 | 0 | memcpy (dtd->dtd_vlen, &cta, sizeof (ctf_array_t)); |
715 | |
|
716 | 0 | return type; |
717 | 0 | } |
718 | | |
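A sketch of array construction (int_id is an assumed, previously added integer type):

  ctf_arinfo_t ar;

  ar.ctr_contents = int_id;             /* Element type.  */
  ar.ctr_index = int_id;                /* Index type: must not be a forward.  */
  ar.ctr_nelems = 16;                   /* i.e. int[16].  */

  ctf_id_t arr_id = ctf_add_array (fp, CTF_ADD_ROOT, &ar);
  if (arr_id == CTF_ERR)
    return -1;                          /* errno is set for us.  */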
719 | | int |
720 | | ctf_set_array (ctf_dict_t *fp, ctf_id_t type, const ctf_arinfo_t *arp) |
721 | 0 | { |
722 | 0 | ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, type); |
723 | 0 | ctf_array_t *vlen; |
724 | |
|
725 | 0 | if (!(fp->ctf_flags & LCTF_RDWR)) |
726 | 0 | return (ctf_set_errno (fp, ECTF_RDONLY)); |
727 | | |
728 | 0 | if (dtd == NULL |
729 | 0 | || LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info) != CTF_K_ARRAY) |
730 | 0 | return (ctf_set_errno (fp, ECTF_BADID)); |
731 | | |
732 | 0 | vlen = (ctf_array_t *) dtd->dtd_vlen; |
733 | 0 | fp->ctf_flags |= LCTF_DIRTY; |
734 | 0 | vlen->cta_contents = (uint32_t) arp->ctr_contents; |
735 | 0 | vlen->cta_index = (uint32_t) arp->ctr_index; |
736 | 0 | vlen->cta_nelems = arp->ctr_nelems; |
737 | |
|
738 | 0 | return 0; |
739 | 0 | } |
740 | | |
741 | | ctf_id_t |
742 | | ctf_add_function (ctf_dict_t *fp, uint32_t flag, |
743 | | const ctf_funcinfo_t *ctc, const ctf_id_t *argv) |
744 | 0 | { |
745 | 0 | ctf_dtdef_t *dtd; |
746 | 0 | ctf_id_t type; |
747 | 0 | uint32_t vlen; |
748 | 0 | uint32_t *vdat; |
749 | 0 | ctf_dict_t *tmp = fp; |
750 | 0 | size_t initial_vlen; |
751 | 0 | size_t i; |
752 | |
|
753 | 0 | if (!(fp->ctf_flags & LCTF_RDWR)) |
754 | 0 | return (ctf_set_errno (fp, ECTF_RDONLY)); |
755 | | |
756 | 0 | if (ctc == NULL || (ctc->ctc_flags & ~CTF_FUNC_VARARG) != 0 |
757 | 0 | || (ctc->ctc_argc != 0 && argv == NULL)) |
758 | 0 | return (ctf_set_errno (fp, EINVAL)); |
759 | | |
760 | 0 | vlen = ctc->ctc_argc; |
761 | 0 | if (ctc->ctc_flags & CTF_FUNC_VARARG) |
762 | 0 | vlen++; /* Add trailing zero to indicate varargs (see below). */ |
763 | |
|
764 | 0 | if (ctc->ctc_return != 0 |
765 | 0 | && ctf_lookup_by_id (&tmp, ctc->ctc_return) == NULL) |
766 | 0 | return CTF_ERR; /* errno is set for us. */ |
767 | | |
768 | 0 | if (vlen > CTF_MAX_VLEN) |
769 | 0 | return (ctf_set_errno (fp, EOVERFLOW)); |
770 | | |
771 | | /* One word extra allocated for padding for 4-byte alignment if need be. |
772 | | Not reflected in vlen: we don't want to copy anything into it, and |
773 | | it's in addition to (e.g.) the trailing 0 indicating varargs. */ |
774 | | |
775 | 0 | initial_vlen = (sizeof (uint32_t) * (vlen + (vlen & 1))); |
776 | 0 | if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_FUNCTION, |
777 | 0 | initial_vlen, &dtd)) == CTF_ERR) |
778 | 0 | return CTF_ERR; /* errno is set for us. */ |
779 | | |
780 | 0 | vdat = (uint32_t *) dtd->dtd_vlen; |
781 | |
|
782 | 0 | for (i = 0; i < ctc->ctc_argc; i++) |
783 | 0 | { |
784 | 0 | tmp = fp; |
785 | 0 | if (argv[i] != 0 && ctf_lookup_by_id (&tmp, argv[i]) == NULL) |
786 | 0 | return CTF_ERR; /* errno is set for us. */ |
787 | 0 | vdat[i] = (uint32_t) argv[i]; |
788 | 0 | } |
789 | | |
790 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_FUNCTION, flag, vlen); |
791 | 0 | dtd->dtd_data.ctt_type = (uint32_t) ctc->ctc_return; |
792 | |
|
793 | 0 | if (ctc->ctc_flags & CTF_FUNC_VARARG) |
794 | 0 | vdat[vlen - 1] = 0; /* Add trailing zero to indicate varargs. */ |
795 | |
|
796 | 0 | return type; |
797 | 0 | } |
798 | | |
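An illustrative function-type addition (int_id and charp_id stand for previously added return and argument types; both are assumptions of this sketch):

  ctf_id_t argv[2] = { int_id, charp_id };
  ctf_funcinfo_t fi;

  fi.ctc_return = int_id;
  fi.ctc_argc = 2;
  fi.ctc_flags = CTF_FUNC_VARARG;       /* i.e. int (int, char *, ...).  */

  ctf_id_t fn_id = ctf_add_function (fp, CTF_ADD_ROOT, &fi, argv);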
799 | | ctf_id_t |
800 | | ctf_add_struct_sized (ctf_dict_t *fp, uint32_t flag, const char *name, |
801 | | size_t size) |
802 | 0 | { |
803 | 0 | ctf_dtdef_t *dtd; |
804 | 0 | ctf_id_t type = 0; |
805 | 0 | size_t initial_vlen = sizeof (ctf_lmember_t) * INITIAL_VLEN; |
806 | | |
807 | | /* Promote root-visible forwards to structs. */ |
808 | 0 | if (name != NULL) |
809 | 0 | type = ctf_lookup_by_rawname (fp, CTF_K_STRUCT, name); |
810 | |
|
811 | 0 | if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD) |
812 | 0 | dtd = ctf_dtd_lookup (fp, type); |
813 | 0 | else if ((type = ctf_add_generic (fp, flag, name, CTF_K_STRUCT, |
814 | 0 | initial_vlen, &dtd)) == CTF_ERR) |
815 | 0 | return CTF_ERR; /* errno is set for us. */ |
816 | | |
817 | | /* Forwards won't have any vlen yet. */ |
818 | 0 | if (dtd->dtd_vlen_alloc == 0) |
819 | 0 | { |
820 | 0 | if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL) |
821 | 0 | return (ctf_set_errno (fp, ENOMEM)); |
822 | 0 | dtd->dtd_vlen_alloc = initial_vlen; |
823 | 0 | } |
824 | | |
825 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_STRUCT, flag, 0); |
826 | 0 | dtd->dtd_data.ctt_size = CTF_LSIZE_SENT; |
827 | 0 | dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size); |
828 | 0 | dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size); |
829 | |
|
830 | 0 | return type; |
831 | 0 | } |
832 | | |
833 | | ctf_id_t |
834 | | ctf_add_struct (ctf_dict_t *fp, uint32_t flag, const char *name) |
835 | 0 | { |
836 | 0 | return (ctf_add_struct_sized (fp, flag, name, 0)); |
837 | 0 | } |
838 | | |
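A sketch of structure construction using ctf_add_member(), defined further down in this file (int_id is an assumed, previously added integer type):

  ctf_id_t s = ctf_add_struct (fp, CTF_ADD_ROOT, "point");

  if (s == CTF_ERR
      || ctf_add_member (fp, s, "x", int_id) < 0    /* Members are placed at  */
      || ctf_add_member (fp, s, "y", int_id) < 0)   /* natural alignment in turn.  */
    return -1;                                      /* errno is set for us.  */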
839 | | ctf_id_t |
840 | | ctf_add_union_sized (ctf_dict_t *fp, uint32_t flag, const char *name, |
841 | | size_t size) |
842 | 0 | { |
843 | 0 | ctf_dtdef_t *dtd; |
844 | 0 | ctf_id_t type = 0; |
845 | 0 | size_t initial_vlen = sizeof (ctf_lmember_t) * INITIAL_VLEN; |
846 | | |
847 | | /* Promote root-visible forwards to unions. */ |
848 | 0 | if (name != NULL) |
849 | 0 | type = ctf_lookup_by_rawname (fp, CTF_K_UNION, name); |
850 | |
|
851 | 0 | if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD) |
852 | 0 | dtd = ctf_dtd_lookup (fp, type); |
853 | 0 | else if ((type = ctf_add_generic (fp, flag, name, CTF_K_UNION, |
854 | 0 | initial_vlen, &dtd)) == CTF_ERR) |
855 | 0 | return CTF_ERR; /* errno is set for us */ |
856 | | |
857 | | /* Forwards won't have any vlen yet. */ |
858 | 0 | if (dtd->dtd_vlen_alloc == 0) |
859 | 0 | { |
860 | 0 | if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL) |
861 | 0 | return (ctf_set_errno (fp, ENOMEM)); |
862 | 0 | dtd->dtd_vlen_alloc = initial_vlen; |
863 | 0 | } |
864 | | |
865 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_UNION, flag, 0); |
866 | 0 | dtd->dtd_data.ctt_size = CTF_LSIZE_SENT; |
867 | 0 | dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size); |
868 | 0 | dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size); |
869 | |
|
870 | 0 | return type; |
871 | 0 | } |
872 | | |
873 | | ctf_id_t |
874 | | ctf_add_union (ctf_dict_t *fp, uint32_t flag, const char *name) |
875 | 0 | { |
876 | 0 | return (ctf_add_union_sized (fp, flag, name, 0)); |
877 | 0 | } |
878 | | |
879 | | ctf_id_t |
880 | | ctf_add_enum (ctf_dict_t *fp, uint32_t flag, const char *name) |
881 | 0 | { |
882 | 0 | ctf_dtdef_t *dtd; |
883 | 0 | ctf_id_t type = 0; |
884 | 0 | size_t initial_vlen = sizeof (ctf_enum_t) * INITIAL_VLEN; |
885 | | |
886 | | /* Promote root-visible forwards to enums. */ |
887 | 0 | if (name != NULL) |
888 | 0 | type = ctf_lookup_by_rawname (fp, CTF_K_ENUM, name); |
889 | |
|
890 | 0 | if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD) |
891 | 0 | dtd = ctf_dtd_lookup (fp, type); |
892 | 0 | else if ((type = ctf_add_generic (fp, flag, name, CTF_K_ENUM, |
893 | 0 | initial_vlen, &dtd)) == CTF_ERR) |
894 | 0 | return CTF_ERR; /* errno is set for us. */ |
895 | | |
896 | | /* Forwards won't have any vlen yet. */ |
897 | 0 | if (dtd->dtd_vlen_alloc == 0) |
898 | 0 | { |
899 | 0 | if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL) |
900 | 0 | return (ctf_set_errno (fp, ENOMEM)); |
901 | 0 | dtd->dtd_vlen_alloc = initial_vlen; |
902 | 0 | } |
903 | | |
904 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_ENUM, flag, 0); |
905 | 0 | dtd->dtd_data.ctt_size = fp->ctf_dmodel->ctd_int; |
906 | |
|
907 | 0 | return type; |
908 | 0 | } |
909 | | |
910 | | ctf_id_t |
911 | | ctf_add_enum_encoded (ctf_dict_t *fp, uint32_t flag, const char *name, |
912 | | const ctf_encoding_t *ep) |
913 | 0 | { |
914 | 0 | ctf_id_t type = 0; |
915 | | |
916 | | /* First, create the enum if need be, using most of the same machinery as |
917 | | ctf_add_enum(), to ensure that we do not allow things past that are not |
918 | | enums or forwards to them. (This includes other slices: you cannot slice a |
919 | | slice, which would be a useless thing to do anyway.) */ |
920 | |
|
921 | 0 | if (name != NULL) |
922 | 0 | type = ctf_lookup_by_rawname (fp, CTF_K_ENUM, name); |
923 | |
|
924 | 0 | if (type != 0) |
925 | 0 | { |
926 | 0 | if ((ctf_type_kind (fp, type) != CTF_K_FORWARD) && |
927 | 0 | (ctf_type_kind_unsliced (fp, type) != CTF_K_ENUM)) |
928 | 0 | return (ctf_set_errno (fp, ECTF_NOTINTFP)); |
929 | 0 | } |
930 | 0 | else if ((type = ctf_add_enum (fp, flag, name)) == CTF_ERR) |
931 | 0 | return CTF_ERR; /* errno is set for us. */ |
932 | | |
933 | | /* Now attach a suitable slice to it. */ |
934 | | |
935 | 0 | return ctf_add_slice (fp, flag, type, ep); |
936 | 0 | } |
937 | | |
938 | | ctf_id_t |
939 | | ctf_add_forward (ctf_dict_t *fp, uint32_t flag, const char *name, |
940 | | uint32_t kind) |
941 | 0 | { |
942 | 0 | ctf_dtdef_t *dtd; |
943 | 0 | ctf_id_t type = 0; |
944 | |
|
945 | 0 | if (!ctf_forwardable_kind (kind)) |
946 | 0 | return (ctf_set_errno (fp, ECTF_NOTSUE)); |
947 | | |
948 | 0 | if (name == NULL || name[0] == '\0') |
949 | 0 | return (ctf_set_errno (fp, ECTF_NONAME)); |
950 | | |
951 | | /* If the type is already defined or exists as a forward tag, just |
952 | | return the ctf_id_t of the existing definition. */ |
953 | | |
954 | 0 | type = ctf_lookup_by_rawname (fp, kind, name); |
955 | |
|
956 | 0 | if (type) |
957 | 0 | return type; |
958 | | |
959 | 0 | if ((type = ctf_add_generic (fp, flag, name, kind, 0, &dtd)) == CTF_ERR) |
960 | 0 | return CTF_ERR; /* errno is set for us. */ |
961 | | |
962 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_FORWARD, flag, 0); |
963 | 0 | dtd->dtd_data.ctt_type = kind; |
964 | |
|
965 | 0 | return type; |
966 | 0 | } |
967 | | |
968 | | ctf_id_t |
969 | | ctf_add_unknown (ctf_dict_t *fp, uint32_t flag, const char *name) |
970 | 0 | { |
971 | 0 | ctf_dtdef_t *dtd; |
972 | 0 | ctf_id_t type = 0; |
973 | | |
974 | | /* If a type is already defined with this name, error (if not CTF_K_UNKNOWN) |
975 | | or just return it. */ |
976 | |
|
977 | 0 | if (name != NULL && name[0] != '\0' && flag == CTF_ADD_ROOT |
978 | 0 | && (type = ctf_lookup_by_rawname (fp, CTF_K_UNKNOWN, name))) |
979 | 0 | { |
980 | 0 | if (ctf_type_kind (fp, type) == CTF_K_UNKNOWN) |
981 | 0 | return type; |
982 | 0 | else |
983 | 0 | { |
984 | 0 | ctf_err_warn (fp, 1, ECTF_CONFLICT, |
985 | 0 | _("ctf_add_unknown: cannot add unknown type " |
986 | 0 | "named %s: type of this name already defined"), |
987 | 0 | name ? name : _("(unnamed type)")); |
988 | 0 | return (ctf_set_errno (fp, ECTF_CONFLICT)); |
989 | 0 | } |
990 | 0 | } |
991 | | |
992 | 0 | if ((type = ctf_add_generic (fp, flag, name, CTF_K_UNKNOWN, 0, &dtd)) == CTF_ERR) |
993 | 0 | return CTF_ERR; /* errno is set for us. */ |
994 | | |
995 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_UNKNOWN, flag, 0); |
996 | 0 | dtd->dtd_data.ctt_type = 0; |
997 | |
|
998 | 0 | return type; |
999 | 0 | } |
1000 | | |
1001 | | ctf_id_t |
1002 | | ctf_add_typedef (ctf_dict_t *fp, uint32_t flag, const char *name, |
1003 | | ctf_id_t ref) |
1004 | 0 | { |
1005 | 0 | ctf_dtdef_t *dtd; |
1006 | 0 | ctf_id_t type; |
1007 | 0 | ctf_dict_t *tmp = fp; |
1008 | |
|
1009 | 0 | if (ref == CTF_ERR || ref > CTF_MAX_TYPE) |
1010 | 0 | return (ctf_set_errno (fp, EINVAL)); |
1011 | | |
1012 | 0 | if (name == NULL || name[0] == '\0') |
1013 | 0 | return (ctf_set_errno (fp, ECTF_NONAME)); |
1014 | | |
1015 | 0 | if (ref != 0 && ctf_lookup_by_id (&tmp, ref) == NULL) |
1016 | 0 | return CTF_ERR; /* errno is set for us. */ |
1017 | | |
1018 | 0 | if ((type = ctf_add_generic (fp, flag, name, CTF_K_TYPEDEF, 0, |
1019 | 0 | &dtd)) == CTF_ERR) |
1020 | 0 | return CTF_ERR; /* errno is set for us. */ |
1021 | | |
1022 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_TYPEDEF, flag, 0); |
1023 | 0 | dtd->dtd_data.ctt_type = (uint32_t) ref; |
1024 | |
|
1025 | 0 | return type; |
1026 | 0 | } |
1027 | | |
1028 | | ctf_id_t |
1029 | | ctf_add_volatile (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref) |
1030 | 0 | { |
1031 | 0 | return (ctf_add_reftype (fp, flag, ref, CTF_K_VOLATILE)); |
1032 | 0 | } |
1033 | | |
1034 | | ctf_id_t |
1035 | | ctf_add_const (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref) |
1036 | 0 | { |
1037 | 0 | return (ctf_add_reftype (fp, flag, ref, CTF_K_CONST)); |
1038 | 0 | } |
1039 | | |
1040 | | ctf_id_t |
1041 | | ctf_add_restrict (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref) |
1042 | 0 | { |
1043 | 0 | return (ctf_add_reftype (fp, flag, ref, CTF_K_RESTRICT)); |
1044 | 0 | } |
1045 | | |
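An illustrative chain built from the typedef and qualifier wrappers above (int_id is an assumed, previously added type; the typedef name is made up):

  ctf_id_t cint = ctf_add_const (fp, CTF_ADD_ROOT, int_id);     /* const int  */
  ctf_id_t pcint = ctf_add_pointer (fp, CTF_ADD_ROOT, cint);    /* const int *  */
  ctf_id_t ptr_t = ctf_add_typedef (fp, CTF_ADD_ROOT, "cintp_t", pcint);

  if (ptr_t == CTF_ERR)
    return -1;                          /* errno is set for us.  */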
1046 | | int |
1047 | | ctf_add_enumerator (ctf_dict_t *fp, ctf_id_t enid, const char *name, |
1048 | | int value) |
1049 | 0 | { |
1050 | 0 | ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, enid); |
1051 | 0 | unsigned char *old_vlen; |
1052 | 0 | ctf_enum_t *en; |
1053 | 0 | size_t i; |
1054 | |
|
1055 | 0 | uint32_t kind, vlen, root; |
1056 | |
|
1057 | 0 | if (name == NULL) |
1058 | 0 | return (ctf_set_errno (fp, EINVAL)); |
1059 | | |
1060 | 0 | if (!(fp->ctf_flags & LCTF_RDWR)) |
1061 | 0 | return (ctf_set_errno (fp, ECTF_RDONLY)); |
1062 | | |
1063 | 0 | if (dtd == NULL) |
1064 | 0 | return (ctf_set_errno (fp, ECTF_BADID)); |
1065 | | |
1066 | 0 | kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info); |
1067 | 0 | root = LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info); |
1068 | 0 | vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info); |
1069 | |
|
1070 | 0 | if (kind != CTF_K_ENUM) |
1071 | 0 | return (ctf_set_errno (fp, ECTF_NOTENUM)); |
1072 | | |
1073 | 0 | if (vlen == CTF_MAX_VLEN) |
1074 | 0 | return (ctf_set_errno (fp, ECTF_DTFULL)); |
1075 | | |
1076 | 0 | old_vlen = dtd->dtd_vlen; |
1077 | 0 | if (ctf_grow_vlen (fp, dtd, sizeof (ctf_enum_t) * (vlen + 1)) < 0) |
1078 | 0 | return -1; /* errno is set for us. */ |
1079 | 0 | en = (ctf_enum_t *) dtd->dtd_vlen; |
1080 | |
|
1081 | 0 | if (dtd->dtd_vlen != old_vlen) |
1082 | 0 | { |
1083 | 0 | ptrdiff_t move = (signed char *) dtd->dtd_vlen - (signed char *) old_vlen; |
1084 | | |
1085 | | /* Remove pending refs in the old vlen region and reapply them. */ |
1086 | |
|
1087 | 0 | for (i = 0; i < vlen; i++) |
1088 | 0 | ctf_str_move_pending (fp, &en[i].cte_name, move); |
1089 | 0 | } |
1090 | |
|
1091 | 0 | for (i = 0; i < vlen; i++) |
1092 | 0 | if (strcmp (ctf_strptr (fp, en[i].cte_name), name) == 0) |
1093 | 0 | return (ctf_set_errno (fp, ECTF_DUPLICATE)); |
1094 | | |
1095 | 0 | en[i].cte_name = ctf_str_add_pending (fp, name, &en[i].cte_name); |
1096 | 0 | en[i].cte_value = value; |
1097 | |
|
1098 | 0 | if (en[i].cte_name == 0 && name != NULL && name[0] != '\0') |
1099 | 0 | return -1; /* errno is set for us. */ |
1100 | | |
1101 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, root, vlen + 1); |
1102 | |
|
1103 | 0 | fp->ctf_flags |= LCTF_DIRTY; |
1104 | |
|
1105 | 0 | return 0; |
1106 | 0 | } |
1107 | | |
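A sketch of enum construction with the function above (fp assumed writable):

  ctf_id_t e = ctf_add_enum (fp, CTF_ADD_ROOT, "colour");

  if (e == CTF_ERR
      || ctf_add_enumerator (fp, e, "RED", 0) < 0
      || ctf_add_enumerator (fp, e, "GREEN", 1) < 0
      || ctf_add_enumerator (fp, e, "BLUE", 2) < 0)
    return -1;                          /* errno is set for us.  */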
1108 | | int |
1109 | | ctf_add_member_offset (ctf_dict_t *fp, ctf_id_t souid, const char *name, |
1110 | | ctf_id_t type, unsigned long bit_offset) |
1111 | 0 | { |
1112 | 0 | ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, souid); |
1113 | |
|
1114 | 0 | ssize_t msize, malign, ssize; |
1115 | 0 | uint32_t kind, vlen, root; |
1116 | 0 | size_t i; |
1117 | 0 | int is_incomplete = 0; |
1118 | 0 | unsigned char *old_vlen; |
1119 | 0 | ctf_lmember_t *memb; |
1120 | |
|
1121 | 0 | if (!(fp->ctf_flags & LCTF_RDWR)) |
1122 | 0 | return (ctf_set_errno (fp, ECTF_RDONLY)); |
1123 | | |
1124 | 0 | if (dtd == NULL) |
1125 | 0 | return (ctf_set_errno (fp, ECTF_BADID)); |
1126 | | |
1127 | 0 | if (name != NULL && name[0] == '\0') |
1128 | 0 | name = NULL; |
1129 | |
|
1130 | 0 | kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info); |
1131 | 0 | root = LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info); |
1132 | 0 | vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info); |
1133 | |
|
1134 | 0 | if (kind != CTF_K_STRUCT && kind != CTF_K_UNION) |
1135 | 0 | return (ctf_set_errno (fp, ECTF_NOTSOU)); |
1136 | | |
1137 | 0 | if (vlen == CTF_MAX_VLEN) |
1138 | 0 | return (ctf_set_errno (fp, ECTF_DTFULL)); |
1139 | | |
1140 | 0 | old_vlen = dtd->dtd_vlen; |
1141 | 0 | if (ctf_grow_vlen (fp, dtd, sizeof (ctf_lmember_t) * (vlen + 1)) < 0) |
1142 | 0 | return -1; /* errno is set for us. */ |
1143 | 0 | memb = (ctf_lmember_t *) dtd->dtd_vlen; |
1144 | |
|
1145 | 0 | if (dtd->dtd_vlen != old_vlen) |
1146 | 0 | { |
1147 | 0 | ptrdiff_t move = (signed char *) dtd->dtd_vlen - (signed char *) old_vlen; |
1148 | | |
1149 | | /* Remove pending refs in the old vlen region and reapply them. */ |
1150 | |
|
1151 | 0 | for (i = 0; i < vlen; i++) |
1152 | 0 | ctf_str_move_pending (fp, &memb[i].ctlm_name, move); |
1153 | 0 | } |
1154 | |
|
1155 | 0 | if (name != NULL) |
1156 | 0 | { |
1157 | 0 | for (i = 0; i < vlen; i++) |
1158 | 0 | if (strcmp (ctf_strptr (fp, memb[i].ctlm_name), name) == 0) |
1159 | 0 | return (ctf_set_errno (fp, ECTF_DUPLICATE)); |
1160 | 0 | } |
1161 | | |
1162 | 0 | if ((msize = ctf_type_size (fp, type)) < 0 || |
1163 | 0 | (malign = ctf_type_align (fp, type)) < 0) |
1164 | 0 | { |
1165 | | /* The unimplemented type, and any type that resolves to it, has no size |
1166 | | and no alignment: it can correspond to any number of compiler-inserted |
1167 | | types. We allow incomplete types through since they are routinely |
1168 | | added to the ends of structures, and can even be added elsewhere in |
1169 | | structures by the deduplicator. They are assumed to be zero-size with |
1170 | | no alignment: this is often wrong, but problems can be avoided in this |
1171 | | case by explicitly specifying the size of the structure via the _sized |
1172 | | functions. The deduplicator always does this. */ |
1173 | |
|
1174 | 0 | msize = 0; |
1175 | 0 | malign = 0; |
1176 | 0 | if (ctf_errno (fp) == ECTF_NONREPRESENTABLE) |
1177 | 0 | ctf_set_errno (fp, 0); |
1178 | 0 | else if (ctf_errno (fp) == ECTF_INCOMPLETE) |
1179 | 0 | is_incomplete = 1; |
1180 | 0 | else |
1181 | 0 | return -1; /* errno is set for us. */ |
1182 | 0 | } |
1183 | | |
1184 | 0 | memb[vlen].ctlm_name = ctf_str_add_pending (fp, name, &memb[vlen].ctlm_name); |
1185 | 0 | memb[vlen].ctlm_type = type; |
1186 | 0 | if (memb[vlen].ctlm_name == 0 && name != NULL && name[0] != '\0') |
1187 | 0 | return -1; /* errno is set for us. */ |
1188 | | |
1189 | 0 | if (kind == CTF_K_STRUCT && vlen != 0) |
1190 | 0 | { |
1191 | 0 | if (bit_offset == (unsigned long) - 1) |
1192 | 0 | { |
1193 | | /* Natural alignment. */ |
1194 | |
|
1195 | 0 | ctf_id_t ltype = ctf_type_resolve (fp, memb[vlen - 1].ctlm_type); |
1196 | 0 | size_t off = CTF_LMEM_OFFSET(&memb[vlen - 1]); |
1197 | |
|
1198 | 0 | ctf_encoding_t linfo; |
1199 | 0 | ssize_t lsize; |
1200 | | |
1201 | | /* Propagate any error from ctf_type_resolve. If the last member was |
1202 | | of unimplemented type, this may be -ECTF_NONREPRESENTABLE: we |
1203 | | cannot insert right after such a member without explicit offset |
1204 | | specification, because its alignment and size is not known. */ |
1205 | 0 | if (ltype == CTF_ERR) |
1206 | 0 | return -1; /* errno is set for us. */ |
1207 | | |
1208 | 0 | if (is_incomplete) |
1209 | 0 | { |
1210 | 0 | ctf_err_warn (fp, 1, ECTF_INCOMPLETE, |
1211 | 0 | _("ctf_add_member_offset: cannot add member %s of " |
1212 | 0 | "incomplete type %lx to struct %lx without " |
1213 | 0 | "specifying explicit offset\n"), |
1214 | 0 | name ? name : _("(unnamed member)"), type, souid); |
1215 | 0 | return (ctf_set_errno (fp, ECTF_INCOMPLETE)); |
1216 | 0 | } |
1217 | | |
1218 | 0 | if (ctf_type_encoding (fp, ltype, &linfo) == 0) |
1219 | 0 | off += linfo.cte_bits; |
1220 | 0 | else if ((lsize = ctf_type_size (fp, ltype)) > 0) |
1221 | 0 | off += lsize * CHAR_BIT; |
1222 | 0 | else if (lsize == -1 && ctf_errno (fp) == ECTF_INCOMPLETE) |
1223 | 0 | { |
1224 | 0 | const char *lname = ctf_strraw (fp, memb[vlen - 1].ctlm_name); |
1225 | |
|
1226 | 0 | ctf_err_warn (fp, 1, ECTF_INCOMPLETE, |
1227 | 0 | _("ctf_add_member_offset: cannot add member %s of " |
1228 | 0 | "type %lx to struct %lx without specifying " |
1229 | 0 | "explicit offset after member %s of type %lx, " |
1230 | 0 | "which is an incomplete type\n"), |
1231 | 0 | name ? name : _("(unnamed member)"), type, souid, |
1232 | 0 | lname ? lname : _("(unnamed member)"), ltype); |
1233 | 0 | return -1; /* errno is set for us. */ |
1234 | 0 | } |
1235 | | |
1236 | | /* Round up the offset of the end of the last member to |
1237 | | the next byte boundary, convert 'off' to bytes, and |
1238 | | then round it up again to the next multiple of the |
1239 | | alignment required by the new member. Finally, |
1240 | | convert back to bits and store the result in |
1241 | | dmd_offset. Technically we could do more efficient |
1242 | | packing if the new member is a bit-field, but we're |
1243 | | the "compiler" and ANSI says we can do as we choose. */ |
1244 | | |
1245 | 0 | off = roundup (off, CHAR_BIT) / CHAR_BIT; |
1246 | 0 | off = roundup (off, MAX (malign, 1)); |
1247 | 0 | memb[vlen].ctlm_offsethi = CTF_OFFSET_TO_LMEMHI (off * CHAR_BIT); |
1248 | 0 | memb[vlen].ctlm_offsetlo = CTF_OFFSET_TO_LMEMLO (off * CHAR_BIT); |
1249 | 0 | ssize = off + msize; |
1250 | 0 | } |
1251 | 0 | else |
1252 | 0 | { |
1253 | | /* Specified offset in bits. */ |
1254 | |
|
1255 | 0 | memb[vlen].ctlm_offsethi = CTF_OFFSET_TO_LMEMHI (bit_offset); |
1256 | 0 | memb[vlen].ctlm_offsetlo = CTF_OFFSET_TO_LMEMLO (bit_offset); |
1257 | 0 | ssize = ctf_get_ctt_size (fp, &dtd->dtd_data, NULL, NULL); |
1258 | 0 | ssize = MAX (ssize, ((signed) bit_offset / CHAR_BIT) + msize); |
1259 | 0 | } |
1260 | 0 | } |
1261 | 0 | else |
1262 | 0 | { |
1263 | 0 | memb[vlen].ctlm_offsethi = 0; |
1264 | 0 | memb[vlen].ctlm_offsetlo = 0; |
1265 | 0 | ssize = ctf_get_ctt_size (fp, &dtd->dtd_data, NULL, NULL); |
1266 | 0 | ssize = MAX (ssize, msize); |
1267 | 0 | } |
1268 | | |
1269 | 0 | dtd->dtd_data.ctt_size = CTF_LSIZE_SENT; |
1270 | 0 | dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (ssize); |
1271 | 0 | dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (ssize); |
1272 | 0 | dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, root, vlen + 1); |
1273 | |
|
1274 | 0 | fp->ctf_flags |= LCTF_DIRTY; |
1275 | 0 | return 0; |
1276 | 0 | } |
1277 | | |
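A worked example of the natural-alignment path above: if the previous member ends at bit 40 (say a char at bit offset 32) and the new member is a 4-byte int (msize == 4, malign == 4), then off = roundup (40, 8) / 8 == 5 bytes, roundup (5, 4) == 8 bytes, so the new member lands at bit offset 64 and ssize becomes 8 + 4 == 12 bytes.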
1278 | | int |
1279 | | ctf_add_member_encoded (ctf_dict_t *fp, ctf_id_t souid, const char *name, |
1280 | | ctf_id_t type, unsigned long bit_offset, |
1281 | | const ctf_encoding_t encoding) |
1282 | 0 | { |
1283 | 0 | ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, type); |
1284 | 0 | int kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info); |
1285 | 0 | int otype = type; |
1286 | |
|
1287 | 0 | if ((kind != CTF_K_INTEGER) && (kind != CTF_K_FLOAT) && (kind != CTF_K_ENUM)) |
1288 | 0 | return (ctf_set_errno (fp, ECTF_NOTINTFP)); |
1289 | | |
1290 | 0 | if ((type = ctf_add_slice (fp, CTF_ADD_NONROOT, otype, &encoding)) == CTF_ERR) |
1291 | 0 | return -1; /* errno is set for us. */ |
1292 | | |
1293 | 0 | return ctf_add_member_offset (fp, souid, name, type, bit_offset); |
1294 | 0 | } |
1295 | | |
1296 | | int |
1297 | | ctf_add_member (ctf_dict_t *fp, ctf_id_t souid, const char *name, |
1298 | | ctf_id_t type) |
1299 | 0 | { |
1300 | 0 | return ctf_add_member_offset (fp, souid, name, type, (unsigned long) - 1); |
1301 | 0 | } |
1302 | | |
1303 | | int |
1304 | | ctf_add_variable (ctf_dict_t *fp, const char *name, ctf_id_t ref) |
1305 | 0 | { |
1306 | 0 | ctf_dvdef_t *dvd; |
1307 | 0 | ctf_dict_t *tmp = fp; |
1308 | |
|
1309 | 0 | if (!(fp->ctf_flags & LCTF_RDWR)) |
1310 | 0 | return (ctf_set_errno (fp, ECTF_RDONLY)); |
1311 | | |
1312 | 0 | if (ctf_dvd_lookup (fp, name) != NULL) |
1313 | 0 | return (ctf_set_errno (fp, ECTF_DUPLICATE)); |
1314 | | |
1315 | 0 | if (ctf_lookup_by_id (&tmp, ref) == NULL) |
1316 | 0 | return -1; /* errno is set for us. */ |
1317 | | |
1318 | | /* Make sure this type is representable. */ |
1319 | 0 | if ((ctf_type_resolve (fp, ref) == CTF_ERR) |
1320 | 0 | && (ctf_errno (fp) == ECTF_NONREPRESENTABLE)) |
1321 | 0 | return -1; |
1322 | | |
1323 | 0 | if ((dvd = malloc (sizeof (ctf_dvdef_t))) == NULL) |
1324 | 0 | return (ctf_set_errno (fp, EAGAIN)); |
1325 | | |
1326 | 0 | if (name != NULL && (dvd->dvd_name = strdup (name)) == NULL) |
1327 | 0 | { |
1328 | 0 | free (dvd); |
1329 | 0 | return (ctf_set_errno (fp, EAGAIN)); |
1330 | 0 | } |
1331 | 0 | dvd->dvd_type = ref; |
1332 | 0 | dvd->dvd_snapshots = fp->ctf_snapshots; |
1333 | |
|
1334 | 0 | if (ctf_dvd_insert (fp, dvd) < 0) |
1335 | 0 | { |
1336 | 0 | free (dvd->dvd_name); |
1337 | 0 | free (dvd); |
1338 | 0 | return -1; /* errno is set for us. */ |
1339 | 0 | } |
1340 | | |
1341 | 0 | fp->ctf_flags |= LCTF_DIRTY; |
1342 | 0 | return 0; |
1343 | 0 | } |
1344 | | |
1345 | | int |
1346 | | ctf_add_funcobjt_sym (ctf_dict_t *fp, int is_function, const char *name, ctf_id_t id) |
1347 | 0 | { |
1348 | 0 | ctf_dict_t *tmp = fp; |
1349 | 0 | char *dupname; |
1350 | 0 | ctf_dynhash_t *h = is_function ? fp->ctf_funchash : fp->ctf_objthash; |
1351 | |
|
1352 | 0 | if (!(fp->ctf_flags & LCTF_RDWR)) |
1353 | 0 | return (ctf_set_errno (fp, ECTF_RDONLY)); |
1354 | | |
1355 | 0 | if (ctf_dynhash_lookup (fp->ctf_objthash, name) != NULL || |
1356 | 0 | ctf_dynhash_lookup (fp->ctf_funchash, name) != NULL) |
1357 | 0 | return (ctf_set_errno (fp, ECTF_DUPLICATE)); |
1358 | | |
1359 | 0 | if (ctf_lookup_by_id (&tmp, id) == NULL) |
1360 | 0 | return -1; /* errno is set for us. */ |
1361 | | |
1362 | 0 | if (is_function && ctf_type_kind (fp, id) != CTF_K_FUNCTION) |
1363 | 0 | return (ctf_set_errno (fp, ECTF_NOTFUNC)); |
1364 | | |
1365 | 0 | if ((dupname = strdup (name)) == NULL) |
1366 | 0 | return (ctf_set_errno (fp, ENOMEM)); |
1367 | | |
1368 | 0 | if (ctf_dynhash_insert (h, dupname, (void *) (uintptr_t) id) < 0) |
1369 | 0 | { |
1370 | 0 | free (dupname); |
1371 | 0 | return (ctf_set_errno (fp, ENOMEM)); |
1372 | 0 | } |
1373 | 0 | return 0; |
1374 | 0 | } |
1375 | | |
1376 | | int |
1377 | | ctf_add_objt_sym (ctf_dict_t *fp, const char *name, ctf_id_t id) |
1378 | 0 | { |
1379 | 0 | return (ctf_add_funcobjt_sym (fp, 0, name, id)); |
1380 | 0 | } |
1381 | | |
1382 | | int |
1383 | | ctf_add_func_sym (ctf_dict_t *fp, const char *name, ctf_id_t id) |
1384 | 0 | { |
1385 | 0 | return (ctf_add_funcobjt_sym (fp, 1, name, id)); |
1386 | 0 | } |
1387 | | |
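An illustrative pairing of a data variable with an object symbol of the same name (int_id is assumed; whether both calls are wanted depends on whether the name is also an ELF data symbol):

  if (ctf_add_variable (fp, "counter", int_id) < 0
      || ctf_add_objt_sym (fp, "counter", int_id) < 0)
    return -1;                          /* errno is set for us.  */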
1388 | | typedef struct ctf_bundle |
1389 | | { |
1390 | | ctf_dict_t *ctb_dict; /* CTF dict handle. */ |
1391 | | ctf_id_t ctb_type; /* CTF type identifier. */ |
1392 | | ctf_dtdef_t *ctb_dtd; /* CTF dynamic type definition (if any). */ |
1393 | | } ctf_bundle_t; |
1394 | | |
1395 | | static int |
1396 | | enumcmp (const char *name, int value, void *arg) |
1397 | 0 | { |
1398 | 0 | ctf_bundle_t *ctb = arg; |
1399 | 0 | int bvalue; |
1400 | |
|
1401 | 0 | if (ctf_enum_value (ctb->ctb_dict, ctb->ctb_type, name, &bvalue) < 0) |
1402 | 0 | { |
1403 | 0 | ctf_err_warn (ctb->ctb_dict, 0, 0, |
1404 | 0 | _("conflict due to enum %s iteration error"), name); |
1405 | 0 | return 1; |
1406 | 0 | } |
1407 | 0 | if (value != bvalue) |
1408 | 0 | { |
1409 | 0 | ctf_err_warn (ctb->ctb_dict, 1, ECTF_CONFLICT, |
1410 | 0 | _("conflict due to enum value change: %i versus %i"), |
1411 | 0 | value, bvalue); |
1412 | 0 | return 1; |
1413 | 0 | } |
1414 | 0 | return 0; |
1415 | 0 | } |
1416 | | |
1417 | | static int |
1418 | | enumadd (const char *name, int value, void *arg) |
1419 | 0 | { |
1420 | 0 | ctf_bundle_t *ctb = arg; |
1421 | |
|
1422 | 0 | return (ctf_add_enumerator (ctb->ctb_dict, ctb->ctb_type, |
1423 | 0 | name, value) < 0); |
1424 | 0 | } |
1425 | | |
1426 | | static int |
1427 | | membcmp (const char *name, ctf_id_t type _libctf_unused_, unsigned long offset, |
1428 | | void *arg) |
1429 | 0 | { |
1430 | 0 | ctf_bundle_t *ctb = arg; |
1431 | 0 | ctf_membinfo_t ctm; |
1432 | | |
1433 | | /* Don't check nameless members (e.g. anonymous structs/unions) against each |
1434 | | other. */ |
1435 | 0 | if (name[0] == 0) |
1436 | 0 | return 0; |
1437 | | |
1438 | 0 | if (ctf_member_info (ctb->ctb_dict, ctb->ctb_type, name, &ctm) < 0) |
1439 | 0 | { |
1440 | 0 | ctf_err_warn (ctb->ctb_dict, 0, 0, |
1441 | 0 | _("conflict due to struct member %s iteration error"), |
1442 | 0 | name); |
1443 | 0 | return 1; |
1444 | 0 | } |
1445 | 0 | if (ctm.ctm_offset != offset) |
1446 | 0 | { |
1447 | 0 | ctf_err_warn (ctb->ctb_dict, 1, ECTF_CONFLICT, |
1448 | 0 | _("conflict due to struct member %s offset change: " |
1449 | 0 | "%lx versus %lx"), |
1450 | 0 | name, ctm.ctm_offset, offset); |
1451 | 0 | return 1; |
1452 | 0 | } |
1453 | 0 | return 0; |
1454 | 0 | } |
1455 | | |
1456 | | /* Record the correspondence between a source and ctf_add_type()-added |
1457 | | destination type: both types are translated into parent type IDs if need be, |
1458 | | so they relate to the actual dictionary they are in. Outside controlled |
1459 | | circumstances (like linking) it is probably not useful to do more than |
1460 | | compare these pointers, since there is nothing stopping the user closing the |
1461 | | source dict whenever they want to. |
1462 | | |
1463 | | Our OOM handling here is just to not do anything, because this is called deep |
1464 | | enough in the call stack that doing anything useful is painfully difficult: |
1465 | | the worst consequence if we do OOM is a bit of type duplication anyway. */ |
1466 | | |
1467 | | static void |
1468 | | ctf_add_type_mapping (ctf_dict_t *src_fp, ctf_id_t src_type, |
1469 | | ctf_dict_t *dst_fp, ctf_id_t dst_type) |
1470 | 0 | { |
1471 | 0 | if (LCTF_TYPE_ISPARENT (src_fp, src_type) && src_fp->ctf_parent) |
1472 | 0 | src_fp = src_fp->ctf_parent; |
1473 | |
|
1474 | 0 | src_type = LCTF_TYPE_TO_INDEX(src_fp, src_type); |
1475 | |
|
1476 | 0 | if (LCTF_TYPE_ISPARENT (dst_fp, dst_type) && dst_fp->ctf_parent) |
1477 | 0 | dst_fp = dst_fp->ctf_parent; |
1478 | |
|
1479 | 0 | dst_type = LCTF_TYPE_TO_INDEX(dst_fp, dst_type); |
1480 | |
|
1481 | 0 | if (dst_fp->ctf_link_type_mapping == NULL) |
1482 | 0 | { |
1483 | 0 | ctf_hash_fun f = ctf_hash_type_key; |
1484 | 0 | ctf_hash_eq_fun e = ctf_hash_eq_type_key; |
1485 | |
|
1486 | 0 | if ((dst_fp->ctf_link_type_mapping = ctf_dynhash_create (f, e, free, |
1487 | 0 | NULL)) == NULL) |
1488 | 0 | return; |
1489 | 0 | } |
1490 | | |
1491 | 0 | ctf_link_type_key_t *key; |
1492 | 0 | key = calloc (1, sizeof (struct ctf_link_type_key)); |
1493 | 0 | if (!key) |
1494 | 0 | return; |
1495 | | |
1496 | 0 | key->cltk_fp = src_fp; |
1497 | 0 | key->cltk_idx = src_type; |
1498 | | |
1499 | | /* No OOM checking needed, because if this doesn't work the worst we'll do is |
1500 | | add a few more duplicate types (which will probably run out of memory |
1501 | | anyway). */ |
1502 | 0 | ctf_dynhash_insert (dst_fp->ctf_link_type_mapping, key, |
1503 | 0 | (void *) (uintptr_t) dst_type); |
1504 | 0 | } |
1505 | | |
1506 | | /* Look up a type mapping: return 0 if none. The DST_FP is modified to point to |
1507 | | the parent if need be. The ID returned is from the dst_fp's perspective. */ |
1508 | | static ctf_id_t |
1509 | | ctf_type_mapping (ctf_dict_t *src_fp, ctf_id_t src_type, ctf_dict_t **dst_fp) |
1510 | 0 | { |
1511 | 0 | ctf_link_type_key_t key; |
1512 | 0 | ctf_dict_t *target_fp = *dst_fp; |
1513 | 0 | ctf_id_t dst_type = 0; |
1514 | |
1515 | 0 | if (LCTF_TYPE_ISPARENT (src_fp, src_type) && src_fp->ctf_parent) |
1516 | 0 | src_fp = src_fp->ctf_parent; |
1517 | |
1518 | 0 | src_type = LCTF_TYPE_TO_INDEX(src_fp, src_type); |
1519 | 0 | key.cltk_fp = src_fp; |
1520 | 0 | key.cltk_idx = src_type; |
1521 | |
1522 | 0 | if (target_fp->ctf_link_type_mapping) |
1523 | 0 | dst_type = (uintptr_t) ctf_dynhash_lookup (target_fp->ctf_link_type_mapping, |
1524 | 0 | &key); |
1525 | |
1526 | 0 | if (dst_type != 0) |
1527 | 0 | { |
1528 | 0 | dst_type = LCTF_INDEX_TO_TYPE (target_fp, dst_type, |
1529 | 0 | target_fp->ctf_parent != NULL); |
1530 | 0 | *dst_fp = target_fp; |
1531 | 0 | return dst_type; |
1532 | 0 | } |
1533 | | |
1534 | 0 | if (target_fp->ctf_parent) |
1535 | 0 | target_fp = target_fp->ctf_parent; |
1536 | 0 | else |
1537 | 0 | return 0; |
1538 | | |
1539 | 0 | if (target_fp->ctf_link_type_mapping) |
1540 | 0 | dst_type = (uintptr_t) ctf_dynhash_lookup (target_fp->ctf_link_type_mapping, |
1541 | 0 | &key); |
1542 | |
1543 | 0 | if (dst_type) |
1544 | 0 | dst_type = LCTF_INDEX_TO_TYPE (target_fp, dst_type, |
1545 | 0 | target_fp->ctf_parent != NULL); |
1546 | |
1547 | 0 | *dst_fp = target_fp; |
1548 | 0 | return dst_type; |
1549 | 0 | } |
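Taken together, the two helpers above form a record/look-up pair. A hedged illustration of the round trip (the handles are placeholders): once a correspondence has been recorded, a later lookup keyed on the same source dict and type ID yields the destination ID from the destination dict's perspective, or 0 if nothing was recorded.

/* Sketch only: record that src_type (in src_fp) was copied as dst_type (in
   dst_fp), then fetch it back.  ctf_type_mapping may move WHERE to the
   parent dict if that is where the mapping actually lives.  */
static ctf_id_t
recall_mapping (ctf_dict_t *dst_fp, ctf_id_t dst_type,
		ctf_dict_t *src_fp, ctf_id_t src_type)
{
  ctf_dict_t *where = dst_fp;

  ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
  return ctf_type_mapping (src_fp, src_type, &where);	/* 0 if unrecorded */
}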
1550 | | |
1551 | | /* The ctf_add_type routine is used to copy a type from a source CTF dictionary |
1552 | | to a dynamic destination dictionary. This routine operates recursively by |
1553 | | following the source type's links and embedded member types. If the |
1554 | | destination dict already contains a named type which has the same attributes, |
1555 | | then we succeed and return this type but no changes occur. */ |
1556 | | static ctf_id_t |
1557 | | ctf_add_type_internal (ctf_dict_t *dst_fp, ctf_dict_t *src_fp, ctf_id_t src_type, |
1558 | | ctf_dict_t *proc_tracking_fp) |
1559 | 0 | { |
1560 | 0 | ctf_id_t dst_type = CTF_ERR; |
1561 | 0 | uint32_t dst_kind = CTF_K_UNKNOWN; |
1562 | 0 | ctf_dict_t *tmp_fp = dst_fp; |
1563 | 0 | ctf_id_t tmp; |
1564 | |
1565 | 0 | const char *name; |
1566 | 0 | uint32_t kind, forward_kind, flag, vlen; |
1567 | |
1568 | 0 | const ctf_type_t *src_tp, *dst_tp; |
1569 | 0 | ctf_bundle_t src, dst; |
1570 | 0 | ctf_encoding_t src_en, dst_en; |
1571 | 0 | ctf_arinfo_t src_ar, dst_ar; |
1572 | |
1573 | 0 | ctf_funcinfo_t ctc; |
1574 | |
1575 | 0 | ctf_id_t orig_src_type = src_type; |
1576 | |
1577 | 0 | if (!(dst_fp->ctf_flags & LCTF_RDWR)) |
1578 | 0 | return (ctf_set_errno (dst_fp, ECTF_RDONLY)); |
1579 | | |
1580 | 0 | if ((src_tp = ctf_lookup_by_id (&src_fp, src_type)) == NULL) |
1581 | 0 | return (ctf_set_errno (dst_fp, ctf_errno (src_fp))); |
1582 | | |
1583 | 0 | if ((ctf_type_resolve (src_fp, src_type) == CTF_ERR) |
1584 | 0 | && (ctf_errno (src_fp) == ECTF_NONREPRESENTABLE)) |
1585 | 0 | return (ctf_set_errno (dst_fp, ECTF_NONREPRESENTABLE)); |
1586 | | |
1587 | 0 | name = ctf_strptr (src_fp, src_tp->ctt_name); |
1588 | 0 | kind = LCTF_INFO_KIND (src_fp, src_tp->ctt_info); |
1589 | 0 | flag = LCTF_INFO_ISROOT (src_fp, src_tp->ctt_info); |
1590 | 0 | vlen = LCTF_INFO_VLEN (src_fp, src_tp->ctt_info); |
1591 | | |
1592 | | /* If this is a type we are currently in the middle of adding, hand it |
1593 | | straight back. (This lets us handle self-referential structures without |
1594 | | considering forwards and empty structures the same as their completed |
1595 | | forms.) */ |
1596 | |
1597 | 0 | tmp = ctf_type_mapping (src_fp, src_type, &tmp_fp); |
1598 | |
1599 | 0 | if (tmp != 0) |
1600 | 0 | { |
1601 | 0 | if (ctf_dynhash_lookup (proc_tracking_fp->ctf_add_processing, |
1602 | 0 | (void *) (uintptr_t) src_type)) |
1603 | 0 | return tmp; |
1604 | | |
1605 | | /* If this type has already been added from this dictionary, and is the |
1606 | | same kind and (if a struct or union) has the same number of members, |
1607 | | hand it straight back. */ |
1608 | | |
1609 | 0 | if (ctf_type_kind_unsliced (tmp_fp, tmp) == (int) kind) |
1610 | 0 | { |
1611 | 0 | if (kind == CTF_K_STRUCT || kind == CTF_K_UNION |
1612 | 0 | || kind == CTF_K_ENUM) |
1613 | 0 | { |
1614 | 0 | if ((dst_tp = ctf_lookup_by_id (&tmp_fp, dst_type)) != NULL) |
1615 | 0 | if (vlen == LCTF_INFO_VLEN (tmp_fp, dst_tp->ctt_info)) |
1616 | 0 | return tmp; |
1617 | 0 | } |
1618 | 0 | else |
1619 | 0 | return tmp; |
1620 | 0 | } |
1621 | 0 | } |
1622 | | |
1623 | 0 | forward_kind = kind; |
1624 | 0 | if (kind == CTF_K_FORWARD) |
1625 | 0 | forward_kind = src_tp->ctt_type; |
1626 | | |
1627 | | /* If the source type has a name and is a root type (visible at the top-level |
1629 | |      scope), look up the name in the destination dictionary and verify that it is |
1630 | |      of the same kind before we do anything else.  */ |
1630 | |
1631 | 0 | if ((flag & CTF_ADD_ROOT) && name[0] != '\0' |
1632 | 0 | && (tmp = ctf_lookup_by_rawname (dst_fp, forward_kind, name)) != 0) |
1633 | 0 | { |
1634 | 0 | dst_type = tmp; |
1635 | 0 | dst_kind = ctf_type_kind_unsliced (dst_fp, dst_type); |
1636 | 0 | } |
1637 | | |
1638 | | /* If an identically named dst_type exists, fail with ECTF_CONFLICT |
1639 | | unless dst_type is a forward declaration and src_type is a struct, |
1640 | | union, or enum (i.e. the definition of the previous forward decl). |
1641 | | |
1642 | | We also allow addition in the opposite order (addition of a forward when a |
1643 | | struct, union, or enum already exists), which is a NOP and returns the |
1644 | | already-present struct, union, or enum. */ |
1645 | |
1646 | 0 | if (dst_type != CTF_ERR && dst_kind != kind) |
1647 | 0 | { |
1648 | 0 | if (kind == CTF_K_FORWARD |
1649 | 0 | && (dst_kind == CTF_K_ENUM || dst_kind == CTF_K_STRUCT |
1650 | 0 | || dst_kind == CTF_K_UNION)) |
1651 | 0 | { |
1652 | 0 | ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type); |
1653 | 0 | return dst_type; |
1654 | 0 | } |
1655 | | |
1656 | 0 | if (dst_kind != CTF_K_FORWARD |
1657 | 0 | || (kind != CTF_K_ENUM && kind != CTF_K_STRUCT |
1658 | 0 | && kind != CTF_K_UNION)) |
1659 | 0 | { |
1660 | 0 | ctf_err_warn (dst_fp, 1, ECTF_CONFLICT, |
1661 | 0 | _("ctf_add_type: conflict for type %s: " |
1662 | 0 | "kinds differ, new: %i; old (ID %lx): %i"), |
1663 | 0 | name, kind, dst_type, dst_kind); |
1664 | 0 | return (ctf_set_errno (dst_fp, ECTF_CONFLICT)); |
1665 | 0 | } |
1666 | 0 | } |
1667 | | |
1668 | | /* We take special action for an integer, float, or slice since it is |
1669 | | described not only by its name but also its encoding. For integers, |
1670 | | bit-fields exploit this degeneracy. */ |
1671 | | |
1672 | 0 | if (kind == CTF_K_INTEGER || kind == CTF_K_FLOAT || kind == CTF_K_SLICE) |
1673 | 0 | { |
1674 | 0 | if (ctf_type_encoding (src_fp, src_type, &src_en) != 0) |
1675 | 0 | return (ctf_set_errno (dst_fp, ctf_errno (src_fp))); |
1676 | | |
1677 | 0 | if (dst_type != CTF_ERR) |
1678 | 0 | { |
1679 | 0 | ctf_dict_t *fp = dst_fp; |
1680 | |
1681 | 0 | if ((dst_tp = ctf_lookup_by_id (&fp, dst_type)) == NULL) |
1682 | 0 | return CTF_ERR; |
1683 | | |
1684 | 0 | if (ctf_type_encoding (dst_fp, dst_type, &dst_en) != 0) |
1685 | 0 | return CTF_ERR; /* errno set for us. */ |
1686 | | |
1687 | 0 | if (LCTF_INFO_ISROOT (fp, dst_tp->ctt_info) & CTF_ADD_ROOT) |
1688 | 0 | { |
1689 | | /* The type that we found in the hash is also root-visible. If |
1690 | | the two types match then use the existing one; otherwise, |
1691 | | declare a conflict. Note: slices are not certain to match |
1692 | | even if there is no conflict: we must check the contained type |
1693 | | too. */ |
1694 | |
1695 | 0 | if (memcmp (&src_en, &dst_en, sizeof (ctf_encoding_t)) == 0) |
1696 | 0 | { |
1697 | 0 | if (kind != CTF_K_SLICE) |
1698 | 0 | { |
1699 | 0 | ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type); |
1700 | 0 | return dst_type; |
1701 | 0 | } |
1702 | 0 | } |
1703 | 0 | else |
1704 | 0 | { |
1705 | 0 | return (ctf_set_errno (dst_fp, ECTF_CONFLICT)); |
1706 | 0 | } |
1707 | 0 | } |
1708 | 0 | else |
1709 | 0 | { |
1710 | | /* We found a non-root-visible type in the hash. If its encoding |
1711 | | is the same, we can reuse it, unless it is a slice. */ |
1712 | |
1713 | 0 | if (memcmp (&src_en, &dst_en, sizeof (ctf_encoding_t)) == 0) |
1714 | 0 | { |
1715 | 0 | if (kind != CTF_K_SLICE) |
1716 | 0 | { |
1717 | 0 | ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type); |
1718 | 0 | return dst_type; |
1719 | 0 | } |
1720 | 0 | } |
1721 | 0 | } |
1722 | 0 | } |
1723 | 0 | } |
1724 | | |
1725 | 0 | src.ctb_dict = src_fp; |
1726 | 0 | src.ctb_type = src_type; |
1727 | 0 | src.ctb_dtd = NULL; |
1728 | |
1729 | 0 | dst.ctb_dict = dst_fp; |
1730 | 0 | dst.ctb_type = dst_type; |
1731 | 0 | dst.ctb_dtd = NULL; |
1732 | | |
1733 | | /* Now perform kind-specific processing. If dst_type is CTF_ERR, then we add |
1734 | | a new type with the same properties as src_type to dst_fp. If dst_type is |
1735 | | not CTF_ERR, then we verify that dst_type has the same attributes as |
1736 | | src_type. We recurse for embedded references. Before we start, we note |
1737 | | that we are processing this type, to prevent infinite recursion: we do not |
1738 | | re-process any type that appears in this list. The list is emptied |
1739 | | wholesale at the end of processing everything in this recursive stack. */ |
1740 | |
1741 | 0 | if (ctf_dynhash_insert (proc_tracking_fp->ctf_add_processing, |
1742 | 0 | (void *) (uintptr_t) src_type, (void *) 1) < 0) |
1743 | 0 | return ctf_set_errno (dst_fp, ENOMEM); |
1744 | | |
1745 | 0 | switch (kind) |
1746 | 0 | { |
1747 | 0 | case CTF_K_INTEGER: |
1748 | | /* If we found a match we will have either returned it or declared a |
1749 | | conflict. */ |
1750 | 0 | dst_type = ctf_add_integer (dst_fp, flag, name, &src_en); |
1751 | 0 | break; |
1752 | | |
1753 | 0 | case CTF_K_FLOAT: |
1754 | | /* If we found a match we will have either returned it or declared a |
1755 | | conflict. */ |
1756 | 0 | dst_type = ctf_add_float (dst_fp, flag, name, &src_en); |
1757 | 0 | break; |
1758 | | |
1759 | 0 | case CTF_K_SLICE: |
1760 | | /* We have checked for conflicting encodings: now try to add the |
1761 | | contained type. */ |
1762 | 0 | src_type = ctf_type_reference (src_fp, src_type); |
1763 | 0 | src_type = ctf_add_type_internal (dst_fp, src_fp, src_type, |
1764 | 0 | proc_tracking_fp); |
1765 | |
1766 | 0 | if (src_type == CTF_ERR) |
1767 | 0 | return CTF_ERR; /* errno is set for us. */ |
1768 | | |
1769 | 0 | dst_type = ctf_add_slice (dst_fp, flag, src_type, &src_en); |
1770 | 0 | break; |
1771 | | |
1772 | 0 | case CTF_K_POINTER: |
1773 | 0 | case CTF_K_VOLATILE: |
1774 | 0 | case CTF_K_CONST: |
1775 | 0 | case CTF_K_RESTRICT: |
1776 | 0 | src_type = ctf_type_reference (src_fp, src_type); |
1777 | 0 | src_type = ctf_add_type_internal (dst_fp, src_fp, src_type, |
1778 | 0 | proc_tracking_fp); |
1779 | |
1780 | 0 | if (src_type == CTF_ERR) |
1781 | 0 | return CTF_ERR; /* errno is set for us. */ |
1782 | | |
1783 | 0 | dst_type = ctf_add_reftype (dst_fp, flag, src_type, kind); |
1784 | 0 | break; |
1785 | | |
1786 | 0 | case CTF_K_ARRAY: |
1787 | 0 | if (ctf_array_info (src_fp, src_type, &src_ar) != 0) |
1788 | 0 | return (ctf_set_errno (dst_fp, ctf_errno (src_fp))); |
1789 | | |
1790 | 0 | src_ar.ctr_contents = |
1791 | 0 | ctf_add_type_internal (dst_fp, src_fp, src_ar.ctr_contents, |
1792 | 0 | proc_tracking_fp); |
1793 | 0 | src_ar.ctr_index = ctf_add_type_internal (dst_fp, src_fp, |
1794 | 0 | src_ar.ctr_index, |
1795 | 0 | proc_tracking_fp); |
1796 | 0 | src_ar.ctr_nelems = src_ar.ctr_nelems; |
1797 | |
1798 | 0 | if (src_ar.ctr_contents == CTF_ERR || src_ar.ctr_index == CTF_ERR) |
1799 | 0 | return CTF_ERR; /* errno is set for us. */ |
1800 | | |
1801 | 0 | if (dst_type != CTF_ERR) |
1802 | 0 | { |
1803 | 0 | if (ctf_array_info (dst_fp, dst_type, &dst_ar) != 0) |
1804 | 0 | return CTF_ERR; /* errno is set for us. */ |
1805 | | |
1806 | 0 | if (memcmp (&src_ar, &dst_ar, sizeof (ctf_arinfo_t))) |
1807 | 0 | { |
1808 | 0 | ctf_err_warn (dst_fp, 1, ECTF_CONFLICT, |
1809 | 0 | _("conflict for type %s against ID %lx: array info " |
1810 | 0 | "differs, old %lx/%lx/%x; new: %lx/%lx/%x"), |
1811 | 0 | name, dst_type, src_ar.ctr_contents, |
1812 | 0 | src_ar.ctr_index, src_ar.ctr_nelems, |
1813 | 0 | dst_ar.ctr_contents, dst_ar.ctr_index, |
1814 | 0 | dst_ar.ctr_nelems); |
1815 | 0 | return (ctf_set_errno (dst_fp, ECTF_CONFLICT)); |
1816 | 0 | } |
1817 | 0 | } |
1818 | 0 | else |
1819 | 0 | dst_type = ctf_add_array (dst_fp, flag, &src_ar); |
1820 | 0 | break; |
1821 | | |
1822 | 0 | case CTF_K_FUNCTION: |
1823 | 0 | ctc.ctc_return = ctf_add_type_internal (dst_fp, src_fp, |
1824 | 0 | src_tp->ctt_type, |
1825 | 0 | proc_tracking_fp); |
1826 | 0 | ctc.ctc_argc = 0; |
1827 | 0 | ctc.ctc_flags = 0; |
1828 | |
1829 | 0 | if (ctc.ctc_return == CTF_ERR) |
1830 | 0 | return CTF_ERR; /* errno is set for us. */ |
1831 | | |
1832 | 0 | dst_type = ctf_add_function (dst_fp, flag, &ctc, NULL); |
1833 | 0 | break; |
1834 | | |
1835 | 0 | case CTF_K_STRUCT: |
1836 | 0 | case CTF_K_UNION: |
1837 | 0 | { |
1838 | 0 | ctf_next_t *i = NULL; |
1839 | 0 | ssize_t offset; |
1840 | 0 | const char *membname; |
1841 | 0 | ctf_id_t src_membtype; |
1842 | | |
1843 | | /* Technically to match a struct or union we need to check both |
1844 | | ways (src members vs. dst, dst members vs. src) but we make |
1845 | |            this cheaper by checking only src members vs. dst and comparing |
1846 | |            the total size of the structure (which we must do anyway), |
1847 | |            which also catches dst members that are not present in src. |
1848 | | This optimization can be defeated for unions, but is so |
1849 | | pathological as to render it irrelevant for our purposes. */ |
1850 | |
1851 | 0 | if (dst_type != CTF_ERR && kind != CTF_K_FORWARD |
1852 | 0 | && dst_kind != CTF_K_FORWARD) |
1853 | 0 | { |
1854 | 0 | if (ctf_type_size (src_fp, src_type) != |
1855 | 0 | ctf_type_size (dst_fp, dst_type)) |
1856 | 0 | { |
1857 | 0 | ctf_err_warn (dst_fp, 1, ECTF_CONFLICT, |
1858 | 0 | _("conflict for type %s against ID %lx: union " |
1859 | 0 | "size differs, old %li, new %li"), name, |
1860 | 0 | dst_type, (long) ctf_type_size (src_fp, src_type), |
1861 | 0 | (long) ctf_type_size (dst_fp, dst_type)); |
1862 | 0 | return (ctf_set_errno (dst_fp, ECTF_CONFLICT)); |
1863 | 0 | } |
1864 | | |
1865 | 0 | if (ctf_member_iter (src_fp, src_type, membcmp, &dst)) |
1866 | 0 | { |
1867 | 0 | ctf_err_warn (dst_fp, 1, ECTF_CONFLICT, |
1868 | 0 | _("conflict for type %s against ID %lx: members " |
1869 | 0 | "differ, see above"), name, dst_type); |
1870 | 0 | return (ctf_set_errno (dst_fp, ECTF_CONFLICT)); |
1871 | 0 | } |
1872 | | |
1873 | 0 | break; |
1874 | 0 | } |
1875 | | |
1876 | 0 | dst_type = ctf_add_struct_sized (dst_fp, flag, name, |
1877 | 0 | ctf_type_size (src_fp, src_type)); |
1878 | 0 | if (dst_type == CTF_ERR) |
1879 | 0 | return CTF_ERR; /* errno is set for us. */ |
1880 | | |
1881 | | /* Pre-emptively add this struct to the type mapping so that |
1882 | | structures that refer to themselves work. */ |
1883 | 0 | ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type); |
1884 | |
1885 | 0 | while ((offset = ctf_member_next (src_fp, src_type, &i, &membname, |
1886 | 0 | &src_membtype, 0)) >= 0) |
1887 | 0 | { |
1888 | 0 | ctf_dict_t *dst = dst_fp; |
1889 | 0 | ctf_id_t dst_membtype = ctf_type_mapping (src_fp, src_membtype, &dst); |
1890 | |
1891 | 0 | if (dst_membtype == 0) |
1892 | 0 | { |
1893 | 0 | dst_membtype = ctf_add_type_internal (dst_fp, src_fp, |
1894 | 0 | src_membtype, |
1895 | 0 | proc_tracking_fp); |
1896 | 0 | if (dst_membtype == CTF_ERR) |
1897 | 0 | { |
1898 | 0 | if (ctf_errno (dst_fp) != ECTF_NONREPRESENTABLE) |
1899 | 0 | { |
1900 | 0 | ctf_next_destroy (i); |
1901 | 0 | break; |
1902 | 0 | } |
1903 | 0 | } |
1904 | 0 | } |
1905 | | |
1906 | 0 | if (ctf_add_member_offset (dst_fp, dst_type, membname, |
1907 | 0 | dst_membtype, offset) < 0) |
1908 | 0 | { |
1909 | 0 | ctf_next_destroy (i); |
1910 | 0 | break; |
1911 | 0 | } |
1912 | 0 | } |
1913 | 0 | if (ctf_errno (src_fp) != ECTF_NEXT_END) |
1914 | 0 | return CTF_ERR; /* errno is set for us. */ |
1915 | 0 | break; |
1916 | 0 | } |
1917 | | |
1918 | 0 | case CTF_K_ENUM: |
1919 | 0 | if (dst_type != CTF_ERR && kind != CTF_K_FORWARD |
1920 | 0 | && dst_kind != CTF_K_FORWARD) |
1921 | 0 | { |
1922 | 0 | if (ctf_enum_iter (src_fp, src_type, enumcmp, &dst) |
1923 | 0 | || ctf_enum_iter (dst_fp, dst_type, enumcmp, &src)) |
1924 | 0 | { |
1925 | 0 | ctf_err_warn (dst_fp, 1, ECTF_CONFLICT, |
1926 | 0 | _("conflict for enum %s against ID %lx: members " |
1927 | 0 | "differ, see above"), name, dst_type); |
1928 | 0 | return (ctf_set_errno (dst_fp, ECTF_CONFLICT)); |
1929 | 0 | } |
1930 | 0 | } |
1931 | 0 | else |
1932 | 0 | { |
1933 | 0 | dst_type = ctf_add_enum (dst_fp, flag, name); |
1934 | 0 | if ((dst.ctb_type = dst_type) == CTF_ERR |
1935 | 0 | || ctf_enum_iter (src_fp, src_type, enumadd, &dst)) |
1936 | 0 | return CTF_ERR; /* errno is set for us */ |
1937 | 0 | } |
1938 | 0 | break; |
1939 | | |
1940 | 0 | case CTF_K_FORWARD: |
1941 | 0 | if (dst_type == CTF_ERR) |
1942 | 0 | dst_type = ctf_add_forward (dst_fp, flag, name, forward_kind); |
1943 | 0 | break; |
1944 | | |
1945 | 0 | case CTF_K_TYPEDEF: |
1946 | 0 | src_type = ctf_type_reference (src_fp, src_type); |
1947 | 0 | src_type = ctf_add_type_internal (dst_fp, src_fp, src_type, |
1948 | 0 | proc_tracking_fp); |
1949 | |
1950 | 0 | if (src_type == CTF_ERR) |
1951 | 0 | return CTF_ERR; /* errno is set for us. */ |
1952 | | |
1953 | | /* If dst_type is not CTF_ERR at this point, we should check if |
1954 | | ctf_type_reference(dst_fp, dst_type) != src_type and if so fail with |
1955 | |          ECTF_CONFLICT.  However, that breaks typedefs whose definition varies with |
1956 | |          target bitness (e.g. pid_t may be int on 32-bit targets but long |
1957 | |          otherwise).  We therefore omit this check and assume that if the identically |
1958 | | named typedef already exists in dst_fp, it is correct or |
1959 | | equivalent. */ |
1960 | | |
1961 | 0 | if (dst_type == CTF_ERR) |
1962 | 0 | dst_type = ctf_add_typedef (dst_fp, flag, name, src_type); |
1963 | |
1964 | 0 | break; |
1965 | | |
1966 | 0 | default: |
1967 | 0 | return (ctf_set_errno (dst_fp, ECTF_CORRUPT)); |
1968 | 0 | } |
1969 | | |
1970 | 0 | if (dst_type != CTF_ERR) |
1971 | 0 | ctf_add_type_mapping (src_fp, orig_src_type, dst_fp, dst_type); |
1972 | 0 | return dst_type; |
1973 | 0 | } |
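The recursion guard described before the switch (the ctf_add_processing hash) follows a small protocol that the public wrapper below completes by emptying the hash once the outermost call returns. A simplified, hedged sketch of that protocol (not the function itself):

/* Sketch only: the shape of the recursion guard used above.  A source type
   is recorded before its references are recursed into; any re-encounter of
   the same source ID is then handed back immediately.  */
static int
guarded_visit (ctf_dict_t *tracker, ctf_id_t src_type)
{
  if (ctf_dynhash_lookup (tracker->ctf_add_processing,
			  (void *) (uintptr_t) src_type))
    return 0;				/* already being processed */

  if (ctf_dynhash_insert (tracker->ctf_add_processing,
			  (void *) (uintptr_t) src_type, (void *) 1) < 0)
    return (ctf_set_errno (tracker, ENOMEM));

  /* ... recurse into the referenced types here ... */
  return 0;
}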
1974 | | |
1975 | | ctf_id_t |
1976 | | ctf_add_type (ctf_dict_t *dst_fp, ctf_dict_t *src_fp, ctf_id_t src_type) |
1977 | 0 | { |
1978 | 0 | ctf_id_t id; |
1979 | |
1980 | 0 | if (!src_fp->ctf_add_processing) |
1981 | 0 | src_fp->ctf_add_processing = ctf_dynhash_create (ctf_hash_integer, |
1982 | 0 | ctf_hash_eq_integer, |
1983 | 0 | NULL, NULL); |
1984 | | |
1985 | | /* We store the hash on the source, because it contains only source type IDs: |
1986 | | but callers will invariably expect errors to appear on the dest. */ |
1987 | 0 | if (!src_fp->ctf_add_processing) |
1988 | 0 | return (ctf_set_errno (dst_fp, ENOMEM)); |
1989 | | |
1990 | 0 | id = ctf_add_type_internal (dst_fp, src_fp, src_type, src_fp); |
1991 | 0 | ctf_dynhash_empty (src_fp->ctf_add_processing); |
1992 | |
1993 | 0 | return id; |
1994 | 0 | } |
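Finally, a usage-level sketch of the public entry point, using only documented libctf API (the type name "struct foo" is invented): copying one named type, and transitively everything it refers to, from an opened source dict into a freshly created writable dict.

/* Sketch only: copy struct foo out of SRC into a new dict.  ctf_add_type
   recursively pulls in every type that struct foo references.  */
static ctf_dict_t *
copy_foo (ctf_dict_t *src)
{
  ctf_dict_t *dst;
  ctf_id_t src_id;
  int err;

  if ((dst = ctf_create (&err)) == NULL)
    return NULL;

  if ((src_id = ctf_lookup_by_name (src, "struct foo")) == CTF_ERR
      || ctf_add_type (dst, src, src_id) == CTF_ERR)
    {
      ctf_dict_close (dst);
      return NULL;
    }

  return dst;
}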