/src/binutils-gdb/bfd/elf64-kvx.c
Line | Count | Source |
1 | | #line 1 "elfnn-kvx.c" |
2 | | /* KVX-specific support for 64-bit ELF. |
3 | | Copyright (C) 2009-2025 Free Software Foundation, Inc. |
4 | | Contributed by Kalray SA. |
5 | | |
6 | | This file is part of BFD, the Binary File Descriptor library. |
7 | | |
8 | | This program is free software; you can redistribute it and/or modify |
9 | | it under the terms of the GNU General Public License as published by |
10 | | the Free Software Foundation; either version 3 of the License, or |
11 | | (at your option) any later version. |
12 | | |
13 | | This program is distributed in the hope that it will be useful, |
14 | | but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | | GNU General Public License for more details. |
17 | | |
18 | | You should have received a copy of the GNU General Public License |
19 | | along with this program; see the file COPYING3. If not, |
20 | | see <http://www.gnu.org/licenses/>. */ |
21 | | |
22 | | #include "sysdep.h" |
23 | | #include "bfd.h" |
24 | | #include "libiberty.h" |
25 | | #include "libbfd.h" |
26 | | #include "elf-bfd.h" |
27 | | #include "bfdlink.h" |
28 | | #include "objalloc.h" |
29 | | #include "elf/kvx.h" |
30 | | #include "elfxx-kvx.h" |
31 | | |
32 | 0 | #define ARCH_SIZE 64 |
33 | | |
34 | | #if ARCH_SIZE == 64 |
35 | 0 | #define LOG_FILE_ALIGN 3 |
36 | | #endif |
37 | | |
38 | | #if ARCH_SIZE == 32 |
39 | | #define LOG_FILE_ALIGN 2 |
40 | | #endif |
41 | | |
42 | | #define IS_KVX_TLS_RELOC(R_TYPE) \ |
43 | 0 | ((R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_LO10 \ |
44 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_UP27 \ |
45 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_LO10 \ |
46 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_UP27 \ |
47 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_EX6 \ |
48 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10 \ |
49 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27 \ |
50 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10 \ |
51 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27 \ |
52 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6 \ |
53 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_LO10 \ |
54 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_UP27 \ |
55 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_LO10 \ |
56 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_UP27 \ |
57 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_EX6 \ |
58 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_LO10 \ |
59 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_UP27 \ |
60 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_LO10 \ |
61 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_UP27 \ |
62 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_EX6 \ |
63 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_LO10 \ |
64 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_UP27 \ |
65 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_LO10 \ |
66 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_UP27 \ |
67 | 0 | || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_EX6 \ |
68 | 0 | ) |
69 | | |
70 | 0 | #define IS_KVX_TLS_RELAX_RELOC(R_TYPE) 0 |
71 | | |
72 | 0 | #define ELIMINATE_COPY_RELOCS 0 |
73 | | |
74 | | /* Return size of a relocation entry. HTAB is the bfd's |
75 | | elf_kvx_link_hash_table. */ |
76 | 0 | #define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela)) |
77 | | |
78 | | /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */ |
79 | 0 | #define GOT_ENTRY_SIZE (ARCH_SIZE / 8) |
80 | 0 | #define PLT_ENTRY_SIZE (32) |
81 | | |
82 | 0 | #define PLT_SMALL_ENTRY_SIZE (4*4) |
83 | | |
84 | | /* Encoding of the nop instruction. */ |
85 | 0 | #define INSN_NOP 0x00f0037f |
86 | | |
87 | | #define kvx_compute_jump_table_size(htab) \ |
88 | 0 | (((htab)->root.srelplt == NULL) ? 0 \ |
89 | 0 | : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE) |
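/* Put differently: every relocation in .rela.plt corresponds to one
   GOT_ENTRY_SIZE slot of jump table, so with the ELF64 entry size of
   8 bytes a module with, say, 12 PLT relocations reserves
   12 * 8 = 96 bytes of .got.plt jump slots.  */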
90 | | |
91 | | static const bfd_byte elf64_kvx_small_plt0_entry[PLT_ENTRY_SIZE] = |
92 | | { |
93 | | /* FIXME KVX: no first entry, not used yet */ |
94 | | 0 |
95 | | }; |
96 | | |
97 | | /* A per-function entry in the procedure linkage table looks like |
98 | | this. If the distance between the PLTGOT and the PLT is < 4GB, |
99 | | use these PLT entries. */ |
100 | | static const bfd_byte elf64_kvx_small_plt_entry[PLT_SMALL_ENTRY_SIZE] = |
101 | | { |
102 | | 0x10, 0x00, 0xc4, 0x0f, /* get $r16 = $pc ;; */ |
103 | | #if ARCH_SIZE == 32 |
104 | | 0x10, 0x00, 0x40, 0xb0, /* lwz $r16 = 0[$r16] ;; */ |
105 | | #else |
106 | | 0x10, 0x00, 0x40, 0xb8, /* ld $r16 = 0[$r16] ;; */ |
107 | | #endif |
108 | | 0x00, 0x00, 0x00, 0x18, /* upper 27 bits for LSU */ |
109 | | 0x10, 0x00, 0xd8, 0x0f, /* igoto $r16 ;; */ |
110 | | }; |
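/* A rough sketch of what a small PLT entry does at run time, assuming
   the usual PC-relative .got.plt scheme (the offset in the template is
   0 here and is only filled in when the PLT is laid out):

     get   $r16 = $pc         ;;   address of this PLT entry
     ld    $r16 = OFF[$r16]   ;;   OFF patched to reach this entry's .got.plt slot
     igoto $r16               ;;   jump to whatever the slot contains

   The third word of the template ("upper 27 bits for LSU") is the
   extension syllable that will hold the upper bits of OFF.  */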
111 | | |
112 | | /* The long branch stub uses the 43-bit immediate form of the make instruction. */ |
113 | | static const uint32_t elf64_kvx_long_branch_stub[] = |
114 | | { |
115 | | 0xe0400000, /* make $r16 = LO10<imm43> EX6<imm43> */ |
116 | | 0x00000000, /* UP27<imm43> ;; */ |
117 | | 0x0fd80010, /* igoto $r16 ;; */ |
118 | | }; |
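/* For illustration, the three syllables above are patched by the three
   S43 relocations applied in kvx_build_one_stub; assuming the bit
   split implied by the relocation names, a 43-bit target address is
   scattered as

     lo10 = addr & 0x3ff;              -- R_KVX_S43_LO10, first syllable
     up27 = (addr >> 10) & 0x7ffffff;  -- R_KVX_S43_UP27, second syllable
     ex6  = (addr >> 37) & 0x3f;       -- R_KVX_S43_EX6, first syllable again

   so "make $r16 = addr" followed by "igoto $r16" can reach any 43-bit
   address, independent of the distance from the call site.  */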
119 | | |
120 | | #define elf_info_to_howto elf64_kvx_info_to_howto |
121 | | #define elf_info_to_howto_rel elf64_kvx_info_to_howto |
122 | | |
123 | 0 | #define KVX_ELF_ABI_VERSION 0 |
124 | | |
125 | | /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */ |
126 | | #define ALL_ONES (~ (bfd_vma) 0) |
127 | | |
128 | | /* Indexed by the bfd internal reloc enumerators. |
129 | | Therefore, the table needs to be synced with BFD_RELOC_KVX_* |
130 | | in reloc.c. */ |
131 | | |
132 | | #define KVX_KV3_V1_KV3_V2_KV4_V1 |
133 | | #include "elfxx-kvx-relocs.h" |
134 | | #undef KVX_KV3_V1_KV3_V2_KV4_V1 |
135 | | |
136 | | /* Given HOWTO, return the bfd internal relocation enumerator. */ |
137 | | |
138 | | static bfd_reloc_code_real_type |
139 | | elf64_kvx_bfd_reloc_from_howto (reloc_howto_type *howto) |
140 | 0 | { |
141 | 0 | const int size = (int) ARRAY_SIZE (elf_kvx_howto_table); |
142 | 0 | const ptrdiff_t offset = howto - elf_kvx_howto_table; |
143 | |
|
144 | 0 | if (offset >= 0 && offset < size) |
145 | 0 | return BFD_RELOC_KVX_RELOC_START + offset + 1; |
146 | | |
147 | 0 | return BFD_RELOC_KVX_RELOC_START + 1; |
148 | 0 | } |
149 | | |
150 | | /* Given R_TYPE, return the bfd internal relocation enumerator. */ |
151 | | |
152 | | static bfd_reloc_code_real_type |
153 | | elf64_kvx_bfd_reloc_from_type (bfd *abfd ATTRIBUTE_UNUSED, unsigned int r_type) |
154 | 343 | { |
155 | 343 | static bool initialized_p = false; |
156 | | /* Indexed by R_TYPE, values are offsets in the howto_table. */ |
157 | 343 | static unsigned int offsets[R_KVX_end]; |
158 | | |
159 | 343 | if (!initialized_p) |
160 | 3 | { |
161 | 3 | unsigned int i; |
162 | | |
163 | 252 | for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i) |
164 | 249 | offsets[elf_kvx_howto_table[i].type] = i; |
165 | | |
166 | 3 | initialized_p = true; |
167 | 3 | } |
168 | | |
169 | | /* PR 17512: file: b371e70a. */ |
170 | 343 | if (r_type >= R_KVX_end) |
171 | 23 | { |
172 | 23 | bfd_set_error (bfd_error_bad_value); |
173 | 23 | return BFD_RELOC_KVX_RELOC_END; |
174 | 23 | } |
175 | | |
176 | 320 | return (BFD_RELOC_KVX_RELOC_START + 1) + offsets[r_type]; |
177 | 343 | } |
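/* Example of the mapping, assuming the first entry of
   elf_kvx_howto_table is R_KVX_NONE (the table is kept in sync with
   the BFD_RELOC_KVX_* block in reloc.c): an ELF r_type of R_KVX_NONE
   yields offsets[R_KVX_NONE] == 0 and therefore
   BFD_RELOC_KVX_RELOC_START + 1, the first KVX value of the bfd
   enumeration; elf64_kvx_howto_from_bfd_reloc below performs the
   inverse computation.  */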
178 | | |
179 | | struct elf_kvx_reloc_map |
180 | | { |
181 | | bfd_reloc_code_real_type from; |
182 | | bfd_reloc_code_real_type to; |
183 | | }; |
184 | | |
185 | | /* Map bfd generic reloc to KVX-specific reloc. */ |
186 | | static const struct elf_kvx_reloc_map elf_kvx_reloc_map[] = |
187 | | { |
188 | | {BFD_RELOC_NONE, BFD_RELOC_KVX_NONE}, |
189 | | |
190 | | /* Basic data relocations. */ |
191 | | {BFD_RELOC_CTOR, BFD_RELOC_KVX_64}, |
192 | | {BFD_RELOC_64, BFD_RELOC_KVX_64}, |
193 | | {BFD_RELOC_32, BFD_RELOC_KVX_32}, |
194 | | {BFD_RELOC_16, BFD_RELOC_KVX_16}, |
195 | | {BFD_RELOC_8, BFD_RELOC_KVX_8}, |
196 | | |
197 | | {BFD_RELOC_64_PCREL, BFD_RELOC_KVX_64_PCREL}, |
198 | | {BFD_RELOC_32_PCREL, BFD_RELOC_KVX_32_PCREL}, |
199 | | }; |
200 | | |
201 | | /* Given the bfd internal relocation enumerator in CODE, return the |
202 | | corresponding howto entry. */ |
203 | | |
204 | | static reloc_howto_type * |
205 | | elf64_kvx_howto_from_bfd_reloc (bfd_reloc_code_real_type code) |
206 | 343 | { |
207 | 343 | unsigned int i; |
208 | | |
209 | | /* Convert bfd generic reloc to KVX-specific reloc. */ |
210 | 343 | if (code < BFD_RELOC_KVX_RELOC_START || code > BFD_RELOC_KVX_RELOC_END) |
211 | 0 | for (i = 0; i < ARRAY_SIZE (elf_kvx_reloc_map) ; i++) |
212 | 0 | if (elf_kvx_reloc_map[i].from == code) |
213 | 0 | { |
214 | 0 | code = elf_kvx_reloc_map[i].to; |
215 | 0 | break; |
216 | 0 | } |
217 | | |
218 | 343 | if (code > BFD_RELOC_KVX_RELOC_START && code < BFD_RELOC_KVX_RELOC_END) |
219 | 320 | return &elf_kvx_howto_table[code - (BFD_RELOC_KVX_RELOC_START + 1)]; |
220 | | |
221 | 23 | return NULL; |
222 | 343 | } |
223 | | |
224 | | static reloc_howto_type * |
225 | | elf64_kvx_howto_from_type (bfd *abfd, unsigned int r_type) |
226 | 343 | { |
227 | 343 | bfd_reloc_code_real_type val; |
228 | 343 | reloc_howto_type *howto; |
229 | | |
230 | | #if ARCH_SIZE == 32 |
231 | | if (r_type > 256) |
232 | | { |
233 | | bfd_set_error (bfd_error_bad_value); |
234 | | return NULL; |
235 | | } |
236 | | #endif |
237 | | |
238 | 343 | val = elf64_kvx_bfd_reloc_from_type (abfd, r_type); |
239 | 343 | howto = elf64_kvx_howto_from_bfd_reloc (val); |
240 | | |
241 | 343 | if (howto != NULL) |
242 | 320 | return howto; |
243 | | |
244 | 23 | bfd_set_error (bfd_error_bad_value); |
245 | 23 | return NULL; |
246 | 343 | } |
247 | | |
248 | | static bool |
249 | | elf64_kvx_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc, |
250 | | Elf_Internal_Rela *elf_reloc) |
251 | 343 | { |
252 | 343 | unsigned int r_type; |
253 | | |
254 | 343 | r_type = ELF64_R_TYPE (elf_reloc->r_info); |
255 | 343 | bfd_reloc->howto = elf64_kvx_howto_from_type (abfd, r_type); |
256 | | |
257 | 343 | if (bfd_reloc->howto == NULL) |
258 | 23 | { |
259 | | /* xgettext:c-format */ |
260 | 23 | _bfd_error_handler (_("%pB: unsupported relocation type %#x"), |
261 | 23 | abfd, r_type); |
262 | 23 | return false; |
263 | 23 | } |
264 | 320 | return true; |
265 | 343 | } |
266 | | |
267 | | static reloc_howto_type * |
268 | | elf64_kvx_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED, |
269 | | bfd_reloc_code_real_type code) |
270 | 0 | { |
271 | 0 | reloc_howto_type *howto = elf64_kvx_howto_from_bfd_reloc (code); |
272 | |
|
273 | 0 | if (howto != NULL) |
274 | 0 | return howto; |
275 | | |
276 | 0 | bfd_set_error (bfd_error_bad_value); |
277 | 0 | return NULL; |
278 | 0 | } |
279 | | |
280 | | static reloc_howto_type * |
281 | | elf64_kvx_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED, |
282 | | const char *r_name) |
283 | 0 | { |
284 | 0 | unsigned int i; |
285 | |
|
286 | 0 | for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i) |
287 | 0 | if (elf_kvx_howto_table[i].name != NULL |
288 | 0 | && strcasecmp (elf_kvx_howto_table[i].name, r_name) == 0) |
289 | 0 | return &elf_kvx_howto_table[i]; |
290 | | |
291 | 0 | return NULL; |
292 | 0 | } |
293 | | |
294 | | #define TARGET_LITTLE_SYM kvx_elf64_vec |
295 | | #define TARGET_LITTLE_NAME "elf64-kvx" |
296 | | |
297 | | /* The linker script knows the section names for placement. |
298 | | The entry_names are used to do simple name mangling on the stubs. |
299 | | Given a function name, and its type, the stub can be found. The |
300 | | name can be changed. The only requirement is that the %s be present. */ |
301 | 0 | #define STUB_ENTRY_NAME "__%s_veneer" |
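/* For example, the long-branch veneer for "printf" is given the local
   symbol name "__printf_veneer"; see the snprintf of output_name in
   elf64_kvx_size_stubs.  */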
302 | | |
303 | | /* The name of the dynamic interpreter. This is put in the .interp |
304 | | section. */ |
305 | 0 | #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1" |
306 | | |
307 | | |
308 | | /* PCREL 27 is sign-extended and scaled by 4. */ |
309 | | #define KVX_MAX_FWD_CALL_OFFSET \ |
310 | 0 | (((1 << 26) - 1) << 2) |
311 | | #define KVX_MAX_BWD_CALL_OFFSET \ |
312 | 0 | (-((1 << 26) << 2)) |
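/* Worked out: the immediate is a signed 27-bit word offset, so
     KVX_MAX_FWD_CALL_OFFSET = ((1 << 26) - 1) << 2 = 0x0ffffffc  (+256MB - 4)
     KVX_MAX_BWD_CALL_OFFSET = -((1 << 26) << 2)    = -0x10000000 (-256MB)
   i.e. roughly +/-256MB around the call site.  */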
313 | | |
314 | | /* Check that the destination of the call is within the PCREL27 |
315 | | range. */ |
316 | | static int |
317 | | kvx_valid_call_p (bfd_vma value, bfd_vma place) |
318 | 0 | { |
319 | 0 | bfd_signed_vma offset = (bfd_signed_vma) (value - place); |
320 | 0 | return (offset <= KVX_MAX_FWD_CALL_OFFSET |
321 | 0 | && offset >= KVX_MAX_BWD_CALL_OFFSET); |
322 | 0 | } |
323 | | |
324 | | /* Section name for stubs is the associated section name plus this |
325 | | string. */ |
326 | 0 | #define STUB_SUFFIX ".stub" |
327 | | |
328 | | enum elf_kvx_stub_type |
329 | | { |
330 | | kvx_stub_none, |
331 | | kvx_stub_long_branch, |
332 | | }; |
333 | | |
334 | | struct elf_kvx_stub_hash_entry |
335 | | { |
336 | | /* Base hash table entry structure. */ |
337 | | struct bfd_hash_entry root; |
338 | | |
339 | | /* The stub section. */ |
340 | | asection *stub_sec; |
341 | | |
342 | | /* Offset within stub_sec of the beginning of this stub. */ |
343 | | bfd_vma stub_offset; |
344 | | |
345 | | /* Given the symbol's value and its section we can determine its final |
346 | | value when building the stubs (so the stub knows where to jump). */ |
347 | | bfd_vma target_value; |
348 | | asection *target_section; |
349 | | |
350 | | enum elf_kvx_stub_type stub_type; |
351 | | |
352 | | /* The symbol table entry, if any, that this was derived from. */ |
353 | | struct elf_kvx_link_hash_entry *h; |
354 | | |
355 | | /* Destination symbol type. */ |
356 | | unsigned char st_type; |
357 | | |
358 | | /* Where this stub is being called from, or, in the case of combined |
359 | | stub sections, the first input section in the group. */ |
360 | | asection *id_sec; |
361 | | |
362 | | /* The name for the local symbol at the start of this stub. The |
363 | | stub name in the hash table has to be unique; this does not, so |
364 | | it can be friendlier. */ |
365 | | char *output_name; |
366 | | }; |
367 | | |
368 | | /* Used to build a map of a section. This is required for mixed-endian |
369 | | code/data. */ |
370 | | |
371 | | typedef struct elf_elf_section_map |
372 | | { |
373 | | bfd_vma vma; |
374 | | char type; |
375 | | } |
376 | | elf_kvx_section_map; |
377 | | |
378 | | |
379 | | typedef struct _kvx_elf_section_data |
380 | | { |
381 | | struct bfd_elf_section_data elf; |
382 | | unsigned int mapcount; |
383 | | unsigned int mapsize; |
384 | | elf_kvx_section_map *map; |
385 | | } |
386 | | _kvx_elf_section_data; |
387 | | |
388 | | #define elf_kvx_section_data(sec) \ |
389 | | ((_kvx_elf_section_data *) elf_section_data (sec)) |
390 | | |
391 | | struct elf_kvx_local_symbol |
392 | | { |
393 | | unsigned int got_type; |
394 | | bfd_signed_vma got_refcount; |
395 | | bfd_vma got_offset; |
396 | | }; |
397 | | |
398 | | struct elf_kvx_obj_tdata |
399 | | { |
400 | | struct elf_obj_tdata root; |
401 | | |
402 | | /* local symbol descriptors */ |
403 | | struct elf_kvx_local_symbol *locals; |
404 | | |
405 | | /* Zero to warn when linking objects with incompatible enum sizes. */ |
406 | | int no_enum_size_warning; |
407 | | |
408 | | /* Zero to warn when linking objects with incompatible wchar_t sizes. */ |
409 | | int no_wchar_size_warning; |
410 | | }; |
411 | | |
412 | | #define elf_kvx_tdata(bfd) \ |
413 | 0 | ((struct elf_kvx_obj_tdata *) (bfd)->tdata.any) |
414 | | |
415 | 0 | #define elf_kvx_locals(bfd) (elf_kvx_tdata (bfd)->locals) |
416 | | |
417 | | #define is_kvx_elf(bfd) \ |
418 | 0 | (bfd_get_flavour (bfd) == bfd_target_elf_flavour \ |
419 | 0 | && elf_tdata (bfd) != NULL \ |
420 | 0 | && elf_object_id (bfd) == KVX_ELF_DATA) |
421 | | |
422 | | static bool |
423 | | elf64_kvx_mkobject (bfd *abfd) |
424 | 264k | { |
425 | 264k | return bfd_elf_allocate_object (abfd, sizeof (struct elf_kvx_obj_tdata)); |
426 | 264k | } |
427 | | |
428 | | #define elf_kvx_hash_entry(ent) \ |
429 | 0 | ((struct elf_kvx_link_hash_entry *)(ent)) |
430 | | |
431 | 0 | #define GOT_UNKNOWN 0 |
432 | 0 | #define GOT_NORMAL 1 |
433 | | |
434 | 0 | #define GOT_TLS_GD 2 |
435 | 0 | #define GOT_TLS_IE 4 |
436 | 0 | #define GOT_TLS_LD 8 |
437 | | |
438 | | /* KVX ELF linker hash entry. */ |
439 | | struct elf_kvx_link_hash_entry |
440 | | { |
441 | | struct elf_link_hash_entry root; |
442 | | |
443 | | /* Since PLT entries have variable size, we need to record the |
444 | | index into .got.plt instead of recomputing it from the PLT |
445 | | offset. */ |
446 | | bfd_signed_vma plt_got_offset; |
447 | | |
448 | | /* Bit mask representing the type of GOT entry(s) if any required by |
449 | | this symbol. */ |
450 | | unsigned int got_type; |
451 | | |
452 | | /* A pointer to the most recently used stub hash entry against this |
453 | | symbol. */ |
454 | | struct elf_kvx_stub_hash_entry *stub_cache; |
455 | | }; |
456 | | |
457 | | /* Get the KVX elf linker hash table from a link_info structure. */ |
458 | | #define elf_kvx_hash_table(info) \ |
459 | 0 | ((struct elf_kvx_link_hash_table *) ((info)->hash)) |
460 | | |
461 | | #define kvx_stub_hash_lookup(table, string, create, copy) \ |
462 | 0 | ((struct elf_kvx_stub_hash_entry *) \ |
463 | 0 | bfd_hash_lookup ((table), (string), (create), (copy))) |
464 | | |
465 | | /* KVX ELF linker hash table. */ |
466 | | struct elf_kvx_link_hash_table |
467 | | { |
468 | | /* The main hash table. */ |
469 | | struct elf_link_hash_table root; |
470 | | |
471 | | /* Nonzero to force PIC branch veneers. */ |
472 | | int pic_veneer; |
473 | | |
474 | | /* The number of bytes in the initial entry in the PLT. */ |
475 | | bfd_size_type plt_header_size; |
476 | | |
477 | | /* The number of bytes in the subsequent PLT entries. */ |
478 | | bfd_size_type plt_entry_size; |
479 | | |
480 | | /* The bytes of the subsequent PLT entry. */ |
481 | | const bfd_byte *plt_entry; |
482 | | |
483 | | /* Short-cuts to get to dynamic linker sections. */ |
484 | | asection *sdynbss; |
485 | | asection *srelbss; |
486 | | |
487 | | /* Small local sym cache. */ |
488 | | struct sym_cache sym_cache; |
489 | | |
490 | | /* For convenience in allocate_dynrelocs. */ |
491 | | bfd *obfd; |
492 | | |
493 | | /* The amount of space used by the reserved portion of the sgotplt |
494 | | section, plus whatever space is used by the jump slots. */ |
495 | | bfd_vma sgotplt_jump_table_size; |
496 | | |
497 | | /* The stub hash table. */ |
498 | | struct bfd_hash_table stub_hash_table; |
499 | | |
500 | | /* Linker stub bfd. */ |
501 | | bfd *stub_bfd; |
502 | | |
503 | | /* Linker call-backs. */ |
504 | | asection *(*add_stub_section) (const char *, asection *); |
505 | | void (*layout_sections_again) (void); |
506 | | |
507 | | /* Array to keep track of which stub sections have been created, and |
508 | | information on stub grouping. */ |
509 | | struct map_stub |
510 | | { |
511 | | /* This is the section to which stubs in the group will be |
512 | | attached. */ |
513 | | asection *link_sec; |
514 | | /* The stub section. */ |
515 | | asection *stub_sec; |
516 | | } *stub_group; |
517 | | |
518 | | /* Assorted information used by elf64_kvx_size_stubs. */ |
519 | | unsigned int bfd_count; |
520 | | unsigned int top_index; |
521 | | asection **input_list; |
522 | | }; |
523 | | |
524 | | /* Create an entry in an KVX ELF linker hash table. */ |
525 | | |
526 | | static struct bfd_hash_entry * |
527 | | elf64_kvx_link_hash_newfunc (struct bfd_hash_entry *entry, |
528 | | struct bfd_hash_table *table, |
529 | | const char *string) |
530 | 0 | { |
531 | 0 | struct elf_kvx_link_hash_entry *ret = |
532 | 0 | (struct elf_kvx_link_hash_entry *) entry; |
533 | | |
534 | | /* Allocate the structure if it has not already been allocated by a |
535 | | subclass. */ |
536 | 0 | if (ret == NULL) |
537 | 0 | ret = bfd_hash_allocate (table, |
538 | 0 | sizeof (struct elf_kvx_link_hash_entry)); |
539 | 0 | if (ret == NULL) |
540 | 0 | return (struct bfd_hash_entry *) ret; |
541 | | |
542 | | /* Call the allocation method of the superclass. */ |
543 | 0 | ret = ((struct elf_kvx_link_hash_entry *) |
544 | 0 | _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret, |
545 | 0 | table, string)); |
546 | 0 | if (ret != NULL) |
547 | 0 | { |
548 | 0 | ret->got_type = GOT_UNKNOWN; |
549 | 0 | ret->plt_got_offset = (bfd_vma) - 1; |
550 | 0 | ret->stub_cache = NULL; |
551 | 0 | } |
552 | |
|
553 | 0 | return (struct bfd_hash_entry *) ret; |
554 | 0 | } |
555 | | |
556 | | /* Initialize an entry in the stub hash table. */ |
557 | | |
558 | | static struct bfd_hash_entry * |
559 | | stub_hash_newfunc (struct bfd_hash_entry *entry, |
560 | | struct bfd_hash_table *table, const char *string) |
561 | 0 | { |
562 | | /* Allocate the structure if it has not already been allocated by a |
563 | | subclass. */ |
564 | 0 | if (entry == NULL) |
565 | 0 | { |
566 | 0 | entry = bfd_hash_allocate (table, |
567 | 0 | sizeof (struct |
568 | 0 | elf_kvx_stub_hash_entry)); |
569 | 0 | if (entry == NULL) |
570 | 0 | return entry; |
571 | 0 | } |
572 | | |
573 | | /* Call the allocation method of the superclass. */ |
574 | 0 | entry = bfd_hash_newfunc (entry, table, string); |
575 | 0 | if (entry != NULL) |
576 | 0 | { |
577 | 0 | struct elf_kvx_stub_hash_entry *eh; |
578 | | |
579 | | /* Initialize the local fields. */ |
580 | 0 | eh = (struct elf_kvx_stub_hash_entry *) entry; |
581 | 0 | eh->stub_sec = NULL; |
582 | 0 | eh->stub_offset = 0; |
583 | 0 | eh->target_value = 0; |
584 | 0 | eh->target_section = NULL; |
585 | 0 | eh->stub_type = kvx_stub_none; |
586 | 0 | eh->h = NULL; |
587 | 0 | eh->id_sec = NULL; |
588 | 0 | } |
589 | |
|
590 | 0 | return entry; |
591 | 0 | } |
592 | | |
593 | | /* Copy the extra info we tack onto an elf_link_hash_entry. */ |
594 | | |
595 | | static void |
596 | | elf64_kvx_copy_indirect_symbol (struct bfd_link_info *info, |
597 | | struct elf_link_hash_entry *dir, |
598 | | struct elf_link_hash_entry *ind) |
599 | 0 | { |
600 | 0 | struct elf_kvx_link_hash_entry *edir, *eind; |
601 | |
|
602 | 0 | edir = (struct elf_kvx_link_hash_entry *) dir; |
603 | 0 | eind = (struct elf_kvx_link_hash_entry *) ind; |
604 | |
|
605 | 0 | if (ind->root.type == bfd_link_hash_indirect) |
606 | 0 | { |
607 | | /* Copy over PLT info. */ |
608 | 0 | if (dir->got.refcount <= 0) |
609 | 0 | { |
610 | 0 | edir->got_type = eind->got_type; |
611 | 0 | eind->got_type = GOT_UNKNOWN; |
612 | 0 | } |
613 | 0 | } |
614 | |
|
615 | 0 | _bfd_elf_link_hash_copy_indirect (info, dir, ind); |
616 | 0 | } |
617 | | |
618 | | /* Destroy a KVX elf linker hash table. */ |
619 | | |
620 | | static void |
621 | | elf64_kvx_link_hash_table_free (bfd *obfd) |
622 | 0 | { |
623 | 0 | struct elf_kvx_link_hash_table *ret |
624 | 0 | = (struct elf_kvx_link_hash_table *) obfd->link.hash; |
625 | |
|
626 | 0 | bfd_hash_table_free (&ret->stub_hash_table); |
627 | 0 | _bfd_elf_link_hash_table_free (obfd); |
628 | 0 | } |
629 | | |
630 | | /* Create a KVX elf linker hash table. */ |
631 | | |
632 | | static struct bfd_link_hash_table * |
633 | | elf64_kvx_link_hash_table_create (bfd *abfd) |
634 | 0 | { |
635 | 0 | struct elf_kvx_link_hash_table *ret; |
636 | 0 | bfd_size_type amt = sizeof (struct elf_kvx_link_hash_table); |
637 | |
|
638 | 0 | ret = bfd_zmalloc (amt); |
639 | 0 | if (ret == NULL) |
640 | 0 | return NULL; |
641 | | |
642 | 0 | if (!_bfd_elf_link_hash_table_init |
643 | 0 | (&ret->root, abfd, elf64_kvx_link_hash_newfunc, |
644 | 0 | sizeof (struct elf_kvx_link_hash_entry))) |
645 | 0 | { |
646 | 0 | free (ret); |
647 | 0 | return NULL; |
648 | 0 | } |
649 | | |
650 | 0 | ret->plt_header_size = PLT_ENTRY_SIZE; |
651 | 0 | ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE; |
652 | 0 | ret->plt_entry = elf64_kvx_small_plt_entry; |
653 | |
|
654 | 0 | ret->obfd = abfd; |
655 | |
|
656 | 0 | if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc, |
657 | 0 | sizeof (struct elf_kvx_stub_hash_entry))) |
658 | 0 | { |
659 | 0 | _bfd_elf_link_hash_table_free (abfd); |
660 | 0 | return NULL; |
661 | 0 | } |
662 | | |
663 | 0 | ret->root.root.hash_table_free = elf64_kvx_link_hash_table_free; |
664 | |
|
665 | 0 | return &ret->root.root; |
666 | 0 | } |
667 | | |
668 | | static bfd_reloc_status_type |
669 | | kvx_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section, |
670 | | bfd_vma offset, bfd_vma value) |
671 | 0 | { |
672 | 0 | reloc_howto_type *howto; |
673 | |
|
674 | 0 | howto = elf64_kvx_howto_from_type (input_bfd, r_type); |
675 | 0 | r_type = elf64_kvx_bfd_reloc_from_type (input_bfd, r_type); |
676 | 0 | return _bfd_kvx_elf_put_addend (input_bfd, |
677 | 0 | input_section->contents + offset, r_type, |
678 | 0 | howto, value); |
679 | 0 | } |
680 | | |
681 | | /* Determine the type of stub needed, if any, for a call. */ |
682 | | |
683 | | static enum elf_kvx_stub_type |
684 | | kvx_type_of_stub (asection *input_sec, |
685 | | const Elf_Internal_Rela *rel, |
686 | | asection *sym_sec, |
687 | | unsigned char st_type, |
688 | | bfd_vma destination) |
689 | 0 | { |
690 | 0 | bfd_vma location; |
691 | 0 | bfd_signed_vma branch_offset; |
692 | 0 | unsigned int r_type; |
693 | 0 | enum elf_kvx_stub_type stub_type = kvx_stub_none; |
694 | |
|
695 | 0 | if (st_type != STT_FUNC |
696 | 0 | && (sym_sec == input_sec)) |
697 | 0 | return stub_type; |
698 | | |
699 | | /* Determine where the call point is. */ |
700 | 0 | location = (input_sec->output_offset |
701 | 0 | + input_sec->output_section->vma + rel->r_offset); |
702 | |
|
703 | 0 | branch_offset = (bfd_signed_vma) (destination - location); |
704 | |
|
705 | 0 | r_type = ELF64_R_TYPE (rel->r_info); |
706 | | |
707 | | /* We don't want to redirect any old unconditional jump in this way, |
708 | | only one which is being used for a sibcall, where it is |
709 | | acceptable for the R16 and R17 registers to be clobbered. */ |
710 | 0 | if (r_type == R_KVX_PCREL27 |
711 | 0 | && (branch_offset > KVX_MAX_FWD_CALL_OFFSET |
712 | 0 | || branch_offset < KVX_MAX_BWD_CALL_OFFSET)) |
713 | 0 | { |
714 | 0 | stub_type = kvx_stub_long_branch; |
715 | 0 | } |
716 | |
|
717 | 0 | return stub_type; |
718 | 0 | } |
719 | | |
720 | | /* Build a name for an entry in the stub hash table. */ |
721 | | |
722 | | static char * |
723 | | elf64_kvx_stub_name (const asection *input_section, |
724 | | const asection *sym_sec, |
725 | | const struct elf_kvx_link_hash_entry *hash, |
726 | | const Elf_Internal_Rela *rel) |
727 | 0 | { |
728 | 0 | char *stub_name; |
729 | 0 | bfd_size_type len; |
730 | |
|
731 | 0 | if (hash) |
732 | 0 | { |
733 | 0 | len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1; |
734 | 0 | stub_name = bfd_malloc (len); |
735 | 0 | if (stub_name != NULL) |
736 | 0 | snprintf (stub_name, len, "%08x_%s+%" PRIx64 "x", |
737 | 0 | (unsigned int) input_section->id, |
738 | 0 | hash->root.root.root.string, |
739 | 0 | (uint64_t) rel->r_addend); |
740 | 0 | } |
741 | 0 | else |
742 | 0 | { |
743 | 0 | len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1; |
744 | 0 | stub_name = bfd_malloc (len); |
745 | 0 | if (stub_name != NULL) |
746 | 0 | snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64 "x", |
747 | 0 | (unsigned int) input_section->id, |
748 | 0 | (unsigned int) sym_sec->id, |
749 | 0 | (unsigned int) ELF64_R_SYM (rel->r_info), |
750 | 0 | (uint64_t) rel->r_addend); |
751 | 0 | } |
752 | |
|
753 | 0 | return stub_name; |
754 | 0 | } |
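/* The generated names embed the id of the (group) input section plus
   either the global symbol name or, for local symbols, the defining
   section's id and the symbol index, and finally the addend; two calls
   to the same target from different stub groups therefore hash to
   distinct stub entries.  */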
755 | | |
756 | | /* Return true if symbol H should be hashed in the `.gnu.hash' section. For |
757 | | executable PLT slots where the executable never takes the address of those |
758 | | functions, the function symbols are not added to the hash table. */ |
759 | | |
760 | | static bool |
761 | | elf_kvx_hash_symbol (struct elf_link_hash_entry *h) |
762 | 0 | { |
763 | 0 | if (h->plt.offset != (bfd_vma) -1 |
764 | 0 | && !h->def_regular |
765 | 0 | && !h->pointer_equality_needed) |
766 | 0 | return false; |
767 | | |
768 | 0 | return _bfd_elf_hash_symbol (h); |
769 | 0 | } |
770 | | |
771 | | |
772 | | /* Look up an entry in the stub hash. Stub entries are cached because |
773 | | creating the stub name takes a bit of time. */ |
774 | | |
775 | | static struct elf_kvx_stub_hash_entry * |
776 | | elf64_kvx_get_stub_entry (const asection *input_section, |
777 | | const asection *sym_sec, |
778 | | struct elf_link_hash_entry *hash, |
779 | | const Elf_Internal_Rela *rel, |
780 | | struct elf_kvx_link_hash_table *htab) |
781 | 0 | { |
782 | 0 | struct elf_kvx_stub_hash_entry *stub_entry; |
783 | 0 | struct elf_kvx_link_hash_entry *h = |
784 | 0 | (struct elf_kvx_link_hash_entry *) hash; |
785 | 0 | const asection *id_sec; |
786 | |
|
787 | 0 | if ((input_section->flags & SEC_CODE) == 0) |
788 | 0 | return NULL; |
789 | | |
790 | | /* If this input section is part of a group of sections sharing one |
791 | | stub section, then use the id of the first section in the group. |
792 | | Stub names need to include a section id, as there may well be |
793 | | more than one stub used to reach say, printf, and we need to |
794 | | distinguish between them. */ |
795 | 0 | id_sec = htab->stub_group[input_section->id].link_sec; |
796 | |
|
797 | 0 | if (h != NULL && h->stub_cache != NULL |
798 | 0 | && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec) |
799 | 0 | { |
800 | 0 | stub_entry = h->stub_cache; |
801 | 0 | } |
802 | 0 | else |
803 | 0 | { |
804 | 0 | char *stub_name; |
805 | |
|
806 | 0 | stub_name = elf64_kvx_stub_name (id_sec, sym_sec, h, rel); |
807 | 0 | if (stub_name == NULL) |
808 | 0 | return NULL; |
809 | | |
810 | 0 | stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table, |
811 | 0 | stub_name, false, false); |
812 | 0 | if (h != NULL) |
813 | 0 | h->stub_cache = stub_entry; |
814 | |
|
815 | 0 | free (stub_name); |
816 | 0 | } |
817 | | |
818 | 0 | return stub_entry; |
819 | 0 | } |
820 | | |
821 | | |
822 | | /* Create a stub section. */ |
823 | | |
824 | | static asection * |
825 | | _bfd_kvx_create_stub_section (asection *section, |
826 | | struct elf_kvx_link_hash_table *htab) |
827 | | |
828 | 0 | { |
829 | 0 | size_t namelen; |
830 | 0 | bfd_size_type len; |
831 | 0 | char *s_name; |
832 | |
|
833 | 0 | namelen = strlen (section->name); |
834 | 0 | len = namelen + sizeof (STUB_SUFFIX); |
835 | 0 | s_name = bfd_alloc (htab->stub_bfd, len); |
836 | 0 | if (s_name == NULL) |
837 | 0 | return NULL; |
838 | | |
839 | 0 | memcpy (s_name, section->name, namelen); |
840 | 0 | memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX)); |
841 | 0 | return (*htab->add_stub_section) (s_name, section); |
842 | 0 | } |
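/* So a link section named ".text" gets a companion ".text.stub"
   section, created next to it via the add_stub_section callback; the
   stubs of every input section grouped under that link section are
   collected there.  */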
843 | | |
844 | | |
845 | | /* Find or create a stub section for a link section. |
846 | | |
847 | | Find or create the stub section used to collect stubs attached to |
848 | | the specified link section. */ |
849 | | |
850 | | static asection * |
851 | | _bfd_kvx_get_stub_for_link_section (asection *link_section, |
852 | | struct elf_kvx_link_hash_table *htab) |
853 | 0 | { |
854 | 0 | if (htab->stub_group[link_section->id].stub_sec == NULL) |
855 | 0 | htab->stub_group[link_section->id].stub_sec |
856 | 0 | = _bfd_kvx_create_stub_section (link_section, htab); |
857 | 0 | return htab->stub_group[link_section->id].stub_sec; |
858 | 0 | } |
859 | | |
860 | | |
861 | | /* Find or create a stub section in the stub group for an input |
862 | | section. */ |
863 | | |
864 | | static asection * |
865 | | _bfd_kvx_create_or_find_stub_sec (asection *section, |
866 | | struct elf_kvx_link_hash_table *htab) |
867 | 0 | { |
868 | 0 | asection *link_sec = htab->stub_group[section->id].link_sec; |
869 | 0 | return _bfd_kvx_get_stub_for_link_section (link_sec, htab); |
870 | 0 | } |
871 | | |
872 | | |
873 | | /* Add a new stub entry in the stub group associated with an input |
874 | | section to the stub hash. Not all fields of the new stub entry are |
875 | | initialised. */ |
876 | | |
877 | | static struct elf_kvx_stub_hash_entry * |
878 | | _bfd_kvx_add_stub_entry_in_group (const char *stub_name, |
879 | | asection *section, |
880 | | struct elf_kvx_link_hash_table *htab) |
881 | 0 | { |
882 | 0 | asection *link_sec; |
883 | 0 | asection *stub_sec; |
884 | 0 | struct elf_kvx_stub_hash_entry *stub_entry; |
885 | |
|
886 | 0 | link_sec = htab->stub_group[section->id].link_sec; |
887 | 0 | stub_sec = _bfd_kvx_create_or_find_stub_sec (section, htab); |
888 | | |
889 | | /* Enter this entry into the linker stub hash table. */ |
890 | 0 | stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table, stub_name, |
891 | 0 | true, false); |
892 | 0 | if (stub_entry == NULL) |
893 | 0 | { |
894 | | /* xgettext:c-format */ |
895 | 0 | _bfd_error_handler (_("%pB: cannot create stub entry %s"), |
896 | 0 | section->owner, stub_name); |
897 | 0 | return NULL; |
898 | 0 | } |
899 | | |
900 | 0 | stub_entry->stub_sec = stub_sec; |
901 | 0 | stub_entry->stub_offset = 0; |
902 | 0 | stub_entry->id_sec = link_sec; |
903 | |
|
904 | 0 | return stub_entry; |
905 | 0 | } |
906 | | |
907 | | static bool |
908 | | kvx_build_one_stub (struct bfd_hash_entry *gen_entry, |
909 | | void *in_arg) |
910 | 0 | { |
911 | 0 | struct elf_kvx_stub_hash_entry *stub_entry; |
912 | 0 | asection *stub_sec; |
913 | 0 | bfd *stub_bfd; |
914 | 0 | bfd_byte *loc; |
915 | 0 | bfd_vma sym_value; |
916 | 0 | unsigned int template_size; |
917 | 0 | const uint32_t *template; |
918 | 0 | unsigned int i; |
919 | 0 | struct bfd_link_info *info; |
920 | | |
921 | | /* Massage our args to the form they really have. */ |
922 | 0 | stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry; |
923 | |
|
924 | 0 | info = (struct bfd_link_info *) in_arg; |
925 | | |
926 | | /* Fail if the target section could not be assigned to an output |
927 | | section. The user should fix his linker script. */ |
928 | 0 | if (stub_entry->target_section->output_section == NULL |
929 | 0 | && info->non_contiguous_regions) |
930 | 0 | info->callbacks->fatal (_("%P: Could not assign '%pA' to an output section. " |
931 | 0 | "Retry without " |
932 | 0 | "--enable-non-contiguous-regions.\n"), |
933 | 0 | stub_entry->target_section); |
934 | | |
935 | 0 | stub_sec = stub_entry->stub_sec; |
936 | | |
937 | | /* Make a note of the offset within the stubs for this entry. */ |
938 | 0 | stub_entry->stub_offset = stub_sec->size; |
939 | 0 | loc = stub_sec->contents + stub_entry->stub_offset; |
940 | |
|
941 | 0 | stub_bfd = stub_sec->owner; |
942 | | |
943 | | /* This is the address of the stub destination. */ |
944 | 0 | sym_value = (stub_entry->target_value |
945 | 0 | + stub_entry->target_section->output_offset |
946 | 0 | + stub_entry->target_section->output_section->vma); |
947 | |
|
948 | 0 | switch (stub_entry->stub_type) |
949 | 0 | { |
950 | 0 | case kvx_stub_long_branch: |
951 | 0 | template = elf64_kvx_long_branch_stub; |
952 | 0 | template_size = sizeof (elf64_kvx_long_branch_stub); |
953 | 0 | break; |
954 | 0 | default: |
955 | 0 | abort (); |
956 | 0 | } |
957 | | |
958 | 0 | for (i = 0; i < (template_size / sizeof template[0]); i++) |
959 | 0 | { |
960 | 0 | bfd_putl32 (template[i], loc); |
961 | 0 | loc += 4; |
962 | 0 | } |
963 | |
|
964 | 0 | stub_sec->size += template_size; |
965 | |
|
966 | 0 | switch (stub_entry->stub_type) |
967 | 0 | { |
968 | 0 | case kvx_stub_long_branch: |
969 | | /* The stub uses a make insn with 43bits immediate. |
970 | | We need to apply 3 relocations: |
971 | | BFD_RELOC_KVX_S43_LO10, |
972 | | BFD_RELOC_KVX_S43_UP27, |
973 | | BFD_RELOC_KVX_S43_EX6. */ |
974 | 0 | if (kvx_relocate (R_KVX_S43_LO10, stub_bfd, stub_sec, |
975 | 0 | stub_entry->stub_offset, sym_value) != bfd_reloc_ok) |
976 | 0 | BFD_FAIL (); |
977 | 0 | if (kvx_relocate (R_KVX_S43_EX6, stub_bfd, stub_sec, |
978 | 0 | stub_entry->stub_offset, sym_value) != bfd_reloc_ok) |
979 | 0 | BFD_FAIL (); |
980 | 0 | if (kvx_relocate (R_KVX_S43_UP27, stub_bfd, stub_sec, |
981 | 0 | stub_entry->stub_offset + 4, sym_value) != bfd_reloc_ok) |
982 | 0 | BFD_FAIL (); |
983 | 0 | break; |
984 | 0 | default: |
985 | 0 | abort (); |
986 | 0 | } |
987 | | |
988 | 0 | return true; |
989 | 0 | } |
990 | | |
991 | | /* As above, but don't actually build the stub. Just bump offset so |
992 | | we know stub section sizes. */ |
993 | | |
994 | | static bool |
995 | | kvx_size_one_stub (struct bfd_hash_entry *gen_entry, |
996 | | void *in_arg ATTRIBUTE_UNUSED) |
997 | 0 | { |
998 | 0 | struct elf_kvx_stub_hash_entry *stub_entry; |
999 | 0 | int size; |
1000 | | |
1001 | | /* Massage our args to the form they really have. */ |
1002 | 0 | stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry; |
1003 | |
|
1004 | 0 | switch (stub_entry->stub_type) |
1005 | 0 | { |
1006 | 0 | case kvx_stub_long_branch: |
1007 | 0 | size = sizeof (elf64_kvx_long_branch_stub); |
1008 | 0 | break; |
1009 | 0 | default: |
1010 | 0 | abort (); |
1011 | 0 | } |
1012 | | |
1013 | 0 | stub_entry->stub_sec->size += size; |
1014 | 0 | return true; |
1015 | 0 | } |
1016 | | |
1017 | | /* External entry points for sizing and building linker stubs. */ |
1018 | | |
1019 | | /* Set up various things so that we can make a list of input sections |
1020 | | for each output section included in the link. Returns -1 on error, |
1021 | | 0 when no stubs will be needed, and 1 on success. */ |
1022 | | |
1023 | | int |
1024 | | elf64_kvx_setup_section_lists (bfd *output_bfd, |
1025 | | struct bfd_link_info *info) |
1026 | 0 | { |
1027 | 0 | bfd *input_bfd; |
1028 | 0 | unsigned int bfd_count; |
1029 | 0 | unsigned int top_id, top_index; |
1030 | 0 | asection *section; |
1031 | 0 | asection **input_list, **list; |
1032 | 0 | bfd_size_type amt; |
1033 | 0 | struct elf_kvx_link_hash_table *htab = |
1034 | 0 | elf_kvx_hash_table (info); |
1035 | |
|
1036 | 0 | if (!is_elf_hash_table ((const struct bfd_link_hash_table *)htab)) |
1037 | 0 | return 0; |
1038 | | |
1039 | | /* Count the number of input BFDs and find the top input section id. */ |
1040 | 0 | for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0; |
1041 | 0 | input_bfd != NULL; input_bfd = input_bfd->link.next) |
1042 | 0 | { |
1043 | 0 | bfd_count += 1; |
1044 | 0 | for (section = input_bfd->sections; |
1045 | 0 | section != NULL; section = section->next) |
1046 | 0 | { |
1047 | 0 | if (top_id < section->id) |
1048 | 0 | top_id = section->id; |
1049 | 0 | } |
1050 | 0 | } |
1051 | 0 | htab->bfd_count = bfd_count; |
1052 | |
|
1053 | 0 | amt = sizeof (struct map_stub) * (top_id + 1); |
1054 | 0 | htab->stub_group = bfd_zmalloc (amt); |
1055 | 0 | if (htab->stub_group == NULL) |
1056 | 0 | return -1; |
1057 | | |
1058 | | /* We can't use output_bfd->section_count here to find the top output |
1059 | | section index as some sections may have been removed, and |
1060 | | _bfd_strip_section_from_output doesn't renumber the indices. */ |
1061 | 0 | for (section = output_bfd->sections, top_index = 0; |
1062 | 0 | section != NULL; section = section->next) |
1063 | 0 | { |
1064 | 0 | if (top_index < section->index) |
1065 | 0 | top_index = section->index; |
1066 | 0 | } |
1067 | |
|
1068 | 0 | htab->top_index = top_index; |
1069 | 0 | amt = sizeof (asection *) * (top_index + 1); |
1070 | 0 | input_list = bfd_malloc (amt); |
1071 | 0 | htab->input_list = input_list; |
1072 | 0 | if (input_list == NULL) |
1073 | 0 | return -1; |
1074 | | |
1075 | | /* For sections we aren't interested in, mark their entries with a |
1076 | | value we can check later. */ |
1077 | 0 | list = input_list + top_index; |
1078 | 0 | do |
1079 | 0 | *list = bfd_abs_section_ptr; |
1080 | 0 | while (list-- != input_list); |
1081 | |
|
1082 | 0 | for (section = output_bfd->sections; |
1083 | 0 | section != NULL; section = section->next) |
1084 | 0 | { |
1085 | 0 | if ((section->flags & SEC_CODE) != 0) |
1086 | 0 | input_list[section->index] = NULL; |
1087 | 0 | } |
1088 | |
|
1089 | 0 | return 1; |
1090 | 0 | } |
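/* On return, input_list[i] is bfd_abs_section_ptr for output sections
   that will never need stubs (non-code sections) and NULL for code
   output sections; elf64_kvx_next_input_section then threads each code
   output section's input sections onto that slot.  */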
1091 | | |
1092 | | /* Used by elf64_kvx_next_input_section and group_sections. */ |
1093 | 0 | #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec) |
1094 | | |
1095 | | /* The linker repeatedly calls this function for each input section, |
1096 | | in the order that input sections are linked into output sections. |
1097 | | Build lists of input sections to determine groupings between which |
1098 | | we may insert linker stubs. */ |
1099 | | |
1100 | | void |
1101 | | elf64_kvx_next_input_section (struct bfd_link_info *info, asection *isec) |
1102 | 0 | { |
1103 | 0 | struct elf_kvx_link_hash_table *htab = |
1104 | 0 | elf_kvx_hash_table (info); |
1105 | |
|
1106 | 0 | if (isec->output_section->index <= htab->top_index) |
1107 | 0 | { |
1108 | 0 | asection **list = htab->input_list + isec->output_section->index; |
1109 | |
|
1110 | 0 | if (*list != bfd_abs_section_ptr) |
1111 | 0 | { |
1112 | | /* Steal the link_sec pointer for our list. */ |
1113 | | /* This happens to make the list in reverse order, |
1114 | | which is what we want. */ |
1115 | 0 | PREV_SEC (isec) = *list; |
1116 | 0 | *list = isec; |
1117 | 0 | } |
1118 | 0 | } |
1119 | 0 | } |
1120 | | |
1121 | | /* See whether we can group stub sections together. Grouping stub |
1122 | | sections may result in fewer stubs. More importantly, we need to |
1123 | | put all .init* and .fini* stubs at the beginning of the .init or |
1124 | | .fini output sections respectively, because glibc splits the |
1125 | | _init and _fini functions into multiple parts. Putting a stub in |
1126 | | the middle of a function is not a good idea. */ |
1127 | | |
1128 | | static void |
1129 | | group_sections (struct elf_kvx_link_hash_table *htab, |
1130 | | bfd_size_type stub_group_size, |
1131 | | bool stubs_always_after_branch) |
1132 | 0 | { |
1133 | 0 | asection **list = htab->input_list; |
1134 | |
|
1135 | 0 | do |
1136 | 0 | { |
1137 | 0 | asection *tail = *list; |
1138 | 0 | asection *head; |
1139 | |
|
1140 | 0 | if (tail == bfd_abs_section_ptr) |
1141 | 0 | continue; |
1142 | | |
1143 | | /* Reverse the list: we must avoid placing stubs at the |
1144 | | beginning of the section because the beginning of the text |
1145 | | section may be required for an interrupt vector in bare metal |
1146 | | code. */ |
1147 | 0 | #define NEXT_SEC PREV_SEC |
1148 | 0 | head = NULL; |
1149 | 0 | while (tail != NULL) |
1150 | 0 | { |
1151 | | /* Pop from tail. */ |
1152 | 0 | asection *item = tail; |
1153 | 0 | tail = PREV_SEC (item); |
1154 | | |
1155 | | /* Push on head. */ |
1156 | 0 | NEXT_SEC (item) = head; |
1157 | 0 | head = item; |
1158 | 0 | } |
1159 | |
|
1160 | 0 | while (head != NULL) |
1161 | 0 | { |
1162 | 0 | asection *curr; |
1163 | 0 | asection *next; |
1164 | 0 | bfd_vma stub_group_start = head->output_offset; |
1165 | 0 | bfd_vma end_of_next; |
1166 | |
|
1167 | 0 | curr = head; |
1168 | 0 | while (NEXT_SEC (curr) != NULL) |
1169 | 0 | { |
1170 | 0 | next = NEXT_SEC (curr); |
1171 | 0 | end_of_next = next->output_offset + next->size; |
1172 | 0 | if (end_of_next - stub_group_start >= stub_group_size) |
1173 | | /* End of NEXT is too far from start, so stop. */ |
1174 | 0 | break; |
1175 | | /* Add NEXT to the group. */ |
1176 | 0 | curr = next; |
1177 | 0 | } |
1178 | | |
1179 | | /* OK, the size from the start to the start of CURR is less |
1180 | | than stub_group_size and thus can be handled by one stub |
1181 | | section. (Or the head section is itself larger than |
1182 | | stub_group_size, in which case we may be toast.) |
1183 | | We should really be keeping track of the total size of |
1184 | | stubs added here, as stubs contribute to the final output |
1185 | | section size. */ |
1186 | 0 | do |
1187 | 0 | { |
1188 | 0 | next = NEXT_SEC (head); |
1189 | | /* Set up this stub group. */ |
1190 | 0 | htab->stub_group[head->id].link_sec = curr; |
1191 | 0 | } |
1192 | 0 | while (head != curr && (head = next) != NULL); |
1193 | | |
1194 | | /* But wait, there's more! Input sections up to stub_group_size |
1195 | | bytes after the stub section can be handled by it too. */ |
1196 | 0 | if (!stubs_always_after_branch) |
1197 | 0 | { |
1198 | 0 | stub_group_start = curr->output_offset + curr->size; |
1199 | |
|
1200 | 0 | while (next != NULL) |
1201 | 0 | { |
1202 | 0 | end_of_next = next->output_offset + next->size; |
1203 | 0 | if (end_of_next - stub_group_start >= stub_group_size) |
1204 | | /* End of NEXT is too far from stubs, so stop. */ |
1205 | 0 | break; |
1206 | | /* Add NEXT to the stub group. */ |
1207 | 0 | head = next; |
1208 | 0 | next = NEXT_SEC (head); |
1209 | 0 | htab->stub_group[head->id].link_sec = curr; |
1210 | 0 | } |
1211 | 0 | } |
1212 | 0 | head = next; |
1213 | 0 | } |
1214 | 0 | } |
1215 | 0 | while (list++ != htab->input_list + htab->top_index); |
1216 | | |
1217 | 0 | free (htab->input_list); |
1218 | 0 | } |
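/* Illustration of the grouping, assuming three 100MB code sections A,
   B and C laid out back to back with the default 255MB group size:
   A and B form the first group (adding C would stretch the span to
   300MB), so both get link_sec = B and their stubs are collected after
   B; C starts within 255MB of the end of B, so it is attached to the
   same group as well and no new group is started.  */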
1219 | | |
1220 | | static void |
1221 | | _bfd_kvx_resize_stubs (struct elf_kvx_link_hash_table *htab) |
1222 | 0 | { |
1223 | 0 | asection *section; |
1224 | | |
1225 | | /* OK, we've added some stubs. Find out the new size of the |
1226 | | stub sections. */ |
1227 | 0 | for (section = htab->stub_bfd->sections; |
1228 | 0 | section != NULL; section = section->next) |
1229 | 0 | { |
1230 | | /* Ignore non-stub sections. */ |
1231 | 0 | if (!strstr (section->name, STUB_SUFFIX)) |
1232 | 0 | continue; |
1233 | 0 | section->size = 0; |
1234 | 0 | } |
1235 | |
|
1236 | 0 | bfd_hash_traverse (&htab->stub_hash_table, kvx_size_one_stub, htab); |
1237 | 0 | } |
1238 | | |
1239 | | /* Satisfy the ELF linker by filling in some fields in our fake bfd. */ |
1240 | | |
1241 | | bool |
1242 | | kvx_elf64_init_stub_bfd (struct bfd_link_info *info, |
1243 | | bfd *stub_bfd) |
1244 | 0 | { |
1245 | 0 | struct elf_kvx_link_hash_table *htab; |
1246 | |
|
1247 | 0 | elf_elfheader (stub_bfd)->e_ident[EI_CLASS] = ELFCLASS64; |
1248 | | |
1249 | | /* Always hook our dynamic sections into the first bfd, which is the |
1250 | | linker created stub bfd. This ensures that the GOT header is at |
1251 | | the start of the output TOC section. */ |
1252 | 0 | htab = elf_kvx_hash_table (info); |
1253 | 0 | if (htab == NULL) |
1254 | 0 | return false; |
1255 | | |
1256 | 0 | return true; |
1257 | 0 | } |
1258 | | |
1259 | | /* Determine and set the size of the stub section for a final link. |
1260 | | |
1261 | | The basic idea here is to examine all the relocations looking for |
1262 | | PC-relative calls to a target that is unreachable with a 27-bit |
1263 | | immediate (found in call and goto). */ |
1264 | | |
1265 | | bool |
1266 | | elf64_kvx_size_stubs (bfd *output_bfd, |
1267 | | bfd *stub_bfd, |
1268 | | struct bfd_link_info *info, |
1269 | | bfd_signed_vma group_size, |
1270 | | asection * (*add_stub_section) (const char *, |
1271 | | asection *), |
1272 | | void (*layout_sections_again) (void)) |
1273 | 0 | { |
1274 | 0 | bfd_size_type stub_group_size; |
1275 | 0 | bool stubs_always_before_branch; |
1276 | 0 | bool stub_changed = false; |
1277 | 0 | struct elf_kvx_link_hash_table *htab = elf_kvx_hash_table (info); |
1278 | | |
1279 | | /* Propagate mach to stub bfd, because it may not have been |
1280 | | finalized when we created stub_bfd. */ |
1281 | 0 | bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd), |
1282 | 0 | bfd_get_mach (output_bfd)); |
1283 | | |
1284 | | /* Stash our params away. */ |
1285 | 0 | htab->stub_bfd = stub_bfd; |
1286 | 0 | htab->add_stub_section = add_stub_section; |
1287 | 0 | htab->layout_sections_again = layout_sections_again; |
1288 | 0 | stubs_always_before_branch = group_size < 0; |
1289 | 0 | if (group_size < 0) |
1290 | 0 | stub_group_size = -group_size; |
1291 | 0 | else |
1292 | 0 | stub_group_size = group_size; |
1293 | |
|
1294 | 0 | if (stub_group_size == 1) |
1295 | 0 | { |
1296 | | /* Default values. */ |
1297 | | /* KVX branch range is +-256MB. The value used is 1MB less. */ |
1298 | 0 | stub_group_size = 255 * 1024 * 1024; |
1299 | 0 | } |
1300 | |
|
1301 | 0 | group_sections (htab, stub_group_size, stubs_always_before_branch); |
1302 | |
|
1303 | 0 | (*htab->layout_sections_again) (); |
1304 | |
|
1305 | 0 | while (1) |
1306 | 0 | { |
1307 | 0 | bfd *input_bfd; |
1308 | |
|
1309 | 0 | for (input_bfd = info->input_bfds; |
1310 | 0 | input_bfd != NULL; input_bfd = input_bfd->link.next) |
1311 | 0 | { |
1312 | 0 | Elf_Internal_Shdr *symtab_hdr; |
1313 | 0 | asection *section; |
1314 | 0 | Elf_Internal_Sym *local_syms = NULL; |
1315 | |
|
1316 | 0 | if (!is_kvx_elf (input_bfd) |
1317 | 0 | || (input_bfd->flags & BFD_LINKER_CREATED) != 0) |
1318 | 0 | continue; |
1319 | | |
1320 | | /* We'll need the symbol table in a second. */ |
1321 | 0 | symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; |
1322 | 0 | if (symtab_hdr->sh_info == 0) |
1323 | 0 | continue; |
1324 | | |
1325 | | /* Walk over each section attached to the input bfd. */ |
1326 | 0 | for (section = input_bfd->sections; |
1327 | 0 | section != NULL; section = section->next) |
1328 | 0 | { |
1329 | 0 | Elf_Internal_Rela *internal_relocs, *irelaend, *irela; |
1330 | | |
1331 | | /* If there aren't any relocs, then there's nothing more |
1332 | | to do. */ |
1333 | 0 | if ((section->flags & SEC_RELOC) == 0 |
1334 | 0 | || section->reloc_count == 0 |
1335 | 0 | || (section->flags & SEC_CODE) == 0) |
1336 | 0 | continue; |
1337 | | |
1338 | | /* If this section is a link-once section that will be |
1339 | | discarded, then don't create any stubs. */ |
1340 | 0 | if (section->output_section == NULL |
1341 | 0 | || section->output_section->owner != output_bfd) |
1342 | 0 | continue; |
1343 | | |
1344 | | /* Get the relocs. */ |
1345 | 0 | internal_relocs |
1346 | 0 | = _bfd_elf_link_read_relocs (input_bfd, section, NULL, |
1347 | 0 | NULL, info->keep_memory); |
1348 | 0 | if (internal_relocs == NULL) |
1349 | 0 | goto error_ret_free_local; |
1350 | | |
1351 | | /* Now examine each relocation. */ |
1352 | 0 | irela = internal_relocs; |
1353 | 0 | irelaend = irela + section->reloc_count; |
1354 | 0 | for (; irela < irelaend; irela++) |
1355 | 0 | { |
1356 | 0 | unsigned int r_type, r_indx; |
1357 | 0 | enum elf_kvx_stub_type stub_type; |
1358 | 0 | struct elf_kvx_stub_hash_entry *stub_entry; |
1359 | 0 | asection *sym_sec; |
1360 | 0 | bfd_vma sym_value; |
1361 | 0 | bfd_vma destination; |
1362 | 0 | struct elf_kvx_link_hash_entry *hash; |
1363 | 0 | const char *sym_name; |
1364 | 0 | char *stub_name; |
1365 | 0 | const asection *id_sec; |
1366 | 0 | unsigned char st_type; |
1367 | 0 | bfd_size_type len; |
1368 | |
|
1369 | 0 | r_type = ELF64_R_TYPE (irela->r_info); |
1370 | 0 | r_indx = ELF64_R_SYM (irela->r_info); |
1371 | |
|
1372 | 0 | if (r_type >= (unsigned int) R_KVX_end) |
1373 | 0 | { |
1374 | 0 | bfd_set_error (bfd_error_bad_value); |
1375 | 0 | error_ret_free_internal: |
1376 | 0 | if (elf_section_data (section)->relocs == NULL) |
1377 | 0 | free (internal_relocs); |
1378 | 0 | goto error_ret_free_local; |
1379 | 0 | } |
1380 | | |
1381 | | /* Only look for stubs on unconditional branch and |
1382 | | branch and link instructions. */ |
1383 | | /* This catches CALL and GOTO insns. */ |
1384 | 0 | if (r_type != (unsigned int) R_KVX_PCREL27) |
1385 | 0 | continue; |
1386 | | |
1387 | | /* Now determine the call target, its name, value, |
1388 | | section. */ |
1389 | 0 | sym_sec = NULL; |
1390 | 0 | sym_value = 0; |
1391 | 0 | destination = 0; |
1392 | 0 | hash = NULL; |
1393 | 0 | sym_name = NULL; |
1394 | 0 | if (r_indx < symtab_hdr->sh_info) |
1395 | 0 | { |
1396 | | /* It's a local symbol. */ |
1397 | 0 | Elf_Internal_Sym *sym; |
1398 | 0 | Elf_Internal_Shdr *hdr; |
1399 | |
|
1400 | 0 | if (local_syms == NULL) |
1401 | 0 | { |
1402 | 0 | local_syms |
1403 | 0 | = (Elf_Internal_Sym *) symtab_hdr->contents; |
1404 | 0 | if (local_syms == NULL) |
1405 | 0 | local_syms |
1406 | 0 | = bfd_elf_get_elf_syms (input_bfd, symtab_hdr, |
1407 | 0 | symtab_hdr->sh_info, 0, |
1408 | 0 | NULL, NULL, NULL); |
1409 | 0 | if (local_syms == NULL) |
1410 | 0 | goto error_ret_free_internal; |
1411 | 0 | } |
1412 | | |
1413 | 0 | sym = local_syms + r_indx; |
1414 | 0 | hdr = elf_elfsections (input_bfd)[sym->st_shndx]; |
1415 | 0 | sym_sec = hdr->bfd_section; |
1416 | 0 | if (!sym_sec) |
1417 | | /* This is an undefined symbol. It can never |
1418 | | be resolved. */ |
1419 | 0 | continue; |
1420 | | |
1421 | 0 | if (ELF_ST_TYPE (sym->st_info) != STT_SECTION) |
1422 | 0 | sym_value = sym->st_value; |
1423 | 0 | destination = (sym_value + irela->r_addend |
1424 | 0 | + sym_sec->output_offset |
1425 | 0 | + sym_sec->output_section->vma); |
1426 | 0 | st_type = ELF_ST_TYPE (sym->st_info); |
1427 | 0 | sym_name |
1428 | 0 | = bfd_elf_string_from_elf_section (input_bfd, |
1429 | 0 | symtab_hdr->sh_link, |
1430 | 0 | sym->st_name); |
1431 | 0 | } |
1432 | 0 | else |
1433 | 0 | { |
1434 | 0 | int e_indx; |
1435 | |
|
1436 | 0 | e_indx = r_indx - symtab_hdr->sh_info; |
1437 | 0 | hash = ((struct elf_kvx_link_hash_entry *) |
1438 | 0 | elf_sym_hashes (input_bfd)[e_indx]); |
1439 | |
|
1440 | 0 | while (hash->root.root.type == bfd_link_hash_indirect |
1441 | 0 | || hash->root.root.type == bfd_link_hash_warning) |
1442 | 0 | hash = ((struct elf_kvx_link_hash_entry *) |
1443 | 0 | hash->root.root.u.i.link); |
1444 | |
|
1445 | 0 | if (hash->root.root.type == bfd_link_hash_defined |
1446 | 0 | || hash->root.root.type == bfd_link_hash_defweak) |
1447 | 0 | { |
1448 | 0 | struct elf_kvx_link_hash_table *globals = |
1449 | 0 | elf_kvx_hash_table (info); |
1450 | 0 | sym_sec = hash->root.root.u.def.section; |
1451 | 0 | sym_value = hash->root.root.u.def.value; |
1452 | | /* For a destination in a shared library, |
1453 | | use the PLT stub as target address to |
1454 | | decide whether a branch stub is |
1455 | | needed. */ |
1456 | 0 | if (globals->root.splt != NULL && hash != NULL |
1457 | 0 | && hash->root.plt.offset != (bfd_vma) - 1) |
1458 | 0 | { |
1459 | 0 | sym_sec = globals->root.splt; |
1460 | 0 | sym_value = hash->root.plt.offset; |
1461 | 0 | if (sym_sec->output_section != NULL) |
1462 | 0 | destination = (sym_value |
1463 | 0 | + sym_sec->output_offset |
1464 | 0 | + sym_sec->output_section->vma); |
1465 | 0 | } |
1466 | 0 | else if (sym_sec->output_section != NULL) |
1467 | 0 | destination = (sym_value + irela->r_addend |
1468 | 0 | + sym_sec->output_offset |
1469 | 0 | + sym_sec->output_section->vma); |
1470 | 0 | } |
1471 | 0 | else if (hash->root.root.type == bfd_link_hash_undefined |
1472 | 0 | || (hash->root.root.type |
1473 | 0 | == bfd_link_hash_undefweak)) |
1474 | 0 | { |
1475 | | /* For a shared library, use the PLT stub as |
1476 | | target address to decide whether a long |
1477 | | branch stub is needed. |
1478 | | For absolute code, they cannot be handled. */ |
1479 | 0 | struct elf_kvx_link_hash_table *globals = |
1480 | 0 | elf_kvx_hash_table (info); |
1481 | |
|
1482 | 0 | if (globals->root.splt != NULL && hash != NULL |
1483 | 0 | && hash->root.plt.offset != (bfd_vma) - 1) |
1484 | 0 | { |
1485 | 0 | sym_sec = globals->root.splt; |
1486 | 0 | sym_value = hash->root.plt.offset; |
1487 | 0 | if (sym_sec->output_section != NULL) |
1488 | 0 | destination = (sym_value |
1489 | 0 | + sym_sec->output_offset |
1490 | 0 | + sym_sec->output_section->vma); |
1491 | 0 | } |
1492 | 0 | else |
1493 | 0 | continue; |
1494 | 0 | } |
1495 | 0 | else |
1496 | 0 | { |
1497 | 0 | bfd_set_error (bfd_error_bad_value); |
1498 | 0 | goto error_ret_free_internal; |
1499 | 0 | } |
1500 | 0 | st_type = ELF_ST_TYPE (hash->root.type); |
1501 | 0 | sym_name = hash->root.root.root.string; |
1502 | 0 | } |
1503 | | |
1504 | | /* Determine what (if any) linker stub is needed. */ |
1505 | 0 | stub_type = kvx_type_of_stub (section, irela, sym_sec, |
1506 | 0 | st_type, destination); |
1507 | 0 | if (stub_type == kvx_stub_none) |
1508 | 0 | continue; |
1509 | | |
1510 | | /* Support for grouping stub sections. */ |
1511 | 0 | id_sec = htab->stub_group[section->id].link_sec; |
1512 | | |
1513 | | /* Get the name of this stub. */ |
1514 | 0 | stub_name = elf64_kvx_stub_name (id_sec, sym_sec, hash, |
1515 | 0 | irela); |
1516 | 0 | if (!stub_name) |
1517 | 0 | goto error_ret_free_internal; |
1518 | | |
1519 | 0 | stub_entry = |
1520 | 0 | kvx_stub_hash_lookup (&htab->stub_hash_table, |
1521 | 0 | stub_name, false, false); |
1522 | 0 | if (stub_entry != NULL) |
1523 | 0 | { |
1524 | | /* The proper stub has already been created. */ |
1525 | 0 | free (stub_name); |
1526 | | /* Always update this stub's target since it may have |
1527 | | changed after layout. */ |
1528 | 0 | stub_entry->target_value = sym_value + irela->r_addend; |
1529 | 0 | continue; |
1530 | 0 | } |
1531 | | |
1532 | 0 | stub_entry = _bfd_kvx_add_stub_entry_in_group |
1533 | 0 | (stub_name, section, htab); |
1534 | 0 | if (stub_entry == NULL) |
1535 | 0 | { |
1536 | 0 | free (stub_name); |
1537 | 0 | goto error_ret_free_internal; |
1538 | 0 | } |
1539 | | |
1540 | 0 | stub_entry->target_value = sym_value + irela->r_addend; |
1541 | 0 | stub_entry->target_section = sym_sec; |
1542 | 0 | stub_entry->stub_type = stub_type; |
1543 | 0 | stub_entry->h = hash; |
1544 | 0 | stub_entry->st_type = st_type; |
1545 | |
|
1546 | 0 | if (sym_name == NULL) |
1547 | 0 | sym_name = "unnamed"; |
1548 | 0 | len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name); |
1549 | 0 | stub_entry->output_name = bfd_alloc (htab->stub_bfd, len); |
1550 | 0 | if (stub_entry->output_name == NULL) |
1551 | 0 | { |
1552 | 0 | free (stub_name); |
1553 | 0 | goto error_ret_free_internal; |
1554 | 0 | } |
1555 | | |
1556 | 0 | snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME, |
1557 | 0 | sym_name); |
1558 | |
|
1559 | 0 | stub_changed = true; |
1560 | 0 | } |
1561 | | |
1562 | | /* We're done with the internal relocs, free them. */ |
1563 | 0 | if (elf_section_data (section)->relocs == NULL) |
1564 | 0 | free (internal_relocs); |
1565 | 0 | } |
1566 | 0 | } |
1567 | | |
1568 | 0 | if (!stub_changed) |
1569 | 0 | break; |
1570 | | |
1571 | 0 | _bfd_kvx_resize_stubs (htab); |
1572 | | |
1573 | | /* Ask the linker to do its stuff. */ |
1574 | 0 | (*htab->layout_sections_again) (); |
1575 | 0 | stub_changed = false; |
1576 | 0 | } |
1577 | | |
1578 | 0 | return true; |
1579 | | |
1580 | 0 | error_ret_free_local: |
1581 | 0 | return false; |
1582 | |
|
1583 | 0 | } |
1584 | | |
1585 | | /* Build all the stubs associated with the current output file. The |
1586 | | stubs are kept in a hash table attached to the main linker hash |
1587 | | table. We also set up the .plt entries for statically linked PIC |
1588 | | functions here. This function is called via kvx_elf_finish in the |
1589 | | linker. */ |
1590 | | |
1591 | | bool |
1592 | | elf64_kvx_build_stubs (struct bfd_link_info *info) |
1593 | 0 | { |
1594 | 0 | asection *stub_sec; |
1595 | 0 | struct bfd_hash_table *table; |
1596 | 0 | struct elf_kvx_link_hash_table *htab; |
1597 | |
|
1598 | 0 | htab = elf_kvx_hash_table (info); |
1599 | |
|
1600 | 0 | for (stub_sec = htab->stub_bfd->sections; |
1601 | 0 | stub_sec != NULL; stub_sec = stub_sec->next) |
1602 | 0 | { |
1603 | 0 | bfd_size_type size; |
1604 | | |
1605 | | /* Ignore non-stub sections. */ |
1606 | 0 | if (!strstr (stub_sec->name, STUB_SUFFIX)) |
1607 | 0 | continue; |
1608 | | |
1609 | | /* Allocate memory to hold the linker stubs. */ |
1610 | 0 | size = stub_sec->size; |
1611 | 0 | stub_sec->contents = bfd_zalloc (htab->stub_bfd, size); |
1612 | 0 | if (stub_sec->contents == NULL && size != 0) |
1613 | 0 | return false; |
1614 | 0 | stub_sec->alloced = 1; |
1615 | 0 | stub_sec->size = 0; |
1616 | 0 | } |
1617 | | |
1618 | | /* Build the stubs as directed by the stub hash table. */ |
1619 | 0 | table = &htab->stub_hash_table; |
1620 | 0 | bfd_hash_traverse (table, kvx_build_one_stub, info); |
1621 | |
|
1622 | 0 | return true; |
1623 | 0 | } |
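elf64_kvx_build_stubs above relies on a size-then-build idiom: the stub sections were sized while stub entries were collected, the contents are zero-allocated, the size is reset to 0, and kvx_build_one_stub re-accumulates it while emitting each stub.  A minimal sketch of the same idiom, using a plain array in place of the BFD stub hash table (struct stub and build_stub_section are hypothetical names, not BFD API):

#include <stdlib.h>

struct stub { unsigned size; unsigned offset; };

/* Sketch only: the total size comes from a first pass; the second pass
   re-accumulates it while laying out each stub, as the real code does
   through stub_sec->size.  */
static int build_stub_section (struct stub *stubs, unsigned n,
                               unsigned char **contents, unsigned *size)
{
  unsigned total = 0, running = 0, i;

  for (i = 0; i < n; i++)
    total += stubs[i].size;                   /* pass 1: total size */

  *contents = calloc (total ? total : 1, 1);  /* like bfd_zalloc */
  if (*contents == NULL)
    return 0;

  for (i = 0; i < n; i++)                     /* pass 2: lay out stubs */
    {
      stubs[i].offset = running;
      /* the real code emits the branch instructions here */
      running += stubs[i].size;
    }

  *size = running;
  return 1;
}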
1624 | | |
1625 | | static bfd_vma |
1626 | | kvx_calculate_got_entry_vma (struct elf_link_hash_entry *h, |
1627 | | struct elf_kvx_link_hash_table |
1628 | | *globals, struct bfd_link_info *info, |
1629 | | bfd_vma value, bfd *output_bfd, |
1630 | | bool *unresolved_reloc_p) |
1631 | 0 | { |
1632 | 0 | bfd_vma off = (bfd_vma) - 1; |
1633 | 0 | asection *basegot = globals->root.sgot; |
1634 | 0 | bool dyn = globals->root.dynamic_sections_created; |
1635 | |
|
1636 | 0 | if (h != NULL) |
1637 | 0 | { |
1638 | 0 | BFD_ASSERT (basegot != NULL); |
1639 | 0 | off = h->got.offset; |
1640 | 0 | BFD_ASSERT (off != (bfd_vma) - 1); |
1641 | 0 | if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h) |
1642 | 0 | || (bfd_link_pic (info) |
1643 | 0 | && SYMBOL_REFERENCES_LOCAL (info, h)) |
1644 | 0 | || (ELF_ST_VISIBILITY (h->other) |
1645 | 0 | && h->root.type == bfd_link_hash_undefweak)) |
1646 | 0 | { |
1647 | | /* This is actually a static link, or it is a -Bsymbolic link |
1648 | | and the symbol is defined locally. We must initialize this |
1649 | | entry in the global offset table. Since the offset must |
1650 | | always be a multiple of 8 (4 in the case of ILP32), we use |
1651 | | the least significant bit to record whether we have |
1652 | | initialized it already. |
1653 | | When doing a dynamic link, we create a .rel(a).got relocation |
1654 | | entry to initialize the value. This is done in the |
1655 | | finish_dynamic_symbol routine. */ |
1656 | 0 | if ((off & 1) != 0) |
1657 | 0 | off &= ~1; |
1658 | 0 | else |
1659 | 0 | { |
1660 | 0 | bfd_put_64 (output_bfd, value, basegot->contents + off); |
1661 | 0 | h->got.offset |= 1; |
1662 | 0 | } |
1663 | 0 | } |
1664 | 0 | else |
1665 | 0 | *unresolved_reloc_p = false; |
1666 | 0 | } |
1667 | |
|
1668 | 0 | return off; |
1669 | 0 | } |
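kvx_calculate_got_entry_vma keeps its per-symbol state in h->got.offset itself: GOT offsets are always a multiple of GOT_ENTRY_SIZE, so bit 0 is free and records whether the entry has already been written.  A minimal sketch of that tag-bit idiom (the helper names are hypothetical):

#include <stdbool.h>
#include <stdint.h>

static inline bool     got_done_p (uint64_t off) { return (off & 1) != 0; }
static inline uint64_t got_offset (uint64_t off) { return off & ~(uint64_t) 1; }
static inline uint64_t got_mark   (uint64_t off) { return off | 1; }

/* Mirrors the body above: write the GOT word only the first time the
   entry is seen, then strip the tag before using the offset.  */
static uint64_t resolve_got (uint64_t *h_got_offset, uint64_t value,
                             void (*write_got_word) (uint64_t off, uint64_t val))
{
  if (!got_done_p (*h_got_offset))
    {
      write_got_word (got_offset (*h_got_offset), value);
      *h_got_offset = got_mark (*h_got_offset);
    }
  return got_offset (*h_got_offset);
}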
1670 | | |
1671 | | static unsigned int |
1672 | | kvx_reloc_got_type (bfd_reloc_code_real_type r_type) |
1673 | 0 | { |
1674 | 0 | switch (r_type) |
1675 | 0 | { |
1676 | | /* Extracted with: |
1677 | | awk 'match ($0, /HOWTO.*R_(KVX.*_GOT(OFF)?(64)?_.*),/,ary) \ |
1678 | | {print "case BFD_RELOC_" ary[1] ":";}' elfxx-kvxc.def */ |
1679 | 0 | case BFD_RELOC_KVX_S37_GOTOFF_LO10: |
1680 | 0 | case BFD_RELOC_KVX_S37_GOTOFF_UP27: |
1681 | |
|
1682 | 0 | case BFD_RELOC_KVX_S37_GOT_LO10: |
1683 | 0 | case BFD_RELOC_KVX_S37_GOT_UP27: |
1684 | |
|
1685 | 0 | case BFD_RELOC_KVX_S43_GOTOFF_LO10: |
1686 | 0 | case BFD_RELOC_KVX_S43_GOTOFF_UP27: |
1687 | 0 | case BFD_RELOC_KVX_S43_GOTOFF_EX6: |
1688 | |
|
1689 | 0 | case BFD_RELOC_KVX_S43_GOT_LO10: |
1690 | 0 | case BFD_RELOC_KVX_S43_GOT_UP27: |
1691 | 0 | case BFD_RELOC_KVX_S43_GOT_EX6: |
1692 | 0 | return GOT_NORMAL; |
1693 | | |
1694 | 0 | case BFD_RELOC_KVX_S37_TLS_GD_LO10: |
1695 | 0 | case BFD_RELOC_KVX_S37_TLS_GD_UP27: |
1696 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_LO10: |
1697 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_UP27: |
1698 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_EX6: |
1699 | 0 | return GOT_TLS_GD; |
1700 | | |
1701 | 0 | case BFD_RELOC_KVX_S37_TLS_LD_LO10: |
1702 | 0 | case BFD_RELOC_KVX_S37_TLS_LD_UP27: |
1703 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_LO10: |
1704 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_UP27: |
1705 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_EX6: |
1706 | 0 | return GOT_TLS_LD; |
1707 | | |
1708 | 0 | case BFD_RELOC_KVX_S37_TLS_IE_LO10: |
1709 | 0 | case BFD_RELOC_KVX_S37_TLS_IE_UP27: |
1710 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_LO10: |
1711 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_UP27: |
1712 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_EX6: |
1713 | 0 | return GOT_TLS_IE; |
1714 | | |
1715 | 0 | default: |
1716 | 0 | break; |
1717 | 0 | } |
1718 | 0 | return GOT_UNKNOWN; |
1719 | 0 | } |
1720 | | |
1721 | | static bool |
1722 | | kvx_can_relax_tls (bfd *input_bfd ATTRIBUTE_UNUSED, |
1723 | | struct bfd_link_info *info ATTRIBUTE_UNUSED, |
1724 | | bfd_reloc_code_real_type r_type ATTRIBUTE_UNUSED, |
1725 | | struct elf_link_hash_entry *h ATTRIBUTE_UNUSED, |
1726 | | unsigned long r_symndx ATTRIBUTE_UNUSED) |
1727 | 0 | { |
1728 | 0 | if (! IS_KVX_TLS_RELAX_RELOC (r_type)) |
1729 | 0 | return false; |
1730 | | |
1731 | | /* Relaxing hook. Disabled on KVX. */ |
1732 | | /* See elfnn-aarch64.c */ |
1733 | 0 | return true; |
1734 | 0 | } |
1735 | | |
1736 | | /* Given the relocation code R_TYPE, return the relaxed bfd reloc |
1737 | | enumerator. */ |
1738 | | |
1739 | | static bfd_reloc_code_real_type |
1740 | | kvx_tls_transition (bfd *input_bfd, |
1741 | | struct bfd_link_info *info, |
1742 | | unsigned int r_type, |
1743 | | struct elf_link_hash_entry *h, |
1744 | | unsigned long r_symndx) |
1745 | 0 | { |
1746 | 0 | bfd_reloc_code_real_type bfd_r_type |
1747 | 0 | = elf64_kvx_bfd_reloc_from_type (input_bfd, r_type); |
1748 | |
|
1749 | 0 | if (! kvx_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx)) |
1750 | 0 | return bfd_r_type; |
1751 | | |
1752 | 0 | return bfd_r_type; |
1753 | 0 | } |
1754 | | |
1755 | | /* Return the base VMA address which should be subtracted from real addresses |
1756 | | when resolving R_KVX_*_TLS_GD_* and R_KVX_*_TLS_LD_* relocation. */ |
1757 | | |
1758 | | static bfd_vma |
1759 | | dtpoff_base (struct bfd_link_info *info) |
1760 | 0 | { |
1761 | | /* If tls_sec is NULL, we should have signalled an error already. */ |
1762 | 0 | BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL); |
1763 | 0 | return elf_hash_table (info)->tls_sec->vma; |
1764 | 0 | } |
1765 | | |
1766 | | /* Return the base VMA address which should be subtracted from real addresses |
1767 | | when resolving R_KVX_*_TLS_IE_* and R_KVX_*_TLS_LE_* relocations. */ |
1768 | | |
1769 | | static bfd_vma |
1770 | | tpoff_base (struct bfd_link_info *info) |
1771 | 0 | { |
1772 | 0 | struct elf_link_hash_table *htab = elf_hash_table (info); |
1773 | | |
1774 | | /* If tls_sec is NULL, we should have signalled an error already. */ |
1775 | 0 | BFD_ASSERT (htab->tls_sec != NULL); |
1776 | |
|
1777 | 0 | bfd_vma base = align_power ((bfd_vma) 0, |
1778 | 0 | htab->tls_sec->alignment_power); |
1779 | 0 | return htab->tls_sec->vma - base; |
1780 | 0 | } |
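dtpoff_base and tpoff_base give the value subtracted from a symbol's VMA to produce the module-relative (DTPOFF) and thread-pointer-relative (TPOFF) offsets used by the TLS relocations below.  Since align_power ((bfd_vma) 0, p) is always 0, both reduce on this target to subtracting the VMA of the TLS output section.  A minimal sketch (the helper names and tls_sec_vma are hypothetical):

#include <stdint.h>

/* DTPOFF: offset of the symbol within its module's TLS block (GD/LD).  */
static uint64_t dtp_offset (uint64_t sym_vma, uint64_t tls_sec_vma)
{
  return sym_vma - tls_sec_vma;
}

/* TPOFF: offset from the thread pointer (IE/LE).  Other targets add TCB
   size or alignment padding here; in this file the base is the same.  */
static uint64_t tp_offset (uint64_t sym_vma, uint64_t tls_sec_vma)
{
  return sym_vma - tls_sec_vma;
}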
1781 | | |
1782 | | static bfd_vma * |
1783 | | symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h, |
1784 | | unsigned long r_symndx) |
1785 | 0 | { |
1786 |  |   /* Return a pointer to the GOT offset slot for the symbol  |
1787 |  |      referred to by H, or for the local symbol R_SYMNDX.  */  |
1788 | 0 | if (h != NULL) |
1789 | 0 | return &h->got.offset; |
1790 | 0 | else |
1791 | 0 | { |
1792 | | /* local symbol */ |
1793 | 0 | struct elf_kvx_local_symbol *l; |
1794 | |
|
1795 | 0 | l = elf_kvx_locals (input_bfd); |
1796 | 0 | return &l[r_symndx].got_offset; |
1797 | 0 | } |
1798 | 0 | } |
1799 | | |
1800 | | static void |
1801 | | symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h, |
1802 | | unsigned long r_symndx) |
1803 | 0 | { |
1804 | 0 | bfd_vma *p; |
1805 | 0 | p = symbol_got_offset_ref (input_bfd, h, r_symndx); |
1806 | 0 | *p |= 1; |
1807 | 0 | } |
1808 | | |
1809 | | static int |
1810 | | symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h, |
1811 | | unsigned long r_symndx) |
1812 | 0 | { |
1813 | 0 | bfd_vma value; |
1814 | 0 | value = * symbol_got_offset_ref (input_bfd, h, r_symndx); |
1815 | 0 | return value & 1; |
1816 | 0 | } |
1817 | | |
1818 | | static bfd_vma |
1819 | | symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h, |
1820 | | unsigned long r_symndx) |
1821 | 0 | { |
1822 | 0 | bfd_vma value; |
1823 | 0 | value = * symbol_got_offset_ref (input_bfd, h, r_symndx); |
1824 | 0 | value &= ~1; |
1825 | 0 | return value; |
1826 | 0 | } |
1827 | | |
1828 | | /* N_ONES produces N one bits, without overflowing machine arithmetic. */ |
1829 | 0 | #define N_ONES(n) (((((bfd_vma) 1 << ((n) -1)) - 1) << 1) | 1) |
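N_ONES is written as two shifts so it stays defined when N equals the width of bfd_vma: a direct ((bfd_vma) 1 << n) - 1 would shift by the full type width for n == 64, which is undefined behaviour.  A minimal self-contained check of the same formula, with uint64_t standing in for bfd_vma:

#include <assert.h>
#include <stdint.h>

#define N_ONES_U64(n) (((((uint64_t) 1 << ((n) - 1)) - 1) << 1) | 1)

static void n_ones_demo (void)
{
  assert (N_ONES_U64 (1)  == 0x1);
  assert (N_ONES_U64 (37) == 0x1fffffffffULL);        /* 37 one bits */
  assert (N_ONES_U64 (64) == 0xffffffffffffffffULL);  /* no UB at 64 */
}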
1830 | | |
1831 |  | /* This is a copy/paste + modification from  |
1832 |  |    reloc.c:_bfd_relocate_contents.  Relocations are applied to 32-bit  |
1833 |  |    words, so all overflow checks will overflow for values wider than  |
1834 |  |    32 bits.  */  |
1835 | | static bfd_reloc_status_type |
1836 | | check_signed_overflow (enum complain_overflow complain_on_overflow, |
1837 | | bfd_reloc_code_real_type bfd_r_type, bfd *input_bfd, |
1838 | | bfd_vma relocation) |
1839 | 0 | { |
1840 | 0 | bfd_reloc_status_type flag = bfd_reloc_ok; |
1841 | 0 | bfd_vma addrmask, fieldmask, signmask, ss; |
1842 | 0 | bfd_vma a, b, sum; |
1843 | 0 | bfd_vma x = 0; |
1844 | | |
1845 |  |   /* These usually come from the howto struct.  As we don't check for  |
1846 |  |      values fitting in bitfields or in subparts of words, we set all  |
1847 |  |      of these so the value is checked as if the field started at the  |
1848 |  |      first bit.  */  |
1849 | 0 | unsigned int rightshift = 0; |
1850 | 0 | unsigned int bitpos = 0; |
1851 | 0 | unsigned int bitsize = 0; |
1852 | 0 | bfd_vma src_mask = -1; |
1853 | | |
1854 |  |   /* Only regular symbol relocations are checked here.  Other  |
1855 |  |      relocations (GOT, TLS) could be checked if the need is  |
1856 |  |      confirmed.  At the moment, we keep the previous behavior  |
1857 |  |      (i.e. unchecked) for those.  */  |
1858 | 0 | switch (bfd_r_type) |
1859 | 0 | { |
1860 | 0 | case BFD_RELOC_KVX_S37_LO10: |
1861 | 0 | case BFD_RELOC_KVX_S37_UP27: |
1862 | 0 | bitsize = 37; |
1863 | 0 | break; |
1864 | | |
1865 | 0 | case BFD_RELOC_KVX_S32_LO5: |
1866 | 0 | case BFD_RELOC_KVX_S32_UP27: |
1867 | 0 | bitsize = 32; |
1868 | 0 | break; |
1869 | | |
1870 | 0 | case BFD_RELOC_KVX_S43_LO10: |
1871 | 0 | case BFD_RELOC_KVX_S43_UP27: |
1872 | 0 | case BFD_RELOC_KVX_S43_EX6: |
1873 | 0 | bitsize = 43; |
1874 | 0 | break; |
1875 | | |
1876 | 0 | case BFD_RELOC_KVX_S64_LO10: |
1877 | 0 | case BFD_RELOC_KVX_S64_UP27: |
1878 | 0 | case BFD_RELOC_KVX_S64_EX27: |
1879 | 0 | bitsize = 64; |
1880 | 0 | break; |
1881 | | |
1882 | 0 | default: |
1883 | 0 | return bfd_reloc_ok; |
1884 | 0 | } |
1885 | | |
1886 | | /* direct copy/paste from reloc.c below */ |
1887 | | |
1888 | | /* Get the values to be added together. For signed and unsigned |
1889 | | relocations, we assume that all values should be truncated to |
1890 | | the size of an address. For bitfields, all the bits matter. |
1891 | | See also bfd_check_overflow. */ |
1892 | 0 | fieldmask = N_ONES (bitsize); |
1893 | 0 | signmask = ~fieldmask; |
1894 | 0 | addrmask = (N_ONES (bfd_arch_bits_per_address (input_bfd)) |
1895 | 0 | | (fieldmask << rightshift)); |
1896 | 0 | a = (relocation & addrmask) >> rightshift; |
1897 | 0 | b = (x & src_mask & addrmask) >> bitpos; |
1898 | 0 | addrmask >>= rightshift; |
1899 | |
|
1900 | 0 | switch (complain_on_overflow) |
1901 | 0 | { |
1902 | 0 | case complain_overflow_signed: |
1903 | | /* If any sign bits are set, all sign bits must be set. |
1904 | | That is, A must be a valid negative address after |
1905 | | shifting. */ |
1906 | 0 | signmask = ~(fieldmask >> 1); |
1907 | | /* Fall thru */ |
1908 | |
|
1909 | 0 | case complain_overflow_bitfield: |
1910 | | /* Much like the signed check, but for a field one bit |
1911 | | wider. We allow a bitfield to represent numbers in the |
1912 | | range -2**n to 2**n-1, where n is the number of bits in the |
1913 | | field. Note that when bfd_vma is 32 bits, a 32-bit reloc |
1914 | | can't overflow, which is exactly what we want. */ |
1915 | 0 | ss = a & signmask; |
1916 | 0 | if (ss != 0 && ss != (addrmask & signmask)) |
1917 | 0 | flag = bfd_reloc_overflow; |
1918 | | |
1919 | | /* We only need this next bit of code if the sign bit of B |
1920 | | is below the sign bit of A. This would only happen if |
1921 | | SRC_MASK had fewer bits than BITSIZE. Note that if |
1922 | | SRC_MASK has more bits than BITSIZE, we can get into |
1923 | | trouble; we would need to verify that B is in range, as |
1924 | | we do for A above. */ |
1925 | 0 | ss = ((~src_mask) >> 1) & src_mask; |
1926 | 0 | ss >>= bitpos; |
1927 | | |
1928 | | /* Set all the bits above the sign bit. */ |
1929 | 0 | b = (b ^ ss) - ss; |
1930 | | |
1931 | | /* Now we can do the addition. */ |
1932 | 0 | sum = a + b; |
1933 | | |
1934 | | /* See if the result has the correct sign. Bits above the |
1935 | | sign bit are junk now; ignore them. If the sum is |
1936 | | positive, make sure we did not have all negative inputs; |
1937 | | if the sum is negative, make sure we did not have all |
1938 | | positive inputs. The test below looks only at the sign |
1939 |  |          bits, and it is really just  |
1940 | | SIGN (A) == SIGN (B) && SIGN (A) != SIGN (SUM) |
1941 | | |
1942 | | We mask with addrmask here to explicitly allow an address |
1943 | | wrap-around. The Linux kernel relies on it, and it is |
1944 | | the only way to write assembler code which can run when |
1945 | | loaded at a location 0x80000000 away from the location at |
1946 | | which it is linked. */ |
1947 | 0 | if (((~(a ^ b)) & (a ^ sum)) & signmask & addrmask) |
1948 | 0 | flag = bfd_reloc_overflow; |
1949 | 0 | break; |
1950 | | |
1951 | 0 | case complain_overflow_unsigned: |
1952 | | /* Checking for an unsigned overflow is relatively easy: |
1953 | | trim the addresses and add, and trim the result as well. |
1954 | | Overflow is normally indicated when the result does not |
1955 | | fit in the field. However, we also need to consider the |
1956 | | case when, e.g., fieldmask is 0x7fffffff or smaller, an |
1957 | | input is 0x80000000, and bfd_vma is only 32 bits; then we |
1958 | | will get sum == 0, but there is an overflow, since the |
1959 | | inputs did not fit in the field. Instead of doing a |
1960 | | separate test, we can check for this by or-ing in the |
1961 | | operands when testing for the sum overflowing its final |
1962 | | field. */ |
1963 | 0 | sum = (a + b) & addrmask; |
1964 | 0 | if ((a | b | sum) & signmask) |
1965 | 0 | flag = bfd_reloc_overflow; |
1966 | 0 | break; |
1967 | | |
1968 | 0 | default: |
1969 | 0 | abort (); |
1970 | 0 | } |
1971 | 0 | return flag; |
1972 | 0 | } |
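For the plain S37/S32/S43/S64 relocations handled above, x is 0 and src_mask is all ones, so b is 0 and the complain_overflow_signed path reduces to asking whether the relocated value fits in a signed BITSIZE-bit field.  A minimal equivalent predicate (fits_signed_field is a hypothetical name):

#include <stdbool.h>
#include <stdint.h>

static bool fits_signed_field (int64_t value, unsigned bitsize)
{
  if (bitsize >= 64)
    return true;
  int64_t lo = -((int64_t) 1 << (bitsize - 1));
  int64_t hi =  ((int64_t) 1 << (bitsize - 1)) - 1;
  return value >= lo && value <= hi;
}

/* e.g. an S37 LO10/UP27 pair reaches +/- 2^36:
     fits_signed_field (0x0fffffffff, 37)  -> true
     fits_signed_field (0x1000000000, 37)  -> false  */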
1973 | | |
1974 | | /* Perform a relocation as part of a final link. */ |
1975 | | static bfd_reloc_status_type |
1976 | | elf64_kvx_final_link_relocate (reloc_howto_type *howto, |
1977 | | bfd *input_bfd, |
1978 | | bfd *output_bfd, |
1979 | | asection *input_section, |
1980 | | bfd_byte *contents, |
1981 | | Elf_Internal_Rela *rel, |
1982 | | bfd_vma value, |
1983 | | struct bfd_link_info *info, |
1984 | | asection *sym_sec, |
1985 | | struct elf_link_hash_entry *h, |
1986 | | bool *unresolved_reloc_p, |
1987 | | bool save_addend, |
1988 | | bfd_vma *saved_addend, |
1989 | | Elf_Internal_Sym *sym) |
1990 | 0 | { |
1991 | 0 | Elf_Internal_Shdr *symtab_hdr; |
1992 | 0 | unsigned int r_type = howto->type; |
1993 | 0 | bfd_reloc_code_real_type bfd_r_type |
1994 | 0 | = elf64_kvx_bfd_reloc_from_howto (howto); |
1995 | 0 | bfd_reloc_code_real_type new_bfd_r_type; |
1996 | 0 | unsigned long r_symndx; |
1997 | 0 | bfd_byte *hit_data = contents + rel->r_offset; |
1998 | 0 | bfd_vma place, off; |
1999 | 0 | bfd_vma addend; |
2000 | 0 | struct elf_kvx_link_hash_table *globals; |
2001 | 0 | bool weak_undef_p; |
2002 | 0 | asection *base_got; |
2003 | 0 | bfd_reloc_status_type rret = bfd_reloc_ok; |
2004 | 0 | bool resolved_to_zero; |
2005 | 0 | globals = elf_kvx_hash_table (info); |
2006 | |
|
2007 | 0 | symtab_hdr = &elf_symtab_hdr (input_bfd); |
2008 | |
|
2009 | 0 | BFD_ASSERT (is_kvx_elf (input_bfd)); |
2010 | |
|
2011 | 0 | r_symndx = ELF64_R_SYM (rel->r_info); |
2012 | | |
2013 | | /* It is possible to have linker relaxations on some TLS access |
2014 | | models. Update our information here. */ |
2015 | 0 | new_bfd_r_type = kvx_tls_transition (input_bfd, info, r_type, h, r_symndx); |
2016 | 0 | if (new_bfd_r_type != bfd_r_type) |
2017 | 0 | { |
2018 | 0 | bfd_r_type = new_bfd_r_type; |
2019 | 0 | howto = elf64_kvx_howto_from_bfd_reloc (bfd_r_type); |
2020 | 0 | BFD_ASSERT (howto != NULL); |
2021 | 0 | r_type = howto->type; |
2022 | 0 | } |
2023 | |
|
2024 | 0 | place = input_section->output_section->vma |
2025 | 0 | + input_section->output_offset + rel->r_offset; |
2026 | | |
2027 | | /* Get addend, accumulating the addend for consecutive relocs |
2028 | | which refer to the same offset. */ |
2029 | 0 | addend = saved_addend ? *saved_addend : 0; |
2030 | 0 | addend += rel->r_addend; |
2031 | |
|
2032 | 0 | weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak |
2033 | 0 | : bfd_is_und_section (sym_sec)); |
2034 | 0 | resolved_to_zero = (h != NULL |
2035 | 0 | && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h)); |
2036 | |
|
2037 | 0 | switch (bfd_r_type) |
2038 | 0 | { |
2039 | 0 | case BFD_RELOC_KVX_64: |
2040 | 0 | #if ARCH_SIZE == 64 |
2041 | 0 | case BFD_RELOC_KVX_32: |
2042 | 0 | #endif |
2043 | 0 | case BFD_RELOC_KVX_S37_LO10: |
2044 | 0 | case BFD_RELOC_KVX_S37_UP27: |
2045 | |
|
2046 | 0 | case BFD_RELOC_KVX_S32_LO5: |
2047 | 0 | case BFD_RELOC_KVX_S32_UP27: |
2048 | |
|
2049 | 0 | case BFD_RELOC_KVX_S43_LO10: |
2050 | 0 | case BFD_RELOC_KVX_S43_UP27: |
2051 | 0 | case BFD_RELOC_KVX_S43_EX6: |
2052 | |
|
2053 | 0 | case BFD_RELOC_KVX_S64_LO10: |
2054 | 0 | case BFD_RELOC_KVX_S64_UP27: |
2055 | 0 | case BFD_RELOC_KVX_S64_EX27: |
2056 | | /* When generating a shared library or PIE, these relocations |
2057 | | are copied into the output file to be resolved at run time. */ |
2058 | 0 | if (bfd_link_pic (info) |
2059 | 0 | && (input_section->flags & SEC_ALLOC) |
2060 | 0 | && (h == NULL |
2061 | 0 | || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT |
2062 | 0 | && !resolved_to_zero) |
2063 | 0 | || h->root.type != bfd_link_hash_undefweak)) |
2064 | 0 | { |
2065 | 0 | Elf_Internal_Rela outrel; |
2066 | 0 | bfd_byte *loc; |
2067 | 0 | bool skip, relocate; |
2068 | 0 | asection *sreloc; |
2069 | |
|
2070 | 0 | *unresolved_reloc_p = false; |
2071 | |
|
2072 | 0 | skip = false; |
2073 | 0 | relocate = false; |
2074 | |
|
2075 | 0 | outrel.r_addend = addend; |
2076 | 0 | outrel.r_offset = |
2077 | 0 | _bfd_elf_section_offset (output_bfd, info, input_section, |
2078 | 0 | rel->r_offset); |
2079 | 0 | if (outrel.r_offset == (bfd_vma) - 1) |
2080 | 0 | skip = true; |
2081 | 0 | else if (outrel.r_offset == (bfd_vma) - 2) |
2082 | 0 | { |
2083 | 0 | skip = true; |
2084 | 0 | relocate = true; |
2085 | 0 | } |
2086 | |
|
2087 | 0 | outrel.r_offset += (input_section->output_section->vma |
2088 | 0 | + input_section->output_offset); |
2089 | |
|
2090 | 0 | if (skip) |
2091 | 0 | memset (&outrel, 0, sizeof outrel); |
2092 | 0 | else if (h != NULL |
2093 | 0 | && h->dynindx != -1 |
2094 | 0 | && (!bfd_link_pic (info) || !info->symbolic |
2095 | 0 | || !h->def_regular)) |
2096 | 0 | outrel.r_info = ELF64_R_INFO (h->dynindx, r_type); |
2097 | 0 | else if (bfd_r_type == BFD_RELOC_KVX_32 |
2098 | 0 | || bfd_r_type == BFD_RELOC_KVX_64) |
2099 | 0 | { |
2100 | 0 | int symbol; |
2101 | | |
2102 | | /* On SVR4-ish systems, the dynamic loader cannot |
2103 | | relocate the text and data segments independently, |
2104 | | so the symbol does not matter. */ |
2105 | 0 | symbol = 0; |
2106 | 0 | outrel.r_info = ELF64_R_INFO (symbol, R_KVX_RELATIVE); |
2107 | 0 | outrel.r_addend += value; |
2108 | 0 | } |
2109 | 0 | else if (bfd_link_pic (info) && info->symbolic) |
2110 | 0 | { |
2111 | 0 | goto skip_because_pic; |
2112 | 0 | } |
2113 | 0 | else |
2114 | 0 | { |
2115 |  |       /* We may end up here from bad input code trying to  |
2116 |  |          insert relocations on symbols within code.  We do not  |
2117 |  |          want that currently, and such code should use GOT +  |
2118 |  |          KVX_32/64 relocs that translate into KVX_RELATIVE.  */  |
2119 | 0 | const char *name; |
2120 | 0 | if (h && h->root.root.string) |
2121 | 0 | name = h->root.root.string; |
2122 | 0 | else |
2123 | 0 | name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, |
2124 | 0 | NULL); |
2125 | |
|
2126 | 0 | (*_bfd_error_handler) |
2127 | | /* xgettext:c-format */ |
2128 | 0 | (_("%pB(%pA+%#" PRIx64 "): " |
2129 | 0 | "unresolvable %s relocation in section `%s'"), |
2130 | 0 | input_bfd, input_section, (uint64_t) rel->r_offset, howto->name, |
2131 | 0 | name); |
2132 | 0 | return bfd_reloc_notsupported; |
2133 | 0 | } |
2134 | | |
2135 | 0 | sreloc = elf_section_data (input_section)->sreloc; |
2136 | 0 | if (sreloc == NULL || sreloc->contents == NULL) |
2137 | 0 | return bfd_reloc_notsupported; |
2138 | | |
2139 | 0 | loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals); |
2140 | 0 | bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc); |
2141 | |
|
2142 | 0 | if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size) |
2143 | 0 | { |
2144 |  |       /* Sanity check that we previously allocated sufficient  |
2145 |  |          space in the relocation section for the number of  |
2146 |  |          relocations we actually want to emit.  */  |
2147 | 0 | abort (); |
2148 | 0 | } |
2149 | | |
2150 | | /* If this reloc is against an external symbol, we do not want to |
2151 | | fiddle with the addend. Otherwise, we need to include the symbol |
2152 | | value so that it becomes an addend for the dynamic reloc. */ |
2153 | 0 | if (!relocate) |
2154 | 0 | return bfd_reloc_ok; |
2155 | | |
2156 | 0 | rret = check_signed_overflow (complain_overflow_signed, bfd_r_type, |
2157 | 0 | input_bfd, value + addend); |
2158 | 0 | if (rret != bfd_reloc_ok) |
2159 | 0 | return rret; |
2160 | | |
2161 | 0 | return _bfd_final_link_relocate (howto, input_bfd, input_section, |
2162 | 0 | contents, rel->r_offset, value, |
2163 | 0 | addend); |
2164 | 0 | } |
2165 | | |
2166 | 0 | skip_because_pic: |
2167 | 0 | rret = check_signed_overflow (complain_overflow_signed, bfd_r_type, |
2168 | 0 | input_bfd, value + addend); |
2169 | 0 | if (rret != bfd_reloc_ok) |
2170 | 0 | return rret; |
2171 | | |
2172 | 0 | return _bfd_final_link_relocate (howto, input_bfd, input_section, |
2173 | 0 | contents, rel->r_offset, value, |
2174 | 0 | addend); |
2175 | 0 | break; |
2176 | | |
2177 | 0 | case BFD_RELOC_KVX_PCREL17: |
2178 | 0 | case BFD_RELOC_KVX_PCREL27: |
2179 | 0 | { |
2180 |  |       /* BCU insns are always first in a bundle, so there is no need  |
2181 |  |          to correct the address using the offset within the bundle.  */  |
2182 | |
|
2183 | 0 | asection *splt = globals->root.splt; |
2184 | 0 | bool via_plt_p = |
2185 | 0 | splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1; |
2186 | | |
2187 | | /* A call to an undefined weak symbol is converted to a jump to |
2188 | | the next instruction unless a PLT entry will be created. |
2189 | | The jump to the next instruction is optimized as a NOP. |
2190 | | Do the same for local undefined symbols. */ |
2191 | 0 | if (weak_undef_p && ! via_plt_p) |
2192 | 0 | { |
2193 | 0 | bfd_putl32 (INSN_NOP, hit_data); |
2194 | 0 | return bfd_reloc_ok; |
2195 | 0 | } |
2196 | | |
2197 | | /* If the call goes through a PLT entry, make sure to |
2198 | | check distance to the right destination address. */ |
2199 | 0 | if (via_plt_p) |
2200 | 0 | value = (splt->output_section->vma |
2201 | 0 | + splt->output_offset + h->plt.offset); |
2202 | | |
2203 | | /* Check if a stub has to be inserted because the destination |
2204 | | is too far away. */ |
2205 | 0 | struct elf_kvx_stub_hash_entry *stub_entry = NULL; |
2206 | | |
2207 |  |       /* If the target symbol is global and marked as a function, the  |
2208 |  |          relocation applies to a function call or a tail call.  In this  |
2209 |  |          situation we can veneer out-of-range branches.  The veneers  |
2210 |  |          use R16 and R17, hence cannot be used for arbitrary out-of-range  |
2211 |  |          branches that occur within the body of a function.  */  |
2212 | | |
2213 | | /* Check if a stub has to be inserted because the destination |
2214 | | is too far away. */ |
2215 | 0 | if (! kvx_valid_call_p (value, place)) |
2216 | 0 | { |
2217 | | /* The target is out of reach, so redirect the branch to |
2218 | | the local stub for this function. */ |
2219 | 0 | stub_entry = elf64_kvx_get_stub_entry (input_section, |
2220 | 0 | sym_sec, h, |
2221 | 0 | rel, globals); |
2222 | 0 | if (stub_entry != NULL) |
2223 | 0 | value = (stub_entry->stub_offset |
2224 | 0 | + stub_entry->stub_sec->output_offset |
2225 | 0 | + stub_entry->stub_sec->output_section->vma); |
2226 | | /* We have redirected the destination to stub entry address, |
2227 | | so ignore any addend record in the original rela entry. */ |
2228 | 0 | addend = 0; |
2229 | 0 | } |
2230 | 0 | } |
2231 | 0 | *unresolved_reloc_p = false; |
2232 | | |
2233 | | /* FALLTHROUGH */ |
2234 | | |
2235 |  |       /* PCREL 32 relocs are used in DWARF2 tables for exception handling.  */  |
2236 | 0 | case BFD_RELOC_KVX_32_PCREL: |
2237 | 0 | case BFD_RELOC_KVX_S64_PCREL_LO10: |
2238 | 0 | case BFD_RELOC_KVX_S64_PCREL_UP27: |
2239 | 0 | case BFD_RELOC_KVX_S64_PCREL_EX27: |
2240 | 0 | case BFD_RELOC_KVX_S37_PCREL_LO10: |
2241 | 0 | case BFD_RELOC_KVX_S37_PCREL_UP27: |
2242 | 0 | case BFD_RELOC_KVX_S43_PCREL_LO10: |
2243 | 0 | case BFD_RELOC_KVX_S43_PCREL_UP27: |
2244 | 0 | case BFD_RELOC_KVX_S43_PCREL_EX6: |
2245 | 0 | return _bfd_final_link_relocate (howto, input_bfd, input_section, |
2246 | 0 | contents, rel->r_offset, value, |
2247 | 0 | addend); |
2248 | 0 | break; |
2249 | | |
2250 | 0 | case BFD_RELOC_KVX_S37_TLS_LE_LO10: |
2251 | 0 | case BFD_RELOC_KVX_S37_TLS_LE_UP27: |
2252 | |
|
2253 | 0 | case BFD_RELOC_KVX_S43_TLS_LE_LO10: |
2254 | 0 | case BFD_RELOC_KVX_S43_TLS_LE_UP27: |
2255 | 0 | case BFD_RELOC_KVX_S43_TLS_LE_EX6: |
2256 | 0 | return _bfd_final_link_relocate (howto, input_bfd, input_section, |
2257 | 0 | contents, rel->r_offset, |
2258 | 0 | value - tpoff_base (info), addend); |
2259 | 0 | break; |
2260 | | |
2261 | 0 | case BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10: |
2262 | 0 | case BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27: |
2263 | |
|
2264 | 0 | case BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10: |
2265 | 0 | case BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27: |
2266 | 0 | case BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6: |
2267 | 0 | return _bfd_final_link_relocate (howto, input_bfd, input_section, |
2268 | 0 | contents, rel->r_offset, |
2269 | 0 | value - dtpoff_base (info), addend); |
2270 | | |
2271 | 0 | case BFD_RELOC_KVX_S37_TLS_GD_UP27: |
2272 | 0 | case BFD_RELOC_KVX_S37_TLS_GD_LO10: |
2273 | |
|
2274 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_UP27: |
2275 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_EX6: |
2276 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_LO10: |
2277 | |
|
2278 | 0 | case BFD_RELOC_KVX_S37_TLS_IE_UP27: |
2279 | 0 | case BFD_RELOC_KVX_S37_TLS_IE_LO10: |
2280 | |
|
2281 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_UP27: |
2282 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_EX6: |
2283 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_LO10: |
2284 | |
|
2285 | 0 | case BFD_RELOC_KVX_S37_TLS_LD_UP27: |
2286 | 0 | case BFD_RELOC_KVX_S37_TLS_LD_LO10: |
2287 | |
|
2288 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_UP27: |
2289 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_EX6: |
2290 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_LO10: |
2291 | |
|
2292 | 0 | if (globals->root.sgot == NULL) |
2293 | 0 | return bfd_reloc_notsupported; |
2294 | 0 | value = symbol_got_offset (input_bfd, h, r_symndx); |
2295 | |
|
2296 | 0 | _bfd_final_link_relocate (howto, input_bfd, input_section, |
2297 | 0 | contents, rel->r_offset, value, addend); |
2298 | 0 | *unresolved_reloc_p = false; |
2299 | 0 | break; |
2300 | | |
2301 | 0 | case BFD_RELOC_KVX_S37_GOTADDR_UP27: |
2302 | 0 | case BFD_RELOC_KVX_S37_GOTADDR_LO10: |
2303 | |
|
2304 | 0 | case BFD_RELOC_KVX_S43_GOTADDR_UP27: |
2305 | 0 | case BFD_RELOC_KVX_S43_GOTADDR_EX6: |
2306 | 0 | case BFD_RELOC_KVX_S43_GOTADDR_LO10: |
2307 | |
|
2308 | 0 | case BFD_RELOC_KVX_S64_GOTADDR_UP27: |
2309 | 0 | case BFD_RELOC_KVX_S64_GOTADDR_EX27: |
2310 | 0 | case BFD_RELOC_KVX_S64_GOTADDR_LO10: |
2311 | 0 | { |
2312 | 0 | if (globals->root.sgot == NULL) |
2313 | 0 | BFD_ASSERT (h != NULL); |
2314 | |
|
2315 | 0 | value = globals->root.sgot->output_section->vma |
2316 | 0 | + globals->root.sgot->output_offset; |
2317 | |
|
2318 | 0 | return _bfd_final_link_relocate (howto, input_bfd, input_section, |
2319 | 0 | contents, rel->r_offset, value, |
2320 | 0 | addend); |
2321 | 0 | } |
2322 | 0 | break; |
2323 | | |
2324 | 0 | case BFD_RELOC_KVX_S37_GOTOFF_LO10: |
2325 | 0 | case BFD_RELOC_KVX_S37_GOTOFF_UP27: |
2326 | |
|
2327 | 0 | case BFD_RELOC_KVX_32_GOTOFF: |
2328 | 0 | case BFD_RELOC_KVX_64_GOTOFF: |
2329 | |
|
2330 | 0 | case BFD_RELOC_KVX_S43_GOTOFF_LO10: |
2331 | 0 | case BFD_RELOC_KVX_S43_GOTOFF_UP27: |
2332 | 0 | case BFD_RELOC_KVX_S43_GOTOFF_EX6: |
2333 | |
|
2334 | 0 | { |
2335 | 0 | asection *basegot = globals->root.sgot; |
2336 | | /* BFD_ASSERT(h == NULL); */ |
2337 | 0 | BFD_ASSERT(globals->root.sgot != NULL); |
2338 | 0 | value -= basegot->output_section->vma + basegot->output_offset; |
2339 | 0 | return _bfd_final_link_relocate (howto, input_bfd, input_section, |
2340 | 0 | contents, rel->r_offset, value, |
2341 | 0 | addend); |
2342 | 0 | } |
2343 | 0 | break; |
2344 | | |
2345 | 0 | case BFD_RELOC_KVX_S37_GOT_LO10: |
2346 | 0 | case BFD_RELOC_KVX_S37_GOT_UP27: |
2347 | |
|
2348 | 0 | case BFD_RELOC_KVX_32_GOT: |
2349 | 0 | case BFD_RELOC_KVX_64_GOT: |
2350 | |
|
2351 | 0 | case BFD_RELOC_KVX_S43_GOT_LO10: |
2352 | 0 | case BFD_RELOC_KVX_S43_GOT_UP27: |
2353 | 0 | case BFD_RELOC_KVX_S43_GOT_EX6: |
2354 | |
|
2355 | 0 | if (globals->root.sgot == NULL) |
2356 | 0 | BFD_ASSERT (h != NULL); |
2357 | |
|
2358 | 0 | if (h != NULL) |
2359 | 0 | { |
2360 | 0 | value = kvx_calculate_got_entry_vma (h, globals, info, value, |
2361 | 0 | output_bfd, |
2362 | 0 | unresolved_reloc_p); |
2363 | | #ifdef UGLY_DEBUG |
2364 | | printf("GOT_LO/HI for %s, value %x\n", h->root.root.string, value); |
2365 | | #endif |
2366 | |
|
2367 | 0 | return _bfd_final_link_relocate (howto, input_bfd, input_section, |
2368 | 0 | contents, rel->r_offset, value, |
2369 | 0 | addend); |
2370 | 0 | } |
2371 | 0 | else |
2372 | 0 | { |
2373 | | #ifdef UGLY_DEBUG |
2374 | | printf("GOT_LO/HI with h NULL, initial value %x\n", value); |
2375 | | #endif |
2376 | 0 | struct elf_kvx_local_symbol *locals = elf_kvx_locals (input_bfd); |
2377 | |
|
2378 | 0 | if (locals == NULL) |
2379 | 0 | { |
2380 | 0 | int howto_index = bfd_r_type - BFD_RELOC_KVX_RELOC_START; |
2381 | 0 | _bfd_error_handler |
2382 | | /* xgettext:c-format */ |
2383 | 0 |         (_("%pB: local symbol descriptor table must not be NULL when applying "  |
2384 | 0 | "relocation %s against local symbol"), |
2385 | 0 | input_bfd, elf_kvx_howto_table[howto_index].name); |
2386 | 0 | abort (); |
2387 | 0 | } |
2388 | | |
2389 | 0 | off = symbol_got_offset (input_bfd, h, r_symndx); |
2390 | 0 | base_got = globals->root.sgot; |
2391 | 0 | bfd_vma got_entry_addr = (base_got->output_section->vma |
2392 | 0 | + base_got->output_offset + off); |
2393 | |
|
2394 | 0 | if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx)) |
2395 | 0 | { |
2396 | 0 | bfd_put_64 (output_bfd, value, base_got->contents + off); |
2397 | |
|
2398 | 0 | if (bfd_link_pic (info)) |
2399 | 0 | { |
2400 | 0 | asection *s; |
2401 | 0 | Elf_Internal_Rela outrel; |
2402 | | |
2403 | | /* For PIC executables and shared libraries we need |
2404 | | to relocate the GOT entry at run time. */ |
2405 | 0 | s = globals->root.srelgot; |
2406 | 0 | if (s == NULL) |
2407 | 0 | abort (); |
2408 | | |
2409 | 0 | outrel.r_offset = got_entry_addr; |
2410 | 0 | outrel.r_info = ELF64_R_INFO (0, R_KVX_RELATIVE); |
2411 | 0 | outrel.r_addend = value; |
2412 | 0 | elf_append_rela (output_bfd, s, &outrel); |
2413 | 0 | } |
2414 | | |
2415 | 0 | symbol_got_offset_mark (input_bfd, h, r_symndx); |
2416 | 0 | } |
2417 | | |
2418 | | /* Update the relocation value to GOT entry addr as we have |
2419 | | transformed the direct data access into an indirect data |
2420 | | access through GOT. */ |
2421 | 0 | value = got_entry_addr; |
2422 | |
|
2423 | 0 | return _bfd_final_link_relocate (howto, input_bfd, input_section, |
2424 | 0 | contents, rel->r_offset, off, 0); |
2425 | 0 | } |
2426 | 0 | break; |
2427 | | |
2428 | 0 | default: |
2429 | 0 | return bfd_reloc_notsupported; |
2430 | 0 | } |
2431 | | |
2432 | 0 | if (saved_addend) |
2433 | 0 | *saved_addend = value; |
2434 | | |
2435 | | /* Only apply the final relocation in a sequence. */ |
2436 | 0 | if (save_addend) |
2437 | 0 | return bfd_reloc_continue; |
2438 | | |
2439 | 0 | return _bfd_kvx_elf_put_addend (input_bfd, hit_data, bfd_r_type, |
2440 | 0 | howto, value); |
2441 | 0 | } |
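KVX immediates are split across LO10/UP27/EX6 syllables, so several relocations can share one r_offset; elf64_kvx_relocate_section (below) chains them by passing the computed value back in through saved_addend, and only the last relocation of a run patches the instruction (the earlier ones return bfd_reloc_continue).  A minimal sketch of that driver logic (struct mini_rela, process_run and the apply callback are hypothetical):

#include <stddef.h>
#include <stdint.h>

struct mini_rela { uint64_t offset; int64_t addend; };

static void process_run (const struct mini_rela *rel, size_t count,
                         void (*apply) (uint64_t offset, int64_t value))
{
  int64_t addend = 0;                 /* carried between same-offset relocs */

  for (size_t i = 0; i < count; i++)
    {
      int chain = (i + 1 < count && rel[i + 1].offset == rel[i].offset);

      /* final_link_relocate adds rel->r_addend (plus the symbol value,
         not shown here) to the carried addend.  */
      int64_t value = addend + rel[i].addend;

      if (chain)
        addend = value;               /* save_addend: bfd_reloc_continue */
      else
        {
          apply (rel[i].offset, value);  /* last piece: patch the insn */
          addend = 0;
        }
    }
}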
2442 | | |
2443 | | |
2444 | | |
2445 | | /* Relocate a KVX ELF section. */ |
2446 | | |
2447 | | static int |
2448 | | elf64_kvx_relocate_section (bfd *output_bfd, |
2449 | | struct bfd_link_info *info, |
2450 | | bfd *input_bfd, |
2451 | | asection *input_section, |
2452 | | bfd_byte *contents, |
2453 | | Elf_Internal_Rela *relocs, |
2454 | | Elf_Internal_Sym *local_syms, |
2455 | | asection **local_sections) |
2456 | 0 | { |
2457 | 0 | Elf_Internal_Shdr *symtab_hdr; |
2458 | 0 | struct elf_link_hash_entry **sym_hashes; |
2459 | 0 | Elf_Internal_Rela *rel; |
2460 | 0 | Elf_Internal_Rela *relend; |
2461 | 0 | const char *name; |
2462 | 0 | struct elf_kvx_link_hash_table *globals; |
2463 | 0 | bool save_addend = false; |
2464 | 0 | bfd_vma addend = 0; |
2465 | |
|
2466 | 0 | globals = elf_kvx_hash_table (info); |
2467 | |
|
2468 | 0 | symtab_hdr = &elf_symtab_hdr (input_bfd); |
2469 | 0 | sym_hashes = elf_sym_hashes (input_bfd); |
2470 | |
|
2471 | 0 | rel = relocs; |
2472 | 0 | relend = relocs + input_section->reloc_count; |
2473 | 0 | for (; rel < relend; rel++) |
2474 | 0 | { |
2475 | 0 | unsigned int r_type; |
2476 | 0 | bfd_reloc_code_real_type bfd_r_type; |
2477 | 0 | reloc_howto_type *howto; |
2478 | 0 | unsigned long r_symndx; |
2479 | 0 | Elf_Internal_Sym *sym; |
2480 | 0 | asection *sec; |
2481 | 0 | struct elf_link_hash_entry *h; |
2482 | 0 | bfd_vma relocation; |
2483 | 0 | bfd_reloc_status_type r; |
2484 | 0 | arelent bfd_reloc; |
2485 | 0 | char sym_type; |
2486 | 0 | bool unresolved_reloc = false; |
2487 | 0 | char *error_message = NULL; |
2488 | |
|
2489 | 0 | r_symndx = ELF64_R_SYM (rel->r_info); |
2490 | 0 | r_type = ELF64_R_TYPE (rel->r_info); |
2491 | |
|
2492 | 0 | bfd_reloc.howto = elf64_kvx_howto_from_type (input_bfd, r_type); |
2493 | 0 | howto = bfd_reloc.howto; |
2494 | |
|
2495 | 0 | if (howto == NULL) |
2496 | 0 | return _bfd_unrecognized_reloc (input_bfd, input_section, r_type); |
2497 | | |
2498 | 0 | bfd_r_type = elf64_kvx_bfd_reloc_from_howto (howto); |
2499 | |
|
2500 | 0 | h = NULL; |
2501 | 0 | sym = NULL; |
2502 | 0 | sec = NULL; |
2503 | |
|
2504 | 0 | if (r_symndx < symtab_hdr->sh_info) /* A local symbol. */ |
2505 | 0 | { |
2506 | 0 | sym = local_syms + r_symndx; |
2507 | 0 | sym_type = ELF64_ST_TYPE (sym->st_info); |
2508 | 0 | sec = local_sections[r_symndx]; |
2509 | | |
2510 | | /* An object file might have a reference to a local |
2511 | | undefined symbol. This is a draft object file, but we |
2512 | | should at least do something about it. */ |
2513 | 0 | if (r_type != R_KVX_NONE |
2514 | 0 | && r_type != R_KVX_S37_GOTADDR_LO10 |
2515 | 0 | && r_type != R_KVX_S37_GOTADDR_UP27 |
2516 | 0 | && r_type != R_KVX_S64_GOTADDR_LO10 |
2517 | 0 | && r_type != R_KVX_S64_GOTADDR_UP27 |
2518 | 0 | && r_type != R_KVX_S64_GOTADDR_EX27 |
2519 | 0 | && r_type != R_KVX_S43_GOTADDR_LO10 |
2520 | 0 | && r_type != R_KVX_S43_GOTADDR_UP27 |
2521 | 0 | && r_type != R_KVX_S43_GOTADDR_EX6 |
2522 | 0 | && bfd_is_und_section (sec) |
2523 | 0 | && ELF_ST_BIND (sym->st_info) != STB_WEAK) |
2524 | 0 | (*info->callbacks->undefined_symbol) |
2525 | 0 | (info, bfd_elf_string_from_elf_section |
2526 | 0 | (input_bfd, symtab_hdr->sh_link, sym->st_name), |
2527 | 0 | input_bfd, input_section, rel->r_offset, true); |
2528 | |
|
2529 | 0 | relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel); |
2530 | 0 | } |
2531 | 0 | else |
2532 | 0 | { |
2533 | 0 | bool warned, ignored; |
2534 | |
|
2535 | 0 | RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, |
2536 | 0 | r_symndx, symtab_hdr, sym_hashes, |
2537 | 0 | h, sec, relocation, |
2538 | 0 | unresolved_reloc, warned, ignored); |
2539 | | |
2540 | 0 | sym_type = h->type; |
2541 | 0 | } |
2542 | | |
2543 | 0 | if (sec != NULL && discarded_section (sec)) |
2544 | 0 | RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, |
2545 | 0 | rel, 1, relend, howto, 0, contents); |
2546 | |
|
2547 | 0 | if (bfd_link_relocatable (info)) |
2548 | 0 | continue; |
2549 | | |
2550 | 0 | if (h != NULL) |
2551 | 0 | name = h->root.root.string; |
2552 | 0 | else |
2553 | 0 | { |
2554 | 0 | name = (bfd_elf_string_from_elf_section |
2555 | 0 | (input_bfd, symtab_hdr->sh_link, sym->st_name)); |
2556 | 0 | if (name == NULL || *name == '\0') |
2557 | 0 | name = bfd_section_name (sec); |
2558 | 0 | } |
2559 | |
|
2560 | 0 | if (r_symndx != 0 |
2561 | 0 | && r_type != R_KVX_NONE |
2562 | 0 | && (h == NULL |
2563 | 0 | || h->root.type == bfd_link_hash_defined |
2564 | 0 | || h->root.type == bfd_link_hash_defweak) |
2565 | 0 | && IS_KVX_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS)) |
2566 | 0 | { |
2567 | 0 | (*_bfd_error_handler) |
2568 | 0 | ((sym_type == STT_TLS |
2569 | | /* xgettext:c-format */ |
2570 | 0 | ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s") |
2571 | | /* xgettext:c-format */ |
2572 | 0 | : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")), |
2573 | 0 | input_bfd, |
2574 | 0 | input_section, (uint64_t) rel->r_offset, howto->name, name); |
2575 | 0 | } |
2576 | | |
2577 | | /* Original aarch64 has relaxation handling for TLS here. */ |
2578 | 0 | r = bfd_reloc_continue; |
2579 | | |
2580 | | /* There may be multiple consecutive relocations for the |
2581 | | same offset. In that case we are supposed to treat the |
2582 | | output of each relocation as the addend for the next. */ |
2583 | 0 | if (rel + 1 < relend |
2584 | 0 | && rel->r_offset == rel[1].r_offset |
2585 | 0 | && ELF64_R_TYPE (rel[1].r_info) != R_KVX_NONE) |
2586 | | |
2587 | 0 | save_addend = true; |
2588 | 0 | else |
2589 | 0 | save_addend = false; |
2590 | |
|
2591 | 0 | if (r == bfd_reloc_continue) |
2592 | 0 | r = elf64_kvx_final_link_relocate (howto, input_bfd, output_bfd, |
2593 | 0 | input_section, contents, rel, |
2594 | 0 | relocation, info, sec, |
2595 | 0 | h, &unresolved_reloc, |
2596 | 0 | save_addend, &addend, sym); |
2597 | |
|
2598 | 0 | switch (elf64_kvx_bfd_reloc_from_type (input_bfd, r_type)) |
2599 | 0 | { |
2600 | 0 | case BFD_RELOC_KVX_S37_TLS_GD_LO10: |
2601 | 0 | case BFD_RELOC_KVX_S37_TLS_GD_UP27: |
2602 | |
|
2603 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_LO10: |
2604 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_UP27: |
2605 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_EX6: |
2606 | |
|
2607 | 0 | case BFD_RELOC_KVX_S37_TLS_LD_LO10: |
2608 | 0 | case BFD_RELOC_KVX_S37_TLS_LD_UP27: |
2609 | |
|
2610 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_LO10: |
2611 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_UP27: |
2612 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_EX6: |
2613 | |
|
2614 | 0 | if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx)) |
2615 | 0 | { |
2616 | 0 | bool need_relocs = false; |
2617 | 0 | bfd_byte *loc; |
2618 | 0 | int indx; |
2619 | 0 | bfd_vma off; |
2620 | |
|
2621 | 0 | off = symbol_got_offset (input_bfd, h, r_symndx); |
2622 | 0 | indx = h && h->dynindx != -1 ? h->dynindx : 0; |
2623 | |
|
2624 | 0 | need_relocs = |
2625 | 0 | (bfd_link_pic (info) || indx != 0) && |
2626 | 0 | (h == NULL |
2627 | 0 | || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT |
2628 | 0 | || h->root.type != bfd_link_hash_undefweak); |
2629 | |
|
2630 | 0 | BFD_ASSERT (globals->root.srelgot != NULL); |
2631 | |
|
2632 | 0 | if (need_relocs) |
2633 | 0 | { |
2634 | 0 | Elf_Internal_Rela rela; |
2635 | 0 | rela.r_info = ELF64_R_INFO (indx, R_KVX_64_DTPMOD); |
2636 | 0 | rela.r_addend = 0; |
2637 | 0 | rela.r_offset = globals->root.sgot->output_section->vma + |
2638 | 0 | globals->root.sgot->output_offset + off; |
2639 | |
|
2640 | 0 | loc = globals->root.srelgot->contents; |
2641 | 0 | loc += globals->root.srelgot->reloc_count++ |
2642 | 0 | * RELOC_SIZE (htab); |
2643 | 0 | bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); |
2644 | |
|
2645 | 0 | bfd_reloc_code_real_type real_type = |
2646 | 0 | elf64_kvx_bfd_reloc_from_type (input_bfd, r_type); |
2647 | |
|
2648 | 0 | if (real_type == BFD_RELOC_KVX_S37_TLS_LD_LO10 |
2649 | 0 | || real_type == BFD_RELOC_KVX_S37_TLS_LD_UP27 |
2650 | 0 | || real_type == BFD_RELOC_KVX_S43_TLS_LD_LO10 |
2651 | 0 | || real_type == BFD_RELOC_KVX_S43_TLS_LD_UP27 |
2652 | 0 | || real_type == BFD_RELOC_KVX_S43_TLS_LD_EX6) |
2653 | 0 | { |
2654 | | /* For local dynamic, don't generate DTPOFF in any case. |
2655 | | Initialize the DTPOFF slot into zero, so we get module |
2656 | | base address when invoke runtime TLS resolver. */ |
2657 | 0 | bfd_put_64 (output_bfd, 0, |
2658 | 0 | globals->root.sgot->contents + off |
2659 | 0 | + GOT_ENTRY_SIZE); |
2660 | 0 | } |
2661 | 0 | else if (indx == 0) |
2662 | 0 | { |
2663 | 0 | bfd_put_64 (output_bfd, |
2664 | 0 | relocation - dtpoff_base (info), |
2665 | 0 | globals->root.sgot->contents + off |
2666 | 0 | + GOT_ENTRY_SIZE); |
2667 | 0 | } |
2668 | 0 | else |
2669 | 0 | { |
2670 | | /* This TLS symbol is global. We emit a |
2671 | | relocation to fixup the tls offset at load |
2672 | | time. */ |
2673 | 0 | rela.r_info = |
2674 | 0 | ELF64_R_INFO (indx, R_KVX_64_DTPOFF); |
2675 | 0 | rela.r_addend = 0; |
2676 | 0 | rela.r_offset = |
2677 | 0 | (globals->root.sgot->output_section->vma |
2678 | 0 | + globals->root.sgot->output_offset + off |
2679 | 0 | + GOT_ENTRY_SIZE); |
2680 | |
|
2681 | 0 | loc = globals->root.srelgot->contents; |
2682 | 0 | loc += globals->root.srelgot->reloc_count++ |
2683 | 0 | * RELOC_SIZE (globals); |
2684 | 0 | bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); |
2685 | 0 | bfd_put_64 (output_bfd, (bfd_vma) 0, |
2686 | 0 | globals->root.sgot->contents + off |
2687 | 0 | + GOT_ENTRY_SIZE); |
2688 | 0 | } |
2689 | 0 | } |
2690 | 0 | else |
2691 | 0 | { |
2692 | 0 | bfd_put_64 (output_bfd, (bfd_vma) 1, |
2693 | 0 | globals->root.sgot->contents + off); |
2694 | 0 | bfd_put_64 (output_bfd, |
2695 | 0 | relocation - dtpoff_base (info), |
2696 | 0 | globals->root.sgot->contents + off |
2697 | 0 | + GOT_ENTRY_SIZE); |
2698 | 0 | } |
2699 | |
|
2700 | 0 | symbol_got_offset_mark (input_bfd, h, r_symndx); |
2701 | 0 | } |
2702 | 0 | break; |
2703 | | |
2704 | 0 | case BFD_RELOC_KVX_S37_TLS_IE_LO10: |
2705 | 0 | case BFD_RELOC_KVX_S37_TLS_IE_UP27: |
2706 | |
|
2707 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_LO10: |
2708 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_UP27: |
2709 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_EX6: |
2710 | 0 | if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx)) |
2711 | 0 | { |
2712 | 0 | bool need_relocs = false; |
2713 | 0 | bfd_byte *loc; |
2714 | 0 | int indx; |
2715 | 0 | bfd_vma off; |
2716 | |
|
2717 | 0 | off = symbol_got_offset (input_bfd, h, r_symndx); |
2718 | |
|
2719 | 0 | indx = h && h->dynindx != -1 ? h->dynindx : 0; |
2720 | |
|
2721 | 0 | need_relocs = |
2722 | 0 | (bfd_link_pic (info) || indx != 0) && |
2723 | 0 | (h == NULL |
2724 | 0 | || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT |
2725 | 0 | || h->root.type != bfd_link_hash_undefweak); |
2726 | |
|
2727 | 0 | BFD_ASSERT (globals->root.srelgot != NULL); |
2728 | |
|
2729 | 0 | if (need_relocs) |
2730 | 0 | { |
2731 | 0 | Elf_Internal_Rela rela; |
2732 | |
|
2733 | 0 | if (indx == 0) |
2734 | 0 | rela.r_addend = relocation - dtpoff_base (info); |
2735 | 0 | else |
2736 | 0 | rela.r_addend = 0; |
2737 | |
|
2738 | 0 | rela.r_info = ELF64_R_INFO (indx, R_KVX_64_TPOFF); |
2739 | 0 | rela.r_offset = globals->root.sgot->output_section->vma + |
2740 | 0 | globals->root.sgot->output_offset + off; |
2741 | |
|
2742 | 0 | loc = globals->root.srelgot->contents; |
2743 | 0 | loc += globals->root.srelgot->reloc_count++ |
2744 | 0 | * RELOC_SIZE (htab); |
2745 | |
|
2746 | 0 | bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); |
2747 | |
|
2748 | 0 | bfd_put_64 (output_bfd, rela.r_addend, |
2749 | 0 | globals->root.sgot->contents + off); |
2750 | 0 | } |
2751 | 0 | else |
2752 | 0 | bfd_put_64 (output_bfd, relocation - tpoff_base (info), |
2753 | 0 | globals->root.sgot->contents + off); |
2754 | |
|
2755 | 0 | symbol_got_offset_mark (input_bfd, h, r_symndx); |
2756 | 0 | } |
2757 | 0 | break; |
2758 | | |
2759 | 0 | default: |
2760 | 0 | break; |
2761 | 0 | } |
2762 | | |
2763 | | /* Dynamic relocs are not propagated for SEC_DEBUGGING sections |
2764 | | because such sections are not SEC_ALLOC and thus ld.so will |
2765 | | not process them. */ |
2766 | 0 | if (unresolved_reloc |
2767 | 0 | && !((input_section->flags & SEC_DEBUGGING) != 0 |
2768 | 0 | && h->def_dynamic) |
2769 | 0 | && _bfd_elf_section_offset (output_bfd, info, input_section, |
2770 | 0 | +rel->r_offset) != (bfd_vma) - 1) |
2771 | 0 | { |
2772 | 0 | (*_bfd_error_handler) |
2773 | | /* xgettext:c-format */ |
2774 | 0 | (_("%pB(%pA+%#" PRIx64 "): " |
2775 | 0 | "unresolvable %s relocation against symbol `%s'"), |
2776 | 0 | input_bfd, input_section, (uint64_t) rel->r_offset, howto->name, |
2777 | 0 | h->root.root.string); |
2778 | 0 | return false; |
2779 | 0 | } |
2780 | | |
2781 | 0 | if (r != bfd_reloc_ok && r != bfd_reloc_continue) |
2782 | 0 | { |
2783 | 0 | switch (r) |
2784 | 0 | { |
2785 | 0 | case bfd_reloc_overflow: |
2786 | 0 | (*info->callbacks->reloc_overflow) |
2787 | 0 | (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0, |
2788 | 0 | input_bfd, input_section, rel->r_offset); |
2789 | | |
2790 |  |             /* Original aarch64 code had a check for alignment correctness.  */  |
2791 | 0 | break; |
2792 | | |
2793 | 0 | case bfd_reloc_undefined: |
2794 | 0 | (*info->callbacks->undefined_symbol) |
2795 | 0 | (info, name, input_bfd, input_section, rel->r_offset, true); |
2796 | 0 | break; |
2797 | | |
2798 | 0 | case bfd_reloc_outofrange: |
2799 | 0 | error_message = _("out of range"); |
2800 | 0 | goto common_error; |
2801 | | |
2802 | 0 | case bfd_reloc_notsupported: |
2803 | 0 | error_message = _("unsupported relocation"); |
2804 | 0 | goto common_error; |
2805 | | |
2806 | 0 | case bfd_reloc_dangerous: |
2807 | | /* error_message should already be set. */ |
2808 | 0 | goto common_error; |
2809 | | |
2810 | 0 | default: |
2811 | 0 | error_message = _("unknown error"); |
2812 | | /* Fall through. */ |
2813 | |
|
2814 | 0 | common_error: |
2815 | 0 | BFD_ASSERT (error_message != NULL); |
2816 | 0 | (*info->callbacks->reloc_dangerous) |
2817 | 0 | (info, error_message, input_bfd, input_section, rel->r_offset); |
2818 | 0 | break; |
2819 | 0 | } |
2820 | 0 | } |
2821 | | |
2822 | 0 | if (!save_addend) |
2823 | 0 | addend = 0; |
2824 | 0 | } |
2825 | | |
2826 | 0 | return true; |
2827 | 0 | } |
2828 | | |
2829 | | /* Set the right machine number. */ |
2830 | | |
2831 | | static bool |
2832 | | elf64_kvx_object_p (bfd *abfd) |
2833 | 13.0k | { |
2834 | | /* must be coherent with default arch in cpu-kvx.c */ |
2835 | 13.0k | int e_set = bfd_mach_kv3_1; |
2836 | | |
2837 | 13.0k | if (elf_elfheader (abfd)->e_machine == EM_KVX) |
2838 | 13.0k | { |
2839 | 13.0k | int e_core = elf_elfheader (abfd)->e_flags & ELF_KVX_CORE_MASK; |
2840 | 13.0k | switch(e_core) |
2841 | 13.0k | { |
2842 | 0 | #if ARCH_SIZE == 64 |
2843 | 557 | case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1_64; break; |
2844 | 204 | case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2_64; break; |
2845 | 954 | case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1_64; break; |
2846 | | #else |
2847 | | case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1; break; |
2848 | | case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2; break; |
2849 | | case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1; break; |
2850 | | #endif |
2851 | 11.3k | default: |
2852 | 11.3k | (*_bfd_error_handler)(_("%s: Bad ELF id: `%d'"), |
2853 | 11.3k | abfd->filename, e_core); |
2854 | 13.0k | } |
2855 | 13.0k | } |
2856 | 13.0k | return bfd_default_set_arch_mach (abfd, bfd_arch_kvx, e_set); |
2857 | 13.0k | } |
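elf64_kvx_object_p decodes the core field of e_flags and picks the matching bfd_mach_* value, keeping the default (and emitting the "Bad ELF id" message) for anything it does not recognise.  The same mapping written as a lookup table, as a sketch only (kvx_core_machs and kvx_core_to_mach are hypothetical; the ELF_KVX_CORE_* and bfd_mach_* names come from the headers this file already includes):

struct kvx_core_map { int core; int mach; };

static const struct kvx_core_map kvx_core_machs[] =
{
  { ELF_KVX_CORE_KV3_1, bfd_mach_kv3_1_64 },
  { ELF_KVX_CORE_KV3_2, bfd_mach_kv3_2_64 },
  { ELF_KVX_CORE_KV4_1, bfd_mach_kv4_1_64 },
};

static int kvx_core_to_mach (int e_core, int dflt)
{
  for (unsigned i = 0; i < sizeof kvx_core_machs / sizeof *kvx_core_machs; i++)
    if (kvx_core_machs[i].core == e_core)
      return kvx_core_machs[i].mach;
  return dflt;        /* unknown core id: keep the default machine */
}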
2858 | | |
2859 | | /* Function to keep KVX specific flags in the ELF header. */ |
2860 | | |
2861 | | static bool |
2862 | | elf64_kvx_set_private_flags (bfd *abfd, flagword flags) |
2863 | 0 | { |
2864 | 0 | if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags) |
2865 | 0 | { |
2866 | 0 | } |
2867 | 0 | else |
2868 | 0 | { |
2869 | 0 | elf_elfheader (abfd)->e_flags = flags; |
2870 | 0 | elf_flags_init (abfd) = true; |
2871 | 0 | } |
2872 | |
|
2873 | 0 | return true; |
2874 | 0 | } |
2875 | | |
2876 | | /* Merge backend specific data from an object file to the output |
2877 | | object file when linking. */ |
2878 | | |
2879 | | static bool |
2880 | | elf64_kvx_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info) |
2881 | 0 | { |
2882 | 0 | bfd *obfd = info->output_bfd; |
2883 | 0 | flagword out_flags; |
2884 | 0 | flagword in_flags; |
2885 | 0 | bool flags_compatible = true; |
2886 | 0 | asection *sec; |
2887 | | |
2888 |  |   /* Check if we have the same endianness.  */  |
2889 | 0 | if (!_bfd_generic_verify_endian_match (ibfd, info)) |
2890 | 0 | return false; |
2891 | | |
2892 | 0 | if (!is_kvx_elf (ibfd) || !is_kvx_elf (obfd)) |
2893 | 0 | return true; |
2894 | | |
2895 | | /* The input BFD must have had its flags initialised. */ |
2896 | | /* The following seems bogus to me -- The flags are initialized in |
2897 | | the assembler but I don't think an elf_flags_init field is |
2898 | | written into the object. */ |
2899 | | /* BFD_ASSERT (elf_flags_init (ibfd)); */ |
2900 | | |
2901 | 0 | if (bfd_get_arch_size (ibfd) != bfd_get_arch_size (obfd)) |
2902 | 0 | { |
2903 | 0 | const char *msg; |
2904 | |
|
2905 | 0 | if (bfd_get_arch_size (ibfd) == 32 |
2906 | 0 | && bfd_get_arch_size (obfd) == 64) |
2907 | 0 | msg = _("%s: compiled as 32-bit object and %s is 64-bit"); |
2908 | 0 | else if (bfd_get_arch_size (ibfd) == 64 |
2909 | 0 | && bfd_get_arch_size (obfd) == 32) |
2910 | 0 | msg = _("%s: compiled as 64-bit object and %s is 32-bit"); |
2911 | 0 | else |
2912 | 0 | msg = _("%s: object size does not match that of target %s"); |
2913 | |
|
2914 | 0 | (*_bfd_error_handler) (msg, bfd_get_filename (ibfd), |
2915 | 0 | bfd_get_filename (obfd)); |
2916 | 0 | bfd_set_error (bfd_error_wrong_format); |
2917 | 0 | return false; |
2918 | 0 | } |
2919 | | |
2920 | 0 | in_flags = elf_elfheader (ibfd)->e_flags; |
2921 | 0 | out_flags = elf_elfheader (obfd)->e_flags; |
2922 | |
|
2923 | 0 | if (!elf_flags_init (obfd)) |
2924 | 0 | { |
2925 | | /* If the input is the default architecture and had the default |
2926 | | flags then do not bother setting the flags for the output |
2927 | | architecture, instead allow future merges to do this. If no |
2928 | | future merges ever set these flags then they will retain their |
2929 | | uninitialised values, which surprise surprise, correspond |
2930 | | to the default values. */ |
2931 | 0 | if (bfd_get_arch_info (ibfd)->the_default |
2932 | 0 | && elf_elfheader (ibfd)->e_flags == 0) |
2933 | 0 | return true; |
2934 | | |
2935 | 0 | elf_flags_init (obfd) = true; |
2936 | 0 | elf_elfheader (obfd)->e_flags = in_flags; |
2937 | |
|
2938 | 0 | if (bfd_get_arch (obfd) == bfd_get_arch (ibfd) |
2939 | 0 | && bfd_get_arch_info (obfd)->the_default) |
2940 | 0 | return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), |
2941 | 0 | bfd_get_mach (ibfd)); |
2942 | | |
2943 | 0 | return true; |
2944 | 0 | } |
2945 | | |
2946 | | /* Identical flags must be compatible. */ |
2947 | 0 | if (in_flags == out_flags) |
2948 | 0 | return true; |
2949 | | |
2950 | | /* Check to see if the input BFD actually contains any sections. If |
2951 | | not, its flags may not have been initialised either, but it |
2952 |  |      cannot actually cause any incompatibility.  Do not short-circuit  |
2953 | | dynamic objects; their section list may be emptied by |
2954 | | elf_link_add_object_symbols. |
2955 | | |
2956 | | Also check to see if there are no code sections in the input. |
2957 | | In this case there is no need to check for code specific flags. |
2958 |  |      XXX - do we need to worry about floating-point format compatibility  |
2959 |  |      in data sections?  */  |
2960 | 0 | if (!(ibfd->flags & DYNAMIC)) |
2961 | 0 | { |
2962 | 0 | bool null_input_bfd = true; |
2963 | 0 | bool only_data_sections = true; |
2964 | |
|
2965 | 0 | for (sec = ibfd->sections; sec != NULL; sec = sec->next) |
2966 | 0 | { |
2967 | 0 | if ((bfd_section_flags (sec) |
2968 | 0 | & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) |
2969 | 0 | == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) |
2970 | 0 | only_data_sections = false; |
2971 | |
|
2972 | 0 | null_input_bfd = false; |
2973 | 0 | break; |
2974 | 0 | } |
2975 | |
|
2976 | 0 | if (null_input_bfd || only_data_sections) |
2977 | 0 | return true; |
2978 | 0 | } |
2979 | 0 | return flags_compatible; |
2980 | 0 | } |
2981 | | |
2982 | | /* Display the flags field. */ |
2983 | | |
2984 | | static bool |
2985 | | elf64_kvx_print_private_bfd_data (bfd *abfd, void *ptr) |
2986 | 82 | { |
2987 | 82 | FILE *file = (FILE *) ptr; |
2988 | 82 | unsigned long flags; |
2989 | | |
2990 | 82 | BFD_ASSERT (abfd != NULL && ptr != NULL); |
2991 | | |
2992 | | /* Print normal ELF private data. */ |
2993 | 82 | _bfd_elf_print_private_bfd_data (abfd, ptr); |
2994 | | |
2995 | 82 | flags = elf_elfheader (abfd)->e_flags; |
2996 | | /* Ignore init flag - it may not be set, despite the flags field |
2997 | | containing valid data. */ |
2998 | | |
2999 | | /* xgettext:c-format */ |
3000 | 82 | fprintf (file, _("Private flags = 0x%lx : "), elf_elfheader (abfd)->e_flags); |
3001 | 82 | if((flags & ELF_KVX_ABI_64B_ADDR_BIT) == ELF_KVX_ABI_64B_ADDR_BIT) |
3002 | 13 | { |
3003 | 13 | if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1)) |
3004 | 2 | fprintf (file, _("Coolidge (kv3) V1 64 bits")); |
3005 | 11 | else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2)) |
3006 | 0 | fprintf (file, _("Coolidge (kv3) V2 64 bits")); |
3007 | 11 | else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1)) |
3008 | 1 | fprintf (file, _("Coolidge (kv4) V1 64 bits")); |
3009 | 13 | } |
3010 | 69 | else |
3011 | 69 | { |
3012 | 69 | if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1)) |
3013 | 39 | fprintf (file, _("Coolidge (kv3) V1 32 bits")); |
3014 | 30 | else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2)) |
3015 | 0 | fprintf (file, _("Coolidge (kv3) V2 32 bits")); |
3016 | 30 | else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1)) |
3017 | 1 | fprintf (file, _("Coolidge (kv4) V1 32 bits")); |
3018 | 69 | } |
3019 | | |
3020 | 82 | fputc ('\n', file); |
3021 | | |
3022 | 82 | return true; |
3023 | 82 | } |
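(Illustrative note, not part of elf64-kvx.c: the core decoding above can be read as a small mapping from e_flags to a core name. The sketch below is hypothetical; it only assumes the ELF_KVX_CHECK_CORE and ELF_KVX_CORE_* macros from elf/kvx.h behave exactly as they are used in the function above.)

/* Hypothetical helper mirroring the tests performed by
   elf64_kvx_print_private_bfd_data: map e_flags to a core name.  */
static const char *
kvx_core_name_sketch (unsigned long flags)
{
  if (ELF_KVX_CHECK_CORE (flags, ELF_KVX_CORE_KV3_1))
    return "Coolidge (kv3) V1";
  if (ELF_KVX_CHECK_CORE (flags, ELF_KVX_CORE_KV3_2))
    return "Coolidge (kv3) V2";
  if (ELF_KVX_CHECK_CORE (flags, ELF_KVX_CORE_KV4_1))
    return "Coolidge (kv4) V1";
  return "unknown";
}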
3024 | | |
3025 | | /* Adjust a symbol defined by a dynamic object and referenced by a |
3026 | | regular object. The current definition is in some section of the |
3027 | | dynamic object, but we're not including those sections. We have to |
3028 | | change the definition to something the rest of the link can |
3029 | | understand. */ |
3030 | | |
3031 | | static bool |
3032 | | elf64_kvx_adjust_dynamic_symbol (struct bfd_link_info *info, |
3033 | | struct elf_link_hash_entry *h) |
3034 | 0 | { |
3035 | 0 | struct elf_kvx_link_hash_table *htab; |
3036 | 0 | asection *s; |
3037 | | |
3038 | | /* If this is a function, put it in the procedure linkage table. We |
3039 | | will fill in the contents of the procedure linkage table later, |
3040 | | when we know the address of the .got section. */ |
3041 | 0 | if (h->type == STT_FUNC || h->needs_plt) |
3042 | 0 | { |
3043 | 0 | if (h->plt.refcount <= 0 |
3044 | 0 | || ((SYMBOL_CALLS_LOCAL (info, h) |
3045 | 0 | || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT |
3046 | 0 | && h->root.type == bfd_link_hash_undefweak)))) |
3047 | 0 | { |
3048 | | /* This case can occur if we saw a CALL26 reloc in |
3049 | | an input file, but the symbol wasn't referred to |
3050 | | by a dynamic object or all references were |
3051 | | garbage collected. In which case we can end up |
3052 | | resolving the call directly, without a PLT entry. */ |
3053 | 0 | h->plt.offset = (bfd_vma) - 1; |
3054 | 0 | h->needs_plt = 0; |
3055 | 0 | } |
3056 | |
|
3057 | 0 | return true; |
3058 | 0 | } |
3059 | 0 | else |
3060 | | /* Otherwise, reset to -1. */ |
3061 | 0 | h->plt.offset = (bfd_vma) - 1; |
3062 | | |
3063 | | |
3064 | | /* If this is a weak symbol, and there is a real definition, the |
3065 | | processor independent code will have arranged for us to see the |
3066 | | real definition first, and we can just use the same value. */ |
3067 | 0 | if (h->is_weakalias) |
3068 | 0 | { |
3069 | 0 | struct elf_link_hash_entry *def = weakdef (h); |
3070 | 0 | BFD_ASSERT (def->root.type == bfd_link_hash_defined); |
3071 | 0 | h->root.u.def.section = def->root.u.def.section; |
3072 | 0 | h->root.u.def.value = def->root.u.def.value; |
3073 | 0 | if (ELIMINATE_COPY_RELOCS || info->nocopyreloc) |
3074 | 0 | h->non_got_ref = def->non_got_ref; |
3075 | 0 | return true; |
3076 | 0 | } |
3077 | | |
3078 | | /* If we are creating a shared library, we must presume that the |
3079 | | only references to the symbol are via the global offset table. |
3080 | | For such cases we need not do anything here; the relocations will |
3081 | | be handled correctly by relocate_section. */ |
3082 | 0 | if (bfd_link_pic (info)) |
3083 | 0 | return true; |
3084 | | |
3085 | | /* If there are no references to this symbol that do not use the |
3086 | | GOT, we don't need to generate a copy reloc. */ |
3087 | 0 | if (!h->non_got_ref) |
3088 | 0 | return true; |
3089 | | |
3090 | | /* If -z nocopyreloc was given, we won't generate them either. */ |
3091 | 0 | if (info->nocopyreloc) |
3092 | 0 | { |
3093 | 0 | h->non_got_ref = 0; |
3094 | 0 | return true; |
3095 | 0 | } |
3096 | | |
3097 | | /* We must allocate the symbol in our .dynbss section, which will |
3098 | | become part of the .bss section of the executable. There will be |
3099 | | an entry for this symbol in the .dynsym section. The dynamic |
3100 | | object will contain position independent code, so all references |
3101 | | from the dynamic object to this symbol will go through the global |
3102 | | offset table. The dynamic linker will use the .dynsym entry to |
3103 | | determine the address it must put in the global offset table, so |
3104 | | both the dynamic object and the regular object will refer to the |
3105 | | same memory location for the variable. */ |
3106 | | |
3107 | 0 | htab = elf_kvx_hash_table (info); |
3108 | | |
3109 | | /* We must generate a R_KVX_COPY reloc to tell the dynamic linker |
3110 | | to copy the initial value out of the dynamic object and into the |
3111 | | runtime process image. */ |
3112 | 0 | if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0) |
3113 | 0 | { |
3114 | 0 | htab->srelbss->size += RELOC_SIZE (htab); |
3115 | 0 | h->needs_copy = 1; |
3116 | 0 | } |
3117 | |
|
3118 | 0 | s = htab->sdynbss; |
3119 | |
|
3120 | 0 | return _bfd_elf_adjust_dynamic_copy (info, h, s); |
3121 | 0 | } |
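(Illustrative note, not part of elf64-kvx.c: the copy-reloc path above is reached, for example, when a non-PIC executable refers directly to a data symbol defined only in a shared library. The snippet below is hypothetical application code, not linker code.)

/* Hypothetical application code: with `shared_counter' defined only in
   a DSO, this direct (non-GOT) reference marks the symbol non_got_ref
   during relocation scanning, so elf64_kvx_adjust_dynamic_symbol
   reserves .dynbss space and an R_KVX_COPY relocation for it.  */
extern int shared_counter;
int *counter_ptr = &shared_counter;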
3122 | | |
3123 | | static bool |
3124 | | elf64_kvx_allocate_local_symbols (bfd *abfd, unsigned number) |
3125 | 0 | { |
3126 | 0 | struct elf_kvx_local_symbol *locals; |
3127 | 0 | locals = elf_kvx_locals (abfd); |
3128 | 0 | if (locals == NULL) |
3129 | 0 | { |
3130 | 0 | locals = (struct elf_kvx_local_symbol *) |
3131 | 0 | bfd_zalloc (abfd, number * sizeof (struct elf_kvx_local_symbol)); |
3132 | 0 | if (locals == NULL) |
3133 | 0 | return false; |
3134 | 0 | elf_kvx_locals (abfd) = locals; |
3135 | 0 | } |
3136 | 0 | return true; |
3137 | 0 | } |
3138 | | |
3139 | | /* Create the .got section to hold the global offset table. */ |
3140 | | |
3141 | | static bool |
3142 | | kvx_elf_create_got_section (bfd *abfd, struct bfd_link_info *info) |
3143 | 0 | { |
3144 | 0 | const struct elf_backend_data *bed = get_elf_backend_data (abfd); |
3145 | 0 | flagword flags; |
3146 | 0 | asection *s; |
3147 | 0 | struct elf_link_hash_entry *h; |
3148 | 0 | struct elf_link_hash_table *htab = elf_hash_table (info); |
3149 | | |
3150 | | /* This function may be called more than once. */ |
3151 | 0 | s = bfd_get_linker_section (abfd, ".got"); |
3152 | 0 | if (s != NULL) |
3153 | 0 | return true; |
3154 | | |
3155 | 0 | flags = bed->dynamic_sec_flags; |
3156 | |
|
3157 | 0 | s = bfd_make_section_anyway_with_flags (abfd, |
3158 | 0 | (bed->rela_plts_and_copies_p |
3159 | 0 | ? ".rela.got" : ".rel.got"), |
3160 | 0 | (bed->dynamic_sec_flags |
3161 | 0 | | SEC_READONLY)); |
3162 | 0 | if (s == NULL |
3163 | 0 | || !bfd_set_section_alignment (s, bed->s->log_file_align)) |
3164 | | |
3165 | 0 | return false; |
3166 | 0 | htab->srelgot = s; |
3167 | |
|
3168 | 0 | s = bfd_make_section_anyway_with_flags (abfd, ".got", flags); |
3169 | 0 | if (s == NULL |
3170 | 0 | || !bfd_set_section_alignment (s, bed->s->log_file_align)) |
3171 | 0 | return false; |
3172 | 0 | htab->sgot = s; |
3173 | 0 | htab->sgot->size += GOT_ENTRY_SIZE; |
3174 | |
|
3175 | 0 | if (bed->want_got_sym) |
3176 | 0 | { |
3177 | | /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got |
3178 | | (or .got.plt) section. We don't do this in the linker script |
3179 | | because we don't want to define the symbol if we are not creating |
3180 | | a global offset table. */ |
3181 | 0 | h = _bfd_elf_define_linkage_sym (abfd, info, s, |
3182 | 0 | "_GLOBAL_OFFSET_TABLE_"); |
3183 | 0 | elf_hash_table (info)->hgot = h; |
3184 | 0 | if (h == NULL) |
3185 | 0 | return false; |
3186 | 0 | } |
3187 | | |
3188 | 0 | if (bed->want_got_plt) |
3189 | 0 | { |
3190 | 0 | s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags); |
3191 | 0 | if (s == NULL |
3192 | 0 | || !bfd_set_section_alignment (s, |
3193 | 0 | bed->s->log_file_align)) |
3194 | 0 | return false; |
3195 | 0 | htab->sgotplt = s; |
3196 | 0 | } |
3197 | | |
3198 | | /* The first bit of the global offset table is the header. */ |
3199 | 0 | s->size += bed->got_header_size; |
3200 | | |
3201 | | /* We still need to handle GOT content when doing a static link with PIC. */ |
3202 | 0 | if (bfd_link_executable (info) && !bfd_link_pic (info)) { |
3203 | 0 | htab->dynobj = abfd; |
3204 | 0 | } |
3205 | |
|
3206 | 0 | return true; |
3207 | 0 | } |
3208 | | |
3209 | | /* Look through the relocs for a section during the first phase. */ |
3210 | | |
3211 | | static bool |
3212 | | elf64_kvx_check_relocs (bfd *abfd, struct bfd_link_info *info, |
3213 | | asection *sec, const Elf_Internal_Rela *relocs) |
3214 | 0 | { |
3215 | 0 | Elf_Internal_Shdr *symtab_hdr; |
3216 | 0 | struct elf_link_hash_entry **sym_hashes; |
3217 | 0 | const Elf_Internal_Rela *rel; |
3218 | 0 | const Elf_Internal_Rela *rel_end; |
3219 | 0 | asection *sreloc; |
3220 | |
|
3221 | 0 | struct elf_kvx_link_hash_table *htab; |
3222 | |
|
3223 | 0 | if (bfd_link_relocatable (info)) |
3224 | 0 | return true; |
3225 | | |
3226 | 0 | BFD_ASSERT (is_kvx_elf (abfd)); |
3227 | |
|
3228 | 0 | htab = elf_kvx_hash_table (info); |
3229 | 0 | sreloc = NULL; |
3230 | |
|
3231 | 0 | symtab_hdr = &elf_symtab_hdr (abfd); |
3232 | 0 | sym_hashes = elf_sym_hashes (abfd); |
3233 | |
|
3234 | 0 | rel_end = relocs + sec->reloc_count; |
3235 | 0 | for (rel = relocs; rel < rel_end; rel++) |
3236 | 0 | { |
3237 | 0 | struct elf_link_hash_entry *h; |
3238 | 0 | unsigned int r_symndx; |
3239 | 0 | unsigned int r_type; |
3240 | 0 | bfd_reloc_code_real_type bfd_r_type; |
3241 | 0 | Elf_Internal_Sym *isym; |
3242 | |
|
3243 | 0 | r_symndx = ELF64_R_SYM (rel->r_info); |
3244 | 0 | r_type = ELF64_R_TYPE (rel->r_info); |
3245 | |
|
3246 | 0 | if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) |
3247 | 0 | { |
3248 | | /* xgettext:c-format */ |
3249 | 0 | _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx); |
3250 | 0 | return false; |
3251 | 0 | } |
3252 | | |
3253 | 0 | if (r_symndx < symtab_hdr->sh_info) |
3254 | 0 | { |
3255 | | /* A local symbol. */ |
3256 | 0 | isym = bfd_sym_from_r_symndx (&htab->sym_cache, |
3257 | 0 | abfd, r_symndx); |
3258 | 0 | if (isym == NULL) |
3259 | 0 | return false; |
3260 | | |
3261 | 0 | h = NULL; |
3262 | 0 | } |
3263 | 0 | else |
3264 | 0 | { |
3265 | 0 | h = sym_hashes[r_symndx - symtab_hdr->sh_info]; |
3266 | 0 | while (h->root.type == bfd_link_hash_indirect |
3267 | 0 | || h->root.type == bfd_link_hash_warning) |
3268 | 0 | h = (struct elf_link_hash_entry *) h->root.u.i.link; |
3269 | 0 | } |
3270 | | |
3271 | | /* Could be done earlier, if h were already available. */ |
3272 | 0 | bfd_r_type = kvx_tls_transition (abfd, info, r_type, h, r_symndx); |
3273 | |
|
3274 | 0 | if (h != NULL) |
3275 | 0 | { |
3276 | | /* Create the ifunc sections for static executables. If we |
3277 | | never see an indirect function symbol and are not building |
3278 | | a static executable, those sections will be empty and |
3279 | | won't appear in output. */ |
3280 | 0 | switch (bfd_r_type) |
3281 | 0 | { |
3282 | 0 | default: |
3283 | 0 | break; |
3284 | 0 | } |
3285 | | |
3286 | | /* It is referenced by a non-shared object. */ |
3287 | 0 | h->ref_regular = 1; |
3288 | 0 | } |
3289 | | |
3290 | 0 | switch (bfd_r_type) |
3291 | 0 | { |
3292 | | |
3293 | 0 | case BFD_RELOC_KVX_S43_LO10: |
3294 | 0 | case BFD_RELOC_KVX_S43_UP27: |
3295 | 0 | case BFD_RELOC_KVX_S43_EX6: |
3296 | |
|
3297 | 0 | case BFD_RELOC_KVX_S37_LO10: |
3298 | 0 | case BFD_RELOC_KVX_S37_UP27: |
3299 | |
|
3300 | 0 | case BFD_RELOC_KVX_S64_LO10: |
3301 | 0 | case BFD_RELOC_KVX_S64_UP27: |
3302 | 0 | case BFD_RELOC_KVX_S64_EX27: |
3303 | |
|
3304 | 0 | case BFD_RELOC_KVX_32: |
3305 | 0 | case BFD_RELOC_KVX_64: |
3306 | | |
3307 | | /* We don't need to handle relocs into sections not going into |
3308 | | the "real" output. */ |
3309 | 0 | if ((sec->flags & SEC_ALLOC) == 0) |
3310 | 0 | break; |
3311 | | |
3312 | 0 | if (h != NULL) |
3313 | 0 | { |
3314 | 0 | if (!bfd_link_pic (info)) |
3315 | 0 | h->non_got_ref = 1; |
3316 | |
|
3317 | 0 | h->plt.refcount += 1; |
3318 | 0 | h->pointer_equality_needed = 1; |
3319 | 0 | } |
3320 | | |
3321 | | /* No need to do anything if we're not creating a shared |
3322 | | object. */ |
3323 | 0 | if (! bfd_link_pic (info)) |
3324 | 0 | break; |
3325 | | |
3326 | 0 | { |
3327 | 0 | struct elf_dyn_relocs *p; |
3328 | 0 | struct elf_dyn_relocs **head; |
3329 | | |
3330 | | /* We must copy these reloc types into the output file. |
3331 | | Create a reloc section in dynobj and make room for |
3332 | | this reloc. */ |
3333 | 0 | if (sreloc == NULL) |
3334 | 0 | { |
3335 | 0 | if (htab->root.dynobj == NULL) |
3336 | 0 | htab->root.dynobj = abfd; |
3337 | |
|
3338 | 0 | sreloc = _bfd_elf_make_dynamic_reloc_section |
3339 | 0 | (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true); |
3340 | |
|
3341 | 0 | if (sreloc == NULL) |
3342 | 0 | return false; |
3343 | 0 | } |
3344 | | |
3345 | | /* If this is a global symbol, we count the number of |
3346 | | relocations we need for this symbol. */ |
3347 | 0 | if (h != NULL) |
3348 | 0 | { |
3349 | 0 | head = &h->dyn_relocs; |
3350 | 0 | } |
3351 | 0 | else |
3352 | 0 | { |
3353 | | /* Track dynamic relocs needed for local syms too. |
3354 | | We really need local syms available to do this |
3355 | | easily. Oh well. */ |
3356 | |
|
3357 | 0 | asection *s; |
3358 | 0 | void **vpp; |
3359 | |
|
3360 | 0 | isym = bfd_sym_from_r_symndx (&htab->sym_cache, |
3361 | 0 | abfd, r_symndx); |
3362 | 0 | if (isym == NULL) |
3363 | 0 | return false; |
3364 | | |
3365 | 0 | s = bfd_section_from_elf_index (abfd, isym->st_shndx); |
3366 | 0 | if (s == NULL) |
3367 | 0 | s = sec; |
3368 | | |
3369 | | /* Beware of type punned pointers vs strict aliasing |
3370 | | rules. */ |
3371 | 0 | vpp = &(elf_section_data (s)->local_dynrel); |
3372 | 0 | head = (struct elf_dyn_relocs **) vpp; |
3373 | 0 | } |
3374 | | |
3375 | 0 | p = *head; |
3376 | 0 | if (p == NULL || p->sec != sec) |
3377 | 0 | { |
3378 | 0 | bfd_size_type amt = sizeof *p; |
3379 | 0 | p = ((struct elf_dyn_relocs *) |
3380 | 0 | bfd_zalloc (htab->root.dynobj, amt)); |
3381 | 0 | if (p == NULL) |
3382 | 0 | return false; |
3383 | 0 | p->next = *head; |
3384 | 0 | *head = p; |
3385 | 0 | p->sec = sec; |
3386 | 0 | } |
3387 | | |
3388 | 0 | p->count += 1; |
3389 | |
|
3390 | 0 | } |
3391 | 0 | break; |
3392 | | |
3393 | 0 | case BFD_RELOC_KVX_S37_GOT_LO10: |
3394 | 0 | case BFD_RELOC_KVX_S37_GOT_UP27: |
3395 | |
|
3396 | 0 | case BFD_RELOC_KVX_S37_GOTOFF_LO10: |
3397 | 0 | case BFD_RELOC_KVX_S37_GOTOFF_UP27: |
3398 | |
|
3399 | 0 | case BFD_RELOC_KVX_S43_GOT_LO10: |
3400 | 0 | case BFD_RELOC_KVX_S43_GOT_UP27: |
3401 | 0 | case BFD_RELOC_KVX_S43_GOT_EX6: |
3402 | |
|
3403 | 0 | case BFD_RELOC_KVX_S43_GOTOFF_LO10: |
3404 | 0 | case BFD_RELOC_KVX_S43_GOTOFF_UP27: |
3405 | 0 | case BFD_RELOC_KVX_S43_GOTOFF_EX6: |
3406 | |
|
3407 | 0 | case BFD_RELOC_KVX_S37_TLS_GD_LO10: |
3408 | 0 | case BFD_RELOC_KVX_S37_TLS_GD_UP27: |
3409 | |
|
3410 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_LO10: |
3411 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_UP27: |
3412 | 0 | case BFD_RELOC_KVX_S43_TLS_GD_EX6: |
3413 | |
|
3414 | 0 | case BFD_RELOC_KVX_S37_TLS_IE_LO10: |
3415 | 0 | case BFD_RELOC_KVX_S37_TLS_IE_UP27: |
3416 | |
|
3417 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_LO10: |
3418 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_UP27: |
3419 | 0 | case BFD_RELOC_KVX_S43_TLS_IE_EX6: |
3420 | |
|
3421 | 0 | case BFD_RELOC_KVX_S37_TLS_LD_LO10: |
3422 | 0 | case BFD_RELOC_KVX_S37_TLS_LD_UP27: |
3423 | |
|
3424 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_LO10: |
3425 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_UP27: |
3426 | 0 | case BFD_RELOC_KVX_S43_TLS_LD_EX6: |
3427 | 0 | { |
3428 | 0 | unsigned got_type; |
3429 | 0 | unsigned old_got_type; |
3430 | |
|
3431 | 0 | got_type = kvx_reloc_got_type (bfd_r_type); |
3432 | |
|
3433 | 0 | if (h) |
3434 | 0 | { |
3435 | 0 | h->got.refcount += 1; |
3436 | 0 | old_got_type = elf_kvx_hash_entry (h)->got_type; |
3437 | 0 | } |
3438 | 0 | else |
3439 | 0 | { |
3440 | 0 | struct elf_kvx_local_symbol *locals; |
3441 | |
|
3442 | 0 | if (!elf64_kvx_allocate_local_symbols |
3443 | 0 | (abfd, symtab_hdr->sh_info)) |
3444 | 0 | return false; |
3445 | | |
3446 | 0 | locals = elf_kvx_locals (abfd); |
3447 | 0 | BFD_ASSERT (r_symndx < symtab_hdr->sh_info); |
3448 | 0 | locals[r_symndx].got_refcount += 1; |
3449 | 0 | old_got_type = locals[r_symndx].got_type; |
3450 | 0 | } |
3451 | | |
3452 | | /* We will already have issued an error message if there |
3453 | | is a TLS/non-TLS mismatch, based on the symbol type. |
3454 | | So just combine any TLS types needed. */ |
3455 | 0 | if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL |
3456 | 0 | && got_type != GOT_NORMAL) |
3457 | 0 | got_type |= old_got_type; |
3458 | | |
3459 | | /* If the symbol is accessed by both IE and GD methods, we |
3460 | | are able to relax. Turn off the GD flag, without |
3461 | | messing up with any other kind of TLS types that may be |
3462 | | involved. */ |
3463 | | /* Disabled untested and unused TLS */ |
3464 | | /* if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type)) */ |
3465 | | /* got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD); */ |
3466 | |
|
3467 | 0 | if (old_got_type != got_type) |
3468 | 0 | { |
3469 | 0 | if (h != NULL) |
3470 | 0 | elf_kvx_hash_entry (h)->got_type = got_type; |
3471 | 0 | else |
3472 | 0 | { |
3473 | 0 | struct elf_kvx_local_symbol *locals; |
3474 | 0 | locals = elf_kvx_locals (abfd); |
3475 | 0 | BFD_ASSERT (r_symndx < symtab_hdr->sh_info); |
3476 | 0 | locals[r_symndx].got_type = got_type; |
3477 | 0 | } |
3478 | 0 | } |
3479 | |
|
3480 | 0 | if (htab->root.dynobj == NULL) |
3481 | 0 | htab->root.dynobj = abfd; |
3482 | 0 | if (! kvx_elf_create_got_section (htab->root.dynobj, info)) |
3483 | 0 | return false; |
3484 | 0 | break; |
3485 | 0 | } |
3486 | | |
3487 | 0 | case BFD_RELOC_KVX_S64_GOTADDR_LO10: |
3488 | 0 | case BFD_RELOC_KVX_S64_GOTADDR_UP27: |
3489 | 0 | case BFD_RELOC_KVX_S64_GOTADDR_EX27: |
3490 | |
|
3491 | 0 | case BFD_RELOC_KVX_S43_GOTADDR_LO10: |
3492 | 0 | case BFD_RELOC_KVX_S43_GOTADDR_UP27: |
3493 | 0 | case BFD_RELOC_KVX_S43_GOTADDR_EX6: |
3494 | |
|
3495 | 0 | case BFD_RELOC_KVX_S37_GOTADDR_LO10: |
3496 | 0 | case BFD_RELOC_KVX_S37_GOTADDR_UP27: |
3497 | |
|
3498 | 0 | if (htab->root.dynobj == NULL) |
3499 | 0 | htab->root.dynobj = abfd; |
3500 | 0 | if (! kvx_elf_create_got_section (htab->root.dynobj, info)) |
3501 | 0 | return false; |
3502 | 0 | break; |
3503 | | |
3504 | 0 | case BFD_RELOC_KVX_PCREL27: |
3505 | 0 | case BFD_RELOC_KVX_PCREL17: |
3506 | | /* If this is a local symbol then we resolve it |
3507 | | directly without creating a PLT entry. */ |
3508 | 0 | if (h == NULL) |
3509 | 0 | continue; |
3510 | | |
3511 | 0 | h->needs_plt = 1; |
3512 | 0 | if (h->plt.refcount <= 0) |
3513 | 0 | h->plt.refcount = 1; |
3514 | 0 | else |
3515 | 0 | h->plt.refcount += 1; |
3516 | 0 | break; |
3517 | | |
3518 | 0 | default: |
3519 | 0 | break; |
3520 | 0 | } |
3521 | 0 | } |
3522 | | |
3523 | 0 | return true; |
3524 | 0 | } |
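(Illustrative note, not part of elf64-kvx.c: the GOT-type merging performed in the TLS/GOT cases of elf64_kvx_check_relocs above follows one simple rule, sketched below as a hypothetical helper using the GOT_* constants already used in this file.)

/* Hypothetical sketch of the merging rule: TLS access models already
   recorded for a symbol are OR-ed into the newly seen one, while
   GOT_NORMAL never absorbs or overrides a TLS type.  */
static unsigned
kvx_merge_got_type_sketch (unsigned old_type, unsigned new_type)
{
  if (old_type != GOT_UNKNOWN && old_type != GOT_NORMAL
      && new_type != GOT_NORMAL)
    new_type |= old_type;
  return new_type;
}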
3525 | | |
3526 | | static bool |
3527 | | elf64_kvx_init_file_header (bfd *abfd, struct bfd_link_info *link_info) |
3528 | 0 | { |
3529 | 0 | Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */ |
3530 | |
|
3531 | 0 | if (!_bfd_elf_init_file_header (abfd, link_info)) |
3532 | 0 | return false; |
3533 | | |
3534 | 0 | i_ehdrp = elf_elfheader (abfd); |
3535 | 0 | i_ehdrp->e_ident[EI_ABIVERSION] = KVX_ELF_ABI_VERSION; |
3536 | 0 | return true; |
3537 | 0 | } |
3538 | | |
3539 | | static enum elf_reloc_type_class |
3540 | | elf64_kvx_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED, |
3541 | | const asection *rel_sec ATTRIBUTE_UNUSED, |
3542 | | const Elf_Internal_Rela *rela) |
3543 | 0 | { |
3544 | 0 | switch ((int) ELF64_R_TYPE (rela->r_info)) |
3545 | 0 | { |
3546 | 0 | case R_KVX_RELATIVE: |
3547 | 0 | return reloc_class_relative; |
3548 | 0 | case R_KVX_JMP_SLOT: |
3549 | 0 | return reloc_class_plt; |
3550 | 0 | case R_KVX_COPY: |
3551 | 0 | return reloc_class_copy; |
3552 | 0 | default: |
3553 | 0 | return reloc_class_normal; |
3554 | 0 | } |
3555 | 0 | } |
3556 | | |
3557 | | /* A structure used to record a list of sections, independently |
3558 | | of the next and prev fields in the asection structure. */ |
3559 | | typedef struct section_list |
3560 | | { |
3561 | | asection *sec; |
3562 | | struct section_list *next; |
3563 | | struct section_list *prev; |
3564 | | } |
3565 | | section_list; |
3566 | | |
3567 | | typedef struct |
3568 | | { |
3569 | | void *finfo; |
3570 | | struct bfd_link_info *info; |
3571 | | asection *sec; |
3572 | | int sec_shndx; |
3573 | | int (*func) (void *, const char *, Elf_Internal_Sym *, |
3574 | | asection *, struct elf_link_hash_entry *); |
3575 | | } output_arch_syminfo; |
3576 | | |
3577 | | /* Output a single local symbol for a generated stub. */ |
3578 | | |
3579 | | static bool |
3580 | | elf64_kvx_output_stub_sym (output_arch_syminfo *osi, const char *name, |
3581 | | bfd_vma offset, bfd_vma size) |
3582 | 0 | { |
3583 | 0 | Elf_Internal_Sym sym; |
3584 | |
|
3585 | 0 | sym.st_value = (osi->sec->output_section->vma |
3586 | 0 | + osi->sec->output_offset + offset); |
3587 | 0 | sym.st_size = size; |
3588 | 0 | sym.st_other = 0; |
3589 | 0 | sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC); |
3590 | 0 | sym.st_shndx = osi->sec_shndx; |
3591 | 0 | return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1; |
3592 | 0 | } |
3593 | | |
3594 | | static bool |
3595 | | kvx_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg) |
3596 | 0 | { |
3597 | 0 | struct elf_kvx_stub_hash_entry *stub_entry; |
3598 | 0 | asection *stub_sec; |
3599 | 0 | bfd_vma addr; |
3600 | 0 | char *stub_name; |
3601 | 0 | output_arch_syminfo *osi; |
3602 | | |
3603 | | /* Massage our args to the form they really have. */ |
3604 | 0 | stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry; |
3605 | 0 | osi = (output_arch_syminfo *) in_arg; |
3606 | |
|
3607 | 0 | stub_sec = stub_entry->stub_sec; |
3608 | | |
3609 | | /* Ensure this stub is attached to the current section being |
3610 | | processed. */ |
3611 | 0 | if (stub_sec != osi->sec) |
3612 | 0 | return true; |
3613 | | |
3614 | 0 | addr = (bfd_vma) stub_entry->stub_offset; |
3615 | |
|
3616 | 0 | stub_name = stub_entry->output_name; |
3617 | |
|
3618 | 0 | switch (stub_entry->stub_type) |
3619 | 0 | { |
3620 | 0 | case kvx_stub_long_branch: |
3621 | 0 | if (!elf64_kvx_output_stub_sym |
3622 | 0 | (osi, stub_name, addr, sizeof (elf64_kvx_long_branch_stub))) |
3623 | 0 | return false; |
3624 | 0 | break; |
3625 | | |
3626 | 0 | default: |
3627 | 0 | abort (); |
3628 | 0 | } |
3629 | | |
3630 | 0 | return true; |
3631 | 0 | } |
3632 | | |
3633 | | /* Output mapping symbols for linker generated sections. */ |
3634 | | |
3635 | | static bool |
3636 | | elf64_kvx_output_arch_local_syms (bfd *output_bfd, |
3637 | | struct bfd_link_info *info, |
3638 | | void *finfo, |
3639 | | int (*func) (void *, const char *, |
3640 | | Elf_Internal_Sym *, |
3641 | | asection *, |
3642 | | struct elf_link_hash_entry *)) |
3643 | 0 | { |
3644 | 0 | output_arch_syminfo osi; |
3645 | 0 | struct elf_kvx_link_hash_table *htab; |
3646 | |
|
3647 | 0 | htab = elf_kvx_hash_table (info); |
3648 | |
|
3649 | 0 | osi.finfo = finfo; |
3650 | 0 | osi.info = info; |
3651 | 0 | osi.func = func; |
3652 | | |
3653 | | /* Long call stubs. */ |
3654 | 0 | if (htab->stub_bfd && htab->stub_bfd->sections) |
3655 | 0 | { |
3656 | 0 | asection *stub_sec; |
3657 | |
|
3658 | 0 | for (stub_sec = htab->stub_bfd->sections; |
3659 | 0 | stub_sec != NULL; stub_sec = stub_sec->next) |
3660 | 0 | { |
3661 | | /* Ignore non-stub sections. */ |
3662 | 0 | if (!strstr (stub_sec->name, STUB_SUFFIX)) |
3663 | 0 | continue; |
3664 | | |
3665 | 0 | osi.sec = stub_sec; |
3666 | |
|
3667 | 0 | osi.sec_shndx = _bfd_elf_section_from_bfd_section |
3668 | 0 | (output_bfd, osi.sec->output_section); |
3669 | |
|
3670 | 0 | bfd_hash_traverse (&htab->stub_hash_table, kvx_map_one_stub, |
3671 | 0 | &osi); |
3672 | 0 | } |
3673 | 0 | } |
3674 | | |
3675 | | /* Finally, output mapping symbols for the PLT. */ |
3676 | 0 | if (!htab->root.splt || htab->root.splt->size == 0) |
3677 | 0 | return true; |
3678 | | |
3679 | 0 | osi.sec_shndx = _bfd_elf_section_from_bfd_section |
3680 | 0 | (output_bfd, htab->root.splt->output_section); |
3681 | 0 | osi.sec = htab->root.splt; |
3682 | |
|
3683 | 0 | return true; |
3684 | |
|
3685 | 0 | } |
3686 | | |
3687 | | /* Allocate target specific section data. */ |
3688 | | |
3689 | | static bool |
3690 | | elf64_kvx_new_section_hook (bfd *abfd, asection *sec) |
3691 | 67.9k | { |
3692 | 67.9k | _kvx_elf_section_data *sdata; |
3693 | | |
3694 | 67.9k | sdata = bfd_zalloc (abfd, sizeof (*sdata)); |
3695 | 67.9k | if (sdata == NULL) |
3696 | 0 | return false; |
3697 | 67.9k | sec->used_by_bfd = sdata; |
3698 | | |
3699 | 67.9k | return _bfd_elf_new_section_hook (abfd, sec); |
3700 | 67.9k | } |
3701 | | |
3702 | | /* Create dynamic sections. This is different from the ARM backend in that |
3703 | | the got, plt, gotplt and their relocation sections are all created in the |
3704 | | standard part of the bfd elf backend. */ |
3705 | | |
3706 | | static bool |
3707 | | elf64_kvx_create_dynamic_sections (bfd *dynobj, |
3708 | | struct bfd_link_info *info) |
3709 | 0 | { |
3710 | 0 | struct elf_kvx_link_hash_table *htab; |
3711 | | |
3712 | | /* We need to create .got section. */ |
3713 | 0 | if (!kvx_elf_create_got_section (dynobj, info)) |
3714 | 0 | return false; |
3715 | | |
3716 | 0 | if (!_bfd_elf_create_dynamic_sections (dynobj, info)) |
3717 | 0 | return false; |
3718 | | |
3719 | 0 | htab = elf_kvx_hash_table (info); |
3720 | 0 | htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss"); |
3721 | 0 | if (!bfd_link_pic (info)) |
3722 | 0 | htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss"); |
3723 | |
|
3724 | 0 | if (!htab->sdynbss || (!bfd_link_pic (info) && !htab->srelbss)) |
3725 | 0 | abort (); |
3726 | | |
3727 | 0 | return true; |
3728 | 0 | } |
3729 | | |
3730 | | |
3731 | | /* Allocate space in .plt, .got and associated reloc sections for |
3732 | | dynamic relocs. */ |
3733 | | |
3734 | | static bool |
3735 | | elf64_kvx_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf) |
3736 | 0 | { |
3737 | 0 | struct bfd_link_info *info; |
3738 | 0 | struct elf_kvx_link_hash_table *htab; |
3739 | 0 | struct elf_dyn_relocs *p; |
3740 | | |
3741 | | /* An example of a bfd_link_hash_indirect symbol is a versioned |
3742 | | symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect) |
3743 | | -> __gxx_personality_v0(bfd_link_hash_defined) |
3744 | | |
3745 | | There is no need to process bfd_link_hash_indirect symbols here |
3746 | | because we will also be presented with the concrete instance of |
3747 | | the symbol and elf64_kvx_copy_indirect_symbol () will have been |
3748 | | called to copy all relevant data from the generic to the concrete |
3749 | | symbol instance. */ |
3750 | 0 | if (h->root.type == bfd_link_hash_indirect) |
3751 | 0 | return true; |
3752 | | |
3753 | 0 | if (h->root.type == bfd_link_hash_warning) |
3754 | 0 | h = (struct elf_link_hash_entry *) h->root.u.i.link; |
3755 | |
|
3756 | 0 | info = (struct bfd_link_info *) inf; |
3757 | 0 | htab = elf_kvx_hash_table (info); |
3758 | |
|
3759 | 0 | if (htab->root.dynamic_sections_created && h->plt.refcount > 0) |
3760 | 0 | { |
3761 | | /* Make sure this symbol is output as a dynamic symbol. |
3762 | | Undefined weak syms won't yet be marked as dynamic. */ |
3763 | 0 | if (h->dynindx == -1 && !h->forced_local) |
3764 | 0 | { |
3765 | 0 | if (!bfd_elf_link_record_dynamic_symbol (info, h)) |
3766 | 0 | return false; |
3767 | 0 | } |
3768 | | |
3769 | 0 | if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h)) |
3770 | 0 | { |
3771 | 0 | asection *s = htab->root.splt; |
3772 | | |
3773 | | /* If this is the first .plt entry, make room for the special |
3774 | | first entry. */ |
3775 | 0 | if (s->size == 0) |
3776 | 0 | s->size += htab->plt_header_size; |
3777 | |
|
3778 | 0 | h->plt.offset = s->size; |
3779 | | |
3780 | | /* If this symbol is not defined in a regular file, and we are |
3781 | | not generating a shared library, then set the symbol to this |
3782 | | location in the .plt. This is required to make function |
3783 | | pointers compare as equal between the normal executable and |
3784 | | the shared library. */ |
3785 | 0 | if (!bfd_link_pic (info) && !h->def_regular) |
3786 | 0 | { |
3787 | 0 | h->root.u.def.section = s; |
3788 | 0 | h->root.u.def.value = h->plt.offset; |
3789 | 0 | } |
3790 | | |
3791 | | /* Make room for this entry. For now we only create the |
3792 | | small model PLT entries. We later need to find a way |
3793 | | of relaxing into these from the large model PLT entries. */ |
3794 | 0 | s->size += PLT_SMALL_ENTRY_SIZE; |
3795 | | |
3796 | | /* We also need to make an entry in the .got.plt section, which |
3797 | | will be placed in the .got section by the linker script. */ |
3798 | 0 | htab->root.sgotplt->size += GOT_ENTRY_SIZE; |
3799 | | |
3800 | | /* We also need to make an entry in the .rela.plt section. */ |
3801 | 0 | htab->root.srelplt->size += RELOC_SIZE (htab); |
3802 | | |
3803 | | /* We need to ensure that all GOT entries that serve the PLT |
3804 | | are consecutive with the special GOT slots [0] [1] and |
3805 | | [2]. Any additional relocations must be placed after the |
3806 | | PLT related entries. We abuse the reloc_count such that |
3807 | | during sizing we adjust reloc_count to indicate the |
3808 | | number of PLT related reserved entries. In subsequent |
3809 | | phases when filling in the contents of the reloc entries, |
3810 | | PLT related entries are placed by computing their PLT |
3811 | | index (0 .. reloc_count), while other non-PLT relocs are |
3812 | | placed at the slot indicated by reloc_count and |
3813 | | reloc_count is updated. */ |
3814 | |
|
3815 | 0 | htab->root.srelplt->reloc_count++; |
3816 | 0 | } |
3817 | 0 | else |
3818 | 0 | { |
3819 | 0 | h->plt.offset = (bfd_vma) - 1; |
3820 | 0 | h->needs_plt = 0; |
3821 | 0 | } |
3822 | 0 | } |
3823 | 0 | else |
3824 | 0 | { |
3825 | 0 | h->plt.offset = (bfd_vma) - 1; |
3826 | 0 | h->needs_plt = 0; |
3827 | 0 | } |
3828 | | |
3829 | 0 | if (h->got.refcount > 0) |
3830 | 0 | { |
3831 | 0 | bool dyn; |
3832 | 0 | unsigned got_type = elf_kvx_hash_entry (h)->got_type; |
3833 | |
|
3834 | 0 | h->got.offset = (bfd_vma) - 1; |
3835 | |
|
3836 | 0 | dyn = htab->root.dynamic_sections_created; |
3837 | | |
3838 | | /* Make sure this symbol is output as a dynamic symbol. |
3839 | | Undefined weak syms won't yet be marked as dynamic. */ |
3840 | 0 | if (dyn && h->dynindx == -1 && !h->forced_local) |
3841 | 0 | { |
3842 | 0 | if (!bfd_elf_link_record_dynamic_symbol (info, h)) |
3843 | 0 | return false; |
3844 | 0 | } |
3845 | | |
3846 | 0 | if (got_type == GOT_UNKNOWN) |
3847 | 0 | { |
3848 | 0 | (*_bfd_error_handler) |
3849 | 0 | (_("relocation against `%s' has faulty GOT type "), |
3850 | 0 | (h) ? h->root.root.string : "a local symbol"); |
3851 | 0 | bfd_set_error (bfd_error_bad_value); |
3852 | 0 | return false; |
3853 | 0 | } |
3854 | 0 | else if (got_type == GOT_NORMAL) |
3855 | 0 | { |
3856 | 0 | h->got.offset = htab->root.sgot->size; |
3857 | 0 | htab->root.sgot->size += GOT_ENTRY_SIZE; |
3858 | 0 | if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT |
3859 | 0 | || h->root.type != bfd_link_hash_undefweak) |
3860 | 0 | && (bfd_link_pic (info) |
3861 | 0 | || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))) |
3862 | 0 | { |
3863 | 0 | htab->root.srelgot->size += RELOC_SIZE (htab); |
3864 | 0 | } |
3865 | 0 | } |
3866 | 0 | else |
3867 | 0 | { |
3868 | 0 | int indx; |
3869 | | |
3870 | | /* Any of these will require 2 GOT slots because |
3871 | | * they use __tls_get_addr() */ |
3872 | 0 | if (got_type & (GOT_TLS_GD | GOT_TLS_LD)) |
3873 | 0 | { |
3874 | 0 | h->got.offset = htab->root.sgot->size; |
3875 | 0 | htab->root.sgot->size += GOT_ENTRY_SIZE * 2; |
3876 | 0 | } |
3877 | |
|
3878 | 0 | if (got_type & GOT_TLS_IE) |
3879 | 0 | { |
3880 | 0 | h->got.offset = htab->root.sgot->size; |
3881 | 0 | htab->root.sgot->size += GOT_ENTRY_SIZE; |
3882 | 0 | } |
3883 | |
|
3884 | 0 | indx = h && h->dynindx != -1 ? h->dynindx : 0; |
3885 | 0 | if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT |
3886 | 0 | || h->root.type != bfd_link_hash_undefweak) |
3887 | 0 | && (bfd_link_pic (info) |
3888 | 0 | || indx != 0 |
3889 | 0 | || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))) |
3890 | 0 | { |
3891 | | /* Only the GD case requires 2 relocations. */ |
3892 | 0 | if (got_type & GOT_TLS_GD) |
3893 | 0 | htab->root.srelgot->size += RELOC_SIZE (htab) * 2; |
3894 | | |
3895 | | /* LD needs a DTPMOD reloc, IE needs a DTPOFF. */ |
3896 | 0 | if (got_type & (GOT_TLS_LD | GOT_TLS_IE)) |
3897 | 0 | htab->root.srelgot->size += RELOC_SIZE (htab); |
3898 | 0 | } |
3899 | 0 | } |
3900 | 0 | } |
3901 | 0 | else |
3902 | 0 | { |
3903 | 0 | h->got.offset = (bfd_vma) - 1; |
3904 | 0 | } |
3905 | | |
3906 | 0 | if (h->dyn_relocs == NULL) |
3907 | 0 | return true; |
3908 | | |
3909 | | /* In the shared -Bsymbolic case, discard space allocated for |
3910 | | dynamic pc-relative relocs against symbols which turn out to be |
3911 | | defined in regular objects. For the normal shared case, discard |
3912 | | space for pc-relative relocs that have become local due to symbol |
3913 | | visibility changes. */ |
3914 | | |
3915 | 0 | if (bfd_link_pic (info)) |
3916 | 0 | { |
3917 | | /* Relocs that use pc_count are those that appear on a call |
3918 | | insn, or certain REL relocs that can be generated via assembly. |
3919 | | We want calls to protected symbols to resolve directly to the |
3920 | | function rather than going via the plt. If people want |
3921 | | function pointer comparisons to work as expected then they |
3922 | | should avoid writing weird assembly. */ |
3923 | 0 | if (SYMBOL_CALLS_LOCAL (info, h)) |
3924 | 0 | { |
3925 | 0 | struct elf_dyn_relocs **pp; |
3926 | |
|
3927 | 0 | for (pp = &h->dyn_relocs; (p = *pp) != NULL;) |
3928 | 0 | { |
3929 | 0 | p->count -= p->pc_count; |
3930 | 0 | p->pc_count = 0; |
3931 | 0 | if (p->count == 0) |
3932 | 0 | *pp = p->next; |
3933 | 0 | else |
3934 | 0 | pp = &p->next; |
3935 | 0 | } |
3936 | 0 | } |
3937 | | |
3938 | | /* Also discard relocs on undefined weak syms with non-default |
3939 | | visibility. */ |
3940 | 0 | if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak) |
3941 | 0 | { |
3942 | 0 | if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT |
3943 | 0 | || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h)) |
3944 | 0 | h->dyn_relocs = NULL; |
3945 | | |
3946 | | /* Make sure undefined weak symbols are output as a dynamic |
3947 | | symbol in PIEs. */ |
3948 | 0 | else if (h->dynindx == -1 |
3949 | 0 | && !h->forced_local |
3950 | 0 | && !bfd_elf_link_record_dynamic_symbol (info, h)) |
3951 | 0 | return false; |
3952 | 0 | } |
3953 | |
|
3954 | 0 | } |
3955 | 0 | else if (ELIMINATE_COPY_RELOCS) |
3956 | 0 | { |
3957 | | /* For the non-shared case, discard space for relocs against |
3958 | | symbols which turn out to need copy relocs or are not |
3959 | | dynamic. */ |
3960 | |
|
3961 | 0 | if (!h->non_got_ref |
3962 | 0 | && ((h->def_dynamic |
3963 | 0 | && !h->def_regular) |
3964 | 0 | || (htab->root.dynamic_sections_created |
3965 | 0 | && (h->root.type == bfd_link_hash_undefweak |
3966 | 0 | || h->root.type == bfd_link_hash_undefined)))) |
3967 | 0 | { |
3968 | | /* Make sure this symbol is output as a dynamic symbol. |
3969 | | Undefined weak syms won't yet be marked as dynamic. */ |
3970 | 0 | if (h->dynindx == -1 |
3971 | 0 | && !h->forced_local |
3972 | 0 | && !bfd_elf_link_record_dynamic_symbol (info, h)) |
3973 | 0 | return false; |
3974 | | |
3975 | | /* If that succeeded, we know we'll be keeping all the |
3976 | | relocs. */ |
3977 | 0 | if (h->dynindx != -1) |
3978 | 0 | goto keep; |
3979 | 0 | } |
3980 | | |
3981 | 0 | h->dyn_relocs = NULL; |
3982 | |
|
3983 | 0 | keep:; |
3984 | 0 | } |
3985 | | |
3986 | | /* Finally, allocate space. */ |
3987 | 0 | for (p = h->dyn_relocs; p != NULL; p = p->next) |
3988 | 0 | { |
3989 | 0 | asection *sreloc; |
3990 | |
|
3991 | 0 | sreloc = elf_section_data (p->sec)->sreloc; |
3992 | |
|
3993 | 0 | BFD_ASSERT (sreloc != NULL); |
3994 | |
|
3995 | 0 | sreloc->size += p->count * RELOC_SIZE (htab); |
3996 | 0 | } |
3997 | |
|
3998 | 0 | return true; |
3999 | 0 | } |
4000 | | |
4001 | | /* Find any dynamic relocs that apply to read-only sections. */ |
4002 | | |
4003 | | static bool |
4004 | | kvx_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf) |
4005 | 0 | { |
4006 | 0 | struct elf_dyn_relocs * p; |
4007 | |
|
4008 | 0 | for (p = h->dyn_relocs; p != NULL; p = p->next) |
4009 | 0 | { |
4010 | 0 | asection *s = p->sec; |
4011 | |
|
4012 | 0 | if (s != NULL && (s->flags & SEC_READONLY) != 0) |
4013 | 0 | { |
4014 | 0 | struct bfd_link_info *info = (struct bfd_link_info *) inf; |
4015 | |
|
4016 | 0 | info->flags |= DF_TEXTREL; |
4017 | 0 | info->callbacks->minfo (_("%pB: dynamic relocation against `%pT' in " |
4018 | 0 | "read-only section `%pA'\n"), |
4019 | 0 | s->owner, h->root.root.string, s); |
4020 | | |
4021 | | /* Not an error, just cut short the traversal. */ |
4022 | 0 | return false; |
4023 | 0 | } |
4024 | 0 | } |
4025 | 0 | return true; |
4026 | 0 | } |
4027 | | |
4028 | | /* This is the most important function of all. Innocuously named |
4029 | | though! */ |
4030 | | static bool |
4031 | | elf64_kvx_late_size_sections (bfd *output_bfd ATTRIBUTE_UNUSED, |
4032 | | struct bfd_link_info *info) |
4033 | 0 | { |
4034 | 0 | struct elf_kvx_link_hash_table *htab; |
4035 | 0 | bfd *dynobj; |
4036 | 0 | asection *s; |
4037 | 0 | bool relocs; |
4038 | 0 | bfd *ibfd; |
4039 | |
|
4040 | 0 | htab = elf_kvx_hash_table ((info)); |
4041 | 0 | dynobj = htab->root.dynobj; |
4042 | 0 | if (dynobj == NULL) |
4043 | 0 | return true; |
4044 | | |
4045 | 0 | if (htab->root.dynamic_sections_created) |
4046 | 0 | { |
4047 | 0 | if (bfd_link_executable (info) && !info->nointerp) |
4048 | 0 | { |
4049 | 0 | s = bfd_get_linker_section (dynobj, ".interp"); |
4050 | 0 | if (s == NULL) |
4051 | 0 | abort (); |
4052 | 0 | s->size = sizeof ELF_DYNAMIC_INTERPRETER; |
4053 | 0 | s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER; |
4054 | 0 | s->alloced = 1; |
4055 | 0 | } |
4056 | 0 | } |
4057 | | |
4058 | | /* Set up .got offsets for local syms, and space for local dynamic |
4059 | | relocs. */ |
4060 | 0 | for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) |
4061 | 0 | { |
4062 | 0 | struct elf_kvx_local_symbol *locals = NULL; |
4063 | 0 | Elf_Internal_Shdr *symtab_hdr; |
4064 | 0 | asection *srel; |
4065 | 0 | unsigned int i; |
4066 | |
|
4067 | 0 | if (!is_kvx_elf (ibfd)) |
4068 | 0 | continue; |
4069 | | |
4070 | 0 | for (s = ibfd->sections; s != NULL; s = s->next) |
4071 | 0 | { |
4072 | 0 | struct elf_dyn_relocs *p; |
4073 | |
|
4074 | 0 | for (p = (struct elf_dyn_relocs *) |
4075 | 0 | (elf_section_data (s)->local_dynrel); p != NULL; p = p->next) |
4076 | 0 | { |
4077 | 0 | if (!bfd_is_abs_section (p->sec) |
4078 | 0 | && bfd_is_abs_section (p->sec->output_section)) |
4079 | 0 | { |
4080 | | /* Input section has been discarded, either because |
4081 | | it is a copy of a linkonce section or due to |
4082 | | linker script /DISCARD/, so we'll be discarding |
4083 | | the relocs too. */ |
4084 | 0 | } |
4085 | 0 | else if (p->count != 0) |
4086 | 0 | { |
4087 | 0 | srel = elf_section_data (p->sec)->sreloc; |
4088 | 0 | srel->size += p->count * RELOC_SIZE (htab); |
4089 | 0 | if ((p->sec->output_section->flags & SEC_READONLY) != 0) |
4090 | 0 | info->flags |= DF_TEXTREL; |
4091 | 0 | } |
4092 | 0 | } |
4093 | 0 | } |
4094 | |
|
4095 | 0 | locals = elf_kvx_locals (ibfd); |
4096 | 0 | if (!locals) |
4097 | 0 | continue; |
4098 | | |
4099 | 0 | symtab_hdr = &elf_symtab_hdr (ibfd); |
4100 | 0 | srel = htab->root.srelgot; |
4101 | 0 | for (i = 0; i < symtab_hdr->sh_info; i++) |
4102 | 0 | { |
4103 | 0 | locals[i].got_offset = (bfd_vma) - 1; |
4104 | 0 | if (locals[i].got_refcount > 0) |
4105 | 0 | { |
4106 | 0 | unsigned got_type = locals[i].got_type; |
4107 | 0 | if (got_type & (GOT_TLS_GD | GOT_TLS_LD)) |
4108 | 0 | { |
4109 | 0 | locals[i].got_offset = htab->root.sgot->size; |
4110 | 0 | htab->root.sgot->size += GOT_ENTRY_SIZE * 2; |
4111 | 0 | } |
4112 | |
|
4113 | 0 | if (got_type & (GOT_NORMAL | GOT_TLS_IE )) |
4114 | 0 | { |
4115 | 0 | locals[i].got_offset = htab->root.sgot->size; |
4116 | 0 | htab->root.sgot->size += GOT_ENTRY_SIZE; |
4117 | 0 | } |
4118 | |
|
4119 | 0 | if (got_type == GOT_UNKNOWN) |
4120 | 0 | { |
4121 | 0 | } |
4122 | |
|
4123 | 0 | if (bfd_link_pic (info)) |
4124 | 0 | { |
4125 | 0 | if (got_type & GOT_TLS_GD) |
4126 | 0 | htab->root.srelgot->size += RELOC_SIZE (htab) * 2; |
4127 | |
|
4128 | 0 | if (got_type & GOT_TLS_IE |
4129 | 0 | || got_type & GOT_TLS_LD |
4130 | 0 | || got_type & GOT_NORMAL) |
4131 | 0 | htab->root.srelgot->size += RELOC_SIZE (htab); |
4132 | 0 | } |
4133 | 0 | } |
4134 | 0 | else |
4135 | 0 | { |
4136 | 0 | locals[i].got_refcount = (bfd_vma) - 1; |
4137 | 0 | } |
4138 | 0 | } |
4139 | 0 | } |
4140 | | |
4141 | | |
4142 | | /* Allocate global sym .plt and .got entries, and space for global |
4143 | | sym dynamic relocs. */ |
4144 | 0 | elf_link_hash_traverse (&htab->root, elf64_kvx_allocate_dynrelocs, |
4145 | 0 | info); |
4146 | | |
4147 | | /* For every jump slot reserved in the sgotplt, reloc_count is |
4148 | | incremented. However, when we reserve space for TLS descriptors, |
4149 | | it's not incremented, so in order to compute the space reserved |
4150 | | for them, it suffices to multiply the reloc count by the jump |
4151 | | slot size. */ |
4152 | |
|
4153 | 0 | if (htab->root.srelplt) |
4154 | 0 | htab->sgotplt_jump_table_size = kvx_compute_jump_table_size (htab); |
4155 | | |
4156 | | /* We now have determined the sizes of the various dynamic sections. |
4157 | | Allocate memory for them. */ |
4158 | 0 | relocs = false; |
4159 | 0 | for (s = dynobj->sections; s != NULL; s = s->next) |
4160 | 0 | { |
4161 | 0 | if ((s->flags & SEC_LINKER_CREATED) == 0) |
4162 | 0 | continue; |
4163 | | |
4164 | 0 | if (s == htab->root.splt |
4165 | 0 | || s == htab->root.sgot |
4166 | 0 | || s == htab->root.sgotplt |
4167 | 0 | || s == htab->root.iplt |
4168 | 0 | || s == htab->root.igotplt || s == htab->sdynbss) |
4169 | 0 | { |
4170 | | /* Strip this section if we don't need it; see the |
4171 | | comment below. */ |
4172 | 0 | } |
4173 | 0 | else if (startswith (bfd_section_name (s), ".rela")) |
4174 | 0 | { |
4175 | 0 | if (s->size != 0 && s != htab->root.srelplt) |
4176 | 0 | relocs = true; |
4177 | | |
4178 | | /* We use the reloc_count field as a counter if we need |
4179 | | to copy relocs into the output file. */ |
4180 | 0 | if (s != htab->root.srelplt) |
4181 | 0 | s->reloc_count = 0; |
4182 | 0 | } |
4183 | 0 | else |
4184 | 0 | { |
4185 | | /* It's not one of our sections, so don't allocate space. */ |
4186 | 0 | continue; |
4187 | 0 | } |
4188 | | |
4189 | 0 | if (s->size == 0) |
4190 | 0 | { |
4191 | | /* If we don't need this section, strip it from the |
4192 | | output file. This is mostly to handle .rela.bss and |
4193 | | .rela.plt. We must create both sections in |
4194 | | create_dynamic_sections, because they must be created |
4195 | | before the linker maps input sections to output |
4196 | | sections. The linker does that before |
4197 | | adjust_dynamic_symbol is called, and it is that |
4198 | | function which decides whether anything needs to go |
4199 | | into these sections. */ |
4200 | |
|
4201 | 0 | s->flags |= SEC_EXCLUDE; |
4202 | 0 | continue; |
4203 | 0 | } |
4204 | | |
4205 | 0 | if ((s->flags & SEC_HAS_CONTENTS) == 0) |
4206 | 0 | continue; |
4207 | | |
4208 | | /* Allocate memory for the section contents. We use bfd_zalloc |
4209 | | here in case unused entries are not reclaimed before the |
4210 | | section's contents are written out. This should not happen, |
4211 | | but this way if it does, we get a R_KVX_NONE reloc instead |
4212 | | of garbage. */ |
4213 | 0 | s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size); |
4214 | 0 | if (s->contents == NULL) |
4215 | 0 | return false; |
4216 | 0 | s->alloced = 1; |
4217 | 0 | } |
4218 | | |
4219 | 0 | if (htab->root.dynamic_sections_created) |
4220 | 0 | { |
4221 | | /* Add some entries to the .dynamic section. We fill in the |
4222 | | values later, in elf64_kvx_finish_dynamic_sections, but we |
4223 | | must add the entries now so that we get the correct size for |
4224 | | the .dynamic section. The DT_DEBUG entry is filled in by the |
4225 | | dynamic linker and used by the debugger. */ |
4226 | 0 | #define add_dynamic_entry(TAG, VAL) \ |
4227 | 0 | _bfd_elf_add_dynamic_entry (info, TAG, VAL) |
4228 | |
|
4229 | 0 | if (bfd_link_executable (info)) |
4230 | 0 | { |
4231 | 0 | if (!add_dynamic_entry (DT_DEBUG, 0)) |
4232 | 0 | return false; |
4233 | 0 | } |
4234 | | |
4235 | 0 | if (htab->root.splt->size != 0) |
4236 | 0 | { |
4237 | 0 | if (!add_dynamic_entry (DT_PLTGOT, 0) |
4238 | 0 | || !add_dynamic_entry (DT_PLTRELSZ, 0) |
4239 | 0 | || !add_dynamic_entry (DT_PLTREL, DT_RELA) |
4240 | 0 | || !add_dynamic_entry (DT_JMPREL, 0)) |
4241 | 0 | return false; |
4242 | 0 | } |
4243 | | |
4244 | 0 | if (relocs) |
4245 | 0 | { |
4246 | 0 | if (!add_dynamic_entry (DT_RELA, 0) |
4247 | 0 | || !add_dynamic_entry (DT_RELASZ, 0) |
4248 | 0 | || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab))) |
4249 | 0 | return false; |
4250 | | |
4251 | | /* If any dynamic relocs apply to a read-only section, |
4252 | | then we need a DT_TEXTREL entry. */ |
4253 | 0 | if ((info->flags & DF_TEXTREL) == 0) |
4254 | 0 | elf_link_hash_traverse (&htab->root, kvx_readonly_dynrelocs, |
4255 | 0 | info); |
4256 | |
|
4257 | 0 | if ((info->flags & DF_TEXTREL) != 0) |
4258 | 0 | { |
4259 | 0 | if (!add_dynamic_entry (DT_TEXTREL, 0)) |
4260 | 0 | return false; |
4261 | 0 | } |
4262 | 0 | } |
4263 | 0 | } |
4264 | 0 | #undef add_dynamic_entry |
4265 | | |
4266 | 0 | return true; |
4267 | 0 | } |
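(Illustrative note, not part of elf64-kvx.c: the GOT sizing rules applied to local symbols in elf64_kvx_late_size_sections above can be summarised as a small helper. This is a hypothetical sketch; GOT_ENTRY_SIZE and the GOT_* type flags are the ones already used in this file.)

/* Hypothetical helper mirroring the local-symbol GOT sizing above:
   GD/LD accesses reserve a pair of slots (module and offset for
   __tls_get_addr), while normal and IE accesses reserve one slot.  */
static bfd_vma
kvx_local_got_space_sketch (unsigned got_type)
{
  bfd_vma size = 0;
  if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
    size += GOT_ENTRY_SIZE * 2;
  if (got_type & (GOT_NORMAL | GOT_TLS_IE))
    size += GOT_ENTRY_SIZE;
  return size;
}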
4268 | | |
4269 | | static inline void |
4270 | | elf_kvx_update_plt_entry (bfd *output_bfd, |
4271 | | bfd_reloc_code_real_type r_type, |
4272 | | bfd_byte *plt_entry, bfd_vma value) |
4273 | 0 | { |
4274 | 0 | reloc_howto_type *howto = elf64_kvx_howto_from_bfd_reloc (r_type); |
4275 | 0 | BFD_ASSERT(howto != NULL); |
4276 | 0 | _bfd_kvx_elf_put_addend (output_bfd, plt_entry, r_type, howto, value); |
4277 | 0 | } |
4278 | | |
4279 | | static void |
4280 | | elf64_kvx_create_small_pltn_entry (struct elf_link_hash_entry *h, |
4281 | | struct elf_kvx_link_hash_table *htab, |
4282 | | bfd *output_bfd) |
4283 | 0 | { |
4284 | 0 | bfd_byte *plt_entry; |
4285 | 0 | bfd_vma plt_index; |
4286 | 0 | bfd_vma got_offset; |
4287 | 0 | bfd_vma gotplt_entry_address; |
4288 | 0 | bfd_vma plt_entry_address; |
4289 | 0 | Elf_Internal_Rela rela; |
4290 | 0 | bfd_byte *loc; |
4291 | 0 | asection *plt, *gotplt, *relplt; |
4292 | |
|
4293 | 0 | plt = htab->root.splt; |
4294 | 0 | gotplt = htab->root.sgotplt; |
4295 | 0 | relplt = htab->root.srelplt; |
4296 | | |
4297 | | /* Get the index in the procedure linkage table which |
4298 | | corresponds to this symbol. This is the index of this symbol |
4299 | | in all the symbols for which we are making plt entries. The |
4300 | | first entry in the procedure linkage table is reserved. |
4301 | | |
4302 | | Get the offset into the .got table of the entry that |
4303 | | corresponds to this function. Each .got entry is GOT_ENTRY_SIZE |
4304 | | bytes. The first three are reserved for the dynamic linker. |
4305 | | |
4306 | | For static executables, we don't reserve anything. */ |
4307 | |
|
4308 | 0 | if (plt == htab->root.splt) |
4309 | 0 | { |
4310 | 0 | plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size; |
4311 | 0 | got_offset = (plt_index + 3) * GOT_ENTRY_SIZE; |
4312 | 0 | } |
4313 | 0 | else |
4314 | 0 | { |
4315 | 0 | plt_index = h->plt.offset / htab->plt_entry_size; |
4316 | 0 | got_offset = plt_index * GOT_ENTRY_SIZE; |
4317 | 0 | } |
4318 | |
|
4319 | 0 | plt_entry = plt->contents + h->plt.offset; |
4320 | 0 | plt_entry_address = plt->output_section->vma |
4321 | 0 | + plt->output_offset + h->plt.offset; |
4322 | 0 | gotplt_entry_address = gotplt->output_section->vma + |
4323 | 0 | gotplt->output_offset + got_offset; |
4324 | | |
4325 | | /* Copy in the boiler-plate for the PLTn entry. */ |
4326 | 0 | memcpy (plt_entry, elf64_kvx_small_plt_entry, PLT_SMALL_ENTRY_SIZE); |
4327 | | |
4328 | | /* Patch the loading of the GOT entry, relative to the PLT entry |
4329 | | address. */ |
4330 | | |
4331 | | /* Use a 37-bit offset for both 32-bit and 64-bit modes. |
4332 | | Fill the LO10 of lw $r9 = 0[$r14]. */ |
4333 | 0 | elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_LO10, |
4334 | 0 | plt_entry+4, |
4335 | 0 | gotplt_entry_address - plt_entry_address); |
4336 | | |
4337 | | /* Fill the UP27 of lw $r9 = 0[$r14]. */ |
4338 | 0 | elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_UP27, |
4339 | 0 | plt_entry+8, |
4340 | 0 | gotplt_entry_address - plt_entry_address); |
4341 | |
|
4342 | 0 | rela.r_offset = gotplt_entry_address; |
4343 | | |
4344 | | /* Fill in the entry in the .rela.plt section. */ |
4345 | 0 | rela.r_info = ELF64_R_INFO (h->dynindx, R_KVX_JMP_SLOT); |
4346 | 0 | rela.r_addend = 0; |
4347 | | |
4348 | | /* Compute the relocation entry to use based on the PLT index and do |
4349 | | not adjust reloc_count. The reloc_count has already been adjusted |
4350 | | to account for this entry. */ |
4351 | 0 | loc = relplt->contents + plt_index * RELOC_SIZE (htab); |
4352 | 0 | bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); |
4353 | 0 | } |
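(Illustrative note, not part of elf64-kvx.c: the PLT index / .got.plt offset bookkeeping used above reduces to a simple formula. The helper below is a hypothetical sketch of that arithmetic, reusing the htab fields and GOT_ENTRY_SIZE already used by the function above.)

/* Hypothetical sketch: PLT entry N (after the PLT header) pairs with
   .got.plt slot N + 3, the first three slots being reserved for the
   dynamic linker.  */
static bfd_vma
kvx_gotplt_offset_sketch (bfd_vma plt_offset,
                          struct elf_kvx_link_hash_table *htab)
{
  bfd_vma plt_index = (plt_offset - htab->plt_header_size)
    / htab->plt_entry_size;
  return (plt_index + 3) * GOT_ENTRY_SIZE;
}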
4354 | | |
4355 | | /* Size sections even though they're not dynamic. We use it to set up |
4356 | | _TLS_MODULE_BASE_, if needed. */ |
4357 | | |
4358 | | static bool |
4359 | | elf64_kvx_early_size_sections (bfd *output_bfd, struct bfd_link_info *info) |
4360 | 0 | { |
4361 | 0 | asection *tls_sec; |
4362 | |
|
4363 | 0 | if (bfd_link_relocatable (info)) |
4364 | 0 | return true; |
4365 | | |
4366 | 0 | tls_sec = elf_hash_table (info)->tls_sec; |
4367 | |
|
4368 | 0 | if (tls_sec) |
4369 | 0 | { |
4370 | 0 | struct elf_link_hash_entry *tlsbase; |
4371 | |
|
4372 | 0 | tlsbase = elf_link_hash_lookup (elf_hash_table (info), |
4373 | 0 | "_TLS_MODULE_BASE_", true, true, false); |
4374 | |
|
4375 | 0 | if (tlsbase) |
4376 | 0 | { |
4377 | 0 | struct bfd_link_hash_entry *h = NULL; |
4378 | 0 | const struct elf_backend_data *bed = |
4379 | 0 | get_elf_backend_data (output_bfd); |
4380 | |
|
4381 | 0 | if (!(_bfd_generic_link_add_one_symbol |
4382 | 0 | (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL, |
4383 | 0 | tls_sec, 0, NULL, false, bed->collect, &h))) |
4384 | 0 | return false; |
4385 | | |
4386 | 0 | tlsbase->type = STT_TLS; |
4387 | 0 | tlsbase = (struct elf_link_hash_entry *) h; |
4388 | 0 | tlsbase->def_regular = 1; |
4389 | 0 | tlsbase->other = STV_HIDDEN; |
4390 | 0 | (*bed->elf_backend_hide_symbol) (info, tlsbase, true); |
4391 | 0 | } |
4392 | 0 | } |
4393 | | |
4394 | 0 | return true; |
4395 | 0 | } |
4396 | | |
4397 | | /* Finish up dynamic symbol handling. We set the contents of various |
4398 | | dynamic sections here. */ |
4399 | | static bool |
4400 | | elf64_kvx_finish_dynamic_symbol (bfd *output_bfd, |
4401 | | struct bfd_link_info *info, |
4402 | | struct elf_link_hash_entry *h, |
4403 | | Elf_Internal_Sym *sym) |
4404 | 0 | { |
4405 | 0 | struct elf_kvx_link_hash_table *htab; |
4406 | 0 | htab = elf_kvx_hash_table (info); |
4407 | |
|
4408 | 0 | if (h->plt.offset != (bfd_vma) - 1) |
4409 | 0 | { |
4410 | 0 | asection *plt = NULL, *gotplt = NULL, *relplt = NULL; |
4411 | | |
4412 | | /* This symbol has an entry in the procedure linkage table. Set |
4413 | | it up. */ |
4414 | |
|
4415 | 0 | if (htab->root.splt != NULL) |
4416 | 0 | { |
4417 | 0 | plt = htab->root.splt; |
4418 | 0 | gotplt = htab->root.sgotplt; |
4419 | 0 | relplt = htab->root.srelplt; |
4420 | 0 | } |
4421 | | |
4422 | | /* This symbol has an entry in the procedure linkage table. Set |
4423 | | it up. */ |
4424 | 0 | if ((h->dynindx == -1 |
4425 | 0 | && !((h->forced_local || bfd_link_executable (info)) |
4426 | 0 | && h->def_regular |
4427 | 0 | && h->type == STT_GNU_IFUNC)) |
4428 | 0 | || plt == NULL |
4429 | 0 | || gotplt == NULL |
4430 | 0 | || relplt == NULL) |
4431 | 0 | abort (); |
4432 | | |
4433 | 0 | elf64_kvx_create_small_pltn_entry (h, htab, output_bfd); |
4434 | 0 | if (!h->def_regular) |
4435 | 0 | { |
4436 | | /* Mark the symbol as undefined, rather than as defined in |
4437 | | the .plt section. */ |
4438 | 0 | sym->st_shndx = SHN_UNDEF; |
4439 | | /* If the symbol is weak we need to clear the value. |
4440 | | Otherwise, the PLT entry would provide a definition for |
4441 | | the symbol even if the symbol wasn't defined anywhere, |
4442 | | and so the symbol would never be NULL. Leave the value if |
4443 | | there were any relocations where pointer equality matters |
4444 | | (this is a clue for the dynamic linker, to make function |
4445 | | pointer comparisons work between an application and shared |
4446 | | library). */ |
4447 | 0 | if (!h->ref_regular_nonweak || !h->pointer_equality_needed) |
4448 | 0 | sym->st_value = 0; |
4449 | 0 | } |
4450 | 0 | } |
4451 | | |
4452 | 0 | if (h->got.offset != (bfd_vma) - 1 |
4453 | 0 | && elf_kvx_hash_entry (h)->got_type == GOT_NORMAL) |
4454 | 0 | { |
4455 | 0 | Elf_Internal_Rela rela; |
4456 | 0 | bfd_byte *loc; |
4457 | | |
4458 | | /* This symbol has an entry in the global offset table. Set it |
4459 | | up. */ |
4460 | 0 | if (htab->root.sgot == NULL || htab->root.srelgot == NULL) |
4461 | 0 | abort (); |
4462 | | |
4463 | 0 | rela.r_offset = (htab->root.sgot->output_section->vma |
4464 | 0 | + htab->root.sgot->output_offset |
4465 | 0 | + (h->got.offset & ~(bfd_vma) 1)); |
4466 | |
|
4467 | | #ifdef UGLY_DEBUG |
4468 | | printf("setting rela at offset 0x%x(0x%x + 0x%x + 0x%x) for %s\n", |
4469 | | rela.r_offset, |
4470 | | htab->root.sgot->output_section->vma, |
4471 | | htab->root.sgot->output_offset, |
4472 | | h->got.offset, |
4473 | | h->root.root.string); |
4474 | | #endif |
4475 | |
|
4476 | 0 | if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h)) |
4477 | 0 | { |
4478 | 0 | if (!h->def_regular) |
4479 | 0 | return false; |
4480 | | |
4481 | | /* In the case of a PLT-related GOT entry, it is not clear who is |
4482 | | supposed to set the LSB of the GOT entry... |
4483 | | kvx_calculate_got_entry_vma() would be a good candidate, |
4484 | | but it is not called currently, |
4485 | | so the BFD_ASSERT below is commented out for now. */ |
4486 | | // BFD_ASSERT ((h->got.offset & 1) != 0); |
4487 | 0 | rela.r_info = ELF64_R_INFO (0, R_KVX_RELATIVE); |
4488 | 0 | rela.r_addend = (h->root.u.def.value |
4489 | 0 | + h->root.u.def.section->output_section->vma |
4490 | 0 | + h->root.u.def.section->output_offset); |
4491 | 0 | } |
4492 | 0 | else |
4493 | 0 | { |
4494 | 0 | BFD_ASSERT ((h->got.offset & 1) == 0); |
4495 | 0 | bfd_put_64 (output_bfd, (bfd_vma) 0, |
4496 | 0 | htab->root.sgot->contents + h->got.offset); |
4497 | 0 | rela.r_info = ELF64_R_INFO (h->dynindx, R_KVX_GLOB_DAT); |
4498 | 0 | rela.r_addend = 0; |
4499 | 0 | } |
4500 | | |
4501 | 0 | loc = htab->root.srelgot->contents; |
4502 | 0 | loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab); |
4503 | 0 | bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); |
4504 | 0 | } |
4505 | | |
4506 | 0 | if (h->needs_copy) |
4507 | 0 | { |
4508 | 0 | Elf_Internal_Rela rela; |
4509 | 0 | bfd_byte *loc; |
4510 | | |
4511 | | /* This symbol needs a copy reloc. Set it up. */ |
4512 | |
|
4513 | 0 | if (h->dynindx == -1 |
4514 | 0 | || (h->root.type != bfd_link_hash_defined |
4515 | 0 | && h->root.type != bfd_link_hash_defweak) |
4516 | 0 | || htab->srelbss == NULL) |
4517 | 0 | abort (); |
4518 | | |
4519 | 0 | rela.r_offset = (h->root.u.def.value |
4520 | 0 | + h->root.u.def.section->output_section->vma |
4521 | 0 | + h->root.u.def.section->output_offset); |
4522 | 0 | rela.r_info = ELF64_R_INFO (h->dynindx, R_KVX_COPY); |
4523 | 0 | rela.r_addend = 0; |
4524 | 0 | loc = htab->srelbss->contents; |
4525 | 0 | loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab); |
4526 | 0 | bfd_elf64_swap_reloca_out (output_bfd, &rela, loc); |
4527 | 0 | } |
4528 | | |
4529 | | /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may |
4530 | | be NULL for local symbols. */ |
4531 | 0 | if (sym != NULL |
4532 | 0 | && (h == elf_hash_table (info)->hdynamic |
4533 | 0 | || h == elf_hash_table (info)->hgot)) |
4534 | 0 | sym->st_shndx = SHN_ABS; |
4535 | |
|
4536 | 0 | return true; |
4537 | 0 | } |
4538 | | |
4539 | | static void |
4540 | | elf64_kvx_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED, |
4541 | | struct elf_kvx_link_hash_table *htab) |
4542 | 0 | { |
4543 | 0 | memcpy (htab->root.splt->contents, elf64_kvx_small_plt0_entry, |
4544 | 0 | PLT_ENTRY_SIZE); |
4545 | 0 | elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = |
4546 | 0 | PLT_ENTRY_SIZE; |
4547 | 0 | } |
4548 | | |
4549 | | static bool |
4550 | | elf64_kvx_finish_dynamic_sections (bfd *output_bfd, |
4551 | | struct bfd_link_info *info) |
4552 | 0 | { |
4553 | 0 | struct elf_kvx_link_hash_table *htab; |
4554 | 0 | bfd *dynobj; |
4555 | 0 | asection *sdyn; |
4556 | |
4557 | 0 | htab = elf_kvx_hash_table (info); |
4558 | 0 | dynobj = htab->root.dynobj; |
4559 | 0 | sdyn = bfd_get_linker_section (dynobj, ".dynamic"); |
4560 | |
4561 | 0 | if (htab->root.dynamic_sections_created) |
4562 | 0 | { |
4563 | 0 | Elf64_External_Dyn *dyncon, *dynconend; |
4564 | |
4565 | 0 | if (sdyn == NULL || htab->root.sgot == NULL) |
4566 | 0 | abort (); |
4567 | | |
4568 | 0 | dyncon = (Elf64_External_Dyn *) sdyn->contents; |
4569 | 0 | dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size); |
4570 | 0 | for (; dyncon < dynconend; dyncon++) |
4571 | 0 | { |
4572 | 0 | Elf_Internal_Dyn dyn; |
4573 | 0 | asection *s; |
4574 | |
4575 | 0 | bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn); |
4576 | |
4577 | 0 | switch (dyn.d_tag) |
4578 | 0 | { |
4579 | 0 | default: |
4580 | 0 | continue; |
4581 | | |
4582 | 0 | case DT_PLTGOT: |
4583 | 0 | s = htab->root.sgotplt; |
4584 | 0 | dyn.d_un.d_ptr = s->output_section->vma + s->output_offset; |
4585 | 0 | break; |
4586 | | |
4587 | 0 | case DT_JMPREL: |
4588 | 0 | s = htab->root.srelplt; |
4589 | 0 | dyn.d_un.d_ptr = s->output_section->vma + s->output_offset; |
4590 | 0 | break; |
4591 | | |
4592 | 0 | case DT_PLTRELSZ: |
4593 | 0 | s = htab->root.srelplt; |
4594 | 0 | dyn.d_un.d_val = s->size; |
4595 | 0 | break; |
4596 | | |
4597 | 0 | case DT_RELASZ: |
4598 | | /* The procedure linkage table relocs (DT_JMPREL) should |
4599 | | not be included in the overall relocs (DT_RELA). |
4600 | | Therefore, we override the DT_RELASZ entry here to |
4601 | | make it not include the JMPREL relocs. Since the |
4602 | | linker script arranges for .rela.plt to follow all |
4603 | | other relocation sections, we don't have to worry |
4604 | | about changing the DT_RELA entry. */ |
4605 | 0 | if (htab->root.srelplt != NULL) |
4606 | 0 | { |
4607 | 0 | s = htab->root.srelplt; |
4608 | 0 | dyn.d_un.d_val -= s->size; |
4609 | 0 | } |
4610 | 0 | break; |
4611 | 0 | } |
4612 | | |
4613 | 0 | bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon); |
4614 | 0 | } |
4615 | |
4616 | 0 | } |
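 | | /* Illustration of the DT_RELASZ adjustment in the loop above, using |
 | |    made-up sizes: if the generic linker code computed DT_RELASZ as |
 | |    0x300 bytes covering every RELA section, and .rela.plt accounts for |
 | |    0x120 of that, the entry is rewritten as 0x300 - 0x120 = 0x1e0, |
 | |    while DT_PLTRELSZ still reports the full 0x120 for the JMPREL relocs.  */ |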
4617 | | |
4618 | | /* Fill in the special first entry in the procedure linkage table. */ |
4619 | 0 | if (htab->root.splt && htab->root.splt->size > 0) |
4620 | 0 | { |
4621 | 0 | elf64_kvx_init_small_plt0_entry (output_bfd, htab); |
4622 | |
4623 | 0 | elf_section_data (htab->root.splt->output_section)-> |
4624 | 0 | this_hdr.sh_entsize = htab->plt_entry_size; |
4625 | 0 | } |
4626 | |
4627 | 0 | if (htab->root.sgotplt) |
4628 | 0 | { |
4629 | 0 | if (bfd_is_abs_section (htab->root.sgotplt->output_section)) |
4630 | 0 | { |
4631 | 0 | (*_bfd_error_handler) |
4632 | 0 | (_("discarded output section: `%pA'"), htab->root.sgotplt); |
4633 | 0 | return false; |
4634 | 0 | } |
4635 | | |
4636 | | /* Fill in the first three entries in the global offset table. */ |
4637 | 0 | if (htab->root.sgotplt->size > 0) |
4638 | 0 | { |
4639 | 0 | bfd_put_64 (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents); |
4640 | | |
4641 | | /* Write GOT[1] and GOT[2], needed for the dynamic linker. */ |
4642 | 0 | bfd_put_64 (output_bfd, |
4643 | 0 | (bfd_vma) 0, |
4644 | 0 | htab->root.sgotplt->contents + GOT_ENTRY_SIZE); |
4645 | 0 | bfd_put_64 (output_bfd, |
4646 | 0 | (bfd_vma) 0, |
4647 | 0 | htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2); |
4648 | 0 | } |
4649 | |
4650 | 0 | if (htab->root.sgot) |
4651 | 0 | { |
4652 | 0 | if (htab->root.sgot->size > 0) |
4653 | 0 | { |
4654 | 0 | bfd_vma addr = |
4655 | 0 | sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0; |
4656 | 0 | bfd_put_64 (output_bfd, addr, htab->root.sgot->contents); |
4657 | 0 | } |
4658 | 0 | } |
4659 | |
4660 | 0 | elf_section_data (htab->root.sgotplt->output_section)-> |
4661 | 0 | this_hdr.sh_entsize = GOT_ENTRY_SIZE; |
4662 | 0 | } |
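 | | /* Note on the reserved entries written above, stated as the usual ELF |
 | |    convention rather than something enforced by this file: the three |
 | |    zeroed .got.plt slots are conventionally completed at run time by |
 | |    the dynamic linker (typically its link-map pointer and lazy-resolver |
 | |    entry in slots 1 and 2), while the address of _DYNAMIC is stored in |
 | |    the first word of .got rather than in .got.plt[0].  */ |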
4663 | | |
4664 | 0 | if (htab->root.sgot && htab->root.sgot->size > 0) |
4665 | 0 | elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize |
4666 | 0 | = GOT_ENTRY_SIZE; |
4667 | |
4668 | 0 | return true; |
4669 | 0 | } |
4670 | | |
4671 | | /* Return address for Ith PLT stub in section PLT, for relocation REL |
4672 | | or (bfd_vma) -1 if it should not be included. */ |
4673 | | |
4674 | | static bfd_vma |
4675 | | elf64_kvx_plt_sym_val (bfd_vma i, const asection *plt, |
4676 | | const arelent *rel ATTRIBUTE_UNUSED) |
4677 | 0 | { |
4678 | 0 | return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE; |
4679 | 0 | } |
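 | | /* Worked example for the address computation above: with the 32-byte |
 | |    PLT header (PLT_ENTRY_SIZE) and an illustrative PLT_SMALL_ENTRY_SIZE |
 | |    of 16 bytes, the stub for the third PLT relocation (i == 2) would be |
 | |    reported at plt->vma + 32 + 2 * 16 = plt->vma + 0x40.  */ |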
4680 | | |
4681 | | #define ELF_ARCH bfd_arch_kvx |
4682 | | #define ELF_TARGET_ID KVX_ELF_DATA |
4683 | | #define ELF_MACHINE_CODE EM_KVX |
4684 | | #define ELF_MAXPAGESIZE 0x10000 |
4685 | | #define ELF_MINPAGESIZE 0x1000 |
4686 | | #define ELF_COMMONPAGESIZE 0x1000 |
4687 | | |
4688 | | #define bfd_elf64_bfd_link_hash_table_create \ |
4689 | | elf64_kvx_link_hash_table_create |
4690 | | |
4691 | | #define bfd_elf64_bfd_merge_private_bfd_data \ |
4692 | | elf64_kvx_merge_private_bfd_data |
4693 | | |
4694 | | #define bfd_elf64_bfd_print_private_bfd_data \ |
4695 | | elf64_kvx_print_private_bfd_data |
4696 | | |
4697 | | #define bfd_elf64_bfd_reloc_type_lookup \ |
4698 | | elf64_kvx_reloc_type_lookup |
4699 | | |
4700 | | #define bfd_elf64_bfd_reloc_name_lookup \ |
4701 | | elf64_kvx_reloc_name_lookup |
4702 | | |
4703 | | #define bfd_elf64_bfd_set_private_flags \ |
4704 | | elf64_kvx_set_private_flags |
4705 | | |
4706 | | #define bfd_elf64_mkobject \ |
4707 | | elf64_kvx_mkobject |
4708 | | |
4709 | | #define bfd_elf64_new_section_hook \ |
4710 | | elf64_kvx_new_section_hook |
4711 | | |
4712 | | #define elf_backend_adjust_dynamic_symbol \ |
4713 | | elf64_kvx_adjust_dynamic_symbol |
4714 | | |
4715 | | #define elf_backend_early_size_sections \ |
4716 | | elf64_kvx_early_size_sections |
4717 | | |
4718 | | #define elf_backend_check_relocs \ |
4719 | | elf64_kvx_check_relocs |
4720 | | |
4721 | | #define elf_backend_copy_indirect_symbol \ |
4722 | | elf64_kvx_copy_indirect_symbol |
4723 | | |
4724 | | /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts |
4725 | | to them in our hash. */ |
4726 | | #define elf_backend_create_dynamic_sections \ |
4727 | | elf64_kvx_create_dynamic_sections |
4728 | | |
4729 | | #define elf_backend_init_index_section \ |
4730 | | _bfd_elf_init_2_index_sections |
4731 | | |
4732 | | #define elf_backend_finish_dynamic_sections \ |
4733 | | elf64_kvx_finish_dynamic_sections |
4734 | | |
4735 | | #define elf_backend_finish_dynamic_symbol \ |
4736 | | elf64_kvx_finish_dynamic_symbol |
4737 | | |
4738 | | #define elf_backend_object_p \ |
4739 | | elf64_kvx_object_p |
4740 | | |
4741 | | #define elf_backend_output_arch_local_syms \ |
4742 | | elf64_kvx_output_arch_local_syms |
4743 | | |
4744 | | #define elf_backend_plt_sym_val \ |
4745 | | elf64_kvx_plt_sym_val |
4746 | | |
4747 | | #define elf_backend_init_file_header \ |
4748 | | elf64_kvx_init_file_header |
4749 | | |
4750 | | #define elf_backend_init_process_headers \ |
4751 | | elf64_kvx_init_process_headers |
4752 | | |
4753 | | #define elf_backend_relocate_section \ |
4754 | | elf64_kvx_relocate_section |
4755 | | |
4756 | | #define elf_backend_reloc_type_class \ |
4757 | | elf64_kvx_reloc_type_class |
4758 | | |
4759 | | #define elf_backend_late_size_sections \ |
4760 | | elf64_kvx_late_size_sections |
4761 | | |
4762 | | #define elf_backend_can_refcount 1 |
4763 | | #define elf_backend_can_gc_sections 1 |
4764 | | #define elf_backend_plt_readonly 1 |
4765 | | #define elf_backend_want_got_plt 1 |
4766 | | #define elf_backend_want_plt_sym 0 |
4767 | | #define elf_backend_may_use_rel_p 0 |
4768 | | #define elf_backend_may_use_rela_p 1 |
4769 | | #define elf_backend_default_use_rela_p 1 |
4770 | | #define elf_backend_rela_normal 1 |
4771 | | #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3) |
4772 | | #define elf_backend_default_execstack 0 |
4773 | | #define elf_backend_extern_protected_data 1 |
4774 | | #define elf_backend_hash_symbol elf_kvx_hash_symbol |
4775 | | |
4776 | | #include "elf64-target.h" |