Coverage Report

Created: 2026-03-10 08:46

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/binutils-gdb/bfd/elf64-kvx.c
Line
Count
Source
1
#line 1 "elfnn-kvx.c"
2
/* KVX-specific support for 64-bit ELF.
3
   Copyright (C) 2009-2026 Free Software Foundation, Inc.
4
   Contributed by Kalray SA.
5
6
   This file is part of BFD, the Binary File Descriptor library.
7
8
   This program is free software; you can redistribute it and/or modify
9
   it under the terms of the GNU General Public License as published by
10
   the Free Software Foundation; either version 3 of the License, or
11
   (at your option) any later version.
12
13
   This program is distributed in the hope that it will be useful,
14
   but WITHOUT ANY WARRANTY; without even the implied warranty of
15
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
   GNU General Public License for more details.
17
18
   You should have received a copy of the GNU General Public License
19
   along with this program; see the file COPYING3. If not,
20
   see <http://www.gnu.org/licenses/>.  */
21
22
#include "sysdep.h"
23
#include "bfd.h"
24
#include "libiberty.h"
25
#include "libbfd.h"
26
#include "elf-bfd.h"
27
#include "bfdlink.h"
28
#include "objalloc.h"
29
#include "elf/kvx.h"
30
#include "elfxx-kvx.h"
31
32
0
#define ARCH_SIZE 64
33
34
#if ARCH_SIZE == 64
35
0
#define LOG_FILE_ALIGN  3
36
#endif
37
38
#if ARCH_SIZE == 32
39
#define LOG_FILE_ALIGN  2
40
#endif
41
42
#define IS_KVX_TLS_RELOC(R_TYPE)      \
43
0
  ((R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_LO10  \
44
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_UP27  \
45
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_LO10  \
46
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_UP27  \
47
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_EX6  \
48
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10  \
49
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27  \
50
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10  \
51
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27  \
52
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6  \
53
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_LO10  \
54
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_UP27  \
55
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_LO10  \
56
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_UP27  \
57
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_EX6  \
58
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_LO10  \
59
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_UP27  \
60
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_LO10  \
61
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_UP27  \
62
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_EX6  \
63
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_LO10  \
64
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_UP27  \
65
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_LO10  \
66
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_UP27  \
67
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_EX6  \
68
0
   )
69
70
0
#define IS_KVX_TLS_RELAX_RELOC(R_TYPE) 0
71
72
0
#define ELIMINATE_COPY_RELOCS 0
73
74
/* Return size of a relocation entry.  HTAB is the bfd's
75
   elf_kvx_link_hash_entry.  */
76
0
#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
77
78
/* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32.  */
79
0
#define GOT_ENTRY_SIZE                  (ARCH_SIZE / 8)
80
0
#define PLT_ENTRY_SIZE                  (32)
81
82
0
#define PLT_SMALL_ENTRY_SIZE            (4*4)
83
84
/* Encoding of the nop instruction */
85
0
#define INSN_NOP 0x00f0037f
86
87
#define kvx_compute_jump_table_size(htab)   \
88
0
  (((htab)->root.srelplt == NULL) ? 0      \
89
0
   : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
90
91
static const bfd_byte elf64_kvx_small_plt0_entry[PLT_ENTRY_SIZE] =
92
{
93
 /* FIXME KVX: no first entry, not used yet */
94
  0
95
};
96
97
/* Per function entry in a procedure linkage table looks like this
98
   if the distance between the PLTGOT and the PLT is < 4GB use
99
   these PLT entries.  */
100
static const bfd_byte elf64_kvx_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
101
{
102
  0x10, 0x00, 0xc4, 0x0f,       /* get $r16 = $pc     ;; */
103
#if ARCH_SIZE == 32
104
  0x10, 0x00, 0x40, 0xb0,       /* lwz $r16 = 0[$r16]   ;; */
105
#else
106
  0x10, 0x00, 0x40, 0xb8,       /* ld $r16 = 0[$r16] ;; */
107
#endif
108
  0x00, 0x00, 0x00, 0x18,       /* upper 27 bits for LSU */
109
  0x10, 0x00, 0xd8, 0x0f, /* igoto $r16          ;; */
110
};
111
112
/* Long stub use 43bits format of make. */
113
static const uint32_t elf64_kvx_long_branch_stub[] =
114
{
115
  0xe0400000,      /* make $r16 = LO10<emm43> EX6<imm43> */
116
  0x00000000,      /* UP27<imm43> ;; */
117
  0x0fd80010,      /* igoto "r16  ;; */
118
};
119
120
#define elf_info_to_howto               elf64_kvx_info_to_howto
121
#define elf_info_to_howto_rel           elf64_kvx_info_to_howto
122
123
7
#define KVX_ELF_ABI_VERSION   0
124
125
/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
126
#define ALL_ONES (~ (bfd_vma) 0)
127
128
/* Indexed by the bfd interal reloc enumerators.
129
   Therefore, the table needs to be synced with BFD_RELOC_KVX_*
130
   in reloc.c.   */
131
132
#define KVX_KV3_V1_KV3_V2_KV4_V1
133
#include "elfxx-kvx-relocs.h"
134
#undef KVX_KV3_V1_KV3_V2_KV4_V1
135
136
/* Given HOWTO, return the bfd internal relocation enumerator.  */
137
138
static bfd_reloc_code_real_type
139
elf64_kvx_bfd_reloc_from_howto (reloc_howto_type *howto)
140
0
{
141
0
  const int size = (int) ARRAY_SIZE (elf_kvx_howto_table);
142
0
  const ptrdiff_t offset = howto - elf_kvx_howto_table;
143
144
0
  if (offset >= 0 && offset < size)
145
0
    return BFD_RELOC_KVX_RELOC_START + offset + 1;
146
147
0
  return BFD_RELOC_KVX_RELOC_START + 1;
148
0
}
149
150
/* Given R_TYPE, return the bfd internal relocation enumerator.  */
151
152
static bfd_reloc_code_real_type
153
elf64_kvx_bfd_reloc_from_type (bfd *abfd ATTRIBUTE_UNUSED, unsigned int r_type)
154
668
{
155
668
  static bool initialized_p = false;
156
  /* Indexed by R_TYPE, values are offsets in the howto_table.  */
157
668
  static unsigned int offsets[R_KVX_end];
158
159
668
  if (!initialized_p)
160
4
    {
161
4
      unsigned int i;
162
163
336
      for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
164
332
  offsets[elf_kvx_howto_table[i].type] = i;
165
166
4
      initialized_p = true;
167
4
    }
168
169
  /* PR 17512: file: b371e70a.  */
170
668
  if (r_type >= R_KVX_end)
171
71
    {
172
71
      bfd_set_error (bfd_error_bad_value);
173
71
      return BFD_RELOC_KVX_RELOC_END;
174
71
    }
175
176
597
  return (BFD_RELOC_KVX_RELOC_START + 1) + offsets[r_type];
177
668
}
178
179
struct elf_kvx_reloc_map
180
{
181
  bfd_reloc_code_real_type from;
182
  bfd_reloc_code_real_type to;
183
};
184
185
/* Map bfd generic reloc to KVX-specific reloc.  */
186
static const struct elf_kvx_reloc_map elf_kvx_reloc_map[] =
187
{
188
  {BFD_RELOC_NONE, BFD_RELOC_KVX_NONE},
189
190
  /* Basic data relocations.  */
191
  {BFD_RELOC_CTOR, BFD_RELOC_KVX_64},
192
  {BFD_RELOC_64, BFD_RELOC_KVX_64},
193
  {BFD_RELOC_32, BFD_RELOC_KVX_32},
194
  {BFD_RELOC_16, BFD_RELOC_KVX_16},
195
  {BFD_RELOC_8,  BFD_RELOC_KVX_8},
196
197
  {BFD_RELOC_64_PCREL, BFD_RELOC_KVX_64_PCREL},
198
  {BFD_RELOC_32_PCREL, BFD_RELOC_KVX_32_PCREL},
199
};
200
201
/* Given the bfd internal relocation enumerator in CODE, return the
202
   corresponding howto entry.  */
203
204
static reloc_howto_type *
205
elf64_kvx_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
206
668
{
207
668
  unsigned int i;
208
209
  /* Convert bfd generic reloc to KVX-specific reloc.  */
210
668
  if (code < BFD_RELOC_KVX_RELOC_START || code > BFD_RELOC_KVX_RELOC_END)
211
0
    for (i = 0; i < ARRAY_SIZE (elf_kvx_reloc_map) ; i++)
212
0
      if (elf_kvx_reloc_map[i].from == code)
213
0
  {
214
0
    code = elf_kvx_reloc_map[i].to;
215
0
    break;
216
0
  }
217
218
668
  if (code > BFD_RELOC_KVX_RELOC_START && code < BFD_RELOC_KVX_RELOC_END)
219
597
      return &elf_kvx_howto_table[code - (BFD_RELOC_KVX_RELOC_START + 1)];
220
221
71
  return NULL;
222
668
}
223
224
static reloc_howto_type *
225
elf64_kvx_howto_from_type (bfd *abfd, unsigned int r_type)
226
668
{
227
668
  bfd_reloc_code_real_type val;
228
668
  reloc_howto_type *howto;
229
230
#if ARCH_SIZE == 32
231
  if (r_type > 256)
232
    {
233
      bfd_set_error (bfd_error_bad_value);
234
      return NULL;
235
    }
236
#endif
237
238
668
  val = elf64_kvx_bfd_reloc_from_type (abfd, r_type);
239
668
  howto = elf64_kvx_howto_from_bfd_reloc (val);
240
241
668
  if (howto != NULL)
242
597
    return howto;
243
244
71
  bfd_set_error (bfd_error_bad_value);
245
71
  return NULL;
246
668
}
247
248
static bool
249
elf64_kvx_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
250
       Elf_Internal_Rela *elf_reloc)
251
668
{
252
668
  unsigned int r_type;
253
254
668
  r_type = ELF64_R_TYPE (elf_reloc->r_info);
255
668
  bfd_reloc->howto = elf64_kvx_howto_from_type (abfd, r_type);
256
257
668
  if (bfd_reloc->howto == NULL)
258
71
    {
259
      /* xgettext:c-format */
260
71
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
261
71
        abfd, r_type);
262
71
      return false;
263
71
    }
264
597
  return true;
265
668
}
266
267
static reloc_howto_type *
268
elf64_kvx_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
269
           bfd_reloc_code_real_type code)
270
0
{
271
0
  reloc_howto_type *howto = elf64_kvx_howto_from_bfd_reloc (code);
272
273
0
  if (howto != NULL)
274
0
    return howto;
275
276
0
  bfd_set_error (bfd_error_bad_value);
277
0
  return NULL;
278
0
}
279
280
static reloc_howto_type *
281
elf64_kvx_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
282
           const char *r_name)
283
0
{
284
0
  unsigned int i;
285
286
0
  for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
287
0
    if (elf_kvx_howto_table[i].name != NULL
288
0
  && strcasecmp (elf_kvx_howto_table[i].name, r_name) == 0)
289
0
      return &elf_kvx_howto_table[i];
290
291
0
  return NULL;
292
0
}
293
294
#define TARGET_LITTLE_SYM               kvx_elf64_vec
295
#define TARGET_LITTLE_NAME              "elf64-kvx"
296
297
/* The linker script knows the section names for placement.
298
   The entry_names are used to do simple name mangling on the stubs.
299
   Given a function name, and its type, the stub can be found. The
300
   name can be changed. The only requirement is the %s be present.  */
301
0
#define STUB_ENTRY_NAME   "__%s_veneer"
302
303
/* The name of the dynamic interpreter.  This is put in the .interp
304
   section.  */
305
0
#define ELF_DYNAMIC_INTERPRETER     "/lib/ld.so.1"
306
307
308
/* PCREL 27 is signed-extended and scaled by 4 */
309
#define KVX_MAX_FWD_CALL_OFFSET \
310
0
  (((1 << 26) - 1) << 2)
311
#define KVX_MAX_BWD_CALL_OFFSET \
312
0
  (-((1 << 26) << 2))
313
314
/* Check that the destination of the call is within the PCREL27
315
   range. */
316
static int
317
kvx_valid_call_p (bfd_vma value, bfd_vma place)
318
0
{
319
0
  bfd_signed_vma offset = (bfd_signed_vma) (value - place);
320
0
  return (offset <= KVX_MAX_FWD_CALL_OFFSET
321
0
    && offset >= KVX_MAX_BWD_CALL_OFFSET);
322
0
}
323
324
/* Section name for stubs is the associated section name plus this
325
   string.  */
326
0
#define STUB_SUFFIX ".stub"
327
328
enum elf_kvx_stub_type
329
{
330
  kvx_stub_none,
331
  kvx_stub_long_branch,
332
};
333
334
struct elf_kvx_stub_hash_entry
335
{
336
  /* Base hash table entry structure.  */
337
  struct bfd_hash_entry root;
338
339
  /* The stub section.  */
340
  asection *stub_sec;
341
342
  /* Offset within stub_sec of the beginning of this stub.  */
343
  bfd_vma stub_offset;
344
345
  /* Given the symbol's value and its section we can determine its final
346
     value when building the stubs (so the stub knows where to jump).  */
347
  bfd_vma target_value;
348
  asection *target_section;
349
350
  enum elf_kvx_stub_type stub_type;
351
352
  /* The symbol table entry, if any, that this was derived from.  */
353
  struct elf_kvx_link_hash_entry *h;
354
355
  /* Destination symbol type */
356
  unsigned char st_type;
357
358
  /* Where this stub is being called from, or, in the case of combined
359
     stub sections, the first input section in the group.  */
360
  asection *id_sec;
361
362
  /* The name for the local symbol at the start of this stub.  The
363
     stub name in the hash table has to be unique; this does not, so
364
     it can be friendlier.  */
365
  char *output_name;
366
};
367
368
/* Used to build a map of a section.  This is required for mixed-endian
369
   code/data.  */
370
371
typedef struct elf_elf_section_map
372
{
373
  bfd_vma vma;
374
  char type;
375
}
376
elf_kvx_section_map;
377
378
379
typedef struct _kvx_elf_section_data
380
{
381
  struct bfd_elf_section_data elf;
382
  unsigned int mapcount;
383
  unsigned int mapsize;
384
  elf_kvx_section_map *map;
385
}
386
_kvx_elf_section_data;
387
388
#define elf_kvx_section_data(sec) \
389
  ((_kvx_elf_section_data *) elf_section_data (sec))
390
391
struct elf_kvx_local_symbol
392
{
393
  unsigned int got_type;
394
  bfd_signed_vma got_refcount;
395
  bfd_vma got_offset;
396
};
397
398
struct elf_kvx_obj_tdata
399
{
400
  struct elf_obj_tdata root;
401
402
  /* local symbol descriptors */
403
  struct elf_kvx_local_symbol *locals;
404
405
  /* Zero to warn when linking objects with incompatible enum sizes.  */
406
  int no_enum_size_warning;
407
408
  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
409
  int no_wchar_size_warning;
410
};
411
412
#define elf_kvx_tdata(bfd)        \
413
0
  ((struct elf_kvx_obj_tdata *) (bfd)->tdata.any)
414
415
0
#define elf_kvx_locals(bfd) (elf_kvx_tdata (bfd)->locals)
416
417
#define is_kvx_elf(bfd)       \
418
0
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour  \
419
0
   && elf_tdata (bfd) != NULL        \
420
0
   && elf_object_id (bfd) == KVX_ELF_DATA)
421
422
static bool
423
elf64_kvx_mkobject (bfd *abfd)
424
14.1k
{
425
14.1k
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_kvx_obj_tdata));
426
14.1k
}
427
428
#define elf_kvx_hash_entry(ent) \
429
0
  ((struct elf_kvx_link_hash_entry *)(ent))
430
431
0
#define GOT_UNKNOWN    0
432
0
#define GOT_NORMAL     1
433
434
0
#define GOT_TLS_GD     2
435
0
#define GOT_TLS_IE     4
436
0
#define GOT_TLS_LD     8
437
438
/* KVX ELF linker hash entry.  */
439
struct elf_kvx_link_hash_entry
440
{
441
  struct elf_link_hash_entry root;
442
443
  /* Since PLT entries have variable size, we need to record the
444
     index into .got.plt instead of recomputing it from the PLT
445
     offset.  */
446
  bfd_signed_vma plt_got_offset;
447
448
  /* Bit mask representing the type of GOT entry(s) if any required by
449
     this symbol.  */
450
  unsigned int got_type;
451
452
  /* A pointer to the most recently used stub hash entry against this
453
     symbol.  */
454
  struct elf_kvx_stub_hash_entry *stub_cache;
455
};
456
457
/* Get the KVX elf linker hash table from a link_info structure.  */
458
#define elf_kvx_hash_table(info)          \
459
0
  ((struct elf_kvx_link_hash_table *) ((info)->hash))
460
461
#define kvx_stub_hash_lookup(table, string, create, copy)   \
462
0
  ((struct elf_kvx_stub_hash_entry *)       \
463
0
   bfd_hash_lookup ((table), (string), (create), (copy)))
464
465
/* KVX ELF linker hash table.  */
466
struct elf_kvx_link_hash_table
467
{
468
  /* The main hash table.  */
469
  struct elf_link_hash_table root;
470
471
  /* Nonzero to force PIC branch veneers.  */
472
  int pic_veneer;
473
474
  /* The number of bytes in the initial entry in the PLT.  */
475
  bfd_size_type plt_header_size;
476
477
  /* The number of bytes in the subsequent PLT etries.  */
478
  bfd_size_type plt_entry_size;
479
480
  /* The bytes of the subsequent PLT entry.  */
481
  const bfd_byte *plt_entry;
482
483
  /* Short-cuts to get to dynamic linker sections.  */
484
  asection *sdynbss;
485
  asection *srelbss;
486
487
  /* Small local sym cache.  */
488
  struct sym_cache sym_cache;
489
490
  /* For convenience in allocate_dynrelocs.  */
491
  bfd *obfd;
492
493
  /* The amount of space used by the reserved portion of the sgotplt
494
     section, plus whatever space is used by the jump slots.  */
495
  bfd_vma sgotplt_jump_table_size;
496
497
  /* The stub hash table.  */
498
  struct bfd_hash_table stub_hash_table;
499
500
  /* Linker stub bfd.  */
501
  bfd *stub_bfd;
502
503
  /* Linker call-backs.  */
504
  asection *(*add_stub_section) (const char *, asection *);
505
  void (*layout_sections_again) (void);
506
507
  /* Array to keep track of which stub sections have been created, and
508
     information on stub grouping.  */
509
  struct map_stub
510
  {
511
    /* This is the section to which stubs in the group will be
512
       attached.  */
513
    asection *link_sec;
514
    /* The stub section.  */
515
    asection *stub_sec;
516
  } *stub_group;
517
518
  /* Assorted information used by elf64_kvx_size_stubs.  */
519
  unsigned int bfd_count;
520
  unsigned int top_index;
521
  asection **input_list;
522
};
523
524
/* Create an entry in an KVX ELF linker hash table.  */
525
526
static struct bfd_hash_entry *
527
elf64_kvx_link_hash_newfunc (struct bfd_hash_entry *entry,
528
           struct bfd_hash_table *table,
529
           const char *string)
530
0
{
531
0
  struct elf_kvx_link_hash_entry *ret =
532
0
    (struct elf_kvx_link_hash_entry *) entry;
533
534
  /* Allocate the structure if it has not already been allocated by a
535
     subclass.  */
536
0
  if (ret == NULL)
537
0
    ret = bfd_hash_allocate (table,
538
0
           sizeof (struct elf_kvx_link_hash_entry));
539
0
  if (ret == NULL)
540
0
    return (struct bfd_hash_entry *) ret;
541
542
  /* Call the allocation method of the superclass.  */
543
0
  ret = ((struct elf_kvx_link_hash_entry *)
544
0
   _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
545
0
             table, string));
546
0
  if (ret != NULL)
547
0
    {
548
0
      ret->got_type = GOT_UNKNOWN;
549
0
      ret->plt_got_offset = (bfd_vma) - 1;
550
0
      ret->stub_cache = NULL;
551
0
    }
552
553
0
  return (struct bfd_hash_entry *) ret;
554
0
}
555
556
/* Initialize an entry in the stub hash table.  */
557
558
static struct bfd_hash_entry *
559
stub_hash_newfunc (struct bfd_hash_entry *entry,
560
       struct bfd_hash_table *table, const char *string)
561
0
{
562
  /* Allocate the structure if it has not already been allocated by a
563
     subclass.  */
564
0
  if (entry == NULL)
565
0
    {
566
0
      entry = bfd_hash_allocate (table,
567
0
         sizeof (struct
568
0
           elf_kvx_stub_hash_entry));
569
0
      if (entry == NULL)
570
0
  return entry;
571
0
    }
572
573
  /* Call the allocation method of the superclass.  */
574
0
  entry = bfd_hash_newfunc (entry, table, string);
575
0
  if (entry != NULL)
576
0
    {
577
0
      struct elf_kvx_stub_hash_entry *eh;
578
579
      /* Initialize the local fields.  */
580
0
      eh = (struct elf_kvx_stub_hash_entry *) entry;
581
0
      eh->stub_sec = NULL;
582
0
      eh->stub_offset = 0;
583
0
      eh->target_value = 0;
584
0
      eh->target_section = NULL;
585
0
      eh->stub_type = kvx_stub_none;
586
0
      eh->h = NULL;
587
0
      eh->id_sec = NULL;
588
0
    }
589
590
0
  return entry;
591
0
}
592
593
/* Copy the extra info we tack onto an elf_link_hash_entry.  */
594
595
static void
596
elf64_kvx_copy_indirect_symbol (struct bfd_link_info *info,
597
        struct elf_link_hash_entry *dir,
598
        struct elf_link_hash_entry *ind)
599
0
{
600
0
  struct elf_kvx_link_hash_entry *edir, *eind;
601
602
0
  edir = (struct elf_kvx_link_hash_entry *) dir;
603
0
  eind = (struct elf_kvx_link_hash_entry *) ind;
604
605
0
  if (ind->root.type == bfd_link_hash_indirect)
606
0
    {
607
      /* Copy over PLT info.  */
608
0
      if (dir->got.refcount <= 0)
609
0
  {
610
0
    edir->got_type = eind->got_type;
611
0
    eind->got_type = GOT_UNKNOWN;
612
0
  }
613
0
    }
614
615
0
  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
616
0
}
617
618
/* Destroy a KVX elf linker hash table.  */
619
620
static void
621
elf64_kvx_link_hash_table_free (bfd *obfd)
622
0
{
623
0
  struct elf_kvx_link_hash_table *ret
624
0
    = (struct elf_kvx_link_hash_table *) obfd->link.hash;
625
626
0
  bfd_hash_table_free (&ret->stub_hash_table);
627
0
  _bfd_elf_link_hash_table_free (obfd);
628
0
}
629
630
/* Create a KVX elf linker hash table.  */
631
632
static struct bfd_link_hash_table *
633
elf64_kvx_link_hash_table_create (bfd *abfd)
634
0
{
635
0
  struct elf_kvx_link_hash_table *ret;
636
0
  bfd_size_type amt = sizeof (struct elf_kvx_link_hash_table);
637
638
0
  ret = bfd_zmalloc (amt);
639
0
  if (ret == NULL)
640
0
    return NULL;
641
642
0
  if (!_bfd_elf_link_hash_table_init
643
0
      (&ret->root, abfd, elf64_kvx_link_hash_newfunc,
644
0
       sizeof (struct elf_kvx_link_hash_entry)))
645
0
    {
646
0
      free (ret);
647
0
      return NULL;
648
0
    }
649
650
0
  ret->plt_header_size = PLT_ENTRY_SIZE;
651
0
  ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
652
0
  ret->plt_entry = elf64_kvx_small_plt_entry;
653
654
0
  ret->obfd = abfd;
655
656
0
  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
657
0
          sizeof (struct elf_kvx_stub_hash_entry)))
658
0
    {
659
0
      _bfd_elf_link_hash_table_free (abfd);
660
0
      return NULL;
661
0
    }
662
663
0
  ret->root.root.hash_table_free = elf64_kvx_link_hash_table_free;
664
665
0
  return &ret->root.root;
666
0
}
667
668
static bfd_reloc_status_type
669
kvx_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
670
        bfd_vma offset, bfd_vma value)
671
0
{
672
0
  reloc_howto_type *howto;
673
674
0
  howto = elf64_kvx_howto_from_type (input_bfd, r_type);
675
0
  r_type = elf64_kvx_bfd_reloc_from_type (input_bfd, r_type);
676
0
  return _bfd_kvx_elf_put_addend (input_bfd,
677
0
          input_section->contents + offset, r_type,
678
0
          howto, value);
679
0
}
680
681
/* Determine the type of stub needed, if any, for a call.  */
682
683
static enum elf_kvx_stub_type
684
kvx_type_of_stub (asection *input_sec,
685
      const Elf_Internal_Rela *rel,
686
      asection *sym_sec,
687
      unsigned char st_type,
688
      bfd_vma destination)
689
0
{
690
0
  bfd_vma location;
691
0
  bfd_signed_vma branch_offset;
692
0
  unsigned int r_type;
693
0
  enum elf_kvx_stub_type stub_type = kvx_stub_none;
694
695
0
  if (st_type != STT_FUNC
696
0
      && (sym_sec == input_sec))
697
0
    return stub_type;
698
699
  /* Determine where the call point is.  */
700
0
  location = (input_sec->output_offset
701
0
        + input_sec->output_section->vma + rel->r_offset);
702
703
0
  branch_offset = (bfd_signed_vma) (destination - location);
704
705
0
  r_type = ELF64_R_TYPE (rel->r_info);
706
707
  /* We don't want to redirect any old unconditional jump in this way,
708
     only one which is being used for a sibcall, where it is
709
     acceptable for the R16 and R17 registers to be clobbered.  */
710
0
  if (r_type == R_KVX_PCREL27
711
0
      && (branch_offset > KVX_MAX_FWD_CALL_OFFSET
712
0
    || branch_offset < KVX_MAX_BWD_CALL_OFFSET))
713
0
    {
714
0
      stub_type = kvx_stub_long_branch;
715
0
    }
716
717
0
  return stub_type;
718
0
}
719
720
/* Build a name for an entry in the stub hash table.  */
721
722
static char *
723
elf64_kvx_stub_name (const asection *input_section,
724
         const asection *sym_sec,
725
         const struct elf_kvx_link_hash_entry *hash,
726
         const Elf_Internal_Rela *rel)
727
0
{
728
0
  char *stub_name;
729
0
  bfd_size_type len;
730
731
0
  if (hash)
732
0
    {
733
0
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
734
0
      stub_name = bfd_malloc (len);
735
0
      if (stub_name != NULL)
736
0
  snprintf (stub_name, len, "%08x_%s+%" PRIx64 "x",
737
0
      (unsigned int) input_section->id,
738
0
      hash->root.root.root.string,
739
0
      (uint64_t) rel->r_addend);
740
0
    }
741
0
  else
742
0
    {
743
0
      len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
744
0
      stub_name = bfd_malloc (len);
745
0
      if (stub_name != NULL)
746
0
  snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64 "x",
747
0
      (unsigned int) input_section->id,
748
0
      (unsigned int) sym_sec->id,
749
0
      (unsigned int) ELF64_R_SYM (rel->r_info),
750
0
      (uint64_t) rel->r_addend);
751
0
    }
752
753
0
  return stub_name;
754
0
}
755
756
/* Return true if symbol H should be hashed in the `.gnu.hash' section.  For
757
   executable PLT slots where the executable never takes the address of those
758
   functions, the function symbols are not added to the hash table.  */
759
760
static bool
761
elf_kvx_hash_symbol (struct elf_link_hash_entry *h)
762
0
{
763
0
  if (h->plt.offset != (bfd_vma) -1
764
0
      && !h->def_regular
765
0
      && !h->pointer_equality_needed)
766
0
    return false;
767
768
0
  return _bfd_elf_hash_symbol (h);
769
0
}
770
771
772
/* Look up an entry in the stub hash.  Stub entries are cached because
773
   creating the stub name takes a bit of time.  */
774
775
static struct elf_kvx_stub_hash_entry *
776
elf64_kvx_get_stub_entry (const asection *input_section,
777
        const asection *sym_sec,
778
        struct elf_link_hash_entry *hash,
779
        const Elf_Internal_Rela *rel,
780
        struct elf_kvx_link_hash_table *htab)
781
0
{
782
0
  struct elf_kvx_stub_hash_entry *stub_entry;
783
0
  struct elf_kvx_link_hash_entry *h =
784
0
    (struct elf_kvx_link_hash_entry *) hash;
785
0
  const asection *id_sec;
786
787
0
  if ((input_section->flags & SEC_CODE) == 0)
788
0
    return NULL;
789
790
  /* If this input section is part of a group of sections sharing one
791
     stub section, then use the id of the first section in the group.
792
     Stub names need to include a section id, as there may well be
793
     more than one stub used to reach say, printf, and we need to
794
     distinguish between them.  */
795
0
  id_sec = htab->stub_group[input_section->id].link_sec;
796
797
0
  if (h != NULL && h->stub_cache != NULL
798
0
      && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
799
0
    {
800
0
      stub_entry = h->stub_cache;
801
0
    }
802
0
  else
803
0
    {
804
0
      char *stub_name;
805
806
0
      stub_name = elf64_kvx_stub_name (id_sec, sym_sec, h, rel);
807
0
      if (stub_name == NULL)
808
0
  return NULL;
809
810
0
      stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table,
811
0
           stub_name, false, false);
812
0
      if (h != NULL)
813
0
  h->stub_cache = stub_entry;
814
815
0
      free (stub_name);
816
0
    }
817
818
0
  return stub_entry;
819
0
}
820
821
822
/* Create a stub section.  */
823
824
static asection *
825
_bfd_kvx_create_stub_section (asection *section,
826
            struct elf_kvx_link_hash_table *htab)
827
828
0
{
829
0
  size_t namelen;
830
0
  bfd_size_type len;
831
0
  char *s_name;
832
833
0
  namelen = strlen (section->name);
834
0
  len = namelen + sizeof (STUB_SUFFIX);
835
0
  s_name = bfd_alloc (htab->stub_bfd, len);
836
0
  if (s_name == NULL)
837
0
    return NULL;
838
839
0
  memcpy (s_name, section->name, namelen);
840
0
  memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
841
0
  return (*htab->add_stub_section) (s_name, section);
842
0
}
843
844
845
/* Find or create a stub section for a link section.
846
847
   Fix or create the stub section used to collect stubs attached to
848
   the specified link section.  */
849
850
static asection *
851
_bfd_kvx_get_stub_for_link_section (asection *link_section,
852
            struct elf_kvx_link_hash_table *htab)
853
0
{
854
0
  if (htab->stub_group[link_section->id].stub_sec == NULL)
855
0
    htab->stub_group[link_section->id].stub_sec
856
0
      = _bfd_kvx_create_stub_section (link_section, htab);
857
0
  return htab->stub_group[link_section->id].stub_sec;
858
0
}
859
860
861
/* Find or create a stub section in the stub group for an input
862
   section.  */
863
864
static asection *
865
_bfd_kvx_create_or_find_stub_sec (asection *section,
866
          struct elf_kvx_link_hash_table *htab)
867
0
{
868
0
  asection *link_sec = htab->stub_group[section->id].link_sec;
869
0
  return _bfd_kvx_get_stub_for_link_section (link_sec, htab);
870
0
}
871
872
873
/* Add a new stub entry in the stub group associated with an input
874
   section to the stub hash.  Not all fields of the new stub entry are
875
   initialised.  */
876
877
static struct elf_kvx_stub_hash_entry *
878
_bfd_kvx_add_stub_entry_in_group (const char *stub_name,
879
          asection *section,
880
          struct elf_kvx_link_hash_table *htab)
881
0
{
882
0
  asection *link_sec;
883
0
  asection *stub_sec;
884
0
  struct elf_kvx_stub_hash_entry *stub_entry;
885
886
0
  link_sec = htab->stub_group[section->id].link_sec;
887
0
  stub_sec = _bfd_kvx_create_or_find_stub_sec (section, htab);
888
889
  /* Enter this entry into the linker stub hash table.  */
890
0
  stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table, stub_name,
891
0
             true, false);
892
0
  if (stub_entry == NULL)
893
0
    {
894
      /* xgettext:c-format */
895
0
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
896
0
        section->owner, stub_name);
897
0
      return NULL;
898
0
    }
899
900
0
  stub_entry->stub_sec = stub_sec;
901
0
  stub_entry->stub_offset = 0;
902
0
  stub_entry->id_sec = link_sec;
903
904
0
  return stub_entry;
905
0
}
906
907
static bool
908
kvx_build_one_stub (struct bfd_hash_entry *gen_entry,
909
        void *in_arg)
910
0
{
911
0
  struct elf_kvx_stub_hash_entry *stub_entry;
912
0
  asection *stub_sec;
913
0
  bfd *stub_bfd;
914
0
  bfd_byte *loc;
915
0
  bfd_vma sym_value;
916
0
  unsigned int template_size;
917
0
  const uint32_t *template;
918
0
  unsigned int i;
919
0
  struct bfd_link_info *info;
920
921
  /* Massage our args to the form they really have.  */
922
0
  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
923
924
0
  info = (struct bfd_link_info *) in_arg;
925
926
  /* Fail if the target section could not be assigned to an output
927
     section.  The user should fix his linker script.  */
928
0
  if (stub_entry->target_section->output_section == NULL
929
0
      && info->non_contiguous_regions)
930
0
    info->callbacks->fatal (_("%P: Could not assign '%pA' to an output section. "
931
0
            "Retry without "
932
0
            "--enable-non-contiguous-regions.\n"),
933
0
          stub_entry->target_section);
934
935
0
  stub_sec = stub_entry->stub_sec;
936
937
  /* Make a note of the offset within the stubs for this entry.  */
938
0
  stub_entry->stub_offset = stub_sec->size;
939
0
  loc = stub_sec->contents + stub_entry->stub_offset;
940
941
0
  stub_bfd = stub_sec->owner;
942
943
  /* This is the address of the stub destination.  */
944
0
  sym_value = (stub_entry->target_value
945
0
         + stub_entry->target_section->output_offset
946
0
         + stub_entry->target_section->output_section->vma);
947
948
0
  switch (stub_entry->stub_type)
949
0
    {
950
0
    case kvx_stub_long_branch:
951
0
      template = elf64_kvx_long_branch_stub;
952
0
      template_size = sizeof (elf64_kvx_long_branch_stub);
953
0
      break;
954
0
    default:
955
0
      abort ();
956
0
    }
957
958
0
  for (i = 0; i < (template_size / sizeof template[0]); i++)
959
0
    {
960
0
      bfd_putl32 (template[i], loc);
961
0
      loc += 4;
962
0
    }
963
964
0
  stub_sec->size += template_size;
965
966
0
  switch (stub_entry->stub_type)
967
0
    {
968
0
    case kvx_stub_long_branch:
969
      /* The stub uses a make insn with 43bits immediate.
970
   We need to apply 3 relocations:
971
   BFD_RELOC_KVX_S43_LO10,
972
   BFD_RELOC_KVX_S43_UP27,
973
   BFD_RELOC_KVX_S43_EX6.  */
974
0
      if (kvx_relocate (R_KVX_S43_LO10, stub_bfd, stub_sec,
975
0
      stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
976
0
  BFD_FAIL ();
977
0
      if (kvx_relocate (R_KVX_S43_EX6, stub_bfd, stub_sec,
978
0
      stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
979
0
  BFD_FAIL ();
980
0
      if (kvx_relocate (R_KVX_S43_UP27, stub_bfd, stub_sec,
981
0
      stub_entry->stub_offset + 4, sym_value) != bfd_reloc_ok)
982
0
  BFD_FAIL ();
983
0
      break;
984
0
    default:
985
0
      abort ();
986
0
    }
987
988
0
  return true;
989
0
}
990
991
/* As above, but don't actually build the stub.  Just bump offset so
992
   we know stub section sizes.  */
993
994
static bool
995
kvx_size_one_stub (struct bfd_hash_entry *gen_entry,
996
       void *in_arg ATTRIBUTE_UNUSED)
997
0
{
998
0
  struct elf_kvx_stub_hash_entry *stub_entry;
999
0
  int size;
1000
1001
  /* Massage our args to the form they really have.  */
1002
0
  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
1003
1004
0
  switch (stub_entry->stub_type)
1005
0
    {
1006
0
    case kvx_stub_long_branch:
1007
0
      size = sizeof (elf64_kvx_long_branch_stub);
1008
0
      break;
1009
0
    default:
1010
0
      abort ();
1011
0
    }
1012
1013
0
  stub_entry->stub_sec->size += size;
1014
0
  return true;
1015
0
}
1016
1017
/* External entry points for sizing and building linker stubs.  */
1018
1019
/* Set up various things so that we can make a list of input sections
1020
   for each output section included in the link.  Returns -1 on error,
1021
   0 when no stubs will be needed, and 1 on success.  */
1022
1023
int
1024
elf64_kvx_setup_section_lists (bfd *output_bfd,
1025
             struct bfd_link_info *info)
1026
0
{
1027
0
  bfd *input_bfd;
1028
0
  unsigned int bfd_count;
1029
0
  unsigned int top_id, top_index;
1030
0
  asection *section;
1031
0
  asection **input_list, **list;
1032
0
  bfd_size_type amt;
1033
0
  struct elf_kvx_link_hash_table *htab =
1034
0
    elf_kvx_hash_table (info);
1035
1036
0
  if (!is_elf_hash_table ((const struct bfd_link_hash_table *)htab))
1037
0
    return 0;
1038
1039
  /* Count the number of input BFDs and find the top input section id.  */
1040
0
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
1041
0
       input_bfd != NULL; input_bfd = input_bfd->link.next)
1042
0
    {
1043
0
      bfd_count += 1;
1044
0
      for (section = input_bfd->sections;
1045
0
     section != NULL; section = section->next)
1046
0
  {
1047
0
    if (top_id < section->id)
1048
0
      top_id = section->id;
1049
0
  }
1050
0
    }
1051
0
  htab->bfd_count = bfd_count;
1052
1053
0
  amt = sizeof (struct map_stub) * (top_id + 1);
1054
0
  htab->stub_group = bfd_zmalloc (amt);
1055
0
  if (htab->stub_group == NULL)
1056
0
    return -1;
1057
1058
  /* We can't use output_bfd->section_count here to find the top output
1059
     section index as some sections may have been removed, and
1060
     _bfd_strip_section_from_output doesn't renumber the indices.  */
1061
0
  for (section = output_bfd->sections, top_index = 0;
1062
0
       section != NULL; section = section->next)
1063
0
    {
1064
0
      if (top_index < section->index)
1065
0
  top_index = section->index;
1066
0
    }
1067
1068
0
  htab->top_index = top_index;
1069
0
  amt = sizeof (asection *) * (top_index + 1);
1070
0
  input_list = bfd_malloc (amt);
1071
0
  htab->input_list = input_list;
1072
0
  if (input_list == NULL)
1073
0
    return -1;
1074
1075
  /* For sections we aren't interested in, mark their entries with a
1076
     value we can check later.  */
1077
0
  list = input_list + top_index;
1078
0
  do
1079
0
    *list = bfd_abs_section_ptr;
1080
0
  while (list-- != input_list);
1081
1082
0
  for (section = output_bfd->sections;
1083
0
       section != NULL; section = section->next)
1084
0
    {
1085
0
      if ((section->flags & SEC_CODE) != 0)
1086
0
  input_list[section->index] = NULL;
1087
0
    }
1088
1089
0
  return 1;
1090
0
}
1091
1092
/* Used by elf64_kvx_next_input_section and group_sections.  */
1093
0
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
1094
1095
/* The linker repeatedly calls this function for each input section,
1096
   in the order that input sections are linked into output sections.
1097
   Build lists of input sections to determine groupings between which
1098
   we may insert linker stubs.  */
1099
1100
void
1101
elf64_kvx_next_input_section (struct bfd_link_info *info, asection *isec)
1102
0
{
1103
0
  struct elf_kvx_link_hash_table *htab =
1104
0
    elf_kvx_hash_table (info);
1105
1106
0
  if (isec->output_section->index <= htab->top_index)
1107
0
    {
1108
0
      asection **list = htab->input_list + isec->output_section->index;
1109
1110
0
      if (*list != bfd_abs_section_ptr)
1111
0
  {
1112
    /* Steal the link_sec pointer for our list.  */
1113
    /* This happens to make the list in reverse order,
1114
       which is what we want.  */
1115
0
    PREV_SEC (isec) = *list;
1116
0
    *list = isec;
1117
0
  }
1118
0
    }
1119
0
}
1120
1121
/* See whether we can group stub sections together.  Grouping stub
1122
   sections may result in fewer stubs.  More importantly, we need to
1123
   put all .init* and .fini* stubs at the beginning of the .init or
1124
   .fini output sections respectively, because glibc splits the
1125
   _init and _fini functions into multiple parts.  Putting a stub in
1126
   the middle of a function is not a good idea.  */
1127
1128
static void
1129
group_sections (struct elf_kvx_link_hash_table *htab,
1130
    bfd_size_type stub_group_size,
1131
    bool stubs_always_after_branch)
1132
0
{
1133
0
  asection **list = htab->input_list;
1134
1135
0
  do
1136
0
    {
1137
0
      asection *tail = *list;
1138
0
      asection *head;
1139
1140
0
      if (tail == bfd_abs_section_ptr)
1141
0
  continue;
1142
1143
      /* Reverse the list: we must avoid placing stubs at the
1144
   beginning of the section because the beginning of the text
1145
   section may be required for an interrupt vector in bare metal
1146
   code.  */
1147
0
#define NEXT_SEC PREV_SEC
1148
0
      head = NULL;
1149
0
      while (tail != NULL)
1150
0
  {
1151
    /* Pop from tail.  */
1152
0
    asection *item = tail;
1153
0
    tail = PREV_SEC (item);
1154
1155
    /* Push on head.  */
1156
0
    NEXT_SEC (item) = head;
1157
0
    head = item;
1158
0
  }
1159
1160
0
      while (head != NULL)
1161
0
  {
1162
0
    asection *curr;
1163
0
    asection *next;
1164
0
    bfd_vma stub_group_start = head->output_offset;
1165
0
    bfd_vma end_of_next;
1166
1167
0
    curr = head;
1168
0
    while (NEXT_SEC (curr) != NULL)
1169
0
      {
1170
0
        next = NEXT_SEC (curr);
1171
0
        end_of_next = next->output_offset + next->size;
1172
0
        if (end_of_next - stub_group_start >= stub_group_size)
1173
    /* End of NEXT is too far from start, so stop.  */
1174
0
    break;
1175
        /* Add NEXT to the group.  */
1176
0
        curr = next;
1177
0
      }
1178
1179
    /* OK, the size from the start to the start of CURR is less
1180
       than stub_group_size and thus can be handled by one stub
1181
       section.  (Or the head section is itself larger than
1182
       stub_group_size, in which case we may be toast.)
1183
       We should really be keeping track of the total size of
1184
       stubs added here, as stubs contribute to the final output
1185
       section size.  */
1186
0
    do
1187
0
      {
1188
0
        next = NEXT_SEC (head);
1189
        /* Set up this stub group.  */
1190
0
        htab->stub_group[head->id].link_sec = curr;
1191
0
      }
1192
0
    while (head != curr && (head = next) != NULL);
1193
1194
    /* But wait, there's more!  Input sections up to stub_group_size
1195
       bytes after the stub section can be handled by it too.  */
1196
0
    if (!stubs_always_after_branch)
1197
0
      {
1198
0
        stub_group_start = curr->output_offset + curr->size;
1199
1200
0
        while (next != NULL)
1201
0
    {
1202
0
      end_of_next = next->output_offset + next->size;
1203
0
      if (end_of_next - stub_group_start >= stub_group_size)
1204
        /* End of NEXT is too far from stubs, so stop.  */
1205
0
        break;
1206
      /* Add NEXT to the stub group.  */
1207
0
      head = next;
1208
0
      next = NEXT_SEC (head);
1209
0
      htab->stub_group[head->id].link_sec = curr;
1210
0
    }
1211
0
      }
1212
0
    head = next;
1213
0
  }
1214
0
    }
1215
0
  while (list++ != htab->input_list + htab->top_index);
1216
1217
0
  free (htab->input_list);
1218
0
}
1219
1220
static void
1221
_bfd_kvx_resize_stubs (struct elf_kvx_link_hash_table *htab)
1222
0
{
1223
0
  asection *section;
1224
1225
  /* OK, we've added some stubs.  Find out the new size of the
1226
     stub sections.  */
1227
0
  for (section = htab->stub_bfd->sections;
1228
0
       section != NULL; section = section->next)
1229
0
    {
1230
      /* Ignore non-stub sections.  */
1231
0
      if (!strstr (section->name, STUB_SUFFIX))
1232
0
  continue;
1233
0
      section->size = 0;
1234
0
    }
1235
1236
0
  bfd_hash_traverse (&htab->stub_hash_table, kvx_size_one_stub, htab);
1237
0
}
1238
1239
/* Satisfy the ELF linker by filling in some fields in our fake bfd.  */
1240
1241
bool
1242
kvx_elf64_init_stub_bfd (struct bfd_link_info *info,
1243
      bfd *stub_bfd)
1244
0
{
1245
0
  struct elf_kvx_link_hash_table *htab;
1246
1247
0
  elf_elfheader (stub_bfd)->e_ident[EI_CLASS] = ELFCLASS64;
1248
1249
/* Always hook our dynamic sections into the first bfd, which is the
1250
   linker created stub bfd.  This ensures that the GOT header is at
1251
   the start of the output TOC section.  */
1252
0
  htab = elf_kvx_hash_table (info);
1253
0
  if (htab == NULL)
1254
0
    return false;
1255
1256
0
  return true;
1257
0
}
1258
1259
/* Determine and set the size of the stub section for a final link.
1260
1261
   The basic idea here is to examine all the relocations looking for
1262
   PC-relative calls to a target that is unreachable with a 27bits
1263
   immediate (found in call and goto).  */
1264
1265
bool
1266
elf64_kvx_size_stubs (bfd *output_bfd,
1267
         bfd *stub_bfd,
1268
         struct bfd_link_info *info,
1269
         bfd_signed_vma group_size,
1270
         asection * (*add_stub_section) (const char *,
1271
                 asection *),
1272
         void (*layout_sections_again) (void))
1273
0
{
1274
0
  bfd_size_type stub_group_size;
1275
0
  bool stubs_always_before_branch;
1276
0
  bool stub_changed = false;
1277
0
  struct elf_kvx_link_hash_table *htab = elf_kvx_hash_table (info);
1278
1279
  /* Propagate mach to stub bfd, because it may not have been
1280
     finalized when we created stub_bfd.  */
1281
0
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
1282
0
         bfd_get_mach (output_bfd));
1283
1284
  /* Stash our params away.  */
1285
0
  htab->stub_bfd = stub_bfd;
1286
0
  htab->add_stub_section = add_stub_section;
1287
0
  htab->layout_sections_again = layout_sections_again;
1288
0
  stubs_always_before_branch = group_size < 0;
1289
0
  if (group_size < 0)
1290
0
    stub_group_size = -group_size;
1291
0
  else
1292
0
    stub_group_size = group_size;
1293
1294
0
  if (stub_group_size == 1)
1295
0
    {
1296
      /* Default values.  */
1297
      /* KVX branch range is +-256MB. The value used is 1MB less.  */
1298
0
      stub_group_size = 255 * 1024 * 1024;
1299
0
    }
1300
1301
0
  group_sections (htab, stub_group_size, stubs_always_before_branch);
1302
1303
0
  (*htab->layout_sections_again) ();
1304
1305
0
  while (1)
1306
0
    {
1307
0
      bfd *input_bfd;
1308
1309
0
      for (input_bfd = info->input_bfds;
1310
0
     input_bfd != NULL; input_bfd = input_bfd->link.next)
1311
0
  {
1312
0
    Elf_Internal_Shdr *symtab_hdr;
1313
0
    asection *section;
1314
0
    Elf_Internal_Sym *local_syms = NULL;
1315
1316
0
    if (!is_kvx_elf (input_bfd)
1317
0
        || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
1318
0
      continue;
1319
1320
    /* We'll need the symbol table in a second.  */
1321
0
    symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
1322
0
    if (symtab_hdr->sh_info == 0)
1323
0
      continue;
1324
1325
    /* Walk over each section attached to the input bfd.  */
1326
0
    for (section = input_bfd->sections;
1327
0
         section != NULL; section = section->next)
1328
0
      {
1329
0
        Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1330
1331
        /* If there aren't any relocs, then there's nothing more
1332
     to do.  */
1333
0
        if ((section->flags & SEC_RELOC) == 0
1334
0
      || section->reloc_count == 0
1335
0
      || (section->flags & SEC_CODE) == 0)
1336
0
    continue;
1337
1338
        /* If this section is a link-once section that will be
1339
     discarded, then don't create any stubs.  */
1340
0
        if (section->output_section == NULL
1341
0
      || section->output_section->owner != output_bfd)
1342
0
    continue;
1343
1344
        /* Get the relocs.  */
1345
0
        internal_relocs
1346
0
    = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
1347
0
               NULL, info->keep_memory);
1348
0
        if (internal_relocs == NULL)
1349
0
    goto error_ret_free_local;
1350
1351
        /* Now examine each relocation.  */
1352
0
        irela = internal_relocs;
1353
0
        irelaend = irela + section->reloc_count;
1354
0
        for (; irela < irelaend; irela++)
1355
0
    {
1356
0
      unsigned int r_type, r_indx;
1357
0
      enum elf_kvx_stub_type stub_type;
1358
0
      struct elf_kvx_stub_hash_entry *stub_entry;
1359
0
      asection *sym_sec;
1360
0
      bfd_vma sym_value;
1361
0
      bfd_vma destination;
1362
0
      struct elf_kvx_link_hash_entry *hash;
1363
0
      const char *sym_name;
1364
0
      char *stub_name;
1365
0
      const asection *id_sec;
1366
0
      unsigned char st_type;
1367
0
      bfd_size_type len;
1368
1369
0
      r_type = ELF64_R_TYPE (irela->r_info);
1370
0
      r_indx = ELF64_R_SYM (irela->r_info);
1371
1372
0
      if (r_type >= (unsigned int) R_KVX_end)
1373
0
        {
1374
0
          bfd_set_error (bfd_error_bad_value);
1375
0
        error_ret_free_internal:
1376
0
          if (elf_section_data (section)->relocs == NULL)
1377
0
      free (internal_relocs);
1378
0
          goto error_ret_free_local;
1379
0
        }
1380
1381
      /* Only look for stubs on unconditional branch and
1382
         branch and link instructions.  */
1383
      /* This catches CALL and GOTO insn */
1384
0
      if (r_type != (unsigned int) R_KVX_PCREL27)
1385
0
        continue;
1386
1387
      /* Now determine the call target, its name, value,
1388
         section.  */
1389
0
      sym_sec = NULL;
1390
0
      sym_value = 0;
1391
0
      destination = 0;
1392
0
      hash = NULL;
1393
0
      sym_name = NULL;
1394
0
      if (r_indx < symtab_hdr->sh_info)
1395
0
        {
1396
          /* It's a local symbol.  */
1397
0
          Elf_Internal_Sym *sym;
1398
0
          Elf_Internal_Shdr *hdr;
1399
1400
0
          if (local_syms == NULL)
1401
0
      {
1402
0
        local_syms
1403
0
          = (Elf_Internal_Sym *) symtab_hdr->contents;
1404
0
        if (local_syms == NULL)
1405
0
          local_syms
1406
0
            = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
1407
0
                  symtab_hdr->sh_info, 0,
1408
0
                  NULL, NULL, NULL);
1409
0
        if (local_syms == NULL)
1410
0
          goto error_ret_free_internal;
1411
0
      }
1412
1413
0
          sym = local_syms + r_indx;
1414
0
          hdr = elf_elfsections (input_bfd)[sym->st_shndx];
1415
0
          sym_sec = hdr->bfd_section;
1416
0
          if (!sym_sec)
1417
      /* This is an undefined symbol.  It can never
1418
         be resolved.  */
1419
0
      continue;
1420
1421
0
          if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
1422
0
      sym_value = sym->st_value;
1423
0
          destination = (sym_value + irela->r_addend
1424
0
             + sym_sec->output_offset
1425
0
             + sym_sec->output_section->vma);
1426
0
          st_type = ELF_ST_TYPE (sym->st_info);
1427
0
          sym_name
1428
0
      = bfd_elf_string_from_elf_section (input_bfd,
1429
0
                 symtab_hdr->sh_link,
1430
0
                 sym->st_name);
1431
0
        }
1432
0
      else
1433
0
        {
1434
0
          int e_indx;
1435
1436
0
          e_indx = r_indx - symtab_hdr->sh_info;
1437
0
          hash = ((struct elf_kvx_link_hash_entry *)
1438
0
            elf_sym_hashes (input_bfd)[e_indx]);
1439
1440
0
          while (hash->root.root.type == bfd_link_hash_indirect
1441
0
           || hash->root.root.type == bfd_link_hash_warning)
1442
0
      hash = ((struct elf_kvx_link_hash_entry *)
1443
0
        hash->root.root.u.i.link);
1444
1445
0
          if (hash->root.root.type == bfd_link_hash_defined
1446
0
        || hash->root.root.type == bfd_link_hash_defweak)
1447
0
      {
1448
0
        struct elf_kvx_link_hash_table *globals =
1449
0
          elf_kvx_hash_table (info);
1450
0
        sym_sec = hash->root.root.u.def.section;
1451
0
        sym_value = hash->root.root.u.def.value;
1452
        /* For a destination in a shared library,
1453
           use the PLT stub as target address to
1454
           decide whether a branch stub is
1455
           needed.  */
1456
0
        if (globals->root.splt != NULL && hash != NULL
1457
0
            && hash->root.plt.offset != (bfd_vma) - 1)
1458
0
          {
1459
0
            sym_sec = globals->root.splt;
1460
0
            sym_value = hash->root.plt.offset;
1461
0
            if (sym_sec->output_section != NULL)
1462
0
        destination = (sym_value
1463
0
                 + sym_sec->output_offset
1464
0
                 + sym_sec->output_section->vma);
1465
0
          }
1466
0
        else if (sym_sec->output_section != NULL)
1467
0
          destination = (sym_value + irela->r_addend
1468
0
             + sym_sec->output_offset
1469
0
             + sym_sec->output_section->vma);
1470
0
      }
1471
0
          else if (hash->root.root.type == bfd_link_hash_undefined
1472
0
             || (hash->root.root.type
1473
0
           == bfd_link_hash_undefweak))
1474
0
      {
1475
        /* For a shared library, use the PLT stub as
1476
           target address to decide whether a long
1477
           branch stub is needed.
1478
           For absolute code, they cannot be handled.  */
1479
0
        struct elf_kvx_link_hash_table *globals =
1480
0
          elf_kvx_hash_table (info);
1481
1482
0
        if (globals->root.splt != NULL && hash != NULL
1483
0
            && hash->root.plt.offset != (bfd_vma) - 1)
1484
0
          {
1485
0
            sym_sec = globals->root.splt;
1486
0
            sym_value = hash->root.plt.offset;
1487
0
            if (sym_sec->output_section != NULL)
1488
0
        destination = (sym_value
1489
0
                 + sym_sec->output_offset
1490
0
                 + sym_sec->output_section->vma);
1491
0
          }
1492
0
        else
1493
0
          continue;
1494
0
      }
1495
0
          else
1496
0
      {
1497
0
        bfd_set_error (bfd_error_bad_value);
1498
0
        goto error_ret_free_internal;
1499
0
      }
1500
0
          st_type = ELF_ST_TYPE (hash->root.type);
1501
0
          sym_name = hash->root.root.root.string;
1502
0
        }
1503
1504
      /* Determine what (if any) linker stub is needed.  */
1505
0
      stub_type = kvx_type_of_stub (section, irela, sym_sec,
1506
0
            st_type, destination);
1507
0
      if (stub_type == kvx_stub_none)
1508
0
        continue;
1509
1510
      /* Support for grouping stub sections.  */
1511
0
      id_sec = htab->stub_group[section->id].link_sec;
1512
1513
      /* Get the name of this stub.  */
1514
0
      stub_name = elf64_kvx_stub_name (id_sec, sym_sec, hash,
1515
0
              irela);
1516
0
      if (!stub_name)
1517
0
        goto error_ret_free_internal;
1518
1519
0
      stub_entry =
1520
0
        kvx_stub_hash_lookup (&htab->stub_hash_table,
1521
0
           stub_name, false, false);
1522
0
      if (stub_entry != NULL)
1523
0
        {
1524
          /* The proper stub has already been created.  */
1525
0
          free (stub_name);
1526
          /* Always update this stub's target since it may have
1527
       changed after layout.  */
1528
0
          stub_entry->target_value = sym_value + irela->r_addend;
1529
0
          continue;
1530
0
        }
1531
1532
0
      stub_entry = _bfd_kvx_add_stub_entry_in_group
1533
0
        (stub_name, section, htab);
1534
0
      if (stub_entry == NULL)
1535
0
        {
1536
0
          free (stub_name);
1537
0
          goto error_ret_free_internal;
1538
0
        }
1539
1540
0
      stub_entry->target_value = sym_value + irela->r_addend;
1541
0
      stub_entry->target_section = sym_sec;
1542
0
      stub_entry->stub_type = stub_type;
1543
0
      stub_entry->h = hash;
1544
0
      stub_entry->st_type = st_type;
1545
1546
0
      if (sym_name == NULL)
1547
0
        sym_name = "unnamed";
1548
0
      len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
1549
0
      stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
1550
0
      if (stub_entry->output_name == NULL)
1551
0
        {
1552
0
          free (stub_name);
1553
0
          goto error_ret_free_internal;
1554
0
        }
1555
1556
0
      snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
1557
0
          sym_name);
1558
1559
0
      stub_changed = true;
1560
0
    }
1561
1562
        /* We're done with the internal relocs, free them.  */
1563
0
        if (elf_section_data (section)->relocs == NULL)
1564
0
    free (internal_relocs);
1565
0
      }
1566
0
  }
1567
1568
0
      if (!stub_changed)
1569
0
  break;
1570
1571
0
      _bfd_kvx_resize_stubs (htab);
1572
1573
      /* Ask the linker to do its stuff.  */
1574
0
      (*htab->layout_sections_again) ();
1575
0
      stub_changed = false;
1576
0
    }
1577
1578
0
  return true;
1579
1580
0
error_ret_free_local:
1581
0
  return false;
1582
1583
0
}
1584
1585
/* Build all the stubs associated with the current output file.  The
1586
   stubs are kept in a hash table attached to the main linker hash
1587
   table.  We also set up the .plt entries for statically linked PIC
1588
   functions here.  This function is called via kvx_elf_finish in the
1589
   linker.  */
1590
1591
bool
1592
elf64_kvx_build_stubs (struct bfd_link_info *info)
1593
0
{
1594
0
  asection *stub_sec;
1595
0
  struct bfd_hash_table *table;
1596
0
  struct elf_kvx_link_hash_table *htab;
1597
1598
0
  htab = elf_kvx_hash_table (info);
1599
1600
0
  for (stub_sec = htab->stub_bfd->sections;
1601
0
       stub_sec != NULL; stub_sec = stub_sec->next)
1602
0
    {
1603
0
      bfd_size_type size;
1604
1605
      /* Ignore non-stub sections.  */
1606
0
      if (!strstr (stub_sec->name, STUB_SUFFIX))
1607
0
  continue;
1608
1609
      /* Allocate memory to hold the linker stubs.  */
1610
0
      size = stub_sec->size;
1611
0
      stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
1612
0
      if (stub_sec->contents == NULL && size != 0)
1613
0
  return false;
1614
0
      stub_sec->alloced = 1;
1615
0
      stub_sec->size = 0;
1616
0
    }
1617
1618
  /* Build the stubs as directed by the stub hash table.  */
1619
0
  table = &htab->stub_hash_table;
1620
0
  bfd_hash_traverse (table, kvx_build_one_stub, info);
1621
1622
0
  return true;
1623
0
}
1624
1625
static bfd_vma
1626
kvx_calculate_got_entry_vma (struct elf_link_hash_entry *h,
1627
         struct elf_kvx_link_hash_table
1628
         *globals, struct bfd_link_info *info,
1629
         bfd_vma value, bfd *output_bfd,
1630
         bool *unresolved_reloc_p)
1631
0
{
1632
0
  bfd_vma off = (bfd_vma) - 1;
1633
0
  asection *basegot = globals->root.sgot;
1634
0
  bool dyn = globals->root.dynamic_sections_created;
1635
1636
0
  if (h != NULL)
1637
0
    {
1638
0
      BFD_ASSERT (basegot != NULL);
1639
0
      off = h->got.offset;
1640
0
      BFD_ASSERT (off != (bfd_vma) - 1);
1641
0
      if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
1642
0
    || (bfd_link_pic (info)
1643
0
        && SYMBOL_REFERENCES_LOCAL (info, h))
1644
0
    || (ELF_ST_VISIBILITY (h->other)
1645
0
        && h->root.type == bfd_link_hash_undefweak))
1646
0
  {
1647
    /* This is actually a static link, or it is a -Bsymbolic link
1648
       and the symbol is defined locally.  We must initialize this
1649
       entry in the global offset table.  Since the offset must
1650
       always be a multiple of 8 (4 in the case of ILP32), we use
1651
       the least significant bit to record whether we have
1652
       initialized it already.
1653
       When doing a dynamic link, we create a .rel(a).got relocation
1654
       entry to initialize the value.  This is done in the
1655
       finish_dynamic_symbol routine.  */
1656
0
    if ((off & 1) != 0)
1657
0
      off &= ~1;
1658
0
    else
1659
0
      {
1660
0
        bfd_put_64 (output_bfd, value, basegot->contents + off);
1661
0
        h->got.offset |= 1;
1662
0
      }
1663
0
  }
1664
0
      else
1665
0
  *unresolved_reloc_p = false;
1666
0
    }
1667
1668
0
  return off;
1669
0
}
1670
1671
static unsigned int
1672
kvx_reloc_got_type (bfd_reloc_code_real_type r_type)
1673
0
{
1674
0
  switch (r_type)
1675
0
    {
1676
      /* Extracted with:
1677
   awk 'match ($0, /HOWTO.*R_(KVX.*_GOT(OFF)?(64)?_.*),/,ary) \
1678
   {print "case BFD_RELOC_" ary[1] ":";}' elfxx-kvxc.def  */
1679
0
    case BFD_RELOC_KVX_S37_GOTOFF_LO10:
1680
0
    case BFD_RELOC_KVX_S37_GOTOFF_UP27:
1681
1682
0
    case BFD_RELOC_KVX_S37_GOT_LO10:
1683
0
    case BFD_RELOC_KVX_S37_GOT_UP27:
1684
1685
0
    case BFD_RELOC_KVX_S43_GOTOFF_LO10:
1686
0
    case BFD_RELOC_KVX_S43_GOTOFF_UP27:
1687
0
    case BFD_RELOC_KVX_S43_GOTOFF_EX6:
1688
1689
0
    case BFD_RELOC_KVX_S43_GOT_LO10:
1690
0
    case BFD_RELOC_KVX_S43_GOT_UP27:
1691
0
    case BFD_RELOC_KVX_S43_GOT_EX6:
1692
0
      return GOT_NORMAL;
1693
1694
0
    case BFD_RELOC_KVX_S37_TLS_GD_LO10:
1695
0
    case BFD_RELOC_KVX_S37_TLS_GD_UP27:
1696
0
    case BFD_RELOC_KVX_S43_TLS_GD_LO10:
1697
0
    case BFD_RELOC_KVX_S43_TLS_GD_UP27:
1698
0
    case BFD_RELOC_KVX_S43_TLS_GD_EX6:
1699
0
      return GOT_TLS_GD;
1700
1701
0
    case BFD_RELOC_KVX_S37_TLS_LD_LO10:
1702
0
    case BFD_RELOC_KVX_S37_TLS_LD_UP27:
1703
0
    case BFD_RELOC_KVX_S43_TLS_LD_LO10:
1704
0
    case BFD_RELOC_KVX_S43_TLS_LD_UP27:
1705
0
    case BFD_RELOC_KVX_S43_TLS_LD_EX6:
1706
0
      return GOT_TLS_LD;
1707
1708
0
    case BFD_RELOC_KVX_S37_TLS_IE_LO10:
1709
0
    case BFD_RELOC_KVX_S37_TLS_IE_UP27:
1710
0
    case BFD_RELOC_KVX_S43_TLS_IE_LO10:
1711
0
    case BFD_RELOC_KVX_S43_TLS_IE_UP27:
1712
0
    case BFD_RELOC_KVX_S43_TLS_IE_EX6:
1713
0
      return GOT_TLS_IE;
1714
1715
0
    default:
1716
0
      break;
1717
0
    }
1718
0
  return GOT_UNKNOWN;
1719
0
}
1720
1721
static bool
1722
kvx_can_relax_tls (bfd *input_bfd ATTRIBUTE_UNUSED,
1723
           struct bfd_link_info *info ATTRIBUTE_UNUSED,
1724
           bfd_reloc_code_real_type r_type ATTRIBUTE_UNUSED,
1725
           struct elf_link_hash_entry *h ATTRIBUTE_UNUSED,
1726
           unsigned long r_symndx ATTRIBUTE_UNUSED)
1727
0
{
1728
0
  if (! IS_KVX_TLS_RELAX_RELOC (r_type))
1729
0
    return false;
1730
1731
  /* Relaxing hook. Disabled on KVX. */
1732
  /* See elfnn-aarch64.c */
1733
0
  return true;
1734
0
}
1735
1736
/* Given the relocation code R_TYPE, return the relaxed bfd reloc
1737
   enumerator.  */
1738
1739
static bfd_reloc_code_real_type
1740
kvx_tls_transition (bfd *input_bfd,
1741
      struct bfd_link_info *info,
1742
      unsigned int r_type,
1743
      struct elf_link_hash_entry *h,
1744
      unsigned long r_symndx)
1745
0
{
1746
0
  bfd_reloc_code_real_type bfd_r_type
1747
0
    = elf64_kvx_bfd_reloc_from_type (input_bfd, r_type);
1748
1749
0
  if (! kvx_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
1750
0
    return bfd_r_type;
1751
1752
0
  return bfd_r_type;
1753
0
}
1754
1755
/* Return the base VMA address which should be subtracted from real addresses
1756
   when resolving R_KVX_*_TLS_GD_* and R_KVX_*_TLS_LD_* relocation.  */
1757
1758
static bfd_vma
1759
dtpoff_base (struct bfd_link_info *info)
1760
0
{
1761
  /* If tls_sec is NULL, we should have signalled an error already.  */
1762
0
  BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
1763
0
  return elf_hash_table (info)->tls_sec->vma;
1764
0
}
1765
1766
/* Return the base VMA address which should be subtracted from real addresses
1767
   when resolving R_KVX_*_TLS_IE_* and R_KVX_*_TLS_LE_* relocations.  */
1768
1769
static bfd_vma
1770
tpoff_base (struct bfd_link_info *info)
1771
0
{
1772
0
  struct elf_link_hash_table *htab = elf_hash_table (info);
1773
1774
  /* If tls_sec is NULL, we should have signalled an error already.  */
1775
0
  BFD_ASSERT (htab->tls_sec != NULL);
1776
1777
0
  bfd_vma base = align_power ((bfd_vma) 0,
1778
0
            htab->tls_sec->alignment_power);
1779
0
  return htab->tls_sec->vma - base;
1780
0
}
1781
1782
static bfd_vma *
1783
symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
1784
           unsigned long r_symndx)
1785
0
{
1786
  /* Calculate the address of the GOT entry for symbol
1787
     referred to in h.  */
1788
0
  if (h != NULL)
1789
0
    return &h->got.offset;
1790
0
  else
1791
0
    {
1792
      /* local symbol */
1793
0
      struct elf_kvx_local_symbol *l;
1794
1795
0
      l = elf_kvx_locals (input_bfd);
1796
0
      return &l[r_symndx].got_offset;
1797
0
    }
1798
0
}
1799
1800
static void
1801
symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
1802
      unsigned long r_symndx)
1803
0
{
1804
0
  bfd_vma *p;
1805
0
  p = symbol_got_offset_ref (input_bfd, h, r_symndx);
1806
0
  *p |= 1;
1807
0
}
1808
1809
static int
1810
symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
1811
        unsigned long r_symndx)
1812
0
{
1813
0
  bfd_vma value;
1814
0
  value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1815
0
  return value & 1;
1816
0
}
1817
1818
static bfd_vma
1819
symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
1820
       unsigned long r_symndx)
1821
0
{
1822
0
  bfd_vma value;
1823
0
  value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1824
0
  value &= ~1;
1825
0
  return value;
1826
0
}
1827
1828
/* N_ONES produces N one bits, without overflowing machine arithmetic.  */
1829
0
#define N_ONES(n) (((((bfd_vma) 1 << ((n) -1)) - 1) << 1) | 1)
1830
1831
/* This is a copy/paste + modification from
   reloc.c:_bfd_relocate_contents. Relocations are applied to 32bits
   words, so all overflow checks will overflow for values above
   32bits.  */
/* Check whether RELOCATION fits the value field implied by
   BFD_R_TYPE under the overflow policy COMPLAIN_ON_OVERFLOW.
   Only the plain symbol relocations listed in the switch below are
   checked; any other type returns bfd_reloc_ok unchecked.  Returns
   bfd_reloc_ok or bfd_reloc_overflow.  */
static bfd_reloc_status_type
check_signed_overflow (enum complain_overflow complain_on_overflow,
		       bfd_reloc_code_real_type bfd_r_type, bfd *input_bfd,
		       bfd_vma relocation)
{
  bfd_reloc_status_type flag = bfd_reloc_ok;
  bfd_vma addrmask, fieldmask, signmask, ss;
  bfd_vma a, b, sum;
  /* X stands for the existing in-place contents in
     _bfd_relocate_contents; here there is no in-place addend, so it
     is fixed at zero.  */
  bfd_vma x = 0;

  /* These usually come from howto struct. As we don't check for
     values fitting in bitfields or in subpart of words, we set all
     these to values to check as if the field is starting from first
     bit.  */
  unsigned int rightshift = 0;
  unsigned int bitpos = 0;
  unsigned int bitsize = 0;
  bfd_vma src_mask = -1;

  /* Only regular symbol relocations are checked here. Others
     relocations (GOT, TLS) could be checked if the need is
     confirmed. At the moment, we keep previous behavior
     (ie. unchecked) for those. */
  switch (bfd_r_type)
    {
    case BFD_RELOC_KVX_S37_LO10:
    case BFD_RELOC_KVX_S37_UP27:
      bitsize = 37;
      break;

    case BFD_RELOC_KVX_S32_LO5:
    case BFD_RELOC_KVX_S32_UP27:
      bitsize = 32;
      break;

    case BFD_RELOC_KVX_S43_LO10:
    case BFD_RELOC_KVX_S43_UP27:
    case BFD_RELOC_KVX_S43_EX6:
      bitsize = 43;
      break;

    case BFD_RELOC_KVX_S64_LO10:
    case BFD_RELOC_KVX_S64_UP27:
    case BFD_RELOC_KVX_S64_EX27:
      bitsize = 64;
      break;

    default:
      return bfd_reloc_ok;
    }

  /* direct copy/paste from reloc.c below */

  /* Get the values to be added together.  For signed and unsigned
     relocations, we assume that all values should be truncated to
     the size of an address.  For bitfields, all the bits matter.
     See also bfd_check_overflow.  */
  fieldmask = N_ONES (bitsize);
  signmask = ~fieldmask;
  addrmask = (N_ONES (bfd_arch_bits_per_address (input_bfd))
	      | (fieldmask << rightshift));
  a = (relocation & addrmask) >> rightshift;
  b = (x & src_mask & addrmask) >> bitpos;
  addrmask >>= rightshift;

  switch (complain_on_overflow)
    {
    case complain_overflow_signed:
      /* If any sign bits are set, all sign bits must be set.
	 That is, A must be a valid negative address after
	 shifting.  */
      signmask = ~(fieldmask >> 1);
      /* Fall thru */

    case complain_overflow_bitfield:
      /* Much like the signed check, but for a field one bit
	 wider.  We allow a bitfield to represent numbers in the
	 range -2**n to 2**n-1, where n is the number of bits in the
	 field.  Note that when bfd_vma is 32 bits, a 32-bit reloc
	 can't overflow, which is exactly what we want.  */
      ss = a & signmask;
      if (ss != 0 && ss != (addrmask & signmask))
	flag = bfd_reloc_overflow;

      /* We only need this next bit of code if the sign bit of B
	 is below the sign bit of A.  This would only happen if
	 SRC_MASK had fewer bits than BITSIZE.  Note that if
	 SRC_MASK has more bits than BITSIZE, we can get into
	 trouble; we would need to verify that B is in range, as
	 we do for A above.  */
      ss = ((~src_mask) >> 1) & src_mask;
      ss >>= bitpos;

      /* Set all the bits above the sign bit.  */
      b = (b ^ ss) - ss;

      /* Now we can do the addition.  */
      sum = a + b;

      /* See if the result has the correct sign.  Bits above the
	 sign bit are junk now; ignore them.  If the sum is
	 positive, make sure we did not have all negative inputs;
	 if the sum is negative, make sure we did not have all
	 positive inputs.  The test below looks only at the sign
	 bits, and it really just
	 SIGN (A) == SIGN (B) && SIGN (A) != SIGN (SUM)

	 We mask with addrmask here to explicitly allow an address
	 wrap-around.  The Linux kernel relies on it, and it is
	 the only way to write assembler code which can run when
	 loaded at a location 0x80000000 away from the location at
	 which it is linked.  */
      if (((~(a ^ b)) & (a ^ sum)) & signmask & addrmask)
	flag = bfd_reloc_overflow;
      break;

    case complain_overflow_unsigned:
      /* Checking for an unsigned overflow is relatively easy:
	 trim the addresses and add, and trim the result as well.
	 Overflow is normally indicated when the result does not
	 fit in the field.  However, we also need to consider the
	 case when, e.g., fieldmask is 0x7fffffff or smaller, an
	 input is 0x80000000, and bfd_vma is only 32 bits; then we
	 will get sum == 0, but there is an overflow, since the
	 inputs did not fit in the field.  Instead of doing a
	 separate test, we can check for this by or-ing in the
	 operands when testing for the sum overflowing its final
	 field.  */
      sum = (a + b) & addrmask;
      if ((a | b | sum) & signmask)
	flag = bfd_reloc_overflow;
      break;

    default:
      abort ();
    }
  return flag;
}
1973
1974
/* Perform a relocation as part of a final link.  */
1975
static bfd_reloc_status_type
1976
elf64_kvx_final_link_relocate (reloc_howto_type *howto,
1977
             bfd *input_bfd,
1978
             bfd *output_bfd,
1979
             asection *input_section,
1980
             bfd_byte *contents,
1981
             Elf_Internal_Rela *rel,
1982
             bfd_vma value,
1983
             struct bfd_link_info *info,
1984
             asection *sym_sec,
1985
             struct elf_link_hash_entry *h,
1986
             bool *unresolved_reloc_p,
1987
             bool save_addend,
1988
             bfd_vma *saved_addend,
1989
             Elf_Internal_Sym *sym)
1990
0
{
1991
0
  Elf_Internal_Shdr *symtab_hdr;
1992
0
  unsigned int r_type = howto->type;
1993
0
  bfd_reloc_code_real_type bfd_r_type
1994
0
    = elf64_kvx_bfd_reloc_from_howto (howto);
1995
0
  bfd_reloc_code_real_type new_bfd_r_type;
1996
0
  unsigned long r_symndx;
1997
0
  bfd_byte *hit_data = contents + rel->r_offset;
1998
0
  bfd_vma place, off;
1999
0
  bfd_vma addend;
2000
0
  struct elf_kvx_link_hash_table *globals;
2001
0
  bool weak_undef_p;
2002
0
  asection *base_got;
2003
0
  bfd_reloc_status_type rret = bfd_reloc_ok;
2004
0
  bool resolved_to_zero;
2005
0
  globals = elf_kvx_hash_table (info);
2006
2007
0
  symtab_hdr = &elf_symtab_hdr (input_bfd);
2008
2009
0
  BFD_ASSERT (is_kvx_elf (input_bfd));
2010
2011
0
  r_symndx = ELF64_R_SYM (rel->r_info);
2012
2013
  /* It is possible to have linker relaxations on some TLS access
2014
     models.  Update our information here.  */
2015
0
  new_bfd_r_type = kvx_tls_transition (input_bfd, info, r_type, h, r_symndx);
2016
0
  if (new_bfd_r_type != bfd_r_type)
2017
0
    {
2018
0
      bfd_r_type = new_bfd_r_type;
2019
0
      howto = elf64_kvx_howto_from_bfd_reloc (bfd_r_type);
2020
0
      BFD_ASSERT (howto != NULL);
2021
0
      r_type = howto->type;
2022
0
    }
2023
2024
0
  place = input_section->output_section->vma
2025
0
    + input_section->output_offset + rel->r_offset;
2026
2027
  /* Get addend, accumulating the addend for consecutive relocs
2028
     which refer to the same offset.  */
2029
0
  addend = saved_addend ? *saved_addend : 0;
2030
0
  addend += rel->r_addend;
2031
2032
0
  weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
2033
0
      : bfd_is_und_section (sym_sec));
2034
0
  resolved_to_zero = (h != NULL
2035
0
          && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
2036
2037
0
  switch (bfd_r_type)
2038
0
    {
2039
0
    case BFD_RELOC_KVX_64:
2040
0
#if ARCH_SIZE == 64
2041
0
    case BFD_RELOC_KVX_32:
2042
0
#endif
2043
0
    case BFD_RELOC_KVX_S37_LO10:
2044
0
    case BFD_RELOC_KVX_S37_UP27:
2045
2046
0
    case BFD_RELOC_KVX_S32_LO5:
2047
0
    case BFD_RELOC_KVX_S32_UP27:
2048
2049
0
    case BFD_RELOC_KVX_S43_LO10:
2050
0
    case BFD_RELOC_KVX_S43_UP27:
2051
0
    case BFD_RELOC_KVX_S43_EX6:
2052
2053
0
    case BFD_RELOC_KVX_S64_LO10:
2054
0
    case BFD_RELOC_KVX_S64_UP27:
2055
0
    case BFD_RELOC_KVX_S64_EX27:
2056
      /* When generating a shared library or PIE, these relocations
2057
   are copied into the output file to be resolved at run time.  */
2058
0
      if (bfd_link_pic (info)
2059
0
    && (input_section->flags & SEC_ALLOC)
2060
0
    && (h == NULL
2061
0
        || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2062
0
      && !resolved_to_zero)
2063
0
        || h->root.type != bfd_link_hash_undefweak))
2064
0
  {
2065
0
    Elf_Internal_Rela outrel;
2066
0
    bfd_byte *loc;
2067
0
    bool skip, relocate;
2068
0
    asection *sreloc;
2069
2070
0
    *unresolved_reloc_p = false;
2071
2072
0
    skip = false;
2073
0
    relocate = false;
2074
2075
0
    outrel.r_addend = addend;
2076
0
    outrel.r_offset =
2077
0
      _bfd_elf_section_offset (output_bfd, info, input_section,
2078
0
             rel->r_offset);
2079
0
    if (outrel.r_offset == (bfd_vma) - 1)
2080
0
      skip = true;
2081
0
    else if (outrel.r_offset == (bfd_vma) - 2)
2082
0
      {
2083
0
        skip = true;
2084
0
        relocate = true;
2085
0
      }
2086
2087
0
    outrel.r_offset += (input_section->output_section->vma
2088
0
            + input_section->output_offset);
2089
2090
0
    if (skip)
2091
0
      memset (&outrel, 0, sizeof outrel);
2092
0
    else if (h != NULL
2093
0
       && h->dynindx != -1
2094
0
       && (!bfd_link_pic (info) || !info->symbolic
2095
0
           || !h->def_regular))
2096
0
      outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
2097
0
    else if (bfd_r_type == BFD_RELOC_KVX_32
2098
0
       || bfd_r_type == BFD_RELOC_KVX_64)
2099
0
      {
2100
0
        int symbol;
2101
2102
        /* On SVR4-ish systems, the dynamic loader cannot
2103
     relocate the text and data segments independently,
2104
     so the symbol does not matter.  */
2105
0
        symbol = 0;
2106
0
        outrel.r_info = ELF64_R_INFO (symbol, R_KVX_RELATIVE);
2107
0
        outrel.r_addend += value;
2108
0
      }
2109
0
    else if (bfd_link_pic (info) && info->symbolic)
2110
0
      {
2111
0
        goto skip_because_pic;
2112
0
      }
2113
0
    else
2114
0
      {
2115
        /* We may endup here from bad input code trying to
2116
     insert relocation on symbols within code.  We do not
2117
     want that currently, and such code should use GOT +
2118
     KVX_32/64 reloc that translate in KVX_RELATIVE.  */
2119
0
        const char *name;
2120
0
        if (h && h->root.root.string)
2121
0
    name = h->root.root.string;
2122
0
        else
2123
0
    name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2124
0
           NULL);
2125
2126
0
        (*_bfd_error_handler)
2127
    /* xgettext:c-format */
2128
0
    (_("%pB(%pA+%#" PRIx64 "): "
2129
0
       "unresolvable %s relocation in section `%s'"),
2130
0
     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2131
0
     name);
2132
0
        return bfd_reloc_notsupported;
2133
0
      }
2134
2135
0
    sreloc = elf_section_data (input_section)->sreloc;
2136
0
    if (sreloc == NULL || sreloc->contents == NULL)
2137
0
      return bfd_reloc_notsupported;
2138
2139
0
    loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
2140
0
    bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
2141
2142
0
    if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
2143
0
      {
2144
        /* Sanity to check that we have previously allocated
2145
     sufficient space in the relocation section for the
2146
     number of relocations we actually want to emit.  */
2147
0
        abort ();
2148
0
      }
2149
2150
    /* If this reloc is against an external symbol, we do not want to
2151
       fiddle with the addend.  Otherwise, we need to include the symbol
2152
       value so that it becomes an addend for the dynamic reloc.  */
2153
0
    if (!relocate)
2154
0
      return bfd_reloc_ok;
2155
2156
0
    rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2157
0
          input_bfd, value + addend);
2158
0
    if (rret != bfd_reloc_ok)
2159
0
      return rret;
2160
2161
0
    return _bfd_final_link_relocate (howto, input_bfd, input_section,
2162
0
             contents, rel->r_offset, value,
2163
0
             addend);
2164
0
  }
2165
2166
0
    skip_because_pic:
2167
0
      rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2168
0
            input_bfd, value + addend);
2169
0
      if (rret != bfd_reloc_ok)
2170
0
  return rret;
2171
2172
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2173
0
               contents, rel->r_offset, value,
2174
0
               addend);
2175
0
      break;
2176
2177
0
    case BFD_RELOC_KVX_PCREL17:
2178
0
    case BFD_RELOC_KVX_PCREL27:
2179
0
      {
2180
  /* BCU insn are always first in a bundle, so there is no need
2181
     to correct the address using offset within bundle.  */
2182
2183
0
  asection *splt = globals->root.splt;
2184
0
  bool via_plt_p =
2185
0
    splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
2186
2187
  /* A call to an undefined weak symbol is converted to a jump to
2188
     the next instruction unless a PLT entry will be created.
2189
     The jump to the next instruction is optimized as a NOP.
2190
     Do the same for local undefined symbols.  */
2191
0
  if (weak_undef_p && ! via_plt_p)
2192
0
    {
2193
0
      bfd_putl32 (INSN_NOP, hit_data);
2194
0
      return bfd_reloc_ok;
2195
0
    }
2196
2197
  /* If the call goes through a PLT entry, make sure to
2198
     check distance to the right destination address.  */
2199
0
  if (via_plt_p)
2200
0
    value = (splt->output_section->vma
2201
0
       + splt->output_offset + h->plt.offset);
2202
2203
  /* Check if a stub has to be inserted because the destination
2204
     is too far away.  */
2205
0
  struct elf_kvx_stub_hash_entry *stub_entry = NULL;
2206
2207
  /* If the target symbol is global and marked as a function the
2208
     relocation applies a function call or a tail call.  In this
2209
     situation we can veneer out of range branches.  The veneers
2210
     use R16 and R17 hence cannot be used arbitrary out of range
2211
     branches that occur within the body of a function.  */
2212
2213
  /* Check if a stub has to be inserted because the destination
2214
     is too far away.  */
2215
0
  if (! kvx_valid_call_p (value, place))
2216
0
    {
2217
      /* The target is out of reach, so redirect the branch to
2218
         the local stub for this function.  */
2219
0
      stub_entry = elf64_kvx_get_stub_entry (input_section,
2220
0
               sym_sec, h,
2221
0
               rel, globals);
2222
0
      if (stub_entry != NULL)
2223
0
        value = (stub_entry->stub_offset
2224
0
           + stub_entry->stub_sec->output_offset
2225
0
           + stub_entry->stub_sec->output_section->vma);
2226
      /* We have redirected the destination to stub entry address,
2227
         so ignore any addend record in the original rela entry.  */
2228
0
      addend = 0;
2229
0
    }
2230
0
      }
2231
0
      *unresolved_reloc_p = false;
2232
2233
      /* FALLTHROUGH */
2234
2235
      /* PCREL 32 are used in dwarf2 table for exception handling */
2236
0
    case BFD_RELOC_KVX_32_PCREL:
2237
0
    case BFD_RELOC_KVX_S64_PCREL_LO10:
2238
0
    case BFD_RELOC_KVX_S64_PCREL_UP27:
2239
0
    case BFD_RELOC_KVX_S64_PCREL_EX27:
2240
0
    case BFD_RELOC_KVX_S37_PCREL_LO10:
2241
0
    case BFD_RELOC_KVX_S37_PCREL_UP27:
2242
0
    case BFD_RELOC_KVX_S43_PCREL_LO10:
2243
0
    case BFD_RELOC_KVX_S43_PCREL_UP27:
2244
0
    case BFD_RELOC_KVX_S43_PCREL_EX6:
2245
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2246
0
               contents, rel->r_offset, value,
2247
0
               addend);
2248
0
      break;
2249
2250
0
    case BFD_RELOC_KVX_S37_TLS_LE_LO10:
2251
0
    case BFD_RELOC_KVX_S37_TLS_LE_UP27:
2252
2253
0
    case BFD_RELOC_KVX_S43_TLS_LE_LO10:
2254
0
    case BFD_RELOC_KVX_S43_TLS_LE_UP27:
2255
0
    case BFD_RELOC_KVX_S43_TLS_LE_EX6:
2256
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2257
0
               contents, rel->r_offset,
2258
0
               value - tpoff_base (info), addend);
2259
0
      break;
2260
2261
0
    case BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10:
2262
0
    case BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27:
2263
2264
0
    case BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10:
2265
0
    case BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27:
2266
0
    case BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6:
2267
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2268
0
               contents, rel->r_offset,
2269
0
               value - dtpoff_base (info), addend);
2270
2271
0
    case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2272
0
    case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2273
2274
0
    case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2275
0
    case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2276
0
    case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2277
2278
0
    case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2279
0
    case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2280
2281
0
    case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2282
0
    case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2283
0
    case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2284
2285
0
    case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2286
0
    case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2287
2288
0
    case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2289
0
    case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2290
0
    case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2291
2292
0
      if (globals->root.sgot == NULL)
2293
0
  return bfd_reloc_notsupported;
2294
0
      value = symbol_got_offset (input_bfd, h, r_symndx);
2295
2296
0
      _bfd_final_link_relocate (howto, input_bfd, input_section,
2297
0
        contents, rel->r_offset, value, addend);
2298
0
      *unresolved_reloc_p = false;
2299
0
      break;
2300
2301
0
    case BFD_RELOC_KVX_S37_GOTADDR_UP27:
2302
0
    case BFD_RELOC_KVX_S37_GOTADDR_LO10:
2303
2304
0
    case BFD_RELOC_KVX_S43_GOTADDR_UP27:
2305
0
    case BFD_RELOC_KVX_S43_GOTADDR_EX6:
2306
0
    case BFD_RELOC_KVX_S43_GOTADDR_LO10:
2307
2308
0
    case BFD_RELOC_KVX_S64_GOTADDR_UP27:
2309
0
    case BFD_RELOC_KVX_S64_GOTADDR_EX27:
2310
0
    case BFD_RELOC_KVX_S64_GOTADDR_LO10:
2311
0
      {
2312
0
  if (globals->root.sgot == NULL)
2313
0
    BFD_ASSERT (h != NULL);
2314
2315
0
  value = globals->root.sgot->output_section->vma
2316
0
    + globals->root.sgot->output_offset;
2317
2318
0
  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2319
0
           contents, rel->r_offset, value,
2320
0
           addend);
2321
0
      }
2322
0
      break;
2323
2324
0
    case BFD_RELOC_KVX_S37_GOTOFF_LO10:
2325
0
    case BFD_RELOC_KVX_S37_GOTOFF_UP27:
2326
2327
0
    case BFD_RELOC_KVX_32_GOTOFF:
2328
0
    case BFD_RELOC_KVX_64_GOTOFF:
2329
2330
0
    case BFD_RELOC_KVX_S43_GOTOFF_LO10:
2331
0
    case BFD_RELOC_KVX_S43_GOTOFF_UP27:
2332
0
    case BFD_RELOC_KVX_S43_GOTOFF_EX6:
2333
2334
0
      {
2335
0
  asection *basegot = globals->root.sgot;
2336
  /* BFD_ASSERT(h == NULL); */
2337
0
  BFD_ASSERT(globals->root.sgot != NULL);
2338
0
  value -= basegot->output_section->vma + basegot->output_offset;
2339
0
  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2340
0
           contents, rel->r_offset, value,
2341
0
           addend);
2342
0
      }
2343
0
      break;
2344
2345
0
    case BFD_RELOC_KVX_S37_GOT_LO10:
2346
0
    case BFD_RELOC_KVX_S37_GOT_UP27:
2347
2348
0
    case BFD_RELOC_KVX_32_GOT:
2349
0
    case BFD_RELOC_KVX_64_GOT:
2350
2351
0
    case BFD_RELOC_KVX_S43_GOT_LO10:
2352
0
    case BFD_RELOC_KVX_S43_GOT_UP27:
2353
0
    case BFD_RELOC_KVX_S43_GOT_EX6:
2354
2355
0
      if (globals->root.sgot == NULL)
2356
0
  BFD_ASSERT (h != NULL);
2357
2358
0
      if (h != NULL)
2359
0
  {
2360
0
    value = kvx_calculate_got_entry_vma (h, globals, info, value,
2361
0
                 output_bfd,
2362
0
                 unresolved_reloc_p);
2363
#ifdef UGLY_DEBUG
2364
    printf("GOT_LO/HI for %s, value %x\n", h->root.root.string, value);
2365
#endif
2366
2367
0
    return _bfd_final_link_relocate (howto, input_bfd, input_section,
2368
0
             contents, rel->r_offset, value,
2369
0
             addend);
2370
0
  }
2371
0
      else
2372
0
  {
2373
#ifdef UGLY_DEBUG
2374
    printf("GOT_LO/HI with h NULL, initial value %x\n", value);
2375
#endif
2376
0
    struct elf_kvx_local_symbol *locals = elf_kvx_locals (input_bfd);
2377
2378
0
    if (locals == NULL)
2379
0
      {
2380
0
        int howto_index = bfd_r_type - BFD_RELOC_KVX_RELOC_START;
2381
0
        _bfd_error_handler
2382
    /* xgettext:c-format */
2383
0
    (_("%pB: local symbol descriptor table be NULL when applying "
2384
0
       "relocation %s against local symbol"),
2385
0
     input_bfd, elf_kvx_howto_table[howto_index].name);
2386
0
        abort ();
2387
0
      }
2388
2389
0
    off = symbol_got_offset (input_bfd, h, r_symndx);
2390
0
    base_got = globals->root.sgot;
2391
0
    bfd_vma got_entry_addr = (base_got->output_section->vma
2392
0
            + base_got->output_offset + off);
2393
2394
0
    if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2395
0
      {
2396
0
        bfd_put_64 (output_bfd, value, base_got->contents + off);
2397
2398
0
        if (bfd_link_pic (info))
2399
0
    {
2400
0
      asection *s;
2401
0
      Elf_Internal_Rela outrel;
2402
2403
      /* For PIC executables and shared libraries we need
2404
         to relocate the GOT entry at run time.  */
2405
0
      s = globals->root.srelgot;
2406
0
      if (s == NULL)
2407
0
        abort ();
2408
2409
0
      outrel.r_offset = got_entry_addr;
2410
0
      outrel.r_info = ELF64_R_INFO (0, R_KVX_RELATIVE);
2411
0
      outrel.r_addend = value;
2412
0
      _bfd_elf_append_rela (output_bfd, s, &outrel);
2413
0
    }
2414
2415
0
        symbol_got_offset_mark (input_bfd, h, r_symndx);
2416
0
      }
2417
2418
    /* Update the relocation value to GOT entry addr as we have
2419
       transformed the direct data access into an indirect data
2420
       access through GOT.  */
2421
0
    value = got_entry_addr;
2422
2423
0
    return _bfd_final_link_relocate (howto, input_bfd, input_section,
2424
0
             contents, rel->r_offset, off, 0);
2425
0
  }
2426
0
      break;
2427
2428
0
    default:
2429
0
      return bfd_reloc_notsupported;
2430
0
    }
2431
2432
0
  if (saved_addend)
2433
0
    *saved_addend = value;
2434
2435
  /* Only apply the final relocation in a sequence.  */
2436
0
  if (save_addend)
2437
0
    return bfd_reloc_continue;
2438
2439
0
  return _bfd_kvx_elf_put_addend (input_bfd, hit_data, bfd_r_type,
2440
0
          howto, value);
2441
0
}
2442
2443
2444
2445
/* Relocate a KVX ELF section.  */
2446
2447
static int
2448
elf64_kvx_relocate_section (bfd *output_bfd,
2449
          struct bfd_link_info *info,
2450
          bfd *input_bfd,
2451
          asection *input_section,
2452
          bfd_byte *contents,
2453
          Elf_Internal_Rela *relocs,
2454
          Elf_Internal_Sym *local_syms,
2455
          asection **local_sections)
2456
0
{
2457
0
  Elf_Internal_Shdr *symtab_hdr;
2458
0
  struct elf_link_hash_entry **sym_hashes;
2459
0
  Elf_Internal_Rela *rel;
2460
0
  Elf_Internal_Rela *relend;
2461
0
  const char *name;
2462
0
  struct elf_kvx_link_hash_table *globals;
2463
0
  bool save_addend = false;
2464
0
  bfd_vma addend = 0;
2465
2466
0
  globals = elf_kvx_hash_table (info);
2467
2468
0
  symtab_hdr = &elf_symtab_hdr (input_bfd);
2469
0
  sym_hashes = elf_sym_hashes (input_bfd);
2470
2471
0
  rel = relocs;
2472
0
  relend = relocs + input_section->reloc_count;
2473
0
  for (; rel < relend; rel++)
2474
0
    {
2475
0
      unsigned int r_type;
2476
0
      bfd_reloc_code_real_type bfd_r_type;
2477
0
      reloc_howto_type *howto;
2478
0
      unsigned long r_symndx;
2479
0
      Elf_Internal_Sym *sym;
2480
0
      asection *sec;
2481
0
      struct elf_link_hash_entry *h;
2482
0
      bfd_vma relocation;
2483
0
      bfd_reloc_status_type r;
2484
0
      arelent bfd_reloc;
2485
0
      char sym_type;
2486
0
      bool unresolved_reloc = false;
2487
0
      char *error_message = NULL;
2488
2489
0
      r_symndx = ELF64_R_SYM (rel->r_info);
2490
0
      r_type = ELF64_R_TYPE (rel->r_info);
2491
2492
0
      bfd_reloc.howto = elf64_kvx_howto_from_type (input_bfd, r_type);
2493
0
      howto = bfd_reloc.howto;
2494
2495
0
      if (howto == NULL)
2496
0
  return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2497
2498
0
      bfd_r_type = elf64_kvx_bfd_reloc_from_howto (howto);
2499
2500
0
      h = NULL;
2501
0
      sym = NULL;
2502
0
      sec = NULL;
2503
2504
0
      if (r_symndx < symtab_hdr->sh_info) /* A local symbol. */
2505
0
  {
2506
0
    sym = local_syms + r_symndx;
2507
0
    sym_type = ELF64_ST_TYPE (sym->st_info);
2508
0
    sec = local_sections[r_symndx];
2509
2510
    /* An object file might have a reference to a local
2511
       undefined symbol.  This is a draft object file, but we
2512
       should at least do something about it.  */
2513
0
    if (r_type != R_KVX_NONE
2514
0
        && r_type != R_KVX_S37_GOTADDR_LO10
2515
0
        && r_type != R_KVX_S37_GOTADDR_UP27
2516
0
        && r_type != R_KVX_S64_GOTADDR_LO10
2517
0
        && r_type != R_KVX_S64_GOTADDR_UP27
2518
0
        && r_type != R_KVX_S64_GOTADDR_EX27
2519
0
        && r_type != R_KVX_S43_GOTADDR_LO10
2520
0
        && r_type != R_KVX_S43_GOTADDR_UP27
2521
0
        && r_type != R_KVX_S43_GOTADDR_EX6
2522
0
        && bfd_is_und_section (sec)
2523
0
        && ELF_ST_BIND (sym->st_info) != STB_WEAK)
2524
0
      (*info->callbacks->undefined_symbol)
2525
0
        (info, bfd_elf_string_from_elf_section
2526
0
         (input_bfd, symtab_hdr->sh_link, sym->st_name),
2527
0
         input_bfd, input_section, rel->r_offset, true);
2528
2529
0
    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2530
0
  }
2531
0
      else
2532
0
  {
2533
0
    bool warned, ignored;
2534
2535
0
    RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2536
0
           r_symndx, symtab_hdr, sym_hashes,
2537
0
           h, sec, relocation,
2538
0
           unresolved_reloc, warned, ignored);
2539
2540
0
    sym_type = h->type;
2541
0
  }
2542
2543
0
      if (sec != NULL && discarded_section (sec))
2544
0
  RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
2545
0
           rel, 1, relend, R_KVX_NONE,
2546
0
           howto, 0, contents);
2547
2548
0
      if (bfd_link_relocatable (info))
2549
0
  continue;
2550
2551
0
      if (h != NULL)
2552
0
  name = h->root.root.string;
2553
0
      else
2554
0
  {
2555
0
    name = (bfd_elf_string_from_elf_section
2556
0
      (input_bfd, symtab_hdr->sh_link, sym->st_name));
2557
0
    if (name == NULL || *name == '\0')
2558
0
      name = bfd_section_name (sec);
2559
0
  }
2560
2561
0
      if (r_symndx != 0
2562
0
    && r_type != R_KVX_NONE
2563
0
    && (h == NULL
2564
0
        || h->root.type == bfd_link_hash_defined
2565
0
        || h->root.type == bfd_link_hash_defweak)
2566
0
    && IS_KVX_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
2567
0
  {
2568
0
    (*_bfd_error_handler)
2569
0
      ((sym_type == STT_TLS
2570
        /* xgettext:c-format */
2571
0
        ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
2572
        /* xgettext:c-format */
2573
0
        : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
2574
0
       input_bfd,
2575
0
       input_section, (uint64_t) rel->r_offset, howto->name, name);
2576
0
  }
2577
2578
      /* Original aarch64 has relaxation handling for TLS here. */
2579
0
      r = bfd_reloc_continue;
2580
2581
      /* There may be multiple consecutive relocations for the
2582
   same offset.  In that case we are supposed to treat the
2583
   output of each relocation as the addend for the next.  */
2584
0
      if (rel + 1 < relend
2585
0
    && rel->r_offset == rel[1].r_offset
2586
0
    && ELF64_R_TYPE (rel[1].r_info) != R_KVX_NONE)
2587
2588
0
  save_addend = true;
2589
0
      else
2590
0
  save_addend = false;
2591
2592
0
      if (r == bfd_reloc_continue)
2593
0
  r = elf64_kvx_final_link_relocate (howto, input_bfd, output_bfd,
2594
0
             input_section, contents, rel,
2595
0
             relocation, info, sec,
2596
0
             h, &unresolved_reloc,
2597
0
             save_addend, &addend, sym);
2598
2599
0
      switch (elf64_kvx_bfd_reloc_from_type (input_bfd, r_type))
2600
0
  {
2601
0
  case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2602
0
  case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2603
2604
0
  case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2605
0
  case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2606
0
  case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2607
2608
0
  case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2609
0
  case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2610
2611
0
  case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2612
0
  case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2613
0
  case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2614
2615
0
    if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2616
0
      {
2617
0
        bool need_relocs = false;
2618
0
        bfd_byte *loc;
2619
0
        int indx;
2620
0
        bfd_vma off;
2621
2622
0
        off = symbol_got_offset (input_bfd, h, r_symndx);
2623
0
        indx = h && h->dynindx != -1 ? h->dynindx : 0;
2624
2625
0
        need_relocs =
2626
0
    (bfd_link_pic (info) || indx != 0) &&
2627
0
    (h == NULL
2628
0
     || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2629
0
     || h->root.type != bfd_link_hash_undefweak);
2630
2631
0
        BFD_ASSERT (globals->root.srelgot != NULL);
2632
2633
0
        if (need_relocs)
2634
0
    {
2635
0
      Elf_Internal_Rela rela;
2636
0
      rela.r_info = ELF64_R_INFO (indx, R_KVX_64_DTPMOD);
2637
0
      rela.r_addend = 0;
2638
0
      rela.r_offset = globals->root.sgot->output_section->vma +
2639
0
        globals->root.sgot->output_offset + off;
2640
2641
0
      loc = globals->root.srelgot->contents;
2642
0
      loc += globals->root.srelgot->reloc_count++
2643
0
        * RELOC_SIZE (htab);
2644
0
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
2645
2646
0
      bfd_reloc_code_real_type real_type =
2647
0
        elf64_kvx_bfd_reloc_from_type (input_bfd, r_type);
2648
2649
0
      if (real_type == BFD_RELOC_KVX_S37_TLS_LD_LO10
2650
0
          || real_type == BFD_RELOC_KVX_S37_TLS_LD_UP27
2651
0
          || real_type == BFD_RELOC_KVX_S43_TLS_LD_LO10
2652
0
          || real_type == BFD_RELOC_KVX_S43_TLS_LD_UP27
2653
0
          || real_type == BFD_RELOC_KVX_S43_TLS_LD_EX6)
2654
0
        {
2655
          /* For local dynamic, don't generate DTPOFF in any case.
2656
       Initialize the DTPOFF slot into zero, so we get module
2657
       base address when invoke runtime TLS resolver.  */
2658
0
          bfd_put_64 (output_bfd, 0,
2659
0
          globals->root.sgot->contents + off
2660
0
          + GOT_ENTRY_SIZE);
2661
0
        }
2662
0
      else if (indx == 0)
2663
0
        {
2664
0
          bfd_put_64 (output_bfd,
2665
0
          relocation - dtpoff_base (info),
2666
0
          globals->root.sgot->contents + off
2667
0
          + GOT_ENTRY_SIZE);
2668
0
        }
2669
0
      else
2670
0
        {
2671
          /* This TLS symbol is global. We emit a
2672
       relocation to fixup the tls offset at load
2673
       time.  */
2674
0
          rela.r_info =
2675
0
      ELF64_R_INFO (indx, R_KVX_64_DTPOFF);
2676
0
          rela.r_addend = 0;
2677
0
          rela.r_offset =
2678
0
      (globals->root.sgot->output_section->vma
2679
0
       + globals->root.sgot->output_offset + off
2680
0
       + GOT_ENTRY_SIZE);
2681
2682
0
          loc = globals->root.srelgot->contents;
2683
0
          loc += globals->root.srelgot->reloc_count++
2684
0
      * RELOC_SIZE (globals);
2685
0
          bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
2686
0
          bfd_put_64 (output_bfd, (bfd_vma) 0,
2687
0
          globals->root.sgot->contents + off
2688
0
          + GOT_ENTRY_SIZE);
2689
0
        }
2690
0
    }
2691
0
        else
2692
0
    {
2693
0
      bfd_put_64 (output_bfd, (bfd_vma) 1,
2694
0
            globals->root.sgot->contents + off);
2695
0
      bfd_put_64 (output_bfd,
2696
0
            relocation - dtpoff_base (info),
2697
0
            globals->root.sgot->contents + off
2698
0
            + GOT_ENTRY_SIZE);
2699
0
    }
2700
2701
0
        symbol_got_offset_mark (input_bfd, h, r_symndx);
2702
0
      }
2703
0
    break;
2704
2705
0
  case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2706
0
  case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2707
2708
0
  case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2709
0
  case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2710
0
  case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2711
0
    if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2712
0
      {
2713
0
        bool need_relocs = false;
2714
0
        bfd_byte *loc;
2715
0
        int indx;
2716
0
        bfd_vma off;
2717
2718
0
        off = symbol_got_offset (input_bfd, h, r_symndx);
2719
2720
0
        indx = h && h->dynindx != -1 ? h->dynindx : 0;
2721
2722
0
        need_relocs =
2723
0
    (bfd_link_pic (info) || indx != 0) &&
2724
0
    (h == NULL
2725
0
     || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2726
0
     || h->root.type != bfd_link_hash_undefweak);
2727
2728
0
        BFD_ASSERT (globals->root.srelgot != NULL);
2729
2730
0
        if (need_relocs)
2731
0
    {
2732
0
      Elf_Internal_Rela rela;
2733
2734
0
      if (indx == 0)
2735
0
        rela.r_addend = relocation - dtpoff_base (info);
2736
0
      else
2737
0
        rela.r_addend = 0;
2738
2739
0
      rela.r_info = ELF64_R_INFO (indx, R_KVX_64_TPOFF);
2740
0
      rela.r_offset = globals->root.sgot->output_section->vma +
2741
0
        globals->root.sgot->output_offset + off;
2742
2743
0
      loc = globals->root.srelgot->contents;
2744
0
      loc += globals->root.srelgot->reloc_count++
2745
0
        * RELOC_SIZE (htab);
2746
2747
0
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
2748
2749
0
      bfd_put_64 (output_bfd, rela.r_addend,
2750
0
            globals->root.sgot->contents + off);
2751
0
    }
2752
0
        else
2753
0
    bfd_put_64 (output_bfd, relocation - tpoff_base (info),
2754
0
          globals->root.sgot->contents + off);
2755
2756
0
        symbol_got_offset_mark (input_bfd, h, r_symndx);
2757
0
      }
2758
0
    break;
2759
2760
0
  default:
2761
0
    break;
2762
0
  }
2763
2764
      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
2765
   because such sections are not SEC_ALLOC and thus ld.so will
2766
   not process them.  */
2767
0
      if (unresolved_reloc
2768
0
    && !((input_section->flags & SEC_DEBUGGING) != 0
2769
0
         && h->def_dynamic)
2770
0
    && _bfd_elf_section_offset (output_bfd, info, input_section,
2771
0
              +rel->r_offset) != (bfd_vma) - 1)
2772
0
  {
2773
0
    (*_bfd_error_handler)
2774
      /* xgettext:c-format */
2775
0
      (_("%pB(%pA+%#" PRIx64 "): "
2776
0
         "unresolvable %s relocation against symbol `%s'"),
2777
0
       input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2778
0
       h->root.root.string);
2779
0
    return false;
2780
0
  }
2781
2782
0
      if (r != bfd_reloc_ok && r != bfd_reloc_continue)
2783
0
  {
2784
0
    switch (r)
2785
0
      {
2786
0
      case bfd_reloc_overflow:
2787
0
        (*info->callbacks->reloc_overflow)
2788
0
    (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
2789
0
     input_bfd, input_section, rel->r_offset);
2790
2791
        /* Original aarch64 code had a check for alignement correctness */
2792
0
        break;
2793
2794
0
      case bfd_reloc_undefined:
2795
0
        (*info->callbacks->undefined_symbol)
2796
0
    (info, name, input_bfd, input_section, rel->r_offset, true);
2797
0
        break;
2798
2799
0
      case bfd_reloc_outofrange:
2800
0
        error_message = _("out of range");
2801
0
        goto common_error;
2802
2803
0
      case bfd_reloc_notsupported:
2804
0
        error_message = _("unsupported relocation");
2805
0
        goto common_error;
2806
2807
0
      case bfd_reloc_dangerous:
2808
        /* error_message should already be set.  */
2809
0
        goto common_error;
2810
2811
0
      default:
2812
0
        error_message = _("unknown error");
2813
        /* Fall through.  */
2814
2815
0
      common_error:
2816
0
        BFD_ASSERT (error_message != NULL);
2817
0
        (*info->callbacks->reloc_dangerous)
2818
0
    (info, error_message, input_bfd, input_section, rel->r_offset);
2819
0
        break;
2820
0
      }
2821
0
  }
2822
2823
0
      if (!save_addend)
2824
0
  addend = 0;
2825
0
    }
2826
2827
0
  return true;
2828
0
}
2829
2830
/* Set the right machine number.  */
2831
2832
static bool
2833
elf64_kvx_object_p (bfd *abfd)
2834
437
{
2835
  /* must be coherent with default arch in cpu-kvx.c */
2836
437
  int e_set = bfd_mach_kv3_1;
2837
2838
437
  if (elf_elfheader (abfd)->e_machine == EM_KVX)
2839
437
    {
2840
437
      int e_core = elf_elfheader (abfd)->e_flags & ELF_KVX_CORE_MASK;
2841
437
      switch(e_core)
2842
437
  {
2843
0
#if ARCH_SIZE == 64
2844
89
  case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1_64; break;
2845
7
  case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2_64; break;
2846
12
  case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1_64; break;
2847
#else
2848
  case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1; break;
2849
  case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2; break;
2850
  case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1; break;
2851
#endif
2852
329
  default:
2853
329
    (*_bfd_error_handler)(_("%s: Bad ELF id: `%d'"),
2854
329
        abfd->filename, e_core);
2855
437
  }
2856
437
    }
2857
437
  return bfd_default_set_arch_mach (abfd, bfd_arch_kvx, e_set);
2858
437
}
2859
2860
/* Function to keep KVX specific flags in the ELF header.  */
2861
2862
static bool
2863
elf64_kvx_set_private_flags (bfd *abfd, flagword flags)
2864
0
{
2865
0
  if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
2866
0
    {
2867
0
    }
2868
0
  else
2869
0
    {
2870
0
      elf_elfheader (abfd)->e_flags = flags;
2871
0
      elf_flags_init (abfd) = true;
2872
0
    }
2873
2874
0
  return true;
2875
0
}
2876
2877
/* Merge backend specific data from an object file to the output
2878
   object file when linking.  */
2879
2880
static bool
2881
elf64_kvx_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
2882
0
{
2883
0
  bfd *obfd = info->output_bfd;
2884
0
  flagword out_flags;
2885
0
  flagword in_flags;
2886
0
  bool flags_compatible = true;
2887
0
  asection *sec;
2888
2889
  /* Check if we have the same endianess.  */
2890
0
  if (!_bfd_generic_verify_endian_match (ibfd, info))
2891
0
    return false;
2892
2893
0
  if (!is_kvx_elf (ibfd))
2894
0
    return true;
2895
2896
  /* The input BFD must have had its flags initialised.  */
2897
  /* The following seems bogus to me -- The flags are initialized in
2898
     the assembler but I don't think an elf_flags_init field is
2899
     written into the object.  */
2900
  /* BFD_ASSERT (elf_flags_init (ibfd)); */
2901
2902
0
  if (bfd_get_arch_size (ibfd) != bfd_get_arch_size (obfd))
2903
0
    {
2904
0
      const char *msg;
2905
2906
0
      if (bfd_get_arch_size (ibfd) == 32
2907
0
    && bfd_get_arch_size (obfd) == 64)
2908
0
  msg = _("%s: compiled as 32-bit object and %s is 64-bit");
2909
0
      else if (bfd_get_arch_size (ibfd) == 64
2910
0
         && bfd_get_arch_size (obfd) == 32)
2911
0
  msg = _("%s: compiled as 64-bit object and %s is 32-bit");
2912
0
      else
2913
0
  msg = _("%s: object size does not match that of target %s");
2914
2915
0
      (*_bfd_error_handler) (msg, bfd_get_filename (ibfd),
2916
0
           bfd_get_filename (obfd));
2917
0
      bfd_set_error (bfd_error_wrong_format);
2918
0
      return false;
2919
0
    }
2920
2921
0
  in_flags = elf_elfheader (ibfd)->e_flags;
2922
0
  out_flags = elf_elfheader (obfd)->e_flags;
2923
2924
0
  if (!elf_flags_init (obfd))
2925
0
    {
2926
      /* If the input is the default architecture and had the default
2927
   flags then do not bother setting the flags for the output
2928
   architecture, instead allow future merges to do this.  If no
2929
   future merges ever set these flags then they will retain their
2930
   uninitialised values, which surprise surprise, correspond
2931
   to the default values.  */
2932
0
      if (bfd_get_arch_info (ibfd)->the_default
2933
0
    && elf_elfheader (ibfd)->e_flags == 0)
2934
0
  return true;
2935
2936
0
      elf_flags_init (obfd) = true;
2937
0
      elf_elfheader (obfd)->e_flags = in_flags;
2938
2939
0
      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
2940
0
    && bfd_get_arch_info (obfd)->the_default)
2941
0
  return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
2942
0
          bfd_get_mach (ibfd));
2943
2944
0
      return true;
2945
0
    }
2946
2947
  /* Identical flags must be compatible.  */
2948
0
  if (in_flags == out_flags)
2949
0
    return true;
2950
2951
  /* Check to see if the input BFD actually contains any sections.  If
2952
     not, its flags may not have been initialised either, but it
2953
     cannot actually cause any incompatiblity.  Do not short-circuit
2954
     dynamic objects; their section list may be emptied by
2955
     elf_link_add_object_symbols.
2956
2957
     Also check to see if there are no code sections in the input.
2958
     In this case there is no need to check for code specific flags.
2959
     XXX - do we need to worry about floating-point format compatability
2960
     in data sections ?  */
2961
0
  if (!(ibfd->flags & DYNAMIC))
2962
0
    {
2963
0
      bool null_input_bfd = true;
2964
0
      bool only_data_sections = true;
2965
2966
0
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2967
0
  {
2968
0
    if ((bfd_section_flags (sec)
2969
0
         & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2970
0
        == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2971
0
      only_data_sections = false;
2972
2973
0
    null_input_bfd = false;
2974
0
    break;
2975
0
  }
2976
2977
0
      if (null_input_bfd || only_data_sections)
2978
0
  return true;
2979
0
    }
2980
0
  return flags_compatible;
2981
0
}
2982
2983
/* Display the flags field.  */
2984
2985
static bool
2986
elf64_kvx_print_private_bfd_data (bfd *abfd, void *ptr)
2987
63
{
2988
63
  FILE *file = (FILE *) ptr;
2989
63
  unsigned long flags;
2990
2991
63
  BFD_ASSERT (abfd != NULL && ptr != NULL);
2992
2993
  /* Print normal ELF private data.  */
2994
63
  _bfd_elf_print_private_bfd_data (abfd, ptr);
2995
2996
63
  flags = elf_elfheader (abfd)->e_flags;
2997
  /* Ignore init flag - it may not be set, despite the flags field
2998
     containing valid data.  */
2999
3000
  /* xgettext:c-format */
3001
63
  fprintf (file, _("Private flags = 0x%lx : "), elf_elfheader (abfd)->e_flags);
3002
63
  if((flags & ELF_KVX_ABI_64B_ADDR_BIT) == ELF_KVX_ABI_64B_ADDR_BIT)
3003
2
    {
3004
2
      if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3005
0
  fprintf (file, _("Coolidge (kv3) V1 64 bits"));
3006
2
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3007
0
  fprintf (file, _("Coolidge (kv3) V2 64 bits"));
3008
2
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3009
0
  fprintf (file, _("Coolidge (kv4) V1 64 bits"));
3010
2
    }
3011
61
  else
3012
61
    {
3013
61
      if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3014
39
  fprintf (file, _("Coolidge (kv3) V1 32 bits"));
3015
22
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3016
0
  fprintf (file, _("Coolidge (kv3) V2 32 bits"));
3017
22
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3018
0
  fprintf (file, _("Coolidge (kv4) V1 32 bits"));
3019
61
    }
3020
3021
63
  fputc ('\n', file);
3022
3023
63
  return true;
3024
63
}
3025
3026
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  */

static bool
elf64_kvx_adjust_dynamic_symbol (struct bfd_link_info *info,
				 struct elf_link_hash_entry *h)
{
  struct elf_kvx_link_hash_table *htab;
  asection *s;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->needs_plt)
    {
      /* Drop the PLT entry again when the symbol is never called
	 through it, or resolves locally anyway.  */
      if (h->plt.refcount <= 0
	  || ((SYMBOL_CALLS_LOCAL (info, h)
	       || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		   && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a CALL26 reloc in
	     an input file, but the symbol wasn't referred to
	     by a dynamic object or all references were
	     garbage collected. In which case we can end up
	     resolving.  */
	  h->plt.offset = (bfd_vma) - 1;
	  h->needs_plt = 0;
	}

      /* Function symbols never need a copy reloc; done.  */
      return true;
    }
  else
    /* Otherwise, reset to -1.  */
    h->plt.offset = (bfd_vma) - 1;


  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
	h->non_got_ref = def->non_got_ref;
      return true;
    }

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  */
  if (bfd_link_pic (info))
    return true;

  /* If there are no references to this symbol that do not use the
     GOT, we don't need to generate a copy reloc.  */
  if (!h->non_got_ref)
    return true;

  /* If -z nocopyreloc was given, we won't generate them either.  */
  if (info->nocopyreloc)
    {
      h->non_got_ref = 0;
      return true;
    }

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */

  htab = elf_kvx_hash_table (info);

  /* We must generate a R_KVX_COPY reloc to tell the dynamic linker
     to copy the initial value out of the dynamic object and into the
     runtime process image.  */
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
    {
      /* Reserve room in .rela.bss for the copy relocation.  */
      htab->srelbss->size += RELOC_SIZE (htab);
      h->needs_copy = 1;
    }

  s = htab->sdynbss;

  /* Let the generic code carve out space in .dynbss for H.  */
  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
3123
3124
static bool
3125
elf64_kvx_allocate_local_symbols (bfd *abfd, unsigned number)
3126
0
{
3127
0
  struct elf_kvx_local_symbol *locals;
3128
0
  locals = elf_kvx_locals (abfd);
3129
0
  if (locals == NULL)
3130
0
    {
3131
0
      locals = (struct elf_kvx_local_symbol *)
3132
0
  bfd_zalloc (abfd, number * sizeof (struct elf_kvx_local_symbol));
3133
0
      if (locals == NULL)
3134
0
  return false;
3135
0
      elf_kvx_locals (abfd) = locals;
3136
0
    }
3137
0
  return true;
3138
0
}
3139
3140
/* Create the .got section to hold the global offset table.  Also
   creates .rela.got (or .rel.got), optionally .got.plt and the
   _GLOBAL_OFFSET_TABLE_ symbol, and records the sections in the
   hash table.  Safe to call more than once.  */

static bool
kvx_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
{
  elf_backend_data *bed = get_elf_backend_data (abfd);
  flagword flags;
  asection *s;
  struct elf_link_hash_entry *h;
  struct elf_link_hash_table *htab = elf_hash_table (info);

  /* This function may be called more than once.  */
  s = bfd_get_linker_section (abfd, ".got");
  if (s != NULL)
    return true;

  flags = bed->dynamic_sec_flags;

  /* Dynamic relocation section for GOT entries; read-only since it
     is only consumed by the dynamic linker.  */
  s = bfd_make_section_anyway_with_flags (abfd,
					  (bed->rela_plts_and_copies_p
					   ? ".rela.got" : ".rel.got"),
					  (bed->dynamic_sec_flags
					   | SEC_READONLY));
  if (s == NULL
      || !bfd_set_section_alignment (s, bed->s->log_file_align))

    return false;
  htab->srelgot = s;

  s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
  if (s == NULL
      || !bfd_set_section_alignment (s, bed->s->log_file_align))
    return false;
  htab->sgot = s;
  /* Reserve the first GOT slot.  */
  htab->sgot->size += GOT_ENTRY_SIZE;

  if (bed->want_got_sym)
    {
      /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
	 (or .got.plt) section.  We don't do this in the linker script
	 because we don't want to define the symbol if we are not creating
	 a global offset table.  */
      h = _bfd_elf_define_linkage_sym (abfd, info, s,
				       "_GLOBAL_OFFSET_TABLE_");
      elf_hash_table (info)->hgot = h;
      if (h == NULL)
	return false;
    }

  if (bed->want_got_plt)
    {
      s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (s,
					 bed->s->log_file_align))
	return false;
      htab->sgotplt = s;
    }

  /* The first bit of the global offset table is the header.  */
  /* NOTE(review): S points at .got.plt here when want_got_plt is
     set, otherwise at .got — the header size is added to whichever
     section was created last.  Confirm this is intended.  */
  s->size += bed->got_header_size;

  /* we still need to handle got content when doing static link with PIC */
  if (bfd_link_executable (info) && !bfd_link_pic (info)) {
    htab->dynobj = abfd;
  }

  return true;
}
3209
3210
/* Look through the relocs for a section during the first phase.
   Records, per symbol, the GOT/PLT/dynamic-relocation resources that
   later size-allocation passes must reserve.  Returns false on a bad
   symbol index or allocation failure.  */

static bool
elf64_kvx_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  asection *sreloc;

  struct elf_kvx_link_hash_table *htab;

  /* Nothing to record for relocatable (-r) links.  */
  if (bfd_link_relocatable (info))
    return true;

  BFD_ASSERT (is_kvx_elf (abfd));

  htab = elf_kvx_hash_table (info);
  sreloc = NULL;

  symtab_hdr = &elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      struct elf_link_hash_entry *h;
      unsigned int r_symndx;
      unsigned int r_type;
      bfd_reloc_code_real_type bfd_r_type;
      Elf_Internal_Sym *isym;

      r_symndx = ELF64_R_SYM (rel->r_info);
      r_type = ELF64_R_TYPE (rel->r_info);

      if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
	{
	  /* xgettext:c-format */
	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
	  return false;
	}

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* A local symbol.  */
	  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					abfd, r_symndx);
	  if (isym == NULL)
	    return false;

	  h = NULL;
	}
      else
	{
	  /* A global symbol: follow indirect/warning links to the
	     real hash entry.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}

      /* Could be done earlier, if h were already available.  */
      bfd_r_type = kvx_tls_transition (abfd, info, r_type, h, r_symndx);

      if (h != NULL)
	{
	  /* Create the ifunc sections for static executables.  If we
	     never see an indirect function symbol nor we are building
	     a static executable, those sections will be empty and
	     won't appear in output.  */
	  switch (bfd_r_type)
	    {
	    default:
	      break;
	    }

	  /* It is referenced by a non-shared object. */
	  h->ref_regular = 1;
	}

      switch (bfd_r_type)
	{
	/* Absolute and long-form address relocations: may need a
	   dynamic reloc (and a PLT entry) when building PIC.  */
	case BFD_RELOC_KVX_S43_LO10:
	case BFD_RELOC_KVX_S43_UP27:
	case BFD_RELOC_KVX_S43_EX6:

	case BFD_RELOC_KVX_S37_LO10:
	case BFD_RELOC_KVX_S37_UP27:

	case BFD_RELOC_KVX_S64_LO10:
	case BFD_RELOC_KVX_S64_UP27:
	case BFD_RELOC_KVX_S64_EX27:

	case BFD_RELOC_KVX_32:
	case BFD_RELOC_KVX_64:

	  /* We don't need to handle relocs into sections not going into
	     the "real" output.  */
	  if ((sec->flags & SEC_ALLOC) == 0)
	    break;

	  if (h != NULL)
	    {
	      if (!bfd_link_pic (info))
		h->non_got_ref = 1;

	      h->plt.refcount += 1;
	      h->pointer_equality_needed = 1;
	    }

	  /* No need to do anything if we're not creating a shared
	     object.  */
	  if (! bfd_link_pic (info))
	    break;

	  {
	    struct elf_dyn_relocs *p;
	    struct elf_dyn_relocs **head;

	    /* We must copy these reloc types into the output file.
	       Create a reloc section in dynobj and make room for
	       this reloc.  */
	    if (sreloc == NULL)
	      {
		if (htab->root.dynobj == NULL)
		  htab->root.dynobj = abfd;

		sreloc = _bfd_elf_make_dynamic_reloc_section
		  (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);

		if (sreloc == NULL)
		  return false;
	      }

	    /* If this is a global symbol, we count the number of
	       relocations we need for this symbol.  */
	    if (h != NULL)
	      {
		head = &h->dyn_relocs;
	      }
	    else
	      {
		/* Track dynamic relocs needed for local syms too.
		   We really need local syms available to do this
		   easily.  Oh well.  */

		asection *s;
		void **vpp;

		isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					      abfd, r_symndx);
		if (isym == NULL)
		  return false;

		s = bfd_section_from_elf_index (abfd, isym->st_shndx);
		if (s == NULL)
		  s = sec;

		/* Beware of type punned pointers vs strict aliasing
		   rules.  */
		vpp = &(elf_section_data (s)->local_dynrel);
		head = (struct elf_dyn_relocs **) vpp;
	      }

	    /* Reuse the head record if it already covers SEC,
	       otherwise prepend a fresh one.  */
	    p = *head;
	    if (p == NULL || p->sec != sec)
	      {
		bfd_size_type amt = sizeof *p;
		p = ((struct elf_dyn_relocs *)
		     bfd_zalloc (htab->root.dynobj, amt));
		if (p == NULL)
		  return false;
		p->next = *head;
		*head = p;
		p->sec = sec;
	      }

	    p->count += 1;

	  }
	  break;

	/* GOT-using relocations (plain, GOTOFF and TLS GD/IE/LD):
	   count the GOT reference and record the kind of GOT entry
	   the symbol needs.  */
	case BFD_RELOC_KVX_S37_GOT_LO10:
	case BFD_RELOC_KVX_S37_GOT_UP27:

	case BFD_RELOC_KVX_S37_GOTOFF_LO10:
	case BFD_RELOC_KVX_S37_GOTOFF_UP27:

	case BFD_RELOC_KVX_S43_GOT_LO10:
	case BFD_RELOC_KVX_S43_GOT_UP27:
	case BFD_RELOC_KVX_S43_GOT_EX6:

	case BFD_RELOC_KVX_S43_GOTOFF_LO10:
	case BFD_RELOC_KVX_S43_GOTOFF_UP27:
	case BFD_RELOC_KVX_S43_GOTOFF_EX6:

	case BFD_RELOC_KVX_S37_TLS_GD_LO10:
	case BFD_RELOC_KVX_S37_TLS_GD_UP27:

	case BFD_RELOC_KVX_S43_TLS_GD_LO10:
	case BFD_RELOC_KVX_S43_TLS_GD_UP27:
	case BFD_RELOC_KVX_S43_TLS_GD_EX6:

	case BFD_RELOC_KVX_S37_TLS_IE_LO10:
	case BFD_RELOC_KVX_S37_TLS_IE_UP27:

	case BFD_RELOC_KVX_S43_TLS_IE_LO10:
	case BFD_RELOC_KVX_S43_TLS_IE_UP27:
	case BFD_RELOC_KVX_S43_TLS_IE_EX6:

	case BFD_RELOC_KVX_S37_TLS_LD_LO10:
	case BFD_RELOC_KVX_S37_TLS_LD_UP27:

	case BFD_RELOC_KVX_S43_TLS_LD_LO10:
	case BFD_RELOC_KVX_S43_TLS_LD_UP27:
	case BFD_RELOC_KVX_S43_TLS_LD_EX6:
	  {
	    unsigned got_type;
	    unsigned old_got_type;

	    got_type = kvx_reloc_got_type (bfd_r_type);

	    if (h)
	      {
		h->got.refcount += 1;
		old_got_type = elf_kvx_hash_entry (h)->got_type;
	      }
	    else
	      {
		struct elf_kvx_local_symbol *locals;

		if (!elf64_kvx_allocate_local_symbols
		    (abfd, symtab_hdr->sh_info))
		  return false;

		locals = elf_kvx_locals (abfd);
		BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
		locals[r_symndx].got_refcount += 1;
		old_got_type = locals[r_symndx].got_type;
	      }

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol type.
	       So just combine any TLS types needed.  */
	    if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
		&& got_type != GOT_NORMAL)
	      got_type |= old_got_type;

	    /* If the symbol is accessed by both IE and GD methods, we
	       are able to relax.  Turn off the GD flag, without
	       messing up with any other kind of TLS types that may be
	       involved.  */
	    /* Disabled untested and unused TLS */
	    /* if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type)) */
	    /*   got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD); */

	    if (old_got_type != got_type)
	      {
		if (h != NULL)
		  elf_kvx_hash_entry (h)->got_type = got_type;
		else
		  {
		    struct elf_kvx_local_symbol *locals;
		    locals = elf_kvx_locals (abfd);
		    BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
		    locals[r_symndx].got_type = got_type;
		  }
	      }

	    /* Make sure a GOT exists since this symbol uses it.  */
	    if (htab->root.dynobj == NULL)
	      htab->root.dynobj = abfd;
	    if (! kvx_elf_create_got_section (htab->root.dynobj, info))
	      return false;
	    break;
	  }

	/* GOT-address relocations only require the GOT itself, not
	   an entry for the symbol.  */
	case BFD_RELOC_KVX_S64_GOTADDR_LO10:
	case BFD_RELOC_KVX_S64_GOTADDR_UP27:
	case BFD_RELOC_KVX_S64_GOTADDR_EX27:

	case BFD_RELOC_KVX_S43_GOTADDR_LO10:
	case BFD_RELOC_KVX_S43_GOTADDR_UP27:
	case BFD_RELOC_KVX_S43_GOTADDR_EX6:

	case BFD_RELOC_KVX_S37_GOTADDR_LO10:
	case BFD_RELOC_KVX_S37_GOTADDR_UP27:

	  if (htab->root.dynobj == NULL)
	    htab->root.dynobj = abfd;
	  if (! kvx_elf_create_got_section (htab->root.dynobj, info))
	    return false;
	  break;

	case BFD_RELOC_KVX_PCREL27:
	case BFD_RELOC_KVX_PCREL17:
	  /* If this is a local symbol then we resolve it
	     directly without creating a PLT entry.  */
	  if (h == NULL)
	    continue;

	  h->needs_plt = 1;
	  if (h->plt.refcount <= 0)
	    h->plt.refcount = 1;
	  else
	    h->plt.refcount += 1;
	  break;

	default:
	  break;
	}
    }

  return true;
}
3526
3527
static bool
3528
elf64_kvx_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
3529
7
{
3530
7
  Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form.  */
3531
3532
7
  if (!_bfd_elf_init_file_header (abfd, link_info))
3533
0
    return false;
3534
3535
7
  i_ehdrp = elf_elfheader (abfd);
3536
7
  i_ehdrp->e_ident[EI_ABIVERSION] = KVX_ELF_ABI_VERSION;
3537
7
  return true;
3538
7
}
3539
3540
static enum elf_reloc_type_class
3541
elf64_kvx_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
3542
        const asection *rel_sec ATTRIBUTE_UNUSED,
3543
        const Elf_Internal_Rela *rela)
3544
0
{
3545
0
  switch ((int) ELF64_R_TYPE (rela->r_info))
3546
0
    {
3547
0
    case R_KVX_RELATIVE:
3548
0
      return reloc_class_relative;
3549
0
    case R_KVX_JMP_SLOT:
3550
0
      return reloc_class_plt;
3551
0
    case R_KVX_COPY:
3552
0
      return reloc_class_copy;
3553
0
    default:
3554
0
      return reloc_class_normal;
3555
0
    }
3556
0
}
3557
3558
/* A structure used to record a list of sections, independently
   of the next and prev fields in the asection structure.  */
typedef struct section_list
{
  asection *sec;		/* Section recorded by this node.  */
  struct section_list *next;	/* Following node, or NULL.  */
  struct section_list *prev;	/* Preceding node, or NULL.  */
}
section_list;
3567
3568
/* State threaded through the stub-symbol output walk.  */
typedef struct
{
  void *finfo;			/* Opaque argument forwarded to FUNC.  */
  struct bfd_link_info *info;	/* The link being performed.  */
  asection *sec;		/* Stub section currently being processed.  */
  int sec_shndx;		/* Output section index used for SEC.  */
  /* Callback that actually emits one symbol; returns 1 on success.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
3577
3578
/* Output a single local symbol for a generated stub.  */
3579
3580
static bool
3581
elf64_kvx_output_stub_sym (output_arch_syminfo *osi, const char *name,
3582
             bfd_vma offset, bfd_vma size)
3583
0
{
3584
0
  Elf_Internal_Sym sym;
3585
3586
0
  sym.st_value = (osi->sec->output_section->vma
3587
0
      + osi->sec->output_offset + offset);
3588
0
  sym.st_size = size;
3589
0
  sym.st_other = 0;
3590
0
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
3591
0
  sym.st_shndx = osi->sec_shndx;
3592
0
  return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
3593
0
}
3594
3595
static bool
3596
kvx_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3597
0
{
3598
0
  struct elf_kvx_stub_hash_entry *stub_entry;
3599
0
  asection *stub_sec;
3600
0
  bfd_vma addr;
3601
0
  char *stub_name;
3602
0
  output_arch_syminfo *osi;
3603
3604
  /* Massage our args to the form they really have.  */
3605
0
  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
3606
0
  osi = (output_arch_syminfo *) in_arg;
3607
3608
0
  stub_sec = stub_entry->stub_sec;
3609
3610
  /* Ensure this stub is attached to the current section being
3611
     processed.  */
3612
0
  if (stub_sec != osi->sec)
3613
0
    return true;
3614
3615
0
  addr = (bfd_vma) stub_entry->stub_offset;
3616
3617
0
  stub_name = stub_entry->output_name;
3618
3619
0
  switch (stub_entry->stub_type)
3620
0
    {
3621
0
    case kvx_stub_long_branch:
3622
0
      if (!elf64_kvx_output_stub_sym
3623
0
    (osi, stub_name, addr, sizeof (elf64_kvx_long_branch_stub)))
3624
0
  return false;
3625
0
      break;
3626
3627
0
    default:
3628
0
      abort ();
3629
0
    }
3630
3631
0
  return true;
3632
0
}
3633
3634
/* Output mapping symbols for linker generated sections.  */
3635
3636
static bool
3637
elf64_kvx_output_arch_local_syms (bfd *output_bfd,
3638
          struct bfd_link_info *info,
3639
          void *finfo,
3640
          int (*func) (void *, const char *,
3641
                 Elf_Internal_Sym *,
3642
                 asection *,
3643
                 struct elf_link_hash_entry *))
3644
0
{
3645
0
  output_arch_syminfo osi;
3646
0
  struct elf_kvx_link_hash_table *htab;
3647
3648
0
  htab = elf_kvx_hash_table (info);
3649
3650
0
  osi.finfo = finfo;
3651
0
  osi.info = info;
3652
0
  osi.func = func;
3653
3654
  /* Long calls stubs.  */
3655
0
  if (htab->stub_bfd && htab->stub_bfd->sections)
3656
0
    {
3657
0
      asection *stub_sec;
3658
3659
0
      for (stub_sec = htab->stub_bfd->sections;
3660
0
     stub_sec != NULL; stub_sec = stub_sec->next)
3661
0
  {
3662
    /* Ignore non-stub sections.  */
3663
0
    if (!strstr (stub_sec->name, STUB_SUFFIX))
3664
0
      continue;
3665
3666
0
    osi.sec = stub_sec;
3667
3668
0
    osi.sec_shndx = _bfd_elf_section_from_bfd_section
3669
0
      (output_bfd, osi.sec->output_section);
3670
3671
0
    bfd_hash_traverse (&htab->stub_hash_table, kvx_map_one_stub,
3672
0
           &osi);
3673
0
  }
3674
0
    }
3675
3676
  /* Finally, output mapping symbols for the PLT.  */
3677
0
  if (!htab->root.splt || htab->root.splt->size == 0)
3678
0
    return true;
3679
3680
0
  osi.sec_shndx = _bfd_elf_section_from_bfd_section
3681
0
    (output_bfd, htab->root.splt->output_section);
3682
0
  osi.sec = htab->root.splt;
3683
3684
0
  return true;
3685
3686
0
}
3687
3688
/* Allocate target specific section data.  */
3689
3690
static bool
3691
elf64_kvx_new_section_hook (bfd *abfd, asection *sec)
3692
5.59k
{
3693
5.59k
  _kvx_elf_section_data *sdata;
3694
3695
5.59k
  sdata = bfd_zalloc (abfd, sizeof (*sdata));
3696
5.59k
  if (sdata == NULL)
3697
0
    return false;
3698
5.59k
  sec->used_by_bfd = sdata;
3699
3700
5.59k
  return _bfd_elf_new_section_hook (abfd, sec);
3701
5.59k
}
3702
3703
/* Create dynamic sections. This is different from the ARM backend in that
3704
   the got, plt, gotplt and their relocation sections are all created in the
3705
   standard part of the bfd elf backend.  */
3706
3707
static bool
3708
elf64_kvx_create_dynamic_sections (bfd *dynobj,
3709
           struct bfd_link_info *info)
3710
0
{
3711
0
  struct elf_kvx_link_hash_table *htab;
3712
3713
  /* We need to create .got section.  */
3714
0
  if (!kvx_elf_create_got_section (dynobj, info))
3715
0
    return false;
3716
3717
0
  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3718
0
    return false;
3719
3720
0
  htab = elf_kvx_hash_table (info);
3721
0
  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3722
0
  if (!bfd_link_pic (info))
3723
0
    htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
3724
3725
0
  if (!htab->sdynbss || (!bfd_link_pic (info) && !htab->srelbss))
3726
0
    abort ();
3727
3728
0
  return true;
3729
0
}
3730
3731
3732
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.  Called via elf_link_hash_traverse for every global
   symbol H; INF is the struct bfd_link_info for the link.  Returns
   false on failure (e.g. a symbol could not be made dynamic).  */

static bool
elf64_kvx_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
{
  struct bfd_link_info *info;
  struct elf_kvx_link_hash_table *htab;
  struct elf_dyn_relocs *p;

  /* An example of a bfd_link_hash_indirect symbol is versioned
     symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
     -> __gxx_personality_v0(bfd_link_hash_defined)

     There is no need to process bfd_link_hash_indirect symbols here
     because we will also be presented with the concrete instance of
     the symbol and elf64_kvx_copy_indirect_symbol () will have been
     called to copy all relevant data from the generic to the concrete
     symbol instance.  */
  if (h->root.type == bfd_link_hash_indirect)
    return true;

  /* For warning symbols, process the symbol they wrap instead.  */
  if (h->root.type == bfd_link_hash_warning)
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  info = (struct bfd_link_info *) inf;
  htab = elf_kvx_hash_table (info);

  /* First, PLT sizing for symbols referenced through the PLT.  */
  if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local)
	{
	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

      if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  asection *s = htab->root.splt;

	  /* If this is the first .plt entry, make room for the special
	     first entry.  */
	  if (s->size == 0)
	    s->size += htab->plt_header_size;

	  h->plt.offset = s->size;

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (!bfd_link_pic (info) && !h->def_regular)
	    {
	      h->root.u.def.section = s;
	      h->root.u.def.value = h->plt.offset;
	    }

	  /* Make room for this entry. For now we only create the
	     small model PLT entries. We later need to find a way
	     of relaxing into these from the large model PLT entries.  */
	  s->size += PLT_SMALL_ENTRY_SIZE;

	  /* We also need to make an entry in the .got.plt section, which
	     will be placed in the .got section by the linker script.  */
	  htab->root.sgotplt->size += GOT_ENTRY_SIZE;

	  /* We also need to make an entry in the .rela.plt section.  */
	  htab->root.srelplt->size += RELOC_SIZE (htab);

	  /* We need to ensure that all GOT entries that serve the PLT
	     are consecutive with the special GOT slots [0] [1] and
	     [2]. Any additional relocations must be placed after the
	     PLT related entries.  We abuse the reloc_count such that
	     during sizing we adjust reloc_count to indicate the
	     number of PLT related reserved entries.  In subsequent
	     phases when filling in the contents of the reloc entries,
	     PLT related entries are placed by computing their PLT
	     index (0 .. reloc_count). While other non-PLT relocs are
	     placed at the slot indicated by reloc_count and
	     reloc_count is updated.  */

	  htab->root.srelplt->reloc_count++;
	}
      else
	{
	  /* Symbol resolves locally without dynamic sections; no PLT
	     slot needed.  */
	  h->plt.offset = (bfd_vma) - 1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      h->plt.offset = (bfd_vma) - 1;
      h->needs_plt = 0;
    }

  /* Next, GOT sizing for symbols with GOT references.  */
  if (h->got.refcount > 0)
    {
      bool dyn;
      unsigned got_type = elf_kvx_hash_entry (h)->got_type;

      h->got.offset = (bfd_vma) - 1;

      dyn = htab->root.dynamic_sections_created;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (dyn && h->dynindx == -1 && !h->forced_local)
	{
	  if (!bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

      if (got_type == GOT_UNKNOWN)
	{
	  (*_bfd_error_handler)
	    (_("relocation against `%s' has faulty GOT type "),
	     (h) ? h->root.root.string : "a local symbol");
	  bfd_set_error (bfd_error_bad_value);
	  return false;
	}
      else if (got_type == GOT_NORMAL)
	{
	  /* One plain GOT slot, plus a dynamic reloc when the entry
	     must be filled at run time.  */
	  h->got.offset = htab->root.sgot->size;
	  htab->root.sgot->size += GOT_ENTRY_SIZE;
	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak)
	      && (bfd_link_pic (info)
		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
	    {
	      htab->root.srelgot->size += RELOC_SIZE (htab);
	    }
	}
      else
	{
	  int indx;

	  /* Any of these will require 2 GOT slots because
	   * they use __tls_get_addr() */
	  if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
	    {
	      h->got.offset = htab->root.sgot->size;
	      htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
	    }

	  if (got_type & GOT_TLS_IE)
	    {
	      h->got.offset = htab->root.sgot->size;
	      htab->root.sgot->size += GOT_ENTRY_SIZE;
	    }

	  indx = h && h->dynindx != -1 ? h->dynindx : 0;
	  if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	       || h->root.type != bfd_link_hash_undefweak)
	      && (bfd_link_pic (info)
		  || indx != 0
		  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
	    {
	      /* Only the GD case requires 2 relocations. */
	      if (got_type & GOT_TLS_GD)
		htab->root.srelgot->size += RELOC_SIZE (htab) * 2;

	      /* LD needs a DTPMOD reloc, IE needs a DTPOFF. */
	      if (got_type & (GOT_TLS_LD | GOT_TLS_IE))
		htab->root.srelgot->size += RELOC_SIZE (htab);
	    }
	}
    }
  else
    {
      h->got.offset = (bfd_vma) - 1;
    }

  if (h->dyn_relocs == NULL)
    return true;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info))
    {
      /* Relocs that use pc_count are those that appear on a call
	 insn, or certain REL relocs that can be generated via assembly.
	 We want calls to protected symbols to resolve directly to the
	 function rather than going via the plt.  If people want
	 function pointer comparisons to work as expected then they
	 should avoid writing weird assembly.  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  /* Drop pc-relative counts; unlink entries that become empty.  */
	  for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    h->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local
		   && !bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

    }
  else if (ELIMINATE_COPY_RELOCS)
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1
	      && !h->forced_local
	      && !bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      h->dyn_relocs = NULL;

    keep:;
    }

  /* Finally, allocate space.  */
  for (p = h->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc;

      sreloc = elf_section_data (p->sec)->sreloc;

      BFD_ASSERT (sreloc != NULL);

      sreloc->size += p->count * RELOC_SIZE (htab);
    }

  return true;
}
4001
4002
/* Find any dynamic relocs that apply to read-only sections.  */
4003
4004
static bool
4005
kvx_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
4006
0
{
4007
0
  struct elf_dyn_relocs * p;
4008
4009
0
  for (p = h->dyn_relocs; p != NULL; p = p->next)
4010
0
    {
4011
0
      asection *s = p->sec;
4012
4013
0
      if (s != NULL && (s->flags & SEC_READONLY) != 0)
4014
0
  {
4015
0
    struct bfd_link_info *info = (struct bfd_link_info *) inf;
4016
4017
0
    info->flags |= DF_TEXTREL;
4018
0
    info->callbacks->minfo (_("%pB: dynamic relocation against `%pT' in "
4019
0
            "read-only section `%pA'\n"),
4020
0
          s->owner, h->root.root.string, s);
4021
4022
    /* Not an error, just cut short the traversal.  */
4023
0
    return false;
4024
0
  }
4025
0
    }
4026
0
  return true;
4027
0
}
4028
4029
/* This is the most important function of all.  Innocuously named
   though!  Late sizing of the dynamic sections: assigns GOT offsets
   for local symbols, sizes .plt/.got/.rela.* for globals via
   elf64_kvx_allocate_dynrelocs, allocates section contents, and adds
   the required .dynamic entries.  */
static bool
elf64_kvx_late_size_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
			      struct bfd_link_info *info)
{
  struct elf_kvx_link_hash_table *htab;
  bfd *dynobj;
  asection *s;
  bool relocs;
  bfd *ibfd;

  htab = elf_kvx_hash_table ((info));
  dynobj = htab->root.dynobj;
  if (dynobj == NULL)
    return true;

  if (htab->root.dynamic_sections_created)
    {
      /* Executables that use a dynamic linker need .interp filled in.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = htab->root.interp;
	  if (s == NULL)
	    abort ();
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	  s->alloced = 1;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      struct elf_kvx_local_symbol *locals = NULL;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      unsigned int i;

      if (!is_kvx_elf (ibfd))
	continue;

      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  srel->size += p->count * RELOC_SIZE (htab);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      locals = elf_kvx_locals (ibfd);
      if (!locals)
	continue;

      symtab_hdr = &elf_symtab_hdr (ibfd);
      srel = htab->root.srelgot;
      /* One pass over this input's local symbols: hand out GOT slots
	 and count the dynamic relocs they will need.  */
      for (i = 0; i < symtab_hdr->sh_info; i++)
	{
	  locals[i].got_offset = (bfd_vma) - 1;
	  if (locals[i].got_refcount > 0)
	    {
	      unsigned got_type = locals[i].got_type;
	      /* GD/LD TLS entries occupy two GOT slots.  */
	      if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
		{
		  locals[i].got_offset = htab->root.sgot->size;
		  htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
		}

	      /* Plain and IE entries occupy a single slot.  */
	      if (got_type & (GOT_NORMAL | GOT_TLS_IE ))
		{
		  locals[i].got_offset = htab->root.sgot->size;
		  htab->root.sgot->size += GOT_ENTRY_SIZE;
		}

	      if (got_type == GOT_UNKNOWN)
		{
		  /* Deliberately ignored here; bad GOT types for
		     globals are diagnosed in allocate_dynrelocs.  */
		}

	      if (bfd_link_pic (info))
		{
		  if (got_type & GOT_TLS_GD)
		    htab->root.srelgot->size += RELOC_SIZE (htab) * 2;

		  if (got_type & GOT_TLS_IE
		      || got_type & GOT_TLS_LD
		      || got_type & GOT_NORMAL)
		    htab->root.srelgot->size += RELOC_SIZE (htab);
		}
	    }
	  else
	    {
	      locals[i].got_refcount = (bfd_vma) - 1;
	    }
	}
    }


  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (&htab->root, elf64_kvx_allocate_dynrelocs,
			  info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */

  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = kvx_compute_jump_table_size (htab);

  /* We now have determined the sizes of the various dynamic sections.
     Allocate memory for them.  */
  relocs = false;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      if (s == htab->root.splt
	  || s == htab->root.sgot
	  || s == htab->root.sgotplt
	  || s == htab->root.iplt
	  || s == htab->root.igotplt || s == htab->sdynbss)
	{
	  /* Strip this section if we don't need it; see the
	     comment below.  */
	}
      else if (startswith (bfd_section_name (s), ".rela"))
	{
	  if (s->size != 0 && s != htab->root.srelplt)
	    relocs = true;

	  /* We use the reloc_count field as a counter if we need
	     to copy relocs into the output file.  */
	  if (s != htab->root.srelplt)
	    s->reloc_count = 0;
	}
      else
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rela.bss and
	     .rela.plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */

	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  We use bfd_zalloc
	 here in case unused entries are not reclaimed before the
	 section's contents are written out.  This should not happen,
	 but this way if it does, we get a R_KVX_NONE reloc instead
	 of garbage.  */
      s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return false;
      s->alloced = 1;
    }

  if (htab->root.dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf64_kvx_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL)			\
      _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return false;
	}

      if (htab->root.splt->size != 0)
	{
	  if (!add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL, DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return false;
	}

      if (relocs)
	{
	  if (!add_dynamic_entry (DT_RELA, 0)
	      || !add_dynamic_entry (DT_RELASZ, 0)
	      || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
	    return false;

	  /* If any dynamic relocs apply to a read-only section,
	     then we need a DT_TEXTREL entry.  */
	  if ((info->flags & DF_TEXTREL) == 0)
	    elf_link_hash_traverse (&htab->root, kvx_readonly_dynrelocs,
				    info);

	  if ((info->flags & DF_TEXTREL) != 0)
	    {
	      if (!add_dynamic_entry (DT_TEXTREL, 0))
		return false;
	    }
	}
    }
#undef add_dynamic_entry

  return true;
}
4269
4270
static inline void
4271
elf_kvx_update_plt_entry (bfd *output_bfd,
4272
        bfd_reloc_code_real_type r_type,
4273
        bfd_byte *plt_entry, bfd_vma value)
4274
0
{
4275
0
  reloc_howto_type *howto = elf64_kvx_howto_from_bfd_reloc (r_type);
4276
0
  BFD_ASSERT(howto != NULL);
4277
0
  _bfd_kvx_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
4278
0
}
4279
4280
/* Fill in the small-model PLT entry, and the matching .rela.plt
   R_KVX_JMP_SLOT relocation, for symbol H whose PLT slot was reserved
   during sizing (h->plt.offset).  */

static void
elf64_kvx_create_small_pltn_entry (struct elf_link_hash_entry *h,
				   struct elf_kvx_link_hash_table *htab,
				   bfd *output_bfd)
{
  bfd_byte *plt_entry;
  bfd_vma plt_index;
  bfd_vma got_offset;
  bfd_vma gotplt_entry_address;
  bfd_vma plt_entry_address;
  Elf_Internal_Rela rela;
  bfd_byte *loc;
  asection *plt, *gotplt, *relplt;

  plt = htab->root.splt;
  gotplt = htab->root.sgotplt;
  relplt = htab->root.srelplt;

  /* Get the index in the procedure linkage table which
     corresponds to this symbol.  This is the index of this symbol
     in all the symbols for which we are making plt entries.  The
     first entry in the procedure linkage table is reserved.

     Get the offset into the .got table of the entry that
     corresponds to this function.  Each .got entry is GOT_ENTRY_SIZE
     bytes. The first three are reserved for the dynamic linker.

     For static executables, we don't reserve anything.  */

  if (plt == htab->root.splt)
    {
      plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
      got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
    }
  else
    {
      plt_index = h->plt.offset / htab->plt_entry_size;
      got_offset = plt_index * GOT_ENTRY_SIZE;
    }

  /* Output-image addresses of this PLT entry and its .got.plt slot;
     the PLT code addresses the slot PC-relatively.  */
  plt_entry = plt->contents + h->plt.offset;
  plt_entry_address = plt->output_section->vma
    + plt->output_offset + h->plt.offset;
  gotplt_entry_address = gotplt->output_section->vma +
    gotplt->output_offset + got_offset;

  /* Copy in the boiler-plate for the PLTn entry.  */
  memcpy (plt_entry, elf64_kvx_small_plt_entry, PLT_SMALL_ENTRY_SIZE);

  /* Patch the loading of the GOT entry, relative to the PLT entry
     address. */

  /* Use 37bits offset for both 32 and 64bits mode.
     Fill the LO10 of of lw $r9 = 0[$r14].  */
  elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_LO10,
			   plt_entry+4,
			   gotplt_entry_address - plt_entry_address);

  /* Fill the UP27 of of lw $r9 = 0[$r14].  */
  elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_UP27,
			   plt_entry+8,
			   gotplt_entry_address - plt_entry_address);

  rela.r_offset = gotplt_entry_address;

  /* Fill in the entry in the .rela.plt section.  */
  rela.r_info = ELF64_R_INFO (h->dynindx, R_KVX_JMP_SLOT);
  rela.r_addend = 0;

  /* Compute the relocation entry to used based on PLT index and do
     not adjust reloc_count. The reloc_count has already been adjusted
     to account for this entry.  */
  loc = relplt->contents + plt_index * RELOC_SIZE (htab);
  bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
}
4355
4356
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  Defines a hidden, local STT_TLS
   symbol at offset 0 of the TLS segment when one exists.  */

static bool
elf64_kvx_early_size_sections (bfd *output_bfd, struct bfd_link_info *info)
{
  asection *tls_sec;

  /* Nothing to do for ld -r.  */
  if (bfd_link_relocatable (info))
    return true;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      tlsbase = elf_link_hash_lookup (elf_hash_table (info),
				      "_TLS_MODULE_BASE_", true, true, false);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *h = NULL;
	  elf_backend_data *bed = get_elf_backend_data (output_bfd);

	  /* Define _TLS_MODULE_BASE_ at the start of the TLS segment.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, false, bed->collect, &h)))
	    return false;

	  /* NOTE(review): STT_TLS is set on the entry returned by the
	     hash lookup, then TLSBASE is re-pointed at the entry
	     returned by _bfd_generic_link_add_one_symbol before the
	     remaining fields are set.  Presumably both alias the same
	     hash entry -- confirm against the generic linker code.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *) h;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  /* Keep the symbol out of the dynamic symbol table.  */
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
	}
    }

  return true;
}
4396
4397
/* Finish up dynamic symbol handling.  We set the contents of various
4398
   dynamic sections here.  */
4399
static bool
4400
elf64_kvx_finish_dynamic_symbol (bfd *output_bfd,
4401
         struct bfd_link_info *info,
4402
         struct elf_link_hash_entry *h,
4403
         Elf_Internal_Sym *sym)
4404
0
{
4405
0
  struct elf_kvx_link_hash_table *htab;
4406
0
  htab = elf_kvx_hash_table (info);
4407
4408
0
  if (h->plt.offset != (bfd_vma) - 1)
4409
0
    {
4410
0
      asection *plt = NULL, *gotplt = NULL, *relplt = NULL;
4411
4412
      /* This symbol has an entry in the procedure linkage table.  Set
4413
   it up.  */
4414
4415
0
      if (htab->root.splt != NULL)
4416
0
  {
4417
0
    plt = htab->root.splt;
4418
0
    gotplt = htab->root.sgotplt;
4419
0
    relplt = htab->root.srelplt;
4420
0
  }
4421
4422
      /* This symbol has an entry in the procedure linkage table.  Set
4423
   it up.  */
4424
0
      if ((h->dynindx == -1
4425
0
     && !((h->forced_local || bfd_link_executable (info))
4426
0
    && h->def_regular
4427
0
    && h->type == STT_GNU_IFUNC))
4428
0
    || plt == NULL
4429
0
    || gotplt == NULL
4430
0
    || relplt == NULL)
4431
0
  abort ();
4432
4433
0
      elf64_kvx_create_small_pltn_entry (h, htab, output_bfd);
4434
0
      if (!h->def_regular)
4435
0
  {
4436
    /* Mark the symbol as undefined, rather than as defined in
4437
       the .plt section.  */
4438
0
    sym->st_shndx = SHN_UNDEF;
4439
    /* If the symbol is weak we need to clear the value.
4440
       Otherwise, the PLT entry would provide a definition for
4441
       the symbol even if the symbol wasn't defined anywhere,
4442
       and so the symbol would never be NULL.  Leave the value if
4443
       there were any relocations where pointer equality matters
4444
       (this is a clue for the dynamic linker, to make function
4445
       pointer comparisons work between an application and shared
4446
       library).  */
4447
0
    if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
4448
0
      sym->st_value = 0;
4449
0
  }
4450
0
    }
4451
4452
0
  if (h->got.offset != (bfd_vma) - 1
4453
0
      && elf_kvx_hash_entry (h)->got_type == GOT_NORMAL)
4454
0
    {
4455
0
      Elf_Internal_Rela rela;
4456
0
      bfd_byte *loc;
4457
4458
      /* This symbol has an entry in the global offset table.  Set it
4459
   up.  */
4460
0
      if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
4461
0
  abort ();
4462
4463
0
      rela.r_offset = (htab->root.sgot->output_section->vma
4464
0
           + htab->root.sgot->output_offset
4465
0
           + (h->got.offset & ~(bfd_vma) 1));
4466
4467
#ifdef UGLY_DEBUG
4468
      printf("setting rela at offset 0x%x(0x%x + 0x%x + 0x%x) for %s\n",
4469
       rela.r_offset,
4470
       htab->root.sgot->output_section->vma,
4471
       htab->root.sgot->output_offset,
4472
       h->got.offset,
4473
       h->root.root.string);
4474
#endif
4475
4476
0
      if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
4477
0
  {
4478
0
    if (!h->def_regular)
4479
0
      return false;
4480
4481
    /* in case of PLT related GOT entry, it is not clear who is
4482
       supposed to set the LSB of GOT entry...
4483
       kvx_calculate_got_entry_vma() would be a good candidate,
4484
       but it is not called currently
4485
       So we are commenting it ATM.  */
4486
    // BFD_ASSERT ((h->got.offset & 1) != 0);
4487
0
    rela.r_info = ELF64_R_INFO (0, R_KVX_RELATIVE);
4488
0
    rela.r_addend = (h->root.u.def.value
4489
0
         + h->root.u.def.section->output_section->vma
4490
0
         + h->root.u.def.section->output_offset);
4491
0
  }
4492
0
      else
4493
0
  {
4494
0
    BFD_ASSERT ((h->got.offset & 1) == 0);
4495
0
    bfd_put_64 (output_bfd, (bfd_vma) 0,
4496
0
          htab->root.sgot->contents + h->got.offset);
4497
0
    rela.r_info = ELF64_R_INFO (h->dynindx, R_KVX_GLOB_DAT);
4498
0
    rela.r_addend = 0;
4499
0
  }
4500
4501
0
      loc = htab->root.srelgot->contents;
4502
0
      loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
4503
0
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4504
0
    }
4505
4506
0
  if (h->needs_copy)
4507
0
    {
4508
0
      Elf_Internal_Rela rela;
4509
0
      bfd_byte *loc;
4510
4511
      /* This symbol needs a copy reloc.  Set it up.  */
4512
4513
0
      if (h->dynindx == -1
4514
0
    || (h->root.type != bfd_link_hash_defined
4515
0
        && h->root.type != bfd_link_hash_defweak)
4516
0
    || htab->srelbss == NULL)
4517
0
  abort ();
4518
4519
0
      rela.r_offset = (h->root.u.def.value
4520
0
           + h->root.u.def.section->output_section->vma
4521
0
           + h->root.u.def.section->output_offset);
4522
0
      rela.r_info = ELF64_R_INFO (h->dynindx, R_KVX_COPY);
4523
0
      rela.r_addend = 0;
4524
0
      loc = htab->srelbss->contents;
4525
0
      loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
4526
0
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4527
0
    }
4528
4529
  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  SYM may
4530
     be NULL for local symbols.  */
4531
0
  if (sym != NULL
4532
0
      && (h == elf_hash_table (info)->hdynamic
4533
0
    || h == elf_hash_table (info)->hgot))
4534
0
    sym->st_shndx = SHN_ABS;
4535
4536
0
  return true;
4537
0
}
4538
4539
static void
4540
elf64_kvx_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
4541
         struct elf_kvx_link_hash_table *htab)
4542
0
{
4543
0
  memcpy (htab->root.splt->contents, elf64_kvx_small_plt0_entry,
4544
0
    PLT_ENTRY_SIZE);
4545
0
  elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
4546
0
    PLT_ENTRY_SIZE;
4547
0
}
4548
4549
/* Finish up the dynamic sections: patch the address/size dynamic tags
   that could not be known until section layout was final, write the
   special first PLT entry, and initialize the GOT header words.
   Returns false on error (e.g. a discarded .got.plt output section).  */

static bool
elf64_kvx_finish_dynamic_sections (bfd *output_bfd,
				   struct bfd_link_info *info,
				   bfd_byte *buf ATTRIBUTE_UNUSED)
{
  struct elf_kvx_link_hash_table *htab;
  bfd *dynobj;
  asection *sdyn;

  htab = elf_kvx_hash_table (info);
  dynobj = htab->root.dynobj;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (htab->root.dynamic_sections_created)
    {
      Elf64_External_Dyn *dyncon, *dynconend;

      if (sdyn == NULL || htab->root.sgot == NULL)
	abort ();

      /* Walk every entry of .dynamic and fix up the tags whose values
	 depend on final section addresses/sizes.  */
      dyncon = (Elf64_External_Dyn *) sdyn->contents;
      dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  asection *s;

	  bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	    default:
	      /* Tag needs no fixup; skip the swap-out below.  */
	      continue;

	    case DT_PLTGOT:
	      /* Runtime address of .got.plt.  */
	      s = htab->root.sgotplt;
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      break;

	    case DT_JMPREL:
	      /* Runtime address of the PLT relocations (.rela.plt).  */
	      s = htab->root.srelplt;
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      break;

	    case DT_PLTRELSZ:
	      /* Total size of the PLT relocations.  */
	      s = htab->root.srelplt;
	      dyn.d_un.d_val = s->size;
	      break;

	    case DT_RELASZ:
	      /* The procedure linkage table relocs (DT_JMPREL) should
		 not be included in the overall relocs (DT_RELA).
		 Therefore, we override the DT_RELASZ entry here to
		 make it not include the JMPREL relocs.  Since the
		 linker script arranges for .rela.plt to follow all
		 other relocation sections, we don't have to worry
		 about changing the DT_RELA entry.  */
	      if (htab->root.srelplt != NULL)
		{
		  s = htab->root.srelplt;
		  dyn.d_un.d_val -= s->size;
		}
	      break;
	    }

	  bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
	}

    }

  /* Fill in the special first entry in the procedure linkage table.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      elf64_kvx_init_small_plt0_entry (output_bfd, htab);

      elf_section_data (htab->root.splt->output_section)->
	this_hdr.sh_entsize = htab->plt_entry_size;
    }

  if (htab->root.sgotplt)
    {
      /* A .got.plt left in the absolute (discarded) section is a hard
	 error: the dynamic linker needs its runtime address.  */
      if (bfd_is_abs_section (htab->root.sgotplt->output_section))
	{
	  (*_bfd_error_handler)
	    (_("discarded output section: `%pA'"), htab->root.sgotplt);
	  return false;
	}

      /* Fill in the first three entries in the global offset table.  */
      if (htab->root.sgotplt->size > 0)
	{
	  bfd_put_64 (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);

	  /* Write GOT[1] and GOT[2], needed for the dynamic linker.  */
	  bfd_put_64 (output_bfd,
		      (bfd_vma) 0,
		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
	  bfd_put_64 (output_bfd,
		      (bfd_vma) 0,
		      htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
	}

      if (htab->root.sgot)
	{
	  if (htab->root.sgot->size > 0)
	    {
	      /* GOT[0] holds the address of .dynamic (0 when static).  */
	      bfd_vma addr =
		sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
	      bfd_put_64 (output_bfd, addr, htab->root.sgot->contents);
	    }
	}

      elf_section_data (htab->root.sgotplt->output_section)->
	this_hdr.sh_entsize = GOT_ENTRY_SIZE;
    }

  if (htab->root.sgot && htab->root.sgot->size > 0)
    elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
      = GOT_ENTRY_SIZE;

  return true;
}
4671
4672
/* Return address for Ith PLT stub in section PLT, for relocation REL
4673
   or (bfd_vma) -1 if it should not be included.  */
4674
4675
static bfd_vma
4676
elf64_kvx_plt_sym_val (bfd_vma i, const asection *plt,
4677
           const arelent *rel ATTRIBUTE_UNUSED)
4678
0
{
4679
0
  return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
4680
0
}
4681
4682
/* Target identification and page-size parameters consumed by
   elf64-target.h when building the KVX target vector.  */

#define ELF_ARCH      bfd_arch_kvx
#define ELF_TARGET_ID     KVX_ELF_DATA
#define ELF_MACHINE_CODE    EM_KVX
#define ELF_MAXPAGESIZE     0x10000
#define ELF_MINPAGESIZE     0x1000
#define ELF_COMMONPAGESIZE    0x1000

/* Generic BFD entry points overridden with KVX implementations.  */

#define bfd_elf64_bfd_link_hash_table_create    \
  elf64_kvx_link_hash_table_create

#define bfd_elf64_bfd_merge_private_bfd_data  \
  elf64_kvx_merge_private_bfd_data

#define bfd_elf64_bfd_print_private_bfd_data  \
  elf64_kvx_print_private_bfd_data

#define bfd_elf64_bfd_reloc_type_lookup   \
  elf64_kvx_reloc_type_lookup

#define bfd_elf64_bfd_reloc_name_lookup   \
  elf64_kvx_reloc_name_lookup

#define bfd_elf64_bfd_set_private_flags   \
  elf64_kvx_set_private_flags

#define bfd_elf64_mkobject      \
  elf64_kvx_mkobject

#define bfd_elf64_new_section_hook    \
  elf64_kvx_new_section_hook

/* ELF backend hooks used by the generic ELF linker code.  */

#define elf_backend_adjust_dynamic_symbol \
  elf64_kvx_adjust_dynamic_symbol

#define elf_backend_early_size_sections   \
  elf64_kvx_early_size_sections

#define elf_backend_check_relocs    \
  elf64_kvx_check_relocs

#define elf_backend_copy_indirect_symbol  \
  elf64_kvx_copy_indirect_symbol

/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
   to them in our hash.  */
#define elf_backend_create_dynamic_sections \
  elf64_kvx_create_dynamic_sections

#define elf_backend_init_index_section    \
  _bfd_elf_init_2_index_sections

#define elf_backend_finish_dynamic_sections \
  elf64_kvx_finish_dynamic_sections

#define elf_backend_finish_dynamic_symbol \
  elf64_kvx_finish_dynamic_symbol

#define elf_backend_object_p      \
  elf64_kvx_object_p

#define elf_backend_output_arch_local_syms      \
  elf64_kvx_output_arch_local_syms

#define elf_backend_plt_sym_val     \
  elf64_kvx_plt_sym_val

#define elf_backend_init_file_header    \
  elf64_kvx_init_file_header

#define elf_backend_init_process_headers  \
  elf64_kvx_init_process_headers

#define elf_backend_relocate_section    \
  elf64_kvx_relocate_section

#define elf_backend_reloc_type_class    \
  elf64_kvx_reloc_type_class

#define elf_backend_late_size_sections  \
  elf64_kvx_late_size_sections

/* Backend feature flags: RELA-only relocations, read-only PLT with a
   separate .got.plt, three reserved GOT header words.  */

#define elf_backend_can_refcount       1
#define elf_backend_can_gc_sections    1
#define elf_backend_plt_readonly       1
#define elf_backend_want_got_plt       1
#define elf_backend_want_plt_sym       0
#define elf_backend_may_use_rel_p      0
#define elf_backend_may_use_rela_p     1
#define elf_backend_default_use_rela_p 1
#define elf_backend_rela_normal        1
#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
#define elf_backend_default_execstack  0
#define elf_backend_extern_protected_data 1
#define elf_backend_hash_symbol elf_kvx_hash_symbol

#include "elf64-target.h"