Coverage Report

Created: 2024-05-21 06:29

/src/binutils-gdb/bfd/elf64-kvx.c
Line
Count
Source
1
#line 1 "elfnn-kvx.c"
2
/* KVX-specific support for 64-bit ELF.
3
   Copyright (C) 2009-2024 Free Software Foundation, Inc.
4
   Contributed by Kalray SA.
5
6
   This file is part of BFD, the Binary File Descriptor library.
7
8
   This program is free software; you can redistribute it and/or modify
9
   it under the terms of the GNU General Public License as published by
10
   the Free Software Foundation; either version 3 of the License, or
11
   (at your option) any later version.
12
13
   This program is distributed in the hope that it will be useful,
14
   but WITHOUT ANY WARRANTY; without even the implied warranty of
15
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
   GNU General Public License for more details.
17
18
   You should have received a copy of the GNU General Public License
19
   along with this program; see the file COPYING3. If not,
20
   see <http://www.gnu.org/licenses/>.  */
21
22
#include "sysdep.h"
23
#include "bfd.h"
24
#include "libiberty.h"
25
#include "libbfd.h"
26
#include "elf-bfd.h"
27
#include "bfdlink.h"
28
#include "objalloc.h"
29
#include "elf/kvx.h"
30
#include "elfxx-kvx.h"
31
32
0
#define ARCH_SIZE 64
33
34
#if ARCH_SIZE == 64
35
0
#define LOG_FILE_ALIGN  3
36
#endif
37
38
#if ARCH_SIZE == 32
39
#define LOG_FILE_ALIGN  2
40
#endif
41
42
#define IS_KVX_TLS_RELOC(R_TYPE)      \
43
0
  ((R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_LO10  \
44
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_UP27  \
45
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_LO10  \
46
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_UP27  \
47
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_EX6  \
48
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10  \
49
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27  \
50
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10  \
51
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27  \
52
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6  \
53
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_LO10  \
54
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_UP27  \
55
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_LO10  \
56
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_UP27  \
57
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_EX6  \
58
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_LO10  \
59
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_UP27  \
60
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_LO10  \
61
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_UP27  \
62
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_EX6  \
63
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_LO10  \
64
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_UP27  \
65
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_LO10  \
66
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_UP27  \
67
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_EX6  \
68
0
   )
69
70
0
#define IS_KVX_TLS_RELAX_RELOC(R_TYPE) 0
71
72
0
#define ELIMINATE_COPY_RELOCS 0
73
74
/* Return size of a relocation entry.  HTAB is the bfd's
75
   elf_kvx_link_hash_entry.  */
76
0
#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
77
78
/* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32.  */
79
0
#define GOT_ENTRY_SIZE                  (ARCH_SIZE / 8)
80
0
#define PLT_ENTRY_SIZE                  (32)
81
82
0
#define PLT_SMALL_ENTRY_SIZE            (4*4)
83
84
/* Encoding of the nop instruction.  */
85
0
#define INSN_NOP 0x00f0037f
86
87
#define kvx_compute_jump_table_size(htab)   \
88
0
  (((htab)->root.srelplt == NULL) ? 0      \
89
0
   : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
90
91
static const bfd_byte elf64_kvx_small_plt0_entry[PLT_ENTRY_SIZE] =
92
{
93
 /* FIXME KVX: no first entry, not used yet */
94
  0
95
};
96
97
/* Per function entry in a procedure linkage table looks like this
98
   if the distance between the PLTGOT and the PLT is < 4GB use
99
   these PLT entries.  */
100
static const bfd_byte elf64_kvx_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
101
{
102
  0x10, 0x00, 0xc4, 0x0f,       /* get $r16 = $pc     ;; */
103
#if ARCH_SIZE == 32
104
  0x10, 0x00, 0x40, 0xb0,       /* lwz $r16 = 0[$r16]   ;; */
105
#else
106
  0x10, 0x00, 0x40, 0xb8,       /* ld $r16 = 0[$r16] ;; */
107
#endif
108
  0x00, 0x00, 0x00, 0x18,       /* upper 27 bits for LSU */
109
  0x10, 0x00, 0xd8, 0x0f, /* igoto $r16          ;; */
110
};
111
112
/* The long branch stub uses the 43-bit immediate form of the make insn.  */
113
static const uint32_t elf64_kvx_long_branch_stub[] =
114
{
115
  0xe0400000,      /* make $r16 = LO10<imm43> EX6<imm43> */
116
  0x00000000,      /* UP27<imm43> ;; */
117
  0x0fd80010,      /* igoto $r16  ;; */
118
};
119
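The three words above are only a template; kvx_build_one_stub, further down in this file, patches the 43-bit immediate by applying three relocations against the final target address.  A minimal sketch of that patching step (the helper name example_patch_long_branch_stub is hypothetical; kvx_relocate is the real helper defined later in this file):

static void
example_patch_long_branch_stub (bfd *stub_bfd, asection *stub_sec,
                                bfd_vma stub_offset, bfd_vma target)
{
  /* Sketch only: LO10 and EX6 land in the first syllable, UP27 in the
     second one (offset + 4), exactly as kvx_build_one_stub does it.  */
  if (kvx_relocate (R_KVX_S43_LO10, stub_bfd, stub_sec, stub_offset, target)
        != bfd_reloc_ok
      || kvx_relocate (R_KVX_S43_EX6, stub_bfd, stub_sec, stub_offset, target)
        != bfd_reloc_ok
      || kvx_relocate (R_KVX_S43_UP27, stub_bfd, stub_sec, stub_offset + 4,
                       target) != bfd_reloc_ok)
    BFD_FAIL ();
}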
120
#define elf_info_to_howto               elf64_kvx_info_to_howto
121
#define elf_info_to_howto_rel           elf64_kvx_info_to_howto
122
123
0
#define KVX_ELF_ABI_VERSION   0
124
125
/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
126
#define ALL_ONES (~ (bfd_vma) 0)
127
128
/* Indexed by the bfd internal reloc enumerators.
129
   Therefore, the table needs to be synced with BFD_RELOC_KVX_*
130
   in reloc.c.   */
131
132
#define KVX_KV3_V1_KV3_V2_KV4_V1
133
#include "elfxx-kvx-relocs.h"
134
#undef KVX_KV3_V1_KV3_V2_KV4_V1
135
136
/* Given HOWTO, return the bfd internal relocation enumerator.  */
137
138
static bfd_reloc_code_real_type
139
elf64_kvx_bfd_reloc_from_howto (reloc_howto_type *howto)
140
0
{
141
0
  const int size = (int) ARRAY_SIZE (elf_kvx_howto_table);
142
0
  const ptrdiff_t offset = howto - elf_kvx_howto_table;
143
144
0
  if (offset >= 0 && offset < size)
145
0
    return BFD_RELOC_KVX_RELOC_START + offset + 1;
146
147
0
  return BFD_RELOC_KVX_RELOC_START + 1;
148
0
}
149
150
/* Given R_TYPE, return the bfd internal relocation enumerator.  */
151
152
static bfd_reloc_code_real_type
153
elf64_kvx_bfd_reloc_from_type (bfd *abfd ATTRIBUTE_UNUSED, unsigned int r_type)
154
323
{
155
323
  static bool initialized_p = false;
156
  /* Indexed by R_TYPE, values are offsets in the howto_table.  */
157
323
  static unsigned int offsets[R_KVX_end];
158
159
323
  if (!initialized_p)
160
2
    {
161
2
      unsigned int i;
162
163
168
      for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
164
166
  offsets[elf_kvx_howto_table[i].type] = i;
165
166
2
      initialized_p = true;
167
2
    }
168
169
  /* PR 17512: file: b371e70a.  */
170
323
  if (r_type >= R_KVX_end)
171
47
    {
172
47
      bfd_set_error (bfd_error_bad_value);
173
47
      return BFD_RELOC_KVX_RELOC_END;
174
47
    }
175
176
276
  return (BFD_RELOC_KVX_RELOC_START + 1) + offsets[r_type];
177
323
}
178
179
struct elf_kvx_reloc_map
180
{
181
  bfd_reloc_code_real_type from;
182
  bfd_reloc_code_real_type to;
183
};
184
185
/* Map bfd generic reloc to KVX-specific reloc.  */
186
static const struct elf_kvx_reloc_map elf_kvx_reloc_map[] =
187
{
188
  {BFD_RELOC_NONE, BFD_RELOC_KVX_NONE},
189
190
  /* Basic data relocations.  */
191
  {BFD_RELOC_CTOR, BFD_RELOC_KVX_64},
192
  {BFD_RELOC_64, BFD_RELOC_KVX_64},
193
  {BFD_RELOC_32, BFD_RELOC_KVX_32},
194
  {BFD_RELOC_16, BFD_RELOC_KVX_16},
195
  {BFD_RELOC_8,  BFD_RELOC_KVX_8},
196
197
  {BFD_RELOC_64_PCREL, BFD_RELOC_KVX_64_PCREL},
198
  {BFD_RELOC_32_PCREL, BFD_RELOC_KVX_32_PCREL},
199
};
200
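For illustration, this map is what lets a generic BFD request reach the KVX howto table: elf64_kvx_howto_from_bfd_reloc (just below) first translates the generic code through elf_kvx_reloc_map, then indexes elf_kvx_howto_table.  A hedged usage sketch (abfd stands for any open bfd):

/* Sketch only: BFD_RELOC_32 is translated to BFD_RELOC_KVX_32 via the map
   above before the howto table is consulted.  */
reloc_howto_type *howto32 = elf64_kvx_reloc_type_lookup (abfd, BFD_RELOC_32);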
201
/* Given the bfd internal relocation enumerator in CODE, return the
202
   corresponding howto entry.  */
203
204
static reloc_howto_type *
205
elf64_kvx_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
206
323
{
207
323
  unsigned int i;
208
209
  /* Convert bfd generic reloc to KVX-specific reloc.  */
210
323
  if (code < BFD_RELOC_KVX_RELOC_START || code > BFD_RELOC_KVX_RELOC_END)
211
0
    for (i = 0; i < ARRAY_SIZE (elf_kvx_reloc_map) ; i++)
212
0
      if (elf_kvx_reloc_map[i].from == code)
213
0
  {
214
0
    code = elf_kvx_reloc_map[i].to;
215
0
    break;
216
0
  }
217
218
323
  if (code > BFD_RELOC_KVX_RELOC_START && code < BFD_RELOC_KVX_RELOC_END)
219
276
      return &elf_kvx_howto_table[code - (BFD_RELOC_KVX_RELOC_START + 1)];
220
221
47
  return NULL;
222
323
}
223
224
static reloc_howto_type *
225
elf64_kvx_howto_from_type (bfd *abfd, unsigned int r_type)
226
323
{
227
323
  bfd_reloc_code_real_type val;
228
323
  reloc_howto_type *howto;
229
230
#if ARCH_SIZE == 32
231
  if (r_type > 256)
232
    {
233
      bfd_set_error (bfd_error_bad_value);
234
      return NULL;
235
    }
236
#endif
237
238
323
  val = elf64_kvx_bfd_reloc_from_type (abfd, r_type);
239
323
  howto = elf64_kvx_howto_from_bfd_reloc (val);
240
241
323
  if (howto != NULL)
242
276
    return howto;
243
244
47
  bfd_set_error (bfd_error_bad_value);
245
47
  return NULL;
246
323
}
247
248
static bool
249
elf64_kvx_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
250
       Elf_Internal_Rela *elf_reloc)
251
323
{
252
323
  unsigned int r_type;
253
254
323
  r_type = ELF64_R_TYPE (elf_reloc->r_info);
255
323
  bfd_reloc->howto = elf64_kvx_howto_from_type (abfd, r_type);
256
257
323
  if (bfd_reloc->howto == NULL)
258
47
    {
259
      /* xgettext:c-format */
260
47
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
261
47
        abfd, r_type);
262
47
      return false;
263
47
    }
264
276
  return true;
265
323
}
266
267
static reloc_howto_type *
268
elf64_kvx_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
269
           bfd_reloc_code_real_type code)
270
0
{
271
0
  reloc_howto_type *howto = elf64_kvx_howto_from_bfd_reloc (code);
272
273
0
  if (howto != NULL)
274
0
    return howto;
275
276
0
  bfd_set_error (bfd_error_bad_value);
277
0
  return NULL;
278
0
}
279
280
static reloc_howto_type *
281
elf64_kvx_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
282
           const char *r_name)
283
0
{
284
0
  unsigned int i;
285
286
0
  for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
287
0
    if (elf_kvx_howto_table[i].name != NULL
288
0
  && strcasecmp (elf_kvx_howto_table[i].name, r_name) == 0)
289
0
      return &elf_kvx_howto_table[i];
290
291
0
  return NULL;
292
0
}
293
294
#define TARGET_LITTLE_SYM               kvx_elf64_vec
295
#define TARGET_LITTLE_NAME              "elf64-kvx"
296
297
/* The linker script knows the section names for placement.
298
   The entry_names are used to do simple name mangling on the stubs.
299
   Given a function name, and its type, the stub can be found. The
300
   name can be changed.  The only requirement is that the %s be present.  */
301
0
#define STUB_ENTRY_NAME   "__%s_veneer"
302
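As a concrete illustration, elf64_kvx_size_stubs later formats output_name with this exact template, so a stub for a symbol such as memcpy (an assumed example symbol, not one named in this file) gets the local name __memcpy_veneer:

/* Sketch only.  */
char veneer[32];
snprintf (veneer, sizeof veneer, STUB_ENTRY_NAME, "memcpy");
/* veneer now contains "__memcpy_veneer".  */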
303
/* The name of the dynamic interpreter.  This is put in the .interp
304
   section.  */
305
0
#define ELF_DYNAMIC_INTERPRETER     "/lib/ld.so.1"
306
307
308
/* PCREL 27 is sign-extended and scaled by 4.  */
309
#define KVX_MAX_FWD_CALL_OFFSET \
310
0
  (((1 << 26) - 1) << 2)
311
#define KVX_MAX_BWD_CALL_OFFSET \
312
0
  (-((1 << 26) << 2))
313
314
/* Check that the destination of the call is within the PCREL27
315
   range. */
316
static int
317
kvx_valid_call_p (bfd_vma value, bfd_vma place)
318
0
{
319
0
  bfd_signed_vma offset = (bfd_signed_vma) (value - place);
320
0
  return (offset <= KVX_MAX_FWD_CALL_OFFSET
321
0
    && offset >= KVX_MAX_BWD_CALL_OFFSET);
322
0
}
323
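Worked out from the two macros above (a sketch added for clarity, not part of the original file): the signed 27-bit word offset, scaled by 4, gives the window that kvx_valid_call_p accepts.

/* KVX_MAX_FWD_CALL_OFFSET = ((1 << 26) - 1) << 2 = 0x0ffffffc   (+256 MB - 4)
   KVX_MAX_BWD_CALL_OFFSET = -((1 << 26) << 2)    = -0x10000000  (-256 MB)
   so any displacement in [-256 MB, +256 MB - 4] needs no stub.  */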
324
/* Section name for stubs is the associated section name plus this
325
   string.  */
326
0
#define STUB_SUFFIX ".stub"
327
328
enum elf_kvx_stub_type
329
{
330
  kvx_stub_none,
331
  kvx_stub_long_branch,
332
};
333
334
struct elf_kvx_stub_hash_entry
335
{
336
  /* Base hash table entry structure.  */
337
  struct bfd_hash_entry root;
338
339
  /* The stub section.  */
340
  asection *stub_sec;
341
342
  /* Offset within stub_sec of the beginning of this stub.  */
343
  bfd_vma stub_offset;
344
345
  /* Given the symbol's value and its section we can determine its final
346
     value when building the stubs (so the stub knows where to jump).  */
347
  bfd_vma target_value;
348
  asection *target_section;
349
350
  enum elf_kvx_stub_type stub_type;
351
352
  /* The symbol table entry, if any, that this was derived from.  */
353
  struct elf_kvx_link_hash_entry *h;
354
355
  /* Destination symbol type */
356
  unsigned char st_type;
357
358
  /* Where this stub is being called from, or, in the case of combined
359
     stub sections, the first input section in the group.  */
360
  asection *id_sec;
361
362
  /* The name for the local symbol at the start of this stub.  The
363
     stub name in the hash table has to be unique; this does not, so
364
     it can be friendlier.  */
365
  char *output_name;
366
};
367
368
/* Used to build a map of a section.  This is required for mixed-endian
369
   code/data.  */
370
371
typedef struct elf_elf_section_map
372
{
373
  bfd_vma vma;
374
  char type;
375
}
376
elf_kvx_section_map;
377
378
379
typedef struct _kvx_elf_section_data
380
{
381
  struct bfd_elf_section_data elf;
382
  unsigned int mapcount;
383
  unsigned int mapsize;
384
  elf_kvx_section_map *map;
385
}
386
_kvx_elf_section_data;
387
388
#define elf_kvx_section_data(sec) \
389
  ((_kvx_elf_section_data *) elf_section_data (sec))
390
391
struct elf_kvx_local_symbol
392
{
393
  unsigned int got_type;
394
  bfd_signed_vma got_refcount;
395
  bfd_vma got_offset;
396
};
397
398
struct elf_kvx_obj_tdata
399
{
400
  struct elf_obj_tdata root;
401
402
  /* local symbol descriptors */
403
  struct elf_kvx_local_symbol *locals;
404
405
  /* Zero to warn when linking objects with incompatible enum sizes.  */
406
  int no_enum_size_warning;
407
408
  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
409
  int no_wchar_size_warning;
410
};
411
412
#define elf_kvx_tdata(bfd)        \
413
0
  ((struct elf_kvx_obj_tdata *) (bfd)->tdata.any)
414
415
0
#define elf_kvx_locals(bfd) (elf_kvx_tdata (bfd)->locals)
416
417
#define is_kvx_elf(bfd)       \
418
0
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour  \
419
0
   && elf_tdata (bfd) != NULL        \
420
0
   && elf_object_id (bfd) == KVX_ELF_DATA)
421
422
static bool
423
elf64_kvx_mkobject (bfd *abfd)
424
123k
{
425
123k
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_kvx_obj_tdata),
426
123k
          KVX_ELF_DATA);
427
123k
}
428
429
#define elf_kvx_hash_entry(ent) \
430
0
  ((struct elf_kvx_link_hash_entry *)(ent))
431
432
0
#define GOT_UNKNOWN    0
433
0
#define GOT_NORMAL     1
434
435
0
#define GOT_TLS_GD     2
436
0
#define GOT_TLS_IE     4
437
0
#define GOT_TLS_LD     8
438
439
/* KVX ELF linker hash entry.  */
440
struct elf_kvx_link_hash_entry
441
{
442
  struct elf_link_hash_entry root;
443
444
  /* Since PLT entries have variable size, we need to record the
445
     index into .got.plt instead of recomputing it from the PLT
446
     offset.  */
447
  bfd_signed_vma plt_got_offset;
448
449
  /* Bit mask representing the type of GOT entry(s) if any required by
450
     this symbol.  */
451
  unsigned int got_type;
452
453
  /* A pointer to the most recently used stub hash entry against this
454
     symbol.  */
455
  struct elf_kvx_stub_hash_entry *stub_cache;
456
};
457
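Because the GOT_* values defined above are distinct bits, got_type can record several kinds of GOT entry for one symbol; a hypothetical combination (not taken from this file) is a TLS symbol reached through both general-dynamic and initial-exec code:

/* Sketch only.  */
unsigned int got_type = GOT_TLS_GD | GOT_TLS_IE;
if ((got_type & GOT_TLS_IE) != 0)
  {
    /* ... an IE slot is needed in addition to the GD pair ...  */
  }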
458
/* Get the KVX elf linker hash table from a link_info structure.  */
459
#define elf_kvx_hash_table(info)          \
460
0
  ((struct elf_kvx_link_hash_table *) ((info)->hash))
461
462
#define kvx_stub_hash_lookup(table, string, create, copy)   \
463
0
  ((struct elf_kvx_stub_hash_entry *)       \
464
0
   bfd_hash_lookup ((table), (string), (create), (copy)))
465
466
/* KVX ELF linker hash table.  */
467
struct elf_kvx_link_hash_table
468
{
469
  /* The main hash table.  */
470
  struct elf_link_hash_table root;
471
472
  /* Nonzero to force PIC branch veneers.  */
473
  int pic_veneer;
474
475
  /* The number of bytes in the initial entry in the PLT.  */
476
  bfd_size_type plt_header_size;
477
478
  /* The number of bytes in the subsequent PLT entries.  */
479
  bfd_size_type plt_entry_size;
480
481
  /* The bytes of the subsequent PLT entry.  */
482
  const bfd_byte *plt_entry;
483
484
  /* Short-cuts to get to dynamic linker sections.  */
485
  asection *sdynbss;
486
  asection *srelbss;
487
488
  /* Small local sym cache.  */
489
  struct sym_cache sym_cache;
490
491
  /* For convenience in allocate_dynrelocs.  */
492
  bfd *obfd;
493
494
  /* The amount of space used by the reserved portion of the sgotplt
495
     section, plus whatever space is used by the jump slots.  */
496
  bfd_vma sgotplt_jump_table_size;
497
498
  /* The stub hash table.  */
499
  struct bfd_hash_table stub_hash_table;
500
501
  /* Linker stub bfd.  */
502
  bfd *stub_bfd;
503
504
  /* Linker call-backs.  */
505
  asection *(*add_stub_section) (const char *, asection *);
506
  void (*layout_sections_again) (void);
507
508
  /* Array to keep track of which stub sections have been created, and
509
     information on stub grouping.  */
510
  struct map_stub
511
  {
512
    /* This is the section to which stubs in the group will be
513
       attached.  */
514
    asection *link_sec;
515
    /* The stub section.  */
516
    asection *stub_sec;
517
  } *stub_group;
518
519
  /* Assorted information used by elf64_kvx_size_stubs.  */
520
  unsigned int bfd_count;
521
  unsigned int top_index;
522
  asection **input_list;
523
};
524
525
/* Create an entry in a KVX ELF linker hash table.  */
526
527
static struct bfd_hash_entry *
528
elf64_kvx_link_hash_newfunc (struct bfd_hash_entry *entry,
529
           struct bfd_hash_table *table,
530
           const char *string)
531
0
{
532
0
  struct elf_kvx_link_hash_entry *ret =
533
0
    (struct elf_kvx_link_hash_entry *) entry;
534
535
  /* Allocate the structure if it has not already been allocated by a
536
     subclass.  */
537
0
  if (ret == NULL)
538
0
    ret = bfd_hash_allocate (table,
539
0
           sizeof (struct elf_kvx_link_hash_entry));
540
0
  if (ret == NULL)
541
0
    return (struct bfd_hash_entry *) ret;
542
543
  /* Call the allocation method of the superclass.  */
544
0
  ret = ((struct elf_kvx_link_hash_entry *)
545
0
   _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
546
0
             table, string));
547
0
  if (ret != NULL)
548
0
    {
549
0
      ret->got_type = GOT_UNKNOWN;
550
0
      ret->plt_got_offset = (bfd_vma) - 1;
551
0
      ret->stub_cache = NULL;
552
0
    }
553
554
0
  return (struct bfd_hash_entry *) ret;
555
0
}
556
557
/* Initialize an entry in the stub hash table.  */
558
559
static struct bfd_hash_entry *
560
stub_hash_newfunc (struct bfd_hash_entry *entry,
561
       struct bfd_hash_table *table, const char *string)
562
0
{
563
  /* Allocate the structure if it has not already been allocated by a
564
     subclass.  */
565
0
  if (entry == NULL)
566
0
    {
567
0
      entry = bfd_hash_allocate (table,
568
0
         sizeof (struct
569
0
           elf_kvx_stub_hash_entry));
570
0
      if (entry == NULL)
571
0
  return entry;
572
0
    }
573
574
  /* Call the allocation method of the superclass.  */
575
0
  entry = bfd_hash_newfunc (entry, table, string);
576
0
  if (entry != NULL)
577
0
    {
578
0
      struct elf_kvx_stub_hash_entry *eh;
579
580
      /* Initialize the local fields.  */
581
0
      eh = (struct elf_kvx_stub_hash_entry *) entry;
582
0
      eh->stub_sec = NULL;
583
0
      eh->stub_offset = 0;
584
0
      eh->target_value = 0;
585
0
      eh->target_section = NULL;
586
0
      eh->stub_type = kvx_stub_none;
587
0
      eh->h = NULL;
588
0
      eh->id_sec = NULL;
589
0
    }
590
591
0
  return entry;
592
0
}
593
594
/* Copy the extra info we tack onto an elf_link_hash_entry.  */
595
596
static void
597
elf64_kvx_copy_indirect_symbol (struct bfd_link_info *info,
598
        struct elf_link_hash_entry *dir,
599
        struct elf_link_hash_entry *ind)
600
0
{
601
0
  struct elf_kvx_link_hash_entry *edir, *eind;
602
603
0
  edir = (struct elf_kvx_link_hash_entry *) dir;
604
0
  eind = (struct elf_kvx_link_hash_entry *) ind;
605
606
0
  if (ind->root.type == bfd_link_hash_indirect)
607
0
    {
608
      /* Copy over PLT info.  */
609
0
      if (dir->got.refcount <= 0)
610
0
  {
611
0
    edir->got_type = eind->got_type;
612
0
    eind->got_type = GOT_UNKNOWN;
613
0
  }
614
0
    }
615
616
0
  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
617
0
}
618
619
/* Destroy a KVX elf linker hash table.  */
620
621
static void
622
elf64_kvx_link_hash_table_free (bfd *obfd)
623
0
{
624
0
  struct elf_kvx_link_hash_table *ret
625
0
    = (struct elf_kvx_link_hash_table *) obfd->link.hash;
626
627
0
  bfd_hash_table_free (&ret->stub_hash_table);
628
0
  _bfd_elf_link_hash_table_free (obfd);
629
0
}
630
631
/* Create a KVX elf linker hash table.  */
632
633
static struct bfd_link_hash_table *
634
elf64_kvx_link_hash_table_create (bfd *abfd)
635
0
{
636
0
  struct elf_kvx_link_hash_table *ret;
637
0
  bfd_size_type amt = sizeof (struct elf_kvx_link_hash_table);
638
639
0
  ret = bfd_zmalloc (amt);
640
0
  if (ret == NULL)
641
0
    return NULL;
642
643
0
  if (!_bfd_elf_link_hash_table_init
644
0
      (&ret->root, abfd, elf64_kvx_link_hash_newfunc,
645
0
       sizeof (struct elf_kvx_link_hash_entry), KVX_ELF_DATA))
646
0
    {
647
0
      free (ret);
648
0
      return NULL;
649
0
    }
650
651
0
  ret->plt_header_size = PLT_ENTRY_SIZE;
652
0
  ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
653
0
  ret->plt_entry = elf64_kvx_small_plt_entry;
654
655
0
  ret->obfd = abfd;
656
657
0
  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
658
0
          sizeof (struct elf_kvx_stub_hash_entry)))
659
0
    {
660
0
      _bfd_elf_link_hash_table_free (abfd);
661
0
      return NULL;
662
0
    }
663
664
0
  ret->root.root.hash_table_free = elf64_kvx_link_hash_table_free;
665
666
0
  return &ret->root.root;
667
0
}
668
669
static bfd_reloc_status_type
670
kvx_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
671
        bfd_vma offset, bfd_vma value)
672
0
{
673
0
  reloc_howto_type *howto;
674
675
0
  howto = elf64_kvx_howto_from_type (input_bfd, r_type);
676
0
  r_type = elf64_kvx_bfd_reloc_from_type (input_bfd, r_type);
677
0
  return _bfd_kvx_elf_put_addend (input_bfd,
678
0
          input_section->contents + offset, r_type,
679
0
          howto, value);
680
0
}
681
682
/* Determine the type of stub needed, if any, for a call.  */
683
684
static enum elf_kvx_stub_type
685
kvx_type_of_stub (asection *input_sec,
686
      const Elf_Internal_Rela *rel,
687
      asection *sym_sec,
688
      unsigned char st_type,
689
      bfd_vma destination)
690
0
{
691
0
  bfd_vma location;
692
0
  bfd_signed_vma branch_offset;
693
0
  unsigned int r_type;
694
0
  enum elf_kvx_stub_type stub_type = kvx_stub_none;
695
696
0
  if (st_type != STT_FUNC
697
0
      && (sym_sec == input_sec))
698
0
    return stub_type;
699
700
  /* Determine where the call point is.  */
701
0
  location = (input_sec->output_offset
702
0
        + input_sec->output_section->vma + rel->r_offset);
703
704
0
  branch_offset = (bfd_signed_vma) (destination - location);
705
706
0
  r_type = ELF64_R_TYPE (rel->r_info);
707
708
  /* We don't want to redirect any old unconditional jump in this way,
709
     only one which is being used for a sibcall, where it is
710
     acceptable for the R16 and R17 registers to be clobbered.  */
711
0
  if (r_type == R_KVX_PCREL27
712
0
      && (branch_offset > KVX_MAX_FWD_CALL_OFFSET
713
0
    || branch_offset < KVX_MAX_BWD_CALL_OFFSET))
714
0
    {
715
0
      stub_type = kvx_stub_long_branch;
716
0
    }
717
718
0
  return stub_type;
719
0
}
720
721
/* Build a name for an entry in the stub hash table.  */
722
723
static char *
724
elf64_kvx_stub_name (const asection *input_section,
725
         const asection *sym_sec,
726
         const struct elf_kvx_link_hash_entry *hash,
727
         const Elf_Internal_Rela *rel)
728
0
{
729
0
  char *stub_name;
730
0
  bfd_size_type len;
731
732
0
  if (hash)
733
0
    {
734
0
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
735
0
      stub_name = bfd_malloc (len);
736
0
      if (stub_name != NULL)
737
0
  snprintf (stub_name, len, "%08x_%s+%" PRIx64 "x",
738
0
      (unsigned int) input_section->id,
739
0
      hash->root.root.root.string,
740
0
      (uint64_t) rel->r_addend);
741
0
    }
742
0
  else
743
0
    {
744
0
      len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
745
0
      stub_name = bfd_malloc (len);
746
0
      if (stub_name != NULL)
747
0
  snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64 "x",
748
0
      (unsigned int) input_section->id,
749
0
      (unsigned int) sym_sec->id,
750
0
      (unsigned int) ELF64_R_SYM (rel->r_info),
751
0
      (uint64_t) rel->r_addend);
752
0
    }
753
754
0
  return stub_name;
755
0
}
756
757
/* Return true if symbol H should be hashed in the `.gnu.hash' section.  For
758
   executable PLT slots where the executable never takes the address of those
759
   functions, the function symbols are not added to the hash table.  */
760
761
static bool
762
elf_kvx_hash_symbol (struct elf_link_hash_entry *h)
763
0
{
764
0
  if (h->plt.offset != (bfd_vma) -1
765
0
      && !h->def_regular
766
0
      && !h->pointer_equality_needed)
767
0
    return false;
768
769
0
  return _bfd_elf_hash_symbol (h);
770
0
}
771
772
773
/* Look up an entry in the stub hash.  Stub entries are cached because
774
   creating the stub name takes a bit of time.  */
775
776
static struct elf_kvx_stub_hash_entry *
777
elf64_kvx_get_stub_entry (const asection *input_section,
778
        const asection *sym_sec,
779
        struct elf_link_hash_entry *hash,
780
        const Elf_Internal_Rela *rel,
781
        struct elf_kvx_link_hash_table *htab)
782
0
{
783
0
  struct elf_kvx_stub_hash_entry *stub_entry;
784
0
  struct elf_kvx_link_hash_entry *h =
785
0
    (struct elf_kvx_link_hash_entry *) hash;
786
0
  const asection *id_sec;
787
788
0
  if ((input_section->flags & SEC_CODE) == 0)
789
0
    return NULL;
790
791
  /* If this input section is part of a group of sections sharing one
792
     stub section, then use the id of the first section in the group.
793
     Stub names need to include a section id, as there may well be
794
     more than one stub used to reach say, printf, and we need to
795
     distinguish between them.  */
796
0
  id_sec = htab->stub_group[input_section->id].link_sec;
797
798
0
  if (h != NULL && h->stub_cache != NULL
799
0
      && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
800
0
    {
801
0
      stub_entry = h->stub_cache;
802
0
    }
803
0
  else
804
0
    {
805
0
      char *stub_name;
806
807
0
      stub_name = elf64_kvx_stub_name (id_sec, sym_sec, h, rel);
808
0
      if (stub_name == NULL)
809
0
  return NULL;
810
811
0
      stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table,
812
0
           stub_name, false, false);
813
0
      if (h != NULL)
814
0
  h->stub_cache = stub_entry;
815
816
0
      free (stub_name);
817
0
    }
818
819
0
  return stub_entry;
820
0
}
821
822
823
/* Create a stub section.  */
824
825
static asection *
826
_bfd_kvx_create_stub_section (asection *section,
827
            struct elf_kvx_link_hash_table *htab)
828
829
0
{
830
0
  size_t namelen;
831
0
  bfd_size_type len;
832
0
  char *s_name;
833
834
0
  namelen = strlen (section->name);
835
0
  len = namelen + sizeof (STUB_SUFFIX);
836
0
  s_name = bfd_alloc (htab->stub_bfd, len);
837
0
  if (s_name == NULL)
838
0
    return NULL;
839
840
0
  memcpy (s_name, section->name, namelen);
841
0
  memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
842
0
  return (*htab->add_stub_section) (s_name, section);
843
0
}
844
845
846
/* Find or create a stub section for a link section.
847
848
   Find or create the stub section used to collect stubs attached to
849
   the specified link section.  */
850
851
static asection *
852
_bfd_kvx_get_stub_for_link_section (asection *link_section,
853
            struct elf_kvx_link_hash_table *htab)
854
0
{
855
0
  if (htab->stub_group[link_section->id].stub_sec == NULL)
856
0
    htab->stub_group[link_section->id].stub_sec
857
0
      = _bfd_kvx_create_stub_section (link_section, htab);
858
0
  return htab->stub_group[link_section->id].stub_sec;
859
0
}
860
861
862
/* Find or create a stub section in the stub group for an input
863
   section.  */
864
865
static asection *
866
_bfd_kvx_create_or_find_stub_sec (asection *section,
867
          struct elf_kvx_link_hash_table *htab)
868
0
{
869
0
  asection *link_sec = htab->stub_group[section->id].link_sec;
870
0
  return _bfd_kvx_get_stub_for_link_section (link_sec, htab);
871
0
}
872
873
874
/* Add a new stub entry in the stub group associated with an input
875
   section to the stub hash.  Not all fields of the new stub entry are
876
   initialised.  */
877
878
static struct elf_kvx_stub_hash_entry *
879
_bfd_kvx_add_stub_entry_in_group (const char *stub_name,
880
          asection *section,
881
          struct elf_kvx_link_hash_table *htab)
882
0
{
883
0
  asection *link_sec;
884
0
  asection *stub_sec;
885
0
  struct elf_kvx_stub_hash_entry *stub_entry;
886
887
0
  link_sec = htab->stub_group[section->id].link_sec;
888
0
  stub_sec = _bfd_kvx_create_or_find_stub_sec (section, htab);
889
890
  /* Enter this entry into the linker stub hash table.  */
891
0
  stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table, stub_name,
892
0
             true, false);
893
0
  if (stub_entry == NULL)
894
0
    {
895
      /* xgettext:c-format */
896
0
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
897
0
        section->owner, stub_name);
898
0
      return NULL;
899
0
    }
900
901
0
  stub_entry->stub_sec = stub_sec;
902
0
  stub_entry->stub_offset = 0;
903
0
  stub_entry->id_sec = link_sec;
904
905
0
  return stub_entry;
906
0
}
907
908
static bool
909
kvx_build_one_stub (struct bfd_hash_entry *gen_entry,
910
        void *in_arg)
911
0
{
912
0
  struct elf_kvx_stub_hash_entry *stub_entry;
913
0
  asection *stub_sec;
914
0
  bfd *stub_bfd;
915
0
  bfd_byte *loc;
916
0
  bfd_vma sym_value;
917
0
  unsigned int template_size;
918
0
  const uint32_t *template;
919
0
  unsigned int i;
920
0
  struct bfd_link_info *info;
921
922
  /* Massage our args to the form they really have.  */
923
0
  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
924
925
0
  info = (struct bfd_link_info *) in_arg;
926
927
  /* Fail if the target section could not be assigned to an output
928
     section.  The user should fix his linker script.  */
929
0
  if (stub_entry->target_section->output_section == NULL
930
0
      && info->non_contiguous_regions)
931
0
    info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
932
0
            "Retry without "
933
0
            "--enable-non-contiguous-regions.\n"),
934
0
          stub_entry->target_section);
935
936
0
  stub_sec = stub_entry->stub_sec;
937
938
  /* Make a note of the offset within the stubs for this entry.  */
939
0
  stub_entry->stub_offset = stub_sec->size;
940
0
  loc = stub_sec->contents + stub_entry->stub_offset;
941
942
0
  stub_bfd = stub_sec->owner;
943
944
  /* This is the address of the stub destination.  */
945
0
  sym_value = (stub_entry->target_value
946
0
         + stub_entry->target_section->output_offset
947
0
         + stub_entry->target_section->output_section->vma);
948
949
0
  switch (stub_entry->stub_type)
950
0
    {
951
0
    case kvx_stub_long_branch:
952
0
      template = elf64_kvx_long_branch_stub;
953
0
      template_size = sizeof (elf64_kvx_long_branch_stub);
954
0
      break;
955
0
    default:
956
0
      abort ();
957
0
    }
958
959
0
  for (i = 0; i < (template_size / sizeof template[0]); i++)
960
0
    {
961
0
      bfd_putl32 (template[i], loc);
962
0
      loc += 4;
963
0
    }
964
965
0
  stub_sec->size += template_size;
966
967
0
  switch (stub_entry->stub_type)
968
0
    {
969
0
    case kvx_stub_long_branch:
970
      /* The stub uses a make insn with a 43-bit immediate.
971
   We need to apply 3 relocations:
972
   BFD_RELOC_KVX_S43_LO10,
973
   BFD_RELOC_KVX_S43_UP27,
974
   BFD_RELOC_KVX_S43_EX6.  */
975
0
      if (kvx_relocate (R_KVX_S43_LO10, stub_bfd, stub_sec,
976
0
      stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
977
0
  BFD_FAIL ();
978
0
      if (kvx_relocate (R_KVX_S43_EX6, stub_bfd, stub_sec,
979
0
      stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
980
0
  BFD_FAIL ();
981
0
      if (kvx_relocate (R_KVX_S43_UP27, stub_bfd, stub_sec,
982
0
      stub_entry->stub_offset + 4, sym_value) != bfd_reloc_ok)
983
0
  BFD_FAIL ();
984
0
      break;
985
0
    default:
986
0
      abort ();
987
0
    }
988
989
0
  return true;
990
0
}
991
992
/* As above, but don't actually build the stub.  Just bump offset so
993
   we know stub section sizes.  */
994
995
static bool
996
kvx_size_one_stub (struct bfd_hash_entry *gen_entry,
997
       void *in_arg ATTRIBUTE_UNUSED)
998
0
{
999
0
  struct elf_kvx_stub_hash_entry *stub_entry;
1000
0
  int size;
1001
1002
  /* Massage our args to the form they really have.  */
1003
0
  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
1004
1005
0
  switch (stub_entry->stub_type)
1006
0
    {
1007
0
    case kvx_stub_long_branch:
1008
0
      size = sizeof (elf64_kvx_long_branch_stub);
1009
0
      break;
1010
0
    default:
1011
0
      abort ();
1012
0
    }
1013
1014
0
  stub_entry->stub_sec->size += size;
1015
0
  return true;
1016
0
}
1017
1018
/* External entry points for sizing and building linker stubs.  */
1019
1020
/* Set up various things so that we can make a list of input sections
1021
   for each output section included in the link.  Returns -1 on error,
1022
   0 when no stubs will be needed, and 1 on success.  */
1023
1024
int
1025
elf64_kvx_setup_section_lists (bfd *output_bfd,
1026
             struct bfd_link_info *info)
1027
0
{
1028
0
  bfd *input_bfd;
1029
0
  unsigned int bfd_count;
1030
0
  unsigned int top_id, top_index;
1031
0
  asection *section;
1032
0
  asection **input_list, **list;
1033
0
  bfd_size_type amt;
1034
0
  struct elf_kvx_link_hash_table *htab =
1035
0
    elf_kvx_hash_table (info);
1036
1037
0
  if (!is_elf_hash_table ((const struct bfd_link_hash_table *)htab))
1038
0
    return 0;
1039
1040
  /* Count the number of input BFDs and find the top input section id.  */
1041
0
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
1042
0
       input_bfd != NULL; input_bfd = input_bfd->link.next)
1043
0
    {
1044
0
      bfd_count += 1;
1045
0
      for (section = input_bfd->sections;
1046
0
     section != NULL; section = section->next)
1047
0
  {
1048
0
    if (top_id < section->id)
1049
0
      top_id = section->id;
1050
0
  }
1051
0
    }
1052
0
  htab->bfd_count = bfd_count;
1053
1054
0
  amt = sizeof (struct map_stub) * (top_id + 1);
1055
0
  htab->stub_group = bfd_zmalloc (amt);
1056
0
  if (htab->stub_group == NULL)
1057
0
    return -1;
1058
1059
  /* We can't use output_bfd->section_count here to find the top output
1060
     section index as some sections may have been removed, and
1061
     _bfd_strip_section_from_output doesn't renumber the indices.  */
1062
0
  for (section = output_bfd->sections, top_index = 0;
1063
0
       section != NULL; section = section->next)
1064
0
    {
1065
0
      if (top_index < section->index)
1066
0
  top_index = section->index;
1067
0
    }
1068
1069
0
  htab->top_index = top_index;
1070
0
  amt = sizeof (asection *) * (top_index + 1);
1071
0
  input_list = bfd_malloc (amt);
1072
0
  htab->input_list = input_list;
1073
0
  if (input_list == NULL)
1074
0
    return -1;
1075
1076
  /* For sections we aren't interested in, mark their entries with a
1077
     value we can check later.  */
1078
0
  list = input_list + top_index;
1079
0
  do
1080
0
    *list = bfd_abs_section_ptr;
1081
0
  while (list-- != input_list);
1082
1083
0
  for (section = output_bfd->sections;
1084
0
       section != NULL; section = section->next)
1085
0
    {
1086
0
      if ((section->flags & SEC_CODE) != 0)
1087
0
  input_list[section->index] = NULL;
1088
0
    }
1089
1090
0
  return 1;
1091
0
}
1092
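The entry points above and below are driven from outside BFD by the linker's KVX emulation code; the exact caller is not part of this file, but a hedged sketch of the expected calling order looks like this (example_drive_stub_layout and the group_size value are assumptions):

/* Sketch only.  */
static bool
example_drive_stub_layout (bfd *output_bfd, bfd *stub_bfd,
                           struct bfd_link_info *info,
                           asection *(*add_stub_section) (const char *,
                                                          asection *),
                           void (*layout_sections_again) (void))
{
  int ret = elf64_kvx_setup_section_lists (output_bfd, info);
  if (ret < 0)
    return false;          /* Error.  */
  if (ret == 0)
    return true;           /* No stubs will be needed.  */

  /* elf64_kvx_next_input_section is expected to be called once per input
     section while the linker maps input sections to output sections.  */

  if (!elf64_kvx_size_stubs (output_bfd, stub_bfd, info,
                             1 /* group_size: 1 selects the default */,
                             add_stub_section, layout_sections_again))
    return false;

  /* elf64_kvx_build_stubs is called later, at final-link time.  */
  return true;
}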
1093
/* Used by elf64_kvx_next_input_section and group_sections.  */
1094
0
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
1095
1096
/* The linker repeatedly calls this function for each input section,
1097
   in the order that input sections are linked into output sections.
1098
   Build lists of input sections to determine groupings between which
1099
   we may insert linker stubs.  */
1100
1101
void
1102
elf64_kvx_next_input_section (struct bfd_link_info *info, asection *isec)
1103
0
{
1104
0
  struct elf_kvx_link_hash_table *htab =
1105
0
    elf_kvx_hash_table (info);
1106
1107
0
  if (isec->output_section->index <= htab->top_index)
1108
0
    {
1109
0
      asection **list = htab->input_list + isec->output_section->index;
1110
1111
0
      if (*list != bfd_abs_section_ptr)
1112
0
  {
1113
    /* Steal the link_sec pointer for our list.  */
1114
    /* This happens to make the list in reverse order,
1115
       which is what we want.  */
1116
0
    PREV_SEC (isec) = *list;
1117
0
    *list = isec;
1118
0
  }
1119
0
    }
1120
0
}
1121
1122
/* See whether we can group stub sections together.  Grouping stub
1123
   sections may result in fewer stubs.  More importantly, we need to
1124
   put all .init* and .fini* stubs at the beginning of the .init or
1125
   .fini output sections respectively, because glibc splits the
1126
   _init and _fini functions into multiple parts.  Putting a stub in
1127
   the middle of a function is not a good idea.  */
1128
1129
static void
1130
group_sections (struct elf_kvx_link_hash_table *htab,
1131
    bfd_size_type stub_group_size,
1132
    bool stubs_always_after_branch)
1133
0
{
1134
0
  asection **list = htab->input_list;
1135
1136
0
  do
1137
0
    {
1138
0
      asection *tail = *list;
1139
0
      asection *head;
1140
1141
0
      if (tail == bfd_abs_section_ptr)
1142
0
  continue;
1143
1144
      /* Reverse the list: we must avoid placing stubs at the
1145
   beginning of the section because the beginning of the text
1146
   section may be required for an interrupt vector in bare metal
1147
   code.  */
1148
0
#define NEXT_SEC PREV_SEC
1149
0
      head = NULL;
1150
0
      while (tail != NULL)
1151
0
  {
1152
    /* Pop from tail.  */
1153
0
    asection *item = tail;
1154
0
    tail = PREV_SEC (item);
1155
1156
    /* Push on head.  */
1157
0
    NEXT_SEC (item) = head;
1158
0
    head = item;
1159
0
  }
1160
1161
0
      while (head != NULL)
1162
0
  {
1163
0
    asection *curr;
1164
0
    asection *next;
1165
0
    bfd_vma stub_group_start = head->output_offset;
1166
0
    bfd_vma end_of_next;
1167
1168
0
    curr = head;
1169
0
    while (NEXT_SEC (curr) != NULL)
1170
0
      {
1171
0
        next = NEXT_SEC (curr);
1172
0
        end_of_next = next->output_offset + next->size;
1173
0
        if (end_of_next - stub_group_start >= stub_group_size)
1174
    /* End of NEXT is too far from start, so stop.  */
1175
0
    break;
1176
        /* Add NEXT to the group.  */
1177
0
        curr = next;
1178
0
      }
1179
1180
    /* OK, the size from the start to the start of CURR is less
1181
       than stub_group_size and thus can be handled by one stub
1182
       section.  (Or the head section is itself larger than
1183
       stub_group_size, in which case we may be toast.)
1184
       We should really be keeping track of the total size of
1185
       stubs added here, as stubs contribute to the final output
1186
       section size.  */
1187
0
    do
1188
0
      {
1189
0
        next = NEXT_SEC (head);
1190
        /* Set up this stub group.  */
1191
0
        htab->stub_group[head->id].link_sec = curr;
1192
0
      }
1193
0
    while (head != curr && (head = next) != NULL);
1194
1195
    /* But wait, there's more!  Input sections up to stub_group_size
1196
       bytes after the stub section can be handled by it too.  */
1197
0
    if (!stubs_always_after_branch)
1198
0
      {
1199
0
        stub_group_start = curr->output_offset + curr->size;
1200
1201
0
        while (next != NULL)
1202
0
    {
1203
0
      end_of_next = next->output_offset + next->size;
1204
0
      if (end_of_next - stub_group_start >= stub_group_size)
1205
        /* End of NEXT is too far from stubs, so stop.  */
1206
0
        break;
1207
      /* Add NEXT to the stub group.  */
1208
0
      head = next;
1209
0
      next = NEXT_SEC (head);
1210
0
      htab->stub_group[head->id].link_sec = curr;
1211
0
    }
1212
0
      }
1213
0
    head = next;
1214
0
  }
1215
0
    }
1216
0
  while (list++ != htab->input_list + htab->top_index);
1217
1218
0
  free (htab->input_list);
1219
0
}
1220
1221
static void
1222
_bfd_kvx_resize_stubs (struct elf_kvx_link_hash_table *htab)
1223
0
{
1224
0
  asection *section;
1225
1226
  /* OK, we've added some stubs.  Find out the new size of the
1227
     stub sections.  */
1228
0
  for (section = htab->stub_bfd->sections;
1229
0
       section != NULL; section = section->next)
1230
0
    {
1231
      /* Ignore non-stub sections.  */
1232
0
      if (!strstr (section->name, STUB_SUFFIX))
1233
0
  continue;
1234
0
      section->size = 0;
1235
0
    }
1236
1237
0
  bfd_hash_traverse (&htab->stub_hash_table, kvx_size_one_stub, htab);
1238
0
}
1239
1240
/* Satisfy the ELF linker by filling in some fields in our fake bfd.  */
1241
1242
bool
1243
kvx_elf64_init_stub_bfd (struct bfd_link_info *info,
1244
      bfd *stub_bfd)
1245
0
{
1246
0
  struct elf_kvx_link_hash_table *htab;
1247
1248
0
  elf_elfheader (stub_bfd)->e_ident[EI_CLASS] = ELFCLASS64;
1249
1250
/* Always hook our dynamic sections into the first bfd, which is the
1251
   linker created stub bfd.  This ensures that the GOT header is at
1252
   the start of the output TOC section.  */
1253
0
  htab = elf_kvx_hash_table (info);
1254
0
  if (htab == NULL)
1255
0
    return false;
1256
1257
0
  return true;
1258
0
}
1259
1260
/* Determine and set the size of the stub section for a final link.
1261
1262
   The basic idea here is to examine all the relocations looking for
1263
   PC-relative calls to a target that is unreachable with a 27-bit
1264
   immediate (found in call and goto).  */
1265
1266
bool
1267
elf64_kvx_size_stubs (bfd *output_bfd,
1268
         bfd *stub_bfd,
1269
         struct bfd_link_info *info,
1270
         bfd_signed_vma group_size,
1271
         asection * (*add_stub_section) (const char *,
1272
                 asection *),
1273
         void (*layout_sections_again) (void))
1274
0
{
1275
0
  bfd_size_type stub_group_size;
1276
0
  bool stubs_always_before_branch;
1277
0
  bool stub_changed = false;
1278
0
  struct elf_kvx_link_hash_table *htab = elf_kvx_hash_table (info);
1279
1280
  /* Propagate mach to stub bfd, because it may not have been
1281
     finalized when we created stub_bfd.  */
1282
0
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
1283
0
         bfd_get_mach (output_bfd));
1284
1285
  /* Stash our params away.  */
1286
0
  htab->stub_bfd = stub_bfd;
1287
0
  htab->add_stub_section = add_stub_section;
1288
0
  htab->layout_sections_again = layout_sections_again;
1289
0
  stubs_always_before_branch = group_size < 0;
1290
0
  if (group_size < 0)
1291
0
    stub_group_size = -group_size;
1292
0
  else
1293
0
    stub_group_size = group_size;
1294
1295
0
  if (stub_group_size == 1)
1296
0
    {
1297
      /* Default values.  */
1298
      /* KVX branch range is +-256MB. The value used is 1MB less.  */
1299
0
      stub_group_size = 255 * 1024 * 1024;
1300
0
    }
1301
1302
0
  group_sections (htab, stub_group_size, stubs_always_before_branch);
1303
1304
0
  (*htab->layout_sections_again) ();
1305
1306
0
  while (1)
1307
0
    {
1308
0
      bfd *input_bfd;
1309
1310
0
      for (input_bfd = info->input_bfds;
1311
0
     input_bfd != NULL; input_bfd = input_bfd->link.next)
1312
0
  {
1313
0
    Elf_Internal_Shdr *symtab_hdr;
1314
0
    asection *section;
1315
0
    Elf_Internal_Sym *local_syms = NULL;
1316
1317
0
    if (!is_kvx_elf (input_bfd)
1318
0
        || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
1319
0
      continue;
1320
1321
    /* We'll need the symbol table in a second.  */
1322
0
    symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
1323
0
    if (symtab_hdr->sh_info == 0)
1324
0
      continue;
1325
1326
    /* Walk over each section attached to the input bfd.  */
1327
0
    for (section = input_bfd->sections;
1328
0
         section != NULL; section = section->next)
1329
0
      {
1330
0
        Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1331
1332
        /* If there aren't any relocs, then there's nothing more
1333
     to do.  */
1334
0
        if ((section->flags & SEC_RELOC) == 0
1335
0
      || section->reloc_count == 0
1336
0
      || (section->flags & SEC_CODE) == 0)
1337
0
    continue;
1338
1339
        /* If this section is a link-once section that will be
1340
     discarded, then don't create any stubs.  */
1341
0
        if (section->output_section == NULL
1342
0
      || section->output_section->owner != output_bfd)
1343
0
    continue;
1344
1345
        /* Get the relocs.  */
1346
0
        internal_relocs
1347
0
    = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
1348
0
               NULL, info->keep_memory);
1349
0
        if (internal_relocs == NULL)
1350
0
    goto error_ret_free_local;
1351
1352
        /* Now examine each relocation.  */
1353
0
        irela = internal_relocs;
1354
0
        irelaend = irela + section->reloc_count;
1355
0
        for (; irela < irelaend; irela++)
1356
0
    {
1357
0
      unsigned int r_type, r_indx;
1358
0
      enum elf_kvx_stub_type stub_type;
1359
0
      struct elf_kvx_stub_hash_entry *stub_entry;
1360
0
      asection *sym_sec;
1361
0
      bfd_vma sym_value;
1362
0
      bfd_vma destination;
1363
0
      struct elf_kvx_link_hash_entry *hash;
1364
0
      const char *sym_name;
1365
0
      char *stub_name;
1366
0
      const asection *id_sec;
1367
0
      unsigned char st_type;
1368
0
      bfd_size_type len;
1369
1370
0
      r_type = ELF64_R_TYPE (irela->r_info);
1371
0
      r_indx = ELF64_R_SYM (irela->r_info);
1372
1373
0
      if (r_type >= (unsigned int) R_KVX_end)
1374
0
        {
1375
0
          bfd_set_error (bfd_error_bad_value);
1376
0
        error_ret_free_internal:
1377
0
          if (elf_section_data (section)->relocs == NULL)
1378
0
      free (internal_relocs);
1379
0
          goto error_ret_free_local;
1380
0
        }
1381
1382
      /* Only look for stubs on unconditional branch and
1383
         branch and link instructions.  */
1384
      /* This catches CALL and GOTO insn */
1385
0
      if (r_type != (unsigned int) R_KVX_PCREL27)
1386
0
        continue;
1387
1388
      /* Now determine the call target, its name, value,
1389
         section.  */
1390
0
      sym_sec = NULL;
1391
0
      sym_value = 0;
1392
0
      destination = 0;
1393
0
      hash = NULL;
1394
0
      sym_name = NULL;
1395
0
      if (r_indx < symtab_hdr->sh_info)
1396
0
        {
1397
          /* It's a local symbol.  */
1398
0
          Elf_Internal_Sym *sym;
1399
0
          Elf_Internal_Shdr *hdr;
1400
1401
0
          if (local_syms == NULL)
1402
0
      {
1403
0
        local_syms
1404
0
          = (Elf_Internal_Sym *) symtab_hdr->contents;
1405
0
        if (local_syms == NULL)
1406
0
          local_syms
1407
0
            = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
1408
0
                  symtab_hdr->sh_info, 0,
1409
0
                  NULL, NULL, NULL);
1410
0
        if (local_syms == NULL)
1411
0
          goto error_ret_free_internal;
1412
0
      }
1413
1414
0
          sym = local_syms + r_indx;
1415
0
          hdr = elf_elfsections (input_bfd)[sym->st_shndx];
1416
0
          sym_sec = hdr->bfd_section;
1417
0
          if (!sym_sec)
1418
      /* This is an undefined symbol.  It can never
1419
         be resolved.  */
1420
0
      continue;
1421
1422
0
          if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
1423
0
      sym_value = sym->st_value;
1424
0
          destination = (sym_value + irela->r_addend
1425
0
             + sym_sec->output_offset
1426
0
             + sym_sec->output_section->vma);
1427
0
          st_type = ELF_ST_TYPE (sym->st_info);
1428
0
          sym_name
1429
0
      = bfd_elf_string_from_elf_section (input_bfd,
1430
0
                 symtab_hdr->sh_link,
1431
0
                 sym->st_name);
1432
0
        }
1433
0
      else
1434
0
        {
1435
0
          int e_indx;
1436
1437
0
          e_indx = r_indx - symtab_hdr->sh_info;
1438
0
          hash = ((struct elf_kvx_link_hash_entry *)
1439
0
            elf_sym_hashes (input_bfd)[e_indx]);
1440
1441
0
          while (hash->root.root.type == bfd_link_hash_indirect
1442
0
           || hash->root.root.type == bfd_link_hash_warning)
1443
0
      hash = ((struct elf_kvx_link_hash_entry *)
1444
0
        hash->root.root.u.i.link);
1445
1446
0
          if (hash->root.root.type == bfd_link_hash_defined
1447
0
        || hash->root.root.type == bfd_link_hash_defweak)
1448
0
      {
1449
0
        struct elf_kvx_link_hash_table *globals =
1450
0
          elf_kvx_hash_table (info);
1451
0
        sym_sec = hash->root.root.u.def.section;
1452
0
        sym_value = hash->root.root.u.def.value;
1453
        /* For a destination in a shared library,
1454
           use the PLT stub as target address to
1455
           decide whether a branch stub is
1456
           needed.  */
1457
0
        if (globals->root.splt != NULL && hash != NULL
1458
0
            && hash->root.plt.offset != (bfd_vma) - 1)
1459
0
          {
1460
0
            sym_sec = globals->root.splt;
1461
0
            sym_value = hash->root.plt.offset;
1462
0
            if (sym_sec->output_section != NULL)
1463
0
        destination = (sym_value
1464
0
                 + sym_sec->output_offset
1465
0
                 + sym_sec->output_section->vma);
1466
0
          }
1467
0
        else if (sym_sec->output_section != NULL)
1468
0
          destination = (sym_value + irela->r_addend
1469
0
             + sym_sec->output_offset
1470
0
             + sym_sec->output_section->vma);
1471
0
      }
1472
0
          else if (hash->root.root.type == bfd_link_hash_undefined
1473
0
             || (hash->root.root.type
1474
0
           == bfd_link_hash_undefweak))
1475
0
      {
1476
        /* For a shared library, use the PLT stub as
1477
           target address to decide whether a long
1478
           branch stub is needed.
1479
           For absolute code, they cannot be handled.  */
1480
0
        struct elf_kvx_link_hash_table *globals =
1481
0
          elf_kvx_hash_table (info);
1482
1483
0
        if (globals->root.splt != NULL && hash != NULL
1484
0
            && hash->root.plt.offset != (bfd_vma) - 1)
1485
0
          {
1486
0
            sym_sec = globals->root.splt;
1487
0
            sym_value = hash->root.plt.offset;
1488
0
            if (sym_sec->output_section != NULL)
1489
0
        destination = (sym_value
1490
0
                 + sym_sec->output_offset
1491
0
                 + sym_sec->output_section->vma);
1492
0
          }
1493
0
        else
1494
0
          continue;
1495
0
      }
1496
0
          else
1497
0
      {
1498
0
        bfd_set_error (bfd_error_bad_value);
1499
0
        goto error_ret_free_internal;
1500
0
      }
1501
0
          st_type = ELF_ST_TYPE (hash->root.type);
1502
0
          sym_name = hash->root.root.root.string;
1503
0
        }
1504
1505
      /* Determine what (if any) linker stub is needed.  */
1506
0
      stub_type = kvx_type_of_stub (section, irela, sym_sec,
1507
0
            st_type, destination);
1508
0
      if (stub_type == kvx_stub_none)
1509
0
        continue;
1510
1511
      /* Support for grouping stub sections.  */
1512
0
      id_sec = htab->stub_group[section->id].link_sec;
1513
1514
      /* Get the name of this stub.  */
1515
0
      stub_name = elf64_kvx_stub_name (id_sec, sym_sec, hash,
1516
0
              irela);
1517
0
      if (!stub_name)
1518
0
        goto error_ret_free_internal;
1519
1520
0
      stub_entry =
1521
0
        kvx_stub_hash_lookup (&htab->stub_hash_table,
1522
0
           stub_name, false, false);
1523
0
      if (stub_entry != NULL)
1524
0
        {
1525
          /* The proper stub has already been created.  */
1526
0
          free (stub_name);
1527
          /* Always update this stub's target since it may have
1528
       changed after layout.  */
1529
0
          stub_entry->target_value = sym_value + irela->r_addend;
1530
0
          continue;
1531
0
        }
1532
1533
0
      stub_entry = _bfd_kvx_add_stub_entry_in_group
1534
0
        (stub_name, section, htab);
1535
0
      if (stub_entry == NULL)
1536
0
        {
1537
0
          free (stub_name);
1538
0
          goto error_ret_free_internal;
1539
0
        }
1540
1541
0
      stub_entry->target_value = sym_value + irela->r_addend;
1542
0
      stub_entry->target_section = sym_sec;
1543
0
      stub_entry->stub_type = stub_type;
1544
0
      stub_entry->h = hash;
1545
0
      stub_entry->st_type = st_type;
1546
1547
0
      if (sym_name == NULL)
1548
0
        sym_name = "unnamed";
1549
0
      len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
1550
0
      stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
1551
0
      if (stub_entry->output_name == NULL)
1552
0
        {
1553
0
          free (stub_name);
1554
0
          goto error_ret_free_internal;
1555
0
        }
1556
1557
0
      snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
1558
0
          sym_name);
1559
1560
0
      stub_changed = true;
1561
0
    }
1562
1563
        /* We're done with the internal relocs, free them.  */
1564
0
        if (elf_section_data (section)->relocs == NULL)
1565
0
    free (internal_relocs);
1566
0
      }
1567
0
  }
1568
1569
0
      if (!stub_changed)
1570
0
  break;
1571
1572
0
      _bfd_kvx_resize_stubs (htab);
1573
1574
      /* Ask the linker to do its stuff.  */
1575
0
      (*htab->layout_sections_again) ();
1576
0
      stub_changed = false;
1577
0
    }
1578
1579
0
  return true;
1580
1581
0
error_ret_free_local:
1582
0
  return false;
1583
1584
0
}
1585
1586
/* Build all the stubs associated with the current output file.  The
1587
   stubs are kept in a hash table attached to the main linker hash
1588
   table.  We also set up the .plt entries for statically linked PIC
1589
   functions here.  This function is called via kvx_elf_finish in the
1590
   linker.  */
1591
1592
bool
1593
elf64_kvx_build_stubs (struct bfd_link_info *info)
1594
0
{
1595
0
  asection *stub_sec;
1596
0
  struct bfd_hash_table *table;
1597
0
  struct elf_kvx_link_hash_table *htab;
1598
1599
0
  htab = elf_kvx_hash_table (info);
1600
1601
0
  for (stub_sec = htab->stub_bfd->sections;
1602
0
       stub_sec != NULL; stub_sec = stub_sec->next)
1603
0
    {
1604
0
      bfd_size_type size;
1605
1606
      /* Ignore non-stub sections.  */
1607
0
      if (!strstr (stub_sec->name, STUB_SUFFIX))
1608
0
  continue;
1609
1610
      /* Allocate memory to hold the linker stubs.  */
1611
0
      size = stub_sec->size;
1612
0
      stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
1613
0
      if (stub_sec->contents == NULL && size != 0)
1614
0
  return false;
1615
0
      stub_sec->size = 0;
1616
0
    }
1617
1618
  /* Build the stubs as directed by the stub hash table.  */
1619
0
  table = &htab->stub_hash_table;
1620
0
  bfd_hash_traverse (table, kvx_build_one_stub, info);
1621
1622
0
  return true;
1623
0
}
1624
1625
static bfd_vma
1626
kvx_calculate_got_entry_vma (struct elf_link_hash_entry *h,
1627
         struct elf_kvx_link_hash_table
1628
         *globals, struct bfd_link_info *info,
1629
         bfd_vma value, bfd *output_bfd,
1630
         bool *unresolved_reloc_p)
1631
0
{
1632
0
  bfd_vma off = (bfd_vma) - 1;
1633
0
  asection *basegot = globals->root.sgot;
1634
0
  bool dyn = globals->root.dynamic_sections_created;
1635
1636
0
  if (h != NULL)
1637
0
    {
1638
0
      BFD_ASSERT (basegot != NULL);
1639
0
      off = h->got.offset;
1640
0
      BFD_ASSERT (off != (bfd_vma) - 1);
1641
0
      if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
1642
0
    || (bfd_link_pic (info)
1643
0
        && SYMBOL_REFERENCES_LOCAL (info, h))
1644
0
    || (ELF_ST_VISIBILITY (h->other)
1645
0
        && h->root.type == bfd_link_hash_undefweak))
1646
0
  {
1647
    /* This is actually a static link, or it is a -Bsymbolic link
1648
       and the symbol is defined locally.  We must initialize this
1649
       entry in the global offset table.  Since the offset must
1650
       always be a multiple of 8 (4 in the case of ILP32), we use
1651
       the least significant bit to record whether we have
1652
       initialized it already.
1653
       When doing a dynamic link, we create a .rel(a).got relocation
1654
       entry to initialize the value.  This is done in the
1655
       finish_dynamic_symbol routine.  */
1656
0
    if ((off & 1) != 0)
1657
0
      off &= ~1;
1658
0
    else
1659
0
      {
1660
0
        bfd_put_64 (output_bfd, value, basegot->contents + off);
1661
0
        h->got.offset |= 1;
1662
0
      }
1663
0
  }
1664
0
      else
1665
0
  *unresolved_reloc_p = false;
1666
0
    }
1667
1668
0
  return off;
1669
0
}
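
The GOT-offset bookkeeping in kvx_calculate_got_entry_vma above relies on GOT offsets always being a multiple of GOT_ENTRY_SIZE, which leaves bit 0 free to act as an "already initialized" flag. A minimal stand-alone sketch of the same idiom, assuming a plain uint64_t offset and an array in place of h->got.offset and the .got contents (the names below are illustrative only, not BFD API):

/* Sketch only: `got' stands in for the .got section contents and
   `got_off' for h->got.offset; writes go to a plain array instead of
   bfd_put_64.  */
#include <stdint.h>
#include <stdio.h>

static uint64_t got[4];                  /* pretend GOT, 8-byte entries */

static uint64_t
resolve_got_offset (uint64_t *got_off, uint64_t value)
{
  if ((*got_off & 1) == 0)               /* first use: fill the slot */
    {
      got[*got_off / 8] = value;
      *got_off |= 1;                     /* remember that it is initialized */
    }
  return *got_off & ~(uint64_t) 1;       /* always hand back the real offset */
}

int
main (void)
{
  uint64_t off = 8;                      /* second GOT entry */
  printf ("%llu\n", (unsigned long long) resolve_got_offset (&off, 0x1234));
  printf ("%llu\n", (unsigned long long) resolve_got_offset (&off, 0x1234));
  return 0;
}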
1670
1671
static unsigned int
1672
kvx_reloc_got_type (bfd_reloc_code_real_type r_type)
1673
0
{
1674
0
  switch (r_type)
1675
0
    {
1676
      /* Extracted with:
1677
   awk 'match ($0, /HOWTO.*R_(KVX.*_GOT(OFF)?(64)?_.*),/,ary) \
1678
   {print "case BFD_RELOC_" ary[1] ":";}' elfxx-kvxc.def  */
1679
0
    case BFD_RELOC_KVX_S37_GOTOFF_LO10:
1680
0
    case BFD_RELOC_KVX_S37_GOTOFF_UP27:
1681
1682
0
    case BFD_RELOC_KVX_S37_GOT_LO10:
1683
0
    case BFD_RELOC_KVX_S37_GOT_UP27:
1684
1685
0
    case BFD_RELOC_KVX_S43_GOTOFF_LO10:
1686
0
    case BFD_RELOC_KVX_S43_GOTOFF_UP27:
1687
0
    case BFD_RELOC_KVX_S43_GOTOFF_EX6:
1688
1689
0
    case BFD_RELOC_KVX_S43_GOT_LO10:
1690
0
    case BFD_RELOC_KVX_S43_GOT_UP27:
1691
0
    case BFD_RELOC_KVX_S43_GOT_EX6:
1692
0
      return GOT_NORMAL;
1693
1694
0
    case BFD_RELOC_KVX_S37_TLS_GD_LO10:
1695
0
    case BFD_RELOC_KVX_S37_TLS_GD_UP27:
1696
0
    case BFD_RELOC_KVX_S43_TLS_GD_LO10:
1697
0
    case BFD_RELOC_KVX_S43_TLS_GD_UP27:
1698
0
    case BFD_RELOC_KVX_S43_TLS_GD_EX6:
1699
0
      return GOT_TLS_GD;
1700
1701
0
    case BFD_RELOC_KVX_S37_TLS_LD_LO10:
1702
0
    case BFD_RELOC_KVX_S37_TLS_LD_UP27:
1703
0
    case BFD_RELOC_KVX_S43_TLS_LD_LO10:
1704
0
    case BFD_RELOC_KVX_S43_TLS_LD_UP27:
1705
0
    case BFD_RELOC_KVX_S43_TLS_LD_EX6:
1706
0
      return GOT_TLS_LD;
1707
1708
0
    case BFD_RELOC_KVX_S37_TLS_IE_LO10:
1709
0
    case BFD_RELOC_KVX_S37_TLS_IE_UP27:
1710
0
    case BFD_RELOC_KVX_S43_TLS_IE_LO10:
1711
0
    case BFD_RELOC_KVX_S43_TLS_IE_UP27:
1712
0
    case BFD_RELOC_KVX_S43_TLS_IE_EX6:
1713
0
      return GOT_TLS_IE;
1714
1715
0
    default:
1716
0
      break;
1717
0
    }
1718
0
  return GOT_UNKNOWN;
1719
0
}
1720
1721
static bool
1722
kvx_can_relax_tls (bfd *input_bfd ATTRIBUTE_UNUSED,
1723
           struct bfd_link_info *info ATTRIBUTE_UNUSED,
1724
           bfd_reloc_code_real_type r_type ATTRIBUTE_UNUSED,
1725
           struct elf_link_hash_entry *h ATTRIBUTE_UNUSED,
1726
           unsigned long r_symndx ATTRIBUTE_UNUSED)
1727
0
{
1728
0
  if (! IS_KVX_TLS_RELAX_RELOC (r_type))
1729
0
    return false;
1730
1731
  /* Relaxing hook. Disabled on KVX. */
1732
  /* See elfnn-aarch64.c */
1733
0
  return true;
1734
0
}
1735
1736
/* Given the relocation code R_TYPE, return the relaxed bfd reloc
1737
   enumerator.  */
1738
1739
static bfd_reloc_code_real_type
1740
kvx_tls_transition (bfd *input_bfd,
1741
      struct bfd_link_info *info,
1742
      unsigned int r_type,
1743
      struct elf_link_hash_entry *h,
1744
      unsigned long r_symndx)
1745
0
{
1746
0
  bfd_reloc_code_real_type bfd_r_type
1747
0
    = elf64_kvx_bfd_reloc_from_type (input_bfd, r_type);
1748
1749
0
  if (! kvx_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
1750
0
    return bfd_r_type;
1751
1752
0
  return bfd_r_type;
1753
0
}
1754
1755
/* Return the base VMA address which should be subtracted from real addresses
1756
   when resolving R_KVX_*_TLS_GD_* and R_KVX_*_TLS_LD_* relocation.  */
1757
1758
static bfd_vma
1759
dtpoff_base (struct bfd_link_info *info)
1760
0
{
1761
  /* If tls_sec is NULL, we should have signalled an error already.  */
1762
0
  BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
1763
0
  return elf_hash_table (info)->tls_sec->vma;
1764
0
}
1765
1766
/* Return the base VMA address which should be subtracted from real addresses
1767
   when resolving R_KVX_*_TLS_IE_* and R_KVX_*_TLS_LE_* relocations.  */
1768
1769
static bfd_vma
1770
tpoff_base (struct bfd_link_info *info)
1771
0
{
1772
0
  struct elf_link_hash_table *htab = elf_hash_table (info);
1773
1774
  /* If tls_sec is NULL, we should have signalled an error already.  */
1775
0
  BFD_ASSERT (htab->tls_sec != NULL);
1776
1777
0
  bfd_vma base = align_power ((bfd_vma) 0,
1778
0
            htab->tls_sec->alignment_power);
1779
0
  return htab->tls_sec->vma - base;
1780
0
}
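
dtpoff_base and tpoff_base are used further down to turn a symbol VMA into a DTP- or TP-relative offset. A small numeric illustration with made-up addresses (not taken from any real link):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* Hypothetical numbers: the TLS template section ends up at VMA
     0x10000 and a thread-local variable at VMA 0x10010.  */
  uint64_t tls_sec_vma = 0x10000;   /* stand-in for tls_sec->vma */
  uint64_t sym_value = 0x10010;

  /* This is the subtraction the BFD_RELOC_KVX_*_TLS_LE_* and
     *_TLS_DTPOFF_* cases in elf64_kvx_final_link_relocate perform
     before patching the instruction field.  */
  printf ("offset = %#llx\n", (unsigned long long) (sym_value - tls_sec_vma));
  return 0;
}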
1781
1782
static bfd_vma *
1783
symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
1784
           unsigned long r_symndx)
1785
0
{
1786
  /* Calculate the address of the GOT entry for symbol
1787
     referred to in h.  */
1788
0
  if (h != NULL)
1789
0
    return &h->got.offset;
1790
0
  else
1791
0
    {
1792
      /* local symbol */
1793
0
      struct elf_kvx_local_symbol *l;
1794
1795
0
      l = elf_kvx_locals (input_bfd);
1796
0
      return &l[r_symndx].got_offset;
1797
0
    }
1798
0
}
1799
1800
static void
1801
symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
1802
      unsigned long r_symndx)
1803
0
{
1804
0
  bfd_vma *p;
1805
0
  p = symbol_got_offset_ref (input_bfd, h, r_symndx);
1806
0
  *p |= 1;
1807
0
}
1808
1809
static int
1810
symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
1811
        unsigned long r_symndx)
1812
0
{
1813
0
  bfd_vma value;
1814
0
  value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1815
0
  return value & 1;
1816
0
}
1817
1818
static bfd_vma
1819
symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
1820
       unsigned long r_symndx)
1821
0
{
1822
0
  bfd_vma value;
1823
0
  value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1824
0
  value &= ~1;
1825
0
  return value;
1826
0
}
1827
1828
/* N_ONES produces N one bits, without overflowing machine arithmetic.  */
1829
0
#define N_ONES(n) (((((bfd_vma) 1 << ((n) -1)) - 1) << 1) | 1)
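
The double shift in N_ONES matters when n equals the width of bfd_vma: a single shift by the full width would be undefined behaviour in C. A self-contained check of the same idiom, assuming a 64-bit uint64_t in place of bfd_vma:

#include <stdint.h>
#include <stdio.h>

/* Same idiom as N_ONES above, spelled out for uint64_t: shifting by
   (n - 1) and then by 1 stays defined even for n == 64, whereas a
   direct (uint64_t) 1 << 64 would be undefined behaviour.  */
#define ONES(n) (((((uint64_t) 1 << ((n) - 1)) - 1) << 1) | 1)

int
main (void)
{
  printf ("%#llx\n", (unsigned long long) ONES (37));  /* 0x1fffffffff */
  printf ("%#llx\n", (unsigned long long) ONES (64));  /* 0xffffffffffffffff */
  return 0;
}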
1830
1831
/* This is a copy/paste + modification from
1832
   reloc.c:_bfd_relocate_contents.  Relocations are applied to 32-bit
1833
   words, so all overflow checks would report an overflow for values
1834
   above 32 bits.  */
1835
static bfd_reloc_status_type
1836
check_signed_overflow (enum complain_overflow complain_on_overflow,
1837
           bfd_reloc_code_real_type bfd_r_type, bfd *input_bfd,
1838
           bfd_vma relocation)
1839
0
{
1840
0
  bfd_reloc_status_type flag = bfd_reloc_ok;
1841
0
  bfd_vma addrmask, fieldmask, signmask, ss;
1842
0
  bfd_vma a, b, sum;
1843
0
  bfd_vma x = 0;
1844
1845
  /* These usually come from the howto struct.  As we don't check for
1846
     values fitting in bitfields or in subparts of words, we set all
1847
     of these so that the check behaves as if the field starts at the
1848
     first bit.  */
1849
0
  unsigned int rightshift = 0;
1850
0
  unsigned int bitpos = 0;
1851
0
  unsigned int bitsize = 0;
1852
0
  bfd_vma src_mask = -1;
1853
1854
  /* Only regular symbol relocations are checked here.  Other
1855
     relocations (GOT, TLS) could be checked if the need is
1856
     confirmed.  At the moment, we keep the previous behavior
1857
     (i.e. unchecked) for those.  */
1858
0
  switch (bfd_r_type)
1859
0
    {
1860
0
    case BFD_RELOC_KVX_S37_LO10:
1861
0
    case BFD_RELOC_KVX_S37_UP27:
1862
0
      bitsize = 37;
1863
0
      break;
1864
1865
0
    case BFD_RELOC_KVX_S32_LO5:
1866
0
    case BFD_RELOC_KVX_S32_UP27:
1867
0
      bitsize = 32;
1868
0
      break;
1869
1870
0
    case BFD_RELOC_KVX_S43_LO10:
1871
0
    case BFD_RELOC_KVX_S43_UP27:
1872
0
    case BFD_RELOC_KVX_S43_EX6:
1873
0
      bitsize = 43;
1874
0
      break;
1875
1876
0
    case BFD_RELOC_KVX_S64_LO10:
1877
0
    case BFD_RELOC_KVX_S64_UP27:
1878
0
    case BFD_RELOC_KVX_S64_EX27:
1879
0
      bitsize = 64;
1880
0
      break;
1881
1882
0
    default:
1883
0
      return bfd_reloc_ok;
1884
0
    }
1885
1886
  /* direct copy/paste from reloc.c below */
1887
1888
  /* Get the values to be added together.  For signed and unsigned
1889
     relocations, we assume that all values should be truncated to
1890
     the size of an address.  For bitfields, all the bits matter.
1891
     See also bfd_check_overflow.  */
1892
0
  fieldmask = N_ONES (bitsize);
1893
0
  signmask = ~fieldmask;
1894
0
  addrmask = (N_ONES (bfd_arch_bits_per_address (input_bfd))
1895
0
        | (fieldmask << rightshift));
1896
0
  a = (relocation & addrmask) >> rightshift;
1897
0
  b = (x & src_mask & addrmask) >> bitpos;
1898
0
  addrmask >>= rightshift;
1899
1900
0
  switch (complain_on_overflow)
1901
0
    {
1902
0
    case complain_overflow_signed:
1903
      /* If any sign bits are set, all sign bits must be set.
1904
   That is, A must be a valid negative address after
1905
   shifting.  */
1906
0
      signmask = ~(fieldmask >> 1);
1907
      /* Fall thru */
1908
1909
0
    case complain_overflow_bitfield:
1910
      /* Much like the signed check, but for a field one bit
1911
   wider.  We allow a bitfield to represent numbers in the
1912
   range -2**n to 2**n-1, where n is the number of bits in the
1913
   field.  Note that when bfd_vma is 32 bits, a 32-bit reloc
1914
   can't overflow, which is exactly what we want.  */
1915
0
      ss = a & signmask;
1916
0
      if (ss != 0 && ss != (addrmask & signmask))
1917
0
  flag = bfd_reloc_overflow;
1918
1919
      /* We only need this next bit of code if the sign bit of B
1920
   is below the sign bit of A.  This would only happen if
1921
   SRC_MASK had fewer bits than BITSIZE.  Note that if
1922
   SRC_MASK has more bits than BITSIZE, we can get into
1923
   trouble; we would need to verify that B is in range, as
1924
   we do for A above.  */
1925
0
      ss = ((~src_mask) >> 1) & src_mask;
1926
0
      ss >>= bitpos;
1927
1928
      /* Set all the bits above the sign bit.  */
1929
0
      b = (b ^ ss) - ss;
1930
1931
      /* Now we can do the addition.  */
1932
0
      sum = a + b;
1933
1934
      /* See if the result has the correct sign.  Bits above the
1935
   sign bit are junk now; ignore them.  If the sum is
1936
   positive, make sure we did not have all negative inputs;
1937
   if the sum is negative, make sure we did not have all
1938
   positive inputs.  The test below looks only at the sign
1939
   bits, and it really just
1940
   SIGN (A) == SIGN (B) && SIGN (A) != SIGN (SUM)
1941
1942
   We mask with addrmask here to explicitly allow an address
1943
   wrap-around.  The Linux kernel relies on it, and it is
1944
   the only way to write assembler code which can run when
1945
   loaded at a location 0x80000000 away from the location at
1946
   which it is linked.  */
1947
0
      if (((~(a ^ b)) & (a ^ sum)) & signmask & addrmask)
1948
0
  flag = bfd_reloc_overflow;
1949
0
      break;
1950
1951
0
    case complain_overflow_unsigned:
1952
      /* Checking for an unsigned overflow is relatively easy:
1953
   trim the addresses and add, and trim the result as well.
1954
   Overflow is normally indicated when the result does not
1955
   fit in the field.  However, we also need to consider the
1956
   case when, e.g., fieldmask is 0x7fffffff or smaller, an
1957
   input is 0x80000000, and bfd_vma is only 32 bits; then we
1958
   will get sum == 0, but there is an overflow, since the
1959
   inputs did not fit in the field.  Instead of doing a
1960
   separate test, we can check for this by or-ing in the
1961
   operands when testing for the sum overflowing its final
1962
   field.  */
1963
0
      sum = (a + b) & addrmask;
1964
0
      if ((a | b | sum) & signmask)
1965
0
  flag = bfd_reloc_overflow;
1966
0
      break;
1967
1968
0
    default:
1969
0
      abort ();
1970
0
    }
1971
0
  return flag;
1972
0
}
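
As a concrete illustration of the signed/bitfield check above, here is a stand-alone restatement for the simple case where the existing field contents (b) are zero, using a 37-bit field as in the BFD_RELOC_KVX_S37_* relocations; `overflows_s37` is a name made up for this sketch:

#include <stdint.h>
#include <stdio.h>

/* Simplified version of the signed check above: with b == 0, a value
   overflows a signed 37-bit field exactly when its bits above the sign
   bit are neither all zero nor all one.  */
static int
overflows_s37 (uint64_t a)
{
  uint64_t fieldmask = ((uint64_t) 1 << 37) - 1;
  uint64_t signmask = ~(fieldmask >> 1);       /* bits that must agree */
  uint64_t addrmask = ~(uint64_t) 0;           /* 64-bit addresses */
  uint64_t ss = a & signmask;
  return ss != 0 && ss != (addrmask & signmask);
}

int
main (void)
{
  printf ("%d\n", overflows_s37 ((uint64_t) 1 << 36));         /* 1: 2^36 is out of range */
  printf ("%d\n", overflows_s37 (((uint64_t) 1 << 36) - 1));   /* 0: 2^36 - 1 fits */
  printf ("%d\n", overflows_s37 (- ((uint64_t) 1 << 36)));     /* 0: -2^36 fits */
  return 0;
}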
1973
1974
/* Perform a relocation as part of a final link.  */
1975
static bfd_reloc_status_type
1976
elf64_kvx_final_link_relocate (reloc_howto_type *howto,
1977
             bfd *input_bfd,
1978
             bfd *output_bfd,
1979
             asection *input_section,
1980
             bfd_byte *contents,
1981
             Elf_Internal_Rela *rel,
1982
             bfd_vma value,
1983
             struct bfd_link_info *info,
1984
             asection *sym_sec,
1985
             struct elf_link_hash_entry *h,
1986
             bool *unresolved_reloc_p,
1987
             bool save_addend,
1988
             bfd_vma *saved_addend,
1989
             Elf_Internal_Sym *sym)
1990
0
{
1991
0
  Elf_Internal_Shdr *symtab_hdr;
1992
0
  unsigned int r_type = howto->type;
1993
0
  bfd_reloc_code_real_type bfd_r_type
1994
0
    = elf64_kvx_bfd_reloc_from_howto (howto);
1995
0
  bfd_reloc_code_real_type new_bfd_r_type;
1996
0
  unsigned long r_symndx;
1997
0
  bfd_byte *hit_data = contents + rel->r_offset;
1998
0
  bfd_vma place, off;
1999
0
  bfd_vma addend;
2000
0
  struct elf_kvx_link_hash_table *globals;
2001
0
  bool weak_undef_p;
2002
0
  asection *base_got;
2003
0
  bfd_reloc_status_type rret = bfd_reloc_ok;
2004
0
  bool resolved_to_zero;
2005
0
  globals = elf_kvx_hash_table (info);
2006
2007
0
  symtab_hdr = &elf_symtab_hdr (input_bfd);
2008
2009
0
  BFD_ASSERT (is_kvx_elf (input_bfd));
2010
2011
0
  r_symndx = ELF64_R_SYM (rel->r_info);
2012
2013
  /* It is possible to have linker relaxations on some TLS access
2014
     models.  Update our information here.  */
2015
0
  new_bfd_r_type = kvx_tls_transition (input_bfd, info, r_type, h, r_symndx);
2016
0
  if (new_bfd_r_type != bfd_r_type)
2017
0
    {
2018
0
      bfd_r_type = new_bfd_r_type;
2019
0
      howto = elf64_kvx_howto_from_bfd_reloc (bfd_r_type);
2020
0
      BFD_ASSERT (howto != NULL);
2021
0
      r_type = howto->type;
2022
0
    }
2023
2024
0
  place = input_section->output_section->vma
2025
0
    + input_section->output_offset + rel->r_offset;
2026
2027
  /* Get addend, accumulating the addend for consecutive relocs
2028
     which refer to the same offset.  */
2029
0
  addend = saved_addend ? *saved_addend : 0;
2030
0
  addend += rel->r_addend;
2031
2032
0
  weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
2033
0
      : bfd_is_und_section (sym_sec));
2034
0
  resolved_to_zero = (h != NULL
2035
0
          && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
2036
2037
0
  switch (bfd_r_type)
2038
0
    {
2039
0
    case BFD_RELOC_KVX_64:
2040
0
#if ARCH_SIZE == 64
2041
0
    case BFD_RELOC_KVX_32:
2042
0
#endif
2043
0
    case BFD_RELOC_KVX_S37_LO10:
2044
0
    case BFD_RELOC_KVX_S37_UP27:
2045
2046
0
    case BFD_RELOC_KVX_S32_LO5:
2047
0
    case BFD_RELOC_KVX_S32_UP27:
2048
2049
0
    case BFD_RELOC_KVX_S43_LO10:
2050
0
    case BFD_RELOC_KVX_S43_UP27:
2051
0
    case BFD_RELOC_KVX_S43_EX6:
2052
2053
0
    case BFD_RELOC_KVX_S64_LO10:
2054
0
    case BFD_RELOC_KVX_S64_UP27:
2055
0
    case BFD_RELOC_KVX_S64_EX27:
2056
      /* When generating a shared library or PIE, these relocations
2057
   are copied into the output file to be resolved at run time.  */
2058
0
      if (bfd_link_pic (info)
2059
0
    && (input_section->flags & SEC_ALLOC)
2060
0
    && (h == NULL
2061
0
        || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2062
0
      && !resolved_to_zero)
2063
0
        || h->root.type != bfd_link_hash_undefweak))
2064
0
  {
2065
0
    Elf_Internal_Rela outrel;
2066
0
    bfd_byte *loc;
2067
0
    bool skip, relocate;
2068
0
    asection *sreloc;
2069
2070
0
    *unresolved_reloc_p = false;
2071
2072
0
    skip = false;
2073
0
    relocate = false;
2074
2075
0
    outrel.r_addend = addend;
2076
0
    outrel.r_offset =
2077
0
      _bfd_elf_section_offset (output_bfd, info, input_section,
2078
0
             rel->r_offset);
2079
0
    if (outrel.r_offset == (bfd_vma) - 1)
2080
0
      skip = true;
2081
0
    else if (outrel.r_offset == (bfd_vma) - 2)
2082
0
      {
2083
0
        skip = true;
2084
0
        relocate = true;
2085
0
      }
2086
2087
0
    outrel.r_offset += (input_section->output_section->vma
2088
0
            + input_section->output_offset);
2089
2090
0
    if (skip)
2091
0
      memset (&outrel, 0, sizeof outrel);
2092
0
    else if (h != NULL
2093
0
       && h->dynindx != -1
2094
0
       && (!bfd_link_pic (info) || !info->symbolic
2095
0
           || !h->def_regular))
2096
0
      outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
2097
0
    else if (bfd_r_type == BFD_RELOC_KVX_32
2098
0
       || bfd_r_type == BFD_RELOC_KVX_64)
2099
0
      {
2100
0
        int symbol;
2101
2102
        /* On SVR4-ish systems, the dynamic loader cannot
2103
     relocate the text and data segments independently,
2104
     so the symbol does not matter.  */
2105
0
        symbol = 0;
2106
0
        outrel.r_info = ELF64_R_INFO (symbol, R_KVX_RELATIVE);
2107
0
        outrel.r_addend += value;
2108
0
      }
2109
0
    else if (bfd_link_pic (info) && info->symbolic)
2110
0
      {
2111
0
        goto skip_because_pic;
2112
0
      }
2113
0
    else
2114
0
      {
2115
        /* We may end up here from bad input code trying to
2116
     insert a relocation on symbols within code.  We do not
2117
     want that currently, and such code should use GOT +
2118
     KVX_32/64 relocs that translate into KVX_RELATIVE.  */
2119
0
        const char *name;
2120
0
        if (h && h->root.root.string)
2121
0
    name = h->root.root.string;
2122
0
        else
2123
0
    name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2124
0
           NULL);
2125
2126
0
        (*_bfd_error_handler)
2127
    /* xgettext:c-format */
2128
0
    (_("%pB(%pA+%#" PRIx64 "): "
2129
0
       "unresolvable %s relocation in section `%s'"),
2130
0
     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2131
0
     name);
2132
0
        return bfd_reloc_notsupported;
2133
0
      }
2134
2135
0
    sreloc = elf_section_data (input_section)->sreloc;
2136
0
    if (sreloc == NULL || sreloc->contents == NULL)
2137
0
      return bfd_reloc_notsupported;
2138
2139
0
    loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
2140
0
    bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
2141
2142
0
    if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
2143
0
      {
2144
        /* Sanity check that we have previously allocated
2145
     sufficient space in the relocation section for the
2146
     number of relocations we actually want to emit.  */
2147
0
        abort ();
2148
0
      }
2149
2150
    /* If this reloc is against an external symbol, we do not want to
2151
       fiddle with the addend.  Otherwise, we need to include the symbol
2152
       value so that it becomes an addend for the dynamic reloc.  */
2153
0
    if (!relocate)
2154
0
      return bfd_reloc_ok;
2155
2156
0
    rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2157
0
          input_bfd, value + addend);
2158
0
    if (rret != bfd_reloc_ok)
2159
0
      return rret;
2160
2161
0
    return _bfd_final_link_relocate (howto, input_bfd, input_section,
2162
0
             contents, rel->r_offset, value,
2163
0
             addend);
2164
0
  }
2165
2166
0
    skip_because_pic:
2167
0
      rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2168
0
            input_bfd, value + addend);
2169
0
      if (rret != bfd_reloc_ok)
2170
0
  return rret;
2171
2172
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2173
0
               contents, rel->r_offset, value,
2174
0
               addend);
2175
0
      break;
2176
2177
0
    case BFD_RELOC_KVX_PCREL17:
2178
0
    case BFD_RELOC_KVX_PCREL27:
2179
0
      {
2180
  /* BCU insns are always first in a bundle, so there is no need
2181
     to correct the address using the offset within the bundle.  */
2182
2183
0
  asection *splt = globals->root.splt;
2184
0
  bool via_plt_p =
2185
0
    splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
2186
2187
  /* A call to an undefined weak symbol is converted to a jump to
2188
     the next instruction unless a PLT entry will be created.
2189
     The jump to the next instruction is optimized as a NOP.
2190
     Do the same for local undefined symbols.  */
2191
0
  if (weak_undef_p && ! via_plt_p)
2192
0
    {
2193
0
      bfd_putl32 (INSN_NOP, hit_data);
2194
0
      return bfd_reloc_ok;
2195
0
    }
2196
2197
  /* If the call goes through a PLT entry, make sure to
2198
     check distance to the right destination address.  */
2199
0
  if (via_plt_p)
2200
0
    value = (splt->output_section->vma
2201
0
       + splt->output_offset + h->plt.offset);
2202
2203
  /* Check if a stub has to be inserted because the destination
2204
     is too far away.  */
2205
0
  struct elf_kvx_stub_hash_entry *stub_entry = NULL;
2206
2207
  /* If the target symbol is global and marked as a function, the
2208
     relocation applies to a function call or a tail call.  In this
2209
     situation we can veneer out-of-range branches.  The veneers
2210
     use R16 and R17, hence they cannot be used for arbitrary
2211
     out-of-range branches that occur within the body of a function.  */
2212
2213
  /* Check if a stub has to be inserted because the destination
2214
     is too far away.  */
2215
0
  if (! kvx_valid_call_p (value, place))
2216
0
    {
2217
      /* The target is out of reach, so redirect the branch to
2218
         the local stub for this function.  */
2219
0
      stub_entry = elf64_kvx_get_stub_entry (input_section,
2220
0
               sym_sec, h,
2221
0
               rel, globals);
2222
0
      if (stub_entry != NULL)
2223
0
        value = (stub_entry->stub_offset
2224
0
           + stub_entry->stub_sec->output_offset
2225
0
           + stub_entry->stub_sec->output_section->vma);
2226
      /* We have redirected the destination to stub entry address,
2227
         so ignore any addend record in the original rela entry.  */
2228
0
      addend = 0;
2229
0
    }
2230
0
      }
2231
0
      *unresolved_reloc_p = false;
2232
2233
      /* FALLTHROUGH */
2234
2235
      /* PCREL 32 are used in dwarf2 table for exception handling */
2236
0
    case BFD_RELOC_KVX_32_PCREL:
2237
0
    case BFD_RELOC_KVX_S64_PCREL_LO10:
2238
0
    case BFD_RELOC_KVX_S64_PCREL_UP27:
2239
0
    case BFD_RELOC_KVX_S64_PCREL_EX27:
2240
0
    case BFD_RELOC_KVX_S37_PCREL_LO10:
2241
0
    case BFD_RELOC_KVX_S37_PCREL_UP27:
2242
0
    case BFD_RELOC_KVX_S43_PCREL_LO10:
2243
0
    case BFD_RELOC_KVX_S43_PCREL_UP27:
2244
0
    case BFD_RELOC_KVX_S43_PCREL_EX6:
2245
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2246
0
               contents, rel->r_offset, value,
2247
0
               addend);
2248
0
      break;
2249
2250
0
    case BFD_RELOC_KVX_S37_TLS_LE_LO10:
2251
0
    case BFD_RELOC_KVX_S37_TLS_LE_UP27:
2252
2253
0
    case BFD_RELOC_KVX_S43_TLS_LE_LO10:
2254
0
    case BFD_RELOC_KVX_S43_TLS_LE_UP27:
2255
0
    case BFD_RELOC_KVX_S43_TLS_LE_EX6:
2256
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2257
0
               contents, rel->r_offset,
2258
0
               value - tpoff_base (info), addend);
2259
0
      break;
2260
2261
0
    case BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10:
2262
0
    case BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27:
2263
2264
0
    case BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10:
2265
0
    case BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27:
2266
0
    case BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6:
2267
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2268
0
               contents, rel->r_offset,
2269
0
               value - dtpoff_base (info), addend);
2270
2271
0
    case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2272
0
    case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2273
2274
0
    case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2275
0
    case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2276
0
    case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2277
2278
0
    case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2279
0
    case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2280
2281
0
    case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2282
0
    case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2283
0
    case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2284
2285
0
    case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2286
0
    case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2287
2288
0
    case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2289
0
    case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2290
0
    case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2291
2292
0
      if (globals->root.sgot == NULL)
2293
0
  return bfd_reloc_notsupported;
2294
0
      value = symbol_got_offset (input_bfd, h, r_symndx);
2295
2296
0
      _bfd_final_link_relocate (howto, input_bfd, input_section,
2297
0
        contents, rel->r_offset, value, addend);
2298
0
      *unresolved_reloc_p = false;
2299
0
      break;
2300
2301
0
    case BFD_RELOC_KVX_S37_GOTADDR_UP27:
2302
0
    case BFD_RELOC_KVX_S37_GOTADDR_LO10:
2303
2304
0
    case BFD_RELOC_KVX_S43_GOTADDR_UP27:
2305
0
    case BFD_RELOC_KVX_S43_GOTADDR_EX6:
2306
0
    case BFD_RELOC_KVX_S43_GOTADDR_LO10:
2307
2308
0
    case BFD_RELOC_KVX_S64_GOTADDR_UP27:
2309
0
    case BFD_RELOC_KVX_S64_GOTADDR_EX27:
2310
0
    case BFD_RELOC_KVX_S64_GOTADDR_LO10:
2311
0
      {
2312
0
  if (globals->root.sgot == NULL)
2313
0
    BFD_ASSERT (h != NULL);
2314
2315
0
  value = globals->root.sgot->output_section->vma
2316
0
    + globals->root.sgot->output_offset;
2317
2318
0
  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2319
0
           contents, rel->r_offset, value,
2320
0
           addend);
2321
0
      }
2322
0
      break;
2323
2324
0
    case BFD_RELOC_KVX_S37_GOTOFF_LO10:
2325
0
    case BFD_RELOC_KVX_S37_GOTOFF_UP27:
2326
2327
0
    case BFD_RELOC_KVX_32_GOTOFF:
2328
0
    case BFD_RELOC_KVX_64_GOTOFF:
2329
2330
0
    case BFD_RELOC_KVX_S43_GOTOFF_LO10:
2331
0
    case BFD_RELOC_KVX_S43_GOTOFF_UP27:
2332
0
    case BFD_RELOC_KVX_S43_GOTOFF_EX6:
2333
2334
0
      {
2335
0
  asection *basegot = globals->root.sgot;
2336
  /* BFD_ASSERT(h == NULL); */
2337
0
  BFD_ASSERT(globals->root.sgot != NULL);
2338
0
  value -= basegot->output_section->vma + basegot->output_offset;
2339
0
  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2340
0
           contents, rel->r_offset, value,
2341
0
           addend);
2342
0
      }
2343
0
      break;
2344
2345
0
    case BFD_RELOC_KVX_S37_GOT_LO10:
2346
0
    case BFD_RELOC_KVX_S37_GOT_UP27:
2347
2348
0
    case BFD_RELOC_KVX_32_GOT:
2349
0
    case BFD_RELOC_KVX_64_GOT:
2350
2351
0
    case BFD_RELOC_KVX_S43_GOT_LO10:
2352
0
    case BFD_RELOC_KVX_S43_GOT_UP27:
2353
0
    case BFD_RELOC_KVX_S43_GOT_EX6:
2354
2355
0
      if (globals->root.sgot == NULL)
2356
0
  BFD_ASSERT (h != NULL);
2357
2358
0
      if (h != NULL)
2359
0
  {
2360
0
    value = kvx_calculate_got_entry_vma (h, globals, info, value,
2361
0
                 output_bfd,
2362
0
                 unresolved_reloc_p);
2363
#ifdef UGLY_DEBUG
2364
    printf("GOT_LO/HI for %s, value %x\n", h->root.root.string, value);
2365
#endif
2366
2367
0
    return _bfd_final_link_relocate (howto, input_bfd, input_section,
2368
0
             contents, rel->r_offset, value,
2369
0
             addend);
2370
0
  }
2371
0
      else
2372
0
  {
2373
#ifdef UGLY_DEBUG
2374
    printf("GOT_LO/HI with h NULL, initial value %x\n", value);
2375
#endif
2376
0
    struct elf_kvx_local_symbol *locals = elf_kvx_locals (input_bfd);
2377
2378
0
    if (locals == NULL)
2379
0
      {
2380
0
        int howto_index = bfd_r_type - BFD_RELOC_KVX_RELOC_START;
2381
0
        _bfd_error_handler
2382
    /* xgettext:c-format */
2383
0
    (_("%pB: local symbol descriptor table be NULL when applying "
2384
0
       "relocation %s against local symbol"),
2385
0
     input_bfd, elf_kvx_howto_table[howto_index].name);
2386
0
        abort ();
2387
0
      }
2388
2389
0
    off = symbol_got_offset (input_bfd, h, r_symndx);
2390
0
    base_got = globals->root.sgot;
2391
0
    bfd_vma got_entry_addr = (base_got->output_section->vma
2392
0
            + base_got->output_offset + off);
2393
2394
0
    if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2395
0
      {
2396
0
        bfd_put_64 (output_bfd, value, base_got->contents + off);
2397
2398
0
        if (bfd_link_pic (info))
2399
0
    {
2400
0
      asection *s;
2401
0
      Elf_Internal_Rela outrel;
2402
2403
      /* For PIC executables and shared libraries we need
2404
         to relocate the GOT entry at run time.  */
2405
0
      s = globals->root.srelgot;
2406
0
      if (s == NULL)
2407
0
        abort ();
2408
2409
0
      outrel.r_offset = got_entry_addr;
2410
0
      outrel.r_info = ELF64_R_INFO (0, R_KVX_RELATIVE);
2411
0
      outrel.r_addend = value;
2412
0
      elf_append_rela (output_bfd, s, &outrel);
2413
0
    }
2414
2415
0
        symbol_got_offset_mark (input_bfd, h, r_symndx);
2416
0
      }
2417
2418
    /* Update the relocation value to GOT entry addr as we have
2419
       transformed the direct data access into an indirect data
2420
       access through GOT.  */
2421
0
    value = got_entry_addr;
2422
2423
0
    return _bfd_final_link_relocate (howto, input_bfd, input_section,
2424
0
             contents, rel->r_offset, off, 0);
2425
0
  }
2426
0
      break;
2427
2428
0
    default:
2429
0
      return bfd_reloc_notsupported;
2430
0
    }
2431
2432
0
  if (saved_addend)
2433
0
    *saved_addend = value;
2434
2435
  /* Only apply the final relocation in a sequence.  */
2436
0
  if (save_addend)
2437
0
    return bfd_reloc_continue;
2438
2439
0
  return _bfd_kvx_elf_put_addend (input_bfd, hit_data, bfd_r_type,
2440
0
          howto, value);
2441
0
}
2442
2443
2444
2445
/* Relocate a KVX ELF section.  */
2446
2447
static int
2448
elf64_kvx_relocate_section (bfd *output_bfd,
2449
          struct bfd_link_info *info,
2450
          bfd *input_bfd,
2451
          asection *input_section,
2452
          bfd_byte *contents,
2453
          Elf_Internal_Rela *relocs,
2454
          Elf_Internal_Sym *local_syms,
2455
          asection **local_sections)
2456
0
{
2457
0
  Elf_Internal_Shdr *symtab_hdr;
2458
0
  struct elf_link_hash_entry **sym_hashes;
2459
0
  Elf_Internal_Rela *rel;
2460
0
  Elf_Internal_Rela *relend;
2461
0
  const char *name;
2462
0
  struct elf_kvx_link_hash_table *globals;
2463
0
  bool save_addend = false;
2464
0
  bfd_vma addend = 0;
2465
2466
0
  globals = elf_kvx_hash_table (info);
2467
2468
0
  symtab_hdr = &elf_symtab_hdr (input_bfd);
2469
0
  sym_hashes = elf_sym_hashes (input_bfd);
2470
2471
0
  rel = relocs;
2472
0
  relend = relocs + input_section->reloc_count;
2473
0
  for (; rel < relend; rel++)
2474
0
    {
2475
0
      unsigned int r_type;
2476
0
      bfd_reloc_code_real_type bfd_r_type;
2477
0
      reloc_howto_type *howto;
2478
0
      unsigned long r_symndx;
2479
0
      Elf_Internal_Sym *sym;
2480
0
      asection *sec;
2481
0
      struct elf_link_hash_entry *h;
2482
0
      bfd_vma relocation;
2483
0
      bfd_reloc_status_type r;
2484
0
      arelent bfd_reloc;
2485
0
      char sym_type;
2486
0
      bool unresolved_reloc = false;
2487
0
      char *error_message = NULL;
2488
2489
0
      r_symndx = ELF64_R_SYM (rel->r_info);
2490
0
      r_type = ELF64_R_TYPE (rel->r_info);
2491
2492
0
      bfd_reloc.howto = elf64_kvx_howto_from_type (input_bfd, r_type);
2493
0
      howto = bfd_reloc.howto;
2494
2495
0
      if (howto == NULL)
2496
0
  return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2497
2498
0
      bfd_r_type = elf64_kvx_bfd_reloc_from_howto (howto);
2499
2500
0
      h = NULL;
2501
0
      sym = NULL;
2502
0
      sec = NULL;
2503
2504
0
      if (r_symndx < symtab_hdr->sh_info) /* A local symbol. */
2505
0
  {
2506
0
    sym = local_syms + r_symndx;
2507
0
    sym_type = ELF64_ST_TYPE (sym->st_info);
2508
0
    sec = local_sections[r_symndx];
2509
2510
    /* An object file might have a reference to a local
2511
       undefined symbol.  This is a draft object file, but we
2512
       should at least do something about it.  */
2513
0
    if (r_type != R_KVX_NONE
2514
0
        && r_type != R_KVX_S37_GOTADDR_LO10
2515
0
        && r_type != R_KVX_S37_GOTADDR_UP27
2516
0
        && r_type != R_KVX_S64_GOTADDR_LO10
2517
0
        && r_type != R_KVX_S64_GOTADDR_UP27
2518
0
        && r_type != R_KVX_S64_GOTADDR_EX27
2519
0
        && r_type != R_KVX_S43_GOTADDR_LO10
2520
0
        && r_type != R_KVX_S43_GOTADDR_UP27
2521
0
        && r_type != R_KVX_S43_GOTADDR_EX6
2522
0
        && bfd_is_und_section (sec)
2523
0
        && ELF_ST_BIND (sym->st_info) != STB_WEAK)
2524
0
      (*info->callbacks->undefined_symbol)
2525
0
        (info, bfd_elf_string_from_elf_section
2526
0
         (input_bfd, symtab_hdr->sh_link, sym->st_name),
2527
0
         input_bfd, input_section, rel->r_offset, true);
2528
2529
0
    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2530
0
  }
2531
0
      else
2532
0
  {
2533
0
    bool warned, ignored;
2534
2535
0
    RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2536
0
           r_symndx, symtab_hdr, sym_hashes,
2537
0
           h, sec, relocation,
2538
0
           unresolved_reloc, warned, ignored);
2539
2540
0
    sym_type = h->type;
2541
0
  }
2542
2543
0
      if (sec != NULL && discarded_section (sec))
2544
0
  RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
2545
0
           rel, 1, relend, howto, 0, contents);
2546
2547
0
      if (bfd_link_relocatable (info))
2548
0
  continue;
2549
2550
0
      if (h != NULL)
2551
0
  name = h->root.root.string;
2552
0
      else
2553
0
  {
2554
0
    name = (bfd_elf_string_from_elf_section
2555
0
      (input_bfd, symtab_hdr->sh_link, sym->st_name));
2556
0
    if (name == NULL || *name == '\0')
2557
0
      name = bfd_section_name (sec);
2558
0
  }
2559
2560
0
      if (r_symndx != 0
2561
0
    && r_type != R_KVX_NONE
2562
0
    && (h == NULL
2563
0
        || h->root.type == bfd_link_hash_defined
2564
0
        || h->root.type == bfd_link_hash_defweak)
2565
0
    && IS_KVX_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
2566
0
  {
2567
0
    (*_bfd_error_handler)
2568
0
      ((sym_type == STT_TLS
2569
        /* xgettext:c-format */
2570
0
        ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
2571
        /* xgettext:c-format */
2572
0
        : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
2573
0
       input_bfd,
2574
0
       input_section, (uint64_t) rel->r_offset, howto->name, name);
2575
0
  }
2576
2577
      /* Original aarch64 has relaxation handling for TLS here. */
2578
0
      r = bfd_reloc_continue;
2579
2580
      /* There may be multiple consecutive relocations for the
2581
   same offset.  In that case we are supposed to treat the
2582
   output of each relocation as the addend for the next.  */
2583
0
      if (rel + 1 < relend
2584
0
    && rel->r_offset == rel[1].r_offset
2585
0
    && ELF64_R_TYPE (rel[1].r_info) != R_KVX_NONE)
2586
2587
0
  save_addend = true;
2588
0
      else
2589
0
  save_addend = false;
2590
2591
0
      if (r == bfd_reloc_continue)
2592
0
  r = elf64_kvx_final_link_relocate (howto, input_bfd, output_bfd,
2593
0
             input_section, contents, rel,
2594
0
             relocation, info, sec,
2595
0
             h, &unresolved_reloc,
2596
0
             save_addend, &addend, sym);
2597
2598
0
      switch (elf64_kvx_bfd_reloc_from_type (input_bfd, r_type))
2599
0
  {
2600
0
  case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2601
0
  case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2602
2603
0
  case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2604
0
  case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2605
0
  case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2606
2607
0
  case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2608
0
  case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2609
2610
0
  case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2611
0
  case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2612
0
  case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2613
2614
0
    if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2615
0
      {
2616
0
        bool need_relocs = false;
2617
0
        bfd_byte *loc;
2618
0
        int indx;
2619
0
        bfd_vma off;
2620
2621
0
        off = symbol_got_offset (input_bfd, h, r_symndx);
2622
0
        indx = h && h->dynindx != -1 ? h->dynindx : 0;
2623
2624
0
        need_relocs =
2625
0
    (bfd_link_pic (info) || indx != 0) &&
2626
0
    (h == NULL
2627
0
     || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2628
0
     || h->root.type != bfd_link_hash_undefweak);
2629
2630
0
        BFD_ASSERT (globals->root.srelgot != NULL);
2631
2632
0
        if (need_relocs)
2633
0
    {
2634
0
      Elf_Internal_Rela rela;
2635
0
      rela.r_info = ELF64_R_INFO (indx, R_KVX_64_DTPMOD);
2636
0
      rela.r_addend = 0;
2637
0
      rela.r_offset = globals->root.sgot->output_section->vma +
2638
0
        globals->root.sgot->output_offset + off;
2639
2640
0
      loc = globals->root.srelgot->contents;
2641
0
      loc += globals->root.srelgot->reloc_count++
2642
0
        * RELOC_SIZE (htab);
2643
0
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
2644
2645
0
      bfd_reloc_code_real_type real_type =
2646
0
        elf64_kvx_bfd_reloc_from_type (input_bfd, r_type);
2647
2648
0
      if (real_type == BFD_RELOC_KVX_S37_TLS_LD_LO10
2649
0
          || real_type == BFD_RELOC_KVX_S37_TLS_LD_UP27
2650
0
          || real_type == BFD_RELOC_KVX_S43_TLS_LD_LO10
2651
0
          || real_type == BFD_RELOC_KVX_S43_TLS_LD_UP27
2652
0
          || real_type == BFD_RELOC_KVX_S43_TLS_LD_EX6)
2653
0
        {
2654
          /* For local dynamic, don't generate DTPOFF in any case.
2655
       Initialize the DTPOFF slot to zero, so we get the module
2656
       base address when invoking the runtime TLS resolver.  */
2657
0
          bfd_put_64 (output_bfd, 0,
2658
0
          globals->root.sgot->contents + off
2659
0
          + GOT_ENTRY_SIZE);
2660
0
        }
2661
0
      else if (indx == 0)
2662
0
        {
2663
0
          bfd_put_64 (output_bfd,
2664
0
          relocation - dtpoff_base (info),
2665
0
          globals->root.sgot->contents + off
2666
0
          + GOT_ENTRY_SIZE);
2667
0
        }
2668
0
      else
2669
0
        {
2670
          /* This TLS symbol is global. We emit a
2671
       relocation to fixup the tls offset at load
2672
       time.  */
2673
0
          rela.r_info =
2674
0
      ELF64_R_INFO (indx, R_KVX_64_DTPOFF);
2675
0
          rela.r_addend = 0;
2676
0
          rela.r_offset =
2677
0
      (globals->root.sgot->output_section->vma
2678
0
       + globals->root.sgot->output_offset + off
2679
0
       + GOT_ENTRY_SIZE);
2680
2681
0
          loc = globals->root.srelgot->contents;
2682
0
          loc += globals->root.srelgot->reloc_count++
2683
0
      * RELOC_SIZE (globals);
2684
0
          bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
2685
0
          bfd_put_64 (output_bfd, (bfd_vma) 0,
2686
0
          globals->root.sgot->contents + off
2687
0
          + GOT_ENTRY_SIZE);
2688
0
        }
2689
0
    }
2690
0
        else
2691
0
    {
2692
0
      bfd_put_64 (output_bfd, (bfd_vma) 1,
2693
0
            globals->root.sgot->contents + off);
2694
0
      bfd_put_64 (output_bfd,
2695
0
            relocation - dtpoff_base (info),
2696
0
            globals->root.sgot->contents + off
2697
0
            + GOT_ENTRY_SIZE);
2698
0
    }
2699
2700
0
        symbol_got_offset_mark (input_bfd, h, r_symndx);
2701
0
      }
2702
0
    break;
2703
2704
0
  case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2705
0
  case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2706
2707
0
  case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2708
0
  case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2709
0
  case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2710
0
    if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2711
0
      {
2712
0
        bool need_relocs = false;
2713
0
        bfd_byte *loc;
2714
0
        int indx;
2715
0
        bfd_vma off;
2716
2717
0
        off = symbol_got_offset (input_bfd, h, r_symndx);
2718
2719
0
        indx = h && h->dynindx != -1 ? h->dynindx : 0;
2720
2721
0
        need_relocs =
2722
0
    (bfd_link_pic (info) || indx != 0) &&
2723
0
    (h == NULL
2724
0
     || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2725
0
     || h->root.type != bfd_link_hash_undefweak);
2726
2727
0
        BFD_ASSERT (globals->root.srelgot != NULL);
2728
2729
0
        if (need_relocs)
2730
0
    {
2731
0
      Elf_Internal_Rela rela;
2732
2733
0
      if (indx == 0)
2734
0
        rela.r_addend = relocation - dtpoff_base (info);
2735
0
      else
2736
0
        rela.r_addend = 0;
2737
2738
0
      rela.r_info = ELF64_R_INFO (indx, R_KVX_64_TPOFF);
2739
0
      rela.r_offset = globals->root.sgot->output_section->vma +
2740
0
        globals->root.sgot->output_offset + off;
2741
2742
0
      loc = globals->root.srelgot->contents;
2743
0
      loc += globals->root.srelgot->reloc_count++
2744
0
        * RELOC_SIZE (htab);
2745
2746
0
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
2747
2748
0
      bfd_put_64 (output_bfd, rela.r_addend,
2749
0
            globals->root.sgot->contents + off);
2750
0
    }
2751
0
        else
2752
0
    bfd_put_64 (output_bfd, relocation - tpoff_base (info),
2753
0
          globals->root.sgot->contents + off);
2754
2755
0
        symbol_got_offset_mark (input_bfd, h, r_symndx);
2756
0
      }
2757
0
    break;
2758
2759
0
  default:
2760
0
    break;
2761
0
  }
2762
2763
      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
2764
   because such sections are not SEC_ALLOC and thus ld.so will
2765
   not process them.  */
2766
0
      if (unresolved_reloc
2767
0
    && !((input_section->flags & SEC_DEBUGGING) != 0
2768
0
         && h->def_dynamic)
2769
0
    && _bfd_elf_section_offset (output_bfd, info, input_section,
2770
0
              +rel->r_offset) != (bfd_vma) - 1)
2771
0
  {
2772
0
    (*_bfd_error_handler)
2773
      /* xgettext:c-format */
2774
0
      (_("%pB(%pA+%#" PRIx64 "): "
2775
0
         "unresolvable %s relocation against symbol `%s'"),
2776
0
       input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2777
0
       h->root.root.string);
2778
0
    return false;
2779
0
  }
2780
2781
0
      if (r != bfd_reloc_ok && r != bfd_reloc_continue)
2782
0
  {
2783
0
    switch (r)
2784
0
      {
2785
0
      case bfd_reloc_overflow:
2786
0
        (*info->callbacks->reloc_overflow)
2787
0
    (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
2788
0
     input_bfd, input_section, rel->r_offset);
2789
2790
        /* Original aarch64 code had a check for alignment correctness.  */
2791
0
        break;
2792
2793
0
      case bfd_reloc_undefined:
2794
0
        (*info->callbacks->undefined_symbol)
2795
0
    (info, name, input_bfd, input_section, rel->r_offset, true);
2796
0
        break;
2797
2798
0
      case bfd_reloc_outofrange:
2799
0
        error_message = _("out of range");
2800
0
        goto common_error;
2801
2802
0
      case bfd_reloc_notsupported:
2803
0
        error_message = _("unsupported relocation");
2804
0
        goto common_error;
2805
2806
0
      case bfd_reloc_dangerous:
2807
        /* error_message should already be set.  */
2808
0
        goto common_error;
2809
2810
0
      default:
2811
0
        error_message = _("unknown error");
2812
        /* Fall through.  */
2813
2814
0
      common_error:
2815
0
        BFD_ASSERT (error_message != NULL);
2816
0
        (*info->callbacks->reloc_dangerous)
2817
0
    (info, error_message, input_bfd, input_section, rel->r_offset);
2818
0
        break;
2819
0
      }
2820
0
  }
2821
2822
0
      if (!save_addend)
2823
0
  addend = 0;
2824
0
    }
2825
2826
0
  return true;
2827
0
}
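
The save_addend/addend handling above implements multi-part relocations: when several relocs share one r_offset (for example an S37 LO10/UP27 pair), each intermediate result is carried as the addend of the next reloc and only the last one actually patches the instruction words. A rough, self-contained sketch of that control flow, with `mini_rela` and `apply_field` as simplified stand-ins for Elf_Internal_Rela and _bfd_kvx_elf_put_addend:

#include <stddef.h>
#include <stdint.h>

/* Sketch only; field patching and symbol resolution are stubbed out.  */
struct mini_rela
{
  uint64_t r_offset;
  int64_t r_addend;
  uint64_t sym_value;   /* result of resolving the symbol */
};

static void
apply_field (const struct mini_rela *r, uint64_t value)
{
  (void) r;
  (void) value;         /* a real linker would patch the LO10/UP27/EX6 bits here */
}

void
relocate_all (struct mini_rela *rel, size_t nrelocs)
{
  uint64_t carried = 0;     /* plays the role of `addend' in relocate_section */

  for (size_t i = 0; i < nrelocs; i++)
    {
      uint64_t value = rel[i].sym_value + carried + rel[i].r_addend;
      int more_at_offset = (i + 1 < nrelocs
                            && rel[i + 1].r_offset == rel[i].r_offset);

      if (more_at_offset)
        carried = value;    /* bfd_reloc_continue: feed the next reloc */
      else
        {
          apply_field (&rel[i], value);   /* last reloc at this offset */
          carried = 0;
        }
    }
}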
2828
2829
/* Set the right machine number.  */
2830
2831
static bool
2832
elf64_kvx_object_p (bfd *abfd)
2833
3.09k
{
2834
  /* Must be coherent with the default arch in cpu-kvx.c.  */
2835
3.09k
  int e_set = bfd_mach_kv3_1;
2836
2837
3.09k
  if (elf_elfheader (abfd)->e_machine == EM_KVX)
2838
3.09k
    {
2839
3.09k
      int e_core = elf_elfheader (abfd)->e_flags & ELF_KVX_CORE_MASK;
2840
3.09k
      switch(e_core)
2841
3.09k
  {
2842
0
#if ARCH_SIZE == 64
2843
37
  case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1_64; break;
2844
2
  case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2_64; break;
2845
4
  case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1_64; break;
2846
#else
2847
  case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1; break;
2848
  case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2; break;
2849
  case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1; break;
2850
#endif
2851
3.04k
  default:
2852
3.04k
    (*_bfd_error_handler)(_("%s: Bad ELF id: `%d'"),
2853
3.04k
        abfd->filename, e_core);
2854
3.09k
  }
2855
3.09k
    }
2856
3.09k
  return bfd_default_set_arch_mach (abfd, bfd_arch_kvx, e_set);
2857
3.09k
}
2858
2859
/* Function to keep KVX specific flags in the ELF header.  */
2860
2861
static bool
2862
elf64_kvx_set_private_flags (bfd *abfd, flagword flags)
2863
0
{
2864
0
  if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
2865
0
    {
2866
0
    }
2867
0
  else
2868
0
    {
2869
0
      elf_elfheader (abfd)->e_flags = flags;
2870
0
      elf_flags_init (abfd) = true;
2871
0
    }
2872
2873
0
  return true;
2874
0
}
2875
2876
/* Merge backend specific data from an object file to the output
2877
   object file when linking.  */
2878
2879
static bool
2880
elf64_kvx_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
2881
0
{
2882
0
  bfd *obfd = info->output_bfd;
2883
0
  flagword out_flags;
2884
0
  flagword in_flags;
2885
0
  bool flags_compatible = true;
2886
0
  asection *sec;
2887
2888
  /* Check if we have the same endianness.  */
2889
0
  if (!_bfd_generic_verify_endian_match (ibfd, info))
2890
0
    return false;
2891
2892
0
  if (!is_kvx_elf (ibfd) || !is_kvx_elf (obfd))
2893
0
    return true;
2894
2895
  /* The input BFD must have had its flags initialised.  */
2896
  /* The following seems bogus to me -- The flags are initialized in
2897
     the assembler but I don't think an elf_flags_init field is
2898
     written into the object.  */
2899
  /* BFD_ASSERT (elf_flags_init (ibfd)); */
2900
2901
0
  if (bfd_get_arch_size (ibfd) != bfd_get_arch_size (obfd))
2902
0
    {
2903
0
      const char *msg;
2904
2905
0
      if (bfd_get_arch_size (ibfd) == 32
2906
0
    && bfd_get_arch_size (obfd) == 64)
2907
0
  msg = _("%s: compiled as 32-bit object and %s is 64-bit");
2908
0
      else if (bfd_get_arch_size (ibfd) == 64
2909
0
         && bfd_get_arch_size (obfd) == 32)
2910
0
  msg = _("%s: compiled as 64-bit object and %s is 32-bit");
2911
0
      else
2912
0
  msg = _("%s: object size does not match that of target %s");
2913
2914
0
      (*_bfd_error_handler) (msg, bfd_get_filename (ibfd),
2915
0
           bfd_get_filename (obfd));
2916
0
      bfd_set_error (bfd_error_wrong_format);
2917
0
      return false;
2918
0
    }
2919
2920
0
  in_flags = elf_elfheader (ibfd)->e_flags;
2921
0
  out_flags = elf_elfheader (obfd)->e_flags;
2922
2923
0
  if (!elf_flags_init (obfd))
2924
0
    {
2925
      /* If the input is the default architecture and had the default
2926
   flags then do not bother setting the flags for the output
2927
   architecture, instead allow future merges to do this.  If no
2928
   future merges ever set these flags then they will retain their
2929
   uninitialised values, which, surprise surprise, correspond
2930
   to the default values.  */
2931
0
      if (bfd_get_arch_info (ibfd)->the_default
2932
0
    && elf_elfheader (ibfd)->e_flags == 0)
2933
0
  return true;
2934
2935
0
      elf_flags_init (obfd) = true;
2936
0
      elf_elfheader (obfd)->e_flags = in_flags;
2937
2938
0
      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
2939
0
    && bfd_get_arch_info (obfd)->the_default)
2940
0
  return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
2941
0
          bfd_get_mach (ibfd));
2942
2943
0
      return true;
2944
0
    }
2945
2946
  /* Identical flags must be compatible.  */
2947
0
  if (in_flags == out_flags)
2948
0
    return true;
2949
2950
  /* Check to see if the input BFD actually contains any sections.  If
2951
     not, its flags may not have been initialised either, but it
2952
      cannot actually cause any incompatibility.  Do not short-circuit
2953
     dynamic objects; their section list may be emptied by
2954
     elf_link_add_object_symbols.
2955
2956
     Also check to see if there are no code sections in the input.
2957
     In this case there is no need to check for code specific flags.
2958
     XXX - do we need to worry about floating-point format compatibility
2959
     in data sections ?  */
2960
0
  if (!(ibfd->flags & DYNAMIC))
2961
0
    {
2962
0
      bool null_input_bfd = true;
2963
0
      bool only_data_sections = true;
2964
2965
0
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2966
0
  {
2967
0
    if ((bfd_section_flags (sec)
2968
0
         & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2969
0
        == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2970
0
      only_data_sections = false;
2971
2972
0
    null_input_bfd = false;
2973
0
    break;
2974
0
  }
2975
2976
0
      if (null_input_bfd || only_data_sections)
2977
0
  return true;
2978
0
    }
2979
0
  return flags_compatible;
2980
0
}
2981
2982
/* Display the flags field.  */
2983
2984
static bool
2985
elf64_kvx_print_private_bfd_data (bfd *abfd, void *ptr)
2986
42
{
2987
42
  FILE *file = (FILE *) ptr;
2988
42
  unsigned long flags;
2989
2990
42
  BFD_ASSERT (abfd != NULL && ptr != NULL);
2991
2992
  /* Print normal ELF private data.  */
2993
42
  _bfd_elf_print_private_bfd_data (abfd, ptr);
2994
2995
42
  flags = elf_elfheader (abfd)->e_flags;
2996
  /* Ignore init flag - it may not be set, despite the flags field
2997
     containing valid data.  */
2998
2999
  /* xgettext:c-format */
3000
42
  fprintf (file, _("Private flags = 0x%lx : "), elf_elfheader (abfd)->e_flags);
3001
42
  if((flags & ELF_KVX_ABI_64B_ADDR_BIT) == ELF_KVX_ABI_64B_ADDR_BIT)
3002
3
    {
3003
3
      if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3004
0
  fprintf (file, _("Coolidge (kv3) V1 64 bits"));
3005
3
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3006
0
  fprintf (file, _("Coolidge (kv3) V2 64 bits"));
3007
3
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3008
0
  fprintf (file, _("Coolidge (kv4) V1 64 bits"));
3009
3
    }
3010
39
  else
3011
39
    {
3012
39
      if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3013
17
  fprintf (file, _("Coolidge (kv3) V1 32 bits"));
3014
22
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3015
0
  fprintf (file, _("Coolidge (kv3) V2 32 bits"));
3016
22
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3017
1
  fprintf (file, _("Coolidge (kv4) V1 32 bits"));
3018
39
    }
3019
3020
42
  fputc ('\n', file);
3021
3022
42
  return true;
3023
42
}
3024
3025
/* Adjust a symbol defined by a dynamic object and referenced by a
3026
   regular object.  The current definition is in some section of the
3027
   dynamic object, but we're not including those sections.  We have to
3028
   change the definition to something the rest of the link can
3029
   understand.  */
3030
3031
static bool
3032
elf64_kvx_adjust_dynamic_symbol (struct bfd_link_info *info,
3033
         struct elf_link_hash_entry *h)
3034
0
{
3035
0
  struct elf_kvx_link_hash_table *htab;
3036
0
  asection *s;
3037
3038
  /* If this is a function, put it in the procedure linkage table.  We
3039
     will fill in the contents of the procedure linkage table later,
3040
     when we know the address of the .got section.  */
3041
0
  if (h->type == STT_FUNC || h->needs_plt)
3042
0
    {
3043
0
      if (h->plt.refcount <= 0
3044
0
    || ((SYMBOL_CALLS_LOCAL (info, h)
3045
0
         || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3046
0
       && h->root.type == bfd_link_hash_undefweak))))
3047
0
  {
3048
    /* This case can occur if we saw a CALL26 reloc in
3049
       an input file, but the symbol wasn't referred to
3050
       by a dynamic object, or all references were
3051
       garbage collected.  In that case we do not need a
3052
       PLT entry and can resolve the call directly.  */
3053
0
    h->plt.offset = (bfd_vma) - 1;
3054
0
    h->needs_plt = 0;
3055
0
  }
3056
3057
0
      return true;
3058
0
    }
3059
0
  else
3060
    /* Otherwise, reset to -1.  */
3061
0
    h->plt.offset = (bfd_vma) - 1;
3062
3063
3064
  /* If this is a weak symbol, and there is a real definition, the
3065
     processor independent code will have arranged for us to see the
3066
     real definition first, and we can just use the same value.  */
3067
0
  if (h->is_weakalias)
3068
0
    {
3069
0
      struct elf_link_hash_entry *def = weakdef (h);
3070
0
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
3071
0
      h->root.u.def.section = def->root.u.def.section;
3072
0
      h->root.u.def.value = def->root.u.def.value;
3073
0
      if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
3074
0
  h->non_got_ref = def->non_got_ref;
3075
0
      return true;
3076
0
    }
3077
3078
  /* If we are creating a shared library, we must presume that the
3079
     only references to the symbol are via the global offset table.
3080
     For such cases we need not do anything here; the relocations will
3081
     be handled correctly by relocate_section.  */
3082
0
  if (bfd_link_pic (info))
3083
0
    return true;
3084
3085
  /* If there are no references to this symbol that do not use the
3086
     GOT, we don't need to generate a copy reloc.  */
3087
0
  if (!h->non_got_ref)
3088
0
    return true;
3089
3090
  /* If -z nocopyreloc was given, we won't generate them either.  */
3091
0
  if (info->nocopyreloc)
3092
0
    {
3093
0
      h->non_got_ref = 0;
3094
0
      return true;
3095
0
    }
3096
3097
  /* We must allocate the symbol in our .dynbss section, which will
3098
     become part of the .bss section of the executable.  There will be
3099
     an entry for this symbol in the .dynsym section.  The dynamic
3100
     object will contain position independent code, so all references
3101
     from the dynamic object to this symbol will go through the global
3102
     offset table.  The dynamic linker will use the .dynsym entry to
3103
     determine the address it must put in the global offset table, so
3104
     both the dynamic object and the regular object will refer to the
3105
     same memory location for the variable.  */
3106
3107
0
  htab = elf_kvx_hash_table (info);
3108
3109
  /* We must generate a R_KVX_COPY reloc to tell the dynamic linker
3110
     to copy the initial value out of the dynamic object and into the
3111
     runtime process image.  */
3112
0
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
3113
0
    {
3114
0
      htab->srelbss->size += RELOC_SIZE (htab);
3115
0
      h->needs_copy = 1;
3116
0
    }
3117
3118
0
  s = htab->sdynbss;
3119
3120
0
  return _bfd_elf_adjust_dynamic_copy (info, h, s);
3121
0
}
3122
3123
static bool
3124
elf64_kvx_allocate_local_symbols (bfd *abfd, unsigned number)
3125
0
{
3126
0
  struct elf_kvx_local_symbol *locals;
3127
0
  locals = elf_kvx_locals (abfd);
3128
0
  if (locals == NULL)
3129
0
    {
3130
0
      locals = (struct elf_kvx_local_symbol *)
3131
0
  bfd_zalloc (abfd, number * sizeof (struct elf_kvx_local_symbol));
3132
0
      if (locals == NULL)
3133
0
  return false;
3134
0
      elf_kvx_locals (abfd) = locals;
3135
0
    }
3136
0
  return true;
3137
0
}
3138
3139
/* Create the .got section to hold the global offset table.  */
3140
3141
static bool
3142
kvx_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
3143
0
{
3144
0
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
3145
0
  flagword flags;
3146
0
  asection *s;
3147
0
  struct elf_link_hash_entry *h;
3148
0
  struct elf_link_hash_table *htab = elf_hash_table (info);
3149
3150
  /* This function may be called more than once.  */
3151
0
  s = bfd_get_linker_section (abfd, ".got");
3152
0
  if (s != NULL)
3153
0
    return true;
3154
3155
0
  flags = bed->dynamic_sec_flags;
3156
3157
0
  s = bfd_make_section_anyway_with_flags (abfd,
3158
0
            (bed->rela_plts_and_copies_p
3159
0
             ? ".rela.got" : ".rel.got"),
3160
0
            (bed->dynamic_sec_flags
3161
0
             | SEC_READONLY));
3162
0
  if (s == NULL
3163
0
      || !bfd_set_section_alignment (s, bed->s->log_file_align))
3164
3165
0
    return false;
3166
0
  htab->srelgot = s;
3167
3168
0
  s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
3169
0
  if (s == NULL
3170
0
      || !bfd_set_section_alignment (s, bed->s->log_file_align))
3171
0
    return false;
3172
0
  htab->sgot = s;
3173
0
  htab->sgot->size += GOT_ENTRY_SIZE;
3174
3175
0
  if (bed->want_got_sym)
3176
0
    {
3177
      /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
3178
   (or .got.plt) section.  We don't do this in the linker script
3179
   because we don't want to define the symbol if we are not creating
3180
   a global offset table.  */
3181
0
      h = _bfd_elf_define_linkage_sym (abfd, info, s,
3182
0
               "_GLOBAL_OFFSET_TABLE_");
3183
0
      elf_hash_table (info)->hgot = h;
3184
0
      if (h == NULL)
3185
0
  return false;
3186
0
    }
3187
3188
0
  if (bed->want_got_plt)
3189
0
    {
3190
0
      s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
3191
0
      if (s == NULL
3192
0
    || !bfd_set_section_alignment (s,
3193
0
           bed->s->log_file_align))
3194
0
  return false;
3195
0
      htab->sgotplt = s;
3196
0
    }
3197
3198
  /* The first bit of the global offset table is the header.  */
3199
0
  s->size += bed->got_header_size;
3200
3201
  /* We still need to handle GOT content when doing a static link with PIC.  */
3202
0
  if (bfd_link_executable (info) && !bfd_link_pic (info)) {
3203
0
    htab->dynobj = abfd;
3204
0
  }
3205
3206
0
  return true;
3207
0
}
3208
3209
/* Look through the relocs for a section during the first phase.  */
3210
3211
static bool
3212
elf64_kvx_check_relocs (bfd *abfd, struct bfd_link_info *info,
3213
          asection *sec, const Elf_Internal_Rela *relocs)
3214
0
{
3215
0
  Elf_Internal_Shdr *symtab_hdr;
3216
0
  struct elf_link_hash_entry **sym_hashes;
3217
0
  const Elf_Internal_Rela *rel;
3218
0
  const Elf_Internal_Rela *rel_end;
3219
0
  asection *sreloc;
3220
3221
0
  struct elf_kvx_link_hash_table *htab;
3222
3223
0
  if (bfd_link_relocatable (info))
3224
0
    return true;
3225
3226
0
  BFD_ASSERT (is_kvx_elf (abfd));
3227
3228
0
  htab = elf_kvx_hash_table (info);
3229
0
  sreloc = NULL;
3230
3231
0
  symtab_hdr = &elf_symtab_hdr (abfd);
3232
0
  sym_hashes = elf_sym_hashes (abfd);
3233
3234
0
  rel_end = relocs + sec->reloc_count;
3235
0
  for (rel = relocs; rel < rel_end; rel++)
3236
0
    {
3237
0
      struct elf_link_hash_entry *h;
3238
0
      unsigned int r_symndx;
3239
0
      unsigned int r_type;
3240
0
      bfd_reloc_code_real_type bfd_r_type;
3241
0
      Elf_Internal_Sym *isym;
3242
3243
0
      r_symndx = ELF64_R_SYM (rel->r_info);
3244
0
      r_type = ELF64_R_TYPE (rel->r_info);
3245
3246
0
      if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
3247
0
  {
3248
    /* xgettext:c-format */
3249
0
    _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
3250
0
    return false;
3251
0
  }
3252
3253
0
      if (r_symndx < symtab_hdr->sh_info)
3254
0
  {
3255
    /* A local symbol.  */
3256
0
    isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3257
0
          abfd, r_symndx);
3258
0
    if (isym == NULL)
3259
0
      return false;
3260
3261
0
    h = NULL;
3262
0
  }
3263
0
      else
3264
0
  {
3265
0
    h = sym_hashes[r_symndx - symtab_hdr->sh_info];
3266
0
    while (h->root.type == bfd_link_hash_indirect
3267
0
     || h->root.type == bfd_link_hash_warning)
3268
0
      h = (struct elf_link_hash_entry *) h->root.u.i.link;
3269
0
  }
3270
3271
      /* Could be done earlier, if h were already available.  */
3272
0
      bfd_r_type = kvx_tls_transition (abfd, info, r_type, h, r_symndx);
3273
3274
0
      if (h != NULL)
3275
0
  {
3276
    /* Create the ifunc sections for static executables.  If we
3277
       never see an indirect function symbol nor are we building
3278
       a static executable, those sections will be empty and
3279
       won't appear in output.  */
3280
0
    switch (bfd_r_type)
3281
0
      {
3282
0
      default:
3283
0
        break;
3284
0
      }
3285
3286
    /* It is referenced by a non-shared object. */
3287
0
    h->ref_regular = 1;
3288
0
  }
3289
3290
0
      switch (bfd_r_type)
3291
0
  {
3292
3293
0
  case BFD_RELOC_KVX_S43_LO10:
3294
0
  case BFD_RELOC_KVX_S43_UP27:
3295
0
  case BFD_RELOC_KVX_S43_EX6:
3296
3297
0
  case BFD_RELOC_KVX_S37_LO10:
3298
0
  case BFD_RELOC_KVX_S37_UP27:
3299
3300
0
  case BFD_RELOC_KVX_S64_LO10:
3301
0
  case BFD_RELOC_KVX_S64_UP27:
3302
0
  case BFD_RELOC_KVX_S64_EX27:
3303
3304
0
  case BFD_RELOC_KVX_32:
3305
0
  case BFD_RELOC_KVX_64:
3306
3307
    /* We don't need to handle relocs into sections not going into
3308
       the "real" output.  */
3309
0
    if ((sec->flags & SEC_ALLOC) == 0)
3310
0
      break;
3311
3312
0
    if (h != NULL)
3313
0
      {
3314
0
        if (!bfd_link_pic (info))
3315
0
    h->non_got_ref = 1;
3316
3317
0
        h->plt.refcount += 1;
3318
0
        h->pointer_equality_needed = 1;
3319
0
      }
3320
3321
    /* No need to do anything if we're not creating a shared
3322
       object.  */
3323
0
    if (! bfd_link_pic (info))
3324
0
      break;
3325
3326
0
    {
3327
0
      struct elf_dyn_relocs *p;
3328
0
      struct elf_dyn_relocs **head;
3329
3330
      /* We must copy these reloc types into the output file.
3331
         Create a reloc section in dynobj and make room for
3332
         this reloc.  */
3333
0
      if (sreloc == NULL)
3334
0
        {
3335
0
    if (htab->root.dynobj == NULL)
3336
0
      htab->root.dynobj = abfd;
3337
3338
0
    sreloc = _bfd_elf_make_dynamic_reloc_section
3339
0
      (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);
3340
3341
0
    if (sreloc == NULL)
3342
0
      return false;
3343
0
        }
3344
3345
      /* If this is a global symbol, we count the number of
3346
         relocations we need for this symbol.  */
3347
0
      if (h != NULL)
3348
0
        {
3349
0
    head = &h->dyn_relocs;
3350
0
        }
3351
0
      else
3352
0
        {
3353
    /* Track dynamic relocs needed for local syms too.
3354
       We really need local syms available to do this
3355
       easily.  Oh well.  */
3356
3357
0
    asection *s;
3358
0
    void **vpp;
3359
3360
0
    isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3361
0
                abfd, r_symndx);
3362
0
    if (isym == NULL)
3363
0
      return false;
3364
3365
0
    s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3366
0
    if (s == NULL)
3367
0
      s = sec;
3368
3369
    /* Beware of type punned pointers vs strict aliasing
3370
       rules.  */
3371
0
    vpp = &(elf_section_data (s)->local_dynrel);
3372
0
    head = (struct elf_dyn_relocs **) vpp;
3373
0
        }
3374
3375
0
      p = *head;
3376
0
      if (p == NULL || p->sec != sec)
3377
0
        {
3378
0
    bfd_size_type amt = sizeof *p;
3379
0
    p = ((struct elf_dyn_relocs *)
3380
0
         bfd_zalloc (htab->root.dynobj, amt));
3381
0
    if (p == NULL)
3382
0
      return false;
3383
0
    p->next = *head;
3384
0
    *head = p;
3385
0
    p->sec = sec;
3386
0
        }
3387
3388
0
      p->count += 1;
3389
3390
0
    }
3391
0
    break;
3392
3393
0
  case BFD_RELOC_KVX_S37_GOT_LO10:
3394
0
  case BFD_RELOC_KVX_S37_GOT_UP27:
3395
3396
0
  case BFD_RELOC_KVX_S37_GOTOFF_LO10:
3397
0
  case BFD_RELOC_KVX_S37_GOTOFF_UP27:
3398
3399
0
  case BFD_RELOC_KVX_S43_GOT_LO10:
3400
0
  case BFD_RELOC_KVX_S43_GOT_UP27:
3401
0
  case BFD_RELOC_KVX_S43_GOT_EX6:
3402
3403
0
  case BFD_RELOC_KVX_S43_GOTOFF_LO10:
3404
0
  case BFD_RELOC_KVX_S43_GOTOFF_UP27:
3405
0
  case BFD_RELOC_KVX_S43_GOTOFF_EX6:
3406
3407
0
  case BFD_RELOC_KVX_S37_TLS_GD_LO10:
3408
0
  case BFD_RELOC_KVX_S37_TLS_GD_UP27:
3409
3410
0
  case BFD_RELOC_KVX_S43_TLS_GD_LO10:
3411
0
  case BFD_RELOC_KVX_S43_TLS_GD_UP27:
3412
0
  case BFD_RELOC_KVX_S43_TLS_GD_EX6:
3413
3414
0
  case BFD_RELOC_KVX_S37_TLS_IE_LO10:
3415
0
  case BFD_RELOC_KVX_S37_TLS_IE_UP27:
3416
3417
0
  case BFD_RELOC_KVX_S43_TLS_IE_LO10:
3418
0
  case BFD_RELOC_KVX_S43_TLS_IE_UP27:
3419
0
  case BFD_RELOC_KVX_S43_TLS_IE_EX6:
3420
3421
0
  case BFD_RELOC_KVX_S37_TLS_LD_LO10:
3422
0
  case BFD_RELOC_KVX_S37_TLS_LD_UP27:
3423
3424
0
  case BFD_RELOC_KVX_S43_TLS_LD_LO10:
3425
0
  case BFD_RELOC_KVX_S43_TLS_LD_UP27:
3426
0
  case BFD_RELOC_KVX_S43_TLS_LD_EX6:
3427
0
    {
3428
0
      unsigned got_type;
3429
0
      unsigned old_got_type;
3430
3431
0
      got_type = kvx_reloc_got_type (bfd_r_type);
3432
3433
0
      if (h)
3434
0
        {
3435
0
    h->got.refcount += 1;
3436
0
    old_got_type = elf_kvx_hash_entry (h)->got_type;
3437
0
        }
3438
0
      else
3439
0
        {
3440
0
    struct elf_kvx_local_symbol *locals;
3441
3442
0
    if (!elf64_kvx_allocate_local_symbols
3443
0
        (abfd, symtab_hdr->sh_info))
3444
0
      return false;
3445
3446
0
    locals = elf_kvx_locals (abfd);
3447
0
    BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
3448
0
    locals[r_symndx].got_refcount += 1;
3449
0
    old_got_type = locals[r_symndx].got_type;
3450
0
        }
3451
3452
      /* We will already have issued an error message if there
3453
         is a TLS/non-TLS mismatch, based on the symbol type.
3454
         So just combine any TLS types needed.  */
3455
0
      if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
3456
0
    && got_type != GOT_NORMAL)
3457
0
        got_type |= old_got_type;
3458
3459
      /* If the symbol is accessed by both IE and GD methods, we
3460
         are able to relax.  Turn off the GD flag, without
3461
         messing up any other kind of TLS type that may be
3462
         involved.  */
3463
      /* Disabled: this TLS relaxation is untested and unused.  */
3464
      /* if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type)) */
3465
      /*   got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD); */
3466
3467
0
      if (old_got_type != got_type)
3468
0
        {
3469
0
    if (h != NULL)
3470
0
      elf_kvx_hash_entry (h)->got_type = got_type;
3471
0
    else
3472
0
      {
3473
0
        struct elf_kvx_local_symbol *locals;
3474
0
        locals = elf_kvx_locals (abfd);
3475
0
        BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
3476
0
        locals[r_symndx].got_type = got_type;
3477
0
      }
3478
0
        }
3479
3480
0
      if (htab->root.dynobj == NULL)
3481
0
        htab->root.dynobj = abfd;
3482
0
      if (! kvx_elf_create_got_section (htab->root.dynobj, info))
3483
0
        return false;
3484
0
      break;
3485
0
    }
3486
3487
0
  case BFD_RELOC_KVX_S64_GOTADDR_LO10:
3488
0
  case BFD_RELOC_KVX_S64_GOTADDR_UP27:
3489
0
  case BFD_RELOC_KVX_S64_GOTADDR_EX27:
3490
3491
0
  case BFD_RELOC_KVX_S43_GOTADDR_LO10:
3492
0
  case BFD_RELOC_KVX_S43_GOTADDR_UP27:
3493
0
  case BFD_RELOC_KVX_S43_GOTADDR_EX6:
3494
3495
0
  case BFD_RELOC_KVX_S37_GOTADDR_LO10:
3496
0
  case BFD_RELOC_KVX_S37_GOTADDR_UP27:
3497
3498
0
    if (htab->root.dynobj == NULL)
3499
0
      htab->root.dynobj = abfd;
3500
0
    if (! kvx_elf_create_got_section (htab->root.dynobj, info))
3501
0
      return false;
3502
0
    break;
3503
3504
0
  case BFD_RELOC_KVX_PCREL27:
3505
0
  case BFD_RELOC_KVX_PCREL17:
3506
    /* If this is a local symbol then we resolve it
3507
       directly without creating a PLT entry.  */
3508
0
    if (h == NULL)
3509
0
      continue;
3510
3511
0
    h->needs_plt = 1;
3512
0
    if (h->plt.refcount <= 0)
3513
0
      h->plt.refcount = 1;
3514
0
    else
3515
0
      h->plt.refcount += 1;
3516
0
    break;
3517
3518
0
  default:
3519
0
    break;
3520
0
  }
3521
0
    }
3522
3523
0
  return true;
3524
0
}
3525
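
The TLS GOT-type bookkeeping in elf64_kvx_check_relocs above merges the access kinds seen for one symbol with a plain bitwise OR.  A minimal standalone sketch of that merge rule follows, using hypothetical flag values (the real GOT_UNKNOWN/GOT_NORMAL/GOT_TLS_* constants are defined elsewhere in the KVX backend headers and may differ):

#include <stdio.h>

/* Hypothetical flag values, for illustration only.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL  1
#define GOT_TLS_GD  2
#define GOT_TLS_IE  4
#define GOT_TLS_LD  8

int main (void)
{
  unsigned old_got_type = GOT_TLS_IE;   /* first reloc seen was an IE access */
  unsigned got_type = GOT_TLS_GD;       /* now a GD access to the same symbol */

  /* Mirror the merge rule above: TLS kinds accumulate, while GOT_NORMAL
     never mixes with TLS kinds.  */
  if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
      && got_type != GOT_NORMAL)
    got_type |= old_got_type;

  printf ("merged got_type = %#x (GD|IE)\n", got_type);
  return 0;
}
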
3526
static bool
3527
elf64_kvx_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
3528
0
{
3529
0
  Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form.  */
3530
3531
0
  if (!_bfd_elf_init_file_header (abfd, link_info))
3532
0
    return false;
3533
3534
0
  i_ehdrp = elf_elfheader (abfd);
3535
0
  i_ehdrp->e_ident[EI_ABIVERSION] = KVX_ELF_ABI_VERSION;
3536
0
  return true;
3537
0
}
3538
3539
static enum elf_reloc_type_class
3540
elf64_kvx_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
3541
        const asection *rel_sec ATTRIBUTE_UNUSED,
3542
        const Elf_Internal_Rela *rela)
3543
0
{
3544
0
  switch ((int) ELF64_R_TYPE (rela->r_info))
3545
0
    {
3546
0
    case R_KVX_RELATIVE:
3547
0
      return reloc_class_relative;
3548
0
    case R_KVX_JMP_SLOT:
3549
0
      return reloc_class_plt;
3550
0
    case R_KVX_COPY:
3551
0
      return reloc_class_copy;
3552
0
    default:
3553
0
      return reloc_class_normal;
3554
0
    }
3555
0
}
3556
3557
/* A structure used to record a list of sections, independently
3558
   of the next and prev fields in the asection structure.  */
3559
typedef struct section_list
3560
{
3561
  asection *sec;
3562
  struct section_list *next;
3563
  struct section_list *prev;
3564
}
3565
section_list;
3566
3567
typedef struct
3568
{
3569
  void *finfo;
3570
  struct bfd_link_info *info;
3571
  asection *sec;
3572
  int sec_shndx;
3573
  int (*func) (void *, const char *, Elf_Internal_Sym *,
3574
         asection *, struct elf_link_hash_entry *);
3575
} output_arch_syminfo;
3576
3577
/* Output a single local symbol for a generated stub.  */
3578
3579
static bool
3580
elf64_kvx_output_stub_sym (output_arch_syminfo *osi, const char *name,
3581
             bfd_vma offset, bfd_vma size)
3582
0
{
3583
0
  Elf_Internal_Sym sym;
3584
3585
0
  sym.st_value = (osi->sec->output_section->vma
3586
0
      + osi->sec->output_offset + offset);
3587
0
  sym.st_size = size;
3588
0
  sym.st_other = 0;
3589
0
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
3590
0
  sym.st_shndx = osi->sec_shndx;
3591
0
  return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
3592
0
}
3593
3594
static bool
3595
kvx_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3596
0
{
3597
0
  struct elf_kvx_stub_hash_entry *stub_entry;
3598
0
  asection *stub_sec;
3599
0
  bfd_vma addr;
3600
0
  char *stub_name;
3601
0
  output_arch_syminfo *osi;
3602
3603
  /* Massage our args to the form they really have.  */
3604
0
  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
3605
0
  osi = (output_arch_syminfo *) in_arg;
3606
3607
0
  stub_sec = stub_entry->stub_sec;
3608
3609
  /* Ensure this stub is attached to the current section being
3610
     processed.  */
3611
0
  if (stub_sec != osi->sec)
3612
0
    return true;
3613
3614
0
  addr = (bfd_vma) stub_entry->stub_offset;
3615
3616
0
  stub_name = stub_entry->output_name;
3617
3618
0
  switch (stub_entry->stub_type)
3619
0
    {
3620
0
    case kvx_stub_long_branch:
3621
0
      if (!elf64_kvx_output_stub_sym
3622
0
    (osi, stub_name, addr, sizeof (elf64_kvx_long_branch_stub)))
3623
0
  return false;
3624
0
      break;
3625
3626
0
    default:
3627
0
      abort ();
3628
0
    }
3629
3630
0
  return true;
3631
0
}
3632
3633
/* Output mapping symbols for linker generated sections.  */
3634
3635
static bool
3636
elf64_kvx_output_arch_local_syms (bfd *output_bfd,
3637
          struct bfd_link_info *info,
3638
          void *finfo,
3639
          int (*func) (void *, const char *,
3640
                 Elf_Internal_Sym *,
3641
                 asection *,
3642
                 struct elf_link_hash_entry *))
3643
0
{
3644
0
  output_arch_syminfo osi;
3645
0
  struct elf_kvx_link_hash_table *htab;
3646
3647
0
  htab = elf_kvx_hash_table (info);
3648
3649
0
  osi.finfo = finfo;
3650
0
  osi.info = info;
3651
0
  osi.func = func;
3652
3653
  /* Long call stubs.  */
3654
0
  if (htab->stub_bfd && htab->stub_bfd->sections)
3655
0
    {
3656
0
      asection *stub_sec;
3657
3658
0
      for (stub_sec = htab->stub_bfd->sections;
3659
0
     stub_sec != NULL; stub_sec = stub_sec->next)
3660
0
  {
3661
    /* Ignore non-stub sections.  */
3662
0
    if (!strstr (stub_sec->name, STUB_SUFFIX))
3663
0
      continue;
3664
3665
0
    osi.sec = stub_sec;
3666
3667
0
    osi.sec_shndx = _bfd_elf_section_from_bfd_section
3668
0
      (output_bfd, osi.sec->output_section);
3669
3670
0
    bfd_hash_traverse (&htab->stub_hash_table, kvx_map_one_stub,
3671
0
           &osi);
3672
0
  }
3673
0
    }
3674
3675
  /* Finally, output mapping symbols for the PLT.  */
3676
0
  if (!htab->root.splt || htab->root.splt->size == 0)
3677
0
    return true;
3678
3679
0
  osi.sec_shndx = _bfd_elf_section_from_bfd_section
3680
0
    (output_bfd, htab->root.splt->output_section);
3681
0
  osi.sec = htab->root.splt;
3682
3683
0
  return true;
3684
3685
0
}
3686
3687
/* Allocate target specific section data.  */
3688
3689
static bool
3690
elf64_kvx_new_section_hook (bfd *abfd, asection *sec)
3691
1.47k
{
3692
1.47k
  if (!sec->used_by_bfd)
3693
1.47k
    {
3694
1.47k
      _kvx_elf_section_data *sdata;
3695
1.47k
      bfd_size_type amt = sizeof (*sdata);
3696
3697
1.47k
      sdata = bfd_zalloc (abfd, amt);
3698
1.47k
      if (sdata == NULL)
3699
0
  return false;
3700
1.47k
      sec->used_by_bfd = sdata;
3701
1.47k
    }
3702
3703
1.47k
  return _bfd_elf_new_section_hook (abfd, sec);
3704
1.47k
}
3705
3706
/* Create dynamic sections. This is different from the ARM backend in that
3707
   the got, plt, gotplt and their relocation sections are all created in the
3708
   standard part of the bfd elf backend.  */
3709
3710
static bool
3711
elf64_kvx_create_dynamic_sections (bfd *dynobj,
3712
           struct bfd_link_info *info)
3713
0
{
3714
0
  struct elf_kvx_link_hash_table *htab;
3715
3716
  /* We need to create .got section.  */
3717
0
  if (!kvx_elf_create_got_section (dynobj, info))
3718
0
    return false;
3719
3720
0
  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3721
0
    return false;
3722
3723
0
  htab = elf_kvx_hash_table (info);
3724
0
  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3725
0
  if (!bfd_link_pic (info))
3726
0
    htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
3727
3728
0
  if (!htab->sdynbss || (!bfd_link_pic (info) && !htab->srelbss))
3729
0
    abort ();
3730
3731
0
  return true;
3732
0
}
3733
3734
3735
/* Allocate space in .plt, .got and associated reloc sections for
3736
   dynamic relocs.  */
3737
3738
static bool
3739
elf64_kvx_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
3740
0
{
3741
0
  struct bfd_link_info *info;
3742
0
  struct elf_kvx_link_hash_table *htab;
3743
0
  struct elf_dyn_relocs *p;
3744
3745
  /* An example of a bfd_link_hash_indirect symbol is a versioned
3746
     symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
3747
     -> __gxx_personality_v0(bfd_link_hash_defined)
3748
3749
     There is no need to process bfd_link_hash_indirect symbols here
3750
     because we will also be presented with the concrete instance of
3751
     the symbol and elf64_kvx_copy_indirect_symbol () will have been
3752
     called to copy all relevant data from the generic to the concrete
3753
     symbol instance.  */
3754
0
  if (h->root.type == bfd_link_hash_indirect)
3755
0
    return true;
3756
3757
0
  if (h->root.type == bfd_link_hash_warning)
3758
0
    h = (struct elf_link_hash_entry *) h->root.u.i.link;
3759
3760
0
  info = (struct bfd_link_info *) inf;
3761
0
  htab = elf_kvx_hash_table (info);
3762
3763
0
  if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
3764
0
    {
3765
      /* Make sure this symbol is output as a dynamic symbol.
3766
   Undefined weak syms won't yet be marked as dynamic.  */
3767
0
      if (h->dynindx == -1 && !h->forced_local)
3768
0
  {
3769
0
    if (!bfd_elf_link_record_dynamic_symbol (info, h))
3770
0
      return false;
3771
0
  }
3772
3773
0
      if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3774
0
  {
3775
0
    asection *s = htab->root.splt;
3776
3777
    /* If this is the first .plt entry, make room for the special
3778
       first entry.  */
3779
0
    if (s->size == 0)
3780
0
      s->size += htab->plt_header_size;
3781
3782
0
    h->plt.offset = s->size;
3783
3784
    /* If this symbol is not defined in a regular file, and we are
3785
       not generating a shared library, then set the symbol to this
3786
       location in the .plt.  This is required to make function
3787
       pointers compare as equal between the normal executable and
3788
       the shared library.  */
3789
0
    if (!bfd_link_pic (info) && !h->def_regular)
3790
0
      {
3791
0
        h->root.u.def.section = s;
3792
0
        h->root.u.def.value = h->plt.offset;
3793
0
      }
3794
3795
    /* Make room for this entry. For now we only create the
3796
       small model PLT entries. We later need to find a way
3797
       of relaxing into these from the large model PLT entries.  */
3798
0
    s->size += PLT_SMALL_ENTRY_SIZE;
3799
3800
    /* We also need to make an entry in the .got.plt section, which
3801
       will be placed in the .got section by the linker script.  */
3802
0
    htab->root.sgotplt->size += GOT_ENTRY_SIZE;
3803
3804
    /* We also need to make an entry in the .rela.plt section.  */
3805
0
    htab->root.srelplt->size += RELOC_SIZE (htab);
3806
3807
    /* We need to ensure that all GOT entries that serve the PLT
3808
       are consecutive with the special GOT slots [0] [1] and
3809
       [2]. Any additional relocations must be placed after the
3810
       PLT related entries.  We abuse the reloc_count such that
3811
       during sizing we adjust reloc_count to indicate the
3812
       number of PLT related reserved entries.  In subsequent
3813
       phases when filling in the contents of the reloc entries,
3814
       PLT related entries are placed by computing their PLT
3815
       index (0 .. reloc_count), while other non-PLT relocs are
3816
       placed at the slot indicated by reloc_count and
3817
       reloc_count is updated.  */
3818
3819
0
    htab->root.srelplt->reloc_count++;
3820
0
  }
3821
0
      else
3822
0
  {
3823
0
    h->plt.offset = (bfd_vma) - 1;
3824
0
    h->needs_plt = 0;
3825
0
  }
3826
0
    }
3827
0
  else
3828
0
    {
3829
0
      h->plt.offset = (bfd_vma) - 1;
3830
0
      h->needs_plt = 0;
3831
0
    }
3832
3833
0
  if (h->got.refcount > 0)
3834
0
    {
3835
0
      bool dyn;
3836
0
      unsigned got_type = elf_kvx_hash_entry (h)->got_type;
3837
3838
0
      h->got.offset = (bfd_vma) - 1;
3839
3840
0
      dyn = htab->root.dynamic_sections_created;
3841
3842
      /* Make sure this symbol is output as a dynamic symbol.
3843
   Undefined weak syms won't yet be marked as dynamic.  */
3844
0
      if (dyn && h->dynindx == -1 && !h->forced_local)
3845
0
  {
3846
0
    if (!bfd_elf_link_record_dynamic_symbol (info, h))
3847
0
      return false;
3848
0
  }
3849
3850
0
      if (got_type == GOT_UNKNOWN)
3851
0
  {
3852
0
    (*_bfd_error_handler)
3853
0
      (_("relocation against `%s' has faulty GOT type "),
3854
0
       (h) ? h->root.root.string : "a local symbol");
3855
0
    bfd_set_error (bfd_error_bad_value);
3856
0
    return false;
3857
0
  }
3858
0
      else if (got_type == GOT_NORMAL)
3859
0
  {
3860
0
    h->got.offset = htab->root.sgot->size;
3861
0
    htab->root.sgot->size += GOT_ENTRY_SIZE;
3862
0
    if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3863
0
         || h->root.type != bfd_link_hash_undefweak)
3864
0
        && (bfd_link_pic (info)
3865
0
      || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3866
0
      {
3867
0
        htab->root.srelgot->size += RELOC_SIZE (htab);
3868
0
      }
3869
0
  }
3870
0
      else
3871
0
  {
3872
0
    int indx;
3873
3874
    /* Any of these will require 2 GOT slots because
3875
     * they use __tls_get_addr() */
3876
0
    if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
3877
0
      {
3878
0
        h->got.offset = htab->root.sgot->size;
3879
0
        htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
3880
0
      }
3881
3882
0
    if (got_type & GOT_TLS_IE)
3883
0
      {
3884
0
        h->got.offset = htab->root.sgot->size;
3885
0
        htab->root.sgot->size += GOT_ENTRY_SIZE;
3886
0
      }
3887
3888
0
    indx = h && h->dynindx != -1 ? h->dynindx : 0;
3889
0
    if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3890
0
         || h->root.type != bfd_link_hash_undefweak)
3891
0
        && (bfd_link_pic (info)
3892
0
      || indx != 0
3893
0
      || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3894
0
      {
3895
        /* Only the GD case requires 2 relocations. */
3896
0
        if (got_type & GOT_TLS_GD)
3897
0
    htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
3898
3899
        /* LD needs a DTPMOD reloc, IE needs a DTPOFF. */
3900
0
        if (got_type & (GOT_TLS_LD | GOT_TLS_IE))
3901
0
    htab->root.srelgot->size += RELOC_SIZE (htab);
3902
0
      }
3903
0
  }
3904
0
    }
3905
0
  else
3906
0
    {
3907
0
      h->got.offset = (bfd_vma) - 1;
3908
0
    }
3909
3910
0
  if (h->dyn_relocs == NULL)
3911
0
    return true;
3912
3913
  /* In the shared -Bsymbolic case, discard space allocated for
3914
     dynamic pc-relative relocs against symbols which turn out to be
3915
     defined in regular objects.  For the normal shared case, discard
3916
     space for pc-relative relocs that have become local due to symbol
3917
     visibility changes.  */
3918
3919
0
  if (bfd_link_pic (info))
3920
0
    {
3921
      /* Relocs that use pc_count are those that appear on a call
3922
   insn, or certain REL relocs that can be generated via assembly.
3923
   We want calls to protected symbols to resolve directly to the
3924
   function rather than going via the plt.  If people want
3925
   function pointer comparisons to work as expected then they
3926
   should avoid writing weird assembly.  */
3927
0
      if (SYMBOL_CALLS_LOCAL (info, h))
3928
0
  {
3929
0
    struct elf_dyn_relocs **pp;
3930
3931
0
    for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
3932
0
      {
3933
0
        p->count -= p->pc_count;
3934
0
        p->pc_count = 0;
3935
0
        if (p->count == 0)
3936
0
    *pp = p->next;
3937
0
        else
3938
0
    pp = &p->next;
3939
0
      }
3940
0
  }
3941
3942
      /* Also discard relocs on undefined weak syms with non-default
3943
   visibility.  */
3944
0
      if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
3945
0
  {
3946
0
    if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3947
0
        || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
3948
0
      h->dyn_relocs = NULL;
3949
3950
    /* Make sure undefined weak symbols are output as a dynamic
3951
       symbol in PIEs.  */
3952
0
    else if (h->dynindx == -1
3953
0
       && !h->forced_local
3954
0
       && !bfd_elf_link_record_dynamic_symbol (info, h))
3955
0
      return false;
3956
0
  }
3957
3958
0
    }
3959
0
  else if (ELIMINATE_COPY_RELOCS)
3960
0
    {
3961
      /* For the non-shared case, discard space for relocs against
3962
   symbols which turn out to need copy relocs or are not
3963
   dynamic.  */
3964
3965
0
      if (!h->non_got_ref
3966
0
    && ((h->def_dynamic
3967
0
         && !h->def_regular)
3968
0
        || (htab->root.dynamic_sections_created
3969
0
      && (h->root.type == bfd_link_hash_undefweak
3970
0
          || h->root.type == bfd_link_hash_undefined))))
3971
0
  {
3972
    /* Make sure this symbol is output as a dynamic symbol.
3973
       Undefined weak syms won't yet be marked as dynamic.  */
3974
0
    if (h->dynindx == -1
3975
0
        && !h->forced_local
3976
0
        && !bfd_elf_link_record_dynamic_symbol (info, h))
3977
0
      return false;
3978
3979
    /* If that succeeded, we know we'll be keeping all the
3980
       relocs.  */
3981
0
    if (h->dynindx != -1)
3982
0
      goto keep;
3983
0
  }
3984
3985
0
      h->dyn_relocs = NULL;
3986
3987
0
    keep:;
3988
0
    }
3989
3990
  /* Finally, allocate space.  */
3991
0
  for (p = h->dyn_relocs; p != NULL; p = p->next)
3992
0
    {
3993
0
      asection *sreloc;
3994
3995
0
      sreloc = elf_section_data (p->sec)->sreloc;
3996
3997
0
      BFD_ASSERT (sreloc != NULL);
3998
3999
0
      sreloc->size += p->count * RELOC_SIZE (htab);
4000
0
    }
4001
4002
0
  return true;
4003
0
}
4004
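
As a rough illustration of the sizing rules in elf64_kvx_allocate_dynrelocs above, the standalone sketch below totals the space a handful of PLT-using symbols would claim.  It assumes the PLT0 header occupies PLT_ENTRY_SIZE bytes, that .got.plt starts with the three reserved slots, and that one RELA entry is 24 bytes (sizeof (Elf64_External_Rela)); the constants are copied from this file for illustration only.

#include <stdio.h>
#include <inttypes.h>

#define GOT_ENTRY_SIZE       8        /* ELF64 GOT slot.  */
#define PLT_ENTRY_SIZE       32       /* Assumed size of the PLT0 header.  */
#define PLT_SMALL_ENTRY_SIZE (4 * 4)  /* Small-model PLTn entry.  */
#define RELA_ENTRY_SIZE      24       /* sizeof (Elf64_External_Rela).  */

int main (void)
{
  unsigned nsyms = 5;   /* symbols that were given a PLT entry */

  uint64_t plt_size    = PLT_ENTRY_SIZE + (uint64_t) nsyms * PLT_SMALL_ENTRY_SIZE;
  uint64_t gotplt_size = 3 * GOT_ENTRY_SIZE              /* reserved header slots */
                         + (uint64_t) nsyms * GOT_ENTRY_SIZE;
  uint64_t relplt_size = (uint64_t) nsyms * RELA_ENTRY_SIZE;

  printf (".plt %" PRIu64 ", .got.plt %" PRIu64 ", .rela.plt %" PRIu64 " bytes\n",
          plt_size, gotplt_size, relplt_size);
  return 0;
}
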
4005
/* Find any dynamic relocs that apply to read-only sections.  */
4006
4007
static bool
4008
kvx_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
4009
0
{
4010
0
  struct elf_dyn_relocs * p;
4011
4012
0
  for (p = h->dyn_relocs; p != NULL; p = p->next)
4013
0
    {
4014
0
      asection *s = p->sec;
4015
4016
0
      if (s != NULL && (s->flags & SEC_READONLY) != 0)
4017
0
  {
4018
0
    struct bfd_link_info *info = (struct bfd_link_info *) inf;
4019
4020
0
    info->flags |= DF_TEXTREL;
4021
0
    info->callbacks->minfo (_("%pB: dynamic relocation against `%pT' in "
4022
0
            "read-only section `%pA'\n"),
4023
0
          s->owner, h->root.root.string, s);
4024
4025
    /* Not an error, just cut short the traversal.  */
4026
0
    return false;
4027
0
  }
4028
0
    }
4029
0
  return true;
4030
0
}
4031
4032
/* This is the most important function of all.  Innocuously named
4033
   though!  */
4034
static bool
4035
elf64_kvx_late_size_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
4036
            struct bfd_link_info *info)
4037
0
{
4038
0
  struct elf_kvx_link_hash_table *htab;
4039
0
  bfd *dynobj;
4040
0
  asection *s;
4041
0
  bool relocs;
4042
0
  bfd *ibfd;
4043
4044
0
  htab = elf_kvx_hash_table ((info));
4045
0
  dynobj = htab->root.dynobj;
4046
0
  if (dynobj == NULL)
4047
0
    return true;
4048
4049
0
  if (htab->root.dynamic_sections_created)
4050
0
    {
4051
0
      if (bfd_link_executable (info) && !info->nointerp)
4052
0
  {
4053
0
    s = bfd_get_linker_section (dynobj, ".interp");
4054
0
    if (s == NULL)
4055
0
      abort ();
4056
0
    s->size = sizeof ELF_DYNAMIC_INTERPRETER;
4057
0
    s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
4058
0
  }
4059
0
    }
4060
4061
  /* Set up .got offsets for local syms, and space for local dynamic
4062
     relocs.  */
4063
0
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4064
0
    {
4065
0
      struct elf_kvx_local_symbol *locals = NULL;
4066
0
      Elf_Internal_Shdr *symtab_hdr;
4067
0
      asection *srel;
4068
0
      unsigned int i;
4069
4070
0
      if (!is_kvx_elf (ibfd))
4071
0
  continue;
4072
4073
0
      for (s = ibfd->sections; s != NULL; s = s->next)
4074
0
  {
4075
0
    struct elf_dyn_relocs *p;
4076
4077
0
    for (p = (struct elf_dyn_relocs *)
4078
0
     (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
4079
0
      {
4080
0
        if (!bfd_is_abs_section (p->sec)
4081
0
      && bfd_is_abs_section (p->sec->output_section))
4082
0
    {
4083
      /* Input section has been discarded, either because
4084
         it is a copy of a linkonce section or due to
4085
         linker script /DISCARD/, so we'll be discarding
4086
         the relocs too.  */
4087
0
    }
4088
0
        else if (p->count != 0)
4089
0
    {
4090
0
      srel = elf_section_data (p->sec)->sreloc;
4091
0
      srel->size += p->count * RELOC_SIZE (htab);
4092
0
      if ((p->sec->output_section->flags & SEC_READONLY) != 0)
4093
0
        info->flags |= DF_TEXTREL;
4094
0
    }
4095
0
      }
4096
0
  }
4097
4098
0
      locals = elf_kvx_locals (ibfd);
4099
0
      if (!locals)
4100
0
  continue;
4101
4102
0
      symtab_hdr = &elf_symtab_hdr (ibfd);
4103
0
      srel = htab->root.srelgot;
4104
0
      for (i = 0; i < symtab_hdr->sh_info; i++)
4105
0
  {
4106
0
    locals[i].got_offset = (bfd_vma) - 1;
4107
0
    if (locals[i].got_refcount > 0)
4108
0
      {
4109
0
        unsigned got_type = locals[i].got_type;
4110
0
        if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
4111
0
    {
4112
0
      locals[i].got_offset = htab->root.sgot->size;
4113
0
      htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
4114
0
    }
4115
4116
0
        if (got_type & (GOT_NORMAL | GOT_TLS_IE ))
4117
0
    {
4118
0
      locals[i].got_offset = htab->root.sgot->size;
4119
0
      htab->root.sgot->size += GOT_ENTRY_SIZE;
4120
0
    }
4121
4122
0
        if (got_type == GOT_UNKNOWN)
4123
0
    {
4124
0
    }
4125
4126
0
        if (bfd_link_pic (info))
4127
0
    {
4128
0
      if (got_type & GOT_TLS_GD)
4129
0
        htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
4130
4131
0
      if (got_type & GOT_TLS_IE
4132
0
          || got_type & GOT_TLS_LD
4133
0
          || got_type & GOT_NORMAL)
4134
0
        htab->root.srelgot->size += RELOC_SIZE (htab);
4135
0
    }
4136
0
      }
4137
0
    else
4138
0
      {
4139
0
        locals[i].got_refcount = (bfd_vma) - 1;
4140
0
      }
4141
0
  }
4142
0
    }
4143
4144
4145
  /* Allocate global sym .plt and .got entries, and space for global
4146
     sym dynamic relocs.  */
4147
0
  elf_link_hash_traverse (&htab->root, elf64_kvx_allocate_dynrelocs,
4148
0
        info);
4149
4150
  /* For every jump slot reserved in the sgotplt, reloc_count is
4151
     incremented.  However, when we reserve space for TLS descriptors,
4152
     it's not incremented, so in order to compute the space reserved
4153
     for them, it suffices to multiply the reloc count by the jump
4154
     slot size.  */
4155
4156
0
  if (htab->root.srelplt)
4157
0
    htab->sgotplt_jump_table_size = kvx_compute_jump_table_size (htab);
4158
4159
  /* We now have determined the sizes of the various dynamic sections.
4160
     Allocate memory for them.  */
4161
0
  relocs = false;
4162
0
  for (s = dynobj->sections; s != NULL; s = s->next)
4163
0
    {
4164
0
      if ((s->flags & SEC_LINKER_CREATED) == 0)
4165
0
  continue;
4166
4167
0
      if (s == htab->root.splt
4168
0
    || s == htab->root.sgot
4169
0
    || s == htab->root.sgotplt
4170
0
    || s == htab->root.iplt
4171
0
    || s == htab->root.igotplt || s == htab->sdynbss)
4172
0
  {
4173
    /* Strip this section if we don't need it; see the
4174
       comment below.  */
4175
0
  }
4176
0
      else if (startswith (bfd_section_name (s), ".rela"))
4177
0
  {
4178
0
    if (s->size != 0 && s != htab->root.srelplt)
4179
0
      relocs = true;
4180
4181
    /* We use the reloc_count field as a counter if we need
4182
       to copy relocs into the output file.  */
4183
0
    if (s != htab->root.srelplt)
4184
0
      s->reloc_count = 0;
4185
0
  }
4186
0
      else
4187
0
  {
4188
    /* It's not one of our sections, so don't allocate space.  */
4189
0
    continue;
4190
0
  }
4191
4192
0
      if (s->size == 0)
4193
0
  {
4194
    /* If we don't need this section, strip it from the
4195
       output file.  This is mostly to handle .rela.bss and
4196
       .rela.plt.  We must create both sections in
4197
       create_dynamic_sections, because they must be created
4198
       before the linker maps input sections to output
4199
       sections.  The linker does that before
4200
       adjust_dynamic_symbol is called, and it is that
4201
       function which decides whether anything needs to go
4202
       into these sections.  */
4203
4204
0
    s->flags |= SEC_EXCLUDE;
4205
0
    continue;
4206
0
  }
4207
4208
0
      if ((s->flags & SEC_HAS_CONTENTS) == 0)
4209
0
  continue;
4210
4211
      /* Allocate memory for the section contents.  We use bfd_zalloc
4212
   here in case unused entries are not reclaimed before the
4213
   section's contents are written out.  This should not happen,
4214
   but this way if it does, we get a R_KVX_NONE reloc instead
4215
   of garbage.  */
4216
0
      s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
4217
0
      if (s->contents == NULL)
4218
0
  return false;
4219
0
    }
4220
4221
0
  if (htab->root.dynamic_sections_created)
4222
0
    {
4223
      /* Add some entries to the .dynamic section.  We fill in the
4224
   values later, in elf64_kvx_finish_dynamic_sections, but we
4225
   must add the entries now so that we get the correct size for
4226
   the .dynamic section.  The DT_DEBUG entry is filled in by the
4227
   dynamic linker and used by the debugger.  */
4228
0
#define add_dynamic_entry(TAG, VAL)     \
4229
0
      _bfd_elf_add_dynamic_entry (info, TAG, VAL)
4230
4231
0
      if (bfd_link_executable (info))
4232
0
  {
4233
0
    if (!add_dynamic_entry (DT_DEBUG, 0))
4234
0
      return false;
4235
0
  }
4236
4237
0
      if (htab->root.splt->size != 0)
4238
0
  {
4239
0
    if (!add_dynamic_entry (DT_PLTGOT, 0)
4240
0
        || !add_dynamic_entry (DT_PLTRELSZ, 0)
4241
0
        || !add_dynamic_entry (DT_PLTREL, DT_RELA)
4242
0
        || !add_dynamic_entry (DT_JMPREL, 0))
4243
0
      return false;
4244
0
  }
4245
4246
0
      if (relocs)
4247
0
  {
4248
0
    if (!add_dynamic_entry (DT_RELA, 0)
4249
0
        || !add_dynamic_entry (DT_RELASZ, 0)
4250
0
        || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
4251
0
      return false;
4252
4253
    /* If any dynamic relocs apply to a read-only section,
4254
       then we need a DT_TEXTREL entry.  */
4255
0
    if ((info->flags & DF_TEXTREL) == 0)
4256
0
      elf_link_hash_traverse (&htab->root, kvx_readonly_dynrelocs,
4257
0
            info);
4258
4259
0
    if ((info->flags & DF_TEXTREL) != 0)
4260
0
      {
4261
0
        if (!add_dynamic_entry (DT_TEXTREL, 0))
4262
0
    return false;
4263
0
      }
4264
0
  }
4265
0
    }
4266
0
#undef add_dynamic_entry
4267
4268
0
  return true;
4269
0
}
4270
4271
static inline void
4272
elf_kvx_update_plt_entry (bfd *output_bfd,
4273
        bfd_reloc_code_real_type r_type,
4274
        bfd_byte *plt_entry, bfd_vma value)
4275
0
{
4276
0
  reloc_howto_type *howto = elf64_kvx_howto_from_bfd_reloc (r_type);
4277
0
  BFD_ASSERT(howto != NULL);
4278
0
  _bfd_kvx_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
4279
0
}
4280
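
elf_kvx_update_plt_entry above patches one immediate field at a time through the relocation howtos.  The PLT code below splits a single GOT displacement across a *_S37_LO10 and a *_S37_UP27 field; the following standalone sketch shows that split under the assumption that LO10 carries bits [9:0] and UP27 carries bits [36:10] of the sign-extended 37-bit displacement.

#include <stdio.h>
#include <stdint.h>

int main (void)
{
  /* A positive 37-bit displacement; a negative one would have bit 36 set
     and be sign-extended when the two fields are re-combined.  */
  uint64_t disp = 0x123456789ULL & 0x1fffffffffULL;

  /* Assumption: LO10 holds bits [9:0], UP27 holds bits [36:10].  */
  uint32_t lo10 = disp & 0x3ff;               /* -> *_S37_LO10 field */
  uint32_t up27 = (disp >> 10) & 0x7ffffff;   /* -> *_S37_UP27 field */

  uint64_t rebuilt = ((uint64_t) up27 << 10) | lo10;

  printf ("disp %#llx -> lo10 %#x, up27 %#x, rebuilt %#llx\n",
          (unsigned long long) disp, (unsigned) lo10, (unsigned) up27,
          (unsigned long long) rebuilt);
  return 0;
}
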
4281
static void
4282
elf64_kvx_create_small_pltn_entry (struct elf_link_hash_entry *h,
4283
           struct elf_kvx_link_hash_table *htab,
4284
           bfd *output_bfd)
4285
0
{
4286
0
  bfd_byte *plt_entry;
4287
0
  bfd_vma plt_index;
4288
0
  bfd_vma got_offset;
4289
0
  bfd_vma gotplt_entry_address;
4290
0
  bfd_vma plt_entry_address;
4291
0
  Elf_Internal_Rela rela;
4292
0
  bfd_byte *loc;
4293
0
  asection *plt, *gotplt, *relplt;
4294
4295
0
  plt = htab->root.splt;
4296
0
  gotplt = htab->root.sgotplt;
4297
0
  relplt = htab->root.srelplt;
4298
4299
  /* Get the index in the procedure linkage table which
4300
     corresponds to this symbol.  This is the index of this symbol
4301
     in all the symbols for which we are making plt entries.  The
4302
     first entry in the procedure linkage table is reserved.
4303
4304
     Get the offset into the .got table of the entry that
4305
     corresponds to this function.  Each .got entry is GOT_ENTRY_SIZE
4306
     bytes. The first three are reserved for the dynamic linker.
4307
4308
     For static executables, we don't reserve anything.  */
4309
4310
0
  if (plt == htab->root.splt)
4311
0
    {
4312
0
      plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
4313
0
      got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
4314
0
    }
4315
0
  else
4316
0
    {
4317
0
      plt_index = h->plt.offset / htab->plt_entry_size;
4318
0
      got_offset = plt_index * GOT_ENTRY_SIZE;
4319
0
    }
4320
4321
0
  plt_entry = plt->contents + h->plt.offset;
4322
0
  plt_entry_address = plt->output_section->vma
4323
0
    + plt->output_offset + h->plt.offset;
4324
0
  gotplt_entry_address = gotplt->output_section->vma +
4325
0
    gotplt->output_offset + got_offset;
4326
4327
  /* Copy in the boiler-plate for the PLTn entry.  */
4328
0
  memcpy (plt_entry, elf64_kvx_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
4329
4330
  /* Patch the loading of the GOT entry, relative to the PLT entry
4331
     address. */
4332
4333
  /* Use a 37-bit offset for both 32-bit and 64-bit modes.
4334
     Fill the LO10 of lw $r9 = 0[$r14].  */
4335
0
  elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_LO10,
4336
0
         plt_entry+4,
4337
0
         gotplt_entry_address - plt_entry_address);
4338
4339
  /* Fill the UP27 of lw $r9 = 0[$r14].  */
4340
0
  elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_UP27,
4341
0
         plt_entry+8,
4342
0
         gotplt_entry_address - plt_entry_address);
4343
4344
0
  rela.r_offset = gotplt_entry_address;
4345
4346
  /* Fill in the entry in the .rela.plt section.  */
4347
0
  rela.r_info = ELF64_R_INFO (h->dynindx, R_KVX_JMP_SLOT);
4348
0
  rela.r_addend = 0;
4349
4350
  /* Compute the relocation entry to use based on the PLT index and do
4351
     not adjust reloc_count. The reloc_count has already been adjusted
4352
     to account for this entry.  */
4353
0
  loc = relplt->contents + plt_index * RELOC_SIZE (htab);
4354
0
  bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4355
0
}
4356
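
The offset arithmetic in elf64_kvx_create_small_pltn_entry is easy to check by hand.  Here is a minimal sketch with the constants copied from this file; it assumes the PLT header also spans PLT_ENTRY_SIZE bytes, which matches elf64_kvx_plt_sym_val further down.

#include <stdio.h>
#include <inttypes.h>

#define GOT_ENTRY_SIZE       8
#define PLT_ENTRY_SIZE       32       /* PLT0 header, assumed == plt_header_size.  */
#define PLT_SMALL_ENTRY_SIZE (4 * 4)

int main (void)
{
  for (uint64_t plt_index = 0; plt_index < 3; plt_index++)
    {
      /* Offset of PLTn inside .plt, and of its slot inside .got.plt
         (the first three .got.plt slots are reserved for the dynamic
         linker, hence the "+ 3").  */
      uint64_t plt_offset = PLT_ENTRY_SIZE + plt_index * PLT_SMALL_ENTRY_SIZE;
      uint64_t got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;

      printf ("PLT%" PRIu64 ": .plt+0x%" PRIx64 "  .got.plt+0x%" PRIx64 "\n",
              plt_index, plt_offset, got_offset);
    }
  return 0;
}
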
4357
/* Size sections even though they're not dynamic.  We use it to set up
4358
   _TLS_MODULE_BASE_, if needed.  */
4359
4360
static bool
4361
elf64_kvx_early_size_sections (bfd *output_bfd, struct bfd_link_info *info)
4362
0
{
4363
0
  asection *tls_sec;
4364
4365
0
  if (bfd_link_relocatable (info))
4366
0
    return true;
4367
4368
0
  tls_sec = elf_hash_table (info)->tls_sec;
4369
4370
0
  if (tls_sec)
4371
0
    {
4372
0
      struct elf_link_hash_entry *tlsbase;
4373
4374
0
      tlsbase = elf_link_hash_lookup (elf_hash_table (info),
4375
0
              "_TLS_MODULE_BASE_", true, true, false);
4376
4377
0
      if (tlsbase)
4378
0
  {
4379
0
    struct bfd_link_hash_entry *h = NULL;
4380
0
    const struct elf_backend_data *bed =
4381
0
      get_elf_backend_data (output_bfd);
4382
4383
0
    if (!(_bfd_generic_link_add_one_symbol
4384
0
    (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
4385
0
     tls_sec, 0, NULL, false, bed->collect, &h)))
4386
0
      return false;
4387
4388
0
    tlsbase->type = STT_TLS;
4389
0
    tlsbase = (struct elf_link_hash_entry *) h;
4390
0
    tlsbase->def_regular = 1;
4391
0
    tlsbase->other = STV_HIDDEN;
4392
0
    (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
4393
0
  }
4394
0
    }
4395
4396
0
  return true;
4397
0
}
4398
4399
/* Finish up dynamic symbol handling.  We set the contents of various
4400
   dynamic sections here.  */
4401
static bool
4402
elf64_kvx_finish_dynamic_symbol (bfd *output_bfd,
4403
         struct bfd_link_info *info,
4404
         struct elf_link_hash_entry *h,
4405
         Elf_Internal_Sym *sym)
4406
0
{
4407
0
  struct elf_kvx_link_hash_table *htab;
4408
0
  htab = elf_kvx_hash_table (info);
4409
4410
0
  if (h->plt.offset != (bfd_vma) - 1)
4411
0
    {
4412
0
      asection *plt = NULL, *gotplt = NULL, *relplt = NULL;
4413
4414
      /* This symbol has an entry in the procedure linkage table.  Set
4415
   it up.  */
4416
4417
0
      if (htab->root.splt != NULL)
4418
0
  {
4419
0
    plt = htab->root.splt;
4420
0
    gotplt = htab->root.sgotplt;
4421
0
    relplt = htab->root.srelplt;
4422
0
  }
4423
4424
      /* This symbol has an entry in the procedure linkage table.  Set
4425
   it up.  */
4426
0
      if ((h->dynindx == -1
4427
0
     && !((h->forced_local || bfd_link_executable (info))
4428
0
    && h->def_regular
4429
0
    && h->type == STT_GNU_IFUNC))
4430
0
    || plt == NULL
4431
0
    || gotplt == NULL
4432
0
    || relplt == NULL)
4433
0
  abort ();
4434
4435
0
      elf64_kvx_create_small_pltn_entry (h, htab, output_bfd);
4436
0
      if (!h->def_regular)
4437
0
  {
4438
    /* Mark the symbol as undefined, rather than as defined in
4439
       the .plt section.  */
4440
0
    sym->st_shndx = SHN_UNDEF;
4441
    /* If the symbol is weak we need to clear the value.
4442
       Otherwise, the PLT entry would provide a definition for
4443
       the symbol even if the symbol wasn't defined anywhere,
4444
       and so the symbol would never be NULL.  Leave the value if
4445
       there were any relocations where pointer equality matters
4446
       (this is a clue for the dynamic linker, to make function
4447
       pointer comparisons work between an application and shared
4448
       library).  */
4449
0
    if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
4450
0
      sym->st_value = 0;
4451
0
  }
4452
0
    }
4453
4454
0
  if (h->got.offset != (bfd_vma) - 1
4455
0
      && elf_kvx_hash_entry (h)->got_type == GOT_NORMAL)
4456
0
    {
4457
0
      Elf_Internal_Rela rela;
4458
0
      bfd_byte *loc;
4459
4460
      /* This symbol has an entry in the global offset table.  Set it
4461
   up.  */
4462
0
      if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
4463
0
  abort ();
4464
4465
0
      rela.r_offset = (htab->root.sgot->output_section->vma
4466
0
           + htab->root.sgot->output_offset
4467
0
           + (h->got.offset & ~(bfd_vma) 1));
4468
4469
#ifdef UGLY_DEBUG
4470
      printf("setting rela at offset 0x%x(0x%x + 0x%x + 0x%x) for %s\n",
4471
       rela.r_offset,
4472
       htab->root.sgot->output_section->vma,
4473
       htab->root.sgot->output_offset,
4474
       h->got.offset,
4475
       h->root.root.string);
4476
#endif
4477
4478
0
      if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
4479
0
  {
4480
0
    if (!h->def_regular)
4481
0
      return false;
4482
4483
    /* In the case of a PLT-related GOT entry, it is not clear who is
4484
       supposed to set the LSB of the GOT entry...
4485
       kvx_calculate_got_entry_vma() would be a good candidate,
4486
       but it is not currently called,
4487
       so the assertion below stays commented out for now.  */
4488
    // BFD_ASSERT ((h->got.offset & 1) != 0);
4489
0
    rela.r_info = ELF64_R_INFO (0, R_KVX_RELATIVE);
4490
0
    rela.r_addend = (h->root.u.def.value
4491
0
         + h->root.u.def.section->output_section->vma
4492
0
         + h->root.u.def.section->output_offset);
4493
0
  }
4494
0
      else
4495
0
  {
4496
0
    BFD_ASSERT ((h->got.offset & 1) == 0);
4497
0
    bfd_put_64 (output_bfd, (bfd_vma) 0,
4498
0
          htab->root.sgot->contents + h->got.offset);
4499
0
    rela.r_info = ELF64_R_INFO (h->dynindx, R_KVX_GLOB_DAT);
4500
0
    rela.r_addend = 0;
4501
0
  }
4502
4503
0
      loc = htab->root.srelgot->contents;
4504
0
      loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
4505
0
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4506
0
    }
4507
4508
0
  if (h->needs_copy)
4509
0
    {
4510
0
      Elf_Internal_Rela rela;
4511
0
      bfd_byte *loc;
4512
4513
      /* This symbol needs a copy reloc.  Set it up.  */
4514
4515
0
      if (h->dynindx == -1
4516
0
    || (h->root.type != bfd_link_hash_defined
4517
0
        && h->root.type != bfd_link_hash_defweak)
4518
0
    || htab->srelbss == NULL)
4519
0
  abort ();
4520
4521
0
      rela.r_offset = (h->root.u.def.value
4522
0
           + h->root.u.def.section->output_section->vma
4523
0
           + h->root.u.def.section->output_offset);
4524
0
      rela.r_info = ELF64_R_INFO (h->dynindx, R_KVX_COPY);
4525
0
      rela.r_addend = 0;
4526
0
      loc = htab->srelbss->contents;
4527
0
      loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
4528
0
      bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4529
0
    }
4530
4531
  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  SYM may
4532
     be NULL for local symbols.  */
4533
0
  if (sym != NULL
4534
0
      && (h == elf_hash_table (info)->hdynamic
4535
0
    || h == elf_hash_table (info)->hgot))
4536
0
    sym->st_shndx = SHN_ABS;
4537
4538
0
  return true;
4539
0
}
4540
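
For a GOT entry, the code above emits either an R_KVX_RELATIVE or an R_KVX_GLOB_DAT dynamic relocation.  Below is a standalone sketch of the two shapes, using hypothetical relocation numbers (the real R_KVX_* values live in include/elf/kvx.h) and the standard ELF64_R_INFO packing.

#include <stdio.h>
#include <stdint.h>

#define R_KVX_GLOB_DAT 10   /* hypothetical value, for illustration only */
#define R_KVX_RELATIVE 19   /* hypothetical value, for illustration only */
#define ELF64_R_INFO(sym, type) (((uint64_t) (sym) << 32) + (uint64_t) (type))

typedef struct { uint64_t r_offset; uint64_t r_info; int64_t r_addend; } rela;

int main (void)
{
  uint64_t got_slot = 0x12340;   /* VMA of the GOT slot being fixed up */

  /* PIC and the symbol resolves locally: no dynamic symbol index, the
     addend carries the symbol's link-time address.  */
  rela local  = { got_slot, ELF64_R_INFO (0, R_KVX_RELATIVE), 0x400100 };

  /* Otherwise: reference the dynamic symbol (index 7 here), addend 0,
     and the GOT slot itself is pre-filled with 0.  */
  rela global = { got_slot, ELF64_R_INFO (7, R_KVX_GLOB_DAT), 0 };

  printf ("RELATIVE r_info=%#llx  GLOB_DAT r_info=%#llx\n",
          (unsigned long long) local.r_info, (unsigned long long) global.r_info);
  return 0;
}
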
4541
static void
4542
elf64_kvx_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
4543
         struct elf_kvx_link_hash_table *htab)
4544
0
{
4545
0
  memcpy (htab->root.splt->contents, elf64_kvx_small_plt0_entry,
4546
0
    PLT_ENTRY_SIZE);
4547
0
  elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
4548
0
    PLT_ENTRY_SIZE;
4549
0
}
4550
4551
static bool
4552
elf64_kvx_finish_dynamic_sections (bfd *output_bfd,
4553
           struct bfd_link_info *info)
4554
0
{
4555
0
  struct elf_kvx_link_hash_table *htab;
4556
0
  bfd *dynobj;
4557
0
  asection *sdyn;
4558
4559
0
  htab = elf_kvx_hash_table (info);
4560
0
  dynobj = htab->root.dynobj;
4561
0
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");
4562
4563
0
  if (htab->root.dynamic_sections_created)
4564
0
    {
4565
0
      Elf64_External_Dyn *dyncon, *dynconend;
4566
4567
0
      if (sdyn == NULL || htab->root.sgot == NULL)
4568
0
  abort ();
4569
4570
0
      dyncon = (Elf64_External_Dyn *) sdyn->contents;
4571
0
      dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
4572
0
      for (; dyncon < dynconend; dyncon++)
4573
0
  {
4574
0
    Elf_Internal_Dyn dyn;
4575
0
    asection *s;
4576
4577
0
    bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
4578
4579
0
    switch (dyn.d_tag)
4580
0
      {
4581
0
      default:
4582
0
        continue;
4583
4584
0
      case DT_PLTGOT:
4585
0
        s = htab->root.sgotplt;
4586
0
        dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4587
0
        break;
4588
4589
0
      case DT_JMPREL:
4590
0
        s = htab->root.srelplt;
4591
0
        dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4592
0
        break;
4593
4594
0
      case DT_PLTRELSZ:
4595
0
        s = htab->root.srelplt;
4596
0
        dyn.d_un.d_val = s->size;
4597
0
        break;
4598
4599
0
      case DT_RELASZ:
4600
        /* The procedure linkage table relocs (DT_JMPREL) should
4601
     not be included in the overall relocs (DT_RELA).
4602
     Therefore, we override the DT_RELASZ entry here to
4603
     make it not include the JMPREL relocs.  Since the
4604
     linker script arranges for .rela.plt to follow all
4605
     other relocation sections, we don't have to worry
4606
     about changing the DT_RELA entry.  */
4607
0
        if (htab->root.srelplt != NULL)
4608
0
    {
4609
0
      s = htab->root.srelplt;
4610
0
      dyn.d_un.d_val -= s->size;
4611
0
    }
4612
0
        break;
4613
0
      }
4614
4615
0
    bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
4616
0
  }
4617
4618
0
    }
4619
4620
  /* Fill in the special first entry in the procedure linkage table.  */
4621
0
  if (htab->root.splt && htab->root.splt->size > 0)
4622
0
    {
4623
0
      elf64_kvx_init_small_plt0_entry (output_bfd, htab);
4624
4625
0
      elf_section_data (htab->root.splt->output_section)->
4626
0
  this_hdr.sh_entsize = htab->plt_entry_size;
4627
0
    }
4628
4629
0
  if (htab->root.sgotplt)
4630
0
    {
4631
0
      if (bfd_is_abs_section (htab->root.sgotplt->output_section))
4632
0
  {
4633
0
    (*_bfd_error_handler)
4634
0
      (_("discarded output section: `%pA'"), htab->root.sgotplt);
4635
0
    return false;
4636
0
  }
4637
4638
      /* Fill in the first three entries in the global offset table.  */
4639
0
      if (htab->root.sgotplt->size > 0)
4640
0
  {
4641
0
    bfd_put_64 (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
4642
4643
    /* Write GOT[1] and GOT[2], needed for the dynamic linker.  */
4644
0
    bfd_put_64 (output_bfd,
4645
0
          (bfd_vma) 0,
4646
0
          htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
4647
0
    bfd_put_64 (output_bfd,
4648
0
          (bfd_vma) 0,
4649
0
          htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
4650
0
  }
4651
4652
0
      if (htab->root.sgot)
4653
0
  {
4654
0
    if (htab->root.sgot->size > 0)
4655
0
      {
4656
0
        bfd_vma addr =
4657
0
    sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
4658
0
        bfd_put_64 (output_bfd, addr, htab->root.sgot->contents);
4659
0
      }
4660
0
  }
4661
4662
0
      elf_section_data (htab->root.sgotplt->output_section)->
4663
0
  this_hdr.sh_entsize = GOT_ENTRY_SIZE;
4664
0
    }
4665
4666
0
  if (htab->root.sgot && htab->root.sgot->size > 0)
4667
0
    elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
4668
0
      = GOT_ENTRY_SIZE;
4669
4670
0
  return true;
4671
0
}
4672
4673
/* Return address for Ith PLT stub in section PLT, for relocation REL
4674
   or (bfd_vma) -1 if it should not be included.  */
4675
4676
static bfd_vma
4677
elf64_kvx_plt_sym_val (bfd_vma i, const asection *plt,
4678
           const arelent *rel ATTRIBUTE_UNUSED)
4679
0
{
4680
0
  return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
4681
0
}
4682
4683
#define ELF_ARCH      bfd_arch_kvx
4684
#define ELF_MACHINE_CODE    EM_KVX
4685
#define ELF_MAXPAGESIZE     0x10000
4686
#define ELF_MINPAGESIZE     0x1000
4687
#define ELF_COMMONPAGESIZE    0x1000
4688
4689
#define bfd_elf64_bfd_link_hash_table_create    \
4690
  elf64_kvx_link_hash_table_create
4691
4692
#define bfd_elf64_bfd_merge_private_bfd_data  \
4693
  elf64_kvx_merge_private_bfd_data
4694
4695
#define bfd_elf64_bfd_print_private_bfd_data  \
4696
  elf64_kvx_print_private_bfd_data
4697
4698
#define bfd_elf64_bfd_reloc_type_lookup   \
4699
  elf64_kvx_reloc_type_lookup
4700
4701
#define bfd_elf64_bfd_reloc_name_lookup   \
4702
  elf64_kvx_reloc_name_lookup
4703
4704
#define bfd_elf64_bfd_set_private_flags   \
4705
  elf64_kvx_set_private_flags
4706
4707
#define bfd_elf64_mkobject      \
4708
  elf64_kvx_mkobject
4709
4710
#define bfd_elf64_new_section_hook    \
4711
  elf64_kvx_new_section_hook
4712
4713
#define elf_backend_adjust_dynamic_symbol \
4714
  elf64_kvx_adjust_dynamic_symbol
4715
4716
#define elf_backend_early_size_sections   \
4717
  elf64_kvx_early_size_sections
4718
4719
#define elf_backend_check_relocs    \
4720
  elf64_kvx_check_relocs
4721
4722
#define elf_backend_copy_indirect_symbol  \
4723
  elf64_kvx_copy_indirect_symbol
4724
4725
/* Create .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
4726
   to them in our hash.  */
4727
#define elf_backend_create_dynamic_sections \
4728
  elf64_kvx_create_dynamic_sections
4729
4730
#define elf_backend_init_index_section    \
4731
  _bfd_elf_init_2_index_sections
4732
4733
#define elf_backend_finish_dynamic_sections \
4734
  elf64_kvx_finish_dynamic_sections
4735
4736
#define elf_backend_finish_dynamic_symbol \
4737
  elf64_kvx_finish_dynamic_symbol
4738
4739
#define elf_backend_object_p      \
4740
  elf64_kvx_object_p
4741
4742
#define elf_backend_output_arch_local_syms      \
4743
  elf64_kvx_output_arch_local_syms
4744
4745
#define elf_backend_plt_sym_val     \
4746
  elf64_kvx_plt_sym_val
4747
4748
#define elf_backend_init_file_header    \
4749
  elf64_kvx_init_file_header
4750
4751
#define elf_backend_init_process_headers  \
4752
  elf64_kvx_init_process_headers
4753
4754
#define elf_backend_relocate_section    \
4755
  elf64_kvx_relocate_section
4756
4757
#define elf_backend_reloc_type_class    \
4758
  elf64_kvx_reloc_type_class
4759
4760
#define elf_backend_late_size_sections  \
4761
  elf64_kvx_late_size_sections
4762
4763
#define elf_backend_can_refcount       1
4764
#define elf_backend_can_gc_sections    1
4765
#define elf_backend_plt_readonly       1
4766
#define elf_backend_want_got_plt       1
4767
#define elf_backend_want_plt_sym       0
4768
#define elf_backend_may_use_rel_p      0
4769
#define elf_backend_may_use_rela_p     1
4770
#define elf_backend_default_use_rela_p 1
4771
#define elf_backend_rela_normal        1
4772
#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
4773
#define elf_backend_default_execstack  0
4774
#define elf_backend_extern_protected_data 1
4775
#define elf_backend_hash_symbol elf_kvx_hash_symbol
4776
4777
#include "elf64-target.h"