Coverage Report

Created: 2023-08-28 06:28

/src/binutils-gdb/bfd/elf32-kvx.c
Line
Count
Source
1
#line 1 "elfnn-kvx.c"
2
/* KVX-specific support for 32-bit ELF.
3
   Copyright (C) 2009-2023 Free Software Foundation, Inc.
4
   Contributed by Kalray SA.
5
6
   This file is part of BFD, the Binary File Descriptor library.
7
8
   This program is free software; you can redistribute it and/or modify
9
   it under the terms of the GNU General Public License as published by
10
   the Free Software Foundation; either version 3 of the License, or
11
   (at your option) any later version.
12
13
   This program is distributed in the hope that it will be useful,
14
   but WITHOUT ANY WARRANTY; without even the implied warranty of
15
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
   GNU General Public License for more details.
17
18
   You should have received a copy of the GNU General Public License
19
   along with this program; see the file COPYING3. If not,
20
   see <http://www.gnu.org/licenses/>.  */
21
22
#include "sysdep.h"
23
#include "bfd.h"
24
#include "libiberty.h"
25
#include "libbfd.h"
26
#include "elf-bfd.h"
27
#include "bfdlink.h"
28
#include "objalloc.h"
29
#include "elf/kvx.h"
30
#include "elfxx-kvx.h"
31
32
0
#define ARCH_SIZE 32
33
34
#if ARCH_SIZE == 64
35
#define LOG_FILE_ALIGN  3
36
#endif
37
38
#if ARCH_SIZE == 32
39
0
#define LOG_FILE_ALIGN  2
40
#endif
41
42
#define IS_KVX_TLS_RELOC(R_TYPE)      \
43
0
  ((R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_LO10  \
44
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LE_UP27  \
45
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_LO10  \
46
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_UP27  \
47
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LE_EX6  \
48
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10  \
49
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27  \
50
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10  \
51
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27  \
52
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6  \
53
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_LO10  \
54
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_IE_UP27  \
55
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_LO10  \
56
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_UP27  \
57
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_IE_EX6  \
58
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_LO10  \
59
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_GD_UP27  \
60
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_LO10  \
61
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_UP27  \
62
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_GD_EX6  \
63
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_LO10  \
64
0
   || (R_TYPE) == BFD_RELOC_KVX_S37_TLS_LD_UP27  \
65
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_LO10  \
66
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_UP27  \
67
0
   || (R_TYPE) == BFD_RELOC_KVX_S43_TLS_LD_EX6  \
68
0
   )
69
70
0
#define IS_KVX_TLS_RELAX_RELOC(R_TYPE) 0
71
72
0
#define ELIMINATE_COPY_RELOCS 0
73
74
/* Return size of a relocation entry.  HTAB is the bfd's
75
   elf_kvx_link_hash_entry.  */
76
0
#define RELOC_SIZE(HTAB) (sizeof (Elf32_External_Rela))
77
78
/* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32.  */
79
0
#define GOT_ENTRY_SIZE                  (ARCH_SIZE / 8)
80
0
#define PLT_ENTRY_SIZE                  (32)
81
82
0
#define PLT_SMALL_ENTRY_SIZE            (4*4)
83
84
/* Encoding of the nop instruction */
85
0
#define INSN_NOP 0x00f0037f
86
87
#define kvx_compute_jump_table_size(htab)   \
88
0
  (((htab)->root.srelplt == NULL) ? 0      \
89
0
   : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
90
91
static const bfd_byte elf32_kvx_small_plt0_entry[PLT_ENTRY_SIZE] =
92
{
93
 /* FIXME KVX: no first entry, not used yet */
94
  0
95
};
96
97
/* Per function entry in a procedure linkage table looks like this
98
   if the distance between the PLTGOT and the PLT is < 4GB use
99
   these PLT entries.  */
100
static const bfd_byte elf32_kvx_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
101
{
102
  0x10, 0x00, 0xc4, 0x0f,       /* get $r16 = $pc     ;; */
103
#if ARCH_SIZE == 32
104
  0x10, 0x00, 0x40, 0xb0,       /* lwz $r16 = 0[$r16]   ;; */
105
#else
106
  0x10, 0x00, 0x40, 0xb8,       /* ld $r16 = 0[$r16] ;; */
107
#endif
108
  0x00, 0x00, 0x00, 0x18,       /* upper 27 bits for LSU */
109
  0x10, 0x00, 0xd8, 0x0f, /* igoto $r16          ;; */
110
};
111
112
/* Long stubs use the 43-bit immediate form of the make instruction.  */
113
static const uint32_t elf32_kvx_long_branch_stub[] =
114
{
115
  0xe0400000,      /* make $r16 = LO10<imm43> EX6<imm43> */
116
  0x00000000,      /* UP27<imm43> ;; */
117
  0x0fd80010,      /* igoto $r16  ;; */
118
};
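/* Illustrative note: the three words above are only a template; when a stub
   is emitted, kvx_build_one_stub (below) patches the final target address
   into them by applying R_KVX_S43_LO10 and R_KVX_S43_EX6 to the first word
   and R_KVX_S43_UP27 to the second, so the make/igoto pair can reach any
   destination within the signed 43-bit immediate range.  */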
119
120
#define elf_info_to_howto               elf32_kvx_info_to_howto
121
#define elf_info_to_howto_rel           elf32_kvx_info_to_howto
122
123
0
#define KVX_ELF_ABI_VERSION   0
124
125
/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
126
#define ALL_ONES (~ (bfd_vma) 0)
127
128
/* Indexed by the bfd internal reloc enumerators.
129
   Therefore, the table needs to be synced with BFD_RELOC_KVX_*
130
   in reloc.c.   */
131
132
#define KVX_KV3_V1_KV3_V2_KV4_V1
133
#include "elfxx-kvx-relocs.h"
134
#undef KVX_KV3_V1_KV3_V2_KV4_V1
135
136
/* Given HOWTO, return the bfd internal relocation enumerator.  */
137
138
static bfd_reloc_code_real_type
139
elf32_kvx_bfd_reloc_from_howto (reloc_howto_type *howto)
140
0
{
141
0
  const int size = (int) ARRAY_SIZE (elf_kvx_howto_table);
142
0
  const ptrdiff_t offset = howto - elf_kvx_howto_table;
143
144
0
  if (offset >= 0 && offset < size)
145
0
    return BFD_RELOC_KVX_RELOC_START + offset + 1;
146
147
0
  return BFD_RELOC_KVX_RELOC_START + 1;
148
0
}
149
150
/* Given R_TYPE, return the bfd internal relocation enumerator.  */
151
152
static bfd_reloc_code_real_type
153
elf32_kvx_bfd_reloc_from_type (bfd *abfd ATTRIBUTE_UNUSED, unsigned int r_type)
154
0
{
155
0
  static bool initialized_p = false;
156
  /* Indexed by R_TYPE, values are offsets in the howto_table.  */
157
0
  static unsigned int offsets[R_KVX_end];
158
159
0
  if (!initialized_p)
160
0
    {
161
0
      unsigned int i;
162
163
0
      for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
164
0
  offsets[elf_kvx_howto_table[i].type] = i;
165
166
0
      initialized_p = true;
167
0
    }
168
169
  /* PR 17512: file: b371e70a.  */
170
0
  if (r_type >= R_KVX_end)
171
0
    {
172
0
      bfd_set_error (bfd_error_bad_value);
173
0
      return BFD_RELOC_KVX_RELOC_END;
174
0
    }
175
176
0
  return (BFD_RELOC_KVX_RELOC_START + 1) + offsets[r_type];
177
0
}
178
179
struct elf_kvx_reloc_map
180
{
181
  bfd_reloc_code_real_type from;
182
  bfd_reloc_code_real_type to;
183
};
184
185
/* Map bfd generic reloc to KVX-specific reloc.  */
186
static const struct elf_kvx_reloc_map elf_kvx_reloc_map[] =
187
{
188
  {BFD_RELOC_NONE, BFD_RELOC_KVX_NONE},
189
190
  /* Basic data relocations.  */
191
  {BFD_RELOC_CTOR, BFD_RELOC_KVX_32},
192
  {BFD_RELOC_64, BFD_RELOC_KVX_64},
193
  {BFD_RELOC_32, BFD_RELOC_KVX_32},
194
  {BFD_RELOC_16, BFD_RELOC_KVX_16},
195
  {BFD_RELOC_8,  BFD_RELOC_KVX_8},
196
197
  {BFD_RELOC_64_PCREL, BFD_RELOC_KVX_64_PCREL},
198
  {BFD_RELOC_32_PCREL, BFD_RELOC_KVX_32_PCREL},
199
};
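/* Illustrative note: only generic BFD relocations are listed above; a code
   that is already in the BFD_RELOC_KVX_* range bypasses this map and is
   indexed directly into elf_kvx_howto_table by
   elf32_kvx_howto_from_bfd_reloc below.  */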
200
201
/* Given the bfd internal relocation enumerator in CODE, return the
202
   corresponding howto entry.  */
203
204
static reloc_howto_type *
205
elf32_kvx_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
206
0
{
207
0
  unsigned int i;
208
209
  /* Convert bfd generic reloc to KVX-specific reloc.  */
210
0
  if (code < BFD_RELOC_KVX_RELOC_START || code > BFD_RELOC_KVX_RELOC_END)
211
0
    for (i = 0; i < ARRAY_SIZE (elf_kvx_reloc_map) ; i++)
212
0
      if (elf_kvx_reloc_map[i].from == code)
213
0
  {
214
0
    code = elf_kvx_reloc_map[i].to;
215
0
    break;
216
0
  }
217
218
0
  if (code > BFD_RELOC_KVX_RELOC_START && code < BFD_RELOC_KVX_RELOC_END)
219
0
      return &elf_kvx_howto_table[code - (BFD_RELOC_KVX_RELOC_START + 1)];
220
221
0
  return NULL;
222
0
}
223
224
static reloc_howto_type *
225
elf32_kvx_howto_from_type (bfd *abfd, unsigned int r_type)
226
0
{
227
0
  bfd_reloc_code_real_type val;
228
0
  reloc_howto_type *howto;
229
230
0
#if ARCH_SIZE == 32
231
0
  if (r_type > 256)
232
0
    {
233
0
      bfd_set_error (bfd_error_bad_value);
234
0
      return NULL;
235
0
    }
236
0
#endif
237
238
0
  val = elf32_kvx_bfd_reloc_from_type (abfd, r_type);
239
0
  howto = elf32_kvx_howto_from_bfd_reloc (val);
240
241
0
  if (howto != NULL)
242
0
    return howto;
243
244
0
  bfd_set_error (bfd_error_bad_value);
245
0
  return NULL;
246
0
}
247
248
static bool
249
elf32_kvx_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
250
       Elf_Internal_Rela *elf_reloc)
251
0
{
252
0
  unsigned int r_type;
253
254
0
  r_type = ELF32_R_TYPE (elf_reloc->r_info);
255
0
  bfd_reloc->howto = elf32_kvx_howto_from_type (abfd, r_type);
256
257
0
  if (bfd_reloc->howto == NULL)
258
0
    {
259
      /* xgettext:c-format */
260
0
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
261
0
        abfd, r_type);
262
0
      return false;
263
0
    }
264
0
  return true;
265
0
}
266
267
static reloc_howto_type *
268
elf32_kvx_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
269
           bfd_reloc_code_real_type code)
270
0
{
271
0
  reloc_howto_type *howto = elf32_kvx_howto_from_bfd_reloc (code);
272
273
0
  if (howto != NULL)
274
0
    return howto;
275
276
0
  bfd_set_error (bfd_error_bad_value);
277
0
  return NULL;
278
0
}
279
280
static reloc_howto_type *
281
elf32_kvx_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
282
           const char *r_name)
283
0
{
284
0
  unsigned int i;
285
286
0
  for (i = 0; i < ARRAY_SIZE (elf_kvx_howto_table); ++i)
287
0
    if (elf_kvx_howto_table[i].name != NULL
288
0
  && strcasecmp (elf_kvx_howto_table[i].name, r_name) == 0)
289
0
      return &elf_kvx_howto_table[i];
290
291
0
  return NULL;
292
0
}
293
294
#define TARGET_LITTLE_SYM               kvx_elf32_vec
295
#define TARGET_LITTLE_NAME              "elf32-kvx"
296
297
/* The linker script knows the section names for placement.
298
   The entry_names are used to do simple name mangling on the stubs.
299
   Given a function name, and its type, the stub can be found. The
300
   name can be changed.  The only requirement is that the %s be present.  */
301
0
#define STUB_ENTRY_NAME   "__%s_veneer"
302
303
/* The name of the dynamic interpreter.  This is put in the .interp
304
   section.  */
305
0
#define ELF_DYNAMIC_INTERPRETER     "/lib/ld.so.1"
306
307
308
/* PCREL 27 is sign-extended and scaled by 4.  */
309
#define KVX_MAX_FWD_CALL_OFFSET \
310
0
  (((1 << 26) - 1) << 2)
311
#define KVX_MAX_BWD_CALL_OFFSET \
312
0
  (-((1 << 26) << 2))
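/* Worked out from the two macros above: KVX_MAX_FWD_CALL_OFFSET is
   ((1 << 26) - 1) << 2 = 0x0ffffffc (just under +256MB) and
   KVX_MAX_BWD_CALL_OFFSET is -((1 << 26) << 2) = -0x10000000 (-256MB),
   which is also why elf32_kvx_size_stubs later defaults the stub group
   size to 255MB.  */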
313
314
/* Check that the destination of the call is within the PCREL27
315
   range. */
316
static int
317
kvx_valid_call_p (bfd_vma value, bfd_vma place)
318
0
{
319
0
  bfd_signed_vma offset = (bfd_signed_vma) (value - place);
320
0
  return (offset <= KVX_MAX_FWD_CALL_OFFSET
321
0
    && offset >= KVX_MAX_BWD_CALL_OFFSET);
322
0
}
323
324
/* Section name for stubs is the associated section name plus this
325
   string.  */
326
0
#define STUB_SUFFIX ".stub"
327
328
enum elf_kvx_stub_type
329
{
330
  kvx_stub_none,
331
  kvx_stub_long_branch,
332
};
333
334
struct elf_kvx_stub_hash_entry
335
{
336
  /* Base hash table entry structure.  */
337
  struct bfd_hash_entry root;
338
339
  /* The stub section.  */
340
  asection *stub_sec;
341
342
  /* Offset within stub_sec of the beginning of this stub.  */
343
  bfd_vma stub_offset;
344
345
  /* Given the symbol's value and its section we can determine its final
346
     value when building the stubs (so the stub knows where to jump).  */
347
  bfd_vma target_value;
348
  asection *target_section;
349
350
  enum elf_kvx_stub_type stub_type;
351
352
  /* The symbol table entry, if any, that this was derived from.  */
353
  struct elf_kvx_link_hash_entry *h;
354
355
  /* Destination symbol type */
356
  unsigned char st_type;
357
358
  /* Where this stub is being called from, or, in the case of combined
359
     stub sections, the first input section in the group.  */
360
  asection *id_sec;
361
362
  /* The name for the local symbol at the start of this stub.  The
363
     stub name in the hash table has to be unique; this does not, so
364
     it can be friendlier.  */
365
  char *output_name;
366
};
367
368
/* Used to build a map of a section.  This is required for mixed-endian
369
   code/data.  */
370
371
typedef struct elf_elf_section_map
372
{
373
  bfd_vma vma;
374
  char type;
375
}
376
elf_kvx_section_map;
377
378
379
typedef struct _kvx_elf_section_data
380
{
381
  struct bfd_elf_section_data elf;
382
  unsigned int mapcount;
383
  unsigned int mapsize;
384
  elf_kvx_section_map *map;
385
}
386
_kvx_elf_section_data;
387
388
#define elf_kvx_section_data(sec) \
389
  ((_kvx_elf_section_data *) elf_section_data (sec))
390
391
struct elf_kvx_local_symbol
392
{
393
  unsigned int got_type;
394
  bfd_signed_vma got_refcount;
395
  bfd_vma got_offset;
396
};
397
398
struct elf_kvx_obj_tdata
399
{
400
  struct elf_obj_tdata root;
401
402
  /* local symbol descriptors */
403
  struct elf_kvx_local_symbol *locals;
404
405
  /* Zero to warn when linking objects with incompatible enum sizes.  */
406
  int no_enum_size_warning;
407
408
  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
409
  int no_wchar_size_warning;
410
};
411
412
#define elf_kvx_tdata(bfd)        \
413
0
  ((struct elf_kvx_obj_tdata *) (bfd)->tdata.any)
414
415
0
#define elf_kvx_locals(bfd) (elf_kvx_tdata (bfd)->locals)
416
417
#define is_kvx_elf(bfd)       \
418
0
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour  \
419
0
   && elf_tdata (bfd) != NULL        \
420
0
   && elf_object_id (bfd) == KVX_ELF_DATA)
421
422
static bool
423
elf32_kvx_mkobject (bfd *abfd)
424
22.1k
{
425
22.1k
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_kvx_obj_tdata),
426
22.1k
          KVX_ELF_DATA);
427
22.1k
}
428
429
#define elf_kvx_hash_entry(ent) \
430
0
  ((struct elf_kvx_link_hash_entry *)(ent))
431
432
0
#define GOT_UNKNOWN    0
433
0
#define GOT_NORMAL     1
434
435
0
#define GOT_TLS_GD     2
436
0
#define GOT_TLS_IE     4
437
0
#define GOT_TLS_LD     8
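/* Illustrative note: apart from GOT_UNKNOWN these are distinct bits because
   got_type (in elf_kvx_link_hash_entry below) is a bit mask; a symbol
   referenced through both a TLS GD and a TLS IE sequence, for example,
   would record GOT_TLS_GD | GOT_TLS_IE (== 6).  */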
438
439
/* KVX ELF linker hash entry.  */
440
struct elf_kvx_link_hash_entry
441
{
442
  struct elf_link_hash_entry root;
443
444
  /* Since PLT entries have variable size, we need to record the
445
     index into .got.plt instead of recomputing it from the PLT
446
     offset.  */
447
  bfd_signed_vma plt_got_offset;
448
449
  /* Bit mask representing the type of GOT entry(s) if any required by
450
     this symbol.  */
451
  unsigned int got_type;
452
453
  /* A pointer to the most recently used stub hash entry against this
454
     symbol.  */
455
  struct elf_kvx_stub_hash_entry *stub_cache;
456
};
457
458
/* Get the KVX elf linker hash table from a link_info structure.  */
459
#define elf_kvx_hash_table(info)          \
460
0
  ((struct elf_kvx_link_hash_table *) ((info)->hash))
461
462
#define kvx_stub_hash_lookup(table, string, create, copy)   \
463
0
  ((struct elf_kvx_stub_hash_entry *)       \
464
0
   bfd_hash_lookup ((table), (string), (create), (copy)))
465
466
/* KVX ELF linker hash table.  */
467
struct elf_kvx_link_hash_table
468
{
469
  /* The main hash table.  */
470
  struct elf_link_hash_table root;
471
472
  /* Nonzero to force PIC branch veneers.  */
473
  int pic_veneer;
474
475
  /* The number of bytes in the initial entry in the PLT.  */
476
  bfd_size_type plt_header_size;
477
478
  /* The number of bytes in the subsequent PLT entries.  */
479
  bfd_size_type plt_entry_size;
480
481
  /* The bytes of the subsequent PLT entry.  */
482
  const bfd_byte *plt_entry;
483
484
  /* Short-cuts to get to dynamic linker sections.  */
485
  asection *sdynbss;
486
  asection *srelbss;
487
488
  /* Small local sym cache.  */
489
  struct sym_cache sym_cache;
490
491
  /* For convenience in allocate_dynrelocs.  */
492
  bfd *obfd;
493
494
  /* The amount of space used by the reserved portion of the sgotplt
495
     section, plus whatever space is used by the jump slots.  */
496
  bfd_vma sgotplt_jump_table_size;
497
498
  /* The stub hash table.  */
499
  struct bfd_hash_table stub_hash_table;
500
501
  /* Linker stub bfd.  */
502
  bfd *stub_bfd;
503
504
  /* Linker call-backs.  */
505
  asection *(*add_stub_section) (const char *, asection *);
506
  void (*layout_sections_again) (void);
507
508
  /* Array to keep track of which stub sections have been created, and
509
     information on stub grouping.  */
510
  struct map_stub
511
  {
512
    /* This is the section to which stubs in the group will be
513
       attached.  */
514
    asection *link_sec;
515
    /* The stub section.  */
516
    asection *stub_sec;
517
  } *stub_group;
518
519
  /* Assorted information used by elf32_kvx_size_stubs.  */
520
  unsigned int bfd_count;
521
  unsigned int top_index;
522
  asection **input_list;
523
};
524
525
/* Create an entry in a KVX ELF linker hash table.  */
526
527
static struct bfd_hash_entry *
528
elf32_kvx_link_hash_newfunc (struct bfd_hash_entry *entry,
529
           struct bfd_hash_table *table,
530
           const char *string)
531
0
{
532
0
  struct elf_kvx_link_hash_entry *ret =
533
0
    (struct elf_kvx_link_hash_entry *) entry;
534
535
  /* Allocate the structure if it has not already been allocated by a
536
     subclass.  */
537
0
  if (ret == NULL)
538
0
    ret = bfd_hash_allocate (table,
539
0
           sizeof (struct elf_kvx_link_hash_entry));
540
0
  if (ret == NULL)
541
0
    return (struct bfd_hash_entry *) ret;
542
543
  /* Call the allocation method of the superclass.  */
544
0
  ret = ((struct elf_kvx_link_hash_entry *)
545
0
   _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
546
0
             table, string));
547
0
  if (ret != NULL)
548
0
    {
549
0
      ret->got_type = GOT_UNKNOWN;
550
0
      ret->plt_got_offset = (bfd_vma) - 1;
551
0
      ret->stub_cache = NULL;
552
0
    }
553
554
0
  return (struct bfd_hash_entry *) ret;
555
0
}
556
557
/* Initialize an entry in the stub hash table.  */
558
559
static struct bfd_hash_entry *
560
stub_hash_newfunc (struct bfd_hash_entry *entry,
561
       struct bfd_hash_table *table, const char *string)
562
0
{
563
  /* Allocate the structure if it has not already been allocated by a
564
     subclass.  */
565
0
  if (entry == NULL)
566
0
    {
567
0
      entry = bfd_hash_allocate (table,
568
0
         sizeof (struct
569
0
           elf_kvx_stub_hash_entry));
570
0
      if (entry == NULL)
571
0
  return entry;
572
0
    }
573
574
  /* Call the allocation method of the superclass.  */
575
0
  entry = bfd_hash_newfunc (entry, table, string);
576
0
  if (entry != NULL)
577
0
    {
578
0
      struct elf_kvx_stub_hash_entry *eh;
579
580
      /* Initialize the local fields.  */
581
0
      eh = (struct elf_kvx_stub_hash_entry *) entry;
582
0
      eh->stub_sec = NULL;
583
0
      eh->stub_offset = 0;
584
0
      eh->target_value = 0;
585
0
      eh->target_section = NULL;
586
0
      eh->stub_type = kvx_stub_none;
587
0
      eh->h = NULL;
588
0
      eh->id_sec = NULL;
589
0
    }
590
591
0
  return entry;
592
0
}
593
594
/* Copy the extra info we tack onto an elf_link_hash_entry.  */
595
596
static void
597
elf32_kvx_copy_indirect_symbol (struct bfd_link_info *info,
598
        struct elf_link_hash_entry *dir,
599
        struct elf_link_hash_entry *ind)
600
0
{
601
0
  struct elf_kvx_link_hash_entry *edir, *eind;
602
603
0
  edir = (struct elf_kvx_link_hash_entry *) dir;
604
0
  eind = (struct elf_kvx_link_hash_entry *) ind;
605
606
0
  if (ind->root.type == bfd_link_hash_indirect)
607
0
    {
608
      /* Copy over PLT info.  */
609
0
      if (dir->got.refcount <= 0)
610
0
  {
611
0
    edir->got_type = eind->got_type;
612
0
    eind->got_type = GOT_UNKNOWN;
613
0
  }
614
0
    }
615
616
0
  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
617
0
}
618
619
/* Destroy a KVX elf linker hash table.  */
620
621
static void
622
elf32_kvx_link_hash_table_free (bfd *obfd)
623
0
{
624
0
  struct elf_kvx_link_hash_table *ret
625
0
    = (struct elf_kvx_link_hash_table *) obfd->link.hash;
626
627
0
  bfd_hash_table_free (&ret->stub_hash_table);
628
0
  _bfd_elf_link_hash_table_free (obfd);
629
0
}
630
631
/* Create a KVX elf linker hash table.  */
632
633
static struct bfd_link_hash_table *
634
elf32_kvx_link_hash_table_create (bfd *abfd)
635
0
{
636
0
  struct elf_kvx_link_hash_table *ret;
637
0
  bfd_size_type amt = sizeof (struct elf_kvx_link_hash_table);
638
639
0
  ret = bfd_zmalloc (amt);
640
0
  if (ret == NULL)
641
0
    return NULL;
642
643
0
  if (!_bfd_elf_link_hash_table_init
644
0
      (&ret->root, abfd, elf32_kvx_link_hash_newfunc,
645
0
       sizeof (struct elf_kvx_link_hash_entry), KVX_ELF_DATA))
646
0
    {
647
0
      free (ret);
648
0
      return NULL;
649
0
    }
650
651
0
  ret->plt_header_size = PLT_ENTRY_SIZE;
652
0
  ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
653
0
  ret->plt_entry = elf32_kvx_small_plt_entry;
654
655
0
  ret->obfd = abfd;
656
657
0
  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
658
0
          sizeof (struct elf_kvx_stub_hash_entry)))
659
0
    {
660
0
      _bfd_elf_link_hash_table_free (abfd);
661
0
      return NULL;
662
0
    }
663
664
0
  ret->root.root.hash_table_free = elf32_kvx_link_hash_table_free;
665
666
0
  return &ret->root.root;
667
0
}
668
669
static bfd_reloc_status_type
670
kvx_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
671
        bfd_vma offset, bfd_vma value)
672
0
{
673
0
  reloc_howto_type *howto;
674
675
0
  howto = elf32_kvx_howto_from_type (input_bfd, r_type);
676
0
  r_type = elf32_kvx_bfd_reloc_from_type (input_bfd, r_type);
677
0
  return _bfd_kvx_elf_put_addend (input_bfd,
678
0
          input_section->contents + offset, r_type,
679
0
          howto, value);
680
0
}
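/* Illustrative note: kvx_relocate is a thin wrapper that maps the raw
   R_KVX_* number to its howto and BFD reloc code, then lets
   _bfd_kvx_elf_put_addend patch VALUE into the section contents at OFFSET;
   kvx_build_one_stub below uses it to resolve the long-branch template
   against the stub's final target.  */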
681
682
/* Determine the type of stub needed, if any, for a call.  */
683
684
static enum elf_kvx_stub_type
685
kvx_type_of_stub (asection *input_sec,
686
      const Elf_Internal_Rela *rel,
687
      asection *sym_sec,
688
      unsigned char st_type,
689
      bfd_vma destination)
690
0
{
691
0
  bfd_vma location;
692
0
  bfd_signed_vma branch_offset;
693
0
  unsigned int r_type;
694
0
  enum elf_kvx_stub_type stub_type = kvx_stub_none;
695
696
0
  if (st_type != STT_FUNC
697
0
      && (sym_sec == input_sec))
698
0
    return stub_type;
699
700
  /* Determine where the call point is.  */
701
0
  location = (input_sec->output_offset
702
0
        + input_sec->output_section->vma + rel->r_offset);
703
704
0
  branch_offset = (bfd_signed_vma) (destination - location);
705
706
0
  r_type = ELF32_R_TYPE (rel->r_info);
707
708
  /* We don't want to redirect any old unconditional jump in this way,
709
     only one which is being used for a sibcall, where it is
710
     acceptable for the R16 and R17 registers to be clobbered.  */
711
0
  if (r_type == R_KVX_PCREL27
712
0
      && (branch_offset > KVX_MAX_FWD_CALL_OFFSET
713
0
    || branch_offset < KVX_MAX_BWD_CALL_OFFSET))
714
0
    {
715
0
      stub_type = kvx_stub_long_branch;
716
0
    }
717
718
0
  return stub_type;
719
0
}
720
721
/* Build a name for an entry in the stub hash table.  */
722
723
static char *
724
elf32_kvx_stub_name (const asection *input_section,
725
         const asection *sym_sec,
726
         const struct elf_kvx_link_hash_entry *hash,
727
         const Elf_Internal_Rela *rel)
728
0
{
729
0
  char *stub_name;
730
0
  bfd_size_type len;
731
732
0
  if (hash)
733
0
    {
734
0
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
735
0
      stub_name = bfd_malloc (len);
736
0
      if (stub_name != NULL)
737
0
  snprintf (stub_name, len, "%08x_%s+%" PRIx64 "x",
738
0
      (unsigned int) input_section->id,
739
0
      hash->root.root.root.string,
740
0
      (uint64_t) rel->r_addend);
741
0
    }
742
0
  else
743
0
    {
744
0
      len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
745
0
      stub_name = bfd_malloc (len);
746
0
      if (stub_name != NULL)
747
0
  snprintf (stub_name, len, "%08x_%x:%x+%" PRIx64 "x",
748
0
      (unsigned int) input_section->id,
749
0
      (unsigned int) sym_sec->id,
750
0
      (unsigned int) ELF32_R_SYM (rel->r_info),
751
0
      (uint64_t) rel->r_addend);
752
0
    }
753
754
0
  return stub_name;
755
0
}
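/* Example of the keys built above: a stub for a global symbol "memcpy"
   reached from the section group whose id is 0x42, with addend 0, is hashed
   under "00000042_memcpy+0x" (the trailing 'x' is the literal left in the
   format string after PRIx64); local symbols substitute the symbol section
   id and symbol index for the name.  */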
756
757
/* Return true if symbol H should be hashed in the `.gnu.hash' section.  For
758
   executable PLT slots where the executable never takes the address of those
759
   functions, the function symbols are not added to the hash table.  */
760
761
static bool
762
elf_kvx_hash_symbol (struct elf_link_hash_entry *h)
763
0
{
764
0
  if (h->plt.offset != (bfd_vma) -1
765
0
      && !h->def_regular
766
0
      && !h->pointer_equality_needed)
767
0
    return false;
768
769
0
  return _bfd_elf_hash_symbol (h);
770
0
}
771
772
773
/* Look up an entry in the stub hash.  Stub entries are cached because
774
   creating the stub name takes a bit of time.  */
775
776
static struct elf_kvx_stub_hash_entry *
777
elf32_kvx_get_stub_entry (const asection *input_section,
778
        const asection *sym_sec,
779
        struct elf_link_hash_entry *hash,
780
        const Elf_Internal_Rela *rel,
781
        struct elf_kvx_link_hash_table *htab)
782
0
{
783
0
  struct elf_kvx_stub_hash_entry *stub_entry;
784
0
  struct elf_kvx_link_hash_entry *h =
785
0
    (struct elf_kvx_link_hash_entry *) hash;
786
0
  const asection *id_sec;
787
788
0
  if ((input_section->flags & SEC_CODE) == 0)
789
0
    return NULL;
790
791
  /* If this input section is part of a group of sections sharing one
792
     stub section, then use the id of the first section in the group.
793
     Stub names need to include a section id, as there may well be
794
     more than one stub used to reach say, printf, and we need to
795
     distinguish between them.  */
796
0
  id_sec = htab->stub_group[input_section->id].link_sec;
797
798
0
  if (h != NULL && h->stub_cache != NULL
799
0
      && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
800
0
    {
801
0
      stub_entry = h->stub_cache;
802
0
    }
803
0
  else
804
0
    {
805
0
      char *stub_name;
806
807
0
      stub_name = elf32_kvx_stub_name (id_sec, sym_sec, h, rel);
808
0
      if (stub_name == NULL)
809
0
  return NULL;
810
811
0
      stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table,
812
0
           stub_name, false, false);
813
0
      if (h != NULL)
814
0
  h->stub_cache = stub_entry;
815
816
0
      free (stub_name);
817
0
    }
818
819
0
  return stub_entry;
820
0
}
821
822
823
/* Create a stub section.  */
824
825
static asection *
826
_bfd_kvx_create_stub_section (asection *section,
827
            struct elf_kvx_link_hash_table *htab)
828
829
0
{
830
0
  size_t namelen;
831
0
  bfd_size_type len;
832
0
  char *s_name;
833
834
0
  namelen = strlen (section->name);
835
0
  len = namelen + sizeof (STUB_SUFFIX);
836
0
  s_name = bfd_alloc (htab->stub_bfd, len);
837
0
  if (s_name == NULL)
838
0
    return NULL;
839
840
0
  memcpy (s_name, section->name, namelen);
841
0
  memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
842
0
  return (*htab->add_stub_section) (s_name, section);
843
0
}
844
845
846
/* Find or create a stub section for a link section.
847
848
   Find or create the stub section used to collect stubs attached to
849
   the specified link section.  */
850
851
static asection *
852
_bfd_kvx_get_stub_for_link_section (asection *link_section,
853
            struct elf_kvx_link_hash_table *htab)
854
0
{
855
0
  if (htab->stub_group[link_section->id].stub_sec == NULL)
856
0
    htab->stub_group[link_section->id].stub_sec
857
0
      = _bfd_kvx_create_stub_section (link_section, htab);
858
0
  return htab->stub_group[link_section->id].stub_sec;
859
0
}
860
861
862
/* Find or create a stub section in the stub group for an input
863
   section.  */
864
865
static asection *
866
_bfd_kvx_create_or_find_stub_sec (asection *section,
867
          struct elf_kvx_link_hash_table *htab)
868
0
{
869
0
  asection *link_sec = htab->stub_group[section->id].link_sec;
870
0
  return _bfd_kvx_get_stub_for_link_section (link_sec, htab);
871
0
}
872
873
874
/* Add a new stub entry in the stub group associated with an input
875
   section to the stub hash.  Not all fields of the new stub entry are
876
   initialised.  */
877
878
static struct elf_kvx_stub_hash_entry *
879
_bfd_kvx_add_stub_entry_in_group (const char *stub_name,
880
          asection *section,
881
          struct elf_kvx_link_hash_table *htab)
882
0
{
883
0
  asection *link_sec;
884
0
  asection *stub_sec;
885
0
  struct elf_kvx_stub_hash_entry *stub_entry;
886
887
0
  link_sec = htab->stub_group[section->id].link_sec;
888
0
  stub_sec = _bfd_kvx_create_or_find_stub_sec (section, htab);
889
890
  /* Enter this entry into the linker stub hash table.  */
891
0
  stub_entry = kvx_stub_hash_lookup (&htab->stub_hash_table, stub_name,
892
0
             true, false);
893
0
  if (stub_entry == NULL)
894
0
    {
895
      /* xgettext:c-format */
896
0
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
897
0
        section->owner, stub_name);
898
0
      return NULL;
899
0
    }
900
901
0
  stub_entry->stub_sec = stub_sec;
902
0
  stub_entry->stub_offset = 0;
903
0
  stub_entry->id_sec = link_sec;
904
905
0
  return stub_entry;
906
0
}
907
908
static bool
909
kvx_build_one_stub (struct bfd_hash_entry *gen_entry,
910
        void *in_arg)
911
0
{
912
0
  struct elf_kvx_stub_hash_entry *stub_entry;
913
0
  asection *stub_sec;
914
0
  bfd *stub_bfd;
915
0
  bfd_byte *loc;
916
0
  bfd_vma sym_value;
917
0
  unsigned int template_size;
918
0
  const uint32_t *template;
919
0
  unsigned int i;
920
0
  struct bfd_link_info *info;
921
922
  /* Massage our args to the form they really have.  */
923
0
  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
924
925
0
  info = (struct bfd_link_info *) in_arg;
926
927
  /* Fail if the target section could not be assigned to an output
928
     section.  The user should fix his linker script.  */
929
0
  if (stub_entry->target_section->output_section == NULL
930
0
      && info->non_contiguous_regions)
931
0
    info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
932
0
            "Retry without "
933
0
            "--enable-non-contiguous-regions.\n"),
934
0
          stub_entry->target_section);
935
936
0
  stub_sec = stub_entry->stub_sec;
937
938
  /* Make a note of the offset within the stubs for this entry.  */
939
0
  stub_entry->stub_offset = stub_sec->size;
940
0
  loc = stub_sec->contents + stub_entry->stub_offset;
941
942
0
  stub_bfd = stub_sec->owner;
943
944
  /* This is the address of the stub destination.  */
945
0
  sym_value = (stub_entry->target_value
946
0
         + stub_entry->target_section->output_offset
947
0
         + stub_entry->target_section->output_section->vma);
948
949
0
  switch (stub_entry->stub_type)
950
0
    {
951
0
    case kvx_stub_long_branch:
952
0
      template = elf32_kvx_long_branch_stub;
953
0
      template_size = sizeof (elf32_kvx_long_branch_stub);
954
0
      break;
955
0
    default:
956
0
      abort ();
957
0
    }
958
959
0
  for (i = 0; i < (template_size / sizeof template[0]); i++)
960
0
    {
961
0
      bfd_putl32 (template[i], loc);
962
0
      loc += 4;
963
0
    }
964
965
0
  stub_sec->size += template_size;
966
967
0
  switch (stub_entry->stub_type)
968
0
    {
969
0
    case kvx_stub_long_branch:
970
      /* The stub uses a make insn with a 43-bit immediate.
971
   We need to apply 3 relocations:
972
   BFD_RELOC_KVX_S43_LO10,
973
   BFD_RELOC_KVX_S43_UP27,
974
   BFD_RELOC_KVX_S43_EX6.  */
975
0
      if (kvx_relocate (R_KVX_S43_LO10, stub_bfd, stub_sec,
976
0
      stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
977
0
  BFD_FAIL ();
978
0
      if (kvx_relocate (R_KVX_S43_EX6, stub_bfd, stub_sec,
979
0
      stub_entry->stub_offset, sym_value) != bfd_reloc_ok)
980
0
  BFD_FAIL ();
981
0
      if (kvx_relocate (R_KVX_S43_UP27, stub_bfd, stub_sec,
982
0
      stub_entry->stub_offset + 4, sym_value) != bfd_reloc_ok)
983
0
  BFD_FAIL ();
984
0
      break;
985
0
    default:
986
0
      abort ();
987
0
    }
988
989
0
  return true;
990
0
}
991
992
/* As above, but don't actually build the stub.  Just bump offset so
993
   we know stub section sizes.  */
994
995
static bool
996
kvx_size_one_stub (struct bfd_hash_entry *gen_entry,
997
       void *in_arg ATTRIBUTE_UNUSED)
998
0
{
999
0
  struct elf_kvx_stub_hash_entry *stub_entry;
1000
0
  int size;
1001
1002
  /* Massage our args to the form they really have.  */
1003
0
  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
1004
1005
0
  switch (stub_entry->stub_type)
1006
0
    {
1007
0
    case kvx_stub_long_branch:
1008
0
      size = sizeof (elf32_kvx_long_branch_stub);
1009
0
      break;
1010
0
    default:
1011
0
      abort ();
1012
0
    }
1013
1014
0
  stub_entry->stub_sec->size += size;
1015
0
  return true;
1016
0
}
1017
1018
/* External entry points for sizing and building linker stubs.  */
1019
1020
/* Set up various things so that we can make a list of input sections
1021
   for each output section included in the link.  Returns -1 on error,
1022
   0 when no stubs will be needed, and 1 on success.  */
1023
1024
int
1025
elf32_kvx_setup_section_lists (bfd *output_bfd,
1026
             struct bfd_link_info *info)
1027
0
{
1028
0
  bfd *input_bfd;
1029
0
  unsigned int bfd_count;
1030
0
  unsigned int top_id, top_index;
1031
0
  asection *section;
1032
0
  asection **input_list, **list;
1033
0
  bfd_size_type amt;
1034
0
  struct elf_kvx_link_hash_table *htab =
1035
0
    elf_kvx_hash_table (info);
1036
1037
0
  if (!is_elf_hash_table ((const struct bfd_link_hash_table *)htab))
1038
0
    return 0;
1039
1040
  /* Count the number of input BFDs and find the top input section id.  */
1041
0
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
1042
0
       input_bfd != NULL; input_bfd = input_bfd->link.next)
1043
0
    {
1044
0
      bfd_count += 1;
1045
0
      for (section = input_bfd->sections;
1046
0
     section != NULL; section = section->next)
1047
0
  {
1048
0
    if (top_id < section->id)
1049
0
      top_id = section->id;
1050
0
  }
1051
0
    }
1052
0
  htab->bfd_count = bfd_count;
1053
1054
0
  amt = sizeof (struct map_stub) * (top_id + 1);
1055
0
  htab->stub_group = bfd_zmalloc (amt);
1056
0
  if (htab->stub_group == NULL)
1057
0
    return -1;
1058
1059
  /* We can't use output_bfd->section_count here to find the top output
1060
     section index as some sections may have been removed, and
1061
     _bfd_strip_section_from_output doesn't renumber the indices.  */
1062
0
  for (section = output_bfd->sections, top_index = 0;
1063
0
       section != NULL; section = section->next)
1064
0
    {
1065
0
      if (top_index < section->index)
1066
0
  top_index = section->index;
1067
0
    }
1068
1069
0
  htab->top_index = top_index;
1070
0
  amt = sizeof (asection *) * (top_index + 1);
1071
0
  input_list = bfd_malloc (amt);
1072
0
  htab->input_list = input_list;
1073
0
  if (input_list == NULL)
1074
0
    return -1;
1075
1076
  /* For sections we aren't interested in, mark their entries with a
1077
     value we can check later.  */
1078
0
  list = input_list + top_index;
1079
0
  do
1080
0
    *list = bfd_abs_section_ptr;
1081
0
  while (list-- != input_list);
1082
1083
0
  for (section = output_bfd->sections;
1084
0
       section != NULL; section = section->next)
1085
0
    {
1086
0
      if ((section->flags & SEC_CODE) != 0)
1087
0
  input_list[section->index] = NULL;
1088
0
    }
1089
1090
0
  return 1;
1091
0
}
1092
1093
/* Used by elf32_kvx_next_input_section and group_sections.  */
1094
0
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
1095
1096
/* The linker repeatedly calls this function for each input section,
1097
   in the order that input sections are linked into output sections.
1098
   Build lists of input sections to determine groupings between which
1099
   we may insert linker stubs.  */
1100
1101
void
1102
elf32_kvx_next_input_section (struct bfd_link_info *info, asection *isec)
1103
0
{
1104
0
  struct elf_kvx_link_hash_table *htab =
1105
0
    elf_kvx_hash_table (info);
1106
1107
0
  if (isec->output_section->index <= htab->top_index)
1108
0
    {
1109
0
      asection **list = htab->input_list + isec->output_section->index;
1110
1111
0
      if (*list != bfd_abs_section_ptr)
1112
0
  {
1113
    /* Steal the link_sec pointer for our list.  */
1114
    /* This happens to make the list in reverse order,
1115
       which is what we want.  */
1116
0
    PREV_SEC (isec) = *list;
1117
0
    *list = isec;
1118
0
  }
1119
0
    }
1120
0
}
1121
1122
/* See whether we can group stub sections together.  Grouping stub
1123
   sections may result in fewer stubs.  More importantly, we need to
1124
   put all .init* and .fini* stubs at the beginning of the .init or
1125
   .fini output sections respectively, because glibc splits the
1126
   _init and _fini functions into multiple parts.  Putting a stub in
1127
   the middle of a function is not a good idea.  */
1128
1129
static void
1130
group_sections (struct elf_kvx_link_hash_table *htab,
1131
    bfd_size_type stub_group_size,
1132
    bool stubs_always_after_branch)
1133
0
{
1134
0
  asection **list = htab->input_list;
1135
1136
0
  do
1137
0
    {
1138
0
      asection *tail = *list;
1139
0
      asection *head;
1140
1141
0
      if (tail == bfd_abs_section_ptr)
1142
0
  continue;
1143
1144
      /* Reverse the list: we must avoid placing stubs at the
1145
   beginning of the section because the beginning of the text
1146
   section may be required for an interrupt vector in bare metal
1147
   code.  */
1148
0
#define NEXT_SEC PREV_SEC
1149
0
      head = NULL;
1150
0
      while (tail != NULL)
1151
0
  {
1152
    /* Pop from tail.  */
1153
0
    asection *item = tail;
1154
0
    tail = PREV_SEC (item);
1155
1156
    /* Push on head.  */
1157
0
    NEXT_SEC (item) = head;
1158
0
    head = item;
1159
0
  }
1160
1161
0
      while (head != NULL)
1162
0
  {
1163
0
    asection *curr;
1164
0
    asection *next;
1165
0
    bfd_vma stub_group_start = head->output_offset;
1166
0
    bfd_vma end_of_next;
1167
1168
0
    curr = head;
1169
0
    while (NEXT_SEC (curr) != NULL)
1170
0
      {
1171
0
        next = NEXT_SEC (curr);
1172
0
        end_of_next = next->output_offset + next->size;
1173
0
        if (end_of_next - stub_group_start >= stub_group_size)
1174
    /* End of NEXT is too far from start, so stop.  */
1175
0
    break;
1176
        /* Add NEXT to the group.  */
1177
0
        curr = next;
1178
0
      }
1179
1180
    /* OK, the size from the start to the start of CURR is less
1181
       than stub_group_size and thus can be handled by one stub
1182
       section.  (Or the head section is itself larger than
1183
       stub_group_size, in which case we may be toast.)
1184
       We should really be keeping track of the total size of
1185
       stubs added here, as stubs contribute to the final output
1186
       section size.  */
1187
0
    do
1188
0
      {
1189
0
        next = NEXT_SEC (head);
1190
        /* Set up this stub group.  */
1191
0
        htab->stub_group[head->id].link_sec = curr;
1192
0
      }
1193
0
    while (head != curr && (head = next) != NULL);
1194
1195
    /* But wait, there's more!  Input sections up to stub_group_size
1196
       bytes after the stub section can be handled by it too.  */
1197
0
    if (!stubs_always_after_branch)
1198
0
      {
1199
0
        stub_group_start = curr->output_offset + curr->size;
1200
1201
0
        while (next != NULL)
1202
0
    {
1203
0
      end_of_next = next->output_offset + next->size;
1204
0
      if (end_of_next - stub_group_start >= stub_group_size)
1205
        /* End of NEXT is too far from stubs, so stop.  */
1206
0
        break;
1207
      /* Add NEXT to the stub group.  */
1208
0
      head = next;
1209
0
      next = NEXT_SEC (head);
1210
0
      htab->stub_group[head->id].link_sec = curr;
1211
0
    }
1212
0
      }
1213
0
    head = next;
1214
0
  }
1215
0
    }
1216
0
  while (list++ != htab->input_list + htab->top_index);
1217
1218
0
  free (htab->input_list);
1219
0
}
1220
1221
static void
1222
_bfd_kvx_resize_stubs (struct elf_kvx_link_hash_table *htab)
1223
0
{
1224
0
  asection *section;
1225
1226
  /* OK, we've added some stubs.  Find out the new size of the
1227
     stub sections.  */
1228
0
  for (section = htab->stub_bfd->sections;
1229
0
       section != NULL; section = section->next)
1230
0
    {
1231
      /* Ignore non-stub sections.  */
1232
0
      if (!strstr (section->name, STUB_SUFFIX))
1233
0
  continue;
1234
0
      section->size = 0;
1235
0
    }
1236
1237
0
  bfd_hash_traverse (&htab->stub_hash_table, kvx_size_one_stub, htab);
1238
0
}
1239
1240
/* Satisfy the ELF linker by filling in some fields in our fake bfd.  */
1241
1242
bool
1243
kvx_elf32_init_stub_bfd (struct bfd_link_info *info,
1244
      bfd *stub_bfd)
1245
0
{
1246
0
  struct elf_kvx_link_hash_table *htab;
1247
1248
0
  elf_elfheader (stub_bfd)->e_ident[EI_CLASS] = ELFCLASS32;
1249
1250
/* Always hook our dynamic sections into the first bfd, which is the
1251
   linker created stub bfd.  This ensures that the GOT header is at
1252
   the start of the output TOC section.  */
1253
0
  htab = elf_kvx_hash_table (info);
1254
0
  if (htab == NULL)
1255
0
    return false;
1256
1257
0
  return true;
1258
0
}
1259
1260
/* Determine and set the size of the stub section for a final link.
1261
1262
   The basic idea here is to examine all the relocations looking for
1263
   PC-relative calls to a target that is unreachable with a 27-bit
1264
   immediate (found in call and goto).  */
1265
1266
bool
1267
elf32_kvx_size_stubs (bfd *output_bfd,
1268
         bfd *stub_bfd,
1269
         struct bfd_link_info *info,
1270
         bfd_signed_vma group_size,
1271
         asection * (*add_stub_section) (const char *,
1272
                 asection *),
1273
         void (*layout_sections_again) (void))
1274
0
{
1275
0
  bfd_size_type stub_group_size;
1276
0
  bool stubs_always_before_branch;
1277
0
  bool stub_changed = false;
1278
0
  struct elf_kvx_link_hash_table *htab = elf_kvx_hash_table (info);
1279
1280
  /* Propagate mach to stub bfd, because it may not have been
1281
     finalized when we created stub_bfd.  */
1282
0
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
1283
0
         bfd_get_mach (output_bfd));
1284
1285
  /* Stash our params away.  */
1286
0
  htab->stub_bfd = stub_bfd;
1287
0
  htab->add_stub_section = add_stub_section;
1288
0
  htab->layout_sections_again = layout_sections_again;
1289
0
  stubs_always_before_branch = group_size < 0;
1290
0
  if (group_size < 0)
1291
0
    stub_group_size = -group_size;
1292
0
  else
1293
0
    stub_group_size = group_size;
1294
1295
0
  if (stub_group_size == 1)
1296
0
    {
1297
      /* Default values.  */
1298
      /* KVX branch range is +-256MB. The value used is 1MB less.  */
1299
0
      stub_group_size = 255 * 1024 * 1024;
1300
0
    }
1301
1302
0
  group_sections (htab, stub_group_size, stubs_always_before_branch);
1303
1304
0
  (*htab->layout_sections_again) ();
1305
1306
0
  while (1)
1307
0
    {
1308
0
      bfd *input_bfd;
1309
1310
0
      for (input_bfd = info->input_bfds;
1311
0
     input_bfd != NULL; input_bfd = input_bfd->link.next)
1312
0
  {
1313
0
    Elf_Internal_Shdr *symtab_hdr;
1314
0
    asection *section;
1315
0
    Elf_Internal_Sym *local_syms = NULL;
1316
1317
0
    if (!is_kvx_elf (input_bfd)
1318
0
        || (input_bfd->flags & BFD_LINKER_CREATED) != 0)
1319
0
      continue;
1320
1321
    /* We'll need the symbol table in a second.  */
1322
0
    symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
1323
0
    if (symtab_hdr->sh_info == 0)
1324
0
      continue;
1325
1326
    /* Walk over each section attached to the input bfd.  */
1327
0
    for (section = input_bfd->sections;
1328
0
         section != NULL; section = section->next)
1329
0
      {
1330
0
        Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1331
1332
        /* If there aren't any relocs, then there's nothing more
1333
     to do.  */
1334
0
        if ((section->flags & SEC_RELOC) == 0
1335
0
      || section->reloc_count == 0
1336
0
      || (section->flags & SEC_CODE) == 0)
1337
0
    continue;
1338
1339
        /* If this section is a link-once section that will be
1340
     discarded, then don't create any stubs.  */
1341
0
        if (section->output_section == NULL
1342
0
      || section->output_section->owner != output_bfd)
1343
0
    continue;
1344
1345
        /* Get the relocs.  */
1346
0
        internal_relocs
1347
0
    = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
1348
0
               NULL, info->keep_memory);
1349
0
        if (internal_relocs == NULL)
1350
0
    goto error_ret_free_local;
1351
1352
        /* Now examine each relocation.  */
1353
0
        irela = internal_relocs;
1354
0
        irelaend = irela + section->reloc_count;
1355
0
        for (; irela < irelaend; irela++)
1356
0
    {
1357
0
      unsigned int r_type, r_indx;
1358
0
      enum elf_kvx_stub_type stub_type;
1359
0
      struct elf_kvx_stub_hash_entry *stub_entry;
1360
0
      asection *sym_sec;
1361
0
      bfd_vma sym_value;
1362
0
      bfd_vma destination;
1363
0
      struct elf_kvx_link_hash_entry *hash;
1364
0
      const char *sym_name;
1365
0
      char *stub_name;
1366
0
      const asection *id_sec;
1367
0
      unsigned char st_type;
1368
0
      bfd_size_type len;
1369
1370
0
      r_type = ELF32_R_TYPE (irela->r_info);
1371
0
      r_indx = ELF32_R_SYM (irela->r_info);
1372
1373
0
      if (r_type >= (unsigned int) R_KVX_end)
1374
0
        {
1375
0
          bfd_set_error (bfd_error_bad_value);
1376
0
        error_ret_free_internal:
1377
0
          if (elf_section_data (section)->relocs == NULL)
1378
0
      free (internal_relocs);
1379
0
          goto error_ret_free_local;
1380
0
        }
1381
1382
      /* Only look for stubs on unconditional branch and
1383
         branch and link instructions.  */
1384
      /* This catches CALL and GOTO insns.  */
1385
0
      if (r_type != (unsigned int) R_KVX_PCREL27)
1386
0
        continue;
1387
1388
      /* Now determine the call target, its name, value,
1389
         section.  */
1390
0
      sym_sec = NULL;
1391
0
      sym_value = 0;
1392
0
      destination = 0;
1393
0
      hash = NULL;
1394
0
      sym_name = NULL;
1395
0
      if (r_indx < symtab_hdr->sh_info)
1396
0
        {
1397
          /* It's a local symbol.  */
1398
0
          Elf_Internal_Sym *sym;
1399
0
          Elf_Internal_Shdr *hdr;
1400
1401
0
          if (local_syms == NULL)
1402
0
      {
1403
0
        local_syms
1404
0
          = (Elf_Internal_Sym *) symtab_hdr->contents;
1405
0
        if (local_syms == NULL)
1406
0
          local_syms
1407
0
            = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
1408
0
                  symtab_hdr->sh_info, 0,
1409
0
                  NULL, NULL, NULL);
1410
0
        if (local_syms == NULL)
1411
0
          goto error_ret_free_internal;
1412
0
      }
1413
1414
0
          sym = local_syms + r_indx;
1415
0
          hdr = elf_elfsections (input_bfd)[sym->st_shndx];
1416
0
          sym_sec = hdr->bfd_section;
1417
0
          if (!sym_sec)
1418
      /* This is an undefined symbol.  It can never
1419
         be resolved.  */
1420
0
      continue;
1421
1422
0
          if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
1423
0
      sym_value = sym->st_value;
1424
0
          destination = (sym_value + irela->r_addend
1425
0
             + sym_sec->output_offset
1426
0
             + sym_sec->output_section->vma);
1427
0
          st_type = ELF_ST_TYPE (sym->st_info);
1428
0
          sym_name
1429
0
      = bfd_elf_string_from_elf_section (input_bfd,
1430
0
                 symtab_hdr->sh_link,
1431
0
                 sym->st_name);
1432
0
        }
1433
0
      else
1434
0
        {
1435
0
          int e_indx;
1436
1437
0
          e_indx = r_indx - symtab_hdr->sh_info;
1438
0
          hash = ((struct elf_kvx_link_hash_entry *)
1439
0
            elf_sym_hashes (input_bfd)[e_indx]);
1440
1441
0
          while (hash->root.root.type == bfd_link_hash_indirect
1442
0
           || hash->root.root.type == bfd_link_hash_warning)
1443
0
      hash = ((struct elf_kvx_link_hash_entry *)
1444
0
        hash->root.root.u.i.link);
1445
1446
0
          if (hash->root.root.type == bfd_link_hash_defined
1447
0
        || hash->root.root.type == bfd_link_hash_defweak)
1448
0
      {
1449
0
        struct elf_kvx_link_hash_table *globals =
1450
0
          elf_kvx_hash_table (info);
1451
0
        sym_sec = hash->root.root.u.def.section;
1452
0
        sym_value = hash->root.root.u.def.value;
1453
        /* For a destination in a shared library,
1454
           use the PLT stub as target address to
1455
           decide whether a branch stub is
1456
           needed.  */
1457
0
        if (globals->root.splt != NULL && hash != NULL
1458
0
            && hash->root.plt.offset != (bfd_vma) - 1)
1459
0
          {
1460
0
            sym_sec = globals->root.splt;
1461
0
            sym_value = hash->root.plt.offset;
1462
0
            if (sym_sec->output_section != NULL)
1463
0
        destination = (sym_value
1464
0
                 + sym_sec->output_offset
1465
0
                 + sym_sec->output_section->vma);
1466
0
          }
1467
0
        else if (sym_sec->output_section != NULL)
1468
0
          destination = (sym_value + irela->r_addend
1469
0
             + sym_sec->output_offset
1470
0
             + sym_sec->output_section->vma);
1471
0
      }
1472
0
          else if (hash->root.root.type == bfd_link_hash_undefined
1473
0
             || (hash->root.root.type
1474
0
           == bfd_link_hash_undefweak))
1475
0
      {
1476
        /* For a shared library, use the PLT stub as
1477
           target address to decide whether a long
1478
           branch stub is needed.
1479
           For absolute code, they cannot be handled.  */
1480
0
        struct elf_kvx_link_hash_table *globals =
1481
0
          elf_kvx_hash_table (info);
1482
1483
0
        if (globals->root.splt != NULL && hash != NULL
1484
0
            && hash->root.plt.offset != (bfd_vma) - 1)
1485
0
          {
1486
0
            sym_sec = globals->root.splt;
1487
0
            sym_value = hash->root.plt.offset;
1488
0
            if (sym_sec->output_section != NULL)
1489
0
        destination = (sym_value
1490
0
                 + sym_sec->output_offset
1491
0
                 + sym_sec->output_section->vma);
1492
0
          }
1493
0
        else
1494
0
          continue;
1495
0
      }
1496
0
          else
1497
0
      {
1498
0
        bfd_set_error (bfd_error_bad_value);
1499
0
        goto error_ret_free_internal;
1500
0
      }
1501
0
          st_type = ELF_ST_TYPE (hash->root.type);
1502
0
          sym_name = hash->root.root.root.string;
1503
0
        }
1504
1505
      /* Determine what (if any) linker stub is needed.  */
1506
0
      stub_type = kvx_type_of_stub (section, irela, sym_sec,
1507
0
            st_type, destination);
1508
0
      if (stub_type == kvx_stub_none)
1509
0
        continue;
1510
1511
      /* Support for grouping stub sections.  */
1512
0
      id_sec = htab->stub_group[section->id].link_sec;
1513
1514
      /* Get the name of this stub.  */
1515
0
      stub_name = elf32_kvx_stub_name (id_sec, sym_sec, hash,
1516
0
              irela);
1517
0
      if (!stub_name)
1518
0
        goto error_ret_free_internal;
1519
1520
0
      stub_entry =
1521
0
        kvx_stub_hash_lookup (&htab->stub_hash_table,
1522
0
           stub_name, false, false);
1523
0
      if (stub_entry != NULL)
1524
0
        {
1525
          /* The proper stub has already been created.  */
1526
0
          free (stub_name);
1527
          /* Always update this stub's target since it may have
1528
       changed after layout.  */
1529
0
          stub_entry->target_value = sym_value + irela->r_addend;
1530
0
          continue;
1531
0
        }
1532
1533
0
      stub_entry = _bfd_kvx_add_stub_entry_in_group
1534
0
        (stub_name, section, htab);
1535
0
      if (stub_entry == NULL)
1536
0
        {
1537
0
          free (stub_name);
1538
0
          goto error_ret_free_internal;
1539
0
        }
1540
1541
0
      stub_entry->target_value = sym_value + irela->r_addend;
1542
0
      stub_entry->target_section = sym_sec;
1543
0
      stub_entry->stub_type = stub_type;
1544
0
      stub_entry->h = hash;
1545
0
      stub_entry->st_type = st_type;
1546
1547
0
      if (sym_name == NULL)
1548
0
        sym_name = "unnamed";
1549
0
      len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
1550
0
      stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
1551
0
      if (stub_entry->output_name == NULL)
1552
0
        {
1553
0
          free (stub_name);
1554
0
          goto error_ret_free_internal;
1555
0
        }
1556
1557
0
      snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
1558
0
          sym_name);
1559
1560
0
      stub_changed = true;
1561
0
    }
1562
1563
        /* We're done with the internal relocs, free them.  */
1564
0
        if (elf_section_data (section)->relocs == NULL)
1565
0
    free (internal_relocs);
1566
0
      }
1567
0
  }
1568
1569
0
      if (!stub_changed)
1570
0
  break;
1571
1572
0
      _bfd_kvx_resize_stubs (htab);
1573
1574
      /* Ask the linker to do its stuff.  */
1575
0
      (*htab->layout_sections_again) ();
1576
0
      stub_changed = false;
1577
0
    }
1578
1579
0
  return true;
1580
1581
0
error_ret_free_local:
1582
0
  return false;
1583
1584
0
}
1585
1586
/* Build all the stubs associated with the current output file.  The
1587
   stubs are kept in a hash table attached to the main linker hash
1588
   table.  We also set up the .plt entries for statically linked PIC
1589
   functions here.  This function is called via kvx_elf_finish in the
1590
   linker.  */
1591
1592
bool
1593
elf32_kvx_build_stubs (struct bfd_link_info *info)
1594
0
{
1595
0
  asection *stub_sec;
1596
0
  struct bfd_hash_table *table;
1597
0
  struct elf_kvx_link_hash_table *htab;
1598
1599
0
  htab = elf_kvx_hash_table (info);
1600
1601
0
  for (stub_sec = htab->stub_bfd->sections;
1602
0
       stub_sec != NULL; stub_sec = stub_sec->next)
1603
0
    {
1604
0
      bfd_size_type size;
1605
1606
      /* Ignore non-stub sections.  */
1607
0
      if (!strstr (stub_sec->name, STUB_SUFFIX))
1608
0
  continue;
1609
1610
      /* Allocate memory to hold the linker stubs.  */
1611
0
      size = stub_sec->size;
1612
0
      stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
1613
0
      if (stub_sec->contents == NULL && size != 0)
1614
0
  return false;
1615
0
      stub_sec->size = 0;
1616
0
    }
1617
1618
  /* Build the stubs as directed by the stub hash table.  */
1619
0
  table = &htab->stub_hash_table;
1620
0
  bfd_hash_traverse (table, kvx_build_one_stub, info);
1621
1622
0
  return true;
1623
0
}
1624
1625
static bfd_vma
1626
kvx_calculate_got_entry_vma (struct elf_link_hash_entry *h,
1627
         struct elf_kvx_link_hash_table
1628
         *globals, struct bfd_link_info *info,
1629
         bfd_vma value, bfd *output_bfd,
1630
         bool *unresolved_reloc_p)
1631
0
{
1632
0
  bfd_vma off = (bfd_vma) - 1;
1633
0
  asection *basegot = globals->root.sgot;
1634
0
  bool dyn = globals->root.dynamic_sections_created;
1635
1636
0
  if (h != NULL)
1637
0
    {
1638
0
      BFD_ASSERT (basegot != NULL);
1639
0
      off = h->got.offset;
1640
0
      BFD_ASSERT (off != (bfd_vma) - 1);
1641
0
      if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
1642
0
    || (bfd_link_pic (info)
1643
0
        && SYMBOL_REFERENCES_LOCAL (info, h))
1644
0
    || (ELF_ST_VISIBILITY (h->other)
1645
0
        && h->root.type == bfd_link_hash_undefweak))
1646
0
  {
1647
    /* This is actually a static link, or it is a -Bsymbolic link
1648
       and the symbol is defined locally.  We must initialize this
1649
       entry in the global offset table.  Since the offset must
1650
       always be a multiple of 8 (4 in the case of ILP32), we use
1651
       the least significant bit to record whether we have
1652
       initialized it already.
1653
       When doing a dynamic link, we create a .rel(a).got relocation
1654
       entry to initialize the value.  This is done in the
1655
       finish_dynamic_symbol routine.  */
1656
0
    if ((off & 1) != 0)
1657
0
      off &= ~1;
1658
0
    else
1659
0
      {
1660
0
        bfd_put_32 (output_bfd, value, basegot->contents + off);
1661
0
        h->got.offset |= 1;
1662
0
      }
1663
0
  }
1664
0
      else
1665
0
  *unresolved_reloc_p = false;
1666
0
    }
1667
1668
0
  return off;
1669
0
}
1670
1671
static unsigned int
1672
kvx_reloc_got_type (bfd_reloc_code_real_type r_type)
1673
0
{
1674
0
  switch (r_type)
1675
0
    {
1676
      /* Extracted with:
1677
   awk 'match ($0, /HOWTO.*R_(KVX.*_GOT(OFF)?(64)?_.*),/,ary) \
1678
   {print "case BFD_RELOC_" ary[1] ":";}' elfxx-kvxc.def  */
1679
0
    case BFD_RELOC_KVX_S37_GOTOFF_LO10:
1680
0
    case BFD_RELOC_KVX_S37_GOTOFF_UP27:
1681
1682
0
    case BFD_RELOC_KVX_S37_GOT_LO10:
1683
0
    case BFD_RELOC_KVX_S37_GOT_UP27:
1684
1685
0
    case BFD_RELOC_KVX_S43_GOTOFF_LO10:
1686
0
    case BFD_RELOC_KVX_S43_GOTOFF_UP27:
1687
0
    case BFD_RELOC_KVX_S43_GOTOFF_EX6:
1688
1689
0
    case BFD_RELOC_KVX_S43_GOT_LO10:
1690
0
    case BFD_RELOC_KVX_S43_GOT_UP27:
1691
0
    case BFD_RELOC_KVX_S43_GOT_EX6:
1692
0
      return GOT_NORMAL;
1693
1694
0
    case BFD_RELOC_KVX_S37_TLS_GD_LO10:
1695
0
    case BFD_RELOC_KVX_S37_TLS_GD_UP27:
1696
0
    case BFD_RELOC_KVX_S43_TLS_GD_LO10:
1697
0
    case BFD_RELOC_KVX_S43_TLS_GD_UP27:
1698
0
    case BFD_RELOC_KVX_S43_TLS_GD_EX6:
1699
0
      return GOT_TLS_GD;
1700
1701
0
    case BFD_RELOC_KVX_S37_TLS_LD_LO10:
1702
0
    case BFD_RELOC_KVX_S37_TLS_LD_UP27:
1703
0
    case BFD_RELOC_KVX_S43_TLS_LD_LO10:
1704
0
    case BFD_RELOC_KVX_S43_TLS_LD_UP27:
1705
0
    case BFD_RELOC_KVX_S43_TLS_LD_EX6:
1706
0
      return GOT_TLS_LD;
1707
1708
0
    case BFD_RELOC_KVX_S37_TLS_IE_LO10:
1709
0
    case BFD_RELOC_KVX_S37_TLS_IE_UP27:
1710
0
    case BFD_RELOC_KVX_S43_TLS_IE_LO10:
1711
0
    case BFD_RELOC_KVX_S43_TLS_IE_UP27:
1712
0
    case BFD_RELOC_KVX_S43_TLS_IE_EX6:
1713
0
      return GOT_TLS_IE;
1714
1715
0
    default:
1716
0
      break;
1717
0
    }
1718
0
  return GOT_UNKNOWN;
1719
0
}
1720
1721
static bool
1722
kvx_can_relax_tls (bfd *input_bfd ATTRIBUTE_UNUSED,
1723
           struct bfd_link_info *info ATTRIBUTE_UNUSED,
1724
           bfd_reloc_code_real_type r_type ATTRIBUTE_UNUSED,
1725
           struct elf_link_hash_entry *h ATTRIBUTE_UNUSED,
1726
           unsigned long r_symndx ATTRIBUTE_UNUSED)
1727
0
{
1728
0
  if (! IS_KVX_TLS_RELAX_RELOC (r_type))
1729
0
    return false;
1730
1731
  /* Relaxing hook. Disabled on KVX. */
1732
  /* See elfnn-aarch64.c */
1733
0
  return true;
1734
0
}
1735
1736
/* Given the relocation code R_TYPE, return the relaxed bfd reloc
1737
   enumerator.  */
1738
1739
static bfd_reloc_code_real_type
1740
kvx_tls_transition (bfd *input_bfd,
1741
      struct bfd_link_info *info,
1742
      unsigned int r_type,
1743
      struct elf_link_hash_entry *h,
1744
      unsigned long r_symndx)
1745
0
{
1746
0
  bfd_reloc_code_real_type bfd_r_type
1747
0
    = elf32_kvx_bfd_reloc_from_type (input_bfd, r_type);
1748
1749
0
  if (! kvx_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
1750
0
    return bfd_r_type;
1751
1752
0
  return bfd_r_type;
1753
0
}
1754
1755
/* Return the base VMA address which should be subtracted from real addresses
1756
   when resolving R_KVX_*_TLS_GD_* and R_KVX_*_TLS_LD_* relocation.  */
1757
1758
static bfd_vma
1759
dtpoff_base (struct bfd_link_info *info)
1760
0
{
1761
  /* If tls_sec is NULL, we should have signalled an error already.  */
1762
0
  BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
1763
0
  return elf_hash_table (info)->tls_sec->vma;
1764
0
}
1765
1766
/* Return the base VMA address which should be subtracted from real addresses
1767
   when resolving R_KVX_*_TLS_IE_* and R_KVX_*_TLS_LE_* relocations.  */
1768
1769
static bfd_vma
1770
tpoff_base (struct bfd_link_info *info)
1771
0
{
1772
0
  struct elf_link_hash_table *htab = elf_hash_table (info);
1773
1774
  /* If tls_sec is NULL, we should have signalled an error already.  */
1775
0
  BFD_ASSERT (htab->tls_sec != NULL);
1776
1777
0
  bfd_vma base = align_power ((bfd_vma) 0,
1778
0
            htab->tls_sec->alignment_power);
1779
0
  return htab->tls_sec->vma - base;
1780
0
}
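
The two helpers above differ only in which base they subtract: DTPOFF values used by the GD/LD models are offsets from the module's TLS block, while TPOFF values used by the IE/LE models are offsets from the static TLS base (tpoff_base folds in the section alignment, which reduces to the section VMA here since align_power of 0 is 0).  A minimal standalone sketch of the arithmetic, using hypothetical addresses rather than real bfd sections:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  /* Hypothetical values: a TLS output section at 0x10000 and a
     thread-local symbol located at 0x10040 inside it.  */
  uint64_t tls_sec_vma = 0x10000;
  uint64_t sym_vma = 0x10040;

  /* GD/LD (DTPOFF) relocations store the offset from the module's
     TLS block: value - dtpoff_base.  */
  uint64_t dtpoff = sym_vma - tls_sec_vma;

  /* IE/LE (TPOFF) relocations store the offset from the static TLS
     base: value - tpoff_base, the same subtraction in this sketch.  */
  uint64_t tpoff = sym_vma - tls_sec_vma;

  printf ("dtpoff=%#llx tpoff=%#llx\n",
          (unsigned long long) dtpoff, (unsigned long long) tpoff);
  return 0;
}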
1781
1782
static bfd_vma *
1783
symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
1784
           unsigned long r_symndx)
1785
0
{
1786
  /* Calculate the address of the GOT entry for symbol
1787
     referred to in h.  */
1788
0
  if (h != NULL)
1789
0
    return &h->got.offset;
1790
0
  else
1791
0
    {
1792
      /* local symbol */
1793
0
      struct elf_kvx_local_symbol *l;
1794
1795
0
      l = elf_kvx_locals (input_bfd);
1796
0
      return &l[r_symndx].got_offset;
1797
0
    }
1798
0
}
1799
1800
static void
1801
symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
1802
      unsigned long r_symndx)
1803
0
{
1804
0
  bfd_vma *p;
1805
0
  p = symbol_got_offset_ref (input_bfd, h, r_symndx);
1806
0
  *p |= 1;
1807
0
}
1808
1809
static int
1810
symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
1811
        unsigned long r_symndx)
1812
0
{
1813
0
  bfd_vma value;
1814
0
  value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1815
0
  return value & 1;
1816
0
}
1817
1818
static bfd_vma
1819
symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
1820
       unsigned long r_symndx)
1821
0
{
1822
0
  bfd_vma value;
1823
0
  value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
1824
0
  value &= ~1;
1825
0
  return value;
1826
0
}
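
These four helpers implement a small tagging scheme on the got.offset slots: because GOT entries are at least 4-byte aligned, bit 0 of the stored offset is free, so symbol_got_offset_mark sets it to remember that the entry has already been initialized, and symbol_got_offset masks it back off when the raw offset is needed.  A minimal sketch of the same idiom on a plain integer (not the real bfd structures):

#include <stdint.h>
#include <assert.h>

/* Bit 0 of a GOT offset is free (entries are 4- or 8-byte aligned),
   so it can record "this entry has already been initialized".  */
static void     mark_initialized (uint64_t *slot) { *slot |= 1; }
static int      is_initialized (uint64_t slot)    { return slot & 1; }
static uint64_t offset_of (uint64_t slot)         { return slot & ~(uint64_t) 1; }

int main (void)
{
  uint64_t got_offset = 0x18;   /* hypothetical offset into .got */

  assert (!is_initialized (got_offset));
  mark_initialized (&got_offset);
  assert (is_initialized (got_offset));
  assert (offset_of (got_offset) == 0x18);
  return 0;
}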
1827
1828
/* N_ONES produces N one bits, without overflowing machine arithmetic.  */
1829
0
#define N_ONES(n) (((((bfd_vma) 1 << ((n) -1)) - 1) << 1) | 1)
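
The double shift is deliberate: building the mask as ((1 << (n-1)) - 1) << 1 | 1 never shifts by the full width of bfd_vma, so N_ONES stays well defined even when n equals the number of bits in bfd_vma.  A standalone check of the same expression, using uint64_t in place of bfd_vma:

#include <stdint.h>
#include <assert.h>

#define N_ONES(n) (((((uint64_t) 1 << ((n) - 1)) - 1) << 1) | 1)

int main (void)
{
  assert (N_ONES (1)  == 0x1);
  assert (N_ONES (10) == 0x3ff);
  assert (N_ONES (37) == 0x1fffffffffULL);
  assert (N_ONES (64) == 0xffffffffffffffffULL);  /* no shift by 64 */
  return 0;
}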
1830
1831
/* This is a copy/paste + modification from
1832
   reloc.c:_bfd_relocate_contents. Relocations are applied to 32bits
1833
   words, so all overflow checks will overflow for values above
1834
   32bits.  */
1835
static bfd_reloc_status_type
1836
check_signed_overflow (enum complain_overflow complain_on_overflow,
1837
           bfd_reloc_code_real_type bfd_r_type, bfd *input_bfd,
1838
           bfd_vma relocation)
1839
0
{
1840
0
  bfd_reloc_status_type flag = bfd_reloc_ok;
1841
0
  bfd_vma addrmask, fieldmask, signmask, ss;
1842
0
  bfd_vma a, b, sum;
1843
0
  bfd_vma x = 0;
1844
1845
  /* These usually come from the howto struct.  As we don't check for
1846
     values fitting in bitfields or in subparts of words, we set all
1847
     of these so that the check behaves as if the field starts at the
1848
     first bit.  */
1849
0
  unsigned int rightshift = 0;
1850
0
  unsigned int bitpos = 0;
1851
0
  unsigned int bitsize = 0;
1852
0
  bfd_vma src_mask = -1;
1853
1854
  /* Only regular symbol relocations are checked here.  Other
1855
     relocations (GOT, TLS) could be checked if the need is
1856
     confirmed.  At the moment, we keep the previous behavior
1857
     (i.e. unchecked) for those.  */
1858
0
  switch (bfd_r_type)
1859
0
    {
1860
0
    case BFD_RELOC_KVX_S37_LO10:
1861
0
    case BFD_RELOC_KVX_S37_UP27:
1862
0
      bitsize = 37;
1863
0
      break;
1864
1865
0
    case BFD_RELOC_KVX_S32_LO5:
1866
0
    case BFD_RELOC_KVX_S32_UP27:
1867
0
      bitsize = 32;
1868
0
      break;
1869
1870
0
    case BFD_RELOC_KVX_S43_LO10:
1871
0
    case BFD_RELOC_KVX_S43_UP27:
1872
0
    case BFD_RELOC_KVX_S43_EX6:
1873
0
      bitsize = 43;
1874
0
      break;
1875
1876
0
    case BFD_RELOC_KVX_S64_LO10:
1877
0
    case BFD_RELOC_KVX_S64_UP27:
1878
0
    case BFD_RELOC_KVX_S64_EX27:
1879
0
      bitsize = 64;
1880
0
      break;
1881
1882
0
    default:
1883
0
      return bfd_reloc_ok;
1884
0
    }
1885
1886
  /* direct copy/paste from reloc.c below */
1887
1888
  /* Get the values to be added together.  For signed and unsigned
1889
     relocations, we assume that all values should be truncated to
1890
     the size of an address.  For bitfields, all the bits matter.
1891
     See also bfd_check_overflow.  */
1892
0
  fieldmask = N_ONES (bitsize);
1893
0
  signmask = ~fieldmask;
1894
0
  addrmask = (N_ONES (bfd_arch_bits_per_address (input_bfd))
1895
0
        | (fieldmask << rightshift));
1896
0
  a = (relocation & addrmask) >> rightshift;
1897
0
  b = (x & src_mask & addrmask) >> bitpos;
1898
0
  addrmask >>= rightshift;
1899
1900
0
  switch (complain_on_overflow)
1901
0
    {
1902
0
    case complain_overflow_signed:
1903
      /* If any sign bits are set, all sign bits must be set.
1904
   That is, A must be a valid negative address after
1905
   shifting.  */
1906
0
      signmask = ~(fieldmask >> 1);
1907
      /* Fall thru */
1908
1909
0
    case complain_overflow_bitfield:
1910
      /* Much like the signed check, but for a field one bit
1911
   wider.  We allow a bitfield to represent numbers in the
1912
   range -2**n to 2**n-1, where n is the number of bits in the
1913
   field.  Note that when bfd_vma is 32 bits, a 32-bit reloc
1914
   can't overflow, which is exactly what we want.  */
1915
0
      ss = a & signmask;
1916
0
      if (ss != 0 && ss != (addrmask & signmask))
1917
0
  flag = bfd_reloc_overflow;
1918
1919
      /* We only need this next bit of code if the sign bit of B
1920
   is below the sign bit of A.  This would only happen if
1921
   SRC_MASK had fewer bits than BITSIZE.  Note that if
1922
   SRC_MASK has more bits than BITSIZE, we can get into
1923
   trouble; we would need to verify that B is in range, as
1924
   we do for A above.  */
1925
0
      ss = ((~src_mask) >> 1) & src_mask;
1926
0
      ss >>= bitpos;
1927
1928
      /* Set all the bits above the sign bit.  */
1929
0
      b = (b ^ ss) - ss;
1930
1931
      /* Now we can do the addition.  */
1932
0
      sum = a + b;
1933
1934
      /* See if the result has the correct sign.  Bits above the
1935
   sign bit are junk now; ignore them.  If the sum is
1936
   positive, make sure we did not have all negative inputs;
1937
   if the sum is negative, make sure we did not have all
1938
   positive inputs.  The test below looks only at the sign
1939
   bits, and it really just is
1940
   SIGN (A) == SIGN (B) && SIGN (A) != SIGN (SUM)
1941
1942
   We mask with addrmask here to explicitly allow an address
1943
   wrap-around.  The Linux kernel relies on it, and it is
1944
   the only way to write assembler code which can run when
1945
   loaded at a location 0x80000000 away from the location at
1946
   which it is linked.  */
1947
0
      if (((~(a ^ b)) & (a ^ sum)) & signmask & addrmask)
1948
0
  flag = bfd_reloc_overflow;
1949
0
      break;
1950
1951
0
    case complain_overflow_unsigned:
1952
      /* Checking for an unsigned overflow is relatively easy:
1953
   trim the addresses and add, and trim the result as well.
1954
   Overflow is normally indicated when the result does not
1955
   fit in the field.  However, we also need to consider the
1956
   case when, e.g., fieldmask is 0x7fffffff or smaller, an
1957
   input is 0x80000000, and bfd_vma is only 32 bits; then we
1958
   will get sum == 0, but there is an overflow, since the
1959
   inputs did not fit in the field.  Instead of doing a
1960
   separate test, we can check for this by or-ing in the
1961
   operands when testing for the sum overflowing its final
1962
   field.  */
1963
0
      sum = (a + b) & addrmask;
1964
0
      if ((a | b | sum) & signmask)
1965
0
  flag = bfd_reloc_overflow;
1966
0
      break;
1967
1968
0
    default:
1969
0
      abort ();
1970
0
    }
1971
0
  return flag;
1972
0
}
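
Ignoring the address-size truncation applied through addrmask, and with rightshift, bitpos and x all forced to zero above, the signed check amounts to requiring that every bit at or above the field's sign bit is identical.  A simplified standalone illustration for the 37-bit (S37) case; this is a sketch of that condition, not the bfd routine itself:

#include <stdint.h>
#include <assert.h>

/* Simplified check: a value fits a signed N-bit field when the bits
   at and above the field's sign bit are either all zero or all one.  */
static int fits_signed (uint64_t value, unsigned bitsize)
{
  uint64_t fieldmask = ((uint64_t) 1 << (bitsize - 1)) - 1;
  fieldmask = (fieldmask << 1) | 1;          /* N_ONES (bitsize) */
  uint64_t signmask = ~(fieldmask >> 1);     /* sign bit and above */
  uint64_t ss = value & signmask;
  return ss == 0 || ss == signmask;
}

int main (void)
{
  /* The signed 37-bit range is [-2^36, 2^36 - 1].  */
  assert ( fits_signed ((uint64_t)  0xfffffffffULL, 37));   /*  2^36 - 1 */
  assert (!fits_signed ((uint64_t) 0x1000000000ULL, 37));   /*  2^36     */
  assert ( fits_signed ((uint64_t) -0x1000000000LL, 37));   /* -2^36     */
  assert (!fits_signed ((uint64_t) -0x1000000001LL, 37));   /* -2^36 - 1 */
  return 0;
}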
1973
1974
/* Perform a relocation as part of a final link.  */
1975
static bfd_reloc_status_type
1976
elf32_kvx_final_link_relocate (reloc_howto_type *howto,
1977
             bfd *input_bfd,
1978
             bfd *output_bfd,
1979
             asection *input_section,
1980
             bfd_byte *contents,
1981
             Elf_Internal_Rela *rel,
1982
             bfd_vma value,
1983
             struct bfd_link_info *info,
1984
             asection *sym_sec,
1985
             struct elf_link_hash_entry *h,
1986
             bool *unresolved_reloc_p,
1987
             bool save_addend,
1988
             bfd_vma *saved_addend,
1989
             Elf_Internal_Sym *sym)
1990
0
{
1991
0
  Elf_Internal_Shdr *symtab_hdr;
1992
0
  unsigned int r_type = howto->type;
1993
0
  bfd_reloc_code_real_type bfd_r_type
1994
0
    = elf32_kvx_bfd_reloc_from_howto (howto);
1995
0
  bfd_reloc_code_real_type new_bfd_r_type;
1996
0
  unsigned long r_symndx;
1997
0
  bfd_byte *hit_data = contents + rel->r_offset;
1998
0
  bfd_vma place, off;
1999
0
  bfd_vma addend;
2000
0
  struct elf_kvx_link_hash_table *globals;
2001
0
  bool weak_undef_p;
2002
0
  asection *base_got;
2003
0
  bfd_reloc_status_type rret = bfd_reloc_ok;
2004
0
  bool resolved_to_zero;
2005
0
  globals = elf_kvx_hash_table (info);
2006
2007
0
  symtab_hdr = &elf_symtab_hdr (input_bfd);
2008
2009
0
  BFD_ASSERT (is_kvx_elf (input_bfd));
2010
2011
0
  r_symndx = ELF32_R_SYM (rel->r_info);
2012
2013
  /* It is possible to have linker relaxations on some TLS access
2014
     models.  Update our information here.  */
2015
0
  new_bfd_r_type = kvx_tls_transition (input_bfd, info, r_type, h, r_symndx);
2016
0
  if (new_bfd_r_type != bfd_r_type)
2017
0
    {
2018
0
      bfd_r_type = new_bfd_r_type;
2019
0
      howto = elf32_kvx_howto_from_bfd_reloc (bfd_r_type);
2020
0
      BFD_ASSERT (howto != NULL);
2021
0
      r_type = howto->type;
2022
0
    }
2023
2024
0
  place = input_section->output_section->vma
2025
0
    + input_section->output_offset + rel->r_offset;
2026
2027
  /* Get addend, accumulating the addend for consecutive relocs
2028
     which refer to the same offset.  */
2029
0
  addend = saved_addend ? *saved_addend : 0;
2030
0
  addend += rel->r_addend;
2031
2032
0
  weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
2033
0
      : bfd_is_und_section (sym_sec));
2034
0
  resolved_to_zero = (h != NULL
2035
0
          && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
2036
2037
0
  switch (bfd_r_type)
2038
0
    {
2039
0
    case BFD_RELOC_KVX_32:
2040
#if ARCH_SIZE == 64
2041
    case BFD_RELOC_KVX_32:
2042
#endif
2043
0
    case BFD_RELOC_KVX_S37_LO10:
2044
0
    case BFD_RELOC_KVX_S37_UP27:
2045
2046
0
    case BFD_RELOC_KVX_S32_LO5:
2047
0
    case BFD_RELOC_KVX_S32_UP27:
2048
2049
0
    case BFD_RELOC_KVX_S43_LO10:
2050
0
    case BFD_RELOC_KVX_S43_UP27:
2051
0
    case BFD_RELOC_KVX_S43_EX6:
2052
2053
0
    case BFD_RELOC_KVX_S64_LO10:
2054
0
    case BFD_RELOC_KVX_S64_UP27:
2055
0
    case BFD_RELOC_KVX_S64_EX27:
2056
      /* When generating a shared object or relocatable executable, these
2057
   relocations are copied into the output file to be resolved at
2058
   run time.  */
2059
0
      if (((bfd_link_pic (info) == true)
2060
0
     || globals->root.is_relocatable_executable)
2061
0
    && (input_section->flags & SEC_ALLOC)
2062
0
    && (h == NULL
2063
0
        || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2064
0
      && !resolved_to_zero)
2065
0
        || h->root.type != bfd_link_hash_undefweak))
2066
0
  {
2067
0
    Elf_Internal_Rela outrel;
2068
0
    bfd_byte *loc;
2069
0
    bool skip, relocate;
2070
0
    asection *sreloc;
2071
2072
0
    *unresolved_reloc_p = false;
2073
2074
0
    skip = false;
2075
0
    relocate = false;
2076
2077
0
    outrel.r_addend = addend;
2078
0
    outrel.r_offset =
2079
0
      _bfd_elf_section_offset (output_bfd, info, input_section,
2080
0
             rel->r_offset);
2081
0
    if (outrel.r_offset == (bfd_vma) - 1)
2082
0
      skip = true;
2083
0
    else if (outrel.r_offset == (bfd_vma) - 2)
2084
0
      {
2085
0
        skip = true;
2086
0
        relocate = true;
2087
0
      }
2088
2089
0
    outrel.r_offset += (input_section->output_section->vma
2090
0
            + input_section->output_offset);
2091
2092
0
    if (skip)
2093
0
      memset (&outrel, 0, sizeof outrel);
2094
0
    else if (h != NULL
2095
0
       && h->dynindx != -1
2096
0
       && (!bfd_link_pic (info) || !info->symbolic
2097
0
           || !h->def_regular))
2098
0
      outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
2099
0
    else if (bfd_r_type == BFD_RELOC_KVX_32
2100
0
       || bfd_r_type == BFD_RELOC_KVX_64)
2101
0
      {
2102
0
        int symbol;
2103
2104
        /* On SVR4-ish systems, the dynamic loader cannot
2105
     relocate the text and data segments independently,
2106
     so the symbol does not matter.  */
2107
0
        symbol = 0;
2108
0
        outrel.r_info = ELF32_R_INFO (symbol, R_KVX_RELATIVE);
2109
0
        outrel.r_addend += value;
2110
0
      }
2111
0
    else if (bfd_link_pic (info) && info->symbolic)
2112
0
      {
2113
0
        goto skip_because_pic;
2114
0
      }
2115
0
    else
2116
0
      {
2117
        /* We may end up here from bad input code trying to
2118
     insert relocations on symbols within code.  We do not
2119
     want that currently, and such code should use GOT +
2120
     KVX_32/64 relocs that translate into KVX_RELATIVE.  */
2121
0
        const char *name;
2122
0
        if (h && h->root.root.string)
2123
0
    name = h->root.root.string;
2124
0
        else
2125
0
    name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2126
0
           NULL);
2127
2128
0
        (*_bfd_error_handler)
2129
    /* xgettext:c-format */
2130
0
    (_("%pB(%pA+%#" PRIx64 "): "
2131
0
       "unresolvable %s relocation in section `%s'"),
2132
0
     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2133
0
     name);
2134
0
        return bfd_reloc_notsupported;
2135
0
      }
2136
2137
0
    sreloc = elf_section_data (input_section)->sreloc;
2138
0
    if (sreloc == NULL || sreloc->contents == NULL)
2139
0
      return bfd_reloc_notsupported;
2140
2141
0
    loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
2142
0
    bfd_elf32_swap_reloca_out (output_bfd, &outrel, loc);
2143
2144
0
    if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
2145
0
      {
2146
        /* Sanity to check that we have previously allocated
2147
     sufficient space in the relocation section for the
2148
     number of relocations we actually want to emit.  */
2149
0
        abort ();
2150
0
      }
2151
2152
    /* If this reloc is against an external symbol, we do not want to
2153
       fiddle with the addend.  Otherwise, we need to include the symbol
2154
       value so that it becomes an addend for the dynamic reloc.  */
2155
0
    if (!relocate)
2156
0
      return bfd_reloc_ok;
2157
2158
0
    rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2159
0
          input_bfd, value + addend);
2160
0
    if (rret != bfd_reloc_ok)
2161
0
      return rret;
2162
2163
0
    return _bfd_final_link_relocate (howto, input_bfd, input_section,
2164
0
             contents, rel->r_offset, value,
2165
0
             addend);
2166
0
  }
2167
2168
0
    skip_because_pic:
2169
0
      rret = check_signed_overflow (complain_overflow_signed, bfd_r_type,
2170
0
            input_bfd, value + addend);
2171
0
      if (rret != bfd_reloc_ok)
2172
0
  return rret;
2173
2174
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2175
0
               contents, rel->r_offset, value,
2176
0
               addend);
2177
0
      break;
2178
2179
0
    case BFD_RELOC_KVX_PCREL17:
2180
0
    case BFD_RELOC_KVX_PCREL27:
2181
0
      {
2182
  /* BCU insns are always first in a bundle, so there is no need
2183
     to correct the address using the offset within the bundle.  */
2184
2185
0
  asection *splt = globals->root.splt;
2186
0
  bool via_plt_p =
2187
0
    splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
2188
2189
  /* A call to an undefined weak symbol is converted to a jump to
2190
     the next instruction unless a PLT entry will be created.
2191
     The jump to the next instruction is optimized as a NOP.
2192
     Do the same for local undefined symbols.  */
2193
0
  if (weak_undef_p && ! via_plt_p)
2194
0
    {
2195
0
      bfd_putl32 (INSN_NOP, hit_data);
2196
0
      return bfd_reloc_ok;
2197
0
    }
2198
2199
  /* If the call goes through a PLT entry, make sure to
2200
     check distance to the right destination address.  */
2201
0
  if (via_plt_p)
2202
0
    value = (splt->output_section->vma
2203
0
       + splt->output_offset + h->plt.offset);
2204
2205
  /* Check if a stub has to be inserted because the destination
2206
     is too far away.  */
2207
0
  struct elf_kvx_stub_hash_entry *stub_entry = NULL;
2208
2209
  /* If the target symbol is global and marked as a function, the
2210
     relocation applies to a function call or a tail call.  In this
2211
     situation we can veneer out-of-range branches.  The veneers
2212
     use R16 and R17, hence they cannot be used for arbitrary
2213
     out-of-range branches that occur within the body of a function.  */
2214
2215
  /* Check if a stub has to be inserted because the destination
2216
     is too far away.  */
2217
0
  if (! kvx_valid_call_p (value, place))
2218
0
    {
2219
      /* The target is out of reach, so redirect the branch to
2220
         the local stub for this function.  */
2221
0
      stub_entry = elf32_kvx_get_stub_entry (input_section,
2222
0
               sym_sec, h,
2223
0
               rel, globals);
2224
0
      if (stub_entry != NULL)
2225
0
        value = (stub_entry->stub_offset
2226
0
           + stub_entry->stub_sec->output_offset
2227
0
           + stub_entry->stub_sec->output_section->vma);
2228
      /* We have redirected the destination to stub entry address,
2229
         so ignore any addend record in the original rela entry.  */
2230
0
      addend = 0;
2231
0
    }
2232
0
      }
2233
0
      *unresolved_reloc_p = false;
2234
2235
      /* FALLTHROUGH */
2236
2237
      /* PCREL 32 are used in dwarf2 table for exception handling */
2238
0
    case BFD_RELOC_KVX_32_PCREL:
2239
0
    case BFD_RELOC_KVX_S64_PCREL_LO10:
2240
0
    case BFD_RELOC_KVX_S64_PCREL_UP27:
2241
0
    case BFD_RELOC_KVX_S64_PCREL_EX27:
2242
0
    case BFD_RELOC_KVX_S37_PCREL_LO10:
2243
0
    case BFD_RELOC_KVX_S37_PCREL_UP27:
2244
0
    case BFD_RELOC_KVX_S43_PCREL_LO10:
2245
0
    case BFD_RELOC_KVX_S43_PCREL_UP27:
2246
0
    case BFD_RELOC_KVX_S43_PCREL_EX6:
2247
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2248
0
               contents, rel->r_offset, value,
2249
0
               addend);
2250
0
      break;
2251
2252
0
    case BFD_RELOC_KVX_S37_TLS_LE_LO10:
2253
0
    case BFD_RELOC_KVX_S37_TLS_LE_UP27:
2254
2255
0
    case BFD_RELOC_KVX_S43_TLS_LE_LO10:
2256
0
    case BFD_RELOC_KVX_S43_TLS_LE_UP27:
2257
0
    case BFD_RELOC_KVX_S43_TLS_LE_EX6:
2258
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2259
0
               contents, rel->r_offset,
2260
0
               value - tpoff_base (info), addend);
2261
0
      break;
2262
2263
0
    case BFD_RELOC_KVX_S37_TLS_DTPOFF_LO10:
2264
0
    case BFD_RELOC_KVX_S37_TLS_DTPOFF_UP27:
2265
2266
0
    case BFD_RELOC_KVX_S43_TLS_DTPOFF_LO10:
2267
0
    case BFD_RELOC_KVX_S43_TLS_DTPOFF_UP27:
2268
0
    case BFD_RELOC_KVX_S43_TLS_DTPOFF_EX6:
2269
0
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
2270
0
               contents, rel->r_offset,
2271
0
               value - dtpoff_base (info), addend);
2272
2273
0
    case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2274
0
    case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2275
2276
0
    case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2277
0
    case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2278
0
    case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2279
2280
0
    case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2281
0
    case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2282
2283
0
    case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2284
0
    case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2285
0
    case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2286
2287
0
    case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2288
0
    case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2289
2290
0
    case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2291
0
    case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2292
0
    case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2293
2294
0
      if (globals->root.sgot == NULL)
2295
0
  return bfd_reloc_notsupported;
2296
0
      value = symbol_got_offset (input_bfd, h, r_symndx);
2297
2298
0
      _bfd_final_link_relocate (howto, input_bfd, input_section,
2299
0
        contents, rel->r_offset, value, addend);
2300
0
      *unresolved_reloc_p = false;
2301
0
      break;
2302
2303
0
    case BFD_RELOC_KVX_S37_GOTADDR_UP27:
2304
0
    case BFD_RELOC_KVX_S37_GOTADDR_LO10:
2305
2306
0
    case BFD_RELOC_KVX_S43_GOTADDR_UP27:
2307
0
    case BFD_RELOC_KVX_S43_GOTADDR_EX6:
2308
0
    case BFD_RELOC_KVX_S43_GOTADDR_LO10:
2309
2310
0
    case BFD_RELOC_KVX_S64_GOTADDR_UP27:
2311
0
    case BFD_RELOC_KVX_S64_GOTADDR_EX27:
2312
0
    case BFD_RELOC_KVX_S64_GOTADDR_LO10:
2313
0
      {
2314
0
  if (globals->root.sgot == NULL)
2315
0
    BFD_ASSERT (h != NULL);
2316
2317
0
  value = globals->root.sgot->output_section->vma
2318
0
    + globals->root.sgot->output_offset;
2319
2320
0
  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2321
0
           contents, rel->r_offset, value,
2322
0
           addend);
2323
0
      }
2324
0
      break;
2325
2326
0
    case BFD_RELOC_KVX_S37_GOTOFF_LO10:
2327
0
    case BFD_RELOC_KVX_S37_GOTOFF_UP27:
2328
2329
0
    case BFD_RELOC_KVX_32_GOTOFF:
2330
0
    case BFD_RELOC_KVX_64_GOTOFF:
2331
2332
0
    case BFD_RELOC_KVX_S43_GOTOFF_LO10:
2333
0
    case BFD_RELOC_KVX_S43_GOTOFF_UP27:
2334
0
    case BFD_RELOC_KVX_S43_GOTOFF_EX6:
2335
2336
0
      {
2337
0
  asection *basegot = globals->root.sgot;
2338
  /* BFD_ASSERT(h == NULL); */
2339
0
  BFD_ASSERT(globals->root.sgot != NULL);
2340
0
  value -= basegot->output_section->vma + basegot->output_offset;
2341
0
  return _bfd_final_link_relocate (howto, input_bfd, input_section,
2342
0
           contents, rel->r_offset, value,
2343
0
           addend);
2344
0
      }
2345
0
      break;
2346
2347
0
    case BFD_RELOC_KVX_S37_GOT_LO10:
2348
0
    case BFD_RELOC_KVX_S37_GOT_UP27:
2349
2350
0
    case BFD_RELOC_KVX_32_GOT:
2351
0
    case BFD_RELOC_KVX_64_GOT:
2352
2353
0
    case BFD_RELOC_KVX_S43_GOT_LO10:
2354
0
    case BFD_RELOC_KVX_S43_GOT_UP27:
2355
0
    case BFD_RELOC_KVX_S43_GOT_EX6:
2356
2357
0
      if (globals->root.sgot == NULL)
2358
0
  BFD_ASSERT (h != NULL);
2359
2360
0
      if (h != NULL)
2361
0
  {
2362
0
    value = kvx_calculate_got_entry_vma (h, globals, info, value,
2363
0
                 output_bfd,
2364
0
                 unresolved_reloc_p);
2365
#ifdef UGLY_DEBUG
2366
    printf("GOT_LO/HI for %s, value %x\n", h->root.root.string, value);
2367
#endif
2368
2369
0
    return _bfd_final_link_relocate (howto, input_bfd, input_section,
2370
0
             contents, rel->r_offset, value,
2371
0
             addend);
2372
0
  }
2373
0
      else
2374
0
  {
2375
#ifdef UGLY_DEBUG
2376
    printf("GOT_LO/HI with h NULL, initial value %x\n", value);
2377
#endif
2378
0
    struct elf_kvx_local_symbol *locals = elf_kvx_locals (input_bfd);
2379
2380
0
    if (locals == NULL)
2381
0
      {
2382
0
        int howto_index = bfd_r_type - BFD_RELOC_KVX_RELOC_START;
2383
0
        _bfd_error_handler
2384
    /* xgettext:c-format */
2385
0
    (_("%pB: local symbol descriptor table be NULL when applying "
2386
0
       "relocation %s against local symbol"),
2387
0
     input_bfd, elf_kvx_howto_table[howto_index].name);
2388
0
        abort ();
2389
0
      }
2390
2391
0
    off = symbol_got_offset (input_bfd, h, r_symndx);
2392
0
    base_got = globals->root.sgot;
2393
0
    bfd_vma got_entry_addr = (base_got->output_section->vma
2394
0
            + base_got->output_offset + off);
2395
2396
0
    if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2397
0
      {
2398
0
        bfd_put_64 (output_bfd, value, base_got->contents + off);
2399
2400
0
        if (bfd_link_pic (info))
2401
0
    {
2402
0
      asection *s;
2403
0
      Elf_Internal_Rela outrel;
2404
2405
      /* For PIC executables and shared libraries we need
2406
         to relocate the GOT entry at run time.  */
2407
0
      s = globals->root.srelgot;
2408
0
      if (s == NULL)
2409
0
        abort ();
2410
2411
0
      outrel.r_offset = got_entry_addr;
2412
0
      outrel.r_info = ELF32_R_INFO (0, R_KVX_RELATIVE);
2413
0
      outrel.r_addend = value;
2414
0
      elf_append_rela (output_bfd, s, &outrel);
2415
0
    }
2416
2417
0
        symbol_got_offset_mark (input_bfd, h, r_symndx);
2418
0
      }
2419
2420
    /* Update the relocation value to GOT entry addr as we have
2421
       transformed the direct data access into an indirect data
2422
       access through GOT.  */
2423
0
    value = got_entry_addr;
2424
2425
0
    return _bfd_final_link_relocate (howto, input_bfd, input_section,
2426
0
             contents, rel->r_offset, off, 0);
2427
0
  }
2428
0
      break;
2429
2430
0
    default:
2431
0
      return bfd_reloc_notsupported;
2432
0
    }
2433
2434
0
  if (saved_addend)
2435
0
    *saved_addend = value;
2436
2437
  /* Only apply the final relocation in a sequence.  */
2438
0
  if (save_addend)
2439
0
    return bfd_reloc_continue;
2440
2441
0
  return _bfd_kvx_elf_put_addend (input_bfd, hit_data, bfd_r_type,
2442
0
          howto, value);
2443
0
}
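
Because KVX splits large immediates across several syllables, one logical value is usually described by several relocations sharing a single r_offset (for instance an S37 value carried by a LO10 and an UP27 relocation).  The caller therefore threads the accumulated value through *saved_addend, and only the last relocation of such a group is applied; the earlier ones return bfd_reloc_continue.  A small standalone sketch of the idea, assuming purely for illustration that the low 10 bits go to the LO10 part and the next 27 bits to the UP27 part (the real field placement is performed by _bfd_kvx_elf_put_addend and the howto table, not by this sketch):

#include <stdint.h>
#include <assert.h>

/* Hypothetical split of a 37-bit immediate into LO10/UP27 parts,
   mirroring how one value is spread over two relocations that share
   the same r_offset.  */
static uint32_t lo10 (uint64_t v) { return (uint32_t) (v & 0x3ff); }
static uint32_t up27 (uint64_t v) { return (uint32_t) ((v >> 10) & 0x7ffffff); }

int main (void)
{
  uint64_t value = 0x123456789ULL;   /* fits in 37 bits */

  /* Reassembling both parts must give back the original value.  */
  assert ((((uint64_t) up27 (value) << 10) | lo10 (value)) == value);
  return 0;
}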
2444
2445
2446
2447
/* Relocate a KVX ELF section.  */
2448
2449
static int
2450
elf32_kvx_relocate_section (bfd *output_bfd,
2451
          struct bfd_link_info *info,
2452
          bfd *input_bfd,
2453
          asection *input_section,
2454
          bfd_byte *contents,
2455
          Elf_Internal_Rela *relocs,
2456
          Elf_Internal_Sym *local_syms,
2457
          asection **local_sections)
2458
0
{
2459
0
  Elf_Internal_Shdr *symtab_hdr;
2460
0
  struct elf_link_hash_entry **sym_hashes;
2461
0
  Elf_Internal_Rela *rel;
2462
0
  Elf_Internal_Rela *relend;
2463
0
  const char *name;
2464
0
  struct elf_kvx_link_hash_table *globals;
2465
0
  bool save_addend = false;
2466
0
  bfd_vma addend = 0;
2467
2468
0
  globals = elf_kvx_hash_table (info);
2469
2470
0
  symtab_hdr = &elf_symtab_hdr (input_bfd);
2471
0
  sym_hashes = elf_sym_hashes (input_bfd);
2472
2473
0
  rel = relocs;
2474
0
  relend = relocs + input_section->reloc_count;
2475
0
  for (; rel < relend; rel++)
2476
0
    {
2477
0
      unsigned int r_type;
2478
0
      bfd_reloc_code_real_type bfd_r_type;
2479
0
      reloc_howto_type *howto;
2480
0
      unsigned long r_symndx;
2481
0
      Elf_Internal_Sym *sym;
2482
0
      asection *sec;
2483
0
      struct elf_link_hash_entry *h;
2484
0
      bfd_vma relocation;
2485
0
      bfd_reloc_status_type r;
2486
0
      arelent bfd_reloc;
2487
0
      char sym_type;
2488
0
      bool unresolved_reloc = false;
2489
0
      char *error_message = NULL;
2490
2491
0
      r_symndx = ELF32_R_SYM (rel->r_info);
2492
0
      r_type = ELF32_R_TYPE (rel->r_info);
2493
2494
0
      bfd_reloc.howto = elf32_kvx_howto_from_type (input_bfd, r_type);
2495
0
      howto = bfd_reloc.howto;
2496
2497
0
      if (howto == NULL)
2498
0
  return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2499
2500
0
      bfd_r_type = elf32_kvx_bfd_reloc_from_howto (howto);
2501
2502
0
      h = NULL;
2503
0
      sym = NULL;
2504
0
      sec = NULL;
2505
2506
0
      if (r_symndx < symtab_hdr->sh_info) /* A local symbol. */
2507
0
  {
2508
0
    sym = local_syms + r_symndx;
2509
0
    sym_type = ELF32_ST_TYPE (sym->st_info);
2510
0
    sec = local_sections[r_symndx];
2511
2512
    /* An object file might have a reference to a local
2513
       undefined symbol.  This is a draft object file, but we
2514
       should at least do something about it.  */
2515
0
    if (r_type != R_KVX_NONE
2516
0
        && r_type != R_KVX_S37_GOTADDR_LO10
2517
0
        && r_type != R_KVX_S37_GOTADDR_UP27
2518
0
        && r_type != R_KVX_S64_GOTADDR_LO10
2519
0
        && r_type != R_KVX_S64_GOTADDR_UP27
2520
0
        && r_type != R_KVX_S64_GOTADDR_EX27
2521
0
        && r_type != R_KVX_S43_GOTADDR_LO10
2522
0
        && r_type != R_KVX_S43_GOTADDR_UP27
2523
0
        && r_type != R_KVX_S43_GOTADDR_EX6
2524
0
        && bfd_is_und_section (sec)
2525
0
        && ELF_ST_BIND (sym->st_info) != STB_WEAK)
2526
0
      (*info->callbacks->undefined_symbol)
2527
0
        (info, bfd_elf_string_from_elf_section
2528
0
         (input_bfd, symtab_hdr->sh_link, sym->st_name),
2529
0
         input_bfd, input_section, rel->r_offset, true);
2530
2531
0
    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2532
0
  }
2533
0
      else
2534
0
  {
2535
0
    bool warned, ignored;
2536
2537
0
    RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2538
0
           r_symndx, symtab_hdr, sym_hashes,
2539
0
           h, sec, relocation,
2540
0
           unresolved_reloc, warned, ignored);
2541
2542
0
    sym_type = h->type;
2543
0
  }
2544
2545
0
      if (sec != NULL && discarded_section (sec))
2546
0
  RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
2547
0
           rel, 1, relend, howto, 0, contents);
2548
2549
0
      if (bfd_link_relocatable (info))
2550
0
  continue;
2551
2552
0
      if (h != NULL)
2553
0
  name = h->root.root.string;
2554
0
      else
2555
0
  {
2556
0
    name = (bfd_elf_string_from_elf_section
2557
0
      (input_bfd, symtab_hdr->sh_link, sym->st_name));
2558
0
    if (name == NULL || *name == '\0')
2559
0
      name = bfd_section_name (sec);
2560
0
  }
2561
2562
0
      if (r_symndx != 0
2563
0
    && r_type != R_KVX_NONE
2564
0
    && (h == NULL
2565
0
        || h->root.type == bfd_link_hash_defined
2566
0
        || h->root.type == bfd_link_hash_defweak)
2567
0
    && IS_KVX_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
2568
0
  {
2569
0
    (*_bfd_error_handler)
2570
0
      ((sym_type == STT_TLS
2571
        /* xgettext:c-format */
2572
0
        ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
2573
        /* xgettext:c-format */
2574
0
        : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
2575
0
       input_bfd,
2576
0
       input_section, (uint64_t) rel->r_offset, howto->name, name);
2577
0
  }
2578
2579
      /* Original aarch64 has relaxation handling for TLS here. */
2580
0
      r = bfd_reloc_continue;
2581
2582
      /* There may be multiple consecutive relocations for the
2583
   same offset.  In that case we are supposed to treat the
2584
   output of each relocation as the addend for the next.  */
2585
0
      if (rel + 1 < relend
2586
0
    && rel->r_offset == rel[1].r_offset
2587
0
    && ELF32_R_TYPE (rel[1].r_info) != R_KVX_NONE)
2588
2589
0
  save_addend = true;
2590
0
      else
2591
0
  save_addend = false;
2592
2593
0
      if (r == bfd_reloc_continue)
2594
0
  r = elf32_kvx_final_link_relocate (howto, input_bfd, output_bfd,
2595
0
             input_section, contents, rel,
2596
0
             relocation, info, sec,
2597
0
             h, &unresolved_reloc,
2598
0
             save_addend, &addend, sym);
2599
2600
0
      switch (elf32_kvx_bfd_reloc_from_type (input_bfd, r_type))
2601
0
  {
2602
0
  case BFD_RELOC_KVX_S37_TLS_GD_LO10:
2603
0
  case BFD_RELOC_KVX_S37_TLS_GD_UP27:
2604
2605
0
  case BFD_RELOC_KVX_S43_TLS_GD_LO10:
2606
0
  case BFD_RELOC_KVX_S43_TLS_GD_UP27:
2607
0
  case BFD_RELOC_KVX_S43_TLS_GD_EX6:
2608
2609
0
  case BFD_RELOC_KVX_S37_TLS_LD_LO10:
2610
0
  case BFD_RELOC_KVX_S37_TLS_LD_UP27:
2611
2612
0
  case BFD_RELOC_KVX_S43_TLS_LD_LO10:
2613
0
  case BFD_RELOC_KVX_S43_TLS_LD_UP27:
2614
0
  case BFD_RELOC_KVX_S43_TLS_LD_EX6:
2615
2616
0
    if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2617
0
      {
2618
0
        bool need_relocs = false;
2619
0
        bfd_byte *loc;
2620
0
        int indx;
2621
0
        bfd_vma off;
2622
2623
0
        off = symbol_got_offset (input_bfd, h, r_symndx);
2624
0
        indx = h && h->dynindx != -1 ? h->dynindx : 0;
2625
2626
0
        need_relocs =
2627
0
    (bfd_link_pic (info) || indx != 0) &&
2628
0
    (h == NULL
2629
0
     || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2630
0
     || h->root.type != bfd_link_hash_undefweak);
2631
2632
0
        BFD_ASSERT (globals->root.srelgot != NULL);
2633
2634
0
        if (need_relocs)
2635
0
    {
2636
0
      Elf_Internal_Rela rela;
2637
0
      rela.r_info = ELF32_R_INFO (indx, R_KVX_64_DTPMOD);
2638
0
      rela.r_addend = 0;
2639
0
      rela.r_offset = globals->root.sgot->output_section->vma +
2640
0
        globals->root.sgot->output_offset + off;
2641
2642
0
      loc = globals->root.srelgot->contents;
2643
0
      loc += globals->root.srelgot->reloc_count++
2644
0
        * RELOC_SIZE (htab);
2645
0
      bfd_elf32_swap_reloca_out (output_bfd, &rela, loc);
2646
2647
0
      bfd_reloc_code_real_type real_type =
2648
0
        elf32_kvx_bfd_reloc_from_type (input_bfd, r_type);
2649
2650
0
      if (real_type == BFD_RELOC_KVX_S37_TLS_LD_LO10
2651
0
          || real_type == BFD_RELOC_KVX_S37_TLS_LD_UP27
2652
0
          || real_type == BFD_RELOC_KVX_S43_TLS_LD_LO10
2653
0
          || real_type == BFD_RELOC_KVX_S43_TLS_LD_UP27
2654
0
          || real_type == BFD_RELOC_KVX_S43_TLS_LD_EX6)
2655
0
        {
2656
          /* For local dynamic, don't generate DTPOFF in any case.
2657
       Initialize the DTPOFF slot to zero, so we get the module
2658
       base address when invoking the runtime TLS resolver.  */
2659
0
          bfd_put_32 (output_bfd, 0,
2660
0
          globals->root.sgot->contents + off
2661
0
          + GOT_ENTRY_SIZE);
2662
0
        }
2663
0
      else if (indx == 0)
2664
0
        {
2665
0
          bfd_put_32 (output_bfd,
2666
0
          relocation - dtpoff_base (info),
2667
0
          globals->root.sgot->contents + off
2668
0
          + GOT_ENTRY_SIZE);
2669
0
        }
2670
0
      else
2671
0
        {
2672
          /* This TLS symbol is global. We emit a
2673
       relocation to fix up the TLS offset at load
2674
       time.  */
2675
0
          rela.r_info =
2676
0
      ELF32_R_INFO (indx, R_KVX_64_DTPOFF);
2677
0
          rela.r_addend = 0;
2678
0
          rela.r_offset =
2679
0
      (globals->root.sgot->output_section->vma
2680
0
       + globals->root.sgot->output_offset + off
2681
0
       + GOT_ENTRY_SIZE);
2682
2683
0
          loc = globals->root.srelgot->contents;
2684
0
          loc += globals->root.srelgot->reloc_count++
2685
0
      * RELOC_SIZE (globals);
2686
0
          bfd_elf32_swap_reloca_out (output_bfd, &rela, loc);
2687
0
          bfd_put_32 (output_bfd, (bfd_vma) 0,
2688
0
          globals->root.sgot->contents + off
2689
0
          + GOT_ENTRY_SIZE);
2690
0
        }
2691
0
    }
2692
0
        else
2693
0
    {
2694
0
      bfd_put_32 (output_bfd, (bfd_vma) 1,
2695
0
            globals->root.sgot->contents + off);
2696
0
      bfd_put_32 (output_bfd,
2697
0
            relocation - dtpoff_base (info),
2698
0
            globals->root.sgot->contents + off
2699
0
            + GOT_ENTRY_SIZE);
2700
0
    }
2701
2702
0
        symbol_got_offset_mark (input_bfd, h, r_symndx);
2703
0
      }
2704
0
    break;
2705
2706
0
  case BFD_RELOC_KVX_S37_TLS_IE_LO10:
2707
0
  case BFD_RELOC_KVX_S37_TLS_IE_UP27:
2708
2709
0
  case BFD_RELOC_KVX_S43_TLS_IE_LO10:
2710
0
  case BFD_RELOC_KVX_S43_TLS_IE_UP27:
2711
0
  case BFD_RELOC_KVX_S43_TLS_IE_EX6:
2712
0
    if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
2713
0
      {
2714
0
        bool need_relocs = false;
2715
0
        bfd_byte *loc;
2716
0
        int indx;
2717
0
        bfd_vma off;
2718
2719
0
        off = symbol_got_offset (input_bfd, h, r_symndx);
2720
2721
0
        indx = h && h->dynindx != -1 ? h->dynindx : 0;
2722
2723
0
        need_relocs =
2724
0
    (bfd_link_pic (info) || indx != 0) &&
2725
0
    (h == NULL
2726
0
     || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2727
0
     || h->root.type != bfd_link_hash_undefweak);
2728
2729
0
        BFD_ASSERT (globals->root.srelgot != NULL);
2730
2731
0
        if (need_relocs)
2732
0
    {
2733
0
      Elf_Internal_Rela rela;
2734
2735
0
      if (indx == 0)
2736
0
        rela.r_addend = relocation - dtpoff_base (info);
2737
0
      else
2738
0
        rela.r_addend = 0;
2739
2740
0
      rela.r_info = ELF32_R_INFO (indx, R_KVX_64_TPOFF);
2741
0
      rela.r_offset = globals->root.sgot->output_section->vma +
2742
0
        globals->root.sgot->output_offset + off;
2743
2744
0
      loc = globals->root.srelgot->contents;
2745
0
      loc += globals->root.srelgot->reloc_count++
2746
0
        * RELOC_SIZE (htab);
2747
2748
0
      bfd_elf32_swap_reloca_out (output_bfd, &rela, loc);
2749
2750
0
      bfd_put_32 (output_bfd, rela.r_addend,
2751
0
            globals->root.sgot->contents + off);
2752
0
    }
2753
0
        else
2754
0
    bfd_put_32 (output_bfd, relocation - tpoff_base (info),
2755
0
          globals->root.sgot->contents + off);
2756
2757
0
        symbol_got_offset_mark (input_bfd, h, r_symndx);
2758
0
      }
2759
0
    break;
2760
2761
0
  default:
2762
0
    break;
2763
0
  }
2764
2765
      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
2766
   because such sections are not SEC_ALLOC and thus ld.so will
2767
   not process them.  */
2768
0
      if (unresolved_reloc
2769
0
    && !((input_section->flags & SEC_DEBUGGING) != 0
2770
0
         && h->def_dynamic)
2771
0
    && _bfd_elf_section_offset (output_bfd, info, input_section,
2772
0
              +rel->r_offset) != (bfd_vma) - 1)
2773
0
  {
2774
0
    (*_bfd_error_handler)
2775
      /* xgettext:c-format */
2776
0
      (_("%pB(%pA+%#" PRIx64 "): "
2777
0
         "unresolvable %s relocation against symbol `%s'"),
2778
0
       input_bfd, input_section, (uint64_t) rel->r_offset, howto->name,
2779
0
       h->root.root.string);
2780
0
    return false;
2781
0
  }
2782
2783
0
      if (r != bfd_reloc_ok && r != bfd_reloc_continue)
2784
0
  {
2785
0
    switch (r)
2786
0
      {
2787
0
      case bfd_reloc_overflow:
2788
0
        (*info->callbacks->reloc_overflow)
2789
0
    (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
2790
0
     input_bfd, input_section, rel->r_offset);
2791
2792
        /* Original aarch64 code had a check for alignment correctness */
2793
0
        break;
2794
2795
0
      case bfd_reloc_undefined:
2796
0
        (*info->callbacks->undefined_symbol)
2797
0
    (info, name, input_bfd, input_section, rel->r_offset, true);
2798
0
        break;
2799
2800
0
      case bfd_reloc_outofrange:
2801
0
        error_message = _("out of range");
2802
0
        goto common_error;
2803
2804
0
      case bfd_reloc_notsupported:
2805
0
        error_message = _("unsupported relocation");
2806
0
        goto common_error;
2807
2808
0
      case bfd_reloc_dangerous:
2809
        /* error_message should already be set.  */
2810
0
        goto common_error;
2811
2812
0
      default:
2813
0
        error_message = _("unknown error");
2814
        /* Fall through.  */
2815
2816
0
      common_error:
2817
0
        BFD_ASSERT (error_message != NULL);
2818
0
        (*info->callbacks->reloc_dangerous)
2819
0
    (info, error_message, input_bfd, input_section, rel->r_offset);
2820
0
        break;
2821
0
      }
2822
0
  }
2823
2824
0
      if (!save_addend)
2825
0
  addend = 0;
2826
0
    }
2827
2828
0
  return true;
2829
0
}
2830
2831
/* Set the right machine number.  */
2832
2833
static bool
2834
elf32_kvx_object_p (bfd *abfd)
2835
2
{
2836
  /* must be coherent with default arch in cpu-kvx.c */
2837
2
  int e_set = bfd_mach_kv3_1;
2838
2839
2
  if (elf_elfheader (abfd)->e_machine == EM_KVX)
2840
2
    {
2841
2
      int e_core = elf_elfheader (abfd)->e_flags & ELF_KVX_CORE_MASK;
2842
2
      switch(e_core)
2843
2
  {
2844
#if ARCH_SIZE == 64
2845
  case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1_64; break;
2846
  case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2_64; break;
2847
  case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1_64; break;
2848
#else
2849
0
  case ELF_KVX_CORE_KV3_1 : e_set = bfd_mach_kv3_1; break;
2850
0
  case ELF_KVX_CORE_KV3_2 : e_set = bfd_mach_kv3_2; break;
2851
0
  case ELF_KVX_CORE_KV4_1 : e_set = bfd_mach_kv4_1; break;
2852
0
#endif
2853
2
  default:
2854
2
    (*_bfd_error_handler)(_("%s: Bad ELF id: `%d'"),
2855
2
        abfd->filename, e_core);
2856
2
  }
2857
2
    }
2858
2
  return bfd_default_set_arch_mach (abfd, bfd_arch_kvx, e_set);
2859
2
}
2860
2861
/* Function to keep KVX specific flags in the ELF header.  */
2862
2863
static bool
2864
elf32_kvx_set_private_flags (bfd *abfd, flagword flags)
2865
0
{
2866
0
  if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
2867
0
    {
2868
0
    }
2869
0
  else
2870
0
    {
2871
0
      elf_elfheader (abfd)->e_flags = flags;
2872
0
      elf_flags_init (abfd) = true;
2873
0
    }
2874
2875
0
  return true;
2876
0
}
2877
2878
/* Merge backend specific data from an object file to the output
2879
   object file when linking.  */
2880
2881
static bool
2882
elf32_kvx_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
2883
0
{
2884
0
  bfd *obfd = info->output_bfd;
2885
0
  flagword out_flags;
2886
0
  flagword in_flags;
2887
0
  bool flags_compatible = true;
2888
0
  asection *sec;
2889
2890
  /* Check if we have the same endianness.  */
2891
0
  if (!_bfd_generic_verify_endian_match (ibfd, info))
2892
0
    return false;
2893
2894
0
  if (!is_kvx_elf (ibfd) || !is_kvx_elf (obfd))
2895
0
    return true;
2896
2897
  /* The input BFD must have had its flags initialised.  */
2898
  /* The following seems bogus to me -- The flags are initialized in
2899
     the assembler but I don't think an elf_flags_init field is
2900
     written into the object.  */
2901
  /* BFD_ASSERT (elf_flags_init (ibfd)); */
2902
2903
0
  if (bfd_get_arch_size (ibfd) != bfd_get_arch_size (obfd))
2904
0
    {
2905
0
      const char *msg;
2906
2907
0
      if (bfd_get_arch_size (ibfd) == 32
2908
0
    && bfd_get_arch_size (obfd) == 64)
2909
0
  msg = _("%s: compiled as 32-bit object and %s is 64-bit");
2910
0
      else if (bfd_get_arch_size (ibfd) == 64
2911
0
         && bfd_get_arch_size (obfd) == 32)
2912
0
  msg = _("%s: compiled as 64-bit object and %s is 32-bit");
2913
0
      else
2914
0
  msg = _("%s: object size does not match that of target %s");
2915
2916
0
      (*_bfd_error_handler) (msg, bfd_get_filename (ibfd),
2917
0
           bfd_get_filename (obfd));
2918
0
      bfd_set_error (bfd_error_wrong_format);
2919
0
      return false;
2920
0
    }
2921
2922
0
  in_flags = elf_elfheader (ibfd)->e_flags;
2923
0
  out_flags = elf_elfheader (obfd)->e_flags;
2924
2925
0
  if (!elf_flags_init (obfd))
2926
0
    {
2927
      /* If the input is the default architecture and had the default
2928
   flags then do not bother setting the flags for the output
2929
   architecture, instead allow future merges to do this.  If no
2930
   future merges ever set these flags then they will retain their
2931
   uninitialised values, which surprise surprise, correspond
2932
   to the default values.  */
2933
0
      if (bfd_get_arch_info (ibfd)->the_default
2934
0
    && elf_elfheader (ibfd)->e_flags == 0)
2935
0
  return true;
2936
2937
0
      elf_flags_init (obfd) = true;
2938
0
      elf_elfheader (obfd)->e_flags = in_flags;
2939
2940
0
      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
2941
0
    && bfd_get_arch_info (obfd)->the_default)
2942
0
  return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
2943
0
          bfd_get_mach (ibfd));
2944
2945
0
      return true;
2946
0
    }
2947
2948
  /* Identical flags must be compatible.  */
2949
0
  if (in_flags == out_flags)
2950
0
    return true;
2951
2952
  /* Check to see if the input BFD actually contains any sections.  If
2953
     not, its flags may not have been initialised either, but it
2954
     cannot actually cause any incompatibility.  Do not short-circuit
2955
     dynamic objects; their section list may be emptied by
2956
     elf_link_add_object_symbols.
2957
2958
     Also check to see if there are no code sections in the input.
2959
     In this case there is no need to check for code specific flags.
2960
     XXX - do we need to worry about floating-point format compatibility
2961
     in data sections ?  */
2962
0
  if (!(ibfd->flags & DYNAMIC))
2963
0
    {
2964
0
      bool null_input_bfd = true;
2965
0
      bool only_data_sections = true;
2966
2967
0
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2968
0
  {
2969
0
    if ((bfd_section_flags (sec)
2970
0
         & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2971
0
        == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
2972
0
      only_data_sections = false;
2973
2974
0
    null_input_bfd = false;
2975
0
    break;
2976
0
  }
2977
2978
0
      if (null_input_bfd || only_data_sections)
2979
0
  return true;
2980
0
    }
2981
0
  return flags_compatible;
2982
0
}
2983
2984
/* Display the flags field.  */
2985
2986
static bool
2987
elf32_kvx_print_private_bfd_data (bfd *abfd, void *ptr)
2988
0
{
2989
0
  FILE *file = (FILE *) ptr;
2990
0
  unsigned long flags;
2991
2992
0
  BFD_ASSERT (abfd != NULL && ptr != NULL);
2993
2994
  /* Print normal ELF private data.  */
2995
0
  _bfd_elf_print_private_bfd_data (abfd, ptr);
2996
2997
0
  flags = elf_elfheader (abfd)->e_flags;
2998
  /* Ignore init flag - it may not be set, despite the flags field
2999
     containing valid data.  */
3000
3001
  /* xgettext:c-format */
3002
0
  fprintf (file, _("Private flags = 0x%lx : "), elf_elfheader (abfd)->e_flags);
3003
0
  if((flags & ELF_KVX_ABI_64B_ADDR_BIT) == ELF_KVX_ABI_64B_ADDR_BIT)
3004
0
    {
3005
0
      if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3006
0
  fprintf (file, _("Coolidge (kv3) V1 64 bits"));
3007
0
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3008
0
  fprintf (file, _("Coolidge (kv3) V2 64 bits"));
3009
0
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3010
0
  fprintf (file, _("Coolidge (kv4) V1 64 bits"));
3011
0
    }
3012
0
  else
3013
0
    {
3014
0
      if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_1))
3015
0
  fprintf (file, _("Coolidge (kv3) V1 32 bits"));
3016
0
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV3_2))
3017
0
  fprintf (file, _("Coolidge (kv3) V2 32 bits"));
3018
0
      else if (ELF_KVX_CHECK_CORE(flags,ELF_KVX_CORE_KV4_1))
3019
0
  fprintf (file, _("Coolidge (kv4) V1 32 bits"));
3020
0
    }
3021
3022
0
  fputc ('\n', file);
3023
3024
0
  return true;
3025
0
}
3026
3027
/* Adjust a symbol defined by a dynamic object and referenced by a
3028
   regular object.  The current definition is in some section of the
3029
   dynamic object, but we're not including those sections.  We have to
3030
   change the definition to something the rest of the link can
3031
   understand.  */
3032
3033
static bool
3034
elf32_kvx_adjust_dynamic_symbol (struct bfd_link_info *info,
3035
         struct elf_link_hash_entry *h)
3036
0
{
3037
0
  struct elf_kvx_link_hash_table *htab;
3038
0
  asection *s;
3039
3040
  /* If this is a function, put it in the procedure linkage table.  We
3041
     will fill in the contents of the procedure linkage table later,
3042
     when we know the address of the .got section.  */
3043
0
  if (h->type == STT_FUNC || h->needs_plt)
3044
0
    {
3045
0
      if (h->plt.refcount <= 0
3046
0
    || ((SYMBOL_CALLS_LOCAL (info, h)
3047
0
         || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3048
0
       && h->root.type == bfd_link_hash_undefweak))))
3049
0
  {
3050
    /* This case can occur if we saw a CALL26 reloc in
3051
       an input file, but the symbol wasn't referred to
3052
       by a dynamic object or all references were
3053
       garbage collected. In which case we can end up
3054
       resolving.  */
3055
0
    h->plt.offset = (bfd_vma) - 1;
3056
0
    h->needs_plt = 0;
3057
0
  }
3058
3059
0
      return true;
3060
0
    }
3061
0
  else
3062
    /* Otherwise, reset to -1.  */
3063
0
    h->plt.offset = (bfd_vma) - 1;
3064
3065
3066
  /* If this is a weak symbol, and there is a real definition, the
3067
     processor independent code will have arranged for us to see the
3068
     real definition first, and we can just use the same value.  */
3069
0
  if (h->is_weakalias)
3070
0
    {
3071
0
      struct elf_link_hash_entry *def = weakdef (h);
3072
0
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
3073
0
      h->root.u.def.section = def->root.u.def.section;
3074
0
      h->root.u.def.value = def->root.u.def.value;
3075
0
      if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
3076
0
  h->non_got_ref = def->non_got_ref;
3077
0
      return true;
3078
0
    }
3079
3080
  /* If we are creating a shared library, we must presume that the
3081
     only references to the symbol are via the global offset table.
3082
     For such cases we need not do anything here; the relocations will
3083
     be handled correctly by relocate_section.  */
3084
0
  if (bfd_link_pic (info))
3085
0
    return true;
3086
3087
  /* If there are no references to this symbol that do not use the
3088
     GOT, we don't need to generate a copy reloc.  */
3089
0
  if (!h->non_got_ref)
3090
0
    return true;
3091
3092
  /* If -z nocopyreloc was given, we won't generate them either.  */
3093
0
  if (info->nocopyreloc)
3094
0
    {
3095
0
      h->non_got_ref = 0;
3096
0
      return true;
3097
0
    }
3098
3099
  /* We must allocate the symbol in our .dynbss section, which will
3100
     become part of the .bss section of the executable.  There will be
3101
     an entry for this symbol in the .dynsym section.  The dynamic
3102
     object will contain position independent code, so all references
3103
     from the dynamic object to this symbol will go through the global
3104
     offset table.  The dynamic linker will use the .dynsym entry to
3105
     determine the address it must put in the global offset table, so
3106
     both the dynamic object and the regular object will refer to the
3107
     same memory location for the variable.  */
3108
3109
0
  htab = elf_kvx_hash_table (info);
3110
3111
  /* We must generate a R_KVX_COPY reloc to tell the dynamic linker
3112
     to copy the initial value out of the dynamic object and into the
3113
     runtime process image.  */
3114
0
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
3115
0
    {
3116
0
      htab->srelbss->size += RELOC_SIZE (htab);
3117
0
      h->needs_copy = 1;
3118
0
    }
3119
3120
0
  s = htab->sdynbss;
3121
3122
0
  return _bfd_elf_adjust_dynamic_copy (info, h, s);
3123
0
}
3124
3125
static bool
3126
elf32_kvx_allocate_local_symbols (bfd *abfd, unsigned number)
3127
0
{
3128
0
  struct elf_kvx_local_symbol *locals;
3129
0
  locals = elf_kvx_locals (abfd);
3130
0
  if (locals == NULL)
3131
0
    {
3132
0
      locals = (struct elf_kvx_local_symbol *)
3133
0
  bfd_zalloc (abfd, number * sizeof (struct elf_kvx_local_symbol));
3134
0
      if (locals == NULL)
3135
0
  return false;
3136
0
      elf_kvx_locals (abfd) = locals;
3137
0
    }
3138
0
  return true;
3139
0
}
3140
3141
/* Create the .got section to hold the global offset table.  */
3142
3143
static bool
3144
kvx_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
3145
0
{
3146
0
  const struct elf_backend_data *bed = get_elf_backend_data (abfd);
3147
0
  flagword flags;
3148
0
  asection *s;
3149
0
  struct elf_link_hash_entry *h;
3150
0
  struct elf_link_hash_table *htab = elf_hash_table (info);
3151
3152
  /* This function may be called more than once.  */
3153
0
  s = bfd_get_linker_section (abfd, ".got");
3154
0
  if (s != NULL)
3155
0
    return true;
3156
3157
0
  flags = bed->dynamic_sec_flags;
3158
3159
0
  s = bfd_make_section_anyway_with_flags (abfd,
3160
0
            (bed->rela_plts_and_copies_p
3161
0
             ? ".rela.got" : ".rel.got"),
3162
0
            (bed->dynamic_sec_flags
3163
0
             | SEC_READONLY));
3164
0
  if (s == NULL
3165
0
      || !bfd_set_section_alignment (s, bed->s->log_file_align))
3166
3167
0
    return false;
3168
0
  htab->srelgot = s;
3169
3170
0
  s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
3171
0
  if (s == NULL
3172
0
      || !bfd_set_section_alignment (s, bed->s->log_file_align))
3173
0
    return false;
3174
0
  htab->sgot = s;
3175
0
  htab->sgot->size += GOT_ENTRY_SIZE;
3176
3177
0
  if (bed->want_got_sym)
3178
0
    {
3179
      /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
3180
   (or .got.plt) section.  We don't do this in the linker script
3181
   because we don't want to define the symbol if we are not creating
3182
   a global offset table.  */
3183
0
      h = _bfd_elf_define_linkage_sym (abfd, info, s,
3184
0
               "_GLOBAL_OFFSET_TABLE_");
3185
0
      elf_hash_table (info)->hgot = h;
3186
0
      if (h == NULL)
3187
0
  return false;
3188
0
    }
3189
3190
0
  if (bed->want_got_plt)
3191
0
    {
3192
0
      s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
3193
0
      if (s == NULL
3194
0
    || !bfd_set_section_alignment (s,
3195
0
           bed->s->log_file_align))
3196
0
  return false;
3197
0
      htab->sgotplt = s;
3198
0
    }
3199
3200
  /* The first bit of the global offset table is the header.  */
3201
0
  s->size += bed->got_header_size;
3202
3203
  /* We still need to handle GOT content when doing a static link with PIC.  */
3204
0
  if (bfd_link_executable (info) && !bfd_link_pic (info)) {
3205
0
    htab->dynobj = abfd;
3206
0
  }
3207
3208
0
  return true;
3209
0
}
3210
3211
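As a small, self-contained sketch of the arithmetic behind the section sizing above (illustrative only, not part of elf32-kvx.c; the constants mirror the ELF32 configuration of this file, where GOT entries are 4 bytes and elf_backend_got_header_size reserves three of them):

/* Illustrative arithmetic only: the reserved GOT header that precedes the
   first PLT-related .got.plt slot.  */
#include <stdio.h>

int
main (void)
{
  const unsigned got_entry_size = 4;                    /* 4-byte GOT entries in ELF32.  */
  const unsigned got_header_size = got_entry_size * 3;  /* elf_backend_got_header_size.  */

  /* Slots 0..2 are reserved for the dynamic linker; the first jump slot
     therefore starts right after the header.  */
  printf ("GOT header = %u bytes, first jump slot at offset %u\n",
          got_header_size, got_header_size);
  return 0;
}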
/* Look through the relocs for a section during the first phase.  */
3212
3213
static bool
3214
elf32_kvx_check_relocs (bfd *abfd, struct bfd_link_info *info,
3215
          asection *sec, const Elf_Internal_Rela *relocs)
3216
0
{
3217
0
  Elf_Internal_Shdr *symtab_hdr;
3218
0
  struct elf_link_hash_entry **sym_hashes;
3219
0
  const Elf_Internal_Rela *rel;
3220
0
  const Elf_Internal_Rela *rel_end;
3221
0
  asection *sreloc;
3222
3223
0
  struct elf_kvx_link_hash_table *htab;
3224
3225
0
  if (bfd_link_relocatable (info))
3226
0
    return true;
3227
3228
0
  BFD_ASSERT (is_kvx_elf (abfd));
3229
3230
0
  htab = elf_kvx_hash_table (info);
3231
0
  sreloc = NULL;
3232
3233
0
  symtab_hdr = &elf_symtab_hdr (abfd);
3234
0
  sym_hashes = elf_sym_hashes (abfd);
3235
3236
0
  rel_end = relocs + sec->reloc_count;
3237
0
  for (rel = relocs; rel < rel_end; rel++)
3238
0
    {
3239
0
      struct elf_link_hash_entry *h;
3240
0
      unsigned int r_symndx;
3241
0
      unsigned int r_type;
3242
0
      bfd_reloc_code_real_type bfd_r_type;
3243
0
      Elf_Internal_Sym *isym;
3244
3245
0
      r_symndx = ELF32_R_SYM (rel->r_info);
3246
0
      r_type = ELF32_R_TYPE (rel->r_info);
3247
3248
0
      if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
3249
0
  {
3250
    /* xgettext:c-format */
3251
0
    _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd, r_symndx);
3252
0
    return false;
3253
0
  }
3254
3255
0
      if (r_symndx < symtab_hdr->sh_info)
3256
0
  {
3257
    /* A local symbol.  */
3258
0
    isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3259
0
          abfd, r_symndx);
3260
0
    if (isym == NULL)
3261
0
      return false;
3262
3263
0
    h = NULL;
3264
0
  }
3265
0
      else
3266
0
  {
3267
0
    h = sym_hashes[r_symndx - symtab_hdr->sh_info];
3268
0
    while (h->root.type == bfd_link_hash_indirect
3269
0
     || h->root.type == bfd_link_hash_warning)
3270
0
      h = (struct elf_link_hash_entry *) h->root.u.i.link;
3271
0
  }
3272
3273
      /* Could be done earlier, if h were already available.  */
3274
0
      bfd_r_type = kvx_tls_transition (abfd, info, r_type, h, r_symndx);
3275
3276
0
      if (h != NULL)
3277
0
  {
3278
    /* Create the ifunc sections for static executables.  If we
3279
       never see an indirect function symbol nor are we building
3280
       a static executable, those sections will be empty and
3281
       won't appear in output.  */
3282
0
    switch (bfd_r_type)
3283
0
      {
3284
0
      default:
3285
0
        break;
3286
0
      }
3287
3288
    /* It is referenced by a non-shared object. */
3289
0
    h->ref_regular = 1;
3290
0
  }
3291
3292
0
      switch (bfd_r_type)
3293
0
  {
3294
3295
0
  case BFD_RELOC_KVX_S43_LO10:
3296
0
  case BFD_RELOC_KVX_S43_UP27:
3297
0
  case BFD_RELOC_KVX_S43_EX6:
3298
3299
0
  case BFD_RELOC_KVX_S37_LO10:
3300
0
  case BFD_RELOC_KVX_S37_UP27:
3301
3302
0
  case BFD_RELOC_KVX_S64_LO10:
3303
0
  case BFD_RELOC_KVX_S64_UP27:
3304
0
  case BFD_RELOC_KVX_S64_EX27:
3305
3306
0
  case BFD_RELOC_KVX_32:
3307
0
  case BFD_RELOC_KVX_64:
3308
3309
    /* We don't need to handle relocs into sections not going into
3310
       the "real" output.  */
3311
0
    if ((sec->flags & SEC_ALLOC) == 0)
3312
0
      break;
3313
3314
0
    if (h != NULL)
3315
0
      {
3316
0
        if (!bfd_link_pic (info))
3317
0
    h->non_got_ref = 1;
3318
3319
0
        h->plt.refcount += 1;
3320
0
        h->pointer_equality_needed = 1;
3321
0
      }
3322
3323
    /* No need to do anything if we're not creating a shared
3324
       object.  */
3325
0
    if (! bfd_link_pic (info))
3326
0
      break;
3327
3328
0
    {
3329
0
      struct elf_dyn_relocs *p;
3330
0
      struct elf_dyn_relocs **head;
3331
3332
      /* We must copy these reloc types into the output file.
3333
         Create a reloc section in dynobj and make room for
3334
         this reloc.  */
3335
0
      if (sreloc == NULL)
3336
0
        {
3337
0
    if (htab->root.dynobj == NULL)
3338
0
      htab->root.dynobj = abfd;
3339
3340
0
    sreloc = _bfd_elf_make_dynamic_reloc_section
3341
0
      (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ true);
3342
3343
0
    if (sreloc == NULL)
3344
0
      return false;
3345
0
        }
3346
3347
      /* If this is a global symbol, we count the number of
3348
         relocations we need for this symbol.  */
3349
0
      if (h != NULL)
3350
0
        {
3351
0
    head = &h->dyn_relocs;
3352
0
        }
3353
0
      else
3354
0
        {
3355
    /* Track dynamic relocs needed for local syms too.
3356
       We really need local syms available to do this
3357
       easily.  Oh well.  */
3358
3359
0
    asection *s;
3360
0
    void **vpp;
3361
3362
0
    isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3363
0
                abfd, r_symndx);
3364
0
    if (isym == NULL)
3365
0
      return false;
3366
3367
0
    s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3368
0
    if (s == NULL)
3369
0
      s = sec;
3370
3371
    /* Beware of type punned pointers vs strict aliasing
3372
       rules.  */
3373
0
    vpp = &(elf_section_data (s)->local_dynrel);
3374
0
    head = (struct elf_dyn_relocs **) vpp;
3375
0
        }
3376
3377
0
      p = *head;
3378
0
      if (p == NULL || p->sec != sec)
3379
0
        {
3380
0
    bfd_size_type amt = sizeof *p;
3381
0
    p = ((struct elf_dyn_relocs *)
3382
0
         bfd_zalloc (htab->root.dynobj, amt));
3383
0
    if (p == NULL)
3384
0
      return false;
3385
0
    p->next = *head;
3386
0
    *head = p;
3387
0
    p->sec = sec;
3388
0
        }
3389
3390
0
      p->count += 1;
3391
3392
0
    }
3393
0
    break;
3394
3395
0
  case BFD_RELOC_KVX_S37_GOT_LO10:
3396
0
  case BFD_RELOC_KVX_S37_GOT_UP27:
3397
3398
0
  case BFD_RELOC_KVX_S37_GOTOFF_LO10:
3399
0
  case BFD_RELOC_KVX_S37_GOTOFF_UP27:
3400
3401
0
  case BFD_RELOC_KVX_S43_GOT_LO10:
3402
0
  case BFD_RELOC_KVX_S43_GOT_UP27:
3403
0
  case BFD_RELOC_KVX_S43_GOT_EX6:
3404
3405
0
  case BFD_RELOC_KVX_S43_GOTOFF_LO10:
3406
0
  case BFD_RELOC_KVX_S43_GOTOFF_UP27:
3407
0
  case BFD_RELOC_KVX_S43_GOTOFF_EX6:
3408
3409
0
  case BFD_RELOC_KVX_S37_TLS_GD_LO10:
3410
0
  case BFD_RELOC_KVX_S37_TLS_GD_UP27:
3411
3412
0
  case BFD_RELOC_KVX_S43_TLS_GD_LO10:
3413
0
  case BFD_RELOC_KVX_S43_TLS_GD_UP27:
3414
0
  case BFD_RELOC_KVX_S43_TLS_GD_EX6:
3415
3416
0
  case BFD_RELOC_KVX_S37_TLS_IE_LO10:
3417
0
  case BFD_RELOC_KVX_S37_TLS_IE_UP27:
3418
3419
0
  case BFD_RELOC_KVX_S43_TLS_IE_LO10:
3420
0
  case BFD_RELOC_KVX_S43_TLS_IE_UP27:
3421
0
  case BFD_RELOC_KVX_S43_TLS_IE_EX6:
3422
3423
0
  case BFD_RELOC_KVX_S37_TLS_LD_LO10:
3424
0
  case BFD_RELOC_KVX_S37_TLS_LD_UP27:
3425
3426
0
  case BFD_RELOC_KVX_S43_TLS_LD_LO10:
3427
0
  case BFD_RELOC_KVX_S43_TLS_LD_UP27:
3428
0
  case BFD_RELOC_KVX_S43_TLS_LD_EX6:
3429
0
    {
3430
0
      unsigned got_type;
3431
0
      unsigned old_got_type;
3432
3433
0
      got_type = kvx_reloc_got_type (bfd_r_type);
3434
3435
0
      if (h)
3436
0
        {
3437
0
    h->got.refcount += 1;
3438
0
    old_got_type = elf_kvx_hash_entry (h)->got_type;
3439
0
        }
3440
0
      else
3441
0
        {
3442
0
    struct elf_kvx_local_symbol *locals;
3443
3444
0
    if (!elf32_kvx_allocate_local_symbols
3445
0
        (abfd, symtab_hdr->sh_info))
3446
0
      return false;
3447
3448
0
    locals = elf_kvx_locals (abfd);
3449
0
    BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
3450
0
    locals[r_symndx].got_refcount += 1;
3451
0
    old_got_type = locals[r_symndx].got_type;
3452
0
        }
3453
3454
      /* We will already have issued an error message if there
3455
         is a TLS/non-TLS mismatch, based on the symbol type.
3456
         So just combine any TLS types needed.  */
3457
0
      if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
3458
0
    && got_type != GOT_NORMAL)
3459
0
        got_type |= old_got_type;
3460
3461
      /* If the symbol is accessed by both IE and GD methods, we
3462
         are able to relax.  Turn off the GD flag, without
3463
         messing up with any other kind of TLS types that may be
3464
         involved.  */
3465
      /* Disabled: untested and unused TLS relaxation.  */
3466
      /* if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type)) */
3467
      /*   got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD); */
3468
3469
0
      if (old_got_type != got_type)
3470
0
        {
3471
0
    if (h != NULL)
3472
0
      elf_kvx_hash_entry (h)->got_type = got_type;
3473
0
    else
3474
0
      {
3475
0
        struct elf_kvx_local_symbol *locals;
3476
0
        locals = elf_kvx_locals (abfd);
3477
0
        BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
3478
0
        locals[r_symndx].got_type = got_type;
3479
0
      }
3480
0
        }
3481
3482
0
      if (htab->root.dynobj == NULL)
3483
0
        htab->root.dynobj = abfd;
3484
0
      if (! kvx_elf_create_got_section (htab->root.dynobj, info))
3485
0
        return false;
3486
0
      break;
3487
0
    }
3488
3489
0
  case BFD_RELOC_KVX_S64_GOTADDR_LO10:
3490
0
  case BFD_RELOC_KVX_S64_GOTADDR_UP27:
3491
0
  case BFD_RELOC_KVX_S64_GOTADDR_EX27:
3492
3493
0
  case BFD_RELOC_KVX_S43_GOTADDR_LO10:
3494
0
  case BFD_RELOC_KVX_S43_GOTADDR_UP27:
3495
0
  case BFD_RELOC_KVX_S43_GOTADDR_EX6:
3496
3497
0
  case BFD_RELOC_KVX_S37_GOTADDR_LO10:
3498
0
  case BFD_RELOC_KVX_S37_GOTADDR_UP27:
3499
3500
0
    if (htab->root.dynobj == NULL)
3501
0
      htab->root.dynobj = abfd;
3502
0
    if (! kvx_elf_create_got_section (htab->root.dynobj, info))
3503
0
      return false;
3504
0
    break;
3505
3506
0
  case BFD_RELOC_KVX_PCREL27:
3507
0
  case BFD_RELOC_KVX_PCREL17:
3508
    /* If this is a local symbol then we resolve it
3509
       directly without creating a PLT entry.  */
3510
0
    if (h == NULL)
3511
0
      continue;
3512
3513
0
    h->needs_plt = 1;
3514
0
    if (h->plt.refcount <= 0)
3515
0
      h->plt.refcount = 1;
3516
0
    else
3517
0
      h->plt.refcount += 1;
3518
0
    break;
3519
3520
0
  default:
3521
0
    break;
3522
0
  }
3523
0
    }
3524
3525
0
  return true;
3526
0
}
3527
3528
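The loop above records, per global symbol or per local-symbol section, how many dynamic relocations will later have to be copied into the output; the following is a stripped-down sketch of that bookkeeping (plain C stand-in types, not the real BFD structures):

/* Hedged sketch of the check_relocs bookkeeping: one counter per (head,
   section) pair, later turned into bytes as count * RELOC_SIZE.  */
#include <stdio.h>
#include <stdlib.h>

struct dyn_relocs
{
  struct dyn_relocs *next;
  const char *sec;             /* stands in for asection *  */
  unsigned count;
};

static void
record_dyn_reloc (struct dyn_relocs **head, const char *sec)
{
  struct dyn_relocs *p = *head;

  /* Same shape as the test in check_relocs: only look at the list head.  */
  if (p == NULL || p->sec != sec)
    {
      p = calloc (1, sizeof *p);
      if (p == NULL)
        return;
      p->next = *head;
      *head = p;
      p->sec = sec;
    }
  p->count += 1;
}

int
main (void)
{
  struct dyn_relocs *head = NULL;
  const char *text = ".text";

  record_dyn_reloc (&head, text);
  record_dyn_reloc (&head, text);
  /* 2 relocs -> 2 * 12 bytes of Elf32_External_Rela in the output.  */
  printf ("%s: %u dynamic relocs, %u bytes\n", head->sec, head->count,
          head->count * 12);
  free (head);
  return 0;
}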
static bool
3529
elf32_kvx_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
3530
0
{
3531
0
  Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form.  */
3532
3533
0
  if (!_bfd_elf_init_file_header (abfd, link_info))
3534
0
    return false;
3535
3536
0
  i_ehdrp = elf_elfheader (abfd);
3537
0
  i_ehdrp->e_ident[EI_ABIVERSION] = KVX_ELF_ABI_VERSION;
3538
0
  return true;
3539
0
}
3540
3541
static enum elf_reloc_type_class
3542
elf32_kvx_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
3543
        const asection *rel_sec ATTRIBUTE_UNUSED,
3544
        const Elf_Internal_Rela *rela)
3545
0
{
3546
0
  switch ((int) ELF32_R_TYPE (rela->r_info))
3547
0
    {
3548
0
    case R_KVX_RELATIVE:
3549
0
      return reloc_class_relative;
3550
0
    case R_KVX_JMP_SLOT:
3551
0
      return reloc_class_plt;
3552
0
    case R_KVX_COPY:
3553
0
      return reloc_class_copy;
3554
0
    default:
3555
0
      return reloc_class_normal;
3556
0
    }
3557
0
}
3558
3559
/* A structure used to record a list of sections, independently
3560
   of the next and prev fields in the asection structure.  */
3561
typedef struct section_list
3562
{
3563
  asection *sec;
3564
  struct section_list *next;
3565
  struct section_list *prev;
3566
}
3567
section_list;
3568
3569
typedef struct
3570
{
3571
  void *finfo;
3572
  struct bfd_link_info *info;
3573
  asection *sec;
3574
  int sec_shndx;
3575
  int (*func) (void *, const char *, Elf_Internal_Sym *,
3576
         asection *, struct elf_link_hash_entry *);
3577
} output_arch_syminfo;
3578
3579
/* Output a single local symbol for a generated stub.  */
3580
3581
static bool
3582
elf32_kvx_output_stub_sym (output_arch_syminfo *osi, const char *name,
3583
             bfd_vma offset, bfd_vma size)
3584
0
{
3585
0
  Elf_Internal_Sym sym;
3586
3587
0
  sym.st_value = (osi->sec->output_section->vma
3588
0
      + osi->sec->output_offset + offset);
3589
0
  sym.st_size = size;
3590
0
  sym.st_other = 0;
3591
0
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
3592
0
  sym.st_shndx = osi->sec_shndx;
3593
0
  return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
3594
0
}
3595
3596
static bool
3597
kvx_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
3598
0
{
3599
0
  struct elf_kvx_stub_hash_entry *stub_entry;
3600
0
  asection *stub_sec;
3601
0
  bfd_vma addr;
3602
0
  char *stub_name;
3603
0
  output_arch_syminfo *osi;
3604
3605
  /* Massage our args to the form they really have.  */
3606
0
  stub_entry = (struct elf_kvx_stub_hash_entry *) gen_entry;
3607
0
  osi = (output_arch_syminfo *) in_arg;
3608
3609
0
  stub_sec = stub_entry->stub_sec;
3610
3611
  /* Ensure this stub is attached to the current section being
3612
     processed.  */
3613
0
  if (stub_sec != osi->sec)
3614
0
    return true;
3615
3616
0
  addr = (bfd_vma) stub_entry->stub_offset;
3617
3618
0
  stub_name = stub_entry->output_name;
3619
3620
0
  switch (stub_entry->stub_type)
3621
0
    {
3622
0
    case kvx_stub_long_branch:
3623
0
      if (!elf32_kvx_output_stub_sym
3624
0
    (osi, stub_name, addr, sizeof (elf32_kvx_long_branch_stub)))
3625
0
  return false;
3626
0
      break;
3627
3628
0
    default:
3629
0
      abort ();
3630
0
    }
3631
3632
0
  return true;
3633
0
}
3634
3635
/* Output mapping symbols for linker generated sections.  */
3636
3637
static bool
3638
elf32_kvx_output_arch_local_syms (bfd *output_bfd,
3639
          struct bfd_link_info *info,
3640
          void *finfo,
3641
          int (*func) (void *, const char *,
3642
                 Elf_Internal_Sym *,
3643
                 asection *,
3644
                 struct elf_link_hash_entry *))
3645
0
{
3646
0
  output_arch_syminfo osi;
3647
0
  struct elf_kvx_link_hash_table *htab;
3648
3649
0
  htab = elf_kvx_hash_table (info);
3650
3651
0
  osi.finfo = finfo;
3652
0
  osi.info = info;
3653
0
  osi.func = func;
3654
3655
  /* Long call stubs.  */
3656
0
  if (htab->stub_bfd && htab->stub_bfd->sections)
3657
0
    {
3658
0
      asection *stub_sec;
3659
3660
0
      for (stub_sec = htab->stub_bfd->sections;
3661
0
     stub_sec != NULL; stub_sec = stub_sec->next)
3662
0
  {
3663
    /* Ignore non-stub sections.  */
3664
0
    if (!strstr (stub_sec->name, STUB_SUFFIX))
3665
0
      continue;
3666
3667
0
    osi.sec = stub_sec;
3668
3669
0
    osi.sec_shndx = _bfd_elf_section_from_bfd_section
3670
0
      (output_bfd, osi.sec->output_section);
3671
3672
0
    bfd_hash_traverse (&htab->stub_hash_table, kvx_map_one_stub,
3673
0
           &osi);
3674
0
  }
3675
0
    }
3676
3677
  /* Finally, output mapping symbols for the PLT.  */
3678
0
  if (!htab->root.splt || htab->root.splt->size == 0)
3679
0
    return true;
3680
3681
0
  osi.sec_shndx = _bfd_elf_section_from_bfd_section
3682
0
    (output_bfd, htab->root.splt->output_section);
3683
0
  osi.sec = htab->root.splt;
3684
3685
0
  return true;
3686
3687
0
}
3688
3689
/* Allocate target specific section data.  */
3690
3691
static bool
3692
elf32_kvx_new_section_hook (bfd *abfd, asection *sec)
3693
0
{
3694
0
  if (!sec->used_by_bfd)
3695
0
    {
3696
0
      _kvx_elf_section_data *sdata;
3697
0
      bfd_size_type amt = sizeof (*sdata);
3698
3699
0
      sdata = bfd_zalloc (abfd, amt);
3700
0
      if (sdata == NULL)
3701
0
  return false;
3702
0
      sec->used_by_bfd = sdata;
3703
0
    }
3704
3705
0
  return _bfd_elf_new_section_hook (abfd, sec);
3706
0
}
3707
3708
/* Create dynamic sections. This is different from the ARM backend in that
3709
   the got, plt, gotplt and their relocation sections are all created in the
3710
   standard part of the bfd elf backend.  */
3711
3712
static bool
3713
elf32_kvx_create_dynamic_sections (bfd *dynobj,
3714
           struct bfd_link_info *info)
3715
0
{
3716
0
  struct elf_kvx_link_hash_table *htab;
3717
3718
  /* We need to create .got section.  */
3719
0
  if (!kvx_elf_create_got_section (dynobj, info))
3720
0
    return false;
3721
3722
0
  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3723
0
    return false;
3724
3725
0
  htab = elf_kvx_hash_table (info);
3726
0
  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3727
0
  if (!bfd_link_pic (info))
3728
0
    htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
3729
3730
0
  if (!htab->sdynbss || (!bfd_link_pic (info) && !htab->srelbss))
3731
0
    abort ();
3732
3733
0
  return true;
3734
0
}
3735
3736
3737
/* Allocate space in .plt, .got and associated reloc sections for
3738
   dynamic relocs.  */
3739
3740
static bool
3741
elf32_kvx_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
3742
0
{
3743
0
  struct bfd_link_info *info;
3744
0
  struct elf_kvx_link_hash_table *htab;
3745
0
  struct elf_dyn_relocs *p;
3746
3747
  /* An example of a bfd_link_hash_indirect symbol is a versioned
3748
     symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
3749
     -> __gxx_personality_v0(bfd_link_hash_defined)
3750
3751
     There is no need to process bfd_link_hash_indirect symbols here
3752
     because we will also be presented with the concrete instance of
3753
     the symbol and elf32_kvx_copy_indirect_symbol () will have been
3754
     called to copy all relevant data from the generic to the concrete
3755
     symbol instance.  */
3756
0
  if (h->root.type == bfd_link_hash_indirect)
3757
0
    return true;
3758
3759
0
  if (h->root.type == bfd_link_hash_warning)
3760
0
    h = (struct elf_link_hash_entry *) h->root.u.i.link;
3761
3762
0
  info = (struct bfd_link_info *) inf;
3763
0
  htab = elf_kvx_hash_table (info);
3764
3765
0
  if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
3766
0
    {
3767
      /* Make sure this symbol is output as a dynamic symbol.
3768
   Undefined weak syms won't yet be marked as dynamic.  */
3769
0
      if (h->dynindx == -1 && !h->forced_local)
3770
0
  {
3771
0
    if (!bfd_elf_link_record_dynamic_symbol (info, h))
3772
0
      return false;
3773
0
  }
3774
3775
0
      if (bfd_link_pic (info) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3776
0
  {
3777
0
    asection *s = htab->root.splt;
3778
3779
    /* If this is the first .plt entry, make room for the special
3780
       first entry.  */
3781
0
    if (s->size == 0)
3782
0
      s->size += htab->plt_header_size;
3783
3784
0
    h->plt.offset = s->size;
3785
3786
    /* If this symbol is not defined in a regular file, and we are
3787
       not generating a shared library, then set the symbol to this
3788
       location in the .plt.  This is required to make function
3789
       pointers compare as equal between the normal executable and
3790
       the shared library.  */
3791
0
    if (!bfd_link_pic (info) && !h->def_regular)
3792
0
      {
3793
0
        h->root.u.def.section = s;
3794
0
        h->root.u.def.value = h->plt.offset;
3795
0
      }
3796
3797
    /* Make room for this entry. For now we only create the
3798
       small model PLT entries. We later need to find a way
3799
       of relaxing into these from the large model PLT entries.  */
3800
0
    s->size += PLT_SMALL_ENTRY_SIZE;
3801
3802
    /* We also need to make an entry in the .got.plt section, which
3803
       will be placed in the .got section by the linker script.  */
3804
0
    htab->root.sgotplt->size += GOT_ENTRY_SIZE;
3805
3806
    /* We also need to make an entry in the .rela.plt section.  */
3807
0
    htab->root.srelplt->size += RELOC_SIZE (htab);
3808
3809
    /* We need to ensure that all GOT entries that serve the PLT
3810
       are consecutive with the special GOT slots [0] [1] and
3811
       [2]. Any additional relocations must be placed after the
3812
       PLT related entries.  We abuse the reloc_count such that
3813
       during sizing we adjust reloc_count to indicate the
3814
       number of PLT related reserved entries.  In subsequent
3815
       phases when filling in the contents of the reloc entries,
3816
       PLT related entries are placed by computing their PLT
3817
       index (0 .. reloc_count), while other non-PLT relocs are
3818
       placed at the slot indicated by reloc_count and
3819
       reloc_count is updated.  */
3820
3821
0
    htab->root.srelplt->reloc_count++;
3822
0
  }
3823
0
      else
3824
0
  {
3825
0
    h->plt.offset = (bfd_vma) - 1;
3826
0
    h->needs_plt = 0;
3827
0
  }
3828
0
    }
3829
0
  else
3830
0
    {
3831
0
      h->plt.offset = (bfd_vma) - 1;
3832
0
      h->needs_plt = 0;
3833
0
    }
3834
3835
0
  if (h->got.refcount > 0)
3836
0
    {
3837
0
      bool dyn;
3838
0
      unsigned got_type = elf_kvx_hash_entry (h)->got_type;
3839
3840
0
      h->got.offset = (bfd_vma) - 1;
3841
3842
0
      dyn = htab->root.dynamic_sections_created;
3843
3844
      /* Make sure this symbol is output as a dynamic symbol.
3845
   Undefined weak syms won't yet be marked as dynamic.  */
3846
0
      if (dyn && h->dynindx == -1 && !h->forced_local)
3847
0
  {
3848
0
    if (!bfd_elf_link_record_dynamic_symbol (info, h))
3849
0
      return false;
3850
0
  }
3851
3852
0
      if (got_type == GOT_UNKNOWN)
3853
0
  {
3854
0
    (*_bfd_error_handler)
3855
0
      (_("relocation against `%s' has faulty GOT type "),
3856
0
       (h) ? h->root.root.string : "a local symbol");
3857
0
    bfd_set_error (bfd_error_bad_value);
3858
0
    return false;
3859
0
  }
3860
0
      else if (got_type == GOT_NORMAL)
3861
0
  {
3862
0
    h->got.offset = htab->root.sgot->size;
3863
0
    htab->root.sgot->size += GOT_ENTRY_SIZE;
3864
0
    if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3865
0
         || h->root.type != bfd_link_hash_undefweak)
3866
0
        && (bfd_link_pic (info)
3867
0
      || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3868
0
      {
3869
0
        htab->root.srelgot->size += RELOC_SIZE (htab);
3870
0
      }
3871
0
  }
3872
0
      else
3873
0
  {
3874
0
    int indx;
3875
3876
    /* Any of these will require 2 GOT slots because
3877
       they use __tls_get_addr().  */
3878
0
    if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
3879
0
      {
3880
0
        h->got.offset = htab->root.sgot->size;
3881
0
        htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
3882
0
      }
3883
3884
0
    if (got_type & GOT_TLS_IE)
3885
0
      {
3886
0
        h->got.offset = htab->root.sgot->size;
3887
0
        htab->root.sgot->size += GOT_ENTRY_SIZE;
3888
0
      }
3889
3890
0
    indx = h && h->dynindx != -1 ? h->dynindx : 0;
3891
0
    if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3892
0
         || h->root.type != bfd_link_hash_undefweak)
3893
0
        && (bfd_link_pic (info)
3894
0
      || indx != 0
3895
0
      || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3896
0
      {
3897
        /* Only the GD case requires 2 relocations. */
3898
0
        if (got_type & GOT_TLS_GD)
3899
0
    htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
3900
3901
        /* LD needs a DTPMOD reloc, IE needs a DTPOFF. */
3902
0
        if (got_type & (GOT_TLS_LD | GOT_TLS_IE))
3903
0
    htab->root.srelgot->size += RELOC_SIZE (htab);
3904
0
      }
3905
0
  }
3906
0
    }
3907
0
  else
3908
0
    {
3909
0
      h->got.offset = (bfd_vma) - 1;
3910
0
    }
3911
3912
0
  if (h->dyn_relocs == NULL)
3913
0
    return true;
3914
3915
  /* In the shared -Bsymbolic case, discard space allocated for
3916
     dynamic pc-relative relocs against symbols which turn out to be
3917
     defined in regular objects.  For the normal shared case, discard
3918
     space for pc-relative relocs that have become local due to symbol
3919
     visibility changes.  */
3920
3921
0
  if (bfd_link_pic (info))
3922
0
    {
3923
      /* Relocs that use pc_count are those that appear on a call
3924
   insn, or certain REL relocs that can be generated via assembly.
3925
   We want calls to protected symbols to resolve directly to the
3926
   function rather than going via the plt.  If people want
3927
   function pointer comparisons to work as expected then they
3928
   should avoid writing weird assembly.  */
3929
0
      if (SYMBOL_CALLS_LOCAL (info, h))
3930
0
  {
3931
0
    struct elf_dyn_relocs **pp;
3932
3933
0
    for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
3934
0
      {
3935
0
        p->count -= p->pc_count;
3936
0
        p->pc_count = 0;
3937
0
        if (p->count == 0)
3938
0
    *pp = p->next;
3939
0
        else
3940
0
    pp = &p->next;
3941
0
      }
3942
0
  }
3943
3944
      /* Also discard relocs on undefined weak syms with non-default
3945
   visibility.  */
3946
0
      if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
3947
0
  {
3948
0
    if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3949
0
        || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
3950
0
      h->dyn_relocs = NULL;
3951
3952
    /* Make sure undefined weak symbols are output as a dynamic
3953
       symbol in PIEs.  */
3954
0
    else if (h->dynindx == -1
3955
0
       && !h->forced_local
3956
0
       && !bfd_elf_link_record_dynamic_symbol (info, h))
3957
0
      return false;
3958
0
  }
3959
3960
0
    }
3961
0
  else if (ELIMINATE_COPY_RELOCS)
3962
0
    {
3963
      /* For the non-shared case, discard space for relocs against
3964
   symbols which turn out to need copy relocs or are not
3965
   dynamic.  */
3966
3967
0
      if (!h->non_got_ref
3968
0
    && ((h->def_dynamic
3969
0
         && !h->def_regular)
3970
0
        || (htab->root.dynamic_sections_created
3971
0
      && (h->root.type == bfd_link_hash_undefweak
3972
0
          || h->root.type == bfd_link_hash_undefined))))
3973
0
  {
3974
    /* Make sure this symbol is output as a dynamic symbol.
3975
       Undefined weak syms won't yet be marked as dynamic.  */
3976
0
    if (h->dynindx == -1
3977
0
        && !h->forced_local
3978
0
        && !bfd_elf_link_record_dynamic_symbol (info, h))
3979
0
      return false;
3980
3981
    /* If that succeeded, we know we'll be keeping all the
3982
       relocs.  */
3983
0
    if (h->dynindx != -1)
3984
0
      goto keep;
3985
0
  }
3986
3987
0
      h->dyn_relocs = NULL;
3988
3989
0
    keep:;
3990
0
    }
3991
3992
  /* Finally, allocate space.  */
3993
0
  for (p = h->dyn_relocs; p != NULL; p = p->next)
3994
0
    {
3995
0
      asection *sreloc;
3996
3997
0
      sreloc = elf_section_data (p->sec)->sreloc;
3998
3999
0
      BFD_ASSERT (sreloc != NULL);
4000
4001
0
      sreloc->size += p->count * RELOC_SIZE (htab);
4002
0
    }
4003
4004
0
  return true;
4005
0
}
4006
4007
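To make the per-symbol growth in elf32_kvx_allocate_dynrelocs concrete, here is a back-of-the-envelope sketch using the small-model constants defined in this file; the three-symbol count is hypothetical and the header size assumes plt_header_size equals PLT_ENTRY_SIZE:

/* Illustrative sizing only, assuming ELF32 and the small-model PLT:
   a header of PLT_ENTRY_SIZE bytes, then PLT_SMALL_ENTRY_SIZE per symbol,
   plus one .got.plt slot and one RELA entry per symbol.  */
#include <stdio.h>

int
main (void)
{
  const unsigned plt_header_size = 32;   /* PLT_ENTRY_SIZE  */
  const unsigned plt_entry_size = 16;    /* PLT_SMALL_ENTRY_SIZE (4*4)  */
  const unsigned got_entry_size = 4;     /* GOT_ENTRY_SIZE in ELF32  */
  const unsigned reloc_size = 12;        /* sizeof (Elf32_External_Rela)  */
  const unsigned nsyms = 3;              /* hypothetical PLT symbols  */

  printf (".plt      = %u bytes\n", plt_header_size + nsyms * plt_entry_size);
  printf (".got.plt  = %u bytes\n", (3 + nsyms) * got_entry_size); /* 3 reserved slots  */
  printf (".rela.plt = %u bytes\n", nsyms * reloc_size);
  return 0;
}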
/* Find any dynamic relocs that apply to read-only sections.  */
4008
4009
static bool
4010
kvx_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
4011
0
{
4012
0
  struct elf_dyn_relocs * p;
4013
4014
0
  for (p = h->dyn_relocs; p != NULL; p = p->next)
4015
0
    {
4016
0
      asection *s = p->sec;
4017
4018
0
      if (s != NULL && (s->flags & SEC_READONLY) != 0)
4019
0
  {
4020
0
    struct bfd_link_info *info = (struct bfd_link_info *) inf;
4021
4022
0
    info->flags |= DF_TEXTREL;
4023
0
    info->callbacks->minfo (_("%pB: dynamic relocation against `%pT' in "
4024
0
            "read-only section `%pA'\n"),
4025
0
          s->owner, h->root.root.string, s);
4026
4027
    /* Not an error, just cut short the traversal.  */
4028
0
    return false;
4029
0
  }
4030
0
    }
4031
0
  return true;
4032
0
}
4033
4034
/* This is the most important function of all.  Innocuously named
4035
   though!  */
4036
static bool
4037
elf32_kvx_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
4038
         struct bfd_link_info *info)
4039
0
{
4040
0
  struct elf_kvx_link_hash_table *htab;
4041
0
  bfd *dynobj;
4042
0
  asection *s;
4043
0
  bool relocs;
4044
0
  bfd *ibfd;
4045
4046
0
  htab = elf_kvx_hash_table ((info));
4047
0
  dynobj = htab->root.dynobj;
4048
4049
0
  BFD_ASSERT (dynobj != NULL);
4050
4051
0
  if (htab->root.dynamic_sections_created)
4052
0
    {
4053
0
      if (bfd_link_executable (info) && !info->nointerp)
4054
0
  {
4055
0
    s = bfd_get_linker_section (dynobj, ".interp");
4056
0
    if (s == NULL)
4057
0
      abort ();
4058
0
    s->size = sizeof ELF_DYNAMIC_INTERPRETER;
4059
0
    s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
4060
0
  }
4061
0
    }
4062
4063
  /* Set up .got offsets for local syms, and space for local dynamic
4064
     relocs.  */
4065
0
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4066
0
    {
4067
0
      struct elf_kvx_local_symbol *locals = NULL;
4068
0
      Elf_Internal_Shdr *symtab_hdr;
4069
0
      asection *srel;
4070
0
      unsigned int i;
4071
4072
0
      if (!is_kvx_elf (ibfd))
4073
0
  continue;
4074
4075
0
      for (s = ibfd->sections; s != NULL; s = s->next)
4076
0
  {
4077
0
    struct elf_dyn_relocs *p;
4078
4079
0
    for (p = (struct elf_dyn_relocs *)
4080
0
     (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
4081
0
      {
4082
0
        if (!bfd_is_abs_section (p->sec)
4083
0
      && bfd_is_abs_section (p->sec->output_section))
4084
0
    {
4085
      /* Input section has been discarded, either because
4086
         it is a copy of a linkonce section or due to
4087
         linker script /DISCARD/, so we'll be discarding
4088
         the relocs too.  */
4089
0
    }
4090
0
        else if (p->count != 0)
4091
0
    {
4092
0
      srel = elf_section_data (p->sec)->sreloc;
4093
0
      srel->size += p->count * RELOC_SIZE (htab);
4094
0
      if ((p->sec->output_section->flags & SEC_READONLY) != 0)
4095
0
        info->flags |= DF_TEXTREL;
4096
0
    }
4097
0
      }
4098
0
  }
4099
4100
0
      locals = elf_kvx_locals (ibfd);
4101
0
      if (!locals)
4102
0
  continue;
4103
4104
0
      symtab_hdr = &elf_symtab_hdr (ibfd);
4105
0
      srel = htab->root.srelgot;
4106
0
      for (i = 0; i < symtab_hdr->sh_info; i++)
4107
0
  {
4108
0
    locals[i].got_offset = (bfd_vma) - 1;
4109
0
    if (locals[i].got_refcount > 0)
4110
0
      {
4111
0
        unsigned got_type = locals[i].got_type;
4112
0
        if (got_type & (GOT_TLS_GD | GOT_TLS_LD))
4113
0
    {
4114
0
      locals[i].got_offset = htab->root.sgot->size;
4115
0
      htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
4116
0
    }
4117
4118
0
        if (got_type & (GOT_NORMAL | GOT_TLS_IE ))
4119
0
    {
4120
0
      locals[i].got_offset = htab->root.sgot->size;
4121
0
      htab->root.sgot->size += GOT_ENTRY_SIZE;
4122
0
    }
4123
4124
0
        if (got_type == GOT_UNKNOWN)
4125
0
    {
4126
0
    }
4127
4128
0
        if (bfd_link_pic (info))
4129
0
    {
4130
0
      if (got_type & GOT_TLS_GD)
4131
0
        htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
4132
4133
0
      if (got_type & GOT_TLS_IE
4134
0
          || got_type & GOT_TLS_LD
4135
0
          || got_type & GOT_NORMAL)
4136
0
        htab->root.srelgot->size += RELOC_SIZE (htab);
4137
0
    }
4138
0
      }
4139
0
    else
4140
0
      {
4141
0
        locals[i].got_refcount = (bfd_vma) - 1;
4142
0
      }
4143
0
  }
4144
0
    }
4145
4146
4147
  /* Allocate global sym .plt and .got entries, and space for global
4148
     sym dynamic relocs.  */
4149
0
  elf_link_hash_traverse (&htab->root, elf32_kvx_allocate_dynrelocs,
4150
0
        info);
4151
4152
  /* For every jump slot reserved in the sgotplt, reloc_count is
4153
     incremented.  However, when we reserve space for TLS descriptors,
4154
     it's not incremented, so in order to compute the space reserved
4155
     for them, it suffices to multiply the reloc count by the jump
4156
     slot size.  */
4157
4158
0
  if (htab->root.srelplt)
4159
0
    htab->sgotplt_jump_table_size = kvx_compute_jump_table_size (htab);
4160
4161
  /* We now have determined the sizes of the various dynamic sections.
4162
     Allocate memory for them.  */
4163
0
  relocs = false;
4164
0
  for (s = dynobj->sections; s != NULL; s = s->next)
4165
0
    {
4166
0
      if ((s->flags & SEC_LINKER_CREATED) == 0)
4167
0
  continue;
4168
4169
0
      if (s == htab->root.splt
4170
0
    || s == htab->root.sgot
4171
0
    || s == htab->root.sgotplt
4172
0
    || s == htab->root.iplt
4173
0
    || s == htab->root.igotplt || s == htab->sdynbss)
4174
0
  {
4175
    /* Strip this section if we don't need it; see the
4176
       comment below.  */
4177
0
  }
4178
0
      else if (startswith (bfd_section_name (s), ".rela"))
4179
0
  {
4180
0
    if (s->size != 0 && s != htab->root.srelplt)
4181
0
      relocs = true;
4182
4183
    /* We use the reloc_count field as a counter if we need
4184
       to copy relocs into the output file.  */
4185
0
    if (s != htab->root.srelplt)
4186
0
      s->reloc_count = 0;
4187
0
  }
4188
0
      else
4189
0
  {
4190
    /* It's not one of our sections, so don't allocate space.  */
4191
0
    continue;
4192
0
  }
4193
4194
0
      if (s->size == 0)
4195
0
  {
4196
    /* If we don't need this section, strip it from the
4197
       output file.  This is mostly to handle .rela.bss and
4198
       .rela.plt.  We must create both sections in
4199
       create_dynamic_sections, because they must be created
4200
       before the linker maps input sections to output
4201
       sections.  The linker does that before
4202
       adjust_dynamic_symbol is called, and it is that
4203
       function which decides whether anything needs to go
4204
       into these sections.  */
4205
4206
0
    s->flags |= SEC_EXCLUDE;
4207
0
    continue;
4208
0
  }
4209
4210
0
      if ((s->flags & SEC_HAS_CONTENTS) == 0)
4211
0
  continue;
4212
4213
      /* Allocate memory for the section contents.  We use bfd_zalloc
4214
   here in case unused entries are not reclaimed before the
4215
   section's contents are written out.  This should not happen,
4216
   but this way if it does, we get a R_KVX_NONE reloc instead
4217
   of garbage.  */
4218
0
      s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
4219
0
      if (s->contents == NULL)
4220
0
  return false;
4221
0
    }
4222
4223
0
  if (htab->root.dynamic_sections_created)
4224
0
    {
4225
      /* Add some entries to the .dynamic section.  We fill in the
4226
   values later, in elf32_kvx_finish_dynamic_sections, but we
4227
   must add the entries now so that we get the correct size for
4228
   the .dynamic section.  The DT_DEBUG entry is filled in by the
4229
   dynamic linker and used by the debugger.  */
4230
0
#define add_dynamic_entry(TAG, VAL)     \
4231
0
      _bfd_elf_add_dynamic_entry (info, TAG, VAL)
4232
4233
0
      if (bfd_link_executable (info))
4234
0
  {
4235
0
    if (!add_dynamic_entry (DT_DEBUG, 0))
4236
0
      return false;
4237
0
  }
4238
4239
0
      if (htab->root.splt->size != 0)
4240
0
  {
4241
0
    if (!add_dynamic_entry (DT_PLTGOT, 0)
4242
0
        || !add_dynamic_entry (DT_PLTRELSZ, 0)
4243
0
        || !add_dynamic_entry (DT_PLTREL, DT_RELA)
4244
0
        || !add_dynamic_entry (DT_JMPREL, 0))
4245
0
      return false;
4246
0
  }
4247
4248
0
      if (relocs)
4249
0
  {
4250
0
    if (!add_dynamic_entry (DT_RELA, 0)
4251
0
        || !add_dynamic_entry (DT_RELASZ, 0)
4252
0
        || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
4253
0
      return false;
4254
4255
    /* If any dynamic relocs apply to a read-only section,
4256
       then we need a DT_TEXTREL entry.  */
4257
0
    if ((info->flags & DF_TEXTREL) == 0)
4258
0
      elf_link_hash_traverse (&htab->root, kvx_readonly_dynrelocs,
4259
0
            info);
4260
4261
0
    if ((info->flags & DF_TEXTREL) != 0)
4262
0
      {
4263
0
        if (!add_dynamic_entry (DT_TEXTREL, 0))
4264
0
    return false;
4265
0
      }
4266
0
  }
4267
0
    }
4268
0
#undef add_dynamic_entry
4269
4270
0
  return true;
4271
0
}
4272
4273
static inline void
4274
elf_kvx_update_plt_entry (bfd *output_bfd,
4275
        bfd_reloc_code_real_type r_type,
4276
        bfd_byte *plt_entry, bfd_vma value)
4277
0
{
4278
0
  reloc_howto_type *howto = elf32_kvx_howto_from_bfd_reloc (r_type);
4279
0
  BFD_ASSERT(howto != NULL);
4280
0
  _bfd_kvx_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
4281
0
}
4282
4283
static void
4284
elf32_kvx_create_small_pltn_entry (struct elf_link_hash_entry *h,
4285
           struct elf_kvx_link_hash_table *htab,
4286
           bfd *output_bfd)
4287
0
{
4288
0
  bfd_byte *plt_entry;
4289
0
  bfd_vma plt_index;
4290
0
  bfd_vma got_offset;
4291
0
  bfd_vma gotplt_entry_address;
4292
0
  bfd_vma plt_entry_address;
4293
0
  Elf_Internal_Rela rela;
4294
0
  bfd_byte *loc;
4295
0
  asection *plt, *gotplt, *relplt;
4296
4297
0
  plt = htab->root.splt;
4298
0
  gotplt = htab->root.sgotplt;
4299
0
  relplt = htab->root.srelplt;
4300
4301
  /* Get the index in the procedure linkage table which
4302
     corresponds to this symbol.  This is the index of this symbol
4303
     in all the symbols for which we are making plt entries.  The
4304
     first entry in the procedure linkage table is reserved.
4305
4306
     Get the offset into the .got table of the entry that
4307
     corresponds to this function.  Each .got entry is GOT_ENTRY_SIZE
4308
     bytes. The first three are reserved for the dynamic linker.
4309
4310
     For static executables, we don't reserve anything.  */
4311
4312
0
  if (plt == htab->root.splt)
4313
0
    {
4314
0
      plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
4315
0
      got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
4316
0
    }
4317
0
  else
4318
0
    {
4319
0
      plt_index = h->plt.offset / htab->plt_entry_size;
4320
0
      got_offset = plt_index * GOT_ENTRY_SIZE;
4321
0
    }
4322
4323
0
  plt_entry = plt->contents + h->plt.offset;
4324
0
  plt_entry_address = plt->output_section->vma
4325
0
    + plt->output_offset + h->plt.offset;
4326
0
  gotplt_entry_address = gotplt->output_section->vma +
4327
0
    gotplt->output_offset + got_offset;
4328
4329
  /* Copy in the boiler-plate for the PLTn entry.  */
4330
0
  memcpy (plt_entry, elf32_kvx_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
4331
4332
  /* Patch the loading of the GOT entry, relative to the PLT entry
4333
     address. */
4334
4335
  /* Use a 37-bit offset for both 32-bit and 64-bit modes.
4336
     Fill the LO10 of lw $r9 = 0[$r14].  */
4337
0
  elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_LO10,
4338
0
         plt_entry+4,
4339
0
         gotplt_entry_address - plt_entry_address);
4340
4341
  /* Fill the UP27 of lw $r9 = 0[$r14].  */
4342
0
  elf_kvx_update_plt_entry(output_bfd, BFD_RELOC_KVX_S37_UP27,
4343
0
         plt_entry+8,
4344
0
         gotplt_entry_address - plt_entry_address);
4345
4346
0
  rela.r_offset = gotplt_entry_address;
4347
4348
  /* Fill in the entry in the .rela.plt section.  */
4349
0
  rela.r_info = ELF32_R_INFO (h->dynindx, R_KVX_JMP_SLOT);
4350
0
  rela.r_addend = 0;
4351
4352
  /* Compute the relocation entry to use based on the PLT index and do
4353
     not adjust reloc_count. The reloc_count has already been adjusted
4354
     to account for this entry.  */
4355
0
  loc = relplt->contents + plt_index * RELOC_SIZE (htab);
4356
0
  bfd_elf32_swap_reloca_out (output_bfd, &rela, loc);
4357
0
}
4358
4359
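A quick numeric check of the index arithmetic in elf32_kvx_create_small_pltn_entry (illustrative only; it assumes the dynamically linked case, plt_header_size of 32, PLT_SMALL_ENTRY_SIZE of 16 and 4-byte ELF32 GOT entries):

/* Sketch of plt_index / got_offset for the first three non-reserved PLT
   entries; the plt.offset values below are hypothetical.  */
#include <stdio.h>

int
main (void)
{
  const unsigned plt_header_size = 32, plt_entry_size = 16, got_entry_size = 4;
  const unsigned plt_offsets[] = { 32, 48, 64 };   /* h->plt.offset for PLT1..PLT3  */

  for (unsigned i = 0; i < 3; i++)
    {
      unsigned plt_index = (plt_offsets[i] - plt_header_size) / plt_entry_size;
      unsigned got_offset = (plt_index + 3) * got_entry_size; /* 3 reserved slots  */
      printf ("plt.offset %u -> index %u, .got.plt offset %u\n",
              plt_offsets[i], plt_index, got_offset);
    }
  return 0;
}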
/* Size sections even though they're not dynamic.  We use it to set up
4360
   _TLS_MODULE_BASE_, if needed.  */
4361
4362
static bool
4363
elf32_kvx_always_size_sections (bfd *output_bfd,
4364
        struct bfd_link_info *info)
4365
0
{
4366
0
  asection *tls_sec;
4367
4368
0
  if (bfd_link_relocatable (info))
4369
0
    return true;
4370
4371
0
  tls_sec = elf_hash_table (info)->tls_sec;
4372
4373
0
  if (tls_sec)
4374
0
    {
4375
0
      struct elf_link_hash_entry *tlsbase;
4376
4377
0
      tlsbase = elf_link_hash_lookup (elf_hash_table (info),
4378
0
              "_TLS_MODULE_BASE_", true, true, false);
4379
4380
0
      if (tlsbase)
4381
0
  {
4382
0
    struct bfd_link_hash_entry *h = NULL;
4383
0
    const struct elf_backend_data *bed =
4384
0
      get_elf_backend_data (output_bfd);
4385
4386
0
    if (!(_bfd_generic_link_add_one_symbol
4387
0
    (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
4388
0
     tls_sec, 0, NULL, false, bed->collect, &h)))
4389
0
      return false;
4390
4391
0
    tlsbase->type = STT_TLS;
4392
0
    tlsbase = (struct elf_link_hash_entry *) h;
4393
0
    tlsbase->def_regular = 1;
4394
0
    tlsbase->other = STV_HIDDEN;
4395
0
    (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
4396
0
  }
4397
0
    }
4398
4399
0
  return true;
4400
0
}
4401
4402
/* Finish up dynamic symbol handling.  We set the contents of various
4403
   dynamic sections here.  */
4404
static bool
4405
elf32_kvx_finish_dynamic_symbol (bfd *output_bfd,
4406
         struct bfd_link_info *info,
4407
         struct elf_link_hash_entry *h,
4408
         Elf_Internal_Sym *sym)
4409
0
{
4410
0
  struct elf_kvx_link_hash_table *htab;
4411
0
  htab = elf_kvx_hash_table (info);
4412
4413
0
  if (h->plt.offset != (bfd_vma) - 1)
4414
0
    {
4415
0
      asection *plt = NULL, *gotplt = NULL, *relplt = NULL;
4416
4417
      /* This symbol has an entry in the procedure linkage table.  Set
4418
   it up.  */
4419
4420
0
      if (htab->root.splt != NULL)
4421
0
  {
4422
0
    plt = htab->root.splt;
4423
0
    gotplt = htab->root.sgotplt;
4424
0
    relplt = htab->root.srelplt;
4425
0
  }
4426
4427
      /* This symbol has an entry in the procedure linkage table.  Set
4428
   it up.  */
4429
0
      if ((h->dynindx == -1
4430
0
     && !((h->forced_local || bfd_link_executable (info))
4431
0
    && h->def_regular
4432
0
    && h->type == STT_GNU_IFUNC))
4433
0
    || plt == NULL
4434
0
    || gotplt == NULL
4435
0
    || relplt == NULL)
4436
0
  abort ();
4437
4438
0
      elf32_kvx_create_small_pltn_entry (h, htab, output_bfd);
4439
0
      if (!h->def_regular)
4440
0
  {
4441
    /* Mark the symbol as undefined, rather than as defined in
4442
       the .plt section.  */
4443
0
    sym->st_shndx = SHN_UNDEF;
4444
    /* If the symbol is weak we need to clear the value.
4445
       Otherwise, the PLT entry would provide a definition for
4446
       the symbol even if the symbol wasn't defined anywhere,
4447
       and so the symbol would never be NULL.  Leave the value if
4448
       there were any relocations where pointer equality matters
4449
       (this is a clue for the dynamic linker, to make function
4450
       pointer comparisons work between an application and shared
4451
       library).  */
4452
0
    if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
4453
0
      sym->st_value = 0;
4454
0
  }
4455
0
    }
4456
4457
0
  if (h->got.offset != (bfd_vma) - 1
4458
0
      && elf_kvx_hash_entry (h)->got_type == GOT_NORMAL)
4459
0
    {
4460
0
      Elf_Internal_Rela rela;
4461
0
      bfd_byte *loc;
4462
4463
      /* This symbol has an entry in the global offset table.  Set it
4464
   up.  */
4465
0
      if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
4466
0
  abort ();
4467
4468
0
      rela.r_offset = (htab->root.sgot->output_section->vma
4469
0
           + htab->root.sgot->output_offset
4470
0
           + (h->got.offset & ~(bfd_vma) 1));
4471
4472
#ifdef UGLY_DEBUG
4473
      printf("setting rela at offset 0x%x(0x%x + 0x%x + 0x%x) for %s\n",
4474
       rela.r_offset,
4475
       htab->root.sgot->output_section->vma,
4476
       htab->root.sgot->output_offset,
4477
       h->got.offset,
4478
       h->root.root.string);
4479
#endif
4480
4481
0
      if (bfd_link_pic (info) && SYMBOL_REFERENCES_LOCAL (info, h))
4482
0
  {
4483
0
    if (!h->def_regular)
4484
0
      return false;
4485
4486
    /* In the case of a PLT-related GOT entry, it is not clear who is
4487
       supposed to set the LSB of the GOT entry...
4488
       kvx_calculate_got_entry_vma() would be a good candidate,
4489
       but it is not called currently,
4490
       so the assertion below stays commented out for now.  */
4491
    // BFD_ASSERT ((h->got.offset & 1) != 0);
4492
0
    rela.r_info = ELF32_R_INFO (0, R_KVX_RELATIVE);
4493
0
    rela.r_addend = (h->root.u.def.value
4494
0
         + h->root.u.def.section->output_section->vma
4495
0
         + h->root.u.def.section->output_offset);
4496
0
  }
4497
0
      else
4498
0
  {
4499
0
    BFD_ASSERT ((h->got.offset & 1) == 0);
4500
0
    bfd_put_32 (output_bfd, (bfd_vma) 0,
4501
0
          htab->root.sgot->contents + h->got.offset);
4502
0
    rela.r_info = ELF32_R_INFO (h->dynindx, R_KVX_GLOB_DAT);
4503
0
    rela.r_addend = 0;
4504
0
  }
4505
4506
0
      loc = htab->root.srelgot->contents;
4507
0
      loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
4508
0
      bfd_elf32_swap_reloca_out (output_bfd, &rela, loc);
4509
0
    }
4510
4511
0
  if (h->needs_copy)
4512
0
    {
4513
0
      Elf_Internal_Rela rela;
4514
0
      bfd_byte *loc;
4515
4516
      /* This symbol needs a copy reloc.  Set it up.  */
4517
4518
0
      if (h->dynindx == -1
4519
0
    || (h->root.type != bfd_link_hash_defined
4520
0
        && h->root.type != bfd_link_hash_defweak)
4521
0
    || htab->srelbss == NULL)
4522
0
  abort ();
4523
4524
0
      rela.r_offset = (h->root.u.def.value
4525
0
           + h->root.u.def.section->output_section->vma
4526
0
           + h->root.u.def.section->output_offset);
4527
0
      rela.r_info = ELF32_R_INFO (h->dynindx, R_KVX_COPY);
4528
0
      rela.r_addend = 0;
4529
0
      loc = htab->srelbss->contents;
4530
0
      loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
4531
0
      bfd_elf32_swap_reloca_out (output_bfd, &rela, loc);
4532
0
    }
4533
4534
  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  SYM may
4535
     be NULL for local symbols.  */
4536
0
  if (sym != NULL
4537
0
      && (h == elf_hash_table (info)->hdynamic
4538
0
    || h == elf_hash_table (info)->hgot))
4539
0
    sym->st_shndx = SHN_ABS;
4540
4541
0
  return true;
4542
0
}
4543
4544
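The GOT-filling branch above picks between two relocation flavours; a condensed restatement of that decision (a sketch with plain C stand-ins, not the BFD API) follows:

/* Simplified restatement of the choice in finish_dynamic_symbol:
   locally-resolved symbols in PIC output get R_KVX_RELATIVE with the
   link-time address in the addend, everything else gets R_KVX_GLOB_DAT
   against the symbol's dynamic index.  */
#include <stdio.h>

struct got_fixup { unsigned r_sym; const char *r_type; unsigned long addend; };

static struct got_fixup
choose_got_reloc (int link_pic, int references_local,
                  unsigned dynindx, unsigned long link_time_value)
{
  struct got_fixup f;

  if (link_pic && references_local)
    {
      f.r_sym = 0;                 /* ELF32_R_INFO (0, R_KVX_RELATIVE)  */
      f.r_type = "R_KVX_RELATIVE";
      f.addend = link_time_value;
    }
  else
    {
      f.r_sym = dynindx;           /* ELF32_R_INFO (h->dynindx, R_KVX_GLOB_DAT)  */
      f.r_type = "R_KVX_GLOB_DAT";
      f.addend = 0;
    }
  return f;
}

int
main (void)
{
  struct got_fixup f = choose_got_reloc (1, 1, 7, 0x10340);  /* hypothetical values  */
  printf ("%s sym=%u addend=%#lx\n", f.r_type, f.r_sym, f.addend);
  return 0;
}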
static void
4545
elf32_kvx_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
4546
         struct elf_kvx_link_hash_table *htab)
4547
0
{
4548
0
  memcpy (htab->root.splt->contents, elf32_kvx_small_plt0_entry,
4549
0
    PLT_ENTRY_SIZE);
4550
0
  elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
4551
0
    PLT_ENTRY_SIZE;
4552
0
}
4553
4554
static bool
4555
elf32_kvx_finish_dynamic_sections (bfd *output_bfd,
4556
           struct bfd_link_info *info)
4557
0
{
4558
0
  struct elf_kvx_link_hash_table *htab;
4559
0
  bfd *dynobj;
4560
0
  asection *sdyn;
4561
4562
0
  htab = elf_kvx_hash_table (info);
4563
0
  dynobj = htab->root.dynobj;
4564
0
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");
4565
4566
0
  if (htab->root.dynamic_sections_created)
4567
0
    {
4568
0
      Elf32_External_Dyn *dyncon, *dynconend;
4569
4570
0
      if (sdyn == NULL || htab->root.sgot == NULL)
4571
0
  abort ();
4572
4573
0
      dyncon = (Elf32_External_Dyn *) sdyn->contents;
4574
0
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
4575
0
      for (; dyncon < dynconend; dyncon++)
4576
0
  {
4577
0
    Elf_Internal_Dyn dyn;
4578
0
    asection *s;
4579
4580
0
    bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
4581
4582
0
    switch (dyn.d_tag)
4583
0
      {
4584
0
      default:
4585
0
        continue;
4586
4587
0
      case DT_PLTGOT:
4588
0
        s = htab->root.sgotplt;
4589
0
        dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4590
0
        break;
4591
4592
0
      case DT_JMPREL:
4593
0
        s = htab->root.srelplt;
4594
0
        dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4595
0
        break;
4596
4597
0
      case DT_PLTRELSZ:
4598
0
        s = htab->root.srelplt;
4599
0
        dyn.d_un.d_val = s->size;
4600
0
        break;
4601
4602
0
      case DT_RELASZ:
4603
        /* The procedure linkage table relocs (DT_JMPREL) should
4604
     not be included in the overall relocs (DT_RELA).
4605
     Therefore, we override the DT_RELASZ entry here to
4606
     make it not include the JMPREL relocs.  Since the
4607
     linker script arranges for .rela.plt to follow all
4608
     other relocation sections, we don't have to worry
4609
     about changing the DT_RELA entry.  */
4610
0
        if (htab->root.srelplt != NULL)
4611
0
    {
4612
0
      s = htab->root.srelplt;
4613
0
      dyn.d_un.d_val -= s->size;
4614
0
    }
4615
0
        break;
4616
0
      }
4617
4618
0
    bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
4619
0
  }
4620
4621
0
    }
4622
4623
  /* Fill in the special first entry in the procedure linkage table.  */
4624
0
  if (htab->root.splt && htab->root.splt->size > 0)
4625
0
    {
4626
0
      elf32_kvx_init_small_plt0_entry (output_bfd, htab);
4627
4628
0
      elf_section_data (htab->root.splt->output_section)->
4629
0
  this_hdr.sh_entsize = htab->plt_entry_size;
4630
0
    }
4631
4632
0
  if (htab->root.sgotplt)
4633
0
    {
4634
0
      if (bfd_is_abs_section (htab->root.sgotplt->output_section))
4635
0
  {
4636
0
    (*_bfd_error_handler)
4637
0
      (_("discarded output section: `%pA'"), htab->root.sgotplt);
4638
0
    return false;
4639
0
  }
4640
4641
      /* Fill in the first three entries in the global offset table.  */
4642
0
      if (htab->root.sgotplt->size > 0)
4643
0
  {
4644
0
    bfd_put_32 (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
4645
4646
    /* Write GOT[1] and GOT[2], needed for the dynamic linker.  */
4647
0
    bfd_put_32 (output_bfd,
4648
0
          (bfd_vma) 0,
4649
0
          htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
4650
0
    bfd_put_32 (output_bfd,
4651
0
          (bfd_vma) 0,
4652
0
          htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
4653
0
  }
4654
4655
0
      if (htab->root.sgot)
4656
0
  {
4657
0
    if (htab->root.sgot->size > 0)
4658
0
      {
4659
0
        bfd_vma addr =
4660
0
    sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
4661
0
        bfd_put_32 (output_bfd, addr, htab->root.sgot->contents);
4662
0
      }
4663
0
  }
4664
4665
0
      elf_section_data (htab->root.sgotplt->output_section)->
4666
0
  this_hdr.sh_entsize = GOT_ENTRY_SIZE;
4667
0
    }
4668
4669
0
  if (htab->root.sgot && htab->root.sgot->size > 0)
4670
0
    elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
4671
0
      = GOT_ENTRY_SIZE;
4672
4673
0
  return true;
4674
0
}
4675
4676
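For the DT_RELASZ override handled above, a trivial worked example (hypothetical section sizes; ELF32 RELA entries are 12 bytes):

/* Illustrative arithmetic only: DT_RELASZ is reduced by the size of
   .rela.plt so that the DT_RELA/DT_RELASZ range excludes JMPREL relocs.  */
#include <stdio.h>

int
main (void)
{
  unsigned long relasz = 120;    /* total RELA bytes as first computed  */
  unsigned long jmprel = 36;     /* .rela.plt: 3 PLT relocs * 12 bytes  */

  relasz -= jmprel;              /* dyn.d_un.d_val -= s->size;  */
  printf ("DT_RELASZ = %lu (JMPREL relocs excluded)\n", relasz);
  return 0;
}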
/* Return address for Ith PLT stub in section PLT, for relocation REL
4677
   or (bfd_vma) -1 if it should not be included.  */
4678
4679
static bfd_vma
4680
elf32_kvx_plt_sym_val (bfd_vma i, const asection *plt,
4681
           const arelent *rel ATTRIBUTE_UNUSED)
4682
0
{
4683
0
  return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
4684
0
}
4685
4686
#define ELF_ARCH      bfd_arch_kvx
4687
#define ELF_MACHINE_CODE    EM_KVX
4688
#define ELF_MAXPAGESIZE     0x10000
4689
#define ELF_MINPAGESIZE     0x1000
4690
#define ELF_COMMONPAGESIZE    0x1000
4691
4692
#define bfd_elf32_bfd_link_hash_table_create    \
4693
  elf32_kvx_link_hash_table_create
4694
4695
#define bfd_elf32_bfd_merge_private_bfd_data  \
4696
  elf32_kvx_merge_private_bfd_data
4697
4698
#define bfd_elf32_bfd_print_private_bfd_data  \
4699
  elf32_kvx_print_private_bfd_data
4700
4701
#define bfd_elf32_bfd_reloc_type_lookup   \
4702
  elf32_kvx_reloc_type_lookup
4703
4704
#define bfd_elf32_bfd_reloc_name_lookup   \
4705
  elf32_kvx_reloc_name_lookup
4706
4707
#define bfd_elf32_bfd_set_private_flags   \
4708
  elf32_kvx_set_private_flags
4709
4710
#define bfd_elf32_mkobject      \
4711
  elf32_kvx_mkobject
4712
4713
#define bfd_elf32_new_section_hook    \
4714
  elf32_kvx_new_section_hook
4715
4716
#define elf_backend_adjust_dynamic_symbol \
4717
  elf32_kvx_adjust_dynamic_symbol
4718
4719
#define elf_backend_always_size_sections  \
4720
  elf32_kvx_always_size_sections
4721
4722
#define elf_backend_check_relocs    \
4723
  elf32_kvx_check_relocs
4724
4725
#define elf_backend_copy_indirect_symbol  \
4726
  elf32_kvx_copy_indirect_symbol
4727
4728
/* Create .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
4729
   to them in our hash.  */
4730
#define elf_backend_create_dynamic_sections \
4731
  elf32_kvx_create_dynamic_sections
4732
4733
#define elf_backend_init_index_section    \
4734
  _bfd_elf_init_2_index_sections
4735
4736
#define elf_backend_finish_dynamic_sections \
4737
  elf32_kvx_finish_dynamic_sections
4738
4739
#define elf_backend_finish_dynamic_symbol \
4740
  elf32_kvx_finish_dynamic_symbol
4741
4742
#define elf_backend_object_p      \
4743
  elf32_kvx_object_p
4744
4745
#define elf_backend_output_arch_local_syms      \
4746
  elf32_kvx_output_arch_local_syms
4747
4748
#define elf_backend_plt_sym_val     \
4749
  elf32_kvx_plt_sym_val
4750
4751
#define elf_backend_init_file_header    \
4752
  elf32_kvx_init_file_header
4753
4754
#define elf_backend_init_process_headers  \
4755
  elf32_kvx_init_process_headers
4756
4757
#define elf_backend_relocate_section    \
4758
  elf32_kvx_relocate_section
4759
4760
#define elf_backend_reloc_type_class    \
4761
  elf32_kvx_reloc_type_class
4762
4763
#define elf_backend_size_dynamic_sections \
4764
  elf32_kvx_size_dynamic_sections
4765
4766
#define elf_backend_can_refcount       1
4767
#define elf_backend_can_gc_sections    1
4768
#define elf_backend_plt_readonly       1
4769
#define elf_backend_want_got_plt       1
4770
#define elf_backend_want_plt_sym       0
4771
#define elf_backend_may_use_rel_p      0
4772
#define elf_backend_may_use_rela_p     1
4773
#define elf_backend_default_use_rela_p 1
4774
#define elf_backend_rela_normal        1
4775
#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
4776
#define elf_backend_default_execstack  0
4777
#define elf_backend_extern_protected_data 1
4778
#define elf_backend_hash_symbol elf_kvx_hash_symbol
4779
4780
#include "elf32-target.h"