Coverage Report

Created: 2023-08-28 06:28

/src/binutils-gdb/bfd/elf32-spu.c
Line
Count
Source (jump to first uncovered line)
1
/* SPU specific support for 32-bit ELF
2
3
   Copyright (C) 2006-2023 Free Software Foundation, Inc.
4
5
   This file is part of BFD, the Binary File Descriptor library.
6
7
   This program is free software; you can redistribute it and/or modify
8
   it under the terms of the GNU General Public License as published by
9
   the Free Software Foundation; either version 3 of the License, or
10
   (at your option) any later version.
11
12
   This program is distributed in the hope that it will be useful,
13
   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
   GNU General Public License for more details.
16
17
   You should have received a copy of the GNU General Public License along
18
   with this program; if not, write to the Free Software Foundation, Inc.,
19
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
20
21
#include "sysdep.h"
22
#include "libiberty.h"
23
#include "bfd.h"
24
#include "bfdlink.h"
25
#include "libbfd.h"
26
#include "elf-bfd.h"
27
#include "elf/spu.h"
28
#include "elf32-spu.h"
29
30
/* All users of this file have bfd_octets_per_byte (abfd, sec) == 1.  */
31
0
#define OCTETS_PER_BYTE(ABFD, SEC) 1
32
33
/* We use RELA style relocs.  Don't define USE_REL.  */
34
35
static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
36
             void *, asection *,
37
             bfd *, char **);
38
39
/* Values of type 'enum elf_spu_reloc_type' are used to index this
40
   array, so it must be declared in the order of that type.  */
41
42
static reloc_howto_type elf_howto_table[] = {
43
  HOWTO (R_SPU_NONE,     0, 0,  0, false,  0, complain_overflow_dont,
44
   bfd_elf_generic_reloc, "SPU_NONE",
45
   false, 0, 0x00000000, false),
46
  HOWTO (R_SPU_ADDR10,     4, 4, 10, false, 14, complain_overflow_bitfield,
47
   bfd_elf_generic_reloc, "SPU_ADDR10",
48
   false, 0, 0x00ffc000, false),
49
  HOWTO (R_SPU_ADDR16,     2, 4, 16, false,  7, complain_overflow_bitfield,
50
   bfd_elf_generic_reloc, "SPU_ADDR16",
51
   false, 0, 0x007fff80, false),
52
  HOWTO (R_SPU_ADDR16_HI, 16, 4, 16, false,  7, complain_overflow_bitfield,
53
   bfd_elf_generic_reloc, "SPU_ADDR16_HI",
54
   false, 0, 0x007fff80, false),
55
  HOWTO (R_SPU_ADDR16_LO,  0, 4, 16, false,  7, complain_overflow_dont,
56
   bfd_elf_generic_reloc, "SPU_ADDR16_LO",
57
   false, 0, 0x007fff80, false),
58
  HOWTO (R_SPU_ADDR18,     0, 4, 18, false,  7, complain_overflow_bitfield,
59
   bfd_elf_generic_reloc, "SPU_ADDR18",
60
   false, 0, 0x01ffff80, false),
61
  HOWTO (R_SPU_ADDR32,     0, 4, 32, false,  0, complain_overflow_dont,
62
   bfd_elf_generic_reloc, "SPU_ADDR32",
63
   false, 0, 0xffffffff, false),
64
  HOWTO (R_SPU_REL16,    2, 4, 16,  true,  7, complain_overflow_bitfield,
65
   bfd_elf_generic_reloc, "SPU_REL16",
66
   false, 0, 0x007fff80, true),
67
  HOWTO (R_SPU_ADDR7,    0, 4,  7, false, 14, complain_overflow_dont,
68
   bfd_elf_generic_reloc, "SPU_ADDR7",
69
   false, 0, 0x001fc000, false),
70
  HOWTO (R_SPU_REL9,     2, 4,  9,  true,  0, complain_overflow_signed,
71
   spu_elf_rel9,    "SPU_REL9",
72
   false, 0, 0x0180007f, true),
73
  HOWTO (R_SPU_REL9I,    2, 4,  9,  true,  0, complain_overflow_signed,
74
   spu_elf_rel9,    "SPU_REL9I",
75
   false, 0, 0x0000c07f, true),
76
  HOWTO (R_SPU_ADDR10I,    0, 4, 10, false, 14, complain_overflow_signed,
77
   bfd_elf_generic_reloc, "SPU_ADDR10I",
78
   false, 0, 0x00ffc000, false),
79
  HOWTO (R_SPU_ADDR16I,    0, 4, 16, false,  7, complain_overflow_signed,
80
   bfd_elf_generic_reloc, "SPU_ADDR16I",
81
   false, 0, 0x007fff80, false),
82
  HOWTO (R_SPU_REL32,    0, 4, 32, true,  0, complain_overflow_dont,
83
   bfd_elf_generic_reloc, "SPU_REL32",
84
   false, 0, 0xffffffff, true),
85
  HOWTO (R_SPU_ADDR16X,    0, 4, 16, false,  7, complain_overflow_bitfield,
86
   bfd_elf_generic_reloc, "SPU_ADDR16X",
87
   false, 0, 0x007fff80, false),
88
  HOWTO (R_SPU_PPU32,    0, 4, 32, false,  0, complain_overflow_dont,
89
   bfd_elf_generic_reloc, "SPU_PPU32",
90
   false, 0, 0xffffffff, false),
91
  HOWTO (R_SPU_PPU64,    0, 8, 64, false,  0, complain_overflow_dont,
92
   bfd_elf_generic_reloc, "SPU_PPU64",
93
   false, 0, -1, false),
94
  HOWTO (R_SPU_ADD_PIC,    0, 0,  0, false,  0, complain_overflow_dont,
95
   bfd_elf_generic_reloc, "SPU_ADD_PIC",
96
   false, 0, 0x00000000, false),
97
};
98
99
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
100
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
101
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
102
  { NULL, 0, 0, 0, 0 }
103
};
104
105
static enum elf_spu_reloc_type
106
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
107
0
{
108
0
  switch (code)
109
0
    {
110
0
    default:
111
0
      return (enum elf_spu_reloc_type) -1;
112
0
    case BFD_RELOC_NONE:
113
0
      return R_SPU_NONE;
114
0
    case BFD_RELOC_SPU_IMM10W:
115
0
      return R_SPU_ADDR10;
116
0
    case BFD_RELOC_SPU_IMM16W:
117
0
      return R_SPU_ADDR16;
118
0
    case BFD_RELOC_SPU_LO16:
119
0
      return R_SPU_ADDR16_LO;
120
0
    case BFD_RELOC_SPU_HI16:
121
0
      return R_SPU_ADDR16_HI;
122
0
    case BFD_RELOC_SPU_IMM18:
123
0
      return R_SPU_ADDR18;
124
0
    case BFD_RELOC_SPU_PCREL16:
125
0
      return R_SPU_REL16;
126
0
    case BFD_RELOC_SPU_IMM7:
127
0
      return R_SPU_ADDR7;
128
0
    case BFD_RELOC_SPU_IMM8:
129
0
      return R_SPU_NONE;
130
0
    case BFD_RELOC_SPU_PCREL9a:
131
0
      return R_SPU_REL9;
132
0
    case BFD_RELOC_SPU_PCREL9b:
133
0
      return R_SPU_REL9I;
134
0
    case BFD_RELOC_SPU_IMM10:
135
0
      return R_SPU_ADDR10I;
136
0
    case BFD_RELOC_SPU_IMM16:
137
0
      return R_SPU_ADDR16I;
138
0
    case BFD_RELOC_32:
139
0
      return R_SPU_ADDR32;
140
0
    case BFD_RELOC_32_PCREL:
141
0
      return R_SPU_REL32;
142
0
    case BFD_RELOC_SPU_PPU32:
143
0
      return R_SPU_PPU32;
144
0
    case BFD_RELOC_SPU_PPU64:
145
0
      return R_SPU_PPU64;
146
0
    case BFD_RELOC_SPU_ADD_PIC:
147
0
      return R_SPU_ADD_PIC;
148
0
    }
149
0
}
150
151
static bool
152
spu_elf_info_to_howto (bfd *abfd,
153
           arelent *cache_ptr,
154
           Elf_Internal_Rela *dst)
155
0
{
156
0
  enum elf_spu_reloc_type r_type;
157
158
0
  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
159
  /* PR 17512: file: 90c2a92e.  */
160
0
  if (r_type >= R_SPU_max)
161
0
    {
162
      /* xgettext:c-format */
163
0
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
164
0
        abfd, r_type);
165
0
      bfd_set_error (bfd_error_bad_value);
166
0
      return false;
167
0
    }
168
0
  cache_ptr->howto = &elf_howto_table[(int) r_type];
169
0
  return true;
170
0
}
171
172
static reloc_howto_type *
173
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
174
         bfd_reloc_code_real_type code)
175
0
{
176
0
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
177
178
0
  if (r_type == (enum elf_spu_reloc_type) -1)
179
0
    return NULL;
180
181
0
  return elf_howto_table + r_type;
182
0
}
183
184
static reloc_howto_type *
185
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
186
         const char *r_name)
187
0
{
188
0
  unsigned int i;
189
190
0
  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
191
0
    if (elf_howto_table[i].name != NULL
192
0
  && strcasecmp (elf_howto_table[i].name, r_name) == 0)
193
0
      return &elf_howto_table[i];
194
195
0
  return NULL;
196
0
}
197
198
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */
199
200
static bfd_reloc_status_type
201
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
202
        void *data, asection *input_section,
203
        bfd *output_bfd, char **error_message)
204
0
{
205
0
  bfd_size_type octets;
206
0
  bfd_vma val;
207
0
  long insn;
208
209
  /* If this is a relocatable link (output_bfd test tells us), just
210
     call the generic function.  Any adjustment will be done at final
211
     link time.  */
212
0
  if (output_bfd != NULL)
213
0
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
214
0
          input_section, output_bfd, error_message);
215
216
0
  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
217
0
    return bfd_reloc_outofrange;
218
0
  octets = reloc_entry->address * OCTETS_PER_BYTE (abfd, input_section);
219
220
  /* Get symbol value.  */
221
0
  val = 0;
222
0
  if (!bfd_is_com_section (symbol->section))
223
0
    val = symbol->value;
224
0
  if (symbol->section->output_section)
225
0
    val += symbol->section->output_section->vma;
226
227
0
  val += reloc_entry->addend;
228
229
  /* Make it pc-relative.  */
230
0
  val -= input_section->output_section->vma + input_section->output_offset;
231
232
0
  val >>= 2;
233
0
  if (val + 256 >= 512)
234
0
    return bfd_reloc_overflow;
235
236
0
  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
237
238
  /* Move two high bits of value to REL9I and REL9 position.
239
     The mask will take care of selecting the right field.  */
240
0
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
241
0
  insn &= ~reloc_entry->howto->dst_mask;
242
0
  insn |= val & reloc_entry->howto->dst_mask;
243
0
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
244
0
  return bfd_reloc_ok;
245
0
}
246
247
static bool
248
spu_elf_new_section_hook (bfd *abfd, asection *sec)
249
0
{
250
0
  if (!sec->used_by_bfd)
251
0
    {
252
0
      struct _spu_elf_section_data *sdata;
253
254
0
      sdata = bfd_zalloc (abfd, sizeof (*sdata));
255
0
      if (sdata == NULL)
256
0
  return false;
257
0
      sec->used_by_bfd = sdata;
258
0
    }
259
260
0
  return _bfd_elf_new_section_hook (abfd, sec);
261
0
}
262
263
/* Set up overlay info for executables.  */
264
265
static bool
266
spu_elf_object_p (bfd *abfd)
267
0
{
268
0
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
269
0
    {
270
0
      unsigned int i, num_ovl, num_buf;
271
0
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
272
0
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
273
0
      Elf_Internal_Phdr *last_phdr = NULL;
274
275
0
      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
276
0
  if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
277
0
    {
278
0
      unsigned int j;
279
280
0
      ++num_ovl;
281
0
      if (last_phdr == NULL
282
0
    || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
283
0
        ++num_buf;
284
0
      last_phdr = phdr;
285
0
      for (j = 1; j < elf_numsections (abfd); j++)
286
0
        {
287
0
    Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
288
289
0
    if (shdr->bfd_section != NULL
290
0
        && ELF_SECTION_SIZE (shdr, phdr) != 0
291
0
        && ELF_SECTION_IN_SEGMENT (shdr, phdr))
292
0
      {
293
0
        asection *sec = shdr->bfd_section;
294
0
        spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
295
0
        spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
296
0
      }
297
0
        }
298
0
    }
299
0
    }
300
0
  return true;
301
0
}
302
303
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
304
   strip --strip-unneeded will not remove them.  */
305
306
static void
307
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
308
0
{
309
0
  if (sym->name != NULL
310
0
      && sym->section != bfd_abs_section_ptr
311
0
      && startswith (sym->name, "_EAR_"))
312
0
    sym->flags |= BSF_KEEP;
313
0
}
314
315
/* SPU ELF linker hash table.  */
316
317
struct spu_link_hash_table
318
{
319
  struct elf_link_hash_table elf;
320
321
  struct spu_elf_params *params;
322
323
  /* Shortcuts to overlay sections.  */
324
  asection *ovtab;
325
  asection *init;
326
  asection *toe;
327
  asection **ovl_sec;
328
329
  /* Count of stubs in each overlay section.  */
330
  unsigned int *stub_count;
331
332
  /* The stub section for each overlay section.  */
333
  asection **stub_sec;
334
335
  struct elf_link_hash_entry *ovly_entry[2];
336
337
  /* Number of overlay buffers.  */
338
  unsigned int num_buf;
339
340
  /* Total number of overlays.  */
341
  unsigned int num_overlays;
342
343
  /* For soft icache.  */
344
  unsigned int line_size_log2;
345
  unsigned int num_lines_log2;
346
  unsigned int fromelem_size_log2;
347
348
  /* How much memory we have.  */
349
  unsigned int local_store;
350
351
  /* Count of overlay stubs needed in non-overlay area.  */
352
  unsigned int non_ovly_stub;
353
354
  /* Pointer to the fixup section */
355
  asection *sfixup;
356
357
  /* Set on error.  */
358
  unsigned int stub_err : 1;
359
};
360
361
/* Hijack the generic got fields for overlay stub accounting.  */
362
363
struct got_entry
364
{
365
  struct got_entry *next;
366
  unsigned int ovl;
367
  union {
368
    bfd_vma addend;
369
    bfd_vma br_addr;
370
  };
371
  bfd_vma stub_addr;
372
};
373
374
#define spu_hash_table(p) \
375
0
  ((is_elf_hash_table ((p)->hash)          \
376
0
    && elf_hash_table_id (elf_hash_table (p)) == SPU_ELF_DATA)   \
377
0
   ? (struct spu_link_hash_table *) (p)->hash : NULL)
378
379
struct call_info
380
{
381
  struct function_info *fun;
382
  struct call_info *next;
383
  unsigned int count;
384
  unsigned int max_depth;
385
  unsigned int is_tail : 1;
386
  unsigned int is_pasted : 1;
387
  unsigned int broken_cycle : 1;
388
  unsigned int priority : 13;
389
};
390
391
struct function_info
392
{
393
  /* List of functions called.  Also branches to hot/cold part of
394
     function.  */
395
  struct call_info *call_list;
396
  /* For hot/cold part of function, point to owner.  */
397
  struct function_info *start;
398
  /* Symbol at start of function.  */
399
  union {
400
    Elf_Internal_Sym *sym;
401
    struct elf_link_hash_entry *h;
402
  } u;
403
  /* Function section.  */
404
  asection *sec;
405
  asection *rodata;
406
  /* Where last called from, and number of sections called from.  */
407
  asection *last_caller;
408
  unsigned int call_count;
409
  /* Address range of (this part of) function.  */
410
  bfd_vma lo, hi;
411
  /* Offset where we found a store of lr, or -1 if none found.  */
412
  bfd_vma lr_store;
413
  /* Offset where we found the stack adjustment insn.  */
414
  bfd_vma sp_adjust;
415
  /* Stack usage.  */
416
  int stack;
417
  /* Distance from root of call tree.  Tail and hot/cold branches
418
     count as one deeper.  We aren't counting stack frames here.  */
419
  unsigned int depth;
420
  /* Set if global symbol.  */
421
  unsigned int global : 1;
422
  /* Set if known to be start of function (as distinct from a hunk
423
     in hot/cold section.  */
424
  unsigned int is_func : 1;
425
  /* Set if not a root node.  */
426
  unsigned int non_root : 1;
427
  /* Flags used during call tree traversal.  It's cheaper to replicate
428
     the visit flags than have one which needs clearing after a traversal.  */
429
  unsigned int visit1 : 1;
430
  unsigned int visit2 : 1;
431
  unsigned int marking : 1;
432
  unsigned int visit3 : 1;
433
  unsigned int visit4 : 1;
434
  unsigned int visit5 : 1;
435
  unsigned int visit6 : 1;
436
  unsigned int visit7 : 1;
437
};
438
439
struct spu_elf_stack_info
440
{
441
  int num_fun;
442
  int max_fun;
443
  /* Variable size array describing functions, one per contiguous
444
     address range belonging to a function.  */
445
  struct function_info fun[1];
446
};
447
448
static struct function_info *find_function (asection *, bfd_vma,
449
              struct bfd_link_info *);
450
451
/* Create a spu ELF linker hash table.  */
452
453
static struct bfd_link_hash_table *
454
spu_elf_link_hash_table_create (bfd *abfd)
455
0
{
456
0
  struct spu_link_hash_table *htab;
457
458
0
  htab = bfd_zmalloc (sizeof (*htab));
459
0
  if (htab == NULL)
460
0
    return NULL;
461
462
0
  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
463
0
              _bfd_elf_link_hash_newfunc,
464
0
              sizeof (struct elf_link_hash_entry),
465
0
              SPU_ELF_DATA))
466
0
    {
467
0
      free (htab);
468
0
      return NULL;
469
0
    }
470
471
0
  htab->elf.init_got_refcount.refcount = 0;
472
0
  htab->elf.init_got_refcount.glist = NULL;
473
0
  htab->elf.init_got_offset.offset = 0;
474
0
  htab->elf.init_got_offset.glist = NULL;
475
0
  return &htab->elf.root;
476
0
}
477
478
void
479
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
480
0
{
481
0
  bfd_vma max_branch_log2;
482
483
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
484
0
  htab->params = params;
485
0
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
486
0
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
487
488
  /* For the software i-cache, we provide a "from" list whose size
489
     is a power-of-two number of quadwords, big enough to hold one
490
     byte per outgoing branch.  Compute this number here.  */
491
0
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
492
0
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
493
0
}
494
495
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
496
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
497
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */
498
499
static bool
500
get_sym_h (struct elf_link_hash_entry **hp,
501
     Elf_Internal_Sym **symp,
502
     asection **symsecp,
503
     Elf_Internal_Sym **locsymsp,
504
     unsigned long r_symndx,
505
     bfd *ibfd)
506
0
{
507
0
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
508
509
0
  if (r_symndx >= symtab_hdr->sh_info)
510
0
    {
511
0
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
512
0
      struct elf_link_hash_entry *h;
513
514
0
      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
515
0
      while (h->root.type == bfd_link_hash_indirect
516
0
       || h->root.type == bfd_link_hash_warning)
517
0
  h = (struct elf_link_hash_entry *) h->root.u.i.link;
518
519
0
      if (hp != NULL)
520
0
  *hp = h;
521
522
0
      if (symp != NULL)
523
0
  *symp = NULL;
524
525
0
      if (symsecp != NULL)
526
0
  {
527
0
    asection *symsec = NULL;
528
0
    if (h->root.type == bfd_link_hash_defined
529
0
        || h->root.type == bfd_link_hash_defweak)
530
0
      symsec = h->root.u.def.section;
531
0
    *symsecp = symsec;
532
0
  }
533
0
    }
534
0
  else
535
0
    {
536
0
      Elf_Internal_Sym *sym;
537
0
      Elf_Internal_Sym *locsyms = *locsymsp;
538
539
0
      if (locsyms == NULL)
540
0
  {
541
0
    locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
542
0
    if (locsyms == NULL)
543
0
      locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
544
0
              symtab_hdr->sh_info,
545
0
              0, NULL, NULL, NULL);
546
0
    if (locsyms == NULL)
547
0
      return false;
548
0
    *locsymsp = locsyms;
549
0
  }
550
0
      sym = locsyms + r_symndx;
551
552
0
      if (hp != NULL)
553
0
  *hp = NULL;
554
555
0
      if (symp != NULL)
556
0
  *symp = sym;
557
558
0
      if (symsecp != NULL)
559
0
  *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
560
0
    }
561
562
0
  return true;
563
0
}
564
565
/* Create the note section if not already present.  This is done early so
566
   that the linker maps the sections to the right place in the output.  */
567
568
bool
569
spu_elf_create_sections (struct bfd_link_info *info)
570
0
{
571
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
572
0
  bfd *ibfd;
573
574
0
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
575
0
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
576
0
      break;
577
578
0
  if (ibfd == NULL)
579
0
    {
580
      /* Make SPU_PTNOTE_SPUNAME section.  */
581
0
      asection *s;
582
0
      size_t name_len;
583
0
      size_t size;
584
0
      bfd_byte *data;
585
0
      flagword flags;
586
587
0
      ibfd = info->input_bfds;
588
      /* This should really be SEC_LINKER_CREATED, but then we'd need
589
   to write out the section ourselves.  */
590
0
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
591
0
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
592
0
      if (s == NULL
593
0
    || !bfd_set_section_alignment (s, 4))
594
0
  return false;
595
      /* Because we didn't set SEC_LINKER_CREATED we need to set the
596
   proper section type.  */
597
0
      elf_section_type (s) = SHT_NOTE;
598
599
0
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
600
0
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
601
0
      size += (name_len + 3) & -4;
602
603
0
      if (!bfd_set_section_size (s, size))
604
0
  return false;
605
606
0
      data = bfd_zalloc (ibfd, size);
607
0
      if (data == NULL)
608
0
  return false;
609
610
0
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
611
0
      bfd_put_32 (ibfd, name_len, data + 4);
612
0
      bfd_put_32 (ibfd, 1, data + 8);
613
0
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
614
0
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
615
0
        bfd_get_filename (info->output_bfd), name_len);
616
0
      s->contents = data;
617
0
    }
618
619
0
  if (htab->params->emit_fixups)
620
0
    {
621
0
      asection *s;
622
0
      flagword flags;
623
624
0
      if (htab->elf.dynobj == NULL)
625
0
  htab->elf.dynobj = ibfd;
626
0
      ibfd = htab->elf.dynobj;
627
0
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
628
0
         | SEC_IN_MEMORY | SEC_LINKER_CREATED);
629
0
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
630
0
      if (s == NULL || !bfd_set_section_alignment (s, 2))
631
0
  return false;
632
0
      htab->sfixup = s;
633
0
    }
634
635
0
  return true;
636
0
}
637
638
/* qsort predicate to sort sections by vma.  */
639
640
static int
641
sort_sections (const void *a, const void *b)
642
0
{
643
0
  const asection *const *s1 = a;
644
0
  const asection *const *s2 = b;
645
0
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
646
647
0
  if (delta != 0)
648
0
    return delta < 0 ? -1 : 1;
649
650
0
  return (*s1)->index - (*s2)->index;
651
0
}
652
653
/* Identify overlays in the output bfd, and number them.
654
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */
655
656
int
657
spu_elf_find_overlays (struct bfd_link_info *info)
658
0
{
659
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
660
0
  asection **alloc_sec;
661
0
  unsigned int i, n, ovl_index, num_buf;
662
0
  asection *s;
663
0
  bfd_vma ovl_end;
664
0
  static const char *const entry_names[2][2] = {
665
0
    { "__ovly_load", "__icache_br_handler" },
666
0
    { "__ovly_return", "__icache_call_handler" }
667
0
  };
668
669
0
  if (info->output_bfd->section_count < 2)
670
0
    return 1;
671
672
0
  alloc_sec
673
0
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
674
0
  if (alloc_sec == NULL)
675
0
    return 0;
676
677
  /* Pick out all the alloced sections.  */
678
0
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
679
0
    if ((s->flags & SEC_ALLOC) != 0
680
0
  && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
681
0
  && s->size != 0)
682
0
      alloc_sec[n++] = s;
683
684
0
  if (n == 0)
685
0
    {
686
0
      free (alloc_sec);
687
0
      return 1;
688
0
    }
689
690
  /* Sort them by vma.  */
691
0
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
692
693
0
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
694
0
  if (htab->params->ovly_flavour == ovly_soft_icache)
695
0
    {
696
0
      unsigned int prev_buf = 0, set_id = 0;
697
698
      /* Look for an overlapping vma to find the first overlay section.  */
699
0
      bfd_vma vma_start = 0;
700
701
0
      for (i = 1; i < n; i++)
702
0
  {
703
0
    s = alloc_sec[i];
704
0
    if (s->vma < ovl_end)
705
0
      {
706
0
        asection *s0 = alloc_sec[i - 1];
707
0
        vma_start = s0->vma;
708
0
        ovl_end = (s0->vma
709
0
       + ((bfd_vma) 1
710
0
          << (htab->num_lines_log2 + htab->line_size_log2)));
711
0
        --i;
712
0
        break;
713
0
      }
714
0
    else
715
0
      ovl_end = s->vma + s->size;
716
0
  }
717
718
      /* Now find any sections within the cache area.  */
719
0
      for (ovl_index = 0, num_buf = 0; i < n; i++)
720
0
  {
721
0
    s = alloc_sec[i];
722
0
    if (s->vma >= ovl_end)
723
0
      break;
724
725
    /* A section in an overlay area called .ovl.init is not
726
       an overlay, in the sense that it might be loaded in
727
       by the overlay manager, but rather the initial
728
       section contents for the overlay buffer.  */
729
0
    if (!startswith (s->name, ".ovl.init"))
730
0
      {
731
0
        num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
732
0
        set_id = (num_buf == prev_buf)? set_id + 1 : 0;
733
0
        prev_buf = num_buf;
734
735
0
        if ((s->vma - vma_start) & (htab->params->line_size - 1))
736
0
    {
737
0
      info->callbacks->einfo (_("%X%P: overlay section %pA "
738
0
              "does not start on a cache line\n"),
739
0
            s);
740
0
      bfd_set_error (bfd_error_bad_value);
741
0
      return 0;
742
0
    }
743
0
        else if (s->size > htab->params->line_size)
744
0
    {
745
0
      info->callbacks->einfo (_("%X%P: overlay section %pA "
746
0
              "is larger than a cache line\n"),
747
0
            s);
748
0
      bfd_set_error (bfd_error_bad_value);
749
0
      return 0;
750
0
    }
751
752
0
        alloc_sec[ovl_index++] = s;
753
0
        spu_elf_section_data (s)->u.o.ovl_index
754
0
    = (set_id << htab->num_lines_log2) + num_buf;
755
0
        spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
756
0
      }
757
0
  }
758
759
      /* Ensure there are no more overlay sections.  */
760
0
      for ( ; i < n; i++)
761
0
  {
762
0
    s = alloc_sec[i];
763
0
    if (s->vma < ovl_end)
764
0
      {
765
0
        info->callbacks->einfo (_("%X%P: overlay section %pA "
766
0
          "is not in cache area\n"),
767
0
              alloc_sec[i-1]);
768
0
        bfd_set_error (bfd_error_bad_value);
769
0
        return 0;
770
0
      }
771
0
    else
772
0
      ovl_end = s->vma + s->size;
773
0
  }
774
0
    }
775
0
  else
776
0
    {
777
      /* Look for overlapping vmas.  Any with overlap must be overlays.
778
   Count them.  Also count the number of overlay regions.  */
779
0
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
780
0
  {
781
0
    s = alloc_sec[i];
782
0
    if (s->vma < ovl_end)
783
0
      {
784
0
        asection *s0 = alloc_sec[i - 1];
785
786
0
        if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
787
0
    {
788
0
      ++num_buf;
789
0
      if (!startswith (s0->name, ".ovl.init"))
790
0
        {
791
0
          alloc_sec[ovl_index] = s0;
792
0
          spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
793
0
          spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
794
0
        }
795
0
      else
796
0
        ovl_end = s->vma + s->size;
797
0
    }
798
0
        if (!startswith (s->name, ".ovl.init"))
799
0
    {
800
0
      alloc_sec[ovl_index] = s;
801
0
      spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
802
0
      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
803
0
      if (s0->vma != s->vma)
804
0
        {
805
          /* xgettext:c-format */
806
0
          info->callbacks->einfo (_("%X%P: overlay sections %pA "
807
0
            "and %pA do not start at the "
808
0
            "same address\n"),
809
0
                s0, s);
810
0
          bfd_set_error (bfd_error_bad_value);
811
0
          return 0;
812
0
        }
813
0
      if (ovl_end < s->vma + s->size)
814
0
        ovl_end = s->vma + s->size;
815
0
    }
816
0
      }
817
0
    else
818
0
      ovl_end = s->vma + s->size;
819
0
  }
820
0
    }
821
822
0
  htab->num_overlays = ovl_index;
823
0
  htab->num_buf = num_buf;
824
0
  htab->ovl_sec = alloc_sec;
825
826
0
  if (ovl_index == 0)
827
0
    return 1;
828
829
0
  for (i = 0; i < 2; i++)
830
0
    {
831
0
      const char *name;
832
0
      struct elf_link_hash_entry *h;
833
834
0
      name = entry_names[i][htab->params->ovly_flavour];
835
0
      h = elf_link_hash_lookup (&htab->elf, name, true, false, false);
836
0
      if (h == NULL)
837
0
  return 0;
838
839
0
      if (h->root.type == bfd_link_hash_new)
840
0
  {
841
0
    h->root.type = bfd_link_hash_undefined;
842
0
    h->ref_regular = 1;
843
0
    h->ref_regular_nonweak = 1;
844
0
    h->non_elf = 0;
845
0
  }
846
0
      htab->ovly_entry[i] = h;
847
0
    }
848
849
0
  return 2;
850
0
}
851
852
/* Non-zero to use bra in overlay stubs rather than br.  */
853
0
#define BRA_STUBS 0
854
855
#define BRA 0x30000000
856
#define BRASL 0x31000000
857
#define BR  0x32000000
858
#define BRSL  0x33000000
859
#define NOP 0x40200000
860
#define LNOP  0x00200000
861
#define ILA 0x42000000
862
863
/* Return true for all relative and absolute branch instructions.
864
   bra   00110000 0..
865
   brasl 00110001 0..
866
   br    00110010 0..
867
   brsl  00110011 0..
868
   brz   00100000 0..
869
   brnz  00100001 0..
870
   brhz  00100010 0..
871
   brhnz 00100011 0..  */
872
873
static bool
874
is_branch (const unsigned char *insn)
875
0
{
876
0
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
877
0
}
878
879
/* Return true for all indirect branch instructions.
880
   bi     00110101 000
881
   bisl   00110101 001
882
   iret   00110101 010
883
   bisled 00110101 011
884
   biz    00100101 000
885
   binz   00100101 001
886
   bihz   00100101 010
887
   bihnz  00100101 011  */
888
889
static bool
890
is_indirect_branch (const unsigned char *insn)
891
0
{
892
0
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
893
0
}
894
895
/* Return true for branch hint instructions.
896
   hbra  0001000..
897
   hbrr  0001001..  */
898
899
static bool
900
is_hint (const unsigned char *insn)
901
0
{
902
0
  return (insn[0] & 0xfc) == 0x10;
903
0
}
904
905
/* True if INPUT_SECTION might need overlay stubs.  */
906
907
static bool
908
maybe_needs_stubs (asection *input_section)
909
0
{
910
  /* No stubs for debug sections and suchlike.  */
911
0
  if ((input_section->flags & SEC_ALLOC) == 0)
912
0
    return false;
913
914
  /* No stubs for link-once sections that will be discarded.  */
915
0
  if (input_section->output_section == bfd_abs_section_ptr)
916
0
    return false;
917
918
  /* Don't create stubs for .eh_frame references.  */
919
0
  if (strcmp (input_section->name, ".eh_frame") == 0)
920
0
    return false;
921
922
0
  return true;
923
0
}
924
925
enum _stub_type
926
{
927
  no_stub,
928
  call_ovl_stub,
929
  br000_ovl_stub,
930
  br001_ovl_stub,
931
  br010_ovl_stub,
932
  br011_ovl_stub,
933
  br100_ovl_stub,
934
  br101_ovl_stub,
935
  br110_ovl_stub,
936
  br111_ovl_stub,
937
  nonovl_stub,
938
  stub_error
939
};
940
941
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.
   H/SYM identify the reloc target (H non-NULL for global symbols),
   SYM_SEC is the target's section, INPUT_SECTION and IRELA locate the
   reloc, and CONTENTS, if non-NULL, is the already-read section
   contents (otherwise the four instruction bytes are read here).
   Returns stub_error if the contents cannot be read.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bool branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* No stub for targets without a definition in an SPU-managed
     output section.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (startswith (h->root.root.string, "setjmp")
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = false;
  hint = false;
  call = false;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      /* These relocs appear in branch/hint instructions; inspect the
	 instruction to see which kind.  */
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* 0x31/0x33 (brasl/brsl) are the "and link" call forms.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      _bfd_error_handler
		/* xgettext:c-format */
		(_("warning: call to non-function symbol %s defined in %pB"),
		 sym_name, sym_sec->owner);

	    }
	}
    }

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
	/* lrlive bits supplied in the branch instruction encoding
	   (via .brinfo — see build_stub's matching decode).  */
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
1075
1076
static bool
1077
count_stub (struct spu_link_hash_table *htab,
1078
      bfd *ibfd,
1079
      asection *isec,
1080
      enum _stub_type stub_type,
1081
      struct elf_link_hash_entry *h,
1082
      const Elf_Internal_Rela *irela)
1083
0
{
1084
0
  unsigned int ovl = 0;
1085
0
  struct got_entry *g, **head;
1086
0
  bfd_vma addend;
1087
1088
  /* If this instruction is a branch or call, we need a stub
1089
     for it.  One stub per function per overlay.
1090
     If it isn't a branch, then we are taking the address of
1091
     this function so need a stub in the non-overlay area
1092
     for it.  One stub per function.  */
1093
0
  if (stub_type != nonovl_stub)
1094
0
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1095
1096
0
  if (h != NULL)
1097
0
    head = &h->got.glist;
1098
0
  else
1099
0
    {
1100
0
      if (elf_local_got_ents (ibfd) == NULL)
1101
0
  {
1102
0
    bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
1103
0
             * sizeof (*elf_local_got_ents (ibfd)));
1104
0
    elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
1105
0
    if (elf_local_got_ents (ibfd) == NULL)
1106
0
      return false;
1107
0
  }
1108
0
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1109
0
    }
1110
1111
0
  if (htab->params->ovly_flavour == ovly_soft_icache)
1112
0
    {
1113
0
      htab->stub_count[ovl] += 1;
1114
0
      return true;
1115
0
    }
1116
1117
0
  addend = 0;
1118
0
  if (irela != NULL)
1119
0
    addend = irela->r_addend;
1120
1121
0
  if (ovl == 0)
1122
0
    {
1123
0
      struct got_entry *gnext;
1124
1125
0
      for (g = *head; g != NULL; g = g->next)
1126
0
  if (g->addend == addend && g->ovl == 0)
1127
0
    break;
1128
1129
0
      if (g == NULL)
1130
0
  {
1131
    /* Need a new non-overlay area stub.  Zap other stubs.  */
1132
0
    for (g = *head; g != NULL; g = gnext)
1133
0
      {
1134
0
        gnext = g->next;
1135
0
        if (g->addend == addend)
1136
0
    {
1137
0
      htab->stub_count[g->ovl] -= 1;
1138
0
      free (g);
1139
0
    }
1140
0
      }
1141
0
  }
1142
0
    }
1143
0
  else
1144
0
    {
1145
0
      for (g = *head; g != NULL; g = g->next)
1146
0
  if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1147
0
    break;
1148
0
    }
1149
1150
0
  if (g == NULL)
1151
0
    {
1152
0
      g = bfd_malloc (sizeof *g);
1153
0
      if (g == NULL)
1154
0
  return false;
1155
0
      g->ovl = ovl;
1156
0
      g->addend = addend;
1157
0
      g->stub_addr = (bfd_vma) -1;
1158
0
      g->next = *head;
1159
0
      *head = g;
1160
1161
0
      htab->stub_count[ovl] += 1;
1162
0
    }
1163
1164
0
  return true;
1165
0
}
1166
1167
/* Support two sizes of overlay stubs, a slower more compact stub of two
1168
   instructions, and a faster stub of four instructions.
1169
   Soft-icache stubs are four or eight words.  */
1170
1171
static unsigned int
1172
ovl_stub_size (struct spu_elf_params *params)
1173
0
{
1174
0
  return 16 << params->ovly_flavour >> params->compact_stub;
1175
0
}
1176
1177
static unsigned int
1178
ovl_stub_size_log2 (struct spu_elf_params *params)
1179
0
{
1180
0
  return 4 + params->ovly_flavour - params->compact_stub;
1181
0
}
1182
1183
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler
   .quad xor_pattern
*/

/* Emit one overlay stub of kind STUB_TYPE into the stub section for
   the overlay containing ISEC, targeting DEST in DEST_SEC.  H/IRELA
   identify the referencing symbol/reloc as in count_stub.  Returns
   false on allocation failure or misaligned addresses (which also
   sets htab->stub_err for the alignment case).  */

static bool
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache: every branch site gets its own entry recording
	 the branch address.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return false;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      /* Find the entry count_stub made for this target.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      /* A non-overlay (ovl 0) stub serves overlay references too;
	 don't build a second stub for them here.  */
      if (g->ovl == 0 && ovl != 0)
	return true;

      /* Already built.  */
      if (g->stub_addr != (bfd_vma) -1)
	return true;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All addresses involved must be word aligned.  */
  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return false;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      /* Four instruction stub; see layout comment above.  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      /* Two instruction stub; see layout comment above.  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    /* xgettext:c-format */
	    info->callbacks->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* Non-overlay code branches via ovly_entry[1] instead.  */
      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      /* Define a "NNNNNNNN.ovl_call.<name>[+addend]" symbol on the
	 stub, for the benefit of debuggers/tools reading the map.  */
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
	return false;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, true, true, false);
      free (name);
      if (h == NULL)
	return false;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return true;
}
1484
1485
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1486
   symbols.  */
1487
1488
static bool
1489
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1490
0
{
1491
  /* Symbols starting with _SPUEAR_ need a stub because they may be
1492
     invoked by the PPU.  */
1493
0
  struct bfd_link_info *info = inf;
1494
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
1495
0
  asection *sym_sec;
1496
1497
0
  if ((h->root.type == bfd_link_hash_defined
1498
0
       || h->root.type == bfd_link_hash_defweak)
1499
0
      && h->def_regular
1500
0
      && startswith (h->root.root.string, "_SPUEAR_")
1501
0
      && (sym_sec = h->root.u.def.section) != NULL
1502
0
      && sym_sec->output_section != bfd_abs_section_ptr
1503
0
      && spu_elf_section_data (sym_sec->output_section) != NULL
1504
0
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1505
0
    || htab->params->non_overlay_stubs))
1506
0
    {
1507
0
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1508
0
    }
1509
1510
0
  return true;
1511
0
}
1512
1513
static bool
1514
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1515
0
{
1516
  /* Symbols starting with _SPUEAR_ need a stub because they may be
1517
     invoked by the PPU.  */
1518
0
  struct bfd_link_info *info = inf;
1519
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
1520
0
  asection *sym_sec;
1521
1522
0
  if ((h->root.type == bfd_link_hash_defined
1523
0
       || h->root.type == bfd_link_hash_defweak)
1524
0
      && h->def_regular
1525
0
      && startswith (h->root.root.string, "_SPUEAR_")
1526
0
      && (sym_sec = h->root.u.def.section) != NULL
1527
0
      && sym_sec->output_section != bfd_abs_section_ptr
1528
0
      && spu_elf_section_data (sym_sec->output_section) != NULL
1529
0
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1530
0
    || htab->params->non_overlay_stubs))
1531
0
    {
1532
0
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1533
0
       h->root.u.def.value, sym_sec);
1534
0
    }
1535
1536
0
  return true;
1537
0
}
1538
1539
/* Size or build stubs.  Walks every reloc of every SPU input section;
   with BUILD false each needed stub is counted via count_stub, with
   BUILD true it is emitted via build_stub.  Returns false on error.  */

static bool
process_stubs (struct bfd_link_info *info, bool build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Skip input files that aren't SPU ELF.  */
      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* goto-based cleanup: free relocs (if owned by us)
		     and local syms before bailing out.  */
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (symtab_hdr->contents != (unsigned char *) local_syms)
		    free (local_syms);
		  return false;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters.  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return true;
}
1665
1666
/* Allocate space for overlay call and return stubs.
   Return 0 on error, 1 if no overlays, 2 otherwise.  */

int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* First pass over all relocs: count needed stubs.  */
  if (!process_stubs (info, false))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      /* One stub section per overlay, plus one (index 0) for the
	 non-overlay area.  */
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    /* No stubs were needed, so there are no overlays to manage.  */
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .  struct {
	 .    u32 vma;
	 .    u32 size;
	 .    u32 file_off;
	 .    u32 buf;
	 .  } _ovly_table[];
	 .
	 .  struct {
	 .    u32 mapped;
	 .  } _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
1781
1782
/* Called from ld to place overlay manager data sections.  This is done
1783
   after the overlay manager itself is loaded, mainly so that the
1784
   linker's htab->init section is placed after any other .ovl.init
1785
   sections.  */
1786
1787
void
1788
spu_elf_place_overlay_data (struct bfd_link_info *info)
1789
0
{
1790
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
1791
0
  unsigned int i;
1792
1793
0
  if (htab->stub_sec != NULL)
1794
0
    {
1795
0
      (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1796
1797
0
      for (i = 0; i < htab->num_overlays; ++i)
1798
0
  {
1799
0
    asection *osec = htab->ovl_sec[i];
1800
0
    unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1801
0
    (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1802
0
  }
1803
0
    }
1804
1805
0
  if (htab->params->ovly_flavour == ovly_soft_icache)
1806
0
    (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1807
1808
0
  if (htab->ovtab != NULL)
1809
0
    {
1810
0
      const char *ovout = ".data";
1811
0
      if (htab->params->ovly_flavour == ovly_soft_icache)
1812
0
  ovout = ".bss";
1813
0
      (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1814
0
    }
1815
1816
0
  if (htab->toe != NULL)
1817
0
    (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1818
0
}
1819
1820
/* Functions to handle embedded spu_ovl.o object.  */

/* bfd_openr_iovec "open" callback: the _ovl_stream cookie passed by
   spu_elf_open_builtin_lib is itself the open handle.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
1827
1828
static file_ptr
1829
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1830
         void *stream,
1831
         void *buf,
1832
         file_ptr nbytes,
1833
         file_ptr offset)
1834
0
{
1835
0
  struct _ovl_stream *os;
1836
0
  size_t count;
1837
0
  size_t max;
1838
1839
0
  os = (struct _ovl_stream *) stream;
1840
0
  max = (const char *) os->end - (const char *) os->start;
1841
1842
0
  if ((ufile_ptr) offset >= max)
1843
0
    return 0;
1844
1845
0
  count = nbytes;
1846
0
  if (count > max - offset)
1847
0
    count = max - offset;
1848
1849
0
  memcpy (buf, (const char *) os->start + offset, count);
1850
0
  return count;
1851
0
}
1852
1853
static int
1854
ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1855
        void *stream,
1856
        struct stat *sb)
1857
0
{
1858
0
  struct _ovl_stream *os = (struct _ovl_stream *) stream;
1859
1860
0
  memset (sb, 0, sizeof (*sb));
1861
0
  sb->st_size = (const char *) os->end - (const char *) os->start;
1862
0
  return 0;
1863
0
}
1864
1865
bool
1866
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1867
0
{
1868
0
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1869
0
            "elf32-spu",
1870
0
            ovl_mgr_open,
1871
0
            (void *) stream,
1872
0
            ovl_mgr_pread,
1873
0
            NULL,
1874
0
            ovl_mgr_stat);
1875
0
  return *ovl_bfd != NULL;
1876
0
}
1877
1878
static unsigned int
1879
overlay_index (asection *sec)
1880
0
{
1881
0
  if (sec == NULL
1882
0
      || sec->output_section == bfd_abs_section_ptr)
1883
0
    return 0;
1884
0
  return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1885
0
}
1886
1887
/* Define an STT_OBJECT symbol named NAME in HTAB's overlay table
   section (htab->ovtab).  Returns the hash entry on success, NULL on
   lookup failure or when the symbol is already defined somewhere the
   linker-generated table may not override.  The caller fills in the
   symbol's value/size afterwards.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  /* Create the entry if it does not already exist.  */
  h = elf_link_hash_lookup (&htab->elf, name, true, false, false);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      /* Undefined (or only referenced) so far: claim it for ovtab.  */
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else if (h->root.u.def.section->owner != NULL)
    {
      /* Already defined by some input object: that clashes with the
	 linker-generated table entry.  */
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB is not allowed to define %s"),
			  h->root.u.def.section->owner,
			  h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  else
    {
      /* No owner means the definition came from a linker script.  */
      _bfd_error_handler (_("you are not allowed to define %s in a script"),
			  h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}
1928
1929
/* Fill in all stubs and the overlay tables.  */

/* Called late in the link: allocates and fills the overlay stub
   sections, then builds either the soft-icache management symbols or
   the classic _ovly_table/_ovly_buf_table, and finally defines _EAR_.
   Returns false (with bfd_error set) on any inconsistency.  */

static bool
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->num_overlays != 0)
    {
      /* The overlay manager entry points must live outside any
	 overlay, otherwise calling them could require a stub to
	 themselves.  */
      for (i = 0; i < 2; i++)
	{
	  h = htab->ovly_entry[i];
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular)
	    {
	      s = h->root.u.def.section->output_section;
	      if (spu_elf_section_data (s)->u.o.ovl_index)
		{
		  _bfd_error_handler (_("%s in overlay section"),
				      h->root.root.string);
		  bfd_set_error (bfd_error_bad_value);
		  return false;
		}
	    }
	}
    }

  if (htab->stub_sec != NULL)
    {
      /* Allocate zeroed contents for each non-empty stub section.
	 rawsize remembers the sized amount; size is reset to 0 and
	 re-accumulated while emitting stubs so the two can be
	 cross-checked below.  */
      for (i = 0; i <= htab->num_overlays; i++)
	if (htab->stub_sec[i]->size != 0)
	  {
	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						      htab->stub_sec[i]->size);
	    if (htab->stub_sec[i]->contents == NULL)
	      return false;
	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	    htab->stub_sec[i]->size = 0;
	  }

      /* Fill in all the stubs.  */
      process_stubs (info, true);
      if (!htab->stub_err)
	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
	{
	  _bfd_error_handler (_("overlay stub relocation overflow"));
	  bfd_set_error (bfd_error_bad_value);
	  return false;
	}

      /* Verify that emitting stubs produced exactly the sizes
	 computed in the sizing pass.  */
      for (i = 0; i <= htab->num_overlays; i++)
	{
	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	    {
	      _bfd_error_handler  (_("stubs don't match calculated size"));
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }
	  htab->stub_sec[i]->rawsize = 0;
	}
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return true;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return false;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache flavour: lay out the cache management arrays in
	 ovtab and publish their addresses/parameters as absolute
	 symbols for the icache manager code.  */
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return false;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return false;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return false;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return false;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return false;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      /* Each table entry is 16 bytes: vma, size, file_off,
		 buffer number.  Entry 0 is the non-overlay area.  */
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  /* _EAR_ marks the "effective address reference" table in .toe.  */
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return false;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return true;
}
2173
2174
/* Check that all loadable section VMAs lie in the range
2175
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
2176
2177
asection *
2178
spu_elf_check_vma (struct bfd_link_info *info)
2179
0
{
2180
0
  struct elf_segment_map *m;
2181
0
  unsigned int i;
2182
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
2183
0
  bfd *abfd = info->output_bfd;
2184
0
  bfd_vma hi = htab->params->local_store_hi;
2185
0
  bfd_vma lo = htab->params->local_store_lo;
2186
2187
0
  htab->local_store = hi + 1 - lo;
2188
2189
0
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2190
0
    if (m->p_type == PT_LOAD)
2191
0
      for (i = 0; i < m->count; i++)
2192
0
  if (m->sections[i]->size != 0
2193
0
      && (m->sections[i]->vma < lo
2194
0
    || m->sections[i]->vma > hi
2195
0
    || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2196
0
    return m->sections[i];
2197
2198
0
  return NULL;
2199
0
}
2200
2201
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   If a store of lr is found save the instruction offset to *LR_STORE.
   If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.  Returns the (non-positive) change applied to sp, or 0
   if no stack adjustment was found before leaving the prologue.  */

static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  /* Simulated contents of the preferred slot of each of the 128 SPU
     registers, tracked while stepping through the prologue.  */
  int32_t reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      uint32_t imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Decode the RT and RA fields common to most SPU insn forms.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  imm >>= 7;
	  /* Sign-extend the 10-bit immediate.  */
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive result means the frame is being torn down,
		 not set up, so stop.  */
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  /* Expand the upper nibble of the mask into byte lanes of
	     the preferred slot.  */
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  return 0;
}
2342
2343
/* qsort predicate to sort symbols by section and value.  */
2344
2345
static Elf_Internal_Sym *sort_syms_syms;
2346
static asection **sort_syms_psecs;
2347
2348
static int
2349
sort_syms (const void *a, const void *b)
2350
0
{
2351
0
  Elf_Internal_Sym *const *s1 = a;
2352
0
  Elf_Internal_Sym *const *s2 = b;
2353
0
  asection *sec1,*sec2;
2354
0
  bfd_signed_vma delta;
2355
2356
0
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2357
0
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2358
2359
0
  if (sec1 != sec2)
2360
0
    return sec1->index - sec2->index;
2361
2362
0
  delta = (*s1)->st_value - (*s2)->st_value;
2363
0
  if (delta != 0)
2364
0
    return delta < 0 ? -1 : 1;
2365
2366
0
  delta = (*s2)->st_size - (*s1)->st_size;
2367
0
  if (delta != 0)
2368
0
    return delta < 0 ? -1 : 1;
2369
2370
0
  return *s1 < *s2 ? -1 : 1;
2371
0
}
2372
2373
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2374
   entries for section SEC.  */
2375
2376
static struct spu_elf_stack_info *
2377
alloc_stack_info (asection *sec, int max_fun)
2378
0
{
2379
0
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2380
0
  bfd_size_type amt;
2381
2382
0
  amt = sizeof (struct spu_elf_stack_info);
2383
0
  amt += (max_fun - 1) * sizeof (struct function_info);
2384
0
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
2385
0
  if (sec_data->u.i.stack_info != NULL)
2386
0
    sec_data->u.i.stack_info->max_fun = max_fun;
2387
0
  return sec_data->u.i.stack_info;
2388
0
}
2389
2390
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.  */

/* SYM_H is either an Elf_Internal_Sym * (GLOBAL false) or an
   elf_link_hash_entry * (GLOBAL true).  IS_FUNC marks symbols known to
   be function entry points.  Returns the (possibly pre-existing)
   entry, or NULL on allocation failure.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bool global,
		       bool is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  if (sinfo == NULL)
    {
      /* First function seen in this section; start with room for 20.  */
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* Extract start offset and size from whichever symbol form we got.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last entry starting at or before OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = true;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = true;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  if (sinfo->num_fun >= sinfo->max_fun)
    {
      /* Grow the array by 50% plus a little; zero only the new tail.  */
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Shift later entries up and insert the new one at position i+1,
     keeping the array sorted by lo address.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* -1 means "not found yet"; filled in by the prologue scan.  */
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  /* Store the stack frame size as a positive number.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
2484
2485
/* Return the name of FUN.  */
2486
2487
static const char *
2488
func_name (struct function_info *fun)
2489
0
{
2490
0
  asection *sec;
2491
0
  bfd *ibfd;
2492
0
  Elf_Internal_Shdr *symtab_hdr;
2493
2494
0
  while (fun->start != NULL)
2495
0
    fun = fun->start;
2496
2497
0
  if (fun->global)
2498
0
    return fun->u.h->root.root.string;
2499
2500
0
  sec = fun->sec;
2501
0
  if (fun->u.sym->st_name == 0)
2502
0
    {
2503
0
      size_t len = strlen (sec->name);
2504
0
      char *name = bfd_malloc (len + 10);
2505
0
      if (name == NULL)
2506
0
  return "(null)";
2507
0
      sprintf (name, "%s+%lx", sec->name,
2508
0
         (unsigned long) fun->u.sym->st_value & 0xffffffff);
2509
0
      return name;
2510
0
    }
2511
0
  ibfd = sec->owner;
2512
0
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2513
0
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2514
0
}
2515
2516
/* Read the instruction at OFF in SEC.  Return true iff the instruction
2517
   is a nop, lnop, or stop 0 (all zero insn).  */
2518
2519
static bool
2520
is_nop (asection *sec, bfd_vma off)
2521
0
{
2522
0
  unsigned char insn[4];
2523
2524
0
  if (off + 4 > sec->size
2525
0
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2526
0
    return false;
2527
0
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2528
0
    return true;
2529
0
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2530
0
    return true;
2531
0
  return false;
2532
0
}
2533
2534
/* Extend the range of FUN to cover nop padding up to LIMIT.
2535
   Return TRUE iff some instruction other than a NOP was found.  */
2536
2537
static bool
2538
insns_at_end (struct function_info *fun, bfd_vma limit)
2539
0
{
2540
0
  bfd_vma off = (fun->hi + 3) & -4;
2541
2542
0
  while (off < limit && is_nop (fun->sec, off))
2543
0
    off += 4;
2544
0
  if (off < limit)
2545
0
    {
2546
0
      fun->hi = off;
2547
0
      return true;
2548
0
    }
2549
0
  fun->hi = limit;
2550
0
  return false;
2551
0
}
2552
2553
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.  */

static bool
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bool gaps = false;

  /* No function info at all: nothing to check, report no gaps.  */
  if (sinfo == NULL)
    return false;

  /* The fun[] array is sorted by lo; compare each neighbouring pair.  */
  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	/* xgettext:c-format */
	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      /* Non-nop code between functions means unclaimed code.  */
      gaps = true;

  if (sinfo->num_fun == 0)
    gaps = true;
  else
    {
      /* Also check the space before the first and after the last
	 known function.  */
      if (sinfo->fun[0].lo != 0)
	gaps = true;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = true;
    }
  return gaps;
}
2599
2600
/* Search current function info for a function that contains address
2601
   OFFSET in section SEC.  */
2602
2603
static struct function_info *
2604
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2605
0
{
2606
0
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2607
0
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2608
0
  int lo, hi, mid;
2609
2610
0
  lo = 0;
2611
0
  hi = sinfo->num_fun;
2612
0
  while (lo < hi)
2613
0
    {
2614
0
      mid = (lo + hi) / 2;
2615
0
      if (offset < sinfo->fun[mid].lo)
2616
0
  hi = mid;
2617
0
      else if (offset >= sinfo->fun[mid].hi)
2618
0
  lo = mid + 1;
2619
0
      else
2620
0
  return &sinfo->fun[mid];
2621
0
    }
2622
  /* xgettext:c-format */
2623
0
  info->callbacks->einfo (_("%pA:0x%v not found in function table\n"),
2624
0
        sec, offset);
2625
0
  bfd_set_error (bfd_error_bad_value);
2626
0
  return NULL;
2627
0
}
2628
2629
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
   if CALLEE was new.  If this function return FALSE, CALLEE should
   be freed.  */

static bool
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  /* Scan the existing call list for a call to the same function.  */
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
	/* Tail calls use less stack than normal calls.  Retain entry
	   for normal call over one for tail call.  */
	p->is_tail &= callee->is_tail;
	if (!p->is_tail)
	  {
	    /* A real call: the destination is a function in its own
	       right, not a continuation of the caller.  */
	    p->fun->start = NULL;
	    p->fun->is_func = true;
	  }
	p->count += callee->count;
	/* Reorder list so most recent call is first.  */
	*pp = p->next;
	p->next = caller->call_list;
	caller->call_list = p;
	return false;
      }

  /* Not present: push CALLEE on the front of the list.  */
  callee->next = caller->call_list;
  caller->call_list = callee;
  return true;
}
2660
2661
/* Copy CALL and insert the copy into CALLER.  */
2662
2663
static bool
2664
copy_callee (struct function_info *caller, const struct call_info *call)
2665
0
{
2666
0
  struct call_info *callee;
2667
0
  callee = bfd_malloc (sizeof (*callee));
2668
0
  if (callee == NULL)
2669
0
    return false;
2670
0
  *callee = *call;
2671
0
  if (!insert_callee (caller, callee))
2672
0
    free (callee);
2673
0
  return true;
2674
0
}
2675
2676
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
2677
   overlay stub sections.  */
2678
2679
static bool
2680
interesting_section (asection *s)
2681
0
{
2682
0
  return (s->output_section != bfd_abs_section_ptr
2683
0
    && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2684
0
        == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2685
0
    && s->size != 0);
2686
0
}
2687
2688
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bool
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Static so the non-code-section warning is only given once per
     link, not per reloc.  */
  static bool warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return true;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return false;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bool nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      r_type = ELF32_R_TYPE (irela->r_info);
      /* Only REL16/ADDR16 relocs can sit on branch instructions.  */
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return false;

      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = false;
      if (!nonbranch)
	{
	  unsigned char insn[4];

	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return false;
	  if (is_branch (insn))
	    {
	      /* br[a]sl opcodes are calls; plain branches are
		 potential tail calls.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      /* Extract the overlay priority encoded in the branch
		 instruction's immediate field.  */
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      /* xgettext:c-format */
		      (_("%pB(%pA+0x%v): call to non-code section"
			 " %pB(%pA), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = true;
		  continue;
		}
	    }
	  else
	    {
	      /* A REL16/ADDR16 reloc on a non-branch insn; treat it
		 like any other data reference, except for hints.  */
	      nonbranch = true;
	      if (is_hint (insn))
		continue;
	    }
	}

      if (nonbranch)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    {
	      if (call_tree && spu_hash_table (info)->params->auto_overlay)
		spu_hash_table (info)->non_ovly_stub += 1;
	      /* If the symbol type is STT_FUNC then this must be a
		 function pointer initialisation.  */
	      continue;
	    }
	  /* Ignore data references.  */
	  if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	      != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	    continue;
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code label.  */
	}

      /* Compute the target address of the reference.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record the target as a function start.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* The reference lands inside the symbol; make a fake
		 local symbol at the exact target address.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return false;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, false, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, true, is_call);
	  if (fun == NULL)
	    return false;
	  /* Free the fake symbol unless maybe_insert_function kept it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: build the call graph edge caller -> callee.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return false;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return false;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return false;
      callee->is_tail = !is_call;
      callee->is_pasted = false;
      callee->broken_cycle = false;
      callee->priority = priority;
      callee->count = nonbranch? 0 : 1;
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = true;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      /* Link the callee part to the root part of the caller,
		 unless that would make it its own start.  */
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      /* Both already have roots; if they differ, the callee
		 must really be a separate function.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = true;
		}
	    }
	}
    }

  return true;
}
2904
2905
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.  */

static bool
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Create a fake local symbol covering the whole section and record
     it as a function fragment.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return false;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, false, false);
  if (!fun)
    return false;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Record a "pasted" tail edge from the preceding
		 fragment to this one, so they are treated as one
		 function by the stack/overlay analysis.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return false;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = true;
	      callee->is_pasted = true;
	      callee->broken_cycle = false;
	      callee->priority = 0;
	      callee->count = 1;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return true;
	    }
	  break;
	}
      /* Remember the last function of each preceding input section in
	 output order; it becomes the candidate predecessor.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return true;
}
2964
2965
/* Map address ranges in code sections to functions.  */

static bool
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  Elf_Internal_Sym ***psym_arr;	/* Per-bfd sorted symbol pointer arrays.  */
  asection ***sec_arr;		/* Per-bfd symbol-to-section maps.  */
  bool gaps = false;

  /* Count input bfds so we can size the per-bfd arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    bfd_idx++;

  /* NOTE(review): the early "return false" paths below leak these two
     arrays and their elements; upstream tolerates this since the link
     is aborted anyway.  */
  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return false;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return false;

  /* Pass 1: collect and sort function symbols per input bfd, and
     install properly typed STT_FUNC symbols.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all: any interesting code section here is a
	     coverage gap we must fill via relocations later.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = true;
		  break;
		}
	  continue;
	}

      /* Don't use cached symbols since the generic ELF linker
	 code only reads local symbols, and we need globals too.  */
      free (symtab_hdr->contents);
      symtab_hdr->contents = NULL;
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return false;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return false;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return false;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  Allocate stack_info arrays
	 sized by the number of symbols in each section.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return false;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, false, true))
		return false;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, false))
	      return false;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check coverage; skip bfds whose sections are now fully
	     mapped.  */
	  gaps = false;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, false, false))
		    return false;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  extern const bfd_target spu_elf32_vec;
	  asection *sec;

	  if (ibfd->xvec != &spu_elf32_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards so each function's hi bound is the
		       following function's lo bound.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return false;
	      }
	}
    }

  /* Release the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return true;
}
3204
3205
/* Iterate over all function_info we have collected, calling DOIT on
3206
   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
3207
   if ROOT_ONLY.  */
3208
3209
static bool
3210
for_each_node (bool (*doit) (struct function_info *,
3211
           struct bfd_link_info *,
3212
           void *),
3213
         struct bfd_link_info *info,
3214
         void *param,
3215
         int root_only)
3216
0
{
3217
0
  bfd *ibfd;
3218
3219
0
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3220
0
    {
3221
0
      extern const bfd_target spu_elf32_vec;
3222
0
      asection *sec;
3223
3224
0
      if (ibfd->xvec != &spu_elf32_vec)
3225
0
  continue;
3226
3227
0
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3228
0
  {
3229
0
    struct _spu_elf_section_data *sec_data;
3230
0
    struct spu_elf_stack_info *sinfo;
3231
3232
0
    if ((sec_data = spu_elf_section_data (sec)) != NULL
3233
0
        && (sinfo = sec_data->u.i.stack_info) != NULL)
3234
0
      {
3235
0
        int i;
3236
0
        for (i = 0; i < sinfo->num_fun; ++i)
3237
0
    if (!root_only || !sinfo->fun[i].non_root)
3238
0
      if (!doit (&sinfo->fun[i], info, param))
3239
0
        return false;
3240
0
      }
3241
0
  }
3242
0
    }
3243
0
  return true;
3244
0
}
3245
3246
/* Transfer call info attached to struct function_info entries for
3247
   all of a given function's sections to the first entry.  */
3248
3249
static bool
3250
transfer_calls (struct function_info *fun,
3251
    struct bfd_link_info *info ATTRIBUTE_UNUSED,
3252
    void *param ATTRIBUTE_UNUSED)
3253
0
{
3254
0
  struct function_info *start = fun->start;
3255
3256
0
  if (start != NULL)
3257
0
    {
3258
0
      struct call_info *call, *call_next;
3259
3260
0
      while (start->start != NULL)
3261
0
  start = start->start;
3262
0
      for (call = fun->call_list; call != NULL; call = call_next)
3263
0
  {
3264
0
    call_next = call->next;
3265
0
    if (!insert_callee (start, call))
3266
0
      free (call);
3267
0
  }
3268
0
      fun->call_list = NULL;
3269
0
    }
3270
0
  return true;
3271
0
}
3272
3273
/* Mark nodes in the call graph that are called by some other node.  */
3274
3275
static bool
3276
mark_non_root (struct function_info *fun,
3277
         struct bfd_link_info *info ATTRIBUTE_UNUSED,
3278
         void *param ATTRIBUTE_UNUSED)
3279
0
{
3280
0
  struct call_info *call;
3281
3282
0
  if (fun->visit1)
3283
0
    return true;
3284
0
  fun->visit1 = true;
3285
0
  for (call = fun->call_list; call; call = call->next)
3286
0
    {
3287
0
      call->fun->non_root = true;
3288
0
      mark_non_root (call->fun, 0, 0);
3289
0
    }
3290
0
  return true;
3291
0
}
3292
3293
/* Remove cycles from the call graph.  Set depth of nodes.
   PARAM points to an unsigned int holding the depth at which FUN is
   visited; on return it is updated to the maximum depth reached in
   FUN's call subtree.  Back-edges found while FUN's subtree is being
   explored (marking set) are flagged broken_cycle rather than
   removed.  */

static bool
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = true;
  fun->marking = true;		/* On the current DFS path.  */

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted continuations stay at the same depth; real calls go
	 one deeper.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return false;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  /* Back-edge: callee is an ancestor on the current path.  */
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      /* xgettext:c-format */
	      info->callbacks->info (_("stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  call->broken_cycle = true;
	}
      callp = &call->next;
    }
  fun->marking = false;
  *(unsigned int *) param = max_depth;
  return true;
}
3343
3344
/* Check that we actually visited all nodes in remove_cycles.  If we
3345
   didn't, then there is some cycle in the call graph not attached to
3346
   any root node.  Arbitrarily choose a node in the cycle as a new
3347
   root and break the cycle.  */
3348
3349
static bool
3350
mark_detached_root (struct function_info *fun,
3351
        struct bfd_link_info *info,
3352
        void *param)
3353
0
{
3354
0
  if (fun->visit2)
3355
0
    return true;
3356
0
  fun->non_root = false;
3357
0
  *(unsigned int *) param = 0;
3358
0
  return remove_cycles (fun, info, param);
3359
0
}
3360
3361
/* Populate call_list for each function.  */

static bool
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  unsigned int depth;

  /* Record all calls found via relocations in SPU input sections.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (!mark_functions_via_relocs (sec, info, true))
	  return false;
    }

  /* Transfer call info from hot/cold section part of function
     to main entry.  */
  if (!spu_hash_table (info)->params->auto_overlay
      && !for_each_node (transfer_calls, info, 0, false))
    return false;

  /* Find the call graph root(s).  */
  if (!for_each_node (mark_non_root, info, 0, false))
    return false;

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  depth = 0;
  if (!for_each_node (remove_cycles, info, &depth, true))
    return false;

  /* Handle any cycle not reachable from a root.  */
  return for_each_node (mark_detached_root, info, &depth, false);
}
3400
3401
/* qsort predicate to sort calls by priority, max_depth then count.  */
3402
3403
static int
3404
sort_calls (const void *a, const void *b)
3405
0
{
3406
0
  struct call_info *const *c1 = a;
3407
0
  struct call_info *const *c2 = b;
3408
0
  int delta;
3409
3410
0
  delta = (*c2)->priority - (*c1)->priority;
3411
0
  if (delta != 0)
3412
0
    return delta;
3413
3414
0
  delta = (*c2)->max_depth - (*c1)->max_depth;
3415
0
  if (delta != 0)
3416
0
    return delta;
3417
3418
0
  delta = (*c2)->count - (*c1)->count;
3419
0
  if (delta != 0)
3420
0
    return delta;
3421
3422
0
  return (char *) c1 - (char *) c2;
3423
0
}
3424
3425
/* Parameter block for mark_overlay_section: accumulates the size of
   the largest function (text plus optional rodata) placed in an
   overlay.  */
struct _mos_param {
  unsigned int max_overlay_size;
};
3428
3429
/* Set linker_mark and gc_mark on any sections that we will put in
   overlays.  These flags are used by the generic ELF linker, but we
   won't be continuing on to bfd_elf_final_link so it is OK to use
   them.  linker_mark is clear before we get here.  Set segment_mark
   on sections that are part of a pasted function (excluding the last
   section).

   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections since
   they may be shared between functions.

   Sort the call graph so that the deepest nodes will be visited
   first.  */

static bool
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return true;

  fun->visit4 = true;
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || startswith (fun->sec->name, ".text.ia.")
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  ".text" -> ".rodata",
	     ".text.foo" -> ".rodata.foo",
	     ".gnu.linkonce.t.foo" -> ".gnu.linkonce.r.foo".  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return false;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (startswith (fun->sec->name, ".text."))
	    {
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return false;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (startswith (fun->sec->name, ".gnu.linkonce.t."))
	    {
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return false;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      /* Prefer a rodata section in the same section group,
		 falling back to a lookup by name.  */
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      /* Too big for a cache line; drop the rodata.  */
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      /* Sort the call list into priority/depth/count order via a
	 temporary array, then rebuild the singly linked list.  */
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return false;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return false;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || startswith (fun->sec->output_section->name, ".ovl.init"))
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return true;
}
3595
3596
/* If non-zero then unmark functions called from those within sections
   that we need to unmark.  Unfortunately this isn't reliable since the
   call graph cannot know the destination of function pointer calls.  */
#define RECURSE_UNMARK 0

/* Parameter block for unmark_overlay_section: functions whose text
   lives in (or whose output lands in) the excluded sections are
   removed from overlay consideration.  CLEARING counts nested
   exclusions when RECURSE_UNMARK is enabled.  */
struct _uos_param {
  asection *exclude_input_section;
  asection *exclude_output_section;
  unsigned long clearing;
};
3606
3607
/* Undo some of mark_overlay_section's work.  */
3608
3609
static bool
3610
unmark_overlay_section (struct function_info *fun,
3611
      struct bfd_link_info *info,
3612
      void *param)
3613
0
{
3614
0
  struct call_info *call;
3615
0
  struct _uos_param *uos_param = param;
3616
0
  unsigned int excluded = 0;
3617
3618
0
  if (fun->visit5)
3619
0
    return true;
3620
3621
0
  fun->visit5 = true;
3622
3623
0
  excluded = 0;
3624
0
  if (fun->sec == uos_param->exclude_input_section
3625
0
      || fun->sec->output_section == uos_param->exclude_output_section)
3626
0
    excluded = 1;
3627
3628
0
  if (RECURSE_UNMARK)
3629
0
    uos_param->clearing += excluded;
3630
3631
0
  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3632
0
    {
3633
0
      fun->sec->linker_mark = 0;
3634
0
      if (fun->rodata)
3635
0
  fun->rodata->linker_mark = 0;
3636
0
    }
3637
3638
0
  for (call = fun->call_list; call != NULL; call = call->next)
3639
0
    if (!call->broken_cycle
3640
0
  && !unmark_overlay_section (call->fun, info, param))
3641
0
      return false;
3642
3643
0
  if (RECURSE_UNMARK)
3644
0
    uos_param->clearing -= excluded;
3645
0
  return true;
3646
0
}
3647
3648
/* Parameter block for collect_lib_sections: LIB_SIZE is the remaining
   non-overlay budget; LIB_SECTIONS is the output cursor into an array
   of (text, rodata) section pairs.  */
struct _cl_param {
  unsigned int lib_size;
  asection **lib_sections;
};
3652
3653
/* Add sections we have marked as belonging to overlays to an array
   for consideration as non-overlay sections.  The array consist of
   pairs of sections, (text,rodata), for functions in the call graph.  */

static bool
collect_lib_sections (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct _cl_param *lib_param = param;
  struct call_info *call;
  unsigned int size;

  if (fun->visit6)
    return true;

  fun->visit6 = true;
  /* Only consider overlay candidates that aren't pasted parts.  */
  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
    return true;

  size = fun->sec->size;
  if (fun->rodata)
    size += fun->rodata->size;

  if (size <= lib_param->lib_size)
    {
      /* Emit the (text, rodata-or-NULL) pair; gc_mark is cleared so
	 the section is only collected once.  */
      *lib_param->lib_sections++ = fun->sec;
      fun->sec->gc_mark = 0;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  *lib_param->lib_sections++ = fun->rodata;
	  fun->rodata->gc_mark = 0;
	}
      else
	*lib_param->lib_sections++ = NULL;
    }

  /* Recurse; the return value is deliberately ignored since this
     function never fails.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle)
      collect_lib_sections (call->fun, info, param);

  return true;
}
3696
3697
/* qsort predicate to sort sections by call count.  */
3698
3699
static int
3700
sort_lib (const void *a, const void *b)
3701
0
{
3702
0
  asection *const *s1 = a;
3703
0
  asection *const *s2 = b;
3704
0
  struct _spu_elf_section_data *sec_data;
3705
0
  struct spu_elf_stack_info *sinfo;
3706
0
  int delta;
3707
3708
0
  delta = 0;
3709
0
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
3710
0
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3711
0
    {
3712
0
      int i;
3713
0
      for (i = 0; i < sinfo->num_fun; ++i)
3714
0
  delta -= sinfo->fun[i].call_count;
3715
0
    }
3716
3717
0
  if ((sec_data = spu_elf_section_data (*s2)) != NULL
3718
0
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3719
0
    {
3720
0
      int i;
3721
0
      for (i = 0; i < sinfo->num_fun; ++i)
3722
0
  delta += sinfo->fun[i].call_count;
3723
0
    }
3724
3725
0
  if (delta != 0)
3726
0
    return delta;
3727
3728
0
  return s1 - s2;
3729
0
}
3730
3731
/* Remove some sections from those marked to be in overlays.  Choose
   those that are called from many places, likely library functions.
   Returns the remaining non-overlay budget, or (unsigned int) -1 on
   error.  */

static unsigned int
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
{
  bfd *ibfd;
  asection **lib_sections;
  unsigned int i, lib_count;
  struct _cl_param collect_lib_param;
  struct function_info dummy_caller;	/* Accumulates pending stubs.  */
  struct spu_link_hash_table *htab;

  memset (&dummy_caller, 0, sizeof (dummy_caller));

  /* Count candidate code sections small enough to possibly fit.  */
  lib_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (sec->linker_mark
	    && sec->size < lib_size
	    && (sec->flags & SEC_CODE) != 0)
	  lib_count += 1;
    }

  /* NOTE(review): error paths below leak lib_sections; upstream
     tolerates this since the link is aborted anyway.  */
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
  if (lib_sections == NULL)
    return (unsigned int) -1;
  collect_lib_param.lib_size = lib_size;
  collect_lib_param.lib_sections = lib_sections;
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
		      true))
    return (unsigned int) -1;
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;

  /* Sort sections so that those with the most calls are first.  */
  if (lib_count > 1)
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);

  htab = spu_hash_table (info);
  for (i = 0; i < lib_count; i++)
    {
      unsigned int tmp, stub_size;
      asection *sec;
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      sec = lib_sections[2 * i];
      /* If this section is OK, its size must be less than lib_size.  */
      tmp = sec->size;
      /* If it has a rodata section, then add that too.  */
      if (lib_sections[2 * i + 1])
	tmp += lib_sections[2 * i + 1]->size;
      /* Add any new overlay call stubs needed by the section.  */
      stub_size = 0;
      if (tmp < lib_size
	  && (sec_data = spu_elf_section_data (sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int k;
	  struct call_info *call;

	  for (k = 0; k < sinfo->num_fun; ++k)
	    for (call = sinfo->fun[k].call_list; call; call = call->next)
	      if (call->fun->sec->linker_mark)
		{
		  /* Only count a stub once per distinct callee.  */
		  struct call_info *p;
		  for (p = dummy_caller.call_list; p; p = p->next)
		    if (p->fun == call->fun)
		      break;
		  if (!p)
		    stub_size += ovl_stub_size (htab->params);
		}
	}
      if (tmp + stub_size < lib_size)
	{
	  struct call_info **pp, *p;

	  /* This section fits.  Mark it as non-overlay.  */
	  lib_sections[2 * i]->linker_mark = 0;
	  if (lib_sections[2 * i + 1])
	    lib_sections[2 * i + 1]->linker_mark = 0;
	  lib_size -= tmp + stub_size;
	  /* Call stubs to the section we just added are no longer
	     needed.  */
	  pp = &dummy_caller.call_list;
	  while ((p = *pp) != NULL)
	    if (!p->fun->sec->linker_mark)
	      {
		lib_size += ovl_stub_size (htab->params);
		*pp = p->next;
		free (p);
	      }
	    else
	      pp = &p->next;
	  /* Add new call stubs to dummy_caller.  */
	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->u.i.stack_info) != NULL)
	    {
	      int k;
	      struct call_info *call;

	      for (k = 0; k < sinfo->num_fun; ++k)
		for (call = sinfo->fun[k].call_list;
		     call;
		     call = call->next)
		  if (call->fun->sec->linker_mark)
		    {
		      struct call_info *callee;
		      callee = bfd_malloc (sizeof (*callee));
		      if (callee == NULL)
			return (unsigned int) -1;
		      *callee = *call;
		      if (!insert_callee (&dummy_caller, callee))
			free (callee);
		    }
	    }
	}
    }

  /* Discard the remaining pending-stub records.  */
  while (dummy_caller.call_list != NULL)
    {
      struct call_info *call = dummy_caller.call_list;
      dummy_caller.call_list = call->next;
      free (call);
    }

  /* Restore gc_mark, cleared by collect_lib_sections.  */
  for (i = 0; i < 2 * lib_count; i++)
    if (lib_sections[i])
      lib_sections[i]->gc_mark = 1;
  free (lib_sections);
  return lib_size;
}
3866
3867
/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.

   PARAM is really an asection *** cursor into the output array: each
   selected section is stored through it and the cursor advanced.  For
   every function two slots are written: the code section and either
   its rodata section or NULL.  */

static bool
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bool added_fun;
  asection ***ovly_sections = param;

  /* visit7 guards against processing a node twice.  */
  if (fun->visit7)
    return true;

  fun->visit7 = true;
  /* Descend into the first non-pasted callee before adding FUN
     itself, so the deepest section is emitted first.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return false;
	break;
      }

  added_fun = false;
  /* Only sections chosen as overlay candidates (linker_mark) that
     have not yet been emitted (gc_mark) are added.  */
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	/* Keep the array in code/rodata pairs even when there is no
	   rodata for this function.  */
	*(*ovly_sections)++ = NULL;
      added_fun = true;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* A section with segment_mark set must have a pasted
		 callee; anything else indicates a corrupt call tree.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now walk all remaining callees.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return false;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also collect overlays for every other function that lives in
	 the section we just added.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return false;
	}
    }

  return true;
}
3953
3954
/* Parameter block threaded through sum_stack via for_each_node.  */
struct _sum_stack_param {
  /* Cumulative stack of the node just visited (output of each
     recursive sum_stack call).  */
  size_t cum_stack;
  /* Maximum cumulative stack over all call graph root nodes.  */
  size_t overall_stack;
  /* Whether to define __stack_* absolute symbols for each function.  */
  bool emit_stack_syms;
};
3959
3960
/* Descend the call graph for FUN, accumulating total stack required.

   PARAM is a struct _sum_stack_param *.  On return its cum_stack field
   holds FUN's cumulative stack requirement, and overall_stack is
   updated for root nodes.  Optionally prints per-function stack info
   and emits __stack_* symbols.  Returns false on allocation failure or
   failure of a recursive call.  */

static bool
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bool has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* visit3 set means fun->stack already holds the cumulative value
     from a previous visit.  */
  if (fun->visit3)
    return true;

  has_call = false;
  max = NULL;		/* Callee responsible for the maximum path.  */
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = true;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return false;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = true;

  /* Root nodes contribute to the overall maximum.  */
  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* When auto-overlaying, we are only interested in the numbers, not
     the report or the symbols.  */
  if (htab->params->auto_overlay)
    return true;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info ("  %s: 0x%v\n", f1, (bfd_vma) cum_stack);
      info->callbacks->minfo ("%s: 0x%v 0x%v\n",
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_("  calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		/* '*' marks the callee on the max stack path, 't' a
		   tail call.  */
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo ("   %s%s %s\n", ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* 18 covers "__stack_" plus up to 8 hex digits, '_' and NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return false;

      /* Local functions get the section id folded into the symbol
	 name to keep it unique.  */
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, true, true, false);
      free (name);
      /* Only define the symbol if the user has not already done so.  */
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return true;
}
4075
4076
/* SEC is part of a pasted function.  Return the call_info for the
4077
   next section of this function.  */
4078
4079
static struct call_info *
4080
find_pasted_call (asection *sec)
4081
0
{
4082
0
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4083
0
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4084
0
  struct call_info *call;
4085
0
  int k;
4086
4087
0
  for (k = 0; k < sinfo->num_fun; ++k)
4088
0
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4089
0
      if (call->is_pasted)
4090
0
  return call;
4091
0
  abort ();
4092
0
  return 0;
4093
0
}
4094
4095
/* qsort predicate to sort bfds by file name.  */
4096
4097
static int
4098
sort_bfds (const void *a, const void *b)
4099
0
{
4100
0
  bfd *const *abfd1 = a;
4101
0
  bfd *const *abfd2 = b;
4102
4103
0
  return filename_cmp (bfd_get_filename (*abfd1), bfd_get_filename (*abfd2));
4104
0
}
4105
4106
/* Print to SCRIPT the input-section statements for one overlay, i.e.
   all entries of OVLY_SECTIONS starting at BASE that map to OVLYNUM.
   Code sections are emitted first, then their rodata companions,
   with pasted continuation sections following their first section.
   Returns the index of the first entry belonging to the next overlay,
   or (unsigned int) -1 on write error.  */

static unsigned int
print_one_overlay_section (FILE *script,
			   unsigned int base,
			   unsigned int count,
			   unsigned int ovlynum,
			   unsigned int *ovly_map,
			   asection **ovly_sections,
			   struct bfd_link_info *info)
{
  unsigned int j;

  /* First pass: code sections (even array slots).  */
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j];

      /* Emit "archive:member (section)" or "file (section)".  */
      if (fprintf (script, "   %s%c%s (%s)\n",
		   (sec->owner->my_archive != NULL
		    ? bfd_get_filename (sec->owner->my_archive) : ""),
		   info->path_separator,
		   bfd_get_filename (sec->owner),
		   sec->name) <= 0)
	return -1;
      if (sec->segment_mark)
	{
	  /* Follow the chain of pasted sections; they must be placed
	     immediately after their first section.  */
	  struct call_info *call = find_pasted_call (sec);
	  while (call != NULL)
	    {
	      struct function_info *call_fun = call->fun;
	      sec = call_fun->sec;
	      if (fprintf (script, "   %s%c%s (%s)\n",
			   (sec->owner->my_archive != NULL
			    ? bfd_get_filename (sec->owner->my_archive) : ""),
			   info->path_separator,
			   bfd_get_filename (sec->owner),
			   sec->name) <= 0)
		return -1;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  break;
	    }
	}
    }

  /* Second pass: rodata sections (odd array slots, may be NULL).  */
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j + 1];
      if (sec != NULL
	  && fprintf (script, "   %s%c%s (%s)\n",
		      (sec->owner->my_archive != NULL
		       ? bfd_get_filename (sec->owner->my_archive) : ""),
		      info->path_separator,
		      bfd_get_filename (sec->owner),
		      sec->name) <= 0)
	return -1;

      sec = ovly_sections[2 * j];
      if (sec->segment_mark)
	{
	  /* And the rodata of each pasted continuation.  */
	  struct call_info *call = find_pasted_call (sec);
	  while (call != NULL)
	    {
	      struct function_info *call_fun = call->fun;
	      sec = call_fun->rodata;
	      if (sec != NULL
		  && fprintf (script, "   %s%c%s (%s)\n",
			      (sec->owner->my_archive != NULL
			       ? bfd_get_filename (sec->owner->my_archive) : ""),
			      info->path_separator,
			      bfd_get_filename (sec->owner),
			      sec->name) <= 0)
		return -1;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  break;
	    }
	}
    }

  return j;
}
4186
4187
/* Handle --auto-overlay.  */
4188
4189
static void
4190
spu_elf_auto_overlay (struct bfd_link_info *info)
4191
0
{
4192
0
  bfd *ibfd;
4193
0
  bfd **bfd_arr;
4194
0
  struct elf_segment_map *m;
4195
0
  unsigned int fixed_size, lo, hi;
4196
0
  unsigned int reserved;
4197
0
  struct spu_link_hash_table *htab;
4198
0
  unsigned int base, i, count, bfd_count;
4199
0
  unsigned int region, ovlynum;
4200
0
  asection **ovly_sections, **ovly_p;
4201
0
  unsigned int *ovly_map;
4202
0
  FILE *script;
4203
0
  unsigned int total_overlay_size, overlay_size;
4204
0
  const char *ovly_mgr_entry;
4205
0
  struct elf_link_hash_entry *h;
4206
0
  struct _mos_param mos_param;
4207
0
  struct _uos_param uos_param;
4208
0
  struct function_info dummy_caller;
4209
4210
  /* Find the extents of our loadable image.  */
4211
0
  lo = (unsigned int) -1;
4212
0
  hi = 0;
4213
0
  for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4214
0
    if (m->p_type == PT_LOAD)
4215
0
      for (i = 0; i < m->count; i++)
4216
0
  if (m->sections[i]->size != 0)
4217
0
    {
4218
0
      if (m->sections[i]->vma < lo)
4219
0
        lo = m->sections[i]->vma;
4220
0
      if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4221
0
        hi = m->sections[i]->vma + m->sections[i]->size - 1;
4222
0
    }
4223
0
  fixed_size = hi + 1 - lo;
4224
4225
0
  if (!discover_functions (info))
4226
0
    goto err_exit;
4227
4228
0
  if (!build_call_tree (info))
4229
0
    goto err_exit;
4230
4231
0
  htab = spu_hash_table (info);
4232
0
  reserved = htab->params->auto_overlay_reserved;
4233
0
  if (reserved == 0)
4234
0
    {
4235
0
      struct _sum_stack_param sum_stack_param;
4236
4237
0
      sum_stack_param.emit_stack_syms = 0;
4238
0
      sum_stack_param.overall_stack = 0;
4239
0
      if (!for_each_node (sum_stack, info, &sum_stack_param, true))
4240
0
  goto err_exit;
4241
0
      reserved = (sum_stack_param.overall_stack
4242
0
      + htab->params->extra_stack_space);
4243
0
    }
4244
4245
  /* No need for overlays if everything already fits.  */
4246
0
  if (fixed_size + reserved <= htab->local_store
4247
0
      && htab->params->ovly_flavour != ovly_soft_icache)
4248
0
    {
4249
0
      htab->params->auto_overlay = 0;
4250
0
      return;
4251
0
    }
4252
4253
0
  uos_param.exclude_input_section = 0;
4254
0
  uos_param.exclude_output_section
4255
0
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4256
4257
0
  ovly_mgr_entry = "__ovly_load";
4258
0
  if (htab->params->ovly_flavour == ovly_soft_icache)
4259
0
    ovly_mgr_entry = "__icache_br_handler";
4260
0
  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4261
0
          false, false, false);
4262
0
  if (h != NULL
4263
0
      && (h->root.type == bfd_link_hash_defined
4264
0
    || h->root.type == bfd_link_hash_defweak)
4265
0
      && h->def_regular)
4266
0
    {
4267
      /* We have a user supplied overlay manager.  */
4268
0
      uos_param.exclude_input_section = h->root.u.def.section;
4269
0
    }
4270
0
  else
4271
0
    {
4272
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4273
   builtin version to .text, and will adjust .text size.  */
4274
0
      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4275
0
    }
4276
4277
  /* Mark overlay sections, and find max overlay section size.  */
4278
0
  mos_param.max_overlay_size = 0;
4279
0
  if (!for_each_node (mark_overlay_section, info, &mos_param, true))
4280
0
    goto err_exit;
4281
4282
  /* We can't put the overlay manager or interrupt routines in
4283
     overlays.  */
4284
0
  uos_param.clearing = 0;
4285
0
  if ((uos_param.exclude_input_section
4286
0
       || uos_param.exclude_output_section)
4287
0
      && !for_each_node (unmark_overlay_section, info, &uos_param, true))
4288
0
    goto err_exit;
4289
4290
0
  bfd_count = 0;
4291
0
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4292
0
    ++bfd_count;
4293
0
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4294
0
  if (bfd_arr == NULL)
4295
0
    goto err_exit;
4296
4297
  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
4298
0
  count = 0;
4299
0
  bfd_count = 0;
4300
0
  total_overlay_size = 0;
4301
0
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4302
0
    {
4303
0
      extern const bfd_target spu_elf32_vec;
4304
0
      asection *sec;
4305
0
      unsigned int old_count;
4306
4307
0
      if (ibfd->xvec != &spu_elf32_vec)
4308
0
  continue;
4309
4310
0
      old_count = count;
4311
0
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4312
0
  if (sec->linker_mark)
4313
0
    {
4314
0
      if ((sec->flags & SEC_CODE) != 0)
4315
0
        count += 1;
4316
0
      fixed_size -= sec->size;
4317
0
      total_overlay_size += sec->size;
4318
0
    }
4319
0
  else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4320
0
     && sec->output_section->owner == info->output_bfd
4321
0
     && startswith (sec->output_section->name, ".ovl.init"))
4322
0
    fixed_size -= sec->size;
4323
0
      if (count != old_count)
4324
0
  bfd_arr[bfd_count++] = ibfd;
4325
0
    }
4326
4327
  /* Since the overlay link script selects sections by file name and
4328
     section name, ensure that file names are unique.  */
4329
0
  if (bfd_count > 1)
4330
0
    {
4331
0
      bool ok = true;
4332
4333
0
      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4334
0
      for (i = 1; i < bfd_count; ++i)
4335
0
  if (filename_cmp (bfd_get_filename (bfd_arr[i - 1]),
4336
0
        bfd_get_filename (bfd_arr[i])) == 0)
4337
0
    {
4338
0
      if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4339
0
        {
4340
0
    if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4341
      /* xgettext:c-format */
4342
0
      info->callbacks->einfo (_("%s duplicated in %s\n"),
4343
0
            bfd_get_filename (bfd_arr[i]),
4344
0
            bfd_get_filename (bfd_arr[i]->my_archive));
4345
0
    else
4346
0
      info->callbacks->einfo (_("%s duplicated\n"),
4347
0
            bfd_get_filename (bfd_arr[i]));
4348
0
    ok = false;
4349
0
        }
4350
0
    }
4351
0
      if (!ok)
4352
0
  {
4353
0
    info->callbacks->einfo (_("sorry, no support for duplicate "
4354
0
            "object files in auto-overlay script\n"));
4355
0
    bfd_set_error (bfd_error_bad_value);
4356
0
    goto err_exit;
4357
0
  }
4358
0
    }
4359
0
  free (bfd_arr);
4360
4361
0
  fixed_size += reserved;
4362
0
  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4363
0
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4364
0
    {
4365
0
      if (htab->params->ovly_flavour == ovly_soft_icache)
4366
0
  {
4367
    /* Stubs in the non-icache area are bigger.  */
4368
0
    fixed_size += htab->non_ovly_stub * 16;
4369
    /* Space for icache manager tables.
4370
       a) Tag array, one quadword per cache line.
4371
       - word 0: ia address of present line, init to zero.  */
4372
0
    fixed_size += 16 << htab->num_lines_log2;
4373
    /* b) Rewrite "to" list, one quadword per cache line.  */
4374
0
    fixed_size += 16 << htab->num_lines_log2;
4375
    /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4376
    to a power-of-two number of full quadwords) per cache line.  */
4377
0
    fixed_size += 16 << (htab->fromelem_size_log2
4378
0
             + htab->num_lines_log2);
4379
    /* d) Pointer to __ea backing store (toe), 1 quadword.  */
4380
0
    fixed_size += 16;
4381
0
  }
4382
0
      else
4383
0
  {
4384
    /* Guess number of overlays.  Assuming overlay buffer is on
4385
       average only half full should be conservative.  */
4386
0
    ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4387
0
         / (htab->local_store - fixed_size));
4388
    /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
4389
0
    fixed_size += ovlynum * 16 + 16 + 4 + 16;
4390
0
  }
4391
0
    }
4392
4393
0
  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4394
    /* xgettext:c-format */
4395
0
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4396
0
            "size of 0x%v exceeds local store\n"),
4397
0
          (bfd_vma) fixed_size,
4398
0
          (bfd_vma) mos_param.max_overlay_size);
4399
4400
  /* Now see if we should put some functions in the non-overlay area.  */
4401
0
  else if (fixed_size < htab->params->auto_overlay_fixed)
4402
0
    {
4403
0
      unsigned int max_fixed, lib_size;
4404
4405
0
      max_fixed = htab->local_store - mos_param.max_overlay_size;
4406
0
      if (max_fixed > htab->params->auto_overlay_fixed)
4407
0
  max_fixed = htab->params->auto_overlay_fixed;
4408
0
      lib_size = max_fixed - fixed_size;
4409
0
      lib_size = auto_ovl_lib_functions (info, lib_size);
4410
0
      if (lib_size == (unsigned int) -1)
4411
0
  goto err_exit;
4412
0
      fixed_size = max_fixed - lib_size;
4413
0
    }
4414
4415
  /* Build an array of sections, suitably sorted to place into
4416
     overlays.  */
4417
0
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4418
0
  if (ovly_sections == NULL)
4419
0
    goto err_exit;
4420
0
  ovly_p = ovly_sections;
4421
0
  if (!for_each_node (collect_overlays, info, &ovly_p, true))
4422
0
    goto err_exit;
4423
0
  count = (size_t) (ovly_p - ovly_sections) / 2;
4424
0
  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4425
0
  if (ovly_map == NULL)
4426
0
    goto err_exit;
4427
4428
0
  memset (&dummy_caller, 0, sizeof (dummy_caller));
4429
0
  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4430
0
  if (htab->params->line_size != 0)
4431
0
    overlay_size = htab->params->line_size;
4432
0
  base = 0;
4433
0
  ovlynum = 0;
4434
0
  while (base < count)
4435
0
    {
4436
0
      unsigned int size = 0, rosize = 0, roalign = 0;
4437
4438
0
      for (i = base; i < count; i++)
4439
0
  {
4440
0
    asection *sec, *rosec;
4441
0
    unsigned int tmp, rotmp;
4442
0
    unsigned int num_stubs;
4443
0
    struct call_info *call, *pasty;
4444
0
    struct _spu_elf_section_data *sec_data;
4445
0
    struct spu_elf_stack_info *sinfo;
4446
0
    unsigned int k;
4447
4448
    /* See whether we can add this section to the current
4449
       overlay without overflowing our overlay buffer.  */
4450
0
    sec = ovly_sections[2 * i];
4451
0
    tmp = align_power (size, sec->alignment_power) + sec->size;
4452
0
    rotmp = rosize;
4453
0
    rosec = ovly_sections[2 * i + 1];
4454
0
    if (rosec != NULL)
4455
0
      {
4456
0
        rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4457
0
        if (roalign < rosec->alignment_power)
4458
0
    roalign = rosec->alignment_power;
4459
0
      }
4460
0
    if (align_power (tmp, roalign) + rotmp > overlay_size)
4461
0
      break;
4462
0
    if (sec->segment_mark)
4463
0
      {
4464
        /* Pasted sections must stay together, so add their
4465
     sizes too.  */
4466
0
        pasty = find_pasted_call (sec);
4467
0
        while (pasty != NULL)
4468
0
    {
4469
0
      struct function_info *call_fun = pasty->fun;
4470
0
      tmp = (align_power (tmp, call_fun->sec->alignment_power)
4471
0
       + call_fun->sec->size);
4472
0
      if (call_fun->rodata)
4473
0
        {
4474
0
          rotmp = (align_power (rotmp,
4475
0
              call_fun->rodata->alignment_power)
4476
0
             + call_fun->rodata->size);
4477
0
          if (roalign < rosec->alignment_power)
4478
0
      roalign = rosec->alignment_power;
4479
0
        }
4480
0
      for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4481
0
        if (pasty->is_pasted)
4482
0
          break;
4483
0
    }
4484
0
      }
4485
0
    if (align_power (tmp, roalign) + rotmp > overlay_size)
4486
0
      break;
4487
4488
    /* If we add this section, we might need new overlay call
4489
       stubs.  Add any overlay section calls to dummy_call.  */
4490
0
    pasty = NULL;
4491
0
    sec_data = spu_elf_section_data (sec);
4492
0
    sinfo = sec_data->u.i.stack_info;
4493
0
    for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4494
0
      for (call = sinfo->fun[k].call_list; call; call = call->next)
4495
0
        if (call->is_pasted)
4496
0
    {
4497
0
      BFD_ASSERT (pasty == NULL);
4498
0
      pasty = call;
4499
0
    }
4500
0
        else if (call->fun->sec->linker_mark)
4501
0
    {
4502
0
      if (!copy_callee (&dummy_caller, call))
4503
0
        goto err_exit;
4504
0
    }
4505
0
    while (pasty != NULL)
4506
0
      {
4507
0
        struct function_info *call_fun = pasty->fun;
4508
0
        pasty = NULL;
4509
0
        for (call = call_fun->call_list; call; call = call->next)
4510
0
    if (call->is_pasted)
4511
0
      {
4512
0
        BFD_ASSERT (pasty == NULL);
4513
0
        pasty = call;
4514
0
      }
4515
0
    else if (!copy_callee (&dummy_caller, call))
4516
0
      goto err_exit;
4517
0
      }
4518
4519
    /* Calculate call stub size.  */
4520
0
    num_stubs = 0;
4521
0
    for (call = dummy_caller.call_list; call; call = call->next)
4522
0
      {
4523
0
        unsigned int stub_delta = 1;
4524
4525
0
        if (htab->params->ovly_flavour == ovly_soft_icache)
4526
0
    stub_delta = call->count;
4527
0
        num_stubs += stub_delta;
4528
4529
        /* If the call is within this overlay, we won't need a
4530
     stub.  */
4531
0
        for (k = base; k < i + 1; k++)
4532
0
    if (call->fun->sec == ovly_sections[2 * k])
4533
0
      {
4534
0
        num_stubs -= stub_delta;
4535
0
        break;
4536
0
      }
4537
0
      }
4538
0
    if (htab->params->ovly_flavour == ovly_soft_icache
4539
0
        && num_stubs > htab->params->max_branch)
4540
0
      break;
4541
0
    if (align_power (tmp, roalign) + rotmp
4542
0
        + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4543
0
      break;
4544
0
    size = tmp;
4545
0
    rosize = rotmp;
4546
0
  }
4547
4548
0
      if (i == base)
4549
0
  {
4550
    /* xgettext:c-format */
4551
0
    info->callbacks->einfo (_("%pB:%pA%s exceeds overlay size\n"),
4552
0
          ovly_sections[2 * i]->owner,
4553
0
          ovly_sections[2 * i],
4554
0
          ovly_sections[2 * i + 1] ? " + rodata" : "");
4555
0
    bfd_set_error (bfd_error_bad_value);
4556
0
    goto err_exit;
4557
0
  }
4558
4559
0
      while (dummy_caller.call_list != NULL)
4560
0
  {
4561
0
    struct call_info *call = dummy_caller.call_list;
4562
0
    dummy_caller.call_list = call->next;
4563
0
    free (call);
4564
0
  }
4565
4566
0
      ++ovlynum;
4567
0
      while (base < i)
4568
0
  ovly_map[base++] = ovlynum;
4569
0
    }
4570
4571
0
  script = htab->params->spu_elf_open_overlay_script ();
4572
4573
0
  if (htab->params->ovly_flavour == ovly_soft_icache)
4574
0
    {
4575
0
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
4576
0
  goto file_err;
4577
4578
0
      if (fprintf (script,
4579
0
       " . = ALIGN (%u);\n"
4580
0
       " .ovl.init : { *(.ovl.init) }\n"
4581
0
       " . = ABSOLUTE (ADDR (.ovl.init));\n",
4582
0
       htab->params->line_size) <= 0)
4583
0
  goto file_err;
4584
4585
0
      base = 0;
4586
0
      ovlynum = 1;
4587
0
      while (base < count)
4588
0
  {
4589
0
    unsigned int indx = ovlynum - 1;
4590
0
    unsigned int vma, lma;
4591
4592
0
    vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4593
0
    lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4594
4595
0
    if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4596
0
             ": AT (LOADADDR (.ovl.init) + %u) {\n",
4597
0
           ovlynum, vma, lma) <= 0)
4598
0
      goto file_err;
4599
4600
0
    base = print_one_overlay_section (script, base, count, ovlynum,
4601
0
              ovly_map, ovly_sections, info);
4602
0
    if (base == (unsigned) -1)
4603
0
      goto file_err;
4604
4605
0
    if (fprintf (script, "  }\n") <= 0)
4606
0
      goto file_err;
4607
4608
0
    ovlynum++;
4609
0
  }
4610
4611
0
      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4612
0
       1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4613
0
  goto file_err;
4614
4615
0
      if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4616
0
  goto file_err;
4617
0
    }
4618
0
  else
4619
0
    {
4620
0
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
4621
0
  goto file_err;
4622
4623
0
      if (fprintf (script,
4624
0
       " . = ALIGN (16);\n"
4625
0
       " .ovl.init : { *(.ovl.init) }\n"
4626
0
       " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4627
0
  goto file_err;
4628
4629
0
      for (region = 1; region <= htab->params->num_lines; region++)
4630
0
  {
4631
0
    ovlynum = region;
4632
0
    base = 0;
4633
0
    while (base < count && ovly_map[base] < ovlynum)
4634
0
      base++;
4635
4636
0
    if (base == count)
4637
0
      break;
4638
4639
0
    if (region == 1)
4640
0
      {
4641
        /* We need to set lma since we are overlaying .ovl.init.  */
4642
0
        if (fprintf (script,
4643
0
         " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4644
0
    goto file_err;
4645
0
      }
4646
0
    else
4647
0
      {
4648
0
        if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4649
0
    goto file_err;
4650
0
      }
4651
4652
0
    while (base < count)
4653
0
      {
4654
0
        if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
4655
0
    goto file_err;
4656
4657
0
        base = print_one_overlay_section (script, base, count, ovlynum,
4658
0
            ovly_map, ovly_sections, info);
4659
0
        if (base == (unsigned) -1)
4660
0
    goto file_err;
4661
4662
0
        if (fprintf (script, "  }\n") <= 0)
4663
0
    goto file_err;
4664
4665
0
        ovlynum += htab->params->num_lines;
4666
0
        while (base < count && ovly_map[base] < ovlynum)
4667
0
    base++;
4668
0
      }
4669
4670
0
    if (fprintf (script, " }\n") <= 0)
4671
0
      goto file_err;
4672
0
  }
4673
4674
0
      if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4675
0
  goto file_err;
4676
0
    }
4677
4678
0
  free (ovly_map);
4679
0
  free (ovly_sections);
4680
4681
0
  if (fclose (script) != 0)
4682
0
    goto file_err;
4683
4684
0
  if (htab->params->auto_overlay & AUTO_RELINK)
4685
0
    (*htab->params->spu_elf_relink) ();
4686
4687
0
  xexit (0);
4688
4689
0
 file_err:
4690
0
  bfd_set_error (bfd_error_system_call);
4691
0
 err_exit:
4692
0
  info->callbacks->einfo (_("%F%P: auto overlay error: %E\n"));
4693
0
  xexit (1);
4694
0
}
4695
4696
/* Provide an estimate of total stack required.  */
4697
4698
static bool
4699
spu_elf_stack_analysis (struct bfd_link_info *info)
4700
0
{
4701
0
  struct spu_link_hash_table *htab;
4702
0
  struct _sum_stack_param sum_stack_param;
4703
4704
0
  if (!discover_functions (info))
4705
0
    return false;
4706
4707
0
  if (!build_call_tree (info))
4708
0
    return false;
4709
4710
0
  htab = spu_hash_table (info);
4711
0
  if (htab->params->stack_analysis)
4712
0
    {
4713
0
      info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4714
0
      info->callbacks->minfo (_("\nStack size for functions.  "
4715
0
        "Annotations: '*' max stack, 't' tail call\n"));
4716
0
    }
4717
4718
0
  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4719
0
  sum_stack_param.overall_stack = 0;
4720
0
  if (!for_each_node (sum_stack, info, &sum_stack_param, true))
4721
0
    return false;
4722
4723
0
  if (htab->params->stack_analysis)
4724
0
    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4725
0
         (bfd_vma) sum_stack_param.overall_stack);
4726
0
  return true;
4727
0
}
4728
4729
/* Perform a final link.

   Runs auto-overlay processing (which may exit rather than return),
   optional stack/lrlive analysis, builds overlay stubs, then hands
   off to the generic ELF final link.  Analysis failure is reported
   but does not stop the link; stub build failure is fatal via the
   %F einfo format.  */

static bool
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->params->auto_overlay)
    spu_elf_auto_overlay (info);

  if ((htab->params->stack_analysis
       || (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->lrlive_analysis))
      && !spu_elf_stack_analysis (info))
    info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));

  if (!spu_elf_build_stubs (info))
    info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));

  return bfd_elf_final_link (output_bfd, info);
}
4750
4751
/* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
4752
   and !info->emitrelocations.  Returns a count of special relocs
4753
   that need to be emitted.  */
4754
4755
static unsigned int
4756
spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4757
0
{
4758
0
  Elf_Internal_Rela *relocs;
4759
0
  unsigned int count = 0;
4760
4761
0
  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4762
0
              info->keep_memory);
4763
0
  if (relocs != NULL)
4764
0
    {
4765
0
      Elf_Internal_Rela *rel;
4766
0
      Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4767
4768
0
      for (rel = relocs; rel < relend; rel++)
4769
0
  {
4770
0
    int r_type = ELF32_R_TYPE (rel->r_info);
4771
0
    if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4772
0
      ++count;
4773
0
  }
4774
4775
0
      if (elf_section_data (sec)->relocs != relocs)
4776
0
  free (relocs);
4777
0
    }
4778
4779
0
  return count;
4780
0
}
4781
4782
/* Functions for adding fixup records to .fixup */

/* Each fixup record is one 32-bit word.  */
#define FIXUP_RECORD_SIZE 4

/* Write a 32-bit fixup record at INDEX in HTAB's .fixup contents.  */
#define FIXUP_PUT(output_bfd,htab,index,addr) \
    bfd_put_32 (output_bfd, addr, \
	  htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Read back the 32-bit fixup record at INDEX.  */
#define FIXUP_GET(output_bfd,htab,index) \
    bfd_get_32 (output_bfd, \
	  htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4792
4793
/* Store OFFSET in .fixup.  This assumes it will be called with an
4794
   increasing OFFSET.  When this OFFSET fits with the last base offset,
4795
   it just sets a bit, otherwise it adds a new fixup record.  */
4796
static void
4797
spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4798
        bfd_vma offset)
4799
0
{
4800
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
4801
0
  asection *sfixup = htab->sfixup;
4802
0
  bfd_vma qaddr = offset & ~(bfd_vma) 15;
4803
0
  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4804
0
  if (sfixup->reloc_count == 0)
4805
0
    {
4806
0
      FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4807
0
      sfixup->reloc_count++;
4808
0
    }
4809
0
  else
4810
0
    {
4811
0
      bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4812
0
      if (qaddr != (base & ~(bfd_vma) 15))
4813
0
  {
4814
0
    if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4815
0
      _bfd_error_handler (_("fatal error while creating .fixup"));
4816
0
    FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4817
0
    sfixup->reloc_count++;
4818
0
  }
4819
0
      else
4820
0
  FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4821
0
    }
4822
0
}
4823
4824
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = true;
  bool emit_these_relocs = false;
  bool is_ea_sym;
  bool stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  /* Only sections that may branch out of an overlay need stubs.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section));
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bool unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = false;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol: resolve directly from the symbol table.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  if (sym_hashes == NULL)
	    return false;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  if (info->wrap_hash != NULL
	      && (input_section->flags & SEC_DEBUGGING) != 0)
	    h = ((struct elf_link_hash_entry *)
		 unwrap_hash_lookup (info, input_bfd, &h->root));

	  /* Follow indirect and warning links to the real symbol.  */
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = true;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!bfd_link_relocatable (info)
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      bool err;

	      err = (info->unresolved_syms_in_objects == RM_DIAGNOSE
		     && !info->warn_unresolved_syms)
		|| ELF_ST_VISIBILITY (h->other) != STV_DEFAULT;

	      info->callbacks->undefined_symbol
		(info, h->root.root.string, input_bfd,
		 input_section, rel->r_offset, err);
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0". */
      if (r_type == R_SPU_ADD_PIC
	  && h != NULL
	  && !(h->def_regular || ELF_COMMON_DEF_P (h)))
	{
	  bfd_byte *loc = contents + rel->r_offset;
	  loc[0] = 0x1c;
	  loc[1] = 0x00;
	  loc[2] &= 0x3f;
	}

      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && !is_ea_sym
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = iovl;

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the got entry created earlier for this call site.  */
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  /* Redirect the relocation to the stub.  */
	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);
	      if (ovl != 0)
		{
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}

      /* Record R_SPU_ADDR32 locations in .fixup when requested.  */
      if (htab->params->emit_fixups && !bfd_link_relocatable (info)
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
	  offset = rel->r_offset + input_section->output_section->vma
		   + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}

      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  emit_these_relocs = true;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = true;

      if (unresolved_reloc
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%s+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_section_name (input_section),
	     (uint64_t) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = false;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      (*info->callbacks->reloc_overflow)
		(info, (h ? &h->root : NULL), sym_name, howto->name,
		 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, sym_name, input_bfd, input_section, rel->r_offset, true);
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = false;
	      (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
					   input_section, rel->r_offset);
	      break;
	    }
	}
    }

  /* Squeeze out every reloc except the PPU ones we still need, so the
     generic code only emits those.  */
  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      /* Returning 2 tells the caller relocs were kept for output.  */
      ret = 2;
    }

  return ret;
}
5135
5136
static bool
5137
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
5138
         struct bfd_link_info *info ATTRIBUTE_UNUSED)
5139
0
{
5140
0
  return true;
5141
0
}
5142
5143
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
5144
5145
static int
5146
spu_elf_output_symbol_hook (struct bfd_link_info *info,
5147
          const char *sym_name ATTRIBUTE_UNUSED,
5148
          Elf_Internal_Sym *sym,
5149
          asection *sym_sec ATTRIBUTE_UNUSED,
5150
          struct elf_link_hash_entry *h)
5151
0
{
5152
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
5153
5154
0
  if (!bfd_link_relocatable (info)
5155
0
      && htab->stub_sec != NULL
5156
0
      && h != NULL
5157
0
      && (h->root.type == bfd_link_hash_defined
5158
0
    || h->root.type == bfd_link_hash_defweak)
5159
0
      && h->def_regular
5160
0
      && startswith (h->root.root.string, "_SPUEAR_"))
5161
0
    {
5162
0
      struct got_entry *g;
5163
5164
0
      for (g = h->got.glist; g != NULL; g = g->next)
5165
0
  if (htab->params->ovly_flavour == ovly_soft_icache
5166
0
      ? g->br_addr == g->stub_addr
5167
0
      : g->addend == 0 && g->ovl == 0)
5168
0
    {
5169
0
      sym->st_shndx = (_bfd_elf_section_from_bfd_section
5170
0
           (htab->stub_sec[0]->output_section->owner,
5171
0
            htab->stub_sec[0]->output_section));
5172
0
      sym->st_value = g->stub_addr;
5173
0
      break;
5174
0
    }
5175
0
    }
5176
5177
0
  return 1;
5178
0
}
5179
5180
/* Non-zero when the output is to be marked as an SPU plugin image.  */
static int spu_plugin = 0;

/* Record whether VAL requests plugin output; consulted when the ELF
   header is initialized.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5187
5188
/* Set ELF header e_type for plugins.  */
5189
5190
static bool
5191
spu_elf_init_file_header (bfd *abfd, struct bfd_link_info *info)
5192
0
{
5193
0
  if (!_bfd_elf_init_file_header (abfd, info))
5194
0
    return false;
5195
5196
0
  if (spu_plugin)
5197
0
    {
5198
0
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5199
5200
0
      i_ehdrp->e_type = ET_DYN;
5201
0
    }
5202
0
  return true;
5203
0
}
5204
5205
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
5206
   segments for overlays.  */
5207
5208
static int
5209
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5210
0
{
5211
0
  int extra = 0;
5212
0
  asection *sec;
5213
5214
0
  if (info != NULL)
5215
0
    {
5216
0
      struct spu_link_hash_table *htab = spu_hash_table (info);
5217
0
      extra = htab->num_overlays;
5218
0
    }
5219
5220
0
  if (extra)
5221
0
    ++extra;
5222
5223
0
  sec = bfd_get_section_by_name (abfd, ".toe");
5224
0
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5225
0
    ++extra;
5226
5227
0
  return extra;
5228
0
}
5229
5230
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bool
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay, **first_load;
  unsigned int i;

  if (info == NULL)
    return true;

  /* Split any multi-section PT_LOAD that contains .toe or an overlay
     section: sections after the special one go into a new trailing
     segment, sections before it stay in M, and the special section
     itself gets a singleton segment.  */
  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Hive off the sections following S into a new
		   PT_LOAD segment inserted after M.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return false;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* S was not first: keep the leading sections in M and
		   give S a singleton segment of its own.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return false;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  m_overlay = NULL;
  first_load = NULL;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD)
	{
	  if (!first_load)
	    first_load = p;
	  if ((*p)->count == 1
	      && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	    {
	      /* Unlink the overlay segment and append it to the
		 overlay list, preserving relative order.  */
	      m = *p;
	      m->no_sort_lma = 1;
	      *p = m->next;
	      *p_overlay = m;
	      p_overlay = &m->next;
	      continue;
	    }
	}
      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  if (m_overlay != NULL)
    {
      p = first_load;
      if (*p != NULL && (*p)->p_type == PT_LOAD && (*p)->includes_filehdr)
	/* It doesn't really make sense for someone to include the ELF
	   file header into an spu image, but if they do the code that
	   assigns p_offset needs to see the segment containing the
	   header first.  */
	p = &(*p)->next;
      *p_overlay = *p;
      *p = m_overlay;
    }

  return true;
}
5335
5336
/* Tweak the section type of .note.spu_name.  */
5337
5338
static bool
5339
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5340
           Elf_Internal_Shdr *hdr,
5341
           asection *sec)
5342
0
{
5343
0
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5344
0
    hdr->sh_type = SHT_NOTE;
5345
0
  return true;
5346
0
}
5347
5348
/* Tweak phdrs before writing them out.  */

static bool
spu_elf_modify_headers (bfd *abfd, struct bfd_link_info *info)
{
  if (info != NULL)
    {
      const struct elf_backend_data *bed;
      struct elf_obj_tdata *tdata;
      Elf_Internal_Phdr *phdr, *last;
      struct spu_link_hash_table *htab;
      unsigned int count;
      unsigned int i;

      bed = get_elf_backend_data (abfd);
      tdata = elf_tdata (abfd);
      phdr = tdata->phdr;
      count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
      htab = spu_hash_table (info);
      if (htab->num_overlays != 0)
	{
	  struct elf_segment_map *m;
	  unsigned int o;

	  /* Walk the segment map in step with the phdr array; the two
	     are index-aligned at this point.  */
	  for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
	    if (m->count != 0
		&& ((o = spu_elf_section_data (m->sections[0])->u.o.ovl_index)
		    != 0))
	      {
		/* Mark this as an overlay header.  */
		phdr[i].p_flags |= PF_OVERLAY;

		if (htab->ovtab != NULL && htab->ovtab->size != 0
		    && htab->params->ovly_flavour != ovly_soft_icache)
		  {
		    bfd_byte *p = htab->ovtab->contents;
		    unsigned int off = o * 16 + 8;

		    /* Write file_off into _ovly_table.  */
		    bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
		  }
	      }
	  /* Soft-icache has its file offset put in .ovl.init.  */
	  if (htab->init != NULL && htab->init->size != 0)
	    {
	      bfd_vma val
		= elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	      bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	    }
	}

      /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
	 of 16.  This should always be possible when using the standard
	 linker scripts, but don't create overlapping segments if
	 someone is playing games with linker scripts.  */
      last = NULL;
      for (i = count; i-- != 0; )
	if (phdr[i].p_type == PT_LOAD)
	  {
	    unsigned adjust;

	    /* First pass (back to front): verify that rounding each
	       segment up would not run into the following one; bail
	       out of the loop early if it would.  */
	    adjust = -phdr[i].p_filesz & 15;
	    if (adjust != 0
		&& last != NULL
		&& (phdr[i].p_offset + phdr[i].p_filesz
		    > last->p_offset - adjust))
	      break;

	    adjust = -phdr[i].p_memsz & 15;
	    if (adjust != 0
		&& last != NULL
		&& phdr[i].p_filesz != 0
		&& phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
		&& phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	      break;

	    if (phdr[i].p_filesz != 0)
	      last = &phdr[i];
	  }

      /* i only reaches (unsigned) -1 when the check loop above ran to
	 completion, i.e. all segments can safely be rounded up.  */
      if (i == (unsigned int) -1)
	for (i = count; i-- != 0; )
	  if (phdr[i].p_type == PT_LOAD)
	    {
	      unsigned adjust;

	      adjust = -phdr[i].p_filesz & 15;
	      phdr[i].p_filesz += adjust;

	      adjust = -phdr[i].p_memsz & 15;
	      phdr[i].p_memsz += adjust;
	    }
    }

  return _bfd_elf_modify_headers (abfd, info);
}
5445
5446
/* Size the .fixup section before layout: count the quadwords that
   will need an R_SPU_ADDR32 fixup record and allocate its contents.  */
bool
spu_elf_size_sections (bfd *obfd ATTRIBUTE_UNUSED, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  asection *isec;

	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
	      bfd_vma base_end;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((isec->flags & SEC_ALLOC) == 0
		  || (isec->flags & SEC_RELOC) == 0
		  || isec->reloc_count == 0)
		continue;

	      /* Get the relocs.  */
	      internal_relocs =
		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
					   info->keep_memory);
	      if (internal_relocs == NULL)
		return false;

	      /* 1 quadword can contain up to 4 R_SPU_ADDR32
		 relocations.  They are stored in a single word by
		 saving the upper 28 bits of the address and setting the
		 lower 4 bits to a bit mask of the words that have the
		 relocation.  BASE_END keeps track of the next quadword. */
	      irela = internal_relocs;
	      irelaend = irela + isec->reloc_count;
	      base_end = 0;
	      for (; irela < irelaend; irela++)
		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
		    && irela->r_offset >= base_end)
		  {
		    /* New quadword: one more fixup record needed.
		       NOTE(review): this assumes relocs are sorted by
		       increasing r_offset within the section.  */
		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
		    fixup_count++;
		  }
	    }
	}

      /* We always have a NULL fixup as a sentinel */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (sfixup, size))
	return false;
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
	return false;
    }
  return true;
}
5512
5513
#define TARGET_BIG_SYM    spu_elf32_vec
5514
#define TARGET_BIG_NAME   "elf32-spu"
5515
#define ELF_ARCH    bfd_arch_spu
5516
#define ELF_TARGET_ID   SPU_ELF_DATA
5517
#define ELF_MACHINE_CODE  EM_SPU
5518
/* This matches the alignment need for DMA.  */
5519
#define ELF_MAXPAGESIZE   0x80
5520
#define elf_backend_rela_normal   1
5521
#define elf_backend_can_gc_sections 1
5522
5523
#define bfd_elf32_bfd_reloc_type_lookup   spu_elf_reloc_type_lookup
5524
#define bfd_elf32_bfd_reloc_name_lookup   spu_elf_reloc_name_lookup
5525
#define elf_info_to_howto     spu_elf_info_to_howto
5526
#define elf_backend_count_relocs    spu_elf_count_relocs
5527
#define elf_backend_relocate_section    spu_elf_relocate_section
5528
#define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
5529
#define elf_backend_symbol_processing   spu_elf_backend_symbol_processing
5530
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5531
#define elf_backend_object_p      spu_elf_object_p
5532
#define bfd_elf32_new_section_hook    spu_elf_new_section_hook
5533
#define bfd_elf32_bfd_link_hash_table_create  spu_elf_link_hash_table_create
5534
5535
#define elf_backend_additional_program_headers  spu_elf_additional_program_headers
5536
#define elf_backend_modify_segment_map    spu_elf_modify_segment_map
5537
#define elf_backend_modify_headers    spu_elf_modify_headers
5538
#define elf_backend_init_file_header    spu_elf_init_file_header
5539
#define elf_backend_fake_sections   spu_elf_fake_sections
5540
#define elf_backend_special_sections    spu_elf_special_sections
5541
#define bfd_elf32_bfd_final_link    spu_elf_final_link
5542
5543
#include "elf32-target.h"