Coverage Report

Created: 2025-06-24 06:45

/src/binutils-gdb/bfd/elf32-spu.c
Line
Count
Source (jump to first uncovered line)
1
/* SPU specific support for 32-bit ELF
2
3
   Copyright (C) 2006-2025 Free Software Foundation, Inc.
4
5
   This file is part of BFD, the Binary File Descriptor library.
6
7
   This program is free software; you can redistribute it and/or modify
8
   it under the terms of the GNU General Public License as published by
9
   the Free Software Foundation; either version 3 of the License, or
10
   (at your option) any later version.
11
12
   This program is distributed in the hope that it will be useful,
13
   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
   GNU General Public License for more details.
16
17
   You should have received a copy of the GNU General Public License along
18
   with this program; if not, write to the Free Software Foundation, Inc.,
19
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
20
21
#include "sysdep.h"
22
#include "libiberty.h"
23
#include "bfd.h"
24
#include "bfdlink.h"
25
#include "libbfd.h"
26
#include "elf-bfd.h"
27
#include "elf/spu.h"
28
#include "elf32-spu.h"
29
30
/* All users of this file have bfd_octets_per_byte (abfd, sec) == 1.  */
31
0
#define OCTETS_PER_BYTE(ABFD, SEC) 1
32
33
/* We use RELA style relocs.  Don't define USE_REL.  */
34
35
static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
36
             void *, asection *,
37
             bfd *, char **);
38
39
/* Values of type 'enum elf_spu_reloc_type' are used to index this
40
   array, so it must be declared in the order of that type.  */
41
42
static reloc_howto_type elf_howto_table[] = {
43
  HOWTO (R_SPU_NONE,     0, 0,  0, false,  0, complain_overflow_dont,
44
   bfd_elf_generic_reloc, "SPU_NONE",
45
   false, 0, 0x00000000, false),
46
  HOWTO (R_SPU_ADDR10,     4, 4, 10, false, 14, complain_overflow_bitfield,
47
   bfd_elf_generic_reloc, "SPU_ADDR10",
48
   false, 0, 0x00ffc000, false),
49
  HOWTO (R_SPU_ADDR16,     2, 4, 16, false,  7, complain_overflow_bitfield,
50
   bfd_elf_generic_reloc, "SPU_ADDR16",
51
   false, 0, 0x007fff80, false),
52
  HOWTO (R_SPU_ADDR16_HI, 16, 4, 16, false,  7, complain_overflow_bitfield,
53
   bfd_elf_generic_reloc, "SPU_ADDR16_HI",
54
   false, 0, 0x007fff80, false),
55
  HOWTO (R_SPU_ADDR16_LO,  0, 4, 16, false,  7, complain_overflow_dont,
56
   bfd_elf_generic_reloc, "SPU_ADDR16_LO",
57
   false, 0, 0x007fff80, false),
58
  HOWTO (R_SPU_ADDR18,     0, 4, 18, false,  7, complain_overflow_bitfield,
59
   bfd_elf_generic_reloc, "SPU_ADDR18",
60
   false, 0, 0x01ffff80, false),
61
  HOWTO (R_SPU_ADDR32,     0, 4, 32, false,  0, complain_overflow_dont,
62
   bfd_elf_generic_reloc, "SPU_ADDR32",
63
   false, 0, 0xffffffff, false),
64
  HOWTO (R_SPU_REL16,    2, 4, 16,  true,  7, complain_overflow_bitfield,
65
   bfd_elf_generic_reloc, "SPU_REL16",
66
   false, 0, 0x007fff80, true),
67
  HOWTO (R_SPU_ADDR7,    0, 4,  7, false, 14, complain_overflow_dont,
68
   bfd_elf_generic_reloc, "SPU_ADDR7",
69
   false, 0, 0x001fc000, false),
70
  HOWTO (R_SPU_REL9,     2, 4,  9,  true,  0, complain_overflow_signed,
71
   spu_elf_rel9,    "SPU_REL9",
72
   false, 0, 0x0180007f, true),
73
  HOWTO (R_SPU_REL9I,    2, 4,  9,  true,  0, complain_overflow_signed,
74
   spu_elf_rel9,    "SPU_REL9I",
75
   false, 0, 0x0000c07f, true),
76
  HOWTO (R_SPU_ADDR10I,    0, 4, 10, false, 14, complain_overflow_signed,
77
   bfd_elf_generic_reloc, "SPU_ADDR10I",
78
   false, 0, 0x00ffc000, false),
79
  HOWTO (R_SPU_ADDR16I,    0, 4, 16, false,  7, complain_overflow_signed,
80
   bfd_elf_generic_reloc, "SPU_ADDR16I",
81
   false, 0, 0x007fff80, false),
82
  HOWTO (R_SPU_REL32,    0, 4, 32, true,  0, complain_overflow_dont,
83
   bfd_elf_generic_reloc, "SPU_REL32",
84
   false, 0, 0xffffffff, true),
85
  HOWTO (R_SPU_ADDR16X,    0, 4, 16, false,  7, complain_overflow_bitfield,
86
   bfd_elf_generic_reloc, "SPU_ADDR16X",
87
   false, 0, 0x007fff80, false),
88
  HOWTO (R_SPU_PPU32,    0, 4, 32, false,  0, complain_overflow_dont,
89
   bfd_elf_generic_reloc, "SPU_PPU32",
90
   false, 0, 0xffffffff, false),
91
  HOWTO (R_SPU_PPU64,    0, 8, 64, false,  0, complain_overflow_dont,
92
   bfd_elf_generic_reloc, "SPU_PPU64",
93
   false, 0, -1, false),
94
  HOWTO (R_SPU_ADD_PIC,    0, 0,  0, false,  0, complain_overflow_dont,
95
   bfd_elf_generic_reloc, "SPU_ADD_PIC",
96
   false, 0, 0x00000000, false),
97
};
98
99
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
100
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
101
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
102
  { NULL, 0, 0, 0, 0 }
103
};
104
105
static enum elf_spu_reloc_type
106
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
107
0
{
108
0
  switch (code)
109
0
    {
110
0
    default:
111
0
      return (enum elf_spu_reloc_type) -1;
112
0
    case BFD_RELOC_NONE:
113
0
      return R_SPU_NONE;
114
0
    case BFD_RELOC_SPU_IMM10W:
115
0
      return R_SPU_ADDR10;
116
0
    case BFD_RELOC_SPU_IMM16W:
117
0
      return R_SPU_ADDR16;
118
0
    case BFD_RELOC_SPU_LO16:
119
0
      return R_SPU_ADDR16_LO;
120
0
    case BFD_RELOC_SPU_HI16:
121
0
      return R_SPU_ADDR16_HI;
122
0
    case BFD_RELOC_SPU_IMM18:
123
0
      return R_SPU_ADDR18;
124
0
    case BFD_RELOC_SPU_PCREL16:
125
0
      return R_SPU_REL16;
126
0
    case BFD_RELOC_SPU_IMM7:
127
0
      return R_SPU_ADDR7;
128
0
    case BFD_RELOC_SPU_IMM8:
129
0
      return R_SPU_NONE;
130
0
    case BFD_RELOC_SPU_PCREL9a:
131
0
      return R_SPU_REL9;
132
0
    case BFD_RELOC_SPU_PCREL9b:
133
0
      return R_SPU_REL9I;
134
0
    case BFD_RELOC_SPU_IMM10:
135
0
      return R_SPU_ADDR10I;
136
0
    case BFD_RELOC_SPU_IMM16:
137
0
      return R_SPU_ADDR16I;
138
0
    case BFD_RELOC_32:
139
0
      return R_SPU_ADDR32;
140
0
    case BFD_RELOC_32_PCREL:
141
0
      return R_SPU_REL32;
142
0
    case BFD_RELOC_SPU_PPU32:
143
0
      return R_SPU_PPU32;
144
0
    case BFD_RELOC_SPU_PPU64:
145
0
      return R_SPU_PPU64;
146
0
    case BFD_RELOC_SPU_ADD_PIC:
147
0
      return R_SPU_ADD_PIC;
148
0
    }
149
0
}
150
151
static bool
152
spu_elf_info_to_howto (bfd *abfd,
153
           arelent *cache_ptr,
154
           Elf_Internal_Rela *dst)
155
0
{
156
0
  enum elf_spu_reloc_type r_type;
157
158
0
  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
159
  /* PR 17512: file: 90c2a92e.  */
160
0
  if (r_type >= R_SPU_max)
161
0
    {
162
      /* xgettext:c-format */
163
0
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
164
0
        abfd, r_type);
165
0
      bfd_set_error (bfd_error_bad_value);
166
0
      return false;
167
0
    }
168
0
  cache_ptr->howto = &elf_howto_table[(int) r_type];
169
0
  return true;
170
0
}
171
172
static reloc_howto_type *
173
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
174
         bfd_reloc_code_real_type code)
175
0
{
176
0
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
177
178
0
  if (r_type == (enum elf_spu_reloc_type) -1)
179
0
    return NULL;
180
181
0
  return elf_howto_table + r_type;
182
0
}
183
184
static reloc_howto_type *
185
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
186
         const char *r_name)
187
0
{
188
0
  unsigned int i;
189
190
0
  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
191
0
    if (elf_howto_table[i].name != NULL
192
0
  && strcasecmp (elf_howto_table[i].name, r_name) == 0)
193
0
      return &elf_howto_table[i];
194
195
0
  return NULL;
196
0
}
197
198
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */
199
200
static bfd_reloc_status_type
201
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
202
        void *data, asection *input_section,
203
        bfd *output_bfd, char **error_message)
204
0
{
205
0
  bfd_size_type octets;
206
0
  bfd_vma val;
207
0
  long insn;
208
209
  /* If this is a relocatable link (output_bfd test tells us), just
210
     call the generic function.  Any adjustment will be done at final
211
     link time.  */
212
0
  if (output_bfd != NULL)
213
0
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
214
0
          input_section, output_bfd, error_message);
215
216
0
  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
217
0
    return bfd_reloc_outofrange;
218
0
  octets = reloc_entry->address * OCTETS_PER_BYTE (abfd, input_section);
219
220
  /* Get symbol value.  */
221
0
  val = 0;
222
0
  if (!bfd_is_com_section (symbol->section))
223
0
    val = symbol->value;
224
0
  if (symbol->section->output_section)
225
0
    val += symbol->section->output_section->vma;
226
227
0
  val += reloc_entry->addend;
228
229
  /* Make it pc-relative.  */
230
0
  val -= input_section->output_section->vma + input_section->output_offset;
231
232
0
  val >>= 2;
233
0
  if (val + 256 >= 512)
234
0
    return bfd_reloc_overflow;
235
236
0
  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
237
238
  /* Move two high bits of value to REL9I and REL9 position.
239
     The mask will take care of selecting the right field.  */
240
0
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
241
0
  insn &= ~reloc_entry->howto->dst_mask;
242
0
  insn |= val & reloc_entry->howto->dst_mask;
243
0
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
244
0
  return bfd_reloc_ok;
245
0
}
246
247
static bool
248
spu_elf_new_section_hook (bfd *abfd, asection *sec)
249
6.26k
{
250
6.26k
  struct _spu_elf_section_data *sdata;
251
252
6.26k
  sdata = bfd_zalloc (abfd, sizeof (*sdata));
253
6.26k
  if (sdata == NULL)
254
0
    return false;
255
6.26k
  sec->used_by_bfd = sdata;
256
257
6.26k
  return _bfd_elf_new_section_hook (abfd, sec);
258
6.26k
}
259
260
/* Set up overlay info for executables.  */
261
262
static bool
263
spu_elf_object_p (bfd *abfd)
264
1.46k
{
265
1.46k
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
266
120
    {
267
120
      unsigned int i, num_ovl, num_buf;
268
120
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
269
120
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
270
120
      Elf_Internal_Phdr *last_phdr = NULL;
271
272
128k
      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
273
128k
  if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
274
508
    {
275
508
      unsigned int j;
276
277
508
      ++num_ovl;
278
508
      if (last_phdr == NULL
279
508
    || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
280
226
        ++num_buf;
281
508
      last_phdr = phdr;
282
510
      for (j = 1; j < elf_numsections (abfd); j++)
283
2
        {
284
2
    Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
285
286
2
    if (shdr->bfd_section != NULL
287
2
        && ELF_SECTION_SIZE (shdr, phdr) != 0
288
2
        && ELF_SECTION_IN_SEGMENT (shdr, phdr))
289
0
      {
290
0
        asection *sec = shdr->bfd_section;
291
0
        spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
292
0
        spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
293
0
      }
294
2
        }
295
508
    }
296
120
    }
297
1.46k
  return true;
298
1.46k
}
299
300
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
301
   strip --strip-unneeded will not remove them.  */
302
303
static void
304
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
305
0
{
306
0
  if (sym->name != NULL
307
0
      && sym->section != bfd_abs_section_ptr
308
0
      && startswith (sym->name, "_EAR_"))
309
0
    sym->flags |= BSF_KEEP;
310
0
}
311
312
/* SPU ELF linker hash table.  */
313
314
struct spu_link_hash_table
315
{
316
  struct elf_link_hash_table elf;
317
318
  struct spu_elf_params *params;
319
320
  /* Shortcuts to overlay sections.  */
321
  asection *ovtab;
322
  asection *init;
323
  asection *toe;
324
  asection **ovl_sec;
325
326
  /* Count of stubs in each overlay section.  */
327
  unsigned int *stub_count;
328
329
  /* The stub section for each overlay section.  */
330
  asection **stub_sec;
331
332
  struct elf_link_hash_entry *ovly_entry[2];
333
334
  /* Number of overlay buffers.  */
335
  unsigned int num_buf;
336
337
  /* Total number of overlays.  */
338
  unsigned int num_overlays;
339
340
  /* For soft icache.  */
341
  unsigned int line_size_log2;
342
  unsigned int num_lines_log2;
343
  unsigned int fromelem_size_log2;
344
345
  /* How much memory we have.  */
346
  unsigned int local_store;
347
348
  /* Count of overlay stubs needed in non-overlay area.  */
349
  unsigned int non_ovly_stub;
350
351
  /* Pointer to the fixup section */
352
  asection *sfixup;
353
354
  /* Set on error.  */
355
  unsigned int stub_err : 1;
356
};
357
358
/* Hijack the generic got fields for overlay stub accounting.  */
359
360
struct got_entry
361
{
362
  struct got_entry *next;
363
  unsigned int ovl;
364
  union {
365
    bfd_vma addend;
366
    bfd_vma br_addr;
367
  };
368
  bfd_vma stub_addr;
369
};
370
371
#define spu_hash_table(p) \
372
0
  ((is_elf_hash_table ((p)->hash)          \
373
0
    && elf_hash_table_id (elf_hash_table (p)) == SPU_ELF_DATA)   \
374
0
   ? (struct spu_link_hash_table *) (p)->hash : NULL)
375
376
struct call_info
377
{
378
  struct function_info *fun;
379
  struct call_info *next;
380
  unsigned int count;
381
  unsigned int max_depth;
382
  unsigned int is_tail : 1;
383
  unsigned int is_pasted : 1;
384
  unsigned int broken_cycle : 1;
385
  unsigned int priority : 13;
386
};
387
388
struct function_info
389
{
390
  /* List of functions called.  Also branches to hot/cold part of
391
     function.  */
392
  struct call_info *call_list;
393
  /* For hot/cold part of function, point to owner.  */
394
  struct function_info *start;
395
  /* Symbol at start of function.  */
396
  union {
397
    Elf_Internal_Sym *sym;
398
    struct elf_link_hash_entry *h;
399
  } u;
400
  /* Function section.  */
401
  asection *sec;
402
  asection *rodata;
403
  /* Where last called from, and number of sections called from.  */
404
  asection *last_caller;
405
  unsigned int call_count;
406
  /* Address range of (this part of) function.  */
407
  bfd_vma lo, hi;
408
  /* Offset where we found a store of lr, or -1 if none found.  */
409
  bfd_vma lr_store;
410
  /* Offset where we found the stack adjustment insn.  */
411
  bfd_vma sp_adjust;
412
  /* Stack usage.  */
413
  int stack;
414
  /* Distance from root of call tree.  Tail and hot/cold branches
415
     count as one deeper.  We aren't counting stack frames here.  */
416
  unsigned int depth;
417
  /* Set if global symbol.  */
418
  unsigned int global : 1;
419
  /* Set if known to be start of function (as distinct from a hunk
420
     in hot/cold section.  */
421
  unsigned int is_func : 1;
422
  /* Set if not a root node.  */
423
  unsigned int non_root : 1;
424
  /* Flags used during call tree traversal.  It's cheaper to replicate
425
     the visit flags than have one which needs clearing after a traversal.  */
426
  unsigned int visit1 : 1;
427
  unsigned int visit2 : 1;
428
  unsigned int marking : 1;
429
  unsigned int visit3 : 1;
430
  unsigned int visit4 : 1;
431
  unsigned int visit5 : 1;
432
  unsigned int visit6 : 1;
433
  unsigned int visit7 : 1;
434
};
435
436
struct spu_elf_stack_info
437
{
438
  int num_fun;
439
  int max_fun;
440
  /* Variable size array describing functions, one per contiguous
441
     address range belonging to a function.  */
442
  struct function_info fun[1];
443
};
444
445
static struct function_info *find_function (asection *, bfd_vma,
446
              struct bfd_link_info *);
447
448
/* Create a spu ELF linker hash table.  */
449
450
static struct bfd_link_hash_table *
451
spu_elf_link_hash_table_create (bfd *abfd)
452
0
{
453
0
  struct spu_link_hash_table *htab;
454
455
0
  htab = bfd_zmalloc (sizeof (*htab));
456
0
  if (htab == NULL)
457
0
    return NULL;
458
459
0
  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
460
0
              _bfd_elf_link_hash_newfunc,
461
0
              sizeof (struct elf_link_hash_entry)))
462
0
    {
463
0
      free (htab);
464
0
      return NULL;
465
0
    }
466
467
0
  htab->elf.init_got_refcount.refcount = 0;
468
0
  htab->elf.init_got_refcount.glist = NULL;
469
0
  htab->elf.init_got_offset.offset = 0;
470
0
  htab->elf.init_got_offset.glist = NULL;
471
0
  return &htab->elf.root;
472
0
}
473
474
void
475
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
476
0
{
477
0
  bfd_vma max_branch_log2;
478
479
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
480
0
  htab->params = params;
481
0
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
482
0
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
483
484
  /* For the software i-cache, we provide a "from" list whose size
485
     is a power-of-two number of quadwords, big enough to hold one
486
     byte per outgoing branch.  Compute this number here.  */
487
0
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
488
0
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
489
0
}
490
491
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
492
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
493
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */
494
495
static bool
496
get_sym_h (struct elf_link_hash_entry **hp,
497
     Elf_Internal_Sym **symp,
498
     asection **symsecp,
499
     Elf_Internal_Sym **locsymsp,
500
     unsigned long r_symndx,
501
     bfd *ibfd)
502
0
{
503
0
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
504
505
0
  if (r_symndx >= symtab_hdr->sh_info)
506
0
    {
507
0
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
508
0
      struct elf_link_hash_entry *h;
509
510
0
      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
511
0
      while (h->root.type == bfd_link_hash_indirect
512
0
       || h->root.type == bfd_link_hash_warning)
513
0
  h = (struct elf_link_hash_entry *) h->root.u.i.link;
514
515
0
      if (hp != NULL)
516
0
  *hp = h;
517
518
0
      if (symp != NULL)
519
0
  *symp = NULL;
520
521
0
      if (symsecp != NULL)
522
0
  {
523
0
    asection *symsec = NULL;
524
0
    if (h->root.type == bfd_link_hash_defined
525
0
        || h->root.type == bfd_link_hash_defweak)
526
0
      symsec = h->root.u.def.section;
527
0
    *symsecp = symsec;
528
0
  }
529
0
    }
530
0
  else
531
0
    {
532
0
      Elf_Internal_Sym *sym;
533
0
      Elf_Internal_Sym *locsyms = *locsymsp;
534
535
0
      if (locsyms == NULL)
536
0
  {
537
0
    locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
538
0
    if (locsyms == NULL)
539
0
      locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
540
0
              symtab_hdr->sh_info,
541
0
              0, NULL, NULL, NULL);
542
0
    if (locsyms == NULL)
543
0
      return false;
544
0
    *locsymsp = locsyms;
545
0
  }
546
0
      sym = locsyms + r_symndx;
547
548
0
      if (hp != NULL)
549
0
  *hp = NULL;
550
551
0
      if (symp != NULL)
552
0
  *symp = sym;
553
554
0
      if (symsecp != NULL)
555
0
  *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
556
0
    }
557
558
0
  return true;
559
0
}
560
561
/* Create the note section if not already present.  This is done early so
562
   that the linker maps the sections to the right place in the output.  */
563
564
bool
565
spu_elf_create_sections (struct bfd_link_info *info)
566
0
{
567
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
568
0
  bfd *ibfd;
569
570
0
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
571
0
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
572
0
      break;
573
574
0
  if (ibfd == NULL)
575
0
    {
576
      /* Make SPU_PTNOTE_SPUNAME section.  */
577
0
      asection *s;
578
0
      size_t name_len;
579
0
      size_t size;
580
0
      bfd_byte *data;
581
0
      flagword flags;
582
583
0
      ibfd = info->input_bfds;
584
      /* This should really be SEC_LINKER_CREATED, but then we'd need
585
   to write out the section ourselves.  */
586
0
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
587
0
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
588
0
      if (s == NULL
589
0
    || !bfd_set_section_alignment (s, 4))
590
0
  return false;
591
      /* Because we didn't set SEC_LINKER_CREATED we need to set the
592
   proper section type.  */
593
0
      elf_section_type (s) = SHT_NOTE;
594
595
0
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
596
0
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
597
0
      size += (name_len + 3) & -4;
598
599
0
      if (!bfd_set_section_size (s, size))
600
0
  return false;
601
602
0
      data = bfd_zalloc (ibfd, size);
603
0
      if (data == NULL)
604
0
  return false;
605
606
0
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
607
0
      bfd_put_32 (ibfd, name_len, data + 4);
608
0
      bfd_put_32 (ibfd, 1, data + 8);
609
0
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
610
0
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
611
0
        bfd_get_filename (info->output_bfd), name_len);
612
0
      s->contents = data;
613
0
      s->alloced = 1;
614
0
    }
615
616
0
  if (htab->params->emit_fixups)
617
0
    {
618
0
      asection *s;
619
0
      flagword flags;
620
621
0
      if (htab->elf.dynobj == NULL)
622
0
  htab->elf.dynobj = ibfd;
623
0
      ibfd = htab->elf.dynobj;
624
0
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
625
0
         | SEC_IN_MEMORY | SEC_LINKER_CREATED);
626
0
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
627
0
      if (s == NULL || !bfd_set_section_alignment (s, 2))
628
0
  return false;
629
0
      htab->sfixup = s;
630
0
    }
631
632
0
  return true;
633
0
}
634
635
/* qsort predicate to sort sections by vma.  */
636
637
static int
638
sort_sections (const void *a, const void *b)
639
0
{
640
0
  const asection *const *s1 = a;
641
0
  const asection *const *s2 = b;
642
0
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
643
644
0
  if (delta != 0)
645
0
    return delta < 0 ? -1 : 1;
646
647
0
  return (*s1)->index - (*s2)->index;
648
0
}
649
650
/* Identify overlays in the output bfd, and number them.
651
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */
652
653
int
654
spu_elf_find_overlays (struct bfd_link_info *info)
655
0
{
656
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
657
0
  asection **alloc_sec;
658
0
  unsigned int i, n, ovl_index, num_buf;
659
0
  asection *s;
660
0
  bfd_vma ovl_end;
661
0
  static const char *const entry_names[2][2] = {
662
0
    { "__ovly_load", "__icache_br_handler" },
663
0
    { "__ovly_return", "__icache_call_handler" }
664
0
  };
665
666
0
  if (info->output_bfd->section_count < 2)
667
0
    return 1;
668
669
0
  alloc_sec
670
0
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
671
0
  if (alloc_sec == NULL)
672
0
    return 0;
673
674
  /* Pick out all the alloced sections.  */
675
0
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
676
0
    if ((s->flags & SEC_ALLOC) != 0
677
0
  && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
678
0
  && s->size != 0)
679
0
      alloc_sec[n++] = s;
680
681
0
  if (n == 0)
682
0
    {
683
0
      free (alloc_sec);
684
0
      return 1;
685
0
    }
686
687
  /* Sort them by vma.  */
688
0
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
689
690
0
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
691
0
  if (htab->params->ovly_flavour == ovly_soft_icache)
692
0
    {
693
0
      unsigned int prev_buf = 0, set_id = 0;
694
695
      /* Look for an overlapping vma to find the first overlay section.  */
696
0
      bfd_vma vma_start = 0;
697
698
0
      for (i = 1; i < n; i++)
699
0
  {
700
0
    s = alloc_sec[i];
701
0
    if (s->vma < ovl_end)
702
0
      {
703
0
        asection *s0 = alloc_sec[i - 1];
704
0
        vma_start = s0->vma;
705
0
        ovl_end = (s0->vma
706
0
       + ((bfd_vma) 1
707
0
          << (htab->num_lines_log2 + htab->line_size_log2)));
708
0
        --i;
709
0
        break;
710
0
      }
711
0
    else
712
0
      ovl_end = s->vma + s->size;
713
0
  }
714
715
      /* Now find any sections within the cache area.  */
716
0
      for (ovl_index = 0, num_buf = 0; i < n; i++)
717
0
  {
718
0
    s = alloc_sec[i];
719
0
    if (s->vma >= ovl_end)
720
0
      break;
721
722
    /* A section in an overlay area called .ovl.init is not
723
       an overlay, in the sense that it might be loaded in
724
       by the overlay manager, but rather the initial
725
       section contents for the overlay buffer.  */
726
0
    if (!startswith (s->name, ".ovl.init"))
727
0
      {
728
0
        num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
729
0
        set_id = (num_buf == prev_buf)? set_id + 1 : 0;
730
0
        prev_buf = num_buf;
731
732
0
        if ((s->vma - vma_start) & (htab->params->line_size - 1))
733
0
    {
734
0
      info->callbacks->einfo (_("%X%P: overlay section %pA "
735
0
              "does not start on a cache line\n"),
736
0
            s);
737
0
      bfd_set_error (bfd_error_bad_value);
738
0
      return 0;
739
0
    }
740
0
        else if (s->size > htab->params->line_size)
741
0
    {
742
0
      info->callbacks->einfo (_("%X%P: overlay section %pA "
743
0
              "is larger than a cache line\n"),
744
0
            s);
745
0
      bfd_set_error (bfd_error_bad_value);
746
0
      return 0;
747
0
    }
748
749
0
        alloc_sec[ovl_index++] = s;
750
0
        spu_elf_section_data (s)->u.o.ovl_index
751
0
    = (set_id << htab->num_lines_log2) + num_buf;
752
0
        spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
753
0
      }
754
0
  }
755
756
      /* Ensure there are no more overlay sections.  */
757
0
      for ( ; i < n; i++)
758
0
  {
759
0
    s = alloc_sec[i];
760
0
    if (s->vma < ovl_end)
761
0
      {
762
0
        info->callbacks->einfo (_("%X%P: overlay section %pA "
763
0
          "is not in cache area\n"),
764
0
              alloc_sec[i-1]);
765
0
        bfd_set_error (bfd_error_bad_value);
766
0
        return 0;
767
0
      }
768
0
    else
769
0
      ovl_end = s->vma + s->size;
770
0
  }
771
0
    }
772
0
  else
773
0
    {
774
      /* Look for overlapping vmas.  Any with overlap must be overlays.
775
   Count them.  Also count the number of overlay regions.  */
776
0
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
777
0
  {
778
0
    s = alloc_sec[i];
779
0
    if (s->vma < ovl_end)
780
0
      {
781
0
        asection *s0 = alloc_sec[i - 1];
782
783
0
        if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
784
0
    {
785
0
      ++num_buf;
786
0
      if (!startswith (s0->name, ".ovl.init"))
787
0
        {
788
0
          alloc_sec[ovl_index] = s0;
789
0
          spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
790
0
          spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
791
0
        }
792
0
      else
793
0
        ovl_end = s->vma + s->size;
794
0
    }
795
0
        if (!startswith (s->name, ".ovl.init"))
796
0
    {
797
0
      alloc_sec[ovl_index] = s;
798
0
      spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
799
0
      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
800
0
      if (s0->vma != s->vma)
801
0
        {
802
          /* xgettext:c-format */
803
0
          info->callbacks->einfo (_("%X%P: overlay sections %pA "
804
0
            "and %pA do not start at the "
805
0
            "same address\n"),
806
0
                s0, s);
807
0
          bfd_set_error (bfd_error_bad_value);
808
0
          return 0;
809
0
        }
810
0
      if (ovl_end < s->vma + s->size)
811
0
        ovl_end = s->vma + s->size;
812
0
    }
813
0
      }
814
0
    else
815
0
      ovl_end = s->vma + s->size;
816
0
  }
817
0
    }
818
819
0
  htab->num_overlays = ovl_index;
820
0
  htab->num_buf = num_buf;
821
0
  htab->ovl_sec = alloc_sec;
822
823
0
  if (ovl_index == 0)
824
0
    return 1;
825
826
0
  for (i = 0; i < 2; i++)
827
0
    {
828
0
      const char *name;
829
0
      struct elf_link_hash_entry *h;
830
831
0
      name = entry_names[i][htab->params->ovly_flavour];
832
0
      h = elf_link_hash_lookup (&htab->elf, name, true, false, false);
833
0
      if (h == NULL)
834
0
  return 0;
835
836
0
      if (h->root.type == bfd_link_hash_new)
837
0
  {
838
0
    h->root.type = bfd_link_hash_undefined;
839
0
    h->ref_regular = 1;
840
0
    h->ref_regular_nonweak = 1;
841
0
    h->non_elf = 0;
842
0
  }
843
0
      htab->ovly_entry[i] = h;
844
0
    }
845
846
0
  return 2;
847
0
}
848
849
/* Non-zero to use bra in overlay stubs rather than br.  */
850
0
#define BRA_STUBS 0
851
852
#define BRA 0x30000000
853
#define BRASL 0x31000000
854
#define BR  0x32000000
855
#define BRSL  0x33000000
856
#define NOP 0x40200000
857
#define LNOP  0x00200000
858
#define ILA 0x42000000
859
860
/* Return true for all relative and absolute branch instructions.
861
   bra   00110000 0..
862
   brasl 00110001 0..
863
   br    00110010 0..
864
   brsl  00110011 0..
865
   brz   00100000 0..
866
   brnz  00100001 0..
867
   brhz  00100010 0..
868
   brhnz 00100011 0..  */
869
870
static bool
871
is_branch (const unsigned char *insn)
872
0
{
873
0
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
874
0
}
875
876
/* Return true for all indirect branch instructions.
877
   bi     00110101 000
878
   bisl   00110101 001
879
   iret   00110101 010
880
   bisled 00110101 011
881
   biz    00100101 000
882
   binz   00100101 001
883
   bihz   00100101 010
884
   bihnz  00100101 011  */
885
886
static bool
887
is_indirect_branch (const unsigned char *insn)
888
0
{
889
0
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
890
0
}
891
892
/* Return true for branch hint instructions.
893
   hbra  0001000..
894
   hbrr  0001001..  */
895
896
static bool
897
is_hint (const unsigned char *insn)
898
0
{
899
0
  return (insn[0] & 0xfc) == 0x10;
900
0
}
901
902
/* True if INPUT_SECTION might need overlay stubs.  */
903
904
static bool
905
maybe_needs_stubs (asection *input_section)
906
0
{
907
  /* No stubs for debug sections and suchlike.  */
908
0
  if ((input_section->flags & SEC_ALLOC) == 0)
909
0
    return false;
910
911
  /* No stubs for link-once sections that will be discarded.  */
912
0
  if (input_section->output_section == bfd_abs_section_ptr)
913
0
    return false;
914
915
  /* Don't create stubs for .eh_frame references.  */
916
0
  if (strcmp (input_section->name, ".eh_frame") == 0)
917
0
    return false;
918
919
0
  return true;
920
0
}
921
922
enum _stub_type
923
{
924
  no_stub,
925
  call_ovl_stub,
926
  br000_ovl_stub,
927
  br001_ovl_stub,
928
  br010_ovl_stub,
929
  br011_ovl_stub,
930
  br100_ovl_stub,
931
  br101_ovl_stub,
932
  br110_ovl_stub,
933
  br111_ovl_stub,
934
  nonovl_stub,
935
  stub_error
936
};
937
938
/* Return non-zero if this reloc symbol should go via an overlay stub.
939
   Return 2 if the stub must be in non-overlay area.  */
940
941
static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bool branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* No stub possible if the target symbol has no section, or its
     section is being discarded, or isn't an SPU output section.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (startswith (h->root.root.string, "setjmp")
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  /* Symbol type comes from the hash entry for globals, from the ELF
     symbol for locals.  */
  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = false;
  hint = false;
  call = false;
  /* Only 16-bit branch-capable relocs can sit on branch/hint insns;
     inspect the instruction to classify the reference.  */
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
	{
	  /* Caller didn't supply section contents; read the 4-byte
	     instruction at the reloc offset into a local buffer.  */
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* 0x31/0x33 (brasl/brsl) under mask 0xfd: branch-and-set-link,
	     ie. a call.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      _bfd_error_handler
		/* xgettext:c-format */
		(_("warning: call to non-function symbol %s defined in %pB"),
		 sym_name, sym_sec->owner);

	    }
	}
    }

  /* Soft-icache only stubs branches; also skip non-branch references
     to non-function, non-code targets (plain data references).  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      /* Bits 1-3 of the second insn byte carry .brinfo lrlive data
	 for branches.  */
      if (branch)
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	/* Encode the lrlive value in the stub type.  */
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
1072
1073
/* Record that a stub of type STUB_TYPE is needed for the symbol H (or
   the local symbol indexed by IRELA) referenced from ISEC in IBFD,
   incrementing htab->stub_count for the relevant overlay.  Returns
   false only on memory allocation failure.  */

static bool
count_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  /* Stub entries hang off the hash entry for globals, or off a
     lazily-allocated per-bfd array for locals.  */
  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
	{
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
	    return false;
	}
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  /* Soft-icache stubs are not deduplicated here; just bump the count.  */
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return true;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      /* Look for an existing non-overlay stub for this addend.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  break;

      if (g == NULL)
	{
	  /* Need a new non-overlay area stub.  Zap other stubs.  */
	  /* NOTE(review): freed entries are not unlinked from *head
	     before the new entry is pushed below — confirm traversals
	     cannot revisit them.  */
	  for (g = *head; g != NULL; g = gnext)
	    {
	      gnext = g->next;
	      if (g->addend == addend)
		{
		  htab->stub_count[g->ovl] -= 1;
		  free (g);
		}
	    }
	}
    }
  else
    {
      /* An existing non-overlay (ovl == 0) stub also serves this
	 overlay.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
    }

  if (g == NULL)
    {
      /* No suitable stub yet; record a new one at the list head.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return false;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return true;
}
1163
1164
/* Support two sizes of overlay stubs, a slower more compact stub of two
1165
   instructions, and a faster stub of four instructions.
1166
   Soft-icache stubs are four or eight words.  */
1167
1168
static unsigned int
1169
ovl_stub_size (struct spu_elf_params *params)
1170
0
{
1171
0
  return 16 << params->ovly_flavour >> params->compact_stub;
1172
0
}
1173
1174
static unsigned int
1175
ovl_stub_size_log2 (struct spu_elf_params *params)
1176
0
{
1177
0
  return 4 + params->ovly_flavour - params->compact_stub;
1178
0
}
1179
1180
/* Two instruction overlay stubs look like:
1181
1182
   brsl $75,__ovly_load
1183
   .word target_ovl_and_address
1184
1185
   ovl_and_address is a word with the overlay number in the top 14 bits
1186
   and local store address in the bottom 18 bits.
1187
1188
   Four instruction overlay stubs look like:
1189
1190
   ila $78,ovl_number
1191
   lnop
1192
   ila $79,target_address
1193
   br __ovly_load
1194
1195
   Software icache stubs are:
1196
1197
   .word target_index
1198
   .word target_ia;
1199
   .word lrlive_branchlocalstoreaddr;
1200
   brasl $75,__icache_br_handler
1201
   .quad xor_pattern
1202
*/
1203
1204
/* Emit the stub of type STUB_TYPE for symbol H (or the local symbol
   given by IRELA) into htab->stub_sec, targeting address DEST in
   DEST_SEC.  Counterpart of count_stub: the got_entry recorded there
   is found again and its stub_addr filled in.  Returns false on
   allocation failure or misaligned addresses (which also sets
   htab->stub_err).  */

static bool
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache: every reference gets its own entry, recording the
	 address of the branch that uses the stub.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return false;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      /* Find the entry count_stub recorded for this addend/overlay.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      /* A non-overlay stub serves overlay references too; nothing to
	 emit for this overlay.  */
      if (g->ovl == 0 && ovl != 0)
	return true;

      /* Stub already built.  */
      if (g->stub_addr != (bfd_vma) -1)
	return true;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  /* "to" is the overlay manager entry (__ovly_load).  */
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All three addresses must be word aligned.  */
  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return false;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      /* Four-instruction stub: ila $78,ovl; lnop; ila $79,dest;
	 br/bra __ovly_load.  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      /* Two-word stub: brsl/brasl $75,__ovly_load; .word ovl|dest.  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      /* Determine the lrlive value to encode in the stub.  */
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    /* xgettext:c-format */
	    info->callbacks->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* Non-overlay callers go via the second manager entry.  */
      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      /* The xor pattern; relative for R_SPU_REL16 branches.  */
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      /* Define a symbol "NNNNNNNN.ovl_call.NAME[+addend]" on the stub
	 for debugging.  */
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
	return false;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, true, true, false);
      free (name);
      if (h == NULL)
	return false;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return true;
}
1481
1482
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1483
   symbols.  */
1484
1485
static bool
1486
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1487
0
{
1488
  /* Symbols starting with _SPUEAR_ need a stub because they may be
1489
     invoked by the PPU.  */
1490
0
  struct bfd_link_info *info = inf;
1491
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
1492
0
  asection *sym_sec;
1493
1494
0
  if ((h->root.type == bfd_link_hash_defined
1495
0
       || h->root.type == bfd_link_hash_defweak)
1496
0
      && h->def_regular
1497
0
      && startswith (h->root.root.string, "_SPUEAR_")
1498
0
      && (sym_sec = h->root.u.def.section) != NULL
1499
0
      && sym_sec->output_section != bfd_abs_section_ptr
1500
0
      && spu_elf_section_data (sym_sec->output_section) != NULL
1501
0
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1502
0
    || htab->params->non_overlay_stubs))
1503
0
    {
1504
0
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1505
0
    }
1506
1507
0
  return true;
1508
0
}
1509
1510
static bool
1511
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1512
0
{
1513
  /* Symbols starting with _SPUEAR_ need a stub because they may be
1514
     invoked by the PPU.  */
1515
0
  struct bfd_link_info *info = inf;
1516
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
1517
0
  asection *sym_sec;
1518
1519
0
  if ((h->root.type == bfd_link_hash_defined
1520
0
       || h->root.type == bfd_link_hash_defweak)
1521
0
      && h->def_regular
1522
0
      && startswith (h->root.root.string, "_SPUEAR_")
1523
0
      && (sym_sec = h->root.u.def.section) != NULL
1524
0
      && sym_sec->output_section != bfd_abs_section_ptr
1525
0
      && spu_elf_section_data (sym_sec->output_section) != NULL
1526
0
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1527
0
    || htab->params->non_overlay_stubs))
1528
0
    {
1529
0
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1530
0
       h->root.u.def.value, sym_sec);
1531
0
    }
1532
1533
0
  return true;
1534
0
}
1535
1536
/* Size or build stubs.  */
1537
1538
/* Walk every reloc in every SPU input section.  With BUILD false,
   count the stubs needed (via count_stub); with BUILD true, emit them
   (via build_stub).  Returns false on error, freeing any reloc and
   symbol buffers it read.  */

static bool
process_stubs (struct bfd_link_info *info, bool build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Skip non-SPU input files.  */
      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* The two labels below form the common error-exit
		     path; only free buffers we own (not cached ones).  */
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (symtab_hdr->contents != (unsigned char *) local_syms)
		    free (local_syms);
		  return false;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters (index 0
		 is the non-overlay area).  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      /* Free or cache the local symbols read by get_sym_h.  */
      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return true;
}
1662
1663
/* Allocate space for overlay call and return stubs.
1664
   Return 0 on error, 1 if no overlays, 2 otherwise.  */
1665
1666
int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* First pass: count the stubs needed.  */
  if (!process_stubs (info, false))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  /* All manager sections are created on the first input bfd.  */
  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      /* One .stub section for the non-overlay area (index 0) plus one
	 per overlay, each sized from the counts gathered above.  */
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			    << htab->num_lines_log2;

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    /* No stubs were needed: no overlays.  */
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .  struct {
	 .    u32 vma;
	 .    u32 size;
	 .    u32 file_off;
	 .    u32 buf;
	 .  } _ovly_table[];
	 .
	 .  struct {
	 .    u32 mapped;
	 .  } _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  /* Table of entry symbols, filled in later.  */
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
1778
1779
/* Called from ld to place overlay manager data sections.  This is done
1780
   after the overlay manager itself is loaded, mainly so that the
1781
   linker's htab->init section is placed after any other .ovl.init
1782
   sections.  */
1783
1784
void
1785
spu_elf_place_overlay_data (struct bfd_link_info *info)
1786
0
{
1787
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
1788
0
  unsigned int i;
1789
1790
0
  if (htab->stub_sec != NULL)
1791
0
    {
1792
0
      (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1793
1794
0
      for (i = 0; i < htab->num_overlays; ++i)
1795
0
  {
1796
0
    asection *osec = htab->ovl_sec[i];
1797
0
    unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1798
0
    (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1799
0
  }
1800
0
    }
1801
1802
0
  if (htab->params->ovly_flavour == ovly_soft_icache)
1803
0
    (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1804
1805
0
  if (htab->ovtab != NULL)
1806
0
    {
1807
0
      const char *ovout = ".data";
1808
0
      if (htab->params->ovly_flavour == ovly_soft_icache)
1809
0
  ovout = ".bss";
1810
0
      (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1811
0
    }
1812
1813
0
  if (htab->toe != NULL)
1814
0
    (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1815
0
}
1816
1817
/* Functions to handle embedded spu_ovl.o object.  */
1818
1819
/* iovec "open" for the built-in overlay manager image: the stream
   cookie (a struct _ovl_stream *) is the whole state, so return it
   unchanged.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
1824
1825
static file_ptr
1826
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1827
         void *stream,
1828
         void *buf,
1829
         file_ptr nbytes,
1830
         file_ptr offset)
1831
0
{
1832
0
  struct _ovl_stream *os;
1833
0
  size_t count;
1834
0
  size_t max;
1835
1836
0
  os = (struct _ovl_stream *) stream;
1837
0
  max = (const char *) os->end - (const char *) os->start;
1838
1839
0
  if ((ufile_ptr) offset >= max)
1840
0
    return 0;
1841
1842
0
  count = nbytes;
1843
0
  if (count > max - offset)
1844
0
    count = max - offset;
1845
1846
0
  memcpy (buf, (const char *) os->start + offset, count);
1847
0
  return count;
1848
0
}
1849
1850
static int
1851
ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1852
        void *stream,
1853
        struct stat *sb)
1854
0
{
1855
0
  struct _ovl_stream *os = (struct _ovl_stream *) stream;
1856
1857
0
  memset (sb, 0, sizeof (*sb));
1858
0
  sb->st_size = (const char *) os->end - (const char *) os->start;
1859
0
  return 0;
1860
0
}
1861
1862
/* Open the embedded spu_ovl.o image described by STREAM as a bfd via
   the iovec callbacks above, storing it in *OVL_BFD.  Returns true on
   success.  */

bool
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
			      "elf32-spu",
			      ovl_mgr_open,
			      (void *) stream,
			      ovl_mgr_pread,
			      NULL,		/* No close callback needed.  */
			      ovl_mgr_stat);
  return *ovl_bfd != NULL;
}
1874
1875
static unsigned int
1876
overlay_index (asection *sec)
1877
0
{
1878
0
  if (sec == NULL
1879
0
      || sec->output_section == bfd_abs_section_ptr)
1880
0
    return 0;
1881
0
  return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1882
0
}
1883
1884
/* Define an STT_OBJECT symbol.  */
1885
1886
/* Define NAME as an STT_OBJECT symbol in HTAB's overlay table section.
   Returns the hash entry on success, NULL on error (lookup failure, or
   NAME was already defined somewhere the linker doesn't permit).  The
   caller fills in the symbol's value, section and size afterwards.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  /* Create the entry if it doesn't exist yet (create=true).  */
  h = elf_link_hash_lookup (&htab->elf, name, true, false, false);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      /* Not yet defined by a regular object: claim it for .ovtab.  */
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else if (h->root.u.def.section->owner != NULL)
    {
      /* Already defined by an input object file: reject it.  */
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB is not allowed to define %s"),
			  h->root.u.def.section->owner,
			  h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  else
    {
      /* Defined with no owner, i.e. by a linker script: also reject.  */
      _bfd_error_handler (_("you are not allowed to define %s in a script"),
			  h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}
1925
1926
/* Fill in all stubs and the overlay tables.  */
1927
1928
/* Fill in all overlay stubs and the overlay tables.  Verifies the
   overlay entry symbols live outside overlay sections, allocates and
   fills the stub sections (checking their final size matches the
   earlier sizing pass), then populates .ovtab: either the soft-icache
   control symbols or the classic _ovly_table/_ovly_buf_table layout,
   plus the _EAR_ marker in .toe.  Returns false and sets a BFD error
   on any failure.  */

static bool
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->num_overlays != 0)
    {
      /* The two overlay manager entry points must not themselves be
	 placed in an overlay section.  */
      for (i = 0; i < 2; i++)
	{
	  h = htab->ovly_entry[i];
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular)
	    {
	      s = h->root.u.def.section->output_section;
	      if (spu_elf_section_data (s)->u.o.ovl_index)
		{
		  _bfd_error_handler (_("%s in overlay section"),
				      h->root.root.string);
		  bfd_set_error (bfd_error_bad_value);
		  return false;
		}
	    }
	}
    }

  if (htab->stub_sec != NULL)
    {
      /* Allocate stub section contents; stash the sized length in
	 rawsize and reset size so the fill pass below can re-count.  */
      for (i = 0; i <= htab->num_overlays; i++)
	if (htab->stub_sec[i]->size != 0)
	  {
	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						      htab->stub_sec[i]->size);
	    if (htab->stub_sec[i]->contents == NULL)
	      return false;
	    htab->stub_sec[i]->alloced = 1;
	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	    htab->stub_sec[i]->size = 0;
	  }

      /* Fill in all the stubs.  */
      process_stubs (info, true);
      if (!htab->stub_err)
	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
	{
	  _bfd_error_handler (_("overlay stub relocation overflow"));
	  bfd_set_error (bfd_error_bad_value);
	  return false;
	}

      /* The fill pass must have produced exactly as many bytes as the
	 sizing pass predicted.  */
      for (i = 0; i <= htab->num_overlays; i++)
	{
	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	    {
	      _bfd_error_handler  (_("stubs don't match calculated size"));
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }
	  htab->stub_sec[i]->rawsize = 0;
	}
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return true;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return false;
  htab->ovtab->alloced = 1;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft icache: define the control symbols the overlay manager
	 reads.  OFF tracks the running offset within .ovtab; the
	 *_size/log2 symbols are absolute constants.  */
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return false;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return false;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return false;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return false;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return false;
	  htab->init->alloced = 1;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return false;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  Each entry is 16 bytes:
	 vma, size, file_off, buf.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return false;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return false;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  /* _EAR_ marks the start of the effective-address table in .toe.  */
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return false;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return true;
}
2173
2174
/* Check that all loadable section VMAs lie in the range
2175
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
2176
2177
asection *
2178
spu_elf_check_vma (struct bfd_link_info *info)
2179
0
{
2180
0
  struct elf_segment_map *m;
2181
0
  unsigned int i;
2182
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
2183
0
  bfd *abfd = info->output_bfd;
2184
0
  bfd_vma hi = htab->params->local_store_hi;
2185
0
  bfd_vma lo = htab->params->local_store_lo;
2186
2187
0
  htab->local_store = hi + 1 - lo;
2188
2189
0
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2190
0
    if (m->p_type == PT_LOAD)
2191
0
      for (i = 0; i < m->count; i++)
2192
0
  if (m->sections[i]->size != 0
2193
0
      && (m->sections[i]->vma < lo
2194
0
    || m->sections[i]->vma > hi
2195
0
    || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2196
0
    return m->sections[i];
2197
2198
0
  return NULL;
2199
0
}
2200
2201
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
2202
   Search for stack adjusting insns, and return the sp delta.
2203
   If a store of lr is found save the instruction offset to *LR_STORE.
2204
   If a stack adjusting instruction is found, save that offset to
2205
   *SP_ADJUST.  */
2206
2207
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Decode SPU instructions forward from OFFSET, tracking constant
   register values in REG[], until the stack pointer (register 1) is
   decremented or a branch ends the prologue.  Returns the (positive)
   stack frame size, or 0 if none was found.
   If a store of lr is found save the instruction offset to *LR_STORE.
   If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.  */

static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  /* Simulated values of the 128 SPU registers; all start at 0.  */
  int32_t reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      uint32_t imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Common field extraction: rt in low 7 bits, ra above it.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  /* Note where the link register is saved to the stack.  */
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Add 10-bit sign-extended immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive adjustment would be an epilogue; stop.  */
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  /* Subtract-from: rt = rb - ra.  */
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate-load family: materialize a constant in rt.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* OR halfword lower: completes an ilhu/iohl pair.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  /* Form select mask for bytes: expand 4 immediate bits into
	     byte masks of the tracked 32-bit value.  */
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  /* AND with byte immediate replicated across the word.  */
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  return 0;
}
2342
2343
/* qsort predicate to sort symbols by section and value.  */
2344
2345
static Elf_Internal_Sym *sort_syms_syms;
2346
static asection **sort_syms_psecs;
2347
2348
static int
2349
sort_syms (const void *a, const void *b)
2350
0
{
2351
0
  Elf_Internal_Sym *const *s1 = a;
2352
0
  Elf_Internal_Sym *const *s2 = b;
2353
0
  asection *sec1,*sec2;
2354
0
  bfd_signed_vma delta;
2355
2356
0
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2357
0
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2358
2359
0
  if (sec1 != sec2)
2360
0
    return sec1->index - sec2->index;
2361
2362
0
  delta = (*s1)->st_value - (*s2)->st_value;
2363
0
  if (delta != 0)
2364
0
    return delta < 0 ? -1 : 1;
2365
2366
0
  delta = (*s2)->st_size - (*s1)->st_size;
2367
0
  if (delta != 0)
2368
0
    return delta < 0 ? -1 : 1;
2369
2370
0
  return *s1 < *s2 ? -1 : 1;
2371
0
}
2372
2373
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2374
   entries for section SEC.  */
2375
2376
static struct spu_elf_stack_info *
2377
alloc_stack_info (asection *sec, int max_fun)
2378
0
{
2379
0
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2380
0
  bfd_size_type amt;
2381
2382
0
  amt = sizeof (struct spu_elf_stack_info);
2383
0
  amt += (max_fun - 1) * sizeof (struct function_info);
2384
0
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
2385
0
  if (sec_data->u.i.stack_info != NULL)
2386
0
    sec_data->u.i.stack_info->max_fun = max_fun;
2387
0
  return sec_data->u.i.stack_info;
2388
0
}
2389
2390
/* Add a new struct function_info describing a (part of a) function
2391
   starting at SYM_H.  Keep the array sorted by address.  */
2392
2393
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H (an Elf_Internal_Sym * when !GLOBAL, else a
   struct elf_link_hash_entry *).  Keep the array sorted by address.
   Returns the (possibly pre-existing) entry, or NULL on OOM.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bool global,
		       bool is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section function array.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* Extract start offset and size from whichever symbol form we got.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last entry whose start is at or below OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = true;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = true;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array by ~50% plus 20 slots when full; the tail past the
     old allocation is zeroed so new entries start clean.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Shift later entries up to make room at position I + 1.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  /* Prologue analysis: record stack usage (negated adjustment) and
     the lr-store/sp-adjust instruction offsets.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
2484
2485
/* Return the name of FUN.  */
2486
2487
static const char *
2488
func_name (struct function_info *fun)
2489
0
{
2490
0
  asection *sec;
2491
0
  bfd *ibfd;
2492
0
  Elf_Internal_Shdr *symtab_hdr;
2493
2494
0
  while (fun->start != NULL)
2495
0
    fun = fun->start;
2496
2497
0
  if (fun->global)
2498
0
    return fun->u.h->root.root.string;
2499
2500
0
  sec = fun->sec;
2501
0
  if (fun->u.sym->st_name == 0)
2502
0
    {
2503
0
      size_t len = strlen (sec->name);
2504
0
      char *name = bfd_malloc (len + 10);
2505
0
      if (name == NULL)
2506
0
  return "(null)";
2507
0
      sprintf (name, "%s+%lx", sec->name,
2508
0
         (unsigned long) fun->u.sym->st_value & 0xffffffff);
2509
0
      return name;
2510
0
    }
2511
0
  ibfd = sec->owner;
2512
0
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2513
0
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2514
0
}
2515
2516
/* Read the instruction at OFF in SEC.  Return true iff the instruction
2517
   is a nop, lnop, or stop 0 (all zero insn).  */
2518
2519
static bool
2520
is_nop (asection *sec, bfd_vma off)
2521
0
{
2522
0
  unsigned char insn[4];
2523
2524
0
  if (off + 4 > sec->size
2525
0
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2526
0
    return false;
2527
0
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2528
0
    return true;
2529
0
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2530
0
    return true;
2531
0
  return false;
2532
0
}
2533
2534
/* Extend the range of FUN to cover nop padding up to LIMIT.
2535
   Return TRUE iff some instruction other than a NOP was found.  */
2536
2537
static bool
2538
insns_at_end (struct function_info *fun, bfd_vma limit)
2539
0
{
2540
0
  bfd_vma off = (fun->hi + 3) & -4;
2541
2542
0
  while (off < limit && is_nop (fun->sec, off))
2543
0
    off += 4;
2544
0
  if (off < limit)
2545
0
    {
2546
0
      fun->hi = off;
2547
0
      return true;
2548
0
    }
2549
0
  fun->hi = limit;
2550
0
  return false;
2551
0
}
2552
2553
/* Check and fix overlapping function ranges.  Return TRUE iff there
2554
   are gaps in the current info we have about functions in SEC.  */
2555
2556
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC —
   i.e. regions not covered by any known function.  Overlaps are
   repaired in place by truncating the earlier function.  */

static bool
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bool gaps = false;

  /* No function info at all means nothing to check.  */
  if (sinfo == NULL)
    return false;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	/* xgettext:c-format */
	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      /* Real instructions between two functions: a coverage gap.  */
      gaps = true;

  if (sinfo->num_fun == 0)
    gaps = true;
  else
    {
      /* Also check before the first and after the last function.  */
      if (sinfo->fun[0].lo != 0)
	gaps = true;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = true;
    }
  return gaps;
}
2599
2600
/* Search current function info for a function that contains address
2601
   OFFSET in section SEC.  */
2602
2603
static struct function_info *
2604
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2605
0
{
2606
0
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2607
0
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2608
0
  int lo, hi, mid;
2609
2610
0
  lo = 0;
2611
0
  hi = sinfo->num_fun;
2612
0
  while (lo < hi)
2613
0
    {
2614
0
      mid = (lo + hi) / 2;
2615
0
      if (offset < sinfo->fun[mid].lo)
2616
0
  hi = mid;
2617
0
      else if (offset >= sinfo->fun[mid].hi)
2618
0
  lo = mid + 1;
2619
0
      else
2620
0
  return &sinfo->fun[mid];
2621
0
    }
2622
  /* xgettext:c-format */
2623
0
  info->callbacks->einfo (_("%pA:0x%v not found in function table\n"),
2624
0
        sec, offset);
2625
0
  bfd_set_error (bfd_error_bad_value);
2626
0
  return NULL;
2627
0
}
2628
2629
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
2630
   if CALLEE was new.  If this function return FALSE, CALLEE should
2631
   be freed.  */
2632
2633
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
   if CALLEE was new.  If this function return FALSE, CALLEE should
   be freed.  On a duplicate, the existing entry is updated (call
   counts merged, tail-call status downgraded) and moved to the head
   of the list.  */

static bool
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
	/* Tail calls use less stack than normal calls.  Retain entry
	   for normal call over one for tail call.  */
	p->is_tail &= callee->is_tail;
	if (!p->is_tail)
	  {
	    /* A real call means the destination is a genuine function,
	       not a continuation of the caller.  */
	    p->fun->start = NULL;
	    p->fun->is_func = true;
	  }
	p->count += callee->count;
	/* Reorder list so most recent call is first.  */
	*pp = p->next;
	p->next = caller->call_list;
	caller->call_list = p;
	return false;
      }
  /* Not found: link the new entry at the head.  */
  callee->next = caller->call_list;
  caller->call_list = callee;
  return true;
}
2660
2661
/* Copy CALL and insert the copy into CALLER.  */
2662
2663
static bool
2664
copy_callee (struct function_info *caller, const struct call_info *call)
2665
0
{
2666
0
  struct call_info *callee;
2667
0
  callee = bfd_malloc (sizeof (*callee));
2668
0
  if (callee == NULL)
2669
0
    return false;
2670
0
  *callee = *call;
2671
0
  if (!insert_callee (caller, callee))
2672
0
    free (callee);
2673
0
  return true;
2674
0
}
2675
2676
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
2677
   overlay stub sections.  */
2678
2679
static bool
2680
interesting_section (asection *s)
2681
0
{
2682
0
  return (s->output_section != bfd_abs_section_ptr
2683
0
    && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2684
0
        == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2685
0
    && s->size != 0);
2686
0
}
2687
2688
/* Rummage through the relocs for SEC, looking for function calls.
2689
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
2690
   mark destination symbols on calls as being functions.  Also
2691
   look at branches, which may be tail calls or go to hot/cold
2692
   section part of same function.  */
2693
2694
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  Returns false on error or OOM.  */

static bool
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Emit the "call to non-code section" diagnostic only once.  */
  static bool warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return true;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return false;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bool nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Only REL16/ADDR16 relocs can sit on branch instructions.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return false;

      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = false;
      if (!nonbranch)
	{
	  unsigned char insn[4];

	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return false;
	  if (is_branch (insn))
	    {
	      /* Distinguish call (branch-and-set-link) from plain
		 branch, and extract the branch priority bits.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      /* xgettext:c-format */
		      (_("%pB(%pA+0x%v): call to non-code section"
			 " %pB(%pA), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = true;
		  continue;
		}
	    }
	  else
	    {
	      /* Reloc sits on a non-branch instruction after all.  */
	      nonbranch = true;
	      if (is_hint (insn))
		continue;
	    }
	}

      if (nonbranch)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    {
	      if (call_tree && spu_hash_table (info)->params->auto_overlay)
		spu_hash_table (info)->non_ovly_stub += 1;
	      /* If the symbol type is STT_FUNC then this must be a
		 function pointer initialisation.  */
	      continue;
	    }
	  /* Ignore data references.  */
	  if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	      != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	    continue;
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code label.  */
	}

      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record the destination as a function.
	     A non-zero addend targets the middle of a symbol, so a
	     fake symbol at the precise address is synthesized.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return false;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, false, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, true, is_call);
	  if (fun == NULL)
	    return false;
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: build the call graph edge caller -> callee.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return false;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return false;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return false;
      callee->is_tail = !is_call;
      callee->is_pasted = false;
      callee->broken_cycle = false;
      callee->priority = priority;
      callee->count = nonbranch? 0 : 1;
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = true;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      /* Link the destination to the root of the caller's
		 start chain.  */
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      /* Both sides already have start chains: if their roots
		 differ, the destination must be a real function.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = true;
		}
	    }
	}
    }

  return true;
}
2904
2905
/* Handle something like .init or .fini, which has a piece of a function.
2906
   These sections are pasted together to form a single function.  */
2907
2908
static bool
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Manufacture a symbol covering the whole section and register it
     as a function; SEC has no real function symbols of its own.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return false;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, false, false);
  if (!fun)
    return false;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Record the paste as a tail call from the preceding
		 piece to this one, so the stack analysis treats the
		 pasted pieces as one function.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return false;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = true;
	      callee->is_pasted = true;
	      callee->broken_cycle = false;
	      callee->priority = 0;
	      callee->count = 1;
	      /* insert_callee takes ownership on success; on failure
		 (duplicate merged) the spare record is freed.  */
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return true;
	    }
	  break;
	}
      /* Remember the last function of the most recent section that
	 carries stack info; it becomes the paste predecessor.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return true;
}
2964
2965
/* Map address ranges in code sections to functions.  */
2966
2967
static bool
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bool gaps = false;

  /* Count input bfds so we can allocate per-bfd symbol arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    bfd_idx++;

  /* NOTE(review): the early-return error paths below leak psym_arr,
     sec_arr and their per-bfd entries; the link fails anyway, but
     worth confirming this is intentional.  */
  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return false;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return false;

  /* Pass 1: for each SPU input bfd, read symbols, select the function
     candidates, and install properly typed function symbols.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all: any interesting code section implies
	     gaps in function coverage.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = true;
		  break;
		}
	  continue;
	}

      /* Don't use cached symbols since the generic ELF linker
	 code only reads local symbols, and we need globals too.  */
      free (symtab_hdr->contents);
      symtab_hdr->contents = NULL;
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return false;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return false;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return false;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  Allocate stack_info with
	 room for the run of symbols belonging to each section.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return false;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, false, true))
		return false;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, false))
	      return false;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check coverage; skip bfds whose sections are now fully
	     covered by functions found via relocations.  */
	  gaps = false;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, false, false))
		    return false;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  extern const bfd_target spu_elf32_vec;
	  asection *sec;

	  if (ibfd->xvec != &spu_elf32_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk functions backwards, clamping each one's
		       upper bound to the start of its successor.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return false;
	      }
	}
    }

  /* Release the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return true;
}
3204
3205
/* Iterate over all function_info we have collected, calling DOIT on
3206
   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
3207
   if ROOT_ONLY.  */
3208
3209
static bool
3210
for_each_node (bool (*doit) (struct function_info *,
3211
           struct bfd_link_info *,
3212
           void *),
3213
         struct bfd_link_info *info,
3214
         void *param,
3215
         int root_only)
3216
0
{
3217
0
  bfd *ibfd;
3218
3219
0
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3220
0
    {
3221
0
      extern const bfd_target spu_elf32_vec;
3222
0
      asection *sec;
3223
3224
0
      if (ibfd->xvec != &spu_elf32_vec)
3225
0
  continue;
3226
3227
0
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3228
0
  {
3229
0
    struct _spu_elf_section_data *sec_data;
3230
0
    struct spu_elf_stack_info *sinfo;
3231
3232
0
    if ((sec_data = spu_elf_section_data (sec)) != NULL
3233
0
        && (sinfo = sec_data->u.i.stack_info) != NULL)
3234
0
      {
3235
0
        int i;
3236
0
        for (i = 0; i < sinfo->num_fun; ++i)
3237
0
    if (!root_only || !sinfo->fun[i].non_root)
3238
0
      if (!doit (&sinfo->fun[i], info, param))
3239
0
        return false;
3240
0
      }
3241
0
  }
3242
0
    }
3243
0
  return true;
3244
0
}
3245
3246
/* Transfer call info attached to struct function_info entries for
3247
   all of a given function's sections to the first entry.  */
3248
3249
static bool
3250
transfer_calls (struct function_info *fun,
3251
    struct bfd_link_info *info ATTRIBUTE_UNUSED,
3252
    void *param ATTRIBUTE_UNUSED)
3253
0
{
3254
0
  struct function_info *start = fun->start;
3255
3256
0
  if (start != NULL)
3257
0
    {
3258
0
      struct call_info *call, *call_next;
3259
3260
0
      while (start->start != NULL)
3261
0
  start = start->start;
3262
0
      for (call = fun->call_list; call != NULL; call = call_next)
3263
0
  {
3264
0
    call_next = call->next;
3265
0
    if (!insert_callee (start, call))
3266
0
      free (call);
3267
0
  }
3268
0
      fun->call_list = NULL;
3269
0
    }
3270
0
  return true;
3271
0
}
3272
3273
/* Mark nodes in the call graph that are called by some other node.  */
3274
3275
static bool
3276
mark_non_root (struct function_info *fun,
3277
         struct bfd_link_info *info ATTRIBUTE_UNUSED,
3278
         void *param ATTRIBUTE_UNUSED)
3279
0
{
3280
0
  struct call_info *call;
3281
3282
0
  if (fun->visit1)
3283
0
    return true;
3284
0
  fun->visit1 = true;
3285
0
  for (call = fun->call_list; call; call = call->next)
3286
0
    {
3287
0
      call->fun->non_root = true;
3288
0
      mark_non_root (call->fun, 0, 0);
3289
0
    }
3290
0
  return true;
3291
0
}
3292
3293
/* Remove cycles from the call graph.  Set depth of nodes.  */
3294
3295
static bool
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  /* PARAM carries the depth of FUN in on entry and the maximum depth
     of the subtree rooted at FUN back out.  */
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = true;
  /* `marking' is set only while FUN is on the current DFS path, so a
     visited-and-marking callee means we have found a cycle.  */
  fun->marking = true;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted pieces belong to the same function, so they do not
	 add a level of call depth.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return false;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      /* xgettext:c-format */
	      info->callbacks->info (_("stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  /* Keep the edge in the graph but mark it so that later
	     traversals do not follow it.  */
	  call->broken_cycle = true;
	}
      callp = &call->next;
    }
  fun->marking = false;
  *(unsigned int *) param = max_depth;
  return true;
}
3343
3344
/* Check that we actually visited all nodes in remove_cycles.  If we
3345
   didn't, then there is some cycle in the call graph not attached to
3346
   any root node.  Arbitrarily choose a node in the cycle as a new
3347
   root and break the cycle.  */
3348
3349
static bool
3350
mark_detached_root (struct function_info *fun,
3351
        struct bfd_link_info *info,
3352
        void *param)
3353
0
{
3354
0
  if (fun->visit2)
3355
0
    return true;
3356
0
  fun->non_root = false;
3357
0
  *(unsigned int *) param = 0;
3358
0
  return remove_cycles (fun, info, param);
3359
0
}
3360
3361
/* Populate call_list for each function.  */
3362
3363
static bool
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  unsigned int depth;

  /* Record call edges by scanning relocations in every section of
     every SPU input bfd.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (!mark_functions_via_relocs (sec, info, true))
	  return false;
    }

  /* Transfer call info from hot/cold section part of function
     to main entry.  */
  if (!spu_hash_table (info)->params->auto_overlay
      && !for_each_node (transfer_calls, info, 0, false))
    return false;

  /* Find the call graph root(s).  */
  if (!for_each_node (mark_non_root, info, 0, false))
    return false;

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  depth = 0;
  if (!for_each_node (remove_cycles, info, &depth, true))
    return false;

  /* Handle any cycles not reachable from a root.  */
  return for_each_node (mark_detached_root, info, &depth, false);
}
3400
3401
/* qsort predicate to sort calls by priority, max_depth then count.  */
3402
3403
static int
3404
sort_calls (const void *a, const void *b)
3405
0
{
3406
0
  struct call_info *const *c1 = a;
3407
0
  struct call_info *const *c2 = b;
3408
0
  int delta;
3409
3410
0
  delta = (*c2)->priority - (*c1)->priority;
3411
0
  if (delta != 0)
3412
0
    return delta;
3413
3414
0
  delta = (*c2)->max_depth - (*c1)->max_depth;
3415
0
  if (delta != 0)
3416
0
    return delta;
3417
3418
0
  delta = (*c2)->count - (*c1)->count;
3419
0
  if (delta != 0)
3420
0
    return delta;
3421
3422
0
  return (char *) c1 - (char *) c2;
3423
0
}
3424
3425
struct _mos_param {
  /* Largest text (+ optional rodata) size seen so far for any
     section marked as an overlay candidate.  */
  unsigned int max_overlay_size;
};
3428
3429
/* Set linker_mark and gc_mark on any sections that we will put in
3430
   overlays.  These flags are used by the generic ELF linker, but we
3431
   won't be continuing on to bfd_elf_final_link so it is OK to use
3432
   them.  linker_mark is clear before we get here.  Set segment_mark
3433
   on sections that are part of a pasted function (excluding the last
3434
   section).
3435
3436
   Set up function rodata section if --overlay-rodata.  We don't
3437
   currently include merged string constant rodata sections since
3438
3439
   Sort the call graph so that the deepest nodes will be visited
3440
   first.  */
3441
3442
static bool
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return true;

  fun->visit4 = true;
  /* Mark this function's section as an overlay candidate, unless the
     soft-icache flavour restricts candidates to specific sections.  */
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || startswith (fun->sec->name, ".text.ia.")
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return false;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (startswith (fun->sec->name, ".text."))
	    {
	      /* ".text.foo" -> ".rodata.foo": one char longer plus
		 terminator, hence len + 3.  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return false;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (startswith (fun->sec->name, ".gnu.linkonce.t."))
	    {
	      /* ".gnu.linkonce.t.foo" -> ".gnu.linkonce.r.foo":
		 just patch the 't' at index 14.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return false;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      /* For grouped sections search the group; otherwise look
		 the name up in the owning bfd.  */
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  /* Drop the rodata again if it would push us over the
		     icache line size.  */
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort the call list so deeper/higher-priority callees come first;
     collect_overlays relies on this ordering.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return false;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Re-thread the singly linked list in sorted order.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return false;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || startswith (fun->sec->output_section->name, ".ovl.init"))
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return true;
}
3595
3596
/* If non-zero then unmark functions called from those within sections
3597
   that we need to unmark.  Unfortunately this isn't reliable since the
3598
   call graph cannot know the destination of function pointer calls.  */
3599
0
#define RECURSE_UNMARK 0

struct _uos_param {
  /* Section whose functions must stay out of overlays.  */
  asection *exclude_input_section;
  /* Output section whose member sections must stay out of overlays.  */
  asection *exclude_output_section;
  /* Nesting count of excluded callers; only consulted when
     RECURSE_UNMARK is non-zero.  */
  unsigned long clearing;
};
3606
3607
/* Undo some of mark_overlay_section's work.  */
3608
3609
static bool
3610
unmark_overlay_section (struct function_info *fun,
3611
      struct bfd_link_info *info,
3612
      void *param)
3613
0
{
3614
0
  struct call_info *call;
3615
0
  struct _uos_param *uos_param = param;
3616
0
  unsigned int excluded = 0;
3617
3618
0
  if (fun->visit5)
3619
0
    return true;
3620
3621
0
  fun->visit5 = true;
3622
3623
0
  excluded = 0;
3624
0
  if (fun->sec == uos_param->exclude_input_section
3625
0
      || fun->sec->output_section == uos_param->exclude_output_section)
3626
0
    excluded = 1;
3627
3628
0
  if (RECURSE_UNMARK)
3629
0
    uos_param->clearing += excluded;
3630
3631
0
  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3632
0
    {
3633
0
      fun->sec->linker_mark = 0;
3634
0
      if (fun->rodata)
3635
0
  fun->rodata->linker_mark = 0;
3636
0
    }
3637
3638
0
  for (call = fun->call_list; call != NULL; call = call->next)
3639
0
    if (!call->broken_cycle
3640
0
  && !unmark_overlay_section (call->fun, info, param))
3641
0
      return false;
3642
3643
0
  if (RECURSE_UNMARK)
3644
0
    uos_param->clearing -= excluded;
3645
0
  return true;
3646
0
}
3647
3648
struct _cl_param {
  /* Remaining space available for non-overlay "library" code.  */
  unsigned int lib_size;
  /* Output cursor into an array of (text, rodata) section pairs.  */
  asection **lib_sections;
};
3652
3653
/* Add sections we have marked as belonging to overlays to an array
3654
   for consideration as non-overlay sections.  The array consist of
3655
   pairs of sections, (text,rodata), for functions in the call graph.  */
3656
3657
static bool
3658
collect_lib_sections (struct function_info *fun,
3659
          struct bfd_link_info *info,
3660
          void *param)
3661
0
{
3662
0
  struct _cl_param *lib_param = param;
3663
0
  struct call_info *call;
3664
0
  unsigned int size;
3665
3666
0
  if (fun->visit6)
3667
0
    return true;
3668
3669
0
  fun->visit6 = true;
3670
0
  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3671
0
    return true;
3672
3673
0
  size = fun->sec->size;
3674
0
  if (fun->rodata)
3675
0
    size += fun->rodata->size;
3676
3677
0
  if (size <= lib_param->lib_size)
3678
0
    {
3679
0
      *lib_param->lib_sections++ = fun->sec;
3680
0
      fun->sec->gc_mark = 0;
3681
0
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3682
0
  {
3683
0
    *lib_param->lib_sections++ = fun->rodata;
3684
0
    fun->rodata->gc_mark = 0;
3685
0
  }
3686
0
      else
3687
0
  *lib_param->lib_sections++ = NULL;
3688
0
    }
3689
3690
0
  for (call = fun->call_list; call != NULL; call = call->next)
3691
0
    if (!call->broken_cycle)
3692
0
      collect_lib_sections (call->fun, info, param);
3693
3694
0
  return true;
3695
0
}
3696
3697
/* qsort predicate to sort sections by call count.  */
3698
3699
static int
3700
sort_lib (const void *a, const void *b)
3701
0
{
3702
0
  asection *const *s1 = a;
3703
0
  asection *const *s2 = b;
3704
0
  struct _spu_elf_section_data *sec_data;
3705
0
  struct spu_elf_stack_info *sinfo;
3706
0
  int delta;
3707
3708
0
  delta = 0;
3709
0
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
3710
0
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3711
0
    {
3712
0
      int i;
3713
0
      for (i = 0; i < sinfo->num_fun; ++i)
3714
0
  delta -= sinfo->fun[i].call_count;
3715
0
    }
3716
3717
0
  if ((sec_data = spu_elf_section_data (*s2)) != NULL
3718
0
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3719
0
    {
3720
0
      int i;
3721
0
      for (i = 0; i < sinfo->num_fun; ++i)
3722
0
  delta += sinfo->fun[i].call_count;
3723
0
    }
3724
3725
0
  if (delta != 0)
3726
0
    return delta;
3727
3728
0
  return s1 - s2;
3729
0
}
3730
3731
/* Remove some sections from those marked to be in overlays.  Choose
3732
   those that are called from many places, likely library functions.  */
3733
3734
static unsigned int
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
{
  bfd *ibfd;
  asection **lib_sections;
  unsigned int i, lib_count;
  struct _cl_param collect_lib_param;
  struct function_info dummy_caller;
  struct spu_link_hash_table *htab;

  /* DUMMY_CALLER accumulates one call record per overlay section the
     remaining library candidates still call, i.e. the set of call
     stubs that would be needed.  */
  memset (&dummy_caller, 0, sizeof (dummy_caller));
  /* Count an upper bound on candidate sections so we can size the
     pairs array.  */
  lib_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (sec->linker_mark
	    && sec->size < lib_size
	    && (sec->flags & SEC_CODE) != 0)
	  lib_count += 1;
    }
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
  if (lib_sections == NULL)
    return (unsigned int) -1;
  collect_lib_param.lib_size = lib_size;
  collect_lib_param.lib_sections = lib_sections;
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
		      true))
    return (unsigned int) -1;
  /* The collector advanced its cursor two slots per candidate.  */
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;

  /* Sort sections so that those with the most calls are first.  */
  if (lib_count > 1)
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);

  htab = spu_hash_table (info);
  for (i = 0; i < lib_count; i++)
    {
      unsigned int tmp, stub_size;
      asection *sec;
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      sec = lib_sections[2 * i];
      /* If this section is OK, its size must be less than lib_size.  */
      tmp = sec->size;
      /* If it has a rodata section, then add that too.  */
      if (lib_sections[2 * i + 1])
	tmp += lib_sections[2 * i + 1]->size;
      /* Add any new overlay call stubs needed by the section.  */
      stub_size = 0;
      if (tmp < lib_size
	  && (sec_data = spu_elf_section_data (sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int k;
	  struct call_info *call;

	  for (k = 0; k < sinfo->num_fun; ++k)
	    for (call = sinfo->fun[k].call_list; call; call = call->next)
	      if (call->fun->sec->linker_mark)
		{
		  /* Charge a stub only for callees not already
		     accounted for in dummy_caller.  */
		  struct call_info *p;
		  for (p = dummy_caller.call_list; p; p = p->next)
		    if (p->fun == call->fun)
		      break;
		  if (!p)
		    stub_size += ovl_stub_size (htab->params);
		}
	}
      if (tmp + stub_size < lib_size)
	{
	  struct call_info **pp, *p;

	  /* This section fits.  Mark it as non-overlay.  */
	  lib_sections[2 * i]->linker_mark = 0;
	  if (lib_sections[2 * i + 1])
	    lib_sections[2 * i + 1]->linker_mark = 0;
	  lib_size -= tmp + stub_size;
	  /* Call stubs to the section we just added are no longer
	     needed.  */
	  pp = &dummy_caller.call_list;
	  while ((p = *pp) != NULL)
	    if (!p->fun->sec->linker_mark)
	      {
		/* Reclaim the stub space previously charged.  */
		lib_size += ovl_stub_size (htab->params);
		*pp = p->next;
		free (p);
	      }
	    else
	      pp = &p->next;
	  /* Add new call stubs to dummy_caller.  */
	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->u.i.stack_info) != NULL)
	    {
	      int k;
	      struct call_info *call;

	      for (k = 0; k < sinfo->num_fun; ++k)
		for (call = sinfo->fun[k].call_list;
		     call;
		     call = call->next)
		  if (call->fun->sec->linker_mark)
		    {
		      struct call_info *callee;
		      callee = bfd_malloc (sizeof (*callee));
		      if (callee == NULL)
			return (unsigned int) -1;
		      *callee = *call;
		      if (!insert_callee (&dummy_caller, callee))
			free (callee);
		    }
	    }
	}
    }
  /* Release the accumulated stub records.  */
  while (dummy_caller.call_list != NULL)
    {
      struct call_info *call = dummy_caller.call_list;
      dummy_caller.call_list = call->next;
      free (call);
    }
  /* Restore gc_mark, cleared by collect_lib_sections.  */
  for (i = 0; i < 2 * lib_count; i++)
    if (lib_sections[i])
      lib_sections[i]->gc_mark = 1;
  free (lib_sections);
  return lib_size;
}
3866
3867
/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.

   PARAM is really an asection*** cursor into the output array; the
   array stores pairs of (code section, rodata section or NULL).
   Returns false only on a failed recursive call.  */

static bool
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bool added_fun;
  asection ***ovly_sections = param;

  /* visit7 guards against processing a node twice.  */
  if (fun->visit7)
    return true;

  fun->visit7 = true;
  /* First, descend into the deepest non-pasted callee so its section
     is emitted before ours (only the first such callee — hence the
     "break").  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return false;
	break;
      }

  added_fun = false;
  /* linker_mark means "candidate for an overlay"; gc_mark is used here
     as a "not yet emitted" flag and is cleared once the section is
     placed in the array.  */
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	/* Keep the array in (code, rodata) pairs.  */
	*(*ovly_sections)++ = NULL;
      added_fun = true;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* segment_mark set implies a pasted callee must exist.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit every remaining callee.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return false;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also visit any other functions that live in the same section,
	 so their callees get grouped nearby.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return false;
	}
    }

  return true;
}
3953
3954
/* Parameter block threaded through sum_stack via for_each_node.  */
struct _sum_stack_param {
  size_t cum_stack;	/* Cumulative stack of the node last visited.  */
  size_t overall_stack;	/* Maximum cumulative stack over all root nodes.  */
  bool emit_stack_syms;	/* Whether to define __stack_* absolute symbols.  */
};
3959
3960
/* Descend the call graph for FUN, accumulating total stack required.
   PARAM is a struct _sum_stack_param *.  On return PARAM->cum_stack
   holds FUN's cumulative stack requirement, and PARAM->overall_stack
   the maximum over all root nodes seen so far.  Optionally prints a
   per-function report and defines __stack_* symbols.  Returns false
   on allocation failure or failed recursion.  */

static bool
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bool has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  /* If already visited, fun->stack has been overwritten with the
     cumulative figure (see below), so just report it.  */
  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  if (fun->visit3)
    return true;

  has_call = false;
  max = NULL;			/* Callee on the max-stack path.  */
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = true;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return false;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = true;

  /* Only roots of the call graph contribute to overall_stack.  */
  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* When auto-overlaying, we only need the numbers, not the report
     or the symbols.  */
  if (htab->params->auto_overlay)
    return true;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info ("  %s: 0x%v\n", f1, (bfd_vma) cum_stack);
      info->callbacks->minfo ("%s: 0x%v 0x%v\n",
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_("  calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		/* '*' marks the callee on the maximum-stack path,
		   't' marks a tail call.  */
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo ("   %s%s %s\n", ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* 18 = strlen ("__stack_") + 8 hex digits + '_' + NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return false;

      /* Local functions get the section id folded into the name so
	 same-named statics in different files don't collide.  */
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, true, true, false);
      free (name);
      /* Don't clobber a user-supplied definition.  */
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return true;
}
4075
4076
/* SEC is part of a pasted function.  Return the call_info for the
4077
   next section of this function.  */
4078
4079
static struct call_info *
4080
find_pasted_call (asection *sec)
4081
0
{
4082
0
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4083
0
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4084
0
  struct call_info *call;
4085
0
  int k;
4086
4087
0
  for (k = 0; k < sinfo->num_fun; ++k)
4088
0
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4089
0
      if (call->is_pasted)
4090
0
  return call;
4091
0
  abort ();
4092
0
  return 0;
4093
0
}
4094
4095
/* qsort predicate to sort bfds by file name.  */
4096
4097
static int
4098
sort_bfds (const void *a, const void *b)
4099
0
{
4100
0
  bfd *const *abfd1 = a;
4101
0
  bfd *const *abfd2 = b;
4102
4103
0
  return filename_cmp (bfd_get_filename (*abfd1), bfd_get_filename (*abfd2));
4104
0
}
4105
4106
static unsigned int
4107
print_one_overlay_section (FILE *script,
4108
         unsigned int base,
4109
         unsigned int count,
4110
         unsigned int ovlynum,
4111
         unsigned int *ovly_map,
4112
         asection **ovly_sections,
4113
         struct bfd_link_info *info)
4114
0
{
4115
0
  unsigned int j;
4116
4117
0
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4118
0
    {
4119
0
      asection *sec = ovly_sections[2 * j];
4120
4121
0
      if (fprintf (script, "   %s%c%s (%s)\n",
4122
0
       (sec->owner->my_archive != NULL
4123
0
        ? bfd_get_filename (sec->owner->my_archive) : ""),
4124
0
       info->path_separator,
4125
0
       bfd_get_filename (sec->owner),
4126
0
       sec->name) <= 0)
4127
0
  return -1;
4128
0
      if (sec->segment_mark)
4129
0
  {
4130
0
    struct call_info *call = find_pasted_call (sec);
4131
0
    while (call != NULL)
4132
0
      {
4133
0
        struct function_info *call_fun = call->fun;
4134
0
        sec = call_fun->sec;
4135
0
        if (fprintf (script, "   %s%c%s (%s)\n",
4136
0
         (sec->owner->my_archive != NULL
4137
0
          ? bfd_get_filename (sec->owner->my_archive) : ""),
4138
0
         info->path_separator,
4139
0
         bfd_get_filename (sec->owner),
4140
0
         sec->name) <= 0)
4141
0
    return -1;
4142
0
        for (call = call_fun->call_list; call; call = call->next)
4143
0
    if (call->is_pasted)
4144
0
      break;
4145
0
      }
4146
0
  }
4147
0
    }
4148
4149
0
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4150
0
    {
4151
0
      asection *sec = ovly_sections[2 * j + 1];
4152
0
      if (sec != NULL
4153
0
    && fprintf (script, "   %s%c%s (%s)\n",
4154
0
          (sec->owner->my_archive != NULL
4155
0
           ? bfd_get_filename (sec->owner->my_archive) : ""),
4156
0
          info->path_separator,
4157
0
          bfd_get_filename (sec->owner),
4158
0
          sec->name) <= 0)
4159
0
  return -1;
4160
4161
0
      sec = ovly_sections[2 * j];
4162
0
      if (sec->segment_mark)
4163
0
  {
4164
0
    struct call_info *call = find_pasted_call (sec);
4165
0
    while (call != NULL)
4166
0
      {
4167
0
        struct function_info *call_fun = call->fun;
4168
0
        sec = call_fun->rodata;
4169
0
        if (sec != NULL
4170
0
      && fprintf (script, "   %s%c%s (%s)\n",
4171
0
            (sec->owner->my_archive != NULL
4172
0
             ? bfd_get_filename (sec->owner->my_archive) : ""),
4173
0
            info->path_separator,
4174
0
            bfd_get_filename (sec->owner),
4175
0
            sec->name) <= 0)
4176
0
    return -1;
4177
0
        for (call = call_fun->call_list; call; call = call->next)
4178
0
    if (call->is_pasted)
4179
0
      break;
4180
0
      }
4181
0
  }
4182
0
    }
4183
4184
0
  return j;
4185
0
}
4186
4187
/* Handle --auto-overlay.  Partition overlay-candidate sections into
   overlay regions that fit local store, then write a linker script
   describing the layout and (optionally) relink.  On success this
   does not return: it calls xexit (0) after writing the script.
   On failure it reports a fatal error.  */

static void
spu_elf_auto_overlay (struct bfd_link_info *info)
{
  bfd *ibfd;
  bfd **bfd_arr;
  struct elf_segment_map *m;
  unsigned int fixed_size, lo, hi;
  unsigned int reserved;
  struct spu_link_hash_table *htab;
  unsigned int base, i, count, bfd_count;
  unsigned int region, ovlynum;
  asection **ovly_sections, **ovly_p;
  unsigned int *ovly_map;
  FILE *script;
  unsigned int total_overlay_size, overlay_size;
  const char *ovly_mgr_entry;
  struct elf_link_hash_entry *h;
  struct _mos_param mos_param;
  struct _uos_param uos_param;
  struct function_info dummy_caller;

  /* Find the extents of our loadable image.  */
  lo = (unsigned int) -1;
  hi = 0;
  for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0)
	  {
	    if (m->sections[i]->vma < lo)
	      lo = m->sections[i]->vma;
	    if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
	      hi = m->sections[i]->vma + m->sections[i]->size - 1;
	  }
  fixed_size = hi + 1 - lo;

  if (!discover_functions (info))
    goto err_exit;

  if (!build_call_tree (info))
    goto err_exit;

  /* Reserve space for the stack (either user-specified or estimated
     by a stack analysis pass).  */
  htab = spu_hash_table (info);
  reserved = htab->params->auto_overlay_reserved;
  if (reserved == 0)
    {
      struct _sum_stack_param sum_stack_param;

      sum_stack_param.emit_stack_syms = 0;
      sum_stack_param.overall_stack = 0;
      if (!for_each_node (sum_stack, info, &sum_stack_param, true))
	goto err_exit;
      reserved = (sum_stack_param.overall_stack
		  + htab->params->extra_stack_space);
    }

  /* No need for overlays if everything already fits.  */
  if (fixed_size + reserved <= htab->local_store
      && htab->params->ovly_flavour != ovly_soft_icache)
    {
      htab->params->auto_overlay = 0;
      return;
    }

  uos_param.exclude_input_section = 0;
  uos_param.exclude_output_section
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");

  ovly_mgr_entry = "__ovly_load";
  if (htab->params->ovly_flavour == ovly_soft_icache)
    ovly_mgr_entry = "__icache_br_handler";
  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
			    false, false, false);
  if (h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular)
    {
      /* We have a user supplied overlay manager.  */
      uos_param.exclude_input_section = h->root.u.def.section;
    }
  else
    {
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
	 builtin version to .text, and will adjust .text size.  */
      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
    }

  /* Mark overlay sections, and find max overlay section size.  */
  mos_param.max_overlay_size = 0;
  if (!for_each_node (mark_overlay_section, info, &mos_param, true))
    goto err_exit;

  /* We can't put the overlay manager or interrupt routines in
     overlays.  */
  uos_param.clearing = 0;
  if ((uos_param.exclude_input_section
       || uos_param.exclude_output_section)
      && !for_each_node (unmark_overlay_section, info, &uos_param, true))
    goto err_exit;

  bfd_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    ++bfd_count;
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
  if (bfd_arr == NULL)
    goto err_exit;

  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
  count = 0;
  bfd_count = 0;
  total_overlay_size = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;
      unsigned int old_count;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      old_count = count;
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (sec->linker_mark)
	  {
	    if ((sec->flags & SEC_CODE) != 0)
	      count += 1;
	    fixed_size -= sec->size;
	    total_overlay_size += sec->size;
	  }
	else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
		 && sec->output_section->owner == info->output_bfd
		 && startswith (sec->output_section->name, ".ovl.init"))
	  fixed_size -= sec->size;
      /* Remember each bfd that contributed an overlay section.  */
      if (count != old_count)
	bfd_arr[bfd_count++] = ibfd;
    }

  /* Since the overlay link script selects sections by file name and
     section name, ensure that file names are unique.  */
  if (bfd_count > 1)
    {
      bool ok = true;

      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
      for (i = 1; i < bfd_count; ++i)
	if (filename_cmp (bfd_get_filename (bfd_arr[i - 1]),
			  bfd_get_filename (bfd_arr[i])) == 0)
	  {
	    if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
	      {
		if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
		  /* xgettext:c-format */
		  info->callbacks->einfo (_("%s duplicated in %s\n"),
					  bfd_get_filename (bfd_arr[i]),
					  bfd_get_filename (bfd_arr[i]->my_archive));
		else
		  info->callbacks->einfo (_("%s duplicated\n"),
					  bfd_get_filename (bfd_arr[i]));
		ok = false;
	      }
	  }
      if (!ok)
	{
	  info->callbacks->einfo (_("sorry, no support for duplicate "
				    "object files in auto-overlay script\n"));
	  bfd_set_error (bfd_error_bad_value);
	  goto err_exit;
	}
    }
  free (bfd_arr);

  fixed_size += reserved;
  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
    {
      if (htab->params->ovly_flavour == ovly_soft_icache)
	{
	  /* Stubs in the non-icache area are bigger.  */
	  fixed_size += htab->non_ovly_stub * 16;
	  /* Space for icache manager tables.
	     a) Tag array, one quadword per cache line.
	     - word 0: ia address of present line, init to zero.  */
	  fixed_size += 16 << htab->num_lines_log2;
	  /* b) Rewrite "to" list, one quadword per cache line.  */
	  fixed_size += 16 << htab->num_lines_log2;
	  /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
	     to a power-of-two number of full quadwords) per cache line.  */
	  fixed_size += 16 << (htab->fromelem_size_log2
			       + htab->num_lines_log2);
	  /* d) Pointer to __ea backing store (toe), 1 quadword.  */
	  fixed_size += 16;
	}
      else
	{
	  /* Guess number of overlays.  Assuming overlay buffer is on
	     average only half full should be conservative.  */
	  ovlynum = (total_overlay_size * 2 * htab->params->num_lines
		     / (htab->local_store - fixed_size));
	  /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
	  fixed_size += ovlynum * 16 + 16 + 4 + 16;
	}
    }

  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
    /* xgettext:c-format */
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
			      "size of 0x%v exceeds local store\n"),
			    (bfd_vma) fixed_size,
			    (bfd_vma) mos_param.max_overlay_size);

  /* Now see if we should put some functions in the non-overlay area.  */
  else if (fixed_size < htab->params->auto_overlay_fixed)
    {
      unsigned int max_fixed, lib_size;

      max_fixed = htab->local_store - mos_param.max_overlay_size;
      if (max_fixed > htab->params->auto_overlay_fixed)
	max_fixed = htab->params->auto_overlay_fixed;
      lib_size = max_fixed - fixed_size;
      lib_size = auto_ovl_lib_functions (info, lib_size);
      if (lib_size == (unsigned int) -1)
	goto err_exit;
      fixed_size = max_fixed - lib_size;
    }

  /* Build an array of sections, suitably sorted to place into
     overlays.  */
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
  if (ovly_sections == NULL)
    goto err_exit;
  ovly_p = ovly_sections;
  if (!for_each_node (collect_overlays, info, &ovly_p, true))
    goto err_exit;
  count = (size_t) (ovly_p - ovly_sections) / 2;
  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
  if (ovly_map == NULL)
    goto err_exit;

  memset (&dummy_caller, 0, sizeof (dummy_caller));
  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
  if (htab->params->line_size != 0)
    overlay_size = htab->params->line_size;
  base = 0;
  ovlynum = 0;
  /* Greedily pack consecutive section pairs into overlays, accounting
     for alignment, pasted continuations and call stub space.  */
  while (base < count)
    {
      unsigned int size = 0, rosize = 0, roalign = 0;

      for (i = base; i < count; i++)
	{
	  asection *sec, *rosec;
	  unsigned int tmp, rotmp;
	  unsigned int num_stubs;
	  struct call_info *call, *pasty;
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;
	  unsigned int k;

	  /* See whether we can add this section to the current
	     overlay without overflowing our overlay buffer.  */
	  sec = ovly_sections[2 * i];
	  tmp = align_power (size, sec->alignment_power) + sec->size;
	  rotmp = rosize;
	  rosec = ovly_sections[2 * i + 1];
	  if (rosec != NULL)
	    {
	      rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
	      if (roalign < rosec->alignment_power)
		roalign = rosec->alignment_power;
	    }
	  if (align_power (tmp, roalign) + rotmp > overlay_size)
	    break;
	  if (sec->segment_mark)
	    {
	      /* Pasted sections must stay together, so add their
		 sizes too.  */
	      pasty = find_pasted_call (sec);
	      while (pasty != NULL)
		{
		  struct function_info *call_fun = pasty->fun;
		  tmp = (align_power (tmp, call_fun->sec->alignment_power)
			 + call_fun->sec->size);
		  if (call_fun->rodata)
		    {
		      rotmp = (align_power (rotmp,
					    call_fun->rodata->alignment_power)
			       + call_fun->rodata->size);
		      /* NOTE(review): this tests rosec, not
			 call_fun->rodata; rosec may even be NULL here.
			 Looks like it was meant to be
			 call_fun->rodata->alignment_power — confirm
			 against upstream before changing.  */
		      if (roalign < rosec->alignment_power)
			roalign = rosec->alignment_power;
		    }
		  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
		    if (pasty->is_pasted)
		      break;
		}
	    }
	  if (align_power (tmp, roalign) + rotmp > overlay_size)
	    break;

	  /* If we add this section, we might need new overlay call
	     stubs.  Add any overlay section calls to dummy_call.  */
	  pasty = NULL;
	  sec_data = spu_elf_section_data (sec);
	  sinfo = sec_data->u.i.stack_info;
	  for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
	    for (call = sinfo->fun[k].call_list; call; call = call->next)
	      if (call->is_pasted)
		{
		  BFD_ASSERT (pasty == NULL);
		  pasty = call;
		}
	      else if (call->fun->sec->linker_mark)
		{
		  if (!copy_callee (&dummy_caller, call))
		    goto err_exit;
		}
	  while (pasty != NULL)
	    {
	      struct function_info *call_fun = pasty->fun;
	      pasty = NULL;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  {
		    BFD_ASSERT (pasty == NULL);
		    pasty = call;
		  }
		else if (!copy_callee (&dummy_caller, call))
		  goto err_exit;
	    }

	  /* Calculate call stub size.  */
	  num_stubs = 0;
	  for (call = dummy_caller.call_list; call; call = call->next)
	    {
	      unsigned int stub_delta = 1;

	      if (htab->params->ovly_flavour == ovly_soft_icache)
		stub_delta = call->count;
	      num_stubs += stub_delta;

	      /* If the call is within this overlay, we won't need a
		 stub.  */
	      for (k = base; k < i + 1; k++)
		if (call->fun->sec == ovly_sections[2 * k])
		  {
		    num_stubs -= stub_delta;
		    break;
		  }
	    }
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && num_stubs > htab->params->max_branch)
	    break;
	  if (align_power (tmp, roalign) + rotmp
	      + num_stubs * ovl_stub_size (htab->params) > overlay_size)
	    break;
	  size = tmp;
	  rosize = rotmp;
	}

      if (i == base)
	{
	  /* Not even one section fits: report and fail.  */
	  /* xgettext:c-format */
	  info->callbacks->einfo (_("%pB:%pA%s exceeds overlay size\n"),
				  ovly_sections[2 * i]->owner,
				  ovly_sections[2 * i],
				  ovly_sections[2 * i + 1] ? " + rodata" : "");
	  bfd_set_error (bfd_error_bad_value);
	  goto err_exit;
	}

      /* Discard the per-overlay stub bookkeeping.  */
      while (dummy_caller.call_list != NULL)
	{
	  struct call_info *call = dummy_caller.call_list;
	  dummy_caller.call_list = call->next;
	  free (call);
	}

      ++ovlynum;
      while (base < i)
	ovly_map[base++] = ovlynum;
    }

  /* Emit the linker script describing the overlay layout.  */
  script = htab->params->spu_elf_open_overlay_script ();

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
	goto file_err;

      if (fprintf (script,
		   " . = ALIGN (%u);\n"
		   " .ovl.init : { *(.ovl.init) }\n"
		   " . = ABSOLUTE (ADDR (.ovl.init));\n",
		   htab->params->line_size) <= 0)
	goto file_err;

      base = 0;
      ovlynum = 1;
      while (base < count)
	{
	  unsigned int indx = ovlynum - 1;
	  unsigned int vma, lma;

	  /* vma cycles through the cache lines; lma places each
	     generation of a line at a distinct load address.  */
	  vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
	  lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);

	  if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
		       ": AT (LOADADDR (.ovl.init) + %u) {\n",
		       ovlynum, vma, lma) <= 0)
	    goto file_err;

	  base = print_one_overlay_section (script, base, count, ovlynum,
					    ovly_map, ovly_sections, info);
	  if (base == (unsigned) -1)
	    goto file_err;

	  if (fprintf (script, "  }\n") <= 0)
	    goto file_err;

	  ovlynum++;
	}

      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
		   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
	goto file_err;

      if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
	goto file_err;
    }
  else
    {
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
	goto file_err;

      if (fprintf (script,
		   " . = ALIGN (16);\n"
		   " .ovl.init : { *(.ovl.init) }\n"
		   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
	goto file_err;

      /* One OVERLAY statement per buffer region; overlays are
	 assigned to regions round-robin (ovlynum mod num_lines).  */
      for (region = 1; region <= htab->params->num_lines; region++)
	{
	  ovlynum = region;
	  base = 0;
	  while (base < count && ovly_map[base] < ovlynum)
	    base++;

	  if (base == count)
	    break;

	  if (region == 1)
	    {
	      /* We need to set lma since we are overlaying .ovl.init.  */
	      if (fprintf (script,
			   " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
		goto file_err;
	    }
	  else
	    {
	      if (fprintf (script, " OVERLAY :\n {\n") <= 0)
		goto file_err;
	    }

	  while (base < count)
	    {
	      if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
		goto file_err;

	      base = print_one_overlay_section (script, base, count, ovlynum,
						ovly_map, ovly_sections, info);
	      if (base == (unsigned) -1)
		goto file_err;

	      if (fprintf (script, "  }\n") <= 0)
		goto file_err;

	      ovlynum += htab->params->num_lines;
	      while (base < count && ovly_map[base] < ovlynum)
		base++;
	    }

	  if (fprintf (script, " }\n") <= 0)
	    goto file_err;
	}

      if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
	goto file_err;
    }

  free (ovly_map);
  free (ovly_sections);

  if (fclose (script) != 0)
    goto file_err;

  if (htab->params->auto_overlay & AUTO_RELINK)
    (*htab->params->spu_elf_relink) ();

  xexit (0);

 file_err:
  bfd_set_error (bfd_error_system_call);
 err_exit:
  info->callbacks->fatal (_("%P: auto overlay error: %E\n"));
}
4694
4695
/* Provide an estimate of total stack required.  */
4696
4697
static bool
4698
spu_elf_stack_analysis (struct bfd_link_info *info)
4699
0
{
4700
0
  struct spu_link_hash_table *htab;
4701
0
  struct _sum_stack_param sum_stack_param;
4702
4703
0
  if (!discover_functions (info))
4704
0
    return false;
4705
4706
0
  if (!build_call_tree (info))
4707
0
    return false;
4708
4709
0
  htab = spu_hash_table (info);
4710
0
  if (htab->params->stack_analysis)
4711
0
    {
4712
0
      info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4713
0
      info->callbacks->minfo (_("\nStack size for functions.  "
4714
0
        "Annotations: '*' max stack, 't' tail call\n"));
4715
0
    }
4716
4717
0
  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4718
0
  sum_stack_param.overall_stack = 0;
4719
0
  if (!for_each_node (sum_stack, info, &sum_stack_param, true))
4720
0
    return false;
4721
4722
0
  if (htab->params->stack_analysis)
4723
0
    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4724
0
         (bfd_vma) sum_stack_param.overall_stack);
4725
0
  return true;
4726
0
}
4727
4728
/* Perform a final link.  */
4729
4730
static bool
4731
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4732
0
{
4733
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
4734
4735
0
  if (htab->params->auto_overlay)
4736
0
    spu_elf_auto_overlay (info);
4737
4738
0
  if ((htab->params->stack_analysis
4739
0
       || (htab->params->ovly_flavour == ovly_soft_icache
4740
0
     && htab->params->lrlive_analysis))
4741
0
      && !spu_elf_stack_analysis (info))
4742
0
    info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
4743
4744
0
  if (!spu_elf_build_stubs (info))
4745
0
    info->callbacks->fatal (_("%P: can not build overlay stubs: %E\n"));
4746
4747
0
  return bfd_elf_final_link (output_bfd, info);
4748
0
}
4749
4750
/* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
4751
   and !info->emitrelocations.  Returns a count of special relocs
4752
   that need to be emitted.  */
4753
4754
static unsigned int
4755
spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4756
0
{
4757
0
  Elf_Internal_Rela *relocs;
4758
0
  unsigned int count = 0;
4759
4760
0
  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4761
0
              info->keep_memory);
4762
0
  if (relocs != NULL)
4763
0
    {
4764
0
      Elf_Internal_Rela *rel;
4765
0
      Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4766
4767
0
      for (rel = relocs; rel < relend; rel++)
4768
0
  {
4769
0
    int r_type = ELF32_R_TYPE (rel->r_info);
4770
0
    if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4771
0
      ++count;
4772
0
  }
4773
4774
0
      if (elf_section_data (sec)->relocs != relocs)
4775
0
  free (relocs);
4776
0
    }
4777
4778
0
  return count;
4779
0
}
4780
4781
/* Functions for adding fixup records to .fixup */

/* Each .fixup record is one 32-bit word: a quadword address in the
   upper bits plus a 4-bit mask (one bit per word of the quadword)
   in the low bits.  */
#define FIXUP_RECORD_SIZE 4

/* Store/fetch record INDEX of .fixup in OUTPUT_BFD byte order.  */
#define FIXUP_PUT(output_bfd,htab,index,addr) \
    bfd_put_32 (output_bfd, addr, \
		htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
#define FIXUP_GET(output_bfd,htab,index) \
    bfd_get_32 (output_bfd, \
		htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4791
4792
/* Store OFFSET in .fixup.  This assumes it will be called with an
4793
   increasing OFFSET.  When this OFFSET fits with the last base offset,
4794
   it just sets a bit, otherwise it adds a new fixup record.  */
4795
static void
4796
spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4797
        bfd_vma offset)
4798
0
{
4799
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
4800
0
  asection *sfixup = htab->sfixup;
4801
0
  bfd_vma qaddr = offset & ~(bfd_vma) 15;
4802
0
  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4803
0
  if (sfixup->reloc_count == 0)
4804
0
    {
4805
0
      FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4806
0
      sfixup->reloc_count++;
4807
0
    }
4808
0
  else
4809
0
    {
4810
0
      bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4811
0
      if (qaddr != (base & ~(bfd_vma) 15))
4812
0
  {
4813
0
    if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4814
0
      _bfd_error_handler (_("fatal error while creating .fixup"));
4815
0
    FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4816
0
    sfixup->reloc_count++;
4817
0
  }
4818
0
      else
4819
0
  FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4820
0
    }
4821
0
}
4822
4823
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.
   Returns true/false like other relocate_section backends, or 2 when
   special PPU relocs were kept for output (see the epilogue below).  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = true;
  bool emit_these_relocs = false;
  bool is_ea_sym;
  bool stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  /* Stub processing only matters if stubs exist and this section can
     contain branches needing them.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section));
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bool unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = false;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: resolve through the hash table, following
	     indirect and warning links.  */
	  if (sym_hashes == NULL)
	    return false;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  if (info->wrap_hash != NULL
	      && (input_section->flags & SEC_DEBUGGING) != 0)
	    h = ((struct elf_link_hash_entry *)
		 unwrap_hash_lookup (info, input_bfd, &h->root));

	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = true;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!bfd_link_relocatable (info)
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      bool err;

	      /* Decide whether the undefined symbol is a hard error or
		 merely a warning.  */
	      err = (info->unresolved_syms_in_objects == RM_DIAGNOSE
		     && !info->warn_unresolved_syms)
		|| ELF_ST_VISIBILITY (h->other) != STV_DEFAULT;

	      info->callbacks->undefined_symbol
		(info, h->root.root.string, input_bfd,
		 input_section, rel->r_offset, err);
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0". */
      if (r_type == R_SPU_ADD_PIC
	  && h != NULL
	  && !(h->def_regular || ELF_COMMON_DEF_P (h)))
	{
	  bfd_byte *loc = contents + rel->r_offset;
	  loc[0] = 0x1c;
	  loc[1] = 0x00;
	  loc[2] &= 0x3f;
	}

      /* Does the reloc target live in the special ._ea section?  */
      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && !is_ea_sym
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = iovl;

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the got entry created earlier for this call site;
	     soft-icache matches on branch address, classic overlays
	     match on addend and overlay index.  */
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  /* Redirect the reloc at the stub, addend already folded in.  */
	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);
	      if (ovl != 0)
		{
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}

      /* Record R_SPU_ADDR32 locations in .fixup when requested.  */
      if (htab->params->emit_fixups && !bfd_link_relocatable (info)
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
	  offset = rel->r_offset + input_section->output_section->vma
		   + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}

      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  /* PPU relocs are left for the output file; applied later by
	     the PPU-side loader.  */
	  emit_these_relocs = true;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = true;

      if (unresolved_reloc
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%s+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_section_name (input_section),
	     (uint64_t) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = false;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      (*info->callbacks->reloc_overflow)
		(info, (h ? &h->root : NULL), sym_name, howto->name,
		 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, sym_name, input_bfd, input_section, rel->r_offset, true);
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = false;
	      (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
					   input_section, rel->r_offset);
	      break;
	    }
	}
    }

  /* If we kept any PPU relocs, compact the reloc array down to just
     those entries and shrink the reloc section header to match.  */
  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      /* Returning 2 tells the generic linker that relocs were kept.  */
      ret = 2;
    }

  return ret;
}
5134
5135
static bool
5136
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
5137
         struct bfd_link_info *info ATTRIBUTE_UNUSED)
5138
0
{
5139
0
  return true;
5140
0
}
5141
5142
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
5143
5144
static int
5145
spu_elf_output_symbol_hook (struct bfd_link_info *info,
5146
          const char *sym_name ATTRIBUTE_UNUSED,
5147
          Elf_Internal_Sym *sym,
5148
          asection *sym_sec ATTRIBUTE_UNUSED,
5149
          struct elf_link_hash_entry *h)
5150
0
{
5151
0
  struct spu_link_hash_table *htab = spu_hash_table (info);
5152
5153
0
  if (!bfd_link_relocatable (info)
5154
0
      && htab->stub_sec != NULL
5155
0
      && h != NULL
5156
0
      && (h->root.type == bfd_link_hash_defined
5157
0
    || h->root.type == bfd_link_hash_defweak)
5158
0
      && h->def_regular
5159
0
      && startswith (h->root.root.string, "_SPUEAR_"))
5160
0
    {
5161
0
      struct got_entry *g;
5162
5163
0
      for (g = h->got.glist; g != NULL; g = g->next)
5164
0
  if (htab->params->ovly_flavour == ovly_soft_icache
5165
0
      ? g->br_addr == g->stub_addr
5166
0
      : g->addend == 0 && g->ovl == 0)
5167
0
    {
5168
0
      sym->st_shndx = (_bfd_elf_section_from_bfd_section
5169
0
           (htab->stub_sec[0]->output_section->owner,
5170
0
            htab->stub_sec[0]->output_section));
5171
0
      sym->st_value = g->stub_addr;
5172
0
      break;
5173
0
    }
5174
0
    }
5175
5176
0
  return 1;
5177
0
}
5178
5179
/* Non-zero when the output is being linked as a plugin; consulted when
   writing the ELF header.  */
static int spu_plugin = 0;

/* Record whether we are producing a plugin (VAL non-zero).  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5186
5187
/* Set ELF header e_type for plugins.  */
5188
5189
static bool
5190
spu_elf_init_file_header (bfd *abfd, struct bfd_link_info *info)
5191
0
{
5192
0
  if (!_bfd_elf_init_file_header (abfd, info))
5193
0
    return false;
5194
5195
0
  if (spu_plugin)
5196
0
    {
5197
0
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5198
5199
0
      i_ehdrp->e_type = ET_DYN;
5200
0
    }
5201
0
  return true;
5202
0
}
5203
5204
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
5205
   segments for overlays.  */
5206
5207
static int
5208
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5209
0
{
5210
0
  int extra = 0;
5211
0
  asection *sec;
5212
5213
0
  if (info != NULL)
5214
0
    {
5215
0
      struct spu_link_hash_table *htab = spu_hash_table (info);
5216
0
      extra = htab->num_overlays;
5217
0
    }
5218
5219
0
  if (extra)
5220
0
    ++extra;
5221
5222
0
  sec = bfd_get_section_by_name (abfd, ".toe");
5223
0
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5224
0
    ++extra;
5225
5226
0
  return extra;
5227
0
}
5228
5229
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bool
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay, **first_load;
  unsigned int i;

  if (info == NULL)
    return true;

  /* Pass 1: find the first multi-section PT_LOAD segment containing
     .toe or an overlay section, and split it so that section sits
     alone in its own segment.  Sections after it go into a new
     trailing segment; sections before it stay in the original.  */
  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Move the sections following S into a new segment.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return false;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* S was not first: keep the leading sections in M and
		   give S a fresh single-section segment after it.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return false;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  m_overlay = NULL;
  first_load = NULL;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD)
	{
	  if (!first_load)
	    first_load = p;
	  if ((*p)->count == 1
	      && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	    {
	      /* Unlink this overlay segment from the map and append it
		 to the overlay list; no_sort_lma keeps the generic
		 code from reordering it by LMA later.  */
	      m = *p;
	      m->no_sort_lma = 1;
	      *p = m->next;
	      *p_overlay = m;
	      p_overlay = &m->next;
	      continue;
	    }
	}
      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  if (m_overlay != NULL)
    {
      p = first_load;
      if (*p != NULL && (*p)->p_type == PT_LOAD && (*p)->includes_filehdr)
	/* It doesn't really make sense for someone to include the ELF
	   file header into an spu image, but if they do the code that
	   assigns p_offset needs to see the segment containing the
	   header first.  */
	p = &(*p)->next;
      /* Splice the overlay list in front of *P.  */
      *p_overlay = *p;
      *p = m_overlay;
    }

  return true;
}
5334
5335
/* Tweak the section type of .note.spu_name.  */
5336
5337
static bool
5338
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5339
           Elf_Internal_Shdr *hdr,
5340
           asection *sec)
5341
0
{
5342
0
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5343
0
    hdr->sh_type = SHT_NOTE;
5344
0
  return true;
5345
0
}
5346
5347
/* Tweak phdrs before writing them out.  */

static bool
spu_elf_modify_headers (bfd *abfd, struct bfd_link_info *info)
{
  if (info != NULL)
    {
      const struct elf_backend_data *bed;
      struct elf_obj_tdata *tdata;
      Elf_Internal_Phdr *phdr, *last;
      struct spu_link_hash_table *htab;
      unsigned int count;
      unsigned int i;

      bed = get_elf_backend_data (abfd);
      tdata = elf_tdata (abfd);
      phdr = tdata->phdr;
      count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
      htab = spu_hash_table (info);
      if (htab->num_overlays != 0)
	{
	  struct elf_segment_map *m;
	  unsigned int o;

	  /* Walk segment map and phdrs in lockstep, flagging overlay
	     segments and recording their file offsets.  */
	  for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
	    if (m->count != 0
		&& ((o = spu_elf_section_data (m->sections[0])->u.o.ovl_index)
		    != 0))
	      {
		/* Mark this as an overlay header.  */
		phdr[i].p_flags |= PF_OVERLAY;

		if (htab->ovtab != NULL && htab->ovtab->size != 0
		    && htab->params->ovly_flavour != ovly_soft_icache)
		  {
		    bfd_byte *p = htab->ovtab->contents;
		    unsigned int off = o * 16 + 8;

		    /* Write file_off into _ovly_table.  */
		    bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
		  }
	      }
	  /* Soft-icache has its file offset put in .ovl.init.  */
	  if (htab->init != NULL && htab->init->size != 0)
	    {
	      bfd_vma val
		= elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	      bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	    }
	}

      /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
	 of 16.  This should always be possible when using the standard
	 linker scripts, but don't create overlapping segments if
	 someone is playing games with linker scripts.  */
      /* First pass: verify (from last segment backwards) that rounding
	 up would not run one segment into the next; bail out of the
	 loop early (i != -1) if it would.  */
      last = NULL;
      for (i = count; i-- != 0; )
	if (phdr[i].p_type == PT_LOAD)
	  {
	    unsigned adjust;

	    adjust = -phdr[i].p_filesz & 15;
	    if (adjust != 0
		&& last != NULL
		&& (phdr[i].p_offset + phdr[i].p_filesz
		    > last->p_offset - adjust))
	      break;

	    adjust = -phdr[i].p_memsz & 15;
	    if (adjust != 0
		&& last != NULL
		&& phdr[i].p_filesz != 0
		&& phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
		&& phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	      break;

	    if (phdr[i].p_filesz != 0)
	      last = &phdr[i];
	  }

      /* Second pass: the check loop ran to completion, so apply the
	 rounding to every PT_LOAD segment.  */
      if (i == (unsigned int) -1)
	for (i = count; i-- != 0; )
	  if (phdr[i].p_type == PT_LOAD)
	    {
	      unsigned adjust;

	      adjust = -phdr[i].p_filesz & 15;
	      phdr[i].p_filesz += adjust;

	      adjust = -phdr[i].p_memsz & 15;
	      phdr[i].p_memsz += adjust;
	    }
    }

  return _bfd_elf_modify_headers (abfd, info);
}
5444
5445
/* Size the .fixup section by counting, over all input sections, the
   quadwords that contain at least one R_SPU_ADDR32 relocation.  */

bool
spu_elf_size_sections (bfd *obfd ATTRIBUTE_UNUSED, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  asection *isec;

	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
	      bfd_vma base_end;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((isec->flags & SEC_ALLOC) == 0
		  || (isec->flags & SEC_RELOC) == 0
		  || isec->reloc_count == 0)
		continue;

	      /* Get the relocs.  */
	      internal_relocs =
		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
					   info->keep_memory);
	      if (internal_relocs == NULL)
		return false;

	      /* 1 quadword can contain up to 4 R_SPU_ADDR32
		 relocations.  They are stored in a single word by
		 saving the upper 28 bits of the address and setting the
		 lower 4 bits to a bit mask of the words that have the
		 relocation.  BASE_END keeps track of the next quadword. */
	      irela = internal_relocs;
	      irelaend = irela + isec->reloc_count;
	      base_end = 0;
	      for (; irela < irelaend; irela++)
		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
		    && irela->r_offset >= base_end)
		  {
		    /* New quadword reached: one more fixup record.
		       (Relocs within the same quadword only set bits
		       in the existing record, so don't count.)  */
		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
		    fixup_count++;
		  }
	    }
	}

      /* We always have a NULL fixup as a sentinel */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (sfixup, size))
	return false;
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
	return false;
      sfixup->alloced = 1;
    }
  return true;
}
5512
5513
/* Target identification.  */
#define TARGET_BIG_SYM    spu_elf32_vec
#define TARGET_BIG_NAME   "elf32-spu"
#define ELF_ARCH    bfd_arch_spu
#define ELF_TARGET_ID   SPU_ELF_DATA
#define ELF_MACHINE_CODE  EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE   0x80
#define elf_backend_rela_normal   1
#define elf_backend_can_gc_sections 1

/* Reloc handling hooks.  */
#define bfd_elf32_bfd_reloc_type_lookup   spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup   spu_elf_reloc_name_lookup
#define elf_info_to_howto     spu_elf_info_to_howto
#define elf_backend_count_relocs    spu_elf_count_relocs
#define elf_backend_relocate_section    spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing   spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define elf_backend_object_p      spu_elf_object_p
#define bfd_elf32_new_section_hook    spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create  spu_elf_link_hash_table_create

/* Program-header and file-layout hooks (overlay support).  */
#define elf_backend_additional_program_headers  spu_elf_additional_program_headers
#define elf_backend_modify_segment_map    spu_elf_modify_segment_map
#define elf_backend_modify_headers    spu_elf_modify_headers
#define elf_backend_init_file_header    spu_elf_init_file_header
#define elf_backend_fake_sections   spu_elf_fake_sections
#define elf_backend_special_sections    spu_elf_special_sections
#define bfd_elf32_bfd_final_link    spu_elf_final_link

#include "elf32-target.h"