/src/elfutils/libdw/libdw_alloc.c

/* Memory handling for libdw.
   Copyright (C) 2003, 2004, 2006 Red Hat, Inc.
   This file is part of elfutils.
   Written by Ulrich Drepper <drepper@redhat.com>, 2003.

   This file is free software; you can redistribute it and/or modify
   it under the terms of either

     * the GNU Lesser General Public License as published by the Free
       Software Foundation; either version 3 of the License, or (at
       your option) any later version

   or

     * the GNU General Public License as published by the Free
       Software Foundation; either version 2 of the License, or (at
       your option) any later version

   or both in parallel, as here.

   elfutils is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received copies of the GNU General Public License and
   the GNU Lesser General Public License along with this program.  If
   not, see <http://www.gnu.org/licenses/>.  */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include <errno.h>
#include <stdlib.h>
#include "libdwP.h"
#include "system.h"
#include <stdatomic.h>
#if USE_VG_ANNOTATIONS == 1
#include <helgrind.h>
#else
#define ANNOTATE_HAPPENS_BEFORE(X)
#define ANNOTATE_HAPPENS_AFTER(X)
#endif

#define THREAD_ID_UNSET ((size_t) -1)
static __thread size_t thread_id = THREAD_ID_UNSET;
static atomic_size_t next_id = 0;

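/* A rough sketch of the allocation state this file manipulates.  The
   real declarations live in libdwP.h; the shapes below are only
   inferred from how the fields are used here and may differ in detail.

     struct libdw_memblock
     {
       size_t size;                  (usable bytes following the header)
       size_t remaining;             (bytes still unused in this block)
       struct libdw_memblock *prev;  (older blocks of the same thread)
       char mem[];                   (payload handed out to callers)
     };

   Each Dwarf descriptor carries, among other things:

     pthread_rwlock_t mem_rwl;           (guards mem_tails/mem_stacks)
     size_t mem_stacks;                  (number of per-thread slots)
     struct libdw_memblock **mem_tails;  (newest block per thread id)
     size_t mem_default_size;            (malloc size for a fresh block)
     Dwarf_OOM oom_handler;              (called on allocation failure)

   The thread-local thread_id above is assigned lazily from the
   process-wide next_id counter the first time a thread allocates, and
   serves as that thread's index into mem_tails.  */
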
struct libdw_memblock *
__libdw_alloc_tail (Dwarf *dbg)
{
  if (thread_id == THREAD_ID_UNSET)
    thread_id = atomic_fetch_add (&next_id, 1);

  pthread_rwlock_rdlock (&dbg->mem_rwl);
  if (thread_id >= dbg->mem_stacks)
    {
      pthread_rwlock_unlock (&dbg->mem_rwl);
      pthread_rwlock_wrlock (&dbg->mem_rwl);

      /* Another thread may have already reallocated.  In theory using an
         atomic would be faster, but given that this only happens once per
         thread per Dwarf, some minor slowdown should be fine.  */
      if (thread_id >= dbg->mem_stacks)
        {
          dbg->mem_tails = realloc (dbg->mem_tails, (thread_id + 1)
                                    * sizeof (struct libdw_memblock *));
          if (dbg->mem_tails == NULL)
            {
              pthread_rwlock_unlock (&dbg->mem_rwl);
              dbg->oom_handler ();
            }
          for (size_t i = dbg->mem_stacks; i <= thread_id; i++)
            dbg->mem_tails[i] = NULL;
          dbg->mem_stacks = thread_id + 1;
          ANNOTATE_HAPPENS_BEFORE (&dbg->mem_tails);
        }

      pthread_rwlock_unlock (&dbg->mem_rwl);
      pthread_rwlock_rdlock (&dbg->mem_rwl);
    }

  /* At this point, we have an entry in the tail array.  */
  ANNOTATE_HAPPENS_AFTER (&dbg->mem_tails);
  struct libdw_memblock *result = dbg->mem_tails[thread_id];
  if (result == NULL)
    {
      result = malloc (dbg->mem_default_size);
      if (result == NULL)
        {
          pthread_rwlock_unlock (&dbg->mem_rwl);
          dbg->oom_handler ();
        }
      result->size = dbg->mem_default_size
                     - offsetof (struct libdw_memblock, mem);
      result->remaining = result->size;
      result->prev = NULL;
      dbg->mem_tails[thread_id] = result;
    }
  pthread_rwlock_unlock (&dbg->mem_rwl);
  return result;
}
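
/* How the tail block is consumed: callers reach this code through the
   allocation macros in libdwP.h (libdw_alloc and friends), which carve
   each new object out of the unused end of the thread's tail block and
   only fall back to __libdw_allocate when the block is too small.  The
   helper below illustrates that fast path only; it is not part of
   libdw, and it ignores the alignment padding the real macro applies.

     static inline void *
     example_alloc (Dwarf *dbg, size_t size)
     {
       struct libdw_memblock *tail = __libdw_alloc_tail (dbg);
       if (tail->remaining >= size)
         {
           void *ptr = tail->mem + (tail->size - tail->remaining);
           tail->remaining -= size;
           return ptr;
         }
       return __libdw_allocate (dbg, size, 1);
     }  */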

/* Can only be called after an allocation for this thread has already
   been done, to possibly undo it.  */
struct libdw_memblock *
__libdw_thread_tail (Dwarf *dbg)
{
  struct libdw_memblock *result;
  pthread_rwlock_rdlock (&dbg->mem_rwl);
  result = dbg->mem_tails[thread_id];
  pthread_rwlock_unlock (&dbg->mem_rwl);
  return result;
}

void *
__libdw_allocate (Dwarf *dbg, size_t minsize, size_t align)
{
  size_t size = MAX (dbg->mem_default_size,
                     (align - 1 +
                      2 * minsize + offsetof (struct libdw_memblock, mem)));
  struct libdw_memblock *newp = malloc (size);
  if (newp == NULL)
    dbg->oom_handler ();

  uintptr_t result = ((uintptr_t) newp->mem + align - 1) & ~(align - 1);

  newp->size = size - offsetof (struct libdw_memblock, mem);
  newp->remaining = (uintptr_t) newp + size - (result + minsize);

  pthread_rwlock_rdlock (&dbg->mem_rwl);
  newp->prev = dbg->mem_tails[thread_id];
  dbg->mem_tails[thread_id] = newp;
  pthread_rwlock_unlock (&dbg->mem_rwl);

  return (void *) result;
}
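
/* A note on the arithmetic above: for a power-of-two ALIGN,
   (addr + align - 1) & ~(align - 1) rounds addr up to the next multiple
   of align; with align == 8, for example, 0x1000 stays 0x1000 while
   0x1001 through 0x1007 all become 0x1008.  Because SIZE includes
   2 * minsize of headroom, the new tail block still has at least
   MINSIZE bytes left in REMAINING after this request, so later small
   requests can be carved from it without another malloc.  */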


Dwarf_OOM
dwarf_new_oom_handler (Dwarf *dbg, Dwarf_OOM handler)
{
  Dwarf_OOM old = dbg->oom_handler;
  dbg->oom_handler = handler;
  return old;
}
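
/* dwarf_new_oom_handler is the public hook, declared in libdw.h
   (typically installed as <elfutils/libdw.h>), for replacing a Dwarf's
   out-of-memory handler; __libdw_oom below is the library's stock
   error-and-exit version.  A minimal usage sketch: my_oom and
   install_oom_policy are illustrative names only, and the handler
   takes no arguments and must not return.

     #include <elfutils/libdw.h>
     #include <stdio.h>
     #include <stdlib.h>

     static void
     my_oom (void)
     {
       fputs ("libdw: out of memory\n", stderr);
       exit (2);
     }

     static Dwarf_OOM
     install_oom_policy (Dwarf *dw)
     {
       return dwarf_new_oom_handler (dw, my_oom);
     }

   The previous handler is returned so it can be restored later.  */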


void
__attribute ((noreturn)) attribute_hidden
__libdw_oom (void)
{
  while (1)
    error (EXIT_FAILURE, ENOMEM, "libdw");
}
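
/* Nothing in this file frees the blocks again; that is left to the
   Dwarf teardown path (dwarf_end), which presumably walks each
   mem_tails[i] chain through the prev pointers, frees every block, and
   then frees the mem_tails array itself.  Roughly, as an illustration
   of the data structure rather than the actual teardown code:

     for (size_t i = 0; i < dbg->mem_stacks; i++)
       {
         struct libdw_memblock *block = dbg->mem_tails[i];
         while (block != NULL)
           {
             struct libdw_memblock *prev = block->prev;
             free (block);
             block = prev;
           }
       }
     free (dbg->mem_tails);  */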