/src/perfetto/buildtools/android-unwinding/libunwindstack/Symbols.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | /* |
2 | | * Copyright (C) 2017 The Android Open Source Project |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <elf.h> |
18 | | #include <stdint.h> |
19 | | #include <string.h> |
20 | | |
21 | | #include <algorithm> |
22 | | #include <string> |
23 | | #include <vector> |
24 | | |
25 | | #include <unwindstack/Memory.h> |
26 | | |
27 | | #include "Check.h" |
28 | | #include "Symbols.h" |
29 | | |
30 | | namespace unwindstack { |
31 | | |
32 | | Symbols::Symbols(uint64_t offset, uint64_t size, uint64_t entry_size, uint64_t str_offset, |
33 | | uint64_t str_size) |
34 | 1.29k | : offset_(offset), |
35 | 1.29k | count_(entry_size != 0 ? ((size / entry_size > kMaxSymbols) ? kMaxSymbols : size / entry_size) |
36 | 1.29k | : 0), |
37 | 1.29k | entry_size_(entry_size), |
38 | 1.29k | str_offset_(str_offset) { |
39 | 1.29k | if (__builtin_add_overflow(str_offset_, str_size, &str_end_)) { |
40 | | // Set to the max so that the code will still try to get symbol names. |
41 | | // Any reads that might be invalid will simply return no data, so |
42 | | // this will not result in crashes. |
43 | | // The assumption is that this value might have been corrupted, but |
44 | | // enough of the elf data is valid such that the code can still |
45 | | // get symbol information. |
46 | 58 | str_end_ = UINT64_MAX; |
47 | 58 | } |
48 | 1.29k | } |
49 | | |
50 | | template <typename SymType> |
51 | 506k | static bool IsFunc(const SymType* entry) { |
52 | 506k | return entry->st_shndx != SHN_UNDEF && ELF32_ST_TYPE(entry->st_info) == STT_FUNC; |
53 | 506k | } Unexecuted instantiation: Symbols.cpp:bool unwindstack::IsFunc<Elf32_Sym>(Elf32_Sym const*) Symbols.cpp:bool unwindstack::IsFunc<Elf64_Sym>(Elf64_Sym const*) Line | Count | Source | 51 | 506k | static bool IsFunc(const SymType* entry) { | 52 | 506k | return entry->st_shndx != SHN_UNDEF && ELF32_ST_TYPE(entry->st_info) == STT_FUNC; | 53 | 506k | } |
|
54 | | |
55 | | // Binary search the symbol table to find function containing the given address. |
56 | | // Without remap, the symbol table is assumed to be sorted and accessed directly. |
57 | | // If the symbol table is not sorted this method might fail but should not crash. |
58 | | // When the indices are remapped, they are guaranteed to be sorted by address. |
59 | | template <typename SymType, bool RemapIndices> |
60 | 2.59k | Symbols::Info* Symbols::BinarySearch(uint64_t addr, Memory* elf_memory, uint64_t* func_offset) { |
61 | | // Fast-path: Check if the symbol has been already read from memory. |
62 | | // Otherwise use the cache iterator to constrain the binary search range. |
63 | | // (the symbol must be in the gap between this and the previous iterator) |
64 | 2.59k | auto it = symbols_.upper_bound(addr); |
65 | 2.59k | if (it != symbols_.end()) { |
66 | 3 | uint64_t sym_value = (it->first - it->second.size); // Function address. |
67 | 3 | if (sym_value <= addr) { |
68 | 0 | *func_offset = addr - sym_value; |
69 | 0 | return &it->second; |
70 | 0 | } |
71 | 3 | } |
72 | 2.59k | uint32_t count = RemapIndices ? remap_->size() : count_; |
73 | 2.59k | uint32_t last = (it != symbols_.end()) ? it->second.index : count; |
74 | 2.59k | uint32_t first = (it != symbols_.begin()) ? std::prev(it)->second.index + 1 : 0; |
75 | | |
76 | 5.77k | while (first < last) { |
77 | 4.93k | uint32_t current = first + (last - first) / 2; |
78 | 4.93k | uint32_t symbol_index = RemapIndices ? remap_.value()[current] : current; |
79 | 4.93k | uint64_t offset = symbol_index * entry_size_; |
80 | 4.93k | if (__builtin_add_overflow(offset, offset_, &offset)) { |
81 | | // The elf data might be malformed. |
82 | 17 | return nullptr; |
83 | 17 | } |
84 | 4.91k | SymType sym; |
85 | 4.91k | if (!elf_memory->ReadFully(offset, &sym, sizeof(sym))) { |
86 | 987 | return nullptr; |
87 | 987 | } |
88 | | // There shouldn't be multiple symbols with same end address, but in case there are, |
89 | | // overwrite the cache with the last entry, so that 'sym' and 'info' are consistent. |
90 | 3.92k | Info& info = symbols_[sym.st_value + sym.st_size]; |
91 | 3.92k | info = {.size = static_cast<uint32_t>(sym.st_size), .index = current}; |
92 | 3.92k | if (addr < sym.st_value) { |
93 | 3.01k | last = current; |
94 | 3.01k | } else if (addr < sym.st_value + sym.st_size) { |
95 | 745 | *func_offset = addr - sym.st_value; |
96 | 745 | return &info; |
97 | 745 | } else { |
98 | 166 | first = current + 1; |
99 | 166 | } |
100 | 3.92k | } |
101 | 845 | return nullptr; |
102 | 2.59k | } Unexecuted instantiation: unwindstack::Symbols::Info* unwindstack::Symbols::BinarySearch<Elf32_Sym, false>(unsigned long, unwindstack::Memory*, unsigned long*) Unexecuted instantiation: unwindstack::Symbols::Info* unwindstack::Symbols::BinarySearch<Elf32_Sym, true>(unsigned long, unwindstack::Memory*, unsigned long*) unwindstack::Symbols::Info* unwindstack::Symbols::BinarySearch<Elf64_Sym, false>(unsigned long, unwindstack::Memory*, unsigned long*) Line | Count | Source | 60 | 1.29k | Symbols::Info* Symbols::BinarySearch(uint64_t addr, Memory* elf_memory, uint64_t* func_offset) { | 61 | | // Fast-path: Check if the symbol has been already read from memory. | 62 | | // Otherwise use the cache iterator to constrain the binary search range. | 63 | | // (the symbol must be in the gap between this and the previous iterator) | 64 | 1.29k | auto it = symbols_.upper_bound(addr); | 65 | 1.29k | if (it != symbols_.end()) { | 66 | 0 | uint64_t sym_value = (it->first - it->second.size); // Function address. | 67 | 0 | if (sym_value <= addr) { | 68 | 0 | *func_offset = addr - sym_value; | 69 | 0 | return &it->second; | 70 | 0 | } | 71 | 0 | } | 72 | 1.29k | uint32_t count = RemapIndices ? remap_->size() : count_; | 73 | 1.29k | uint32_t last = (it != symbols_.end()) ? it->second.index : count; | 74 | 1.29k | uint32_t first = (it != symbols_.begin()) ? std::prev(it)->second.index + 1 : 0; | 75 | | | 76 | 2.10k | while (first < last) { | 77 | 1.81k | uint32_t current = first + (last - first) / 2; | 78 | 1.81k | uint32_t symbol_index = RemapIndices ? remap_.value()[current] : current; | 79 | 1.81k | uint64_t offset = symbol_index * entry_size_; | 80 | 1.81k | if (__builtin_add_overflow(offset, offset_, &offset)) { | 81 | | // The elf data might be malformed. | 82 | 17 | return nullptr; | 83 | 17 | } | 84 | 1.79k | SymType sym; | 85 | 1.79k | if (!elf_memory->ReadFully(offset, &sym, sizeof(sym))) { | 86 | 987 | return nullptr; | 87 | 987 | } | 88 | | // There shouldn't be multiple symbols with same end address, but in case there are, | 89 | | // overwrite the cache with the last entry, so that 'sym' and 'info' are consistent. | 90 | 811 | Info& info = symbols_[sym.st_value + sym.st_size]; | 91 | 811 | info = {.size = static_cast<uint32_t>(sym.st_size), .index = current}; | 92 | 811 | if (addr < sym.st_value) { | 93 | 801 | last = current; | 94 | 801 | } else if (addr < sym.st_value + sym.st_size) { | 95 | 2 | *func_offset = addr - sym.st_value; | 96 | 2 | return &info; | 97 | 8 | } else { | 98 | 8 | first = current + 1; | 99 | 8 | } | 100 | 811 | } | 101 | 289 | return nullptr; | 102 | 1.29k | } |
unwindstack::Symbols::Info* unwindstack::Symbols::BinarySearch<Elf64_Sym, true>(unsigned long, unwindstack::Memory*, unsigned long*) Line | Count | Source | 60 | 1.29k | Symbols::Info* Symbols::BinarySearch(uint64_t addr, Memory* elf_memory, uint64_t* func_offset) { | 61 | | // Fast-path: Check if the symbol has been already read from memory. | 62 | | // Otherwise use the cache iterator to constrain the binary search range. | 63 | | // (the symbol must be in the gap between this and the previous iterator) | 64 | 1.29k | auto it = symbols_.upper_bound(addr); | 65 | 1.29k | if (it != symbols_.end()) { | 66 | 3 | uint64_t sym_value = (it->first - it->second.size); // Function address. | 67 | 3 | if (sym_value <= addr) { | 68 | 0 | *func_offset = addr - sym_value; | 69 | 0 | return &it->second; | 70 | 0 | } | 71 | 3 | } | 72 | 1.29k | uint32_t count = RemapIndices ? remap_->size() : count_; | 73 | 1.29k | uint32_t last = (it != symbols_.end()) ? it->second.index : count; | 74 | 1.29k | uint32_t first = (it != symbols_.begin()) ? std::prev(it)->second.index + 1 : 0; | 75 | | | 76 | 3.67k | while (first < last) { | 77 | 3.11k | uint32_t current = first + (last - first) / 2; | 78 | 3.11k | uint32_t symbol_index = RemapIndices ? remap_.value()[current] : current; | 79 | 3.11k | uint64_t offset = symbol_index * entry_size_; | 80 | 3.11k | if (__builtin_add_overflow(offset, offset_, &offset)) { | 81 | | // The elf data might be malformed. | 82 | 0 | return nullptr; | 83 | 0 | } | 84 | 3.11k | SymType sym; | 85 | 3.11k | if (!elf_memory->ReadFully(offset, &sym, sizeof(sym))) { | 86 | 0 | return nullptr; | 87 | 0 | } | 88 | | // There shouldn't be multiple symbols with same end address, but in case there are, | 89 | | // overwrite the cache with the last entry, so that 'sym' and 'info' are consistent. | 90 | 3.11k | Info& info = symbols_[sym.st_value + sym.st_size]; | 91 | 3.11k | info = {.size = static_cast<uint32_t>(sym.st_size), .index = current}; | 92 | 3.11k | if (addr < sym.st_value) { | 93 | 2.21k | last = current; | 94 | 2.21k | } else if (addr < sym.st_value + sym.st_size) { | 95 | 743 | *func_offset = addr - sym.st_value; | 96 | 743 | return &info; | 97 | 743 | } else { | 98 | 158 | first = current + 1; | 99 | 158 | } | 100 | 3.11k | } | 101 | 556 | return nullptr; | 102 | 1.29k | } |
|
103 | | |
104 | | // Create remapping table which allows us to access symbols as if they were sorted by address. |
105 | | template <typename SymType> |
106 | 1.29k | void Symbols::BuildRemapTable(Memory* elf_memory) { |
107 | 1.29k | std::vector<uint64_t> addrs; // Addresses of all symbols (addrs[i] == symbols[i].st_value). |
108 | 1.29k | addrs.reserve(count_); |
109 | 1.29k | remap_.emplace(); // Construct the optional remap table. |
110 | 1.29k | remap_->reserve(count_); |
111 | 4.65k | for (size_t symbol_idx = 0; symbol_idx < count_;) { |
112 | | // Read symbols from memory. We intentionally bypass the cache to save memory. |
113 | | // Do the reads in batches so that we minimize the number of memory read calls. |
114 | 4.45k | uint64_t read_bytes = (count_ - symbol_idx) * entry_size_; |
115 | 4.45k | uint8_t buffer[1024]; |
116 | 4.45k | read_bytes = std::min<size_t>(sizeof(buffer), read_bytes); |
117 | 4.45k | uint64_t offset = symbol_idx * entry_size_; |
118 | 4.45k | if (__builtin_add_overflow(offset, offset_, &offset)) { |
119 | | // The elf data might be malformed. |
120 | 0 | break; |
121 | 0 | } |
122 | 4.45k | read_bytes = elf_memory->Read(offset, buffer, read_bytes); |
123 | 4.45k | if (read_bytes < sizeof(SymType)) { |
124 | | // The elf data might be malformed. |
125 | 1.09k | break; |
126 | 1.09k | } |
127 | 508k | for (uint64_t offset = 0; offset <= read_bytes - sizeof(SymType); |
128 | 505k | offset += entry_size_, symbol_idx++) { |
129 | 505k | SymType sym; |
130 | 505k | memcpy(&sym, &buffer[offset], sizeof(SymType)); // Copy to ensure alignment. |
131 | 505k | addrs.push_back(sym.st_value); // Always insert so it is indexable by symbol index. |
132 | | // NB: It is important to filter our zero-sized symbols since otherwise we can get |
133 | | // duplicate end addresses in the table (e.g. if there is custom "end" symbol marker). |
134 | 505k | if (IsFunc(&sym) && sym.st_size != 0) { |
135 | 59.6k | remap_->push_back(symbol_idx); // Indices of function symbols only. |
136 | 59.6k | } |
137 | 505k | } |
138 | 3.35k | } |
139 | | // Sort by address to make the remap list binary searchable (stable due to the a<b tie break). |
140 | 627k | auto comp = [&addrs](auto a, auto b) { return std::tie(addrs[a], a) < std::tie(addrs[b], b); }; Unexecuted instantiation: auto unwindstack::Symbols::BuildRemapTable<Elf32_Sym>(unwindstack::Memory*)::{lambda(auto:1, auto:2)#1}::operator()<unsigned int, unsigned int>(unsigned int, unsigned int) const auto unwindstack::Symbols::BuildRemapTable<Elf64_Sym>(unwindstack::Memory*)::{lambda(auto:1, auto:2)#1}::operator()<unsigned int, unsigned int>(unsigned int, unsigned int) const Line | Count | Source | 140 | 627k | auto comp = [&addrs](auto a, auto b) { return std::tie(addrs[a], a) < std::tie(addrs[b], b); }; |
|
141 | 1.29k | std::sort(remap_->begin(), remap_->end(), comp); |
142 | | // Remove duplicate entries (methods de-duplicated by the linker). |
143 | 58.7k | auto pred = [&addrs](auto a, auto b) { return addrs[a] == addrs[b]; }; Unexecuted instantiation: auto unwindstack::Symbols::BuildRemapTable<Elf32_Sym>(unwindstack::Memory*)::{lambda(auto:1, auto:2)#2}::operator()<unsigned int, unsigned int>(unsigned int, unsigned int) const auto unwindstack::Symbols::BuildRemapTable<Elf64_Sym>(unwindstack::Memory*)::{lambda(auto:1, auto:2)#2}::operator()<unsigned int, unsigned int>(unsigned int, unsigned int) const Line | Count | Source | 143 | 58.7k | auto pred = [&addrs](auto a, auto b) { return addrs[a] == addrs[b]; }; |
|
144 | 1.29k | remap_->erase(std::unique(remap_->begin(), remap_->end(), pred), remap_->end()); |
145 | 1.29k | remap_->shrink_to_fit(); |
146 | 1.29k | } Unexecuted instantiation: void unwindstack::Symbols::BuildRemapTable<Elf32_Sym>(unwindstack::Memory*) void unwindstack::Symbols::BuildRemapTable<Elf64_Sym>(unwindstack::Memory*) Line | Count | Source | 106 | 1.29k | void Symbols::BuildRemapTable(Memory* elf_memory) { | 107 | 1.29k | std::vector<uint64_t> addrs; // Addresses of all symbols (addrs[i] == symbols[i].st_value). | 108 | 1.29k | addrs.reserve(count_); | 109 | 1.29k | remap_.emplace(); // Construct the optional remap table. | 110 | 1.29k | remap_->reserve(count_); | 111 | 4.65k | for (size_t symbol_idx = 0; symbol_idx < count_;) { | 112 | | // Read symbols from memory. We intentionally bypass the cache to save memory. | 113 | | // Do the reads in batches so that we minimize the number of memory read calls. | 114 | 4.45k | uint64_t read_bytes = (count_ - symbol_idx) * entry_size_; | 115 | 4.45k | uint8_t buffer[1024]; | 116 | 4.45k | read_bytes = std::min<size_t>(sizeof(buffer), read_bytes); | 117 | 4.45k | uint64_t offset = symbol_idx * entry_size_; | 118 | 4.45k | if (__builtin_add_overflow(offset, offset_, &offset)) { | 119 | | // The elf data might be malformed. | 120 | 0 | break; | 121 | 0 | } | 122 | 4.45k | read_bytes = elf_memory->Read(offset, buffer, read_bytes); | 123 | 4.45k | if (read_bytes < sizeof(SymType)) { | 124 | | // The elf data might be malformed. | 125 | 1.09k | break; | 126 | 1.09k | } | 127 | 508k | for (uint64_t offset = 0; offset <= read_bytes - sizeof(SymType); | 128 | 505k | offset += entry_size_, symbol_idx++) { | 129 | 505k | SymType sym; | 130 | 505k | memcpy(&sym, &buffer[offset], sizeof(SymType)); // Copy to ensure alignment. | 131 | 505k | addrs.push_back(sym.st_value); // Always insert so it is indexable by symbol index. | 132 | | // NB: It is important to filter our zero-sized symbols since otherwise we can get | 133 | | // duplicate end addresses in the table (e.g. if there is custom "end" symbol marker). | 134 | 505k | if (IsFunc(&sym) && sym.st_size != 0) { | 135 | 59.6k | remap_->push_back(symbol_idx); // Indices of function symbols only. | 136 | 59.6k | } | 137 | 505k | } | 138 | 3.35k | } | 139 | | // Sort by address to make the remap list binary searchable (stable due to the a<b tie break). | 140 | 1.29k | auto comp = [&addrs](auto a, auto b) { return std::tie(addrs[a], a) < std::tie(addrs[b], b); }; | 141 | 1.29k | std::sort(remap_->begin(), remap_->end(), comp); | 142 | | // Remove duplicate entries (methods de-duplicated by the linker). | 143 | 1.29k | auto pred = [&addrs](auto a, auto b) { return addrs[a] == addrs[b]; }; | 144 | 1.29k | remap_->erase(std::unique(remap_->begin(), remap_->end(), pred), remap_->end()); | 145 | 1.29k | remap_->shrink_to_fit(); | 146 | 1.29k | } |
|
147 | | |
148 | | template <typename SymType> |
149 | | bool Symbols::GetName(uint64_t addr, Memory* elf_memory, SharedString* name, |
150 | 1.30k | uint64_t* func_offset) { |
151 | 1.30k | Info* info; |
152 | 1.30k | if (!remap_.has_value()) { |
153 | | // Assume the symbol table is sorted. If it is not, this will gracefully fail. |
154 | 1.29k | info = BinarySearch<SymType, false>(addr, elf_memory, func_offset); |
155 | 1.29k | if (info == nullptr) { |
156 | | // Create the remapping table and retry the search. |
157 | 1.29k | BuildRemapTable<SymType>(elf_memory); |
158 | 1.29k | symbols_.clear(); // Remove cached symbols since the access pattern will be different. |
159 | 1.29k | info = BinarySearch<SymType, true>(addr, elf_memory, func_offset); |
160 | 1.29k | } |
161 | 1.29k | } else { |
162 | | // Fast search using the previously created remap table. |
163 | 6 | info = BinarySearch<SymType, true>(addr, elf_memory, func_offset); |
164 | 6 | } |
165 | 1.30k | if (info == nullptr) { |
166 | 556 | return false; |
167 | 556 | } |
168 | | // Read and cache the symbol name. |
169 | 745 | if (info->name.is_null()) { |
170 | 745 | SymType sym; |
171 | 745 | uint32_t symbol_index = remap_.has_value() ? remap_.value()[info->index] : info->index; |
172 | 745 | uint64_t offset = symbol_index * entry_size_; |
173 | 745 | if (__builtin_add_overflow(offset, offset_, &offset)) { |
174 | | // The elf data might be malformed. |
175 | 0 | return false; |
176 | 0 | } |
177 | 745 | if (!elf_memory->ReadFully(offset, &sym, sizeof(sym))) { |
178 | 0 | return false; |
179 | 0 | } |
180 | 745 | std::string symbol_name; |
181 | 745 | uint64_t str; |
182 | 745 | if (__builtin_add_overflow(str_offset_, sym.st_name, &str) || str >= str_end_) { |
183 | 4 | return false; |
184 | 4 | } |
185 | 741 | if (!IsFunc(&sym) || !elf_memory->ReadString(str, &symbol_name, str_end_ - str)) { |
186 | 741 | return false; |
187 | 741 | } |
188 | 0 | info->name = SharedString(std::move(symbol_name)); |
189 | 0 | } |
190 | 0 | *name = info->name; |
191 | 0 | return true; |
192 | 745 | } Unexecuted instantiation: bool unwindstack::Symbols::GetName<Elf32_Sym>(unsigned long, unwindstack::Memory*, unwindstack::SharedString*, unsigned long*) bool unwindstack::Symbols::GetName<Elf64_Sym>(unsigned long, unwindstack::Memory*, unwindstack::SharedString*, unsigned long*) Line | Count | Source | 150 | 1.30k | uint64_t* func_offset) { | 151 | 1.30k | Info* info; | 152 | 1.30k | if (!remap_.has_value()) { | 153 | | // Assume the symbol table is sorted. If it is not, this will gracefully fail. | 154 | 1.29k | info = BinarySearch<SymType, false>(addr, elf_memory, func_offset); | 155 | 1.29k | if (info == nullptr) { | 156 | | // Create the remapping table and retry the search. | 157 | 1.29k | BuildRemapTable<SymType>(elf_memory); | 158 | 1.29k | symbols_.clear(); // Remove cached symbols since the access pattern will be different. | 159 | 1.29k | info = BinarySearch<SymType, true>(addr, elf_memory, func_offset); | 160 | 1.29k | } | 161 | 1.29k | } else { | 162 | | // Fast search using the previously created remap table. | 163 | 6 | info = BinarySearch<SymType, true>(addr, elf_memory, func_offset); | 164 | 6 | } | 165 | 1.30k | if (info == nullptr) { | 166 | 556 | return false; | 167 | 556 | } | 168 | | // Read and cache the symbol name. | 169 | 745 | if (info->name.is_null()) { | 170 | 745 | SymType sym; | 171 | 745 | uint32_t symbol_index = remap_.has_value() ? remap_.value()[info->index] : info->index; | 172 | 745 | uint64_t offset = symbol_index * entry_size_; | 173 | 745 | if (__builtin_add_overflow(offset, offset_, &offset)) { | 174 | | // The elf data might be malformed. | 175 | 0 | return false; | 176 | 0 | } | 177 | 745 | if (!elf_memory->ReadFully(offset, &sym, sizeof(sym))) { | 178 | 0 | return false; | 179 | 0 | } | 180 | 745 | std::string symbol_name; | 181 | 745 | uint64_t str; | 182 | 745 | if (__builtin_add_overflow(str_offset_, sym.st_name, &str) || str >= str_end_) { | 183 | 4 | return false; | 184 | 4 | } | 185 | 741 | if (!IsFunc(&sym) || !elf_memory->ReadString(str, &symbol_name, str_end_ - str)) { | 186 | 741 | return false; | 187 | 741 | } | 188 | 0 | info->name = SharedString(std::move(symbol_name)); | 189 | 0 | } | 190 | 0 | *name = info->name; | 191 | 0 | return true; | 192 | 745 | } |
|
193 | | |
194 | | template <typename SymType> |
195 | 0 | bool Symbols::GetGlobal(Memory* elf_memory, const std::string& name, uint64_t* memory_address) { |
196 | | // Lookup from cache. |
197 | 0 | auto it = global_variables_.find(name); |
198 | 0 | if (it != global_variables_.end()) { |
199 | 0 | if (it->second.has_value()) { |
200 | 0 | *memory_address = it->second.value(); |
201 | 0 | return true; |
202 | 0 | } |
203 | 0 | return false; |
204 | 0 | } |
205 | | |
206 | | // Linear scan of all symbols. |
207 | 0 | for (uint32_t i = 0; i < count_; i++) { |
208 | 0 | uint64_t offset = i * entry_size_; |
209 | 0 | if (__builtin_add_overflow(offset_, offset, &offset)) { |
210 | | // The elf data might be malformed. |
211 | 0 | return false; |
212 | 0 | } |
213 | 0 | SymType entry; |
214 | 0 | if (!elf_memory->ReadFully(offset, &entry, sizeof(entry))) { |
215 | 0 | return false; |
216 | 0 | } |
217 | | |
218 | 0 | if (entry.st_shndx != SHN_UNDEF && ELF32_ST_TYPE(entry.st_info) == STT_OBJECT && |
219 | 0 | ELF32_ST_BIND(entry.st_info) == STB_GLOBAL) { |
220 | 0 | uint64_t str_offset = str_offset_ + entry.st_name; |
221 | 0 | if (__builtin_add_overflow(str_offset_, entry.st_name, &str_offset)) { |
222 | | // The elf data might be malformed. |
223 | 0 | return false; |
224 | 0 | } |
225 | 0 | if (str_offset < str_end_) { |
226 | 0 | std::string symbol; |
227 | 0 | if (elf_memory->ReadString(str_offset, &symbol, str_end_ - str_offset) && symbol == name) { |
228 | 0 | global_variables_.emplace(name, entry.st_value); |
229 | 0 | *memory_address = entry.st_value; |
230 | 0 | return true; |
231 | 0 | } |
232 | 0 | } |
233 | 0 | } |
234 | 0 | } |
235 | 0 | global_variables_.emplace(name, std::optional<uint64_t>()); // Remember "not found" outcome. |
236 | 0 | return false; |
237 | 0 | } Unexecuted instantiation: bool unwindstack::Symbols::GetGlobal<Elf32_Sym>(unwindstack::Memory*, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, unsigned long*) Unexecuted instantiation: bool unwindstack::Symbols::GetGlobal<Elf64_Sym>(unwindstack::Memory*, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, unsigned long*) |
238 | | |
239 | | // Instantiate all of the needed template functions. |
240 | | template bool Symbols::GetName<Elf32_Sym>(uint64_t, Memory*, SharedString*, uint64_t*); |
241 | | template bool Symbols::GetName<Elf64_Sym>(uint64_t, Memory*, SharedString*, uint64_t*); |
242 | | |
243 | | template bool Symbols::GetGlobal<Elf32_Sym>(Memory*, const std::string&, uint64_t*); |
244 | | template bool Symbols::GetGlobal<Elf64_Sym>(Memory*, const std::string&, uint64_t*); |
245 | | } // namespace unwindstack |