/src/perfetto/buildtools/android-unwinding/libunwindstack/Memory.cpp
Line | Count | Source |
1 | | /* |
2 | | * Copyright (C) 2016 The Android Open Source Project |
3 | | * |
4 | | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | | * you may not use this file except in compliance with the License. |
6 | | * You may obtain a copy of the License at |
7 | | * |
8 | | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | | * |
10 | | * Unless required by applicable law or agreed to in writing, software |
11 | | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | | * See the License for the specific language governing permissions and |
14 | | * limitations under the License. |
15 | | */ |
16 | | |
17 | | #include <errno.h> |
18 | | #include <fcntl.h> |
19 | | #include <stdint.h> |
20 | | #include <string.h> |
21 | | #include <sys/mman.h> |
22 | | #include <sys/ptrace.h> |
23 | | #include <sys/stat.h> |
24 | | #include <sys/types.h> |
25 | | #include <sys/uio.h> |
26 | | #include <unistd.h> |
27 | | |
28 | | #include <algorithm> |
29 | | #include <memory> |
30 | | #include <mutex> |
31 | | #include <optional> |
32 | | #include <string> |
33 | | |
34 | | #include <android-base/unique_fd.h> |
35 | | |
36 | | #include <unwindstack/Log.h> |
37 | | #include <unwindstack/Memory.h> |
38 | | |
39 | | #include "MemoryBuffer.h" |
40 | | #include "MemoryCache.h" |
41 | | #include "MemoryFileAtOffset.h" |
42 | | #include "MemoryLocal.h" |
43 | | #include "MemoryLocalUnsafe.h" |
44 | | #include "MemoryOffline.h" |
45 | | #include "MemoryOfflineBuffer.h" |
46 | | #include "MemoryRange.h" |
47 | | #include "MemoryRemote.h" |
48 | | |
49 | | namespace unwindstack { |
50 | | |
// Reads len bytes at remote_src in process pid into dst using
// process_vm_readv(2). The remote side is split into page-sized iovec
// elements so that one unreadable page only truncates the transfer instead
// of failing the entire call. Returns the number of bytes copied.
static size_t ProcessVmRead(pid_t pid, uint64_t remote_src, void* dst, size_t len) {
  // From the manpage:
  //   A partial read/write may result if one of the remote_iov elements points to an invalid
  //   memory region in the remote process.
  //
  //   Partial transfers apply at the granularity of iovec elements. These system calls won't
  //   perform a partial transfer that splits a single iovec element.
  constexpr size_t kMaxIovecs = 64;
  struct iovec src_iovs[kMaxIovecs];

  uint64_t src_addr = remote_src;
  size_t bytes_read = 0;
  while (len > 0) {
    // The local side is one contiguous destination buffer.
    struct iovec dst_iov = {
        .iov_base = &reinterpret_cast<uint8_t*>(dst)[bytes_read], .iov_len = len,
    };

    // Build up to kMaxIovecs remote elements, each stopping at a page boundary.
    size_t iovecs_used = 0;
    while (len > 0 && iovecs_used < kMaxIovecs) {
      // struct iovec uses void* for iov_base, so the remote address must fit
      // into a native pointer.
      if (src_addr >= UINTPTR_MAX) {
        errno = EFAULT;
        return bytes_read;
      }

      src_iovs[iovecs_used].iov_base = reinterpret_cast<void*>(src_addr);

      // Clamp this element at the next page boundary and at the total length.
      uintptr_t misalignment = src_addr & (getpagesize() - 1);
      size_t iov_len = getpagesize() - misalignment;
      iov_len = std::min(iov_len, len);

      len -= iov_len;
      if (__builtin_add_overflow(src_addr, iov_len, &src_addr)) {
        errno = EFAULT;
        return bytes_read;
      }

      src_iovs[iovecs_used].iov_len = iov_len;
      ++iovecs_used;
    }

    ssize_t rc = process_vm_readv(pid, &dst_iov, 1, src_iovs, iovecs_used, 0);
    if (rc == -1) {
      return bytes_read;
    }
    bytes_read += rc;
  }
  return bytes_read;
}
106 | | |
// Peeks one long-sized word at addr in process pid via PTRACE_PEEKTEXT.
// Returns false if the peek failed. errno is cleared beforehand so that a
// legitimately stored value of -1 can be told apart from a failure (ptrace
// returns -1 and sets errno on error).
static bool PtraceReadLong(pid_t pid, uint64_t addr, long* value) {
  errno = 0;
  *value = ptrace(PTRACE_PEEKTEXT, pid, reinterpret_cast<void*>(addr), nullptr);
  return !(*value == -1 && errno);
}
117 | | |
// Reads `bytes` bytes at addr from process pid using word-sized
// PTRACE_PEEKTEXT peeks. Handles an unaligned start address and a tail
// shorter than a word by peeking whole aligned words and copying out only
// the relevant bytes. Returns the number of bytes successfully read.
static size_t PtraceRead(pid_t pid, uint64_t addr, void* dst, size_t bytes) {
  // Make sure that there is no overflow.
  uint64_t max_size;
  if (__builtin_add_overflow(addr, bytes, &max_size)) {
    return 0;
  }

  size_t bytes_read = 0;
  long data;
  // Leading partial word: peek the aligned word containing addr and copy the
  // bytes starting at the misalignment.
  size_t align_bytes = addr & (sizeof(long) - 1);
  if (align_bytes != 0) {
    if (!PtraceReadLong(pid, addr & ~(sizeof(long) - 1), &data)) {
      return 0;
    }
    size_t copy_bytes = std::min(sizeof(long) - align_bytes, bytes);
    memcpy(dst, reinterpret_cast<uint8_t*>(&data) + align_bytes, copy_bytes);
    addr += copy_bytes;
    dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + copy_bytes);
    bytes -= copy_bytes;
    bytes_read += copy_bytes;
  }

  // Aligned middle: copy whole words. A failed peek returns the byte count
  // accumulated so far (a short read), not zero.
  for (size_t i = 0; i < bytes / sizeof(long); i++) {
    if (!PtraceReadLong(pid, addr, &data)) {
      return bytes_read;
    }
    memcpy(dst, &data, sizeof(long));
    dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + sizeof(long));
    addr += sizeof(long);
    bytes_read += sizeof(long);
  }

  // Trailing partial word: peek one more word and copy the leading bytes.
  size_t left_over = bytes & (sizeof(long) - 1);
  if (left_over) {
    if (!PtraceReadLong(pid, addr, &data)) {
      return bytes_read;
    }
    memcpy(dst, &data, left_over);
    bytes_read += left_over;
  }
  return bytes_read;
}
160 | | |
161 | 4.86M | bool Memory::ReadFully(uint64_t addr, void* dst, size_t size) { |
162 | 4.86M | size_t rc = Read(addr, dst, size); |
163 | 4.86M | return rc == size; |
164 | 4.86M | } |
165 | | |
// Reads a NUL-terminated string starting at addr into *dst, examining at
// most max_read bytes. Returns false if no terminator is found within
// max_read bytes or if the underlying Read yields no data.
bool Memory::ReadString(uint64_t addr, std::string* dst, size_t max_read) {
  char buffer[256];  // Large enough for 99% of symbol names.
  size_t size = 0;   // Number of bytes which were read into the buffer.
  for (size_t offset = 0; offset < max_read; offset += size) {
    // Look for null-terminator first, so we can allocate string of exact size.
    // If we know the end of valid memory range, do the reads in larger blocks.
    size_t read = std::min(sizeof(buffer), max_read - offset);
    size = Read(addr + offset, buffer, read);
    if (size == 0) {
      return false;  // We have not found end of string yet and we can not read more data.
    }
    size_t length = strnlen(buffer, size);  // Index of the null-terminator.
    if (length < size) {
      // We found the null-terminator. Allocate the string and set its content.
      if (offset == 0) {
        // We did just single read, so the buffer already contains the whole string.
        dst->assign(buffer, length);
        return true;
      } else {
        // The buffer contains only the last block. Read the whole string again.
        dst->assign(offset + length, '\0');
        return ReadFully(addr, dst->data(), dst->size());
      }
    }
  }
  return false;
}
193 | | |
194 | | std::shared_ptr<Memory> Memory::CreateFileMemory(const std::string& path, uint64_t offset, |
195 | 0 | uint64_t size) { |
196 | 0 | auto memory = std::make_shared<MemoryFileAtOffset>(); |
197 | |
|
198 | 0 | if (memory->Init(path, offset, size)) { |
199 | 0 | return memory; |
200 | 0 | } |
201 | | |
202 | 0 | return nullptr; |
203 | 0 | } |
204 | | |
205 | 0 | std::shared_ptr<Memory> Memory::CreateProcessMemoryLocalUnsafe() { |
206 | 0 | return std::shared_ptr<Memory>(new MemoryLocalUnsafe()); |
207 | 0 | } |
208 | | |
209 | 0 | std::shared_ptr<Memory> Memory::CreateProcessMemory(pid_t pid) { |
210 | 0 | if (pid == getpid()) { |
211 | 0 | return std::shared_ptr<Memory>(new MemoryLocal()); |
212 | 0 | } |
213 | 0 | return std::shared_ptr<Memory>(new MemoryRemote(pid)); |
214 | 0 | } |
215 | | |
216 | 0 | std::shared_ptr<Memory> Memory::CreateProcessMemoryCached(pid_t pid) { |
217 | 0 | if (pid == getpid()) { |
218 | 0 | return std::shared_ptr<Memory>(new MemoryCache(new MemoryLocal())); |
219 | 0 | } |
220 | 0 | return std::shared_ptr<Memory>(new MemoryCache(new MemoryRemote(pid))); |
221 | 0 | } |
222 | | |
223 | 0 | std::shared_ptr<Memory> Memory::CreateProcessMemoryThreadCached(pid_t pid) { |
224 | 0 | if (pid == getpid()) { |
225 | 0 | return std::shared_ptr<Memory>(new MemoryThreadCache(new MemoryLocal())); |
226 | 0 | } |
227 | 0 | return std::shared_ptr<Memory>(new MemoryThreadCache(new MemoryRemote(pid))); |
228 | 0 | } |
229 | | |
230 | | std::shared_ptr<Memory> Memory::CreateOfflineMemory(const uint8_t* data, uint64_t start, |
231 | 0 | uint64_t end) { |
232 | 0 | return std::shared_ptr<Memory>(new MemoryOfflineBuffer(data, start, end)); |
233 | 0 | } |
234 | | |
235 | 0 | size_t MemoryBuffer::Read(uint64_t addr, void* dst, size_t size) { |
236 | 0 | if (addr < offset_) { |
237 | 0 | return 0; |
238 | 0 | } |
239 | 0 | addr -= offset_; |
240 | 0 | size_t raw_size = raw_.size(); |
241 | 0 | if (addr >= raw_size) { |
242 | 0 | return 0; |
243 | 0 | } |
244 | | |
245 | 0 | size_t bytes_left = raw_size - static_cast<size_t>(addr); |
246 | 0 | size_t actual_len = std::min(bytes_left, size); |
247 | 0 | memcpy(dst, &raw_[addr], actual_len); |
248 | 0 | return actual_len; |
249 | 0 | } |
250 | | |
251 | 0 | uint8_t* MemoryBuffer::GetPtr(size_t addr) { |
252 | 0 | if (addr < offset_) { |
253 | 0 | return nullptr; |
254 | 0 | } |
255 | 0 | addr -= offset_; |
256 | 0 | if (addr < raw_.size()) { |
257 | 0 | return &raw_[addr]; |
258 | 0 | } |
259 | 0 | return nullptr; |
260 | 0 | } |
261 | | |
// Unmaps any mapping still owned by this object.
MemoryFileAtOffset::~MemoryFileAtOffset() {
  Clear();
}
265 | | |
// Releases the current mapping, if any. data_ points offset_ bytes past the
// start of the mmap'd region (see Init), so back up by offset_ to recover
// the true mapping base and add offset_ back to size_ for the full length.
void MemoryFileAtOffset::Clear() {
  if (data_) {
    munmap(&data_[-offset_], size_ + offset_);
    data_ = nullptr;
  }
}
272 | | |
// Maps `file` starting at `offset` for up to `size` bytes (default: to the
// end of the file). Because mmap requires a page-aligned file offset, the
// mapping starts at the page containing `offset` and data_/size_ are
// adjusted to hide the leading misalignment from callers.
bool MemoryFileAtOffset::Init(const std::string& file, uint64_t offset, uint64_t size) {
  // Clear out any previous data if it exists.
  Clear();

  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
  if (fd == -1) {
    return false;
  }
  struct stat buf;
  if (fstat(fd, &buf) == -1) {
    return false;
  }
  if (offset >= static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  // offset_ is the misalignment within the first mapped page; aligned_offset
  // is the page-aligned file offset actually passed to mmap.
  offset_ = offset & (getpagesize() - 1);
  uint64_t aligned_offset = offset & ~(getpagesize() - 1);
  if (aligned_offset > static_cast<uint64_t>(buf.st_size) ||
      offset > static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  size_ = buf.st_size - aligned_offset;
  uint64_t max_size;
  if (!__builtin_add_overflow(size, offset_, &max_size) && max_size < size_) {
    // Truncate the mapped size.
    size_ = max_size;
  }
  void* map = mmap(nullptr, size_, PROT_READ, MAP_PRIVATE, fd, aligned_offset);
  if (map == MAP_FAILED) {
    return false;
  }

  // Point data_ at the caller's requested offset and shrink size_ so the
  // [data_, data_ + size_) window excludes the alignment padding.
  data_ = &reinterpret_cast<uint8_t*>(map)[offset_];
  size_ -= offset_;

  return true;
}
312 | | |
313 | 0 | size_t MemoryFileAtOffset::Read(uint64_t addr, void* dst, size_t size) { |
314 | 0 | if (addr >= size_) { |
315 | 0 | return 0; |
316 | 0 | } |
317 | | |
318 | 0 | size_t bytes_left = size_ - static_cast<size_t>(addr); |
319 | 0 | const unsigned char* actual_base = static_cast<const unsigned char*>(data_) + addr; |
320 | 0 | size_t actual_len = std::min(bytes_left, size); |
321 | |
|
322 | 0 | memcpy(dst, actual_base, actual_len); |
323 | 0 | return actual_len; |
324 | 0 | } |
325 | | |
// Reads remote process memory, preferring process_vm_readv and falling back
// to ptrace. Whichever mechanism first returns data is stored (as a function
// pointer in the atomic read_redirect_func_) and used directly for all
// subsequent reads.
size_t MemoryRemote::Read(uint64_t addr, void* dst, size_t size) {
#if !defined(__LP64__)
  // Cannot read an address greater than 32 bits in a 32 bit context.
  if (addr > UINT32_MAX) {
    return 0;
  }
#endif

  size_t (*read_func)(pid_t, uint64_t, void*, size_t) =
      reinterpret_cast<size_t (*)(pid_t, uint64_t, void*, size_t)>(read_redirect_func_.load());
  if (read_func != nullptr) {
    return read_func(pid_, addr, dst, size);
  } else {
    // Prefer process_vm_read, try it first. If it doesn't work, use the
    // ptrace function. If at least one of them returns at least some data,
    // set that as the permanent function to use.
    // This assumes that if process_vm_read works once, it will continue
    // to work.
    size_t bytes = ProcessVmRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(ProcessVmRead);
      return bytes;
    }
    bytes = PtraceRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(PtraceRead);
    }
    return bytes;
  }
}
356 | | |
// Reads the current process's memory through the same page-split
// process_vm_readv path used for remote processes, so invalid addresses
// produce short reads instead of faulting.
size_t MemoryLocal::Read(uint64_t addr, void* dst, size_t size) {
  return ProcessVmRead(getpid(), addr, dst, size);
}
360 | | |
// A window of `length` bytes at `begin` in the underlying memory, exposed to
// callers at addresses starting at `offset`.
MemoryRange::MemoryRange(const std::shared_ptr<Memory>& memory, uint64_t begin, uint64_t length,
                         uint64_t offset)
    : memory_(memory), begin_(begin), length_(length), offset_(offset) {}
364 | | |
365 | 4.95M | size_t MemoryRange::Read(uint64_t addr, void* dst, size_t size) { |
366 | 4.95M | if (addr < offset_) { |
367 | 0 | return 0; |
368 | 0 | } |
369 | | |
370 | 4.95M | uint64_t read_offset = addr - offset_; |
371 | 4.95M | if (read_offset >= length_) { |
372 | 54.2k | return 0; |
373 | 54.2k | } |
374 | | |
375 | 4.90M | uint64_t read_length = std::min(static_cast<uint64_t>(size), length_ - read_offset); |
376 | 4.90M | uint64_t read_addr; |
377 | 4.90M | if (__builtin_add_overflow(read_offset, begin_, &read_addr)) { |
378 | 0 | return 0; |
379 | 0 | } |
380 | | |
381 | 4.90M | return memory_->Read(read_addr, dst, read_length); |
382 | 4.90M | } |
383 | | |
384 | 0 | bool MemoryRanges::Insert(MemoryRange* memory) { |
385 | 0 | uint64_t last_addr; |
386 | 0 | if (__builtin_add_overflow(memory->offset(), memory->length(), &last_addr)) { |
387 | | // This should never happen in the real world. However, it is possible |
388 | | // that an offset in a mapped in segment could be crafted such that |
389 | | // this value overflows. In that case, clamp the value to the max uint64 |
390 | | // value. |
391 | 0 | last_addr = UINT64_MAX; |
392 | 0 | } |
393 | 0 | auto entry = maps_.try_emplace(last_addr, memory); |
394 | 0 | if (entry.second) { |
395 | 0 | return true; |
396 | 0 | } |
397 | 0 | delete memory; |
398 | 0 | return false; |
399 | 0 | } |
400 | | |
401 | 0 | size_t MemoryRanges::Read(uint64_t addr, void* dst, size_t size) { |
402 | 0 | auto entry = maps_.upper_bound(addr); |
403 | 0 | if (entry != maps_.end()) { |
404 | 0 | return entry->second->Read(addr, dst, size); |
405 | 0 | } |
406 | 0 | return 0; |
407 | 0 | } |
408 | | |
// Initializes offline memory from a dump file: the file begins with a
// uint64_t holding the start address of the dumped region, followed by the
// raw memory contents.
bool MemoryOffline::Init(const std::string& file, uint64_t offset) {
  auto memory_file = std::make_shared<MemoryFileAtOffset>();
  if (!memory_file->Init(file, offset)) {
    return false;
  }

  // The first uint64_t value is the start of memory.
  uint64_t start;
  if (!memory_file->ReadFully(0, &start, sizeof(start))) {
    return false;
  }

  // The remaining bytes (after the header) are the memory contents.
  uint64_t size = memory_file->Size();
  if (__builtin_sub_overflow(size, sizeof(start), &size)) {
    return false;
  }

  memory_ = std::make_unique<MemoryRange>(memory_file, sizeof(start), size, start);
  return true;
}
429 | | |
430 | 0 | bool MemoryOffline::Init(const std::string& file, uint64_t offset, uint64_t start, uint64_t size) { |
431 | 0 | auto memory_file = std::make_shared<MemoryFileAtOffset>(); |
432 | 0 | if (!memory_file->Init(file, offset)) { |
433 | 0 | return false; |
434 | 0 | } |
435 | | |
436 | 0 | memory_ = std::make_unique<MemoryRange>(memory_file, 0, size, start); |
437 | 0 | return true; |
438 | 0 | } |
439 | | |
440 | 0 | size_t MemoryOffline::Read(uint64_t addr, void* dst, size_t size) { |
441 | 0 | if (!memory_) { |
442 | 0 | return 0; |
443 | 0 | } |
444 | | |
445 | 0 | return memory_->Read(addr, dst, size); |
446 | 0 | } |
447 | | |
// Wraps an externally-owned buffer holding the memory for [start, end).
MemoryOfflineBuffer::MemoryOfflineBuffer(const uint8_t* data, uint64_t start, uint64_t end)
    : data_(data), start_(start), end_(end) {}
450 | | |
// Re-points this object at a different buffer covering [start, end).
void MemoryOfflineBuffer::Reset(const uint8_t* data, uint64_t start, uint64_t end) {
  data_ = data;
  start_ = start;
  end_ = end;
}
456 | | |
457 | 0 | size_t MemoryOfflineBuffer::Read(uint64_t addr, void* dst, size_t size) { |
458 | 0 | if (addr < start_ || addr >= end_) { |
459 | 0 | return 0; |
460 | 0 | } |
461 | | |
462 | 0 | size_t read_length = std::min(size, static_cast<size_t>(end_ - addr)); |
463 | 0 | memcpy(dst, &data_[addr - start_], read_length); |
464 | 0 | return read_length; |
465 | 0 | } |
466 | | |
467 | 0 | MemoryOfflineParts::~MemoryOfflineParts() { |
468 | 0 | for (auto memory : memories_) { |
469 | 0 | delete memory; |
470 | 0 | } |
471 | 0 | } |
472 | | |
473 | 0 | size_t MemoryOfflineParts::Read(uint64_t addr, void* dst, size_t size) { |
474 | 0 | if (memories_.empty()) { |
475 | 0 | return 0; |
476 | 0 | } |
477 | | |
478 | | // Do a read on each memory object, no support for reading across the |
479 | | // different memory objects. |
480 | 0 | for (MemoryOffline* memory : memories_) { |
481 | 0 | size_t bytes = memory->Read(addr, dst, size); |
482 | 0 | if (bytes != 0) { |
483 | 0 | return bytes; |
484 | 0 | } |
485 | 0 | } |
486 | 0 | return 0; |
487 | 0 | } |
488 | | |
// Reads through the page-granularity cache: each entry holds kCacheSize
// bytes aligned on a kCacheBits boundary. A read can touch at most two
// cache pages. A page that cannot be filled completely is evicted and the
// read falls back to the uncached implementation.
size_t MemoryCacheBase::InternalCachedRead(uint64_t addr, void* dst, size_t size,
                                           CacheDataType* cache) {
  uint64_t addr_page = addr >> kCacheBits;
  auto entry = cache->find(addr_page);
  uint8_t* cache_dst;
  if (entry != cache->end()) {
    cache_dst = entry->second;
  } else {
    // operator[] creates the backing storage for the new page.
    cache_dst = (*cache)[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache->erase(addr_page);
      return impl_->Read(addr, dst, size);
    }
  }
  // Bytes available in this page from addr up to the page boundary.
  size_t max_read = ((addr_page + 1) << kCacheBits) - addr;
  if (size <= max_read) {
    memcpy(dst, &cache_dst[addr & kCacheMask], size);
    return size;
  }

  // The read crossed into another cached entry, since a read can only cross
  // into one extra cached page, duplicate the code rather than looping.
  memcpy(dst, &cache_dst[addr & kCacheMask], max_read);
  dst = &reinterpret_cast<uint8_t*>(dst)[max_read];
  addr_page++;

  entry = cache->find(addr_page);
  if (entry != cache->end()) {
    cache_dst = entry->second;
  } else {
    cache_dst = (*cache)[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry. Count the first page's max_read bytes plus whatever
      // the uncached read of the remainder returns.
      cache->erase(addr_page);
      return impl_->Read(addr_page << kCacheBits, dst, size - max_read) + max_read;
    }
  }
  memcpy(dst, cache_dst, size - max_read);
  return size;
}
530 | | |
// Discards all cached pages, holding the cache lock to stay consistent with
// concurrent CachedRead calls.
void MemoryCache::Clear() {
  std::lock_guard<std::mutex> lock(cache_lock_);
  cache_.clear();
}
535 | | |
// Performs a cached read under the shared cache lock.
size_t MemoryCache::CachedRead(uint64_t addr, void* dst, size_t size) {
  // Use a single lock since this object is not designed to be performant
  // for multiple object reading from multiple threads.
  std::lock_guard<std::mutex> lock(cache_lock_);

  return InternalCachedRead(addr, dst, size, &cache_);
}
543 | | |
// Sets up a per-thread cache keyed by a pthread TLS key whose destructor
// frees each thread's cache when that thread exits. If key creation fails,
// thread_cache_ is reset and reads bypass the cache entirely.
// NOTE(review): thread_cache_ is created as pthread_t here while
// pthread_key_create expects a pthread_key_t* — confirm these types agree
// (or are interchangeable) on all supported platforms.
MemoryThreadCache::MemoryThreadCache(Memory* memory) : MemoryCacheBase(memory) {
  thread_cache_ = std::make_optional<pthread_t>();
  if (pthread_key_create(&*thread_cache_, [](void* memory) {
        // Runs at thread exit for every thread that populated the key.
        CacheDataType* cache = reinterpret_cast<CacheDataType*>(memory);
        delete cache;
      }) != 0) {
    Log::AsyncSafe("Failed to create pthread key.");
    thread_cache_.reset();
  }
}
554 | | |
555 | 0 | MemoryThreadCache::~MemoryThreadCache() { |
556 | 0 | if (thread_cache_) { |
557 | 0 | CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_)); |
558 | 0 | delete cache; |
559 | 0 | pthread_key_delete(*thread_cache_); |
560 | 0 | } |
561 | 0 | } |
562 | | |
563 | 0 | size_t MemoryThreadCache::CachedRead(uint64_t addr, void* dst, size_t size) { |
564 | 0 | if (!thread_cache_) { |
565 | 0 | return impl_->Read(addr, dst, size); |
566 | 0 | } |
567 | | |
568 | 0 | CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_)); |
569 | 0 | if (cache == nullptr) { |
570 | 0 | cache = new CacheDataType; |
571 | 0 | pthread_setspecific(*thread_cache_, cache); |
572 | 0 | } |
573 | |
|
574 | 0 | return InternalCachedRead(addr, dst, size, cache); |
575 | 0 | } |
576 | | |
577 | 0 | void MemoryThreadCache::Clear() { |
578 | 0 | if (!thread_cache_) { |
579 | 0 | return; |
580 | 0 | } |
581 | | |
582 | 0 | CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_)); |
583 | 0 | if (cache != nullptr) { |
584 | 0 | delete cache; |
585 | 0 | pthread_setspecific(*thread_cache_, nullptr); |
586 | 0 | } |
587 | 0 | } |
588 | | |
// "Unsafe" local read: blindly memcpy from the raw address in the current
// process. The caller must guarantee [addr, addr + size) is readable — an
// invalid address faults instead of returning a short read (contrast with
// MemoryLocal::Read, which goes through process_vm_readv).
size_t MemoryLocalUnsafe::Read(uint64_t addr, void* dst, size_t size) {
  void* raw_ptr = reinterpret_cast<void*>(addr);
  memcpy(dst, raw_ptr, size);
  return size;
}
594 | | |
595 | | } // namespace unwindstack |