/src/cpython/Python/tracemalloc.c
Line | Count | Source |
1 | | #include "Python.h" |
2 | | #include "pycore_fileutils.h" // _Py_write_noraise() |
3 | | #include "pycore_gc.h" // PyGC_Head |
4 | | #include "pycore_hashtable.h" // _Py_hashtable_t |
5 | | #include "pycore_initconfig.h" // _PyStatus_NO_MEMORY() |
6 | | #include "pycore_interpframe.h" // _PyInterpreterFrame |
7 | | #include "pycore_lock.h" // PyMutex_LockFlags() |
8 | | #include "pycore_object.h" // _PyType_PreHeaderSize() |
9 | | #include "pycore_pymem.h" // _Py_tracemalloc_config |
10 | | #include "pycore_runtime.h" // _Py_ID() |
11 | | #include "pycore_traceback.h" // _Py_DumpASCII() |
12 | | |
13 | | #include <stdlib.h> // malloc() |
14 | | |
15 | 32 | #define tracemalloc_config _PyRuntime.tracemalloc.config |
16 | | |
17 | | _Py_DECLARE_STR(anon_unknown, "<unknown>"); |
18 | | |
19 | | /* Forward declaration */ |
20 | | static void* raw_malloc(size_t size); |
21 | | static void raw_free(void *ptr); |
22 | | static int _PyTraceMalloc_TraceRef(PyObject *op, PyRefTracerEvent event, |
23 | | void* Py_UNUSED(ignore)); |
24 | | |
25 | | #ifdef Py_DEBUG |
26 | | # define TRACE_DEBUG |
27 | | #endif |
28 | | |
29 | 0 | #define TO_PTR(key) ((const void *)(uintptr_t)(key)) |
30 | 0 | #define FROM_PTR(key) ((uintptr_t)(key)) |
31 | | |
32 | 96 | #define allocators _PyRuntime.tracemalloc.allocators |
33 | | |
34 | | |
35 | | /* This lock is needed because tracemalloc_free() is called without |
36 | | the GIL held from PyMem_RawFree(). It cannot acquire the lock because it |
37 | | would introduce a deadlock in _PyThreadState_DeleteCurrent(). */ |
38 | 0 | #define tables_lock _PyRuntime.tracemalloc.tables_lock |
39 | 0 | #define TABLES_LOCK() PyMutex_LockFlags(&tables_lock, _Py_LOCK_DONT_DETACH) |
40 | 0 | #define TABLES_UNLOCK() PyMutex_Unlock(&tables_lock) |
41 | | |
42 | | |
43 | 0 | #define DEFAULT_DOMAIN 0 |
44 | | |
45 | | typedef struct tracemalloc_frame frame_t; |
46 | | typedef struct tracemalloc_traceback traceback_t; |
47 | | |
48 | | #define TRACEBACK_SIZE(NFRAME) \ |
49 | 32 | (sizeof(traceback_t) + sizeof(frame_t) * (NFRAME)) |
50 | | |
51 | | static const int MAX_NFRAME = UINT16_MAX; |
52 | | |
53 | | |
54 | 256 | #define tracemalloc_empty_traceback _PyRuntime.tracemalloc.empty_traceback |
55 | | |
56 | | |
57 | | /* Trace of a memory block */ |
58 | | typedef struct { |
59 | | /* Size of the memory block in bytes */ |
60 | | size_t size; |
61 | | |
62 | | /* Traceback where the memory block was allocated */ |
63 | | traceback_t *traceback; |
64 | | } trace_t; |
65 | | |
66 | | |
67 | 0 | #define tracemalloc_traced_memory _PyRuntime.tracemalloc.traced_memory |
68 | 0 | #define tracemalloc_peak_traced_memory _PyRuntime.tracemalloc.peak_traced_memory |
69 | 64 | #define tracemalloc_filenames _PyRuntime.tracemalloc.filenames |
70 | 0 | #define tracemalloc_traceback _PyRuntime.tracemalloc.traceback |
71 | 64 | #define tracemalloc_tracebacks _PyRuntime.tracemalloc.tracebacks |
72 | 64 | #define tracemalloc_traces _PyRuntime.tracemalloc.traces |
73 | 64 | #define tracemalloc_domains _PyRuntime.tracemalloc.domains |
74 | | |
75 | | |
#ifdef TRACE_DEBUG
/* Report an internal tracemalloc error on stderr (debug builds only).
   Takes a printf-style format string; output is flushed immediately. */
static void
tracemalloc_error(const char *format, ...)
{
    va_list ap;
    fprintf(stderr, "tracemalloc: ");
    va_start(ap, format);
    vfprintf(stderr, format, ap);
    va_end(ap);
    fprintf(stderr, "\n");
    fflush(stderr);
}
#endif
89 | | |
90 | | |
91 | 32 | #define tracemalloc_reentrant_key _PyRuntime.tracemalloc.reentrant_key |
92 | | |
93 | | /* Any non-NULL pointer can be used */ |
94 | 0 | #define REENTRANT Py_True |
95 | | |
96 | | static int |
97 | | get_reentrant(void) |
98 | 0 | { |
99 | 0 | assert(PyThread_tss_is_created(&tracemalloc_reentrant_key)); |
100 | |
|
101 | 0 | void *ptr = PyThread_tss_get(&tracemalloc_reentrant_key); |
102 | 0 | if (ptr != NULL) { |
103 | 0 | assert(ptr == REENTRANT); |
104 | 0 | return 1; |
105 | 0 | } |
106 | 0 | else { |
107 | 0 | return 0; |
108 | 0 | } |
109 | 0 | } |
110 | | |
111 | | static void |
112 | | set_reentrant(int reentrant) |
113 | 0 | { |
114 | 0 | assert(reentrant == 0 || reentrant == 1); |
115 | 0 | assert(PyThread_tss_is_created(&tracemalloc_reentrant_key)); |
116 | |
|
117 | 0 | if (reentrant) { |
118 | 0 | assert(!get_reentrant()); |
119 | 0 | PyThread_tss_set(&tracemalloc_reentrant_key, REENTRANT); |
120 | 0 | } |
121 | 0 | else { |
122 | 0 | assert(get_reentrant()); |
123 | 0 | PyThread_tss_set(&tracemalloc_reentrant_key, NULL); |
124 | 0 | } |
125 | 0 | } |
126 | | |
127 | | |
128 | | static Py_uhash_t |
129 | | hashtable_hash_pyobject(const void *key) |
130 | 0 | { |
131 | 0 | PyObject *obj = (PyObject *)key; |
132 | 0 | return PyObject_Hash(obj); |
133 | 0 | } |
134 | | |
135 | | |
136 | | static int |
137 | | hashtable_compare_unicode(const void *key1, const void *key2) |
138 | 0 | { |
139 | 0 | PyObject *obj1 = (PyObject *)key1; |
140 | 0 | PyObject *obj2 = (PyObject *)key2; |
141 | 0 | if (obj1 != NULL && obj2 != NULL) { |
142 | 0 | return (PyUnicode_Compare(obj1, obj2) == 0); |
143 | 0 | } |
144 | 0 | else { |
145 | 0 | return obj1 == obj2; |
146 | 0 | } |
147 | 0 | } |
148 | | |
149 | | |
150 | | static Py_uhash_t |
151 | | hashtable_hash_uint(const void *key_raw) |
152 | 0 | { |
153 | 0 | unsigned int key = (unsigned int)FROM_PTR(key_raw); |
154 | 0 | return (Py_uhash_t)key; |
155 | 0 | } |
156 | | |
157 | | |
158 | | static _Py_hashtable_t * |
159 | | hashtable_new(_Py_hashtable_hash_func hash_func, |
160 | | _Py_hashtable_compare_func compare_func, |
161 | | _Py_hashtable_destroy_func key_destroy_func, |
162 | | _Py_hashtable_destroy_func value_destroy_func) |
163 | 128 | { |
164 | 128 | _Py_hashtable_allocator_t hashtable_alloc = {malloc, free}; |
165 | 128 | return _Py_hashtable_new_full(hash_func, compare_func, |
166 | 128 | key_destroy_func, value_destroy_func, |
167 | 128 | &hashtable_alloc); |
168 | 128 | } |
169 | | |
170 | | |
/* Allocate memory with the raw allocator saved in _PyTraceMalloc_Init(),
   bypassing the tracemalloc hooks (used for tracemalloc's own data). */
static void*
raw_malloc(size_t size)
{
    return allocators.raw.malloc(allocators.raw.ctx, size);
}
176 | | |
/* Release memory allocated by raw_malloc(). */
static void
raw_free(void *ptr)
{
    allocators.raw.free(allocators.raw.ctx, ptr);
}
182 | | |
183 | | |
184 | | static Py_uhash_t |
185 | | hashtable_hash_traceback(const void *key) |
186 | 0 | { |
187 | 0 | const traceback_t *traceback = (const traceback_t *)key; |
188 | 0 | return traceback->hash; |
189 | 0 | } |
190 | | |
191 | | |
192 | | static int |
193 | | hashtable_compare_traceback(const void *key1, const void *key2) |
194 | 0 | { |
195 | 0 | const traceback_t *traceback1 = (const traceback_t *)key1; |
196 | 0 | const traceback_t *traceback2 = (const traceback_t *)key2; |
197 | |
|
198 | 0 | if (traceback1->nframe != traceback2->nframe) { |
199 | 0 | return 0; |
200 | 0 | } |
201 | 0 | if (traceback1->total_nframe != traceback2->total_nframe) { |
202 | 0 | return 0; |
203 | 0 | } |
204 | | |
205 | 0 | for (int i=0; i < traceback1->nframe; i++) { |
206 | 0 | const frame_t *frame1 = &traceback1->frames[i]; |
207 | 0 | const frame_t *frame2 = &traceback2->frames[i]; |
208 | |
|
209 | 0 | if (frame1->lineno != frame2->lineno) { |
210 | 0 | return 0; |
211 | 0 | } |
212 | 0 | if (frame1->filename != frame2->filename) { |
213 | 0 | assert(PyUnicode_Compare(frame1->filename, frame2->filename) != 0); |
214 | 0 | return 0; |
215 | 0 | } |
216 | 0 | } |
217 | 0 | return 1; |
218 | 0 | } |
219 | | |
220 | | |
/* Fill 'frame' (filename, lineno) from the interpreter frame 'pyframe'.
   The filename is interned in tracemalloc_filenames so that all frames
   referring to the same file share a single PyObject. On any failure the
   filename falls back to the shared "<unknown>" string. */
static void
tracemalloc_get_frame(_PyInterpreterFrame *pyframe, frame_t *frame)
{
    assert(PyStackRef_CodeCheck(pyframe->f_executable));
    /* default, overwritten at the end on success */
    frame->filename = &_Py_STR(anon_unknown);

    int lineno = -1;
    PyCodeObject *code = _PyFrame_GetCode(pyframe);
    // PyUnstable_InterpreterFrame_GetLine() cannot be used, since it uses
    // a critical section which can trigger a deadlock.
    int lasti = _PyFrame_SafeGetLasti(pyframe);
    if (lasti >= 0) {
        lineno = _PyCode_SafeAddr2Line(code, lasti);
    }
    if (lineno < 0) {
        /* unknown line number */
        lineno = 0;
    }
    frame->lineno = (unsigned int)lineno;

    PyObject *filename = code->co_filename;
    if (filename == NULL) {
#ifdef TRACE_DEBUG
        tracemalloc_error("failed to get the filename of the code object");
#endif
        return;
    }

    if (!PyUnicode_Check(filename)) {
#ifdef TRACE_DEBUG
        tracemalloc_error("filename is not a unicode string");
#endif
        return;
    }

    /* intern the filename */
    _Py_hashtable_entry_t *entry;
    entry = _Py_hashtable_get_entry(tracemalloc_filenames, filename);
    if (entry != NULL) {
        /* already interned: reuse the stored object */
        filename = (PyObject *)entry->key;
    }
    else {
        /* tracemalloc_filenames is responsible to keep a reference
           to the filename */
        if (_Py_hashtable_set(tracemalloc_filenames, Py_NewRef(filename),
                              NULL) < 0) {
            Py_DECREF(filename);
#ifdef TRACE_DEBUG
            tracemalloc_error("failed to intern the filename");
#endif
            return;
        }
    }

    /* the tracemalloc_filenames table keeps a reference to the filename */
    frame->filename = filename;
}
277 | | |
278 | | |
279 | | static Py_uhash_t |
280 | | traceback_hash(traceback_t *traceback) |
281 | 32 | { |
282 | | /* code based on tuple_hash() of Objects/tupleobject.c */ |
283 | 32 | Py_uhash_t x, y; /* Unsigned for defined overflow behavior. */ |
284 | 32 | int len = traceback->nframe; |
285 | 32 | Py_uhash_t mult = PyHASH_MULTIPLIER; |
286 | 32 | frame_t *frame; |
287 | | |
288 | 32 | x = 0x345678UL; |
289 | 32 | frame = traceback->frames; |
290 | 64 | while (--len >= 0) { |
291 | 32 | y = (Py_uhash_t)PyObject_Hash(frame->filename); |
292 | 32 | y ^= (Py_uhash_t)frame->lineno; |
293 | 32 | frame++; |
294 | | |
295 | 32 | x = (x ^ y) * mult; |
296 | | /* the cast might truncate len; that doesn't change hash stability */ |
297 | 32 | mult += (Py_uhash_t)(82520UL + len + len); |
298 | 32 | } |
299 | 32 | x ^= traceback->total_nframe; |
300 | 32 | x += 97531UL; |
301 | 32 | return x; |
302 | 32 | } |
303 | | |
304 | | |
/* Fill traceback->frames (at most tracemalloc_config.max_nframe entries)
   by walking the current thread's Python frame stack, and count the full
   stack depth in traceback->total_nframe (saturated at UINT16_MAX). */
static void
traceback_get_frames(traceback_t *traceback)
{
    PyThreadState *tstate = _PyThreadState_GET();
    assert(tstate != NULL);

    _PyInterpreterFrame *pyframe = _PyThreadState_GetFrame(tstate);
    while (pyframe) {
        if (traceback->nframe < tracemalloc_config.max_nframe) {
            tracemalloc_get_frame(pyframe, &traceback->frames[traceback->nframe]);
            assert(traceback->frames[traceback->nframe].filename != NULL);
            traceback->nframe++;
        }
        /* keep counting past max_nframe, but never overflow the 16-bit
           total_nframe field */
        if (traceback->total_nframe < UINT16_MAX) {
            traceback->total_nframe++;
        }
        pyframe = _PyFrame_GetFirstComplete(pyframe->previous);
    }
}
324 | | |
325 | | |
/* Capture the current Python traceback and intern it in
   tracemalloc_tracebacks so identical tracebacks are stored only once.

   Return a borrowed pointer to the interned traceback, the shared
   tracemalloc_empty_traceback if no frame could be captured, or NULL on
   memory allocation failure. */
static traceback_t *
traceback_new(void)
{
    traceback_t *traceback;
    _Py_hashtable_entry_t *entry;

    _Py_AssertHoldsTstate();

    /* get frames into the preallocated scratch buffer */
    traceback = tracemalloc_traceback;
    traceback->nframe = 0;
    traceback->total_nframe = 0;
    traceback_get_frames(traceback);
    if (traceback->nframe == 0) {
        return tracemalloc_empty_traceback;
    }
    traceback->hash = traceback_hash(traceback);

    /* intern the traceback */
    entry = _Py_hashtable_get_entry(tracemalloc_tracebacks, traceback);
    if (entry != NULL) {
        /* already interned: reuse the stored copy */
        traceback = (traceback_t *)entry->key;
    }
    else {
        traceback_t *copy;
        size_t traceback_size;

        /* only copy the frames actually captured */
        traceback_size = TRACEBACK_SIZE(traceback->nframe);

        copy = raw_malloc(traceback_size);
        if (copy == NULL) {
#ifdef TRACE_DEBUG
            tracemalloc_error("failed to intern the traceback: malloc failed");
#endif
            return NULL;
        }
        memcpy(copy, traceback, traceback_size);

        /* the table owns the copy; it is freed by the table's
           key destructor (raw_free) */
        if (_Py_hashtable_set(tracemalloc_tracebacks, copy, NULL) < 0) {
            raw_free(copy);
#ifdef TRACE_DEBUG
            tracemalloc_error("failed to intern the traceback: putdata failed");
#endif
            return NULL;
        }
        traceback = copy;
    }
    return traceback;
}
375 | | |
376 | | |
/* Create a traces table mapping a memory block address to its trace_t*.
   Values are released with raw_free() when entries are destroyed. */
static _Py_hashtable_t*
tracemalloc_create_traces_table(void)
{
    return hashtable_new(_Py_hashtable_hash_ptr,
                         _Py_hashtable_compare_direct,
                         NULL, raw_free);
}
384 | | |
385 | | |
386 | | static void |
387 | | tracemalloc_destroy_domain(void *value) |
388 | 0 | { |
389 | 0 | _Py_hashtable_t *ht = (_Py_hashtable_t*)value; |
390 | 0 | _Py_hashtable_destroy(ht); |
391 | 0 | } |
392 | | |
393 | | |
/* Create the table mapping a domain (unsigned int key) to its traces
   table. Per-domain tables are destroyed when their entry is removed. */
static _Py_hashtable_t*
tracemalloc_create_domains_table(void)
{
    return hashtable_new(hashtable_hash_uint,
                         _Py_hashtable_compare_direct,
                         NULL,
                         tracemalloc_destroy_domain);
}
402 | | |
403 | | |
404 | | static _Py_hashtable_t* |
405 | | tracemalloc_get_traces_table(unsigned int domain) |
406 | 0 | { |
407 | 0 | if (domain == DEFAULT_DOMAIN) { |
408 | 0 | return tracemalloc_traces; |
409 | 0 | } |
410 | 0 | else { |
411 | 0 | return _Py_hashtable_get(tracemalloc_domains, TO_PTR(domain)); |
412 | 0 | } |
413 | 0 | } |
414 | | |
415 | | |
/* Forget the trace of the memory block at 'ptr' in 'domain' and decrease
   tracemalloc_traced_memory accordingly. No-op if the domain or pointer
   is not tracked. The tables lock must be held by the caller. */
static void
tracemalloc_remove_trace_unlocked(unsigned int domain, uintptr_t ptr)
{
    assert(tracemalloc_config.tracing);

    _Py_hashtable_t *traces = tracemalloc_get_traces_table(domain);
    if (!traces) {
        return;
    }

    /* steal: remove the entry without invoking the value destructor,
       so the trace can be freed after updating the counter */
    trace_t *trace = _Py_hashtable_steal(traces, TO_PTR(ptr));
    if (!trace) {
        return;
    }
    assert(tracemalloc_traced_memory >= trace->size);
    tracemalloc_traced_memory -= trace->size;
    raw_free(trace);
}
434 | | |
435 | | #define REMOVE_TRACE(ptr) \ |
436 | 0 | tracemalloc_remove_trace_unlocked(DEFAULT_DOMAIN, (uintptr_t)(ptr)) |
437 | | |
438 | | |
/* Record that 'size' bytes are allocated at 'ptr' in 'domain', with the
   current Python traceback, and update the traced/peak memory counters.
   If 'ptr' is already tracked its trace is replaced in place.
   Return 0 on success, negative on memory allocation failure.
   The tables lock must be held by the caller. */
static int
tracemalloc_add_trace_unlocked(unsigned int domain, uintptr_t ptr,
                               size_t size)
{
    assert(tracemalloc_config.tracing);

    traceback_t *traceback = traceback_new();
    if (traceback == NULL) {
        return -1;
    }

    _Py_hashtable_t *traces = tracemalloc_get_traces_table(domain);
    if (traces == NULL) {
        /* first trace in this domain: create its table on demand */
        traces = tracemalloc_create_traces_table();
        if (traces == NULL) {
            return -1;
        }

        /* tracemalloc_domains owns the new table from here on */
        if (_Py_hashtable_set(tracemalloc_domains, TO_PTR(domain), traces) < 0) {
            _Py_hashtable_destroy(traces);
            return -1;
        }
    }

    trace_t *trace = _Py_hashtable_get(traces, TO_PTR(ptr));
    if (trace != NULL) {
        /* the memory block is already tracked */
        assert(tracemalloc_traced_memory >= trace->size);
        tracemalloc_traced_memory -= trace->size;

        trace->size = size;
        trace->traceback = traceback;
    }
    else {
        trace = raw_malloc(sizeof(trace_t));
        if (trace == NULL) {
            return -1;
        }
        trace->size = size;
        trace->traceback = traceback;

        int res = _Py_hashtable_set(traces, TO_PTR(ptr), trace);
        if (res != 0) {
            raw_free(trace);
            return res;
        }
    }

    assert(tracemalloc_traced_memory <= SIZE_MAX - size);
    tracemalloc_traced_memory += size;
    if (tracemalloc_traced_memory > tracemalloc_peak_traced_memory) {
        tracemalloc_peak_traced_memory = tracemalloc_traced_memory;
    }
    return 0;
}
494 | | |
495 | | #define ADD_TRACE(ptr, size) \ |
496 | 0 | tracemalloc_add_trace_unlocked(DEFAULT_DOMAIN, (uintptr_t)(ptr), size) |
497 | | |
498 | | |
/* Hook implementing malloc()/calloc() for the traced allocator domains.
   need_gil: acquire the GIL with PyGILState_Ensure() before touching the
   tables (used by the raw domain, which may run without the GIL).
   use_calloc: call the underlying calloc() instead of malloc().
   ctx is the PyMemAllocatorEx saved when the hook was installed.
   Return the allocated block, or NULL on failure. */
static void*
tracemalloc_alloc(int need_gil, int use_calloc,
                  void *ctx, size_t nelem, size_t elsize)
{
    assert(elsize == 0 || nelem <= SIZE_MAX / elsize);

    int reentrant = get_reentrant();

    // Ignore reentrant call.
    //
    // For example, PyObject_Malloc() calls
    // PyMem_Malloc() for allocations larger than 512 bytes: don't trace the
    // same memory allocation twice.
    //
    // If reentrant calls are not ignored, PyGILState_Ensure() can call
    // PyMem_RawMalloc() which would call PyGILState_Ensure() again in a loop.
    if (!reentrant) {
        set_reentrant(1);
    }

    PyMemAllocatorEx *alloc = (PyMemAllocatorEx *)ctx;
    void *ptr;
    if (use_calloc) {
        ptr = alloc->calloc(alloc->ctx, nelem, elsize);
    }
    else {
        ptr = alloc->malloc(alloc->ctx, nelem * elsize);
    }

    if (ptr == NULL) {
        goto done;
    }
    if (reentrant) {
        /* reentrant call: the outer call records the trace */
        goto done;
    }

    PyGILState_STATE gil_state;
    if (need_gil) {
        gil_state = PyGILState_Ensure();
    }
    TABLES_LOCK();

    if (tracemalloc_config.tracing) {
        if (ADD_TRACE(ptr, nelem * elsize) < 0) {
            // Failed to allocate a trace for the new memory block
            alloc->free(alloc->ctx, ptr);
            ptr = NULL;
        }
    }
    // else: gh-128679: tracemalloc.stop() was called by another thread

    TABLES_UNLOCK();
    if (need_gil) {
        PyGILState_Release(gil_state);
    }

done:
    if (!reentrant) {
        set_reentrant(0);
    }
    return ptr;
}
561 | | |
562 | | |
/* Hook implementing realloc() for the traced allocator domains.
   need_gil: acquire the GIL with PyGILState_Ensure() before touching the
   tables (used by the raw domain). ctx is the saved PyMemAllocatorEx.
   Return the new pointer, or NULL on failure (the original block is then
   left untouched by the underlying realloc()). */
static void*
tracemalloc_realloc(int need_gil, void *ctx, void *ptr, size_t new_size)
{
    int reentrant = get_reentrant();

    // Ignore reentrant call. PyObject_Realloc() calls PyMem_Realloc() for
    // allocations larger than 512 bytes: don't trace the same memory block
    // twice.
    if (!reentrant) {
        set_reentrant(1);
    }

    PyMemAllocatorEx *alloc = (PyMemAllocatorEx *)ctx;
    void *ptr2 = alloc->realloc(alloc->ctx, ptr, new_size);

    if (ptr2 == NULL) {
        goto done;
    }
    if (reentrant) {
        /* reentrant call: the outer call records the trace */
        goto done;
    }

    PyGILState_STATE gil_state;
    if (need_gil) {
        gil_state = PyGILState_Ensure();
    }
    TABLES_LOCK();

    if (!tracemalloc_config.tracing) {
        // gh-128679: tracemalloc.stop() was called by another thread
        goto unlock;
    }

    if (ptr != NULL) {
        // An existing memory block has been resized

        // tracemalloc_add_trace_unlocked() updates the trace if there is
        // already a trace at address ptr2.
        if (ptr2 != ptr) {
            REMOVE_TRACE(ptr);
        }

        if (ADD_TRACE(ptr2, new_size) < 0) {
            // Memory allocation failed. The error cannot be reported to the
            // caller, because realloc() may already have shrunk the memory
            // block and so removed bytes.
            //
            // This case is very unlikely: a hash entry has just been released,
            // so the hash table should have at least one free entry.
            //
            // The GIL and the table lock ensure that only one thread is
            // allocating memory.
            Py_FatalError("tracemalloc_realloc() failed to allocate a trace");
        }
    }
    else {
        // New allocation

        if (ADD_TRACE(ptr2, new_size) < 0) {
            // Failed to allocate a trace for the new memory block
            alloc->free(alloc->ctx, ptr2);
            ptr2 = NULL;
        }
    }

unlock:
    TABLES_UNLOCK();
    if (need_gil) {
        PyGILState_Release(gil_state);
    }

done:
    if (!reentrant) {
        set_reentrant(0);
    }
    return ptr2;
}
640 | | |
641 | | |
/* Hook implementing free() for all traced allocator domains.
   May be called without the GIL (from PyMem_RawFree()), hence only the
   tables lock is taken — see the tables_lock comment near the top of
   this file. ctx is the saved PyMemAllocatorEx. */
static void
tracemalloc_free(void *ctx, void *ptr)
{
    if (ptr == NULL) {
        return;
    }

    PyMemAllocatorEx *alloc = (PyMemAllocatorEx *)ctx;
    alloc->free(alloc->ctx, ptr);

    /* reentrant call: the outer call removes the trace */
    if (get_reentrant()) {
        return;
    }

    TABLES_LOCK();

    if (tracemalloc_config.tracing) {
        REMOVE_TRACE(ptr);
    }
    // else: gh-128679: tracemalloc.stop() was called by another thread

    TABLES_UNLOCK();
}
665 | | |
666 | | |
/* malloc() hook for the MEM/OBJ domains (GIL already held: need_gil=0). */
static void*
tracemalloc_malloc_gil(void *ctx, size_t size)
{
    return tracemalloc_alloc(0, 0, ctx, 1, size);
}
672 | | |
673 | | |
/* calloc() hook for the MEM/OBJ domains (GIL already held: need_gil=0). */
static void*
tracemalloc_calloc_gil(void *ctx, size_t nelem, size_t elsize)
{
    return tracemalloc_alloc(0, 1, ctx, nelem, elsize);
}
679 | | |
680 | | |
/* realloc() hook for the MEM/OBJ domains (GIL already held: need_gil=0). */
static void*
tracemalloc_realloc_gil(void *ctx, void *ptr, size_t new_size)
{
    return tracemalloc_realloc(0, ctx, ptr, new_size);
}
686 | | |
687 | | |
/* malloc() hook for the RAW domain: may run without the GIL (need_gil=1). */
static void*
tracemalloc_raw_malloc(void *ctx, size_t size)
{
    return tracemalloc_alloc(1, 0, ctx, 1, size);
}
693 | | |
694 | | |
/* calloc() hook for the RAW domain: may run without the GIL (need_gil=1). */
static void*
tracemalloc_raw_calloc(void *ctx, size_t nelem, size_t elsize)
{
    return tracemalloc_alloc(1, 1, ctx, nelem, elsize);
}
700 | | |
701 | | |
/* realloc() hook for the RAW domain: may run without the GIL (need_gil=1). */
static void*
tracemalloc_raw_realloc(void *ctx, void *ptr, size_t new_size)
{
    return tracemalloc_realloc(1, ctx, ptr, new_size);
}
707 | | |
708 | | |
/* Key destructor of tracemalloc_filenames: drop the strong reference the
   table holds on the interned filename. */
static void
tracemalloc_clear_filename(void *value)
{
    PyObject *filename = (PyObject *)value;
    Py_DECREF(filename);
}
715 | | |
716 | | |
/* Drop all traces, per-domain tables, interned tracebacks and filenames,
   and reset the traced/peak memory counters. The caller must hold a
   thread state (Py_DECREF is called on filenames) and the tables lock. */
static void
tracemalloc_clear_traces_unlocked(void)
{
    // Clearing tracemalloc_filenames requires the GIL to call Py_DECREF()
    _Py_AssertHoldsTstate();

    /* block the allocator hooks while the tables are being cleared */
    set_reentrant(1);

    _Py_hashtable_clear(tracemalloc_traces);
    _Py_hashtable_clear(tracemalloc_domains);
    _Py_hashtable_clear(tracemalloc_tracebacks);
    _Py_hashtable_clear(tracemalloc_filenames);

    tracemalloc_traced_memory = 0;
    tracemalloc_peak_traced_memory = 0;

    set_reentrant(0);
}
735 | | |
736 | | |
/* One-time initialization of the tracemalloc runtime state: save the raw
   allocator, create the TSS reentrancy key, the filename / traceback /
   trace tables, and the shared "<unknown>" empty traceback.
   Return _PyStatus_OK() on success, _PyStatus_NO_MEMORY() on failure. */
PyStatus
_PyTraceMalloc_Init(void)
{
    assert(tracemalloc_config.initialized == TRACEMALLOC_NOT_INITIALIZED);

    /* save the raw allocator for raw_malloc()/raw_free() */
    PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &allocators.raw);

    if (PyThread_tss_create(&tracemalloc_reentrant_key) != 0) {
        return _PyStatus_NO_MEMORY();
    }

    /* keys are interned filenames; the table owns a reference to each */
    tracemalloc_filenames = hashtable_new(hashtable_hash_pyobject,
                                          hashtable_compare_unicode,
                                          tracemalloc_clear_filename, NULL);

    /* keys are interned traceback_t* allocated with raw_malloc() */
    tracemalloc_tracebacks = hashtable_new(hashtable_hash_traceback,
                                           hashtable_compare_traceback,
                                           raw_free, NULL);

    tracemalloc_traces = tracemalloc_create_traces_table();
    tracemalloc_domains = tracemalloc_create_domains_table();

    if (tracemalloc_filenames == NULL || tracemalloc_tracebacks == NULL
        || tracemalloc_traces == NULL || tracemalloc_domains == NULL)
    {
        return _PyStatus_NO_MEMORY();
    }

    /* shared traceback returned when no Python frame can be captured */
    assert(tracemalloc_empty_traceback == NULL);
    tracemalloc_empty_traceback = raw_malloc(TRACEBACK_SIZE(1));
    if (tracemalloc_empty_traceback == NULL) {
        return _PyStatus_NO_MEMORY();
    }

    tracemalloc_empty_traceback->nframe = 1;
    tracemalloc_empty_traceback->total_nframe = 1;
    /* borrowed reference */
    tracemalloc_empty_traceback->frames[0].filename = &_Py_STR(anon_unknown);
    tracemalloc_empty_traceback->frames[0].lineno = 0;
    tracemalloc_empty_traceback->hash = traceback_hash(tracemalloc_empty_traceback);

    tracemalloc_config.initialized = TRACEMALLOC_INITIALIZED;
    return _PyStatus_OK();
}
781 | | |
782 | | |
/* Tear down the state created by _PyTraceMalloc_Init(): stop tracing,
   destroy the hash tables and release the TSS key. Idempotent: does
   nothing unless currently in the INITIALIZED state. */
static void
tracemalloc_deinit(void)
{
    if (tracemalloc_config.initialized != TRACEMALLOC_INITIALIZED)
        return;
    tracemalloc_config.initialized = TRACEMALLOC_FINALIZED;

    _PyTraceMalloc_Stop();

    /* destroy hash tables */
    _Py_hashtable_destroy(tracemalloc_domains);
    _Py_hashtable_destroy(tracemalloc_traces);
    _Py_hashtable_destroy(tracemalloc_tracebacks);
    _Py_hashtable_destroy(tracemalloc_filenames);

    PyThread_tss_delete(&tracemalloc_reentrant_key);

    raw_free(tracemalloc_empty_traceback);
    tracemalloc_empty_traceback = NULL;
}
803 | | |
804 | | |
/* Start tracing Python memory allocations, recording at most 'max_nframe'
   frames per allocation traceback. Install the tracemalloc hooks on the
   RAW, MEM and OBJ allocator domains (saving the current allocators so
   _PyTraceMalloc_Stop() can restore them) and register the ref tracer.
   Return 0 on success (also when already tracing), or -1 with an
   exception set on error. */
int
_PyTraceMalloc_Start(int max_nframe)
{
    if (max_nframe < 1 || max_nframe > MAX_NFRAME) {
        PyErr_Format(PyExc_ValueError,
                     "the number of frames must be in range [1; %i]",
                     MAX_NFRAME);
        return -1;
    }

    if (_PyTraceMalloc_IsTracing()) {
        /* hooks already installed: do nothing */
        return 0;
    }

    tracemalloc_config.max_nframe = max_nframe;

    /* allocate a buffer to store a new traceback */
    size_t size = TRACEBACK_SIZE(max_nframe);
    assert(tracemalloc_traceback == NULL);
    tracemalloc_traceback = raw_malloc(size);
    if (tracemalloc_traceback == NULL) {
        PyErr_NoMemory();
        return -1;
    }

    PyMemAllocatorEx alloc;
    alloc.malloc = tracemalloc_raw_malloc;
    alloc.calloc = tracemalloc_raw_calloc;
    alloc.realloc = tracemalloc_raw_realloc;
    alloc.free = tracemalloc_free;

    /* each hook forwards to the previous allocator stored in its ctx */
    alloc.ctx = &allocators.raw;
    PyMem_GetAllocator(PYMEM_DOMAIN_RAW, &allocators.raw);
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &alloc);

    alloc.malloc = tracemalloc_malloc_gil;
    alloc.calloc = tracemalloc_calloc_gil;
    alloc.realloc = tracemalloc_realloc_gil;
    alloc.free = tracemalloc_free;

    alloc.ctx = &allocators.mem;
    PyMem_GetAllocator(PYMEM_DOMAIN_MEM, &allocators.mem);
    PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &alloc);

    alloc.ctx = &allocators.obj;
    PyMem_GetAllocator(PYMEM_DOMAIN_OBJ, &allocators.obj);
    PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &alloc);

    if (PyRefTracer_SetTracer(_PyTraceMalloc_TraceRef, NULL) < 0) {
        /* NOTE(review): the hooks stay installed on this path, but
           tracing is never enabled so they are pass-through — confirm
           this is intentional */
        return -1;
    }

    /* everything is ready: start tracing Python memory allocations */
    TABLES_LOCK();
    _Py_atomic_store_int_relaxed(&tracemalloc_config.tracing, 1);
    TABLES_UNLOCK();

    return 0;
}
865 | | |
866 | | |
/* Stop tracing: restore the saved allocators, discard all collected
   traces and release the traceback scratch buffer. No-op if tracing is
   not currently active. */
void
_PyTraceMalloc_Stop(void)
{
    TABLES_LOCK();

    if (!tracemalloc_config.tracing) {
        TABLES_UNLOCK();
        return;
    }

    /* stop tracing Python memory allocations */
    _Py_atomic_store_int_relaxed(&tracemalloc_config.tracing, 0);

    /* unregister the hook on memory allocators */
    PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &allocators.raw);
    PyMem_SetAllocator(PYMEM_DOMAIN_MEM, &allocators.mem);
    PyMem_SetAllocator(PYMEM_DOMAIN_OBJ, &allocators.obj);

    tracemalloc_clear_traces_unlocked();

    /* release memory */
    raw_free(tracemalloc_traceback);
    tracemalloc_traceback = NULL;

    TABLES_UNLOCK();

    // Call it after TABLES_UNLOCK() since it calls _PyEval_StopTheWorldAll()
    // which would lead to a deadlock with TABLES_LOCK() which doesn't detach
    // the thread state.
    (void)PyRefTracer_SetTracer(NULL, NULL);
}
898 | | |
899 | | |
900 | | |
/* Convert a frame_t into a (filename, lineno) tuple.
   Return a new reference, or NULL on error. */
static PyObject*
frame_to_pyobject(frame_t *frame)
{
    /* the reentrant flag must be set: this code allocates Python objects
       and must not be traced */
    assert(get_reentrant());

    PyObject *frame_obj = PyTuple_New(2);
    if (frame_obj == NULL) {
        return NULL;
    }

    PyTuple_SET_ITEM(frame_obj, 0, Py_NewRef(frame->filename));

    PyObject *lineno_obj = PyLong_FromUnsignedLong(frame->lineno);
    if (lineno_obj == NULL) {
        Py_DECREF(frame_obj);
        return NULL;
    }
    PyTuple_SET_ITEM(frame_obj, 1, lineno_obj);

    return frame_obj;
}
922 | | |
923 | | |
/* Convert a traceback_t into a tuple of (filename, lineno) frame tuples.
   If intern_table is not NULL it caches traceback -> tuple, so identical
   tracebacks share a single tuple object.
   Return a new reference, or NULL on error. */
static PyObject*
traceback_to_pyobject(traceback_t *traceback, _Py_hashtable_t *intern_table)
{
    PyObject *frames;
    if (intern_table != NULL) {
        frames = _Py_hashtable_get(intern_table, (const void *)traceback);
        if (frames) {
            /* cache hit: hand out a new reference to the shared tuple */
            return Py_NewRef(frames);
        }
    }

    frames = PyTuple_New(traceback->nframe);
    if (frames == NULL) {
        return NULL;
    }

    for (int i=0; i < traceback->nframe; i++) {
        PyObject *frame = frame_to_pyobject(&traceback->frames[i]);
        if (frame == NULL) {
            Py_DECREF(frames);
            return NULL;
        }
        PyTuple_SET_ITEM(frames, i, frame);
    }

    if (intern_table != NULL) {
        if (_Py_hashtable_set(intern_table, traceback, frames) < 0) {
            Py_DECREF(frames);
            PyErr_NoMemory();
            return NULL;
        }
        /* intern_table keeps a new reference to frames */
        Py_INCREF(frames);
    }
    return frames;
}
960 | | |
961 | | |
962 | | static PyObject* |
963 | | trace_to_pyobject(unsigned int domain, const trace_t *trace, |
964 | | _Py_hashtable_t *intern_tracebacks) |
965 | 0 | { |
966 | 0 | assert(get_reentrant()); |
967 | |
|
968 | 0 | PyObject *trace_obj = PyTuple_New(4); |
969 | 0 | if (trace_obj == NULL) { |
970 | 0 | return NULL; |
971 | 0 | } |
972 | | |
973 | 0 | PyObject *obj = PyLong_FromSize_t(domain); |
974 | 0 | if (obj == NULL) { |
975 | 0 | Py_DECREF(trace_obj); |
976 | 0 | return NULL; |
977 | 0 | } |
978 | 0 | PyTuple_SET_ITEM(trace_obj, 0, obj); |
979 | |
|
980 | 0 | obj = PyLong_FromSize_t(trace->size); |
981 | 0 | if (obj == NULL) { |
982 | 0 | Py_DECREF(trace_obj); |
983 | 0 | return NULL; |
984 | 0 | } |
985 | 0 | PyTuple_SET_ITEM(trace_obj, 1, obj); |
986 | |
|
987 | 0 | obj = traceback_to_pyobject(trace->traceback, intern_tracebacks); |
988 | 0 | if (obj == NULL) { |
989 | 0 | Py_DECREF(trace_obj); |
990 | 0 | return NULL; |
991 | 0 | } |
992 | 0 | PyTuple_SET_ITEM(trace_obj, 2, obj); |
993 | |
|
994 | 0 | obj = PyLong_FromUnsignedLong(trace->traceback->total_nframe); |
995 | 0 | if (obj == NULL) { |
996 | 0 | Py_DECREF(trace_obj); |
997 | 0 | return NULL; |
998 | 0 | } |
999 | 0 | PyTuple_SET_ITEM(trace_obj, 3, obj); |
1000 | |
|
1001 | 0 | return trace_obj; |
1002 | 0 | } |
1003 | | |
1004 | | |
/* State shared by the _PyTraceMalloc_GetTraces() hash table callbacks
   while traces are converted to Python objects. */
typedef struct {
    _Py_hashtable_t *traces;      /* snapshot copy of the default-domain traces */
    _Py_hashtable_t *domains;     /* snapshot copy of the per-domain trace tables */
    _Py_hashtable_t *tracebacks;  /* temporary table interning traceback tuples */
    PyObject *list;               /* result: list of trace tuples */
    unsigned int domain;          /* domain currently being walked */
} get_traces_t;
1012 | | |
1013 | | |
1014 | | static int |
1015 | | tracemalloc_copy_trace(_Py_hashtable_t *traces, |
1016 | | const void *key, const void *value, |
1017 | | void *user_data) |
1018 | 0 | { |
1019 | 0 | _Py_hashtable_t *traces2 = (_Py_hashtable_t *)user_data; |
1020 | 0 | trace_t *trace = (trace_t *)value; |
1021 | |
|
1022 | 0 | trace_t *trace2 = raw_malloc(sizeof(trace_t)); |
1023 | 0 | if (trace2 == NULL) { |
1024 | 0 | return -1; |
1025 | 0 | } |
1026 | 0 | *trace2 = *trace; |
1027 | 0 | if (_Py_hashtable_set(traces2, key, trace2) < 0) { |
1028 | 0 | raw_free(trace2); |
1029 | 0 | return -1; |
1030 | 0 | } |
1031 | 0 | return 0; |
1032 | 0 | } |
1033 | | |
1034 | | |
1035 | | static _Py_hashtable_t* |
1036 | | tracemalloc_copy_traces(_Py_hashtable_t *traces) |
1037 | 0 | { |
1038 | 0 | _Py_hashtable_t *traces2 = tracemalloc_create_traces_table(); |
1039 | 0 | if (traces2 == NULL) { |
1040 | 0 | return NULL; |
1041 | 0 | } |
1042 | | |
1043 | 0 | int err = _Py_hashtable_foreach(traces, |
1044 | 0 | tracemalloc_copy_trace, |
1045 | 0 | traces2); |
1046 | 0 | if (err) { |
1047 | 0 | _Py_hashtable_destroy(traces2); |
1048 | 0 | return NULL; |
1049 | 0 | } |
1050 | 0 | return traces2; |
1051 | 0 | } |
1052 | | |
1053 | | |
1054 | | static int |
1055 | | tracemalloc_copy_domain(_Py_hashtable_t *domains, |
1056 | | const void *key, const void *value, |
1057 | | void *user_data) |
1058 | 0 | { |
1059 | 0 | _Py_hashtable_t *domains2 = (_Py_hashtable_t *)user_data; |
1060 | 0 | unsigned int domain = (unsigned int)FROM_PTR(key); |
1061 | 0 | _Py_hashtable_t *traces = (_Py_hashtable_t *)value; |
1062 | |
|
1063 | 0 | _Py_hashtable_t *traces2 = tracemalloc_copy_traces(traces); |
1064 | 0 | if (traces2 == NULL) { |
1065 | 0 | return -1; |
1066 | 0 | } |
1067 | 0 | if (_Py_hashtable_set(domains2, TO_PTR(domain), traces2) < 0) { |
1068 | 0 | _Py_hashtable_destroy(traces2); |
1069 | 0 | return -1; |
1070 | 0 | } |
1071 | 0 | return 0; |
1072 | 0 | } |
1073 | | |
1074 | | |
1075 | | static _Py_hashtable_t* |
1076 | | tracemalloc_copy_domains(_Py_hashtable_t *domains) |
1077 | 0 | { |
1078 | 0 | _Py_hashtable_t *domains2 = tracemalloc_create_domains_table(); |
1079 | 0 | if (domains2 == NULL) { |
1080 | 0 | return NULL; |
1081 | 0 | } |
1082 | | |
1083 | 0 | int err = _Py_hashtable_foreach(domains, |
1084 | 0 | tracemalloc_copy_domain, |
1085 | 0 | domains2); |
1086 | 0 | if (err) { |
1087 | 0 | _Py_hashtable_destroy(domains2); |
1088 | 0 | return NULL; |
1089 | 0 | } |
1090 | 0 | return domains2; |
1091 | 0 | } |
1092 | | |
1093 | | |
1094 | | static int |
1095 | | tracemalloc_get_traces_fill(_Py_hashtable_t *traces, |
1096 | | const void *key, const void *value, |
1097 | | void *user_data) |
1098 | 0 | { |
1099 | 0 | get_traces_t *get_traces = user_data; |
1100 | 0 | const trace_t *trace = (const trace_t *)value; |
1101 | |
|
1102 | 0 | PyObject *tuple = trace_to_pyobject(get_traces->domain, trace, |
1103 | 0 | get_traces->tracebacks); |
1104 | 0 | if (tuple == NULL) { |
1105 | 0 | return 1; |
1106 | 0 | } |
1107 | | |
1108 | 0 | int res = PyList_Append(get_traces->list, tuple); |
1109 | 0 | Py_DECREF(tuple); |
1110 | 0 | if (res < 0) { |
1111 | 0 | return 1; |
1112 | 0 | } |
1113 | 0 | return 0; |
1114 | 0 | } |
1115 | | |
1116 | | |
1117 | | static int |
1118 | | tracemalloc_get_traces_domain(_Py_hashtable_t *domains, |
1119 | | const void *key, const void *value, |
1120 | | void *user_data) |
1121 | 0 | { |
1122 | 0 | get_traces_t *get_traces = user_data; |
1123 | 0 | unsigned int domain = (unsigned int)FROM_PTR(key); |
1124 | 0 | _Py_hashtable_t *traces = (_Py_hashtable_t *)value; |
1125 | |
|
1126 | 0 | get_traces->domain = domain; |
1127 | 0 | return _Py_hashtable_foreach(traces, |
1128 | 0 | tracemalloc_get_traces_fill, |
1129 | 0 | get_traces); |
1130 | 0 | } |
1131 | | |
1132 | | |
1133 | | static void |
1134 | | tracemalloc_pyobject_decref(void *value) |
1135 | 0 | { |
1136 | 0 | PyObject *obj = (PyObject *)value; |
1137 | 0 | Py_DECREF(obj); |
1138 | 0 | } |
1139 | | |
1140 | | |
1141 | | static traceback_t* |
1142 | | tracemalloc_get_traceback_unlocked(unsigned int domain, uintptr_t ptr) |
1143 | 0 | { |
1144 | 0 | if (!tracemalloc_config.tracing) { |
1145 | 0 | return NULL; |
1146 | 0 | } |
1147 | | |
1148 | 0 | _Py_hashtable_t *traces = tracemalloc_get_traces_table(domain); |
1149 | 0 | if (!traces) { |
1150 | 0 | return NULL; |
1151 | 0 | } |
1152 | | |
1153 | 0 | trace_t *trace = _Py_hashtable_get(traces, TO_PTR(ptr)); |
1154 | 0 | if (!trace) { |
1155 | 0 | return NULL; |
1156 | 0 | } |
1157 | 0 | return trace->traceback; |
1158 | 0 | } |
1159 | | |
1160 | | |
1161 | 0 | #define PUTS(fd, str) (void)_Py_write_noraise(fd, str, (int)strlen(str)) |
1162 | | |
/* Write one traceback frame to fd as: File "<name>", line <n>.
   Only the async-signal-safe dump helpers are used, so this can run from
   a fatal error handler without acquiring locks or allocating. */
static void
_PyMem_DumpFrame(int fd, frame_t * frame)
{
    PUTS(fd, " File \"");
    _Py_DumpASCII(fd, frame->filename);
    PUTS(fd, "\", line ");
    _Py_DumpDecimal(fd, frame->lineno);
    PUTS(fd, "\n");
}
1172 | | |
/* Dump the traceback where a memory block was allocated into file descriptor
   fd. The function may block on TABLES_LOCK() but it is unlikely. */
void
_PyMem_DumpTraceback(int fd, const void *ptr)
{
    TABLES_LOCK();
    if (!tracemalloc_config.tracing) {
        PUTS(fd, "Enable tracemalloc to get the memory block "
             "allocation traceback\n\n");
        goto done;
    }

    /* only the default domain is consulted: ptr is a raw allocator block */
    traceback_t *traceback;
    traceback = tracemalloc_get_traceback_unlocked(DEFAULT_DOMAIN,
                                                   (uintptr_t)ptr);
    if (traceback == NULL) {
        /* the block is not traced: print nothing */
        goto done;
    }

    PUTS(fd, "Memory block allocated at (most recent call first):\n");
    for (int i=0; i < traceback->nframe; i++) {
        _PyMem_DumpFrame(fd, &traceback->frames[i]);
    }
    PUTS(fd, "\n");

done:
    TABLES_UNLOCK();
}
1201 | | |
1202 | | #undef PUTS |
1203 | | |
1204 | | |
1205 | | static int |
1206 | | tracemalloc_get_tracemalloc_memory_cb(_Py_hashtable_t *domains, |
1207 | | const void *key, const void *value, |
1208 | | void *user_data) |
1209 | 0 | { |
1210 | 0 | const _Py_hashtable_t *traces = value; |
1211 | 0 | size_t *size = (size_t*)user_data; |
1212 | 0 | *size += _Py_hashtable_size(traces); |
1213 | 0 | return 0; |
1214 | 0 | } |
1215 | | |
/* Public C API: track a memory block allocated in the given domain.

   Fast path: return -2 without taking any lock when tracing is off
   (relaxed atomic read). Otherwise the GIL is acquired before
   TABLES_LOCK() — that lock ordering must match the rest of the file —
   and the tracing flag is re-checked under the lock since tracemalloc
   may have been stopped between the atomic check and lock acquisition.
   Returns -2 if not tracing, otherwise the result of
   tracemalloc_add_trace_unlocked(). */
int
PyTraceMalloc_Track(unsigned int domain, uintptr_t ptr,
                    size_t size)
{
    if (_Py_atomic_load_int_relaxed(&tracemalloc_config.tracing) == 0) {
        /* tracemalloc is not tracing: do nothing */
        return -2;
    }
    PyGILState_Ensure();
    PyGILState_STATE gil_state = PyGILState_Ensure();
    TABLES_LOCK();

    int result;
    if (tracemalloc_config.tracing) {
        result = tracemalloc_add_trace_unlocked(domain, ptr, size);
    }
    else {
        /* tracemalloc is not tracing: do nothing */
        result = -2;
    }

    TABLES_UNLOCK();
    PyGILState_Release(gil_state);
    return result;
}
1240 | | |
1241 | | |
/* Public C API: untrack a memory block previously tracked with
   PyTraceMalloc_Track().

   Fast path: return -2 without locking when tracing is off (relaxed
   atomic read); the flag is re-checked under TABLES_LOCK(). Unlike
   PyTraceMalloc_Track(), the GIL is NOT acquired here. Returns 0 once
   the trace is removed, -2 if tracemalloc is not tracing. */
int
PyTraceMalloc_Untrack(unsigned int domain, uintptr_t ptr)
{
    if (_Py_atomic_load_int_relaxed(&tracemalloc_config.tracing) == 0) {
        /* tracemalloc is not tracing: do nothing */
        return -2;
    }

    TABLES_LOCK();

    int result;
    if (tracemalloc_config.tracing) {
        tracemalloc_remove_trace_unlocked(domain, ptr);
        result = 0;
    }
    else {
        /* tracemalloc is not tracing: do nothing */
        result = -2;
    }

    TABLES_UNLOCK();
    return result;
}
1265 | | |
1266 | | |
/* Shut tracemalloc down at interpreter finalization.
   Requires an attached thread state (asserted). */
void
_PyTraceMalloc_Fini(void)
{
    _Py_AssertHoldsTstate();
    tracemalloc_deinit();
}
1273 | | |
1274 | | |
/* If the object memory block is already traced, update its trace
   with the current Python traceback.

   Do nothing if tracemalloc is not tracing memory allocations
   or if the object memory block is not already traced.

   Registered as a PyRefTracer callback; always returns 0. */
static int
_PyTraceMalloc_TraceRef(PyObject *op, PyRefTracerEvent event,
                        void* Py_UNUSED(ignore))
{
    if (event != PyRefTracer_CREATE) {
        return 0;
    }
    if (get_reentrant()) {
        /* the object is being created by tracemalloc itself: ignore */
        return 0;
    }

    _Py_AssertHoldsTstate();
    TABLES_LOCK();

    /* re-check under the lock: tracing may have been stopped meanwhile */
    if (!tracemalloc_config.tracing) {
        goto done;
    }

    /* the traced address is the start of the memory block: presize bytes
       (pre-header, e.g. GC header) before the PyObject pointer */
    PyTypeObject *type = Py_TYPE(op);
    const size_t presize = _PyType_PreHeaderSize(type);
    uintptr_t ptr = (uintptr_t)((char *)op - presize);

    trace_t *trace = _Py_hashtable_get(tracemalloc_traces, TO_PTR(ptr));
    if (trace != NULL) {
        /* update the traceback of the memory block; on traceback_new()
           failure the previous traceback is kept */
        traceback_t *traceback = traceback_new();
        if (traceback != NULL) {
            trace->traceback = traceback;
        }
    }
    /* else: cannot track the object, its memory block size is unknown */

done:
    TABLES_UNLOCK();
    return 0;
}
1316 | | |
1317 | | |
/* Return the traceback of the memory block (domain, ptr) as a tuple of
   (filename, lineno) tuples, or None if the block is not traced.

   The reentrant flag is set around traceback_to_pyobject() so the tuples
   it allocates are not traced themselves. Returns a new reference, or
   NULL with an exception set on error. */
PyObject*
_PyTraceMalloc_GetTraceback(unsigned int domain, uintptr_t ptr)
{
    TABLES_LOCK();

    traceback_t *traceback = tracemalloc_get_traceback_unlocked(domain, ptr);
    PyObject *result;
    if (traceback) {
        set_reentrant(1);
        result = traceback_to_pyobject(traceback, NULL);
        set_reentrant(0);
    }
    else {
        result = Py_NewRef(Py_None);
    }

    TABLES_UNLOCK();
    return result;
}
1337 | | |
1338 | | int |
1339 | | _PyTraceMalloc_IsTracing(void) |
1340 | 0 | { |
1341 | 0 | TABLES_LOCK(); |
1342 | 0 | int tracing = tracemalloc_config.tracing; |
1343 | 0 | TABLES_UNLOCK(); |
1344 | 0 | return tracing; |
1345 | 0 | } |
1346 | | |
1347 | | void |
1348 | | _PyTraceMalloc_ClearTraces(void) |
1349 | 0 | { |
1350 | 0 | TABLES_LOCK(); |
1351 | 0 | if (tracemalloc_config.tracing) { |
1352 | 0 | tracemalloc_clear_traces_unlocked(); |
1353 | 0 | } |
1354 | 0 | TABLES_UNLOCK(); |
1355 | 0 | } |
1356 | | |
/* Implementation of tracemalloc._get_traces(): return a list of
   (domain, size, traceback, total_nframe) tuples, one per currently
   traced memory block, or an empty list when tracemalloc is not tracing.
   Returns NULL with an exception set on error.

   The reentrant flag is set for the whole conversion so that the Python
   objects allocated here are not traced themselves. */
PyObject *
_PyTraceMalloc_GetTraces(void)
{
    TABLES_LOCK();
    set_reentrant(1);

    get_traces_t get_traces;
    get_traces.domain = DEFAULT_DOMAIN;
    get_traces.traces = NULL;
    get_traces.domains = NULL;
    get_traces.tracebacks = NULL;
    get_traces.list = PyList_New(0);
    if (get_traces.list == NULL) {
        goto finally;
    }

    if (!tracemalloc_config.tracing) {
        /* not tracing: return the empty list */
        goto finally;
    }

    /* the traceback hash table is used temporarily to intern traceback tuple
       of (filename, lineno) tuples */
    get_traces.tracebacks = hashtable_new(_Py_hashtable_hash_ptr,
                                          _Py_hashtable_compare_direct,
                                          NULL, tracemalloc_pyobject_decref);
    if (get_traces.tracebacks == NULL) {
        goto no_memory;
    }

    // Copy all traces so tracemalloc_get_traces_fill() doesn't have to disable
    // temporarily tracemalloc which would impact other threads and so would
    // miss allocations while get_traces() is called.
    get_traces.traces = tracemalloc_copy_traces(tracemalloc_traces);
    if (get_traces.traces == NULL) {
        goto no_memory;
    }

    get_traces.domains = tracemalloc_copy_domains(tracemalloc_domains);
    if (get_traces.domains == NULL) {
        goto no_memory;
    }

    // Convert traces to a list of tuples
    int err = _Py_hashtable_foreach(get_traces.traces,
                                    tracemalloc_get_traces_fill,
                                    &get_traces);
    if (!err) {
        err = _Py_hashtable_foreach(get_traces.domains,
                                    tracemalloc_get_traces_domain,
                                    &get_traces);
    }

    if (err) {
        /* a Python exception was set by the fill callback */
        Py_CLEAR(get_traces.list);
        goto finally;
    }
    goto finally;

no_memory:
    PyErr_NoMemory();
    Py_CLEAR(get_traces.list);
    goto finally;

finally:
    set_reentrant(0);
    TABLES_UNLOCK();

    /* destroy the temporary tables outside the lock */
    if (get_traces.tracebacks != NULL) {
        _Py_hashtable_destroy(get_traces.tracebacks);
    }
    if (get_traces.traces != NULL) {
        _Py_hashtable_destroy(get_traces.traces);
    }
    if (get_traces.domains != NULL) {
        _Py_hashtable_destroy(get_traces.domains);
    }

    return get_traces.list;
}
1436 | | |
1437 | | PyObject * |
1438 | | _PyTraceMalloc_GetObjectTraceback(PyObject *obj) |
1439 | | /*[clinic end generated code: output=41ee0553a658b0aa input=29495f1b21c53212]*/ |
1440 | 0 | { |
1441 | 0 | PyTypeObject *type = Py_TYPE(obj); |
1442 | 0 | const size_t presize = _PyType_PreHeaderSize(type); |
1443 | 0 | uintptr_t ptr = (uintptr_t)((char *)obj - presize); |
1444 | 0 | return _PyTraceMalloc_GetTraceback(DEFAULT_DOMAIN, ptr); |
1445 | 0 | } |
1446 | | |
1447 | | int _PyTraceMalloc_GetTracebackLimit(void) |
1448 | 0 | { |
1449 | 0 | return tracemalloc_config.max_nframe; |
1450 | 0 | } |
1451 | | |
1452 | | size_t |
1453 | | _PyTraceMalloc_GetMemory(void) |
1454 | 0 | { |
1455 | 0 | TABLES_LOCK(); |
1456 | 0 | size_t size; |
1457 | 0 | if (tracemalloc_config.tracing) { |
1458 | 0 | size = _Py_hashtable_size(tracemalloc_tracebacks); |
1459 | 0 | size += _Py_hashtable_size(tracemalloc_filenames); |
1460 | |
|
1461 | 0 | size += _Py_hashtable_size(tracemalloc_traces); |
1462 | 0 | _Py_hashtable_foreach(tracemalloc_domains, |
1463 | 0 | tracemalloc_get_tracemalloc_memory_cb, &size); |
1464 | 0 | } |
1465 | 0 | else { |
1466 | 0 | size = 0; |
1467 | 0 | } |
1468 | 0 | TABLES_UNLOCK(); |
1469 | 0 | return size; |
1470 | 0 | } |
1471 | | |
1472 | | |
1473 | | PyObject * |
1474 | | _PyTraceMalloc_GetTracedMemory(void) |
1475 | 0 | { |
1476 | 0 | TABLES_LOCK(); |
1477 | 0 | Py_ssize_t traced, peak; |
1478 | 0 | if (tracemalloc_config.tracing) { |
1479 | 0 | traced = tracemalloc_traced_memory; |
1480 | 0 | peak = tracemalloc_peak_traced_memory; |
1481 | 0 | } |
1482 | 0 | else { |
1483 | 0 | traced = 0; |
1484 | 0 | peak = 0; |
1485 | 0 | } |
1486 | 0 | TABLES_UNLOCK(); |
1487 | |
|
1488 | 0 | return Py_BuildValue("nn", traced, peak); |
1489 | 0 | } |
1490 | | |
1491 | | void |
1492 | | _PyTraceMalloc_ResetPeak(void) |
1493 | 0 | { |
1494 | 0 | TABLES_LOCK(); |
1495 | 0 | if (tracemalloc_config.tracing) { |
1496 | 0 | tracemalloc_peak_traced_memory = tracemalloc_traced_memory; |
1497 | 0 | } |
1498 | 0 | TABLES_UNLOCK(); |
1499 | 0 | } |