/src/mupdf/source/fitz/store.c
Line | Count | Source (jump to first uncovered line) |
1 | | // Copyright (C) 2004-2021 Artifex Software, Inc. |
2 | | // |
3 | | // This file is part of MuPDF. |
4 | | // |
5 | | // MuPDF is free software: you can redistribute it and/or modify it under the |
6 | | // terms of the GNU Affero General Public License as published by the Free |
7 | | // Software Foundation, either version 3 of the License, or (at your option) |
8 | | // any later version. |
9 | | // |
10 | | // MuPDF is distributed in the hope that it will be useful, but WITHOUT ANY |
11 | | // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
12 | | // FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more |
13 | | // details. |
14 | | // |
15 | | // You should have received a copy of the GNU Affero General Public License |
16 | | // along with MuPDF. If not, see <https://www.gnu.org/licenses/agpl-3.0.en.html> |
17 | | // |
18 | | // Alternative licensing terms are available from the licensor. |
19 | | // For commercial licensing, see <https://www.artifex.com/> or contact |
20 | | // Artifex Software, Inc., 39 Mesa Street, Suite 108A, San Francisco, |
21 | | // CA 94129, USA, for further information. |
22 | | |
23 | | #include "mupdf/fitz.h" |
24 | | |
25 | | #include <assert.h> |
26 | | #include <limits.h> |
27 | | #include <stdio.h> |
28 | | #include <string.h> |
29 | | |
/* A single cached entry: a key/value pair plus the links that place it
 * in the store's LRU list. All fields are protected by the alloc lock. */
typedef struct fz_item
{
	void *key; /* Lookup key; kept/dropped via type->keep_key/drop_key */
	fz_storable *val; /* Cached object; the store holds one reference to it */
	size_t size; /* Size accounted against store->size / store->max */
	struct fz_item *next; /* Towards LRU end; next == item means "hashed but not yet in the list" */
	struct fz_item *prev; /* Towards MRU end (NULL at the head) */
	fz_store *store; /* Back pointer to the owning store (not read in this file) */
	const fz_store_type *type; /* Per-type ops: hashing, key refcounting, comparison */
} fz_item;
40 | | |
/* Every entry in fz_store is protected by the alloc lock */
struct fz_store
{
	int refs; /* Reference count of the store itself */

	/* Every item in the store is kept in a doubly linked list, ordered
	 * by usage (so LRU entries are at the end). */
	fz_item *head;
	fz_item *tail;

	/* We have a hash table that allows to quickly find a subset of the
	 * entries (those whose keys are indirect objects). */
	fz_hash_table *hash;

	/* We keep track of the size of the store, and keep it below max. */
	size_t max; /* Size cap; FZ_STORE_UNLIMITED disables the cap */
	size_t size; /* Current accounted total of all item sizes */

	int defer_reap_count; /* While > 0, reaping is postponed and recorded in needs_reaping */
	int needs_reaping; /* Set when a reap was requested while reaping was deferred */
	int scavenging; /* Non-zero while scavenge() runs; prevents re-entry */
};
63 | | |
64 | | void |
65 | | fz_new_store_context(fz_context *ctx, size_t max) |
66 | 13.4k | { |
67 | 13.4k | fz_store *store; |
68 | 13.4k | store = fz_malloc_struct(ctx, fz_store); |
69 | 26.9k | fz_try(ctx) |
70 | 26.9k | { |
71 | 13.4k | store->hash = fz_new_hash_table(ctx, 4096, sizeof(fz_store_hash), FZ_LOCK_ALLOC, NULL); |
72 | 13.4k | } |
73 | 26.9k | fz_catch(ctx) |
74 | 0 | { |
75 | 0 | fz_free(ctx, store); |
76 | 0 | fz_rethrow(ctx); |
77 | 0 | } |
78 | 13.4k | store->refs = 1; |
79 | 13.4k | store->head = NULL; |
80 | 13.4k | store->tail = NULL; |
81 | 13.4k | store->size = 0; |
82 | 13.4k | store->max = max; |
83 | 13.4k | store->defer_reap_count = 0; |
84 | 13.4k | store->needs_reaping = 0; |
85 | 13.4k | ctx->store = store; |
86 | 13.4k | } |
87 | | |
88 | | void * |
89 | | fz_keep_storable(fz_context *ctx, const fz_storable *sc) |
90 | 7.82M | { |
91 | | /* Explicitly drop const to allow us to use const |
92 | | * sanely throughout the code. */ |
93 | 7.82M | fz_storable *s = (fz_storable *)sc; |
94 | | |
95 | 7.82M | return fz_keep_imp(ctx, s, &s->refs); |
96 | 7.82M | } |
97 | | |
98 | | void *fz_keep_key_storable(fz_context *ctx, const fz_key_storable *sc) |
99 | 2.09M | { |
100 | 2.09M | return fz_keep_storable(ctx, &sc->storable); |
101 | 2.09M | } |
102 | | |
103 | | /* |
104 | | Entered with FZ_LOCK_ALLOC held. |
105 | | Drops FZ_LOCK_ALLOC. |
106 | | */ |
107 | | static void |
108 | | do_reap(fz_context *ctx) |
109 | 4.10k | { |
110 | 4.10k | fz_store *store = ctx->store; |
111 | 4.10k | fz_item *item, *prev, *remove; |
112 | | |
113 | 4.10k | if (store == NULL) |
114 | 0 | { |
115 | 0 | fz_unlock(ctx, FZ_LOCK_ALLOC); |
116 | 0 | return; |
117 | 0 | } |
118 | | |
119 | 4.10k | fz_assert_lock_held(ctx, FZ_LOCK_ALLOC); |
120 | | |
121 | 4.10k | ctx->store->needs_reaping = 0; |
122 | | |
123 | 4.10k | FZ_LOG_DUMP_STORE(ctx, "Before reaping store:\n"); |
124 | | |
125 | | /* Reap the items */ |
126 | 4.10k | remove = NULL; |
127 | 110k | for (item = store->tail; item; item = prev) |
128 | 106k | { |
129 | 106k | prev = item->prev; |
130 | | |
131 | 106k | if (item->type->needs_reap == NULL || item->type->needs_reap(ctx, item->key) == 0) |
132 | 65.9k | continue; |
133 | | |
134 | | /* We have to drop it */ |
135 | 40.8k | store->size -= item->size; |
136 | | |
137 | | /* Unlink from the linked list */ |
138 | 40.8k | if (item->next) |
139 | 7.58k | item->next->prev = item->prev; |
140 | 33.2k | else |
141 | 33.2k | store->tail = item->prev; |
142 | 40.8k | if (item->prev) |
143 | 38.4k | item->prev->next = item->next; |
144 | 2.42k | else |
145 | 2.42k | store->head = item->next; |
146 | | |
147 | | /* Remove from the hash table */ |
148 | 40.8k | if (item->type->make_hash_key) |
149 | 40.8k | { |
150 | 40.8k | fz_store_hash hash = { NULL }; |
151 | 40.8k | hash.drop = item->val->drop; |
152 | 40.8k | if (item->type->make_hash_key(ctx, &hash, item->key)) |
153 | 40.8k | fz_hash_remove(ctx, store->hash, &hash); |
154 | 40.8k | } |
155 | | |
156 | | /* Store whether to drop this value or not in 'prev' */ |
157 | 40.8k | if (item->val->refs > 0) |
158 | 40.8k | (void)Memento_dropRef(item->val); |
159 | 40.8k | item->prev = (item->val->refs > 0 && --item->val->refs == 0) ? item : NULL; |
160 | | |
161 | | /* Store it in our removal chain - just singly linked */ |
162 | 40.8k | item->next = remove; |
163 | 40.8k | remove = item; |
164 | 40.8k | } |
165 | 4.10k | fz_unlock(ctx, FZ_LOCK_ALLOC); |
166 | | |
167 | | /* Now drop the remove chain */ |
168 | 44.9k | for (item = remove; item != NULL; item = remove) |
169 | 40.8k | { |
170 | 40.8k | remove = item->next; |
171 | | |
172 | | /* Drop a reference to the value (freeing if required) */ |
173 | 40.8k | if (item->prev) |
174 | 40.8k | item->val->drop(ctx, item->val); |
175 | | |
176 | | /* Always drops the key and drop the item */ |
177 | 40.8k | item->type->drop_key(ctx, item->key); |
178 | 40.8k | fz_free(ctx, item); |
179 | 40.8k | } |
180 | 4.10k | FZ_LOG_DUMP_STORE(ctx, "After reaping store:\n"); |
181 | 4.10k | } |
182 | | |
/*
	Drop a (non-key) reference to a key storable. If this leaves only
	store-key references, the object is now only reachable as a store
	key, so a reap pass is triggered (or recorded, if reaping is
	currently deferred).
*/
void fz_drop_key_storable(fz_context *ctx, const fz_key_storable *sc)
{
	/* Explicitly drop const to allow us to use const
	 * sanely throughout the code. */
	fz_key_storable *s = (fz_key_storable *)sc;
	int drop;
	int unlock = 1;

	if (s == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	assert(s->storable.refs != 0);
	if (s->storable.refs > 0)
	{
		(void)Memento_dropRef(s);
		drop = --s->storable.refs == 0;
		/* Only key references remain: reap now, or remember to. */
		if (!drop && s->storable.refs == s->store_key_refs)
		{
			if (ctx->store->defer_reap_count > 0)
			{
				ctx->store->needs_reaping = 1;
			}
			else
			{
				do_reap(ctx); /* Drops FZ_LOCK_ALLOC */
				unlock = 0;
			}
		}
	}
	else
		drop = 0;
	if (unlock)
		fz_unlock(ctx, FZ_LOCK_ALLOC);
	/*
	If we are dropping the last reference to an object, then
	it cannot possibly be in the store (as the store always
	keeps a ref to everything in it, and doesn't drop via
	this method. So we can simply drop the storable object
	itself without any operations on the fz_store.
	*/
	if (drop)
		s->storable.drop(ctx, &s->storable);
}
227 | | |
228 | | void *fz_keep_key_storable_key(fz_context *ctx, const fz_key_storable *sc) |
229 | 42.8k | { |
230 | | /* Explicitly drop const to allow us to use const |
231 | | * sanely throughout the code. */ |
232 | 42.8k | fz_key_storable *s = (fz_key_storable *)sc; |
233 | | |
234 | 42.8k | if (s == NULL) |
235 | 0 | return NULL; |
236 | | |
237 | 42.8k | fz_lock(ctx, FZ_LOCK_ALLOC); |
238 | 42.8k | if (s->storable.refs > 0) |
239 | 42.8k | { |
240 | 42.8k | (void)Memento_takeRef(s); |
241 | 42.8k | ++s->storable.refs; |
242 | 42.8k | ++s->store_key_refs; |
243 | 42.8k | } |
244 | 42.8k | fz_unlock(ctx, FZ_LOCK_ALLOC); |
245 | 42.8k | return s; |
246 | 42.8k | } |
247 | | |
248 | | void fz_drop_key_storable_key(fz_context *ctx, const fz_key_storable *sc) |
249 | 42.8k | { |
250 | | /* Explicitly drop const to allow us to use const |
251 | | * sanely throughout the code. */ |
252 | 42.8k | fz_key_storable *s = (fz_key_storable *)sc; |
253 | 42.8k | int drop; |
254 | | |
255 | 42.8k | if (s == NULL) |
256 | 0 | return; |
257 | | |
258 | 42.8k | fz_lock(ctx, FZ_LOCK_ALLOC); |
259 | 42.8k | assert(s->store_key_refs > 0 && s->storable.refs >= s->store_key_refs); |
260 | 42.8k | (void)Memento_dropRef(s); |
261 | 42.8k | drop = --s->storable.refs == 0; |
262 | 42.8k | --s->store_key_refs; |
263 | 42.8k | fz_unlock(ctx, FZ_LOCK_ALLOC); |
264 | | /* |
265 | | If we are dropping the last reference to an object, then |
266 | | it cannot possibly be in the store (as the store always |
267 | | keeps a ref to everything in it, and doesn't drop via |
268 | | this method. So we can simply drop the storable object |
269 | | itself without any operations on the fz_store. |
270 | | */ |
271 | 42.8k | if (drop) |
272 | 42.3k | s->storable.drop(ctx, &s->storable); |
273 | 42.8k | } |
274 | | |
/*
	Remove one item from the store: unaccount its size, unlink it from
	the LRU list and the hash table, then drop the value reference,
	the key, and the item.

	Called with FZ_LOCK_ALLOC held; the lock is dropped around the
	drop/free calls (which may be arbitrarily expensive) and retaken
	before returning.
*/
static void
evict(fz_context *ctx, fz_item *item)
{
	fz_store *store = ctx->store;
	int drop;

	store->size -= item->size;
	/* Unlink from the linked list */
	if (item->next)
		item->next->prev = item->prev;
	else
		store->tail = item->prev;
	if (item->prev)
		item->prev->next = item->next;
	else
		store->head = item->next;

	/* Drop a reference to the value (freeing if required) */
	if (item->val->refs > 0)
		(void)Memento_dropRef(item->val);
	drop = (item->val->refs > 0 && --item->val->refs == 0);

	/* Remove from the hash table */
	if (item->type->make_hash_key)
	{
		fz_store_hash hash = { NULL };
		hash.drop = item->val->drop;
		if (item->type->make_hash_key(ctx, &hash, item->key))
			fz_hash_remove(ctx, store->hash, &hash);
	}
	/* Item is now unreachable from both list and hash; safe to unlock. */
	fz_unlock(ctx, FZ_LOCK_ALLOC);
	if (drop)
		item->val->drop(ctx, item->val);

	/* Always drops the key and drop the item */
	item->type->drop_key(ctx, item->key);
	fz_free(ctx, item);
	fz_lock(ctx, FZ_LOCK_ALLOC);
}
314 | | |
/*
	Try to free at least 'tofree' bytes by evicting items whose only
	remaining reference is the store's own (refs == 1), oldest first.

	Returns the number of bytes actually freed, or 0 if not enough
	could ever be freed — in which case nothing at all is evicted.

	Called with FZ_LOCK_ALLOC held; drops and retakes it while freeing.
*/
static size_t
ensure_space(fz_context *ctx, size_t tofree)
{
	fz_item *item, *prev;
	size_t count;
	fz_store *store = ctx->store;
	fz_item *to_be_freed = NULL;

	fz_assert_lock_held(ctx, FZ_LOCK_ALLOC);

	/* First check that we *can* free tofree; if not, we'd rather not
	 * cache this. */
	count = 0;
	for (item = store->tail; item; item = item->prev)
	{
		if (item->val->refs == 1)
		{
			count += item->size;
			if (count >= tofree)
				break;
		}
	}

	/* If we ran out of items to search, then we can never free enough */
	if (item == NULL)
	{
		return 0;
	}

	/* Now move all the items to be freed onto 'to_be_freed' */
	count = 0;
	for (item = store->tail; item; item = prev)
	{
		prev = item->prev;
		if (item->val->refs != 1)
			continue;

		store->size -= item->size;

		/* Unlink from the linked list */
		if (item->next)
			item->next->prev = item->prev;
		else
			store->tail = item->prev;
		if (item->prev)
			item->prev->next = item->next;
		else
			store->head = item->next;

		/* Remove from the hash table */
		if (item->type->make_hash_key)
		{
			fz_store_hash hash = { NULL };
			hash.drop = item->val->drop;
			if (item->type->make_hash_key(ctx, &hash, item->key))
				fz_hash_remove(ctx, store->hash, &hash);
		}

		/* Link into to_be_freed (singly linked; old links are dead) */
		item->next = to_be_freed;
		to_be_freed = item;

		count += item->size;
		if (count >= tofree)
			break;
	}

	/* Now we can safely drop the lock and free our pending items. These
	 * have all been removed from both the store list, and the hash table,
	 * so they can't be 'found' by anyone else in the meantime. */

	while (to_be_freed)
	{
		fz_item *item = to_be_freed;
		int drop;

		to_be_freed = to_be_freed->next;

		/* Drop a reference to the value (freeing if required) */
		if (item->val->refs > 0)
			(void)Memento_dropRef(item->val);
		drop = (item->val->refs > 0 && --item->val->refs == 0);

		fz_unlock(ctx, FZ_LOCK_ALLOC);
		if (drop)
			item->val->drop(ctx, item->val);

		/* Always drops the key and drop the item */
		item->type->drop_key(ctx, item->key);
		fz_free(ctx, item);
		fz_lock(ctx, FZ_LOCK_ALLOC);
	}

	return count;
}
410 | | |
411 | | static void |
412 | | touch(fz_store *store, fz_item *item) |
413 | 679k | { |
414 | 679k | if (item->next != item) |
415 | 598k | { |
416 | | /* Already in the list - unlink it */ |
417 | 598k | if (item->next) |
418 | 550k | item->next->prev = item->prev; |
419 | 47.8k | else |
420 | 47.8k | store->tail = item->prev; |
421 | 598k | if (item->prev) |
422 | 368k | item->prev->next = item->next; |
423 | 229k | else |
424 | 229k | store->head = item->next; |
425 | 598k | } |
426 | | /* Now relink it at the start of the LRU chain */ |
427 | 679k | item->next = store->head; |
428 | 679k | if (item->next) |
429 | 639k | item->next->prev = item; |
430 | 39.5k | else |
431 | 39.5k | store->tail = item; |
432 | 679k | store->head = item; |
433 | 679k | item->prev = NULL; |
434 | 679k | } |
435 | | |
/*
	Add an item to the store, keyed on 'key', accounting 'itemsize'
	bytes against the store budget. Takes its own references to both
	key (via type->keep_key) and value.

	Returns NULL on success AND on any failure to store (failures are
	swallowed; the caller simply doesn't get caching). If an entry for
	the same key already exists, returns that existing value with a
	new reference, and the caller should use it instead of val.
*/
void *
fz_store_item(fz_context *ctx, void *key, void *val_, size_t itemsize, const fz_store_type *type)
{
	fz_item *item = NULL;
	size_t size;
	fz_storable *val = (fz_storable *)val_;
	fz_store *store = ctx->store;
	fz_store_hash hash = { NULL };
	int use_hash = 0;

	if (!store)
		return NULL;

	/* If we fail for any reason, we swallow the exception and continue.
	 * All that the above program will see is that we failed to store
	 * the item. */

	item = Memento_label(fz_malloc_no_throw(ctx, sizeof (fz_item)), "fz_item");
	if (!item)
		return NULL;
	memset(item, 0, sizeof (fz_item));

	/* Compute the hash key outside the lock, if this type supports one. */
	if (type->make_hash_key)
	{
		hash.drop = val->drop;
		use_hash = type->make_hash_key(ctx, &hash, key);
	}

	type->keep_key(ctx, key);
	fz_lock(ctx, FZ_LOCK_ALLOC);

	/* Fill out the item. To start with, we always set item->next == item
	 * and item->prev == item. This is so that we can spot items that have
	 * been put into the hash table without having made it into the linked
	 * list yet. */
	item->key = key;
	item->val = val;
	item->size = itemsize;
	item->next = item;
	item->prev = item;
	item->type = type;

	/* If we can index it fast, put it into the hash table. This serves
	 * to check whether we have one there already. */
	if (use_hash)
	{
		fz_item *existing = NULL;

		fz_try(ctx)
		{
			/* May drop and retake the lock */
			existing = fz_hash_insert(ctx, store->hash, &hash, item);
		}
		fz_catch(ctx)
		{
			/* Any error here means that item never made it into the
			 * hash - so no one else can have a reference. */
			fz_unlock(ctx, FZ_LOCK_ALLOC);
			fz_free(ctx, item);
			type->drop_key(ctx, key);
			return NULL;
		}
		if (existing)
		{
			/* There was one there already! Take a new reference
			 * to the existing one, and drop our current one. */
			fz_warn(ctx, "found duplicate %s in the store", type->name);
			touch(store, existing);
			if (existing->val->refs > 0)
			{
				(void)Memento_takeRef(existing->val);
				existing->val->refs++;
			}
			fz_unlock(ctx, FZ_LOCK_ALLOC);
			fz_free(ctx, item);
			type->drop_key(ctx, key);
			return existing->val;
		}
	}

	/* Now bump the ref */
	if (val->refs > 0)
	{
		(void)Memento_takeRef(val);
		val->refs++;
	}

	/* If we haven't got an infinite store, check for space within it */
	if (store->max != FZ_STORE_UNLIMITED)
	{
		/* FIXME: Overflow? */
		size = store->size + itemsize;
		if (size > store->max)
		{
			FZ_LOG_STORE(ctx, "Store size exceeded: item=%zu, size=%zu, max=%zu\n",
					itemsize, store->size, store->max);
			while (size > store->max)
			{
				size_t saved;

				/* First, do any outstanding reaping, even if defer_reap_count > 0 */
				if (store->needs_reaping)
				{
					do_reap(ctx); /* Drops alloc lock */
					fz_lock(ctx, FZ_LOCK_ALLOC);
				}
				/* Reaping may have changed store->size; recompute. */
				size = store->size + itemsize;
				if (size <= store->max)
					break;

				/* ensure_space may drop, then retake the lock */
				saved = ensure_space(ctx, size - store->max);
				size -= saved;
				if (saved == 0)
				{
					/* Failed to free any space. */
					/* We used to 'unstore' it here, but that's wrong.
					 * If we've already spent the memory to malloc it
					 * then not putting it in the store just means that
					 * a resource used multiple times will just be malloced
					 * again. Better to put it in the store, have the
					 * store account for it, and for it to potentially be reused.
					 * When the caller drops the reference to it, it can then
					 * be dropped from the store on the next attempt to store
					 * anything else. */
					break;
				}
			}
			FZ_LOG_DUMP_STORE(ctx, "After eviction:\n");
		}
	}
	store->size += itemsize;

	/* Regardless of whether it's indexed, it goes into the linked list */
	touch(store, item);
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	return NULL;
}
575 | | |
/*
	Look up an item in the store by (drop, key, type). On a hit, the
	item is moved to the head of the LRU list and its value returned
	with a new reference; on a miss, returns NULL.
*/
void *
fz_find_item(fz_context *ctx, fz_store_drop_fn *drop, void *key, const fz_store_type *type)
{
	fz_item *item;
	fz_store *store = ctx->store;
	fz_store_hash hash = { NULL };
	int use_hash = 0;

	if (!store)
		return NULL;

	if (!key)
		return NULL;

	/* Compute the hash key outside the lock, if this type supports one. */
	if (type->make_hash_key)
	{
		hash.drop = drop;
		use_hash = type->make_hash_key(ctx, &hash, key);
	}

	fz_lock(ctx, FZ_LOCK_ALLOC);
	if (use_hash)
	{
		/* We can find objects keyed on indirected objects quickly */
		item = fz_hash_find(ctx, store->hash, &hash);
	}
	else
	{
		/* Others we have to hunt for slowly */
		for (item = store->head; item; item = item->next)
		{
			if (item->val->drop == drop && !type->cmp_key(ctx, item->key, key))
				break;
		}
	}
	if (item)
	{
		/* LRU the block. This also serves to ensure that any item
		 * picked up from the hash before it has made it into the
		 * linked list does not get whipped out again due to the
		 * store being full. */
		touch(store, item);
		/* And bump the refcount before returning */
		if (item->val->refs > 0)
		{
			(void)Memento_takeRef(item->val);
			item->val->refs++;
		}
		fz_unlock(ctx, FZ_LOCK_ALLOC);
		return (void *)item->val;
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	return NULL;
}
631 | | |
/*
	Remove the item matching (drop, key, type) from the store, if
	present: unlink it from the hash and list, drop the store's
	reference to the value, and free the key and item.
*/
void
fz_remove_item(fz_context *ctx, fz_store_drop_fn *drop, void *key, const fz_store_type *type)
{
	fz_item *item;
	fz_store *store = ctx->store;
	int dodrop;
	fz_store_hash hash = { NULL };
	int use_hash = 0;

	/* Compute the hash key outside the lock, if this type supports one. */
	if (type->make_hash_key)
	{
		hash.drop = drop;
		use_hash = type->make_hash_key(ctx, &hash, key);
	}

	fz_lock(ctx, FZ_LOCK_ALLOC);
	if (use_hash)
	{
		/* We can find objects keyed on indirect objects quickly */
		item = fz_hash_find(ctx, store->hash, &hash);
		if (item)
			fz_hash_remove(ctx, store->hash, &hash);
	}
	else
	{
		/* Others we have to hunt for slowly */
		for (item = store->head; item; item = item->next)
			if (item->val->drop == drop && !type->cmp_key(ctx, item->key, key))
				break;
	}
	if (item)
	{
		/* Momentarily things can be in the hash table without being
		 * in the list. Don't attempt to unlink these. We indicate
		 * such items by setting item->next == item. */
		if (item->next != item)
		{
			if (item->next)
				item->next->prev = item->prev;
			else
				store->tail = item->prev;
			if (item->prev)
				item->prev->next = item->next;
			else
				store->head = item->next;
		}
		if (item->val->refs > 0)
			(void)Memento_dropRef(item->val);
		dodrop = (item->val->refs > 0 && --item->val->refs == 0);
		/* Item is unreachable now; drop the lock for the expensive work. */
		fz_unlock(ctx, FZ_LOCK_ALLOC);
		if (dodrop)
			item->val->drop(ctx, item->val);
		type->drop_key(ctx, item->key);
		fz_free(ctx, item);
	}
	else
		fz_unlock(ctx, FZ_LOCK_ALLOC);
}
690 | | |
691 | | void |
692 | | fz_empty_store(fz_context *ctx) |
693 | 13.4k | { |
694 | 13.4k | fz_store *store = ctx->store; |
695 | | |
696 | 13.4k | if (store == NULL) |
697 | 0 | return; |
698 | | |
699 | 13.4k | fz_lock(ctx, FZ_LOCK_ALLOC); |
700 | | /* Run through all the items in the store */ |
701 | 24.7k | while (store->head) |
702 | 11.2k | evict(ctx, store->head); /* Drops then retakes lock */ |
703 | 13.4k | fz_unlock(ctx, FZ_LOCK_ALLOC); |
704 | 13.4k | } |
705 | | |
706 | | fz_store * |
707 | | fz_keep_store_context(fz_context *ctx) |
708 | 0 | { |
709 | 0 | if (ctx == NULL || ctx->store == NULL) |
710 | 0 | return NULL; |
711 | 0 | return fz_keep_imp(ctx, ctx->store, &ctx->store->refs); |
712 | 0 | } |
713 | | |
714 | | void |
715 | | fz_drop_store_context(fz_context *ctx) |
716 | 13.4k | { |
717 | 13.4k | if (!ctx) |
718 | 0 | return; |
719 | 13.4k | if (fz_drop_imp(ctx, ctx->store, &ctx->store->refs)) |
720 | 13.4k | { |
721 | 13.4k | fz_empty_store(ctx); |
722 | 13.4k | fz_drop_hash_table(ctx, ctx->store->hash); |
723 | 13.4k | fz_free(ctx, ctx->store); |
724 | 13.4k | ctx->store = NULL; |
725 | 13.4k | } |
726 | 13.4k | } |
727 | | |
728 | | static void |
729 | | fz_debug_store_item(fz_context *ctx, void *state, void *key_, int keylen, void *item_) |
730 | 0 | { |
731 | 0 | unsigned char *key = key_; |
732 | 0 | fz_item *item = item_; |
733 | 0 | int i; |
734 | 0 | char buf[256]; |
735 | 0 | fz_output *out = (fz_output *)state; |
736 | 0 | fz_unlock(ctx, FZ_LOCK_ALLOC); |
737 | 0 | item->type->format_key(ctx, buf, sizeof buf, item->key); |
738 | 0 | fz_lock(ctx, FZ_LOCK_ALLOC); |
739 | 0 | fz_write_printf(ctx, out, "STORE\thash["); |
740 | 0 | for (i=0; i < keylen; ++i) |
741 | 0 | fz_write_printf(ctx, out,"%02x", key[i]); |
742 | 0 | fz_write_printf(ctx, out, "][refs=%d][size=%d] key=%s val=%p\n", item->val->refs, (int)item->size, buf, (void *)item->val); |
743 | 0 | } |
744 | | |
/*
	Dump the store contents (both the LRU list and the hash table) to
	'out'. Called with FZ_LOCK_ALLOC held; the lock is dropped and
	retaken around format_key calls.
*/
static void
fz_debug_store_locked(fz_context *ctx, fz_output *out)
{
	fz_item *item, *next;
	char buf[256];
	fz_store *store = ctx->store;
	size_t list_total = 0;

	fz_write_printf(ctx, out, "STORE\t-- resource store contents --\n");

	for (item = store->head; item; item = next)
	{
		next = item->next;
		/* Take a temporary reference on 'next' so it cannot vanish
		 * while the lock is dropped below. */
		if (next)
		{
			(void)Memento_takeRef(next->val);
			next->val->refs++;
		}
		fz_unlock(ctx, FZ_LOCK_ALLOC);
		item->type->format_key(ctx, buf, sizeof buf, item->key);
		fz_lock(ctx, FZ_LOCK_ALLOC);
		fz_write_printf(ctx, out, "STORE\tstore[*][refs=%d][size=%d] key=%s val=%p\n",
				item->val->refs, (int)item->size, buf, (void *)item->val);
		list_total += item->size;
		/* Release the temporary reference again. */
		if (next)
		{
			(void)Memento_dropRef(next->val);
			next->val->refs--;
		}
	}

	fz_write_printf(ctx, out, "STORE\t-- resource store hash contents --\n");
	fz_hash_for_each(ctx, store->hash, out, fz_debug_store_item);
	fz_write_printf(ctx, out, "STORE\t-- end --\n");

	fz_write_printf(ctx, out, "STORE\tmax=%zu, size=%zu, actual size=%zu\n", store->max, store->size, list_total);
}
782 | | |
783 | | void |
784 | | fz_debug_store(fz_context *ctx, fz_output *out) |
785 | 0 | { |
786 | 0 | fz_lock(ctx, FZ_LOCK_ALLOC); |
787 | 0 | fz_debug_store_locked(ctx, out); |
788 | 0 | fz_unlock(ctx, FZ_LOCK_ALLOC); |
789 | 0 | } |
790 | | |
791 | | /* |
792 | | Consider if we have blocks of the following sizes in the store, from oldest |
793 | | to newest: |
794 | | |
795 | | A 32 |
796 | | B 64 |
797 | | C 128 |
798 | | D 256 |
799 | | |
800 | | Further suppose we need to free 97 bytes. Naively freeing blocks until we have |
801 | | freed enough would mean we'd free A, B and C, when we could have freed just C. |
802 | | |
803 | | We are forced into an n^2 algorithm by the need to drop the lock as part of the |
804 | | eviction, so we might as well embrace it and go for a solution that properly |
805 | | drops just C. |
806 | | |
807 | | The algorithm used is to scan the list of blocks from oldest to newest, counting |
808 | | how many bytes we can free in the blocks we pass. We stop this scan when we have |
809 | | found enough blocks. We then free the largest block. This releases the lock |
810 | | momentarily, which means we have to start the scan process all over again, so |
811 | | we repeat. This guarantees we only evict a minimum of blocks, but does mean we |
812 | | scan more blocks than we'd ideally like. |
813 | | */ |
814 | | static int |
815 | | scavenge(fz_context *ctx, size_t tofree) |
816 | 211 | { |
817 | 211 | fz_store *store = ctx->store; |
818 | 211 | size_t freed = 0; |
819 | 211 | fz_item *item; |
820 | | |
821 | 211 | if (store->scavenging) |
822 | 0 | return 0; |
823 | | |
824 | 211 | store->scavenging = 1; |
825 | | |
826 | 211 | do |
827 | 217 | { |
828 | | /* Count through a suffix of objects in the store until |
829 | | * we find enough to give us what we need to evict. */ |
830 | 217 | size_t suffix_size = 0; |
831 | 217 | fz_item *largest = NULL; |
832 | | |
833 | 261 | for (item = store->tail; item; item = item->prev) |
834 | 45 | { |
835 | 45 | if (item->val->refs == 1 && (item->val->droppable == NULL || item->val->droppable(ctx, item->val))) |
836 | 13 | { |
837 | | /* This one is evictable */ |
838 | 13 | suffix_size += item->size; |
839 | 13 | if (largest == NULL || item->size > largest->size) |
840 | 11 | largest = item; |
841 | 13 | if (suffix_size >= tofree - freed) |
842 | 1 | break; |
843 | 13 | } |
844 | 45 | } |
845 | | |
846 | | /* If there are no evictable blocks, we can't find anything to free. */ |
847 | 217 | if (largest == NULL) |
848 | 210 | break; |
849 | | |
850 | | /* Free largest. */ |
851 | 7 | if (freed == 0) { |
852 | 3 | FZ_LOG_DUMP_STORE(ctx, "Before scavenge:\n"); |
853 | 3 | } |
854 | 7 | freed += largest->size; |
855 | 7 | evict(ctx, largest); /* Drops then retakes lock */ |
856 | 7 | } |
857 | 211 | while (freed < tofree); |
858 | | |
859 | 211 | if (freed != 0) { |
860 | 3 | FZ_LOG_DUMP_STORE(ctx, "After scavenge:\n"); |
861 | 3 | } |
862 | 211 | store->scavenging = 0; |
863 | | /* Success is managing to evict any blocks */ |
864 | 211 | return freed != 0; |
865 | 211 | } |
866 | | |
/*
	Drop a reference to a storable object, freeing it when the last
	reference goes. If exactly one reference remains afterwards (very
	possibly the store's own) and the store is over budget, run a
	scavenge so such store-only objects can be reclaimed.
*/
void
fz_drop_storable(fz_context *ctx, const fz_storable *sc)
{
	/* Explicitly drop const to allow us to use const
	 * sanely throughout the code. */
	fz_storable *s = (fz_storable *)sc;
	int num;

	if (s == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);
	/* Drop the ref, and leave num as being the number of
	 * refs left (-1 meaning, "statically allocated"). */
	if (s->refs > 0)
	{
		(void)Memento_dropIntRef(s);
		num = --s->refs;
	}
	else
		num = -1;

	/* If we have just 1 ref left, it's possible that
	 * this ref is held by the store. If the store is
	 * oversized, we ought to throw any such references
	 * away to try to bring the store down to a "legal"
	 * size. Run a scavenge to check for this case. */
	if (ctx->store->max != FZ_STORE_UNLIMITED)
		if (num == 1 && ctx->store->size > ctx->store->max)
			scavenge(ctx, ctx->store->size - ctx->store->max);
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	/* If we have no references to an object left, then
	 * it cannot possibly be in the store (as the store always
	 * keeps a ref to everything in it, and doesn't drop via
	 * this method). So we can simply drop the storable object
	 * itself without any operations on the fz_store.
	 */
	if (num == 0)
		s->drop(ctx, s);
}
908 | | |
909 | | int fz_store_scavenge_external(fz_context *ctx, size_t size, int *phase) |
910 | 0 | { |
911 | 0 | int ret; |
912 | |
|
913 | 0 | fz_lock(ctx, FZ_LOCK_ALLOC); |
914 | 0 | ret = fz_store_scavenge(ctx, size, phase); |
915 | 0 | fz_unlock(ctx, FZ_LOCK_ALLOC); |
916 | |
|
917 | 0 | return ret; |
918 | 0 | } |
919 | | |
920 | | int fz_store_scavenge(fz_context *ctx, size_t size, int *phase) |
921 | 138 | { |
922 | 138 | fz_store *store; |
923 | 138 | size_t max; |
924 | | |
925 | 138 | store = ctx->store; |
926 | 138 | if (store == NULL) |
927 | 0 | return 0; |
928 | | |
929 | | #ifdef DEBUG_SCAVENGING |
930 | | fz_write_printf(ctx, fz_stdout(ctx), "Scavenging: store=%zu size=%zu phase=%d\n", store->size, size, *phase); |
931 | | fz_debug_store_locked(ctx, fz_stdout(ctx)); |
932 | | Memento_stats(); |
933 | | #endif |
934 | 138 | do |
935 | 2.31k | { |
936 | 2.31k | size_t tofree; |
937 | | |
938 | | /* Calculate 'max' as the maximum size of the store for this phase */ |
939 | 2.31k | if (*phase >= 16) |
940 | 136 | max = 0; |
941 | 2.17k | else if (store->max != FZ_STORE_UNLIMITED) |
942 | 2.17k | max = store->max / 16 * (16 - *phase); |
943 | 0 | else |
944 | 0 | max = store->size / (16 - *phase) * (15 - *phase); |
945 | 2.31k | (*phase)++; |
946 | | |
947 | | /* Slightly baroque calculations to avoid overflow */ |
948 | 2.31k | if (size > SIZE_MAX - store->size) |
949 | 0 | tofree = SIZE_MAX - max; |
950 | 2.31k | else if (size + store->size > max) |
951 | 2.10k | continue; |
952 | 210 | else |
953 | 210 | tofree = size + store->size - max; |
954 | | |
955 | 210 | if (scavenge(ctx, tofree)) |
956 | 2 | { |
957 | | #ifdef DEBUG_SCAVENGING |
958 | | fz_write_printf(ctx, fz_stdout(ctx), "scavenged: store=%zu\n", store->size); |
959 | | fz_debug_store(ctx, fz_stdout(ctx)); |
960 | | Memento_stats(); |
961 | | #endif |
962 | 2 | return 1; |
963 | 2 | } |
964 | 210 | } |
965 | 2.31k | while (max > 0); |
966 | | |
967 | | #ifdef DEBUG_SCAVENGING |
968 | | fz_write_printf(ctx, fz_stdout(ctx), "scavenging failed\n"); |
969 | | fz_debug_store(ctx, fz_stdout(ctx)); |
970 | | Memento_listBlocks(); |
971 | | #endif |
972 | 136 | return 0; |
973 | 138 | } |
974 | | |
975 | | int |
976 | | fz_shrink_store(fz_context *ctx, unsigned int percent) |
977 | 0 | { |
978 | 0 | int success; |
979 | 0 | fz_store *store; |
980 | 0 | size_t new_size; |
981 | |
|
982 | 0 | if (percent >= 100) |
983 | 0 | return 1; |
984 | | |
985 | 0 | store = ctx->store; |
986 | 0 | if (store == NULL) |
987 | 0 | return 0; |
988 | | |
989 | | #ifdef DEBUG_SCAVENGING |
990 | | fz_write_printf(ctx, fz_stdout(ctx), "fz_shrink_store: %zu\n", store->size/(1024*1024)); |
991 | | #endif |
992 | 0 | fz_lock(ctx, FZ_LOCK_ALLOC); |
993 | |
|
994 | 0 | new_size = (size_t)(((uint64_t)store->size * percent) / 100); |
995 | 0 | if (store->size > new_size) |
996 | 0 | scavenge(ctx, store->size - new_size); |
997 | |
|
998 | 0 | success = (store->size <= new_size) ? 1 : 0; |
999 | 0 | fz_unlock(ctx, FZ_LOCK_ALLOC); |
1000 | | #ifdef DEBUG_SCAVENGING |
1001 | | fz_write_printf(ctx, fz_stdout(ctx), "fz_shrink_store after: %zu\n", store->size/(1024*1024)); |
1002 | | #endif |
1003 | |
|
1004 | 0 | return success; |
1005 | 0 | } |
1006 | | |
/* Remove from the store every item of the given type for which
 * fn(ctx, arg, key) returns non-zero.
 *
 * Runs in two passes: items are unlinked from the LRU list, removed
 * from the hash table and un-reffed all under FZ_LOCK_ALLOC, then the
 * collected items have their drop callbacks invoked in a second pass
 * with the lock released (drop callbacks may themselves take locks). */
void fz_filter_store(fz_context *ctx, fz_store_filter_fn *fn, void *arg, const fz_store_type *type)
{
	fz_store *store;
	fz_item *item, *prev, *remove;

	store = ctx->store;
	if (store == NULL)
		return;

	fz_lock(ctx, FZ_LOCK_ALLOC);

	/* Filter the items */
	remove = NULL;
	/* Walk from the tail (LRU end) so that 'prev' is captured before
	 * the current item is unlinked or repurposed below. */
	for (item = store->tail; item; item = prev)
	{
		prev = item->prev;
		if (item->type != type)
			continue;

		if (fn(ctx, arg, item->key) == 0)
			continue;

		/* We have to drop it */
		store->size -= item->size;

		/* Unlink from the linked list */
		if (item->next)
			item->next->prev = item->prev;
		else
			store->tail = item->prev;
		if (item->prev)
			item->prev->next = item->next;
		else
			store->head = item->next;

		/* Remove from the hash table */
		if (item->type->make_hash_key)
		{
			fz_store_hash hash = { NULL };
			hash.drop = item->val->drop;
			/* If make_hash_key fails, the item was presumably never
			 * hashed in the first place - nothing to remove then. */
			if (item->type->make_hash_key(ctx, &hash, item->key))
				fz_hash_remove(ctx, store->hash, &hash);
		}

		/* Store whether to drop this value or not in 'prev':
		 * non-NULL means the store held the last reference and the
		 * value must be destroyed in the unlocked pass below. */
		if (item->val->refs > 0)
			(void)Memento_dropRef(item->val);
		item->prev = (item->val->refs > 0 && --item->val->refs == 0) ? item : NULL;

		/* Store it in our removal chain - just singly linked */
		item->next = remove;
		remove = item;
	}
	fz_unlock(ctx, FZ_LOCK_ALLOC);

	/* Now drop the remove chain */
	for (item = remove; item != NULL; item = remove)
	{
		remove = item->next;

		/* Drop a reference to the value (freeing if required) */
		if (item->prev) /* See above for our abuse of prev here */
			item->val->drop(ctx, item->val);

		/* Always drops the key and drop the item */
		item->type->drop_key(ctx, item->key);
		fz_free(ctx, item);
	}
}
1076 | | |
1077 | | void fz_defer_reap_start(fz_context *ctx) |
1078 | 202k | { |
1079 | 202k | if (ctx->store == NULL) |
1080 | 0 | return; |
1081 | | |
1082 | 202k | fz_lock(ctx, FZ_LOCK_ALLOC); |
1083 | 202k | ctx->store->defer_reap_count++; |
1084 | 202k | fz_unlock(ctx, FZ_LOCK_ALLOC); |
1085 | 202k | } |
1086 | | |
1087 | | void fz_defer_reap_end(fz_context *ctx) |
1088 | 202k | { |
1089 | 202k | int reap; |
1090 | | |
1091 | 202k | if (ctx->store == NULL) |
1092 | 0 | return; |
1093 | | |
1094 | 202k | fz_lock(ctx, FZ_LOCK_ALLOC); |
1095 | 202k | --ctx->store->defer_reap_count; |
1096 | 202k | reap = ctx->store->defer_reap_count == 0 && ctx->store->needs_reaping; |
1097 | 202k | if (reap) |
1098 | 703 | do_reap(ctx); /* Drops FZ_LOCK_ALLOC */ |
1099 | 201k | else |
1100 | 201k | fz_unlock(ctx, FZ_LOCK_ALLOC); |
1101 | 202k | } |
1102 | | |
1103 | | #ifdef ENABLE_STORE_LOGGING |
1104 | | |
1105 | | void fz_log_dump_store(fz_context *ctx, const char *fmt, ...) |
1106 | | { |
1107 | | fz_output *out; |
1108 | | va_list args; |
1109 | | va_start(args, fmt); |
1110 | | out = fz_new_log_for_module(ctx, "STORE"); |
1111 | | fz_write_vprintf(ctx, out, fmt, args); |
1112 | | va_end(args); |
1113 | | fz_debug_store(ctx, out); |
1114 | | fz_write_printf(ctx, out, "STORE\tEND\n"); |
1115 | | fz_close_output(ctx, out); |
1116 | | fz_drop_output(ctx, out); |
1117 | | } |
1118 | | |
1119 | | #endif |