/src/libwebsockets/lib/misc/cache-ttl/heap.c
Line | Count | Source |
1 | | /* |
2 | | * libwebsockets - small server side websockets and web server implementation |
3 | | * |
4 | | * Copyright (C) 2010 - 2021 Andy Green <andy@warmcat.com> |
5 | | * |
6 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
7 | | * of this software and associated documentation files (the "Software"), to |
8 | | * deal in the Software without restriction, including without limitation the |
9 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
10 | | * sell copies of the Software, and to permit persons to whom the Software is |
11 | | * furnished to do so, subject to the following conditions: |
12 | | * |
13 | | * The above copyright notice and this permission notice shall be included in |
14 | | * all copies or substantial portions of the Software. |
15 | | * |
16 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
19 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
20 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
21 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
22 | | * IN THE SOFTWARE. |
23 | | */ |
24 | | |
25 | | #include <private-lib-core.h> |
26 | | #include "private-lib-misc-cache-ttl.h" |
27 | | |
28 | | #if defined(write) |
29 | | #undef write |
30 | | #endif |
31 | | |
32 | | static void |
33 | | update_sul(lws_cache_ttl_lru_t_heap_t *cache); |
34 | | |
35 | | static int |
36 | | lws_cache_heap_invalidate(struct lws_cache_ttl_lru *_c, const char *key); |
37 | | |
38 | | static int |
39 | | sort_expiry(const lws_dll2_t *a, const lws_dll2_t *b) |
40 | 0 | { |
41 | 0 | const lws_cache_ttl_item_heap_t |
42 | 0 | *c = lws_container_of(a, lws_cache_ttl_item_heap_t, list_expiry), |
43 | 0 | *d = lws_container_of(b, lws_cache_ttl_item_heap_t, list_expiry); |
44 | |
|
45 | 0 | if (c->expiry > d->expiry) |
46 | 0 | return 1; |
47 | 0 | if (c->expiry < d->expiry) |
48 | 0 | return -1; |
49 | | |
50 | 0 | return 0; |
51 | 0 | } |
52 | | |
/*
 * Low-level single-item teardown: unlink the item from both of its owner
 * lists, adjust the cache footprint accounting, reassess the expiry timer,
 * give the user a last look at the payload, then free the allocation.
 *
 * The item's tag string is stored immediately after the payload, at
 * (char *)&item[1] + item->size.
 */
static void
_lws_cache_heap_item_destroy(lws_cache_ttl_lru_t_heap_t *cache,
			     lws_cache_ttl_item_heap_t *item)
{
	lwsl_cache("%s: %s (%s)\n", __func__, cache->cache.info.name,
		   (const char *)&item[1] + item->size);

	/* remove is safe even if the item never joined the expiry list */
	lws_dll2_remove(&item->list_expiry);
	lws_dll2_remove(&item->list_lru);

	cache->cache.current_footprint -= item->size;

	/* the earliest-expiring item may just have gone away */
	update_sul(cache);

	/* let the user see the payload before it is freed */
	if (cache->cache.info.cb)
		cache->cache.info.cb((void *)((uint8_t *)&item[1]), item->size);

	lws_free(item);
}
72 | | |
73 | | static void |
74 | | lws_cache_heap_item_destroy(lws_cache_ttl_lru_t_heap_t *cache, |
75 | | lws_cache_ttl_item_heap_t *item, int parent_too) |
76 | 0 | { |
77 | 0 | struct lws_cache_ttl_lru *backing = &cache->cache; |
78 | 0 | const char *tag = ((const char *)&item[1]) + item->size; |
79 | | |
80 | | /* |
81 | | * We're destroying a normal item? |
82 | | */ |
83 | |
|
84 | 0 | if (*tag == META_ITEM_LEADING) |
85 | | /* no, nothing to check here then */ |
86 | 0 | goto post; |
87 | | |
88 | 0 | if (backing->info.parent) |
89 | 0 | backing = backing->info.parent; |
90 | | |
91 | | /* |
92 | | * We need to check any cached meta-results from lookups that |
93 | | * include this normal item, and if any, invalidate the meta-results |
94 | | * since they have to be recalculated before being used again. |
95 | | */ |
96 | |
|
97 | 0 | lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1, |
98 | 0 | cache->items_lru.head) { |
99 | 0 | lws_cache_ttl_item_heap_t *i = lws_container_of(d, |
100 | 0 | lws_cache_ttl_item_heap_t, |
101 | 0 | list_lru); |
102 | 0 | const char *iname = ((const char *)&item[1]) + item->size; |
103 | 0 | uint8_t *pay = (uint8_t *)&item[1], *end = pay + item->size; |
104 | |
|
105 | 0 | if (*iname == META_ITEM_LEADING) { |
106 | 0 | size_t taglen = strlen(iname); |
107 | | |
108 | | /* |
109 | | * If the item about to be destroyed makes an |
110 | | * appearance on the meta results list, we must kill |
111 | | * the meta result item to force recalc next time |
112 | | */ |
113 | |
|
114 | 0 | while (pay < end) { |
115 | 0 | uint32_t tlen = lws_ser_ru32be(pay + 4); |
116 | |
|
117 | 0 | if (tlen == taglen && |
118 | 0 | !strcmp((const char *)pay + 8, iname)) { |
119 | 0 | #if defined(_DEBUG) |
120 | | /* |
121 | | * Sanity check that the item tag is |
122 | | * really a match for that meta results |
123 | | * item |
124 | | */ |
125 | |
|
126 | 0 | assert (!backing->info.ops->tag_match( |
127 | 0 | backing, iname + 1, tag, 1)); |
128 | 0 | #endif |
129 | 0 | _lws_cache_heap_item_destroy(cache, i); |
130 | 0 | break; |
131 | 0 | } |
132 | 0 | pay += 8 + tlen + 1; |
133 | 0 | } |
134 | | |
135 | 0 | #if defined(_DEBUG) |
136 | | /* |
137 | | * Sanity check that the item tag really isn't a match |
138 | | * for that meta results item |
139 | | */ |
140 | | |
141 | 0 | assert (backing->info.ops->tag_match(backing, iname + 1, |
142 | 0 | tag, 1)); |
143 | 0 | #endif |
144 | 0 | } |
145 | |
|
146 | 0 | } lws_end_foreach_dll_safe(d, d1); |
147 | |
|
148 | 0 | post: |
149 | 0 | _lws_cache_heap_item_destroy(cache, item); |
150 | 0 | } |
151 | | |
152 | | static void |
153 | | lws_cache_item_evict_lru(lws_cache_ttl_lru_t_heap_t *cache) |
154 | 0 | { |
155 | 0 | lws_cache_ttl_item_heap_t *ei; |
156 | |
|
157 | 0 | if (!cache->items_lru.head) |
158 | 0 | return; |
159 | | |
160 | 0 | ei = lws_container_of(cache->items_lru.head, |
161 | 0 | lws_cache_ttl_item_heap_t, list_lru); |
162 | |
|
163 | 0 | lws_cache_heap_item_destroy(cache, ei, 0); |
164 | 0 | } |
165 | | |
166 | | /* |
167 | | * We need to weed out expired entries in the backing file |
168 | | */ |
169 | | |
170 | | static void |
171 | | expiry_cb(lws_sorted_usec_list_t *sul) |
172 | 0 | { |
173 | 0 | lws_cache_ttl_lru_t_heap_t *cache = lws_container_of(sul, |
174 | 0 | lws_cache_ttl_lru_t_heap_t, cache.sul); |
175 | 0 | lws_usec_t now = lws_now_usecs(); |
176 | |
|
177 | 0 | lwsl_cache("%s: %s\n", __func__, cache->cache.info.name); |
178 | |
|
179 | 0 | while (cache->items_expiry.head) { |
180 | 0 | lws_cache_ttl_item_heap_t *item; |
181 | |
|
182 | 0 | item = lws_container_of(cache->items_expiry.head, |
183 | 0 | lws_cache_ttl_item_heap_t, list_expiry); |
184 | |
|
185 | 0 | if (item->expiry > now) |
186 | 0 | return; |
187 | | |
188 | 0 | lws_cache_heap_item_destroy(cache, item, 1); |
189 | 0 | } |
190 | 0 | } |
191 | | |
192 | | /* |
193 | | * Let's figure out what the earliest next expiry is |
194 | | */ |
195 | | |
196 | | static int |
197 | | earliest_expiry(lws_cache_ttl_lru_t_heap_t *cache, lws_usec_t *pearliest) |
198 | 0 | { |
199 | 0 | lws_cache_ttl_item_heap_t *item; |
200 | |
|
201 | 0 | if (!cache->items_expiry.head) |
202 | 0 | return 1; |
203 | | |
204 | 0 | item = lws_container_of(cache->items_expiry.head, |
205 | 0 | lws_cache_ttl_item_heap_t, list_expiry); |
206 | |
|
207 | 0 | *pearliest = item->expiry; |
208 | |
|
209 | 0 | return 0; |
210 | 0 | } |
211 | | |
/*
 * Reassess the expiry timer after any list change: sweep out anything
 * already expired, then schedule the sul for the next earliest expiry, or
 * cancel it if nothing is pending.
 */
static void
update_sul(lws_cache_ttl_lru_t_heap_t *cache)
{
	lws_usec_t earliest;

	/* weed out any newly-expired */
	/* NB: calls the sul callback synchronously, not via the scheduler */
	expiry_cb(&cache->cache.sul);

	/* figure out the next soonest expiring item */
	if (earliest_expiry(cache, &earliest)) {
		/* expiry list is empty: nothing to wait for */
		lws_sul_cancel(&cache->cache.sul);
		return;
	}

	lwsl_debug("%s: setting exp %llu\n", __func__,
		   (unsigned long long)earliest);

	if (earliest)
		lws_cache_schedule(&cache->cache, expiry_cb, earliest);
}
232 | | |
233 | | static lws_cache_ttl_item_heap_t * |
234 | | lws_cache_heap_specific(lws_cache_ttl_lru_t_heap_t *cache, |
235 | | const char *specific_key) |
236 | 0 | { |
237 | 0 | lws_start_foreach_dll(struct lws_dll2 *, d, cache->items_lru.head) { |
238 | 0 | lws_cache_ttl_item_heap_t *item = lws_container_of(d, |
239 | 0 | lws_cache_ttl_item_heap_t, |
240 | 0 | list_lru); |
241 | 0 | const char *iname = ((const char *)&item[1]) + item->size; |
242 | |
|
243 | 0 | if (!strcmp(specific_key, iname)) |
244 | 0 | return item; |
245 | |
|
246 | 0 | } lws_end_foreach_dll(d); |
247 | |
|
248 | 0 | return NULL; |
249 | 0 | } |
250 | | |
/*
 * Heap-level tag matching: plain wildcard comparison of wc against tag.
 * cache and lookup_rules are unused at this cache level.
 */
static int
lws_cache_heap_tag_match(struct lws_cache_ttl_lru *cache, const char *wc,
			 const char *tag, char lookup_rules)
{
	size_t wl = strlen(wc), tl = strlen(tag);

	(void)cache;
	(void)lookup_rules;

	return lws_strcmp_wildcard(wc, wl, tag, tl);
}
257 | | |
258 | | static int |
259 | | lws_cache_heap_lookup(struct lws_cache_ttl_lru *_c, const char *wildcard_key, |
260 | | lws_dll2_owner_t *results_owner) |
261 | 0 | { |
262 | 0 | lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c; |
263 | 0 | size_t sklen = strlen(wildcard_key); |
264 | |
|
265 | 0 | lws_start_foreach_dll(struct lws_dll2 *, d, cache->items_lru.head) { |
266 | 0 | lws_cache_ttl_item_heap_t *item = lws_container_of(d, |
267 | 0 | lws_cache_ttl_item_heap_t, |
268 | 0 | list_lru); |
269 | 0 | const char *iname = ((const char *)&item[1]) + item->size; |
270 | |
|
271 | 0 | if (!lws_strcmp_wildcard(wildcard_key, sklen, iname, |
272 | 0 | strlen(iname))) { |
273 | 0 | size_t ilen = strlen(iname); |
274 | 0 | lws_cache_match_t *m; |
275 | 0 | char hit = 0; |
276 | | |
277 | | /* |
278 | | * It musn't already be on the list from an earlier |
279 | | * cache level |
280 | | */ |
281 | |
|
282 | 0 | lws_start_foreach_dll(struct lws_dll2 *, e, |
283 | 0 | results_owner->head) { |
284 | 0 | lws_cache_match_t *i = lws_container_of(e, |
285 | 0 | lws_cache_match_t, list); |
286 | 0 | if (i->tag_size == ilen && |
287 | 0 | !strcmp(iname, ((const char *)&i[1]))) { |
288 | 0 | hit = 1; |
289 | 0 | break; |
290 | 0 | } |
291 | 0 | } lws_end_foreach_dll(e); |
292 | |
|
293 | 0 | if (!hit) { |
294 | | |
295 | | /* |
296 | | * it's unique, instantiate a record for it |
297 | | */ |
298 | |
|
299 | 0 | m = lws_fi(&_c->info.cx->fic, |
300 | 0 | "cache_lookup_oom") ? NULL : |
301 | 0 | lws_malloc(sizeof(*m) + ilen + 1, |
302 | 0 | __func__); |
303 | 0 | if (!m) { |
304 | 0 | lws_cache_clear_matches(results_owner); |
305 | 0 | return 1; |
306 | 0 | } |
307 | | |
308 | 0 | memset(&m->list, 0, sizeof(m->list)); |
309 | 0 | m->tag_size = ilen; |
310 | 0 | memcpy(&m[1], iname, ilen + 1); |
311 | |
|
312 | 0 | lws_dll2_add_tail(&m->list, results_owner); |
313 | 0 | } |
314 | 0 | } |
315 | |
|
316 | 0 | } lws_end_foreach_dll(d); |
317 | |
|
318 | 0 | return 0; |
319 | 0 | } |
320 | | |
321 | | static int |
322 | | lws_cache_heap_write(struct lws_cache_ttl_lru *_c, const char *specific_key, |
323 | | const uint8_t *source, size_t size, lws_usec_t expiry, |
324 | | void **ppvoid) |
325 | 0 | { |
326 | 0 | lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c; |
327 | 0 | struct lws_cache_ttl_lru *backing = _c; |
328 | 0 | lws_cache_ttl_item_heap_t *item, *ei; |
329 | 0 | size_t kl = strlen(specific_key); |
330 | 0 | char *p; |
331 | |
|
332 | 0 | lwsl_cache("%s: %s: len %d\n", __func__, _c->info.name, (int)size); |
333 | | |
334 | | /* |
335 | | * Is this new tag going to invalidate any existing cached meta-results? |
336 | | * |
337 | | * If so, let's destroy any of those first to recover the heap |
338 | | */ |
339 | |
|
340 | 0 | if (backing->info.parent) |
341 | 0 | backing = backing->info.parent; |
342 | |
|
343 | 0 | lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1, |
344 | 0 | cache->items_lru.head) { |
345 | 0 | lws_cache_ttl_item_heap_t *i = lws_container_of(d, |
346 | 0 | lws_cache_ttl_item_heap_t, |
347 | 0 | list_lru); |
348 | 0 | const char *iname = ((const char *)&i[1]) + i->size; |
349 | |
|
350 | 0 | if (*iname == META_ITEM_LEADING) { |
351 | | |
352 | | /* |
353 | | * If the item about to be added would match any cached |
354 | | * results from before it was added, we have to |
355 | | * invalidate them. To check this, we have to use the |
356 | | * matching rules at the backing store level |
357 | | */ |
358 | |
|
359 | 0 | if (!strcmp(iname + 1, specific_key)) |
360 | 0 | _lws_cache_heap_item_destroy(cache, i); |
361 | 0 | } |
362 | |
|
363 | 0 | } lws_end_foreach_dll_safe(d, d1); |
364 | | |
365 | | |
366 | | /* |
367 | | * Keep us under the limit if possible... note this will always allow |
368 | | * caching a single large item even if it is above the limits |
369 | | */ |
370 | |
|
371 | 0 | while ((cache->cache.info.max_footprint && |
372 | 0 | cache->cache.current_footprint + size > |
373 | 0 | cache->cache.info.max_footprint) || |
374 | 0 | (cache->cache.info.max_items && |
375 | 0 | cache->items_lru.count + 1 > cache->cache.info.max_items)) |
376 | 0 | lws_cache_item_evict_lru(cache); |
377 | | |
378 | | /* remove any existing entry of the same key */ |
379 | |
|
380 | 0 | lws_cache_heap_invalidate(&cache->cache, specific_key); |
381 | |
|
382 | 0 | item = lws_fi(&_c->info.cx->fic, "cache_write_oom") ? NULL : |
383 | 0 | lws_malloc(sizeof(*item) + kl + 1u + size, __func__); |
384 | 0 | if (!item) |
385 | 0 | return 1; |
386 | | |
387 | 0 | cache->cache.current_footprint += item->size; |
388 | | |
389 | | /* only need to zero down our item object */ |
390 | 0 | memset(item, 0, sizeof(*item)); |
391 | |
|
392 | 0 | p = (char *)&item[1]; |
393 | 0 | if (ppvoid) |
394 | 0 | *ppvoid = p; |
395 | | |
396 | | /* copy the payload into place */ |
397 | 0 | if (source) |
398 | 0 | memcpy(p, source, size); |
399 | | |
400 | | /* copy the key string into place, with terminating NUL */ |
401 | 0 | memcpy(p + size, specific_key, kl + 1); |
402 | |
|
403 | 0 | item->expiry = expiry; |
404 | 0 | item->key_len = kl; |
405 | 0 | item->size = size; |
406 | |
|
407 | 0 | if (expiry) { |
408 | | /* adding to expiry is optional, on nonzero expiry */ |
409 | 0 | lws_dll2_add_sorted(&item->list_expiry, &cache->items_expiry, |
410 | 0 | sort_expiry); |
411 | 0 | ei = lws_container_of(cache->items_expiry.head, |
412 | 0 | lws_cache_ttl_item_heap_t, list_expiry); |
413 | 0 | lwsl_debug("%s: setting exp %llu\n", __func__, |
414 | 0 | (unsigned long long)ei->expiry); |
415 | 0 | lws_cache_schedule(&cache->cache, expiry_cb, ei->expiry); |
416 | 0 | } |
417 | | |
418 | | /* always add outselves to head of lru list */ |
419 | 0 | lws_dll2_add_head(&item->list_lru, &cache->items_lru); |
420 | |
|
421 | 0 | return 0; |
422 | 0 | } |
423 | | |
424 | | static int |
425 | | lws_cache_heap_get(struct lws_cache_ttl_lru *_c, const char *specific_key, |
426 | | const void **pdata, size_t *psize) |
427 | 0 | { |
428 | 0 | lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c; |
429 | 0 | lws_cache_ttl_item_heap_t *item; |
430 | |
|
431 | 0 | item = lws_cache_heap_specific(cache, specific_key); |
432 | 0 | if (!item) |
433 | 0 | return 1; |
434 | | |
435 | | /* we are using it, move it to lru head */ |
436 | 0 | lws_dll2_remove(&item->list_lru); |
437 | 0 | lws_dll2_add_head(&item->list_lru, &cache->items_lru); |
438 | |
|
439 | 0 | if (pdata) { |
440 | 0 | *pdata = (const void *)&item[1]; |
441 | 0 | *psize = item->size; |
442 | 0 | } |
443 | |
|
444 | 0 | return 0; |
445 | 0 | } |
446 | | |
/*
 * Remove the item matching specific_key from the cache, if present, along
 * with any cached meta-results (wildcard lookup result sets) whose
 * wildcard would have matched it, so those get recalculated next time.
 *
 * Returns 0 whether or not the key was present.
 */
static int
lws_cache_heap_invalidate(struct lws_cache_ttl_lru *_c, const char *specific_key)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
	struct lws_cache_ttl_lru *backing = _c;
	lws_cache_ttl_item_heap_t *item;
	const void *user;
	size_t size;

	/* not present: nothing to do */
	if (lws_cache_heap_get(_c, specific_key, &user, &size))
		return 0;

	/* meta-result matching must use the backing store's rules */
	if (backing->info.parent)
		backing = backing->info.parent;

	/* recover the item header sitting just before the payload */
	item = (lws_cache_ttl_item_heap_t *)(((uint8_t *)user) - sizeof(*item));

	/*
	 * We must invalidate any cached results that would have included this
	 */

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
				   cache->items_lru.head) {
		lws_cache_ttl_item_heap_t *i = lws_container_of(d,
						lws_cache_ttl_item_heap_t,
						list_lru);
		const char *iname = ((const char *)&i[1]) + i->size;

		if (*iname == META_ITEM_LEADING) {

			/*
			 * If this meta-result's wildcard (stored after the
			 * leading marker byte) matches the key being
			 * invalidated, the cached result set is stale and
			 * must be destroyed to force a recalc
			 */

			if (!backing->info.ops->tag_match(backing, iname + 1,
							  specific_key, 1))
				_lws_cache_heap_item_destroy(cache, i);
		}

	} lws_end_foreach_dll_safe(d, d1);

	lws_cache_heap_item_destroy(cache, item, 0);

	return 0;
}
495 | | |
496 | | static struct lws_cache_ttl_lru * |
497 | | lws_cache_heap_create(const struct lws_cache_creation_info *info) |
498 | 0 | { |
499 | 0 | lws_cache_ttl_lru_t_heap_t *cache; |
500 | |
|
501 | 0 | assert(info->cx); |
502 | 0 | assert(info->name); |
503 | |
|
504 | 0 | cache = lws_fi(&info->cx->fic, "cache_createfail") ? NULL : |
505 | 0 | lws_zalloc(sizeof(*cache), __func__); |
506 | 0 | if (!cache) |
507 | 0 | return NULL; |
508 | | |
509 | 0 | cache->cache.info = *info; |
510 | 0 | if (info->parent) |
511 | 0 | info->parent->child = &cache->cache; |
512 | | |
513 | | // lwsl_cache("%s: create %s\n", __func__, info->name); |
514 | |
|
515 | 0 | return (struct lws_cache_ttl_lru *)cache; |
516 | 0 | } |
517 | | |
518 | | static int |
519 | | destroy_dll(struct lws_dll2 *d, void *user) |
520 | 0 | { |
521 | 0 | lws_cache_ttl_lru_t *_c = (struct lws_cache_ttl_lru *)user; |
522 | 0 | lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c; |
523 | 0 | lws_cache_ttl_item_heap_t *item = |
524 | 0 | lws_container_of(d, lws_cache_ttl_item_heap_t, list_lru); |
525 | |
|
526 | 0 | lws_cache_heap_item_destroy(cache, item, 0); |
527 | |
|
528 | 0 | return 0; |
529 | 0 | } |
530 | | |
531 | | static int |
532 | | lws_cache_heap_expunge(struct lws_cache_ttl_lru *_c) |
533 | 0 | { |
534 | 0 | lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c; |
535 | |
|
536 | 0 | lws_dll2_foreach_safe(&cache->items_lru, cache, destroy_dll); |
537 | |
|
538 | 0 | return 0; |
539 | 0 | } |
540 | | |
541 | | static void |
542 | | lws_cache_heap_destroy(struct lws_cache_ttl_lru **_cache) |
543 | 0 | { |
544 | 0 | lws_cache_ttl_lru_t *c = *_cache; |
545 | 0 | lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)c; |
546 | |
|
547 | 0 | if (!cache) |
548 | 0 | return; |
549 | | |
550 | 0 | lws_sul_cancel(&c->sul); |
551 | |
|
552 | 0 | lws_dll2_foreach_safe(&cache->items_lru, cache, destroy_dll); |
553 | |
|
554 | 0 | lws_free_set_NULL(*_cache); |
555 | 0 | } |
556 | | |
557 | | #if defined(_DEBUG) |
558 | | static int |
559 | | dump_dll(struct lws_dll2 *d, void *user) |
560 | 0 | { |
561 | 0 | lws_cache_ttl_item_heap_t *item = |
562 | 0 | lws_container_of(d, lws_cache_ttl_item_heap_t, list_lru); |
563 | |
|
564 | 0 | lwsl_cache(" %s: size %d, exp %llu\n", |
565 | 0 | (const char *)&item[1] + item->size, |
566 | 0 | (int)item->size, (unsigned long long)item->expiry); |
567 | |
|
568 | 0 | lwsl_hexdump_cache((const char *)&item[1], item->size); |
569 | |
|
570 | 0 | return 0; |
571 | 0 | } |
572 | | |
/*
 * Debug helper: log a summary (item count and earliest pending expiry),
 * then dump every item on the lru list via dump_dll.
 */
static void
lws_cache_heap_debug_dump(struct lws_cache_ttl_lru *_c)
{
	lws_cache_ttl_lru_t_heap_t *cache = (lws_cache_ttl_lru_t_heap_t *)_c;
#if !defined(LWS_WITH_NO_LOGS)
	lws_cache_ttl_item_heap_t *item = NULL;

	/* head of the sorted expiry list is the earliest-expiring item */
	lws_dll2_t *d = cache->items_expiry.head;

	if (d)
		item = lws_container_of(d, lws_cache_ttl_item_heap_t,
					list_expiry);

	lwsl_cache("%s: %s: items %d, earliest %llu\n", __func__,
		   cache->cache.info.name, (int)cache->items_lru.count,
		   item ? (unsigned long long)item->expiry : 0ull);
#endif

	lws_dll2_foreach_safe(&cache->items_lru, cache, dump_dll);
}
593 | | #endif |
594 | | |
/* operations vtable exported for the heap-backed cache implementation */
const struct lws_cache_ops lws_cache_ops_heap = {
	.create			= lws_cache_heap_create,
	.destroy		= lws_cache_heap_destroy,
	.expunge		= lws_cache_heap_expunge,

	.write			= lws_cache_heap_write,
	.tag_match		= lws_cache_heap_tag_match,
	.lookup			= lws_cache_heap_lookup,
	.invalidate		= lws_cache_heap_invalidate,
	.get			= lws_cache_heap_get,
#if defined(_DEBUG)
	.debug_dump		= lws_cache_heap_debug_dump,
#endif
};