/src/varnish-cache/bin/varnishd/cache/cache.h
Line | Count | Source |
1 | | /*- |
2 | | * Copyright (c) 2006 Verdens Gang AS |
3 | | * Copyright (c) 2006-2015 Varnish Software AS |
4 | | * All rights reserved. |
5 | | * |
6 | | * Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
7 | | * |
8 | | * SPDX-License-Identifier: BSD-2-Clause |
9 | | * |
10 | | * Redistribution and use in source and binary forms, with or without |
11 | | * modification, are permitted provided that the following conditions |
12 | | * are met: |
13 | | * 1. Redistributions of source code must retain the above copyright |
14 | | * notice, this list of conditions and the following disclaimer. |
15 | | * 2. Redistributions in binary form must reproduce the above copyright |
16 | | * notice, this list of conditions and the following disclaimer in the |
17 | | * documentation and/or other materials provided with the distribution. |
18 | | * |
19 | | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
20 | | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
21 | | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
22 | | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
23 | | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
24 | | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
25 | | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
26 | | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
27 | | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
28 | | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
29 | | * SUCH DAMAGE. |
30 | | * |
31 | | */ |
32 | | |
33 | | #ifdef VRT_H_INCLUDED |
34 | | # error "vrt.h included before cache.h - they are exclusive" |
35 | | #endif |
36 | | |
37 | | #ifdef CACHE_H_INCLUDED |
38 | | # error "cache.h included multiple times" |
39 | | #endif |
40 | | |
41 | | #include <math.h> |
42 | | #include <pthread.h> |
43 | | #include <stdarg.h> |
44 | | #include <sys/types.h> |
45 | | #include <sys/uio.h> |
46 | | |
47 | | #include "vdef.h" |
48 | | #include "vrt.h" |
49 | | |
50 | | #define CACHE_H_INCLUDED // After vrt.h include. |
51 | | |
52 | | #include "miniobj.h" |
53 | | #include "vas.h" |
54 | | #include "vqueue.h" |
55 | | #include "vtree.h" |
56 | | |
57 | | #include "vapi/vsl_int.h" |
58 | | |
59 | | /*--------------------------------------------------------------------*/ |
60 | | |
61 | | struct vxids { |
62 | | uint64_t vxid; |
63 | | }; |
64 | | |
65 | | typedef struct vxids vxid_t; |
66 | | |
67 | | #define NO_VXID ((struct vxids){0}) |
68 | | #define IS_NO_VXID(x) ((x).vxid == 0) |
69 | | #define VXID_TAG(x) ((uintmax_t)((x).vxid & (VSL_CLIENTMARKER|VSL_BACKENDMARKER))) |
70 | | #define VXID(u) ((uintmax_t)((u.vxid) & VSL_IDENTMASK)) |
71 | | #define IS_SAME_VXID(x, y) ((x).vxid == (y).vxid) |
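/*
 * Usage sketch (editorial addition, not part of the upstream header):
 * vxid_t wraps the transaction id in a struct so it cannot silently be
 * mixed with plain integers; compare and format it through the macros
 * above.  req is assumed to be a valid struct req (defined further down).
 *
 *	vxid_t id = NO_VXID;
 *
 *	if (IS_NO_VXID(id))
 *		id = req->vsl->wid;		// borrow the request's id
 *	VSL(SLT_Debug, id, "vxid=%ju", VXID(id));
 */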
72 | | |
73 | | /*--------------------------------------------------------------------*/ |
74 | | |
75 | | struct body_status { |
76 | | const char *name; |
77 | | int nbr; |
78 | | int avail; |
79 | | int length_known; |
80 | | }; |
81 | | |
82 | | #define BODYSTATUS(U, l, n, a, k) extern const struct body_status BS_##U[1]; |
83 | | #include "tbl/body_status.h" |
84 | | |
85 | | typedef const struct body_status *body_status_t; |
86 | | |
87 | | /*--------------------------------------------------------------------*/ |
88 | | |
89 | | struct stream_close { |
90 | | unsigned magic; |
91 | | #define STREAM_CLOSE_MAGIC 0xc879c93d |
92 | | int idx; |
93 | | unsigned is_err; |
94 | | const char *name; |
95 | | const char *desc; |
96 | | }; |
97 | | extern const struct stream_close SC_NULL[1]; |
98 | | #define SESS_CLOSE(nm, stat, err, desc) \ |
99 | | extern const struct stream_close SC_##nm[1]; |
100 | | #include "tbl/sess_close.h" |
101 | | |
102 | | |
103 | | /*-------------------------------------------------------------------- |
104 | | * Indices into http->hd[] |
105 | | */ |
106 | | enum { |
107 | | #define SLTH(tag, ind, req, resp, sdesc, ldesc) ind, |
108 | | #include "tbl/vsl_tags_http.h" |
109 | | }; |
110 | | |
111 | | /*--------------------------------------------------------------------*/ |
112 | | |
113 | | struct ban; |
114 | | struct ban_proto; |
115 | | struct cli; |
116 | | struct http_conn; |
117 | | struct listen_sock; |
118 | | struct mempool; |
119 | | struct objcore; |
120 | | struct objhead; |
121 | | struct pool; |
122 | | struct req_step; |
123 | | struct sess; |
124 | | struct transport; |
125 | | struct vcf; |
126 | | struct VSC_lck; |
127 | | struct VSC_main; |
128 | | struct VSC_main_wrk; |
129 | | struct worker; |
130 | | struct worker_priv; |
131 | | |
132 | | #define DIGEST_LEN 32 |
133 | | |
134 | | /*--------------------------------------------------------------------*/ |
135 | | |
136 | | struct lock { void *priv; }; // Opaque |
137 | | |
138 | | /*-------------------------------------------------------------------- |
139 | | * Workspace structure for quick memory allocation. |
140 | | */ |
141 | | |
142 | 0 | #define WS_ID_SIZE 4 |
143 | | |
144 | | struct ws { |
145 | | unsigned magic; |
146 | | #define WS_MAGIC 0x35fac554 |
147 | | char id[WS_ID_SIZE]; /* identity */ |
148 | | char *s; /* (S)tart of buffer */ |
149 | | char *f; /* (F)ree/front pointer */ |
150 | | char *r; /* (R)eserved length */ |
151 | | char *e; /* (E)nd of buffer */ |
152 | | }; |
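/*
 * Usage sketch (editorial addition, not part of the upstream header):
 * the workspace is a bump allocator.  WS_Alloc() hands out memory that is
 * only reclaimed wholesale by WS_Reset() back to an earlier WS_Snapshot(),
 * and a temporary reservation can be taken with WS_ReserveSize() /
 * WS_ReserveAll() and trimmed with WS_Release().  All functions used below
 * are declared further down in this header; the sizes are made up.
 *
 *	uintptr_t snap = WS_Snapshot(req->ws);	// remember the fill level
 *	char *p = WS_Alloc(req->ws, 128);	// lives until WS_Reset()
 *	if (p == NULL)
 *		return;				// workspace overflow
 *	unsigned avail = WS_ReserveAll(req->ws);
 *	char *front = WS_Reservation(req->ws);	// scratch space, avail bytes
 *	WS_Release(req->ws, 0);			// keep none of the reservation
 *	WS_Reset(req->ws, snap);		// roll back to the snapshot
 */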
153 | | |
154 | | /*-------------------------------------------------------------------- |
155 | | * |
156 | | */ |
157 | | |
158 | | struct http { |
159 | | unsigned magic; |
160 | | #define HTTP_MAGIC 0x6428b5c9 |
161 | | |
162 | | uint16_t shd; /* Size of hd space */ |
163 | | txt *hd; |
164 | | unsigned char *hdf; |
165 | | #define HDF_FILTER (1 << 0) /* Filtered by Connection */ |
166 | | |
167 | | /* NB: ->nhd and below zeroed/initialized by http_Teardown */ |
168 | | uint16_t nhd; /* Next free hd */ |
169 | | |
170 | | enum VSL_tag_e logtag; /* Must be SLT_*Method */ |
171 | | struct vsl_log *vsl; |
172 | | |
173 | | struct ws *ws; |
174 | | uint16_t status; |
175 | | uint8_t protover; |
176 | | }; |
177 | | |
178 | | /*--------------------------------------------------------------------*/ |
179 | | |
180 | | struct acct_req { |
181 | | #define ACCT(foo) uint64_t foo; |
182 | | #include "tbl/acct_fields_req.h" |
183 | | }; |
184 | | |
185 | | /*--------------------------------------------------------------------*/ |
186 | | |
187 | | struct acct_bereq { |
188 | | #define ACCT(foo) uint64_t foo; |
189 | | #include "tbl/acct_fields_bereq.h" |
190 | | }; |
191 | | |
192 | | /*--------------------------------------------------------------------*/ |
193 | | |
194 | | struct vsl_log { |
195 | | uint32_t *wlb, *wlp, *wle; |
196 | | unsigned wlr; |
197 | | vxid_t wid; |
198 | | }; |
199 | | |
200 | | /*--------------------------------------------------------------------*/ |
201 | | |
202 | | VRBT_HEAD(vrt_privs, vrt_priv); |
203 | | |
204 | | /* Worker pool stuff -------------------------------------------------*/ |
205 | | |
206 | | typedef void task_func_t(struct worker *wrk, void *priv); |
207 | | |
208 | | struct pool_task { |
209 | | VTAILQ_ENTRY(pool_task) list; |
210 | | task_func_t *func; |
211 | | void *priv; |
212 | | }; |
213 | | |
214 | | /* |
 215 | | * Tasks are taken off the queues in this order: |
 216 | | * |
 217 | | * TASK_QUEUE_{REQ|STR} are new reqs (H1/H2) and are subject to the queue limit. |
 218 | | * |
 219 | | * TASK_QUEUE_RUSH is reqs returning from the waiting list |
220 | | * |
221 | | * NOTE: When changing the number of classes, update places marked with |
222 | | * TASK_QUEUE_RESERVE in params.h |
223 | | */ |
224 | | enum task_prio { |
225 | | TASK_QUEUE_BO, |
226 | | TASK_QUEUE_RUSH, |
227 | | TASK_QUEUE_REQ, |
228 | | TASK_QUEUE_STR, |
229 | | TASK_QUEUE_VCA, |
230 | | TASK_QUEUE_BG, |
231 | | TASK_QUEUE__END |
232 | | }; |
233 | | |
234 | | #define TASK_QUEUE_HIGHEST_PRIORITY TASK_QUEUE_BO |
235 | | #define TASK_QUEUE_RESERVE TASK_QUEUE_BG |
236 | | #define TASK_QUEUE_LIMITED(prio) \ |
237 | | (prio == TASK_QUEUE_REQ || prio == TASK_QUEUE_STR) |
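/*
 * Editorial note (not part of the upstream header): only the queues for
 * new client requests are subject to the queue limit, which code can test
 * with TASK_QUEUE_LIMITED().  nqueued, queue_limit and dropped below are
 * hypothetical names.
 *
 *	if (TASK_QUEUE_LIMITED(prio) && nqueued >= queue_limit)
 *		dropped++;			// refuse to queue the task
 */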
238 | | |
239 | | /*--------------------------------------------------------------------*/ |
240 | | |
241 | | struct worker { |
242 | | unsigned magic; |
243 | | #define WORKER_MAGIC 0x6391adcf |
244 | | int strangelove; |
245 | | struct worker_priv *wpriv; |
246 | | struct pool *pool; |
247 | | struct VSC_main_wrk *stats; |
248 | | struct vsl_log *vsl; // borrowed from req/bo |
249 | | |
250 | | struct pool_task task[1]; |
251 | | |
252 | | vtim_real lastused; |
253 | | |
254 | | pthread_cond_t cond; |
255 | | |
256 | | struct ws aws[1]; |
257 | | |
258 | | unsigned cur_method; |
259 | | unsigned seen_methods; |
260 | | |
261 | | struct wrk_vpi *vpi; |
262 | | }; |
263 | | |
264 | | /* Stored object ----------------------------------------------------- |
265 | | * This is just to encapsulate the fields owned by the stevedore |
266 | | */ |
267 | | |
268 | | struct storeobj { |
269 | | const struct stevedore *stevedore; |
270 | | void *priv; |
271 | | uint64_t priv2; |
272 | | }; |
273 | | |
274 | | /* Busy Objcore structure -------------------------------------------- |
275 | | * |
276 | | */ |
277 | | |
278 | | /* |
279 | | * The macro-states we expose outside the fetch code |
280 | | */ |
281 | | enum boc_state_e { |
282 | | #define BOC_STATE(U, l) BOS_##U, |
283 | | #include "tbl/boc_state.h" |
284 | | }; |
285 | | |
286 | | // cache_obj.h vai notify |
287 | | struct vai_qe; |
288 | | VSLIST_HEAD(vai_q_head, vai_qe); |
289 | | |
290 | | struct boc { |
291 | | unsigned magic; |
292 | | #define BOC_MAGIC 0x70c98476 |
293 | | unsigned refcount; |
294 | | struct lock mtx; |
295 | | pthread_cond_t cond; |
296 | | void *stevedore_priv; |
297 | | enum boc_state_e state; |
298 | | uint8_t *vary; |
299 | | uint64_t fetched_so_far; |
300 | | uint64_t delivered_so_far; |
301 | | uint64_t transit_buffer; |
302 | | struct vai_q_head vai_q_head; |
303 | | }; |
304 | | |
305 | | /* Object core structure --------------------------------------------- |
306 | | * Objects have sideways references in the binary heap and the LRU list |
307 | | * and we want to avoid paging in a lot of objects just to move them up |
 308 | | * or down the binheap or to move an unrelated object on the LRU list. |
309 | | * To avoid this we use a proxy object, objcore, to hold the relevant |
 310 | | * housekeeping fields of an object. |
311 | | */ |
312 | | |
313 | | enum obj_attr { |
314 | | #define OBJ_FIXATTR(U, l, s) OA_##U, |
315 | | #define OBJ_VARATTR(U, l) OA_##U, |
316 | | #define OBJ_AUXATTR(U, l) OA_##U, |
317 | | #include "tbl/obj_attr.h" |
318 | | OA__MAX, |
319 | | }; |
320 | | |
321 | | enum obj_flags { |
322 | | #define OBJ_FLAG(U, l, v) OF_##U = v, |
323 | | #include "tbl/obj_attr.h" |
324 | | }; |
325 | | |
326 | | enum oc_flags { |
327 | | #define OC_FLAG(U, l, v) OC_F_##U = v, |
328 | | #include "tbl/oc_flags.h" |
329 | | }; |
330 | | |
331 | | #define OC_F_TRANSIENT (OC_F_PRIVATE | OC_F_HFM | OC_F_HFP) |
332 | | |
333 | | enum oc_exp_flags { |
334 | | #define OC_EXP_FLAG(U, l, v) OC_EF_##U = v, |
335 | | #include "tbl/oc_exp_flags.h" |
336 | | }; |
337 | | |
338 | | struct objcore { |
339 | | unsigned magic; |
340 | | #define OBJCORE_MAGIC 0x4d301302 |
341 | | int refcnt; |
342 | | struct storeobj stobj[1]; |
343 | | struct objhead *objhead; |
344 | | struct boc *boc; |
345 | | vtim_real timer_when; |
346 | | VCL_INT hits; |
347 | | |
348 | | |
349 | | vtim_real t_origin; |
350 | | float ttl; |
351 | | float grace; |
352 | | float keep; |
353 | | |
354 | | uint8_t flags; |
355 | | |
356 | | uint8_t exp_flags; |
357 | | |
358 | | uint16_t oa_present; |
359 | | |
360 | | unsigned timer_idx; // XXX 4Gobj limit |
361 | | vtim_real last_lru; |
362 | | VTAILQ_ENTRY(objcore) hsh_list; |
363 | | VTAILQ_ENTRY(objcore) lru_list; |
364 | | VTAILQ_ENTRY(objcore) ban_list; |
365 | | VSTAILQ_ENTRY(objcore) exp_list; |
366 | | struct ban *ban; |
367 | | }; |
368 | | |
369 | | /* Busy Object structure --------------------------------------------- |
370 | | * |
 371 | | * The busyobj structure captures the aspects of an object that are |
 372 | | * relevant while it is being fetched from the backend. |
373 | | * |
374 | | * One of these aspects will be how much has been fetched, which |
375 | | * streaming delivery will make use of. |
376 | | */ |
377 | | |
378 | | enum director_state_e { |
379 | | DIR_S_NULL = 0, |
380 | | DIR_S_HDRS = 1, |
381 | | DIR_S_BODY = 2, |
382 | | }; |
383 | | |
384 | | struct busyobj { |
385 | | unsigned magic; |
386 | | #define BUSYOBJ_MAGIC 0x23b95567 |
387 | | |
388 | | char *end; |
389 | | |
390 | | unsigned max_retries; |
391 | | unsigned retries; |
392 | | struct req *req; |
393 | | struct sess *sp; |
394 | | struct worker *wrk; |
395 | | |
396 | | /* beresp.body */ |
397 | | struct vfp_ctx *vfc; |
398 | | const char *vfp_filter_list; |
399 | | /* bereq.body */ |
400 | | const char *vdp_filter_list; |
401 | | |
402 | | struct ws ws[1]; |
403 | | uintptr_t ws_bo; |
404 | | struct http *bereq0; |
405 | | struct http *bereq; |
406 | | struct http *beresp; |
407 | | struct objcore *bereq_body; |
408 | | struct objcore *stale_oc; |
409 | | struct objcore *fetch_objcore; |
410 | | |
411 | | const char *no_retry; |
412 | | |
413 | | struct http_conn *htc; |
414 | | |
415 | | struct pool_task fetch_task[1]; |
416 | | |
417 | | const char *err_reason; |
418 | | enum director_state_e director_state; |
419 | | uint16_t err_code; |
420 | | |
421 | | #define BERESP_FLAG(l, r, w, f, d) unsigned l:1; |
422 | | #define BEREQ_FLAG(l, r, w, d) BERESP_FLAG(l, r, w, 0, d) |
423 | | #include "tbl/bereq_flags.h" |
424 | | #include "tbl/beresp_flags.h" |
425 | | |
426 | | |
427 | | /* Timeouts */ |
428 | | vtim_dur connect_timeout; |
429 | | vtim_dur first_byte_timeout; |
430 | | vtim_dur between_bytes_timeout; |
431 | | vtim_dur task_deadline; |
432 | | |
433 | | /* Timers */ |
434 | | vtim_real t_first; /* First timestamp logged */ |
435 | | vtim_real t_resp; /* response received */ |
436 | | vtim_real t_prev; /* Previous timestamp logged */ |
437 | | |
438 | | /* Acct */ |
439 | | struct acct_bereq acct; |
440 | | |
441 | | const struct stevedore *storage; |
442 | | const struct director *director_req; |
443 | | const struct director *director_resp; |
444 | | struct vcl *vcl; |
445 | | |
446 | | struct vsl_log vsl[1]; |
447 | | |
448 | | uint8_t digest[DIGEST_LEN]; |
449 | | struct vrt_privs privs[1]; |
450 | | |
451 | | const char *client_identity; |
452 | | }; |
453 | | |
454 | | #define BUSYOBJ_TMO(bo, pfx, tmo) \ |
455 | | (isnan((bo)->tmo) ? cache_param->pfx##tmo : (bo)->tmo) |
456 | | |
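/*
 * Editorial note (not part of the upstream header): busyobj timeouts
 * default to NaN, and BUSYOBJ_TMO() falls back to the global parameter
 * cache_param->pfx##tmo (cache_param is declared elsewhere).  A
 * hypothetical use:
 *
 *	vtim_dur d = BUSYOBJ_TMO(bo, , connect_timeout);
 *	// expands to:
 *	// isnan(bo->connect_timeout) ? cache_param->connect_timeout
 *	//                            : bo->connect_timeout
 *
 * SESS_TMO() further down follows the same pattern for session timeouts.
 */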
457 | | |
458 | | /*--------------------------------------------------------------------*/ |
459 | | |
460 | | struct reqtop { |
461 | | unsigned magic; |
462 | | #define REQTOP_MAGIC 0x57fbda52 |
463 | | struct req *topreq; |
464 | | struct vcl *vcl0; |
465 | | struct vrt_privs privs[1]; |
466 | | }; |
467 | | |
468 | | struct req { |
469 | | unsigned magic; |
470 | | #define REQ_MAGIC 0xfb4abf6d |
471 | | |
472 | | unsigned esi_level; |
473 | | body_status_t req_body_status; |
474 | | stream_close_t doclose; |
475 | | unsigned restarts; |
476 | | unsigned max_restarts; |
477 | | |
478 | | const struct req_step *req_step; |
479 | | struct reqtop *top; /* esi_level == 0 request */ |
480 | | |
481 | | uint16_t err_code; |
482 | | #define REQ_FLAG(l, r, w, d) unsigned l:1; |
483 | | #include "tbl/req_flags.h" |
484 | | |
485 | | const char *err_reason; |
486 | | |
487 | | struct sess *sp; |
488 | | struct worker *wrk; |
489 | | struct pool_task task[1]; |
490 | | |
491 | | const struct transport *transport; |
492 | | void *transport_priv; |
493 | | |
494 | | VTAILQ_ENTRY(req) w_list; |
495 | | |
496 | | struct objcore *body_oc; |
497 | | |
498 | | /* The busy objhead we sleep on */ |
499 | | struct objhead *hash_objhead; |
500 | | |
501 | | /* Built Vary string == workspace reservation */ |
502 | | uint8_t *vary_b; |
503 | | uint8_t *vary_e; |
504 | | |
505 | | uint8_t digest[DIGEST_LEN]; |
506 | | |
507 | | vtim_dur d_ttl; |
508 | | vtim_dur d_grace; |
509 | | |
510 | | const struct stevedore *storage; |
511 | | |
512 | | const struct director *director_hint; |
513 | | struct vcl *vcl; |
514 | | |
515 | | uintptr_t ws_req; /* WS above request data */ |
516 | | |
517 | | /* Timestamps */ |
518 | | vtim_real t_first; /* First timestamp logged */ |
519 | | vtim_real t_prev; /* Previous timestamp logged */ |
520 | | vtim_real t_req; /* Headers complete */ |
521 | | vtim_real t_resp; /* Entry to last deliver/synth */ |
522 | | |
523 | | struct http_conn *htc; |
524 | | struct vfp_ctx *vfc; |
525 | | const char *client_identity; |
526 | | |
527 | | /* HTTP request */ |
528 | | struct http *http; |
529 | | struct http *http0; |
530 | | |
531 | | /* HTTP response */ |
532 | | struct http *resp; |
533 | | intmax_t resp_len; |
534 | | |
535 | | struct ws ws[1]; |
536 | | struct objcore *objcore; |
537 | | struct objcore *stale_oc; |
538 | | struct boc *boc; /* valid during cnt_transmit */ |
539 | | |
540 | | /* resp.body */ |
541 | | struct vdp_ctx *vdc; |
542 | | const char *vdp_filter_list; |
543 | | /* req.body */ |
544 | | const char *vfp_filter_list; |
545 | | |
546 | | /* Transaction VSL buffer */ |
547 | | struct vsl_log vsl[1]; |
548 | | |
549 | | /* Temporary accounting */ |
550 | | struct acct_req acct; |
551 | | |
552 | | struct vrt_privs privs[1]; |
553 | | |
554 | | struct vcf *vcf; |
555 | | }; |
556 | | |
557 | | #define IS_TOPREQ(req) ((req)->top->topreq == (req)) |
558 | | |
559 | | /*-------------------------------------------------------------------- |
560 | | * Struct sess is a high memory-load structure because sessions typically |
 561 | | * hang around the waiter for a relatively long time. |
562 | | * |
563 | | * The size goal for struct sess + struct memitem is <512 bytes |
564 | | * |
 565 | | * Getting down to the next relevant size (<256 bytes, because of how |
 566 | | * malloc works) is not realistic without a lot of code changes. |
567 | | */ |
568 | | |
569 | | enum sess_attr { |
570 | | #define SESS_ATTR(UP, low, typ, len) SA_##UP, |
571 | | #include "tbl/sess_attr.h" |
572 | | SA_LAST |
573 | | }; |
574 | | |
575 | | struct sess { |
576 | | unsigned magic; |
577 | | #define SESS_MAGIC 0x2c2f9c5a |
578 | | |
579 | | uint16_t sattr[SA_LAST]; |
580 | | struct listen_sock *listen_sock; |
581 | | int refcnt; |
582 | | int fd; |
583 | | vxid_t vxid; |
584 | | |
585 | | struct lock mtx; |
586 | | |
587 | | struct pool *pool; |
588 | | |
589 | | struct ws ws[1]; |
590 | | |
591 | | vtim_real t_open; /* fd accepted */ |
592 | | vtim_real t_idle; /* fd accepted or resp sent */ |
593 | | vtim_dur timeout_idle; |
594 | | vtim_dur timeout_linger; |
595 | | vtim_dur send_timeout; |
596 | | vtim_dur idle_send_timeout; |
597 | | }; |
598 | | |
599 | | #define SESS_TMO(sp, tmo) \ |
600 | | (isnan((sp)->tmo) ? cache_param->tmo : (sp)->tmo) |
601 | | |
602 | | /* Prototypes etc ----------------------------------------------------*/ |
603 | | |
604 | | |
605 | | /* cache_ban.c */ |
606 | | |
607 | | /* for constructing bans */ |
608 | | struct ban_proto *BAN_Build(void); |
609 | | const char *BAN_AddTest(struct ban_proto *, |
610 | | const char *, const char *, const char *); |
611 | | const char *BAN_Commit(struct ban_proto *b); |
612 | | void BAN_Abandon(struct ban_proto *b); |
613 | | |
614 | | /* cache_cli.c [CLI] */ |
615 | | extern pthread_t cli_thread; |
616 | | #define IS_CLI() (pthread_equal(pthread_self(), cli_thread)) |
617 | | #define ASSERT_CLI() do {assert(IS_CLI());} while (0) |
618 | | |
619 | | /* cache_http.c */ |
620 | | unsigned HTTP_estimate(unsigned nhttp); |
621 | | void HTTP_Clone(struct http *to, const struct http * const fm); |
622 | | void HTTP_Dup(struct http *to, const struct http * const fm); |
623 | | struct http *HTTP_create(void *p, uint16_t nhttp, unsigned); |
624 | | const char *http_Status2Reason(unsigned, const char **); |
625 | | int http_IsHdr(const txt *hh, hdr_t hdr); |
626 | | unsigned http_EstimateWS(const struct http *fm, unsigned how); |
627 | | void http_PutResponse(struct http *to, const char *proto, uint16_t status, |
628 | | const char *response); |
629 | | void http_FilterReq(struct http *to, const struct http *fm, unsigned how); |
630 | | void HTTP_Encode(const struct http *fm, uint8_t *, unsigned len, unsigned how); |
631 | | int HTTP_Decode(struct http *to, const uint8_t *fm); |
632 | | void http_ForceHeader(struct http *to, hdr_t, const char *val); |
633 | | void http_AppendHeader(struct http *to, hdr_t, const char *val); |
634 | | void http_PrintfHeader(struct http *to, const char *fmt, ...) |
635 | | v_printflike_(2, 3); |
636 | | void http_TimeHeader(struct http *to, const char *fmt, vtim_real now); |
637 | | const char * http_ViaHeader(void); |
638 | | void http_Proto(struct http *to); |
639 | | void http_SetHeader(struct http *to, const char *header); |
640 | | void http_SetH(struct http *to, unsigned n, const char *header); |
641 | | void http_ForceField(struct http *to, unsigned n, const char *t); |
642 | | void HTTP_Setup(struct http *, struct ws *, struct vsl_log *, enum VSL_tag_e); |
643 | | void http_Teardown(struct http *ht); |
644 | | int http_GetHdr(const struct http *hp, hdr_t, const char **ptr); |
645 | | int http_GetHdrToken(const struct http *hp, hdr_t, |
646 | | const char *token, const char **pb, const char **pe); |
647 | | int http_GetHdrField(const struct http *hp, hdr_t, |
648 | | const char *field, const char **ptr); |
649 | | double http_GetHdrQ(const struct http *hp, hdr_t, const char *field); |
650 | | ssize_t http_GetContentLength(const struct http *hp); |
651 | | ssize_t http_GetContentRange(const struct http *hp, ssize_t *lo, ssize_t *hi); |
652 | | const char * http_GetRange(const struct http *hp, ssize_t *lo, ssize_t *hi, |
653 | | ssize_t len); |
654 | | uint16_t http_GetStatus(const struct http *hp); |
655 | | int http_IsStatus(const struct http *hp, int); |
656 | | void http_SetStatus(struct http *to, uint16_t status, const char *reason); |
657 | | const char *http_GetMethod(const struct http *hp); |
658 | | int http_HdrIs(const struct http *hp, hdr_t, const char *val); |
659 | | void http_CopyHome(const struct http *hp); |
660 | | void http_Unset(struct http *hp, hdr_t); |
661 | | unsigned http_CountHdr(const struct http *hp, hdr_t); |
662 | | void http_CollectHdr(struct http *hp, hdr_t); |
663 | | void http_CollectHdrSep(struct http *hp, hdr_t, const char *sep); |
664 | | void http_VSL_log(const struct http *hp); |
665 | | void HTTP_Merge(struct worker *, struct objcore *, struct http *to); |
666 | | uint16_t HTTP_GetStatusPack(struct worker *, struct objcore *oc); |
667 | | int HTTP_IterHdrPack(struct worker *, struct objcore *, const char **); |
668 | | #define HTTP_FOREACH_PACK(wrk, oc, ptr) \ |
669 | | for ((ptr) = NULL; HTTP_IterHdrPack(wrk, oc, &(ptr));) |
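/*
 * Usage sketch (editorial addition): iterate the header lines of an
 * object stored in packed form, e.g. to log them.  wrk and oc are assumed
 * to be a valid worker and objcore.
 *
 *	const char *hdr;
 *
 *	HTTP_FOREACH_PACK(wrk, oc, hdr)
 *		VSLb(wrk->vsl, SLT_Debug, "%s", hdr);	// one "Name: value" line
 */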
670 | | const char *HTTP_GetHdrPack(struct worker *, struct objcore *, hdr_t); |
671 | | stream_close_t http_DoConnection(struct http *hp, stream_close_t sc_close); |
672 | | int http_IsFiltered(const struct http *hp, unsigned u, unsigned how); |
673 | | |
674 | | #define HTTPH_R_PASS (1 << 0) /* Request (c->b) in pass mode */ |
675 | | #define HTTPH_R_FETCH (1 << 1) /* Request (c->b) for fetch */ |
676 | | #define HTTPH_A_INS (1 << 2) /* Response (b->o) for insert */ |
677 | | #define HTTPH_A_PASS (1 << 3) /* Response (b->o) for pass */ |
678 | | #define HTTPH_C_SPECIFIC (1 << 4) /* Connection-specific */ |
679 | | |
680 | | #define HTTPH(a, b, c) extern hdr_t b; |
681 | | #include "tbl/http_headers.h" |
682 | | |
683 | | extern hdr_t H__Status; |
684 | | extern hdr_t H__Proto; |
685 | | extern hdr_t H__Reason; |
686 | | |
687 | | // rfc7233,l,1207,1208 |
688 | | #define http_tok_eq(s1, s2) (!vct_casecmp(s1, s2)) |
689 | | #define http_tok_at(s1, s2, l) (!vct_caselencmp(s1, s2, l)) |
690 | | #define http_ctok_at(s, cs) (!vct_caselencmp(s, cs, sizeof(cs) - 1)) |
691 | | |
692 | | // rfc7230,l,1037,1038 |
693 | | #define http_scheme_at(str, tok) http_ctok_at(str, #tok "://") |
694 | | |
695 | | // rfc7230,l,1144,1144 |
696 | | // rfc7231,l,1156,1158 |
697 | | #define http_method_eq(str, tok) (!strcmp(str, #tok)) |
698 | | |
699 | | // rfc7230,l,1222,1222 |
700 | | // rfc7230,l,2848,2848 |
701 | | // rfc7231,l,3883,3885 |
702 | | // rfc7234,l,1339,1340 |
703 | | // rfc7234,l,1418,1419 |
704 | | #define http_hdr_eq(s1, s2) http_tok_eq(s1, s2) |
705 | | #define http_hdr_at(s1, s2, l) http_tok_at(s1, s2, l) |
706 | | |
707 | | // rfc7230,l,1952,1952 |
708 | | // rfc7231,l,604,604 |
709 | | #define http_coding_eq(str, tok) http_tok_eq(str, #tok) |
710 | | |
711 | | // rfc7231,l,1864,1864 |
712 | | #define http_expect_eq(str, tok) http_tok_eq(str, #tok) |
713 | | |
714 | | // rfc7233,l,1207,1208 |
715 | | #define http_range_at(str, tok, l) http_tok_at(str, #tok, l) |
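/*
 * Usage sketch (editorial addition): the *_eq/*_at helpers take the token
 * as a bare identifier which is stringified, and compare case-insensitively
 * where the RFCs require it (vct_casecmp()/vct_caselencmp() come from
 * vct.h).  handle_get, enc and is_gzip are hypothetical names.
 *
 *	if (http_method_eq(http_GetMethod(hp), GET))
 *		handle_get(hp);			// literal, case-sensitive match
 *	if (http_coding_eq(enc, gzip))
 *		is_gzip = 1;			// case-insensitive token match
 */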
716 | | |
717 | | /* cache_lck.c */ |
718 | | |
719 | | /* Internal functions, call only through macros below */ |
720 | | void Lck__Lock(struct lock *lck, const char *p, int l); |
721 | | void Lck__Unlock(struct lock *lck, const char *p, int l); |
722 | | int Lck__Trylock(struct lock *lck, const char *p, int l); |
723 | | void Lck__New(struct lock *lck, struct VSC_lck *, const char *); |
724 | | int Lck__Held(const struct lock *lck); |
725 | | int Lck__Owned(const struct lock *lck); |
726 | | extern pthread_mutexattr_t mtxattr_errorcheck; |
727 | | |
728 | | /* public interface: */ |
729 | | void Lck_Delete(struct lock *lck); |
730 | | int Lck_CondWaitUntil(pthread_cond_t *, struct lock *, vtim_real when); |
731 | | int Lck_CondWait(pthread_cond_t *, struct lock *); |
732 | | int Lck_CondWaitTimeout(pthread_cond_t *, struct lock *, vtim_dur timeout); |
733 | | |
734 | | #define Lck_New(a, b) Lck__New(a, b, #b) |
735 | | #define Lck_Lock(a) Lck__Lock(a, __func__, __LINE__) |
736 | | #define Lck_Unlock(a) Lck__Unlock(a, __func__, __LINE__) |
737 | | #define Lck_Trylock(a) Lck__Trylock(a, __func__, __LINE__) |
738 | | #define Lck_AssertHeld(a) \ |
739 | | do { \ |
740 | | assert(Lck__Held(a)); \ |
741 | | assert(Lck__Owned(a)); \ |
742 | | } while (0) |
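/*
 * Usage sketch (editorial addition): the Lck_* wrappers pass the calling
 * function and line for diagnostics and feed the per-class lock statistics
 * set up with Lck_CreateClass()/Lck_New().  A hypothetical critical
 * section on the session mutex:
 *
 *	Lck_Lock(&sp->mtx);
 *	Lck_AssertHeld(&sp->mtx);
 *	// ... touch fields protected by sp->mtx ...
 *	Lck_Unlock(&sp->mtx);
 */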
743 | | |
744 | | struct VSC_lck *Lck_CreateClass(struct vsc_seg **, const char *); |
745 | | void Lck_DestroyClass(struct vsc_seg **); |
746 | | |
747 | | #define LOCK(nam) extern struct VSC_lck *lck_##nam; |
748 | | #include "tbl/locks.h" |
749 | | |
750 | | /* cache_obj.c */ |
751 | | |
752 | | int ObjHasAttr(struct worker *, struct objcore *, enum obj_attr); |
753 | | const void *ObjGetAttr(struct worker *, struct objcore *, enum obj_attr, |
754 | | ssize_t *len); |
755 | | |
756 | | typedef int objiterate_f(void *priv, unsigned flush, |
757 | | const void *ptr, ssize_t len); |
758 | | #define OBJ_ITER_FLUSH 0x01 |
759 | | #define OBJ_ITER_END 0x02 |
760 | | |
761 | | int ObjIterate(struct worker *, struct objcore *, |
762 | | void *priv, objiterate_f *func, int final); |
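/*
 * Usage sketch (editorial addition): ObjIterate() invokes the callback
 * once per stretch of body bytes; flush carries OBJ_ITER_FLUSH/OBJ_ITER_END
 * and a non-zero return from the callback stops the iteration.  A
 * hypothetical callback that just sums the body length:
 *
 *	static int
 *	count_bytes(void *priv, unsigned flush, const void *ptr, ssize_t len)
 *	{
 *		ssize_t *sum = priv;
 *
 *		(void)ptr;
 *		(void)flush;
 *		*sum += len;
 *		return (0);
 *	}
 *
 *	ssize_t n = 0;
 *	ObjIterate(wrk, oc, &n, count_bytes, 0);
 */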
763 | | |
764 | | vxid_t ObjGetXID(struct worker *, struct objcore *); |
765 | | uint64_t ObjGetLen(struct worker *, struct objcore *); |
766 | | int ObjGetDouble(struct worker *, struct objcore *, enum obj_attr, double *); |
767 | | int ObjGetU64(struct worker *, struct objcore *, enum obj_attr, uint64_t *); |
768 | | int ObjCheckFlag(struct worker *, struct objcore *, enum obj_flags of); |
769 | | |
770 | | /*==================================================================== |
771 | | * ObjVAI...(): Asynchronous Iteration |
772 | | * |
773 | | * see comments in cache_obj.c for usage |
774 | | */ |
775 | | |
776 | | typedef void *vai_hdl; |
777 | | typedef void vai_notify_cb(vai_hdl, void *priv); |
778 | | |
779 | | |
780 | | /* |
781 | | * VSCARAB: Varnish SCatter ARAy of Buffers: |
782 | | * |
783 | | * an array of viovs, elsewhere also called an siov or sarray |
784 | | */ |
785 | | struct viov { |
786 | | uint64_t lease; |
787 | | struct iovec iov; |
788 | | }; |
789 | | |
790 | | struct vscarab { |
791 | | unsigned magic; |
792 | | #define VSCARAB_MAGIC 0x05ca7ab0 |
793 | | unsigned flags; |
794 | | #define VSCARAB_F_END 1 // last viov is last overall |
795 | | unsigned capacity; |
796 | | unsigned used; |
797 | | struct viov s[] v_counted_by_(capacity); |
798 | | }; |
799 | | |
800 | | // VFLA: starting generic container-with-flexible-array-member macros |
801 | | // aka "struct hack" |
802 | | // |
803 | | // type : struct name |
804 | | // name : a pointer to struct type |
805 | | // mag : the magic value for this VFLA |
806 | | // cptr : pointer to container struct (aka "head") |
807 | | // fam : member name of the flexible array member |
808 | | // cap : capacity |
809 | | // |
810 | | // common properties of all VFLAs: |
811 | | // - are a miniobj (have magic as the first element) |
812 | | // - capacity member is the fam capacity |
813 | | // - used member is the number of fam elements used |
814 | | // |
815 | | // VFLA_SIZE ignores the cap == 0 case, we assert in _INIT |
 816 | | // offsetof ref: https://gustedt.wordpress.com/2011/03/14/flexible-array-member/ |
817 | | //lint -emacro(413, VFLA_SIZE) |
818 | | //lint -emacro(545, VFLA_SIZE) bsd offsetof() seems to be using & |
819 | | #define VFLA_SIZE(type, fam, cap) (offsetof(struct type, fam) + \ |
820 | | (cap) * sizeof(((struct type *)0)->fam[0])) |
821 | | #define VFLA_INIT_(type, cptr, mag, fam, cap, save) do { \ |
822 | | unsigned save = (cap); \ |
823 | | AN(save); \ |
824 | | memset((cptr), 0, VFLA_SIZE(type, fam, save)); \ |
825 | | (cptr)->magic = (mag); \ |
826 | | (cptr)->capacity = (save); \ |
827 | | } while (0) |
828 | | #define VFLA_INIT(type, cptr, mag, fam, cap) \ |
829 | | VFLA_INIT_(type, cptr, mag, fam, cap, VUNIQ_NAME(save)) |
830 | | // declare, allocate and initialize a local VFLA |
831 | | // the additional VLA buf declaration avoids |
832 | | // "Variable-sized object may not be initialized" |
833 | | #define VFLA_LOCAL_(type, name, mag, fam, cap, bufname) \ |
834 | | char bufname[VFLA_SIZE(type, fam, cap)]; \ |
835 | | struct type *name = (void *)bufname; \ |
836 | | VFLA_INIT(type, name, mag, fam, cap) |
837 | | #define VFLA_LOCAL(type, name, mag, fam, cap) \ |
838 | | VFLA_LOCAL_(type, name, mag, fam, cap, VUNIQ_NAME(buf)) |
839 | | // malloc and initialize a VFLA |
840 | | #define VFLA_ALLOC(type, name, mag, fam, cap) do { \ |
841 | | (name) = malloc(VFLA_SIZE(type, fam, cap)); \ |
842 | | if ((name) != NULL) \ |
843 | | VFLA_INIT(type, name, mag, fam, cap); \ |
844 | | } while(0) |
845 | | #define VFLA_FOREACH(var, cptr, fam) \ |
846 | | for (var = &(cptr)->fam[0]; \ |
847 | | (var = (var < &(cptr)->fam[(cptr)->used] ? var : NULL)) != NULL; \ |
848 | | var++) |
849 | | // continue iterating after a break of a _FOREACH |
850 | | #define VFLA_FOREACH_RESUME(var, cptr, fam) \ |
851 | | for (; \ |
852 | | var != NULL && \ |
853 | | (var = (var < &(cptr)->fam[(cptr)->used] ? var : NULL)) != NULL; \ |
854 | | var++) |
855 | | #define VFLA_GET(cptr, fam) ((cptr)->used < (cptr)->capacity ? &(cptr)->fam[(cptr)->used++] : NULL) |
856 | | // asserts sufficient capacity |
857 | | #define VFLA_ADD(cptr, fam, val) do { \ |
858 | | assert((cptr)->used < (cptr)->capacity); \ |
859 | | (cptr)->fam[(cptr)->used++] = (val); \ |
860 | | } while(0) |
861 | | |
862 | | #define VSCARAB_SIZE(cap) VFLA_SIZE(vscarab, s, cap) |
863 | | #define VSCARAB_INIT(scarab, cap) VFLA_INIT(vscarab, scarab, VSCARAB_MAGIC, s, cap) |
864 | | #define VSCARAB_LOCAL(scarab, cap) VFLA_LOCAL(vscarab, scarab, VSCARAB_MAGIC, s, cap) |
865 | | #define VSCARAB_ALLOC(scarab, cap) VFLA_ALLOC(vscarab, scarab, VSCARAB_MAGIC, s, cap) |
866 | | #define VSCARAB_FOREACH(var, scarab) VFLA_FOREACH(var, scarab, s) |
867 | | #define VSCARAB_FOREACH_RESUME(var, scarab) VFLA_FOREACH_RESUME(var, scarab, s) |
868 | | #define VSCARAB_GET(scarab) VFLA_GET(scarab, s) |
869 | | #define VSCARAB_ADD(scarab, val) VFLA_ADD(scarab, s, val) |
870 | | //lint -emacro(64, VSCARAB_ADD_IOV_NORET) weird flexelint bug? |
871 | | #define VSCARAB_ADD_IOV_NORET(scarab, vec) \ |
872 | | VSCARAB_ADD(scarab, ((struct viov){.lease = VAI_LEASE_NORET, .iov = (vec)})) |
873 | | #define VSCARAB_LAST(scarab) ((scarab)->used > 0 ? \ |
874 | | &(scarab)->s[(scarab)->used - 1] : NULL) |
875 | | |
876 | | #define VSCARAB_CHECK(scarab) do { \ |
877 | | CHECK_OBJ(scarab, VSCARAB_MAGIC); \ |
878 | | assert((scarab)->used <= (scarab)->capacity); \ |
879 | | } while(0) |
880 | | |
881 | | #define VSCARAB_CHECK_NOTNULL(scarab) do { \ |
882 | | AN(scarab); \ |
883 | | VSCARAB_CHECK(scarab); \ |
884 | | } while(0) |
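/*
 * Usage sketch (editorial addition): build a small scatter array on the
 * stack and walk it.  The buffer and use_buffer() are made up; the lease
 * value VAI_LEASE_NORET (defined below) marks an entry that must not be
 * returned to storage.
 *
 *	static char greeting[] = "hello";
 *	struct iovec iov;
 *	struct viov *v;
 *
 *	VSCARAB_LOCAL(scarab, 4);		// declares and inits 'scarab'
 *	iov.iov_base = greeting;
 *	iov.iov_len = sizeof greeting - 1;
 *	VSCARAB_ADD_IOV_NORET(scarab, iov);
 *	VSCARAB_CHECK_NOTNULL(scarab);
 *	VSCARAB_FOREACH(v, scarab)
 *		use_buffer(v->iov.iov_base, v->iov.iov_len);
 */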
885 | | |
886 | | /* |
887 | | * VSCARET: Varnish SCatter Array Return |
888 | | * |
889 | | * an array of leases obtained from a vscarab |
890 | | */ |
891 | | |
892 | | struct vscaret { |
893 | | unsigned magic; |
894 | | #define VSCARET_MAGIC 0x9c1f3d7b |
895 | | unsigned capacity; |
896 | | unsigned used; |
897 | | uint64_t lease[] v_counted_by_(capacity); |
898 | | }; |
899 | | |
900 | | #define VSCARET_SIZE(cap) VFLA_SIZE(vscaret, lease, cap) |
901 | | #define VSCARET_INIT(scaret, cap) VFLA_INIT(vscaret, scaret, VSCARET_MAGIC, lease, cap) |
902 | | #define VSCARET_LOCAL(scaret, cap) VFLA_LOCAL(vscaret, scaret, VSCARET_MAGIC, lease, cap) |
903 | | #define VSCARET_ALLOC(scaret, cap) VFLA_ALLOC(vscaret, scaret, VSCARET_MAGIC, lease, cap) |
904 | | #define VSCARET_FOREACH(var, scaret) VFLA_FOREACH(var, scaret, lease) |
905 | | #define VSCARET_GET(scaret) VFLA_GET(scaret, lease) |
906 | | #define VSCARET_ADD(scaret, val) VFLA_ADD(scaret, lease, val) |
907 | | |
908 | | #define VSCARET_CHECK(scaret) do { \ |
909 | | CHECK_OBJ(scaret, VSCARET_MAGIC); \ |
910 | | assert(scaret->used <= scaret->capacity); \ |
911 | | } while(0) |
912 | | |
913 | | #define VSCARET_CHECK_NOTNULL(scaret) do { \ |
914 | | AN(scaret); \ |
915 | | VSCARET_CHECK(scaret); \ |
916 | | } while(0) |
917 | | |
918 | | /* |
919 | | * VSCARABs can contain leases which are not to be returned to storage, for |
920 | | * example static data or fragments of larger leases to be returned later. For |
921 | | * these cases, use this magic value as the lease. This is deliberately not 0 to |
922 | | * catch oversights. |
923 | | */ |
924 | | #define VAI_LEASE_NORET ((uint64_t)0x8) |
925 | | |
926 | | vai_hdl ObjVAIinit(struct worker *, struct objcore *, struct ws *, |
927 | | vai_notify_cb *, void *); |
928 | | int ObjVAIlease(struct worker *, vai_hdl, struct vscarab *); |
929 | | int ObjVAIbuffer(struct worker *, vai_hdl, struct vscarab *); |
930 | | void ObjVAIreturn(struct worker *, vai_hdl, struct vscaret *); |
931 | | void ObjVAIfini(struct worker *, vai_hdl *); |
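/*
 * Simplified usage sketch (editorial addition; see cache_obj.c for the
 * authoritative protocol): lease body extents into a vscarab, consume
 * them, collect the leases in a vscaret and hand them back.  my_notify is
 * a hypothetical callback, and the positive-return convention assumed for
 * ObjVAIlease() is this sketch's assumption.
 *
 *	static void
 *	my_notify(vai_hdl hdl, void *priv)
 *	{
 *		(void)hdl;
 *		(void)priv;		// e.g. wake up the consuming task
 *	}
 *
 *	vai_hdl hdl;
 *	struct viov *v;
 *	VSCARAB_LOCAL(scarab, 8);
 *	VSCARET_LOCAL(scaret, 8);
 *
 *	hdl = ObjVAIinit(wrk, oc, wrk->aws, my_notify, NULL);
 *	if (hdl == NULL)
 *		return;			// async iteration not possible
 *	if (ObjVAIlease(wrk, hdl, scarab) > 0) {
 *		VSCARAB_FOREACH(v, scarab) {
 *			// ... consume v->iov ...
 *			if (v->lease != VAI_LEASE_NORET)
 *				VSCARET_ADD(scaret, v->lease);
 *		}
 *		ObjVAIreturn(wrk, hdl, scaret);
 *	}
 *	ObjVAIfini(wrk, &hdl);
 */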
932 | | |
933 | | /* cache_req_body.c */ |
934 | | ssize_t VRB_Iterate(struct worker *, struct vsl_log *, struct req *, |
935 | | objiterate_f *func, void *priv); |
936 | | |
937 | | /* cache_session.c [SES] */ |
938 | | |
939 | | #define SESS_ATTR(UP, low, typ, len) \ |
940 | | int SES_Get_##low(const struct sess *sp, typ **dst); |
941 | | #include "tbl/sess_attr.h" |
942 | | const char *SES_Get_String_Attr(const struct sess *sp, enum sess_attr a); |
943 | | |
944 | | /* cache_shmlog.c */ |
945 | | void VSLv(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, va_list va); |
946 | | void VSL(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, ...) |
947 | | v_printflike_(3, 4); |
948 | | void VSLs(enum VSL_tag_e tag, vxid_t vxid, const struct strands *s); |
949 | | void VSLbv(struct vsl_log *, enum VSL_tag_e tag, const char *fmt, va_list va); |
950 | | void VSLb(struct vsl_log *, enum VSL_tag_e tag, const char *fmt, ...) |
951 | | v_printflike_(3, 4); |
952 | | void VSLbt(struct vsl_log *, enum VSL_tag_e tag, txt t); |
953 | | void VSLbs(struct vsl_log *, enum VSL_tag_e tag, const struct strands *s); |
954 | | void VSLb_ts(struct vsl_log *, const char *event, vtim_real first, |
955 | | vtim_real *pprev, vtim_real now); |
956 | | void VSLb_bin(struct vsl_log *, enum VSL_tag_e, ssize_t, const void*); |
957 | | int VSL_tag_is_masked(enum VSL_tag_e tag); |
958 | | |
959 | | static inline void |
960 | | VSLb_ts_req(struct req *req, const char *event, vtim_real now) |
961 | 0 | { |
 962 | 0 | |
963 | 0 | if (isnan(req->t_first) || req->t_first == 0.) |
964 | 0 | req->t_first = req->t_prev = now; |
965 | 0 | VSLb_ts(req->vsl, event, req->t_first, &req->t_prev, now); |
 966 | 0 | } |
967 | | |
968 | | static inline void |
969 | | VSLb_ts_busyobj(struct busyobj *bo, const char *event, vtim_real now) |
970 | 0 | { |
 971 | 0 | |
972 | 0 | if (isnan(bo->t_first) || bo->t_first == 0.) |
973 | 0 | bo->t_first = bo->t_prev = now; |
974 | 0 | VSLb_ts(bo->vsl, event, bo->t_first, &bo->t_prev, now); |
 975 | 0 | } |
976 | | |
977 | | /* cache_vcl.c */ |
978 | | const char *VCL_Name(const struct vcl *); |
979 | | |
980 | | /* cache_wrk.c */ |
981 | | |
982 | | typedef void *bgthread_t(struct worker *, void *priv); |
983 | | void WRK_BgThread(pthread_t *thr, const char *name, bgthread_t *func, |
984 | | void *priv); |
985 | | |
986 | | /* cache_ws.c */ |
987 | | void WS_Init(struct ws *ws, const char *id, void *space, unsigned len); |
988 | | |
989 | | unsigned WS_ReserveSize(struct ws *, unsigned); |
990 | | unsigned WS_ReserveAll(struct ws *); |
991 | | void WS_Release(struct ws *ws, unsigned bytes); |
992 | | void WS_ReleaseP(struct ws *ws, const char *ptr); |
993 | | void WS_Assert(const struct ws *ws); |
994 | | void WS_Reset(struct ws *ws, uintptr_t); |
995 | | void *WS_Alloc(struct ws *ws, unsigned bytes); |
996 | | void *WS_Copy(struct ws *ws, const void *str, int len); |
997 | | uintptr_t WS_Snapshot(struct ws *ws); |
998 | | int WS_Allocated(const struct ws *ws, const void *ptr, ssize_t len); |
999 | | unsigned WS_Dump(const struct ws *ws, char, size_t off, void *buf, size_t len); |
1000 | | |
1001 | | static inline void * |
1002 | | WS_Reservation(const struct ws *ws) |
1003 | 0 | { |
 1004 | | |
1005 | 0 | WS_Assert(ws); |
1006 | 0 | AN(ws->r); |
1007 | 0 | AN(ws->f); |
1008 | 0 | return (ws->f); |
 1009 | 0 | } |
1010 | | |
1011 | | static inline unsigned |
1012 | | WS_ReservationSize(const struct ws *ws) |
1013 | 0 | { |
 1014 | 0 | |
1015 | 0 | AN(ws->r); |
1016 | 0 | return (ws->r - ws->f); |
 1017 | 0 | } |
1018 | | |
1019 | | static inline unsigned |
1020 | | WS_ReserveLumps(struct ws *ws, size_t sz) |
1021 | 0 | { |
 1022 | 0 | |
1023 | 0 | AN(sz); |
1024 | 0 | return (WS_ReserveAll(ws) / sz); |
 1025 | 0 | } |
1026 | | |
1027 | | /* cache_ws_common.c */ |
1028 | | void WS_MarkOverflow(struct ws *ws); |
1029 | | int WS_Overflowed(const struct ws *ws); |
1030 | | |
1031 | | const char *WS_Printf(struct ws *ws, const char *fmt, ...) v_printflike_(2, 3); |
1032 | | |
1033 | | void WS_VSB_new(struct vsb *, struct ws *); |
1034 | | char *WS_VSB_finish(struct vsb *, struct ws *, size_t *); |
1035 | | |
1036 | | /* WS utility */ |
1037 | | #define WS_TASK_ALLOC_OBJ(ctx, ptr, magic) do { \ |
1038 | | ptr = WS_Alloc((ctx)->ws, sizeof *(ptr)); \ |
1039 | | if ((ptr) == NULL) \ |
1040 | | VRT_fail(ctx, "Out of workspace for " #magic); \ |
1041 | | else \ |
1042 | | INIT_OBJ(ptr, magic); \ |
1043 | | } while(0) |
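/*
 * Usage sketch (editorial addition): allocate and magic-initialize a
 * miniobj on the task's workspace from VMOD/VRT code, failing the VCL
 * transaction on overflow.  struct my_state and MY_STATE_MAGIC are
 * hypothetical; ctx is the VRT context.
 *
 *	struct my_state *st;
 *
 *	WS_TASK_ALLOC_OBJ(ctx, st, MY_STATE_MAGIC);
 *	if (st == NULL)
 *		return;			// VRT_fail() has already been called
 */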
1044 | | |
1045 | | /* cache_rfc2616.c */ |
1046 | | void RFC2616_Ttl(struct busyobj *, vtim_real now, vtim_real *t_origin, |
1047 | | float *ttl, float *grace, float *keep); |
1048 | | unsigned RFC2616_Req_Gzip(const struct http *); |
1049 | | int RFC2616_Do_Cond(const struct req *sp); |
1050 | | void RFC2616_Weaken_Etag(struct http *hp); |
1051 | | void RFC2616_Vary_AE(struct http *hp); |
1052 | | const char * RFC2616_Strong_LM(const struct http *hp, struct worker *wrk, |
1053 | | struct objcore *oc); |
1054 | | |
1055 | | /* |
1056 | | * We want to cache the most recent timestamp in wrk->lastused to avoid |
 1057 | | * extra timestamps in cache_pool.c. Hide this detail with a macro. |
1058 | | */ |
1059 | | #define W_TIM_real(w) ((w)->lastused = VTIM_real()) |