Coverage Report

Created: 2025-08-28 06:38

/src/varnish-cache/bin/varnishd/cache/cache.h
Line
Count
Source (jump to first uncovered line)
1
/*-
2
 * Copyright (c) 2006 Verdens Gang AS
3
 * Copyright (c) 2006-2015 Varnish Software AS
4
 * All rights reserved.
5
 *
6
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
7
 *
8
 * SPDX-License-Identifier: BSD-2-Clause
9
 *
10
 * Redistribution and use in source and binary forms, with or without
11
 * modification, are permitted provided that the following conditions
12
 * are met:
13
 * 1. Redistributions of source code must retain the above copyright
14
 *    notice, this list of conditions and the following disclaimer.
15
 * 2. Redistributions in binary form must reproduce the above copyright
16
 *    notice, this list of conditions and the following disclaimer in the
17
 *    documentation and/or other materials provided with the distribution.
18
 *
19
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29
 * SUCH DAMAGE.
30
 *
31
 */
32
33
#ifdef VRT_H_INCLUDED
34
#  error "vrt.h included before cache.h - they are exclusive"
35
#endif
36
37
#ifdef CACHE_H_INCLUDED
38
#  error "cache.h included multiple times"
39
#endif
40
41
#include <math.h>
42
#include <pthread.h>
43
#include <stdarg.h>
44
#include <sys/types.h>
45
#include <sys/uio.h>
46
47
#include "vdef.h"
48
#include "vrt.h"
49
50
#define CACHE_H_INCLUDED  // After vrt.h include.
51
52
#include "miniobj.h"
53
#include "vas.h"
54
#include "vqueue.h"
55
#include "vtree.h"
56
57
#include "vapi/vsl_int.h"
58
59
/*--------------------------------------------------------------------*/
60
61
struct vxids {
62
  uint64_t  vxid;
63
};
64
65
typedef struct vxids vxid_t;
66
67
#define NO_VXID ((struct vxids){0})
68
#define IS_NO_VXID(x) ((x).vxid == 0)
69
#define VXID_TAG(x) ((uintmax_t)((x).vxid & (VSL_CLIENTMARKER|VSL_BACKENDMARKER)))
70
#define VXID(u) ((uintmax_t)((u.vxid) & VSL_IDENTMASK))
71
#define IS_SAME_VXID(x, y) ((x).vxid == (y).vxid)
72
73
/*--------------------------------------------------------------------*/
74
75
struct body_status {
76
  const char    *name;
77
  int     nbr;
78
  int     avail;
79
  int     length_known;
80
};
81
82
#define BODYSTATUS(U, l, n, a, k) extern const struct body_status BS_##U[1];
83
#include "tbl/body_status.h"
84
85
typedef const struct body_status *body_status_t;
86
87
/*--------------------------------------------------------------------*/
88
89
struct stream_close {
90
  unsigned    magic;
91
#define STREAM_CLOSE_MAGIC  0xc879c93d
92
  int     idx;
93
  unsigned    is_err;
94
  const char    *name;
95
  const char    *desc;
96
};
97
    extern const struct stream_close SC_NULL[1];
98
#define SESS_CLOSE(nm, stat, err, desc) \
99
    extern const struct stream_close SC_##nm[1];
100
#include "tbl/sess_close.h"
101
102
103
/*--------------------------------------------------------------------
104
 * Indices into http->hd[]
105
 */
106
enum {
107
#define SLTH(tag, ind, req, resp, sdesc, ldesc) ind,
108
#include "tbl/vsl_tags_http.h"
109
};
110
111
/*--------------------------------------------------------------------*/
112
113
struct ban;
114
struct ban_proto;
115
struct cli;
116
struct http_conn;
117
struct listen_sock;
118
struct mempool;
119
struct objcore;
120
struct objhead;
121
struct pool;
122
struct req_step;
123
struct sess;
124
struct transport;
125
struct vcf;
126
struct VSC_lck;
127
struct VSC_main;
128
struct VSC_main_wrk;
129
struct worker;
130
struct worker_priv;
131
132
#define DIGEST_LEN    32
133
134
/*--------------------------------------------------------------------*/
135
136
struct lock { void *priv; };  // Opaque
137
138
/*--------------------------------------------------------------------
139
 * Workspace structure for quick memory allocation.
140
 */
141
142
0
#define WS_ID_SIZE 4
143
144
struct ws {
145
  unsigned    magic;
146
#define WS_MAGIC    0x35fac554
147
  char      id[WS_ID_SIZE]; /* identity */
148
  char      *s;   /* (S)tart of buffer */
149
  char      *f;   /* (F)ree/front pointer */
150
  char      *r;   /* (R)eserved length */
151
  char      *e;   /* (E)nd of buffer */
152
};
153
154
/*--------------------------------------------------------------------
155
 *
156
 */
157
158
struct http {
159
  unsigned    magic;
160
#define HTTP_MAGIC    0x6428b5c9
161
162
  uint16_t    shd;    /* Size of hd space */
163
  txt     *hd;
164
  unsigned char   *hdf;
165
#define HDF_FILTER    (1 << 0)  /* Filtered by Connection */
166
167
  /* NB: ->nhd and below zeroed/initialized by http_Teardown */
168
  uint16_t    nhd;    /* Next free hd */
169
170
  enum VSL_tag_e    logtag;   /* Must be SLT_*Method */
171
  struct vsl_log    *vsl;
172
173
  struct ws   *ws;
174
  uint16_t    status;
175
  uint8_t     protover;
176
};
177
178
/*--------------------------------------------------------------------*/
179
180
struct acct_req {
181
#define ACCT(foo) uint64_t  foo;
182
#include "tbl/acct_fields_req.h"
183
};
184
185
/*--------------------------------------------------------------------*/
186
187
struct acct_bereq {
188
#define ACCT(foo) uint64_t  foo;
189
#include "tbl/acct_fields_bereq.h"
190
};
191
192
/*--------------------------------------------------------------------*/
193
194
struct vsl_log {
195
  uint32_t    *wlb, *wlp, *wle;
196
  vxid_t      wid;
197
  unsigned    wlr;
198
};
199
200
/*--------------------------------------------------------------------*/
201
202
VRBT_HEAD(vrt_privs, vrt_priv);
203
204
/* Worker pool stuff -------------------------------------------------*/
205
206
typedef void task_func_t(struct worker *wrk, void *priv);
207
208
struct pool_task {
209
  VTAILQ_ENTRY(pool_task)   list;
210
  task_func_t     *func;
211
  void        *priv;
212
};
213
214
/*
215
 * tasks are taken off the queues in this order
216
 *
217
 * TASK_QUEUE_{REQ|STR} are new req's (H1/H2), and subject to queue limit.
218
 *
219
 * TASK_QUEUE_RUSH is req's returning from waiting list
220
 *
221
 * NOTE: When changing the number of classes, update places marked with
222
 * TASK_QUEUE_RESERVE in params.h
223
 */
224
enum task_prio {
225
  TASK_QUEUE_BO,
226
  TASK_QUEUE_RUSH,
227
  TASK_QUEUE_REQ,
228
  TASK_QUEUE_STR,
229
  TASK_QUEUE_VCA,
230
  TASK_QUEUE_BG,
231
  TASK_QUEUE__END
232
};
233
234
#define TASK_QUEUE_HIGHEST_PRIORITY TASK_QUEUE_BO
235
#define TASK_QUEUE_RESERVE TASK_QUEUE_BG
236
#define TASK_QUEUE_LIMITED(prio) \
237
  (prio == TASK_QUEUE_REQ || prio == TASK_QUEUE_STR)
238
239
/*--------------------------------------------------------------------*/
240
241
struct worker {
242
  unsigned    magic;
243
#define WORKER_MAGIC    0x6391adcf
244
  int     strangelove;
245
  struct worker_priv  *wpriv;
246
  struct pool   *pool;
247
  struct VSC_main_wrk *stats;
248
  struct vsl_log    *vsl;   // borrowed from req/bo
249
250
  struct pool_task  task[1];
251
252
  vtim_real   lastused;
253
254
  pthread_cond_t    cond;
255
256
  struct ws   aws[1];
257
258
  unsigned    cur_method;
259
  unsigned    seen_methods;
260
261
  struct wrk_vpi    *vpi;
262
};
263
264
/* Stored object -----------------------------------------------------
265
 * This is just to encapsulate the fields owned by the stevedore
266
 */
267
268
struct storeobj {
269
  const struct stevedore  *stevedore;
270
  void      *priv;
271
  uint64_t    priv2;
272
};
273
274
/* Busy Objcore structure --------------------------------------------
275
 *
276
 */
277
278
/*
279
 * The macro-states we expose outside the fetch code
280
 */
281
enum boc_state_e {
282
#define BOC_STATE(U, l)       BOS_##U,
283
#include "tbl/boc_state.h"
284
};
285
286
// cache_obj.h vai notify
287
struct vai_qe;
288
VSLIST_HEAD(vai_q_head, vai_qe);
289
290
struct boc {
291
  unsigned    magic;
292
#define BOC_MAGIC   0x70c98476
293
  unsigned    refcount;
294
  struct lock   mtx;
295
  pthread_cond_t    cond;
296
  void      *stevedore_priv;
297
  enum boc_state_e  state;
298
  uint8_t     *vary;
299
  uint64_t    fetched_so_far;
300
  uint64_t    delivered_so_far;
301
  uint64_t    transit_buffer;
302
  struct vai_q_head vai_q_head;
303
};
304
305
/* Object core structure ---------------------------------------------
306
 * Objects have sideways references in the binary heap and the LRU list
307
 * and we want to avoid paging in a lot of objects just to move them up
308
 * or down the binheap or to move a unrelated object on the LRU list.
309
 * To avoid this we use a proxy object, objcore, to hold the relevant
310
 * housekeeping fields parts of an object.
311
 */
312
313
enum obj_attr {
314
#define OBJ_FIXATTR(U, l, s)  OA_##U,
315
#define OBJ_VARATTR(U, l) OA_##U,
316
#define OBJ_AUXATTR(U, l) OA_##U,
317
#include "tbl/obj_attr.h"
318
        OA__MAX,
319
};
320
321
enum obj_flags {
322
#define OBJ_FLAG(U, l, v)       OF_##U = v,
323
#include "tbl/obj_attr.h"
324
};
325
326
enum oc_flags {
327
#define OC_FLAG(U, l, v)  OC_F_##U = v,
328
#include "tbl/oc_flags.h"
329
};
330
331
#define OC_F_TRANSIENT (OC_F_PRIVATE | OC_F_HFM | OC_F_HFP)
332
333
enum oc_exp_flags {
334
#define OC_EXP_FLAG(U, l, v)  OC_EF_##U = v,
335
#include "tbl/oc_exp_flags.h"
336
};
337
338
struct objcore {
339
  unsigned    magic;
340
#define OBJCORE_MAGIC   0x4d301302
341
  int     refcnt;
342
  struct storeobj   stobj[1];
343
  struct objhead    *objhead;
344
  struct boc    *boc;
345
  vtim_real   timer_when;
346
  VCL_INT     hits;
347
348
349
  vtim_real   t_origin;
350
  float     ttl;
351
  float     grace;
352
  float     keep;
353
354
  uint8_t     flags;
355
356
  uint8_t     exp_flags;
357
358
  uint16_t    oa_present;
359
360
  unsigned    timer_idx;  // XXX 4Gobj limit
361
  unsigned    waitinglist_gen;
362
  vtim_real   last_lru;
363
  VTAILQ_ENTRY(objcore) hsh_list;
364
  VTAILQ_ENTRY(objcore) lru_list;
365
  VTAILQ_ENTRY(objcore) ban_list;
366
  VSTAILQ_ENTRY(objcore)  exp_list;
367
  struct ban    *ban;
368
};
369
370
/* Busy Object structure ---------------------------------------------
371
 *
372
 * The busyobj structure captures the aspects of an object related to,
373
 * and while it is being fetched from the backend.
374
 *
375
 * One of these aspects will be how much has been fetched, which
376
 * streaming delivery will make use of.
377
 */
378
379
enum director_state_e {
380
  DIR_S_NULL = 0,
381
  DIR_S_HDRS = 1,
382
  DIR_S_BODY = 2,
383
};
384
385
struct busyobj {
386
  unsigned    magic;
387
#define BUSYOBJ_MAGIC   0x23b95567
388
389
  char      *end;
390
391
  unsigned    max_retries;
392
  unsigned    retries;
393
  struct req    *req;
394
  struct sess   *sp;
395
  struct worker   *wrk;
396
397
  /* beresp.body */
398
  struct vfp_ctx    *vfc;
399
  const char    *vfp_filter_list;
400
  /* bereq.body */
401
  const char    *vdp_filter_list;
402
403
  struct ws   ws[1];
404
  uintptr_t   ws_bo;
405
  struct http   *bereq0;
406
  struct http   *bereq;
407
  struct http   *beresp;
408
  struct objcore    *bereq_body;
409
  struct objcore    *stale_oc;
410
  struct objcore    *fetch_objcore;
411
412
  const char    *no_retry;
413
414
  struct http_conn  *htc;
415
416
  struct pool_task  fetch_task[1];
417
418
  const char    *err_reason;
419
  enum director_state_e director_state;
420
  uint16_t    err_code;
421
422
#define BERESP_FLAG(l, r, w, f, d) unsigned l:1;
423
#define BEREQ_FLAG(l, r, w, d) BERESP_FLAG(l, r, w, 0, d)
424
#include "tbl/bereq_flags.h"
425
#include "tbl/beresp_flags.h"
426
427
428
  /* Timeouts */
429
  vtim_dur    connect_timeout;
430
  vtim_dur    first_byte_timeout;
431
  vtim_dur    between_bytes_timeout;
432
  vtim_dur    task_deadline;
433
434
  /* Timers */
435
  vtim_real   t_first;  /* First timestamp logged */
436
  vtim_real   t_resp;   /* response received */
437
  vtim_real   t_prev;   /* Previous timestamp logged */
438
439
  /* Acct */
440
  struct acct_bereq acct;
441
442
  const struct stevedore  *storage;
443
  const struct director *director_req;
444
  const struct director *director_resp;
445
  struct vcl    *vcl;
446
447
  struct vsl_log    vsl[1];
448
449
  uint8_t     digest[DIGEST_LEN];
450
  struct vrt_privs  privs[1];
451
452
  const char    *client_identity;
453
};
454
455
#define BUSYOBJ_TMO(bo, pfx, tmo)         \
456
  (isnan((bo)->tmo) ? cache_param->pfx##tmo : (bo)->tmo)
457
458
459
/*--------------------------------------------------------------------*/
460
461
struct reqtop {
462
  unsigned    magic;
463
#define REQTOP_MAGIC    0x57fbda52
464
  struct req    *topreq;
465
  struct vcl    *vcl0;
466
  struct vrt_privs  privs[1];
467
};
468
469
struct req {
470
  unsigned    magic;
471
#define REQ_MAGIC   0xfb4abf6d
472
473
  unsigned    esi_level;
474
  body_status_t   req_body_status;
475
  stream_close_t    doclose;
476
  unsigned    restarts;
477
  unsigned    max_restarts;
478
  unsigned    waitinglist_gen;
479
480
  const struct req_step *req_step;
481
  struct reqtop   *top; /* esi_level == 0 request */
482
483
  uint16_t    err_code;
484
#define REQ_FLAG(l, r, w, d) unsigned l:1;
485
#include "tbl/req_flags.h"
486
487
  const char    *err_reason;
488
489
  struct sess   *sp;
490
  struct worker   *wrk;
491
  struct pool_task  task[1];
492
493
  const struct transport  *transport;
494
  void      *transport_priv;
495
496
  VTAILQ_ENTRY(req) w_list;
497
498
  struct objcore    *body_oc;
499
500
  /* Built Vary string == workspace reservation */
501
  uint8_t     *vary_b;
502
  uint8_t     *vary_e;
503
504
  uint8_t     digest[DIGEST_LEN];
505
506
  vtim_dur    d_ttl;
507
  vtim_dur    d_grace;
508
509
  const struct stevedore  *storage;
510
511
  const struct director *director_hint;
512
  struct vcl    *vcl;
513
514
  uintptr_t   ws_req;   /* WS above request data */
515
516
  /* Timestamps */
517
  vtim_real   t_first;  /* First timestamp logged */
518
  vtim_real   t_prev;   /* Previous timestamp logged */
519
  vtim_real   t_req;    /* Headers complete */
520
  vtim_real   t_resp;   /* Entry to last deliver/synth */
521
522
  struct http_conn  *htc;
523
  struct vfp_ctx    *vfc;
524
  const char    *client_identity;
525
526
  /* HTTP request */
527
  struct http   *http;
528
  struct http   *http0;
529
530
  /* HTTP response */
531
  struct http   *resp;
532
  intmax_t    resp_len;
533
534
  struct ws   ws[1];
535
  struct objcore    *objcore;
536
  struct objcore    *stale_oc;
537
  struct boc    *boc;   /* valid during cnt_transmit */
538
539
  /* resp.body */
540
  struct vdp_ctx    *vdc;
541
  const char    *vdp_filter_list;
542
  /* req.body */
543
  const char    *vfp_filter_list;
544
545
  /* Transaction VSL buffer */
546
  struct vsl_log    vsl[1];
547
548
  /* Temporary accounting */
549
  struct acct_req   acct;
550
551
  struct vrt_privs  privs[1];
552
553
  struct vcf    *vcf;
554
};
555
556
#define IS_TOPREQ(req) ((req)->top->topreq == (req))
557
558
/*--------------------------------------------------------------------
559
 * Struct sess is a high memory-load structure because sessions typically
560
 * hang around the waiter for relatively long time.
561
 *
562
 * The size goal for struct sess + struct memitem is <512 bytes
563
 *
564
 * Getting down to the next relevant size (<256 bytes because of how malloc
565
 * works), is not realistic without a lot of code changes.
566
 */
567
568
enum sess_attr {
569
#define SESS_ATTR(UP, low, typ, len)  SA_##UP,
570
#include "tbl/sess_attr.h"
571
  SA_LAST
572
};
573
574
struct sess {
575
  unsigned    magic;
576
#define SESS_MAGIC    0x2c2f9c5a
577
578
  uint16_t    sattr[SA_LAST];
579
  struct listen_sock  *listen_sock;
580
  int     refcnt;
581
  int     fd;
582
  vxid_t      vxid;
583
584
  struct lock   mtx;
585
586
  struct pool   *pool;
587
588
  struct ws   ws[1];
589
590
  vtim_real   t_open;   /* fd accepted */
591
  vtim_real   t_idle;   /* fd accepted or resp sent */
592
  vtim_dur    timeout_idle;
593
  vtim_dur    timeout_linger;
594
  vtim_dur    send_timeout;
595
  vtim_dur    idle_send_timeout;
596
};
597
598
#define SESS_TMO(sp, tmo)         \
599
  (isnan((sp)->tmo) ? cache_param->tmo : (sp)->tmo)
600
601
/* Prototypes etc ----------------------------------------------------*/
602
603
604
/* cache_ban.c */
605
606
/* for constructing bans */
607
struct ban_proto *BAN_Build(void);
608
const char *BAN_AddTest(struct ban_proto *,
609
    const char *, const char *, const char *);
610
const char *BAN_Commit(struct ban_proto *b);
611
void BAN_Abandon(struct ban_proto *b);
612
613
/* cache_cli.c [CLI] */
614
extern pthread_t cli_thread;
615
#define IS_CLI() (pthread_equal(pthread_self(), cli_thread))
616
#define ASSERT_CLI() do {assert(IS_CLI());} while (0)
617
618
/* cache_http.c */
619
unsigned HTTP_estimate(unsigned nhttp);
620
void HTTP_Clone(struct http *to, const struct http * const fm);
621
void HTTP_Dup(struct http *to, const struct http * const fm);
622
struct http *HTTP_create(void *p, uint16_t nhttp, unsigned);
623
const char *http_Status2Reason(unsigned, const char **);
624
int http_IsHdr(const txt *hh, hdr_t hdr);
625
unsigned http_EstimateWS(const struct http *fm, unsigned how);
626
void http_PutResponse(struct http *to, const char *proto, uint16_t status,
627
    const char *response);
628
void http_FilterReq(struct http *to, const struct http *fm, unsigned how);
629
void HTTP_Encode(const struct http *fm, uint8_t *, unsigned len, unsigned how);
630
int HTTP_Decode(struct http *to, const uint8_t *fm);
631
void http_ForceHeader(struct http *to, hdr_t, const char *val);
632
void http_AppendHeader(struct http *to, hdr_t, const char *val);
633
void http_PrintfHeader(struct http *to, const char *fmt, ...)
634
    v_printflike_(2, 3);
635
void http_TimeHeader(struct http *to, const char *fmt, vtim_real now);
636
const char * http_ViaHeader(void);
637
void http_Proto(struct http *to);
638
void http_SetHeader(struct http *to, const char *header);
639
void http_SetH(struct http *to, unsigned n, const char *header);
640
void http_ForceField(struct http *to, unsigned n, const char *t);
641
void HTTP_Setup(struct http *, struct ws *, struct vsl_log *, enum VSL_tag_e);
642
void http_Teardown(struct http *ht);
643
int http_GetHdr(const struct http *hp, hdr_t, const char **ptr);
644
int http_GetHdrToken(const struct http *hp, hdr_t,
645
    const char *token, const char **pb, const char **pe);
646
int http_GetHdrField(const struct http *hp, hdr_t,
647
    const char *field, const char **ptr);
648
double http_GetHdrQ(const struct http *hp, hdr_t, const char *field);
649
ssize_t http_GetContentLength(const struct http *hp);
650
ssize_t http_GetContentRange(const struct http *hp, ssize_t *lo, ssize_t *hi);
651
const char * http_GetRange(const struct http *hp, ssize_t *lo, ssize_t *hi,
652
    ssize_t len);
653
uint16_t http_GetStatus(const struct http *hp);
654
int http_IsStatus(const struct http *hp, int);
655
void http_SetStatus(struct http *to, uint16_t status, const char *reason);
656
const char *http_GetMethod(const struct http *hp);
657
int http_HdrIs(const struct http *hp, hdr_t, const char *val);
658
void http_CopyHome(const struct http *hp);
659
void http_Unset(struct http *hp, hdr_t);
660
unsigned http_CountHdr(const struct http *hp, hdr_t);
661
void http_CollectHdr(struct http *hp, hdr_t);
662
void http_CollectHdrSep(struct http *hp, hdr_t, const char *sep);
663
void http_VSL_log(const struct http *hp);
664
void HTTP_Merge(struct worker *, struct objcore *, struct http *to);
665
uint16_t HTTP_GetStatusPack(struct worker *, struct objcore *oc);
666
int HTTP_IterHdrPack(struct worker *, struct objcore *, const char **);
667
#define HTTP_FOREACH_PACK(wrk, oc, ptr) \
668
   for ((ptr) = NULL; HTTP_IterHdrPack(wrk, oc, &(ptr));)
669
const char *HTTP_GetHdrPack(struct worker *, struct objcore *, hdr_t);
670
stream_close_t http_DoConnection(struct http *hp, stream_close_t sc_close);
671
int http_IsFiltered(const struct http *hp, unsigned u, unsigned how);
672
673
#define HTTPH_R_PASS    (1 << 0)  /* Request (c->b) in pass mode */
674
#define HTTPH_R_FETCH   (1 << 1)  /* Request (c->b) for fetch */
675
#define HTTPH_A_INS   (1 << 2)  /* Response (b->o) for insert */
676
#define HTTPH_A_PASS    (1 << 3)  /* Response (b->o) for pass */
677
#define HTTPH_C_SPECIFIC  (1 << 4)  /* Connection-specific */
678
679
#define HTTPH(a, b, c) extern hdr_t b;
680
#include "tbl/http_headers.h"
681
682
extern hdr_t H__Status;
683
extern hdr_t H__Proto;
684
extern hdr_t H__Reason;
685
686
// rfc7233,l,1207,1208
687
#define http_tok_eq(s1, s2)   (!vct_casecmp(s1, s2))
688
#define http_tok_at(s1, s2, l)    (!vct_caselencmp(s1, s2, l))
689
#define http_ctok_at(s, cs)   (!vct_caselencmp(s, cs, sizeof(cs) - 1))
690
691
// rfc7230,l,1037,1038
692
#define http_scheme_at(str, tok)  http_ctok_at(str, #tok "://")
693
694
// rfc7230,l,1144,1144
695
// rfc7231,l,1156,1158
696
#define http_method_eq(str, tok)  (!strcmp(str, #tok))
697
698
// rfc7230,l,1222,1222
699
// rfc7230,l,2848,2848
700
// rfc7231,l,3883,3885
701
// rfc7234,l,1339,1340
702
// rfc7234,l,1418,1419
703
#define http_hdr_eq(s1, s2)   http_tok_eq(s1, s2)
704
#define http_hdr_at(s1, s2, l)    http_tok_at(s1, s2, l)
705
706
// rfc7230,l,1952,1952
707
// rfc7231,l,604,604
708
#define http_coding_eq(str, tok)  http_tok_eq(str, #tok)
709
710
// rfc7231,l,1864,1864
711
#define http_expect_eq(str, tok)  http_tok_eq(str, #tok)
712
713
// rfc7233,l,1207,1208
714
#define http_range_at(str, tok, l)  http_tok_at(str, #tok, l)
715
716
/* cache_lck.c */
717
718
/* Internal functions, call only through macros below */
719
void Lck__Lock(struct lock *lck, const char *p,  int l);
720
void Lck__Unlock(struct lock *lck, const char *p,  int l);
721
int Lck__Trylock(struct lock *lck, const char *p,  int l);
722
void Lck__New(struct lock *lck, struct VSC_lck *, const char *);
723
int Lck__Held(const struct lock *lck);
724
int Lck__Owned(const struct lock *lck);
725
extern pthread_mutexattr_t mtxattr_errorcheck;
726
727
/* public interface: */
728
void Lck_Delete(struct lock *lck);
729
int Lck_CondWaitUntil(pthread_cond_t *, struct lock *, vtim_real when);
730
int Lck_CondWait(pthread_cond_t *, struct lock *);
731
int Lck_CondWaitTimeout(pthread_cond_t *, struct lock *, vtim_dur timeout);
732
733
#define Lck_New(a, b) Lck__New(a, b, #b)
734
#define Lck_Lock(a) Lck__Lock(a, __func__, __LINE__)
735
#define Lck_Unlock(a) Lck__Unlock(a, __func__, __LINE__)
736
#define Lck_Trylock(a) Lck__Trylock(a, __func__, __LINE__)
737
#define Lck_AssertHeld(a)   \
738
  do {        \
739
    assert(Lck__Held(a)); \
740
    assert(Lck__Owned(a));  \
741
  } while (0)
742
743
struct VSC_lck *Lck_CreateClass(struct vsc_seg **, const char *);
744
void Lck_DestroyClass(struct vsc_seg **);
745
746
#define LOCK(nam) extern struct VSC_lck *lck_##nam;
747
#include "tbl/locks.h"
748
749
/* cache_obj.c */
750
751
int ObjHasAttr(struct worker *, struct objcore *, enum obj_attr);
752
const void *ObjGetAttr(struct worker *, struct objcore *, enum obj_attr,
753
    ssize_t *len);
754
755
typedef int objiterate_f(void *priv, unsigned flush,
756
    const void *ptr, ssize_t len);
757
#define OBJ_ITER_FLUSH  0x01
758
#define OBJ_ITER_END  0x02
759
760
int ObjIterate(struct worker *, struct objcore *,
761
    void *priv, objiterate_f *func, int final);
762
763
vxid_t ObjGetXID(struct worker *, struct objcore *);
764
uint64_t ObjGetLen(struct worker *, struct objcore *);
765
int ObjGetDouble(struct worker *, struct objcore *, enum obj_attr, double *);
766
int ObjGetU64(struct worker *, struct objcore *, enum obj_attr, uint64_t *);
767
int ObjCheckFlag(struct worker *, struct objcore *, enum obj_flags of);
768
769
/*====================================================================
770
 * ObjVAI...(): Asynchronous Iteration
771
 *
772
 * see comments in cache_obj.c for usage
773
 */
774
775
typedef void *vai_hdl;
776
typedef void vai_notify_cb(vai_hdl, void *priv);
777
778
779
/*
780
 * VSCARAB: Varnish SCatter ARAy of Buffers:
781
 *
782
 * an array of viovs, elsewhere also called an siov or sarray
783
 */
784
struct viov {
785
  uint64_t  lease;
786
  struct iovec  iov;
787
};
788
789
struct vscarab {
790
  unsigned  magic;
791
#define VSCARAB_MAGIC 0x05ca7ab0
792
  unsigned  flags;
793
#define VSCARAB_F_END 1 // last viov is last overall
794
  unsigned  capacity;
795
  unsigned  used;
796
  struct viov s[] v_counted_by_(capacity);
797
};
798
799
// VFLA: starting generic container-with-flexible-array-member macros
800
// aka "struct hack"
801
//
802
// type : struct name
803
// name : a pointer to struct type
804
// mag  : the magic value for this VFLA
805
// cptr : pointer to container struct (aka "head")
806
// fam  : member name of the flexible array member
807
// cap  : capacity
808
//
809
// common properties of all VFLAs:
810
// - are a miniobj (have magic as the first element)
811
// - capacity member is the fam capacity
812
// - used member is the number of fam elements used
813
//
814
// VFLA_SIZE ignores the cap == 0 case, we assert in _INIT
815
// offsetoff ref: https://gustedt.wordpress.com/2011/03/14/flexible-array-member/
816
//lint -emacro(413, VFLA_SIZE)
817
//lint -emacro(545, VFLA_SIZE) bsd offsetof() seems to be using &
818
#define VFLA_SIZE(type, fam, cap) (offsetof(struct type, fam) + \
819
  (cap) * sizeof(((struct type *)0)->fam[0]))
820
#define VFLA_INIT_(type, cptr, mag, fam, cap, save) do {  \
821
  unsigned save = (cap);          \
822
  AN(save);           \
823
  memset((cptr), 0, VFLA_SIZE(type, fam, save));    \
824
  (cptr)->magic = (mag);          \
825
  (cptr)->capacity = (save);        \
826
} while (0)
827
#define VFLA_INIT(type, cptr, mag, fam, cap)      \
828
  VFLA_INIT_(type, cptr, mag, fam, cap, VUNIQ_NAME(save))
829
// declare, allocate and initialize a local VFLA
830
// the additional VLA buf declaration avoids
831
// "Variable-sized object may not be initialized"
832
#define VFLA_LOCAL_(type, name, mag, fam, cap, bufname)       \
833
  char bufname[VFLA_SIZE(type, fam, cap)];        \
834
  struct type *name = (void *)bufname;          \
835
  VFLA_INIT(type, name, mag, fam, cap)
836
#define VFLA_LOCAL(type, name, mag, fam, cap)         \
837
  VFLA_LOCAL_(type, name, mag, fam, cap, VUNIQ_NAME(buf))
838
// malloc and initialize a VFLA
839
#define VFLA_ALLOC(type, name, mag, fam, cap) do {      \
840
  (name) = malloc(VFLA_SIZE(type, fam, cap));     \
841
  if ((name) != NULL)           \
842
    VFLA_INIT(type, name, mag, fam, cap);     \
843
} while(0)
844
#define VFLA_FOREACH(var, cptr, fam)            \
845
  for (var = &(cptr)->fam[0];           \
846
       (var = (var < &(cptr)->fam[(cptr)->used] ? var : NULL)) != NULL; \
847
       var++)
848
// continue iterating after a break of a _FOREACH
849
#define VFLA_FOREACH_RESUME(var, cptr, fam)         \
850
  for (;                  \
851
       var != NULL &&             \
852
         (var = (var < &(cptr)->fam[(cptr)->used] ? var : NULL)) != NULL; \
853
       var++)
854
#define VFLA_GET(cptr, fam) ((cptr)->used < (cptr)->capacity ? &(cptr)->fam[(cptr)->used++] : NULL)
855
// asserts sufficient capacity
856
#define VFLA_ADD(cptr, fam, val) do {           \
857
  assert((cptr)->used < (cptr)->capacity);        \
858
  (cptr)->fam[(cptr)->used++] = (val);          \
859
} while(0)
860
861
#define VSCARAB_SIZE(cap) VFLA_SIZE(vscarab, s, cap)
862
#define VSCARAB_INIT(scarab, cap) VFLA_INIT(vscarab, scarab, VSCARAB_MAGIC, s, cap)
863
#define VSCARAB_LOCAL(scarab, cap) VFLA_LOCAL(vscarab, scarab, VSCARAB_MAGIC, s, cap)
864
#define VSCARAB_ALLOC(scarab, cap) VFLA_ALLOC(vscarab, scarab, VSCARAB_MAGIC, s, cap)
865
#define VSCARAB_FOREACH(var, scarab) VFLA_FOREACH(var, scarab, s)
866
#define VSCARAB_FOREACH_RESUME(var, scarab) VFLA_FOREACH_RESUME(var, scarab, s)
867
#define VSCARAB_GET(scarab) VFLA_GET(scarab, s)
868
#define VSCARAB_ADD(scarab, val) VFLA_ADD(scarab, s, val)
869
//lint -emacro(64, VSCARAB_ADD_IOV_NORET) weird flexelint bug?
870
#define VSCARAB_ADD_IOV_NORET(scarab, vec)          \
871
  VSCARAB_ADD(scarab, ((struct viov){.lease = VAI_LEASE_NORET, .iov = (vec)}))
872
#define VSCARAB_LAST(scarab) ((scarab)->used > 0 ?        \
873
  &(scarab)->s[(scarab)->used - 1] : NULL)
874
875
#define VSCARAB_CHECK(scarab) do {            \
876
  CHECK_OBJ(scarab, VSCARAB_MAGIC);         \
877
  assert((scarab)->used <= (scarab)->capacity);       \
878
} while(0)
879
880
#define VSCARAB_CHECK_NOTNULL(scarab) do {          \
881
  AN(scarab);               \
882
  VSCARAB_CHECK(scarab);              \
883
} while(0)
884
885
/*
886
 * VSCARET: Varnish SCatter Array Return
887
 *
888
 * an array of leases obtained from a vscarab
889
 */
890
891
struct vscaret {
892
  unsigned  magic;
893
#define VSCARET_MAGIC 0x9c1f3d7b
894
  unsigned  capacity;
895
  unsigned  used;
896
  uint64_t  lease[] v_counted_by_(capacity);
897
};
898
899
#define VSCARET_SIZE(cap) VFLA_SIZE(vscaret, lease, cap)
900
#define VSCARET_INIT(scaret, cap) VFLA_INIT(vscaret, scaret, VSCARET_MAGIC, lease, cap)
901
#define VSCARET_LOCAL(scaret, cap) VFLA_LOCAL(vscaret, scaret, VSCARET_MAGIC, lease, cap)
902
#define VSCARET_ALLOC(scaret, cap) VFLA_ALLOC(vscaret, scaret, VSCARET_MAGIC, lease, cap)
903
#define VSCARET_FOREACH(var, scaret) VFLA_FOREACH(var, scaret, lease)
904
#define VSCARET_GET(scaret) VFLA_GET(scaret, lease)
905
#define VSCARET_ADD(scaret, val) VFLA_ADD(scaret, lease, val)
906
907
#define VSCARET_CHECK(scaret) do {            \
908
  CHECK_OBJ(scaret, VSCARET_MAGIC);         \
909
  assert(scaret->used <= scaret->capacity);       \
910
} while(0)
911
912
#define VSCARET_CHECK_NOTNULL(scaret) do {          \
913
  AN(scaret);               \
914
  VSCARET_CHECK(scaret);              \
915
} while(0)
916
917
/*
918
 * VSCARABs can contain leases which are not to be returned to storage, for
919
 * example static data or fragments of larger leases to be returned later. For
920
 * these cases, use this magic value as the lease. This is deliberately not 0 to
921
 * catch oversights.
922
 */
923
#define VAI_LEASE_NORET ((uint64_t)0x8)
924
925
vai_hdl ObjVAIinit(struct worker *, struct objcore *, struct ws *,
926
    vai_notify_cb *, void *);
927
int ObjVAIlease(struct worker *, vai_hdl, struct vscarab *);
928
int ObjVAIbuffer(struct worker *, vai_hdl, struct vscarab *);
929
void ObjVAIreturn(struct worker *, vai_hdl, struct vscaret *);
930
void ObjVAIfini(struct worker *, vai_hdl *);
931
932
/* cache_req_body.c */
933
ssize_t VRB_Iterate(struct worker *, struct vsl_log *, struct req *,
934
    objiterate_f *func, void *priv);
935
936
/* cache_session.c [SES] */

/* Expands, via tbl/sess_attr.h, into one SES_Get_<attr>() prototype
 * per session attribute. */
#define SESS_ATTR(UP, low, typ, len)			\
	int SES_Get_##low(const struct sess *sp, typ **dst);
#include "tbl/sess_attr.h"
const char *SES_Get_String_Attr(const struct sess *sp, enum sess_attr a);

/* cache_shmlog.c */

/* VSL*: log records addressed by transaction id (vxid). */
void VSLv(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, va_list va);
void VSL(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, ...)
    v_printflike_(3, 4);
void VSLs(enum VSL_tag_e tag, vxid_t vxid, const struct strands *s);

/* VSLb*: log records buffered through a struct vsl_log. */
void VSLbv(struct vsl_log *, enum VSL_tag_e tag, const char *fmt, va_list va);
void VSLb(struct vsl_log *, enum VSL_tag_e tag, const char *fmt, ...)
    v_printflike_(3, 4);
void VSLbt(struct vsl_log *, enum VSL_tag_e tag, txt t);
void VSLbs(struct vsl_log *, enum VSL_tag_e tag, const struct strands *s);
void VSLb_ts(struct vsl_log *, const char *event, vtim_real first,
    vtim_real *pprev, vtim_real now);
void VSLb_bin(struct vsl_log *, enum VSL_tag_e, ssize_t, const void*);
int VSL_tag_is_masked(enum VSL_tag_e tag);
957
958
static inline void
959
VSLb_ts_req(struct req *req, const char *event, vtim_real now)
960
0
{
961
0
962
0
  if (isnan(req->t_first) || req->t_first == 0.)
963
0
    req->t_first = req->t_prev = now;
964
0
  VSLb_ts(req->vsl, event, req->t_first, &req->t_prev, now);
965
0
}
Unexecuted instantiation: cache_ws_emu.c:VSLb_ts_req
Unexecuted instantiation: cache_ws_common.c:VSLb_ts_req
Unexecuted instantiation: cache_esi_parse.c:VSLb_ts_req
Unexecuted instantiation: esi_parse_fuzzer.c:VSLb_ts_req
966
967
static inline void
968
VSLb_ts_busyobj(struct busyobj *bo, const char *event, vtim_real now)
969
0
{
970
0
971
0
  if (isnan(bo->t_first) || bo->t_first == 0.)
972
0
    bo->t_first = bo->t_prev = now;
973
0
  VSLb_ts(bo->vsl, event, bo->t_first, &bo->t_prev, now);
974
0
}
Unexecuted instantiation: cache_ws_emu.c:VSLb_ts_busyobj
Unexecuted instantiation: cache_ws_common.c:VSLb_ts_busyobj
Unexecuted instantiation: cache_esi_parse.c:VSLb_ts_busyobj
Unexecuted instantiation: esi_parse_fuzzer.c:VSLb_ts_busyobj
975
976
/* cache_vcl.c */
const char *VCL_Name(const struct vcl *);

/* cache_wrk.c */

/* Entry-point signature for a background worker thread. */
typedef void *bgthread_t(struct worker *, void *priv);
void WRK_BgThread(pthread_t *thr, const char *name, bgthread_t *func,
    void *priv);

/* cache_ws.c */
void WS_Init(struct ws *ws, const char *id, void *space, unsigned len);

/* NOTE(review): Reserve*/Release* appear to form a bracketed protocol
 * around the workspace free area (cf. WS_Reservation() below, which
 * asserts an active reservation) — confirm against cache_ws.c. */
unsigned WS_ReserveSize(struct ws *, unsigned);
unsigned WS_ReserveAll(struct ws *);
void WS_Release(struct ws *ws, unsigned bytes);
void WS_ReleaseP(struct ws *ws, const char *ptr);
void WS_Assert(const struct ws *ws);
void WS_Reset(struct ws *ws, uintptr_t);
void *WS_Alloc(struct ws *ws, unsigned bytes);
void *WS_Copy(struct ws *ws, const void *str, int len);
uintptr_t WS_Snapshot(struct ws *ws);
int WS_Allocated(const struct ws *ws, const void *ptr, ssize_t len);
unsigned WS_Dump(const struct ws *ws, char, size_t off, void *buf, size_t len);
999
1000
static inline void *
1001
WS_Reservation(const struct ws *ws)
1002
0
{
1003
1004
0
  WS_Assert(ws);
1005
0
  AN(ws->r);
1006
0
  AN(ws->f);
1007
0
  return (ws->f);
1008
0
}
Unexecuted instantiation: cache_ws_emu.c:WS_Reservation
Unexecuted instantiation: cache_ws_common.c:WS_Reservation
Unexecuted instantiation: cache_esi_parse.c:WS_Reservation
Unexecuted instantiation: esi_parse_fuzzer.c:WS_Reservation
1009
1010
static inline unsigned
1011
WS_ReservationSize(const struct ws *ws)
1012
0
{
1013
0
1014
0
  AN(ws->r);
1015
0
  return (ws->r - ws->f);
1016
0
}
Unexecuted instantiation: cache_ws_emu.c:WS_ReservationSize
Unexecuted instantiation: cache_ws_common.c:WS_ReservationSize
Unexecuted instantiation: cache_esi_parse.c:WS_ReservationSize
Unexecuted instantiation: esi_parse_fuzzer.c:WS_ReservationSize
1017
1018
static inline unsigned
1019
WS_ReserveLumps(struct ws *ws, size_t sz)
1020
0
{
1021
0
1022
0
  AN(sz);
1023
0
  return (WS_ReserveAll(ws) / sz);
1024
0
}
Unexecuted instantiation: cache_ws_emu.c:WS_ReserveLumps
Unexecuted instantiation: cache_ws_common.c:WS_ReserveLumps
Unexecuted instantiation: cache_esi_parse.c:WS_ReserveLumps
Unexecuted instantiation: esi_parse_fuzzer.c:WS_ReserveLumps
1025
1026
/* cache_ws_common.c */
void WS_MarkOverflow(struct ws *ws);
int WS_Overflowed(const struct ws *ws);

const char *WS_Printf(struct ws *ws, const char *fmt, ...) v_printflike_(2, 3);

/* Build a VSB backed by workspace; finish yields the string (or NULL). */
void WS_VSB_new(struct vsb *, struct ws *);
char *WS_VSB_finish(struct vsb *, struct ws *, size_t *);

/* WS utility */
/*
 * Allocate an object of ptr's pointee type from the task workspace and
 * INIT_OBJ() it with the given magic.  On workspace exhaustion the VCL
 * transaction is failed via VRT_fail() and ptr is left NULL — callers
 * must check ptr afterwards.
 */
#define WS_TASK_ALLOC_OBJ(ctx, ptr, magic) do {		\
	ptr = WS_Alloc((ctx)->ws, sizeof *(ptr));	\
	if ((ptr) == NULL)			\
		VRT_fail(ctx, "Out of workspace for " #magic);	\
	else				\
		INIT_OBJ(ptr, magic);			\
} while(0)
1043
1044
/* cache_rfc2616.c */

/* HTTP caching-policy helpers; names follow the RFC 2616 concepts
 * (TTL/grace/keep, Accept-Encoding gzip, conditional requests,
 * weak ETags, Vary, strong Last-Modified). */
void RFC2616_Ttl(struct busyobj *, vtim_real now, vtim_real *t_origin,
    float *ttl, float *grace, float *keep);
unsigned RFC2616_Req_Gzip(const struct http *);
int RFC2616_Do_Cond(const struct req *sp);
void RFC2616_Weaken_Etag(struct http *hp);
void RFC2616_Vary_AE(struct http *hp);
const char * RFC2616_Strong_LM(const struct http *hp, struct worker *wrk,
    struct objcore *oc);

/*
 * We want to cache the most recent timestamp in wrk->lastused to avoid
 * extra timestamps in cache_pool.c.  Hide this detail with a macro
 */
#define W_TIM_real(w) ((w)->lastused = VTIM_real())