Coverage Report

Created: 2026-01-17 06:27

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/varnish-cache/bin/varnishd/cache/cache.h
Line
Count
Source
1
/*-
2
 * Copyright (c) 2006 Verdens Gang AS
3
 * Copyright (c) 2006-2015 Varnish Software AS
4
 * All rights reserved.
5
 *
6
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
7
 *
8
 * SPDX-License-Identifier: BSD-2-Clause
9
 *
10
 * Redistribution and use in source and binary forms, with or without
11
 * modification, are permitted provided that the following conditions
12
 * are met:
13
 * 1. Redistributions of source code must retain the above copyright
14
 *    notice, this list of conditions and the following disclaimer.
15
 * 2. Redistributions in binary form must reproduce the above copyright
16
 *    notice, this list of conditions and the following disclaimer in the
17
 *    documentation and/or other materials provided with the distribution.
18
 *
19
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
23
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29
 * SUCH DAMAGE.
30
 *
31
 */
32
33
#ifdef VRT_H_INCLUDED
34
#  error "vrt.h included before cache.h - they are exclusive"
35
#endif
36
37
#ifdef CACHE_H_INCLUDED
38
#  error "cache.h included multiple times"
39
#endif
40
41
#include <math.h>
42
#include <pthread.h>
43
#include <stdarg.h>
44
#include <sys/types.h>
45
#include <sys/uio.h>
46
47
#include "vdef.h"
48
#include "vrt.h"
49
50
#define CACHE_H_INCLUDED  // After vrt.h include.
51
52
#include "miniobj.h"
53
#include "vas.h"
54
#include "vqueue.h"
55
#include "vtree.h"
56
57
#include "vapi/vsl_int.h"
58
59
/*--------------------------------------------------------------------*/
60
61
struct vxids {
62
  uint64_t  vxid;
63
};
64
65
typedef struct vxids vxid_t;
66
67
#define NO_VXID ((struct vxids){0})
68
#define IS_NO_VXID(x) ((x).vxid == 0)
69
#define VXID_TAG(x) ((uintmax_t)((x).vxid & (VSL_CLIENTMARKER|VSL_BACKENDMARKER)))
70
#define VXID(u) ((uintmax_t)((u.vxid) & VSL_IDENTMASK))
71
#define IS_SAME_VXID(x, y) ((x).vxid == (y).vxid)
72
73
/*--------------------------------------------------------------------*/
74
75
struct body_status {
76
  const char    *name;
77
  int     nbr;
78
  int     avail;
79
  int     length_known;
80
};
81
82
#define BODYSTATUS(U, l, n, a, k) extern const struct body_status BS_##U[1];
83
#include "tbl/body_status.h"
84
85
typedef const struct body_status *body_status_t;
86
87
/*--------------------------------------------------------------------*/
88
89
struct stream_close {
90
  unsigned    magic;
91
#define STREAM_CLOSE_MAGIC  0xc879c93d
92
  int     idx;
93
  unsigned    is_err;
94
  const char    *name;
95
  const char    *desc;
96
};
97
    extern const struct stream_close SC_NULL[1];
98
#define SESS_CLOSE(nm, stat, err, desc) \
99
    extern const struct stream_close SC_##nm[1];
100
#include "tbl/sess_close.h"
101
102
103
/*--------------------------------------------------------------------
104
 * Indices into http->hd[]
105
 */
106
enum {
107
#define SLTH(tag, ind, req, resp, sdesc, ldesc) ind,
108
#include "tbl/vsl_tags_http.h"
109
};
110
111
/*--------------------------------------------------------------------*/
112
113
struct ban;
114
struct ban_proto;
115
struct cli;
116
struct http_conn;
117
struct listen_sock;
118
struct mempool;
119
struct objcore;
120
struct objhead;
121
struct pool;
122
struct req_step;
123
struct sess;
124
struct transport;
125
struct vcf;
126
struct VSC_lck;
127
struct VSC_main;
128
struct VSC_main_wrk;
129
struct worker;
130
struct worker_priv;
131
132
#define DIGEST_LEN    32
133
134
/*--------------------------------------------------------------------*/
135
136
struct lock { void *priv; };  // Opaque
137
138
/*--------------------------------------------------------------------
139
 * Workspace structure for quick memory allocation.
140
 */
141
142
0
#define WS_ID_SIZE 4
143
144
struct ws {
145
  unsigned    magic;
146
#define WS_MAGIC    0x35fac554
147
  char      id[WS_ID_SIZE]; /* identity */
148
  char      *s;   /* (S)tart of buffer */
149
  char      *f;   /* (F)ree/front pointer */
150
  char      *r;   /* (R)eserved length */
151
  char      *e;   /* (E)nd of buffer */
152
};
153
154
/*--------------------------------------------------------------------
155
 *
156
 */
157
158
struct http {
159
  unsigned    magic;
160
#define HTTP_MAGIC    0x6428b5c9
161
162
  uint16_t    shd;    /* Size of hd space */
163
  txt     *hd;
164
  unsigned char   *hdf;
165
#define HDF_FILTER    (1 << 0)  /* Filtered by Connection */
166
167
  /* NB: ->nhd and below zeroed/initialized by http_Teardown */
168
  uint16_t    nhd;    /* Next free hd */
169
170
  enum VSL_tag_e    logtag;   /* Must be SLT_*Method */
171
  struct vsl_log    *vsl;
172
173
  struct ws   *ws;
174
  uint16_t    status;
175
  uint8_t     protover;
176
};
177
178
/*--------------------------------------------------------------------*/
179
180
struct acct_req {
181
#define ACCT(foo) uint64_t  foo;
182
#include "tbl/acct_fields_req.h"
183
};
184
185
/*--------------------------------------------------------------------*/
186
187
struct acct_bereq {
188
#define ACCT(foo) uint64_t  foo;
189
#include "tbl/acct_fields_bereq.h"
190
};
191
192
/*--------------------------------------------------------------------*/
193
194
struct vsl_log {
195
  uint32_t    *wlb, *wlp, *wle;
196
  vxid_t      wid;
197
  unsigned    wlr;
198
};
199
200
/*--------------------------------------------------------------------*/
201
202
VRBT_HEAD(vrt_privs, vrt_priv);
203
204
/* Worker pool stuff -------------------------------------------------*/
205
206
typedef void task_func_t(struct worker *wrk, void *priv);
207
208
struct pool_task {
209
  VTAILQ_ENTRY(pool_task)   list;
210
  task_func_t     *func;
211
  void        *priv;
212
};
213
214
/*
215
 * tasks are taken off the queues in this order
216
 *
217
 * TASK_QUEUE_{REQ|STR} are new req's (H1/H2), and subject to queue limit.
218
 *
219
 * TASK_QUEUE_RUSH is req's returning from waiting list
220
 *
221
 * NOTE: When changing the number of classes, update places marked with
222
 * TASK_QUEUE_RESERVE in params.h
223
 */
224
enum task_prio {
225
  TASK_QUEUE_BO,
226
  TASK_QUEUE_RUSH,
227
  TASK_QUEUE_REQ,
228
  TASK_QUEUE_STR,
229
  TASK_QUEUE_VCA,
230
  TASK_QUEUE_BG,
231
  TASK_QUEUE__END
232
};
233
234
#define TASK_QUEUE_HIGHEST_PRIORITY TASK_QUEUE_BO
235
#define TASK_QUEUE_RESERVE TASK_QUEUE_BG
236
#define TASK_QUEUE_LIMITED(prio) \
237
  (prio == TASK_QUEUE_REQ || prio == TASK_QUEUE_STR)
238
239
/*--------------------------------------------------------------------*/
240
241
struct worker {
242
  unsigned    magic;
243
#define WORKER_MAGIC    0x6391adcf
244
  int     strangelove;
245
  struct worker_priv  *wpriv;
246
  struct pool   *pool;
247
  struct VSC_main_wrk *stats;
248
  struct vsl_log    *vsl;   // borrowed from req/bo
249
250
  struct pool_task  task[1];
251
252
  vtim_real   lastused;
253
254
  pthread_cond_t    cond;
255
256
  struct ws   aws[1];
257
258
  unsigned    cur_method;
259
  unsigned    seen_methods;
260
261
  struct wrk_vpi    *vpi;
262
};
263
264
/* Stored object -----------------------------------------------------
265
 * This is just to encapsulate the fields owned by the stevedore
266
 */
267
268
struct storeobj {
269
  const struct stevedore  *stevedore;
270
  void      *priv;
271
  uint64_t    priv2;
272
};
273
274
/* Busy Objcore structure --------------------------------------------
275
 *
276
 */
277
278
/*
279
 * The macro-states we expose outside the fetch code
280
 */
281
enum boc_state_e {
282
#define BOC_STATE(U, l)       BOS_##U,
283
#include "tbl/boc_state.h"
284
};
285
286
// cache_obj.h vai notify
287
struct vai_qe;
288
VSLIST_HEAD(vai_q_head, vai_qe);
289
290
struct boc {
291
  unsigned    magic;
292
#define BOC_MAGIC   0x70c98476
293
  unsigned    refcount;
294
  struct lock   mtx;
295
  pthread_cond_t    cond;
296
  void      *stevedore_priv;
297
  enum boc_state_e  state;
298
  uint8_t     *vary;
299
  uint64_t    fetched_so_far;
300
  uint64_t    delivered_so_far;
301
  uint64_t    transit_buffer;
302
  struct vai_q_head vai_q_head;
303
};
304
305
/* Object core structure ---------------------------------------------
306
 * Objects have sideways references in the binary heap and the LRU list
307
 * and we want to avoid paging in a lot of objects just to move them up
308
 * or down the binheap or to move an unrelated object on the LRU list.
309
 * To avoid this we use a proxy object, objcore, to hold the relevant
310
 * housekeeping fields parts of an object.
311
 */
312
313
enum obj_attr {
314
#define OBJ_FIXATTR(U, l, s)  OA_##U,
315
#define OBJ_VARATTR(U, l) OA_##U,
316
#define OBJ_AUXATTR(U, l) OA_##U,
317
#include "tbl/obj_attr.h"
318
        OA__MAX,
319
};
320
321
enum obj_flags {
322
#define OBJ_FLAG(U, l, v)       OF_##U = v,
323
#include "tbl/obj_attr.h"
324
};
325
326
enum oc_flags {
327
#define OC_FLAG(U, l, v)  OC_F_##U = v,
328
#include "tbl/oc_flags.h"
329
};
330
331
#define OC_F_TRANSIENT (OC_F_PRIVATE | OC_F_HFM | OC_F_HFP)
332
333
enum oc_exp_flags {
334
#define OC_EXP_FLAG(U, l, v)  OC_EF_##U = v,
335
#include "tbl/oc_exp_flags.h"
336
};
337
338
struct objcore {
339
  unsigned    magic;
340
#define OBJCORE_MAGIC   0x4d301302
341
  int     refcnt;
342
  struct storeobj   stobj[1];
343
  struct objhead    *objhead;
344
  struct boc    *boc;
345
  vtim_real   timer_when;
346
  VCL_INT     hits;
347
348
349
  vtim_real   t_origin;
350
  float     ttl;
351
  float     grace;
352
  float     keep;
353
354
  uint8_t     flags;
355
356
  uint8_t     exp_flags;
357
358
  uint16_t    oa_present;
359
360
  unsigned    timer_idx;  // XXX 4Gobj limit
361
  unsigned    waitinglist_gen;
362
  vtim_real   last_lru;
363
  VTAILQ_ENTRY(objcore) hsh_list;
364
  VTAILQ_ENTRY(objcore) lru_list;
365
  VTAILQ_ENTRY(objcore) ban_list;
366
  VSTAILQ_ENTRY(objcore)  exp_list;
367
  struct ban    *ban;
368
};
369
370
/* Busy Object structure ---------------------------------------------
371
 *
372
 * The busyobj structure captures the aspects of an object related to,
374
 * and used while, it is being fetched from the backend.
374
 *
375
 * One of these aspects will be how much has been fetched, which
376
 * streaming delivery will make use of.
377
 */
378
379
enum director_state_e {
380
  DIR_S_NULL = 0,
381
  DIR_S_HDRS = 1,
382
  DIR_S_BODY = 2,
383
};
384
385
struct busyobj {
386
  unsigned    magic;
387
#define BUSYOBJ_MAGIC   0x23b95567
388
389
  char      *end;
390
391
  unsigned    max_retries;
392
  unsigned    retries;
393
  struct req    *req;
394
  struct sess   *sp;
395
  struct worker   *wrk;
396
397
  /* beresp.body */
398
  struct vfp_ctx    *vfc;
399
  const char    *vfp_filter_list;
400
  /* bereq.body */
401
  const char    *vdp_filter_list;
402
403
  struct ws   ws[1];
404
  uintptr_t   ws_bo;
405
  struct http   *bereq0;
406
  struct http   *bereq;
407
  struct http   *beresp;
408
  struct objcore    *bereq_body;
409
  struct objcore    *stale_oc;
410
  struct objcore    *fetch_objcore;
411
412
  const char    *no_retry;
413
414
  struct http_conn  *htc;
415
416
  struct pool_task  fetch_task[1];
417
418
  const char    *err_reason;
419
  enum director_state_e director_state;
420
  uint16_t    err_code;
421
422
#define BERESP_FLAG(l, r, w, f, d) unsigned l:1;
423
#define BEREQ_FLAG(l, r, w, d) BERESP_FLAG(l, r, w, 0, d)
424
#include "tbl/bereq_flags.h"
425
#include "tbl/beresp_flags.h"
426
427
428
  /* Timeouts */
429
  vtim_dur    connect_timeout;
430
  vtim_dur    first_byte_timeout;
431
  vtim_dur    between_bytes_timeout;
432
  vtim_dur    task_deadline;
433
434
  /* Timers */
435
  vtim_real   t_first;  /* First timestamp logged */
436
  vtim_real   t_resp;   /* response received */
437
  vtim_real   t_prev;   /* Previous timestamp logged */
438
439
  /* Acct */
440
  struct acct_bereq acct;
441
442
  const struct stevedore  *storage;
443
  const struct director *director_req;
444
  const struct director *director_resp;
445
  struct vcl    *vcl;
446
447
  struct vsl_log    vsl[1];
448
449
  uint8_t     digest[DIGEST_LEN];
450
  struct vrt_privs  privs[1];
451
452
  const char    *client_identity;
453
};
454
455
#define BUSYOBJ_TMO(bo, pfx, tmo)         \
456
  (isnan((bo)->tmo) ? cache_param->pfx##tmo : (bo)->tmo)
457
458
extern const char *retry_disabled;
459
460
/*--------------------------------------------------------------------*/
461
462
struct reqtop {
463
  unsigned    magic;
464
#define REQTOP_MAGIC    0x57fbda52
465
  struct req    *topreq;
466
  struct vcl    *vcl0;
467
  struct vrt_privs  privs[1];
468
};
469
470
struct req {
471
  unsigned    magic;
472
#define REQ_MAGIC   0xfb4abf6d
473
474
  unsigned    esi_level;
475
  body_status_t   req_body_status;
476
  stream_close_t    doclose;
477
  unsigned    restarts;
478
  unsigned    max_restarts;
479
  unsigned    waitinglist_gen;
480
481
  const struct req_step *req_step;
482
  struct reqtop   *top; /* esi_level == 0 request */
483
484
  uint16_t    err_code;
485
#define REQ_FLAG(l, r, w, d) unsigned l:1;
486
#include "tbl/req_flags.h"
487
488
  const char    *err_reason;
489
490
  struct sess   *sp;
491
  struct worker   *wrk;
492
  struct pool_task  task[1];
493
494
  const struct transport  *transport;
495
  void      *transport_priv;
496
497
  VTAILQ_ENTRY(req) w_list;
498
499
  struct objcore    *body_oc;
500
501
  /* Built Vary string == workspace reservation */
502
  uint8_t     *vary_b;
503
  uint8_t     *vary_e;
504
505
  uint8_t     digest[DIGEST_LEN];
506
507
  vtim_dur    d_ttl;
508
  vtim_dur    d_grace;
509
510
  const struct stevedore  *storage;
511
512
  const struct director *director_hint;
513
  struct vcl    *vcl;
514
515
  uintptr_t   ws_req;   /* WS above request data */
516
517
  /* Timestamps */
518
  vtim_real   t_first;  /* First timestamp logged */
519
  vtim_real   t_prev;   /* Previous timestamp logged */
520
  vtim_real   t_req;    /* Headers complete */
521
  vtim_real   t_resp;   /* Entry to last deliver/synth */
522
523
  struct http_conn  *htc;
524
  struct vfp_ctx    *vfc;
525
  const char    *client_identity;
526
527
  /* HTTP request */
528
  struct http   *http;
529
  struct http   *http0;
530
531
  /* HTTP response */
532
  struct http   *resp;
533
  intmax_t    resp_len;
534
535
  struct ws   ws[1];
536
  struct objcore    *objcore;
537
  struct objcore    *stale_oc;
538
  struct boc    *boc;   /* valid during cnt_transmit */
539
540
  /* resp.body */
541
  struct vdp_ctx    *vdc;
542
  const char    *vdp_filter_list;
543
  /* req.body */
544
  const char    *vfp_filter_list;
545
546
  /* Transaction VSL buffer */
547
  struct vsl_log    vsl[1];
548
549
  /* Temporary accounting */
550
  struct acct_req   acct;
551
552
  struct vrt_privs  privs[1];
553
554
  struct vcf    *vcf;
555
};
556
557
#define IS_TOPREQ(req) ((req)->top->topreq == (req))
558
559
/*--------------------------------------------------------------------
560
 * Struct sess is a high memory-load structure because sessions typically
561
 * hang around the waiter for relatively long time.
562
 *
563
 * The size goal for struct sess + struct memitem is <512 bytes
564
 *
565
 * Getting down to the next relevant size (<256 bytes because of how malloc
566
 * works), is not realistic without a lot of code changes.
567
 */
568
569
enum sess_attr {
570
#define SESS_ATTR(UP, low, typ, len)  SA_##UP,
571
#include "tbl/sess_attr.h"
572
  SA_LAST
573
};
574
575
struct sess {
576
  unsigned    magic;
577
#define SESS_MAGIC    0x2c2f9c5a
578
579
  uint16_t    sattr[SA_LAST];
580
  struct listen_sock  *listen_sock;
581
  int     refcnt;
582
  int     fd;
583
  vxid_t      vxid;
584
585
  struct lock   mtx;
586
587
  struct pool   *pool;
588
589
  struct ws   ws[1];
590
591
  vtim_real   t_open;   /* fd accepted */
592
  vtim_real   t_idle;   /* fd accepted or resp sent */
593
  vtim_dur    timeout_idle;
594
  vtim_dur    timeout_linger;
595
  vtim_dur    send_timeout;
596
  vtim_dur    idle_send_timeout;
597
};
598
599
#define SESS_TMO(sp, tmo)         \
600
  (isnan((sp)->tmo) ? cache_param->tmo : (sp)->tmo)
601
602
/* Prototypes etc ----------------------------------------------------*/
603
604
605
/* cache_ban.c */
606
607
/* for constructing bans */
608
struct ban_proto *BAN_Build(void);
609
const char *BAN_AddTest(struct ban_proto *,
610
    const char *, const char *, const char *);
611
const char *BAN_Commit(struct ban_proto *b);
612
void BAN_Abandon(struct ban_proto *b);
613
614
/* cache_cli.c [CLI] */
615
extern pthread_t cli_thread;
616
#define IS_CLI() (pthread_equal(pthread_self(), cli_thread))
617
#define ASSERT_CLI() do {assert(IS_CLI());} while (0)
618
619
/* cache_http.c */
620
unsigned HTTP_estimate(unsigned nhttp);
621
void HTTP_Clone(struct http *to, const struct http * const fm);
622
void HTTP_Dup(struct http *to, const struct http * const fm);
623
struct http *HTTP_create(void *p, uint16_t nhttp, unsigned);
624
const char *http_Status2Reason(unsigned, const char **);
625
int http_IsHdr(const txt *hh, hdr_t hdr);
626
unsigned http_EstimateWS(const struct http *fm, unsigned how);
627
void http_PutResponse(struct http *to, const char *proto, uint16_t status,
628
    const char *response);
629
void http_FilterReq(struct http *to, const struct http *fm, unsigned how);
630
void HTTP_Encode(const struct http *fm, uint8_t *, unsigned len, unsigned how);
631
int HTTP_Decode(struct http *to, const uint8_t *fm);
632
void http_ForceHeader(struct http *to, hdr_t, const char *val);
633
void http_AppendHeader(struct http *to, hdr_t, const char *val);
634
void http_PrintfHeader(struct http *to, const char *fmt, ...)
635
    v_printflike_(2, 3);
636
void http_TimeHeader(struct http *to, const char *fmt, vtim_real now);
637
const char * http_ViaHeader(void);
638
void http_Proto(struct http *to);
639
void http_SetHeader(struct http *to, const char *header);
640
void http_SetH(struct http *to, unsigned n, const char *header);
641
void http_ForceField(struct http *to, unsigned n, const char *t);
642
void HTTP_Setup(struct http *, struct ws *, struct vsl_log *, enum VSL_tag_e);
643
void http_Teardown(struct http *ht);
644
int http_GetHdr(const struct http *hp, hdr_t, const char **ptr);
645
int http_GetHdrToken(const struct http *hp, hdr_t,
646
    const char *token, const char **pb, const char **pe);
647
int http_GetHdrField(const struct http *hp, hdr_t,
648
    const char *field, const char **ptr);
649
double http_GetHdrQ(const struct http *hp, hdr_t, const char *field);
650
ssize_t http_GetContentLength(const struct http *hp);
651
ssize_t http_GetContentRange(const struct http *hp, ssize_t *lo, ssize_t *hi);
652
const char * http_GetRange(const struct http *hp, ssize_t *lo, ssize_t *hi,
653
    ssize_t len);
654
uint16_t http_GetStatus(const struct http *hp);
655
int http_IsStatus(const struct http *hp, int);
656
void http_SetStatus(struct http *to, uint16_t status, const char *reason);
657
const char *http_GetMethod(const struct http *hp);
658
int http_HdrIs(const struct http *hp, hdr_t, const char *val);
659
void http_CopyHome(const struct http *hp);
660
void http_Unset(struct http *hp, hdr_t);
661
unsigned http_CountHdr(const struct http *hp, hdr_t);
662
void http_CollectHdr(struct http *hp, hdr_t);
663
void http_CollectHdrSep(struct http *hp, hdr_t, const char *sep);
664
void http_VSL_log(const struct http *hp);
665
void HTTP_Merge(struct worker *, struct objcore *, struct http *to);
666
uint16_t HTTP_GetStatusPack(struct worker *, struct objcore *oc);
667
int HTTP_IterHdrPack(struct worker *, struct objcore *, const char **);
668
#define HTTP_FOREACH_PACK(wrk, oc, ptr) \
669
   for ((ptr) = NULL; HTTP_IterHdrPack(wrk, oc, &(ptr));)
670
const char *HTTP_GetHdrPack(struct worker *, struct objcore *, hdr_t);
671
stream_close_t http_DoConnection(struct http *hp, stream_close_t sc_close);
672
int http_IsFiltered(const struct http *hp, unsigned u, unsigned how);
673
674
#define HTTPH_R_PASS    (1 << 0)  /* Request (c->b) in pass mode */
675
#define HTTPH_R_FETCH   (1 << 1)  /* Request (c->b) for fetch */
676
#define HTTPH_A_INS   (1 << 2)  /* Response (b->o) for insert */
677
#define HTTPH_A_PASS    (1 << 3)  /* Response (b->o) for pass */
678
#define HTTPH_C_SPECIFIC  (1 << 4)  /* Connection-specific */
679
680
#define HTTPH(a, b, c) extern hdr_t b;
681
#include "tbl/http_headers.h"
682
683
extern hdr_t H__Status;
684
extern hdr_t H__Proto;
685
extern hdr_t H__Reason;
686
687
// rfc7233,l,1207,1208
688
#define http_tok_eq(s1, s2)   (!vct_casecmp(s1, s2))
689
#define http_tok_at(s1, s2, l)    (!vct_caselencmp(s1, s2, l))
690
#define http_ctok_at(s, cs)   (!vct_caselencmp(s, cs, sizeof(cs) - 1))
691
692
// rfc7230,l,1037,1038
693
#define http_scheme_at(str, tok)  http_ctok_at(str, #tok "://")
694
695
// rfc7230,l,1144,1144
696
// rfc7231,l,1156,1158
697
#define http_method_eq(str, tok)  (!vstrcmp(str, #tok))
698
// l = vstrlen(str)
699
#define http_method_eq_l(str, l, tok) (l == vstrlen(#tok) && ! vstrcmp(str, #tok))
700
701
// rfc7230,l,1222,1222
702
// rfc7230,l,2848,2848
703
// rfc7231,l,3883,3885
704
// rfc7234,l,1339,1340
705
// rfc7234,l,1418,1419
706
#define http_hdr_eq(s1, s2)   http_tok_eq(s1, s2)
707
#define http_hdr_at(s1, s2, l)    http_tok_at(s1, s2, l)
708
709
// rfc7230,l,1952,1952
710
// rfc7231,l,604,604
711
#define http_coding_eq(str, tok)  http_tok_eq(str, #tok)
712
713
// rfc7231,l,1864,1864
714
#define http_expect_eq(str, tok)  http_tok_eq(str, #tok)
715
716
// rfc7233,l,1207,1208
717
#define http_range_at(str, tok, l)  http_tok_at(str, #tok, l)
718
719
/* cache_lck.c */
720
721
/* Internal functions, call only through macros below */
722
void Lck__Lock(struct lock *lck, const char *p,  int l);
723
void Lck__Unlock(struct lock *lck, const char *p,  int l);
724
int Lck__Trylock(struct lock *lck, const char *p,  int l);
725
void Lck__New(struct lock *lck, struct VSC_lck *, const char *);
726
int Lck__Held(const struct lock *lck);
727
int Lck__Owned(const struct lock *lck);
728
extern pthread_mutexattr_t mtxattr_errorcheck;
729
730
/* public interface: */
731
void Lck_Delete(struct lock *lck);
732
int Lck_CondWaitUntil(pthread_cond_t *, struct lock *, vtim_real when);
733
int Lck_CondWait(pthread_cond_t *, struct lock *);
734
int Lck_CondWaitTimeout(pthread_cond_t *, struct lock *, vtim_dur timeout);
735
736
#define Lck_New(a, b) Lck__New(a, b, #b)
737
#define Lck_Lock(a) Lck__Lock(a, __func__, __LINE__)
738
#define Lck_Unlock(a) Lck__Unlock(a, __func__, __LINE__)
739
#define Lck_Trylock(a) Lck__Trylock(a, __func__, __LINE__)
740
#define Lck_AssertHeld(a)   \
741
  do {        \
742
    assert(Lck__Held(a)); \
743
    assert(Lck__Owned(a));  \
744
  } while (0)
745
746
struct VSC_lck *Lck_CreateClass(struct vsc_seg **, const char *);
747
void Lck_DestroyClass(struct vsc_seg **);
748
749
#define LOCK(nam) extern struct VSC_lck *lck_##nam;
750
#include "tbl/locks.h"
751
752
/* cache_obj.c */
753
754
int ObjHasAttr(struct worker *, struct objcore *, enum obj_attr);
755
const void *ObjGetAttr(struct worker *, struct objcore *, enum obj_attr,
756
    ssize_t *len);
757
758
typedef int objiterate_f(void *priv, unsigned flush,
759
    const void *ptr, ssize_t len);
760
#define OBJ_ITER_FLUSH  0x01
761
#define OBJ_ITER_END  0x02
762
763
int ObjIterate(struct worker *, struct objcore *,
764
    void *priv, objiterate_f *func, int final);
765
766
vxid_t ObjGetXID(struct worker *, struct objcore *);
767
uint64_t ObjGetLen(struct worker *, struct objcore *);
768
int ObjGetDouble(struct worker *, struct objcore *, enum obj_attr, double *);
769
int ObjGetU64(struct worker *, struct objcore *, enum obj_attr, uint64_t *);
770
int ObjCheckFlag(struct worker *, struct objcore *, enum obj_flags of);
771
772
/*====================================================================
773
 * ObjVAI...(): Asynchronous Iteration
774
 *
775
 * see comments in cache_obj.c for usage
776
 */
777
778
typedef void *vai_hdl;
779
typedef void vai_notify_cb(vai_hdl, void *priv);
780
781
782
/*
783
 * VSCARAB: Varnish SCatter ARAy of Buffers:
784
 *
785
 * an array of viovs, elsewhere also called an siov or sarray
786
 */
787
struct viov {
788
  uint64_t  lease;
789
  struct iovec  iov;
790
};
791
792
struct vscarab {
793
  unsigned  magic;
794
#define VSCARAB_MAGIC 0x05ca7ab0
795
  unsigned  flags;
796
#define VSCARAB_F_END 1 // last viov is last overall
797
  unsigned  capacity;
798
  unsigned  used;
799
  struct viov s[] v_counted_by_(capacity);
800
};
801
802
// VFLA: starting generic container-with-flexible-array-member macros
803
// aka "struct hack"
804
//
805
// type : struct name
806
// name : a pointer to struct type
807
// mag  : the magic value for this VFLA
808
// cptr : pointer to container struct (aka "head")
809
// fam  : member name of the flexible array member
810
// cap  : capacity
811
//
812
// common properties of all VFLAs:
813
// - are a miniobj (have magic as the first element)
814
// - capacity member is the fam capacity
815
// - used member is the number of fam elements used
816
//
817
// VFLA_SIZE ignores the cap == 0 case, we assert in _INIT
818
// offsetoff ref: https://gustedt.wordpress.com/2011/03/14/flexible-array-member/
819
//lint -emacro(413, VFLA_SIZE)
820
//lint -emacro(545, VFLA_SIZE) bsd offsetof() seems to be using &
821
#define VFLA_SIZE(type, fam, cap) (offsetof(struct type, fam) + \
822
  (cap) * sizeof(((struct type *)0)->fam[0]))
823
#define VFLA_INIT_(type, cptr, mag, fam, cap, save) do {  \
824
  unsigned save = (cap);          \
825
  AN(save);           \
826
  memset((cptr), 0, VFLA_SIZE(type, fam, save));    \
827
  (cptr)->magic = (mag);          \
828
  (cptr)->capacity = (save);        \
829
} while (0)
830
#define VFLA_INIT(type, cptr, mag, fam, cap)      \
831
  VFLA_INIT_(type, cptr, mag, fam, cap, VUNIQ_NAME(save))
832
// declare, allocate and initialize a local VFLA
833
// the additional VLA buf declaration avoids
834
// "Variable-sized object may not be initialized"
835
#define VFLA_LOCAL_(type, name, mag, fam, cap, bufname)       \
836
  char bufname[VFLA_SIZE(type, fam, cap)];        \
837
  struct type *name = (void *)bufname;          \
838
  VFLA_INIT(type, name, mag, fam, cap)
839
#define VFLA_LOCAL(type, name, mag, fam, cap)         \
840
  VFLA_LOCAL_(type, name, mag, fam, cap, VUNIQ_NAME(buf))
841
// malloc and initialize a VFLA
842
#define VFLA_ALLOC(type, name, mag, fam, cap) do {      \
843
  (name) = malloc(VFLA_SIZE(type, fam, cap));     \
844
  if ((name) != NULL)           \
845
    VFLA_INIT(type, name, mag, fam, cap);     \
846
} while(0)
847
#define VFLA_FOREACH(var, cptr, fam)            \
848
  for (var = &(cptr)->fam[0];           \
849
       (var = (var < &(cptr)->fam[(cptr)->used] ? var : NULL)) != NULL; \
850
       var++)
851
// continue iterating after a break of a _FOREACH
852
#define VFLA_FOREACH_RESUME(var, cptr, fam)         \
853
  for (;                  \
854
       var != NULL &&             \
855
         (var = (var < &(cptr)->fam[(cptr)->used] ? var : NULL)) != NULL; \
856
       var++)
857
#define VFLA_GET(cptr, fam) ((cptr)->used < (cptr)->capacity ? &(cptr)->fam[(cptr)->used++] : NULL)
858
// asserts sufficient capacity
859
#define VFLA_ADD(cptr, fam, val) do {           \
860
  assert((cptr)->used < (cptr)->capacity);        \
861
  (cptr)->fam[(cptr)->used++] = (val);          \
862
} while(0)
863
864
#define VSCARAB_SIZE(cap) VFLA_SIZE(vscarab, s, cap)
865
#define VSCARAB_INIT(scarab, cap) VFLA_INIT(vscarab, scarab, VSCARAB_MAGIC, s, cap)
866
#define VSCARAB_LOCAL(scarab, cap) VFLA_LOCAL(vscarab, scarab, VSCARAB_MAGIC, s, cap)
867
#define VSCARAB_ALLOC(scarab, cap) VFLA_ALLOC(vscarab, scarab, VSCARAB_MAGIC, s, cap)
868
#define VSCARAB_FOREACH(var, scarab) VFLA_FOREACH(var, scarab, s)
869
#define VSCARAB_FOREACH_RESUME(var, scarab) VFLA_FOREACH_RESUME(var, scarab, s)
870
#define VSCARAB_GET(scarab) VFLA_GET(scarab, s)
871
#define VSCARAB_ADD(scarab, val) VFLA_ADD(scarab, s, val)
872
//lint -emacro(64, VSCARAB_ADD_IOV_NORET) weird flexelint bug?
873
#define VSCARAB_ADD_IOV_NORET(scarab, vec)          \
874
  VSCARAB_ADD(scarab, ((struct viov){.lease = VAI_LEASE_NORET, .iov = (vec)}))
875
#define VSCARAB_LAST(scarab) ((scarab)->used > 0 ?        \
876
  &(scarab)->s[(scarab)->used - 1] : NULL)
877
878
#define VSCARAB_CHECK(scarab) do {            \
879
  CHECK_OBJ(scarab, VSCARAB_MAGIC);         \
880
  assert((scarab)->used <= (scarab)->capacity);       \
881
} while(0)
882
883
#define VSCARAB_CHECK_NOTNULL(scarab) do {          \
884
  AN(scarab);               \
885
  VSCARAB_CHECK(scarab);              \
886
} while(0)
887
888
/*
889
 * VSCARET: Varnish SCatter Array Return
890
 *
891
 * an array of leases obtained from a vscarab
892
 */
893
894
struct vscaret {
895
  unsigned  magic;
896
#define VSCARET_MAGIC 0x9c1f3d7b
897
  unsigned  capacity;
898
  unsigned  used;
899
  uint64_t  lease[] v_counted_by_(capacity);
900
};
901
902
#define VSCARET_SIZE(cap) VFLA_SIZE(vscaret, lease, cap)
903
#define VSCARET_INIT(scaret, cap) VFLA_INIT(vscaret, scaret, VSCARET_MAGIC, lease, cap)
904
#define VSCARET_LOCAL(scaret, cap) VFLA_LOCAL(vscaret, scaret, VSCARET_MAGIC, lease, cap)
905
#define VSCARET_ALLOC(scaret, cap) VFLA_ALLOC(vscaret, scaret, VSCARET_MAGIC, lease, cap)
906
#define VSCARET_FOREACH(var, scaret) VFLA_FOREACH(var, scaret, lease)
907
#define VSCARET_GET(scaret) VFLA_GET(scaret, lease)
908
#define VSCARET_ADD(scaret, val) VFLA_ADD(scaret, lease, val)
909
910
#define VSCARET_CHECK(scaret) do {            \
911
  CHECK_OBJ(scaret, VSCARET_MAGIC);         \
912
  assert(scaret->used <= scaret->capacity);       \
913
} while(0)
914
915
#define VSCARET_CHECK_NOTNULL(scaret) do {          \
916
  AN(scaret);               \
917
  VSCARET_CHECK(scaret);              \
918
} while(0)
919
920
/*
921
 * VSCARABs can contain leases which are not to be returned to storage, for
922
 * example static data or fragments of larger leases to be returned later. For
923
 * these cases, use this magic value as the lease. This is deliberately not 0 to
924
 * catch oversights.
925
 */
926
#define VAI_LEASE_NORET ((uint64_t)0x8)
927
928
vai_hdl ObjVAIinit(struct worker *, struct objcore *, struct ws *,
929
    vai_notify_cb *, void *);
930
int ObjVAIlease(struct worker *, vai_hdl, struct vscarab *);
931
int ObjVAIbuffer(struct worker *, vai_hdl, struct vscarab *);
932
void ObjVAIreturn(struct worker *, vai_hdl, struct vscaret *);
933
void ObjVAIfini(struct worker *, vai_hdl *);
934
935
/* cache_req_body.c */
936
ssize_t VRB_Iterate(struct worker *, struct vsl_log *, struct req *,
937
    objiterate_f *func, void *priv);
938
939
/* cache_session.c [SES] */
940
941
#define SESS_ATTR(UP, low, typ, len)          \
942
  int SES_Get_##low(const struct sess *sp, typ **dst);
943
#include "tbl/sess_attr.h"
944
const char *SES_Get_String_Attr(const struct sess *sp, enum sess_attr a);
945
946
/* cache_shmlog.c */

/* Logging keyed directly by transaction id (vxid) */
void VSLv(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, va_list va);
void VSL(enum VSL_tag_e tag, vxid_t vxid, const char *fmt, ...)
    v_printflike_(3, 4);
void VSLs(enum VSL_tag_e tag, vxid_t vxid, const struct strands *s);
/* Logging into a struct vsl_log (the "b" variants) */
void VSLbv(struct vsl_log *, enum VSL_tag_e tag, const char *fmt, va_list va);
void VSLb(struct vsl_log *, enum VSL_tag_e tag, const char *fmt, ...)
    v_printflike_(3, 4);
void VSLbt(struct vsl_log *, enum VSL_tag_e tag, txt t);
void VSLbs(struct vsl_log *, enum VSL_tag_e tag, const struct strands *s);
/*
 * Emit a timestamp record for "event", relative to "first" and "*pprev";
 * updates *pprev to "now" -- NOTE(review): update semantics per
 * cache_shmlog.c, confirm there.
 */
void VSLb_ts(struct vsl_log *, const char *event, vtim_real first,
    vtim_real *pprev, vtim_real now);
void VSLb_bin(struct vsl_log *, enum VSL_tag_e, ssize_t, const void*);
int VSL_tag_is_masked(enum VSL_tag_e tag);
static inline void
962
VSLb_ts_req(struct req *req, const char *event, vtim_real now)
963
0
{
964
0
965
0
  if (isnan(req->t_first) || req->t_first == 0.)
966
0
    req->t_first = req->t_prev = now;
967
0
  VSLb_ts(req->vsl, event, req->t_first, &req->t_prev, now);
968
0
}
Unexecuted instantiation: cache_ws_emu.c:VSLb_ts_req
Unexecuted instantiation: cache_ws_common.c:VSLb_ts_req
Unexecuted instantiation: cache_esi_parse.c:VSLb_ts_req
Unexecuted instantiation: esi_parse_fuzzer.c:VSLb_ts_req
969
970
static inline void
971
VSLb_ts_busyobj(struct busyobj *bo, const char *event, vtim_real now)
972
0
{
973
0
974
0
  if (isnan(bo->t_first) || bo->t_first == 0.)
975
0
    bo->t_first = bo->t_prev = now;
976
0
  VSLb_ts(bo->vsl, event, bo->t_first, &bo->t_prev, now);
977
0
}
Unexecuted instantiation: cache_ws_emu.c:VSLb_ts_busyobj
Unexecuted instantiation: cache_ws_common.c:VSLb_ts_busyobj
Unexecuted instantiation: cache_esi_parse.c:VSLb_ts_busyobj
Unexecuted instantiation: esi_parse_fuzzer.c:VSLb_ts_busyobj
978
979
/* cache_vcl.c */

/* Name of the given VCL instance. */
const char *VCL_Name(const struct vcl *);
/* cache_wrk.c */

/* Background thread entry point: receives a worker and its private data. */
typedef void *bgthread_t(struct worker *, void *priv);
/* Start a named background thread running func(priv); thread id in *thr. */
void WRK_BgThread(pthread_t *thr, const char *name, bgthread_t *func,
    void *priv);
/* cache_ws.c */

void WS_Init(struct ws *ws, const char *id, void *space, unsigned len);

/* Open a reservation of the given size (or all remaining space). */
unsigned WS_ReserveSize(struct ws *, unsigned);
unsigned WS_ReserveAll(struct ws *);
/* Close the open reservation, keeping "bytes" (or up to "ptr"). */
void WS_Release(struct ws *ws, unsigned bytes);
void WS_ReleaseP(struct ws *ws, const char *ptr);
void WS_Assert(const struct ws *ws);
/* Roll the workspace back to a point taken with WS_Snapshot(). */
void WS_Reset(struct ws *ws, uintptr_t);
void *WS_Alloc(struct ws *ws, unsigned bytes);
void *WS_Copy(struct ws *ws, const void *str, int len);
uintptr_t WS_Snapshot(struct ws *ws);
/* Non-zero if [ptr, ptr+len) lies within this workspace's allocations. */
int WS_Allocated(const struct ws *ws, const void *ptr, ssize_t len);
unsigned WS_Dump(const struct ws *ws, char, size_t off, void *buf, size_t len);
static inline void *
1004
WS_Reservation(const struct ws *ws)
1005
0
{
1006
1007
0
  WS_Assert(ws);
1008
0
  AN(ws->r);
1009
0
  AN(ws->f);
1010
0
  return (ws->f);
1011
0
}
Unexecuted instantiation: cache_ws_emu.c:WS_Reservation
Unexecuted instantiation: cache_ws_common.c:WS_Reservation
Unexecuted instantiation: cache_esi_parse.c:WS_Reservation
Unexecuted instantiation: esi_parse_fuzzer.c:WS_Reservation
1012
1013
static inline unsigned
1014
WS_ReservationSize(const struct ws *ws)
1015
0
{
1016
0
1017
0
  AN(ws->r);
1018
0
  return (ws->r - ws->f);
1019
0
}
Unexecuted instantiation: cache_ws_emu.c:WS_ReservationSize
Unexecuted instantiation: cache_ws_common.c:WS_ReservationSize
Unexecuted instantiation: cache_esi_parse.c:WS_ReservationSize
Unexecuted instantiation: esi_parse_fuzzer.c:WS_ReservationSize
1020
1021
static inline unsigned
1022
WS_ReserveLumps(struct ws *ws, size_t sz)
1023
0
{
1024
0
1025
0
  AN(sz);
1026
0
  return (WS_ReserveAll(ws) / sz);
1027
0
}
Unexecuted instantiation: cache_ws_emu.c:WS_ReserveLumps
Unexecuted instantiation: cache_ws_common.c:WS_ReserveLumps
Unexecuted instantiation: cache_esi_parse.c:WS_ReserveLumps
Unexecuted instantiation: esi_parse_fuzzer.c:WS_ReserveLumps
1028
1029
/* cache_ws_common.c */
void WS_MarkOverflow(struct ws *ws);
int WS_Overflowed(const struct ws *ws);

/* printf into workspace-allocated storage; format checked at compile time. */
const char *WS_Printf(struct ws *ws, const char *fmt, ...) v_printflike_(2, 3);

/*
 * Build a VSB backed by workspace; WS_VSB_finish() returns the finished
 * string (length optionally in *sz -- NOTE(review): confirm out-param
 * semantics against cache_ws_common.c).
 */
void WS_VSB_new(struct vsb *, struct ws *);
char *WS_VSB_finish(struct vsb *, struct ws *, size_t *);
/* WS utility */

/*
 * Allocate an object of the pointed-to type from the ctx workspace and
 * INIT_OBJ() it with "magic"; on workspace exhaustion the VCL transaction
 * is failed via VRT_fail() and ptr is left NULL -- callers must check.
 */
#define WS_TASK_ALLOC_OBJ(ctx, ptr, magic) do {			\
	ptr = WS_Alloc((ctx)->ws, sizeof *(ptr));		\
	if ((ptr) == NULL)					\
		VRT_fail(ctx, "Out of workspace for " #magic);	\
	else							\
		INIT_OBJ(ptr, magic);				\
} while(0)
/* cache_rfc2616.c -- HTTP caching-rule helpers (named for RFC 2616) */

/* Compute ttl/grace/keep and origin time for a fetched object. */
void RFC2616_Ttl(struct busyobj *, vtim_real now, vtim_real *t_origin,
    float *ttl, float *grace, float *keep);
unsigned RFC2616_Req_Gzip(const struct http *);
int RFC2616_Do_Cond(const struct req *sp);
void RFC2616_Weaken_Etag(struct http *hp);
void RFC2616_Vary_AE(struct http *hp);
const char * RFC2616_Strong_LM(const struct http *hp, struct worker *wrk,
    struct objcore *oc);
/*
 * We want to cache the most recent timestamp in wrk->lastused to avoid
 * extra timestamps in cache_pool.c.  Hide this detail with a macro.
 * The assignment expression also yields the fresh timestamp as the
 * macro's value.
 */
#define W_TIM_real(w) ((w)->lastused = VTIM_real())