Coverage Report

Created: 2025-11-24 06:16

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/haproxy/src/dns_ring.c
Line
Count
Source
1
/*
2
 * Ring buffer management
3
 * This is a fork of ring.c for DNS usage.
4
 *
5
 * Copyright (C) 2000-2019 Willy Tarreau - w@1wt.eu
6
 *
7
 * This library is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation, version 2.1
10
 * exclusively.
11
 *
12
 * This library is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with this library; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
20
 */
21
22
#include <stdlib.h>
23
#include <haproxy/api.h>
24
#include <haproxy/applet.h>
25
#include <haproxy/buf.h>
26
#include <haproxy/cli.h>
27
#include <haproxy/dns_ring.h>
28
#include <haproxy/sc_strm.h>
29
#include <haproxy/stconn.h>
30
#include <haproxy/thread.h>
31
32
/* Initialize a pre-allocated ring with the buffer area
33
 * of size */
34
void dns_ring_init(struct dns_ring *ring, void *area, size_t size)
35
0
{
36
0
  HA_RWLOCK_INIT(&ring->lock);
37
0
  MT_LIST_INIT(&ring->waiters);
38
0
  ring->readers_count = 0;
39
0
  ring->buf = b_make(area, size, 0, 0);
40
  /* write the initial RC byte */
41
0
  b_putchr(&ring->buf, 0);
42
0
}
43
44
/* Creates and returns a ring buffer of size <size> bytes. Returns NULL on
45
 * allocation failure.
46
 */
47
struct dns_ring *dns_ring_new(size_t size)
48
0
{
49
0
  struct dns_ring *ring = NULL;
50
0
  void *area = NULL;
51
52
0
  if (size < 2)
53
0
    goto fail;
54
55
0
  ring = malloc(sizeof(*ring));
56
0
  if (!ring)
57
0
    goto fail;
58
59
0
  area = malloc(size);
60
0
  if (!area)
61
0
    goto fail;
62
63
0
  dns_ring_init(ring, area, size);
64
0
  return ring;
65
0
 fail:
66
0
  free(area);
67
0
  free(ring);
68
0
  return NULL;
69
0
}
70
71
/* destroys and frees ring <ring> */
72
void dns_ring_free(struct dns_ring *ring)
73
0
{
74
0
  if (!ring)
75
0
    return;
76
77
0
  free(ring->buf.area);
78
0
  free(ring);
79
0
}
80
81
/* Tries to send <npfx> parts from <prefix> followed by <nmsg> parts from <msg>
 * to ring <ring>. The message is sent atomically. It may be truncated to
 * <maxlen> bytes if <maxlen> is non-null. There is no distinction between the
 * two lists, it's just a convenience to help the caller prepend some prefixes
 * when necessary. It takes the ring's write lock to make sure no other thread
 * will touch the buffer during the update. Returns the number of bytes sent,
 * or <=0 on failure.
 *
 * Wire format inside the buffer: each record is a varint-encoded payload
 * length followed by the payload itself, and every record boundary carries
 * one extra "reader count" byte that tracks how many readers are parked
 * there (0 = none, so the record may be recycled).
 */
ssize_t dns_ring_write(struct dns_ring *ring, size_t maxlen, const struct ist pfx[], size_t npfx, const struct ist msg[], size_t nmsg)
{
	struct buffer *buf = &ring->buf;
	struct appctx *appctx;
	size_t totlen = 0;
	size_t lenlen;
	uint64_t dellen;
	int dellenlen;
	struct mt_list back;
	ssize_t sent = 0;
	int i;

	/* we have to find some room to add our message (the buffer is
	 * never empty and at least contains the previous counter) and
	 * to update both the buffer contents and heads at the same
	 * time (it's doable using atomic ops but not worth the
	 * trouble, let's just lock). For this we first need to know
	 * the total message's length. We cannot measure it while
	 * copying due to the varint encoding of the length.
	 */
	for (i = 0; i < npfx; i++)
		totlen += pfx[i].len;
	for (i = 0; i < nmsg; i++)
		totlen += msg[i].len;

	/* truncation happens here; the same cap is re-applied below while
	 * copying each fragment.
	 */
	if (totlen > maxlen)
		totlen = maxlen;

	lenlen = varint_bytes(totlen);

	HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
	/* +1 for the payload's trailing RC byte, +1 for the RC byte that
	 * must always remain at the head; a message larger than this can
	 * never fit, whatever we evict.
	 */
	if (lenlen + totlen + 1 + 1 > b_size(buf))
		goto done_buf;

	while (b_room(buf) < lenlen + totlen + 1) {
		/* we need to delete the oldest message (from the end),
		 * and we have to stop if there's a reader stuck there.
		 * Unless there's corruption in the buffer it's guaranteed
		 * that we have enough data to find 1 counter byte, a
		 * varint-encoded length (1 byte min) and the message
		 * payload (0 bytes min).
		 */
		if (*b_head(buf))
			goto done_buf;
		dellenlen = b_peek_varint(buf, 1, &dellen);
		if (!dellenlen)
			goto done_buf;
		BUG_ON(b_data(buf) < 1 + dellenlen + dellen);

		b_del(buf, 1 + dellenlen + dellen);
	}

	/* OK now we do have room */
	__b_put_varint(buf, totlen);

	totlen = 0;
	for (i = 0; i < npfx; i++) {
		size_t len = pfx[i].len;

		if (len + totlen > maxlen)
			len = maxlen - totlen;
		if (len)
			__b_putblk(buf, pfx[i].ptr, len);
		totlen += len;
	}

	for (i = 0; i < nmsg; i++) {
		size_t len = msg[i].len;

		if (len + totlen > maxlen)
			len = maxlen - totlen;
		if (len)
			__b_putblk(buf, msg[i].ptr, len);
		totlen += len;
	}

	*b_tail(buf) = 0; buf->data++; // new read counter
	sent = lenlen + totlen + 1;

	/* notify potential readers */
	MT_LIST_FOR_EACH_ENTRY_LOCKED(appctx, &ring->waiters, wait_entry, back)
		appctx_wakeup(appctx);

 done_buf:
	HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
	return sent;
}
176
177
/* Tries to attach appctx <appctx> as a new reader on ring <ring>. This is
178
 * meant to be used by low level appctx code such as CLI or ring forwarding.
179
 * For higher level functions, please see the relevant parts in appctx or CLI.
180
 * It returns non-zero on success or zero on failure if too many users are
181
 * already attached. On success, the caller MUST call dns_ring_detach_appctx()
182
 * to detach itself, even if it was never woken up.
183
 */
184
int dns_ring_attach(struct dns_ring *ring)
185
0
{
186
0
  int users = ring->readers_count;
187
188
0
  do {
189
0
    if (users >= 255)
190
0
      return 0;
191
0
  } while (!_HA_ATOMIC_CAS(&ring->readers_count, &users, users + 1));
192
0
  return 1;
193
0
}
194
195
/* detach an appctx from a ring. The appctx is expected to be waiting at offset
 * <ofs> relative to the beginning of the storage, or ~0 if not waiting yet.
 * Nothing is done if <ring> is NULL.
 */
void dns_ring_detach_appctx(struct dns_ring *ring, struct appctx *appctx, size_t ofs)
{
	if (!ring)
		return;

	HA_RWLOCK_WRLOCK(RING_LOCK, &ring->lock);
	if (ofs != ~0) {
		/* reader was still attached */
		/* convert the absolute storage offset into an offset relative
		 * to the current head, wrapping around the end of the area
		 * when the stored offset precedes the head.
		 */
		if (ofs < b_head_ofs(&ring->buf))
			ofs += b_size(&ring->buf) - b_head_ofs(&ring->buf);
		else
			ofs -= b_head_ofs(&ring->buf);

		BUG_ON(ofs >= b_size(&ring->buf));
		MT_LIST_DELETE(&appctx->wait_entry);
		/* release our slot in the in-buffer reader-count byte the
		 * appctx was parked on, so the record can be recycled.
		 */
		HA_ATOMIC_DEC(b_peek(&ring->buf, ofs));
	}
	HA_ATOMIC_DEC(&ring->readers_count);
	HA_RWLOCK_WRUNLOCK(RING_LOCK, &ring->lock);
}
219
220
/*
221
 * Local variables:
222
 *  c-indent-level: 8
223
 *  c-basic-offset: 8
224
 * End:
225
 */