Coverage Report

Created: 2026-02-26 06:20

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/ntp-dev/libntp/recvbuff.c
Line
Count
Source
1
#ifdef HAVE_CONFIG_H
2
# include <config.h>
3
#endif
4
5
#include <stdio.h>
6
7
#include "ntp_assert.h"
8
#include "ntp_syslog.h"
9
#include "ntp_stdlib.h"
10
#include "ntp_lists.h"
11
#include "recvbuff.h"
12
#include "iosignal.h"
13
14
#if (RECV_INC & (RECV_INC-1))
15
# error RECV_INC not a power of 2!
16
#endif
17
#if (RECV_BATCH & (RECV_BATCH - 1))
18
#error RECV_BATCH not a power of 2!
19
#endif
20
#if (RECV_BATCH < RECV_INC)
21
#error RECV_BATCH must be >= RECV_INC!
22
#endif
23
24
/*
25
 * Memory allocation
26
 */
27
static u_long volatile full_recvbufs; /* recvbufs on full_recv_fifo */
28
static u_long volatile free_recvbufs; /* recvbufs on free_recv_list */
29
static u_long volatile total_recvbufs;  /* total recvbufs currently in use */
30
static u_long volatile lowater_adds;  /* number of times we have added memory */
31
static u_long volatile buffer_shortfall;/* number of missed free receive buffers
32
             between replenishments */
33
static u_long limit_recvbufs;   /* maximum total of receive buffers */
34
static u_long emerg_recvbufs;   /* emergency/urgent buffers to keep */
35
36
static DECL_FIFO_ANCHOR(recvbuf_t) full_recv_fifo;
37
static recvbuf_t *       free_recv_list;
38
  
39
#if defined(SYS_WINNT)
40
41
/*
42
 * For Windows we need to set up a lock to manipulate the
43
 * recv buffers to prevent corruption. We keep it locked for as
44
 * short a time as possible
45
 */
46
static CRITICAL_SECTION RecvLock;
47
static CRITICAL_SECTION FreeLock;
48
# define LOCK_R() EnterCriticalSection(&RecvLock)
49
# define UNLOCK_R() LeaveCriticalSection(&RecvLock)
50
# define LOCK_F() EnterCriticalSection(&FreeLock)
51
# define UNLOCK_F() LeaveCriticalSection(&FreeLock)
52
#else
53
0
# define LOCK_R() do {} while (FALSE)
54
0
# define UNLOCK_R() do {} while (FALSE)
55
0
# define LOCK_F() do {} while (FALSE)
56
0
# define UNLOCK_F() do {} while (FALSE)
57
#endif
58
59
#ifdef DEBUG
60
static void uninit_recvbuff(void);
61
#endif
62
63
64
u_long
65
free_recvbuffs (void)
66
0
{
67
0
  return free_recvbufs;
68
0
}
69
70
u_long
71
full_recvbuffs (void)
72
0
{
73
0
  return full_recvbufs;
74
0
}
75
76
u_long
77
total_recvbuffs (void)
78
0
{
79
0
  return total_recvbufs;
80
0
}
81
82
u_long
83
lowater_additions(void)
84
0
{
85
0
  return lowater_adds;
86
0
}
87
88
static inline void 
89
initialise_buffer(recvbuf_t *buff)
90
0
{
91
0
  ZERO(*buff);
92
0
}
93
94
static void
95
create_buffers(
96
  size_t      nbufs
97
)
98
1
{
99
1
  static const u_int  chunk =
100
#   ifndef DEBUG
101
          RECV_INC;
102
#   else
103
  /* Allocate each buffer individually so they can be free()d
104
   * during ntpd shutdown on DEBUG builds to keep them out of heap
105
   * leak reports.
106
   */
107
1
          1;
108
1
#   endif
109
1
  static int/*BOOL*/  doneonce;
110
1
  recvbuf_t *   bufp;
111
1
  u_int     i;
112
1
  size_t      abuf;
113
114
  /*[bug 3666]: followup -- reset shortfalls in all cases */
115
1
  abuf = nbufs + buffer_shortfall;
116
1
  buffer_shortfall = 0;
117
118
1
  if (limit_recvbufs <= total_recvbufs) {
119
0
    if (!doneonce) {
120
0
      msyslog(LOG_CRIT, "Unable to allocate receive"
121
0
            " buffer, %lu/%lu",
122
0
        total_recvbufs, limit_recvbufs);
123
0
      doneonce = TRUE;
124
0
    }
125
0
    return;
126
0
  }
127
128
1
  if (abuf < nbufs || abuf > RECV_BATCH) {
129
0
    abuf = RECV_BATCH; /* clamp on overflow */
130
1
  } else {
131
1
    abuf += (~abuf + 1) & (RECV_INC - 1);  /* round up */
132
1
  }
133
1
  if (abuf > (limit_recvbufs - total_recvbufs)) {
134
0
    abuf = limit_recvbufs - total_recvbufs;
135
0
  }
136
1
  abuf += (~abuf + 1) & (chunk - 1);    /* round up */
137
  
138
65
  while (abuf) {
139
64
    bufp = calloc(chunk, sizeof(*bufp));
140
64
    if (!bufp) {
141
0
      msyslog(LOG_CRIT, "Out of memory, allocating "
142
0
            "%u recvbufs, %lu bytes",
143
0
        chunk, (u_long)sizeof(*bufp) * chunk);
144
0
      limit_recvbufs = total_recvbufs;
145
0
      break;
146
0
    }
147
128
    for (i = chunk; i; --i,++bufp) {
148
64
      LINK_SLIST(free_recv_list, bufp, link);
149
64
    }
150
64
    free_recvbufs += chunk;
151
64
    total_recvbufs += chunk;
152
64
    abuf -= chunk;
153
64
  }
154
1
  ++lowater_adds;
155
1
}
156
157
void
158
init_recvbuff(int nbufs)
159
1
{
160
161
  /*
162
   * Init buffer free list and stat counters
163
   */
164
1
  free_recvbufs = total_recvbufs = 0;
165
1
  full_recvbufs = lowater_adds = 0;
166
167
1
  limit_recvbufs = RECV_TOOMANY;
168
1
  emerg_recvbufs = RECV_CLOCK;
169
170
1
  create_buffers(nbufs);
171
172
#   if defined(SYS_WINNT)
173
  InitializeCriticalSection(&RecvLock);
174
  InitializeCriticalSection(&FreeLock);
175
#   endif
176
177
1
#   ifdef DEBUG
178
1
  atexit(&uninit_recvbuff);
179
1
#   endif
180
1
}
181
182
183
#ifdef DEBUG
184
static void
185
uninit_recvbuff(void)
186
1
{
187
1
  recvbuf_t *rbunlinked;
188
189
1
  for (;;) {
190
1
    UNLINK_FIFO(rbunlinked, full_recv_fifo, link);
191
1
    if (rbunlinked == NULL)
192
1
      break;
193
0
    free(rbunlinked);
194
0
  }
195
196
65
  for (;;) {
197
65
    UNLINK_HEAD_SLIST(rbunlinked, free_recv_list, link);
198
65
    if (rbunlinked == NULL)
199
1
      break;
200
64
    free(rbunlinked);
201
64
  }
202
#   if defined(SYS_WINNT)
203
  DeleteCriticalSection(&FreeLock);
204
  DeleteCriticalSection(&RecvLock);
205
#   endif
206
1
}
207
#endif  /* DEBUG */
208
209
210
/*
211
 * freerecvbuf - make a single recvbuf available for reuse
212
 */
213
void
214
freerecvbuf(recvbuf_t *rb)
215
0
{
216
0
  if (rb) {
217
0
    if (--rb->used != 0) {
218
0
      msyslog(LOG_ERR, "******** freerecvbuff non-zero usage: %d *******", rb->used);
219
0
      rb->used = 0;
220
0
    }
221
0
    LOCK_F();
222
0
    LINK_SLIST(free_recv_list, rb, link);
223
0
    ++free_recvbufs;
224
0
    UNLOCK_F();
225
0
  }
226
0
}
227
228
  
229
void
230
add_full_recv_buffer(recvbuf_t *rb)
231
0
{
232
0
  if (rb == NULL) {
233
0
    msyslog(LOG_ERR, "add_full_recv_buffer received NULL buffer");
234
0
    return;
235
0
  }
236
0
  LOCK_R();
237
0
  LINK_FIFO(full_recv_fifo, rb, link);
238
0
  ++full_recvbufs;
239
0
  UNLOCK_R();
240
0
}
241
242
243
recvbuf_t *
244
get_free_recv_buffer(
245
    int /*BOOL*/ urgent
246
    )
247
0
{
248
0
  recvbuf_t *buffer = NULL;
249
250
0
  LOCK_F();
251
0
  if (free_recvbufs > (urgent ? 0 : emerg_recvbufs)) {
252
0
    UNLINK_HEAD_SLIST(buffer, free_recv_list, link);
253
0
  }
254
  
255
0
  if (buffer != NULL) {
256
0
    if (free_recvbufs)
257
0
      --free_recvbufs;
258
0
    initialise_buffer(buffer);
259
0
    ++buffer->used;
260
0
  } else {
261
0
    ++buffer_shortfall;
262
0
  }
263
0
  UNLOCK_F();
264
265
0
  return buffer;
266
0
}
267
268
269
#ifdef HAVE_IO_COMPLETION_PORT
270
recvbuf_t *
271
get_free_recv_buffer_alloc(
272
    int /*BOOL*/ urgent
273
    )
274
{
275
  LOCK_F(); 
276
  if (free_recvbufs <= emerg_recvbufs || buffer_shortfall > 0)
277
    create_buffers(RECV_INC);
278
  UNLOCK_F();
279
  return get_free_recv_buffer(urgent);
280
}
281
#endif
282
283
284
recvbuf_t *
285
get_full_recv_buffer(void)
286
0
{
287
0
  recvbuf_t * rbuf;
288
289
  /*
290
   * make sure there are free buffers when we wander off to do
291
   * lengthy packet processing with any buffer we grab from the
292
   * full list.
293
   * 
294
   * fixes malloc() interrupted by SIGIO risk (Bug 889)
295
   */
296
0
  LOCK_F(); 
297
0
  if (free_recvbufs <= emerg_recvbufs || buffer_shortfall > 0)
298
0
    create_buffers(RECV_INC);
299
0
  UNLOCK_F();
300
301
  /*
302
   * try to grab a full buffer
303
   */
304
0
  LOCK_R();
305
0
  UNLINK_FIFO(rbuf, full_recv_fifo, link);
306
0
  if (rbuf != NULL && full_recvbufs)
307
0
    --full_recvbufs;
308
0
  UNLOCK_R();
309
310
0
  return rbuf;
311
0
}
312
313
314
/*
315
 * purge_recv_buffers_for_fd() - purges any previously-received input
316
 *         from a given file descriptor.
317
 */
318
void
319
purge_recv_buffers_for_fd(
320
  int fd
321
  )
322
0
{
323
0
  recvbuf_t *rbufp;
324
0
  recvbuf_t *next;
325
0
  recvbuf_t *punlinked;
326
0
  recvbuf_t *freelist = NULL;
327
328
  /* We want to hold only one lock at a time. So we do a scan on
329
   * the full buffer queue, collecting items as we go, and when
330
   * done we spool the collected items to 'freerecvbuf()'.
331
   */
332
0
  LOCK_R();
333
334
0
  for (rbufp = HEAD_FIFO(full_recv_fifo);
335
0
       rbufp != NULL;
336
0
       rbufp = next)
337
0
  {
338
0
    next = rbufp->link;
339
#     ifdef HAVE_IO_COMPLETION_PORT
340
    if (rbufp->dstadr == NULL && rbufp->fd == fd)
341
#     else
342
0
    if (rbufp->fd == fd)
343
0
#     endif
344
0
    {
345
0
      UNLINK_MID_FIFO(punlinked, full_recv_fifo,
346
0
          rbufp, link, recvbuf_t);
347
0
      INSIST(punlinked == rbufp);
348
0
      if (full_recvbufs)
349
0
        --full_recvbufs;
350
0
      rbufp->link = freelist;
351
0
      freelist = rbufp;
352
0
    }
353
0
  }
354
355
0
  UNLOCK_R();
356
  
357
0
  while (freelist) {
358
0
    next = freelist->link;
359
0
    freerecvbuf(freelist);
360
0
    freelist = next;
361
0
  }
362
0
}
363
364
365
/*
366
 * Checks to see if there are buffers to process
367
 */
368
isc_boolean_t has_full_recv_buffer(void)
369
0
{
370
0
  if (HEAD_FIFO(full_recv_fifo) != NULL)
371
0
    return (ISC_TRUE);
372
0
  else
373
0
    return (ISC_FALSE);
374
0
}
375
376
377
#ifdef NTP_DEBUG_LISTS_H
378
void
379
check_gen_fifo_consistency(void *fifo)
380
1
{
381
1
  gen_fifo *pf;
382
1
  gen_node *pthis;
383
1
  gen_node **pptail;
384
385
1
  pf = fifo;
386
1
  REQUIRE((NULL == pf->phead && NULL == pf->pptail) ||
387
0
    (NULL != pf->phead && NULL != pf->pptail));
388
389
0
  pptail = &pf->phead;
390
1
  for (pthis = pf->phead;
391
1
       pthis != NULL;
392
1
       pthis = pthis->link)
393
0
    if (NULL != pthis->link)
394
0
      pptail = &pthis->link;
395
396
1
  REQUIRE(NULL == pf->pptail || pptail == pf->pptail);
397
1
}
398
#endif  /* NTP_DEBUG_LISTS_H */