Coverage Report

Created: 2025-12-31 06:20

/src/samba/lib/tsocket/tsocket_helpers.c
Every instrumented line in this file has an execution count of 0: none of the helpers below were exercised by the test run. The source is reproduced here without the per-line Line/Count columns.
/*
   Unix SMB/CIFS implementation.

   Copyright (C) Stefan Metzmacher 2009

     ** NOTE! The following LGPL license applies to the tsocket
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/

#include "replace.h"
#include "system/filesys.h"
#include "tsocket.h"
#include "tsocket_internal.h"

struct tdgram_sendto_queue_state {
	/* these members are owned by the caller */
	struct {
		struct tevent_context *ev;
		struct tdgram_context *dgram;
		const uint8_t *buf;
		size_t len;
		const struct tsocket_address *dst;
	} caller;
	ssize_t ret;
};

static void tdgram_sendto_queue_trigger(struct tevent_req *req,
					void *private_data);
static void tdgram_sendto_queue_done(struct tevent_req *subreq);

struct tevent_req *tdgram_sendto_queue_send(TALLOC_CTX *mem_ctx,
					    struct tevent_context *ev,
					    struct tdgram_context *dgram,
					    struct tevent_queue *queue,
					    const uint8_t *buf,
					    size_t len,
					    struct tsocket_address *dst)
{
	struct tevent_req *req;
	struct tdgram_sendto_queue_state *state;
	struct tevent_queue_entry *e;

	req = tevent_req_create(mem_ctx, &state,
				struct tdgram_sendto_queue_state);
	if (!req) {
		return NULL;
	}

	state->caller.ev    = ev;
	state->caller.dgram = dgram;
	state->caller.buf   = buf;
	state->caller.len   = len;
	state->caller.dst   = dst;
	state->ret          = -1;

	/*
	 * we use tevent_queue_add_optimize_empty() with allow_direct
	 * in order to optimize for the empty queue case.
	 */
	e = tevent_queue_add_optimize_empty(
			queue,
			ev,
			req,
			tdgram_sendto_queue_trigger,
			NULL);
	if (tevent_req_nomem(e, req)) {
		return tevent_req_post(req, ev);
	}
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}

static void tdgram_sendto_queue_trigger(struct tevent_req *req,
					void *private_data)
{
	struct tdgram_sendto_queue_state *state = tevent_req_data(req,
					struct tdgram_sendto_queue_state);
	struct tevent_req *subreq;

	subreq = tdgram_sendto_send(state,
				    state->caller.ev,
				    state->caller.dgram,
				    state->caller.buf,
				    state->caller.len,
				    state->caller.dst);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, tdgram_sendto_queue_done, req);
}

static void tdgram_sendto_queue_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
				 struct tevent_req);
	struct tdgram_sendto_queue_state *state = tevent_req_data(req,
					struct tdgram_sendto_queue_state);
	ssize_t ret;
	int sys_errno;

	ret = tdgram_sendto_recv(subreq, &sys_errno);
	talloc_free(subreq);
	if (ret == -1) {
		tevent_req_error(req, sys_errno);
		return;
	}
	state->ret = ret;

	tevent_req_done(req);
}

ssize_t tdgram_sendto_queue_recv(struct tevent_req *req, int *perrno)
{
	struct tdgram_sendto_queue_state *state = tevent_req_data(req,
					struct tdgram_sendto_queue_state);
	ssize_t ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}
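
Editor's note: a minimal caller sketch for the queued sendto helpers above. It assumes the caller already owns a tevent_context, a tdgram_context, a tevent_queue and a destination tsocket_address; the function and callback names (my_send_datagram, my_sendto_done) are invented for illustration, not part of tsocket_helpers.c.

#include <talloc.h>
#include <tevent.h>
#include "tsocket.h"

static void my_sendto_done(struct tevent_req *subreq);

/* Queue one datagram; the queue serializes sends on the same tdgram. */
static bool my_send_datagram(TALLOC_CTX *mem_ctx,
			     struct tevent_context *ev,
			     struct tdgram_context *dgram,
			     struct tevent_queue *queue,
			     const uint8_t *buf, size_t len,
			     struct tsocket_address *dst)
{
	struct tevent_req *subreq;

	subreq = tdgram_sendto_queue_send(mem_ctx, ev, dgram, queue,
					  buf, len, dst);
	if (subreq == NULL) {
		return false;
	}
	tevent_req_set_callback(subreq, my_sendto_done, NULL);
	return true;
}

static void my_sendto_done(struct tevent_req *subreq)
{
	int sys_errno;
	ssize_t nwritten = tdgram_sendto_queue_recv(subreq, &sys_errno);

	TALLOC_FREE(subreq);
	if (nwritten == -1) {
		/* sys_errno holds the errno-style error code */
		return;
	}
	/* nwritten bytes were handed to the underlying tdgram */
}

Note that the buffer and the destination address stay owned by the caller (as the "owned by the caller" comment in the state struct says), so they must remain valid until the callback fires.
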

struct tstream_readv_pdu_state {
	/* these members are owned by the caller */
	struct {
		struct tevent_context *ev;
		struct tstream_context *stream;
		tstream_readv_pdu_next_vector_t next_vector_fn;
		void *next_vector_private;
	} caller;

	/*
	 * Each call to the callback resets vector and count.
	 * The callback allocates the iovec as a child of our state,
	 * which means we are allowed to modify and free it.
	 *
	 * We call the callback again every time we have filled the
	 * given vector, to ask for a new one. We are done once the
	 * callback asks for 0 bytes.
	 */
	struct iovec *vector;
	size_t count;

	/*
	 * the total number of bytes we read,
	 * the return value of the _recv function
	 */
	int total_read;
};

static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req);
static void tstream_readv_pdu_readv_done(struct tevent_req *subreq);

struct tevent_req *tstream_readv_pdu_send(TALLOC_CTX *mem_ctx,
				struct tevent_context *ev,
				struct tstream_context *stream,
				tstream_readv_pdu_next_vector_t next_vector_fn,
				void *next_vector_private)
{
	struct tevent_req *req;
	struct tstream_readv_pdu_state *state;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_readv_pdu_state);
	if (!req) {
		return NULL;
	}

	state->caller.ev                  = ev;
	state->caller.stream              = stream;
	state->caller.next_vector_fn      = next_vector_fn;
	state->caller.next_vector_private = next_vector_private;

	state->vector     = NULL;
	state->count      = 0;
	state->total_read = 0;

	tstream_readv_pdu_ask_for_next_vector(req);
	if (!tevent_req_is_in_progress(req)) {
		goto post;
	}

	return req;

 post:
	return tevent_req_post(req, ev);
}

static void tstream_readv_pdu_ask_for_next_vector(struct tevent_req *req)
{
	struct tstream_readv_pdu_state *state = tevent_req_data(req,
					    struct tstream_readv_pdu_state);
	int ret;
	size_t to_read = 0;
	size_t i;
	struct tevent_req *subreq;
	bool optimize = false;
	bool save_optimize = false;

	if (state->count > 0) {
		/*
		 * This is not the first time we asked for a vector,
		 * which means parts of the pdu already arrived.
		 *
		 * In this case it makes sense to enable
		 * a syscall/performance optimization if the
		 * low level tstream implementation supports it.
		 */
		optimize = true;
	}

	TALLOC_FREE(state->vector);
	state->count = 0;

	ret = state->caller.next_vector_fn(state->caller.stream,
					   state->caller.next_vector_private,
					   state, &state->vector, &state->count);
	if (ret == -1) {
		tevent_req_error(req, errno);
		return;
	}

	if (state->count == 0) {
		tevent_req_done(req);
		return;
	}

	for (i=0; i < state->count; i++) {
		size_t tmp = to_read;
		tmp += state->vector[i].iov_len;

		if (tmp < to_read) {
			tevent_req_error(req, EMSGSIZE);
			return;
		}

		to_read = tmp;
	}

	/*
	 * This is invalid; the next vector function should have
	 * reported count == 0.
	 */
	if (to_read == 0) {
		tevent_req_error(req, EINVAL);
		return;
	}

	if (state->total_read + to_read < state->total_read) {
		tevent_req_error(req, EMSGSIZE);
		return;
	}

	if (optimize) {
		/*
		 * If the low level stream is a bsd socket
		 * we will get syscall optimization.
		 *
		 * If it is not a bsd socket
		 * tstream_bsd_optimize_readv() just returns.
		 */
		save_optimize = tstream_bsd_optimize_readv(state->caller.stream,
							   true);
	}
	subreq = tstream_readv_send(state,
				    state->caller.ev,
				    state->caller.stream,
				    state->vector,
				    state->count);
	if (optimize) {
		tstream_bsd_optimize_readv(state->caller.stream,
					   save_optimize);
	}
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, tstream_readv_pdu_readv_done, req);
}

static void tstream_readv_pdu_readv_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
				 struct tevent_req);
	struct tstream_readv_pdu_state *state = tevent_req_data(req,
					    struct tstream_readv_pdu_state);
	int ret;
	int sys_errno;

	ret = tstream_readv_recv(subreq, &sys_errno);
	TALLOC_FREE(subreq);
	if (ret == -1) {
		tevent_req_error(req, sys_errno);
		return;
	}

	state->total_read += ret;

	/* ask the callback for a new vector we should fill */
	tstream_readv_pdu_ask_for_next_vector(req);
}

int tstream_readv_pdu_recv(struct tevent_req *req, int *perrno)
{
	struct tstream_readv_pdu_state *state = tevent_req_data(req,
					    struct tstream_readv_pdu_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->total_read;
	}

	tevent_req_received(req);
	return ret;
}
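
Editor's note: the next-vector protocol described in the comments above (ask for a header first, then for the body, then report count == 0) is easiest to see with a sketch. The callback below is a hypothetical implementation for a PDU framed as a 4-byte big-endian length followed by a payload; struct my_pdu_parser and all "my_" names are invented for illustration, and the callback signature is taken from the way this file invokes next_vector_fn. It assumes the same talloc/tevent/tsocket headers as the previous sketch.

struct my_pdu_parser {
	enum { MY_PDU_HDR, MY_PDU_BODY, MY_PDU_DONE } phase;
	uint8_t hdr[4];
	uint8_t *payload;
	uint32_t payload_len;
};

static int my_pdu_next_vector(struct tstream_context *stream,
			      void *private_data,
			      TALLOC_CTX *mem_ctx,
			      struct iovec **_vector,
			      size_t *_count)
{
	struct my_pdu_parser *p =
		talloc_get_type_abort(private_data, struct my_pdu_parser);
	struct iovec *vector;

	switch (p->phase) {
	case MY_PDU_HDR:
		/* First call: ask for the 4-byte length header. */
		vector = talloc_array(mem_ctx, struct iovec, 1);
		if (vector == NULL) {
			return -1;
		}
		vector[0].iov_base = p->hdr;
		vector[0].iov_len  = sizeof(p->hdr);
		p->phase = MY_PDU_BODY;
		*_vector = vector;
		*_count = 1;
		return 0;

	case MY_PDU_BODY:
		/* Second call: the header is filled in, ask for the body. */
		p->payload_len = ((uint32_t)p->hdr[0] << 24) |
				 ((uint32_t)p->hdr[1] << 16) |
				 ((uint32_t)p->hdr[2] << 8)  |
				  (uint32_t)p->hdr[3];
		p->phase = MY_PDU_DONE;
		if (p->payload_len == 0) {
			break;	/* empty body, the PDU is already complete */
		}
		p->payload = talloc_array(p, uint8_t, p->payload_len);
		if (p->payload == NULL) {
			return -1;
		}
		vector = talloc_array(mem_ctx, struct iovec, 1);
		if (vector == NULL) {
			return -1;
		}
		vector[0].iov_base = p->payload;
		vector[0].iov_len  = p->payload_len;
		*_vector = vector;
		*_count = 1;
		return 0;

	case MY_PDU_DONE:
		break;
	}

	/* Reporting count == 0 tells the PDU reader the PDU is complete. */
	*_vector = NULL;
	*_count = 0;
	return 0;
}

With a callback like this, tstream_readv_pdu_send() keeps issuing tstream_readv_send() until the callback reports count == 0, and tstream_readv_pdu_recv() then returns the total number of bytes read (header plus payload in this sketch).
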

struct tstream_readv_pdu_queue_state {
	/* these members are owned by the caller */
	struct {
		struct tevent_context *ev;
		struct tstream_context *stream;
		tstream_readv_pdu_next_vector_t next_vector_fn;
		void *next_vector_private;
	} caller;
	int ret;
};

static void tstream_readv_pdu_queue_trigger(struct tevent_req *req,
					    void *private_data);
static void tstream_readv_pdu_queue_done(struct tevent_req *subreq);

struct tevent_req *tstream_readv_pdu_queue_send(TALLOC_CTX *mem_ctx,
				struct tevent_context *ev,
				struct tstream_context *stream,
				struct tevent_queue *queue,
				tstream_readv_pdu_next_vector_t next_vector_fn,
				void *next_vector_private)
{
	struct tevent_req *req;
	struct tstream_readv_pdu_queue_state *state;
	struct tevent_queue_entry *e;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_readv_pdu_queue_state);
	if (!req) {
		return NULL;
	}

	state->caller.ev                  = ev;
	state->caller.stream              = stream;
	state->caller.next_vector_fn      = next_vector_fn;
	state->caller.next_vector_private = next_vector_private;
	state->ret                        = -1;

	/*
	 * we use tevent_queue_add_optimize_empty() with allow_direct
	 * in order to optimize for the empty queue case.
	 */
	e = tevent_queue_add_optimize_empty(
			queue,
			ev,
			req,
			tstream_readv_pdu_queue_trigger,
			NULL);
	if (tevent_req_nomem(e, req)) {
		return tevent_req_post(req, ev);
	}
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}

static void tstream_readv_pdu_queue_trigger(struct tevent_req *req,
					    void *private_data)
{
	struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
					struct tstream_readv_pdu_queue_state);
	struct tevent_req *subreq;

	subreq = tstream_readv_pdu_send(state,
					state->caller.ev,
					state->caller.stream,
					state->caller.next_vector_fn,
					state->caller.next_vector_private);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, tstream_readv_pdu_queue_done, req);
}

static void tstream_readv_pdu_queue_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
				 struct tevent_req);
	struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
					struct tstream_readv_pdu_queue_state);
	int ret;
	int sys_errno;

	ret = tstream_readv_pdu_recv(subreq, &sys_errno);
	talloc_free(subreq);
	if (ret == -1) {
		tevent_req_error(req, sys_errno);
		return;
	}
	state->ret = ret;

	tevent_req_done(req);
}

int tstream_readv_pdu_queue_recv(struct tevent_req *req, int *perrno)
{
	struct tstream_readv_pdu_queue_state *state = tevent_req_data(req,
					struct tstream_readv_pdu_queue_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}
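
Editor's note: a sketch of how a caller might drive the queued PDU reader above, reusing the hypothetical my_pdu_parser/my_pdu_next_vector from the previous sketch; my_read_next_pdu and my_read_pdu_done are likewise invented. The queue guarantees that concurrent PDU reads on the same stream are processed strictly in order.

static void my_read_pdu_done(struct tevent_req *subreq);

static bool my_read_next_pdu(TALLOC_CTX *mem_ctx,
			     struct tevent_context *ev,
			     struct tstream_context *stream,
			     struct tevent_queue *read_queue,
			     struct my_pdu_parser *parser)
{
	struct tevent_req *subreq;

	/* Requests added to read_queue never interleave on the stream. */
	subreq = tstream_readv_pdu_queue_send(mem_ctx, ev, stream,
					      read_queue,
					      my_pdu_next_vector,
					      parser);
	if (subreq == NULL) {
		return false;
	}
	tevent_req_set_callback(subreq, my_read_pdu_done, parser);
	return true;
}

static void my_read_pdu_done(struct tevent_req *subreq)
{
	int sys_errno;
	int total = tstream_readv_pdu_queue_recv(subreq, &sys_errno);

	TALLOC_FREE(subreq);
	if (total == -1) {
		/* sys_errno holds the errno-style error code */
		return;
	}
	/* total is the full PDU size; a real caller would fetch its
	 * parser state here, e.g. via tevent_req_callback_data(). */
}
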

struct tstream_writev_queue_state {
	/* these members are owned by the caller */
	struct {
		struct tevent_context *ev;
		struct tstream_context *stream;
		const struct iovec *vector;
		size_t count;
	} caller;
	int ret;
};

static void tstream_writev_queue_trigger(struct tevent_req *req,
					 void *private_data);
static void tstream_writev_queue_done(struct tevent_req *subreq);

struct tevent_req *tstream_writev_queue_send(TALLOC_CTX *mem_ctx,
					     struct tevent_context *ev,
					     struct tstream_context *stream,
					     struct tevent_queue *queue,
					     const struct iovec *vector,
					     size_t count)
{
	struct tevent_req *req;
	struct tstream_writev_queue_state *state;
	struct tevent_queue_entry *e;

	req = tevent_req_create(mem_ctx, &state,
				struct tstream_writev_queue_state);
	if (!req) {
		return NULL;
	}

	state->caller.ev     = ev;
	state->caller.stream = stream;
	state->caller.vector = vector;
	state->caller.count  = count;
	state->ret           = -1;

	/*
	 * we use tevent_queue_add_optimize_empty() with allow_direct
	 * in order to optimize for the empty queue case.
	 */
	e = tevent_queue_add_optimize_empty(
			queue,
			ev,
			req,
			tstream_writev_queue_trigger,
			NULL);
	if (tevent_req_nomem(e, req)) {
		return tevent_req_post(req, ev);
	}
	if (!tevent_req_is_in_progress(req)) {
		return tevent_req_post(req, ev);
	}

	return req;
}

static void tstream_writev_queue_trigger(struct tevent_req *req,
					 void *private_data)
{
	struct tstream_writev_queue_state *state = tevent_req_data(req,
					struct tstream_writev_queue_state);
	struct tevent_req *subreq;

	subreq = tstream_writev_send(state,
				     state->caller.ev,
				     state->caller.stream,
				     state->caller.vector,
				     state->caller.count);
	if (tevent_req_nomem(subreq, req)) {
		return;
	}
	tevent_req_set_callback(subreq, tstream_writev_queue_done, req);
}

static void tstream_writev_queue_done(struct tevent_req *subreq)
{
	struct tevent_req *req = tevent_req_callback_data(subreq,
				 struct tevent_req);
	struct tstream_writev_queue_state *state = tevent_req_data(req,
					struct tstream_writev_queue_state);
	int ret;
	int sys_errno;

	ret = tstream_writev_recv(subreq, &sys_errno);
	talloc_free(subreq);
	if (ret == -1) {
		tevent_req_error(req, sys_errno);
		return;
	}
	state->ret = ret;

	tevent_req_done(req);
}

int tstream_writev_queue_recv(struct tevent_req *req, int *perrno)
{
	struct tstream_writev_queue_state *state = tevent_req_data(req,
					struct tstream_writev_queue_state);
	int ret;

	ret = tsocket_simple_int_recv(req, perrno);
	if (ret == 0) {
		ret = state->ret;
	}

	tevent_req_received(req);
	return ret;
}
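
Editor's note: a minimal caller sketch for the queued writev helpers above. Because the state only stores the caller's vector pointer (the "owned by the caller" comment), the iovec array and the buffers it references must stay alive until the completion callback runs. struct my_write_state and the "my_" functions are invented for illustration; the same talloc/tevent/tsocket headers as the earlier sketches are assumed.

struct my_write_state {
	struct iovec iov[2];
	uint8_t hdr[4];
	uint8_t *body;
	size_t body_len;
};

static void my_writev_done(struct tevent_req *subreq);

/* Queue one header+body write; write_queue keeps responses in order. */
static bool my_queue_write(TALLOC_CTX *mem_ctx,
			   struct tevent_context *ev,
			   struct tstream_context *stream,
			   struct tevent_queue *write_queue,
			   struct my_write_state *w)
{
	struct tevent_req *subreq;

	w->iov[0].iov_base = w->hdr;
	w->iov[0].iov_len  = sizeof(w->hdr);
	w->iov[1].iov_base = w->body;
	w->iov[1].iov_len  = w->body_len;

	subreq = tstream_writev_queue_send(mem_ctx, ev, stream,
					   write_queue,
					   w->iov, 2);
	if (subreq == NULL) {
		return false;
	}
	tevent_req_set_callback(subreq, my_writev_done, w);
	return true;
}

static void my_writev_done(struct tevent_req *subreq)
{
	int sys_errno;
	int nwritten = tstream_writev_queue_recv(subreq, &sys_errno);

	TALLOC_FREE(subreq);
	if (nwritten == -1) {
		/* sys_errno holds the errno-style error code */
		return;
	}
	/* the whole iovec was written to the stream; the caller may
	 * now free or reuse the buffers behind w->iov */
}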