Coverage Report

Created: 2025-08-28 06:16

/src/opensips/net/net_tcp.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
 * Copyright (C) 2014-2015 OpenSIPS Project
3
 * Copyright (C) 2001-2003 FhG Fokus
4
 *
5
 * This file is part of opensips, a free SIP server.
6
 *
7
 * opensips is free software; you can redistribute it and/or modify
8
 * it under the terms of the GNU General Public License as published by
9
 * the Free Software Foundation; either version 2 of the License, or
10
 * (at your option) any later version.
11
 *
12
 * opensips is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
 * GNU General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU General Public License
18
 * along with this program; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20
 *
21
 *
22
 * history:
23
 * ---------
24
 *  2015-01-xx  created (razvanc)
25
 */
26
27
#include <sys/types.h>
28
#include <sys/socket.h>
29
#include <netinet/in.h>
30
#include <netinet/in_systm.h>
31
#include <netinet/ip.h>
32
#include <netinet/tcp.h>
33
#include <sys/uio.h>  /* writev*/
34
#include <netdb.h>
35
#include <stdlib.h> /*exit() */
36
#include <time.h>   /*time() */
37
#include <unistd.h>
38
#include <errno.h>
39
#include <string.h>
40
41
#include "../mem/mem.h"
42
#include "../mem/shm_mem.h"
43
#include "../globals.h"
44
#include "../locking.h"
45
#include "../socket_info.h"
46
#include "../ut.h"
47
#include "../pt.h"
48
#include "../pt_load.h"
49
#include "../daemonize.h"
50
#include "../status_report.h"
51
#include "../reactor.h"
52
#include "../timer.h"
53
#include "../ipc.h"
54
55
#include "tcp_passfd.h"
56
#include "net_tcp_proc.h"
57
#include "net_tcp_report.h"
58
#include "net_tcp.h"
59
#include "tcp_conn.h"
60
#include "tcp_conn_profile.h"
61
#include "trans.h"
62
#include "net_tcp_dbg.h"
63
64
struct struct_hist_list *con_hist;
65
66
enum tcp_worker_state { STATE_INACTIVE=0, STATE_ACTIVE, STATE_DRAINING};
67
68
/* definition of a TCP worker - the array of these TCP workers is
69
 * mainly intended to be used by the TCP main, to keep track of the
70
 * workers, about their load and so. Nevertheless, since the addition
71
 * of the process auto-scaling, other processes may need access to this
72
 * data, thus its relocation in SHM (versus initial PKG). For example,
73
 * the attendant process is the one forking new TCP workers (scaling up),
74
 * so it must be able to set the ENABLE state for the TCP worker (and being
75
 * seen by the TCP main proc). Similarly, when a TCP worker shuts down, it has
76
 * to mark itself as DISABLED and the TCP main must see that.
77
 * Again, 99% this array is intended for TCP Main ops, it is not lock
78
 * protected, so be very careful with any ops from other procs.
79
 */
80
struct tcp_worker {
81
  pid_t pid;
82
  int unix_sock;    /*!< Main-Worker comm, worker end */
83
  int main_unix_sock; /*!< Main-Worker comm, TCP Main end */
84
  int pt_idx;     /*!< Index in the main Process Table */
85
  enum tcp_worker_state state;
86
  int n_reqs;   /*!< number of requests serviced so far */
87
};
88
89
/* definition of a TCP partition */
90
struct tcp_partition {
91
  /*! \brief connection hash table (after ip&port), includes also aliases */
92
  struct tcp_conn_alias** tcpconn_aliases_hash;
93
  /*! \brief connection hash table (after connection id) */
94
  struct tcp_connection** tcpconn_id_hash;
95
  gen_lock_t* tcpconn_lock;
96
};
97
98
99
/* array of TCP workers - to be used only by TCP MAIN */
100
struct tcp_worker *tcp_workers=0;
101
102
/* unique for each connection, used for
103
 * quickly finding the corresponding connection for a reply */
104
static unsigned int* connection_id=0;
105
106
/* array of TCP partitions */
107
static struct tcp_partition tcp_parts[TCP_PARTITION_SIZE];
108
109
/*!< tcp protocol number as returned by getprotobyname */
110
static int tcp_proto_no=-1;
111
112
/* communication socket from generic proc to TCP main */
113
int unix_tcp_sock = -1;
114
115
/*!< current number of open connections */
116
static int tcp_connections_no = 0;
117
118
/*!< by default don't accept aliases */
119
int tcp_accept_aliases=0;
120
int tcp_connect_timeout=DEFAULT_TCP_CONNECT_TIMEOUT;
121
int tcp_con_lifetime=DEFAULT_TCP_CONNECTION_LIFETIME;
122
int tcp_socket_backlog=DEFAULT_TCP_SOCKET_BACKLOG;
123
/*!< by default choose the best method */
124
enum poll_types tcp_poll_method=0;
125
int tcp_max_connections=DEFAULT_TCP_MAX_CONNECTIONS;
126
/* the configured/starting number of TCP workers */
127
int tcp_workers_no = UDP_WORKERS_NO;
128
/* the maximum numbers of TCP workers */
129
int tcp_workers_max_no;
130
/* the name of the auto-scaling profile (optional) */
131
char* tcp_auto_scaling_profile = NULL;
132
/* Max number of seconds that we expect a full SIP message
133
 * to arrive in - anything above will lead to the connection being closed */
134
int tcp_max_msg_time = TCP_CHILD_MAX_MSG_TIME;
135
/* If the data reading may be performed across different workers (still
136
 * serial) or by a single worker (the TCP conns sticks to one worker) */
137
int tcp_parallel_read_on_workers = 0;
138
139
#ifdef HAVE_SO_KEEPALIVE
140
    int tcp_keepalive = 1;
141
#else
142
    int tcp_keepalive = 0;
143
#endif
144
int tcp_keepcount = 0;
145
int tcp_keepidle = 0;
146
int tcp_keepinterval = 0;
147
148
/*!< should we allow opening a new TCP conn when sending data 
149
 * over UAC branches? - branch flag to be set in the SIP messages */
150
int tcp_no_new_conn_bflag = 0;
151
/*!< should we allow opening a new TCP conn when sending data 
152
 * back to UAS (replies)? - msg flag to be set in the SIP messages */
153
int tcp_no_new_conn_rplflag = 0;
154
/*!< should a new TCP conn be open if needed? - variable used to used for
155
 * signalizing between SIP layer (branch flag) and TCP layer (tcp_send func)*/
156
int tcp_no_new_conn = 0;
157
158
/* if the TCP net layer is on or off (if no TCP based protos are loaded) */
159
static int tcp_disabled = 1;
160
161
/* is the process TCP MAIN ? */
162
int is_tcp_main = 0;
163
164
/* the ID of the TCP conn used for the last send operation in the
165
 * current process - attention, this is a really ugly HACK here */
166
unsigned int last_outgoing_tcp_id = 0;
167
168
static struct scaling_profile *s_profile = NULL;
169
170
/****************************** helper functions *****************************/
171
extern void handle_sigs(void);
172
173
static inline int init_sock_keepalive(int s, const struct tcp_conn_profile *prof)
174
0
{
175
0
  int ka;
176
0
#if defined(HAVE_TCP_KEEPINTVL) || defined(HAVE_TCP_KEEPIDLE) || defined(HAVE_TCP_KEEPCNT)
177
0
  int optval;
178
0
#endif
179
180
0
  if (prof->keepinterval || prof->keepidle || prof->keepcount)
181
0
    ka = 1; /* force on */
182
0
  else
183
0
    ka = prof->keepalive;
184
185
0
#ifdef HAVE_SO_KEEPALIVE
186
0
  if (setsockopt(s,SOL_SOCKET,SO_KEEPALIVE,&ka,sizeof(ka))<0){
187
0
    LM_WARN("setsockopt failed to enable SO_KEEPALIVE: %s\n",
188
0
      strerror(errno));
189
0
    return -1;
190
0
  }
191
0
  LM_DBG("TCP keepalive enabled on socket %d\n",s);
192
0
#endif
193
0
#ifdef HAVE_TCP_KEEPINTVL
194
0
  if ((optval = prof->keepinterval)) {
195
0
    if (setsockopt(s,IPPROTO_TCP,TCP_KEEPINTVL,&optval,sizeof(optval))<0){
196
0
      LM_WARN("setsockopt failed to set keepalive probes interval: %s\n",
197
0
        strerror(errno));
198
0
    }
199
0
  }
200
0
#endif
201
0
#ifdef HAVE_TCP_KEEPIDLE
202
0
  if ((optval = prof->keepidle)) {
203
0
    if (setsockopt(s,IPPROTO_TCP,TCP_KEEPIDLE,&optval,sizeof(optval))<0){
204
0
      LM_WARN("setsockopt failed to set keepalive idle interval: %s\n",
205
0
        strerror(errno));
206
0
    }
207
0
  }
208
0
#endif
209
0
#ifdef HAVE_TCP_KEEPCNT
210
0
  if ((optval = prof->keepcount)) {
211
0
    if (setsockopt(s,IPPROTO_TCP,TCP_KEEPCNT,&optval,sizeof(optval))<0){
212
0
      LM_WARN("setsockopt failed to set maximum keepalive count: %s\n",
213
0
        strerror(errno));
214
0
    }
215
0
  }
216
0
#endif
217
0
  return 0;
218
0
}
219
220
static inline void set_sock_reuseport(int s)
221
0
{
222
0
  int yes = 1;
223
224
0
  if (setsockopt(s,SOL_SOCKET,SO_REUSEPORT,&yes,sizeof(yes))<0){
225
0
    LM_WARN("setsockopt failed to set SO_REUSEPORT: %s\n",
226
0
      strerror(errno));
227
0
  }
228
0
  if (setsockopt(s,SOL_SOCKET,SO_REUSEADDR,&yes,sizeof(yes))<0){
229
0
    LM_WARN("setsockopt failed to set SO_REUSEADDR: %s\n",
230
0
      strerror(errno));
231
0
  }
232
0
}
233
234
/*! \brief Set all socket/fd options:  disable nagle, tos lowdelay,
235
 * non-blocking
236
 * \return -1 on error */
237
int tcp_init_sock_opt(int s, const struct tcp_conn_profile *prof, enum si_flags socketflags, int sock_tos)
238
0
{
239
0
  int flags;
240
0
  int optval;
241
242
0
#ifdef DISABLE_NAGLE
243
0
  flags=1;
244
0
  if ( (tcp_proto_no!=-1) && (setsockopt(s, tcp_proto_no , TCP_NODELAY,
245
0
          &flags, sizeof(flags))<0) ){
246
0
    LM_WARN("could not disable Nagle: %s\n", strerror(errno));
247
0
  }
248
0
#endif
249
  /* tos*/
250
0
  optval = (sock_tos > 0) ? sock_tos : tos;
251
0
  if (optval > 0) {
252
0
    if (setsockopt(s, IPPROTO_IP, IP_TOS, (void*)&optval,sizeof(optval)) ==-1){
253
0
      LM_WARN("setsockopt tos: %s\n",  strerror(errno));
254
      /* continue since this is not critical */
255
0
    }
256
0
  }
257
258
0
  if (probe_max_sock_buff(s,1,MAX_SEND_BUFFER_SIZE,BUFFER_INCREMENT)) {
259
0
    LM_WARN("setsockopt tcp snd buff: %s\n", strerror(errno));
260
    /* continue since this is not critical */
261
0
  }
262
263
0
  init_sock_keepalive(s, prof);
264
0
  if (socketflags & SI_REUSEPORT)
265
0
    set_sock_reuseport(s);
266
267
  /* non-blocking */
268
0
  flags=fcntl(s, F_GETFL);
269
0
  if (flags==-1){
270
0
    LM_ERR("fcntl failed: (%d) %s\n", errno, strerror(errno));
271
0
    goto error;
272
0
  }
273
0
  if (fcntl(s, F_SETFL, flags|O_NONBLOCK)==-1){
274
0
    LM_ERR("set non-blocking failed: (%d) %s\n", errno, strerror(errno));
275
0
    goto error;
276
0
  }
277
0
  return 0;
278
0
error:
279
0
  return -1;
280
0
}
281
282
static int send2worker(struct tcp_connection* tcpconn,int rw)
283
0
{
284
0
  int i;
285
0
  int min_load;
286
0
  int idx;
287
0
  long response[2];
288
0
  unsigned int load;
289
290
0
  min_load=100; /* it is a percentage */
291
0
  idx=0;
292
0
  for (i=0; i<tcp_workers_max_no; i++){
293
0
    if (tcp_workers[i].state==STATE_ACTIVE) {
294
0
      load = pt_get_1m_proc_load( tcp_workers[i].pt_idx );
295
#ifdef EXTRA_DEBUG
296
      LM_DBG("checking TCP worker %d (proc %d), with load %u,"
297
        "min_load so far %u\n", i, tcp_workers[i].pt_idx, load,
298
        min_load);
299
#endif
300
0
      if (min_load>load) {
301
0
        min_load = load;
302
0
        idx = i;
303
0
      }
304
0
    }
305
0
  }
306
307
0
  tcp_workers[idx].n_reqs++;
308
0
  LM_DBG("to tcp worker %d (%d/%d) load %u, %p/%d rw %d\n", idx,
309
0
    tcp_workers[idx].pid, tcp_workers[idx].pt_idx, min_load,
310
0
    tcpconn, tcpconn->s, rw);
311
0
  response[0]=(long)tcpconn;
312
0
  response[1]=rw;
313
0
  if (send_fd(tcp_workers[idx].unix_sock, response, sizeof(response),
314
0
      tcpconn->s)<=0){
315
0
    LM_ERR("send_fd failed\n");
316
0
    return -1;
317
0
  }
318
319
0
  return 0;
320
0
}
321
322
323
324
/********************** TCP conn management functions ************************/
325
326
/* initializes an already defined TCP listener */
327
int tcp_init_listener(struct socket_info *si)
328
0
{
329
0
  union sockaddr_union* addr = &si->su;
330
0
  if (init_su(addr, &si->address, si->port_no)<0){
331
0
    LM_ERR("could no init sockaddr_union\n");
332
0
    return -1;
333
0
  }
334
0
  return 0;
335
0
}
336
337
/* binding an defined TCP listener */
338
int tcp_bind_listener(struct socket_info *si)
339
0
{
340
0
  union sockaddr_union* addr;
341
0
  int optval;
342
0
#ifdef DISABLE_NAGLE
343
0
  int flag;
344
0
  struct protoent* pe;
345
346
0
  if (tcp_proto_no==-1){ /* if not already set */
347
0
    pe=getprotobyname("tcp");
348
0
    if (pe==0){
349
0
      LM_ERR("could not get TCP protocol number\n");
350
0
      tcp_proto_no=-1;
351
0
    }else{
352
0
      tcp_proto_no=pe->p_proto;
353
0
    }
354
0
  }
355
0
#endif
356
357
0
  addr = &si->su;
358
0
  si->socket = socket(AF2PF(addr->s.sa_family), SOCK_STREAM, 0);
359
0
  if (si->socket==-1){
360
0
    LM_ERR("socket failed with [%s]\n", strerror(errno));
361
0
    goto error;
362
0
  }
363
0
#ifdef DISABLE_NAGLE
364
0
  flag=1;
365
0
  if ( (tcp_proto_no!=-1) &&
366
0
     (setsockopt(si->socket, tcp_proto_no , TCP_NODELAY,
367
0
           &flag, sizeof(flag))<0) ){
368
0
    LM_ERR("could not disable Nagle: %s\n",strerror(errno));
369
0
  }
370
0
#endif
371
372
0
#if  !defined(TCP_DONT_REUSEADDR)
373
  /* Stevens, "Network Programming", Section 7.5, "Generic Socket
374
   * Options": "...server started,..a child continues..on existing
375
   * connection..listening server is restarted...call to bind fails
376
   * ... ALL TCP servers should specify the SO_REUSEADDRE option
377
   * to allow the server to be restarted in this situation
378
   */
379
0
  optval=1;
380
0
  if (setsockopt(si->socket, SOL_SOCKET, SO_REUSEADDR,
381
0
  (void*)&optval, sizeof(optval))==-1) {
382
0
    LM_ERR("setsockopt failed with [%s]\n", strerror(errno));
383
0
    goto error;
384
0
  }
385
0
#endif
386
  /* tos */
387
0
  optval = (si->tos > 0) ? si->tos : tos;
388
0
  if (optval > 0) {
389
0
    if (setsockopt(si->socket, IPPROTO_IP, IP_TOS, (void*)&optval,
390
0
    sizeof(optval)) ==-1){
391
0
      LM_WARN("setsockopt tos: %s\n", strerror(errno));
392
      /* continue since this is not critical */
393
0
    }
394
0
  }
395
396
0
  if (probe_max_sock_buff(si->socket,1,MAX_SEND_BUFFER_SIZE,
397
0
  BUFFER_INCREMENT)) {
398
0
    LM_WARN("setsockopt tcp snd buff: %s\n",strerror(errno));
399
    /* continue since this is not critical */
400
0
  }
401
402
0
  init_sock_keepalive(si->socket, &tcp_con_df_profile);
403
0
  if (si->flags & SI_REUSEPORT)
404
0
    set_sock_reuseport(si->socket);
405
0
  if (bind(si->socket, &addr->s, sockaddru_len(*addr))==-1){
406
0
    LM_ERR("bind(%x, %p, %d) on %s:%d : %s\n",
407
0
        si->socket, &addr->s,
408
0
        (unsigned)sockaddru_len(*addr),
409
0
        si->address_str.s,
410
0
        si->port_no,
411
0
        strerror(errno));
412
0
    goto error;
413
0
  }
414
0
  if (listen(si->socket, tcp_socket_backlog)==-1){
415
0
    LM_ERR("listen(%x, %p, %d) on %s: %s\n",
416
0
        si->socket, &addr->s,
417
0
        (unsigned)sockaddru_len(*addr),
418
0
        si->address_str.s,
419
0
        strerror(errno));
420
0
    goto error;
421
0
  }
422
423
0
  return 0;
424
0
error:
425
0
  if (si->socket!=-1){
426
0
    close(si->socket);
427
0
    si->socket=-1;
428
0
  }
429
0
  return -1;
430
0
}
431
432
433
/*! \brief finds a connection, if id=0 return NULL
434
 * \note WARNING: unprotected (locks) use tcpconn_get unless you really
435
 * know what you are doing */
436
static struct tcp_connection* _tcpconn_find(unsigned int id)
437
0
{
438
0
  struct tcp_connection *c;
439
0
  unsigned hash;
440
441
0
  if (id){
442
0
    hash=tcp_id_hash(id);
443
0
    for (c=TCP_PART(id).tcpconn_id_hash[hash]; c; c=c->id_next){
444
#ifdef EXTRA_DEBUG
445
      LM_DBG("c=%p, c->id=%u, port=%d\n",c, c->id, c->rcv.src_port);
446
      print_ip("ip=", &c->rcv.src_ip, "\n");
447
#endif
448
0
      if ((id==c->id)&&(c->state!=S_CONN_BAD)) return c;
449
0
    }
450
0
  }
451
0
  return 0;
452
0
}
453
454
455
/* returns the correlation ID of a TCP connection */
456
int tcp_get_correlation_id( unsigned int id, unsigned long long *cid)
457
0
{
458
0
  struct tcp_connection* c;
459
460
0
  TCPCONN_LOCK(id);
461
0
  if ( (c=_tcpconn_find(id))!=NULL ) {
462
0
    *cid = c->cid;
463
0
    TCPCONN_UNLOCK(id);
464
0
    return 0;
465
0
  }
466
0
  *cid = 0;
467
0
  TCPCONN_UNLOCK(id);
468
0
  return -1;
469
0
}
470
471
472
/*! \brief _tcpconn_find with locks and acquire fd */
473
int tcp_conn_get(unsigned int id, struct ip_addr* ip, int port,
474
    enum sip_protos proto, void *proto_extra_id,
475
    struct tcp_connection** conn, int* conn_fd,
476
    const struct socket_info* send_sock)
477
0
{
478
0
  struct tcp_connection* c;
479
0
  struct tcp_connection* tmp;
480
0
  struct tcp_conn_alias* a;
481
0
  unsigned hash;
482
0
  long response[2];
483
0
  unsigned int part;
484
0
  int n;
485
0
  int fd;
486
487
0
  if (id) {
488
0
    part = id;
489
0
    TCPCONN_LOCK(part);
490
0
    if ( (c=_tcpconn_find(part))!=NULL )
491
0
      goto found;
492
0
    TCPCONN_UNLOCK(part);
493
0
  }
494
495
  /* continue search based on IP address + port + transport */
496
#ifdef EXTRA_DEBUG
497
  LM_DBG("%d  port %u\n",id, port);
498
  if (ip) print_ip("tcpconn_find: ip ", ip, "\n");
499
#endif
500
0
  if (ip){
501
0
    hash=tcp_addr_hash(ip, port);
502
0
    for( part=0 ; part<TCP_PARTITION_SIZE ; part++ ) {
503
0
      TCPCONN_LOCK(part);
504
0
      for (a=TCP_PART(part).tcpconn_aliases_hash[hash]; a; a=a->next) {
505
#ifdef EXTRA_DEBUG
506
        LM_DBG("a=%p, c=%p, c->id=%u, alias port= %d port=%d\n",
507
          a, a->parent, a->parent->id, a->port,
508
          a->parent->rcv.src_port);
509
        print_ip("ip=",&a->parent->rcv.src_ip,"\n");
510
        if (send_sock && a->parent->rcv.bind_address) {
511
          print_ip("requested send_sock ip=", &send_sock->address,"\n");
512
          print_ip("found send_sock ip=", &a->parent->rcv.bind_address->address,"\n");
513
        }
514
#endif
515
0
        c = a->parent;
516
0
        if (c->state != S_CONN_BAD &&
517
0
            c->flags&F_CONN_INIT &&
518
0
            (send_sock==NULL || send_sock == a->parent->rcv.bind_address) &&
519
0
            port == a->port &&
520
0
            proto == c->type &&
521
0
            ip_addr_cmp(ip, &c->rcv.src_ip) &&
522
0
            (proto_extra_id==NULL ||
523
0
            protos[proto].net.stream.conn.match==NULL ||
524
0
            protos[proto].net.stream.conn.match( c, proto_extra_id)) )
525
0
          goto found;
526
0
      }
527
0
      TCPCONN_UNLOCK(part);
528
0
    }
529
0
  }
530
531
  /* not found */
532
0
  *conn = NULL;
533
0
  if (conn_fd) *conn_fd = -1;
534
0
  return 0;
535
536
0
found:
537
0
  c->refcnt++;
538
0
  TCPCONN_UNLOCK(part);
539
0
  sh_log(c->hist, TCP_REF, "tcp_conn_get, (%d)", c->refcnt);
540
541
0
  LM_DBG("con found in state %d\n",c->state);
542
543
0
  if (c->state!=S_CONN_OK || conn_fd==NULL) {
544
    /* no need to acquire it, just return the conn with an invalid fd */
545
0
    *conn = c;
546
0
    if (conn_fd) *conn_fd = -1;
547
0
    return 1;
548
0
  }
549
550
0
  if (c->proc_id == process_no) {
551
0
    LM_DBG("tcp connection found (%p) already in this process ( %d ) ,"
552
0
      " fd = %d\n", c, c->proc_id, c->fd);
553
    /* we already have the connection in this worker's reactor, */
554
    /* no need to acquire FD */
555
0
    *conn = c;
556
0
    *conn_fd = c->fd;
557
0
    return 1;
558
0
  }
559
560
  /* acquire the fd for this connection too */
561
0
  LM_DBG("tcp connection found (%p), acquiring fd\n", c);
562
  /* get the fd */
563
0
  response[0]=(long)c;
564
0
  response[1]=CONN_GET_FD;
565
0
  n=send_all(unix_tcp_sock, response, sizeof(response));
566
0
  if (n<=0){
567
0
    LM_ERR("failed to get fd(write):%s (%d)\n",
568
0
        strerror(errno), errno);
569
0
    n=-1;
570
0
    goto error;
571
0
  }
572
0
  LM_DBG("c= %p, n=%d, Usock=%d\n", c, n, unix_tcp_sock);
573
0
  tmp = c;
574
0
  n=receive_fd(unix_tcp_sock, &c, sizeof(c), &fd, MSG_WAITALL);
575
0
  if (n<=0){
576
0
    LM_ERR("failed to get fd(receive_fd):"
577
0
      " %s (%d)\n", strerror(errno), errno);
578
0
    n=-1;
579
0
    goto error;
580
0
  }
581
0
  if (c!=tmp){
582
0
    LM_CRIT("got different connection:"
583
0
      "  %p (id= %u, refcnt=%d state=%d != "
584
0
      "  %p (id= %u, refcnt=%d state=%d (n=%d)\n",
585
0
        c,   c->id,   c->refcnt,   c->state,
586
0
        tmp, tmp->id, tmp->refcnt, tmp->state, n
587
0
       );
588
0
    n=-1; /* fail */
589
0
    close(fd);
590
0
    goto error;
591
0
  }
592
0
  LM_DBG("after receive_fd: c= %p n=%d fd=%d\n",c, n, fd);
593
594
0
  *conn = c;
595
0
  *conn_fd = fd;
596
597
0
  return 1;
598
0
error:
599
0
  tcpconn_put(c);
600
0
  sh_log(c->hist, TCP_UNREF, "tcp_conn_get, (%d)", c->refcnt);
601
0
  *conn = NULL;
602
0
  *conn_fd = -1;
603
0
  return -1;
604
0
}
605
606
607
/* used to tune the tcp_connection attributes - not to be used inside the
608
   network layer, but onlu from the above layer (otherwise we may end up
609
   in strange deadlocks!) */
610
int tcp_conn_fcntl(struct receive_info *rcv, int attr, void *value)
611
0
{
612
0
  struct tcp_connection *con;
613
614
0
  switch (attr) {
615
0
  case DST_FCNTL_SET_LIFETIME:
616
    /* set connection timeout */
617
0
    TCPCONN_LOCK(rcv->proto_reserved1);
618
0
    con =_tcpconn_find(rcv->proto_reserved1);
619
0
    if (!con) {
620
0
      LM_ERR("Strange, tcp conn not found (id=%u)\n",
621
0
        rcv->proto_reserved1);
622
0
    } else {
623
0
      tcp_conn_set_lifetime( con, (int)(long)(value));
624
0
    }
625
0
    TCPCONN_UNLOCK(rcv->proto_reserved1);
626
0
    return 0;
627
0
  default:
628
0
    LM_ERR("unsupported operation %d on conn\n",attr);
629
0
    return -1;
630
0
  }
631
0
  return -1;
632
0
}
633
634
635
static struct tcp_connection* tcpconn_add(struct tcp_connection *c)
636
0
{
637
0
  unsigned hash;
638
639
0
  if (c){
640
0
    TCPCONN_LOCK(c->id);
641
    /* add it at the beginning of the list*/
642
0
    hash=tcp_id_hash(c->id);
643
0
    c->id_hash=hash;
644
0
    tcpconn_listadd(TCP_PART(c->id).tcpconn_id_hash[hash], c, id_next,
645
0
      id_prev);
646
647
0
    hash=tcp_addr_hash(&c->rcv.src_ip, c->rcv.src_port);
648
    /* set the first alias */
649
0
    c->con_aliases[0].port=c->rcv.src_port;
650
0
    c->con_aliases[0].hash=hash;
651
0
    c->con_aliases[0].parent=c;
652
0
    tcpconn_listadd(TCP_PART(c->id).tcpconn_aliases_hash[hash],
653
0
      &c->con_aliases[0], next, prev);
654
0
    c->aliases++;
655
0
    TCPCONN_UNLOCK(c->id);
656
0
    LM_DBG("hashes: %d, %d\n", hash, c->id_hash);
657
0
    return c;
658
0
  }else{
659
0
    LM_CRIT("null connection pointer\n");
660
0
    return 0;
661
0
  }
662
0
}
663
664
static str e_tcp_src_ip = str_init("src_ip");
665
static str e_tcp_src_port = str_init("src_port");
666
static str e_tcp_dst_ip = str_init("dst_ip");
667
static str e_tcp_dst_port = str_init("dst_port");
668
static str e_tcp_c_proto = str_init("proto");
669
670
static void tcp_disconnect_event_raise(struct tcp_connection* c)
671
0
{
672
0
  evi_params_p list = 0;
673
0
  str src_ip,dst_ip, proto;
674
0
  int src_port,dst_port;
675
0
  char src_ip_buf[IP_ADDR_MAX_STR_SIZE],dst_ip_buf[IP_ADDR_MAX_STR_SIZE];
676
677
  // event has to be triggered - check for subscribers
678
0
  if (!evi_probe_event(EVI_TCP_DISCONNECT)) {
679
0
    goto end;
680
0
  }
681
682
0
  if (!(list = evi_get_params()))
683
0
    goto end;
684
685
0
  src_ip.s = ip_addr2a( &c->rcv.src_ip );
686
0
  memcpy(src_ip_buf,src_ip.s,IP_ADDR_MAX_STR_SIZE);
687
0
  src_ip.s = src_ip_buf;
688
0
  src_ip.len = strlen(src_ip.s);
689
690
0
  if (evi_param_add_str(list, &e_tcp_src_ip, &src_ip)) {
691
0
    LM_ERR("unable to add parameter\n");
692
0
    goto end;
693
0
  }
694
695
0
  src_port = c->rcv.src_port;
696
697
0
  if (evi_param_add_int(list, &e_tcp_src_port, &src_port)) {
698
0
    LM_ERR("unable to add parameter\n");
699
0
    goto end;
700
0
  }
701
702
0
  dst_ip.s = ip_addr2a( &c->rcv.dst_ip );
703
0
  memcpy(dst_ip_buf,dst_ip.s,IP_ADDR_MAX_STR_SIZE);
704
0
  dst_ip.s = dst_ip_buf;
705
0
  dst_ip.len = strlen(dst_ip.s);
706
707
0
  if (evi_param_add_str(list, &e_tcp_dst_ip, &dst_ip)) {
708
0
    LM_ERR("unable to add parameter\n");
709
0
    goto end;
710
0
  }
711
712
0
  dst_port = c->rcv.dst_port;
713
714
0
  if (evi_param_add_int(list, &e_tcp_dst_port, &dst_port)) {
715
0
    LM_ERR("unable to add parameter\n");
716
0
    goto end;
717
0
  }
718
719
0
  proto.s = protos[c->rcv.proto].name;
720
0
  proto.len = strlen(proto.s);
721
722
0
  if (evi_param_add_str(list, &e_tcp_c_proto, &proto)) {
723
0
    LM_ERR("unable to add parameter\n");
724
0
    goto end;
725
0
  }
726
727
0
  if (is_tcp_main) {
728
0
    if (evi_dispatch_event(EVI_TCP_DISCONNECT, list)) {
729
0
      LM_ERR("unable to dispatch tcp disconnect event\n");
730
0
    }
731
0
  } else {
732
0
    if (evi_raise_event(EVI_TCP_DISCONNECT, list)) {
733
0
      LM_ERR("unable to send tcp disconnect event\n");
734
0
    }
735
0
  }
736
0
  list = 0;
737
738
0
end:
739
0
  if (list)
740
0
    evi_free_params(list);
741
0
}
742
743
/* convenience macro to aid in shm_free() debugging */
744
#define _tcpconn_rm(c, ne) \
745
0
  do {\
746
0
    __tcpconn_rm(c, ne);\
747
0
    shm_free(c);\
748
0
  } while (0)
749
750
/*! \brief unsafe tcpconn_rm version (nolocks) */
751
static void __tcpconn_rm(struct tcp_connection* c, int no_event)
752
0
{
753
0
  int r;
754
755
0
  tcpconn_listrm(TCP_PART(c->id).tcpconn_id_hash[c->id_hash], c,
756
0
    id_next, id_prev);
757
  /* remove all the aliases */
758
0
  for (r=0; r<c->aliases; r++)
759
0
    tcpconn_listrm(TCP_PART(c->id).tcpconn_aliases_hash[c->con_aliases[r].hash],
760
0
      &c->con_aliases[r], next, prev);
761
0
  lock_destroy(&c->write_lock);
762
763
0
  if (c->async) {
764
0
    for (r = 0; r<c->async->pending; r++)
765
0
      shm_free(c->async->chunks[r]);
766
0
    shm_free(c->async);
767
0
    c->async = NULL;
768
0
  }
769
770
0
  if (c->con_req)
771
0
    shm_free(c->con_req);
772
773
0
  if (protos[c->type].net.stream.conn.clean)
774
0
    protos[c->type].net.stream.conn.clean(c);
775
776
0
  if (!no_event) tcp_disconnect_event_raise(c);
777
778
#ifdef DBG_TCPCON
779
  sh_log(c->hist, TCP_DESTROY, "type=%d", c->type);
780
  sh_unref(c->hist);
781
  c->hist = NULL;
782
#endif
783
784
  /* shm_free(c); -- freed by _tcpconn_rm() */
785
0
}
786
787
788
789
#if 0
790
static void tcpconn_rm(struct tcp_connection* c)
791
{
792
  int r;
793
794
  TCPCONN_LOCK(c->id);
795
  tcpconn_listrm(TCP_PART(c->id).tcpconn_id_hash[c->id_hash], c,
796
    id_next, id_prev);
797
  /* remove all the aliases */
798
  for (r=0; r<c->aliases; r++)
799
    tcpconn_listrm(TCP_PART(c->id).tcpconn_aliases_hash
800
      [c->con_aliases[r].hash],
801
      &c->con_aliases[r], next, prev);
802
  TCPCONN_UNLOCK(c->id);
803
  lock_destroy(&c->write_lock);
804
805
  if (protos[c->type].net.stream.conn.clean)
806
    protos[c->type].net.stream.conn.clean(c);
807
808
  shm_free(c);
809
}
810
#endif
811
812
813
/*! \brief add port as an alias for the "id" connection
814
 * \return 0 on success,-1 on failure */
815
int tcpconn_add_alias(struct sip_msg *msg, unsigned int id, int port, int proto)
816
0
{
817
0
  struct tcp_connection* c;
818
0
  unsigned hash;
819
0
  struct tcp_conn_alias* a;
820
821
0
  a=0;
822
  /* fix the port */
823
0
  port=port ? port : protos[proto].default_port ;
824
0
  TCPCONN_LOCK(id);
825
  /* check if alias already exists */
826
0
  c=_tcpconn_find(id);
827
0
  if (c) {
828
0
    if (msg && !(c->profile.alias_mode == TCP_ALIAS_ALWAYS
829
0
                   || (c->profile.alias_mode == TCP_ALIAS_RFC_5923
830
0
                       && msg->via1->alias))) {
831
0
      LM_DBG("refusing to add alias (alias_mode: %u, via 'alias': %u)\n",
832
0
              c->profile.alias_mode, !!msg->via1->alias);
833
0
      TCPCONN_UNLOCK(id);
834
0
      return 0;
835
0
    }
836
837
0
    hash=tcp_addr_hash(&c->rcv.src_ip, port);
838
    /* search the aliases for an already existing one */
839
0
    for (a=TCP_PART(id).tcpconn_aliases_hash[hash]; a; a=a->next) {
840
0
      if (a->parent->state != S_CONN_BAD &&
841
0
          port == a->port &&
842
0
          proto == a->parent->type &&
843
0
          ip_addr_cmp(&c->rcv.src_ip, &a->parent->rcv.src_ip)) {
844
        /* found */
845
0
        if (a->parent!=c) goto error_sec;
846
0
        else goto ok;
847
0
      }
848
0
    }
849
0
    if (c->aliases>=TCP_CON_MAX_ALIASES) goto error_aliases;
850
0
    c->con_aliases[c->aliases].parent=c;
851
0
    c->con_aliases[c->aliases].port=port;
852
0
    c->con_aliases[c->aliases].hash=hash;
853
0
    tcpconn_listadd(TCP_PART(id).tcpconn_aliases_hash[hash],
854
0
                &c->con_aliases[c->aliases], next, prev);
855
0
    c->aliases++;
856
0
  }else goto error_not_found;
857
0
ok:
858
0
  TCPCONN_UNLOCK(id);
859
#ifdef EXTRA_DEBUG
860
  if (a) LM_DBG("alias already present\n");
861
  else   LM_DBG("alias port %d for hash %d, id %u\n", port, hash, id);
862
#endif
863
0
  return 0;
864
0
error_aliases:
865
0
  TCPCONN_UNLOCK(id);
866
0
  LM_ERR("too many aliases for connection %p (%u)\n", c, id);
867
0
  return -1;
868
0
error_not_found:
869
0
  TCPCONN_UNLOCK(id);
870
0
  LM_ERR("no connection found for id %u\n",id);
871
0
  return -1;
872
0
error_sec:
873
0
  LM_WARN("possible port hijack attempt\n");
874
0
  LM_WARN("alias already present and points to another connection "
875
0
      "(%d : %d and %u : %d)\n", a->parent->id,  port, id, port);
876
0
  TCPCONN_UNLOCK(id);
877
0
  return -1;
878
0
}
879
880
881
void tcpconn_put(struct tcp_connection* c)
882
0
{
883
0
  TCPCONN_LOCK(c->id);
884
0
  c->refcnt--;
885
0
  TCPCONN_UNLOCK(c->id);
886
0
}
887
888
889
static inline void tcpconn_ref(struct tcp_connection* c)
890
0
{
891
0
  TCPCONN_LOCK(c->id);
892
0
  c->refcnt++;
893
0
  TCPCONN_UNLOCK(c->id);
894
0
}
895
896
897
static struct tcp_connection* tcpconn_new(int sock, const union sockaddr_union* su,
898
                    const struct socket_info* si, const struct tcp_conn_profile *prof,
899
                    int state, int flags, int in_main_proc)
900
0
{
901
0
  struct tcp_connection *c;
902
0
  union sockaddr_union local_su;
903
0
  unsigned int su_size;
904
905
0
  c=(struct tcp_connection*)shm_malloc(sizeof(struct tcp_connection));
906
0
  if (c==0){
907
0
    LM_ERR("shared memory allocation failure\n");
908
0
    return 0;
909
0
  }
910
0
  memset(c, 0, sizeof(struct tcp_connection)); /* zero init */
911
0
  c->s=sock;
912
0
  c->fd=-1; /* not initialized */
913
0
  if (lock_init(&c->write_lock)==0){
914
0
    LM_ERR("init lock failed\n");
915
0
    goto error0;
916
0
  }
917
918
0
  c->rcv.src_su=*su;
919
920
0
  c->refcnt=0;
921
0
  su2ip_addr(&c->rcv.src_ip, su);
922
0
  c->rcv.src_port=su_getport(su);
923
0
  c->rcv.bind_address = si;
924
0
  c->rcv.dst_ip = si->address;
925
0
  su_size = sockaddru_len(*su);
926
0
  if (getsockname(sock, (struct sockaddr *)&local_su, &su_size)<0) {
927
0
    LM_ERR("failed to get info on received interface/IP %d/%s\n",
928
0
      errno, strerror(errno));
929
0
    goto error;
930
0
  }
931
0
  c->rcv.dst_port = su_getport(&local_su);
932
0
  print_ip("tcpconn_new: new tcp connection to: ", &c->rcv.src_ip, "\n");
933
0
  LM_DBG("on port %d, proto %d\n", c->rcv.src_port, si->proto);
934
0
  c->id=(*connection_id)++;
935
0
  c->cid = (unsigned long long)c->id
936
0
        | ( (unsigned long long)(startup_time&0xFFFFFF) << 32 )
937
0
          | ( (unsigned long long)(rand()&0xFF) << 56 );
938
939
0
  c->rcv.proto_reserved1=0; /* this will be filled before receive_message*/
940
0
  c->rcv.proto_reserved2=0;
941
0
  c->state=state;
942
0
  c->extra_data=0;
943
0
  c->type = si->proto;
944
0
  c->rcv.proto = si->proto;
945
  /* start with the default conn lifetime */
946
0
  c->lifetime = get_ticks() + prof->con_lifetime;
947
0
  c->profile = *prof;
948
0
  c->flags|=F_CONN_REMOVED|flags;
949
#ifdef DBG_TCPCON
950
  c->hist = sh_push(c, con_hist);
951
#endif
952
953
0
  if (protos[si->proto].net.stream.async_chunks) {
954
0
    c->async = shm_malloc(sizeof(struct tcp_async_data) +
955
0
        protos[si->proto].net.stream.async_chunks *
956
0
        sizeof(struct tcp_async_chunk));
957
0
    if (c->async) {
958
0
      c->async->allocated = protos[si->proto].net.stream.async_chunks;
959
0
      c->async->oldest = 0;
960
0
      c->async->pending = 0;
961
0
    } else {
962
0
      LM_ERR("could not allocate async data for con!\n");
963
0
      goto error;
964
0
    }
965
0
  }
966
0
  if(in_main_proc)
967
0
    tcp_connections_no++;
968
0
  return c;
969
970
0
error:
971
0
  lock_destroy(&c->write_lock);
972
0
error0:
973
0
  shm_free(c);
974
0
  return 0;
975
0
}
976
977
978
/* creates a new tcp connection structure
 * if send2main is 1, the function informs the TCP Main about the new conn
 * a +1 ref is set for the new conn !
 * IMPORTANT - the function assumes you want to create a new TCP conn as
 * a result of a connect operation - the conn will be marked as connecting !!
 * Accepted connections are triggered internally only */
984
struct tcp_connection* tcp_conn_create(int sock, const union sockaddr_union* su,
985
    const struct socket_info* si, struct tcp_conn_profile *prof,
986
    int state, int send2main)
987
0
{
988
0
  struct tcp_connection *c;
989
990
0
  if (!prof)
991
0
    tcp_con_get_profile(su, &si->su, si->proto, prof);
992
993
  /* create the connection structure */
994
0
  c = tcpconn_new(sock, su, si, prof, state, 0, !send2main);
995
0
  if (c==NULL) {
996
0
    LM_ERR("tcpconn_new failed\n");
997
0
    return NULL;
998
0
  }
999
1000
0
  if (protos[c->type].net.stream.conn.init &&
1001
0
      protos[c->type].net.stream.conn.init(c) < 0) {
1002
0
    LM_ERR("failed to do proto %d specific init for conn %p\n",
1003
0
        c->type, c);
1004
0
    tcp_conn_destroy(c);
1005
0
    return NULL;
1006
0
  }
1007
0
  c->flags |= F_CONN_INIT;
1008
1009
0
  c->refcnt++; /* safe to do it w/o locking, it's not yet
1010
          available to the rest of the world */
1011
0
  sh_log(c->hist, TCP_REF, "connect, (%d)", c->refcnt);
1012
0
  if (!send2main)
1013
0
    return c;
1014
1015
0
  return (tcp_conn_send(c) == 0 ? c : NULL);
1016
0
}
1017
1018
/* sends a new connection from a worker to main */
1019
int tcp_conn_send(struct tcp_connection *c)
1020
0
{
1021
0
  long response[2];
1022
0
  int n, fd;
1023
1024
  /* inform TCP main about this new connection */
1025
0
  if (c->state==S_CONN_CONNECTING) {
1026
    /* store the local fd now, before TCP main overwrites it */
1027
0
    fd = c->s;
1028
0
    response[0]=(long)c;
1029
0
    response[1]=ASYNC_CONNECT;
1030
0
    n=send_fd(unix_tcp_sock, response, sizeof(response), fd);
1031
0
    if (n<=0) {
1032
0
      LM_ERR("Failed to send the socket to main for async connection\n");
1033
0
      goto error;
1034
0
    }
1035
0
    close(fd);
1036
0
  } else {
1037
0
    response[0]=(long)c;
1038
0
    response[1]=CONN_NEW;
1039
0
    n=send_fd(unix_tcp_sock, response, sizeof(response), c->s);
1040
0
    if (n<=0){
1041
0
      LM_ERR("failed send_fd: %s (%d)\n", strerror(errno), errno);
1042
0
      goto error;
1043
0
    }
1044
0
  }
1045
1046
0
  return 0;
1047
0
error:
1048
  /* no reporting as closed, as PROTO layer did not reporte it as
1049
   * OPEN yet */
1050
0
  _tcpconn_rm(c,1);
1051
0
  tcp_connections_no--;
1052
0
  return -1;
1053
0
}
1054
1055
1056
static inline void tcpconn_destroy(struct tcp_connection* tcpconn)
1057
0
{
1058
0
  int fd;
1059
0
  int unsigned id = tcpconn->id;
1060
1061
0
  TCPCONN_LOCK(id); /*avoid races w/ tcp_send*/
1062
0
  tcpconn->refcnt--;
1063
0
  if (tcpconn->refcnt==0){
1064
0
    LM_DBG("destroying connection %p, flags %04x\n",
1065
0
        tcpconn, tcpconn->flags);
1066
0
    fd=tcpconn->s;
1067
    /* no reporting here - the tcpconn_destroy() function is called
1068
     * from the TCP_MAIN reactor when handling connectioned received
1069
     * from a worker; and we generate the CLOSE reports from WORKERs */
1070
0
    _tcpconn_rm(tcpconn,0);
1071
0
    if (fd >= 0)
1072
0
      close(fd);
1073
0
    tcp_connections_no--;
1074
0
  }else{
1075
    /* force timeout */
1076
0
    tcpconn->lifetime=0;
1077
0
    tcpconn->state=S_CONN_BAD;
1078
0
    LM_DBG("delaying (%p, flags %04x) ref = %d ...\n",
1079
0
        tcpconn, tcpconn->flags, tcpconn->refcnt);
1080
1081
0
  }
1082
0
  TCPCONN_UNLOCK(id);
1083
0
}
1084
1085
/* wrapper to the internally used function */
1086
void tcp_conn_destroy(struct tcp_connection* tcpconn)
1087
0
{
1088
0
  tcp_trigger_report(tcpconn, TCP_REPORT_CLOSE,
1089
0
        "Closed by Proto layer");
1090
0
  sh_log(tcpconn->hist, TCP_UNREF, "tcp_conn_destroy, (%d)", tcpconn->refcnt);
1091
0
  return tcpconn_destroy(tcpconn);
1092
0
}
1093
1094
1095
/************************ TCP MAIN process functions ************************/
1096
1097
/*! \brief
1098
 * handles a new connection, called internally by tcp_main_loop/handle_io.
1099
 * \param si - pointer to one of the tcp socket_info structures on which
1100
 *              an io event was detected (connection attempt)
1101
 * \return  handle_* return convention: -1 on error, 0 on EAGAIN (no more
1102
 *           io events queued), >0 on success. success/error refer only to
1103
 *           the accept.
1104
 */
1105
static inline int handle_new_connect(const struct socket_info* si)
{
  union sockaddr_union su;       /* peer address, filled in by accept() */
  struct tcp_connection* tcpconn;
  struct tcp_conn_profile prof;  /* per-connection tunables (lifetime etc.) */
  socklen_t su_len = sizeof(su);
  int new_sock;
  unsigned int id;

  /* coverity[overrun-buffer-arg: FALSE] - union has 28 bytes, CID #200070 */
  new_sock=accept(si->socket, &(su.s), &su_len);
  if (new_sock==-1){
    /* nothing actually pending on the listener -> "no more IO" (0) */
    if ((errno==EAGAIN)||(errno==EWOULDBLOCK))
      return 0;
    LM_ERR("failed to accept connection(%d): %s\n", errno, strerror(errno));
    return -1;
  }
  if (tcp_connections_no>=tcp_max_connections){
    /* global connection cap reached - drop the just-accepted socket */
    LM_ERR("maximum number of connections exceeded: %d/%d\n",
          tcp_connections_no, tcp_max_connections);
    close(new_sock);
    return 1; /* success, because the accept was successful */
  }

  /* resolve the connection profile for this (src, dst, proto) tuple */
  tcp_con_get_profile(&su, &si->su, si->proto, &prof);
  if (tcp_init_sock_opt(new_sock, &prof, si->flags, si->tos)<0){
    LM_ERR("tcp_init_sock_opt failed\n");
    close(new_sock);
    return 1; /* success, because the accept was successful */
  }

  /* add socket to list */
  tcpconn=tcpconn_new(new_sock, &su, si, &prof, S_CONN_OK, F_CONN_ACCEPTED, 1);
  if (tcpconn){
    tcpconn->refcnt++; /* safe, not yet available to the
                outside world */
    sh_log(tcpconn->hist, TCP_REF, "accept, (%d)", tcpconn->refcnt);
    tcpconn_add(tcpconn);
    LM_DBG("new connection: %p %d flags: %04x\n",
        tcpconn, tcpconn->s, tcpconn->flags);
    /* pass it to a worker */
    sh_log(tcpconn->hist, TCP_SEND2CHILD, "accept");
    if(send2worker(tcpconn,IO_WATCH_READ)<0){
      /* roll back the ref taken above, under the per-conn lock;
       * if we were the last holder -> remove + close, otherwise
       * force a fast expire so the timer reaps it */
      LM_ERR("no TCP workers available\n");
      id = tcpconn->id;
      sh_log(tcpconn->hist, TCP_UNREF, "accept, (%d)", tcpconn->refcnt);
      TCPCONN_LOCK(id);
      tcpconn->refcnt--;
      if (tcpconn->refcnt==0){
        /* no close to report here as the connection was not yet
         * reported as OPEN by the proto layer...this sucks a bit */
        _tcpconn_rm(tcpconn,1);
        close(new_sock/*same as tcpconn->s*/);
      }else tcpconn->lifetime=0; /* force expire */
      TCPCONN_UNLOCK(id);
    }
  }else{ /*tcpconn==0 */
    LM_ERR("tcpconn_new failed, closing socket\n");
    close(new_sock);
  }
  return 1; /* accept() was successful */
}
1167
1168
1169
/*! \brief
1170
 * handles an io event on one of the watched tcp connections
1171
 *
1172
 * \param    tcpconn - pointer to the tcp_connection for which we have an io ev.
1173
 * \param    fd_i    - index in the fd_array table (needed for delete)
1174
 * \return   handle_* return convention, but on success it always returns 0
1175
 *           (because it's one-shot, after a successful execution the fd is
1176
 *            removed from tcp_main's watch fd list and passed to a worker =>
1177
 *            tcp_main is not interested in further io events that might be
1178
 *            queued for this fd)
1179
 */
1180
inline static int handle_tcpconn_ev(struct tcp_connection* tcpconn, int fd_i,
                                int event_type)
{
  int fd;
  int err;
  unsigned int id;
  unsigned int err_len;

  if (event_type == IO_WATCH_READ) {
    /* pass it to worker, so remove it from the io watch list */
    LM_DBG("data available on %p %d\n", tcpconn, tcpconn->s);
    if (reactor_del_reader(tcpconn->s, fd_i, 0)==-1)
      return -1;
    tcpconn->flags|=F_CONN_REMOVED_READ;
    tcpconn_ref(tcpconn); /* refcnt ++ */
    sh_log(tcpconn->hist, TCP_REF, "tcpconn read, (%d)", tcpconn->refcnt);
    sh_log(tcpconn->hist, TCP_SEND2CHILD, "read");
    if (send2worker(tcpconn,IO_WATCH_READ)<0){
      /* dispatch failed -> roll back the ref under the per-conn
       * lock; last holder removes + closes, otherwise force expire */
      LM_ERR("no TCP workers available\n");
      id = tcpconn->id;
      TCPCONN_LOCK(id);
      tcpconn->refcnt--;
      sh_log(tcpconn->hist, TCP_UNREF, "tcpconn read, (%d)", tcpconn->refcnt);
      if (tcpconn->refcnt==0){
        fd=tcpconn->s;
        tcp_trigger_report(tcpconn, TCP_REPORT_CLOSE,
          "No worker for read");
        _tcpconn_rm(tcpconn,0);
        close(fd);
      }else tcpconn->lifetime=0; /* force expire*/
      TCPCONN_UNLOCK(id);
    }
    return 0; /* we are not interested in possibly queued io events,
           the fd was either passed to a worker, or closed */
  } else {
    LM_DBG("connection %p fd %d is now writable\n", tcpconn, tcpconn->s);
    /* we received a write event */
    if (tcpconn->state==S_CONN_CONNECTING) {
      /* we're coming from an async connect & write
       * let's see if we connected successfully
       * (SO_ERROR holds the deferred connect() result) */
      err_len=sizeof(err);
      if (getsockopt(tcpconn->s, SOL_SOCKET, SO_ERROR, &err, &err_len) < 0 || \
          err != 0) {
        /* connect failed: take a ref so the final unref in
         * tcpconn_destroy() balances, pull the fd out of the
         * reactor and tear the connection down */
        LM_DBG("Failed connection attempt\n");
        tcpconn_ref(tcpconn);
        sh_log(tcpconn->hist, TCP_REF, "tcpconn connect, (%d)", tcpconn->refcnt);
        reactor_del_all(tcpconn->s, fd_i, IO_FD_CLOSING);
        tcpconn->flags|=F_CONN_REMOVED;
        tcp_trigger_report(tcpconn, TCP_REPORT_CLOSE,
          "Async connect failed");
        sh_log(tcpconn->hist, TCP_UNREF, "tcpconn connect, (%d)", tcpconn->refcnt);
        tcpconn_destroy(tcpconn);
        return 0;
      }

      /* we successfully connected - further treat this case as if we
       * were coming from an async write */
      tcpconn->state = S_CONN_OK;
      LM_DBG("Successfully completed previous async connect\n");

      /* now that we completed the async connection, we also need to
       * listen for READ events, otherwise these will get lost */
      if (tcpconn->flags & F_CONN_REMOVED_READ) {
        reactor_add_reader( tcpconn->s, F_TCPCONN, RCT_PRIO_NET, tcpconn);
        tcpconn->flags&=~F_CONN_REMOVED_READ;
      }

      /* NOTE: deliberate jump into the sibling else-branch below -
       * a completed async connect is handled as an async write */
      goto async_write;
    } else {
      /* we're coming from an async write -
       * just pass to worker and have it write
       * our TCP chunks */
async_write:
      /* no more write events for now */
      if (reactor_del_writer( tcpconn->s, fd_i, 0)==-1)
        return -1;
      tcpconn->flags|=F_CONN_REMOVED_WRITE;
      tcpconn_ref(tcpconn); /* refcnt ++ */
      sh_log(tcpconn->hist, TCP_REF, "tcpconn write, (%d)",
        tcpconn->refcnt);
      sh_log(tcpconn->hist, TCP_SEND2CHILD, "write");
      if (send2worker(tcpconn,IO_WATCH_WRITE)<0){
        /* same rollback dance as on the read path above */
        LM_ERR("no TCP worker available\n");
        id = tcpconn->id;
        TCPCONN_LOCK(id);
        tcpconn->refcnt--;
        sh_log(tcpconn->hist, TCP_UNREF, "tcpconn write, (%d)",
          tcpconn->refcnt);
        if (tcpconn->refcnt==0){
          fd=tcpconn->s;
          tcp_trigger_report(tcpconn, TCP_REPORT_CLOSE,
            "No worker for write");
          _tcpconn_rm(tcpconn,0);
          close(fd);
        }else tcpconn->lifetime=0; /* force expire*/
        TCPCONN_UNLOCK(id);
      }
      return 0;
    }
  }
}
1281
1282
1283
/*! \brief handles io from a tcp worker process
1284
 * \param  tcp_c - pointer in the tcp_workers array, to the entry for
1285
 *                 which an io event was detected
1286
 * \param  fd_i  - fd index in the fd_array (useful for optimizing
1287
 *                 io_watch_deletes)
1288
 * \return handle_* return convention: -1 on error, 0 on EAGAIN (no more
1289
 *           io events queued), >0 on success. success/error refer only to
1290
 *           the reads from the fd.
1291
 */
1292
inline static int handle_tcp_worker(struct tcp_worker* tcp_c, int fd_i)
{
  struct tcp_connection* tcpconn;
  long response[2];  /* [0] = conn pointer, [1] = command code */
  int cmd;
  int bytes;

  if (tcp_c->unix_sock<=0){
    /* (we can't have a fd==0, 0 is never closed )*/
    LM_CRIT("fd %d for %d (pid %d)\n", tcp_c->unix_sock,
        (int)(tcp_c-&tcp_workers[0]), tcp_c->pid);
    goto error;
  }
  /* read until sizeof(response)
   * (this is a SOCK_STREAM so read is not atomic) */
  bytes=recv_all(tcp_c->unix_sock, response, sizeof(response), MSG_DONTWAIT);
  if (bytes<(int)sizeof(response)){
    if (bytes==0){
      /* EOF -> bad, worker has died */
      if (sr_get_core_status()!=STATE_TERMINATING)
        LM_CRIT("dead tcp worker %d (EOF received), pid %d\n",
          (int)(tcp_c-&tcp_workers[0]), tcp_c->pid );
      /* don't listen on it any more */
      reactor_del_reader( tcp_c->unix_sock, fd_i, 0/*flags*/);
      /* eof. so no more io here, it's ok to return error */
      goto error;
    }else if (bytes<0){
      /* EAGAIN is ok if we try to empty the buffer
       * e.g.: SIGIO_RT overflow mode or EPOLL ET */
      if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
        LM_CRIT("read from tcp worker %ld (pid %d) %s [%d]\n",
            (long)(tcp_c-&tcp_workers[0]), tcp_c->pid,
            strerror(errno), errno );
      }else{
        bytes=0;  /* signal "buffer empty" to the reactor */
      }
      /* try to ignore ? */
      goto end;
    }else{
      /* should never happen */
      LM_CRIT("too few bytes received (%d)\n", bytes );
      bytes=0; /* something was read so there is no error; otoh if
            receive_fd returned less then requested => the receive
            buffer is empty => no more io queued on this fd */
      goto end;
    }
  }

  LM_DBG("response= %lx, %ld from tcp worker %d (%d)\n",
    response[0], response[1], tcp_c->pid, (int)(tcp_c-&tcp_workers[0]));

  cmd=response[1];
  tcpconn=(struct tcp_connection*)response[0];
  if (tcpconn==0){
    /* should never happen */
    LM_CRIT("null tcpconn pointer received from tcp worker %d (pid %d):"
      "%lx, %lx\n", (int)(tcp_c-&tcp_workers[0]), tcp_c->pid,
      response[0], response[1]) ;
    goto end;
  }
  switch(cmd){
    case CONN_RELEASE:
      /* worker is done reading; take the conn back into the main
       * reactor unless it was marked bad meanwhile */
      if (tcpconn->state==S_CONN_BAD){
        sh_log(tcpconn->hist, TCP_UNREF, "tcpworker release bad, (%d)", tcpconn->refcnt);
        tcpconn_destroy(tcpconn);
        break;
      }
      sh_log(tcpconn->hist, TCP_UNREF, "tcpworker release, (%d)", tcpconn->refcnt);
      tcpconn_put(tcpconn);
      /* must be after the de-ref*/
      reactor_add_reader( tcpconn->s, F_TCPCONN, RCT_PRIO_NET, tcpconn);
      tcpconn->flags&=~F_CONN_REMOVED_READ;
      break;
    case CONN_RELEASE_WRITE:
      /* worker done writing; just de-ref, no reactor re-arm needed */
      if (tcpconn->state==S_CONN_BAD){
        sh_log(tcpconn->hist, TCP_UNREF, "tcpworker release write bad, (%d)", tcpconn->refcnt);
        tcpconn_destroy(tcpconn);
        break;
      }
      sh_log(tcpconn->hist, TCP_UNREF, "tcpworker release write, (%d)", tcpconn->refcnt);
      tcpconn_put(tcpconn);
      break;
    case ASYNC_WRITE_TCPW:
      /* worker still has pending chunks; watch the fd for writability */
      if (tcpconn->state==S_CONN_BAD){
        sh_log(tcpconn->hist, TCP_UNREF, "tcpworker async write bad, (%d)", tcpconn->refcnt);
        tcpconn_destroy(tcpconn);
        break;
      }
      sh_log(tcpconn->hist, TCP_UNREF, "tcpworker async write, (%d)", tcpconn->refcnt);
      tcpconn_put(tcpconn);
      /* must be after the de-ref*/
      reactor_add_writer( tcpconn->s, F_TCPCONN, RCT_PRIO_NET, tcpconn);
      tcpconn->flags&=~F_CONN_REMOVED_WRITE;
      break;
    case CONN_ERROR_TCPW:
    case CONN_DESTROY:
    case CONN_EOF:
      /* WARNING: this will auto-dec. refcnt! */
      if ((tcpconn->flags & F_CONN_REMOVED) != F_CONN_REMOVED &&
        (tcpconn->s!=-1)){
        reactor_del_all( tcpconn->s, -1, IO_FD_CLOSING);
        tcpconn->flags|=F_CONN_REMOVED;
      }
      sh_log(tcpconn->hist, TCP_UNREF, "tcpworker destroy, (%d)", tcpconn->refcnt);
      tcpconn_destroy(tcpconn); /* closes also the fd */
      break;
    default:
      LM_CRIT("unknown cmd %d from tcp worker %d (%d)\n",
        cmd, tcp_c->pid, (int)(tcp_c-&tcp_workers[0]));
  }
end:
  return bytes;
error:
  return -1;
}
1407
1408
1409
/*! \brief handles io from a "generic" ser process (get fd or new_fd from a tcp_send)
1410
 *
1411
 * \param p     - pointer in the ser processes array (pt[]), to the entry for
1412
 *                 which an io event was detected
1413
 * \param fd_i  - fd index in the fd_array (useful for optimizing
1414
 *                 io_watch_deletes)
1415
 * \return  handle_* return convention:
1416
 *          - -1 on error reading from the fd,
1417
 *          -  0 on EAGAIN  or when no  more io events are queued
1418
 *             (receive buffer empty),
1419
 *          -  >0 on successful reads from the fd (the receive buffer might
1420
 *             be non-empty).
1421
 */
1422
inline static int handle_worker(struct process_table* p, int fd_i)
1423
0
{
1424
0
  struct tcp_connection* tcpconn;
1425
0
  long response[2];
1426
0
  int cmd;
1427
0
  int bytes;
1428
0
  int ret;
1429
0
  int fd;
1430
1431
0
  ret=-1;
1432
0
  if (p->unix_sock<=0){
1433
    /* (we can't have a fd==0, 0 is never closed )*/
1434
0
    LM_CRIT("fd %d for %d (pid %d)\n",
1435
0
        p->unix_sock, (int)(p-&pt[0]), p->pid);
1436
0
    goto error;
1437
0
  }
1438
1439
  /* get all bytes and the fd (if transmitted)
1440
   * (this is a SOCK_STREAM so read is not atomic) */
1441
0
  bytes=receive_fd(p->unix_sock, response, sizeof(response), &fd,
1442
0
            MSG_DONTWAIT);
1443
0
  if (bytes<(int)sizeof(response)){
1444
    /* too few bytes read */
1445
0
    if (bytes==0){
1446
      /* EOF -> bad, worker has died */
1447
0
      if (sr_get_core_status()!=STATE_TERMINATING)
1448
0
        LM_CRIT("dead tcp worker %d (EOF received), pid %d\n",
1449
0
          (int)(p-&pt[0]), p->pid);
1450
      /* don't listen on it any more */
1451
0
      reactor_del_reader( p->unix_sock, fd_i, 0/*flags*/);
1452
0
      goto error; /* worker dead => no further io events from it */
1453
0
    }else if (bytes<0){
1454
      /* EAGAIN is ok if we try to empty the buffer
1455
       * e.g: SIGIO_RT overflow mode or EPOLL ET */
1456
0
      if ((errno!=EAGAIN) && (errno!=EWOULDBLOCK)){
1457
0
        LM_CRIT("read from worker %d (pid %d):  %s [%d]\n",
1458
0
            (int)(p-&pt[0]), p->pid, strerror(errno), errno);
1459
0
        ret=-1;
1460
0
      }else{
1461
0
        ret=0;
1462
0
      }
1463
      /* try to ignore ? */
1464
0
      goto end;
1465
0
    }else{
1466
      /* should never happen */
1467
0
      LM_CRIT("too few bytes received (%d)\n", bytes );
1468
0
      ret=0; /* something was read so there is no error; otoh if
1469
            receive_fd returned less then requested => the receive
1470
            buffer is empty => no more io queued on this fd */
1471
0
      goto end;
1472
0
    }
1473
0
  }
1474
0
  ret=1; /* something was received, there might be more queued */
1475
0
  LM_DBG("read response= %lx, %ld, fd %d from %d (%d)\n",
1476
0
          response[0], response[1], fd, (int)(p-&pt[0]), p->pid);
1477
0
  cmd=response[1];
1478
0
  tcpconn=(struct tcp_connection*)response[0];
1479
0
  if (tcpconn==0){
1480
0
    LM_CRIT("null tcpconn pointer received from worker %d (pid %d)"
1481
0
      "%lx, %lx\n", (int)(p-&pt[0]), p->pid, response[0], response[1]) ;
1482
0
    goto end;
1483
0
  }
1484
0
  switch(cmd){
1485
0
    case CONN_ERROR_GENW:
1486
      /* remove from reactor only if the fd exists, and it wasn't
1487
       * removed before */
1488
0
      if ((tcpconn->flags & F_CONN_REMOVED) != F_CONN_REMOVED &&
1489
0
          (tcpconn->s!=-1)){
1490
0
        reactor_del_all( tcpconn->s, -1, IO_FD_CLOSING);
1491
0
        tcpconn->flags|=F_CONN_REMOVED;
1492
0
      }
1493
0
      sh_log(tcpconn->hist, TCP_UNREF, "worker error, (%d)", tcpconn->refcnt);
1494
0
      tcpconn_destroy(tcpconn); /* will close also the fd */
1495
0
      break;
1496
0
    case CONN_GET_FD:
1497
      /* send the requested FD  */
1498
      /* WARNING: take care of setting refcnt properly to
1499
       * avoid race condition */
1500
0
      if (send_fd(p->unix_sock, &tcpconn, sizeof(tcpconn),
1501
0
              tcpconn->s)<=0){
1502
0
        LM_ERR("send_fd failed\n");
1503
0
      }
1504
0
      break;
1505
0
    case CONN_NEW:
1506
      /* update the fd in the requested tcpconn*/
1507
      /* WARNING: take care of setting refcnt properly to
1508
       * avoid race condition */
1509
0
      if (fd==-1){
1510
0
        LM_CRIT(" cmd CONN_NEW: no fd received\n");
1511
0
        break;
1512
0
      }
1513
0
      tcpconn->s=fd;
1514
      /* add tcpconn to the list*/
1515
0
      tcpconn_add(tcpconn);
1516
0
      tcp_connections_no++;
1517
0
      reactor_add_reader( tcpconn->s, F_TCPCONN, RCT_PRIO_NET, tcpconn);
1518
0
      tcpconn->flags&=~F_CONN_REMOVED_READ;
1519
0
      break;
1520
0
    case ASYNC_CONNECT:
1521
      /* connection is not yet linked to hash = not yet
1522
       * available to the outside world */
1523
0
      if (fd==-1){
1524
0
        LM_CRIT(" cmd CONN_NEW: no fd received\n");
1525
0
        break;
1526
0
      }
1527
0
      tcpconn->s=fd;
1528
      /* add tcpconn to the list*/
1529
0
      tcpconn_add(tcpconn);
1530
0
      tcp_connections_no++;
1531
      /* FIXME - now we have lifetime==default_lifetime - should we
1532
       * set a shorter one when waiting for a connect ??? */
1533
      /* only maintain the socket in the IO_WATCH_WRITE watcher
1534
       * while we have stuff to write - otherwise we're going to get
1535
       * useless events */
1536
0
      reactor_add_writer( tcpconn->s, F_TCPCONN, RCT_PRIO_NET, tcpconn);
1537
0
      tcpconn->flags&=~F_CONN_REMOVED_WRITE;
1538
0
      break;
1539
0
    case ASYNC_WRITE_GENW:
1540
0
      if (tcpconn->state==S_CONN_BAD){
1541
0
        tcpconn->lifetime=0;
1542
0
        break;
1543
0
      }
1544
0
      tcpconn_put(tcpconn);
1545
      /* must be after the de-ref*/
1546
0
      reactor_add_writer( tcpconn->s, F_TCPCONN, RCT_PRIO_NET, tcpconn);
1547
0
      tcpconn->flags&=~F_CONN_REMOVED_WRITE;
1548
0
      break;
1549
0
    default:
1550
0
      LM_CRIT("unknown cmd %d from worker %d (pid %d)\n", cmd,
1551
0
        (int)(p-&pt[0]), p->pid);
1552
0
  }
1553
0
end:
1554
0
  return ret;
1555
0
error:
1556
0
  return -1;
1557
0
}
1558
1559
1560
/*! \brief generic handle io routine, it will call the appropriate
 *  handle_xxx() based on the fd_map type
 *
 * \param  fm  - pointer to a fd hash entry
 * \param  idx - index in the fd_array (or -1 if not known)
 * \return -1 on error
 *          0 on EAGAIN or when by some other way it is known that no more
 *            io events are queued on the fd (the receive buffer is empty).
 *            Useful to detect when there are no more io events queued for
 *            sigio_rt, epoll_et, kqueue.
 *         >0 on successful read from the fd (when there might be more io
 *            queued -- the receive buffer might still be non-empty)
 */
1573
inline static int handle_io(struct fd_map* fm, int idx,int event_type)
1574
0
{
1575
0
  int ret = 0;
1576
1577
0
  pt_become_active();
1578
0
  switch(fm->type){
1579
0
    case F_TCP_LISTENER:
1580
0
      ret = handle_new_connect((const struct socket_info*)fm->data);
1581
0
      break;
1582
0
    case F_TCPCONN:
1583
0
      ret = handle_tcpconn_ev((struct tcp_connection*)fm->data, idx,
1584
0
        event_type);
1585
0
      break;
1586
0
    case F_TCP_TCPWORKER:
1587
0
      ret = handle_tcp_worker((struct tcp_worker*)fm->data, idx);
1588
0
      break;
1589
0
    case F_TCP_WORKER:
1590
0
      ret = handle_worker((struct process_table*)fm->data, idx);
1591
0
      break;
1592
0
    case F_IPC:
1593
0
      ipc_handle_job(fm->fd);
1594
0
      break;
1595
0
    case F_NONE:
1596
0
      LM_CRIT("empty fd map\n");
1597
0
      goto error;
1598
0
    default:
1599
0
      LM_CRIT("unknown fd type %d\n", fm->type);
1600
0
      goto error;
1601
0
  }
1602
0
  pt_become_idle();
1603
0
  return ret;
1604
0
error:
1605
0
  pt_become_idle();
1606
0
  return -1;
1607
0
}
1608
1609
1610
/*
1611
 * iterates through all TCP connections and closes expired ones
1612
 * Note: runs once per second at most
1613
 */
1614
/* throttled expire scan: remembers the last tick in @last_sec and runs
 * __tcpconn_lifetime(0) only when the tick has advanced */
#define tcpconn_lifetime(last_sec) \
  do { \
    int now; \
    now = get_ticks(); \
    if (last_sec != now) { \
      last_sec = now; \
      __tcpconn_lifetime(0); \
    } \
  } while (0)
1623
1624
1625
/*! \brief very inefficient for now - FIXME
1626
 * keep in sync with tcpconn_destroy, the "delete" part should be
1627
 * the same except for io_watch_del..
1628
 * \todo FIXME (very inefficient for now)
1629
 */
1630
static inline void __tcpconn_lifetime(int shutdown)
{
  struct tcp_connection *c, *next;
  unsigned int ticks,part;
  unsigned h;
  int fd;

  /* in shutdown mode all connections are removed unconditionally and
   * no locking is performed; otherwise only unreferenced, expired
   * connections are reaped, per-partition under TCPCONN_LOCK */
  if (have_ticks())
    ticks=get_ticks();
  else
    ticks=0;

  for( part=0 ; part<TCP_PARTITION_SIZE ; part++ ) {
    if (!shutdown) TCPCONN_LOCK(part); /* fixme: we can lock only on delete IMO */
    for(h=0; h<TCP_ID_HASH_SIZE; h++){
      c=TCP_PART(part).tcpconn_id_hash[h];
      while(c){
        next=c->id_next;  /* grab next first, c may be removed below */
        if (shutdown ||((c->refcnt==0) && (ticks>c->lifetime))) {
          if (!shutdown)
            LM_DBG("timeout for hash=%d - %p"
                " (%d > %d)\n", h, c, ticks, c->lifetime);
          fd=c->s;
          /* report the closing of the connection. Note that
           * there are connections that use a forced expire to 0
           * as a way to be deleted - we are not interested in those */
          /* Also, do not trigger reporting when shutdown
           * is done */
          if (c->lifetime>0 && !shutdown)
            tcp_trigger_report(c, TCP_REPORT_CLOSE,
              "Timeout on no traffic");
          if ((!shutdown)&&(fd>0)&&(c->refcnt==0)) {
            /* if any of read or write are set, we need to remove
             * the fd from the reactor */
            if ((c->flags & F_CONN_REMOVED) != F_CONN_REMOVED){
              reactor_del_all( fd, -1, IO_FD_CLOSING);
              c->flags|=F_CONN_REMOVED;
            }
            close(fd);
            c->s = -1;
          }
          _tcpconn_rm(c, shutdown?1:0);
          tcp_connections_no--;
        }
        c=next;
      }
    }
    if (!shutdown) TCPCONN_UNLOCK(part);
  }
}
1680
1681
1682
/* TCP Main process loop: sets up the dedicated reactor, registers all
 * listeners and inter-process unix sockets, then runs the event loop;
 * only exits (with -1) on a fatal setup/runtime error */
static void tcp_main_server(void)
{
  static unsigned int last_sec = 0;  /* throttle for tcpconn_lifetime() */
  int flags;
  struct socket_info_full* sif;
  int n;

  /* we run in a separate, dedicated process, with its own reactor
   * (reactors are per process) */
  if (init_worker_reactor("TCP_main", RCT_PRIO_MAX)<0)
    goto error;

  /* now start watching all the fds */

  /* add all the sockets we listen on for connections */
  for( n=PROTO_FIRST ; n<PROTO_LAST ; n++ )
    if ( is_tcp_based_proto(n) )
      for( sif=protos[n].listeners ; sif ; sif=sif->next ) {
        struct socket_info* si = &sif->socket_info;
        if (protos[n].tran.bind_listener && protos[n].tran.bind_listener(si)<0) {
          LM_ERR("failed to bind listener [%.*s], proto %s\n",
              si->name.len, si->name.s,
              protos[n].name );
          goto error;
        }
        if ( (si->socket!=-1) &&
        reactor_add_reader( si->socket, F_TCP_LISTENER,
        RCT_PRIO_NET, si)<0 ) {
          LM_ERR("failed to add listen socket to reactor\n");
          goto error;
        }
      }
  /* add all the unix sockets used for communication with other opensips
   * processes (get fd, new connection a.s.o)
   * NOTE: we add even the socks for the inactive/unfork processes - the
   *       socks are already created, but the triggering is from proc to
   *       main, having them into reactor is harmless - they will never
   *       trigger as there is no proc on the other end to write us */
  for (n=1; n<counted_max_processes; n++) {
    /* skip myself (as process) and -1 socks (disabled)
       (we can't have 0, we never close it!) */
    if (n!=process_no && pt[n].tcp_socks_holder[0]>0)
      if (reactor_add_reader( pt[n].tcp_socks_holder[0], F_TCP_WORKER,
      RCT_PRIO_PROC, &pt[n])<0){
        LM_ERR("failed to add process %d (%s) unix socket "
          "to the fd list\n", n, pt[n].desc);
        goto error;
      }
  }
  /* add all the unix sockets used for communication with the tcp workers */
  for (n=0; n<tcp_workers_max_no; n++) {
    /*we can't have 0, we never close it!*/
    if (tcp_workers[n].unix_sock>0) {
      /* make socket non-blocking */
      flags=fcntl(tcp_workers[n].unix_sock, F_GETFL);
      if (flags==-1){
        LM_ERR("fcntl failed: (%d) %s\n", errno, strerror(errno));
        goto error;
      }
      if (fcntl(tcp_workers[n].unix_sock,F_SETFL,flags|O_NONBLOCK)==-1){
        LM_ERR("set non-blocking failed: (%d) %s\n",
          errno, strerror(errno));
        goto error;
      }
      /* add socket for listening */
      if (reactor_add_reader( tcp_workers[n].unix_sock,
      F_TCP_TCPWORKER, RCT_PRIO_PROC, &tcp_workers[n])<0) {
        LM_ERR("failed to add tcp worker %d unix socket to "
            "the fd list\n", n);
        goto error;
      }
    }
  }

  /* init: start watching for the IPC jobs */
  if (reactor_add_reader(IPC_FD_READ_SELF, F_IPC, RCT_PRIO_ASYNC, NULL)<0){
    LM_CRIT("failed to add IPC pipe to reactor\n");
    goto error;
  }

  is_tcp_main = 1;

  /* main loop (requires "handle_io()" implementation); the third arg
   * runs the throttled connection-expire scan between iterations */
  reactor_main_loop( TCP_MAIN_SELECT_TIMEOUT, error,
      tcpconn_lifetime(last_sec) );

error:
  destroy_worker_reactor();
  LM_CRIT("exiting...");
  exit(-1);
}
1773
1774
1775
1776
/**************************** Control functions ******************************/
1777
1778
/* initializes the TCP network level in terms of data structures */
1779
/* Initializes the TCP network level in terms of data structures:
 * auto-detects whether TCP is needed, sizes and allocates the shared
 * workers table, the global connection-id counter and the per-partition
 * locks + hashtables. Returns 0 on success, -1 on error (after cleaning
 * up via tcp_destroy()). */
int tcp_init(void)
{
	unsigned int i;

	/* first we do auto-detection to see if there are any TCP based
	 * protocols loaded */
	for ( i=PROTO_FIRST ; i<PROTO_LAST ; i++ )
		if (is_tcp_based_proto(i) && proto_has_listeners(i)) {
			tcp_disabled=0;
			break;
		}

	tcp_init_con_profiles();

	/* nothing else to set up if no TCP-based protocol has listeners */
	if (tcp_disabled)
		return 0;

#ifdef DBG_TCPCON
	con_hist = shl_init("TCP con", 10000, 0);
	if (!con_hist) {
		LM_ERR("oom con hist\n");
		goto error;
	}
#endif

	if (tcp_auto_scaling_profile) {
		s_profile = get_scaling_profile(tcp_auto_scaling_profile);
		if (s_profile==NULL) {
			/* a missing profile is not fatal - auto-scaling simply
			 * stays disabled */
			LM_WARN("TCP scaling profile <%s> not defined "
				"-> ignoring it...\n", tcp_auto_scaling_profile);
		} else {
			auto_scaling_enabled = 1;
		}
	}

	/* size the workers table for the largest process count we may ever
	 * reach: the scaling profile's max, if larger than the start count */
	tcp_workers_max_no = (s_profile && (tcp_workers_no<s_profile->max_procs)) ?
		s_profile->max_procs : tcp_workers_no ;

	/* init tcp workers array (shared - both TCP MAIN and workers use it) */
	tcp_workers = (struct tcp_worker*)shm_malloc
		( tcp_workers_max_no*sizeof(struct tcp_worker) );
	if (tcp_workers==0) {
		LM_CRIT("could not alloc tcp_workers array in shm memory\n");
		goto error;
	}
	memset( tcp_workers, 0, tcp_workers_max_no*sizeof(struct tcp_worker));
	/* init globals */
	connection_id=(unsigned int*)shm_malloc(sizeof(unsigned int));
	if (connection_id==0){
		LM_CRIT("could not alloc globals in shm memory\n");
		goto error;
	}
	/* The rand() function returns a pseudo-random integer in the range 0 to
	 * RAND_MAX inclusive (i.e., the mathematical range [0, RAND_MAX]);
	 * the counter starts at a random value (presumably to avoid id clashes
	 * across restarts - not stated here, confirm in history if relevant) */
	*connection_id=(unsigned int)rand();
	memset( &tcp_parts, 0, TCP_PARTITION_SIZE*sizeof(struct tcp_partition));
	/* init partitions */
	for( i=0 ; i<TCP_PARTITION_SIZE ; i++ ) {
		/* init lock */
		tcp_parts[i].tcpconn_lock=lock_alloc();
		if (tcp_parts[i].tcpconn_lock==0){
			LM_CRIT("could not alloc lock\n");
			goto error;
		}
		if (lock_init(tcp_parts[i].tcpconn_lock)==0){
			LM_CRIT("could not init lock\n");
			lock_dealloc((void*)tcp_parts[i].tcpconn_lock);
			/* zero the field so tcp_destroy() does not touch the
			 * half-initialized lock again */
			tcp_parts[i].tcpconn_lock=0;
			goto error;
		}
		/* alloc hashtables*/
		tcp_parts[i].tcpconn_aliases_hash=(struct tcp_conn_alias**)
			shm_malloc(TCP_ALIAS_HASH_SIZE* sizeof(struct tcp_conn_alias*));
		if (tcp_parts[i].tcpconn_aliases_hash==0){
			LM_CRIT("could not alloc address hashtable in shm memory\n");
			goto error;
		}
		tcp_parts[i].tcpconn_id_hash=(struct tcp_connection**)
			shm_malloc(TCP_ID_HASH_SIZE*sizeof(struct tcp_connection*));
		if (tcp_parts[i].tcpconn_id_hash==0){
			LM_CRIT("could not alloc id hashtable in shm memory\n");
			goto error;
		}
		/* init hashtables*/
		memset((void*)tcp_parts[i].tcpconn_aliases_hash, 0,
			TCP_ALIAS_HASH_SIZE * sizeof(struct tcp_conn_alias*));
		memset((void*)tcp_parts[i].tcpconn_id_hash, 0,
			TCP_ID_HASH_SIZE * sizeof(struct tcp_connection*));
	}

	return 0;
error:
	/* clean-up whatever was allocated so far */
	tcp_destroy();
	return -1;
}
1875
1876
1877
/* destroys the TCP data */
1878
void tcp_destroy(void)
1879
0
{
1880
0
  int part;
1881
1882
0
  if (tcp_parts[0].tcpconn_id_hash)
1883
      /* force close/expire for all active tcpconns*/
1884
0
      __tcpconn_lifetime(1);
1885
1886
0
  if (connection_id){
1887
0
    shm_free(connection_id);
1888
0
    connection_id=0;
1889
0
  }
1890
1891
0
  for ( part=0 ; part<TCP_PARTITION_SIZE ; part++ ) {
1892
0
    if (tcp_parts[part].tcpconn_id_hash){
1893
0
      shm_free(tcp_parts[part].tcpconn_id_hash);
1894
0
      tcp_parts[part].tcpconn_id_hash=0;
1895
0
    }
1896
0
    if (tcp_parts[part].tcpconn_aliases_hash){
1897
0
      shm_free(tcp_parts[part].tcpconn_aliases_hash);
1898
0
      tcp_parts[part].tcpconn_aliases_hash=0;
1899
0
    }
1900
0
    if (tcp_parts[part].tcpconn_lock){
1901
0
      lock_destroy(tcp_parts[part].tcpconn_lock);
1902
0
      lock_dealloc((void*)tcp_parts[part].tcpconn_lock);
1903
0
      tcp_parts[part].tcpconn_lock=0;
1904
0
    }
1905
0
  }
1906
0
}
1907
1908
1909
int tcp_create_comm_proc_socks( int proc_no)
1910
0
{
1911
0
  int i;
1912
1913
0
  if (tcp_disabled)
1914
0
    return 0;
1915
1916
0
  for( i=0 ; i<proc_no ; i++ ) {
1917
0
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, pt[i].tcp_socks_holder)<0){
1918
0
      LM_ERR("socketpair failed for process %d: %d/%s\n",
1919
0
        i, errno, strerror(errno));
1920
0
      return -1;
1921
0
    }
1922
0
  }
1923
1924
0
  return 0;
1925
0
}
1926
1927
1928
int tcp_activate_comm_proc_socks( int proc_no)
1929
0
{
1930
0
  if (tcp_disabled)
1931
0
    return 0;
1932
1933
0
  unix_tcp_sock = pt[proc_no].tcp_socks_holder[1];
1934
0
  pt[proc_no].unix_sock = pt[proc_no].tcp_socks_holder[0];
1935
1936
0
  return 0;
1937
0
}
1938
1939
1940
void tcp_connect_proc_to_tcp_main( int proc_no, int worker )
1941
0
{
1942
0
  if (tcp_disabled)
1943
0
    return;
1944
1945
0
  if (worker) {
1946
0
    close( pt[proc_no].unix_sock );
1947
0
  } else {
1948
0
    unix_tcp_sock = -1;
1949
0
  }
1950
0
}
1951
1952
1953
int _get_own_tcp_worker_id(void)
1954
0
{
1955
0
  pid_t pid;
1956
0
  int i;
1957
1958
0
  pid = getpid();
1959
0
  for( i=0 ; i<tcp_workers_max_no ; i++)
1960
0
    if(tcp_workers[i].pid==pid)
1961
0
      return i;
1962
1963
0
  return -1;
1964
0
}
1965
1966
1967
void tcp_reset_worker_slot(void)
1968
0
{
1969
0
  int i;
1970
1971
0
  if ((i=_get_own_tcp_worker_id())>=0) {
1972
0
    tcp_workers[i].state=STATE_INACTIVE;
1973
0
    tcp_workers[i].pid=0;
1974
0
    tcp_workers[i].pt_idx=0;
1975
0
  }
1976
0
}
1977
1978
1979
/* Auto-scaling callback: forks one extra TCP worker at runtime into a
 * free slot of the shared workers table. In the parent it returns the id
 * reported by internal_fork() (stored as pt_idx), or -1 on error; the
 * child either enters the worker loop or exits on init failure. */
static int fork_dynamic_tcp_process(void *foo)
{
	int p_id;
	int r;
	const struct internal_fork_params ifp_sr_tcp = {
		.proc_desc = "SIP receiver TCP",
		.flags = OSS_PROC_DYNAMIC|OSS_PROC_NEEDS_SCRIPT,
		.type = TYPE_TCP,
	};

	/* search for free slot in the TCP workers table */
	for( r=0 ; r<tcp_workers_max_no ; r++ )
		if (tcp_workers[r].state==STATE_INACTIVE)
			break;

	if (r==tcp_workers_max_no) {
		/* the table was sized for max_procs, so this should not happen */
		LM_BUG("trying to fork one more TCP worker but no free slots in "
			"the TCP table (size=%d)\n",tcp_workers_max_no);
		return -1;
	}

	if((p_id=internal_fork(&ifp_sr_tcp))<0){
		LM_ERR("cannot fork dynamic TCP worker process\n");
		return(-1);
	}else if (p_id==0){
		/* new TCP process */
		set_proc_attrs("TCP receiver");
		/* the child publishes its own PID in the shared workers table */
		tcp_workers[r].pid = getpid();

		if (tcp_worker_proc_reactor_init(tcp_workers[r].main_unix_sock)<0||
		init_child(20000) < 0) {
			goto error;
		}

		report_conditional_status( 1, 0);/*report success*/
		/* the child proc is done read&write) dealing with the status pipe */
		clean_read_pipeend();

		tcp_worker_proc_loop();
		/* only reached if the worker loop returns; falls through to the
		 * error path below, so the process still self-exits */
		destroy_worker_reactor();

error:
		report_failure_status();
		LM_ERR("Initializing new process failed, exiting with error \n");
		pt[process_no].flags |= OSS_PROC_SELFEXIT;
		exit( -1);
	} else {
		/*parent/main*/
		/* claim the slot; pt_idx links back to the global process table */
		tcp_workers[r].state=STATE_ACTIVE;
		tcp_workers[r].n_reqs=0;
		tcp_workers[r].pt_idx=p_id;
		return p_id;
	}

	/* not reachable - both fork branches return or exit above */
	return 0;
}
2035
2036
2037
static void tcp_process_graceful_terminate(int sender, void *param)
2038
0
{
2039
0
  int i;
2040
2041
  /* we accept this only from the main proccess */
2042
0
  if (sender!=0) {
2043
0
    LM_BUG("graceful terminate received from a non-main process!!\n");
2044
0
    return;
2045
0
  }
2046
0
  LM_NOTICE("process %d received RPC to terminate from Main\n",process_no);
2047
2048
  /* going into "draining" state will avoid:
2049
   *  - getting jobs from TCP MAIN (active state required for that)
2050
   *  - having othe worker slot re-used (inactive state required for that) */
2051
0
  if ((i=_get_own_tcp_worker_id())>=0)
2052
0
    tcp_workers[i].state=STATE_DRAINING;
2053
2054
0
  tcp_terminate_worker();
2055
2056
0
  return;
2057
0
}
2058
2059
2060
/* counts the number of TCP processes to start with; this number may
2061
 * change during runtime due auto-scaling */
2062
int tcp_count_processes(unsigned int *extra)
2063
0
{
2064
0
  if (extra) *extra = 0;
2065
2066
0
  if (tcp_disabled)
2067
0
    return 0;
2068
2069
2070
0
  if (s_profile && extra) {
2071
    /* how many can be forked over the number of procs to start with ?*/
2072
0
    if (s_profile->max_procs > tcp_workers_no)
2073
0
      *extra = s_profile->max_procs - tcp_workers_no;
2074
0
  }
2075
2076
0
  return 1/* tcp main */ + tcp_workers_no /*workers to start with*/;
2077
0
}
2078
2079
2080
/* Forks the boot-time set of TCP worker processes: pre-creates the
 * MAIN<->worker socket pairs for ALL potential workers (including ones
 * forked later by auto-scaling), registers the scaling process group,
 * then forks tcp_workers_no workers. The parent waits until the first
 * worker has run the startup route. Returns 0 on success, -1 on error;
 * forked children never return (they enter the worker loop or exit). */
int tcp_start_processes(int *chd_rank, int *startup_done)
{
	int r, n, p_id;
	int reader_fd[2]; /* for comm. with the tcp workers read  */
	struct socket_info_full *sif;
	const struct internal_fork_params ifp_sr_tcp = {
		.proc_desc = "SIP receiver TCP",
		.flags = OSS_PROC_NEEDS_SCRIPT,
		.type = TYPE_TCP,
	};

	if (tcp_disabled)
		return 0;

	/* estimate max fd. no:
	 * 1 tcp send unix socket/all_proc,
	 *  + 1 udp sock/udp proc + 1 tcp_worker sock/tcp worker*
	 *  + no_listen_tcp */
	/* NOTE(review): the listener count accumulated in "r" here is never
	 * read afterwards ("r" is re-initialized by the next loop) - looks
	 * like leftover code; confirm against history before removing */
	for( r=0,n=PROTO_FIRST ; n<PROTO_LAST ; n++ )
		if ( is_tcp_based_proto(n) )
			for(sif=protos[n].listeners; sif ; sif=sif->next,r++ );

	/* create the socket pairs for ALL potential processes */
	for(r=0; r<tcp_workers_max_no; r++){
		/* create sock to communicate from TCP main to worker */
		if (socketpair(AF_UNIX, SOCK_STREAM, 0, reader_fd)<0){
			LM_ERR("socketpair failed: %s\n", strerror(errno));
			goto error;
		}
		tcp_workers[r].unix_sock = reader_fd[0]; /* worker's end */
		tcp_workers[r].main_unix_sock = reader_fd[1]; /* main's end */
	}

	/* group registration failure only disables auto-forking, not startup */
	if ( auto_scaling_enabled && s_profile &&
	create_process_group( TYPE_TCP, NULL, s_profile,
	fork_dynamic_tcp_process, tcp_process_graceful_terminate)!=0)
		LM_ERR("failed to create group of TCP processes for, "
			"auto forking will not be possible\n");

	/* start the TCP workers */
	for(r=0; r<tcp_workers_no; r++){
		(*chd_rank)++;
		p_id=internal_fork(&ifp_sr_tcp);
		if (p_id<0){
			LM_ERR("fork failed\n");
			goto error;
		}else if (p_id>0){
			/* parent: claim the slot in the shared workers table */
			tcp_workers[r].state=STATE_ACTIVE;
			tcp_workers[r].n_reqs=0;
			tcp_workers[r].pt_idx=p_id;
		}else{
			/* child: publish own PID, init reactor + child, then loop */
			set_proc_attrs("TCP receiver");
			tcp_workers[r].pid = getpid();
			if (tcp_worker_proc_reactor_init(tcp_workers[r].main_unix_sock)<0||
					init_child(*chd_rank) < 0) {
				LM_ERR("init_children failed\n");
				report_failure_status();
				/* unblock the parent's wait loop below with a failure */
				if (startup_done)
					*startup_done = -1;
				exit(-1);
			}

			/* was startup route executed so far ? */
			if (startup_done!=NULL && *startup_done==0 && r==0) {
				LM_DBG("running startup for first TCP\n");
				if(run_startup_route()< 0) {
					LM_ERR("Startup route processing failed\n");
					report_failure_status();
					*startup_done = -1;
					exit(-1);
				}
				*startup_done = 1;
			}

			report_conditional_status( (!no_daemon_mode), 0);

			tcp_worker_proc_loop();
		}
	}

	/* wait for the startup route to be executed; *startup_done is shared
	 * with the first child, which sets it to 1 (done) or -1 (failed) */
	if (startup_done)
		while (!(*startup_done)) {
			usleep(5);
			handle_sigs();
		}

	return 0;
error:
	return -1;
}
2173
2174
2175
int tcp_start_listener(void)
2176
0
{
2177
0
  int p_id;
2178
0
  const struct internal_fork_params ifp_tcp_main = {
2179
0
    .proc_desc = "TCP main",
2180
0
    .flags = 0,
2181
0
    .type = TYPE_NONE,
2182
0
  };
2183
2184
0
  if (tcp_disabled)
2185
0
    return 0;
2186
2187
  /* start the TCP manager process */
2188
0
  if ( (p_id=internal_fork(&ifp_tcp_main))<0 ) {
2189
0
    LM_CRIT("cannot fork tcp main process\n");
2190
0
    goto error;
2191
0
  }else if (p_id==0){
2192
      /* child */
2193
    /* close the TCP inter-process sockets */
2194
0
    close(unix_tcp_sock);
2195
0
    unix_tcp_sock = -1;
2196
0
    close(pt[process_no].unix_sock);
2197
0
    pt[process_no].unix_sock = -1;
2198
2199
0
    report_conditional_status( (!no_daemon_mode), 0);
2200
2201
0
    tcp_main_server();
2202
0
    exit(-1);
2203
0
  }
2204
2205
0
  return 0;
2206
0
error:
2207
0
  return -1;
2208
0
}
2209
2210
/* Tells whether async (write) support is available; thin wrapper over
 * the reactor's reactor_has_async() check. */
int tcp_has_async_write(void)
{
	return reactor_has_async();
}
2214
2215
2216
/***************************** MI functions **********************************/
2217
2218
mi_response_t *mi_tcp_list_conns(const mi_params_t *params,
2219
            struct mi_handler *async_hdl)
2220
0
{
2221
0
  mi_response_t *resp;
2222
0
  mi_item_t *resp_obj;
2223
0
  mi_item_t *conns_arr, *conn_item;
2224
0
  struct tcp_connection *conn;
2225
0
  time_t _ts;
2226
0
  char date_buf[MI_DATE_BUF_LEN];
2227
0
  int date_buf_len;
2228
0
  unsigned int i,j,part;
2229
0
  char proto[PROTO_NAME_MAX_SIZE];
2230
0
  struct tm ltime;
2231
0
  char *p;
2232
2233
0
  if (tcp_disabled)
2234
0
    return init_mi_result_null();
2235
2236
0
  resp = init_mi_result_object(&resp_obj);
2237
0
  if (!resp)
2238
0
    return 0;
2239
2240
0
  conns_arr = add_mi_array(resp_obj, MI_SSTR("Connections"));
2241
0
  if (!conns_arr) {
2242
0
    free_mi_response(resp);
2243
0
    return 0;
2244
0
  }
2245
2246
0
  for( part=0 ; part<TCP_PARTITION_SIZE ; part++) {
2247
0
    TCPCONN_LOCK(part);
2248
0
    for( i=0; i<TCP_ID_HASH_SIZE ; i++ ) {
2249
0
      for(conn=TCP_PART(part).tcpconn_id_hash[i];conn;conn=conn->id_next){
2250
        /* add one object fo each conn */
2251
0
        conn_item = add_mi_object(conns_arr, 0, 0);
2252
0
        if (!conn_item)
2253
0
          goto error;
2254
2255
        /* add ID */
2256
0
        if (add_mi_number(conn_item, MI_SSTR("ID"), conn->id) < 0)
2257
0
          goto error;
2258
2259
        /* add type/proto */
2260
0
        p = proto2str(conn->type, proto);
2261
0
        if (add_mi_string(conn_item, MI_SSTR("Type"), proto,
2262
0
          (int)(long)(p-proto)) > 0)
2263
0
          goto error;
2264
2265
        /* add state */
2266
0
        if (add_mi_number(conn_item, MI_SSTR("State"), conn->state) < 0)
2267
0
          goto error;
2268
2269
        /* add Remote IP:Port */
2270
0
        if (add_mi_string_fmt(conn_item, MI_SSTR("Remote"), "%s:%d",
2271
0
          ip_addr2a(&conn->rcv.src_ip), conn->rcv.src_port) < 0)
2272
0
          goto error;
2273
2274
        /* add Local IP:Port */
2275
0
        if (add_mi_string_fmt(conn_item, MI_SSTR("Local"), "%s:%d",
2276
0
          ip_addr2a(&conn->rcv.dst_ip), conn->rcv.dst_port) < 0)
2277
0
          goto error;
2278
2279
        /* add lifetime */
2280
0
        _ts = (time_t)conn->lifetime + startup_time;
2281
0
        localtime_r(&_ts, &ltime);
2282
0
        date_buf_len = strftime(date_buf, MI_DATE_BUF_LEN - 1,
2283
0
                    "%Y-%m-%d %H:%M:%S", &ltime);
2284
0
        if (date_buf_len != 0) {
2285
0
          if (add_mi_string(conn_item, MI_SSTR("Lifetime"),
2286
0
            date_buf, date_buf_len) < 0)
2287
0
            goto error;
2288
0
        } else {
2289
0
          if (add_mi_number(conn_item, MI_SSTR("Lifetime"), _ts) < 0)
2290
0
            goto error;
2291
0
        }
2292
2293
        /* add the port-aliases */
2294
0
        for( j=0 ; j<conn->aliases ; j++ )
2295
          /* add one node for each conn */
2296
0
          add_mi_number( conn_item, MI_SSTR("Alias port"),
2297
0
            conn->con_aliases[j].port );
2298
0
      }
2299
0
    }
2300
2301
0
    TCPCONN_UNLOCK(part);
2302
0
  }
2303
2304
0
  return resp;
2305
2306
0
error:
2307
0
  TCPCONN_UNLOCK(part);
2308
0
  LM_ERR("failed to add MI item\n");
2309
0
  free_mi_response(resp);
2310
0
  return 0;
2311
0
}