Line | Count | Source |
1 | | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | | /* |
3 | | * Zebra API server. |
4 | | * Portions: |
5 | | * Copyright (C) 1997-1999 Kunihiro Ishiguro |
6 | | * Copyright (C) 2015-2018 Cumulus Networks, Inc. |
7 | | * et al. |
8 | | */ |
9 | | |
10 | | #include <zebra.h> |
11 | | |
12 | | /* clang-format off */ |
13 | | #include <errno.h> /* for errno */ |
14 | | #include <netinet/in.h> /* for sockaddr_in */ |
15 | | #include <stdint.h> /* for uint8_t */ |
16 | | #include <stdio.h> /* for snprintf */ |
17 | | #include <sys/socket.h> /* for sockaddr_storage, AF_UNIX, accept... */ |
18 | | #include <sys/stat.h> /* for umask, mode_t */ |
19 | | #include <sys/un.h> /* for sockaddr_un */ |
20 | | #include <time.h> /* for NULL, tm, gmtime, time_t */ |
21 | | #include <unistd.h> /* for close, unlink, ssize_t */ |
22 | | |
23 | | #include "lib/buffer.h" /* for BUFFER_EMPTY, BUFFER_ERROR, BUFFE... */ |
24 | | #include "lib/command.h" /* for vty, install_element, CMD_SUCCESS... */ |
25 | | #include "lib/hook.h" /* for DEFINE_HOOK, DEFINE_KOOH, hook_call */ |
26 | | #include "lib/linklist.h" /* for ALL_LIST_ELEMENTS_RO, ALL_LIST_EL... */ |
27 | | #include "lib/libfrr.h" /* for frr_zclient_addr */ |
28 | | #include "lib/log.h" /* for zlog_warn, zlog_debug, safe_strerror */ |
29 | | #include "lib/memory.h" /* for MTYPE_TMP, XCALLOC, XFREE */ |
30 | | #include "lib/monotime.h" /* for monotime, ONE_DAY_SECOND, ONE_WEE... */ |
31 | | #include "lib/network.h" /* for set_nonblocking */ |
32 | | #include "lib/privs.h" /* for zebra_privs_t, ZPRIVS_LOWER, ZPRI... */ |
33 | | #include "lib/route_types.h" /* for ZEBRA_ROUTE_MAX */ |
34 | | #include "lib/sockopt.h" /* for setsockopt_so_recvbuf, setsockopt... */ |
35 | | #include "lib/sockunion.h" /* for sockopt_reuseaddr, sockopt_reuseport */ |
36 | | #include "lib/stream.h" /* for STREAM_SIZE, stream (ptr only), ... */ |
37 | | #include "frrevent.h" /* for thread (ptr only), EVENT_ARG, ... */ |
38 | | #include "lib/vrf.h" /* for vrf_info_lookup, VRF_DEFAULT */ |
39 | | #include "lib/vty.h" /* for vty_out, vty (ptr only) */ |
40 | | #include "lib/zclient.h" /* for zmsghdr, ZEBRA_HEADER_SIZE, ZEBRA... */ |
41 | | #include "lib/frr_pthread.h" /* for frr_pthread_new, frr_pthread_stop... */ |
42 | | #include "lib/frratomic.h" /* for atomic_load_explicit, atomic_stor... */ |
43 | | #include "lib/lib_errors.h" /* for generic ferr ids */ |
44 | | #include "lib/printfrr.h" /* for string functions */ |
45 | | |
46 | | #include "zebra/debug.h" /* for various debugging macros */ |
47 | | #include "zebra/rib.h" /* for rib_score_proto */ |
48 | | #include "zebra/zapi_msg.h" /* for zserv_handle_commands */ |
49 | | #include "zebra/zebra_vrf.h" /* for zebra_vrf_lookup_by_id, zvrf */ |
50 | | #include "zebra/zserv.h" /* for zserv */ |
51 | | #include "zebra/zebra_router.h" |
52 | | #include "zebra/zebra_errors.h" /* for error messages */ |
53 | | /* clang-format on */ |
54 | | |
55 | | /* privileges */ |
56 | | extern struct zebra_privs_t zserv_privs; |
57 | | |
58 | | /* The listener socket for clients connecting to us */ |
59 | | static int zsock; |
60 | | |
61 | | /* The lock that protects access to zapi client objects */ |
62 | | static pthread_mutex_t client_mutex; |
63 | | |
64 | | static struct zserv *find_client_internal(uint8_t proto, |
65 | | unsigned short instance, |
66 | | uint32_t session_id); |
67 | | |
68 | | /* Mem type for zclients. */ |
69 | 2 | DEFINE_MTYPE_STATIC(ZEBRA, ZSERV_CLIENT, "ZClients"); |
70 | 2 | |
/*
 * Client thread events.
 *
 * These are used almost exclusively by client threads to drive their own event
 * loops. The only exception is in zserv_client_create(), which pushes an
 * initial ZSERV_CLIENT_READ event to start the API handler loop.
 */
enum zserv_client_event {
	/* Schedule a socket read on the client's pthread */
	ZSERV_CLIENT_READ,
	/* Schedule a buffered write on the client's pthread */
	ZSERV_CLIENT_WRITE,
};
84 | 2 | |
/*
 * Main thread events.
 *
 * These are used by client threads to notify the main thread about various
 * events and to request message processing.
 */
enum zserv_event {
	/* Schedule listen job on the Zebra API socket */
	ZSERV_ACCEPT,
	/* The calling client has packets on its input buffer */
	ZSERV_PROCESS_MESSAGES,
	/* The calling client wishes to be killed */
	ZSERV_HANDLE_CLIENT_FAIL,
};
99 | 2 | |
100 | 2 | /* |
101 | 2 | * Zebra server event driver for all client threads. |
102 | 2 | * |
103 | 2 | * This is essentially a wrapper around event_add_event() that centralizes |
104 | 2 | * those scheduling calls into one place. |
105 | 2 | * |
106 | 2 | * All calls to this function schedule an event on the pthread running the |
107 | 2 | * provided client. |
108 | 2 | * |
109 | 2 | * client |
110 | 2 | * the client in question, and thread target |
111 | 2 | * |
112 | 2 | * event |
113 | 2 | * the event to notify them about |
114 | 2 | */ |
115 | 2 | static void zserv_client_event(struct zserv *client, |
116 | 2 | enum zserv_client_event event); |
117 | 2 | |
118 | 2 | /* |
119 | 2 | * Zebra server event driver for the main thread. |
120 | 2 | * |
121 | 2 | * This is essentially a wrapper around event_add_event() that centralizes |
122 | 2 | * those scheduling calls into one place. |
123 | 2 | * |
124 | 2 | * All calls to this function schedule an event on Zebra's main pthread. |
125 | 2 | * |
126 | 2 | * client |
127 | 2 | * the client in question |
128 | 2 | * |
129 | 2 | * event |
130 | 2 | * the event to notify the main thread about |
131 | 2 | */ |
132 | 2 | static void zserv_event(struct zserv *client, enum zserv_event event); |
133 | 2 | |
134 | 2 | |
135 | 2 | /* Client thread lifecycle -------------------------------------------------- */ |
136 | 2 | |
/*
 * Free a zserv client object.
 *
 * This releases only the struct itself. Resources owned by the client
 * (stream buffers, fifos, bitmaps, mutexes) must already have been
 * released; see zserv_client_free(), which does that and then calls here.
 */
void zserv_client_delete(struct zserv *client)
{
	XFREE(MTYPE_ZSERV_CLIENT, client);
}
144 | | |
145 | | /* |
146 | | * Log zapi message to zlog. |
147 | | * |
148 | | * errmsg (optional) |
149 | | * Debugging message |
150 | | * |
151 | | * msg |
152 | | * The message |
153 | | * |
154 | | * hdr (optional) |
155 | | * The message header |
156 | | */ |
157 | | void zserv_log_message(const char *errmsg, struct stream *msg, |
158 | | struct zmsghdr *hdr) |
159 | 0 | { |
160 | 0 | zlog_debug("Rx'd ZAPI message"); |
161 | 0 | if (errmsg) |
162 | 0 | zlog_debug("%s", errmsg); |
163 | 0 | if (hdr) { |
164 | 0 | zlog_debug(" Length: %d", hdr->length); |
165 | 0 | zlog_debug("Command: %s", zserv_command_string(hdr->command)); |
166 | 0 | zlog_debug(" VRF: %u", hdr->vrf_id); |
167 | 0 | } |
168 | 0 | stream_hexdump(msg); |
169 | 0 | } |
170 | | |
/*
 * Gracefully shut down a client connection.
 *
 * Cancel any pending tasks for the client's thread. Then schedule a task on
 * the main thread to shut down the calling thread.
 *
 * It is not safe to close the client socket in this function. The socket is
 * owned by the main thread.
 *
 * Must be called from the client pthread, never the main thread.
 */
static void zserv_client_fail(struct zserv *client)
{
	flog_warn(EC_ZEBRA_CLIENT_IO_ERROR,
		  "Client '%s' encountered an error and is shutting down.",
		  zebra_route_string(client->proto));

	/* Stop this pthread's event loop; the loop exits once it observes
	 * 'running' as false.
	 */
	atomic_store_explicit(&client->pthread->running, false,
			      memory_order_relaxed);

	/* Cancel any of our own I/O tasks still pending... */
	EVENT_OFF(client->t_read);
	EVENT_OFF(client->t_write);
	/* ...and hand final teardown (socket close, frees) to the main
	 * thread, which owns those resources.
	 */
	zserv_event(client, ZSERV_HANDLE_CLIENT_FAIL);
}
195 | | |
/*
 * Write all pending messages to client socket.
 *
 * This function first attempts to flush any buffered data. If unsuccessful,
 * the function reschedules itself and returns. If successful, it pops all
 * available messages from the output queue and continues to write data
 * directly to the socket until the socket would block. If the socket never
 * blocks and all data is written, the function returns without rescheduling
 * itself. If the socket ends up throwing EWOULDBLOCK, the remaining data is
 * buffered and the function reschedules itself.
 *
 * The utility of the buffer is that it allows us to vastly reduce lock
 * contention by allowing us to pop *all* messages off the output queue at once
 * instead of locking and unlocking each time we want to pop a single message
 * off the queue. The same thing could arguably be accomplished faster by
 * allowing the main thread to write directly into the buffer instead of
 * enqueuing packets onto an intermediary queue, but the intermediary queue
 * allows us to expose information about input and output queues to the user in
 * terms of number of packets rather than size of data.
 */
static void zserv_write(struct event *thread)
{
	struct zserv *client = EVENT_ARG(thread);
	struct stream *msg;
	uint32_t wcmd = 0;
	struct stream_fifo *cache;
	uint64_t time_now = monotime(NULL);

	/* If we have any data pending, try to flush it first */
	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
		goto zwrite_fail;
	case BUFFER_PENDING:
		/* Socket would block; record the attempt and retry later */
		frr_with_mutex (&client->stats_mtx) {
			client->last_write_time = time_now;
		}
		zserv_client_event(client, ZSERV_CLIENT_WRITE);
		return;
	case BUFFER_EMPTY:
		break;
	}

	cache = stream_fifo_new();

	/* Drain the whole output queue under one lock acquisition */
	frr_with_mutex (&client->obuf_mtx) {
		while (stream_fifo_head(client->obuf_fifo))
			stream_fifo_push(cache,
					 stream_fifo_pop(client->obuf_fifo));
	}

	/* Remember the command of the newest message for the stats below */
	if (cache->tail) {
		msg = cache->tail;
		stream_set_getp(msg, 0);
		wcmd = stream_getw_from(msg, ZAPI_HEADER_CMD_LOCATION);
	}

	/* Copy each message into the write buffer and release it */
	while (stream_fifo_head(cache)) {
		msg = stream_fifo_pop(cache);
		buffer_put(client->wb, STREAM_DATA(msg), stream_get_endp(msg));
		stream_free(msg);
	}

	stream_fifo_free(cache);

	/* If we have any data pending, try to flush it first */
	switch (buffer_flush_all(client->wb, client->sock)) {
	case BUFFER_ERROR:
		goto zwrite_fail;
	case BUFFER_PENDING:
		frr_with_mutex (&client->stats_mtx) {
			client->last_write_time = time_now;
		}
		zserv_client_event(client, ZSERV_CLIENT_WRITE);
		return;
	case BUFFER_EMPTY:
		break;
	}

	/* Everything flushed; publish the write stats */
	frr_with_mutex (&client->stats_mtx) {
		client->last_write_cmd = wcmd;
		client->last_write_time = time_now;
	}
	return;

zwrite_fail:
	flog_warn(EC_ZEBRA_CLIENT_WRITE_FAILED,
		  "%s: could not write to %s [fd = %d], closing.", __func__,
		  zebra_route_string(client->proto), client->sock);
	zserv_client_fail(client);
}
286 | | |
287 | | /* |
288 | | * Read and process data from a client socket. |
289 | | * |
290 | | * The responsibilities here are to read raw data from the client socket, |
291 | | * validate the header, encapsulate it into a single stream object, push it |
292 | | * onto the input queue and then notify the main thread that there is new data |
293 | | * available. |
294 | | * |
295 | | * This function first looks for any data in the client structure's working |
296 | | * input buffer. If data is present, it is assumed that reading stopped in a |
297 | | * previous invocation of this task and needs to be resumed to finish a message. |
298 | | * Otherwise, the socket data stream is assumed to be at the beginning of a new |
299 | | * ZAPI message (specifically at the header). The header is read and validated. |
300 | | * If the header passed validation then the length field found in the header is |
301 | | * used to compute the total length of the message. That much data is read (but |
302 | | * not inspected), appended to the header, placed into a stream and pushed onto |
303 | | * the client's input queue. A task is then scheduled on the main thread to |
304 | | * process the client's input queue. Finally, if all of this was successful, |
305 | | * this task reschedules itself. |
306 | | * |
307 | | * Any failure in any of these actions is handled by terminating the client. |
308 | | */ |
309 | | static void zserv_read(struct event *thread) |
310 | 0 | { |
311 | 0 | struct zserv *client = EVENT_ARG(thread); |
312 | 0 | int sock; |
313 | 0 | size_t already; |
314 | 0 | struct stream_fifo *cache; |
315 | 0 | uint32_t p2p_orig; |
316 | 0 |
|
317 | 0 | uint32_t p2p; |
318 | 0 | struct zmsghdr hdr; |
319 | 0 |
|
320 | 0 | p2p_orig = atomic_load_explicit(&zrouter.packets_to_process, |
321 | 0 | memory_order_relaxed); |
322 | 0 | cache = stream_fifo_new(); |
323 | 0 | p2p = p2p_orig; |
324 | 0 | sock = EVENT_FD(thread); |
325 | 0 |
|
326 | 0 | while (p2p) { |
327 | 0 | ssize_t nb; |
328 | 0 | bool hdrvalid; |
329 | 0 | char errmsg[256]; |
330 | 0 |
|
331 | 0 | already = stream_get_endp(client->ibuf_work); |
332 | 0 |
|
333 | 0 | /* Read length and command (if we don't have it already). */ |
334 | 0 | if (already < ZEBRA_HEADER_SIZE) { |
335 | 0 | nb = stream_read_try(client->ibuf_work, sock, |
336 | 0 | ZEBRA_HEADER_SIZE - already); |
337 | 0 | if ((nb == 0 || nb == -1)) { |
338 | 0 | if (IS_ZEBRA_DEBUG_EVENT) |
339 | 0 | zlog_debug("connection closed socket [%d]", |
340 | 0 | sock); |
341 | 0 | goto zread_fail; |
342 | 0 | } |
343 | 0 | if (nb != (ssize_t)(ZEBRA_HEADER_SIZE - already)) { |
344 | 0 | /* Try again later. */ |
345 | 0 | break; |
346 | 0 | } |
347 | 0 | already = ZEBRA_HEADER_SIZE; |
348 | 0 | } |
349 | 0 |
|
350 | 0 | /* Reset to read from the beginning of the incoming packet. */ |
351 | 0 | stream_set_getp(client->ibuf_work, 0); |
352 | 0 |
|
353 | 0 | /* Fetch header values */ |
354 | 0 | hdrvalid = zapi_parse_header(client->ibuf_work, &hdr); |
355 | 0 |
|
356 | 0 | if (!hdrvalid) { |
357 | 0 | snprintf(errmsg, sizeof(errmsg), |
358 | 0 | "%s: Message has corrupt header", __func__); |
359 | 0 | zserv_log_message(errmsg, client->ibuf_work, NULL); |
360 | 0 | goto zread_fail; |
361 | 0 | } |
362 | 0 |
|
363 | 0 | /* Validate header */ |
364 | 0 | if (hdr.marker != ZEBRA_HEADER_MARKER |
365 | 0 | || hdr.version != ZSERV_VERSION) { |
366 | 0 | snprintf( |
367 | 0 | errmsg, sizeof(errmsg), |
368 | 0 | "Message has corrupt header\n%s: socket %d version mismatch, marker %d, version %d", |
369 | 0 | __func__, sock, hdr.marker, hdr.version); |
370 | 0 | zserv_log_message(errmsg, client->ibuf_work, &hdr); |
371 | 0 | goto zread_fail; |
372 | 0 | } |
373 | 0 | if (hdr.length < ZEBRA_HEADER_SIZE) { |
374 | 0 | snprintf( |
375 | 0 | errmsg, sizeof(errmsg), |
376 | 0 | "Message has corrupt header\n%s: socket %d message length %u is less than header size %d", |
377 | 0 | __func__, sock, hdr.length, ZEBRA_HEADER_SIZE); |
378 | 0 | zserv_log_message(errmsg, client->ibuf_work, &hdr); |
379 | 0 | goto zread_fail; |
380 | 0 | } |
381 | 0 | if (hdr.length > STREAM_SIZE(client->ibuf_work)) { |
382 | 0 | snprintf( |
383 | 0 | errmsg, sizeof(errmsg), |
384 | 0 | "Message has corrupt header\n%s: socket %d message length %u exceeds buffer size %lu", |
385 | 0 | __func__, sock, hdr.length, |
386 | 0 | (unsigned long)STREAM_SIZE(client->ibuf_work)); |
387 | 0 | zserv_log_message(errmsg, client->ibuf_work, &hdr); |
388 | 0 | goto zread_fail; |
389 | 0 | } |
390 | 0 |
|
391 | 0 | /* Read rest of data. */ |
392 | 0 | if (already < hdr.length) { |
393 | 0 | nb = stream_read_try(client->ibuf_work, sock, |
394 | 0 | hdr.length - already); |
395 | 0 | if ((nb == 0 || nb == -1)) { |
396 | 0 | if (IS_ZEBRA_DEBUG_EVENT) |
397 | 0 | zlog_debug( |
398 | 0 | "connection closed [%d] when reading zebra data", |
399 | 0 | sock); |
400 | 0 | goto zread_fail; |
401 | 0 | } |
402 | 0 | if (nb != (ssize_t)(hdr.length - already)) { |
403 | 0 | /* Try again later. */ |
404 | 0 | break; |
405 | 0 | } |
406 | 0 | } |
407 | 0 |
|
408 | 0 | /* Debug packet information. */ |
409 | 0 | if (IS_ZEBRA_DEBUG_PACKET) |
410 | 0 | zlog_debug("zebra message[%s:%u:%u] comes from socket [%d]", |
411 | 0 | zserv_command_string(hdr.command), |
412 | 0 | hdr.vrf_id, hdr.length, |
413 | 0 | sock); |
414 | 0 |
|
415 | 0 | stream_set_getp(client->ibuf_work, 0); |
416 | 0 | struct stream *msg = stream_dup(client->ibuf_work); |
417 | 0 |
|
418 | 0 | stream_fifo_push(cache, msg); |
419 | 0 | stream_reset(client->ibuf_work); |
420 | 0 | p2p--; |
421 | 0 | } |
422 | 0 |
|
423 | 0 | if (p2p < p2p_orig) { |
424 | 0 | uint64_t time_now = monotime(NULL); |
425 | 0 |
|
426 | 0 | /* update session statistics */ |
427 | 0 | frr_with_mutex (&client->stats_mtx) { |
428 | 0 | client->last_read_time = time_now; |
429 | 0 | client->last_read_cmd = hdr.command; |
430 | 0 | } |
431 | 0 |
|
432 | 0 | /* publish read packets on client's input queue */ |
433 | 0 | frr_with_mutex (&client->ibuf_mtx) { |
434 | 0 | while (cache->head) |
435 | 0 | stream_fifo_push(client->ibuf_fifo, |
436 | 0 | stream_fifo_pop(cache)); |
437 | 0 | } |
438 | 0 |
|
439 | 0 | /* Schedule job to process those packets */ |
440 | 0 | zserv_event(client, ZSERV_PROCESS_MESSAGES); |
441 | 0 |
|
442 | 0 | } |
443 | 0 |
|
444 | 0 | if (IS_ZEBRA_DEBUG_PACKET) |
445 | 0 | zlog_debug("Read %d packets from client: %s", p2p_orig - p2p, |
446 | 0 | zebra_route_string(client->proto)); |
447 | 0 |
|
448 | 0 | /* Reschedule ourselves */ |
449 | 0 | zserv_client_event(client, ZSERV_CLIENT_READ); |
450 | 0 |
|
451 | 0 | stream_fifo_free(cache); |
452 | 0 |
|
453 | 0 | return; |
454 | 0 |
|
455 | 0 | zread_fail: |
456 | 0 | stream_fifo_free(cache); |
457 | 0 | zserv_client_fail(client); |
458 | 0 | } |
459 | | |
460 | | static void zserv_client_event(struct zserv *client, |
461 | | enum zserv_client_event event) |
462 | 0 | { |
463 | 0 | #ifdef FUZZING |
464 | 0 | return; |
465 | 0 | #endif |
466 | 0 | switch (event) { |
467 | 0 | case ZSERV_CLIENT_READ: |
468 | 0 | event_add_read(client->pthread->master, zserv_read, client, |
469 | 0 | client->sock, &client->t_read); |
470 | 0 | break; |
471 | 0 | case ZSERV_CLIENT_WRITE: |
472 | 0 | event_add_write(client->pthread->master, zserv_write, client, |
473 | 0 | client->sock, &client->t_write); |
474 | 0 | break; |
475 | 0 | } |
476 | 0 | } |
477 | | |
478 | | /* Main thread lifecycle ---------------------------------------------------- */ |
479 | | |
/*
 * Read and process messages from a client.
 *
 * This task runs on the main pthread. It is scheduled by client pthreads when
 * they have new messages available on their input queues. The client is passed
 * as the task argument.
 *
 * Each message is popped off the client's input queue and the action associated
 * with the message is executed. This proceeds until there are no more messages,
 * an error occurs, or the processing limit is reached.
 *
 * The client's I/O thread can push at most zrouter.packets_to_process messages
 * onto the input buffer before notifying us there are packets to read. As long
 * as we always process zrouter.packets_to_process messages here, then we can
 * rely on the read thread to handle queuing this task enough times to process
 * everything on the input queue.
 */
static void zserv_process_messages(struct event *thread)
{
	struct zserv *client = EVENT_ARG(thread);
	struct stream *msg;
	struct stream_fifo *cache = stream_fifo_new();
	uint32_t p2p = zrouter.packets_to_process;
	bool need_resched = false;

	/* Move up to p2p messages into a local fifo so the input-queue lock
	 * is held only for the transfer, not for the processing itself.
	 */
	frr_with_mutex (&client->ibuf_mtx) {
		uint32_t i;
		for (i = 0; i < p2p && stream_fifo_head(client->ibuf_fifo);
		     ++i) {
			msg = stream_fifo_pop(client->ibuf_fifo);
			stream_fifo_push(cache, msg);
		}

		/* Need to reschedule processing work if there are still
		 * packets in the fifo.
		 */
		if (stream_fifo_head(client->ibuf_fifo))
			need_resched = true;
	}

	/* Process the batch of messages */
	if (stream_fifo_head(cache))
		zserv_handle_commands(client, cache);

	stream_fifo_free(cache);

	/* Reschedule ourselves if necessary */
	if (need_resched)
		zserv_event(client, ZSERV_PROCESS_MESSAGES);
}
530 | | |
/*
 * Enqueue one message on a client's output queue and schedule a write job
 * on the client's pthread. Takes ownership of 'msg' in all cases; the
 * caller must not free or reuse it. Always returns 0.
 */
int zserv_send_message(struct zserv *client, struct stream *msg)
{
#ifdef FUZZING
	/* No client pthread/socket exists under fuzzing; just consume the
	 * message so ownership semantics stay the same.
	 */
	stream_free(msg);
	return 0;
#endif
	frr_with_mutex (&client->obuf_mtx) {
		stream_fifo_push(client->obuf_fifo, msg);
	}

	zserv_client_event(client, ZSERV_CLIENT_WRITE);

	return 0;
}
545 | | |
546 | | /* |
547 | | * Send a batch of messages to a connected Zebra API client. |
548 | | */ |
549 | | int zserv_send_batch(struct zserv *client, struct stream_fifo *fifo) |
550 | 0 | { |
551 | 0 | struct stream *msg; |
552 | |
|
553 | 0 | frr_with_mutex (&client->obuf_mtx) { |
554 | 0 | msg = stream_fifo_pop(fifo); |
555 | 0 | while (msg) { |
556 | 0 | stream_fifo_push(client->obuf_fifo, msg); |
557 | 0 | msg = stream_fifo_pop(fifo); |
558 | 0 | } |
559 | 0 | } |
560 | |
|
561 | 0 | zserv_client_event(client, ZSERV_CLIENT_WRITE); |
562 | |
|
563 | 0 | return 0; |
564 | 0 | } |
565 | | |
566 | | /* Hooks for client connect / disconnect */ |
567 | | DEFINE_HOOK(zserv_client_connect, (struct zserv *client), (client)); |
568 | | DEFINE_KOOH(zserv_client_close, (struct zserv *client), (client)); |
569 | | |
/*
 * Deinitialize zebra client.
 *
 * - Deregister and deinitialize related internal resources
 * - Gracefully close socket
 * - Free associated resources
 * - Free client structure
 *
 * This does *not* take any action on the struct event * fields. These are
 * managed by the owning pthread and any tasks associated with them must have
 * been stopped prior to invoking this function.
 */
static void zserv_client_free(struct zserv *client)
{
	if (client == NULL)
		return;

	/* Notify subscribers of the disconnect before tearing down */
	hook_call(zserv_client_close, client);

	/* Close file descriptor. */
	if (client->sock) {
		unsigned long nroutes;
		unsigned long nnhgs;

#ifndef FUZZING
		close(client->sock);
#endif

		/* Only purge the client's routes/nhgs if no session has
		 * graceful restart enabled.
		 */
		if (DYNAMIC_CLIENT_GR_DISABLED(client)) {
			zebra_mpls_client_cleanup_vrf_label(client->proto);

			nroutes = rib_score_proto(client->proto,
						  client->instance);
			zlog_notice(
				"client %d disconnected %lu %s routes removed from the rib",
				client->sock, nroutes,
				zebra_route_string(client->proto));

			/* Not worrying about instance for now */
			nnhgs = zebra_nhg_score_proto(client->proto);
			zlog_notice(
				"client %d disconnected %lu %s nhgs removed from the rib",
				client->sock, nnhgs,
				zebra_route_string(client->proto));
		}
		client->sock = -1;
	}

	/* Free stream buffers. */
	if (client->ibuf_work)
		stream_free(client->ibuf_work);
	if (client->obuf_work)
		stream_free(client->obuf_work);
	if (client->ibuf_fifo)
		stream_fifo_free(client->ibuf_fifo);
	if (client->obuf_fifo)
		stream_fifo_free(client->obuf_fifo);
	if (client->wb)
		buffer_free(client->wb);

#ifndef FUZZING
	/* Free buffer mutexes */
	pthread_mutex_destroy(&client->stats_mtx);
	pthread_mutex_destroy(&client->obuf_mtx);
	pthread_mutex_destroy(&client->ibuf_mtx);
#endif

	/* Free bitmaps. */
	for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++) {
		for (int i = 0; i < ZEBRA_ROUTE_MAX; i++) {
			vrf_bitmap_free(client->redist[afi][i]);
			redist_del_all_instances(&client->mi_redist[afi][i]);
		}

		vrf_bitmap_free(client->redist_default[afi]);
		vrf_bitmap_free(client->ridinfo[afi]);
		vrf_bitmap_free(client->nhrp_neighinfo[afi]);
	}

	/*
	 * If any instance are graceful restart enabled,
	 * client is not deleted
	 */
	if (DYNAMIC_CLIENT_GR_DISABLED(client)) {
		if (IS_ZEBRA_DEBUG_EVENT)
			zlog_debug("%s: Deleting client %s", __func__,
				   zebra_route_string(client->proto));
		zserv_client_delete(client);
	} else {
		/* Handle cases where client has GR instance. */
		if (IS_ZEBRA_DEBUG_EVENT)
			zlog_debug("%s: client %s restart enabled", __func__,
				   zebra_route_string(client->proto));
		if (zebra_gr_client_disconnect(client) < 0)
			zlog_err(
				"%s: GR enabled but could not handle disconnect event",
				__func__);
	}
}
669 | | |
/*
 * Close a client session on the main pthread: stop and destroy the client's
 * I/O pthread, cancel its main-thread tasks, then either free the client or
 * defer the free if another pthread still holds it busy (see
 * zserv_acquire_client()/zserv_release_client()).
 */
void zserv_close_client(struct zserv *client)
{
	bool free_p = true;

	if (client->pthread) {
#ifndef FUZZING
		/* synchronously stop and join pthread */
		frr_pthread_stop(client->pthread, NULL);

		if (IS_ZEBRA_DEBUG_EVENT)
			zlog_debug("Closing client '%s'",
				   zebra_route_string(client->proto));

		/* Drop any main-thread work still queued for this client */
		event_cancel_event(zrouter.master, client);
		EVENT_OFF(client->t_cleanup);
		EVENT_OFF(client->t_process);

		/* destroy pthread */
		frr_pthread_destroy(client->pthread);
		client->pthread = NULL;
#endif
	}

	/*
	 * Final check in case the client struct is in use in another
	 * pthread: if not in-use, continue and free the client
	 */
	frr_with_mutex (&client_mutex) {
		if (client->busy_count <= 0) {
			/* remove from client list */
			listnode_delete(zrouter.client_list, client);
		} else {
			/*
			 * The client session object may be in use, although
			 * the associated pthread is gone. Defer final
			 * cleanup.
			 */
			client->is_closed = true;
			free_p = false;
		}
	}

	/* delete client */
	if (free_p)
		zserv_client_free(client);
}
716 | | |
/*
 * This task is scheduled by a ZAPI client pthread on the main pthread when it
 * wants to stop itself. When this executes, the client connection should
 * already have been closed and the thread will most likely have died, but its
 * resources still need to be cleaned up.
 */
static void zserv_handle_client_fail(struct event *thread)
{
	struct zserv *client = EVENT_ARG(thread);

	/* Main-thread trampoline: all real teardown happens here */
	zserv_close_client(client);
}
729 | | |
/*
 * Create a new client.
 *
 * This is called when a new connection is accept()'d on the ZAPI socket. It
 * initializes new client structure, notifies any subscribers of the connection
 * event and spawns the client's thread.
 *
 * sock
 *    client's socket file descriptor
 *
 * Returns the newly allocated client; the caller does not own it — it is
 * tracked on zrouter.client_list and freed via zserv_client_free().
 */
struct zserv *zserv_client_create(int sock)
{
	struct zserv *client;
	/* Working buffers must fit either a max-size packet or a zapi_route,
	 * whichever is larger.
	 */
	size_t stream_size =
		MAX(ZEBRA_MAX_PACKET_SIZ, sizeof(struct zapi_route));
	int i;
	afi_t afi;

	client = XCALLOC(MTYPE_ZSERV_CLIENT, sizeof(struct zserv));

	/* Make client input/output buffer. */
	client->sock = sock;
	client->ibuf_fifo = stream_fifo_new();
	client->obuf_fifo = stream_fifo_new();
	client->ibuf_work = stream_new(stream_size);
	client->obuf_work = stream_new(stream_size);
	client->connect_time = monotime(NULL);
#ifndef FUZZING
	pthread_mutex_init(&client->ibuf_mtx, NULL);
	pthread_mutex_init(&client->obuf_mtx, NULL);
	pthread_mutex_init(&client->stats_mtx, NULL);
#endif
	client->wb = buffer_new(0);
	TAILQ_INIT(&(client->gr_info_queue));

	/* Initialize flags */
	for (afi = AFI_IP; afi < AFI_MAX; afi++) {
		for (i = 0; i < ZEBRA_ROUTE_MAX; i++)
			client->redist[afi][i] = vrf_bitmap_init();
		client->redist_default[afi] = vrf_bitmap_init();
		client->ridinfo[afi] = vrf_bitmap_init();
		client->nhrp_neighinfo[afi] = vrf_bitmap_init();
	}

	/* Add this client to linked list. */
	frr_with_mutex (&client_mutex) {
		listnode_add(zrouter.client_list, client);
	}

#ifndef FUZZING
	struct frr_pthread_attr zclient_pthr_attrs = {
		.start = frr_pthread_attr_default.start,
		.stop = frr_pthread_attr_default.stop
	};
	client->pthread =
		frr_pthread_new(&zclient_pthr_attrs, "Zebra API client thread",
				"zebra_apic");

	/* start read loop */
	zserv_client_event(client, ZSERV_CLIENT_READ);
#endif

	/* call callbacks */
	hook_call(zserv_client_connect, client);

	/* start pthread */
#ifndef FUZZING
	frr_pthread_run(client->pthread, NULL);
#endif

	return client;
}
802 | | |
803 | | /* |
804 | | * Retrieve a client object by the complete tuple of |
805 | | * {protocol, instance, session}. This version supports use |
806 | | * from a different pthread: the object will be returned marked |
807 | | * in-use. The caller *must* release the client object with the |
808 | | * release_client() api, to ensure that the in-use marker is cleared properly. |
809 | | */ |
810 | | struct zserv *zserv_acquire_client(uint8_t proto, unsigned short instance, |
811 | | uint32_t session_id) |
812 | 0 | { |
813 | 0 | struct zserv *client = NULL; |
814 | |
|
815 | 0 | frr_with_mutex (&client_mutex) { |
816 | 0 | client = find_client_internal(proto, instance, session_id); |
817 | 0 | if (client) { |
818 | | /* Don't return a dead/closed client object */ |
819 | 0 | if (client->is_closed) |
820 | 0 | client = NULL; |
821 | 0 | else |
822 | 0 | client->busy_count++; |
823 | 0 | } |
824 | 0 | } |
825 | |
|
826 | 0 | return client; |
827 | 0 | } |
828 | | |
829 | | /* |
830 | | * Release a client object that was acquired with the acquire_client() api. |
831 | | * After this has been called, the caller must not use the client pointer - |
832 | | * it may be freed if the client has closed. |
833 | | */ |
834 | | void zserv_release_client(struct zserv *client) |
835 | 0 | { |
836 | | /* |
837 | | * Once we've decremented the client object's refcount, it's possible |
838 | | * for it to be deleted as soon as we release the lock, so we won't |
839 | | * touch the object again. |
840 | | */ |
841 | 0 | frr_with_mutex (&client_mutex) { |
842 | 0 | client->busy_count--; |
843 | |
|
844 | 0 | if (client->busy_count <= 0) { |
845 | | /* |
846 | | * No more users of the client object. If the client |
847 | | * session is closed, schedule cleanup on the zebra |
848 | | * main pthread. |
849 | | */ |
850 | 0 | if (client->is_closed) |
851 | 0 | event_add_event(zrouter.master, |
852 | 0 | zserv_handle_client_fail, |
853 | 0 | client, 0, &client->t_cleanup); |
854 | 0 | } |
855 | 0 | } |
856 | | |
857 | | /* |
858 | | * Cleanup must take place on the zebra main pthread, so we've |
859 | | * scheduled an event. |
860 | | */ |
861 | 0 | } |
862 | | |
863 | | /* |
864 | | * Accept socket connection. |
865 | | */ |
866 | | static void zserv_accept(struct event *thread) |
867 | 0 | { |
868 | 0 | int accept_sock; |
869 | 0 | int client_sock; |
870 | 0 | struct sockaddr_in client; |
871 | 0 | socklen_t len; |
872 | 0 |
|
873 | 0 | accept_sock = EVENT_FD(thread); |
874 | 0 |
|
875 | 0 | /* Reregister myself. */ |
876 | 0 | zserv_event(NULL, ZSERV_ACCEPT); |
877 | 0 |
|
878 | 0 | len = sizeof(struct sockaddr_in); |
879 | 0 | client_sock = accept(accept_sock, (struct sockaddr *)&client, &len); |
880 | 0 |
|
881 | 0 | if (client_sock < 0) { |
882 | 0 | flog_err_sys(EC_LIB_SOCKET, "Can't accept zebra socket: %s", |
883 | 0 | safe_strerror(errno)); |
884 | 0 | return; |
885 | 0 | } |
886 | 0 |
|
887 | 0 | /* Make client socket non-blocking. */ |
888 | 0 | set_nonblocking(client_sock); |
889 | 0 |
|
890 | 0 | /* Create new zebra client. */ |
891 | 0 | zserv_client_create(client_sock); |
892 | 0 | } |
893 | | |
894 | | void zserv_close(void) |
895 | 0 | { |
896 | | /* |
897 | | * On shutdown, let's close the socket down |
898 | | * so that long running processes of killing the |
899 | | * routing table doesn't leave us in a bad |
900 | | * state where a client tries to reconnect |
901 | | */ |
902 | 0 | close(zsock); |
903 | 0 | zsock = -1; |
904 | | |
905 | | /* Free client list's mutex */ |
906 | 0 | pthread_mutex_destroy(&client_mutex); |
907 | 0 | } |
908 | | |
909 | | void zserv_start(char *path) |
910 | 0 | { |
911 | 0 | int ret; |
912 | 0 | mode_t old_mask; |
913 | 0 | struct sockaddr_storage sa; |
914 | 0 | socklen_t sa_len; |
915 | |
|
916 | 0 | if (!frr_zclient_addr(&sa, &sa_len, path)) |
917 | | /* should be caught in zebra main() */ |
918 | 0 | return; |
919 | | |
920 | | /* Set umask */ |
921 | 0 | old_mask = umask(0077); |
922 | | |
923 | | /* Make UNIX domain socket. */ |
924 | 0 | zsock = socket(sa.ss_family, SOCK_STREAM, 0); |
925 | 0 | if (zsock < 0) { |
926 | 0 | flog_err_sys(EC_LIB_SOCKET, "Can't create zserv socket: %s", |
927 | 0 | safe_strerror(errno)); |
928 | 0 | return; |
929 | 0 | } |
930 | | |
931 | 0 | if (sa.ss_family != AF_UNIX) { |
932 | 0 | sockopt_reuseaddr(zsock); |
933 | 0 | sockopt_reuseport(zsock); |
934 | 0 | } else { |
935 | 0 | struct sockaddr_un *suna = (struct sockaddr_un *)&sa; |
936 | 0 | if (suna->sun_path[0]) |
937 | 0 | unlink(suna->sun_path); |
938 | 0 | } |
939 | |
|
940 | 0 | setsockopt_so_recvbuf(zsock, 1048576); |
941 | 0 | setsockopt_so_sendbuf(zsock, 1048576); |
942 | |
|
943 | 0 | frr_with_privs((sa.ss_family != AF_UNIX) ? &zserv_privs : NULL) { |
944 | 0 | ret = bind(zsock, (struct sockaddr *)&sa, sa_len); |
945 | 0 | } |
946 | 0 | if (ret < 0) { |
947 | 0 | flog_err_sys(EC_LIB_SOCKET, "Can't bind zserv socket on %s: %s", |
948 | 0 | path, safe_strerror(errno)); |
949 | 0 | close(zsock); |
950 | 0 | zsock = -1; |
951 | 0 | return; |
952 | 0 | } |
953 | | |
954 | 0 | ret = listen(zsock, 5); |
955 | 0 | if (ret < 0) { |
956 | 0 | flog_err_sys(EC_LIB_SOCKET, |
957 | 0 | "Can't listen to zserv socket %s: %s", path, |
958 | 0 | safe_strerror(errno)); |
959 | 0 | close(zsock); |
960 | 0 | zsock = -1; |
961 | 0 | return; |
962 | 0 | } |
963 | | |
964 | 0 | umask(old_mask); |
965 | |
|
966 | 0 | zserv_event(NULL, ZSERV_ACCEPT); |
967 | 0 | } |
968 | | |
969 | | void zserv_event(struct zserv *client, enum zserv_event event) |
970 | 0 | { |
971 | 0 | #ifdef FUZZING |
972 | 0 | return; |
973 | 0 | #endif |
974 | 0 | switch (event) { |
975 | 0 | case ZSERV_ACCEPT: |
976 | 0 | event_add_read(zrouter.master, zserv_accept, NULL, zsock, NULL); |
977 | 0 | break; |
978 | 0 | case ZSERV_PROCESS_MESSAGES: |
979 | 0 | event_add_event(zrouter.master, zserv_process_messages, client, |
980 | 0 | 0, &client->t_process); |
981 | 0 | break; |
982 | 0 | case ZSERV_HANDLE_CLIENT_FAIL: |
983 | 0 | event_add_event(zrouter.master, zserv_handle_client_fail, |
984 | 0 | client, 0, &client->t_cleanup); |
985 | 0 | } |
986 | 0 | } |
987 | | |
988 | | |
989 | | /* General purpose ---------------------------------------------------------- */ |
990 | | |
#define ZEBRA_TIME_BUF 32
/*
 * Render the age of monotonic timestamp *time1 into buf as a
 * human-readable interval; a zero timestamp renders as "never ".
 * Returns buf for convenient use inside vty_out() argument lists.
 */
static char *zserv_time_buf(time_t *time1, char *buf, int buflen)
{
	time_t elapsed;

	assert(buf != NULL);
	assert(buflen >= ZEBRA_TIME_BUF);
	assert(time1 != NULL);

	/* Zero means the event never happened. */
	if (*time1 == 0) {
		snprintf(buf, buflen, "never ");
		return buf;
	}

	elapsed = monotime(NULL) - *time1;
	frrtime_to_interval(elapsed, buf, buflen);

	return buf;
}
1012 | | |
/*
 * Display client info details: identity, connection timestamps,
 * last-seen commands, and per-object add/update/delete counters.
 * Called from the "show zebra client" vty command.
 */
static void zebra_show_client_detail(struct vty *vty, struct zserv *client)
{
	char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF];
	char wbuf[ZEBRA_TIME_BUF], nhbuf[ZEBRA_TIME_BUF], mbuf[ZEBRA_TIME_BUF];
	time_t connect_time, last_read_time, last_write_time;
	uint32_t last_read_cmd, last_write_cmd;

	/* Header: protocol name plus optional instance / session id. */
	vty_out(vty, "Client: %s", zebra_route_string(client->proto));
	if (client->instance)
		vty_out(vty, " Instance: %u", client->instance);
	if (client->session_id)
		vty_out(vty, " [%u]", client->session_id);
	vty_out(vty, "\n");

	vty_out(vty, "------------------------ \n");
	vty_out(vty, "FD: %d \n", client->sock);

	/*
	 * Snapshot time/command stats under the stats mutex; they are
	 * updated from the client's I/O pthread.
	 */
	frr_with_mutex (&client->stats_mtx) {
		connect_time = client->connect_time;
		last_read_time = client->last_read_time;
		last_write_time = client->last_write_time;

		last_read_cmd = client->last_read_cmd;
		last_write_cmd = client->last_write_cmd;
	}

	vty_out(vty, "Connect Time: %s \n",
		zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF));
	/* Nexthop-tracking registration status. */
	if (client->nh_reg_time) {
		vty_out(vty, "Nexthop Registry Time: %s \n",
			zserv_time_buf(&client->nh_reg_time, nhbuf,
				       ZEBRA_TIME_BUF));
		if (client->nh_last_upd_time)
			vty_out(vty, "Nexthop Last Update Time: %s \n",
				zserv_time_buf(&client->nh_last_upd_time, mbuf,
					       ZEBRA_TIME_BUF));
		else
			vty_out(vty, "No Nexthop Update sent\n");
	} else
		vty_out(vty, "Not registered for Nexthop Updates\n");

	vty_out(vty,
		"Client will %sbe notified about the status of its routes.\n",
		client->notify_owner ? "" : "Not ");

	vty_out(vty, "Last Msg Rx Time: %s \n",
		zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF));
	vty_out(vty, "Last Msg Tx Time: %s \n",
		zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF));
	if (last_read_cmd)
		vty_out(vty, "Last Rcvd Cmd: %s \n",
			zserv_command_string(last_read_cmd));
	if (last_write_cmd)
		vty_out(vty, "Last Sent Cmd: %s \n",
			zserv_command_string(last_write_cmd));
	vty_out(vty, "\n");

	/*
	 * Counter table.  Rows with a literal 0 in the Update column are
	 * object types for which no update counter is tracked.
	 */
	vty_out(vty, "Type Add Update Del \n");
	vty_out(vty, "================================================== \n");
	vty_out(vty, "IPv4 %-12u%-12u%-12u\n", client->v4_route_add_cnt,
		client->v4_route_upd8_cnt, client->v4_route_del_cnt);
	vty_out(vty, "IPv6 %-12u%-12u%-12u\n", client->v6_route_add_cnt,
		client->v6_route_upd8_cnt, client->v6_route_del_cnt);
	vty_out(vty, "Redist:v4 %-12u%-12u%-12u\n", client->redist_v4_add_cnt,
		0, client->redist_v4_del_cnt);
	vty_out(vty, "Redist:v6 %-12u%-12u%-12u\n", client->redist_v6_add_cnt,
		0, client->redist_v6_del_cnt);
	vty_out(vty, "VRF %-12u%-12u%-12u\n", client->vrfadd_cnt, 0,
		client->vrfdel_cnt);
	vty_out(vty, "Connected %-12u%-12u%-12u\n", client->ifadd_cnt, 0,
		client->ifdel_cnt);
	vty_out(vty, "Interface %-12u%-12u%-12u\n", client->ifup_cnt, 0,
		client->ifdown_cnt);
	vty_out(vty, "Intf Addr %-12u%-12u%-12u\n",
		client->connected_rt_add_cnt, 0, client->connected_rt_del_cnt);
	vty_out(vty, "BFD peer %-12u%-12u%-12u\n", client->bfd_peer_add_cnt,
		client->bfd_peer_upd8_cnt, client->bfd_peer_del_cnt);
	vty_out(vty, "NHT v4 %-12u%-12u%-12u\n",
		client->v4_nh_watch_add_cnt, 0, client->v4_nh_watch_rem_cnt);
	vty_out(vty, "NHT v6 %-12u%-12u%-12u\n",
		client->v6_nh_watch_add_cnt, 0, client->v6_nh_watch_rem_cnt);
	vty_out(vty, "VxLAN SG %-12u%-12u%-12u\n", client->vxlan_sg_add_cnt,
		0, client->vxlan_sg_del_cnt);
	vty_out(vty, "VNI %-12u%-12u%-12u\n", client->vniadd_cnt, 0,
		client->vnidel_cnt);
	vty_out(vty, "L3-VNI %-12u%-12u%-12u\n", client->l3vniadd_cnt, 0,
		client->l3vnidel_cnt);
	vty_out(vty, "MAC-IP %-12u%-12u%-12u\n", client->macipadd_cnt, 0,
		client->macipdel_cnt);
	vty_out(vty, "ES %-12u%-12u%-12u\n", client->local_es_add_cnt,
		0, client->local_es_del_cnt);
	vty_out(vty, "ES-EVI %-12u%-12u%-12u\n",
		client->local_es_evi_add_cnt, 0, client->local_es_evi_del_cnt);
	vty_out(vty, "Errors: %u\n", client->error_cnt);

#if defined DEV_BUILD
	/* Fifo depth diagnostics only exist in developer builds. */
	vty_out(vty, "Input Fifo: %zu:%zu Output Fifo: %zu:%zu\n",
		client->ibuf_fifo->count, client->ibuf_fifo->max_count,
		client->obuf_fifo->count, client->obuf_fifo->max_count);
#endif
	vty_out(vty, "\n");
}
1116 | | |
1117 | | /* Display stale client information */ |
1118 | | static void zebra_show_stale_client_detail(struct vty *vty, |
1119 | | struct zserv *client) |
1120 | 0 | { |
1121 | 0 | char buf[PREFIX2STR_BUFFER]; |
1122 | 0 | time_t uptime; |
1123 | 0 | struct client_gr_info *info = NULL; |
1124 | 0 | struct zserv *s = NULL; |
1125 | 0 | bool first_p = true; |
1126 | |
|
1127 | 0 | TAILQ_FOREACH (info, &client->gr_info_queue, gr_info) { |
1128 | 0 | if (first_p) { |
1129 | 0 | vty_out(vty, "Stale Client Information\n"); |
1130 | 0 | vty_out(vty, "------------------------\n"); |
1131 | |
|
1132 | 0 | if (client->instance) |
1133 | 0 | vty_out(vty, " Instance: %u", client->instance); |
1134 | 0 | if (client->session_id) |
1135 | 0 | vty_out(vty, " [%u]", client->session_id); |
1136 | |
|
1137 | 0 | first_p = false; |
1138 | 0 | } |
1139 | |
|
1140 | 0 | vty_out(vty, "VRF : %s\n", vrf_id_to_name(info->vrf_id)); |
1141 | 0 | vty_out(vty, "Capabilities : "); |
1142 | 0 | switch (info->capabilities) { |
1143 | 0 | case ZEBRA_CLIENT_GR_CAPABILITIES: |
1144 | 0 | vty_out(vty, "Graceful Restart(%u seconds)\n", |
1145 | 0 | info->stale_removal_time); |
1146 | 0 | break; |
1147 | 0 | case ZEBRA_CLIENT_ROUTE_UPDATE_COMPLETE: |
1148 | 0 | case ZEBRA_CLIENT_ROUTE_UPDATE_PENDING: |
1149 | 0 | case ZEBRA_CLIENT_GR_DISABLE: |
1150 | 0 | case ZEBRA_CLIENT_RIB_STALE_TIME: |
1151 | 0 | vty_out(vty, "None\n"); |
1152 | 0 | break; |
1153 | 0 | } |
1154 | | |
1155 | 0 | if (ZEBRA_CLIENT_GR_ENABLED(info->capabilities)) { |
1156 | 0 | if (info->stale_client_ptr) { |
1157 | 0 | s = (struct zserv *)(info->stale_client_ptr); |
1158 | 0 | uptime = monotime(NULL); |
1159 | 0 | uptime -= s->restart_time; |
1160 | |
|
1161 | 0 | frrtime_to_interval(uptime, buf, sizeof(buf)); |
1162 | |
|
1163 | 0 | vty_out(vty, "Last restart time : %s ago\n", |
1164 | 0 | buf); |
1165 | |
|
1166 | 0 | vty_out(vty, "Stalepath removal time: %d sec\n", |
1167 | 0 | info->stale_removal_time); |
1168 | 0 | if (info->t_stale_removal) { |
1169 | 0 | vty_out(vty, |
1170 | 0 | "Stale delete timer: %ld sec\n", |
1171 | 0 | event_timer_remain_second( |
1172 | 0 | info->t_stale_removal)); |
1173 | 0 | } |
1174 | 0 | } |
1175 | 0 | } |
1176 | 0 | } |
1177 | 0 | vty_out(vty, "\n"); |
1178 | 0 | return; |
1179 | 0 | } |
1180 | | |
1181 | | static void zebra_show_client_brief(struct vty *vty, struct zserv *client) |
1182 | 0 | { |
1183 | 0 | char client_string[80]; |
1184 | 0 | char cbuf[ZEBRA_TIME_BUF], rbuf[ZEBRA_TIME_BUF]; |
1185 | 0 | char wbuf[ZEBRA_TIME_BUF]; |
1186 | 0 | time_t connect_time, last_read_time, last_write_time; |
1187 | |
|
1188 | 0 | frr_with_mutex (&client->stats_mtx) { |
1189 | 0 | connect_time = client->connect_time; |
1190 | 0 | last_read_time = client->last_read_time; |
1191 | 0 | last_write_time = client->last_write_time; |
1192 | 0 | } |
1193 | |
|
1194 | 0 | if (client->instance || client->session_id) |
1195 | 0 | snprintfrr(client_string, sizeof(client_string), "%s[%u:%u]", |
1196 | 0 | zebra_route_string(client->proto), client->instance, |
1197 | 0 | client->session_id); |
1198 | 0 | else |
1199 | 0 | snprintfrr(client_string, sizeof(client_string), "%s", |
1200 | 0 | zebra_route_string(client->proto)); |
1201 | |
|
1202 | 0 | vty_out(vty, "%-10s%12s %12s%12s %10d/%-10d %10d/%-10d\n", |
1203 | 0 | client_string, |
1204 | 0 | zserv_time_buf(&connect_time, cbuf, ZEBRA_TIME_BUF), |
1205 | 0 | zserv_time_buf(&last_read_time, rbuf, ZEBRA_TIME_BUF), |
1206 | 0 | zserv_time_buf(&last_write_time, wbuf, ZEBRA_TIME_BUF), |
1207 | 0 | client->v4_route_add_cnt + client->v4_route_upd8_cnt, |
1208 | 0 | client->v4_route_del_cnt, |
1209 | 0 | client->v6_route_add_cnt + client->v6_route_upd8_cnt, |
1210 | 0 | client->v6_route_del_cnt); |
1211 | 0 | } |
1212 | | |
1213 | | /* |
1214 | | * Common logic that searches the client list for a zapi client; this |
1215 | | * MUST be called holding the client list mutex. |
1216 | | */ |
1217 | | static struct zserv *find_client_internal(uint8_t proto, |
1218 | | unsigned short instance, |
1219 | | uint32_t session_id) |
1220 | 0 | { |
1221 | 0 | struct listnode *node, *nnode; |
1222 | 0 | struct zserv *client = NULL; |
1223 | |
|
1224 | 0 | for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { |
1225 | 0 | if (client->proto == proto && client->instance == instance && |
1226 | 0 | client->session_id == session_id) |
1227 | 0 | break; |
1228 | 0 | } |
1229 | |
|
1230 | 0 | return client; |
1231 | 0 | } |
1232 | | |
1233 | | /* |
1234 | | * Public api that searches for a client session; this version is |
1235 | | * used from the zebra main pthread. |
1236 | | */ |
1237 | | struct zserv *zserv_find_client(uint8_t proto, unsigned short instance) |
1238 | 0 | { |
1239 | 0 | struct zserv *client; |
1240 | |
|
1241 | 0 | frr_with_mutex (&client_mutex) { |
1242 | 0 | client = find_client_internal(proto, instance, 0); |
1243 | 0 | } |
1244 | |
|
1245 | 0 | return client; |
1246 | 0 | } |
1247 | | |
1248 | | /* |
1249 | | * Retrieve a client by its protocol, instance number, and session id. |
1250 | | */ |
1251 | | struct zserv *zserv_find_client_session(uint8_t proto, unsigned short instance, |
1252 | | uint32_t session_id) |
1253 | 0 | { |
1254 | 0 | struct zserv *client; |
1255 | |
|
1256 | 0 | frr_with_mutex (&client_mutex) { |
1257 | 0 | client = find_client_internal(proto, instance, session_id); |
1258 | 0 | } |
1259 | |
|
1260 | 0 | return client; |
1261 | |
|
1262 | 0 | } |
1263 | | |
1264 | | /* This command is for debugging purpose. */ |
1265 | | DEFUN (show_zebra_client, |
1266 | | show_zebra_client_cmd, |
1267 | | "show zebra client", |
1268 | | SHOW_STR |
1269 | | ZEBRA_STR |
1270 | | "Client information\n") |
1271 | 0 | { |
1272 | 0 | struct listnode *node; |
1273 | 0 | struct zserv *client; |
1274 | |
|
1275 | 0 | for (ALL_LIST_ELEMENTS_RO(zrouter.client_list, node, client)) { |
1276 | 0 | zebra_show_client_detail(vty, client); |
1277 | | /* Show GR info if present */ |
1278 | 0 | zebra_show_stale_client_detail(vty, client); |
1279 | 0 | } |
1280 | |
|
1281 | 0 | return CMD_SUCCESS; |
1282 | 0 | } |
1283 | | |
1284 | | /* This command is for debugging purpose. */ |
1285 | | DEFUN (show_zebra_client_summary, |
1286 | | show_zebra_client_summary_cmd, |
1287 | | "show zebra client summary", |
1288 | | SHOW_STR |
1289 | | ZEBRA_STR |
1290 | | "Client information brief\n" |
1291 | | "Brief Summary\n") |
1292 | 0 | { |
1293 | 0 | struct listnode *node; |
1294 | 0 | struct zserv *client; |
1295 | |
|
1296 | 0 | vty_out(vty, |
1297 | 0 | "Name Connect Time Last Read Last Write IPv4 Routes IPv6 Routes\n"); |
1298 | 0 | vty_out(vty, |
1299 | 0 | "------------------------------------------------------------------------------------------\n"); |
1300 | |
|
1301 | 0 | for (ALL_LIST_ELEMENTS_RO(zrouter.client_list, node, client)) |
1302 | 0 | zebra_show_client_brief(vty, client); |
1303 | |
|
1304 | 0 | vty_out(vty, "Routes column shows (added+updated)/deleted\n"); |
1305 | 0 | return CMD_SUCCESS; |
1306 | 0 | } |
1307 | | |
1308 | | static int zserv_client_close_cb(struct zserv *closed_client) |
1309 | 269 | { |
1310 | 269 | struct listnode *node, *nnode; |
1311 | 269 | struct zserv *client = NULL; |
1312 | | |
1313 | 269 | for (ALL_LIST_ELEMENTS(zrouter.client_list, node, nnode, client)) { |
1314 | 0 | if (client->proto == closed_client->proto) |
1315 | 0 | continue; |
1316 | | |
1317 | 0 | zsend_client_close_notify(client, closed_client); |
1318 | 0 | } |
1319 | | |
1320 | 269 | return 0; |
1321 | 269 | } |
1322 | | |
1323 | | void zserv_init(void) |
1324 | 1 | { |
1325 | | /* Client list init. */ |
1326 | 1 | zrouter.client_list = list_new(); |
1327 | 1 | zrouter.stale_client_list = list_new(); |
1328 | | |
1329 | | /* Misc init. */ |
1330 | 1 | zsock = -1; |
1331 | 1 | pthread_mutex_init(&client_mutex, NULL); |
1332 | | |
1333 | 1 | install_element(ENABLE_NODE, &show_zebra_client_cmd); |
1334 | 1 | install_element(ENABLE_NODE, &show_zebra_client_summary_cmd); |
1335 | | |
1336 | 1 | hook_register(zserv_client_close, zserv_client_close_cb); |
1337 | 1 | } |