// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2019-22 David Lamparter, for NetDEF, Inc.
 */

#include "zebra.h"

#include "zlog_live.h"

#include "memory.h"
#include "frrcu.h"
#include "zlog.h"
#include "printfrr.h"
#include "network.h"

DEFINE_MTYPE_STATIC(LOG, LOG_LIVE, "log vtysh live target");

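/* lifecycle of a live target.  the state only ever moves away from
 * STATE_NORMAL once: either the fd dies first (STATE_FD_DEAD) or the owner
 * calls zlog_live_disown() first (STATE_DISOWNED).  for a disowned target,
 * whichever of the two events happens second frees the target.
 */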
enum {
	STATE_NORMAL = 0,
	STATE_FD_DEAD,
	STATE_DISOWNED,
};

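/* one live logging target, i.e. one fd that log messages are streamed to
 * (normally one end of a socketpair whose other end went to vtysh).
 * embedded in the generic zlog_target and recovered with container_of().
 */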
struct zlt_live {
	struct zlog_target zt;

	atomic_uint_fast32_t fd;
	struct rcu_head_close head_close;
	struct rcu_head head_self;

	atomic_uint_fast32_t state;
	atomic_uint_fast32_t lost_msgs;
};

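/* regular log callback, handed a batch of messages.  each message becomes
 * one record made up of a struct zlog_live_hdr, the printf argument
 * positions (if any) and the formatted text, and the whole batch goes out
 * through a single sendmmsg() call where possible.
 */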
static void zlog_live(struct zlog_target *zt, struct zlog_msg *msgs[],
		      size_t nmsgs)
{
	struct zlt_live *zte = container_of(zt, struct zlt_live, zt);
	struct zlog_live_hdr hdrs[nmsgs], *hdr = hdrs;
	struct mmsghdr mmhs[nmsgs], *mmh = mmhs;
	struct iovec iovs[nmsgs * 3], *iov = iovs;
	struct timespec ts;
	size_t i, textlen;
	int fd;
	uint_fast32_t state;

	fd = atomic_load_explicit(&zte->fd, memory_order_relaxed);

	if (fd < 0)
		return;

	memset(mmhs, 0, sizeof(mmhs));
	memset(hdrs, 0, sizeof(hdrs));

	for (i = 0; i < nmsgs; i++) {
		const struct fmt_outpos *argpos;
		size_t n_argpos, texthdrlen;
		struct zlog_msg *msg = msgs[i];
		int prio = zlog_msg_prio(msg);
		const struct xref_logmsg *xref;
		intmax_t pid, tid;

		if (prio > zt->prio_min)
			continue;

		zlog_msg_args(msg, &texthdrlen, &n_argpos, &argpos);

		mmh->msg_hdr.msg_iov = iov;

		iov->iov_base = hdr;
		iov->iov_len = sizeof(*hdr);
		iov++;

		if (n_argpos) {
			iov->iov_base = (char *)argpos;
			iov->iov_len = sizeof(*argpos) * n_argpos;
			iov++;
		}

		iov->iov_base = (char *)zlog_msg_text(msg, &textlen);
		iov->iov_len = textlen;
		iov++;

		zlog_msg_tsraw(msg, &ts);
		zlog_msg_pid(msg, &pid, &tid);
		xref = zlog_msg_xref(msg);

		hdr->ts_sec = ts.tv_sec;
		hdr->ts_nsec = ts.tv_nsec;
		hdr->pid = pid;
		hdr->tid = tid;
		hdr->lost_msgs = atomic_load_explicit(&zte->lost_msgs,
						      memory_order_relaxed);
		hdr->prio = prio;
		hdr->flags = 0;
		hdr->textlen = textlen;
		hdr->texthdrlen = texthdrlen;
		hdr->n_argpos = n_argpos;
		if (xref) {
			memcpy(hdr->uid, xref->xref.xrefdata->uid,
			       sizeof(hdr->uid));
			hdr->ec = xref->ec;
		} else {
			memset(hdr->uid, 0, sizeof(hdr->uid));
			hdr->ec = 0;
		}
		hdr->hdrlen = sizeof(*hdr) + sizeof(*argpos) * n_argpos;

		mmh->msg_hdr.msg_iovlen = iov - mmh->msg_hdr.msg_iov;
		mmh++;
		hdr++;
	}
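
	/* push the batch out with as few syscalls as possible.  sendmmsg()
	 * may accept only part of it; if the socket would block, the rest is
	 * dropped but accounted for in lost_msgs, any other error retires
	 * the fd below.
	 */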
	size_t msgtotal = mmh - mmhs;
	ssize_t sent;

	for (size_t msgpos = 0; msgpos < msgtotal; msgpos += sent) {
		sent = sendmmsg(fd, mmhs + msgpos, msgtotal - msgpos, 0);

		if (sent <= 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
			atomic_fetch_add_explicit(&zte->lost_msgs,
						  msgtotal - msgpos,
						  memory_order_relaxed);
			break;
		}
		if (sent <= 0)
			goto out_err;
	}
	return;

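	/* peer is gone or broken: atomically take the fd away so only one
	 * thread retires it, close it after all RCU readers are done, and
	 * unhook the target.  if the target was already disowned, this is
	 * also where it gets freed.
	 */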
out_err:
	fd = atomic_exchange_explicit(&zte->fd, -1, memory_order_relaxed);
	if (fd < 0)
		return;

	rcu_close(&zte->head_close, fd);
	zlog_target_replace(zt, NULL);

	state = STATE_NORMAL;
	atomic_compare_exchange_strong_explicit(
		&zte->state, &state, STATE_FD_DEAD, memory_order_relaxed,
		memory_order_relaxed);
	if (state == STATE_DISOWNED)
		rcu_free(MTYPE_LOG_LIVE, zte, head_self);
}

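/* signal-safe logging path (logfn_sigsafe): no formatting and no
 * allocations, just a minimal header plus the already-formatted text pushed
 * out with a single writev().
 */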
static void zlog_live_sigsafe(struct zlog_target *zt, const char *text,
			      size_t len)
{
	struct zlt_live *zte = container_of(zt, struct zlt_live, zt);
	struct zlog_live_hdr hdr[1] = {};
	struct iovec iovs[2], *iov = iovs;
	struct timespec ts;
	int fd;

	fd = atomic_load_explicit(&zte->fd, memory_order_relaxed);
	if (fd < 0)
		return;

	clock_gettime(CLOCK_REALTIME, &ts);

	hdr->ts_sec = ts.tv_sec;
	hdr->ts_nsec = ts.tv_nsec;
	hdr->prio = LOG_CRIT;
	hdr->textlen = len;

	iov->iov_base = (char *)hdr;
	iov->iov_len = sizeof(hdr);
	iov++;

	iov->iov_base = (char *)text;
	iov->iov_len = len;
	iov++;

	writev(fd, iovs, iov - iovs);
}

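/* set up live logging over a fresh socketpair: one end becomes the log
 * target, the other end is returned to the caller in *other_fd so it can be
 * handed to the consumer (e.g. vtysh).
 */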
void zlog_live_open(struct zlog_live_cfg *cfg, int prio_min, int *other_fd)
{
	int sockets[2];

	if (cfg->target)
		zlog_live_close(cfg);

	*other_fd = -1;
	if (prio_min == ZLOG_DISABLED)
		return;

	/* the only reason for SEQPACKET here is getting close notifications.
	 * otherwise, if you open a bunch of vtysh connections with live logs
	 * and then close them all, the fds stick around until we hit an
	 * error while trying to log something to them at some later point --
	 * which eats up fds, and that "later" may be *much* later for some
	 * daemons.
	 */
	if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sockets) < 0) {
		if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sockets) < 0) {
			zlog_warn("%% could not open socket pair: %m");
			return;
		}
	} else
		/* SEQPACKET only: try to zap read direction */
		shutdown(sockets[0], SHUT_RD);

	*other_fd = sockets[1];
	zlog_live_open_fd(cfg, prio_min, sockets[0]);
}

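/* install a live log target on an fd supplied by the caller.  the fd is
 * switched to non-blocking; if the receiver can't keep up, messages are
 * dropped and counted in lost_msgs instead of blocking the daemon.
 */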
void zlog_live_open_fd(struct zlog_live_cfg *cfg, int prio_min, int fd)
{
	struct zlt_live *zte;
	struct zlog_target *zt;

	if (cfg->target)
		zlog_live_close(cfg);

	zt = zlog_target_clone(MTYPE_LOG_LIVE, NULL, sizeof(*zte));
	zte = container_of(zt, struct zlt_live, zt);
	cfg->target = zte;

	set_nonblocking(fd);
	zte->fd = fd;
	zte->zt.prio_min = prio_min;
	zte->zt.logfn = zlog_live;
	zte->zt.logfn_sigsafe = zlog_live_sigsafe;

	zlog_target_replace(NULL, zt);
}

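/* tear down a target the caller still owns: detach it, then close the fd
 * and free the structure once concurrent RCU readers are done with them.
 */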
void zlog_live_close(struct zlog_live_cfg *cfg)
{
	struct zlt_live *zte;
	int fd;

	if (!cfg->target)
		return;

	zte = cfg->target;
	cfg->target = NULL;

	fd = atomic_exchange_explicit(&zte->fd, -1, memory_order_relaxed);

	if (fd >= 0) {
		rcu_close(&zte->head_close, fd);
		zlog_target_replace(&zte->zt, NULL);
	}
	rcu_free(MTYPE_LOG_LIVE, zte, head_self);
}

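/* give up ownership without closing: the target keeps logging until its fd
 * dies, at which point zlog_live() frees it.  if the fd is already dead,
 * free it right here.
 */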
void zlog_live_disown(struct zlog_live_cfg *cfg)
{
	struct zlt_live *zte;
	uint_fast32_t state;

	if (!cfg->target)
		return;

	zte = cfg->target;
	cfg->target = NULL;

	state = STATE_NORMAL;
	atomic_compare_exchange_strong_explicit(
		&zte->state, &state, STATE_DISOWNED, memory_order_relaxed,
		memory_order_relaxed);
	if (state == STATE_FD_DEAD)
		rcu_free(MTYPE_LOG_LIVE, zte, head_self);
}
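
/* usage sketch (illustrative only, not part of the original file): a
 * daemon-side caller would wire this up roughly as below.  send_fd_to_peer()
 * is a hypothetical stand-in for however the descriptor reaches the log
 * consumer.
 *
 *	struct zlog_live_cfg cfg = {};
 *	int peer_fd;
 *
 *	zlog_live_open(&cfg, LOG_DEBUG, &peer_fd);
 *	if (peer_fd >= 0)
 *		send_fd_to_peer(peer_fd);
 *
 *	// later, either tear the target down explicitly:
 *	zlog_live_close(&cfg);
 *	// or let it die together with the peer's end of the socket:
 *	// zlog_live_disown(&cfg);
 */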