Line | Count | Source (jump to first uncovered line) |
1 | | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | | /* |
3 | | * This is an implementation of PIM MLAG Functionality |
4 | | * |
5 | | * Module name: PIM MLAG |
6 | | * |
7 | | * Author: sathesh Kumar karra <sathk@cumulusnetworks.com> |
8 | | * |
9 | | * Copyright (C) 2019 Cumulus Networks http://www.cumulusnetworks.com |
10 | | */ |
11 | | #include <zebra.h> |
12 | | |
13 | | #include "pimd.h" |
14 | | #include "pim_mlag.h" |
15 | | #include "pim_upstream.h" |
16 | | #include "pim_vxlan.h" |
17 | | |
/* zclient shared with the rest of pimd; defined elsewhere in the daemon */
extern struct zclient *zclient;

/* Extra bytes reserved beyond sizeof(struct mlag_mroute_*) when sizing the
 * tx stream (room for the leading message-type word written by stream_putl)
 */
#define PIM_MLAG_METADATA_LEN 4
21 | | |
/*********************Actual Data processing *****************************/
23 | | /* TBD: There can be duplicate updates to FIB***/ |
/* Re-evaluate the dual-active interface's mute state in the channel's OIL
 * when this switch becomes DF.  NOTE(review): the mute decision itself is
 * presumably made inside pim_channel_update_oif_mute() from the current DF
 * flags — confirm there; this macro only differs from the DEL variant in
 * its debug text.
 */
#define PIM_MLAG_ADD_OIF_TO_OIL(ch, ch_oil)                                    \
	do {                                                                   \
		if (PIM_DEBUG_MLAG)                                            \
			zlog_debug(                                            \
				"%s: add Dual-active Interface to %s "         \
				"to oil:%s",                                   \
				__func__, ch->interface->name, ch->sg_str);    \
		pim_channel_update_oif_mute(ch_oil, ch->interface->info);      \
	} while (0)
33 | | |
/* Re-evaluate the dual-active interface's mute state in the channel's OIL
 * when this switch loses DF.  NOTE(review): expands to the same
 * pim_channel_update_oif_mute() call as the ADD variant; only the debug
 * string differs — the helper is expected to derive mute/unmute from the
 * current DF state.
 */
#define PIM_MLAG_DEL_OIF_TO_OIL(ch, ch_oil)                                    \
	do {                                                                   \
		if (PIM_DEBUG_MLAG)                                            \
			zlog_debug(                                            \
				"%s: del Dual-active Interface to %s "         \
				"to oil:%s",                                   \
				__func__, ch->interface->name, ch->sg_str);    \
		pim_channel_update_oif_mute(ch_oil, ch->interface->info);      \
	} while (0)
43 | | |
44 | | |
45 | | static void pim_mlag_calculate_df_for_ifchannels(struct pim_upstream *up, |
46 | | bool is_df) |
47 | 0 | { |
48 | 0 | struct listnode *chnode; |
49 | 0 | struct listnode *chnextnode; |
50 | 0 | struct pim_ifchannel *ch; |
51 | 0 | struct pim_interface *pim_ifp = NULL; |
52 | 0 | struct channel_oil *ch_oil = NULL; |
53 | |
|
54 | 0 | ch_oil = (up) ? up->channel_oil : NULL; |
55 | |
|
56 | 0 | if (!ch_oil) |
57 | 0 | return; |
58 | | |
59 | 0 | if (PIM_DEBUG_MLAG) |
60 | 0 | zlog_debug("%s: Calculating DF for Dual active if-channel%s", |
61 | 0 | __func__, up->sg_str); |
62 | |
|
63 | 0 | for (ALL_LIST_ELEMENTS(up->ifchannels, chnode, chnextnode, ch)) { |
64 | 0 | pim_ifp = (ch->interface) ? ch->interface->info : NULL; |
65 | 0 | if (!pim_ifp || !PIM_I_am_DualActive(pim_ifp)) |
66 | 0 | continue; |
67 | | |
68 | 0 | if (is_df) |
69 | 0 | PIM_MLAG_ADD_OIF_TO_OIL(ch, ch_oil); |
70 | 0 | else |
71 | 0 | PIM_MLAG_DEL_OIF_TO_OIL(ch, ch_oil); |
72 | 0 | } |
73 | 0 | } |
74 | | |
75 | | static void pim_mlag_inherit_mlag_flags(struct pim_upstream *up, bool is_df) |
76 | 0 | { |
77 | 0 | struct listnode *listnode; |
78 | 0 | struct pim_upstream *child; |
79 | 0 | struct listnode *chnode; |
80 | 0 | struct listnode *chnextnode; |
81 | 0 | struct pim_ifchannel *ch; |
82 | 0 | struct pim_interface *pim_ifp = NULL; |
83 | 0 | struct channel_oil *ch_oil = NULL; |
84 | |
|
85 | 0 | if (PIM_DEBUG_MLAG) |
86 | 0 | zlog_debug("%s: Updating DF for uptream:%s children", __func__, |
87 | 0 | up->sg_str); |
88 | | |
89 | |
|
90 | 0 | for (ALL_LIST_ELEMENTS(up->ifchannels, chnode, chnextnode, ch)) { |
91 | 0 | pim_ifp = (ch->interface) ? ch->interface->info : NULL; |
92 | 0 | if (!pim_ifp || !PIM_I_am_DualActive(pim_ifp)) |
93 | 0 | continue; |
94 | | |
95 | 0 | for (ALL_LIST_ELEMENTS_RO(up->sources, listnode, child)) { |
96 | 0 | if (PIM_DEBUG_MLAG) |
97 | 0 | zlog_debug("%s: Updating DF for child:%s", |
98 | 0 | __func__, child->sg_str); |
99 | 0 | ch_oil = (child) ? child->channel_oil : NULL; |
100 | |
|
101 | 0 | if (!ch_oil) |
102 | 0 | continue; |
103 | | |
104 | 0 | if (is_df) |
105 | 0 | PIM_MLAG_ADD_OIF_TO_OIL(ch, ch_oil); |
106 | 0 | else |
107 | 0 | PIM_MLAG_DEL_OIF_TO_OIL(ch, ch_oil); |
108 | 0 | } |
109 | 0 | } |
110 | 0 | } |
111 | | |
112 | | /******************************* pim upstream sync **************************/ |
/* Update the DF role for the upstream entry and return true on role change.
 *
 * pim:    instance the entry belongs to
 * up:     upstream (mroute) entry whose role is being set
 * is_df:  new designated-forwarder state for this switch
 * reason: short tag for debug logs describing what triggered the change
 */
bool pim_mlag_up_df_role_update(struct pim_instance *pim,
		struct pim_upstream *up, bool is_df, const char *reason)
{
	struct channel_oil *c_oil = up->channel_oil;
	/* current role is the inverse of the NON_DF flag */
	bool old_is_df = !PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags);
	struct pim_interface *vxlan_ifp;

	if (is_df == old_is_df) {
		if (PIM_DEBUG_MLAG)
			zlog_debug(
				"%s: Ignoring Role update for %s, since no change",
				__func__, up->sg_str);
		return false;
	}

	if (PIM_DEBUG_MLAG)
		zlog_debug("local MLAG mroute %s role changed to %s based on %s",
			   up->sg_str, is_df ? "df" : "non-df", reason);

	if (is_df)
		PIM_UPSTREAM_FLAG_UNSET_MLAG_NON_DF(up->flags);
	else
		PIM_UPSTREAM_FLAG_SET_MLAG_NON_DF(up->flags);


	/*
	 * This Upstream entry synced to peer Because of Dual-active
	 * Interface configuration
	 */
	if (PIM_UPSTREAM_FLAG_TEST_MLAG_INTERFACE(up->flags)) {
		pim_mlag_inherit_mlag_flags(up, is_df);
		pim_mlag_calculate_df_for_ifchannels(up, is_df);
	}

	/* If the DF role has changed check if ipmr-lo needs to be
	 * muted/un-muted. Active-Active devices and vxlan termination
	 * devices (ipmr-lo) are suppressed on the non-DF.
	 * This may leave the mroute with the empty OIL in which case the
	 * forwarding entry's sole purpose is to just blackhole the flow
	 * headed to the switch.
	 */
	if (c_oil) {
		vxlan_ifp = pim_vxlan_get_term_ifp(pim);
		if (vxlan_ifp)
			pim_channel_update_oif_mute(c_oil, vxlan_ifp);
	}

	/* If DF role changed on a (*,G) termination mroute update the
	 * associated DF role on the inherited (S,G) entries
	 */
	if (pim_addr_is_any(up->sg.src) &&
	    PIM_UPSTREAM_FLAG_TEST_MLAG_VXLAN(up->flags))
		pim_vxlan_inherit_mlag_flags(pim, up, true /* inherit */);

	return true;
}
170 | | |
171 | | /* Run per-upstream entry DF election and return true on role change */ |
172 | | static bool pim_mlag_up_df_role_elect(struct pim_instance *pim, |
173 | | struct pim_upstream *up) |
174 | 0 | { |
175 | 0 | bool is_df; |
176 | 0 | uint32_t peer_cost; |
177 | 0 | uint32_t local_cost; |
178 | 0 | bool rv; |
179 | |
|
180 | 0 | if (!pim_up_mlag_is_local(up)) |
181 | 0 | return false; |
182 | | |
183 | | /* We are yet to rx a status update from the local MLAG daemon so |
184 | | * we will assume DF status. |
185 | | */ |
186 | 0 | if (!(router->mlag_flags & PIM_MLAGF_STATUS_RXED)) |
187 | 0 | return pim_mlag_up_df_role_update(pim, up, |
188 | 0 | true /*is_df*/, "mlagd-down"); |
189 | | |
190 | | /* If not connected to peer assume DF role on the MLAG primary |
191 | | * switch (and non-DF on the secondary switch. |
192 | | */ |
193 | 0 | if (!(router->mlag_flags & PIM_MLAGF_PEER_CONN_UP)) { |
194 | 0 | is_df = (router->mlag_role == MLAG_ROLE_PRIMARY) ? true : false; |
195 | 0 | return pim_mlag_up_df_role_update(pim, up, |
196 | 0 | is_df, "peer-down"); |
197 | 0 | } |
198 | | |
199 | | /* If MLAG peer session is up but zebra is down on the peer |
200 | | * assume DF role. |
201 | | */ |
202 | 0 | if (!(router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP)) |
203 | 0 | return pim_mlag_up_df_role_update(pim, up, |
204 | 0 | true /*is_df*/, "zebra-down"); |
205 | | |
206 | | /* If we are connected to peer switch but don't have a mroute |
207 | | * from it we have to assume non-DF role to avoid duplicates. |
208 | | * Note: When the peer connection comes up we wait for initial |
209 | | * replay to complete before moving "strays" i.e. local-mlag-mroutes |
210 | | * without a peer reference to non-df role. |
211 | | */ |
212 | 0 | if (!PIM_UPSTREAM_FLAG_TEST_MLAG_PEER(up->flags)) |
213 | 0 | return pim_mlag_up_df_role_update(pim, up, |
214 | 0 | false /*is_df*/, "no-peer-mroute"); |
215 | | |
216 | | /* switch with the lowest RPF cost wins. if both switches have the same |
217 | | * cost MLAG role is used as a tie breaker (MLAG primary wins). |
218 | | */ |
219 | 0 | peer_cost = up->mlag.peer_mrib_metric; |
220 | 0 | local_cost = pim_up_mlag_local_cost(up); |
221 | 0 | if (local_cost == peer_cost) { |
222 | 0 | is_df = (router->mlag_role == MLAG_ROLE_PRIMARY) ? true : false; |
223 | 0 | rv = pim_mlag_up_df_role_update(pim, up, is_df, "equal-cost"); |
224 | 0 | } else { |
225 | 0 | is_df = (local_cost < peer_cost) ? true : false; |
226 | 0 | rv = pim_mlag_up_df_role_update(pim, up, is_df, "cost"); |
227 | 0 | } |
228 | |
|
229 | 0 | return rv; |
230 | 0 | } |
231 | | |
232 | | /* Handle upstream entry add from the peer MLAG switch - |
233 | | * - if a local entry doesn't exist one is created with reference |
234 | | * _MLAG_PEER |
235 | | * - if a local entry exists and has a MLAG OIF DF election is run. |
236 | | * the non-DF switch stop forwarding traffic to MLAG devices. |
237 | | */ |
238 | | static void pim_mlag_up_peer_add(struct mlag_mroute_add *msg) |
239 | 0 | { |
240 | 0 | struct pim_upstream *up; |
241 | 0 | struct pim_instance *pim; |
242 | 0 | int flags = 0; |
243 | 0 | pim_sgaddr sg; |
244 | 0 | struct vrf *vrf; |
245 | |
|
246 | 0 | memset(&sg, 0, sizeof(sg)); |
247 | 0 | sg.src.s_addr = htonl(msg->source_ip); |
248 | 0 | sg.grp.s_addr = htonl(msg->group_ip); |
249 | |
|
250 | 0 | if (PIM_DEBUG_MLAG) |
251 | 0 | zlog_debug("peer MLAG mroute add %s:%pSG cost %d", |
252 | 0 | msg->vrf_name, &sg, msg->cost_to_rp); |
253 | | |
254 | | /* XXX - this is not correct. we MUST cache updates to avoid losing |
255 | | * an entry because of race conditions with the peer switch. |
256 | | */ |
257 | 0 | vrf = vrf_lookup_by_name(msg->vrf_name); |
258 | 0 | if (!vrf) { |
259 | 0 | if (PIM_DEBUG_MLAG) |
260 | 0 | zlog_debug( |
261 | 0 | "peer MLAG mroute add failed %s:%pSG; no vrf", |
262 | 0 | msg->vrf_name, &sg); |
263 | 0 | return; |
264 | 0 | } |
265 | 0 | pim = vrf->info; |
266 | |
|
267 | 0 | up = pim_upstream_find(pim, &sg); |
268 | 0 | if (up) { |
269 | | /* upstream already exists; create peer reference if it |
270 | | * doesn't already exist. |
271 | | */ |
272 | 0 | if (!PIM_UPSTREAM_FLAG_TEST_MLAG_PEER(up->flags)) |
273 | 0 | pim_upstream_ref(up, PIM_UPSTREAM_FLAG_MASK_MLAG_PEER, |
274 | 0 | __func__); |
275 | 0 | } else { |
276 | 0 | PIM_UPSTREAM_FLAG_SET_MLAG_PEER(flags); |
277 | 0 | up = pim_upstream_add(pim, &sg, NULL /*iif*/, flags, __func__, |
278 | 0 | NULL /*if_ch*/); |
279 | |
|
280 | 0 | if (!up) { |
281 | 0 | if (PIM_DEBUG_MLAG) |
282 | 0 | zlog_debug( |
283 | 0 | "peer MLAG mroute add failed %s:%pSG", |
284 | 0 | vrf->name, &sg); |
285 | 0 | return; |
286 | 0 | } |
287 | 0 | } |
288 | 0 | up->mlag.peer_mrib_metric = msg->cost_to_rp; |
289 | 0 | pim_mlag_up_df_role_elect(pim, up); |
290 | 0 | } |
291 | | |
292 | | /* Handle upstream entry del from the peer MLAG switch - |
293 | | * - peer reference is removed. this can result in the upstream |
294 | | * being deleted altogether. |
295 | | * - if a local entry continues to exisy and has a MLAG OIF DF election |
296 | | * is re-run (at the end of which the local entry will be the DF). |
297 | | */ |
298 | | static struct pim_upstream *pim_mlag_up_peer_deref(struct pim_instance *pim, |
299 | | struct pim_upstream *up) |
300 | 0 | { |
301 | 0 | if (!PIM_UPSTREAM_FLAG_TEST_MLAG_PEER(up->flags)) |
302 | 0 | return up; |
303 | | |
304 | 0 | PIM_UPSTREAM_FLAG_UNSET_MLAG_PEER(up->flags); |
305 | 0 | up = pim_upstream_del(pim, up, __func__); |
306 | 0 | if (up) |
307 | 0 | pim_mlag_up_df_role_elect(pim, up); |
308 | |
|
309 | 0 | return up; |
310 | 0 | } |
311 | | |
312 | | static void pim_mlag_up_peer_del(struct mlag_mroute_del *msg) |
313 | 0 | { |
314 | 0 | struct pim_upstream *up; |
315 | 0 | struct pim_instance *pim; |
316 | 0 | pim_sgaddr sg; |
317 | 0 | struct vrf *vrf; |
318 | |
|
319 | 0 | memset(&sg, 0, sizeof(sg)); |
320 | 0 | sg.src.s_addr = htonl(msg->source_ip); |
321 | 0 | sg.grp.s_addr = htonl(msg->group_ip); |
322 | |
|
323 | 0 | if (PIM_DEBUG_MLAG) |
324 | 0 | zlog_debug("peer MLAG mroute del %s:%pSG", msg->vrf_name, &sg); |
325 | |
|
326 | 0 | vrf = vrf_lookup_by_name(msg->vrf_name); |
327 | 0 | if (!vrf) { |
328 | 0 | if (PIM_DEBUG_MLAG) |
329 | 0 | zlog_debug( |
330 | 0 | "peer MLAG mroute del skipped %s:%pSG; no vrf", |
331 | 0 | msg->vrf_name, &sg); |
332 | 0 | return; |
333 | 0 | } |
334 | 0 | pim = vrf->info; |
335 | |
|
336 | 0 | up = pim_upstream_find(pim, &sg); |
337 | 0 | if (!up) { |
338 | 0 | if (PIM_DEBUG_MLAG) |
339 | 0 | zlog_debug( |
340 | 0 | "peer MLAG mroute del skipped %s:%pSG; no up", |
341 | 0 | vrf->name, &sg); |
342 | 0 | return; |
343 | 0 | } |
344 | | |
345 | 0 | (void)pim_mlag_up_peer_deref(pim, up); |
346 | 0 | } |
347 | | |
/* When we lose connection to the local MLAG daemon we can drop all peer
 * references.
 */
static void pim_mlag_up_peer_del_all(void)
{
	struct list *temp = list_new();
	struct pim_upstream *up;
	struct vrf *vrf;
	struct pim_instance *pim;

	/*
	 * So why these gyrations?
	 * pim->upstream_head has the list of *,G and S,G
	 * that are in the system. The problem of course
	 * is that it is an ordered list:
	 * (*,G1) -> (S1,G1) -> (S2,G2) -> (S3, G2) -> (*,G2) -> (S1,G2)
	 * And the *,G1 has pointers to S1,G1 and S2,G1
	 * if we delete *,G1 then we have a situation where
	 * S1,G1 and S2,G2 can be deleted as well. Then a
	 * simple ALL_LIST_ELEMENTS will have the next listnode
	 * pointer become invalid and we crash.
	 * So let's grab the list of MLAG_PEER upstreams
	 * add a refcount put on another list and delete safely
	 */
	RB_FOREACH(vrf, vrf_name_head, &vrfs_by_name) {
		pim = vrf->info;
		/* pass 1: collect every entry holding a peer reference */
		frr_each (rb_pim_upstream, &pim->upstream_head, up) {
			if (!PIM_UPSTREAM_FLAG_TEST_MLAG_PEER(up->flags))
				continue;
			listnode_add(temp, up);
			/*
			 * Add a reference since we are adding to this
			 * list for deletion
			 */
			up->ref_count++;
		}

		/* pass 2: deref each collected entry outside the RB walk */
		while (temp->count) {
			up = listnode_head(temp);
			listnode_delete(temp, up);

			up = pim_mlag_up_peer_deref(pim, up);
			/*
			 * This is the deletion of the reference added
			 * above
			 */
			if (up)
				pim_upstream_del(pim, up, __func__);
		}
	}

	list_delete(&temp);
}
401 | | |
/* Send upstream entry to the local MLAG daemon (which will subsequently
 * send it to the peer MLAG switch).
 *
 * The message is serialized onto a stream and queued on the MLAG fifo;
 * the zebra pthread is then signalled to drain it.  Field order below is
 * the wire contract with mlag_lib_decode_mroute_add() - do not reorder.
 */
static void pim_mlag_up_local_add_send(struct pim_instance *pim,
		struct pim_upstream *up)
{
	struct stream *s = NULL;
	struct vrf *vrf = pim->vrf;

	/* nothing to do while the local MLAG session is down */
	if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP))
		return;

	s = stream_new(sizeof(struct mlag_mroute_add) + PIM_MLAG_METADATA_LEN);
	if (!s)
		return;

	if (PIM_DEBUG_MLAG)
		zlog_debug("local MLAG mroute add %s:%s",
			   vrf->name, up->sg_str);

	++router->mlag_stats.msg.mroute_add_tx;

	stream_putl(s, MLAG_MROUTE_ADD);
	stream_put(s, vrf->name, VRF_NAMSIZ);
	stream_putl(s, ntohl(up->sg.src.s_addr));
	stream_putl(s, ntohl(up->sg.grp.s_addr));

	stream_putl(s, pim_up_mlag_local_cost(up));
	/* XXX - who is adding */
	stream_putl(s, MLAG_OWNER_VXLAN);
	/* XXX - am_i_DR field should be removed */
	stream_putc(s, false);
	/* DF flag: true unless the entry is marked NON_DF */
	stream_putc(s, !(PIM_UPSTREAM_FLAG_TEST_MLAG_NON_DF(up->flags)));
	stream_putl(s, vrf->vrf_id);
	/* XXX - this field is a No-op for VXLAN*/
	stream_put(s, NULL, INTERFACE_NAMSIZ);

	stream_fifo_push_safe(router->mlag_fifo, s);
	pim_mlag_signal_zpthread();
}
442 | | |
/* Send an upstream entry delete to the local MLAG daemon (which forwards
 * it to the peer MLAG switch).  Field order below is the wire contract
 * with mlag_lib_decode_mroute_del() - do not reorder.
 */
static void pim_mlag_up_local_del_send(struct pim_instance *pim,
		struct pim_upstream *up)
{
	struct stream *s = NULL;
	struct vrf *vrf = pim->vrf;

	/* nothing to do while the local MLAG session is down */
	if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP))
		return;

	s = stream_new(sizeof(struct mlag_mroute_del) + PIM_MLAG_METADATA_LEN);
	if (!s)
		return;

	if (PIM_DEBUG_MLAG)
		zlog_debug("local MLAG mroute del %s:%s",
			   vrf->name, up->sg_str);

	++router->mlag_stats.msg.mroute_del_tx;

	stream_putl(s, MLAG_MROUTE_DEL);
	stream_put(s, vrf->name, VRF_NAMSIZ);
	stream_putl(s, ntohl(up->sg.src.s_addr));
	stream_putl(s, ntohl(up->sg.grp.s_addr));
	/* XXX - who is adding */
	stream_putl(s, MLAG_OWNER_VXLAN);
	stream_putl(s, vrf->vrf_id);
	/* XXX - this field is a No-op for VXLAN */
	stream_put(s, NULL, INTERFACE_NAMSIZ);

	/* XXX - is this the most optimal way to do things */
	stream_fifo_push_safe(router->mlag_fifo, s);
	pim_mlag_signal_zpthread();
}
476 | | |
477 | | |
/* Called when a local upstream entry is created or if its cost changes */
void pim_mlag_up_local_add(struct pim_instance *pim,
		struct pim_upstream *up)
{
	/* re-run DF election first so the DF bit sent to the peer is fresh */
	pim_mlag_up_df_role_elect(pim, up);
	/* XXX - need to add some dup checks here */
	pim_mlag_up_local_add_send(pim, up);
}
486 | | |
/* Called when local MLAG reference is removed from an upstream entry */
void pim_mlag_up_local_del(struct pim_instance *pim,
		struct pim_upstream *up)
{
	/* re-run DF election, then tell the MLAG daemon the entry is gone */
	pim_mlag_up_df_role_elect(pim, up);
	pim_mlag_up_local_del_send(pim, up);
}
494 | | |
495 | | /* When connection to local MLAG daemon is established all the local |
496 | | * MLAG upstream entries are replayed to it. |
497 | | */ |
498 | | static void pim_mlag_up_local_replay(void) |
499 | 0 | { |
500 | 0 | struct pim_upstream *up; |
501 | 0 | struct vrf *vrf; |
502 | 0 | struct pim_instance *pim; |
503 | |
|
504 | 0 | RB_FOREACH(vrf, vrf_name_head, &vrfs_by_name) { |
505 | 0 | pim = vrf->info; |
506 | 0 | frr_each (rb_pim_upstream, &pim->upstream_head, up) { |
507 | 0 | if (pim_up_mlag_is_local(up)) |
508 | 0 | pim_mlag_up_local_add_send(pim, up); |
509 | 0 | } |
510 | 0 | } |
511 | 0 | } |
512 | | |
513 | | /* on local/peer mlag connection and role changes the DF status needs |
514 | | * to be re-evaluated |
515 | | */ |
516 | | static void pim_mlag_up_local_reeval(bool mlagd_send, const char *reason_code) |
517 | 0 | { |
518 | 0 | struct pim_upstream *up; |
519 | 0 | struct vrf *vrf; |
520 | 0 | struct pim_instance *pim; |
521 | |
|
522 | 0 | if (PIM_DEBUG_MLAG) |
523 | 0 | zlog_debug("%s re-run DF election because of %s", |
524 | 0 | __func__, reason_code); |
525 | 0 | RB_FOREACH(vrf, vrf_name_head, &vrfs_by_name) { |
526 | 0 | pim = vrf->info; |
527 | 0 | frr_each (rb_pim_upstream, &pim->upstream_head, up) { |
528 | 0 | if (!pim_up_mlag_is_local(up)) |
529 | 0 | continue; |
530 | | /* if role changes re-send to peer */ |
531 | 0 | if (pim_mlag_up_df_role_elect(pim, up) && |
532 | 0 | mlagd_send) |
533 | 0 | pim_mlag_up_local_add_send(pim, up); |
534 | 0 | } |
535 | 0 | } |
536 | 0 | } |
537 | | |
538 | | /*****************PIM Actions for MLAG state changes**********************/ |
539 | | |
540 | | /* notify the anycast VTEP component about state changes */ |
541 | | static inline void pim_mlag_vxlan_state_update(void) |
542 | 0 | { |
543 | 0 | bool enable = !!(router->mlag_flags & PIM_MLAGF_STATUS_RXED); |
544 | 0 | bool peer_state = !!(router->mlag_flags & PIM_MLAGF_PEER_CONN_UP); |
545 | |
|
546 | 0 | pim_vxlan_mlag_update(enable, peer_state, router->mlag_role, |
547 | 0 | router->peerlink_rif_p, &router->local_vtep_ip); |
548 | |
|
549 | 0 | } |
550 | | |
551 | | /**************End of PIM Actions for MLAG State changes******************/ |
552 | | |
553 | | |
554 | | /********************API to process PIM MLAG Data ************************/ |
555 | | |
/* Process a status update from the local MLAG daemon: cache the role,
 * peerlink interface and peer-session state, then apply the resulting
 * DF re-elections and VXLAN notifications.
 */
static void pim_mlag_process_mlagd_state_change(struct mlag_status msg)
{
	bool role_chg = false;
	bool state_chg = false;
	bool notify_vxlan = false;
	struct interface *peerlink_rif_p;
	char buf[MLAG_ROLE_STRSIZE];

	if (PIM_DEBUG_MLAG)
		zlog_debug("%s: msg dump: my_role: %s, peer_state: %s",
			   __func__,
			   mlag_role2str(msg.my_role, buf, sizeof(buf)),
			   (msg.peer_state == MLAG_STATE_RUNNING ? "RUNNING"
								 : "DOWN"));

	/* ignore updates until the local MLAG session is marked up */
	if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)) {
		if (PIM_DEBUG_MLAG)
			zlog_debug("%s: msg ignored mlagd process state down",
				   __func__);
		return;
	}
	++router->mlag_stats.msg.mlag_status_updates;

	/* evaluate the changes first */
	if (router->mlag_role != msg.my_role) {
		role_chg = true;
		notify_vxlan = true;
		router->mlag_role = msg.my_role;
	}

	strlcpy(router->peerlink_rif, msg.peerlink_rif,
		sizeof(router->peerlink_rif));

	/* XXX - handle the case where we may rx the interface name from the
	 * MLAG daemon before we get the interface from zebra.
	 */
	peerlink_rif_p = if_lookup_by_name(router->peerlink_rif, VRF_DEFAULT);
	if (router->peerlink_rif_p != peerlink_rif_p) {
		router->peerlink_rif_p = peerlink_rif_p;
		notify_vxlan = true;
	}

	/* track peer-session up/down transitions */
	if (msg.peer_state == MLAG_STATE_RUNNING) {
		if (!(router->mlag_flags & PIM_MLAGF_PEER_CONN_UP)) {
			state_chg = true;
			notify_vxlan = true;
			router->mlag_flags |= PIM_MLAGF_PEER_CONN_UP;
		}
		router->connected_to_mlag = true;
	} else {
		if (router->mlag_flags & PIM_MLAGF_PEER_CONN_UP) {
			++router->mlag_stats.peer_session_downs;
			state_chg = true;
			notify_vxlan = true;
			router->mlag_flags &= ~PIM_MLAGF_PEER_CONN_UP;
		}
		router->connected_to_mlag = false;
	}

	/* apply the changes */
	/* when connection to mlagd comes up we hold send mroutes till we have
	 * rxed the status and had a chance to re-valuate DF state
	 */
	if (!(router->mlag_flags & PIM_MLAGF_STATUS_RXED)) {
		router->mlag_flags |= PIM_MLAGF_STATUS_RXED;
		pim_mlag_vxlan_state_update();
		/* on session up re-eval DF status */
		pim_mlag_up_local_reeval(false /*mlagd_send*/, "mlagd_up");
		/* replay all the upstream entries to the local MLAG daemon */
		pim_mlag_up_local_replay();
		return;
	}

	if (notify_vxlan)
		pim_mlag_vxlan_state_update();

	if (state_chg) {
		if (!(router->mlag_flags & PIM_MLAGF_PEER_CONN_UP))
			/* when a connection goes down the primary takes over
			 * DF role for all entries
			 */
			pim_mlag_up_local_reeval(true /*mlagd_send*/,
						 "peer_down");
		else
			/* XXX - when session comes up we need to wait for
			 * PEER_REPLAY_DONE before running re-election on
			 * local-mlag entries that are missing peer reference
			 */
			pim_mlag_up_local_reeval(true /*mlagd_send*/,
						 "peer_up");
	} else if (role_chg) {
		/* MLAG role changed without a state change */
		pim_mlag_up_local_reeval(true /*mlagd_send*/, "role_chg");
	}
}
651 | | |
652 | | static void pim_mlag_process_peer_frr_state_change(struct mlag_frr_status msg) |
653 | 0 | { |
654 | 0 | if (PIM_DEBUG_MLAG) |
655 | 0 | zlog_debug( |
656 | 0 | "%s: msg dump: peer_frr_state: %s", __func__, |
657 | 0 | (msg.frr_state == MLAG_FRR_STATE_UP ? "UP" : "DOWN")); |
658 | |
|
659 | 0 | if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)) { |
660 | 0 | if (PIM_DEBUG_MLAG) |
661 | 0 | zlog_debug("%s: msg ignored mlagd process state down", |
662 | 0 | __func__); |
663 | 0 | return; |
664 | 0 | } |
665 | 0 | ++router->mlag_stats.msg.peer_zebra_status_updates; |
666 | | |
667 | | /* evaluate the changes first */ |
668 | 0 | if (msg.frr_state == MLAG_FRR_STATE_UP) { |
669 | 0 | if (!(router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP)) { |
670 | 0 | router->mlag_flags |= PIM_MLAGF_PEER_ZEBRA_UP; |
671 | | /* XXX - when peer zebra comes up we need to wait for |
672 | | * for some time to let the peer setup MDTs before |
673 | | * before relinquishing DF status |
674 | | */ |
675 | 0 | pim_mlag_up_local_reeval(true /*mlagd_send*/, |
676 | 0 | "zebra_up"); |
677 | 0 | } |
678 | 0 | } else { |
679 | 0 | if (router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP) { |
680 | 0 | ++router->mlag_stats.peer_zebra_downs; |
681 | 0 | router->mlag_flags &= ~PIM_MLAGF_PEER_ZEBRA_UP; |
682 | | /* when a peer zebra goes down we assume DF role */ |
683 | 0 | pim_mlag_up_local_reeval(true /*mlagd_send*/, |
684 | 0 | "zebra_down"); |
685 | 0 | } |
686 | 0 | } |
687 | 0 | } |
688 | | |
/* Cache the anycast/local VTEP IPs received from the MLAG daemon and
 * notify the VXLAN component when the local VTEP IP changes.
 * NOTE(review): msg->anycast_ip/local_ip appear to arrive in host byte
 * order and are converted with htonl() here - confirm against the MLAG
 * message encoding.
 */
static void pim_mlag_process_vxlan_update(struct mlag_vxlan *msg)
{
	char addr_buf1[INET_ADDRSTRLEN];
	char addr_buf2[INET_ADDRSTRLEN];
	uint32_t local_ip;

	/* ignore updates until the local MLAG session is marked up */
	if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)) {
		if (PIM_DEBUG_MLAG)
			zlog_debug("%s: msg ignored mlagd process state down",
				   __func__);
		return;
	}

	++router->mlag_stats.msg.vxlan_updates;
	router->anycast_vtep_ip.s_addr = htonl(msg->anycast_ip);
	local_ip = htonl(msg->local_ip);
	if (router->local_vtep_ip.s_addr != local_ip) {
		router->local_vtep_ip.s_addr = local_ip;
		pim_mlag_vxlan_state_update();
	}

	if (PIM_DEBUG_MLAG) {
		inet_ntop(AF_INET, &router->local_vtep_ip,
			  addr_buf1, INET_ADDRSTRLEN);
		inet_ntop(AF_INET, &router->anycast_vtep_ip,
			  addr_buf2, INET_ADDRSTRLEN);

		zlog_debug("%s: msg dump: local-ip:%s, anycast-ip:%s",
			   __func__, addr_buf1, addr_buf2);
	}
}
720 | | |
721 | | static void pim_mlag_process_mroute_add(struct mlag_mroute_add msg) |
722 | 0 | { |
723 | 0 | if (PIM_DEBUG_MLAG) { |
724 | 0 | pim_sgaddr sg; |
725 | |
|
726 | 0 | sg.grp.s_addr = ntohl(msg.group_ip); |
727 | 0 | sg.src.s_addr = ntohl(msg.source_ip); |
728 | |
|
729 | 0 | zlog_debug( |
730 | 0 | "%s: msg dump: vrf_name: %s, s.ip: 0x%x, g.ip: 0x%x (%pSG) cost: %u", |
731 | 0 | __func__, msg.vrf_name, msg.source_ip, msg.group_ip, |
732 | 0 | &sg, msg.cost_to_rp); |
733 | 0 | zlog_debug( |
734 | 0 | "(%pSG)owner_id: %d, DR: %d, Dual active: %d, vrf_id: 0x%x intf_name: %s", |
735 | 0 | &sg, msg.owner_id, msg.am_i_dr, msg.am_i_dual_active, |
736 | 0 | msg.vrf_id, msg.intf_name); |
737 | 0 | } |
738 | |
|
739 | 0 | if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)) { |
740 | 0 | if (PIM_DEBUG_MLAG) |
741 | 0 | zlog_debug("%s: msg ignored mlagd process state down", |
742 | 0 | __func__); |
743 | 0 | return; |
744 | 0 | } |
745 | | |
746 | 0 | ++router->mlag_stats.msg.mroute_add_rx; |
747 | |
|
748 | 0 | pim_mlag_up_peer_add(&msg); |
749 | 0 | } |
750 | | |
751 | | static void pim_mlag_process_mroute_del(struct mlag_mroute_del msg) |
752 | 0 | { |
753 | 0 | if (PIM_DEBUG_MLAG) { |
754 | 0 | pim_sgaddr sg; |
755 | |
|
756 | 0 | sg.grp.s_addr = ntohl(msg.group_ip); |
757 | 0 | sg.src.s_addr = ntohl(msg.source_ip); |
758 | 0 | zlog_debug( |
759 | 0 | "%s: msg dump: vrf_name: %s, s.ip: 0x%x, g.ip: 0x%x(%pSG)", |
760 | 0 | __func__, msg.vrf_name, msg.source_ip, msg.group_ip, |
761 | 0 | &sg); |
762 | 0 | zlog_debug("(%pSG)owner_id: %d, vrf_id: 0x%x intf_name: %s", |
763 | 0 | &sg, msg.owner_id, msg.vrf_id, msg.intf_name); |
764 | 0 | } |
765 | |
|
766 | 0 | if (!(router->mlag_flags & PIM_MLAGF_LOCAL_CONN_UP)) { |
767 | 0 | if (PIM_DEBUG_MLAG) |
768 | 0 | zlog_debug("%s: msg ignored mlagd process state down", |
769 | 0 | __func__); |
770 | 0 | return; |
771 | 0 | } |
772 | | |
773 | 0 | ++router->mlag_stats.msg.mroute_del_rx; |
774 | |
|
775 | 0 | pim_mlag_up_peer_del(&msg); |
776 | 0 | } |
777 | | |
/* Entry point for MLAG messages relayed through zebra: decode the common
 * MLAG header from the zclient input buffer and dispatch each message
 * (or each element of a bulk message) to its type-specific handler.
 * Returns 0 on success, or the decode error code.
 */
int pim_zebra_mlag_handle_msg(int cmd, struct zclient *zclient,
			      uint16_t zapi_length, vrf_id_t vrf_id)
{
	struct stream *s = zclient->ibuf;
	struct mlag_msg mlag_msg;
	char buf[80];
	int rc = 0;
	size_t length;

	rc = mlag_lib_decode_mlag_hdr(s, &mlag_msg, &length);
	if (rc)
		return (rc);

	if (PIM_DEBUG_MLAG)
		zlog_debug("%s: Received msg type: %s length: %d, bulk_cnt: %d",
			   __func__,
			   mlag_lib_msgid_to_str(mlag_msg.msg_type, buf,
						 sizeof(buf)),
			   mlag_msg.data_len, mlag_msg.msg_cnt);

	switch (mlag_msg.msg_type) {
	case MLAG_STATUS_UPDATE: {
		struct mlag_status msg;

		rc = mlag_lib_decode_mlag_status(s, &msg);
		if (rc)
			return (rc);
		pim_mlag_process_mlagd_state_change(msg);
	} break;
	case MLAG_PEER_FRR_STATUS: {
		struct mlag_frr_status msg;

		rc = mlag_lib_decode_frr_status(s, &msg);
		if (rc)
			return (rc);
		pim_mlag_process_peer_frr_state_change(msg);
	} break;
	case MLAG_VXLAN_UPDATE: {
		struct mlag_vxlan msg;

		rc = mlag_lib_decode_vxlan_update(s, &msg);
		if (rc)
			return rc;
		pim_mlag_process_vxlan_update(&msg);
	} break;
	case MLAG_MROUTE_ADD: {
		struct mlag_mroute_add msg;

		rc = mlag_lib_decode_mroute_add(s, &msg, &length);
		if (rc)
			return (rc);
		pim_mlag_process_mroute_add(msg);
	} break;
	case MLAG_MROUTE_DEL: {
		struct mlag_mroute_del msg;

		rc = mlag_lib_decode_mroute_del(s, &msg, &length);
		if (rc)
			return (rc);
		pim_mlag_process_mroute_del(msg);
	} break;
	case MLAG_MROUTE_ADD_BULK: {
		struct mlag_mroute_add msg;
		int i;

		/* bulk message: msg_cnt back-to-back add records */
		for (i = 0; i < mlag_msg.msg_cnt; i++) {
			rc = mlag_lib_decode_mroute_add(s, &msg, &length);
			if (rc)
				return (rc);
			pim_mlag_process_mroute_add(msg);
		}
	} break;
	case MLAG_MROUTE_DEL_BULK: {
		struct mlag_mroute_del msg;
		int i;

		/* bulk message: msg_cnt back-to-back del records */
		for (i = 0; i < mlag_msg.msg_cnt; i++) {
			rc = mlag_lib_decode_mroute_del(s, &msg, &length);
			if (rc)
				return (rc);
			pim_mlag_process_mroute_del(msg);
		}
	} break;
	/* message types pimd does not consume */
	case MLAG_MSG_NONE:
	case MLAG_REGISTER:
	case MLAG_DEREGISTER:
	case MLAG_DUMP:
	case MLAG_PIM_CFG_DUMP:
		break;
	}
	return 0;
}
870 | | |
871 | | /****************End of PIM Message processing handler********************/
872 | | |
873 | | int pim_zebra_mlag_process_up(ZAPI_CALLBACK_ARGS) |
874 | 0 | { |
875 | 0 | if (PIM_DEBUG_MLAG) |
876 | 0 | zlog_debug("%s: Received Process-Up from Mlag", __func__); |
877 | | |
878 | | /* |
879 | | * Incase of local MLAG restart, PIM needs to replay all the data |
880 | | * since MLAG is empty. |
881 | | */ |
882 | 0 | router->connected_to_mlag = true; |
883 | 0 | router->mlag_flags |= PIM_MLAGF_LOCAL_CONN_UP; |
884 | 0 | return 0; |
885 | 0 | } |
886 | | |
887 | | static void pim_mlag_param_reset(void) |
888 | 1 | { |
889 | | /* reset the cached params and stats */ |
890 | 1 | router->mlag_flags &= |
891 | 1 | (uint8_t) ~(PIM_MLAGF_STATUS_RXED | PIM_MLAGF_LOCAL_CONN_UP |
892 | 1 | | PIM_MLAGF_PEER_CONN_UP | PIM_MLAGF_PEER_ZEBRA_UP); |
893 | 1 | router->local_vtep_ip.s_addr = INADDR_ANY; |
894 | 1 | router->anycast_vtep_ip.s_addr = INADDR_ANY; |
895 | 1 | router->mlag_role = MLAG_ROLE_NONE; |
896 | 1 | memset(&router->mlag_stats.msg, 0, sizeof(router->mlag_stats.msg)); |
897 | 1 | router->peerlink_rif[0] = '\0'; |
898 | 1 | } |
899 | | |
900 | | int pim_zebra_mlag_process_down(ZAPI_CALLBACK_ARGS) |
901 | 0 | { |
902 | 0 | if (PIM_DEBUG_MLAG) |
903 | 0 | zlog_debug("%s: Received Process-Down from Mlag", __func__); |
904 | | |
905 | | /* Local CLAG is down, reset peer data and forward the traffic if |
906 | | * we are DR |
907 | | */ |
908 | 0 | if (router->mlag_flags & PIM_MLAGF_PEER_CONN_UP) |
909 | 0 | ++router->mlag_stats.peer_session_downs; |
910 | 0 | if (router->mlag_flags & PIM_MLAGF_PEER_ZEBRA_UP) |
911 | 0 | ++router->mlag_stats.peer_zebra_downs; |
912 | 0 | router->connected_to_mlag = false; |
913 | 0 | pim_mlag_param_reset(); |
914 | | /* on mlagd session down re-eval DF status */ |
915 | 0 | pim_mlag_up_local_reeval(false /*mlagd_send*/, "mlagd_down"); |
916 | | /* flush all peer references */ |
917 | 0 | pim_mlag_up_peer_del_all(); |
918 | | /* notify the vxlan component */ |
919 | 0 | pim_mlag_vxlan_state_update(); |
920 | 0 | return 0; |
921 | 0 | } |
922 | | |
923 | | static void pim_mlag_register_handler(struct event *thread) |
924 | 0 | { |
925 | 0 | uint32_t bit_mask = 0; |
926 | 0 |
|
927 | 0 | if (!zclient) |
928 | 0 | return; |
929 | 0 |
|
930 | 0 | SET_FLAG(bit_mask, (1 << MLAG_STATUS_UPDATE)); |
931 | 0 | SET_FLAG(bit_mask, (1 << MLAG_MROUTE_ADD)); |
932 | 0 | SET_FLAG(bit_mask, (1 << MLAG_MROUTE_DEL)); |
933 | 0 | SET_FLAG(bit_mask, (1 << MLAG_DUMP)); |
934 | 0 | SET_FLAG(bit_mask, (1 << MLAG_MROUTE_ADD_BULK)); |
935 | 0 | SET_FLAG(bit_mask, (1 << MLAG_MROUTE_DEL_BULK)); |
936 | 0 | SET_FLAG(bit_mask, (1 << MLAG_PIM_CFG_DUMP)); |
937 | 0 | SET_FLAG(bit_mask, (1 << MLAG_VXLAN_UPDATE)); |
938 | 0 | SET_FLAG(bit_mask, (1 << MLAG_PEER_FRR_STATUS)); |
939 | 0 |
|
940 | 0 | if (PIM_DEBUG_MLAG) |
941 | 0 | zlog_debug("%s: Posting Client Register to MLAG mask: 0x%x", |
942 | 0 | __func__, bit_mask); |
943 | 0 |
|
944 | 0 | zclient_send_mlag_register(zclient, bit_mask); |
945 | 0 | } |
946 | | |
947 | | void pim_mlag_register(void) |
948 | 0 | { |
949 | 0 | if (router->mlag_process_register) |
950 | 0 | return; |
951 | | |
952 | 0 | router->mlag_process_register = true; |
953 | |
|
954 | 0 | event_add_event(router->master, pim_mlag_register_handler, NULL, 0, |
955 | 0 | NULL); |
956 | 0 | } |
957 | | |
958 | | static void pim_mlag_deregister_handler(struct event *thread) |
959 | 0 | { |
960 | 0 | if (!zclient) |
961 | 0 | return; |
962 | 0 |
|
963 | 0 | if (PIM_DEBUG_MLAG) |
964 | 0 | zlog_debug("%s: Posting Client De-Register to MLAG from PIM", |
965 | 0 | __func__); |
966 | 0 | router->connected_to_mlag = false; |
967 | 0 | zclient_send_mlag_deregister(zclient); |
968 | 0 | } |
969 | | |
970 | | void pim_mlag_deregister(void) |
971 | 0 | { |
972 | | /* if somebody still interested in the MLAG channel skip de-reg */ |
973 | 0 | if (router->pim_mlag_intf_cnt || pim_vxlan_do_mlag_reg()) |
974 | 0 | return; |
975 | | |
976 | | /* not registered; nothing do */ |
977 | 0 | if (!router->mlag_process_register) |
978 | 0 | return; |
979 | | |
980 | 0 | router->mlag_process_register = false; |
981 | |
|
982 | 0 | event_add_event(router->master, pim_mlag_deregister_handler, NULL, 0, |
983 | 0 | NULL); |
984 | 0 | } |
985 | | |
986 | | void pim_if_configure_mlag_dualactive(struct pim_interface *pim_ifp) |
987 | 0 | { |
988 | 0 | if (!pim_ifp || !pim_ifp->pim || pim_ifp->activeactive == true) |
989 | 0 | return; |
990 | | |
991 | 0 | if (PIM_DEBUG_MLAG) |
992 | 0 | zlog_debug("%s: Configuring active-active on Interface: %s", |
993 | 0 | __func__, "NULL"); |
994 | |
|
995 | 0 | pim_ifp->activeactive = true; |
996 | 0 | if (pim_ifp->pim) |
997 | 0 | pim_ifp->pim->inst_mlag_intf_cnt++; |
998 | |
|
999 | 0 | router->pim_mlag_intf_cnt++; |
1000 | 0 | if (PIM_DEBUG_MLAG) |
1001 | 0 | zlog_debug( |
1002 | 0 | "%s: Total MLAG configured Interfaces on router: %d, Inst: %d", |
1003 | 0 | __func__, router->pim_mlag_intf_cnt, |
1004 | 0 | pim_ifp->pim->inst_mlag_intf_cnt); |
1005 | |
|
1006 | 0 | if (router->pim_mlag_intf_cnt == 1) { |
1007 | | /* |
1008 | | * at least one Interface is configured for MLAG, send register |
1009 | | * to Zebra for receiving MLAG Updates |
1010 | | */ |
1011 | 0 | pim_mlag_register(); |
1012 | 0 | } |
1013 | 0 | } |
1014 | | |
1015 | | void pim_if_unconfigure_mlag_dualactive(struct pim_interface *pim_ifp) |
1016 | 0 | { |
1017 | 0 | if (!pim_ifp || !pim_ifp->pim || pim_ifp->activeactive == false) |
1018 | 0 | return; |
1019 | | |
1020 | 0 | if (PIM_DEBUG_MLAG) |
1021 | 0 | zlog_debug("%s: UnConfiguring active-active on Interface: %s", |
1022 | 0 | __func__, "NULL"); |
1023 | |
|
1024 | 0 | pim_ifp->activeactive = false; |
1025 | 0 | pim_ifp->pim->inst_mlag_intf_cnt--; |
1026 | |
|
1027 | 0 | router->pim_mlag_intf_cnt--; |
1028 | 0 | if (PIM_DEBUG_MLAG) |
1029 | 0 | zlog_debug( |
1030 | 0 | "%s: Total MLAG configured Interfaces on router: %d, Inst: %d", |
1031 | 0 | __func__, router->pim_mlag_intf_cnt, |
1032 | 0 | pim_ifp->pim->inst_mlag_intf_cnt); |
1033 | |
|
1034 | 0 | if (router->pim_mlag_intf_cnt == 0) { |
1035 | | /* |
1036 | | * all the Interfaces are MLAG un-configured, post MLAG |
1037 | | * De-register to Zebra |
1038 | | */ |
1039 | 0 | pim_mlag_deregister(); |
1040 | 0 | pim_mlag_param_reset(); |
1041 | 0 | } |
1042 | 0 | } |
1043 | | |
1044 | | |
1045 | | void pim_instance_mlag_init(struct pim_instance *pim) |
1046 | 1 | { |
1047 | 1 | if (!pim) |
1048 | 0 | return; |
1049 | | |
1050 | 1 | pim->inst_mlag_intf_cnt = 0; |
1051 | 1 | } |
1052 | | |
1053 | | |
1054 | | void pim_instance_mlag_terminate(struct pim_instance *pim) |
1055 | 0 | { |
1056 | 0 | struct interface *ifp; |
1057 | |
|
1058 | 0 | if (!pim) |
1059 | 0 | return; |
1060 | | |
1061 | 0 | FOR_ALL_INTERFACES (pim->vrf, ifp) { |
1062 | 0 | struct pim_interface *pim_ifp = ifp->info; |
1063 | |
|
1064 | 0 | if (!pim_ifp || pim_ifp->activeactive == false) |
1065 | 0 | continue; |
1066 | | |
1067 | 0 | pim_if_unconfigure_mlag_dualactive(pim_ifp); |
1068 | 0 | } |
1069 | 0 | pim->inst_mlag_intf_cnt = 0; |
1070 | 0 | } |
1071 | | |
1072 | | void pim_mlag_terminate(void) |
1073 | 0 | { |
1074 | 0 | stream_free(router->mlag_stream); |
1075 | 0 | router->mlag_stream = NULL; |
1076 | 0 | stream_fifo_free(router->mlag_fifo); |
1077 | 0 | router->mlag_fifo = NULL; |
1078 | 0 | } |
1079 | | |
1080 | | void pim_mlag_init(void) |
1081 | 1 | { |
1082 | 1 | pim_mlag_param_reset(); |
1083 | 1 | router->pim_mlag_intf_cnt = 0; |
1084 | 1 | router->connected_to_mlag = false; |
1085 | 1 | router->mlag_fifo = stream_fifo_new(); |
1086 | 1 | router->zpthread_mlag_write = NULL; |
1087 | 1 | router->mlag_stream = stream_new(MLAG_BUF_LIMIT); |
1088 | 1 | } |