Line | Count | Source |
1 | | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | | /* |
3 | | * Constraints Shortest Path First algorithms - cspf.c |
4 | | * |
5 | | * Author: Olivier Dugeon <olivier.dugeon@orange.com> |
6 | | * |
7 | | * Copyright (C) 2022 Orange http://www.orange.com |
8 | | * |
9 | | * This file is part of Free Range Routing (FRR). |
10 | | */ |
11 | | |
12 | | #include <zebra.h> |
13 | | |
14 | | #include "if.h" |
15 | | #include "linklist.h" |
16 | | #include "log.h" |
17 | | #include "hash.h" |
18 | | #include "memory.h" |
19 | | #include "prefix.h" |
20 | | #include "table.h" |
21 | | #include "stream.h" |
22 | | #include "printfrr.h" |
23 | | #include "link_state.h" |
24 | | #include "cspf.h" |
25 | | |
/* Memory type used for every dynamic allocation made by the Path
 * Computation Algorithms in this file (c_path, v_node and cspf structures). */
DEFINE_MTYPE_STATIC(LIB, PCA, "Path Computation Algorithms");
28 | 8 | |
29 | 8 | /** |
30 | 8 | * Create new Constrained Path. Memory is dynamically allocated. |
31 | 8 | * |
32 | 8 | * @param key Vertex key of the destination of this path |
33 | 8 | * |
34 | 8 | * @return Pointer to a new Constrained Path structure |
35 | 8 | */ |
36 | 8 | static struct c_path *cpath_new(uint64_t key) |
37 | 8 | { |
38 | 0 | struct c_path *path; |
39 | | |
40 | | /* Sanity Check */ |
41 | 0 | if (key == 0) |
42 | 0 | return NULL; |
43 | | |
44 | 0 | path = XCALLOC(MTYPE_PCA, sizeof(struct c_path)); |
45 | 0 | path->dst = key; |
46 | 0 | path->status = IN_PROGRESS; |
47 | 0 | path->edges = list_new(); |
48 | 0 | path->weight = MAX_COST; |
49 | |
|
50 | 0 | return path; |
51 | 0 | } |
52 | | |
53 | | /** |
54 | | * Copy src Constrained Path into dst Constrained Path. A new Constrained Path |
55 | | * structure is dynamically allocated if dst is NULL. If src is NULL, the |
56 | | * function return the dst disregarding if it is NULL or not. |
57 | | * |
58 | | * @param dest Destination Constrained Path structure |
59 | | * @param src Source Constrained Path structure |
60 | | * |
61 | | * @return Pointer to the destination Constrained Path structure |
62 | | */ |
63 | | static struct c_path *cpath_copy(struct c_path *dest, const struct c_path *src) |
64 | 0 | { |
65 | 0 | struct c_path *new_path; |
66 | |
|
67 | 0 | if (!src) |
68 | 0 | return dest; |
69 | | |
70 | 0 | if (!dest) { |
71 | 0 | new_path = XCALLOC(MTYPE_PCA, sizeof(struct c_path)); |
72 | 0 | } else { |
73 | 0 | new_path = dest; |
74 | 0 | if (dest->edges) |
75 | 0 | list_delete(&new_path->edges); |
76 | 0 | } |
77 | |
|
78 | 0 | new_path->dst = src->dst; |
79 | 0 | new_path->weight = src->weight; |
80 | 0 | new_path->edges = list_dup(src->edges); |
81 | 0 | new_path->status = src->status; |
82 | |
|
83 | 0 | return new_path; |
84 | 0 | } |
85 | | |
86 | | /** |
87 | | * Delete Constrained Path structure. Previous allocated memory is freed. |
88 | | * |
89 | | * @param path Constrained Path structure to be deleted |
90 | | */ |
91 | | void cpath_del(struct c_path *path) |
92 | 0 | { |
93 | 0 | if (!path) |
94 | 0 | return; |
95 | | |
96 | 0 | if (path->edges) |
97 | 0 | list_delete(&path->edges); |
98 | |
|
99 | 0 | XFREE(MTYPE_PCA, path); |
100 | 0 | path = NULL; |
101 | 0 | } |
102 | | |
103 | | /** |
104 | | * Replace the list of edges in the next Constrained Path by the list of edges |
105 | | * in the current Constrained Path. |
106 | | * |
107 | | * @param next_path next Constrained Path structure |
108 | | * @param cur_path current Constrained Path structure |
109 | | */ |
110 | | static void cpath_replace(struct c_path *next_path, struct c_path *cur_path) |
111 | 0 | { |
112 | |
|
113 | 0 | if (next_path->edges) |
114 | 0 | list_delete(&next_path->edges); |
115 | |
|
116 | 0 | next_path->edges = list_dup(cur_path->edges); |
117 | 0 | } |
118 | | |
119 | | /** |
120 | | * Create a new Visited Node structure from the provided Vertex. Structure is |
121 | | * dynamically allocated. |
122 | | * |
123 | | * @param vertex Vertex structure |
124 | | * |
125 | | * @return Pointer to the new Visited Node structure |
126 | | */ |
127 | | static struct v_node *vnode_new(struct ls_vertex *vertex) |
128 | 0 | { |
129 | 0 | struct v_node *vnode; |
130 | |
|
131 | 0 | if (!vertex) |
132 | 0 | return NULL; |
133 | | |
134 | 0 | vnode = XCALLOC(MTYPE_PCA, sizeof(struct v_node)); |
135 | 0 | vnode->vertex = vertex; |
136 | 0 | vnode->key = vertex->key; |
137 | |
|
138 | 0 | return vnode; |
139 | 0 | } |
140 | | |
141 | | /** |
142 | | * Delete Visited Node structure. Previous allocated memory is freed. |
143 | | * |
144 | | * @param vnode Visited Node structure to be deleted |
145 | | */ |
146 | | static void vnode_del(struct v_node *vnode) |
147 | 0 | { |
148 | 0 | if (!vnode) |
149 | 0 | return; |
150 | | |
151 | 0 | XFREE(MTYPE_PCA, vnode); |
152 | 0 | vnode = NULL; |
153 | 0 | } |
154 | | |
155 | | /** |
156 | | * Search Vertex in TED by IPv4 address. The function search vertex by browsing |
157 | | * the subnets table. It allows to find not only vertex by router ID, but also |
158 | | * vertex by interface IPv4 address. |
159 | | * |
160 | | * @param ted Traffic Engineering Database |
161 | | * @param ipv4 IPv4 address |
162 | | * |
163 | | * @return Vertex if found, NULL otherwise |
164 | | */ |
165 | | static struct ls_vertex *get_vertex_by_ipv4(struct ls_ted *ted, |
166 | | struct in_addr ipv4) |
167 | 0 | { |
168 | 0 | struct ls_subnet *subnet; |
169 | 0 | struct prefix p; |
170 | |
|
171 | 0 | p.family = AF_INET; |
172 | 0 | p.u.prefix4 = ipv4; |
173 | |
|
174 | 0 | frr_each (subnets, &ted->subnets, subnet) { |
175 | 0 | if (subnet->key.family != AF_INET) |
176 | 0 | continue; |
177 | 0 | p.prefixlen = subnet->key.prefixlen; |
178 | 0 | if (prefix_same(&subnet->key, &p)) |
179 | 0 | return subnet->vertex; |
180 | 0 | } |
181 | | |
182 | 0 | return NULL; |
183 | 0 | } |
184 | | |
185 | | /** |
186 | | * Search Vertex in TED by IPv6 address. The function search vertex by browsing |
187 | | * the subnets table. It allows to find not only vertex by router ID, but also |
188 | | * vertex by interface IPv6 address. |
189 | | * |
190 | | * @param ted Traffic Engineering Database |
191 | | * @param ipv6 IPv6 address |
192 | | * |
193 | | * @return Vertex if found, NULL otherwise |
194 | | */ |
195 | | static struct ls_vertex *get_vertex_by_ipv6(struct ls_ted *ted, |
196 | | struct in6_addr ipv6) |
197 | 0 | { |
198 | 0 | struct ls_subnet *subnet; |
199 | 0 | struct prefix p; |
200 | |
|
201 | 0 | p.family = AF_INET6; |
202 | 0 | p.u.prefix6 = ipv6; |
203 | |
|
204 | 0 | frr_each (subnets, &ted->subnets, subnet) { |
205 | 0 | if (subnet->key.family != AF_INET6) |
206 | 0 | continue; |
207 | 0 | p.prefixlen = subnet->key.prefixlen; |
208 | 0 | if (prefix_cmp(&subnet->key, &p) == 0) |
209 | 0 | return subnet->vertex; |
210 | 0 | } |
211 | | |
212 | 0 | return NULL; |
213 | 0 | } |
214 | | |
215 | | struct cspf *cspf_new(void) |
216 | 0 | { |
217 | 0 | struct cspf *algo; |
218 | | |
219 | | /* Allocate New CSPF structure */ |
220 | 0 | algo = XCALLOC(MTYPE_PCA, sizeof(struct cspf)); |
221 | | |
222 | | /* Initialize RB-Trees */ |
223 | 0 | processed_init(&algo->processed); |
224 | 0 | visited_init(&algo->visited); |
225 | 0 | pqueue_init(&algo->pqueue); |
226 | |
|
227 | 0 | algo->path = NULL; |
228 | 0 | algo->pdst = NULL; |
229 | |
|
230 | 0 | return algo; |
231 | 0 | } |
232 | | |
233 | | struct cspf *cspf_init(struct cspf *algo, const struct ls_vertex *src, |
234 | | const struct ls_vertex *dst, struct constraints *csts) |
235 | 0 | { |
236 | 0 | struct cspf *new_algo; |
237 | 0 | struct c_path *psrc; |
238 | |
|
239 | 0 | if (!csts) |
240 | 0 | return NULL; |
241 | | |
242 | 0 | if (!algo) |
243 | 0 | new_algo = cspf_new(); |
244 | 0 | else |
245 | 0 | new_algo = algo; |
246 | | |
247 | | /* Initialize Processed Path and Priority Queue with Src & Dst */ |
248 | 0 | if (src) { |
249 | 0 | psrc = cpath_new(src->key); |
250 | 0 | psrc->weight = 0; |
251 | 0 | processed_add(&new_algo->processed, psrc); |
252 | 0 | pqueue_add(&new_algo->pqueue, psrc); |
253 | 0 | new_algo->path = psrc; |
254 | 0 | } |
255 | 0 | if (dst) { |
256 | 0 | new_algo->pdst = cpath_new(dst->key); |
257 | 0 | processed_add(&new_algo->processed, new_algo->pdst); |
258 | 0 | } |
259 | |
|
260 | 0 | memcpy(&new_algo->csts, csts, sizeof(struct constraints)); |
261 | |
|
262 | 0 | return new_algo; |
263 | 0 | } |
264 | | |
265 | | struct cspf *cspf_init_v4(struct cspf *algo, struct ls_ted *ted, |
266 | | const struct in_addr src, const struct in_addr dst, |
267 | | struct constraints *csts) |
268 | 0 | { |
269 | 0 | struct ls_vertex *vsrc; |
270 | 0 | struct ls_vertex *vdst; |
271 | 0 | struct cspf *new_algo; |
272 | | |
273 | | /* Sanity Check */ |
274 | 0 | if (!ted) |
275 | 0 | return algo; |
276 | | |
277 | 0 | if (!algo) |
278 | 0 | new_algo = cspf_new(); |
279 | 0 | else |
280 | 0 | new_algo = algo; |
281 | | |
282 | | /* Got Source and Destination Vertex from TED */ |
283 | 0 | vsrc = get_vertex_by_ipv4(ted, src); |
284 | 0 | vdst = get_vertex_by_ipv4(ted, dst); |
285 | 0 | csts->family = AF_INET; |
286 | |
|
287 | 0 | return cspf_init(new_algo, vsrc, vdst, csts); |
288 | 0 | } |
289 | | |
290 | | struct cspf *cspf_init_v6(struct cspf *algo, struct ls_ted *ted, |
291 | | const struct in6_addr src, const struct in6_addr dst, |
292 | | struct constraints *csts) |
293 | 0 | { |
294 | 0 | struct ls_vertex *vsrc; |
295 | 0 | struct ls_vertex *vdst; |
296 | 0 | struct cspf *new_algo; |
297 | | |
298 | | /* Sanity Check */ |
299 | 0 | if (!ted) |
300 | 0 | return algo; |
301 | | |
302 | 0 | if (!algo) |
303 | 0 | new_algo = cspf_new(); |
304 | 0 | else |
305 | 0 | new_algo = algo; |
306 | | |
307 | | /* Got Source and Destination Vertex from TED */ |
308 | 0 | vsrc = get_vertex_by_ipv6(ted, src); |
309 | 0 | vdst = get_vertex_by_ipv6(ted, dst); |
310 | 0 | csts->family = AF_INET6; |
311 | |
|
312 | 0 | return cspf_init(new_algo, vsrc, vdst, csts); |
313 | 0 | } |
314 | | |
/**
 * Reset a CSPF context so it can be reused for another computation: empty
 * the priority queue, the processed-path tree (freeing the paths) and the
 * visited-vertex tree (freeing the nodes), then clear the constraints.
 * The context itself is NOT freed; see cspf_del() for that.
 *
 * @param algo CSPF context to clean (NULL is a no-op)
 */
void cspf_clean(struct cspf *algo)
{
	struct c_path *path;
	struct v_node *vnode;

	if (!algo)
		return;

	/*
	 * Normally, Priority Queue is empty. Clean it in case of. Entries
	 * are only removed, not freed: every queued path was also inserted
	 * in the processed tree (see cspf_init() / relax_constraints()),
	 * which owns it and frees it in the loop below.
	 */
	if (pqueue_count(&algo->pqueue)) {
		frr_each_safe (pqueue, &algo->pqueue, path) {
			pqueue_del(&algo->pqueue, path);
		}
	}

	/* Empty Processed Path tree and associated Path */
	if (processed_count(&algo->processed)) {
		frr_each_safe (processed, &algo->processed, path) {
			processed_del(&algo->processed, path);
			cpath_del(path);
		}
	}

	/* Empty visited Vertex tree and associated Node */
	if (visited_count(&algo->visited)) {
		frr_each_safe (visited, &algo->visited, vnode) {
			visited_del(&algo->visited, vnode);
			vnode_del(vnode);
		}
	}

	/* Clear constraints and now-dangling path pointers (freed above) */
	memset(&algo->csts, 0, sizeof(struct constraints));
	algo->path = NULL;
	algo->pdst = NULL;
}
350 | | |
351 | | void cspf_del(struct cspf *algo) |
352 | 0 | { |
353 | 0 | if (!algo) |
354 | 0 | return; |
355 | | |
356 | | /* Empty Priority Queue and Processes Path */ |
357 | 0 | cspf_clean(algo); |
358 | | |
359 | | /* Then, reset Priority Queue, Processed Path and Visited RB-Tree */ |
360 | 0 | pqueue_fini(&algo->pqueue); |
361 | 0 | processed_fini(&algo->processed); |
362 | 0 | visited_fini(&algo->visited); |
363 | |
|
364 | 0 | XFREE(MTYPE_PCA, algo); |
365 | 0 | algo = NULL; |
366 | 0 | } |
367 | | |
/**
 * Prune Edge if constraints are not met by testing Edge Attributes against
 * given constraints and cumulative cost of the given constrained path.
 *
 * @param path On-going Computed Path with cumulative cost constraints
 * @param edge Edge to be validated against Constraints
 * @param csts Constraints for this path
 *
 * @return True if Edge should be pruned, false if Edge is valid
 */
static bool prune_edge(const struct c_path *path, const struct ls_edge *edge,
		       const struct constraints *csts)
{
	struct ls_vertex *dst;
	struct ls_attributes *attr;

	/* Check that Path, Edge and Constraints are valid */
	if (!path || !edge || !csts)
		return true;

	/* Check that Edge has a valid destination */
	if (!edge->destination)
		return true;
	dst = edge->destination;

	/* Check that Edge has valid attributes */
	if (!edge->attributes)
		return true;
	attr = edge->attributes;

	/* Check that Edge belongs to the requested Address Family and type */
	if (csts->family == AF_INET) {
		/* Prune edges without a usable local IPv4 address */
		if (IPV4_NET0(attr->standard.local.s_addr))
			return true;
		/*
		 * For Segment Routing TE, the edge must carry an Adjacency
		 * SID and the destination node must support SR.
		 */
		if (csts->type == SR_TE)
			if (!CHECK_FLAG(attr->flags, LS_ATTR_ADJ_SID) ||
			    !CHECK_FLAG(dst->node->flags, LS_NODE_SR))
				return true;
	}
	if (csts->family == AF_INET6) {
		/* Prune edges without a usable local IPv6 address */
		if (IN6_IS_ADDR_UNSPECIFIED(&attr->standard.local6))
			return true;
		if (csts->type == SR_TE)
			if (!CHECK_FLAG(attr->flags, LS_ATTR_ADJ_SID6) ||
			    !CHECK_FLAG(dst->node->flags, LS_NODE_SR))
				return true;
	}

	/*
	 * Check that total cost, up to this edge, respects the initial
	 * constraints. An edge that does not advertise the metric selected
	 * by the constraints is pruned as well.
	 */
	switch (csts->ctype) {
	case CSPF_METRIC:
		if (!CHECK_FLAG(attr->flags, LS_ATTR_METRIC))
			return true;
		if ((attr->metric + path->weight) > csts->cost)
			return true;
		break;

	case CSPF_TE_METRIC:
		if (!CHECK_FLAG(attr->flags, LS_ATTR_TE_METRIC))
			return true;
		if ((attr->standard.te_metric + path->weight) > csts->cost)
			return true;
		break;

	case CSPF_DELAY:
		if (!CHECK_FLAG(attr->flags, LS_ATTR_DELAY))
			return true;
		if ((attr->extended.delay + path->weight) > csts->cost)
			return true;
		break;
	}
	/*
	 * NOTE(review): no default case above — an unrecognized ctype is
	 * never pruned on cost. compute_p2p_path() rejects a zero ctype
	 * before edges are examined; confirm no other values can reach here.
	 */

	/* If specified, check that Edge meet Bandwidth constraint */
	if (csts->bw > 0.0) {
		/*
		 * assumes csts->cos is a valid index into unrsv_bw[] —
		 * TODO confirm callers validate the Class of Service value
		 */
		if (attr->standard.max_bw < csts->bw ||
		    attr->standard.max_rsv_bw < csts->bw ||
		    attr->standard.unrsv_bw[csts->cos] < csts->bw)
			return true;
	}

	/* All is fine. We can consider this Edge valid, so not to be prune */
	return false;
}
454 | | |
/**
 * Relax constraints of the current path up to the destination vertex of the
 * provided Edge. This function progresses in the network topology by
 * validating the next vertex on the computed path. If the Vertex has not
 * already been visited, the list of edges of the current path is augmented
 * with this edge if the new cost is lower than the prior path up to this
 * vertex. The updated path is (re-)inserted in the Priority Queue with its
 * new cost i.e. current cost + edge cost.
 *
 * @param algo CSPF structure
 * @param edge Next Edge to be added to the current computed path
 *
 * @return True if current path reaches the destination, false otherwise
 */
static bool relax_constraints(struct cspf *algo, struct ls_edge *edge)
{

	struct c_path pkey = {};
	struct c_path *next_path;
	struct v_node vnode = {};
	uint32_t total_cost = MAX_COST;

	/* Verify that we have a current computed path */
	if (!algo->path)
		return false;

	/* Verify if we have not visited the next Vertex to avoid loop */
	vnode.key = edge->destination->key;
	if (visited_member(&algo->visited, &vnode)) {
		return false;
	}

	/*
	 * Get Next Computed Path from next vertex key
	 * or create a new one if it has not yet computed.
	 */
	pkey.dst = edge->destination->key;
	next_path = processed_find(&algo->processed, &pkey);
	if (!next_path) {
		next_path = cpath_new(pkey.dst);
		processed_add(&algo->processed, next_path);
	}

	/*
	 * Add or update the Computed Path in the Priority Queue if total cost
	 * is lower than cost associated to this next Vertex. This could occurs
	 * if we process a Vertex that as not yet been visited in the Graph
	 * or if we found a shortest path up to this Vertex.
	 *
	 * On an unknown ctype, total_cost stays at MAX_COST, so the update
	 * below is skipped (a fresh path's weight is also MAX_COST).
	 */
	switch (algo->csts.ctype) {
	case CSPF_METRIC:
		total_cost = edge->attributes->metric + algo->path->weight;
		break;
	case CSPF_TE_METRIC:
		total_cost = edge->attributes->standard.te_metric +
			     algo->path->weight;
		break;
	case CSPF_DELAY:
		total_cost =
			edge->attributes->extended.delay + algo->path->weight;
		break;
	default:
		break;
	}
	if (total_cost < next_path->weight) {
		/*
		 * It is not possible to directly update the q_path in the
		 * Priority Queue. Indeed, if we modify the path weight, the
		 * Priority Queue must be re-ordered. So, we need fist to remove
		 * the q_path if it is present in the Priority Queue, then,
		 * update the Path, in particular the Weight, and finally
		 * (re-)insert it in the Priority Queue.
		 */
		struct c_path *path;
		frr_each_safe (pqueue, &algo->pqueue, path) {
			if (path->dst == pkey.dst) {
				pqueue_del(&algo->pqueue, path);
				break;
			}
		}
		/* New best path: current path's edges + this edge */
		next_path->weight = total_cost;
		cpath_replace(next_path, algo->path);
		listnode_add(next_path->edges, edge);
		pqueue_add(&algo->pqueue, next_path);
	}

	/*
	 * Return True if we reach the destination. NOTE(review): algo->pdst
	 * is dereferenced unconditionally — compute_p2p_path() guarantees it
	 * is set before calling us; confirm no other caller exists.
	 */
	return (next_path->dst == algo->pdst->dst);
}
543 | | |
/**
 * Compute a point-to-point path that satisfies the constraints set up by
 * cspf_init() (or one of its v4/v6 wrappers), exploring the given TED with
 * a Dijkstra-like search. The CSPF context is cleaned on completion and may
 * be re-initialized for another run.
 *
 * @param algo CSPF context, previously initialized with src, dst & csts
 * @param ted  Traffic Engineering Database to explore
 *
 * @return Newly allocated Constrained Path (caller must free it with
 *         cpath_del()); its status field reports SUCCESS or the failure
 *         reason (FAILED, NO_SOURCE, NO_DESTINATION, SAME_SRC_DST)
 */
struct c_path *compute_p2p_path(struct cspf *algo, struct ls_ted *ted)
{
	struct listnode *node;
	struct ls_vertex *vertex;
	struct ls_edge *edge;
	struct c_path *optim_path;
	struct v_node *vnode;
	uint32_t cur_cost;

	/* Sentinel key: a real destination key replaces it below */
	optim_path = cpath_new(0xFFFFFFFFFFFFFFFF);
	optim_path->status = FAILED;

	/* Check that all is correctly initialized */
	if (!algo)
		return optim_path;

	if (!algo->csts.ctype)
		return optim_path;

	if (!algo->pdst) {
		optim_path->status = NO_DESTINATION;
		return optim_path;
	}

	if (!algo->path) {
		optim_path->status = NO_SOURCE;
		return optim_path;
	}

	if (algo->pdst->dst == algo->path->dst) {
		optim_path->status = SAME_SRC_DST;
		return optim_path;
	}

	optim_path->dst = algo->pdst->dst;
	optim_path->status = IN_PROGRESS;

	/*
	 * Process all Connected Vertex until priority queue becomes empty.
	 * Connected Vertices are added into the priority queue when
	 * processing the next Connected Vertex: see relax_constraints()
	 */
	cur_cost = MAX_COST;
	while (pqueue_count(&algo->pqueue) != 0) {
		/* Got shortest current Path from the Priority Queue */
		algo->path = pqueue_pop(&algo->pqueue);

		/* Add destination Vertex of this path to the visited RB Tree */
		vertex = ls_find_vertex_by_key(ted, algo->path->dst);
		if (!vertex)
			continue;
		vnode = vnode_new(vertex);
		visited_add(&algo->visited, vnode);

		/* Process all outgoing links from this Vertex */
		for (ALL_LIST_ELEMENTS_RO(vertex->outgoing_edges, node, edge)) {
			/*
			 * Skip Connected Edges that must be prune i.e.
			 * Edges that not satisfy the given constraints,
			 * in particular the Bandwidth, TE Metric and Delay.
			 */
			if (prune_edge(algo->path, edge, &algo->csts))
				continue;

			/*
			 * Relax constraints and check if we got a shorter
			 * candidate path. Copy it into optim_path so the
			 * result survives cspf_clean() below, which frees
			 * all paths owned by the processed tree.
			 */
			if (relax_constraints(algo, edge) &&
			    algo->pdst->weight < cur_cost) {
				cur_cost = algo->pdst->weight;
				cpath_copy(optim_path, algo->pdst);
				optim_path->status = SUCCESS;
			}
		}
	}

	/*
	 * The priority queue is empty => all the possible (vertex, path)
	 * elements have been explored. The optim_path contains the optimal
	 * path if it exists. Otherwise an empty path with status failed is
	 * returned.
	 */
	if (optim_path->status == IN_PROGRESS ||
	    listcount(optim_path->edges) == 0)
		optim_path->status = FAILED;
	cspf_clean(algo);

	return optim_path;
}