Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/networkx/utils/backends.py: 21%

778 statements  

1# Notes about NetworkX namespace objects set up here: 

2# 

3# nx.utils.backends.backends: 

4# dict keyed by backend name to the backend entry point object. 

5# Filled using ``_get_backends("networkx.backends")`` during import of this module. 

6# 

7# nx.utils.backends.backend_info: 

8# dict keyed by backend name to the metadata returned by the function indicated 

9# by the "networkx.backend_info" entry point. 

10# Created as an empty dict while importing this module, but later filled using 

11# ``_set_configs_from_environment()`` at end of importing ``networkx/__init__.py``. 

12# 

13# nx.config: 

14# Config object for NetworkX config setting. Created using 

15# ``_set_configs_from_environment()`` at end of importing ``networkx/__init__.py``. 

16# 

17# private dicts: 

18# nx.utils.backends._loaded_backends: 

19# dict used to memoize loaded backends. Keyed by backend name to loaded backends. 

20# 

21# nx.utils.backends._registered_algorithms: 

22# dict of all the dispatchable functions in networkx, keyed by _dispatchable 

23# function name to the wrapped function object. 
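#
# Example (a sketch, not part of this module): a third-party backend package
# advertises itself through entry points in its own packaging metadata, e.g.
# in a hypothetical pyproject.toml:
#
#     [project.entry-points."networkx.backends"]
#     my_backend = "my_backend_pkg.interface:BackendInterface"
#
#     [project.entry-points."networkx.backend_info"]
#     my_backend = "my_backend_pkg:get_info"
#
# The first group is what fills ``backends`` below via
# ``_get_backends("networkx.backends")``; the second is loaded and called by
# ``_set_configs_from_environment()`` to fill ``backend_info``.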

24 

25import inspect 

26import itertools 

27import logging 

28import os 

29import typing 

30import warnings 

31from functools import partial 

32from importlib.metadata import entry_points 

33 

34import networkx as nx 

35 

36from .configs import BackendPriorities, Config, NetworkXConfig 

37from .decorators import argmap 

38 

39__all__ = ["_dispatchable"] 

40 

41_logger = logging.getLogger(__name__) 

42FAILED_TO_CONVERT = "FAILED_TO_CONVERT" 

43 

44 

45def _get_backends(group, *, load_and_call=False): 

46 """ 

47 Retrieve NetworkX ``backends`` and ``backend_info`` from the entry points. 

48 

49 Parameters 

50 ----------

51 group : str 

52 The entry point group to retrieve (e.g. "networkx.backends").

53 load_and_call : bool, optional 

54 If True, load each entry point and call it, storing the returned value. Defaults to False.

55 

56 Returns 

57 -------

58 dict 

59 A dictionary mapping backend names to their respective backend objects. 

60 

61 Notes 

62 -----

63 If a backend is defined more than once, a warning is issued. 

64 If a backend name is not a valid Python identifier, the backend is 

65 ignored and a warning is issued. 

66 The "nx_loopback" backend is removed if it exists, as it is only available during testing. 

67 A warning is displayed if an error occurs while loading a backend. 

68 """ 

69 items = entry_points(group=group) 

70 rv = {} 

71 for ep in items: 

72 if not ep.name.isidentifier(): 

73 warnings.warn( 

74 f"networkx backend name is not a valid identifier: {ep.name!r}. Ignoring.", 

75 RuntimeWarning, 

76 stacklevel=2, 

77 ) 

78 elif ep.name in rv: 

79 warnings.warn( 

80 f"networkx backend defined more than once: {ep.name}", 

81 RuntimeWarning, 

82 stacklevel=2, 

83 ) 

84 elif load_and_call: 

85 try: 

86 rv[ep.name] = ep.load()() 

87 except Exception as exc: 

88 warnings.warn( 

89 f"Error encountered when loading info for backend {ep.name}: {exc}", 

90 RuntimeWarning, 

91 stacklevel=2, 

92 ) 

93 else: 

94 rv[ep.name] = ep 

95 rv.pop("nx_loopback", None) 

96 return rv 

97 

98 

99# Note: "networkx" is in `backend_info` but ignored in `backends` and `config.backends`. 

100# It is valid to use "networkx" as a backend argument and in `config.backend_priority`. 

101# If we make "networkx" a "proper" backend, put it in `backends` and `config.backends`. 

102backends = _get_backends("networkx.backends") 

103 

104# Use _set_configs_from_environment() below to fill backend_info dict as 

105# the last step in importing networkx 

106backend_info = {} 

107 

108# Load and cache backends on-demand 

109_loaded_backends = {} # type: ignore[var-annotated] 

110_registered_algorithms = {} 

111 

112 

113# Get default configuration from environment variables at import time 

114def _comma_sep_to_list(string): 

115 return [x_strip for x in string.strip().split(",") if (x_strip := x.strip())] 
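# A minimal illustration (hypothetical input, not from the source):
#     >>> _comma_sep_to_list(" cugraph, ,parallel ,")
#     ['cugraph', 'parallel']
# Empty items produced by stray commas or extra whitespace are dropped by the
# walrus-filtered comprehension above.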

116 

117 

118def _set_configs_from_environment(): 

119 """Initialize ``config.backend_priority``, load backend_info and config. 

120 

121 This gets default values from environment variables (see ``nx.config`` for details). 

122 This function is run at the very end of importing networkx. It is run at this time 

123 to avoid loading backend_info before the rest of networkx is imported in case a 

124 backend uses networkx for its backend_info (e.g., subclassing the Config class).

125 """ 

126 # backend_info is defined above as empty dict. Fill it after import finishes. 

127 backend_info.update(_get_backends("networkx.backend_info", load_and_call=True)) 

128 backend_info.update( 

129 (backend, {}) for backend in backends.keys() - backend_info.keys() 

130 ) 

131 

132 # set up config based on backend_info and environment 

133 backend_config = {} 

134 for backend, info in backend_info.items(): 

135 if "default_config" not in info: 

136 cfg = Config() 

137 else: 

138 cfg = info["default_config"] 

139 if not isinstance(cfg, Config): 

140 cfg = Config(**cfg) 

141 backend_config[backend] = cfg 

142 backend_config = Config(**backend_config) 

143 # Setting the doc of backend_config's type does not set the doc of Config:

144 # Config has a __new__ method that returns an instance with a unique type!

145 type(backend_config).__doc__ = "All installed NetworkX backends and their configs." 

146 

147 backend_priority = BackendPriorities(algos=[], generators=[]) 

148 

149 config = NetworkXConfig( 

150 backend_priority=backend_priority, 

151 backends=backend_config, 

152 cache_converted_graphs=bool( 

153 os.environ.get("NETWORKX_CACHE_CONVERTED_GRAPHS", True) 

154 ), 

155 fallback_to_nx=bool(os.environ.get("NETWORKX_FALLBACK_TO_NX", False)), 

156 warnings_to_ignore=set( 

157 _comma_sep_to_list(os.environ.get("NETWORKX_WARNINGS_TO_IGNORE", "")) 

158 ), 

159 ) 

160 

161 # Add "networkx" item to backend_info now b/c backend_config is set up 

162 backend_info["networkx"] = {} 

163 

164 # NETWORKX_BACKEND_PRIORITY is the same as NETWORKX_BACKEND_PRIORITY_ALGOS 

165 priorities = { 

166 key[26:].lower(): val 

167 for key, val in os.environ.items() 

168 if key.startswith("NETWORKX_BACKEND_PRIORITY_") 

169 } 

170 backend_priority = config.backend_priority 

171 backend_priority.algos = ( 

172 _comma_sep_to_list(priorities.pop("algos")) 

173 if "algos" in priorities 

174 else _comma_sep_to_list( 

175 os.environ.get( 

176 "NETWORKX_BACKEND_PRIORITY", 

177 os.environ.get("NETWORKX_AUTOMATIC_BACKENDS", ""), 

178 ) 

179 ) 

180 ) 

181 backend_priority.generators = _comma_sep_to_list(priorities.pop("generators", "")) 

182 for key in sorted(priorities): 

183 backend_priority[key] = _comma_sep_to_list(priorities[key]) 
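# A concrete sketch (hypothetical backend names), assuming the environment
#   NETWORKX_BACKEND_PRIORITY_ALGOS="parallel,cugraph"
#   NETWORKX_BACKEND_PRIORITY_GENERATORS="cugraph"
#   NETWORKX_BACKEND_PRIORITY_BFS_LAYERS="parallel"
# yields backend_priority.algos == ["parallel", "cugraph"],
# backend_priority.generators == ["cugraph"], and
# backend_priority.bfs_layers == ["parallel"] via the loop above.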

184 

185 return config 

186 

187 

188def _do_nothing(): 

189 """This does nothing at all, yet it helps turn ``_dispatchable`` into functions. 

190 

191 Use this with the ``argmap`` decorator to turn ``self`` into a function. It results 

192 in some small additional overhead compared to calling ``_dispatchable`` directly, 

193 but ``argmap`` has the property that it can stack with other ``argmap`` 

194 decorators "for free". Being a function is better for REPRs and type-checkers. 

195 """ 

196 

197 

198def _always_run(name, args, kwargs): 

199 return True 

200 

201 

202def _load_backend(backend_name): 

203 if backend_name in _loaded_backends: 

204 return _loaded_backends[backend_name] 

205 if backend_name not in backends: 

206 raise ImportError(f"'{backend_name}' backend is not installed") 

207 rv = _loaded_backends[backend_name] = backends[backend_name].load() 

208 if not hasattr(rv, "can_run"): 

209 rv.can_run = _always_run 

210 if not hasattr(rv, "should_run"): 

211 rv.should_run = _always_run 

212 return rv 
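# A usage sketch (hypothetical backend name, not from this module): the first
# call to ``_load_backend("parallel")`` loads the entry point and memoizes the
# result in ``_loaded_backends``; later calls return the cached object. Missing
# ``can_run``/``should_run`` hooks are filled with the permissive
# ``_always_run`` default defined above.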

213 

214 

215class _dispatchable: 

216 _is_testing = False 

217 

218 def __new__( 

219 cls, 

220 func=None, 

221 *, 

222 name=None, 

223 graphs="G", 

224 edge_attrs=None, 

225 node_attrs=None, 

226 preserve_edge_attrs=False, 

227 preserve_node_attrs=False, 

228 preserve_graph_attrs=False, 

229 preserve_all_attrs=False, 

230 mutates_input=False, 

231 returns_graph=False, 

232 implemented_by_nx=True, 

233 ): 

234 """A decorator function that is used to redirect the execution of ``func`` 

235 function to its backend implementation. 

236 

237 This decorator allows the function to dispatch to different backend 

238 implementations based on the input graph types, and also manages the 

239 extra keywords ``backend`` and ``**backend_kwargs``. 

240 Usage can be any of the following decorator forms: 

241 

242 - ``@_dispatchable`` 

243 - ``@_dispatchable()`` 

244 - ``@_dispatchable(name="override_name")`` 

245 - ``@_dispatchable(graphs="graph_var_name")`` 

246 - ``@_dispatchable(edge_attrs="weight")`` 

247 - ``@_dispatchable(graphs={"G": 0, "H": 1}, edge_attrs={"weight": "default"})`` 

248 with 0 and 1 giving the position in the function signature for graph

249 objects. When ``edge_attrs`` is a dict, keys are keyword names and values 

250 are defaults. 

251 

252 Parameters 

253 ---------- 

254 func : callable, optional (default: None) 

255 The function to be decorated. If None, ``_dispatchable`` returns a 

256 partial object that can be used to decorate a function later. If ``func`` 

257 is a callable, returns a new callable object that dispatches to a backend 

258 function based on input graph types. 

259 

260 name : str, optional (default: name of `func`) 

261 The dispatch name for the function. It defaults to the name of `func`, 

262 but can be set manually to avoid conflicts in the global dispatch 

263 namespace. A common pattern is to prefix the function name with its 

264 module or submodule to make it unique. For example: 

265 

266 - ``@_dispatchable(name="tournament_is_strongly_connected")`` 

267 resolves conflict between ``nx.tournament.is_strongly_connected`` 

268 and ``nx.is_strongly_connected``. 

269 - ``@_dispatchable(name="approximate_node_connectivity")`` 

270 resolves conflict between ``nx.approximation.node_connectivity`` 

271 and ``nx.connectivity.node_connectivity``. 

272 

273 graphs : str or dict or None, optional (default: "G") 

274 If a string, the parameter name of the graph, which must be the first 

275 argument of the wrapped function. If more than one graph is required 

276 for the function (or if the graph is not the first argument), provide 

277 a dict keyed by graph parameter name to that parameter's position.

278 A question mark in the name indicates an optional argument. 

279 For example, ``@_dispatchable(graphs={"G": 0, "auxiliary?": 4})`` 

280 indicates the 0th parameter ``G`` of the function is a required graph, 

281 and the 4th parameter ``auxiliary?`` is an optional graph. 

282 To indicate that an argument is a list of graphs, do ``"[graphs]"``. 

283 Use ``graphs=None``, if *no* arguments are NetworkX graphs such as for 

284 graph generators, readers, and conversion functions. 

285 

286 edge_attrs : str or dict, optional (default: None) 

287 ``edge_attrs`` holds information about edge attribute arguments 

288 and default values for those edge attributes. 

289 If a string, ``edge_attrs`` holds the function argument name that 

290 indicates a single edge attribute to include in the converted graph. 

291 The default value for this attribute is 1. To indicate that an argument 

292 is a list of attributes (all with default value 1), use e.g. ``"[attrs]"``. 

293 If a dict, ``edge_attrs`` holds a dict keyed by argument names, with 

294 values that are either the default value or, if a string, the argument 

295 name that indicates the default value. 

296 If None, function does not use edge attributes. 

297 

298 node_attrs : str or dict, optional 

299 Like ``edge_attrs``, but for node attributes. 

300 

301 preserve_edge_attrs : bool or str or dict, optional (default: False) 

302 If bool, whether to preserve all edge attributes. 

303 If a string, the parameter name that may indicate (with ``True`` or a 

304 callable argument) whether all edge attributes should be preserved 

305 when converting graphs to a backend graph type. 

306 If a dict of form ``{graph_name: {attr: default}}``, indicate 

307 pre-determined edge attributes (and defaults) to preserve for the 

308 indicated input graph. 

309 

310 preserve_node_attrs : bool or str or dict, optional (default: False) 

311 Like ``preserve_edge_attrs``, but for node attributes. 

312 

313 preserve_graph_attrs : bool or set, optional (default: False) 

314 If bool, whether to preserve all graph attributes. 

315 If a set, the input graph arguments for which to preserve graph attributes.

316 

317 preserve_all_attrs : bool, optional (default: False) 

318 Whether to preserve all edge, node and graph attributes. 

319 If True, this overrides all the other preserve_*_attrs. 

320 

321 mutates_input : bool or dict, optional (default: False) 

322 If bool, whether the function mutates an input graph argument. 

323 If dict of ``{arg_name: arg_pos}``, name and position of bool arguments 

324 that indicate whether an input graph will be mutated, and ``arg_name`` 

325 may begin with ``"not "`` to negate the logic (for example, ``"not copy"`` 

326 means we mutate the input graph when the ``copy`` argument is False). 

327 By default, dispatching doesn't convert input graphs to a different 

328 backend for functions that mutate input graphs. 

329 

330 returns_graph : bool, optional (default: False) 

331 Whether the function can return or yield a graph object. By default, 

332 dispatching doesn't convert input graphs to a different backend for 

333 functions that return graphs. 

334 

335 implemented_by_nx : bool, optional (default: True) 

336 Whether the function is implemented by NetworkX. If it is not, then the 

337 function is included in NetworkX only as an API to dispatch to backends. 

338 Default is True. 

339 """ 

340 if func is None: 

341 return partial( 

342 _dispatchable, 

343 name=name, 

344 graphs=graphs, 

345 edge_attrs=edge_attrs, 

346 node_attrs=node_attrs, 

347 preserve_edge_attrs=preserve_edge_attrs, 

348 preserve_node_attrs=preserve_node_attrs, 

349 preserve_graph_attrs=preserve_graph_attrs, 

350 preserve_all_attrs=preserve_all_attrs, 

351 mutates_input=mutates_input, 

352 returns_graph=returns_graph, 

353 implemented_by_nx=implemented_by_nx, 

354 ) 

355 if isinstance(func, str): 

356 raise TypeError("'name' and 'graphs' must be passed by keyword") from None 

357 # If name not provided, use the name of the function 

358 if name is None: 

359 name = func.__name__ 

360 

361 self = object.__new__(cls) 

362 

363 # standard function-wrapping stuff 

364 # __annotations__ not used 

365 self.__name__ = func.__name__ 

366 # self.__doc__ = func.__doc__ # __doc__ handled as cached property 

367 self.__defaults__ = func.__defaults__ 

368 # Add `backend=` keyword argument to allow backend choice at call-time 

369 if func.__kwdefaults__: 

370 self.__kwdefaults__ = {**func.__kwdefaults__, "backend": None} 

371 else: 

372 self.__kwdefaults__ = {"backend": None} 

373 self.__module__ = func.__module__ 

374 self.__qualname__ = func.__qualname__ 

375 self.__dict__.update(func.__dict__) 

376 self.__wrapped__ = func 

377 

378 # Supplement docstring with backend info; compute and cache when needed 

379 self._orig_doc = func.__doc__ 

380 self._cached_doc = None 

381 

382 self.orig_func = func 

383 self.name = name 

384 self.edge_attrs = edge_attrs 

385 self.node_attrs = node_attrs 

386 self.preserve_edge_attrs = preserve_edge_attrs or preserve_all_attrs 

387 self.preserve_node_attrs = preserve_node_attrs or preserve_all_attrs 

388 self.preserve_graph_attrs = preserve_graph_attrs or preserve_all_attrs 

389 self.mutates_input = mutates_input 

390 # Keep `returns_graph` private for now, b/c we may extend info on return types 

391 self._returns_graph = returns_graph 

392 

393 if edge_attrs is not None and not isinstance(edge_attrs, str | dict): 

394 raise TypeError( 

395 f"Bad type for edge_attrs: {type(edge_attrs)}. Expected str or dict." 

396 ) from None 

397 if node_attrs is not None and not isinstance(node_attrs, str | dict): 

398 raise TypeError( 

399 f"Bad type for node_attrs: {type(node_attrs)}. Expected str or dict." 

400 ) from None 

401 if not isinstance(self.preserve_edge_attrs, bool | str | dict): 

402 raise TypeError( 

403 f"Bad type for preserve_edge_attrs: {type(self.preserve_edge_attrs)}." 

404 " Expected bool, str, or dict." 

405 ) from None 

406 if not isinstance(self.preserve_node_attrs, bool | str | dict): 

407 raise TypeError( 

408 f"Bad type for preserve_node_attrs: {type(self.preserve_node_attrs)}." 

409 " Expected bool, str, or dict." 

410 ) from None 

411 if not isinstance(self.preserve_graph_attrs, bool | set): 

412 raise TypeError( 

413 f"Bad type for preserve_graph_attrs: {type(self.preserve_graph_attrs)}." 

414 " Expected bool or set." 

415 ) from None 

416 if not isinstance(self.mutates_input, bool | dict): 

417 raise TypeError( 

418 f"Bad type for mutates_input: {type(self.mutates_input)}." 

419 " Expected bool or dict." 

420 ) from None 

421 if not isinstance(self._returns_graph, bool): 

422 raise TypeError( 

423 f"Bad type for returns_graph: {type(self._returns_graph)}." 

424 " Expected bool." 

425 ) from None 

426 

427 if isinstance(graphs, str): 

428 graphs = {graphs: 0} 

429 elif graphs is None: 

430 pass 

431 elif not isinstance(graphs, dict): 

432 raise TypeError( 

433 f"Bad type for graphs: {type(graphs)}. Expected str or dict." 

434 ) from None 

435 elif len(graphs) == 0: 

436 raise KeyError("'graphs' must contain at least one variable name") from None 

437 

438 # This dict comprehension is complicated for better performance; equivalent shown below. 

439 self.optional_graphs = set() 

440 self.list_graphs = set() 

441 if graphs is None: 

442 self.graphs = {} 

443 else: 

444 self.graphs = { 

445 self.optional_graphs.add(val := k[:-1]) or val 

446 if (last := k[-1]) == "?" 

447 else self.list_graphs.add(val := k[1:-1]) or val 

448 if last == "]" 

449 else k: v 

450 for k, v in graphs.items() 

451 } 

452 # The above is equivalent to: 

453 # self.optional_graphs = {k[:-1] for k in graphs if k[-1] == "?"} 

454 # self.list_graphs = {k[1:-1] for k in graphs if k[-1] == "]"} 

455 # self.graphs = {k[:-1] if k[-1] == "?" else k[1:-1] if k[-1] == "]" else k: v for k, v in graphs.items()}
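# A concrete sketch (hypothetical parameter names):
#     graphs={"G": 0, "auxiliary?": 4, "[graphs]": 1}
# produces self.graphs == {"G": 0, "auxiliary": 4, "graphs": 1},
# self.optional_graphs == {"auxiliary"}, and self.list_graphs == {"graphs"}.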

456 

457 # Compute and cache the signature on-demand 

458 self._sig = None 

459 

460 # Which backends implement this function? 

461 self.backends = { 

462 backend 

463 for backend, info in backend_info.items() 

464 if "functions" in info and name in info["functions"] 

465 } 

466 if implemented_by_nx: 

467 self.backends.add("networkx") 

468 

469 if name in _registered_algorithms: 

470 raise KeyError( 

471 f"Algorithm already exists in dispatch namespace: {name}. " 

472 "Fix by assigning a unique `name=` in the `@_dispatchable` decorator." 

473 ) from None 

474 # Use the `argmap` decorator to turn `self` into a function. This does result 

475 # in small additional overhead compared to calling `_dispatchable` directly, 

476 # but `argmap` has the property that it can stack with other `argmap` 

477 # decorators "for free". Being a function is better for REPRs and type-checkers. 

478 self = argmap(_do_nothing)(self) 

479 _registered_algorithms[name] = self 

480 return self 

481 

482 @property 

483 def __doc__(self): 

484 """If the cached documentation exists, it is returned. 

485 Otherwise, the documentation is generated using _make_doc() method, 

486 cached, and then returned.""" 

487 

488 rv = self._cached_doc 

489 if rv is None: 

490 rv = self._cached_doc = self._make_doc() 

491 return rv 

492 

493 @__doc__.setter 

494 def __doc__(self, val): 

495 """Sets the original documentation to the given value and resets the 

496 cached documentation.""" 

497 

498 self._orig_doc = val 

499 self._cached_doc = None 

500 

501 @property 

502 def __signature__(self): 

503 """Return the signature of the original function, with the addition of 

504 the `backend` and `backend_kwargs` parameters.""" 

505 

506 if self._sig is None: 

507 sig = inspect.signature(self.orig_func) 

508 # `backend` is now a reserved argument used by dispatching. 

509 # assert "backend" not in sig.parameters 

510 if not any( 

511 p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values() 

512 ): 

513 sig = sig.replace( 

514 parameters=[ 

515 *sig.parameters.values(), 

516 inspect.Parameter( 

517 "backend", inspect.Parameter.KEYWORD_ONLY, default=None 

518 ), 

519 inspect.Parameter( 

520 "backend_kwargs", inspect.Parameter.VAR_KEYWORD 

521 ), 

522 ] 

523 ) 

524 else: 

525 *parameters, var_keyword = sig.parameters.values() 

526 sig = sig.replace( 

527 parameters=[ 

528 *parameters, 

529 inspect.Parameter( 

530 "backend", inspect.Parameter.KEYWORD_ONLY, default=None 

531 ), 

532 var_keyword, 

533 ] 

534 ) 

535 self._sig = sig 

536 return self._sig 
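# For example (a sketch): a wrapped function defined as
#     def pagerank(G, alpha=0.85): ...
# reports the signature ``(G, alpha=0.85, *, backend=None, **backend_kwargs)``,
# while a function that already accepts ``**kwargs`` keeps its var-keyword
# parameter last, with ``backend`` inserted just before it.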

537 

538 # Fast, simple path if no backends are installed 

539 def _call_if_no_backends_installed(self, /, *args, backend=None, **kwargs): 

540 """Returns the result of the original function (no backends installed).""" 

541 if backend is not None and backend != "networkx": 

542 raise ImportError(f"'{backend}' backend is not installed") 

543 if "networkx" not in self.backends: 

544 raise NotImplementedError( 

545 f"'{self.name}' is not implemented by 'networkx' backend. " 

546 "This function is included in NetworkX as an API to dispatch to " 

547 "other backends." 

548 ) 

549 return self.orig_func(*args, **kwargs) 

550 

551 # Dispatch to backends based on inputs, `backend=` arg, or configuration 

552 def _call_if_any_backends_installed(self, /, *args, backend=None, **kwargs): 

553 """Returns the result of the original function, or the backend function if 

554 the backend is specified and that backend implements `func`.""" 

555 # Use `backend_name` in this function instead of `backend`. 

556 # This is purely for aesthetics and to make it easier to search for this 

557 # variable since "backend" is used in many comments and log/error messages. 

558 backend_name = backend 

559 if backend_name is not None and backend_name not in backend_info: 

560 raise ImportError(f"'{backend_name}' backend is not installed") 

561 

562 graphs_resolved = {} 

563 for gname, pos in self.graphs.items(): 

564 if pos < len(args): 

565 if gname in kwargs: 

566 raise TypeError(f"{self.name}() got multiple values for {gname!r}") 

567 graph = args[pos] 

568 elif gname in kwargs: 

569 graph = kwargs[gname] 

570 elif gname not in self.optional_graphs: 

571 raise TypeError( 

572 f"{self.name}() missing required graph argument: {gname}" 

573 ) 

574 else: 

575 continue 

576 if graph is None: 

577 if gname not in self.optional_graphs: 

578 raise TypeError( 

579 f"{self.name}() required graph argument {gname!r} is None; must be a graph" 

580 ) 

581 else: 

582 graphs_resolved[gname] = graph 

583 

584 # Alternative to the above that does not check duplicated args or missing required graphs. 

585 # graphs_resolved = { 

586 # gname: graph 

587 # for gname, pos in self.graphs.items() 

588 # if (graph := args[pos] if pos < len(args) else kwargs.get(gname)) is not None 

589 # } 

590 

591 # Check if any graph comes from a backend 

592 if self.list_graphs: 

593 # Make sure we don't lose values by consuming an iterator 

594 args = list(args) 

595 for gname in self.list_graphs & graphs_resolved.keys(): 

596 list_of_graphs = list(graphs_resolved[gname]) 

597 graphs_resolved[gname] = list_of_graphs 

598 if gname in kwargs: 

599 kwargs[gname] = list_of_graphs 

600 else: 

601 args[self.graphs[gname]] = list_of_graphs 

602 

603 graph_backend_names = { 

604 getattr(g, "__networkx_backend__", None) 

605 for gname, g in graphs_resolved.items() 

606 if gname not in self.list_graphs 

607 } 

608 for gname in self.list_graphs & graphs_resolved.keys(): 

609 graph_backend_names.update( 

610 getattr(g, "__networkx_backend__", None) 

611 for g in graphs_resolved[gname] 

612 ) 

613 else: 

614 graph_backend_names = { 

615 getattr(g, "__networkx_backend__", None) 

616 for g in graphs_resolved.values() 

617 } 

618 

619 backend_priority = nx.config.backend_priority.get( 

620 self.name, 

621 nx.config.backend_priority.generators 

622 if self._returns_graph 

623 else nx.config.backend_priority.algos, 

624 ) 

625 fallback_to_nx = nx.config.fallback_to_nx and "networkx" in self.backends 

626 if self._is_testing and backend_priority and backend_name is None: 

627 # Special path if we are running networkx tests with a backend. 

628 # This even runs for (and handles) functions that mutate input graphs. 

629 return self._convert_and_call_for_tests( 

630 backend_priority[0], 

631 args, 

632 kwargs, 

633 fallback_to_nx=fallback_to_nx, 

634 ) 

635 

636 graph_backend_names.discard(None) 

637 if backend_name is not None: 

638 # Must run with the given backend. 

639 # `can_run` only used for better log and error messages. 

640 # Check `mutates_input` for logging, not behavior. 

641 backend_kwarg_msg = ( 

642 "No other backends will be attempted, because the backend was " 

643 f"specified with the `backend='{backend_name}'` keyword argument." 

644 ) 

645 extra_message = ( 

646 f"'{backend_name}' backend raised NotImplementedError when calling " 

647 f"'{self.name}'. {backend_kwarg_msg}" 

648 ) 

649 if not graph_backend_names or graph_backend_names == {backend_name}: 

650 # All graphs are backend graphs--no need to convert! 

651 if self._can_backend_run(backend_name, args, kwargs): 

652 return self._call_with_backend( 

653 backend_name, args, kwargs, extra_message=extra_message 

654 ) 

655 if self._does_backend_have(backend_name): 

656 extra = " for the given arguments" 

657 else: 

658 extra = "" 

659 raise NotImplementedError( 

660 f"'{self.name}' is not implemented by '{backend_name}' backend" 

661 f"{extra}. {backend_kwarg_msg}" 

662 ) 

663 if self._can_convert(backend_name, graph_backend_names): 

664 if self._can_backend_run(backend_name, args, kwargs): 

665 if self._will_call_mutate_input(args, kwargs): 

666 _logger.debug( 

667 "'%s' will mutate an input graph. This prevents automatic conversion " 

668 "to, and use of, backends listed in `nx.config.backend_priority`. " 

669 "Using backend specified by the " 

670 "`backend='%s'` keyword argument. This may change behavior by not " 

671 "mutating inputs.", 

672 self.name, 

673 backend_name, 

674 ) 

675 mutations = [] 

676 else: 

677 mutations = None 

678 rv = self._convert_and_call( 

679 backend_name, 

680 graph_backend_names, 

681 args, 

682 kwargs, 

683 extra_message=extra_message, 

684 mutations=mutations, 

685 ) 

686 if mutations: 

687 for cache, key in mutations: 

688 # If the call mutates inputs, then remove all inputs gotten 

689 # from cache. We do this after all conversions (and call) so 

690 # that a graph can be gotten from a cache multiple times. 

691 cache.pop(key, None) 

692 return rv 

693 if self._does_backend_have(backend_name): 

694 extra = " for the given arguments" 

695 else: 

696 extra = "" 

697 raise NotImplementedError( 

698 f"'{self.name}' is not implemented by '{backend_name}' backend" 

699 f"{extra}. {backend_kwarg_msg}" 

700 ) 

701 if len(graph_backend_names) == 1: 

702 maybe_s = "" 

703 graph_backend_names = f"'{next(iter(graph_backend_names))}'" 

704 else: 

705 maybe_s = "s" 

706 raise TypeError( 

707 f"'{self.name}' is unable to convert graph from backend{maybe_s} " 

708 f"{graph_backend_names} to '{backend_name}' backend, which was " 

709 f"specified with the `backend='{backend_name}'` keyword argument. " 

710 f"{backend_kwarg_msg}" 

711 ) 

712 

713 if self._will_call_mutate_input(args, kwargs): 

714 # The current behavior for functions that mutate input graphs: 

715 # 

716 # 1. If backend is specified by `backend=` keyword, use it (done above). 

717 # 2. If inputs are from one backend, try to use it. 

718 # 3. If all input graphs are instances of `nx.Graph`, then run with the 

719 # default "networkx" implementation. 

720 # 

721 # Do not automatically convert if a call will mutate inputs, because doing 

722 # so would change behavior. Hence, we should fail if there are multiple input 

723 # backends or if the input backend does not implement the function. However, 

724 # we offer a way for backends to circumvent this if they do not implement 

725 # this function: we will fall back to the default "networkx" implementation 

726 # without using conversions if all input graphs are subclasses of `nx.Graph`. 

727 mutate_msg = ( 

728 "conversions between backends (if configured) will not be attempted " 

729 "because the original input graph would not be mutated. Using the " 

730 "backend keyword e.g. `backend='some_backend'` will force conversions " 

731 "and not mutate the original input graph." 

732 ) 

733 fallback_msg = ( 

734 "This call will mutate inputs, so fall back to 'networkx' " 

735 "backend (without converting) since all input graphs are " 

736 "instances of nx.Graph and are hopefully compatible." 

737 ) 

738 if len(graph_backend_names) == 1: 

739 [backend_name] = graph_backend_names 

740 msg_template = ( 

741 f"Backend '{backend_name}' does not implement '{self.name}'%s. " 

742 f"This call will mutate an input, so automatic {mutate_msg}" 

743 ) 

744 # `can_run` is only used for better log and error messages 

745 try: 

746 if self._can_backend_run(backend_name, args, kwargs): 

747 return self._call_with_backend( 

748 backend_name, 

749 args, 

750 kwargs, 

751 extra_message=msg_template % " with these arguments", 

752 ) 

753 except NotImplementedError as exc: 

754 if all(isinstance(g, nx.Graph) for g in graphs_resolved.values()): 

755 _logger.debug( 

756 "Backend '%s' raised when calling '%s': %s. %s", 

757 backend_name, 

758 self.name, 

759 exc, 

760 fallback_msg, 

761 ) 

762 else: 

763 raise 

764 else: 

765 if fallback_to_nx and all( 

766 # Consider dropping the `isinstance` check here to allow 

767 # duck-type graphs, but let's wait for a backend to ask us. 

768 isinstance(g, nx.Graph) 

769 for g in graphs_resolved.values() 

770 ): 

771 # Log that we are falling back to networkx 

772 _logger.debug( 

773 "Backend '%s' can't run '%s'. %s", 

774 backend_name, 

775 self.name, 

776 fallback_msg, 

777 ) 

778 else: 

779 if self._does_backend_have(backend_name): 

780 extra = " with these arguments" 

781 else: 

782 extra = "" 

783 raise NotImplementedError(msg_template % extra) 

784 elif fallback_to_nx and all( 

785 # Consider dropping the `isinstance` check here to allow 

786 # duck-type graphs, but let's wait for a backend to ask us. 

787 isinstance(g, nx.Graph) 

788 for g in graphs_resolved.values() 

789 ): 

790 # Log that we are falling back to networkx 

791 _logger.debug( 

792 "'%s' was called with inputs from multiple backends: %s. %s", 

793 self.name, 

794 graph_backend_names, 

795 fallback_msg, 

796 ) 

797 else: 

798 raise RuntimeError( 

799 f"'{self.name}' will mutate an input, but it was called with " 

800 f"inputs from multiple backends: {graph_backend_names}. " 

801 f"Automatic {mutate_msg}" 

802 ) 

803 # At this point, no backends are available to handle the call with 

804 # the input graph types, but if the input graphs are compatible 

805 # nx.Graph instances, fall back to networkx without converting. 

806 return self.orig_func(*args, **kwargs) 

807 

808 # We may generalize fallback configuration as e.g. `nx.config.backend_fallback` 

809 if fallback_to_nx or not graph_backend_names: 

810 # Use "networkx" by default if there are no inputs from backends. 

811 # For example, graph generators should probably return NetworkX graphs 

812 # instead of raising NotImplementedError. 

813 backend_fallback = ["networkx"] 

814 else: 

815 backend_fallback = [] 

816 

817 # ########################## 

818 # # How this behaves today # 

819 # ########################## 

820 # 

821 # The prose below describes the implementation and a *possible* way to 

822 # generalize "networkx" as "just another backend". The code is structured 

823 # to perhaps someday support backend-to-backend conversions (including 

824 # simply passing objects from one backend directly to another backend; 

825 # the dispatch machinery does not necessarily need to perform conversions), 

826 # but since backend-to-backend matching is not yet supported, the following 

827 # code is merely a convenient way to implement dispatch behaviors that have 

828 # been carefully developed since NetworkX 3.0 and to include falling back 

829 # to the default NetworkX implementation. 

830 # 

831 # The current behavior for functions that don't mutate input graphs: 

832 # 

833 # 1. If backend is specified by `backend=` keyword, use it (done above). 

834 # 2. If input is from a backend other than "networkx", try to use it. 

835 # - Note: if present, "networkx" graphs will be converted to the backend. 

836 # 3. If input is from "networkx" (or no backend), try to use backends from 

837 # `backend_priority` before running with the default "networkx" implementation. 

838 # 4. If configured, "fall back" and run with the default "networkx" implementation. 

839 # 

840 # ################################################ 

841 # # How this is implemented and may work someday # 

842 # ################################################ 

843 # 

844 # Let's determine the order of backends we should try according 

845 # to `backend_priority`, `backend_fallback`, and input backends. 

846 # There are two† dimensions of priorities to consider: 

847 # backend_priority > unspecified > backend_fallback 

848 # and 

849 # backend of an input > not a backend of an input 

850 # These are combined to form five groups of priorities as such: 

851 # 

852 # input ~input 

853 # +-------+-------+ 

854 # backend_priority | 1 | 2 | 

855 # unspecified | 3 | N/A | (if only 1) 

856 # backend_fallback | 4 | 5 | 

857 # +-------+-------+ 

858 # 

859 # This matches the behaviors we developed in versions 3.0 to 3.2, it 

860 # ought to cover virtually all use cases we expect, and I (@eriknw) don't 

861 # think it can be done any simpler (although it can be generalized further 

862 # and made to be more complicated to capture 100% of *possible* use cases). 

863 # Some observations: 

864 # 

865 # 1. If an input is in `backend_priority`, it will be used before trying a 

866 # backend that is higher priority in `backend_priority` and not an input. 

867 # 2. To prioritize converting from one backend to another even if both implement 

868 # a function, list one in `backend_priority` and one in `backend_fallback`. 

869 # 3. To disable conversions, set `backend_priority` and `backend_fallback` to []. 

870 # 

871 # †: There is actually a third dimension of priorities: 

872 # should_run == True > should_run == False 

873 # Backends with `can_run == True` and `should_run == False` are tried last. 

874 # 

875 seen = set() 

876 group1 = [] # In backend_priority, and an input 

877 group2 = [] # In backend_priority, but not an input 

878 for name in backend_priority: 

879 if name in seen: 

880 continue 

881 seen.add(name) 

882 if name in graph_backend_names: 

883 group1.append(name) 

884 else: 

885 group2.append(name) 

886 group4 = [] # In backend_fallback, and an input 

887 group5 = [] # In backend_fallback, but not an input 

888 for name in backend_fallback: 

889 if name in seen: 

890 continue 

891 seen.add(name) 

892 if name in graph_backend_names: 

893 group4.append(name) 

894 else: 

895 group5.append(name) 

896 # An input, but not in backend_priority or backend_fallback. 

897 group3 = graph_backend_names - seen 

898 if len(group3) > 1: 

899 # `group3` backends are not configured for automatic conversion or fallback. 

900 # There are at least two issues if this group contains multiple backends: 

901 # 

902 # 1. How should we prioritize them? We have no good way to break ties. 

903 # Although we could arbitrarily choose alphabetical or left-most, 

904 # let's follow the Zen of Python and refuse the temptation to guess. 

905 # 2. We probably shouldn't automatically convert to these backends, 

906 # because we are not configured to do so. 

907 # 

908 # (2) is important to allow disabling all conversions by setting both 

909 # `nx.config.backend_priority` and `nx.config.backend_fallback` to []. 

910 # 

911 # If there is a single backend in `group3`, then giving it priority over 

912 # the fallback backends is what is generally expected. For example, this 

913 # allows input graphs of `backend_fallback` backends (such as "networkx") 

914 # to be converted to, and run with, the unspecified backend. 

915 _logger.debug( 

916 "Call to '%s' has inputs from multiple backends, %s, that " 

917 "have no priority set in `nx.config.backend_priority`, " 

918 "so automatic conversions to " 

919 "these backends will not be attempted.", 

920 self.name, 

921 group3, 

922 ) 

923 group3 = () 

924 

925 try_order = list(itertools.chain(group1, group2, group3, group4, group5)) 
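# A concrete sketch (hypothetical backend names): with
# nx.config.backend_priority == ["parallel"], fallback enabled (so
# backend_fallback == ["networkx"]), and a single input graph from "cugraph",
# the groups are group1=[], group2=["parallel"], group3={"cugraph"},
# group4=[], group5=["networkx"], giving
# try_order == ["parallel", "cugraph", "networkx"].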

926 if len(try_order) > 1: 

927 # Should we consider adding an option for more verbose logging? 

928 # For example, we could explain the order of `try_order` in detail. 

929 _logger.debug( 

930 "Call to '%s' has inputs from %s backends, and will try to use " 

931 "backends in the following order: %s", 

932 self.name, 

933 graph_backend_names or "no", 

934 try_order, 

935 ) 

936 backends_to_try_again = [] 

937 for is_not_first, backend_name in enumerate(try_order): 

938 if is_not_first: 

939 _logger.debug("Trying next backend: '%s'", backend_name) 

940 try: 

941 if not graph_backend_names or graph_backend_names == {backend_name}: 

942 if self._can_backend_run(backend_name, args, kwargs): 

943 return self._call_with_backend(backend_name, args, kwargs) 

944 elif self._can_convert( 

945 backend_name, graph_backend_names 

946 ) and self._can_backend_run(backend_name, args, kwargs): 

947 if self._should_backend_run(backend_name, args, kwargs): 

948 rv = self._convert_and_call( 

949 backend_name, graph_backend_names, args, kwargs 

950 ) 

951 if ( 

952 self._returns_graph 

953 and graph_backend_names 

954 and backend_name not in graph_backend_names 

955 ): 

956 # If the function has graph inputs and graph output, we try 

957 # to make it so the backend of the return type will match the 

958 # backend of the input types. In case this is not possible, 

959 # let's tell the user that the backend of the return graph 

960 # has changed. Perhaps we could try to convert back, but 

961 # "fallback" backends for graph generators should typically 

962 # be compatible with NetworkX graphs. 

963 _logger.debug( 

964 "Call to '%s' is returning a graph from a different " 

965 "backend! It has inputs from %s backends, but ran with " 

966 "'%s' backend and is returning graph from '%s' backend", 

967 self.name, 

968 graph_backend_names, 

969 backend_name, 

970 backend_name, 

971 ) 

972 return rv 

973 # `should_run` is False, but `can_run` is True, so try again later 

974 backends_to_try_again.append(backend_name) 

975 except NotImplementedError as exc: 

976 _logger.debug( 

977 "Backend '%s' raised when calling '%s': %s", 

978 backend_name, 

979 self.name, 

980 exc, 

981 ) 

982 

983 # We are about to fail. Let's try backends with can_run=True and should_run=False. 

984 # This is unlikely to help today since we try to run with "networkx" before this. 

985 for backend_name in backends_to_try_again: 

986 _logger.debug( 

987 "Trying backend: '%s' (ignoring `should_run=False`)", backend_name 

988 ) 

989 try: 

990 rv = self._convert_and_call( 

991 backend_name, graph_backend_names, args, kwargs 

992 ) 

993 if ( 

994 self._returns_graph 

995 and graph_backend_names 

996 and backend_name not in graph_backend_names 

997 ): 

998 _logger.debug( 

999 "Call to '%s' is returning a graph from a different " 

1000 "backend! It has inputs from %s backends, but ran with " 

1001 "'%s' backend and is returning graph from '%s' backend", 

1002 self.name, 

1003 graph_backend_names, 

1004 backend_name, 

1005 backend_name, 

1006 ) 

1007 return rv 

1008 except NotImplementedError as exc: 

1009 _logger.debug( 

1010 "Backend '%s' raised when calling '%s': %s", 

1011 backend_name, 

1012 self.name, 

1013 exc, 

1014 ) 

1015 # As a final effort, we could try to convert and run with `group3` backends 

1016 # that we discarded when `len(group3) > 1`, but let's not consider doing 

1017 # so until there is a reasonable request for it. 

1018 

1019 if len(unspecified_backends := graph_backend_names - seen) > 1: 

1020 raise TypeError( 

1021 f"Unable to convert inputs from {graph_backend_names} backends and " 

1022 f"run '{self.name}'. NetworkX is configured to automatically convert " 

1023 f"to {try_order} backends. To remedy this, you may enable automatic " 

1024 f"conversion to {unspecified_backends} backends by adding them to " 

1025 "`nx.config.backend_priority`, or you " 

1026 "may specify a backend to use with the `backend=` keyword argument." 

1027 ) 

1028 if "networkx" not in self.backends: 

1029 extra = ( 

1030 " This function is included in NetworkX as an API to dispatch to " 

1031 "other backends." 

1032 ) 

1033 else: 

1034 extra = "" 

1035 raise NotImplementedError( 

1036 f"'{self.name}' is not implemented by {try_order} backends. To remedy " 

1037 "this, you may enable automatic conversion to more backends (including " 

1038 "'networkx') by adding them to `nx.config.backend_priority`, " 

1039 "or you may specify a backend to use with " 

1040 f"the `backend=` keyword argument.{extra}" 

1041 ) 

1042 

1043 # Dispatch only if there exist any installed backend(s) 

1044 __call__: typing.Callable = ( 

1045 _call_if_any_backends_installed if backends else _call_if_no_backends_installed 

1046 ) 

1047 

1048 def _will_call_mutate_input(self, args, kwargs): 

1049 # Fairly few nx functions mutate the input graph. Most that do, always do. 

1050 # So a boolean input indicates "always" or "never". 

1051 if isinstance((mutates_input := self.mutates_input), bool): 

1052 return mutates_input 

1053 

1054 # The ~10 other nx functions either use "copy=True" to control mutation or 

1055 # an arg naming an edge/node attribute to mutate (None means no mutation). 

1056 # Now `mutates_input` is a dict keyed by arg_name to its func-sig position. 

1057 # The `copy=` args are keyed as "not copy" to mean "negate the copy argument". 

1058 # Keys w/o "not " mean the call mutates only when the arg value `is not None`. 

1059 # 

1060 # This section might need different code if new functions mutate in new ways. 

1061 # 

1062 # NetworkX doesn't have any `mutates_input` dicts with more than 1 item. 

1063 # But we treat it like it might have more than 1 item for generality. 

1064 n = len(args) 

1065 return any( 

1066 (args[arg_pos] if n > arg_pos else kwargs.get(arg_name)) is not None 

1067 if not arg_name.startswith("not ") 

1068 # This assumes that e.g. `copy=True` is the default 

1069 else not (args[arg_pos] if n > arg_pos else kwargs.get(arg_name[4:], True)) 

1070 for arg_name, arg_pos in mutates_input.items() 

1071 ) 
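# A sketch of the two dict forms (hypothetical signatures):
#   mutates_input={"not copy": 3}: a call like func(G, a, b, copy=False)
#     mutates its input, while func(G, a, b) (copy defaulting to True) does not.
#   mutates_input={"attr": 1}: func(G, "weight") mutates (attr is not None),
#     while func(G, None) does not.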

1072 

1073 def _can_convert(self, backend_name, graph_backend_names): 

1074 # Backend-to-backend conversion not supported yet. 

1075 # We can only convert to and from networkx. 

1076 rv = backend_name == "networkx" or graph_backend_names.issubset( 

1077 {"networkx", backend_name} 

1078 ) 

1079 if not rv: 

1080 _logger.debug( 

1081 "Unable to convert from %s backends to '%s' backend", 

1082 graph_backend_names, 

1083 backend_name, 

1084 ) 

1085 return rv 
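# For example (hypothetical backend names): inputs from {"networkx", "cugraph"}
# can be converted for the "cugraph" backend, and any inputs can be converted
# for the "networkx" backend, but {"cugraph"} inputs cannot (yet) be converted
# for a different "parallel" backend.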

1086 

1087 def _does_backend_have(self, backend_name): 

1088 """Does the specified backend have this algorithm?""" 

1089 if backend_name == "networkx": 

1090 return "networkx" in self.backends 

1091 # Inspect the backend; don't trust metadata used to create `self.backends` 

1092 backend = _load_backend(backend_name) 

1093 return hasattr(backend, self.name) 

1094 

1095 def _can_backend_run(self, backend_name, args, kwargs): 

1096 """Can the specified backend run this algorithm with these arguments?""" 

1097 if backend_name == "networkx": 

1098 return "networkx" in self.backends 

1099 backend = _load_backend(backend_name) 

1100 # `backend.can_run` and `backend.should_run` may return strings that describe 

1101 # why they can't or shouldn't be run. 

1102 if not hasattr(backend, self.name): 

1103 _logger.debug( 

1104 "Backend '%s' does not implement '%s'", backend_name, self.name 

1105 ) 

1106 return False 

1107 can_run = backend.can_run(self.name, args, kwargs) 

1108 if isinstance(can_run, str) or not can_run: 

1109 reason = f", because: {can_run}" if isinstance(can_run, str) else "" 

1110 _logger.debug( 

1111 "Backend '%s' can't run `%s` with arguments: %s%s", 

1112 backend_name, 

1113 self.name, 

1114 _LazyArgsRepr(self, args, kwargs), 

1115 reason, 

1116 ) 

1117 return False 

1118 return True 
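# For example (a hypothetical backend response): ``can_run`` may return a
# string such as "multigraph is not supported"; any string, even a truthy
# non-empty one, is treated above as "cannot run" and logged as the reason.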

1119 

1120 def _should_backend_run(self, backend_name, args, kwargs): 

1121 """Should the specified backend run this algorithm with these arguments? 

1122 

1123 Note that this does not check ``backend.can_run``. 

1124 """ 

1125 # `backend.can_run` and `backend.should_run` may return strings that describe 

1126 # why they can't or shouldn't be run. 

1127 # `_should_backend_run` may assume that `_can_backend_run` returned True. 

1128 if backend_name == "networkx": 

1129 return True 

1130 backend = _load_backend(backend_name) 

1131 should_run = backend.should_run(self.name, args, kwargs) 

1132 if isinstance(should_run, str) or not should_run: 

1133 reason = f", because: {should_run}" if isinstance(should_run, str) else "" 

1134 _logger.debug( 

1135 "Backend '%s' shouldn't run `%s` with arguments: %s%s", 

1136 backend_name, 

1137 self.name, 

1138 _LazyArgsRepr(self, args, kwargs), 

1139 reason, 

1140 ) 

1141 return False 

1142 return True 

1143 

1144 def _convert_arguments(self, backend_name, args, kwargs, *, use_cache, mutations): 

1145 """Convert graph arguments to the specified backend. 

1146 

1147 Returns 

1148 ------- 

1149 args tuple and kwargs dict 

1150 """ 

1151 bound = self.__signature__.bind(*args, **kwargs) 

1152 bound.apply_defaults() 

1153 if not self.graphs: 

1154 bound_kwargs = bound.kwargs 

1155 del bound_kwargs["backend"] 

1156 return bound.args, bound_kwargs 

1157 if backend_name == "networkx": 

1158 # `backend_interface.convert_from_nx` preserves everything 

1159 preserve_edge_attrs = preserve_node_attrs = preserve_graph_attrs = True 

1160 else: 

1161 preserve_edge_attrs = self.preserve_edge_attrs 

1162 preserve_node_attrs = self.preserve_node_attrs 

1163 preserve_graph_attrs = self.preserve_graph_attrs 

1164 edge_attrs = self.edge_attrs 

1165 node_attrs = self.node_attrs 

1166 # Convert graphs into backend graph-like object 

1167 # Include the edge and/or node labels if provided to the algorithm 

1168 if preserve_edge_attrs is False: 

1169 # e.g. `preserve_edge_attrs=False` 

1170 pass 

1171 elif preserve_edge_attrs is True: 

1172 # e.g. `preserve_edge_attrs=True` 

1173 edge_attrs = None 

1174 elif isinstance(preserve_edge_attrs, str): 

1175 if bound.arguments[preserve_edge_attrs] is True or callable( 

1176 bound.arguments[preserve_edge_attrs] 

1177 ): 

1178 # e.g. `preserve_edge_attrs="attr"` and `func(attr=True)` 

1179 # e.g. `preserve_edge_attrs="attr"` and `func(attr=myfunc)` 

1180 preserve_edge_attrs = True 

1181 edge_attrs = None 

1182 elif bound.arguments[preserve_edge_attrs] is False and ( 

1183 isinstance(edge_attrs, str) 

1184 and edge_attrs == preserve_edge_attrs 

1185 or isinstance(edge_attrs, dict) 

1186 and preserve_edge_attrs in edge_attrs 

1187 ): 

1188 # e.g. `preserve_edge_attrs="attr"` and `func(attr=False)` 

1189 # Treat `False` argument as meaning "preserve_edge_data=False" 

1190 # and not `False` as the edge attribute to use. 

1191 preserve_edge_attrs = False 

1192 edge_attrs = None 

1193 else: 

1194 # e.g. `preserve_edge_attrs="attr"` and `func(attr="weight")` 

1195 preserve_edge_attrs = False 

1196 # Else: e.g. `preserve_edge_attrs={"G": {"weight": 1}}` 

1197 

1198 if edge_attrs is None: 

1199 # May have been set to None above b/c all attributes are preserved 

1200 pass 

1201 elif isinstance(edge_attrs, str): 

1202 if edge_attrs[0] == "[": 

1203 # e.g. `edge_attrs="[edge_attributes]"` (argument of list of attributes) 

1204 # e.g. `func(edge_attributes=["foo", "bar"])` 

1205 edge_attrs = { 

1206 edge_attr: 1 for edge_attr in bound.arguments[edge_attrs[1:-1]] 

1207 } 

1208 elif callable(bound.arguments[edge_attrs]): 

1209 # e.g. `edge_attrs="weight"` and `func(weight=myfunc)` 

1210 preserve_edge_attrs = True 

1211 edge_attrs = None 

1212 elif bound.arguments[edge_attrs] is not None: 

1213 # e.g. `edge_attrs="weight"` and `func(weight="foo")` (default of 1) 

1214 edge_attrs = {bound.arguments[edge_attrs]: 1} 

1215 elif self.name == "to_numpy_array" and hasattr( 

1216 bound.arguments["dtype"], "names" 

1217 ): 

1218 # Custom handling: attributes may be obtained from `dtype` 

1219 edge_attrs = { 

1220 edge_attr: 1 for edge_attr in bound.arguments["dtype"].names 

1221 } 

1222 else: 

1223 # e.g. `edge_attrs="weight"` and `func(weight=None)` 

1224 edge_attrs = None 

1225 else: 

1226 # e.g. `edge_attrs={"attr": "default"}` and `func(attr="foo", default=7)` 

1227 # e.g. `edge_attrs={"attr": 0}` and `func(attr="foo")` 

1228 edge_attrs = { 

1229 edge_attr: bound.arguments.get(val, 1) if isinstance(val, str) else val 

1230 for key, val in edge_attrs.items() 

1231 if (edge_attr := bound.arguments[key]) is not None 

1232 } 

1233 

1234 if preserve_node_attrs is False: 

1235 # e.g. `preserve_node_attrs=False` 

1236 pass 

1237 elif preserve_node_attrs is True: 

1238 # e.g. `preserve_node_attrs=True` 

1239 node_attrs = None 

1240 elif isinstance(preserve_node_attrs, str): 

1241 if bound.arguments[preserve_node_attrs] is True or callable( 

1242 bound.arguments[preserve_node_attrs] 

1243 ): 

1244 # e.g. `preserve_node_attrs="attr"` and `func(attr=True)` 

1245 # e.g. `preserve_node_attrs="attr"` and `func(attr=myfunc)` 

1246 preserve_node_attrs = True 

1247 node_attrs = None 

1248 elif bound.arguments[preserve_node_attrs] is False and ( 

1249 isinstance(node_attrs, str) 

1250 and node_attrs == preserve_node_attrs 

1251 or isinstance(node_attrs, dict) 

1252 and preserve_node_attrs in node_attrs 

1253 ): 

1254 # e.g. `preserve_node_attrs="attr"` and `func(attr=False)` 

1255 # Treat `False` argument as meaning "preserve_node_data=False" 

1256 # and not `False` as the node attribute to use. Is this used? 

1257 preserve_node_attrs = False 

1258 node_attrs = None 

1259 else: 

1260 # e.g. `preserve_node_attrs="attr"` and `func(attr="weight")` 

1261 preserve_node_attrs = False 

1262 # Else: e.g. `preserve_node_attrs={"G": {"pos": None}}` 

1263 

1264 if node_attrs is None: 

1265 # May have been set to None above b/c all attributes are preserved 

1266 pass 

1267 elif isinstance(node_attrs, str): 

1268 if node_attrs[0] == "[": 

1269 # e.g. `node_attrs="[node_attributes]"` (argument of list of attributes) 

1270 # e.g. `func(node_attributes=["foo", "bar"])` 

1271 node_attrs = { 

1272 node_attr: None for node_attr in bound.arguments[node_attrs[1:-1]] 

1273 } 

1274 elif callable(bound.arguments[node_attrs]): 

1275 # e.g. `node_attrs="weight"` and `func(weight=myfunc)` 

1276 preserve_node_attrs = True 

1277 node_attrs = None 

1278 elif bound.arguments[node_attrs] is not None: 

1279 # e.g. `node_attrs="weight"` and `func(weight="foo")` 

1280 node_attrs = {bound.arguments[node_attrs]: None} 

1281 else: 

1282 # e.g. `node_attrs="weight"` and `func(weight=None)` 

1283 node_attrs = None 

1284 else: 

1285 # e.g. `node_attrs={"attr": "default"}` and `func(attr="foo", default=7)` 

1286 # e.g. `node_attrs={"attr": 0}` and `func(attr="foo")` 

1287 node_attrs = { 

1288 node_attr: bound.arguments.get(val) if isinstance(val, str) else val 

1289 for key, val in node_attrs.items() 

1290 if (node_attr := bound.arguments[key]) is not None 

1291 } 

1292 

1293 # It should be safe to assume that we either have networkx graphs or backend graphs. 

1294 # Future work: allow conversions between backends. 

1295 for gname in self.graphs: 

1296 if gname in self.list_graphs: 

1297 bound.arguments[gname] = [ 

1298 self._convert_graph( 

1299 backend_name, 

1300 g, 

1301 edge_attrs=edge_attrs, 

1302 node_attrs=node_attrs, 

1303 preserve_edge_attrs=preserve_edge_attrs, 

1304 preserve_node_attrs=preserve_node_attrs, 

1305 preserve_graph_attrs=preserve_graph_attrs, 

1306 graph_name=gname, 

1307 use_cache=use_cache, 

1308 mutations=mutations, 

1309 ) 

1310 if getattr(g, "__networkx_backend__", "networkx") != backend_name 

1311 else g 

1312 for g in bound.arguments[gname] 

1313 ] 

1314 else: 

1315 graph = bound.arguments[gname] 

1316 if graph is None: 

1317 if gname in self.optional_graphs: 

1318 continue 

1319 raise TypeError( 

1320 f"Missing required graph argument `{gname}` in {self.name} function" 

1321 ) 

1322 if isinstance(preserve_edge_attrs, dict): 

1323 preserve_edges = False 

1324 edges = preserve_edge_attrs.get(gname, edge_attrs) 

1325 else: 

1326 preserve_edges = preserve_edge_attrs 

1327 edges = edge_attrs 

1328 if isinstance(preserve_node_attrs, dict): 

1329 preserve_nodes = False 

1330 nodes = preserve_node_attrs.get(gname, node_attrs) 

1331 else: 

1332 preserve_nodes = preserve_node_attrs 

1333 nodes = node_attrs 

1334 if isinstance(preserve_graph_attrs, set): 

1335 preserve_graph = gname in preserve_graph_attrs 

1336 else: 

1337 preserve_graph = preserve_graph_attrs 

1338 if getattr(graph, "__networkx_backend__", "networkx") != backend_name: 

1339 bound.arguments[gname] = self._convert_graph( 

1340 backend_name, 

1341 graph, 

1342 edge_attrs=edges, 

1343 node_attrs=nodes, 

1344 preserve_edge_attrs=preserve_edges, 

1345 preserve_node_attrs=preserve_nodes, 

1346 preserve_graph_attrs=preserve_graph, 

1347 graph_name=gname, 

1348 use_cache=use_cache, 

1349 mutations=mutations, 

1350 ) 

1351 bound_kwargs = bound.kwargs 

1352 del bound_kwargs["backend"] 

1353 return bound.args, bound_kwargs 

1354 

1355 def _convert_graph( 

1356 self, 

1357 backend_name, 

1358 graph, 

1359 *, 

1360 edge_attrs, 

1361 node_attrs, 

1362 preserve_edge_attrs, 

1363 preserve_node_attrs, 

1364 preserve_graph_attrs, 

1365 graph_name, 

1366 use_cache, 

1367 mutations, 

1368 ): 

1369 nx_cache = getattr(graph, "__networkx_cache__", None) if use_cache else None 

1370 if nx_cache is not None: 

1371 cache = nx_cache.setdefault("backends", {}).setdefault(backend_name, {}) 

1372 key = _get_cache_key( 

1373 edge_attrs=edge_attrs, 

1374 node_attrs=node_attrs, 

1375 preserve_edge_attrs=preserve_edge_attrs, 

1376 preserve_node_attrs=preserve_node_attrs, 

1377 preserve_graph_attrs=preserve_graph_attrs, 

1378 ) 

1379 compat_key, rv = _get_from_cache(cache, key, mutations=mutations) 

1380 if rv is not None: 

1381 if "cache" not in nx.config.warnings_to_ignore: 

1382 warnings.warn( 

1383 "Note: conversions to backend graphs are saved to cache " 

1384 "(`G.__networkx_cache__` on the original graph) by default." 

1385 "\n\nThis warning means the cached graph is being used " 

1386 f"for the {backend_name!r} backend in the " 

1387 f"call to {self.name}.\n\nFor the cache to be consistent " 

1388 "(i.e., correct), the input graph must not have been " 

1389 "manually mutated since the cached graph was created. " 

1390 "Examples of manually mutating the graph data structures " 

1391 "resulting in an inconsistent cache include:\n\n" 

1392 " >>> G[u][v][key] = val\n\n" 

1393 "and\n\n" 

1394 " >>> for u, v, d in G.edges(data=True):\n" 

1395 " ... d[key] = val\n\n" 

1396 "Using methods such as `G.add_edge(u, v, weight=val)` " 

1397 "will correctly clear the cache to keep it consistent. " 

1398 "You may also use `G.__networkx_cache__.clear()` to " 

1399 "manually clear the cache, or set `G.__networkx_cache__` " 

1400 "to None to disable caching for G. Enable or disable caching " 

1401 "globally via `nx.config.cache_converted_graphs` config.\n\n" 

1402 "To disable this warning:\n\n" 

1403 ' >>> nx.config.warnings_to_ignore.add("cache")\n' 

1404 ) 

1405 if rv == FAILED_TO_CONVERT: 

1406 # NotImplementedError is reasonable to use since the backend doesn't 

1407 # implement this conversion. However, this will be different from 

1408 # the original exception that the backend raised when it failed. 

1409 # Using NotImplementedError allows the next backend to be attempted. 

1410 raise NotImplementedError( 

1411 "Graph conversion aborted: unable to convert graph to " 

1412 f"'{backend_name}' backend in call to `{self.name}', " 

1413 "because this conversion has previously failed." 

1414 ) 

1415 _logger.debug( 

1416 "Using cached converted graph (from '%s' to '%s' backend) " 

1417 "in call to '%s' for '%s' argument", 

1418 getattr(graph, "__networkx_backend__", None), 

1419 backend_name, 

1420 self.name, 

1421 graph_name, 

1422 ) 

1423 return rv 
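# A hedged sketch of the cache layout used above: the resolved inner cache is
# ``G.__networkx_cache__["backends"][backend_name]``, keyed by ``_get_cache_key``
# results. The backend name and cached object below are illustrative placeholders:
#
#     >>> G.__networkx_cache__  # doctest: +SKIP
#     {'backends': {'some_backend': {(True, True): <converted backend graph>}}}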

1424 

1425 if backend_name == "networkx": 

1426 # The "__networkx_backend__" attribute must exist for us to convert from a 

1427 # backend; if it is missing, return the original object unchanged. 

1428 if not hasattr(graph, "__networkx_backend__"): 

1429 _logger.debug( 

1430 "Unable to convert input to 'networkx' backend in call to '%s' for " 

1431 "'%s argument, because it is not from a backend (i.e., it does not " 

1432 "have `G.__networkx_backend__` attribute). Using the original " 

1433 "object: %s", 

1434 self.name, 

1435 graph_name, 

1436 graph, 

1437 ) 

1438 # This may fail, but let it fail in the networkx function 

1439 return graph 

1440 backend = _load_backend(graph.__networkx_backend__) 

1441 try: 

1442 rv = backend.convert_to_nx(graph) 

1443 except Exception: 

1444 if nx_cache is not None: 

1445 _set_to_cache(cache, key, FAILED_TO_CONVERT) 

1446 raise 

1447 else: 

1448 backend = _load_backend(backend_name) 

1449 try: 

1450 rv = backend.convert_from_nx( 

1451 graph, 

1452 edge_attrs=edge_attrs, 

1453 node_attrs=node_attrs, 

1454 preserve_edge_attrs=preserve_edge_attrs, 

1455 preserve_node_attrs=preserve_node_attrs, 

1456 # Always preserve graph attrs when we are caching b/c this should be 

1457 # cheap and may help prevent extra (unnecessary) conversions. Because 

1458 # we do this, we don't need `preserve_graph_attrs` in the cache key. 

1459 preserve_graph_attrs=preserve_graph_attrs or nx_cache is not None, 

1460 name=self.name, 

1461 graph_name=graph_name, 

1462 ) 

1463 except Exception: 

1464 if nx_cache is not None: 

1465 _set_to_cache(cache, key, FAILED_TO_CONVERT) 

1466 raise 

1467 if nx_cache is not None: 

1468 _set_to_cache(cache, key, rv) 

1469 _logger.debug( 

1470 "Caching converted graph (from '%s' to '%s' backend) " 

1471 "in call to '%s' for '%s' argument", 

1472 getattr(graph, "__networkx_backend__", None), 

1473 backend_name, 

1474 self.name, 

1475 graph_name, 

1476 ) 

1477 

1478 return rv 
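# The calls above rely on the two conversion hooks a backend interface provides:
# ``convert_from_nx`` and ``convert_to_nx``. A minimal sketch under that
# assumption; ``MyBackendInterface`` and ``MyBackendGraph`` are hypothetical names:
#
#     class MyBackendInterface:
#         @staticmethod
#         def convert_from_nx(graph, *, edge_attrs, node_attrs, preserve_edge_attrs,
#                             preserve_node_attrs, preserve_graph_attrs, name,
#                             graph_name):
#             # Build the backend's graph type from a networkx graph.
#             return MyBackendGraph.from_networkx(graph)
#
#         @staticmethod
#         def convert_to_nx(obj):
#             # Convert backend graphs back; pass other results through unchanged.
#             return obj.to_networkx() if isinstance(obj, MyBackendGraph) else obj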

1479 

1480 def _call_with_backend(self, backend_name, args, kwargs, *, extra_message=None): 

1481 """Call this dispatchable function with a backend without converting inputs.""" 

1482 if backend_name == "networkx": 

1483 return self.orig_func(*args, **kwargs) 

1484 backend = _load_backend(backend_name) 

1485 _logger.debug( 

1486 "Using backend '%s' for call to '%s' with arguments: %s", 

1487 backend_name, 

1488 self.name, 

1489 _LazyArgsRepr(self, args, kwargs), 

1490 ) 

1491 try: 

1492 return getattr(backend, self.name)(*args, **kwargs) 

1493 except NotImplementedError as exc: 

1494 if extra_message is not None: 

1495 _logger.debug( 

1496 "Backend '%s' raised when calling '%s': %s", 

1497 backend_name, 

1498 self.name, 

1499 exc, 

1500 ) 

1501 raise NotImplementedError(extra_message) from exc 

1502 raise 
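# This method runs when the chosen backend needs no input conversion, such as an
# explicit ``backend=`` keyword with matching inputs. A small, concrete
# illustration using the always-available "networkx" backend (runs ``self.orig_func``):
#
#     >>> G = nx.path_graph(4)
#     >>> nx.betweenness_centrality(G, backend="networkx")
#     {0: 0.0, 1: 0.6666666666666666, 2: 0.6666666666666666, 3: 0.0}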

1503 

1504 def _convert_and_call( 

1505 self, 

1506 backend_name, 

1507 input_backend_names, 

1508 args, 

1509 kwargs, 

1510 *, 

1511 extra_message=None, 

1512 mutations=None, 

1513 ): 

1514 """Call this dispatchable function with a backend after converting inputs. 

1515 

1516 Parameters 

1517 ---------- 

1518 backend_name : str 

1519 input_backend_names : set[str] 

1520 args : arguments tuple 

1521 kwargs : keywords dict 

1522 extra_message : str, optional 

1523 Additional message to log if NotImplementedError is raised by backend. 

1524 mutations : list, optional 

1525 Used to clear objects gotten from cache if inputs will be mutated. 

1526 """ 

1527 if backend_name == "networkx": 

1528 func = self.orig_func 

1529 else: 

1530 backend = _load_backend(backend_name) 

1531 func = getattr(backend, self.name) 

1532 other_backend_names = input_backend_names - {backend_name} 

1533 _logger.debug( 

1534 "Converting input graphs from %s backend%s to '%s' backend for call to '%s'", 

1535 other_backend_names 

1536 if len(other_backend_names) > 1 

1537 else f"'{next(iter(other_backend_names))}'", 

1538 "s" if len(other_backend_names) > 1 else "", 

1539 backend_name, 

1540 self.name, 

1541 ) 

1542 try: 

1543 converted_args, converted_kwargs = self._convert_arguments( 

1544 backend_name, 

1545 args, 

1546 kwargs, 

1547 use_cache=nx.config.cache_converted_graphs, 

1548 mutations=mutations, 

1549 ) 

1550 except NotImplementedError as exc: 

1551 # Only log the exception if we are adding an extra message 

1552 # because we don't want to lose any information. 

1553 _logger.debug( 

1554 "Failed to convert graphs from %s to '%s' backend for call to '%s'" 

1555 + ("" if extra_message is None else ": %s"), 

1556 input_backend_names, 

1557 backend_name, 

1558 self.name, 

1559 *(() if extra_message is None else (exc,)), 

1560 ) 

1561 if extra_message is not None: 

1562 raise NotImplementedError(extra_message) from exc 

1563 raise 

1564 if backend_name != "networkx": 

1565 _logger.debug( 

1566 "Using backend '%s' for call to '%s' with arguments: %s", 

1567 backend_name, 

1568 self.name, 

1569 _LazyArgsRepr(self, converted_args, converted_kwargs), 

1570 ) 

1571 try: 

1572 return func(*converted_args, **converted_kwargs) 

1573 except NotImplementedError as exc: 

1574 if extra_message is not None: 

1575 _logger.debug( 

1576 "Backend '%s' raised when calling '%s': %s", 

1577 backend_name, 

1578 self.name, 

1579 exc, 

1580 ) 

1581 raise NotImplementedError(extra_message) from exc 

1582 raise 
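# The ``use_cache=nx.config.cache_converted_graphs`` argument above ties the
# conversions made here to a global switch, which may be toggled directly:
#
#     >>> nx.config.cache_converted_graphs = False  # convert fresh on every call
#     >>> nx.config.cache_converted_graphs = True   # reuse cached conversions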

1583 

1584 def _convert_and_call_for_tests( 

1585 self, backend_name, args, kwargs, *, fallback_to_nx=False 

1586 ): 

1587 """Call this dispatchable function with a backend; for use with testing.""" 

1588 backend = _load_backend(backend_name) 

1589 if not self._can_backend_run(backend_name, args, kwargs): 

1590 if fallback_to_nx or not self.graphs: 

1591 if fallback_to_nx: 

1592 _logger.debug( 

1593 "Falling back to use 'networkx' instead of '%s' backend " 

1594 "for call to '%s' with arguments: %s", 

1595 backend_name, 

1596 self.name, 

1597 _LazyArgsRepr(self, args, kwargs), 

1598 ) 

1599 return self.orig_func(*args, **kwargs) 

1600 

1601 import pytest 

1602 

1603 msg = f"'{self.name}' not implemented by {backend_name}" 

1604 if hasattr(backend, self.name): 

1605 msg += " with the given arguments" 

1606 pytest.xfail(msg) 

1607 

1608 from collections.abc import Iterable, Iterator, Mapping 

1609 from copy import copy, deepcopy 

1610 from io import BufferedReader, BytesIO, StringIO, TextIOWrapper 

1611 from itertools import tee 

1612 from random import Random 

1613 

1614 import numpy as np 

1615 from numpy.random import Generator, RandomState 

1616 from scipy.sparse import sparray 

1617 

1618 # We sometimes compare the backend result (or input graphs) to the 

1619 # original result (or input graphs), so we need two sets of arguments. 

1620 compare_result_to_nx = ( 

1621 self._returns_graph 

1622 and "networkx" in self.backends 

1623 and self.name 

1624 not in { 

1625 # Has graphs as node values (unable to compare) 

1626 "quotient_graph", 

1627 # We don't handle tempfile.NamedTemporaryFile arguments 

1628 "read_gml", 

1629 "read_graph6", 

1630 "read_sparse6", 

1631 # We don't handle io.BufferedReader or io.TextIOWrapper arguments 

1632 "bipartite_read_edgelist", 

1633 "read_adjlist", 

1634 "read_edgelist", 

1635 "read_graphml", 

1636 "read_multiline_adjlist", 

1637 "read_pajek", 

1638 "from_pydot", 

1639 "pydot_read_dot", 

1640 "agraph_read_dot", 

1641 # graph comparison fails b/c of nan values 

1642 "read_gexf", 

1643 } 

1644 ) 

1645 compare_inputs_to_nx = ( 

1646 "networkx" in self.backends and self._will_call_mutate_input(args, kwargs) 

1647 ) 

1648 

1649 # Tee iterators and copy random state so that they may be used twice. 

1650 if not args or not compare_result_to_nx and not compare_inputs_to_nx: 

1651 args_to_convert = args_nx = args 

1652 else: 

1653 args_to_convert, args_nx = zip( 

1654 *( 

1655 (arg, deepcopy(arg)) 

1656 if isinstance(arg, RandomState) 

1657 else (arg, copy(arg)) 

1658 if isinstance(arg, BytesIO | StringIO | Random | Generator) 

1659 else tee(arg) 

1660 if isinstance(arg, Iterator) 

1661 and not isinstance(arg, BufferedReader | TextIOWrapper) 

1662 else (arg, arg) 

1663 for arg in args 

1664 ) 

1665 ) 

1666 if not kwargs or not compare_result_to_nx and not compare_inputs_to_nx: 

1667 kwargs_to_convert = kwargs_nx = kwargs 

1668 else: 

1669 kwargs_to_convert, kwargs_nx = zip( 

1670 *( 

1671 ((k, v), (k, deepcopy(v))) 

1672 if isinstance(v, RandomState) 

1673 else ((k, v), (k, copy(v))) 

1674 if isinstance(v, BytesIO | StringIO | Random | Generator) 

1675 else ((k, (teed := tee(v))[0]), (k, teed[1])) 

1676 if isinstance(v, Iterator) 

1677 and not isinstance(v, BufferedReader | TextIOWrapper) 

1678 else ((k, v), (k, v)) 

1679 for k, v in kwargs.items() 

1680 ) 

1681 ) 

1682 kwargs_to_convert = dict(kwargs_to_convert) 

1683 kwargs_nx = dict(kwargs_nx) 
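# The ``zip(*(...))`` expressions above build two parallel argument collections;
# ``tee`` is what lets a single-use iterator be consumed by both the backend call
# and the networkx call. A minimal illustration of that behavior:
#
#     >>> from itertools import tee
#     >>> it1, it2 = tee(iter([(0, 1), (1, 2)]))
#     >>> list(it1)
#     [(0, 1), (1, 2)]
#     >>> list(it2)
#     [(0, 1), (1, 2)]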

1684 

1685 try: 

1686 converted_args, converted_kwargs = self._convert_arguments( 

1687 backend_name, 

1688 args_to_convert, 

1689 kwargs_to_convert, 

1690 use_cache=False, 

1691 mutations=None, 

1692 ) 

1693 except NotImplementedError as exc: 

1694 if fallback_to_nx: 

1695 _logger.debug( 

1696 "Graph conversion failed; falling back to use 'networkx' instead " 

1697 "of '%s' backend for call to '%s'", 

1698 backend_name, 

1699 self.name, 

1700 ) 

1701 return self.orig_func(*args_nx, **kwargs_nx) 

1702 import pytest 

1703 

1704 pytest.xfail( 

1705 exc.args[0] if exc.args else f"{self.name} raised {type(exc).__name__}" 

1706 ) 

1707 

1708 if compare_inputs_to_nx: 

1709 # Ensure the backend and networkx calls get distinct graph objects if the function mutates an input graph. 

1710 bound_backend = self.__signature__.bind(*converted_args, **converted_kwargs) 

1711 bound_backend.apply_defaults() 

1712 bound_nx = self.__signature__.bind(*args_nx, **kwargs_nx) 

1713 bound_nx.apply_defaults() 

1714 for gname in self.graphs: 

1715 graph_nx = bound_nx.arguments[gname] 

1716 if bound_backend.arguments[gname] is graph_nx is not None: 

1717 bound_nx.arguments[gname] = graph_nx.copy() 

1718 args_nx = bound_nx.args 

1719 kwargs_nx = bound_nx.kwargs 

1720 kwargs_nx.pop("backend", None) 

1721 

1722 _logger.debug( 

1723 "Using backend '%s' for call to '%s' with arguments: %s", 

1724 backend_name, 

1725 self.name, 

1726 _LazyArgsRepr(self, converted_args, converted_kwargs), 

1727 ) 

1728 try: 

1729 result = getattr(backend, self.name)(*converted_args, **converted_kwargs) 

1730 except NotImplementedError as exc: 

1731 if fallback_to_nx: 

1732 _logger.debug( 

1733 "Backend '%s' raised when calling '%s': %s; " 

1734 "falling back to use 'networkx' instead.", 

1735 backend_name, 

1736 self.name, 

1737 exc, 

1738 ) 

1739 return self.orig_func(*args_nx, **kwargs_nx) 

1740 import pytest 

1741 

1742 pytest.xfail( 

1743 exc.args[0] if exc.args else f"{self.name} raised {type(exc).__name__}" 

1744 ) 

1745 

1746 # Verify that `self._returns_graph` is correct. This compares the return type 

1747 # to the type expected from `self._returns_graph`. This handles tuple and list 

1748 # return types, but *does not* catch functions that yield graphs. 

1749 if ( 

1750 self._returns_graph 

1751 != ( 

1752 isinstance(result, nx.Graph) 

1753 or hasattr(result, "__networkx_backend__") 

1754 or isinstance(result, tuple | list) 

1755 and any( 

1756 isinstance(x, nx.Graph) or hasattr(x, "__networkx_backend__") 

1757 for x in result 

1758 ) 

1759 ) 

1760 and not ( 

1761 # May return Graph or None 

1762 self.name in {"check_planarity", "check_planarity_recursive"} 

1763 and any(x is None for x in result) 

1764 ) 

1765 and not ( 

1766 # May return Graph or dict 

1767 self.name in {"held_karp_ascent"} 

1768 and any(isinstance(x, dict) for x in result) 

1769 ) 

1770 and self.name 

1771 not in { 

1772 # yields graphs 

1773 "all_triads", 

1774 "general_k_edge_subgraphs", 

1775 # yields graphs or arrays 

1776 "nonisomorphic_trees", 

1777 } 

1778 ): 

1779 raise RuntimeError(f"`returns_graph` is incorrect for {self.name}") 

1780 

1781 def check_result(val, depth=0): 

1782 if isinstance(val, np.number): 

1783 raise RuntimeError( 

1784 f"{self.name} returned a numpy scalar {val} ({type(val)}, depth={depth})" 

1785 ) 

1786 if isinstance(val, np.ndarray | sparray): 

1787 return 

1788 if isinstance(val, nx.Graph): 

1789 check_result(val._node, depth=depth + 1) 

1790 check_result(val._adj, depth=depth + 1) 

1791 return 

1792 if isinstance(val, Iterator): 

1793 raise NotImplementedError 

1794 if isinstance(val, Iterable) and not isinstance(val, str): 

1795 for x in val: 

1796 check_result(x, depth=depth + 1) 

1797 if isinstance(val, Mapping): 

1798 for x in val.values(): 

1799 check_result(x, depth=depth + 1) 

1800 

1801 def check_iterator(it): 

1802 for val in it: 

1803 try: 

1804 check_result(val) 

1805 except RuntimeError as exc: 

1806 raise RuntimeError( 

1807 f"{self.name} returned a numpy scalar {val} ({type(val)})" 

1808 ) from exc 

1809 yield val 

1810 

1811 if self.name in {"from_edgelist"}: 

1812 # numpy scalars are explicitly given as values in some tests 

1813 pass 

1814 elif isinstance(result, Iterator): 

1815 result = check_iterator(result) 

1816 else: 

1817 try: 

1818 check_result(result) 

1819 except RuntimeError as exc: 

1820 raise RuntimeError( 

1821 f"{self.name} returned a numpy scalar {result} ({type(result)})" 

1822 ) from exc 

1823 check_result(result) 

1824 

1825 def assert_graphs_equal(G1, G2, strict=True): 

1826 assert G1.number_of_nodes() == G2.number_of_nodes() 

1827 assert G1.number_of_edges() == G2.number_of_edges() 

1828 assert G1.is_directed() is G2.is_directed() 

1829 assert G1.is_multigraph() is G2.is_multigraph() 

1830 if strict: 

1831 assert G1.graph == G2.graph 

1832 assert G1._node == G2._node 

1833 assert G1._adj == G2._adj 

1834 else: 

1835 assert set(G1) == set(G2) 

1836 assert set(G1.edges) == set(G2.edges) 

1837 

1838 if compare_inputs_to_nx: 

1839 # Special-case algorithms that mutate input graphs 

1840 result_nx = self.orig_func(*args_nx, **kwargs_nx) 

1841 for gname in self.graphs: 

1842 G0 = bound_backend.arguments[gname] 

1843 G1 = bound_nx.arguments[gname] 

1844 if G0 is not None or G1 is not None: 

1845 G1 = backend.convert_to_nx(G1) 

1846 assert_graphs_equal(G0, G1, strict=False) 

1847 

1848 converted_result = backend.convert_to_nx(result) 

1849 if compare_result_to_nx and isinstance(converted_result, nx.Graph): 

1850 # For graph return types (e.g. generators), we compare that results are 

1851 # the same between the backend and networkx, then return the original 

1852 # networkx result so the iteration order will be consistent in tests. 

1853 if compare_inputs_to_nx: 

1854 G = result_nx 

1855 else: 

1856 G = self.orig_func(*args_nx, **kwargs_nx) 

1857 assert_graphs_equal(G, converted_result) 

1858 return G 

1859 

1860 return converted_result 

1861 

1862 def _make_doc(self): 

1863 """Generate the backends section at the end for functions having an alternate 

1864 backend implementation(s) using the `backend_info` entry-point.""" 

1865 

1866 if self.backends == {"networkx"}: 

1867 return self._orig_doc 

1868 # Add "Backends" section to the bottom of the docstring (if there are backends) 

1869 lines = [ 

1870 "Backends", 

1871 "--------", 

1872 ] 

1873 for backend in sorted(self.backends - {"networkx"}): 

1874 info = backend_info[backend] 

1875 if "short_summary" in info: 

1876 lines.append(f"{backend} : {info['short_summary']}") 

1877 else: 

1878 lines.append(backend) 

1879 if "functions" not in info or self.name not in info["functions"]: 

1880 lines.append("") 

1881 continue 

1882 

1883 func_info = info["functions"][self.name] 

1884 

1885 # "extra_docstring" is the old name for "additional_docs"; accept both 

1886 if func_docs := ( 

1887 func_info.get("additional_docs") or func_info.get("extra_docstring") 

1888 ): 

1889 lines.extend( 

1890 f" {line}" if line else line for line in func_docs.split("\n") 

1891 ) 

1892 add_gap = True 

1893 else: 

1894 add_gap = False 

1895 

1896 # "extra_parameters" is the old name for "additional_parameters"; accept both 

1897 if extra_parameters := ( 

1898 func_info.get("extra_parameters") 

1899 or func_info.get("additional_parameters") 

1900 ): 

1901 if add_gap: 

1902 lines.append("") 

1903 lines.append(" Additional parameters:") 

1904 for param in sorted(extra_parameters): 

1905 lines.append(f" {param}") 

1906 if desc := extra_parameters[param]: 

1907 lines.append(f" {desc}") 

1908 lines.append("") 

1909 else: 

1910 lines.append("") 

1911 

1912 if func_url := func_info.get("url"): 

1913 lines.append(f"[`Source <{func_url}>`_]") 

1914 lines.append("") 

1915 

1916 # We assume the docstrings are indented by four spaces (true for now) 

1917 new_doc = self._orig_doc or "" 

1918 if not new_doc.rstrip(): 

1919 new_doc = f"The original docstring for {self.name} was empty." 

1920 if self.backends: 

1921 lines.pop() # Remove last empty line 

1922 to_add = "\n ".join(lines) 

1923 new_doc = f"{new_doc.rstrip()}\n\n {to_add}" 

1924 

1925 # For backend-only funcs, add an "Attention" admonition after the one-line summary 

1926 if "networkx" not in self.backends: 

1927 lines = new_doc.split("\n") 

1928 index = 0 

1929 while not lines[index].strip(): 

1930 index += 1 

1931 while index < len(lines) and lines[index].strip(): 

1932 index += 1 

1933 backends = sorted(self.backends) 

1934 if len(backends) == 0: 

1935 example = "" 

1936 elif len(backends) == 1: 

1937 example = f' such as "{backends[0]}"' 

1938 elif len(backends) == 2: 

1939 example = f' such as "{backends[0]}" or "{backends[1]}"' 

1940 else: 

1941 example = ( 

1942 " such as " 

1943 + ", ".join(f'"{x}"' for x in backends[:-1]) 

1944 + f', or "{backends[-1]}"' # Oxford comma 

1945 ) 

1946 to_add = ( 

1947 "\n .. attention:: This function does not have a default NetworkX implementation.\n" 

1948 " It may only be run with an installable :doc:`backend </backends>` that\n" 

1949 f" supports it{example}.\n\n" 

1950 " Hint: use ``backend=...`` keyword argument to specify a backend or add\n" 

1951 " backends to ``nx.config.backend_priority``." 

1952 ) 

1953 lines.insert(index, to_add) 

1954 new_doc = "\n".join(lines) 

1955 return new_doc 
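# For reference, the appended section renders roughly like the sketch below.
# The backend name, summary, extra parameter, and URL are made-up placeholders:
#
#     Backends
#     --------
#     fast_backend : A hypothetical accelerated backend
#       Extra notes provided by the backend for this function.
#
#       Additional parameters:
#         chunk_size : int
#           Hypothetical backend-only parameter.
#
#     [`Source <https://example.org/fast_backend>`_]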

1956 

1957 def __reduce__(self): 

1958 """Allow this object to be serialized with pickle. 

1959 

1960 This uses the global registry `_registered_algorithms` to deserialize. 

1961 """ 

1962 return _restore_dispatchable, (self.name,) 

1963 

1964 

1965def _restore_dispatchable(name): 

1966 return _registered_algorithms[name].__wrapped__ 
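# Together these make dispatchable functions picklable by name: pickle stores only
# the function name, and loading returns the wrapped function from the registry.
# Quick sketch (asserting only that the restored object is callable):
#
#     >>> import pickle
#     >>> restored = pickle.loads(pickle.dumps(nx.pagerank))
#     >>> callable(restored)
#     True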

1967 

1968 

1969def _get_cache_key( 

1970 *, 

1971 edge_attrs, 

1972 node_attrs, 

1973 preserve_edge_attrs, 

1974 preserve_node_attrs, 

1975 preserve_graph_attrs, 

1976): 

1977 """Return key used by networkx caching given arguments for ``convert_from_nx``.""" 

1978 # edge_attrs: dict | None 

1979 # node_attrs: dict | None 

1980 # preserve_edge_attrs: bool (False if edge_attrs is not None) 

1981 # preserve_node_attrs: bool (False if node_attrs is not None) 

1982 return ( 

1983 frozenset(edge_attrs.items()) 

1984 if edge_attrs is not None 

1985 else preserve_edge_attrs, 

1986 frozenset(node_attrs.items()) 

1987 if node_attrs is not None 

1988 else preserve_node_attrs, 

1989 ) 
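# Example keys produced by the function above (``preserve_graph_attrs`` never
# appears in the key, per the note in ``_convert_graph``):
#
#     >>> from networkx.utils.backends import _get_cache_key
#     >>> _get_cache_key(
#     ...     edge_attrs={"weight": 1}, node_attrs=None,
#     ...     preserve_edge_attrs=False, preserve_node_attrs=True,
#     ...     preserve_graph_attrs=False,
#     ... )
#     (frozenset({('weight', 1)}), True)
#     >>> _get_cache_key(
#     ...     edge_attrs=None, node_attrs=None,
#     ...     preserve_edge_attrs=False, preserve_node_attrs=False,
#     ...     preserve_graph_attrs=True,
#     ... )
#     (False, False)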

1990 

1991 

1992def _get_from_cache(cache, key, *, backend_name=None, mutations=None): 

1993 """Search the networkx cache for a graph that is compatible with ``key``. 

1994 

1995 Parameters 

1996 ---------- 

1997 cache : dict 

1998 If ``backend_name`` is given, then this is treated as ``G.__networkx_cache__``, 

1999 but if ``backend_name`` is None, then this is treated as the resolved inner 

2000 cache such as ``G.__networkx_cache__["backends"][backend_name]``. 

2001 key : tuple 

2002 Cache key from ``_get_cache_key``. 

2003 backend_name : str, optional 

2004 Name of the backend to control how ``cache`` is interpreted. 

2005 mutations : list, optional 

2006 Used internally to clear objects gotten from cache if inputs will be mutated. 

2007 

2008 Returns 

2009 ------- 

2010 tuple or None 

2011 The key of the compatible graph found in the cache. 

2012 graph or "FAILED_TO_CONVERT" or None 

2013 A compatible graph if possible. "FAILED_TO_CONVERT" indicates that a previous 

2014 conversion attempt failed for this cache key. 

2015 """ 

2016 if backend_name is not None: 

2017 cache = cache.get("backends", {}).get(backend_name, {}) 

2018 if not cache: 

2019 return None, None 

2020 

2021 # Do a simple search for a cached graph with compatible data. 

2022 # For example, if we need a single attribute, then it's okay 

2023 # to use a cached graph that preserved all attributes. 

2024 # This looks for an exact match first. 

2025 edge_key, node_key = key 

2026 for compat_key in itertools.product( 

2027 (edge_key, True) if edge_key is not True else (True,), 

2028 (node_key, True) if node_key is not True else (True,), 

2029 ): 

2030 if (rv := cache.get(compat_key)) is not None and ( 

2031 rv != FAILED_TO_CONVERT or key == compat_key 

2032 ): 

2033 if mutations is not None: 

2034 # Remove this item from the cache (after all conversions) if 

2035 # the call to this dispatchable function will mutate an input. 

2036 mutations.append((cache, compat_key)) 

2037 return compat_key, rv 

2038 

2039 # Iterate over the items in `cache` to see if any are compatible. 

2040 # For example, if no edge attributes are needed, then a graph 

2041 # with any edge attribute will suffice. We use the same logic 

2042 # below (but switched) to clear unnecessary items from the cache. 

2043 # Use `list(cache.items())` to be thread-safe. 

2044 for (ekey, nkey), graph in list(cache.items()): 

2045 if graph == FAILED_TO_CONVERT: 

2046 # Return FAILED_TO_CONVERT if any cache key that requires a subset 

2047 # of the edge/node attributes of the given cache key has previously 

2048 # failed to convert. This logic is similar to `_set_to_cache`. 

2049 if ekey is False or edge_key is True: 

2050 pass 

2051 elif ekey is True or edge_key is False or not ekey.issubset(edge_key): 

2052 continue 

2053 if nkey is False or node_key is True: # or nkey == node_key: 

2054 pass 

2055 elif nkey is True or node_key is False or not nkey.issubset(node_key): 

2056 continue 

2057 # Save to cache for faster subsequent lookups 

2058 cache[key] = FAILED_TO_CONVERT 

2059 elif edge_key is False or ekey is True: 

2060 pass # Cache works for edge data! 

2061 elif edge_key is True or ekey is False or not edge_key.issubset(ekey): 

2062 continue # Cache missing required edge data; does not work 

2063 if node_key is False or nkey is True: 

2064 pass # Cache works for node data! 

2065 elif node_key is True or nkey is False or not node_key.issubset(nkey): 

2066 continue # Cache missing required node data; does not work 

2067 if mutations is not None: 

2068 # Remove this item from the cache (after all conversions) if 

2069 # the call to this dispatchable function will mutate an input. 

2070 mutations.append((cache, (ekey, nkey))) 

2071 return (ekey, nkey), graph 

2072 

2073 return None, None 
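# Compatibility illustration: a graph cached with all edge and node attributes
# preserved (key ``(True, True)``) satisfies a later request for a single edge
# attribute. The inner cache and cached value here are placeholders:
#
#     >>> from networkx.utils.backends import _get_from_cache
#     >>> inner_cache = {(True, True): "cached-graph"}
#     >>> _get_from_cache(inner_cache, (frozenset({("weight", 1)}), False))
#     ((True, True), 'cached-graph')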

2074 

2075 

2076def _set_to_cache(cache, key, graph, *, backend_name=None): 

2077 """Set a backend graph to the cache, and remove unnecessary cached items. 

2078 

2079 Parameters 

2080 ---------- 

2081 cache : dict 

2082 If ``backend_name`` is given, then this is treated as ``G.__networkx_cache__``, 

2083 but if ``backend_name`` is None, then this is treated as the resolved inner 

2084 cache such as ``G.__networkx_cache__["backends"][backend_name]``. 

2085 key : tuple 

2086 Cache key from ``_get_cache_key``. 

2087 graph : graph or "FAILED_TO_CONVERT" 

2088 Setting value to "FAILED_TO_CONVERT" prevents this conversion from being 

2089 attempted in future calls. 

2090 backend_name : str, optional 

2091 Name of the backend to control how ``cache`` is interpreted. 

2092 

2093 Returns 

2094 ------- 

2095 dict 

2096 The items that were removed from the cache. 

2097 """ 

2098 if backend_name is not None: 

2099 cache = cache.setdefault("backends", {}).setdefault(backend_name, {}) 

2100 # Remove old cached items that are no longer necessary since they 

2101 # are dominated/subsumed/outdated by what was just calculated. 

2102 # This uses the same logic as above, but with keys switched. 

2103 # Also, don't update the cache here if the call will mutate an input. 

2104 removed = {} 

2105 edge_key, node_key = key 

2106 cache[key] = graph # Set at beginning to be thread-safe 

2107 if graph == FAILED_TO_CONVERT: 

2108 return removed 

2109 for cur_key in list(cache): 

2110 if cur_key == key: 

2111 continue 

2112 ekey, nkey = cur_key 

2113 if ekey is False or edge_key is True: 

2114 pass 

2115 elif ekey is True or edge_key is False or not ekey.issubset(edge_key): 

2116 continue 

2117 if nkey is False or node_key is True: 

2118 pass 

2119 elif nkey is True or node_key is False or not nkey.issubset(node_key): 

2120 continue 

2121 # Use pop instead of del to try to be thread-safe 

2122 if (graph := cache.pop(cur_key, None)) is not None: 

2123 removed[cur_key] = graph 

2124 return removed 
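# Subsumption illustration: caching a graph that preserves all edge attributes
# evicts an older entry that carried only a subset of them. Again the inner cache
# and the "graphs" are placeholder values:
#
#     >>> from networkx.utils.backends import _set_to_cache
#     >>> inner_cache = {(frozenset({("weight", 1)}), False): "partial"}
#     >>> _set_to_cache(inner_cache, (True, False), "full")
#     {(frozenset({('weight', 1)}), False): 'partial'}
#     >>> inner_cache
#     {(True, False): 'full'}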

2125 

2126 

2127class _LazyArgsRepr: 

2128 """Simple wrapper to display arguments of dispatchable functions in logging calls.""" 

2129 

2130 def __init__(self, func, args, kwargs): 

2131 self.func = func 

2132 self.args = args 

2133 self.kwargs = kwargs 

2134 self.value = None 

2135 

2136 def __repr__(self): 

2137 if self.value is None: 

2138 bound = self.func.__signature__.bind_partial(*self.args, **self.kwargs) 

2139 inner = ", ".join(f"{key}={val!r}" for key, val in bound.arguments.items()) 

2140 self.value = f"({inner})" 

2141 return self.value 
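# The laziness matters because ``_logger.debug("... %s", _LazyArgsRepr(...))`` only
# formats its arguments when the record is actually emitted, so argument reprs are
# never built while debug logging is disabled. Generic illustration of the idiom:
#
#     >>> import logging
#     >>> class Lazy:
#     ...     def __repr__(self):
#     ...         print("computing repr")  # side effect to show when repr runs
#     ...         return "<args>"
#     >>> logging.getLogger("demo").debug("args: %s", Lazy())  # prints nothing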

2142 

2143 

2144if os.environ.get("_NETWORKX_BUILDING_DOCS_"): 

2145 # When building docs with Sphinx, use the original function with the 

2146 # dispatched __doc__, b/c Sphinx renders normal Python functions better. 

2147 # This doesn't show e.g. `*, backend=None, **backend_kwargs` in the 

2148 # signatures, which is probably okay. It does allow the docstring to be 

2149 # updated based on the installed backends. 

2150 _orig_dispatchable = _dispatchable 

2151 

2152 def _dispatchable(func=None, **kwargs): # type: ignore[no-redef] 

2153 if func is None: 

2154 return partial(_dispatchable, **kwargs) 

2155 dispatched_func = _orig_dispatchable(func, **kwargs) 

2156 func.__doc__ = dispatched_func.__doc__ 

2157 return func 

2158 

2159 _dispatchable.__doc__ = _orig_dispatchable.__new__.__doc__ # type: ignore[method-assign,assignment] 

2160 _sig = inspect.signature(_orig_dispatchable.__new__) 

2161 _dispatchable.__signature__ = _sig.replace( # type: ignore[method-assign,assignment] 

2162 parameters=[v for k, v in _sig.parameters.items() if k != "cls"] 

2163 )
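# This docs-building shim only activates when the environment variable is set (to
# any non-empty string) before ``import networkx``, as the Sphinx build is assumed
# to do:
#
#     >>> import os
#     >>> os.environ["_NETWORKX_BUILDING_DOCS_"] = "1"  # set before importing networkx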