Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/networkx/utils/backends.py: 21%



1# Notes about NetworkX namespace objects set up here: 

2# 

3# nx.utils.backends.backends: 

4# dict keyed by backend name to the backend entry point object. 

5# Filled using ``_get_backends("networkx.backends")`` during import of this module. 

6# 

7# nx.utils.backends.backend_info: 

8# dict keyed by backend name to the metadata returned by the function indicated 

9# by the "networkx.backend_info" entry point. 

10# Created as an empty dict while importing this module, but later filled using 

11# ``_set_configs_from_environment()`` at end of importing ``networkx/__init__.py``. 

12# 

13# nx.config: 

14# Config object for NetworkX config setting. Created using 

15# ``_set_configs_from_environment()`` at end of importing ``networkx/__init__.py``. 

16# 

17# private dicts: 

18# nx.utils.backends._loaded_backends: 

19# dict used to memoize loaded backends. Keyed by backend name to loaded backends. 

20# 

21# nx.utils.backends._registered_algorithms: 

22# dict of all the dispatchable functions in networkx, keyed by _dispatchable 

23# function name to the wrapped function object. 

24 
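# Illustrative sketch (not part of the module): once NetworkX has finished
# importing, these namespace objects can be inspected directly. The "parallel"
# backend name and the outputs below are hypothetical.
#
#     >>> import networkx as nx
#     >>> sorted(nx.utils.backends.backends)       # installed backend entry points
#     ['parallel']
#     >>> "functions" in nx.utils.backends.backend_info["parallel"]
#     True
#     >>> nx.config.backend_priority.algos          # filled by _set_configs_from_environment()
#     []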

25import inspect 

26import itertools 

27import logging 

28import os 

29import typing 

30import warnings 

31from functools import partial 

32from importlib.metadata import entry_points 

33 

34import networkx as nx 

35 

36from .configs import BackendPriorities, Config, NetworkXConfig 

37from .decorators import argmap 

38 

39__all__ = ["_dispatchable"] 

40 

41_logger = logging.getLogger(__name__) 

42FAILED_TO_CONVERT = "FAILED_TO_CONVERT" 

43 

44 

45def _get_backends(group, *, load_and_call=False): 

46 """ 

47 Retrieve NetworkX ``backends`` and ``backend_info`` from the entry points. 

48 

49 Parameters 

50 ----------

51 group : str 

52 The entry point group to retrieve.

53 load_and_call : bool, optional 

54 If True, load each entry point and call it, storing the returned value. Defaults to False.

55 

56 Returns 

57 -------

58 dict 

59 A dictionary mapping backend names to their respective backend objects. 

60 

61 Notes 

62 -----

63 If a backend is defined more than once, a warning is issued. 

64 If a backend name is not a valid Python identifier, the backend is 

65 ignored and a warning is issued. 

66 The "nx_loopback" backend is removed if it exists, as it is only available during testing. 

67 A warning is displayed if an error occurs while loading a backend. 

68 """ 

69 items = entry_points(group=group) 

70 rv = {} 

71 for ep in items: 

72 if not ep.name.isidentifier(): 

73 warnings.warn( 

74 f"networkx backend name is not a valid identifier: {ep.name!r}. Ignoring.", 

75 RuntimeWarning, 

76 stacklevel=2, 

77 ) 

78 elif ep.name in rv: 

79 warnings.warn( 

80 f"networkx backend defined more than once: {ep.name}", 

81 RuntimeWarning, 

82 stacklevel=2, 

83 ) 

84 elif load_and_call: 

85 try: 

86 rv[ep.name] = ep.load()() 

87 except Exception as exc: 

88 warnings.warn( 

89 f"Error encountered when loading info for backend {ep.name}: {exc}", 

90 RuntimeWarning, 

91 stacklevel=2, 

92 ) 

93 else: 

94 rv[ep.name] = ep 

95 rv.pop("nx_loopback", None) 

96 return rv 
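# Illustrative sketch (an assumption, not taken from this module): backends
# advertise themselves through the "networkx.backends" and "networkx.backend_info"
# entry-point groups, which is what `_get_backends` reads above via
# `importlib.metadata.entry_points`. The backend name shown is hypothetical.
#
#     >>> from importlib.metadata import entry_points
#     >>> [ep.name for ep in entry_points(group="networkx.backends")]  # doctest: +SKIP
#     ['parallel']
#
# For the "networkx.backend_info" group, `load_and_call=True` means each entry
# point is loaded and the loaded object is called to produce the metadata dict.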

97 

98 

99# Note: "networkx" is in `backend_info` but ignored in `backends` and `config.backends`. 

100# It is valid to use "networkx" as a backend argument and in `config.backend_priority`. 

101# If we make "networkx" a "proper" backend, put it in `backends` and `config.backends`. 

102backends = _get_backends("networkx.backends") 

103 

104# Use _set_configs_from_environment() below to fill backend_info dict as 

105# the last step in importing networkx 

106backend_info = {} 

107 

108# Load and cache backends on-demand 

109_loaded_backends = {} # type: ignore[var-annotated] 

110_registered_algorithms = {} 

111 

112 

113# Get default configuration from environment variables at import time 

114def _comma_sep_to_list(string): 

115 return [x_strip for x in string.strip().split(",") if (x_strip := x.strip())] 
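# A quick sketch of the parsing behavior of the helper above (input strings
# are illustrative):
#
#     >>> _comma_sep_to_list(" cugraph, parallel ,, ")
#     ['cugraph', 'parallel']
#     >>> _comma_sep_to_list("")
#     []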

116 

117 

118def _set_configs_from_environment(): 

119 """Initialize ``config.backend_priority``, load backend_info and config. 

120 

121 This gets default values from environment variables (see ``nx.config`` for details). 

122 This function is run at the very end of importing networkx. It is run at this time 

123 to avoid loading backend_info before the rest of networkx is imported in case a 

124 backend uses networkx for its backend_info (e.g., subclassing the Config class). 

125 """ 

126 # backend_info is defined above as empty dict. Fill it after import finishes. 

127 backend_info.update(_get_backends("networkx.backend_info", load_and_call=True)) 

128 backend_info.update( 

129 (backend, {}) for backend in backends.keys() - backend_info.keys() 

130 ) 

131 

132 # set up config based on backend_info and environment 

133 backend_config = {} 

134 for backend, info in backend_info.items(): 

135 if "default_config" not in info: 

136 cfg = Config() 

137 else: 

138 cfg = info["default_config"] 

139 if not isinstance(cfg, Config): 

140 cfg = Config(**cfg) 

141 backend_config[backend] = cfg 

142 backend_config = Config(**backend_config) 

143 # Setting __doc__ on the type of `backend_config` does not set the doc of Config: 

144 # Config's __new__ method returns an instance with its own unique type! 

145 type(backend_config).__doc__ = "All installed NetworkX backends and their configs." 

146 

147 backend_priority = BackendPriorities(algos=[], generators=[], classes=[]) 

148 

149 config = NetworkXConfig( 

150 backend_priority=backend_priority, 

151 backends=backend_config, 

152 cache_converted_graphs=bool( 

153 os.environ.get("NETWORKX_CACHE_CONVERTED_GRAPHS", True) 

154 ), 

155 fallback_to_nx=bool(os.environ.get("NETWORKX_FALLBACK_TO_NX", False)), 

156 warnings_to_ignore=set( 

157 _comma_sep_to_list(os.environ.get("NETWORKX_WARNINGS_TO_IGNORE", "")) 

158 ), 

159 ) 

160 

161 # Add "networkx" item to backend_info now b/c backend_config is set up 

162 backend_info["networkx"] = {} 

163 

164 # NETWORKX_BACKEND_PRIORITY is the same as NETWORKX_BACKEND_PRIORITY_ALGOS 

165 priorities = { 

166 key[26:].lower(): val 

167 for key, val in os.environ.items() 

168 if key.startswith("NETWORKX_BACKEND_PRIORITY_") 

169 } 

170 backend_priority = config.backend_priority 

171 backend_priority.algos = ( 

172 _comma_sep_to_list(priorities.pop("algos")) 

173 if "algos" in priorities 

174 else _comma_sep_to_list( 

175 os.environ.get( 

176 "NETWORKX_BACKEND_PRIORITY", 

177 os.environ.get("NETWORKX_AUTOMATIC_BACKENDS", ""), 

178 ) 

179 ) 

180 ) 

181 backend_priority.generators = _comma_sep_to_list(priorities.pop("generators", "")) 

182 for key in sorted(priorities): 

183 backend_priority[key] = _comma_sep_to_list(priorities[key]) 

184 

185 return config 
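# Illustrative sketch (backend names are hypothetical) of configuring NetworkX
# through the environment variables read above. They must be set before
# `import networkx`, because this function runs at the very end of that import.
#
#     >>> import os
#     >>> os.environ["NETWORKX_BACKEND_PRIORITY"] = "parallel,cugraph"  # same as ..._PRIORITY_ALGOS
#     >>> os.environ["NETWORKX_BACKEND_PRIORITY_GENERATORS"] = "cugraph"
#     >>> os.environ["NETWORKX_FALLBACK_TO_NX"] = "true"
#     >>> import networkx as nx
#     >>> nx.config.backend_priority.algos
#     ['parallel', 'cugraph']
#
# Note that the boolean settings go through `bool(os.environ.get(...))`, so any
# non-empty string (even "false" or "0") counts as True; use "" to disable.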

186 

187 

188def _do_nothing(): 

189 """This does nothing at all, yet it helps turn ``_dispatchable`` into functions. 

190 

191 Use this with the ``argmap`` decorator to turn ``self`` into a function. It results 

192 in some small additional overhead compared to calling ``_dispatchable`` directly, 

193 but ``argmap`` has the property that it can stack with other ``argmap`` 

194 decorators "for free". Being a function is better for REPRs and type-checkers. 

195 """ 

196 

197 

198def _always_run(name, args, kwargs): 

199 return True 

200 

201 

202def _load_backend(backend_name): 

203 if backend_name in _loaded_backends: 

204 return _loaded_backends[backend_name] 

205 if backend_name not in backends: 

206 raise ImportError(f"'{backend_name}' backend is not installed") 

207 rv = _loaded_backends[backend_name] = backends[backend_name].load() 

208 if not hasattr(rv, "can_run"): 

209 rv.can_run = _always_run 

210 if not hasattr(rv, "should_run"): 

211 rv.should_run = _always_run 

212 return rv 
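# Minimal sketch of on-demand loading (the backend name is hypothetical). If a
# backend does not define `can_run`/`should_run`, the `_always_run` defaults
# above make both always return True.
#
#     >>> backend = _load_backend("parallel")    # loads the entry point once
#     >>> backend is _load_backend("parallel")   # second call is served from _loaded_backends
#     True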

213 

214 

215class _dispatchable: 

216 _is_testing = False 

217 

218 def __new__( 

219 cls, 

220 func=None, 

221 *, 

222 name=None, 

223 graphs="G", 

224 edge_attrs=None, 

225 node_attrs=None, 

226 preserve_edge_attrs=False, 

227 preserve_node_attrs=False, 

228 preserve_graph_attrs=False, 

229 preserve_all_attrs=False, 

230 mutates_input=False, 

231 returns_graph=False, 

232 implemented_by_nx=True, 

233 ): 

234 """A decorator function that is used to redirect the execution of ``func`` 

235 function to its backend implementation. 

236 

237 This decorator allows the function to dispatch to different backend 

238 implementations based on the input graph types, and also manages the 

239 extra keywords ``backend`` and ``**backend_kwargs``. 

240 Usage can be any of the following decorator forms: 

241 

242 - ``@_dispatchable`` 

243 - ``@_dispatchable()`` 

244 - ``@_dispatchable(name="override_name")`` 

245 - ``@_dispatchable(graphs="graph_var_name")`` 

246 - ``@_dispatchable(edge_attrs="weight")`` 

247 - ``@_dispatchable(graphs={"G": 0, "H": 1}, edge_attrs={"weight": "default"})`` 

248 with 0 and 1 giving the positions of the graph objects in the function 

249 signature. When ``edge_attrs`` is a dict, keys are keyword names and values 

250 are defaults. 

251 

252 Parameters 

253 ---------- 

254 func : callable, optional (default: None) 

255 The function to be decorated. If None, ``_dispatchable`` returns a 

256 partial object that can be used to decorate a function later. If ``func`` 

257 is a callable, returns a new callable object that dispatches to a backend 

258 function based on input graph types. 

259 

260 name : str, optional (default: name of `func`) 

261 The dispatch name for the function. It defaults to the name of `func`, 

262 but can be set manually to avoid conflicts in the global dispatch 

263 namespace. A common pattern is to prefix the function name with its 

264 module or submodule to make it unique. For example: 

265 

266 - ``@_dispatchable(name="tournament_is_strongly_connected")`` 

267 resolves conflict between ``nx.tournament.is_strongly_connected`` 

268 and ``nx.is_strongly_connected``. 

269 - ``@_dispatchable(name="approximate_node_connectivity")`` 

270 resolves conflict between ``nx.approximation.node_connectivity`` 

271 and ``nx.connectivity.node_connectivity``. 

272 

273 graphs : str or dict or None, optional (default: "G") 

274 If a string, the parameter name of the graph, which must be the first 

275 argument of the wrapped function. If more than one graph is required 

276 for the function (or if the graph is not the first argument), provide 

277 a dict keyed by graph parameter name to that parameter's position. 

278 A question mark in the name indicates an optional argument. 

279 For example, ``@_dispatchable(graphs={"G": 0, "auxiliary?": 4})`` 

280 indicates the 0th parameter ``G`` of the function is a required graph, 

281 and the 4th parameter ``auxiliary?`` is an optional graph. 

282 To indicate that an argument is a list of graphs, use e.g. ``"[graphs]"``. 

283 Use ``graphs=None`` if *no* arguments are NetworkX graphs, such as for 

284 graph generators, readers, and conversion functions. 

285 

286 edge_attrs : str or dict, optional (default: None) 

287 ``edge_attrs`` holds information about edge attribute arguments 

288 and default values for those edge attributes. 

289 If a string, ``edge_attrs`` holds the function argument name that 

290 indicates a single edge attribute to include in the converted graph. 

291 The default value for this attribute is 1. To indicate that an argument 

292 is a list of attributes (all with default value 1), use e.g. ``"[attrs]"``. 

293 If a dict, ``edge_attrs`` holds a dict keyed by argument names, with 

294 values that are either the default value or, if a string, the argument 

295 name that indicates the default value. 

296 If None, the function does not use edge attributes. 

297 

298 node_attrs : str or dict, optional 

299 Like ``edge_attrs``, but for node attributes. 

300 

301 preserve_edge_attrs : bool or str or dict, optional (default: False) 

302 If bool, whether to preserve all edge attributes. 

303 If a string, the parameter name that may indicate (with ``True`` or a 

304 callable argument) whether all edge attributes should be preserved 

305 when converting graphs to a backend graph type. 

306 If a dict of form ``{graph_name: {attr: default}}``, indicate 

307 pre-determined edge attributes (and defaults) to preserve for the 

308 indicated input graph. 

309 

310 preserve_node_attrs : bool or str or dict, optional (default: False) 

311 Like ``preserve_edge_attrs``, but for node attributes. 

312 

313 preserve_graph_attrs : bool or set, optional (default: False) 

314 If bool, whether to preserve all graph attributes. 

315 If a set, the input graph arguments for which to preserve graph attributes. 

316 

317 preserve_all_attrs : bool, optional (default: False) 

318 Whether to preserve all edge, node and graph attributes. 

319 If True, this overrides all the other preserve_*_attrs. 

320 

321 mutates_input : bool or dict, optional (default: False) 

322 If bool, whether the function mutates an input graph argument. 

323 If dict of ``{arg_name: arg_pos}``, name and position of bool arguments 

324 that indicate whether an input graph will be mutated, and ``arg_name`` 

325 may begin with ``"not "`` to negate the logic (for example, ``"not copy"`` 

326 means we mutate the input graph when the ``copy`` argument is False). 

327 By default, dispatching doesn't convert input graphs to a different 

328 backend for functions that mutate input graphs. 

329 

330 returns_graph : bool, optional (default: False) 

331 Whether the function can return or yield a graph object. By default, 

332 dispatching doesn't convert input graphs to a different backend for 

333 functions that return graphs. 

334 

335 implemented_by_nx : bool, optional (default: True) 

336 Whether the function is implemented by NetworkX. If it is not, then the 

337 function is included in NetworkX only as an API to dispatch to backends. 

338 Default is True. 

339 """ 

340 if func is None: 

341 return partial( 

342 _dispatchable, 

343 name=name, 

344 graphs=graphs, 

345 edge_attrs=edge_attrs, 

346 node_attrs=node_attrs, 

347 preserve_edge_attrs=preserve_edge_attrs, 

348 preserve_node_attrs=preserve_node_attrs, 

349 preserve_graph_attrs=preserve_graph_attrs, 

350 preserve_all_attrs=preserve_all_attrs, 

351 mutates_input=mutates_input, 

352 returns_graph=returns_graph, 

353 implemented_by_nx=implemented_by_nx, 

354 ) 

355 if isinstance(func, str): 

356 raise TypeError("'name' and 'graphs' must be passed by keyword") from None 

357 # If name not provided, use the name of the function 

358 if name is None: 

359 name = func.__name__ 

360 

361 self = object.__new__(cls) 

362 

363 # standard function-wrapping stuff 

364 # __annotations__ not used 

365 self.__name__ = func.__name__ 

366 # self.__doc__ = func.__doc__ # __doc__ handled as cached property 

367 self.__defaults__ = func.__defaults__ 

368 # Add `backend=` keyword argument to allow backend choice at call-time 

369 if func.__kwdefaults__: 

370 self.__kwdefaults__ = {**func.__kwdefaults__, "backend": None} 

371 else: 

372 self.__kwdefaults__ = {"backend": None} 

373 self.__module__ = func.__module__ 

374 self.__qualname__ = func.__qualname__ 

375 self.__dict__.update(func.__dict__) 

376 self.__wrapped__ = func 

377 

378 # Supplement docstring with backend info; compute and cache when needed 

379 self._orig_doc = func.__doc__ 

380 self._cached_doc = None 

381 

382 self.orig_func = func 

383 self.name = name 

384 self.edge_attrs = edge_attrs 

385 self.node_attrs = node_attrs 

386 self.preserve_edge_attrs = preserve_edge_attrs or preserve_all_attrs 

387 self.preserve_node_attrs = preserve_node_attrs or preserve_all_attrs 

388 self.preserve_graph_attrs = preserve_graph_attrs or preserve_all_attrs 

389 self.mutates_input = mutates_input 

390 # Keep `returns_graph` private for now, b/c we may extend info on return types 

391 self._returns_graph = returns_graph 

392 

393 if edge_attrs is not None and not isinstance(edge_attrs, str | dict): 

394 raise TypeError( 

395 f"Bad type for edge_attrs: {type(edge_attrs)}. Expected str or dict." 

396 ) from None 

397 if node_attrs is not None and not isinstance(node_attrs, str | dict): 

398 raise TypeError( 

399 f"Bad type for node_attrs: {type(node_attrs)}. Expected str or dict." 

400 ) from None 

401 if not isinstance(self.preserve_edge_attrs, bool | str | dict): 

402 raise TypeError( 

403 f"Bad type for preserve_edge_attrs: {type(self.preserve_edge_attrs)}." 

404 " Expected bool, str, or dict." 

405 ) from None 

406 if not isinstance(self.preserve_node_attrs, bool | str | dict): 

407 raise TypeError( 

408 f"Bad type for preserve_node_attrs: {type(self.preserve_node_attrs)}." 

409 " Expected bool, str, or dict." 

410 ) from None 

411 if not isinstance(self.preserve_graph_attrs, bool | set): 

412 raise TypeError( 

413 f"Bad type for preserve_graph_attrs: {type(self.preserve_graph_attrs)}." 

414 " Expected bool or set." 

415 ) from None 

416 if not isinstance(self.mutates_input, bool | dict): 

417 raise TypeError( 

418 f"Bad type for mutates_input: {type(self.mutates_input)}." 

419 " Expected bool or dict." 

420 ) from None 

421 if not isinstance(self._returns_graph, bool): 

422 raise TypeError( 

423 f"Bad type for returns_graph: {type(self._returns_graph)}." 

424 " Expected bool." 

425 ) from None 

426 

427 if isinstance(graphs, str): 

428 graphs = {graphs: 0} 

429 elif graphs is None: 

430 pass 

431 elif not isinstance(graphs, dict): 

432 raise TypeError( 

433 f"Bad type for graphs: {type(graphs)}. Expected str or dict." 

434 ) from None 

435 elif len(graphs) == 0: 

436 raise KeyError("'graphs' must contain at least one variable name") from None 

437 

438 # This dict comprehension is complicated for better performance; equivalent shown below. 

439 self.optional_graphs = set() 

440 self.list_graphs = set() 

441 if graphs is None: 

442 self.graphs = {} 

443 else: 

444 self.graphs = { 

445 self.optional_graphs.add(val := k[:-1]) or val 

446 if (last := k[-1]) == "?" 

447 else self.list_graphs.add(val := k[1:-1]) or val 

448 if last == "]" 

449 else k: v 

450 for k, v in graphs.items() 

451 } 

452 # The above is equivalent to: 

453 # self.optional_graphs = {k[:-1] for k in graphs if k[-1] == "?"} 

454 # self.list_graphs = {k[1:-1] for k in graphs if k[-1] == "]"} 

455 # self.graphs = {k[:-1] if k[-1] == "?" else k: v for k, v in graphs.items()} 

456 

457 # Compute and cache the signature on-demand 

458 self._sig = None 

459 

460 # Which backends implement this function? 

461 self.backends = { 

462 backend 

463 for backend, info in backend_info.items() 

464 if "functions" in info and name in info["functions"] 

465 } 

466 if implemented_by_nx: 

467 self.backends.add("networkx") 

468 

469 if name in _registered_algorithms: 

470 raise KeyError( 

471 f"Algorithm already exists in dispatch namespace: {name}. " 

472 "Fix by assigning a unique `name=` in the `@_dispatchable` decorator." 

473 ) from None 

474 # Use the `argmap` decorator to turn `self` into a function. This does result 

475 # in small additional overhead compared to calling `_dispatchable` directly, 

476 # but `argmap` has the property that it can stack with other `argmap` 

477 # decorators "for free". Being a function is better for REPRs and type-checkers. 

478 # It also allows `_dispatchable` to be used on class methods, since functions 

479 # define `__get__`. Without using `argmap`, we would need to define `__get__`. 

480 self = argmap(_do_nothing)(self) 

481 _registered_algorithms[name] = self 

482 return self 

483 

484 @property 

485 def __doc__(self): 

486 """If the cached documentation exists, it is returned. 

487 Otherwise, the documentation is generated using the _make_doc() method, 

488 cached, and then returned.""" 

489 

490 rv = self._cached_doc 

491 if rv is None: 

492 rv = self._cached_doc = self._make_doc() 

493 return rv 

494 

495 @__doc__.setter 

496 def __doc__(self, val): 

497 """Sets the original documentation to the given value and resets the 

498 cached documentation.""" 

499 

500 self._orig_doc = val 

501 self._cached_doc = None 

502 

503 @property 

504 def __signature__(self): 

505 """Return the signature of the original function, with the addition of 

506 the `backend` and `backend_kwargs` parameters.""" 

507 

508 if self._sig is None: 

509 sig = inspect.signature(self.orig_func) 

510 # `backend` is now a reserved argument used by dispatching. 

511 # assert "backend" not in sig.parameters 

512 if not any( 

513 p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values() 

514 ): 

515 sig = sig.replace( 

516 parameters=[ 

517 *sig.parameters.values(), 

518 inspect.Parameter( 

519 "backend", inspect.Parameter.KEYWORD_ONLY, default=None 

520 ), 

521 inspect.Parameter( 

522 "backend_kwargs", inspect.Parameter.VAR_KEYWORD 

523 ), 

524 ] 

525 ) 

526 else: 

527 *parameters, var_keyword = sig.parameters.values() 

528 sig = sig.replace( 

529 parameters=[ 

530 *parameters, 

531 inspect.Parameter( 

532 "backend", inspect.Parameter.KEYWORD_ONLY, default=None 

533 ), 

534 var_keyword, 

535 ] 

536 ) 

537 self._sig = sig 

538 return self._sig 
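# Sketch of the resulting signature (the function name is just an example;
# the output is elided and not verified here):
#
#     >>> import inspect, networkx as nx
#     >>> str(inspect.signature(nx.pagerank))  # doctest: +SKIP
#     '(G, alpha=0.85, ..., *, backend=None, **backend_kwargs)'
#
# That is, `backend` is appended as a keyword-only parameter and `backend_kwargs`
# as a var-keyword parameter, unless the wrapped function already has **kwargs,
# in which case only `backend` is inserted before the existing var-keyword.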

539 

540 # Fast, simple path if no backends are installed 

541 def _call_if_no_backends_installed(self, /, *args, backend=None, **kwargs): 

542 """Returns the result of the original function (no backends installed).""" 

543 if backend is not None and backend != "networkx": 

544 raise ImportError(f"'{backend}' backend is not installed") 

545 if "networkx" not in self.backends: 

546 raise NotImplementedError( 

547 f"'{self.name}' is not implemented by 'networkx' backend. " 

548 "This function is included in NetworkX as an API to dispatch to " 

549 "other backends." 

550 ) 

551 return self.orig_func(*args, **kwargs) 

552 

553 # Dispatch to backends based on inputs, `backend=` arg, or configuration 

554 def _call_if_any_backends_installed(self, /, *args, backend=None, **kwargs): 

555 """Returns the result of the original function, or the backend function if 

556 the backend is specified and that backend implements `func`.""" 

557 # Use `backend_name` in this function instead of `backend`. 

558 # This is purely for aesthetics and to make it easier to search for this 

559 # variable since "backend" is used in many comments and log/error messages. 

560 backend_name = backend 

561 if backend_name is not None and backend_name not in backend_info: 

562 raise ImportError(f"'{backend_name}' backend is not installed") 

563 

564 graphs_resolved = {} 

565 for gname, pos in self.graphs.items(): 

566 if pos < len(args): 

567 if gname in kwargs: 

568 raise TypeError(f"{self.name}() got multiple values for {gname!r}") 

569 graph = args[pos] 

570 elif gname in kwargs: 

571 graph = kwargs[gname] 

572 elif gname not in self.optional_graphs: 

573 raise TypeError( 

574 f"{self.name}() missing required graph argument: {gname}" 

575 ) 

576 else: 

577 continue 

578 if graph is None: 

579 if gname not in self.optional_graphs: 

580 raise TypeError( 

581 f"{self.name}() required graph argument {gname!r} is None; must be a graph" 

582 ) 

583 else: 

584 graphs_resolved[gname] = graph 

585 

586 # Alternative to the above that does not check duplicated args or missing required graphs. 

587 # graphs_resolved = { 

588 # gname: graph 

589 # for gname, pos in self.graphs.items() 

590 # if (graph := args[pos] if pos < len(args) else kwargs.get(gname)) is not None 

591 # } 

592 

593 # Check if any graph comes from a backend 

594 if self.list_graphs: 

595 # Make sure we don't lose values by consuming an iterator 

596 args = list(args) 

597 for gname in self.list_graphs & graphs_resolved.keys(): 

598 list_of_graphs = list(graphs_resolved[gname]) 

599 graphs_resolved[gname] = list_of_graphs 

600 if gname in kwargs: 

601 kwargs[gname] = list_of_graphs 

602 else: 

603 args[self.graphs[gname]] = list_of_graphs 

604 

605 graph_backend_names = { 

606 getattr(g, "__networkx_backend__", None) 

607 for gname, g in graphs_resolved.items() 

608 if gname not in self.list_graphs 

609 } 

610 for gname in self.list_graphs & graphs_resolved.keys(): 

611 graph_backend_names.update( 

612 getattr(g, "__networkx_backend__", None) 

613 for g in graphs_resolved[gname] 

614 ) 

615 else: 

616 graph_backend_names = { 

617 getattr(g, "__networkx_backend__", None) 

618 for g in graphs_resolved.values() 

619 } 

620 

621 backend_priority = nx.config.backend_priority.get( 

622 self.name, 

623 nx.config.backend_priority.classes 

624 if self.name.endswith("__new__") 

625 else nx.config.backend_priority.generators 

626 if self._returns_graph 

627 else nx.config.backend_priority.algos, 

628 ) 

629 fallback_to_nx = nx.config.fallback_to_nx and "networkx" in self.backends 

630 if self._is_testing and backend_priority and backend_name is None: 

631 # Special path if we are running networkx tests with a backend. 

632 # This even runs for (and handles) functions that mutate input graphs. 

633 return self._convert_and_call_for_tests( 

634 backend_priority[0], 

635 args, 

636 kwargs, 

637 fallback_to_nx=fallback_to_nx, 

638 ) 

639 

640 graph_backend_names.discard(None) 

641 if backend_name is not None: 

642 # Must run with the given backend. 

643 # `can_run` only used for better log and error messages. 

644 # Check `mutates_input` for logging, not behavior. 

645 backend_kwarg_msg = ( 

646 "No other backends will be attempted, because the backend was " 

647 f"specified with the `backend='{backend_name}'` keyword argument." 

648 ) 

649 extra_message = ( 

650 f"'{backend_name}' backend raised NotImplementedError when calling " 

651 f"'{self.name}'. {backend_kwarg_msg}" 

652 ) 

653 if not graph_backend_names or graph_backend_names == {backend_name}: 

654 # All graphs are backend graphs--no need to convert! 

655 if self._can_backend_run(backend_name, args, kwargs): 

656 return self._call_with_backend( 

657 backend_name, args, kwargs, extra_message=extra_message 

658 ) 

659 if self._does_backend_have(backend_name): 

660 extra = " for the given arguments" 

661 else: 

662 extra = "" 

663 raise NotImplementedError( 

664 f"'{self.name}' is not implemented by '{backend_name}' backend" 

665 f"{extra}. {backend_kwarg_msg}" 

666 ) 

667 if self._can_convert(backend_name, graph_backend_names): 

668 if self._can_backend_run(backend_name, args, kwargs): 

669 if self._will_call_mutate_input(args, kwargs): 

670 _logger.debug( 

671 "'%s' will mutate an input graph. This prevents automatic conversion " 

672 "to, and use of, backends listed in `nx.config.backend_priority`. " 

673 "Using backend specified by the " 

674 "`backend='%s'` keyword argument. This may change behavior by not " 

675 "mutating inputs.", 

676 self.name, 

677 backend_name, 

678 ) 

679 mutations = [] 

680 else: 

681 mutations = None 

682 rv = self._convert_and_call( 

683 backend_name, 

684 graph_backend_names, 

685 args, 

686 kwargs, 

687 extra_message=extra_message, 

688 mutations=mutations, 

689 ) 

690 if mutations: 

691 for cache, key in mutations: 

692 # If the call mutates inputs, then remove all inputs gotten 

693 # from cache. We do this after all conversions (and call) so 

694 # that a graph can be gotten from a cache multiple times. 

695 cache.pop(key, None) 

696 return rv 

697 if self._does_backend_have(backend_name): 

698 extra = " for the given arguments" 

699 else: 

700 extra = "" 

701 raise NotImplementedError( 

702 f"'{self.name}' is not implemented by '{backend_name}' backend" 

703 f"{extra}. {backend_kwarg_msg}" 

704 ) 

705 if len(graph_backend_names) == 1: 

706 maybe_s = "" 

707 graph_backend_names = f"'{next(iter(graph_backend_names))}'" 

708 else: 

709 maybe_s = "s" 

710 raise TypeError( 

711 f"'{self.name}' is unable to convert graph from backend{maybe_s} " 

712 f"{graph_backend_names} to '{backend_name}' backend, which was " 

713 f"specified with the `backend='{backend_name}'` keyword argument. " 

714 f"{backend_kwarg_msg}" 

715 ) 

716 

717 if self._will_call_mutate_input(args, kwargs): 

718 # The current behavior for functions that mutate input graphs: 

719 # 

720 # 1. If backend is specified by `backend=` keyword, use it (done above). 

721 # 2. If inputs are from one backend, try to use it. 

722 # 3. If all input graphs are instances of `nx.Graph`, then run with the 

723 # default "networkx" implementation. 

724 # 

725 # Do not automatically convert if a call will mutate inputs, because doing 

726 # so would change behavior. Hence, we should fail if there are multiple input 

727 # backends or if the input backend does not implement the function. However, 

728 # we offer a way for backends to circumvent this if they do not implement 

729 # this function: we will fall back to the default "networkx" implementation 

730 # without using conversions if all input graphs are subclasses of `nx.Graph`. 

731 mutate_msg = ( 

732 "conversions between backends (if configured) will not be attempted " 

733 "because the original input graph would not be mutated. Using the " 

734 "backend keyword e.g. `backend='some_backend'` will force conversions " 

735 "and not mutate the original input graph." 

736 ) 

737 fallback_msg = ( 

738 "This call will mutate inputs, so fall back to 'networkx' " 

739 "backend (without converting) since all input graphs are " 

740 "instances of nx.Graph and are hopefully compatible." 

741 ) 

742 if len(graph_backend_names) == 1: 

743 [backend_name] = graph_backend_names 

744 msg_template = ( 

745 f"Backend '{backend_name}' does not implement '{self.name}'%s. " 

746 f"This call will mutate an input, so automatic {mutate_msg}" 

747 ) 

748 # `can_run` is only used for better log and error messages 

749 try: 

750 if self._can_backend_run(backend_name, args, kwargs): 

751 return self._call_with_backend( 

752 backend_name, 

753 args, 

754 kwargs, 

755 extra_message=msg_template % " with these arguments", 

756 ) 

757 except NotImplementedError as exc: 

758 if all(isinstance(g, nx.Graph) for g in graphs_resolved.values()): 

759 _logger.debug( 

760 "Backend '%s' raised when calling '%s': %s. %s", 

761 backend_name, 

762 self.name, 

763 exc, 

764 fallback_msg, 

765 ) 

766 else: 

767 raise 

768 else: 

769 if fallback_to_nx and all( 

770 # Consider dropping the `isinstance` check here to allow 

771 # duck-type graphs, but let's wait for a backend to ask us. 

772 isinstance(g, nx.Graph) 

773 for g in graphs_resolved.values() 

774 ): 

775 # Log that we are falling back to networkx 

776 _logger.debug( 

777 "Backend '%s' can't run '%s'. %s", 

778 backend_name, 

779 self.name, 

780 fallback_msg, 

781 ) 

782 else: 

783 if self._does_backend_have(backend_name): 

784 extra = " with these arguments" 

785 else: 

786 extra = "" 

787 raise NotImplementedError(msg_template % extra) 

788 elif fallback_to_nx and all( 

789 # Consider dropping the `isinstance` check here to allow 

790 # duck-type graphs, but let's wait for a backend to ask us. 

791 isinstance(g, nx.Graph) 

792 for g in graphs_resolved.values() 

793 ): 

794 # Log that we are falling back to networkx 

795 _logger.debug( 

796 "'%s' was called with inputs from multiple backends: %s. %s", 

797 self.name, 

798 graph_backend_names, 

799 fallback_msg, 

800 ) 

801 else: 

802 raise RuntimeError( 

803 f"'{self.name}' will mutate an input, but it was called with " 

804 f"inputs from multiple backends: {graph_backend_names}. " 

805 f"Automatic {mutate_msg}" 

806 ) 

807 # At this point, no backends are available to handle the call with 

808 # the input graph types, but if the input graphs are compatible 

809 # nx.Graph instances, fall back to networkx without converting. 

810 return self.orig_func(*args, **kwargs) 

811 

812 # We may generalize fallback configuration as e.g. `nx.config.backend_fallback` 

813 if fallback_to_nx or not graph_backend_names: 

814 # Use "networkx" by default if there are no inputs from backends. 

815 # For example, graph generators should probably return NetworkX graphs 

816 # instead of raising NotImplementedError. 

817 backend_fallback = ["networkx"] 

818 else: 

819 backend_fallback = [] 

820 

821 # ########################## 

822 # # How this behaves today # 

823 # ########################## 

824 # 

825 # The prose below describes the implementation and a *possible* way to 

826 # generalize "networkx" as "just another backend". The code is structured 

827 # to perhaps someday support backend-to-backend conversions (including 

828 # simply passing objects from one backend directly to another backend; 

829 # the dispatch machinery does not necessarily need to perform conversions), 

830 # but since backend-to-backend matching is not yet supported, the following 

831 # code is merely a convenient way to implement dispatch behaviors that have 

832 # been carefully developed since NetworkX 3.0 and to include falling back 

833 # to the default NetworkX implementation. 

834 # 

835 # The current behavior for functions that don't mutate input graphs: 

836 # 

837 # 1. If backend is specified by `backend=` keyword, use it (done above). 

838 # 2. If input is from a backend other than "networkx", try to use it. 

839 # - Note: if present, "networkx" graphs will be converted to the backend. 

840 # 3. If input is from "networkx" (or no backend), try to use backends from 

841 # `backend_priority` before running with the default "networkx" implementation. 

842 # 4. If configured, "fall back" and run with the default "networkx" implementation. 

843 # 

844 # ################################################ 

845 # # How this is implemented and may work someday # 

846 # ################################################ 

847 # 

848 # Let's determine the order of backends we should try according 

849 # to `backend_priority`, `backend_fallback`, and input backends. 

850 # There are two† dimensions of priorities to consider: 

851 # backend_priority > unspecified > backend_fallback 

852 # and 

853 # backend of an input > not a backend of an input 

854 # These are combined to form five groups of priorities as such: 

855 # 

856 #                      input   ~input 

857 #                    +-------+-------+ 

858 #  backend_priority  |   1   |   2   | 

859 #  unspecified       |   3   |  N/A  | (if only 1) 

860 #  backend_fallback  |   4   |   5   | 

861 #                    +-------+-------+ 

862 # 

863 # This matches the behaviors we developed in versions 3.0 to 3.2, it 

864 # ought to cover virtually all use cases we expect, and I (@eriknw) don't 

865 # think it can be done any simpler (although it can be generalized further 

866 # and made to be more complicated to capture 100% of *possible* use cases). 

867 # Some observations: 

868 # 

869 # 1. If an input is in `backend_priority`, it will be used before trying a 

870 # backend that is higher priority in `backend_priority` and not an input. 

871 # 2. To prioritize converting from one backend to another even if both implement 

872 # a function, list one in `backend_priority` and one in `backend_fallback`. 

873 # 3. To disable conversions, set `backend_priority` and `backend_fallback` to []. 

874 # 

875 # †: There is actually a third dimension of priorities: 

876 # should_run == True > should_run == False 

877 # Backends with `can_run == True` and `should_run == False` are tried last. 

878 # 

879 seen = set() 

880 group1 = [] # In backend_priority, and an input 

881 group2 = [] # In backend_priority, but not an input 

882 for name in backend_priority: 

883 if name in seen: 

884 continue 

885 seen.add(name) 

886 if name in graph_backend_names: 

887 group1.append(name) 

888 else: 

889 group2.append(name) 

890 group4 = [] # In backend_fallback, and an input 

891 group5 = [] # In backend_fallback, but not an input 

892 for name in backend_fallback: 

893 if name in seen: 

894 continue 

895 seen.add(name) 

896 if name in graph_backend_names: 

897 group4.append(name) 

898 else: 

899 group5.append(name) 

900 # An input, but not in backend_priority or backend_fallback. 

901 group3 = graph_backend_names - seen 

902 if len(group3) > 1: 

903 # `group3` backends are not configured for automatic conversion or fallback. 

904 # There are at least two issues if this group contains multiple backends: 

905 # 

906 # 1. How should we prioritize them? We have no good way to break ties. 

907 # Although we could arbitrarily choose alphabetical or left-most, 

908 # let's follow the Zen of Python and refuse the temptation to guess. 

909 # 2. We probably shouldn't automatically convert to these backends, 

910 # because we are not configured to do so. 

911 # 

912 # (2) is important to allow disabling all conversions by setting both 

913 # `nx.config.backend_priority` and `nx.config.backend_fallback` to []. 

914 # 

915 # If there is a single backend in `group3`, then giving it priority over 

916 # the fallback backends is what is generally expected. For example, this 

917 # allows input graphs of `backend_fallback` backends (such as "networkx") 

918 # to be converted to, and run with, the unspecified backend. 

919 _logger.debug( 

920 "Call to '%s' has inputs from multiple backends, %s, that " 

921 "have no priority set in `nx.config.backend_priority`, " 

922 "so automatic conversions to " 

923 "these backends will not be attempted.", 

924 self.name, 

925 group3, 

926 ) 

927 group3 = () 

928 

929 try_order = list(itertools.chain(group1, group2, group3, group4, group5)) 
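# Worked example (hypothetical backend names): suppose
# backend_priority == ["parallel", "cugraph"], fallback_to_nx is enabled so
# backend_fallback == ["networkx"], and the inputs come from the
# {"cugraph", "networkx"} backends. Then:
#     group1 == ["cugraph"]     (in backend_priority, and an input)
#     group2 == ["parallel"]    (in backend_priority, not an input)
#     group3 == set()           (every input backend is already configured)
#     group4 == ["networkx"]    (in backend_fallback, and an input)
#     group5 == []              (in backend_fallback, not an input)
# so try_order == ["cugraph", "parallel", "networkx"].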

930 if len(try_order) > 1: 

931 # Should we consider adding an option for more verbose logging? 

932 # For example, we could explain the order of `try_order` in detail. 

933 _logger.debug( 

934 "Call to '%s' has inputs from %s backends, and will try to use " 

935 "backends in the following order: %s", 

936 self.name, 

937 graph_backend_names or "no", 

938 try_order, 

939 ) 

940 backends_to_try_again = [] 

941 for is_not_first, backend_name in enumerate(try_order): 

942 if is_not_first: 

943 _logger.debug("Trying next backend: '%s'", backend_name) 

944 try: 

945 if not graph_backend_names or graph_backend_names == {backend_name}: 

946 if self._can_backend_run(backend_name, args, kwargs): 

947 return self._call_with_backend(backend_name, args, kwargs) 

948 elif self._can_convert( 

949 backend_name, graph_backend_names 

950 ) and self._can_backend_run(backend_name, args, kwargs): 

951 if self._should_backend_run(backend_name, args, kwargs): 

952 rv = self._convert_and_call( 

953 backend_name, graph_backend_names, args, kwargs 

954 ) 

955 if ( 

956 self._returns_graph 

957 and graph_backend_names 

958 and backend_name not in graph_backend_names 

959 ): 

960 # If the function has graph inputs and graph output, we try 

961 # to make it so the backend of the return type will match the 

962 # backend of the input types. In case this is not possible, 

963 # let's tell the user that the backend of the return graph 

964 # has changed. Perhaps we could try to convert back, but 

965 # "fallback" backends for graph generators should typically 

966 # be compatible with NetworkX graphs. 

967 _logger.debug( 

968 "Call to '%s' is returning a graph from a different " 

969 "backend! It has inputs from %s backends, but ran with " 

970 "'%s' backend and is returning graph from '%s' backend", 

971 self.name, 

972 graph_backend_names, 

973 backend_name, 

974 backend_name, 

975 ) 

976 return rv 

977 # `should_run` is False, but `can_run` is True, so try again later 

978 backends_to_try_again.append(backend_name) 

979 except NotImplementedError as exc: 

980 _logger.debug( 

981 "Backend '%s' raised when calling '%s': %s", 

982 backend_name, 

983 self.name, 

984 exc, 

985 ) 

986 

987 # We are about to fail. Let's try backends with can_run=True and should_run=False. 

988 # This is unlikely to help today since we try to run with "networkx" before this. 

989 for backend_name in backends_to_try_again: 

990 _logger.debug( 

991 "Trying backend: '%s' (ignoring `should_run=False`)", backend_name 

992 ) 

993 try: 

994 rv = self._convert_and_call( 

995 backend_name, graph_backend_names, args, kwargs 

996 ) 

997 if ( 

998 self._returns_graph 

999 and graph_backend_names 

1000 and backend_name not in graph_backend_names 

1001 ): 

1002 _logger.debug( 

1003 "Call to '%s' is returning a graph from a different " 

1004 "backend! It has inputs from %s backends, but ran with " 

1005 "'%s' backend and is returning graph from '%s' backend", 

1006 self.name, 

1007 graph_backend_names, 

1008 backend_name, 

1009 backend_name, 

1010 ) 

1011 return rv 

1012 except NotImplementedError as exc: 

1013 _logger.debug( 

1014 "Backend '%s' raised when calling '%s': %s", 

1015 backend_name, 

1016 self.name, 

1017 exc, 

1018 ) 

1019 # As a final effort, we could try to convert and run with `group3` backends 

1020 # that we discarded when `len(group3) > 1`, but let's not consider doing 

1021 # so until there is a reasonable request for it. 

1022 

1023 if len(unspecified_backends := graph_backend_names - seen) > 1: 

1024 raise TypeError( 

1025 f"Unable to convert inputs from {graph_backend_names} backends and " 

1026 f"run '{self.name}'. NetworkX is configured to automatically convert " 

1027 f"to {try_order} backends. To remedy this, you may enable automatic " 

1028 f"conversion to {unspecified_backends} backends by adding them to " 

1029 "`nx.config.backend_priority`, or you " 

1030 "may specify a backend to use with the `backend=` keyword argument." 

1031 ) 

1032 if "networkx" not in self.backends: 

1033 extra = ( 

1034 " This function is included in NetworkX as an API to dispatch to " 

1035 "other backends." 

1036 ) 

1037 else: 

1038 extra = "" 

1039 raise NotImplementedError( 

1040 f"'{self.name}' is not implemented by {try_order} backends. To remedy " 

1041 "this, you may enable automatic conversion to more backends (including " 

1042 "'networkx') by adding them to `nx.config.backend_priority`, " 

1043 "or you may specify a backend to use with " 

1044 f"the `backend=` keyword argument.{extra}" 

1045 ) 

1046 

1047 # Dispatch only if there exist any installed backend(s) 

1048 __call__: typing.Callable = ( 

1049 _call_if_any_backends_installed if backends else _call_if_no_backends_installed 

1050 ) 

1051 

1052 def _will_call_mutate_input(self, args, kwargs): 

1053 # Fairly few nx functions mutate the input graph. Most that do, always do. 

1054 # So a boolean input indicates "always" or "never". 

1055 if isinstance((mutates_input := self.mutates_input), bool): 

1056 return mutates_input 

1057 

1058 # The ~10 other nx functions either use "copy=True" to control mutation or 

1059 # an arg naming an edge/node attribute to mutate (None means no mutation). 

1060 # Now `mutates_input` is a dict keyed by arg_name to its func-sig position. 

1061 # The `copy=` args are keyed as "not copy" to mean "negate the copy argument". 

1062 # Keys w/o "not " mean the call mutates only when the arg value `is not None`. 

1063 # 

1064 # This section might need different code if new functions mutate in new ways. 

1065 # 

1066 # NetworkX doesn't have any `mutates_input` dicts with more than 1 item. 

1067 # But we treat it like it might have more than 1 item for generality. 

1068 n = len(args) 

1069 return any( 

1070 (args[arg_pos] if n > arg_pos else kwargs.get(arg_name)) is not None 

1071 if not arg_name.startswith("not ") 

1072 # This assumes that e.g. `copy=True` is the default 

1073 else not (args[arg_pos] if n > arg_pos else kwargs.get(arg_name[4:], True)) 

1074 for arg_name, arg_pos in mutates_input.items() 

1075 ) 
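# Worked example (hypothetical function): for a function declared with
# `mutates_input={"not copy": 2}` and signature `func(G, nodes, copy=True)`,
# the call mutates its input exactly when the effective `copy` value is False:
#
#     func(G, nodes)              # copy defaults to True   -> no mutation
#     func(G, nodes, copy=False)  # keyword copy=False      -> mutation
#     func(G, nodes, False)       # positional copy=False   -> mutation
#
# A key without the "not " prefix (e.g. `{"attr": 3}`) instead means the call
# mutates whenever that argument is not None.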

1076 

1077 def _can_convert(self, backend_name, graph_backend_names): 

1078 # Backend-to-backend conversion not supported yet. 

1079 # We can only convert to and from networkx. 

1080 rv = backend_name == "networkx" or graph_backend_names.issubset( 

1081 {"networkx", backend_name} 

1082 ) 

1083 if not rv: 

1084 _logger.debug( 

1085 "Unable to convert from %s backends to '%s' backend", 

1086 graph_backend_names, 

1087 backend_name, 

1088 ) 

1089 return rv 
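# Quick sketch of the rule above (backend names are illustrative):
#
#     _can_convert("networkx", {"parallel"})              -> True   (anything -> networkx)
#     _can_convert("parallel", {"networkx"})              -> True   (networkx -> backend)
#     _can_convert("parallel", {"networkx", "parallel"})  -> True   (mixed with already-converted inputs)
#     _can_convert("parallel", {"cugraph"})               -> False  (backend-to-backend not supported)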

1090 

1091 def _does_backend_have(self, backend_name): 

1092 """Does the specified backend have this algorithm?""" 

1093 if backend_name == "networkx": 

1094 return "networkx" in self.backends 

1095 # Inspect the backend; don't trust metadata used to create `self.backends` 

1096 backend = _load_backend(backend_name) 

1097 return hasattr(backend, self.name) 

1098 

1099 def _can_backend_run(self, backend_name, args, kwargs): 

1100 """Can the specified backend run this algorithm with these arguments?""" 

1101 if backend_name == "networkx": 

1102 return "networkx" in self.backends 

1103 backend = _load_backend(backend_name) 

1104 # `backend.can_run` and `backend.should_run` may return strings that describe 

1105 # why they can't or shouldn't be run. 

1106 if not hasattr(backend, self.name): 

1107 _logger.debug( 

1108 "Backend '%s' does not implement '%s'", backend_name, self.name 

1109 ) 

1110 return False 

1111 can_run = backend.can_run(self.name, args, kwargs) 

1112 if isinstance(can_run, str) or not can_run: 

1113 reason = f", because: {can_run}" if isinstance(can_run, str) else "" 

1114 _logger.debug( 

1115 "Backend '%s' can't run `%s` with arguments: %s%s", 

1116 backend_name, 

1117 self.name, 

1118 _LazyArgsRepr(self, args, kwargs), 

1119 reason, 

1120 ) 

1121 return False 

1122 return True 

1123 

1124 def _should_backend_run(self, backend_name, args, kwargs): 

1125 """Should the specified backend run this algorithm with these arguments? 

1126 

1127 Note that this does not check ``backend.can_run``. 

1128 """ 

1129 # `backend.can_run` and `backend.should_run` may return strings that describe 

1130 # why they can't or shouldn't be run. 

1131 # `_should_backend_run` may assume that `_can_backend_run` returned True. 

1132 if backend_name == "networkx": 

1133 return True 

1134 backend = _load_backend(backend_name) 

1135 should_run = backend.should_run(self.name, args, kwargs) 

1136 if isinstance(should_run, str) or not should_run: 

1137 reason = f", because: {should_run}" if isinstance(should_run, str) else "" 

1138 _logger.debug( 

1139 "Backend '%s' shouldn't run `%s` with arguments: %s%s", 

1140 backend_name, 

1141 self.name, 

1142 _LazyArgsRepr(self, args, kwargs), 

1143 reason, 

1144 ) 

1145 return False 

1146 return True 

1147 

1148 def _convert_arguments(self, backend_name, args, kwargs, *, use_cache, mutations): 

1149 """Convert graph arguments to the specified backend. 

1150 

1151 Returns 

1152 ------- 

1153 args tuple and kwargs dict 

1154 """ 

1155 bound = self.__signature__.bind(*args, **kwargs) 

1156 bound.apply_defaults() 

1157 if not self.graphs: 

1158 bound_kwargs = bound.kwargs 

1159 del bound_kwargs["backend"] 

1160 return bound.args, bound_kwargs 

1161 if backend_name == "networkx": 

1162 # `backend_interface.convert_from_nx` preserves everything 

1163 preserve_edge_attrs = preserve_node_attrs = preserve_graph_attrs = True 

1164 else: 

1165 preserve_edge_attrs = self.preserve_edge_attrs 

1166 preserve_node_attrs = self.preserve_node_attrs 

1167 preserve_graph_attrs = self.preserve_graph_attrs 

1168 edge_attrs = self.edge_attrs 

1169 node_attrs = self.node_attrs 

1170 # Convert graphs into backend graph-like object 

1171 # Include the edge and/or node labels if provided to the algorithm 

1172 if preserve_edge_attrs is False: 

1173 # e.g. `preserve_edge_attrs=False` 

1174 pass 

1175 elif preserve_edge_attrs is True: 

1176 # e.g. `preserve_edge_attrs=True` 

1177 edge_attrs = None 

1178 elif isinstance(preserve_edge_attrs, str): 

1179 if bound.arguments[preserve_edge_attrs] is True or callable( 

1180 bound.arguments[preserve_edge_attrs] 

1181 ): 

1182 # e.g. `preserve_edge_attrs="attr"` and `func(attr=True)` 

1183 # e.g. `preserve_edge_attrs="attr"` and `func(attr=myfunc)` 

1184 preserve_edge_attrs = True 

1185 edge_attrs = None 

1186 elif bound.arguments[preserve_edge_attrs] is False and ( 

1187 isinstance(edge_attrs, str) 

1188 and edge_attrs == preserve_edge_attrs 

1189 or isinstance(edge_attrs, dict) 

1190 and preserve_edge_attrs in edge_attrs 

1191 ): 

1192 # e.g. `preserve_edge_attrs="attr"` and `func(attr=False)` 

1193 # Treat `False` argument as meaning "preserve_edge_data=False" 

1194 # and not `False` as the edge attribute to use. 

1195 preserve_edge_attrs = False 

1196 edge_attrs = None 

1197 else: 

1198 # e.g. `preserve_edge_attrs="attr"` and `func(attr="weight")` 

1199 preserve_edge_attrs = False 

1200 # Else: e.g. `preserve_edge_attrs={"G": {"weight": 1}}` 

1201 

1202 if edge_attrs is None: 

1203 # May have been set to None above b/c all attributes are preserved 

1204 pass 

1205 elif isinstance(edge_attrs, str): 

1206 if edge_attrs[0] == "[": 

1207 # e.g. `edge_attrs="[edge_attributes]"` (argument of list of attributes) 

1208 # e.g. `func(edge_attributes=["foo", "bar"])` 

1209 edge_attrs = { 

1210 edge_attr: 1 for edge_attr in bound.arguments[edge_attrs[1:-1]] 

1211 } 

1212 elif callable(bound.arguments[edge_attrs]): 

1213 # e.g. `edge_attrs="weight"` and `func(weight=myfunc)` 

1214 preserve_edge_attrs = True 

1215 edge_attrs = None 

1216 elif bound.arguments[edge_attrs] is not None: 

1217 # e.g. `edge_attrs="weight"` and `func(weight="foo")` (default of 1) 

1218 edge_attrs = {bound.arguments[edge_attrs]: 1} 

1219 elif self.name == "to_numpy_array" and hasattr( 

1220 bound.arguments["dtype"], "names" 

1221 ): 

1222 # Custom handling: attributes may be obtained from `dtype` 

1223 edge_attrs = { 

1224 edge_attr: 1 for edge_attr in bound.arguments["dtype"].names 

1225 } 

1226 else: 

1227 # e.g. `edge_attrs="weight"` and `func(weight=None)` 

1228 edge_attrs = None 

1229 else: 

1230 # e.g. `edge_attrs={"attr": "default"}` and `func(attr="foo", default=7)` 

1231 # e.g. `edge_attrs={"attr": 0}` and `func(attr="foo")` 

1232 edge_attrs = { 

1233 edge_attr: bound.arguments.get(val, 1) if isinstance(val, str) else val 

1234 for key, val in edge_attrs.items() 

1235 if (edge_attr := bound.arguments[key]) is not None 

1236 } 

1237 

1238 if preserve_node_attrs is False: 

1239 # e.g. `preserve_node_attrs=False` 

1240 pass 

1241 elif preserve_node_attrs is True: 

1242 # e.g. `preserve_node_attrs=True` 

1243 node_attrs = None 

1244 elif isinstance(preserve_node_attrs, str): 

1245 if bound.arguments[preserve_node_attrs] is True or callable( 

1246 bound.arguments[preserve_node_attrs] 

1247 ): 

1248 # e.g. `preserve_node_attrs="attr"` and `func(attr=True)` 

1249 # e.g. `preserve_node_attrs="attr"` and `func(attr=myfunc)` 

1250 preserve_node_attrs = True 

1251 node_attrs = None 

1252 elif bound.arguments[preserve_node_attrs] is False and ( 

1253 isinstance(node_attrs, str) 

1254 and node_attrs == preserve_node_attrs 

1255 or isinstance(node_attrs, dict) 

1256 and preserve_node_attrs in node_attrs 

1257 ): 

1258 # e.g. `preserve_node_attrs="attr"` and `func(attr=False)` 

1259 # Treat `False` argument as meaning "preserve_node_data=False" 

1260 # and not `False` as the node attribute to use. Is this used? 

1261 preserve_node_attrs = False 

1262 node_attrs = None 

1263 else: 

1264 # e.g. `preserve_node_attrs="attr"` and `func(attr="weight")` 

1265 preserve_node_attrs = False 

1266 # Else: e.g. `preserve_node_attrs={"G": {"pos": None}}` 

1267 

1268 if node_attrs is None: 

1269 # May have been set to None above b/c all attributes are preserved 

1270 pass 

1271 elif isinstance(node_attrs, str): 

1272 if node_attrs[0] == "[": 

1273 # e.g. `node_attrs="[node_attributes]"` (argument of list of attributes) 

1274 # e.g. `func(node_attributes=["foo", "bar"])` 

1275 node_attrs = { 

1276 node_attr: None for node_attr in bound.arguments[node_attrs[1:-1]] 

1277 } 

1278 elif callable(bound.arguments[node_attrs]): 

1279 # e.g. `node_attrs="weight"` and `func(weight=myfunc)` 

1280 preserve_node_attrs = True 

1281 node_attrs = None 

1282 elif bound.arguments[node_attrs] is not None: 

1283 # e.g. `node_attrs="weight"` and `func(weight="foo")` 

1284 node_attrs = {bound.arguments[node_attrs]: None} 

1285 else: 

1286 # e.g. `node_attrs="weight"` and `func(weight=None)` 

1287 node_attrs = None 

1288 else: 

1289 # e.g. `node_attrs={"attr": "default"}` and `func(attr="foo", default=7)` 

1290 # e.g. `node_attrs={"attr": 0}` and `func(attr="foo")` 

1291 node_attrs = { 

1292 node_attr: bound.arguments.get(val) if isinstance(val, str) else val 

1293 for key, val in node_attrs.items() 

1294 if (node_attr := bound.arguments[key]) is not None 

1295 } 

1296 

1297 # It should be safe to assume that we either have networkx graphs or backend graphs. 

1298 # Future work: allow conversions between backends. 

1299 for gname in self.graphs: 

1300 if gname in self.list_graphs: 

1301 bound.arguments[gname] = [ 

1302 self._convert_graph( 

1303 backend_name, 

1304 g, 

1305 edge_attrs=edge_attrs, 

1306 node_attrs=node_attrs, 

1307 preserve_edge_attrs=preserve_edge_attrs, 

1308 preserve_node_attrs=preserve_node_attrs, 

1309 preserve_graph_attrs=preserve_graph_attrs, 

1310 graph_name=gname, 

1311 use_cache=use_cache, 

1312 mutations=mutations, 

1313 ) 

1314 if getattr(g, "__networkx_backend__", "networkx") != backend_name 

1315 else g 

1316 for g in bound.arguments[gname] 

1317 ] 

1318 else: 

1319 graph = bound.arguments[gname] 

1320 if graph is None: 

1321 if gname in self.optional_graphs: 

1322 continue 

1323 raise TypeError( 

1324 f"Missing required graph argument `{gname}` in {self.name} function" 

1325 ) 

1326 if isinstance(preserve_edge_attrs, dict): 

1327 preserve_edges = False 

1328 edges = preserve_edge_attrs.get(gname, edge_attrs) 

1329 else: 

1330 preserve_edges = preserve_edge_attrs 

1331 edges = edge_attrs 

1332 if isinstance(preserve_node_attrs, dict): 

1333 preserve_nodes = False 

1334 nodes = preserve_node_attrs.get(gname, node_attrs) 

1335 else: 

1336 preserve_nodes = preserve_node_attrs 

1337 nodes = node_attrs 

1338 if isinstance(preserve_graph_attrs, set): 

1339 preserve_graph = gname in preserve_graph_attrs 

1340 else: 

1341 preserve_graph = preserve_graph_attrs 

1342 if getattr(graph, "__networkx_backend__", "networkx") != backend_name: 

1343 bound.arguments[gname] = self._convert_graph( 

1344 backend_name, 

1345 graph, 

1346 edge_attrs=edges, 

1347 node_attrs=nodes, 

1348 preserve_edge_attrs=preserve_edges, 

1349 preserve_node_attrs=preserve_nodes, 

1350 preserve_graph_attrs=preserve_graph, 

1351 graph_name=gname, 

1352 use_cache=use_cache, 

1353 mutations=mutations, 

1354 ) 

1355 bound_kwargs = bound.kwargs 

1356 del bound_kwargs["backend"] 

1357 return bound.args, bound_kwargs 

1358 

1359 def _convert_graph( 

1360 self, 

1361 backend_name, 

1362 graph, 

1363 *, 

1364 edge_attrs, 

1365 node_attrs, 

1366 preserve_edge_attrs, 

1367 preserve_node_attrs, 

1368 preserve_graph_attrs, 

1369 graph_name, 

1370 use_cache, 

1371 mutations, 

1372 ): 

1373 nx_cache = getattr(graph, "__networkx_cache__", None) if use_cache else None 

1374 if nx_cache is not None: 

1375 cache = nx_cache.setdefault("backends", {}).setdefault(backend_name, {}) 

1376 key = _get_cache_key( 

1377 edge_attrs=edge_attrs, 

1378 node_attrs=node_attrs, 

1379 preserve_edge_attrs=preserve_edge_attrs, 

1380 preserve_node_attrs=preserve_node_attrs, 

1381 preserve_graph_attrs=preserve_graph_attrs, 

1382 ) 

1383 compat_key, rv = _get_from_cache(cache, key, mutations=mutations) 

1384 if rv is not None: 

1385 if "cache" not in nx.config.warnings_to_ignore: 

1386 warnings.warn( 

1387 "Note: conversions to backend graphs are saved to cache " 

1388 "(`G.__networkx_cache__` on the original graph) by default." 

1389 "\n\nThis warning means the cached graph is being used " 

1390 f"for the {backend_name!r} backend in the " 

1391 f"call to {self.name}.\n\nFor the cache to be consistent " 

1392 "(i.e., correct), the input graph must not have been " 

1393 "manually mutated since the cached graph was created. " 

1394 "Examples of manually mutating the graph data structures " 

1395 "resulting in an inconsistent cache include:\n\n" 

1396 " >>> G[u][v][key] = val\n\n" 

1397 "and\n\n" 

1398 " >>> for u, v, d in G.edges(data=True):\n" 

1399 " ... d[key] = val\n\n" 

1400 "Using methods such as `G.add_edge(u, v, weight=val)` " 

1401 "will correctly clear the cache to keep it consistent. " 

1402 "You may also use `G.__networkx_cache__.clear()` to " 

1403 "manually clear the cache, or set `G.__networkx_cache__` " 

1404 "to None to disable caching for G. Enable or disable caching " 

1405 "globally via `nx.config.cache_converted_graphs` config.\n\n" 

1406 "To disable this warning:\n\n" 

1407 ' >>> nx.config.warnings_to_ignore.add("cache")\n' 

1408 ) 

1409 if rv == FAILED_TO_CONVERT: 

1410 # NotImplementedError is reasonable to use since the backend doesn't 

1411 # implement this conversion. However, this will be different from 

1412 # the original exception that the backend raised when it failed. 

1413 # Using NotImplementedError allows the next backend to be attempted. 

1414 raise NotImplementedError( 

1415 "Graph conversion aborted: unable to convert graph to " 

1416 f"'{backend_name}' backend in call to `{self.name}', " 

1417 "because this conversion has previously failed." 

1418 ) 

1419 _logger.debug( 

1420 "Using cached converted graph (from '%s' to '%s' backend) " 

1421 "in call to '%s' for '%s' argument", 

1422 getattr(graph, "__networkx_backend__", None), 

1423 backend_name, 

1424 self.name, 

1425 graph_name, 

1426 ) 

1427 return rv 

1428 

1429 if backend_name == "networkx": 

1430 # Perhaps we should check that "__networkx_backend__" attribute exists 

1431 # and return the original object if not. 

1432 if not hasattr(graph, "__networkx_backend__"): 

1433 _logger.debug( 

1434 "Unable to convert input to 'networkx' backend in call to '%s' for " 

1435 "'%s argument, because it is not from a backend (i.e., it does not " 

1436 "have `G.__networkx_backend__` attribute). Using the original " 

1437 "object: %s", 

1438 self.name, 

1439 graph_name, 

1440 graph, 

1441 ) 

1442 # This may fail, but let it fail in the networkx function 

1443 return graph 

1444 backend = _load_backend(graph.__networkx_backend__) 

1445 try: 

1446 rv = backend.convert_to_nx(graph) 

1447 except Exception: 

1448 if nx_cache is not None: 

1449 _set_to_cache(cache, key, FAILED_TO_CONVERT) 

1450 raise 

1451 else: 

1452 backend = _load_backend(backend_name) 

1453 try: 

1454 rv = backend.convert_from_nx( 

1455 graph, 

1456 edge_attrs=edge_attrs, 

1457 node_attrs=node_attrs, 

1458 preserve_edge_attrs=preserve_edge_attrs, 

1459 preserve_node_attrs=preserve_node_attrs, 

1460 # Always preserve graph attrs when we are caching b/c this should be 

1461 # cheap and may help prevent extra (unnecessary) conversions. Because 

1462 # we do this, we don't need `preserve_graph_attrs` in the cache key. 

1463 preserve_graph_attrs=preserve_graph_attrs or nx_cache is not None, 

1464 name=self.name, 

1465 graph_name=graph_name, 

1466 ) 

1467 except Exception: 

1468 if nx_cache is not None: 

1469 _set_to_cache(cache, key, FAILED_TO_CONVERT) 

1470 raise 

1471 if nx_cache is not None: 

1472 _set_to_cache(cache, key, rv) 

1473 _logger.debug( 

1474 "Caching converted graph (from '%s' to '%s' backend) " 

1475 "in call to '%s' for '%s' argument", 

1476 getattr(graph, "__networkx_backend__", None), 

1477 backend_name, 

1478 self.name, 

1479 graph_name, 

1480 ) 

1481 

1482 return rv 

1483 
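# The cache machinery above can also be steered from user code. A minimal
# sketch using only the hooks named in the warning message in `_convert_graph`;
# "some_backend" is a placeholder for whichever conversion backend is installed:
#
#     >>> import networkx as nx
#     >>> G = nx.path_graph(5)
#     >>> nx.config.cache_converted_graphs = True    # global switch; enables caching
#     >>> # After a dispatched call, the converted graph is stored on the input graph:
#     >>> #     G.__networkx_cache__["backends"]["some_backend"]
#     >>> G.__networkx_cache__.clear()                # drop all cached conversions for G
#     >>> G.__networkx_cache__ = None                 # or disable caching for G entirely
#     >>> nx.config.warnings_to_ignore.add("cache")   # silence the cache-reuse warning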

1484 def _call_with_backend(self, backend_name, args, kwargs, *, extra_message=None): 

1485 """Call this dispatchable function with a backend without converting inputs.""" 

1486 if backend_name == "networkx": 

1487 return self.orig_func(*args, **kwargs) 

1488 backend = _load_backend(backend_name) 

1489 _logger.debug( 

1490 "Using backend '%s' for call to '%s' with arguments: %s", 

1491 backend_name, 

1492 self.name, 

1493 _LazyArgsRepr(self, args, kwargs), 

1494 ) 

1495 try: 

1496 return getattr(backend, self.name)(*args, **kwargs) 

1497 except NotImplementedError as exc: 

1498 if extra_message is not None: 

1499 _logger.debug( 

1500 "Backend '%s' raised when calling '%s': %s", 

1501 backend_name, 

1502 self.name, 

1503 exc, 

1504 ) 

1505 raise NotImplementedError(extra_message) from exc 

1506 raise 

1507 
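# `_call_with_backend` and `_convert_graph` only touch a small surface of a
# loaded backend: an attribute named after the dispatchable function, plus
# `convert_from_nx` / `convert_to_nx`. A rough sketch of an object exposing that
# surface (the class name and `some_nx_function` are illustrative only; real
# backends are discovered and loaded elsewhere):
#
#     >>> class SketchBackend:
#     ...     @staticmethod
#     ...     def convert_from_nx(graph, *, edge_attrs=None, node_attrs=None,
#     ...                         preserve_edge_attrs=False, preserve_node_attrs=False,
#     ...                         preserve_graph_attrs=False, name=None, graph_name=None):
#     ...         return graph   # pretend conversion to a backend graph type
#     ...     @staticmethod
#     ...     def convert_to_nx(obj):
#     ...         return obj     # pretend conversion back to networkx
#     ...     @staticmethod
#     ...     def some_nx_function(G, *args, **kwargs):
#     ...         raise NotImplementedError   # lets dispatch move on, as handled above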

1508 def _convert_and_call( 

1509 self, 

1510 backend_name, 

1511 input_backend_names, 

1512 args, 

1513 kwargs, 

1514 *, 

1515 extra_message=None, 

1516 mutations=None, 

1517 ): 

1518 """Call this dispatchable function with a backend after converting inputs. 

1519 

1520 Parameters 

1521 ---------- 

1522 backend_name : str 

1523 input_backend_names : set[str] 

1524 args : arguments tuple 

1525 kwargs : keywords dict 

1526 extra_message : str, optional 

1527 Additional message to log if NotImplementedError is raised by backend. 

1528 mutations : list, optional 

1529 Used to clear objects retrieved from the cache if the inputs will be mutated. 

1530 """ 

1531 if backend_name == "networkx": 

1532 func = self.orig_func 

1533 else: 

1534 backend = _load_backend(backend_name) 

1535 func = getattr(backend, self.name) 

1536 other_backend_names = input_backend_names - {backend_name} 

1537 _logger.debug( 

1538 "Converting input graphs from %s backend%s to '%s' backend for call to '%s'", 

1539 other_backend_names 

1540 if len(other_backend_names) > 1 

1541 else f"'{next(iter(other_backend_names))}'", 

1542 "s" if len(other_backend_names) > 1 else "", 

1543 backend_name, 

1544 self.name, 

1545 ) 

1546 try: 

1547 converted_args, converted_kwargs = self._convert_arguments( 

1548 backend_name, 

1549 args, 

1550 kwargs, 

1551 use_cache=nx.config.cache_converted_graphs, 

1552 mutations=mutations, 

1553 ) 

1554 except NotImplementedError as exc: 

1555 # Only log the exception if we are adding an extra message 

1556 # because we don't want to lose any information. 

1557 _logger.debug( 

1558 "Failed to convert graphs from %s to '%s' backend for call to '%s'" 

1559 + ("" if extra_message is None else ": %s"), 

1560 input_backend_names, 

1561 backend_name, 

1562 self.name, 

1563 *(() if extra_message is None else (exc,)), 

1564 ) 

1565 if extra_message is not None: 

1566 raise NotImplementedError(extra_message) from exc 

1567 raise 

1568 if backend_name != "networkx": 

1569 _logger.debug( 

1570 "Using backend '%s' for call to '%s' with arguments: %s", 

1571 backend_name, 

1572 self.name, 

1573 _LazyArgsRepr(self, converted_args, converted_kwargs), 

1574 ) 

1575 try: 

1576 return func(*converted_args, **converted_kwargs) 

1577 except NotImplementedError as exc: 

1578 if extra_message is not None: 

1579 _logger.debug( 

1580 "Backend '%s' raised when calling '%s': %s", 

1581 backend_name, 

1582 self.name, 

1583 exc, 

1584 ) 

1585 raise NotImplementedError(extra_message) from exc 

1586 raise 

1587 
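# From the caller's side, the convert-then-call path above is typically reached
# by passing the `backend=` keyword, or by listing backend names in
# `nx.config.backend_priority` so they are tried automatically. A sketch;
# "some_backend" is a placeholder and must actually be installed for the
# commented call to succeed:
#
#     >>> import networkx as nx
#     >>> G = nx.karate_club_graph()
#     >>> # nx.pagerank(G, backend="some_backend")   # convert G, then run on that backend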

1588 def _convert_and_call_for_tests( 

1589 self, backend_name, args, kwargs, *, fallback_to_nx=False 

1590 ): 

1591 """Call this dispatchable function with a backend; for use with testing.""" 

1592 backend = _load_backend(backend_name) 

1593 if not self._can_backend_run(backend_name, args, kwargs): 

1594 if fallback_to_nx or not self.graphs: 

1595 if fallback_to_nx: 

1596 _logger.debug( 

1597 "Falling back to use 'networkx' instead of '%s' backend " 

1598 "for call to '%s' with arguments: %s", 

1599 backend_name, 

1600 self.name, 

1601 _LazyArgsRepr(self, args, kwargs), 

1602 ) 

1603 return self.orig_func(*args, **kwargs) 

1604 

1605 import pytest 

1606 

1607 msg = f"'{self.name}' not implemented by {backend_name}" 

1608 if hasattr(backend, self.name): 

1609 msg += " with the given arguments" 

1610 pytest.xfail(msg) 

1611 

1612 from collections.abc import Iterable, Iterator, Mapping 

1613 from copy import copy, deepcopy 

1614 from io import BufferedReader, BytesIO, StringIO, TextIOWrapper 

1615 from itertools import tee 

1616 from random import Random 

1617 

1618 import numpy as np 

1619 from numpy.random import Generator, RandomState 

1620 from scipy.sparse import sparray 

1621 

1622 # We sometimes compare the backend result (or input graphs) to the 

1623 # original result (or input graphs), so we need two sets of arguments. 

1624 compare_result_to_nx = ( 

1625 self._returns_graph 

1626 and "networkx" in self.backends 

1627 and self.name 

1628 not in { 

1629 # Has graphs as node values (unable to compare) 

1630 "quotient_graph", 

1631 # We don't handle tempfile.NamedTemporaryFile arguments 

1632 "read_gml", 

1633 "read_graph6", 

1634 "read_sparse6", 

1635 # We don't handle io.BufferedReader or io.TextIOWrapper arguments 

1636 "bipartite_read_edgelist", 

1637 "read_adjlist", 

1638 "read_edgelist", 

1639 "read_graphml", 

1640 "read_multiline_adjlist", 

1641 "read_pajek", 

1642 "from_pydot", 

1643 "pydot_read_dot", 

1644 "agraph_read_dot", 

1645 # graph comparison fails b/c of nan values 

1646 "read_gexf", 

1647 } 

1648 ) 

1649 compare_inputs_to_nx = ( 

1650 "networkx" in self.backends and self._will_call_mutate_input(args, kwargs) 

1651 ) 

1652 

1653 # Tee iterators and copy random state so that they may be used twice. 

1654 if not args or not compare_result_to_nx and not compare_inputs_to_nx: 

1655 args_to_convert = args_nx = args 

1656 else: 

1657 args_to_convert, args_nx = zip( 

1658 *( 

1659 (arg, deepcopy(arg)) 

1660 if isinstance(arg, RandomState) 

1661 else (arg, copy(arg)) 

1662 if isinstance(arg, BytesIO | StringIO | Random | Generator) 

1663 else tee(arg) 

1664 if isinstance(arg, Iterator) 

1665 and not isinstance(arg, BufferedReader | TextIOWrapper) 

1666 else (arg, arg) 

1667 for arg in args 

1668 ) 

1669 ) 

1670 if not kwargs or not compare_result_to_nx and not compare_inputs_to_nx: 

1671 kwargs_to_convert = kwargs_nx = kwargs 

1672 else: 

1673 kwargs_to_convert, kwargs_nx = zip( 

1674 *( 

1675 ((k, v), (k, deepcopy(v))) 

1676 if isinstance(v, RandomState) 

1677 else ((k, v), (k, copy(v))) 

1678 if isinstance(v, BytesIO | StringIO | Random | Generator) 

1679 else ((k, (teed := tee(v))[0]), (k, teed[1])) 

1680 if isinstance(v, Iterator) 

1681 and not isinstance(v, BufferedReader | TextIOWrapper) 

1682 else ((k, v), (k, v)) 

1683 for k, v in kwargs.items() 

1684 ) 

1685 ) 

1686 kwargs_to_convert = dict(kwargs_to_convert) 

1687 kwargs_nx = dict(kwargs_nx) 

1688 

1689 try: 

1690 converted_args, converted_kwargs = self._convert_arguments( 

1691 backend_name, 

1692 args_to_convert, 

1693 kwargs_to_convert, 

1694 use_cache=False, 

1695 mutations=None, 

1696 ) 

1697 except NotImplementedError as exc: 

1698 if fallback_to_nx: 

1699 _logger.debug( 

1700 "Graph conversion failed; falling back to use 'networkx' instead " 

1701 "of '%s' backend for call to '%s'", 

1702 backend_name, 

1703 self.name, 

1704 ) 

1705 return self.orig_func(*args_nx, **kwargs_nx) 

1706 import pytest 

1707 

1708 pytest.xfail( 

1709 exc.args[0] if exc.args else f"{self.name} raised {type(exc).__name__}" 

1710 ) 

1711 

1712 if compare_inputs_to_nx: 

1713 # Ensure input graphs are different if the function mutates an input graph. 

1714 bound_backend = self.__signature__.bind(*converted_args, **converted_kwargs) 

1715 bound_backend.apply_defaults() 

1716 bound_nx = self.__signature__.bind(*args_nx, **kwargs_nx) 

1717 bound_nx.apply_defaults() 

1718 for gname in self.graphs: 

1719 graph_nx = bound_nx.arguments[gname] 

1720 if bound_backend.arguments[gname] is graph_nx is not None: 

1721 bound_nx.arguments[gname] = graph_nx.copy() 

1722 args_nx = bound_nx.args 

1723 kwargs_nx = bound_nx.kwargs 

1724 kwargs_nx.pop("backend", None) 

1725 

1726 _logger.debug( 

1727 "Using backend '%s' for call to '%s' with arguments: %s", 

1728 backend_name, 

1729 self.name, 

1730 _LazyArgsRepr(self, converted_args, converted_kwargs), 

1731 ) 

1732 try: 

1733 result = getattr(backend, self.name)(*converted_args, **converted_kwargs) 

1734 except NotImplementedError as exc: 

1735 if fallback_to_nx: 

1736 _logger.debug( 

1737 "Backend '%s' raised when calling '%s': %s; " 

1738 "falling back to use 'networkx' instead.", 

1739 backend_name, 

1740 self.name, 

1741 exc, 

1742 ) 

1743 return self.orig_func(*args_nx, **kwargs_nx) 

1744 import pytest 

1745 

1746 pytest.xfail( 

1747 exc.args[0] if exc.args else f"{self.name} raised {type(exc).__name__}" 

1748 ) 

1749 

1750 # Verify that `self._returns_graph` is correct. This compares the return type 

1751 # to the type expected from `self._returns_graph`. This handles tuple and list 

1752 # return types, but *does not* catch functions that yield graphs. 

1753 if ( 

1754 self._returns_graph 

1755 != ( 

1756 isinstance(result, nx.Graph) 

1757 or hasattr(result, "__networkx_backend__") 

1758 or isinstance(result, tuple | list) 

1759 and any( 

1760 isinstance(x, nx.Graph) or hasattr(x, "__networkx_backend__") 

1761 for x in result 

1762 ) 

1763 ) 

1764 and not ( 

1765 # May return Graph or None 

1766 self.name in {"check_planarity", "check_planarity_recursive"} 

1767 and any(x is None for x in result) 

1768 ) 

1769 and not ( 

1770 # May return Graph or dict 

1771 self.name in {"held_karp_ascent"} 

1772 and any(isinstance(x, dict) for x in result) 

1773 ) 

1774 and self.name 

1775 not in { 

1776 # yields graphs 

1777 "all_triads", 

1778 "general_k_edge_subgraphs", 

1779 # yields graphs or arrays 

1780 "nonisomorphic_trees", 

1781 } 

1782 ): 

1783 raise RuntimeError(f"`returns_graph` is incorrect for {self.name}") 

1784 

1785 def check_result(val, depth=0): 

1786 if isinstance(val, np.number): 

1787 raise RuntimeError( 

1788 f"{self.name} returned a numpy scalar {val} ({type(val)}, depth={depth})" 

1789 ) 

1790 if isinstance(val, np.ndarray | sparray): 

1791 return 

1792 if isinstance(val, nx.Graph): 

1793 check_result(val._node, depth=depth + 1) 

1794 check_result(val._adj, depth=depth + 1) 

1795 return 

1796 if isinstance(val, Iterator): 

1797 raise NotImplementedError 

1798 if isinstance(val, Iterable) and not isinstance(val, str): 

1799 for x in val: 

1800 check_result(x, depth=depth + 1) 

1801 if isinstance(val, Mapping): 

1802 for x in val.values(): 

1803 check_result(x, depth=depth + 1) 

1804 

1805 def check_iterator(it): 

1806 for val in it: 

1807 try: 

1808 check_result(val) 

1809 except RuntimeError as exc: 

1810 raise RuntimeError( 

1811 f"{self.name} returned a numpy scalar {val} ({type(val)})" 

1812 ) from exc 

1813 yield val 

1814 

1815 if self.name in {"from_edgelist"}: 

1816 # numpy scalars are explicitly given as values in some tests 

1817 pass 

1818 elif isinstance(result, Iterator): 

1819 result = check_iterator(result) 

1820 else: 

1821 try: 

1822 check_result(result) 

1823 except RuntimeError as exc: 

1824 raise RuntimeError( 

1825 f"{self.name} returned a numpy scalar {result} ({type(result)})" 

1826 ) from exc 

1827 check_result(result) 

1828 

1829 if self.name.endswith("__new__"): 

1830 # Graph is not yet done initializing; no sense doing more here 

1831 return result 

1832 

1833 def assert_graphs_equal(G1, G2, strict=True): 

1834 assert G1.number_of_nodes() == G2.number_of_nodes() 

1835 assert G1.number_of_edges() == G2.number_of_edges() 

1836 assert G1.is_directed() is G2.is_directed() 

1837 assert G1.is_multigraph() is G2.is_multigraph() 

1838 if strict: 

1839 assert G1.graph == G2.graph 

1840 assert G1._node == G2._node 

1841 assert G1._adj == G2._adj 

1842 else: 

1843 assert set(G1) == set(G2) 

1844 assert set(G1.edges) == set(G2.edges) 

1845 

1846 if compare_inputs_to_nx: 

1847 # Special-case algorithms that mutate input graphs 

1848 result_nx = self.orig_func(*args_nx, **kwargs_nx) 

1849 for gname in self.graphs: 

1850 G0 = bound_backend.arguments[gname] 

1851 G1 = bound_nx.arguments[gname] 

1852 if G0 is not None or G1 is not None: 

1853 G1 = backend.convert_to_nx(G1) 

1854 assert_graphs_equal(G0, G1, strict=False) 

1855 

1856 converted_result = backend.convert_to_nx(result) 

1857 if compare_result_to_nx and isinstance(converted_result, nx.Graph): 

1858 # For graph return types (e.g. generators), we compare that results are 

1859 # the same between the backend and networkx, then return the original 

1860 # networkx result so the iteration order will be consistent in tests. 

1861 if compare_inputs_to_nx: 

1862 G = result_nx 

1863 else: 

1864 G = self.orig_func(*args_nx, **kwargs_nx) 

1865 assert_graphs_equal(G, converted_result) 

1866 return G 

1867 

1868 return converted_result 

1869 
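# The argument-duplication step near the top of `_convert_and_call_for_tests`
# exists because both the backend and the pure-networkx implementation must see
# "the same" single-use arguments. A stripped-down sketch of that idiom
# (simplified; not the exact branching used above):
#
#     >>> from copy import copy
#     >>> from itertools import tee
#     >>> from random import Random
#     >>> def split_arg(arg):
#     ...     if isinstance(arg, Random):
#     ...         return arg, copy(arg)   # both calls start from the same RNG state
#     ...     if hasattr(arg, "__next__"):
#     ...         return tee(arg)         # two independent iterators
#     ...     return arg, arg             # everything else is safe to share
#     >>> a, b = split_arg(iter([1, 2, 3]))
#     >>> list(a), list(b)
#     ([1, 2, 3], [1, 2, 3])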

1870 def _make_doc(self): 

1871 """Generate the backends section at the end for functions having an alternate 

1872 backend implementation(s) using the `backend_info` entry-point.""" 

1873 

1874 if self.backends == {"networkx"}: 

1875 return self._orig_doc 

1876 # Add "Backends" section to the bottom of the docstring (if there are backends) 

1877 lines = [ 

1878 "Backends", 

1879 "--------", 

1880 ] 

1881 for backend in sorted(self.backends - {"networkx"}): 

1882 info = backend_info[backend] 

1883 if "short_summary" in info: 

1884 lines.append(f"{backend} : {info['short_summary']}") 

1885 else: 

1886 lines.append(backend) 

1887 if "functions" not in info or self.name not in info["functions"]: 

1888 lines.append("") 

1889 continue 

1890 

1891 func_info = info["functions"][self.name] 

1892 

1893 # Renaming extra_docstring to additional_docs 

1894 if func_docs := ( 

1895 func_info.get("additional_docs") or func_info.get("extra_docstring") 

1896 ): 

1897 lines.extend( 

1898 f" {line}" if line else line for line in func_docs.split("\n") 

1899 ) 

1900 add_gap = True 

1901 else: 

1902 add_gap = False 

1903 

1904 # Renaming extra_parameters to additional_parameters 

1905 if extra_parameters := ( 

1906 func_info.get("extra_parameters") 

1907 or func_info.get("additional_parameters") 

1908 ): 

1909 if add_gap: 

1910 lines.append("") 

1911 lines.append(" Additional parameters:") 

1912 for param in sorted(extra_parameters): 

1913 lines.append(f" {param}") 

1914 if desc := extra_parameters[param]: 

1915 lines.append(f" {desc}") 

1916 lines.append("") 

1917 else: 

1918 lines.append("") 

1919 

1920 if func_url := func_info.get("url"): 

1921 lines.append(f"[`Source <{func_url}>`_]") 

1922 lines.append("") 

1923 

1924 # We assume the docstrings are indented by four spaces (true for now) 

1925 new_doc = self._orig_doc or "" 

1926 if not new_doc.rstrip(): 

1927 new_doc = f"The original docstring for {self.name} was empty." 

1928 if self.backends: 

1929 lines.pop() # Remove last empty line 

1930 to_add = "\n ".join(lines) 

1931 new_doc = f"{new_doc.rstrip()}\n\n {to_add}" 

1932 

1933 # For backend-only funcs, add an "Attention" admonition after the one-line summary 

1934 if "networkx" not in self.backends: 

1935 lines = new_doc.split("\n") 

1936 index = 0 

1937 while not lines[index].strip(): 

1938 index += 1 

1939 while index < len(lines) and lines[index].strip(): 

1940 index += 1 

1941 backends = sorted(self.backends) 

1942 if len(backends) == 0: 

1943 example = "" 

1944 elif len(backends) == 1: 

1945 example = f' such as "{backends[0]}"' 

1946 elif len(backends) == 2: 

1947 example = f' such as "{backends[0]}" or "{backends[1]}"' 

1948 else: 

1949 example = ( 

1950 " such as " 

1951 + ", ".join(f'"{x}"' for x in backends[:-1]) 

1952 + f', or "{backends[-1]}"' # Oxford comma 

1953 ) 

1954 to_add = ( 

1955 "\n .. attention:: This function does not have a default NetworkX implementation.\n" 

1956 " It may only be run with an installable :doc:`backend </backends>` that\n" 

1957 f" supports it{example}.\n\n" 

1958 " Hint: use ``backend=...`` keyword argument to specify a backend or add\n" 

1959 " backends to ``nx.config.backend_priority``." 

1960 ) 

1961 lines.insert(index, to_add) 

1962 new_doc = "\n".join(lines) 

1963 return new_doc 

1964 
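# For orientation, the section assembled above renders at the bottom of a
# function's docstring roughly as follows (backend name, summary, parameter,
# and URL are placeholders standing in for a hypothetical `backend_info` entry;
# the exact indentation is produced by the code above):
#
#     Backends
#     --------
#     some_backend : Short summary provided by the backend's `backend_info`.
#       Text supplied under "additional_docs" appears here, indented.
#
#       Additional parameters:
#         some_parameter : bool, optional
#           Description supplied under "additional_parameters".
#
#     [`Source <https://example.com/some_backend/this_function>`_]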

1965 def __reduce__(self): 

1966 """Allow this object to be serialized with pickle. 

1967 

1968 This uses the global registry `_registered_algorithms` to deserialize. 

1969 """ 

1970 return _restore_dispatchable, (self.name,) 

1971 

1972 

1973def _restore_dispatchable(name): 

1974 return _registered_algorithms[name].__wrapped__ 

1975 

1976 
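# Because `__reduce__` defers to the global registry, pickling a dispatchable
# serializes only its name; what comes back is whatever `_restore_dispatchable`
# resolves to (the underlying wrapped function), not a byte-for-byte copy of the
# wrapper. A small sanity-check sketch, assuming `nx.pagerank` is dispatchable:
#
#     >>> import pickle
#     >>> import networkx as nx
#     >>> pickle.loads(pickle.dumps(nx.pagerank)).__name__
#     'pagerank'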

1977def _get_cache_key( 

1978 *, 

1979 edge_attrs, 

1980 node_attrs, 

1981 preserve_edge_attrs, 

1982 preserve_node_attrs, 

1983 preserve_graph_attrs, 

1984): 

1985 """Return key used by networkx caching given arguments for ``convert_from_nx``.""" 

1986 # edge_attrs: dict | None 

1987 # node_attrs: dict | None 

1988 # preserve_edge_attrs: bool (False if edge_attrs is not None) 

1989 # preserve_node_attrs: bool (False if node_attrs is not None) 

1990 return ( 

1991 frozenset(edge_attrs.items()) 

1992 if edge_attrs is not None 

1993 else preserve_edge_attrs, 

1994 frozenset(node_attrs.items()) 

1995 if node_attrs is not None 

1996 else preserve_node_attrs, 

1997 ) 

1998 

1999 
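# To make the keys concrete (graph attrs are deliberately excluded, per the
# comment in `_convert_graph` about always preserving them when caching):
#
#     >>> _get_cache_key(
#     ...     edge_attrs={"weight": 1},
#     ...     node_attrs=None,
#     ...     preserve_edge_attrs=False,
#     ...     preserve_node_attrs=True,
#     ...     preserve_graph_attrs=False,
#     ... )
#     (frozenset({('weight', 1)}), True)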

2000def _get_from_cache(cache, key, *, backend_name=None, mutations=None): 

2001 """Search the networkx cache for a graph that is compatible with ``key``. 

2002 

2003 Parameters 

2004 ---------- 

2005 cache : dict 

2006 If ``backend_name`` is given, then this is treated as ``G.__networkx_cache__``, 

2007 but if ``backend_name`` is None, then this is treated as the resolved inner 

2008 cache such as ``G.__networkx_cache__["backends"][backend_name]``. 

2009 key : tuple 

2010 Cache key from ``_get_cache_key``. 

2011 backend_name : str, optional 

2012 Name of the backend to control how ``cache`` is interpreted. 

2013 mutations : list, optional 

2014 Used internally to clear objects retrieved from the cache if the inputs will be mutated. 

2015 

2016 Returns 

2017 ------- 

2018 tuple or None 

2019 The key of the compatible graph found in the cache. 

2020 graph or "FAILED_TO_CONVERT" or None 

2021 A compatible graph if possible. "FAILED_TO_CONVERT" indicates that a previous 

2022 conversion attempt failed for this cache key. 

2023 """ 

2024 if backend_name is not None: 

2025 cache = cache.get("backends", {}).get(backend_name, {}) 

2026 if not cache: 

2027 return None, None 

2028 

2029 # Do a simple search for a cached graph with compatible data. 

2030 # For example, if we need a single attribute, then it's okay 

2031 # to use a cached graph that preserved all attributes. 

2032 # This looks for an exact match first. 

2033 edge_key, node_key = key 

2034 for compat_key in itertools.product( 

2035 (edge_key, True) if edge_key is not True else (True,), 

2036 (node_key, True) if node_key is not True else (True,), 

2037 ): 

2038 if (rv := cache.get(compat_key)) is not None and ( 

2039 rv != FAILED_TO_CONVERT or key == compat_key 

2040 ): 

2041 if mutations is not None: 

2042 # Remove this item from the cache (after all conversions) if 

2043 # the call to this dispatchable function will mutate an input. 

2044 mutations.append((cache, compat_key)) 

2045 return compat_key, rv 

2046 

2047 # Iterate over the items in `cache` to see if any are compatible. 

2048 # For example, if no edge attributes are needed, then a graph 

2049 # with any edge attribute will suffice. We use the same logic 

2050 # below (but switched) to clear unnecessary items from the cache. 

2051 # Use `list(cache.items())` to be thread-safe. 

2052 for (ekey, nkey), graph in list(cache.items()): 

2053 if graph == FAILED_TO_CONVERT: 

2054 # Return FAILED_TO_CONVERT if any cache key that requires a subset 

2055 # of the edge/node attributes of the given cache key has previously 

2056 # failed to convert. This logic is similar to `_set_to_cache`. 

2057 if ekey is False or edge_key is True: 

2058 pass 

2059 elif ekey is True or edge_key is False or not ekey.issubset(edge_key): 

2060 continue 

2061 if nkey is False or node_key is True: # or nkey == node_key: 

2062 pass 

2063 elif nkey is True or node_key is False or not nkey.issubset(node_key): 

2064 continue 

2065 # Save to cache for faster subsequent lookups 

2066 cache[key] = FAILED_TO_CONVERT 

2067 elif edge_key is False or ekey is True: 

2068 pass # Cache works for edge data! 

2069 elif edge_key is True or ekey is False or not edge_key.issubset(ekey): 

2070 continue # Cache missing required edge data; does not work 

2071 if node_key is False or nkey is True: 

2072 pass # Cache works for node data! 

2073 elif node_key is True or nkey is False or not node_key.issubset(nkey): 

2074 continue # Cache missing required node data; does not work 

2075 if mutations is not None: 

2076 # Remove this item from the cache (after all conversions) if 

2077 # the call to this dispatchable function will mutate an input. 

2078 mutations.append((cache, (ekey, nkey))) 

2079 return (ekey, nkey), graph 

2080 

2081 return None, None 

2082 

2083 
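# The compatibility search above means a conversion cached with "preserve
# everything" keys can also serve narrower requests. A sketch with a hand-built
# inner cache dict (the string "G_all" stands in for a converted backend graph):
#
#     >>> cache = {(True, True): "G_all"}   # cached with all edge/node attrs preserved
#     >>> key = _get_cache_key(
#     ...     edge_attrs={"weight": 1},
#     ...     node_attrs=None,
#     ...     preserve_edge_attrs=False,
#     ...     preserve_node_attrs=False,
#     ...     preserve_graph_attrs=False,
#     ... )
#     >>> _get_from_cache(cache, key)
#     ((True, True), 'G_all')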

2084def _set_to_cache(cache, key, graph, *, backend_name=None): 

2085 """Set a backend graph to the cache, and remove unnecessary cached items. 

2086 

2087 Parameters 

2088 ---------- 

2089 cache : dict 

2090 If ``backend_name`` is given, then this is treated as ``G.__networkx_cache__``, 

2091 but if ``backend_name`` is None, then this is treated as the resolved inner 

2092 cache such as ``G.__networkx_cache__["backends"][backend_name]``. 

2093 key : tuple 

2094 Cache key from ``_get_cache_key``. 

2095 graph : graph or "FAILED_TO_CONVERT" 

2096 Setting value to "FAILED_TO_CONVERT" prevents this conversion from being 

2097 attempted in future calls. 

2098 backend_name : str, optional 

2099 Name of the backend to control how ``cache`` is interpreted. 

2100 

2101 Returns 

2102 ------- 

2103 dict 

2104 The items that were removed from the cache. 

2105 """ 

2106 if backend_name is not None: 

2107 cache = cache.setdefault("backends", {}).setdefault(backend_name, {}) 

2108 # Remove old cached items that are no longer necessary since they 

2109 # are dominated/subsumed/outdated by what was just calculated. 

2110 # This uses the same logic as above, but with keys switched. 

2111 # Also, don't update the cache here if the call will mutate an input. 

2112 removed = {} 

2113 edge_key, node_key = key 

2114 cache[key] = graph # Set at beginning to be thread-safe 

2115 if graph == FAILED_TO_CONVERT: 

2116 return removed 

2117 for cur_key in list(cache): 

2118 if cur_key == key: 

2119 continue 

2120 ekey, nkey = cur_key 

2121 if ekey is False or edge_key is True: 

2122 pass 

2123 elif ekey is True or edge_key is False or not ekey.issubset(edge_key): 

2124 continue 

2125 if nkey is False or node_key is True: 

2126 pass 

2127 elif nkey is True or node_key is False or not nkey.issubset(node_key): 

2128 continue 

2129 # Use pop instead of del to try to be thread-safe 

2130 if (graph := cache.pop(cur_key, None)) is not None: 

2131 removed[cur_key] = graph 

2132 return removed 

2133 

2134 
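# `_set_to_cache` also prunes entries made redundant by the new conversion.
# Continuing the hand-built-cache sketch from `_get_from_cache` (string values
# again stand in for converted graphs):
#
#     >>> cache = {(frozenset({("weight", 1)}), False): "G_weight"}
#     >>> _set_to_cache(cache, (True, True), "G_all")   # new entry preserves all attrs
#     {(frozenset({('weight', 1)}), False): 'G_weight'}
#     >>> cache
#     {(True, True): 'G_all'}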

2135class _LazyArgsRepr: 

2136 """Simple wrapper to display arguments of dispatchable functions in logging calls.""" 

2137 

2138 def __init__(self, func, args, kwargs): 

2139 self.func = func 

2140 self.args = args 

2141 self.kwargs = kwargs 

2142 self.value = None 

2143 

2144 def __repr__(self): 

2145 if self.value is None: 

2146 bound = self.func.__signature__.bind_partial(*self.args, **self.kwargs) 

2147 inner = ", ".join(f"{key}={val!r}" for key, val in bound.arguments.items()) 

2148 self.value = f"({inner})" 

2149 return self.value 

2150 

2151 
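# The point of `_LazyArgsRepr` is that the (potentially large) repr of the
# arguments is only computed if a log record is actually formatted. The same
# general pattern with plain `logging` (a generic sketch, not networkx API):
#
#     >>> import logging
#     >>> class LazyRepr:
#     ...     def __init__(self, make):
#     ...         self.make = make
#     ...     def __repr__(self):
#     ...         return self.make()   # runs only when the message is rendered
#     >>> logging.getLogger("demo").debug("args: %s", LazyRepr(lambda: "expensive repr"))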

2152if os.environ.get("_NETWORKX_BUILDING_DOCS_"): 

2153 # When building docs with Sphinx, use the original function with the 

2154 # dispatched __doc__, b/c Sphinx renders normal Python functions better. 

2155 # This doesn't show e.g. `*, backend=None, **backend_kwargs` in the 

2156 # signatures, which is probably okay. It does allow the docstring to be 

2157 # updated based on the installed backends. 

2158 _orig_dispatchable = _dispatchable 

2159 

2160 def _dispatchable(func=None, **kwargs): # type: ignore[no-redef] 

2161 if func is None: 

2162 return partial(_dispatchable, **kwargs) 

2163 dispatched_func = _orig_dispatchable(func, **kwargs) 

2164 func.__doc__ = dispatched_func.__doc__ 

2165 return func 

2166 

2167 _dispatchable.__doc__ = _orig_dispatchable.__new__.__doc__ # type: ignore[method-assign,assignment] 

2168 _sig = inspect.signature(_orig_dispatchable.__new__) 

2169 _dispatchable.__signature__ = _sig.replace( # type: ignore[method-assign,assignment] 

2170 parameters=[v for k, v in _sig.parameters.items() if k != "cls"] 

2171 )
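# The docs-building switch above only takes effect if the environment variable
# is set before networkx is imported, since this check runs at module import
# time. A minimal sketch:
#
#     >>> import os
#     >>> os.environ["_NETWORKX_BUILDING_DOCS_"] = "1"   # must happen before import
#     >>> import networkx as nx
#     >>> # Decorated functions are now plain functions that still carry the
#     >>> # docstrings generated by the dispatch machinery.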