Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/black/linegen.py: 11%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

826 statements  

1""" 

2Generating lines of code. 

3""" 

4 

5import re 

6import sys 

7from collections.abc import Collection, Iterator 

8from dataclasses import replace 

9from enum import Enum, auto 

10from functools import partial, wraps 

11from typing import Union, cast 

12 

13from black.brackets import ( 

14 COMMA_PRIORITY, 

15 DOT_PRIORITY, 

16 STRING_PRIORITY, 

17 get_leaves_inside_matching_brackets, 

18 max_delimiter_priority_in_atom, 

19) 

20from black.comments import ( 

21 FMT_OFF, 

22 FMT_ON, 

23 contains_fmt_directive, 

24 generate_comments, 

25 list_comments, 

26) 

27from black.lines import ( 

28 Line, 

29 RHSResult, 

30 append_leaves, 

31 can_be_split, 

32 can_omit_invisible_parens, 

33 is_line_short_enough, 

34 line_to_string, 

35) 

36from black.mode import Feature, Mode, Preview 

37from black.nodes import ( 

38 ASSIGNMENTS, 

39 BRACKETS, 

40 CLOSING_BRACKETS, 

41 OPENING_BRACKETS, 

42 STANDALONE_COMMENT, 

43 STATEMENT, 

44 WHITESPACE, 

45 Visitor, 

46 ensure_visible, 

47 fstring_tstring_to_string, 

48 get_annotation_type, 

49 has_sibling_with_type, 

50 is_arith_like, 

51 is_async_stmt_or_funcdef, 

52 is_atom_with_invisible_parens, 

53 is_docstring, 

54 is_empty_tuple, 

55 is_generator, 

56 is_lpar_token, 

57 is_multiline_string, 

58 is_name_token, 

59 is_one_sequence_between, 

60 is_one_tuple, 

61 is_parent_function_or_class, 

62 is_part_of_annotation, 

63 is_rpar_token, 

64 is_stub_body, 

65 is_stub_suite, 

66 is_tuple, 

67 is_tuple_containing_star, 

68 is_tuple_containing_walrus, 

69 is_type_ignore_comment_string, 

70 is_vararg, 

71 is_walrus_assignment, 

72 is_yield, 

73 syms, 

74 wrap_in_parentheses, 

75) 

76from black.numerics import normalize_numeric_literal 

77from black.strings import ( 

78 fix_multiline_docstring, 

79 get_string_prefix, 

80 normalize_string_prefix, 

81 normalize_string_quotes, 

82 normalize_unicode_escape_sequences, 

83) 

84from black.trans import ( 

85 CannotTransform, 

86 StringMerger, 

87 StringParenStripper, 

88 StringParenWrapper, 

89 StringSplitter, 

90 Transformer, 

91 hug_power_op, 

92) 

93from blib2to3.pgen2 import token 

94from blib2to3.pytree import Leaf, Node 

95 

96# types 

97LeafID = int 

98LN = Union[Leaf, Node] 

99 

100 

101class CannotSplit(CannotTransform): 

102 """A readable split that fits the allotted line length is impossible.""" 

103 

104 

105# This isn't a dataclass because @dataclass + Generic breaks mypyc. 

106# See also https://github.com/mypyc/mypyc/issues/827. 

107class LineGenerator(Visitor[Line]): 

108 """Generates reformatted Line objects. Empty lines are not emitted. 

109 

110 Note: destroys the tree it's visiting by mutating prefixes of its leaves 

111 in ways that will no longer stringify to valid Python code on the tree. 

112 """ 

113 

114 def __init__(self, mode: Mode, features: Collection[Feature]) -> None: 

115 self.mode = mode 

116 self.features = features 

117 self.current_line: Line 

118 self.__post_init__() 

119 

120 def line(self, indent: int = 0) -> Iterator[Line]: 

121 """Generate a line. 

122 

123 If the line is empty, only emit if it makes sense. 

124 If the line is too long, split it first and then generate. 

125 

126 If any lines were generated, set up a new current_line. 

127 """ 

128 if not self.current_line: 

129 self.current_line.depth += indent 

130 return # Line is empty, don't emit. Creating a new one unnecessary. 

131 

132 if len(self.current_line.leaves) == 1 and is_async_stmt_or_funcdef( 

133 self.current_line.leaves[0] 

134 ): 

135 # Special case for async def/for/with statements. `visit_async_stmt` 

136 # adds an `ASYNC` leaf then visits the child def/for/with statement 

137 # nodes. Line yields from those nodes shouldn't treat the former 

138 # `ASYNC` leaf as a complete line. 

139 return 

140 

141 complete_line = self.current_line 

142 self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent) 

143 yield complete_line 

144 

145 def visit_default(self, node: LN) -> Iterator[Line]: 

146 """Default `visit_*()` implementation. Recurses to children of `node`.""" 

147 if isinstance(node, Leaf): 

148 any_open_brackets = self.current_line.bracket_tracker.any_open_brackets() 

149 for comment in generate_comments(node, mode=self.mode): 

150 if any_open_brackets: 

151 # any comment within brackets is subject to splitting 

152 self.current_line.append(comment) 

153 elif comment.type == token.COMMENT: 

154 # regular trailing comment 

155 self.current_line.append(comment) 

156 yield from self.line() 

157 

158 else: 

159 # regular standalone comment 

160 yield from self.line() 

161 

162 self.current_line.append(comment) 

163 yield from self.line() 

164 

165 if any_open_brackets: 

166 node.prefix = "" 

167 if node.type not in WHITESPACE: 

168 self.current_line.append(node) 

169 yield from super().visit_default(node) 

170 

171 def visit_test(self, node: Node) -> Iterator[Line]: 

172 """Visit an `x if y else z` test""" 

173 

174 already_parenthesized = ( 

175 node.prev_sibling and node.prev_sibling.type == token.LPAR 

176 ) 

177 

178 if not already_parenthesized: 

179 # Similar to logic in wrap_in_parentheses 

180 lpar = Leaf(token.LPAR, "") 

181 rpar = Leaf(token.RPAR, "") 

182 prefix = node.prefix 

183 node.prefix = "" 

184 lpar.prefix = prefix 

185 node.insert_child(0, lpar) 

186 node.append_child(rpar) 

187 

188 yield from self.visit_default(node) 

189 

190 def visit_INDENT(self, node: Leaf) -> Iterator[Line]: 

191 """Increase indentation level, maybe yield a line.""" 

192 # In blib2to3 INDENT never holds comments. 

193 yield from self.line(+1) 

194 yield from self.visit_default(node) 

195 

196 def visit_DEDENT(self, node: Leaf) -> Iterator[Line]: 

197 """Decrease indentation level, maybe yield a line.""" 

198 # The current line might still wait for trailing comments. At DEDENT time 

199 # there won't be any (they would be prefixes on the preceding NEWLINE). 

200 # Emit the line then. 

201 yield from self.line() 

202 

203 # While DEDENT has no value, its prefix may contain standalone comments 

204 # that belong to the current indentation level. Get 'em. 

205 yield from self.visit_default(node) 

206 

207 # Finally, emit the dedent. 

208 yield from self.line(-1) 

209 

210 def visit_stmt( 

211 self, node: Node, keywords: set[str], parens: set[str] 

212 ) -> Iterator[Line]: 

213 """Visit a statement. 

214 

215 This implementation is shared for `if`, `while`, `for`, `try`, `except`, 

216 `def`, `with`, `class`, `assert`, and assignments. 

217 

218 The relevant Python language `keywords` for a given statement will be 

219 NAME leaves within it. This methods puts those on a separate line. 

220 

221 `parens` holds a set of string leaf values immediately after which 

222 invisible parens should be put. 

223 """ 

224 normalize_invisible_parens( 

225 node, parens_after=parens, mode=self.mode, features=self.features 

226 ) 

227 for child in node.children: 

228 if is_name_token(child) and child.value in keywords: 

229 yield from self.line() 

230 

231 yield from self.visit(child) 

232 

233 def visit_typeparams(self, node: Node) -> Iterator[Line]: 

234 yield from self.visit_default(node) 

235 node.children[0].prefix = "" 

236 

237 def visit_typevartuple(self, node: Node) -> Iterator[Line]: 

238 yield from self.visit_default(node) 

239 node.children[1].prefix = "" 

240 

241 def visit_paramspec(self, node: Node) -> Iterator[Line]: 

242 yield from self.visit_default(node) 

243 node.children[1].prefix = "" 

244 

245 def visit_dictsetmaker(self, node: Node) -> Iterator[Line]: 

246 if Preview.wrap_long_dict_values_in_parens in self.mode: 

247 for i, child in enumerate(node.children): 

248 if i == 0: 

249 continue 

250 if node.children[i - 1].type == token.COLON: 

251 if ( 

252 child.type == syms.atom 

253 and child.children[0].type in OPENING_BRACKETS 

254 and not is_walrus_assignment(child) 

255 ): 

256 maybe_make_parens_invisible_in_atom( 

257 child, 

258 parent=node, 

259 mode=self.mode, 

260 features=self.features, 

261 remove_brackets_around_comma=False, 

262 ) 

263 else: 

264 wrap_in_parentheses(node, child, visible=False) 

265 yield from self.visit_default(node) 

266 

267 def visit_funcdef(self, node: Node) -> Iterator[Line]: 

268 """Visit function definition.""" 

269 yield from self.line() 

270 

271 # Remove redundant brackets around return type annotation. 

272 is_return_annotation = False 

273 for child in node.children: 

274 if child.type == token.RARROW: 

275 is_return_annotation = True 

276 elif is_return_annotation: 

277 if child.type == syms.atom and child.children[0].type == token.LPAR: 

278 if maybe_make_parens_invisible_in_atom( 

279 child, 

280 parent=node, 

281 mode=self.mode, 

282 features=self.features, 

283 remove_brackets_around_comma=False, 

284 ): 

285 wrap_in_parentheses(node, child, visible=False) 

286 else: 

287 wrap_in_parentheses(node, child, visible=False) 

288 is_return_annotation = False 

289 

290 for child in node.children: 

291 yield from self.visit(child) 

292 

293 def visit_match_case(self, node: Node) -> Iterator[Line]: 

294 """Visit either a match or case statement.""" 

295 normalize_invisible_parens( 

296 node, parens_after=set(), mode=self.mode, features=self.features 

297 ) 

298 

299 yield from self.line() 

300 for child in node.children: 

301 yield from self.visit(child) 

302 

303 def visit_suite(self, node: Node) -> Iterator[Line]: 

304 """Visit a suite.""" 

305 if is_stub_suite(node): 

306 yield from self.visit(node.children[2]) 

307 else: 

308 yield from self.visit_default(node) 

309 

310 def visit_simple_stmt(self, node: Node) -> Iterator[Line]: 

311 """Visit a statement without nested statements.""" 

312 prev_type: int | None = None 

313 for child in node.children: 

314 if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child): 

315 wrap_in_parentheses(node, child, visible=False) 

316 prev_type = child.type 

317 

318 if node.parent and node.parent.type in STATEMENT: 

319 if is_parent_function_or_class(node) and is_stub_body(node): 

320 yield from self.visit_default(node) 

321 else: 

322 yield from self.line(+1) 

323 yield from self.visit_default(node) 

324 yield from self.line(-1) 

325 

326 else: 

327 if node.parent and is_stub_suite(node.parent): 

328 node.prefix = "" 

329 yield from self.visit_default(node) 

330 return 

331 yield from self.line() 

332 yield from self.visit_default(node) 

333 

334 def visit_async_stmt(self, node: Node) -> Iterator[Line]: 

335 """Visit `async def`, `async for`, `async with`.""" 

336 yield from self.line() 

337 

338 children = iter(node.children) 

339 for child in children: 

340 yield from self.visit(child) 

341 

342 if child.type == token.ASYNC or child.type == STANDALONE_COMMENT: 

343 # STANDALONE_COMMENT happens when `# fmt: skip` is applied on the async 

344 # line. 

345 break 

346 

347 internal_stmt = next(children) 

348 yield from self.visit(internal_stmt) 

349 

350 def visit_decorators(self, node: Node) -> Iterator[Line]: 

351 """Visit decorators.""" 

352 for child in node.children: 

353 yield from self.line() 

354 yield from self.visit(child) 

355 

356 def visit_power(self, node: Node) -> Iterator[Line]: 

357 for idx, leaf in enumerate(node.children[:-1]): 

358 next_leaf = node.children[idx + 1] 

359 

360 if not isinstance(leaf, Leaf): 

361 continue 

362 

363 value = leaf.value.lower() 

364 if ( 

365 leaf.type == token.NUMBER 

366 and next_leaf.type == syms.trailer 

367 # Ensure that we are in an attribute trailer 

368 and next_leaf.children[0].type == token.DOT 

369 # It shouldn't wrap hexadecimal, binary and octal literals 

370 and not value.startswith(("0x", "0b", "0o")) 

371 # It shouldn't wrap complex literals 

372 and "j" not in value 

373 ): 

374 wrap_in_parentheses(node, leaf) 

375 

376 remove_await_parens(node, mode=self.mode, features=self.features) 

377 

378 yield from self.visit_default(node) 

379 

380 def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]: 

381 """Remove a semicolon and put the other statement on a separate line.""" 

382 yield from self.line() 

383 

384 def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]: 

385 """End of file. Process outstanding comments and end with a newline.""" 

386 yield from self.visit_default(leaf) 

387 yield from self.line() 

388 

389 def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]: 

390 any_open_brackets = self.current_line.bracket_tracker.any_open_brackets() 

391 if not any_open_brackets: 

392 yield from self.line() 

393 # STANDALONE_COMMENT nodes created by our special handling in 

394 # normalize_fmt_off for comment-only blocks have fmt:off as the first 

395 # line and fmt:on as the last line (each directive on its own line, 

396 # not embedded in other text). These should be appended directly 

397 # without calling visit_default, which would process their prefix and 

398 # lose indentation. Normal STANDALONE_COMMENT nodes go through 

399 # visit_default. 

400 value = leaf.value 

401 lines = value.splitlines() 

402 is_fmt_off_block = ( 

403 len(lines) >= 2 

404 and contains_fmt_directive(lines[0], FMT_OFF) 

405 and contains_fmt_directive(lines[-1], FMT_ON) 

406 ) 

407 if is_fmt_off_block: 

408 # This is a fmt:off/on block from normalize_fmt_off - we still need 

409 # to process any prefix comments (like markdown comments) but append 

410 # the fmt block itself directly to preserve its formatting 

411 

412 # Only process prefix comments if there actually is a prefix with comments 

413 if leaf.prefix and any( 

414 line.strip().startswith("#") 

415 and not contains_fmt_directive(line.strip()) 

416 for line in leaf.prefix.split("\n") 

417 ): 

418 for comment in generate_comments(leaf, mode=self.mode): 

419 yield from self.line() 

420 self.current_line.append(comment) 

421 yield from self.line() 

422 # Clear the prefix since we've processed it as comments above 

423 leaf.prefix = "" 

424 

425 self.current_line.append(leaf) 

426 if not any_open_brackets: 

427 yield from self.line() 

428 else: 

429 # Normal standalone comment - process through visit_default 

430 yield from self.visit_default(leaf) 

431 

432 def visit_factor(self, node: Node) -> Iterator[Line]: 

433 """Force parentheses between a unary op and a binary power: 

434 

435 -2 ** 8 -> -(2 ** 8) 

436 """ 

437 _operator, operand = node.children 

438 if ( 

439 operand.type == syms.power 

440 and len(operand.children) == 3 

441 and operand.children[1].type == token.DOUBLESTAR 

442 ): 

443 lpar = Leaf(token.LPAR, "(") 

444 rpar = Leaf(token.RPAR, ")") 

445 index = operand.remove() or 0 

446 node.insert_child(index, Node(syms.atom, [lpar, operand, rpar])) 

447 yield from self.visit_default(node) 

448 

449 def visit_tname(self, node: Node) -> Iterator[Line]: 

450 """ 

451 Add potential parentheses around types in function parameter lists to be made 

452 into real parentheses in case the type hint is too long to fit on a line 

453 Examples: 

454 def foo(a: int, b: float = 7): ... 

455 

456 -> 

457 

458 def foo(a: (int), b: (float) = 7): ... 

459 """ 

460 if len(node.children) == 3 and maybe_make_parens_invisible_in_atom( 

461 node.children[2], parent=node, mode=self.mode, features=self.features 

462 ): 

463 wrap_in_parentheses(node, node.children[2], visible=False) 

464 

465 yield from self.visit_default(node) 

466 

467 def visit_STRING(self, leaf: Leaf) -> Iterator[Line]: 

468 normalize_unicode_escape_sequences(leaf) 

469 

470 if is_docstring(leaf) and not re.search(r"\\\s*\n", leaf.value): 

471 # We're ignoring docstrings with backslash newline escapes because changing 

472 # indentation of those changes the AST representation of the code. 

473 if self.mode.string_normalization: 

474 docstring = normalize_string_prefix(leaf.value) 

475 # We handle string normalization at the end of this method, but since 

476 # what we do right now acts differently depending on quote style (ex. 

477 # see padding logic below), there's a possibility for unstable 

478 # formatting. To avoid a situation where this function formats a 

479 # docstring differently on the second pass, normalize it early. 

480 docstring = normalize_string_quotes(docstring) 

481 else: 

482 docstring = leaf.value 

483 prefix = get_string_prefix(docstring) 

484 docstring = docstring[len(prefix) :] # Remove the prefix 

485 quote_char = docstring[0] 

486 # A natural way to remove the outer quotes is to do: 

487 # docstring = docstring.strip(quote_char) 

488 # but that breaks on """""x""" (which is '""x'). 

489 # So we actually need to remove the first character and the next two 

490 # characters but only if they are the same as the first. 

491 quote_len = 1 if docstring[1] != quote_char else 3 

492 docstring = docstring[quote_len:-quote_len] 

493 docstring_started_empty = not docstring 

494 indent = " " * 4 * self.current_line.depth 

495 

496 if is_multiline_string(leaf): 

497 docstring = fix_multiline_docstring(docstring, indent) 

498 else: 

499 docstring = docstring.strip() 

500 

501 has_trailing_backslash = False 

502 if docstring: 

503 # Add some padding if the docstring starts / ends with a quote mark. 

504 if docstring[0] == quote_char: 

505 docstring = " " + docstring 

506 if docstring[-1] == quote_char: 

507 docstring += " " 

508 if docstring[-1] == "\\": 

509 backslash_count = len(docstring) - len(docstring.rstrip("\\")) 

510 if backslash_count % 2: 

511 # Odd number of tailing backslashes, add some padding to 

512 # avoid escaping the closing string quote. 

513 docstring += " " 

514 has_trailing_backslash = True 

515 elif not docstring_started_empty: 

516 docstring = " " 

517 

518 # We could enforce triple quotes at this point. 

519 quote = quote_char * quote_len 

520 

521 # It's invalid to put closing single-character quotes on a new line. 

522 if quote_len == 3: 

523 # We need to find the length of the last line of the docstring 

524 # to find if we can add the closing quotes to the line without 

525 # exceeding the maximum line length. 

526 # If docstring is one line, we don't put the closing quotes on a 

527 # separate line because it looks ugly (#3320). 

528 lines = docstring.splitlines() 

529 last_line_length = len(lines[-1]) if docstring else 0 

530 

531 # If adding closing quotes would cause the last line to exceed 

532 # the maximum line length, and the closing quote is not 

533 # prefixed by a newline then put a line break before 

534 # the closing quotes 

535 if ( 

536 len(lines) > 1 

537 and last_line_length + quote_len > self.mode.line_length 

538 and len(indent) + quote_len <= self.mode.line_length 

539 and not has_trailing_backslash 

540 ): 

541 if leaf.value[-1 - quote_len] == "\n": 

542 leaf.value = prefix + quote + docstring + quote 

543 else: 

544 leaf.value = prefix + quote + docstring + "\n" + indent + quote 

545 else: 

546 leaf.value = prefix + quote + docstring + quote 

547 else: 

548 leaf.value = prefix + quote + docstring + quote 

549 

550 if self.mode.string_normalization and leaf.type == token.STRING: 

551 leaf.value = normalize_string_prefix(leaf.value) 

552 leaf.value = normalize_string_quotes(leaf.value) 

553 yield from self.visit_default(leaf) 

554 

555 def visit_NUMBER(self, leaf: Leaf) -> Iterator[Line]: 

556 normalize_numeric_literal(leaf) 

557 yield from self.visit_default(leaf) 

558 

559 def visit_atom(self, node: Node) -> Iterator[Line]: 

560 """Visit any atom""" 

561 if len(node.children) == 3: 

562 first = node.children[0] 

563 last = node.children[-1] 

564 if (first.type == token.LSQB and last.type == token.RSQB) or ( 

565 first.type == token.LBRACE and last.type == token.RBRACE 

566 ): 

567 # Lists or sets of one item 

568 maybe_make_parens_invisible_in_atom( 

569 node.children[1], 

570 parent=node, 

571 mode=self.mode, 

572 features=self.features, 

573 ) 

574 

575 yield from self.visit_default(node) 

576 

577 def visit_fstring(self, node: Node) -> Iterator[Line]: 

578 # currently we don't want to format and split f-strings at all. 

579 string_leaf = fstring_tstring_to_string(node) 

580 node.replace(string_leaf) 

581 if "\\" in string_leaf.value and any( 

582 "\\" in str(child) 

583 for child in node.children 

584 if child.type == syms.fstring_replacement_field 

585 ): 

586 # string normalization doesn't account for nested quotes, 

587 # causing breakages. skip normalization when nested quotes exist 

588 yield from self.visit_default(string_leaf) 

589 return 

590 yield from self.visit_STRING(string_leaf) 

591 

592 def visit_tstring(self, node: Node) -> Iterator[Line]: 

593 # currently we don't want to format and split t-strings at all. 

594 string_leaf = fstring_tstring_to_string(node) 

595 node.replace(string_leaf) 

596 if "\\" in string_leaf.value and any( 

597 "\\" in str(child) 

598 for child in node.children 

599 if child.type == syms.fstring_replacement_field 

600 ): 

601 # string normalization doesn't account for nested quotes, 

602 # causing breakages. skip normalization when nested quotes exist 

603 yield from self.visit_default(string_leaf) 

604 return 

605 yield from self.visit_STRING(string_leaf) 

606 

607 # TODO: Uncomment Implementation to format f-string children 

608 # fstring_start = node.children[0] 

609 # fstring_end = node.children[-1] 

610 # assert isinstance(fstring_start, Leaf) 

611 # assert isinstance(fstring_end, Leaf) 

612 

613 # quote_char = fstring_end.value[0] 

614 # quote_idx = fstring_start.value.index(quote_char) 

615 # prefix, quote = ( 

616 # fstring_start.value[:quote_idx], 

617 # fstring_start.value[quote_idx:] 

618 # ) 

619 

620 # if not is_docstring(node, self.mode): 

621 # prefix = normalize_string_prefix(prefix) 

622 

623 # assert quote == fstring_end.value 

624 

625 # is_raw_fstring = "r" in prefix or "R" in prefix 

626 # middles = [ 

627 # leaf 

628 # for leaf in node.leaves() 

629 # if leaf.type == token.FSTRING_MIDDLE 

630 # ] 

631 

632 # if self.mode.string_normalization: 

633 # middles, quote = normalize_fstring_quotes(quote, middles, is_raw_fstring) 

634 

635 # fstring_start.value = prefix + quote 

636 # fstring_end.value = quote 

637 

638 # yield from self.visit_default(node) 

639 

640 def visit_comp_for(self, node: Node) -> Iterator[Line]: 

641 if Preview.wrap_comprehension_in in self.mode: 

642 normalize_invisible_parens( 

643 node, parens_after={"in"}, mode=self.mode, features=self.features 

644 ) 

645 yield from self.visit_default(node) 

646 

647 def visit_old_comp_for(self, node: Node) -> Iterator[Line]: 

648 yield from self.visit_comp_for(node) 

649 

650 def __post_init__(self) -> None: 

651 """You are in a twisty little maze of passages.""" 

652 self.current_line = Line(mode=self.mode) 

653 

654 v = self.visit_stmt 

655 Ø: set[str] = set() 

656 self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","}) 

657 self.visit_if_stmt = partial( 

658 v, keywords={"if", "else", "elif"}, parens={"if", "elif"} 

659 ) 

660 self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"}) 

661 self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"}) 

662 self.visit_try_stmt = partial( 

663 v, keywords={"try", "except", "else", "finally"}, parens=Ø 

664 ) 

665 self.visit_except_clause = partial(v, keywords={"except"}, parens={"except"}) 

666 self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"}) 

667 self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) 

668 

669 self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS) 

670 self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"}) 

671 self.visit_import_from = partial(v, keywords=Ø, parens={"import"}) 

672 self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"}) 

673 self.visit_async_funcdef = self.visit_async_stmt 

674 self.visit_decorated = self.visit_decorators 

675 

676 # PEP 634 

677 self.visit_match_stmt = self.visit_match_case 

678 self.visit_case_block = self.visit_match_case 

679 self.visit_guard = partial(v, keywords=Ø, parens={"if"}) 

680 

681 

682# Remove when `simplify_power_operator_hugging` becomes stable. 

683def _hugging_power_ops_line_to_string( 

684 line: Line, 

685 features: Collection[Feature], 

686 mode: Mode, 

687) -> str | None: 

688 try: 

689 return line_to_string(next(hug_power_op(line, features, mode))) 

690 except CannotTransform: 

691 return None 

692 

693 

694def transform_line( 

695 line: Line, mode: Mode, features: Collection[Feature] = () 

696) -> Iterator[Line]: 

697 """Transform a `line`, potentially splitting it into many lines. 

698 

699 They should fit in the allotted `line_length` but might not be able to. 

700 

701 `features` are syntactical features that may be used in the output. 

702 """ 

703 if line.is_comment: 

704 yield line 

705 return 

706 

707 line_str = line_to_string(line) 

708 

709 if Preview.simplify_power_operator_hugging in mode: 

710 line_str_hugging_power_ops = line_str 

711 else: 

712 # We need the line string when power operators are hugging to determine if we 

713 # should split the line. Default to line_str, if no power operator are present 

714 # on the line. 

715 line_str_hugging_power_ops = ( 

716 _hugging_power_ops_line_to_string(line, features, mode) or line_str 

717 ) 

718 

719 ll = mode.line_length 

720 sn = mode.string_normalization 

721 string_merge = StringMerger(ll, sn) 

722 string_paren_strip = StringParenStripper(ll, sn) 

723 string_split = StringSplitter(ll, sn) 

724 string_paren_wrap = StringParenWrapper(ll, sn) 

725 

726 transformers: list[Transformer] 

727 if ( 

728 not line.contains_uncollapsable_type_comments() 

729 and not line.should_split_rhs 

730 and not line.magic_trailing_comma 

731 and ( 

732 is_line_short_enough(line, mode=mode, line_str=line_str_hugging_power_ops) 

733 or line.contains_unsplittable_type_ignore() 

734 ) 

735 and not (line.inside_brackets and line.contains_standalone_comments()) 

736 and not line.contains_implicit_multiline_string_with_comments() 

737 ): 

738 # Only apply basic string preprocessing, since lines shouldn't be split here. 

739 if Preview.string_processing in mode: 

740 transformers = [string_merge, string_paren_strip] 

741 else: 

742 transformers = [] 

743 elif line.is_def and not should_split_funcdef_with_rhs(line, mode): 

744 transformers = [left_hand_split] 

745 else: 

746 

747 def _rhs( 

748 self: object, line: Line, features: Collection[Feature], mode: Mode 

749 ) -> Iterator[Line]: 

750 """Wraps calls to `right_hand_split`. 

751 

752 The calls increasingly `omit` right-hand trailers (bracket pairs with 

753 content), meaning the trailers get glued together to split on another 

754 bracket pair instead. 

755 """ 

756 for omit in generate_trailers_to_omit(line, mode.line_length): 

757 lines = list(right_hand_split(line, mode, features, omit=omit)) 

758 # Note: this check is only able to figure out if the first line of the 

759 # *current* transformation fits in the line length. This is true only 

760 # for simple cases. All others require running more transforms via 

761 # `transform_line()`. This check doesn't know if those would succeed. 

762 if is_line_short_enough(lines[0], mode=mode): 

763 yield from lines 

764 return 

765 

766 # All splits failed, best effort split with no omits. 

767 # This mostly happens to multiline strings that are by definition 

768 # reported as not fitting a single line, as well as lines that contain 

769 # trailing commas (those have to be exploded). 

770 yield from right_hand_split(line, mode, features=features) 

771 

772 # HACK: nested functions (like _rhs) compiled by mypyc don't retain their 

773 # __name__ attribute which is needed in `run_transformer` further down. 

774 # Unfortunately a nested class breaks mypyc too. So a class must be created 

775 # via type ... https://github.com/mypyc/mypyc/issues/884 

776 rhs = type("rhs", (), {"__call__": _rhs})() 

777 

778 if Preview.string_processing in mode: 

779 if line.inside_brackets: 

780 transformers = [ 

781 string_merge, 

782 string_paren_strip, 

783 string_split, 

784 delimiter_split, 

785 standalone_comment_split, 

786 string_paren_wrap, 

787 rhs, 

788 ] 

789 else: 

790 transformers = [ 

791 string_merge, 

792 string_paren_strip, 

793 string_split, 

794 string_paren_wrap, 

795 rhs, 

796 ] 

797 else: 

798 if line.inside_brackets: 

799 transformers = [delimiter_split, standalone_comment_split, rhs] 

800 else: 

801 transformers = [rhs] 

802 

803 if Preview.simplify_power_operator_hugging not in mode: 

804 # It's always safe to attempt hugging of power operations and pretty much every 

805 # line could match. 

806 transformers.append(hug_power_op) 

807 

808 for transform in transformers: 

809 # We are accumulating lines in `result` because we might want to abort 

810 # mission and return the original line in the end, or attempt a different 

811 # split altogether. 

812 try: 

813 result = run_transformer(line, transform, mode, features, line_str=line_str) 

814 except CannotTransform: 

815 continue 

816 else: 

817 yield from result 

818 break 

819 

820 else: 

821 yield line 

822 

823 

824def should_split_funcdef_with_rhs(line: Line, mode: Mode) -> bool: 

825 """If a funcdef has a magic trailing comma in the return type, then we should first 

826 split the line with rhs to respect the comma. 

827 """ 

828 return_type_leaves: list[Leaf] = [] 

829 in_return_type = False 

830 

831 for leaf in line.leaves: 

832 if leaf.type == token.COLON: 

833 in_return_type = False 

834 if in_return_type: 

835 return_type_leaves.append(leaf) 

836 if leaf.type == token.RARROW: 

837 in_return_type = True 

838 

839 # using `bracket_split_build_line` will mess with whitespace, so we duplicate a 

840 # couple lines from it. 

841 result = Line(mode=line.mode, depth=line.depth) 

842 leaves_to_track = get_leaves_inside_matching_brackets(return_type_leaves) 

843 for leaf in return_type_leaves: 

844 result.append( 

845 leaf, 

846 preformatted=True, 

847 track_bracket=id(leaf) in leaves_to_track, 

848 ) 

849 

850 # we could also return true if the line is too long, and the return type is longer 

851 # than the param list. Or if `should_split_rhs` returns True. 

852 return result.magic_trailing_comma is not None 

853 

854 

855class _BracketSplitComponent(Enum): 

856 head = auto() 

857 body = auto() 

858 tail = auto() 

859 

860 

861def left_hand_split( 

862 line: Line, _features: Collection[Feature], mode: Mode 

863) -> Iterator[Line]: 

864 """Split line into many lines, starting with the first matching bracket pair. 

865 

866 Note: this usually looks weird, only use this for function definitions. 

867 Prefer RHS otherwise. This is why this function is not symmetrical with 

868 :func:`right_hand_split` which also handles optional parentheses. 

869 """ 

870 for leaf_type in [token.LPAR, token.LSQB]: 

871 tail_leaves: list[Leaf] = [] 

872 body_leaves: list[Leaf] = [] 

873 head_leaves: list[Leaf] = [] 

874 current_leaves = head_leaves 

875 matching_bracket: Leaf | None = None 

876 depth = 0 

877 for index, leaf in enumerate(line.leaves): 

878 if index == 2 and leaf.type == token.LSQB: 

879 # A [ at index 2 means this is a type param, so start 

880 # tracking the depth 

881 depth += 1 

882 elif depth > 0: 

883 if leaf.type == token.LSQB: 

884 depth += 1 

885 elif leaf.type == token.RSQB: 

886 depth -= 1 

887 if ( 

888 current_leaves is body_leaves 

889 and leaf.type in CLOSING_BRACKETS 

890 and leaf.opening_bracket is matching_bracket 

891 and isinstance(matching_bracket, Leaf) 

892 # If the code is still on LPAR and we are inside a type 

893 # param, ignore the match since this is searching 

894 # for the function arguments 

895 and not (leaf_type == token.LPAR and depth > 0) 

896 ): 

897 ensure_visible(leaf) 

898 ensure_visible(matching_bracket) 

899 current_leaves = tail_leaves if body_leaves else head_leaves 

900 current_leaves.append(leaf) 

901 if current_leaves is head_leaves: 

902 if leaf.type == leaf_type and ( 

903 not (leaf_type == token.LPAR and depth > 0) 

904 ): 

905 matching_bracket = leaf 

906 current_leaves = body_leaves 

907 if matching_bracket and tail_leaves: 

908 break 

909 if not matching_bracket or not tail_leaves: 

910 raise CannotSplit("No brackets found") 

911 

912 head = bracket_split_build_line( 

913 head_leaves, line, matching_bracket, component=_BracketSplitComponent.head 

914 ) 

915 body = bracket_split_build_line( 

916 body_leaves, line, matching_bracket, component=_BracketSplitComponent.body 

917 ) 

918 tail = bracket_split_build_line( 

919 tail_leaves, line, matching_bracket, component=_BracketSplitComponent.tail 

920 ) 

921 bracket_split_succeeded_or_raise(head, body, tail) 

922 for result in (head, body, tail): 

923 if result: 

924 yield result 

925 

926 

927def right_hand_split( 

928 line: Line, 

929 mode: Mode, 

930 features: Collection[Feature] = (), 

931 omit: Collection[LeafID] = (), 

932) -> Iterator[Line]: 

933 """Split line into many lines, starting with the last matching bracket pair. 

934 

935 If the split was by optional parentheses, attempt splitting without them, too. 

936 `omit` is a collection of closing bracket IDs that shouldn't be considered for 

937 this split. 

938 

939 Note: running this function modifies `bracket_depth` on the leaves of `line`. 

940 """ 

941 rhs_result = _first_right_hand_split(line, omit=omit) 

942 yield from _maybe_split_omitting_optional_parens( 

943 rhs_result, line, mode, features=features, omit=omit 

944 ) 

945 

946 

947def _first_right_hand_split( 

948 line: Line, 

949 omit: Collection[LeafID] = (), 

950) -> RHSResult: 

951 """Split the line into head, body, tail starting with the last bracket pair. 

952 

953 Note: this function should not have side effects. It's relied upon by 

954 _maybe_split_omitting_optional_parens to get an opinion whether to prefer 

955 splitting on the right side of an assignment statement. 

956 """ 

957 tail_leaves: list[Leaf] = [] 

958 body_leaves: list[Leaf] = [] 

959 head_leaves: list[Leaf] = [] 

960 current_leaves = tail_leaves 

961 opening_bracket: Leaf | None = None 

962 closing_bracket: Leaf | None = None 

963 for leaf in reversed(line.leaves): 

964 if current_leaves is body_leaves: 

965 if leaf is opening_bracket: 

966 current_leaves = head_leaves if body_leaves else tail_leaves 

967 current_leaves.append(leaf) 

968 if current_leaves is tail_leaves: 

969 if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: 

970 opening_bracket = leaf.opening_bracket 

971 closing_bracket = leaf 

972 current_leaves = body_leaves 

973 if not (opening_bracket and closing_bracket and head_leaves): 

974 # If there is no opening or closing_bracket that means the split failed and 

975 # all content is in the tail. Otherwise, if `head_leaves` are empty, it means 

976 # the matching `opening_bracket` wasn't available on `line` anymore. 

977 raise CannotSplit("No brackets found") 

978 

979 tail_leaves.reverse() 

980 body_leaves.reverse() 

981 head_leaves.reverse() 

982 

983 body: Line | None = None 

984 if ( 

985 Preview.hug_parens_with_braces_and_square_brackets in line.mode 

986 and tail_leaves[0].value 

987 and tail_leaves[0].opening_bracket is head_leaves[-1] 

988 ): 

989 inner_body_leaves = list(body_leaves) 

990 hugged_opening_leaves: list[Leaf] = [] 

991 hugged_closing_leaves: list[Leaf] = [] 

992 is_unpacking = body_leaves[0].type in [token.STAR, token.DOUBLESTAR] 

993 unpacking_offset: int = 1 if is_unpacking else 0 

994 while ( 

995 len(inner_body_leaves) >= 2 + unpacking_offset 

996 and inner_body_leaves[-1].type in CLOSING_BRACKETS 

997 and inner_body_leaves[-1].opening_bracket 

998 is inner_body_leaves[unpacking_offset] 

999 ): 

1000 if unpacking_offset: 

1001 hugged_opening_leaves.append(inner_body_leaves.pop(0)) 

1002 unpacking_offset = 0 

1003 hugged_opening_leaves.append(inner_body_leaves.pop(0)) 

1004 hugged_closing_leaves.insert(0, inner_body_leaves.pop()) 

1005 

1006 if hugged_opening_leaves and inner_body_leaves: 

1007 inner_body = bracket_split_build_line( 

1008 inner_body_leaves, 

1009 line, 

1010 hugged_opening_leaves[-1], 

1011 component=_BracketSplitComponent.body, 

1012 ) 

1013 if ( 

1014 line.mode.magic_trailing_comma 

1015 and inner_body_leaves[-1].type == token.COMMA 

1016 ): 

1017 should_hug = True 

1018 else: 

1019 line_length = line.mode.line_length - sum( 

1020 len(str(leaf)) 

1021 for leaf in hugged_opening_leaves + hugged_closing_leaves 

1022 ) 

1023 if is_line_short_enough( 

1024 inner_body, mode=replace(line.mode, line_length=line_length) 

1025 ): 

1026 # Do not hug if it fits on a single line. 

1027 should_hug = False 

1028 else: 

1029 should_hug = True 

1030 if should_hug: 

1031 body_leaves = inner_body_leaves 

1032 head_leaves.extend(hugged_opening_leaves) 

1033 tail_leaves = hugged_closing_leaves + tail_leaves 

1034 body = inner_body # No need to re-calculate the body again later. 

1035 

1036 head = bracket_split_build_line( 

1037 head_leaves, line, opening_bracket, component=_BracketSplitComponent.head 

1038 ) 

1039 if body is None: 

1040 body = bracket_split_build_line( 

1041 body_leaves, line, opening_bracket, component=_BracketSplitComponent.body 

1042 ) 

1043 tail = bracket_split_build_line( 

1044 tail_leaves, line, opening_bracket, component=_BracketSplitComponent.tail 

1045 ) 

1046 bracket_split_succeeded_or_raise(head, body, tail) 

1047 return RHSResult(head, body, tail, opening_bracket, closing_bracket) 

1048 

1049 

1050def _maybe_split_omitting_optional_parens( 

1051 rhs: RHSResult, 

1052 line: Line, 

1053 mode: Mode, 

1054 features: Collection[Feature] = (), 

1055 omit: Collection[LeafID] = (), 

1056) -> Iterator[Line]: 

1057 if ( 

1058 Feature.FORCE_OPTIONAL_PARENTHESES not in features 

1059 # the opening bracket is an optional paren 

1060 and rhs.opening_bracket.type == token.LPAR 

1061 and not rhs.opening_bracket.value 

1062 # the closing bracket is an optional paren 

1063 and rhs.closing_bracket.type == token.RPAR 

1064 and not rhs.closing_bracket.value 

1065 # it's not an import (optional parens are the only thing we can split on 

1066 # in this case; attempting a split without them is a waste of time) 

1067 and not line.is_import 

1068 # and we can actually remove the parens 

1069 and can_omit_invisible_parens(rhs, mode.line_length) 

1070 ): 

1071 omit = {id(rhs.closing_bracket), *omit} 

1072 try: 

1073 # The RHSResult Omitting Optional Parens. 

1074 rhs_oop = _first_right_hand_split(line, omit=omit) 

1075 if _prefer_split_rhs_oop_over_rhs(rhs_oop, rhs, mode): 

1076 yield from _maybe_split_omitting_optional_parens( 

1077 rhs_oop, line, mode, features=features, omit=omit 

1078 ) 

1079 return 

1080 

1081 except CannotSplit as e: 

1082 # For chained assignments we want to use the previous successful split 

1083 if line.is_chained_assignment: 

1084 pass 

1085 

1086 elif ( 

1087 not can_be_split(rhs.body) 

1088 and not is_line_short_enough(rhs.body, mode=mode) 

1089 and not ( 

1090 Preview.wrap_long_dict_values_in_parens 

1091 and rhs.opening_bracket.parent 

1092 and rhs.opening_bracket.parent.parent 

1093 and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker 

1094 ) 

1095 ): 

1096 raise CannotSplit( 

1097 "Splitting failed, body is still too long and can't be split." 

1098 ) from e 

1099 

1100 elif ( 

1101 rhs.head.contains_multiline_strings() 

1102 or rhs.tail.contains_multiline_strings() 

1103 ): 

1104 raise CannotSplit( 

1105 "The current optional pair of parentheses is bound to fail to" 

1106 " satisfy the splitting algorithm because the head or the tail" 

1107 " contains multiline strings which by definition never fit one" 

1108 " line." 

1109 ) from e 

1110 

1111 ensure_visible(rhs.opening_bracket) 

1112 ensure_visible(rhs.closing_bracket) 

1113 for result in (rhs.head, rhs.body, rhs.tail): 

1114 if result: 

1115 yield result 

1116 

1117 

1118def _prefer_split_rhs_oop_over_rhs( 

1119 rhs_oop: RHSResult, rhs: RHSResult, mode: Mode 

1120) -> bool: 

1121 """ 

1122 Returns whether we should prefer the result from a split omitting optional parens 

1123 (rhs_oop) over the original (rhs). 

1124 """ 

1125 # contains unsplittable type ignore 

1126 if ( 

1127 rhs_oop.head.contains_unsplittable_type_ignore() 

1128 or rhs_oop.body.contains_unsplittable_type_ignore() 

1129 or rhs_oop.tail.contains_unsplittable_type_ignore() 

1130 ): 

1131 return True 

1132 

1133 # Retain optional parens around dictionary values 

1134 if ( 

1135 Preview.wrap_long_dict_values_in_parens 

1136 and rhs.opening_bracket.parent 

1137 and rhs.opening_bracket.parent.parent 

1138 and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker 

1139 and rhs.body.bracket_tracker.delimiters 

1140 ): 

1141 # Unless the split is inside the key 

1142 return any(leaf.type == token.COLON for leaf in rhs_oop.tail.leaves) 

1143 

1144 # the split is right after `=` 

1145 if not (len(rhs.head.leaves) >= 2 and rhs.head.leaves[-2].type == token.EQUAL): 

1146 return True 

1147 

1148 # the left side of assignment contains brackets 

1149 if not any(leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1]): 

1150 return True 

1151 

1152 # the left side of assignment is short enough (the -1 is for the ending optional 

1153 # paren) 

1154 if not is_line_short_enough( 

1155 rhs.head, mode=replace(mode, line_length=mode.line_length - 1) 

1156 ): 

1157 return True 

1158 

1159 # the left side of assignment won't explode further because of magic trailing comma 

1160 if rhs.head.magic_trailing_comma is not None: 

1161 return True 

1162 

1163 # If we have multiple targets, we prefer more `=`s on the head vs pushing them to 

1164 # the body 

1165 rhs_head_equal_count = [leaf.type for leaf in rhs.head.leaves].count(token.EQUAL) 

1166 rhs_oop_head_equal_count = [leaf.type for leaf in rhs_oop.head.leaves].count( 

1167 token.EQUAL 

1168 ) 

1169 if rhs_head_equal_count > 1 and rhs_head_equal_count > rhs_oop_head_equal_count: 

1170 return False 

1171 

1172 has_closing_bracket_after_assign = False 

1173 for leaf in reversed(rhs_oop.head.leaves): 

1174 if leaf.type == token.EQUAL: 

1175 break 

1176 if leaf.type in CLOSING_BRACKETS: 

1177 has_closing_bracket_after_assign = True 

1178 break 

1179 return ( 

1180 # contains matching brackets after the `=` (done by checking there is a 

1181 # closing bracket) 

1182 has_closing_bracket_after_assign 

1183 or ( 

1184 # the split is actually from inside the optional parens (done by checking 

1185 # the first line still contains the `=`) 

1186 any(leaf.type == token.EQUAL for leaf in rhs_oop.head.leaves) 

1187 # the first line is short enough 

1188 and is_line_short_enough(rhs_oop.head, mode=mode) 

1189 ) 

1190 ) 

1191 

1192 

1193def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None: 

1194 """Raise :exc:`CannotSplit` if the last left- or right-hand split failed. 

1195 

1196 Do nothing otherwise. 

1197 

1198 A left- or right-hand split is based on a pair of brackets. Content before 

1199 (and including) the opening bracket is left on one line, content inside the 

1200 brackets is put on a separate line, and finally content starting with and 

1201 following the closing bracket is put on a separate line. 

1202 

1203 Those are called `head`, `body`, and `tail`, respectively. If the split 

1204 produced the same line (all content in `head`) or ended up with an empty `body` 

1205 and the `tail` is just the closing bracket, then it's considered failed. 

1206 """ 

1207 tail_len = len(str(tail).strip()) 

1208 if not body: 

1209 if tail_len == 0: 

1210 raise CannotSplit("Splitting brackets produced the same line") 

1211 

1212 elif tail_len < 3: 

1213 raise CannotSplit( 

1214 f"Splitting brackets on an empty body to save {tail_len} characters is" 

1215 " not worth it" 

1216 ) 

1217 

1218 

1219def _ensure_trailing_comma( 

1220 leaves: list[Leaf], original: Line, opening_bracket: Leaf 

1221) -> bool: 

1222 if not leaves: 

1223 return False 

1224 # Ensure a trailing comma for imports 

1225 if original.is_import: 

1226 return True 

1227 # ...and standalone function arguments 

1228 if not original.is_def: 

1229 return False 

1230 if opening_bracket.value != "(": 

1231 return False 

1232 # Don't add commas if we already have any commas 

1233 if any( 

1234 leaf.type == token.COMMA and not is_part_of_annotation(leaf) for leaf in leaves 

1235 ): 

1236 return False 

1237 

1238 # Find a leaf with a parent (comments don't have parents) 

1239 leaf_with_parent = next((leaf for leaf in leaves if leaf.parent), None) 

1240 if leaf_with_parent is None: 

1241 return True 

1242 # Don't add commas inside parenthesized return annotations 

1243 if get_annotation_type(leaf_with_parent) == "return": 

1244 return False 

1245 # Don't add commas inside PEP 604 unions 

1246 if ( 

1247 leaf_with_parent.parent 

1248 and leaf_with_parent.parent.next_sibling 

1249 and leaf_with_parent.parent.next_sibling.type == token.VBAR 

1250 ): 

1251 return False 

1252 return True 

1253 

1254 

1255def bracket_split_build_line( 

1256 leaves: list[Leaf], 

1257 original: Line, 

1258 opening_bracket: Leaf, 

1259 *, 

1260 component: _BracketSplitComponent, 

1261) -> Line: 

1262 """Return a new line with given `leaves` and respective comments from `original`. 

1263 

1264 If it's the head component, brackets will be tracked so trailing commas are 

1265 respected. 

1266 

1267 If it's the body component, the result line is one-indented inside brackets and as 

1268 such has its first leaf's prefix normalized and a trailing comma added when 

1269 expected. 

1270 """ 

1271 result = Line(mode=original.mode, depth=original.depth) 

1272 if component is _BracketSplitComponent.body: 

1273 result.inside_brackets = True 

1274 result.depth += 1 

1275 if _ensure_trailing_comma(leaves, original, opening_bracket): 

1276 for i in range(len(leaves) - 1, -1, -1): 

1277 if leaves[i].type == STANDALONE_COMMENT: 

1278 continue 

1279 

1280 if leaves[i].type != token.COMMA: 

1281 new_comma = Leaf(token.COMMA, ",") 

1282 leaves.insert(i + 1, new_comma) 

1283 break 

1284 

1285 leaves_to_track: set[LeafID] = set() 

1286 if component is _BracketSplitComponent.head: 

1287 leaves_to_track = get_leaves_inside_matching_brackets(leaves) 

1288 # Populate the line 

1289 for leaf in leaves: 

1290 result.append( 

1291 leaf, 

1292 preformatted=True, 

1293 track_bracket=id(leaf) in leaves_to_track, 

1294 ) 

1295 for comment_after in original.comments_after(leaf): 

1296 result.append(comment_after, preformatted=True) 

1297 if component is _BracketSplitComponent.body and should_split_line( 

1298 result, opening_bracket 

1299 ): 

1300 result.should_split_rhs = True 

1301 return result 

1302 

1303 

1304def dont_increase_indentation(split_func: Transformer) -> Transformer: 

1305 """Normalize prefix of the first leaf in every line returned by `split_func`. 

1306 

1307 This is a decorator over relevant split functions. 

1308 """ 

1309 

1310 @wraps(split_func) 

1311 def split_wrapper( 

1312 line: Line, features: Collection[Feature], mode: Mode 

1313 ) -> Iterator[Line]: 

1314 for split_line in split_func(line, features, mode): 

1315 split_line.leaves[0].prefix = "" 

1316 yield split_line 

1317 

1318 return split_wrapper 

1319 

1320 

1321def _get_last_non_comment_leaf(line: Line) -> int | None: 

1322 for leaf_idx in range(len(line.leaves) - 1, 0, -1): 

1323 if line.leaves[leaf_idx].type != STANDALONE_COMMENT: 

1324 return leaf_idx 

1325 return None 

1326 

1327 

1328def _can_add_trailing_comma(leaf: Leaf, features: Collection[Feature]) -> bool: 

1329 if is_vararg(leaf, within={syms.typedargslist}): 

1330 return Feature.TRAILING_COMMA_IN_DEF in features 

1331 if is_vararg(leaf, within={syms.arglist, syms.argument}): 

1332 return Feature.TRAILING_COMMA_IN_CALL in features 

1333 return True 

1334 

1335 

1336def _safe_add_trailing_comma(safe: bool, delimiter_priority: int, line: Line) -> Line: 

1337 if ( 

1338 safe 

1339 and delimiter_priority == COMMA_PRIORITY 

1340 and line.leaves[-1].type != token.COMMA 

1341 and line.leaves[-1].type != STANDALONE_COMMENT 

1342 ): 

1343 new_comma = Leaf(token.COMMA, ",") 

1344 line.append(new_comma) 

1345 return line 

1346 

1347 

1348MIGRATE_COMMENT_DELIMITERS = {STRING_PRIORITY, COMMA_PRIORITY} 

1349 

1350 

1351@dont_increase_indentation 

1352def delimiter_split( 

1353 line: Line, features: Collection[Feature], mode: Mode 

1354) -> Iterator[Line]: 

1355 """Split according to delimiters of the highest priority. 

1356 

1357 If the appropriate Features are given, the split will add trailing commas 

1358 also in function signatures and calls that contain `*` and `**`. 

1359 """ 

1360 if len(line.leaves) == 0: 

1361 raise CannotSplit("Line empty") from None 

1362 last_leaf = line.leaves[-1] 

1363 

1364 bt = line.bracket_tracker 

1365 try: 

1366 delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) 

1367 except ValueError: 

1368 raise CannotSplit("No delimiters found") from None 

1369 

1370 if ( 

1371 delimiter_priority == DOT_PRIORITY 

1372 and bt.delimiter_count_with_priority(delimiter_priority) == 1 

1373 ): 

1374 raise CannotSplit("Splitting a single attribute from its owner looks wrong") 

1375 

1376 current_line = Line( 

1377 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets 

1378 ) 

1379 lowest_depth = sys.maxsize 

1380 trailing_comma_safe = True 

1381 

1382 def append_to_line(leaf: Leaf) -> Iterator[Line]: 

1383 """Append `leaf` to current line or to new line if appending impossible.""" 

1384 nonlocal current_line 

1385 try: 

1386 current_line.append_safe(leaf, preformatted=True) 

1387 except ValueError: 

1388 yield current_line 

1389 

1390 current_line = Line( 

1391 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets 

1392 ) 

1393 current_line.append(leaf) 

1394 

1395 def append_comments(leaf: Leaf) -> Iterator[Line]: 

1396 for comment_after in line.comments_after(leaf): 

1397 yield from append_to_line(comment_after) 

1398 

1399 last_non_comment_leaf = _get_last_non_comment_leaf(line) 

1400 for leaf_idx, leaf in enumerate(line.leaves): 

1401 yield from append_to_line(leaf) 

1402 

1403 previous_priority = leaf_idx > 0 and bt.delimiters.get( 

1404 id(line.leaves[leaf_idx - 1]) 

1405 ) 

1406 if ( 

1407 previous_priority != delimiter_priority 

1408 or delimiter_priority in MIGRATE_COMMENT_DELIMITERS 

1409 ): 

1410 yield from append_comments(leaf) 

1411 

1412 lowest_depth = min(lowest_depth, leaf.bracket_depth) 

1413 if trailing_comma_safe and leaf.bracket_depth == lowest_depth: 

1414 trailing_comma_safe = _can_add_trailing_comma(leaf, features) 

1415 

1416 if last_leaf.type == STANDALONE_COMMENT and leaf_idx == last_non_comment_leaf: 

1417 current_line = _safe_add_trailing_comma( 

1418 trailing_comma_safe, delimiter_priority, current_line 

1419 ) 

1420 

1421 leaf_priority = bt.delimiters.get(id(leaf)) 

1422 if leaf_priority == delimiter_priority: 

1423 if ( 

1424 leaf_idx + 1 < len(line.leaves) 

1425 and delimiter_priority not in MIGRATE_COMMENT_DELIMITERS 

1426 ): 

1427 yield from append_comments(line.leaves[leaf_idx + 1]) 

1428 

1429 yield current_line 

1430 current_line = Line( 

1431 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets 

1432 ) 

1433 

1434 if current_line: 

1435 current_line = _safe_add_trailing_comma( 

1436 trailing_comma_safe, delimiter_priority, current_line 

1437 ) 

1438 yield current_line 

1439 

1440 

1441@dont_increase_indentation 

1442def standalone_comment_split( 

1443 line: Line, features: Collection[Feature], mode: Mode 

1444) -> Iterator[Line]: 

1445 """Split standalone comments from the rest of the line.""" 

1446 if not line.contains_standalone_comments(): 

1447 raise CannotSplit("Line does not have any standalone comments") 

1448 

1449 current_line = Line( 

1450 mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets 

1451 ) 

1452 

1453 def append_to_line(leaf: Leaf) -> Iterator[Line]: 

1454 """Append `leaf` to current line or to new line if appending impossible.""" 

1455 nonlocal current_line 

1456 try: 

1457 current_line.append_safe(leaf, preformatted=True) 

1458 except ValueError: 

1459 yield current_line 

1460 

1461 current_line = Line( 

1462 line.mode, depth=line.depth, inside_brackets=line.inside_brackets 

1463 ) 

1464 current_line.append(leaf) 

1465 

1466 for leaf in line.leaves: 

1467 yield from append_to_line(leaf) 

1468 

1469 for comment_after in line.comments_after(leaf): 

1470 yield from append_to_line(comment_after) 

1471 

1472 if current_line: 

1473 yield current_line 

1474 

1475 

1476def normalize_invisible_parens( 

1477 node: Node, parens_after: set[str], *, mode: Mode, features: Collection[Feature] 

1478) -> None: 

1479 """Make existing optional parentheses invisible or create new ones. 

1480 

1481 `parens_after` is a set of string leaf values immediately after which parens 

1482 should be put. 

1483 

1484 Standardizes on visible parentheses for single-element tuples, and keeps 

1485 existing visible parentheses for other tuples and generator expressions. 

1486 """ 

1487 for pc in list_comments(node.prefix, is_endmarker=False, mode=mode): 

1488 if contains_fmt_directive(pc.value, FMT_OFF): 

1489 # This `node` has a prefix with `# fmt: off`, don't mess with parens. 

1490 return 

1491 

1492 # The multiple context managers grammar has a different pattern, thus this is 

1493 # separate from the for-loop below. This possibly wraps them in invisible parens, 

1494 # and later will be removed in remove_with_parens when needed. 

1495 if node.type == syms.with_stmt: 

1496 _maybe_wrap_cms_in_parens(node, mode, features) 

1497 

1498 check_lpar = False 

1499 for index, child in enumerate(list(node.children)): 

1500 # Fixes a bug where invisible parens are not properly stripped from 

1501 # assignment statements that contain type annotations. 

1502 if isinstance(child, Node) and child.type == syms.annassign: 

1503 normalize_invisible_parens( 

1504 child, parens_after=parens_after, mode=mode, features=features 

1505 ) 

1506 

1507 # Fixes a bug where invisible parens are not properly wrapped around 

1508 # case blocks. 

1509 if isinstance(child, Node) and child.type == syms.case_block: 

1510 normalize_invisible_parens( 

1511 child, parens_after={"case"}, mode=mode, features=features 

1512 ) 

1513 

1514 # Add parentheses around if guards in case blocks 

1515 if isinstance(child, Node) and child.type == syms.guard: 

1516 normalize_invisible_parens( 

1517 child, parens_after={"if"}, mode=mode, features=features 

1518 ) 

1519 

1520 # Add parentheses around long tuple unpacking in assignments. 

1521 if ( 

1522 index == 0 

1523 and isinstance(child, Node) 

1524 and child.type == syms.testlist_star_expr 

1525 ): 

1526 check_lpar = True 

1527 

1528 if ( 

1529 index == 0 

1530 and isinstance(child, Node) 

1531 and child.type == syms.atom 

1532 and node.type == syms.expr_stmt 

1533 and not _atom_has_magic_trailing_comma(child, mode) 

1534 and not _is_atom_multiline(child) 

1535 ): 

1536 if maybe_make_parens_invisible_in_atom( 

1537 child, 

1538 parent=node, 

1539 mode=mode, 

1540 features=features, 

1541 remove_brackets_around_comma=True, 

1542 allow_star_expr=True, 

1543 ): 

1544 wrap_in_parentheses(node, child, visible=False) 

1545 

1546 if check_lpar: 

1547 if ( 

1548 child.type == syms.atom 

1549 and node.type == syms.for_stmt 

1550 and isinstance(child.prev_sibling, Leaf) 

1551 and child.prev_sibling.type == token.NAME 

1552 and child.prev_sibling.value == "for" 

1553 ): 

1554 if maybe_make_parens_invisible_in_atom( 

1555 child, 

1556 parent=node, 

1557 mode=mode, 

1558 features=features, 

1559 remove_brackets_around_comma=True, 

1560 ): 

1561 wrap_in_parentheses(node, child, visible=False) 

1562 elif isinstance(child, Node) and node.type == syms.with_stmt: 

1563 remove_with_parens(child, node, mode=mode, features=features) 

1564 elif child.type == syms.atom and not ( 

1565 "in" in parens_after 

1566 and len(child.children) == 3 

1567 and is_lpar_token(child.children[0]) 

1568 and is_rpar_token(child.children[-1]) 

1569 and child.children[1].type == syms.test 

1570 ): 

1571 if maybe_make_parens_invisible_in_atom( 

1572 child, parent=node, mode=mode, features=features 

1573 ): 

1574 wrap_in_parentheses(node, child, visible=False) 

1575 elif is_one_tuple(child): 

1576 wrap_in_parentheses(node, child, visible=True) 

1577 elif node.type == syms.import_from: 

1578 _normalize_import_from(node, child, index) 

1579 break 

1580 elif ( 

1581 index == 1 

1582 and child.type == token.STAR 

1583 and node.type == syms.except_clause 

1584 ): 

1585 # In except* (PEP 654), the star is actually part of 

1586 # of the keyword. So we need to skip the insertion of 

1587 # invisible parentheses to work more precisely. 

1588 continue 

1589 

1590 elif ( 

1591 isinstance(child, Leaf) 

1592 and child.next_sibling is not None 

1593 and child.next_sibling.type == token.COLON 

1594 and child.value == "case" 

1595 ): 

1596 # A special patch for "case case:" scenario, the second occurrence 

1597 # of case will be not parsed as a Python keyword. 

1598 break 

1599 

1600 elif not is_multiline_string(child): 

1601 wrap_in_parentheses(node, child, visible=False) 

1602 

1603 comma_check = child.type == token.COMMA 

1604 

1605 check_lpar = isinstance(child, Leaf) and ( 

1606 child.value in parens_after or comma_check 

1607 ) 

1608 

1609 

1610def _normalize_import_from(parent: Node, child: LN, index: int) -> None: 

1611 # "import from" nodes store parentheses directly as part of 

1612 # the statement 

1613 if is_lpar_token(child): 

1614 assert is_rpar_token(parent.children[-1]) 

1615 # make parentheses invisible 

1616 child.value = "" 

1617 parent.children[-1].value = "" 

1618 elif child.type != token.STAR: 

1619 # insert invisible parentheses 

1620 parent.insert_child(index, Leaf(token.LPAR, "")) 

1621 parent.append_child(Leaf(token.RPAR, "")) 

1622 

1623 

1624def remove_await_parens(node: Node, mode: Mode, features: Collection[Feature]) -> None: 

1625 if node.children[0].type == token.AWAIT and len(node.children) > 1: 

1626 if ( 

1627 node.children[1].type == syms.atom 

1628 and node.children[1].children[0].type == token.LPAR 

1629 ): 

1630 if maybe_make_parens_invisible_in_atom( 

1631 node.children[1], 

1632 parent=node, 

1633 mode=mode, 

1634 features=features, 

1635 remove_brackets_around_comma=True, 

1636 ): 

1637 wrap_in_parentheses(node, node.children[1], visible=False) 

1638 

1639 # Since await is an expression we shouldn't remove 

1640 # brackets in cases where this would change 

1641 # the AST due to operator precedence. 

1642 # Therefore we only aim to remove brackets around 

1643 # power nodes that aren't also await expressions themselves. 

1644 # https://peps.python.org/pep-0492/#updated-operator-precedence-table 

1645 # N.B. We've still removed any redundant nested brackets though :) 

1646 opening_bracket = cast(Leaf, node.children[1].children[0]) 

1647 closing_bracket = cast(Leaf, node.children[1].children[-1]) 

1648 bracket_contents = node.children[1].children[1] 

1649 if isinstance(bracket_contents, Node) and ( 

1650 bracket_contents.type != syms.power 

1651 or bracket_contents.children[0].type == token.AWAIT 

1652 or any( 

1653 isinstance(child, Leaf) and child.type == token.DOUBLESTAR 

1654 for child in bracket_contents.children 

1655 ) 

1656 ): 

1657 ensure_visible(opening_bracket) 

1658 ensure_visible(closing_bracket) 

1659 

1660 

1661def _maybe_wrap_cms_in_parens( 

1662 node: Node, mode: Mode, features: Collection[Feature] 

1663) -> None: 

1664 """When enabled and safe, wrap the multiple context managers in invisible parens. 

1665 

1666 It is only safe when `features` contain Feature.PARENTHESIZED_CONTEXT_MANAGERS. 

1667 """ 

1668 if ( 

1669 Feature.PARENTHESIZED_CONTEXT_MANAGERS not in features 

1670 or len(node.children) <= 2 

1671 # If it's an atom, it's already wrapped in parens. 

1672 or node.children[1].type == syms.atom 

1673 ): 

1674 return 

1675 colon_index: int | None = None 

1676 for i in range(2, len(node.children)): 

1677 if node.children[i].type == token.COLON: 

1678 colon_index = i 

1679 break 

1680 if colon_index is not None: 

1681 lpar = Leaf(token.LPAR, "") 

1682 rpar = Leaf(token.RPAR, "") 

1683 context_managers = node.children[1:colon_index] 

1684 for child in context_managers: 

1685 child.remove() 

1686 # After wrapping, the with_stmt will look like this: 

1687 # with_stmt 

1688 # NAME 'with' 

1689 # atom 

1690 # LPAR '' 

1691 # testlist_gexp 

1692 # ... <-- context_managers 

1693 # /testlist_gexp 

1694 # RPAR '' 

1695 # /atom 

1696 # COLON ':' 

1697 new_child = Node( 

1698 syms.atom, [lpar, Node(syms.testlist_gexp, context_managers), rpar] 

1699 ) 

1700 node.insert_child(1, new_child) 

1701 

1702 

1703def remove_with_parens( 

1704 node: Node, parent: Node, mode: Mode, features: Collection[Feature] 

1705) -> None: 

1706 """Recursively hide optional parens in `with` statements.""" 

1707 # Removing all unnecessary parentheses in with statements in one pass is a tad 

1708 # complex as different variations of bracketed statements result in pretty 

1709 # different parse trees: 

1710 # 

1711 # with (open("file")) as f: # this is an asexpr_test 

1712 # ... 

1713 # 

1714 # with (open("file") as f): # this is an atom containing an 

1715 # ... # asexpr_test 

1716 # 

1717 # with (open("file")) as f, (open("file")) as f: # this is asexpr_test, COMMA, 

1718 # ... # asexpr_test 

1719 # 

1720 # with (open("file") as f, open("file") as f): # an atom containing a 

1721 # ... # testlist_gexp which then 

1722 # # contains multiple asexpr_test(s) 

1723 if node.type == syms.atom: 

1724 if maybe_make_parens_invisible_in_atom( 

1725 node, 

1726 parent=parent, 

1727 mode=mode, 

1728 features=features, 

1729 remove_brackets_around_comma=True, 

1730 ): 

1731 wrap_in_parentheses(parent, node, visible=False) 

1732 if isinstance(node.children[1], Node): 

1733 remove_with_parens(node.children[1], node, mode=mode, features=features) 

1734 elif node.type == syms.testlist_gexp: 

1735 for child in node.children: 

1736 if isinstance(child, Node): 

1737 remove_with_parens(child, node, mode=mode, features=features) 

1738 elif node.type == syms.asexpr_test and not any( 

1739 leaf.type == token.COLONEQUAL for leaf in node.leaves() 

1740 ): 

1741 if maybe_make_parens_invisible_in_atom( 

1742 node.children[0], 

1743 parent=node, 

1744 mode=mode, 

1745 features=features, 

1746 remove_brackets_around_comma=True, 

1747 ): 

1748 wrap_in_parentheses(node, node.children[0], visible=False) 

1749 

1750 

1751def _atom_has_magic_trailing_comma(node: LN, mode: Mode) -> bool: 

1752 """Check if an atom node has a magic trailing comma. 

1753 

1754 Returns True for single-element tuples with trailing commas like (a,), 

1755 which should be preserved to maintain their tuple type. 

1756 """ 

1757 if not mode.magic_trailing_comma: 

1758 return False 

1759 

1760 return is_one_tuple(node) 

1761 

1762 

1763def _is_atom_multiline(node: LN) -> bool: 

1764 """Check if an atom node is multiline (indicating intentional formatting).""" 

1765 if not isinstance(node, Node) or len(node.children) < 3: 

1766 return False 

1767 

1768 # Check the middle child (between LPAR and RPAR) for newlines in its subtree 

1769 # The first child's prefix contains blank lines/comments before the opening paren 

1770 middle = node.children[1] 

1771 for child in middle.pre_order(): 

1772 if isinstance(child, Leaf) and "\n" in child.prefix: 

1773 return True 

1774 

1775 return False 

1776 

1777 

1778def maybe_make_parens_invisible_in_atom( 

1779 node: LN, 

1780 parent: LN, 

1781 mode: Mode, 

1782 features: Collection[Feature], 

1783 remove_brackets_around_comma: bool = False, 

1784 allow_star_expr: bool = False, 

1785) -> bool: 

1786 """If it's safe, make the parens in the atom `node` invisible, recursively. 

1787 Additionally, remove repeated, adjacent invisible parens from the atom `node` 

1788 as they are redundant. 

1789 

1790 Returns whether the node should itself be wrapped in invisible parentheses. 

1791 """ 

1792 if ( 

1793 node.type not in (syms.atom, syms.expr) 

1794 or is_empty_tuple(node) 

1795 or is_one_tuple(node) 

1796 or (is_tuple(node) and parent.type == syms.asexpr_test) 

1797 or ( 

1798 is_tuple(node) 

1799 and parent.type == syms.with_stmt 

1800 and has_sibling_with_type(node, token.COMMA) 

1801 ) 

1802 or (is_yield(node) and parent.type != syms.expr_stmt) 

1803 or ( 

1804 # This condition tries to prevent removing non-optional brackets 

1805 # around a tuple, however, can be a bit overzealous so we provide 

1806 # and option to skip this check for `for` and `with` statements. 

1807 not remove_brackets_around_comma 

1808 and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY 

1809 # Remove parentheses around multiple exception types in except and 

1810 # except* without as. See PEP 758 for details. 

1811 and not ( 

1812 Feature.UNPARENTHESIZED_EXCEPT_TYPES in features 

1813 # is a tuple 

1814 and is_tuple(node) 

1815 # has a parent node 

1816 and node.parent is not None 

1817 # parent is an except clause 

1818 and node.parent.type == syms.except_clause 

1819 # is not immediately followed by as clause 

1820 and not ( 

1821 node.next_sibling is not None 

1822 and is_name_token(node.next_sibling) 

1823 and node.next_sibling.value == "as" 

1824 ) 

1825 ) 

1826 ) 

1827 or is_tuple_containing_walrus(node) 

1828 or (not allow_star_expr and is_tuple_containing_star(node)) 

1829 or is_generator(node) 

1830 ): 

1831 return False 

1832 

1833 if is_walrus_assignment(node): 

1834 if parent.type in [ 

1835 syms.annassign, 

1836 syms.expr_stmt, 

1837 syms.assert_stmt, 

1838 syms.return_stmt, 

1839 syms.except_clause, 

1840 syms.funcdef, 

1841 syms.with_stmt, 

1842 syms.testlist_gexp, 

1843 syms.tname, 

1844 # these ones aren't useful to end users, but they do please fuzzers 

1845 syms.for_stmt, 

1846 syms.del_stmt, 

1847 syms.for_stmt, 

1848 ]: 

1849 return False 

1850 

1851 first = node.children[0] 

1852 last = node.children[-1] 

1853 if is_lpar_token(first) and is_rpar_token(last): 

1854 middle = node.children[1] 

1855 # make parentheses invisible 

1856 if ( 

1857 # If the prefix of `middle` includes a type comment with 

1858 # ignore annotation, then we do not remove the parentheses 

1859 not is_type_ignore_comment_string(middle.prefix.strip(), mode=mode) 

1860 ): 

1861 first.value = "" 

1862 last.value = "" 

1863 maybe_make_parens_invisible_in_atom( 

1864 middle, 

1865 parent=parent, 

1866 mode=mode, 

1867 features=features, 

1868 remove_brackets_around_comma=remove_brackets_around_comma, 

1869 ) 

1870 

1871 if is_atom_with_invisible_parens(middle): 

1872 # Strip the invisible parens from `middle` by replacing 

1873 # it with the child in-between the invisible parens 

1874 middle.replace(middle.children[1]) 

1875 

1876 if middle.children[0].prefix.strip(): 

1877 # Preserve comments before first paren 

1878 middle.children[1].prefix = ( 

1879 middle.children[0].prefix + middle.children[1].prefix 

1880 ) 

1881 

1882 if middle.children[-1].prefix.strip(): 

1883 # Preserve comments before last paren 

1884 last.prefix = middle.children[-1].prefix + last.prefix 

1885 

1886 return False 

1887 

1888 return True 

1889 

1890 

1891def should_split_line(line: Line, opening_bracket: Leaf) -> bool: 

1892 """Should `line` be immediately split with `delimiter_split()` after RHS?""" 

1893 

1894 if not (opening_bracket.parent and opening_bracket.value in "[{("): 

1895 return False 

1896 

1897 # We're essentially checking if the body is delimited by commas and there's more 

1898 # than one of them (we're excluding the trailing comma and if the delimiter priority 

1899 # is still commas, that means there's more). 

1900 exclude = set() 

1901 trailing_comma = False 

1902 try: 

1903 last_leaf = line.leaves[-1] 

1904 if last_leaf.type == token.COMMA: 

1905 trailing_comma = True 

1906 exclude.add(id(last_leaf)) 

1907 max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude) 

1908 except (IndexError, ValueError): 

1909 return False 

1910 

1911 return max_priority == COMMA_PRIORITY and ( 

1912 (line.mode.magic_trailing_comma and trailing_comma) 

1913 # always explode imports 

1914 or opening_bracket.parent.type in {syms.atom, syms.import_from} 

1915 ) 

1916 

1917 

1918def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[set[LeafID]]: 

1919 """Generate sets of closing bracket IDs that should be omitted in a RHS. 

1920 

1921 Brackets can be omitted if the entire trailer up to and including 

1922 a preceding closing bracket fits in one line. 

1923 

1924 Yielded sets are cumulative (contain results of previous yields, too). First 

1925 set is empty, unless the line should explode, in which case bracket pairs until 

1926 the one that needs to explode are omitted. 

1927 """ 

1928 

1929 omit: set[LeafID] = set() 

1930 if not line.magic_trailing_comma: 

1931 yield omit 

1932 

1933 length = 4 * line.depth 

1934 opening_bracket: Leaf | None = None 

1935 closing_bracket: Leaf | None = None 

1936 inner_brackets: set[LeafID] = set() 

1937 for index, leaf, leaf_length in line.enumerate_with_length(is_reversed=True): 

1938 length += leaf_length 

1939 if length > line_length: 

1940 break 

1941 

1942 has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix) 

1943 if leaf.type == STANDALONE_COMMENT or has_inline_comment: 

1944 break 

1945 

1946 if opening_bracket: 

1947 if leaf is opening_bracket: 

1948 opening_bracket = None 

1949 elif leaf.type in CLOSING_BRACKETS: 

1950 prev = line.leaves[index - 1] if index > 0 else None 

1951 if ( 

1952 prev 

1953 and prev.type == token.COMMA 

1954 and leaf.opening_bracket is not None 

1955 and not is_one_sequence_between( 

1956 leaf.opening_bracket, leaf, line.leaves 

1957 ) 

1958 ): 

1959 # Never omit bracket pairs with trailing commas. 

1960 # We need to explode on those. 

1961 break 

1962 

1963 inner_brackets.add(id(leaf)) 

1964 elif leaf.type in CLOSING_BRACKETS: 

1965 prev = line.leaves[index - 1] if index > 0 else None 

1966 if prev and prev.type in OPENING_BRACKETS: 

1967 # Empty brackets would fail a split so treat them as "inner" 

1968 # brackets (e.g. only add them to the `omit` set if another 

1969 # pair of brackets was good enough. 

1970 inner_brackets.add(id(leaf)) 

1971 continue 

1972 

1973 if closing_bracket: 

1974 omit.add(id(closing_bracket)) 

1975 omit.update(inner_brackets) 

1976 inner_brackets.clear() 

1977 yield omit 

1978 

1979 if ( 

1980 prev 

1981 and prev.type == token.COMMA 

1982 and leaf.opening_bracket is not None 

1983 and not is_one_sequence_between(leaf.opening_bracket, leaf, line.leaves) 

1984 ): 

1985 # Never omit bracket pairs with trailing commas. 

1986 # We need to explode on those. 

1987 break 

1988 

1989 if leaf.value: 

1990 opening_bracket = leaf.opening_bracket 

1991 closing_bracket = leaf 

1992 

1993 

1994def run_transformer( 

1995 line: Line, 

1996 transform: Transformer, 

1997 mode: Mode, 

1998 features: Collection[Feature], 

1999 *, 

2000 line_str: str = "", 

2001) -> list[Line]: 

2002 if not line_str: 

2003 line_str = line_to_string(line) 

2004 result: list[Line] = [] 

2005 for transformed_line in transform(line, features, mode): 

2006 if str(transformed_line).strip("\n") == line_str: 

2007 raise CannotTransform("Line transformer returned an unchanged result") 

2008 

2009 result.extend(transform_line(transformed_line, mode=mode, features=features)) 

2010 

2011 features_set = set(features) 

2012 if ( 

2013 Feature.FORCE_OPTIONAL_PARENTHESES in features_set 

2014 or transform.__class__.__name__ != "rhs" 

2015 or not line.bracket_tracker.invisible 

2016 or any(bracket.value for bracket in line.bracket_tracker.invisible) 

2017 or line.contains_multiline_strings() 

2018 or result[0].contains_uncollapsable_type_comments() 

2019 or result[0].contains_unsplittable_type_ignore() 

2020 or is_line_short_enough(result[0], mode=mode) 

2021 # If any leaves have no parents (which _can_ occur since 

2022 # `transform(line)` potentially destroys the line's underlying node 

2023 # structure), then we can't proceed. Doing so would cause the below 

2024 # call to `append_leaves()` to fail. 

2025 or any(leaf.parent is None for leaf in line.leaves) 

2026 ): 

2027 return result 

2028 

2029 line_copy = line.clone() 

2030 append_leaves(line_copy, line, line.leaves) 

2031 features_fop = features_set | {Feature.FORCE_OPTIONAL_PARENTHESES} 

2032 second_opinion = run_transformer( 

2033 line_copy, transform, mode, features_fop, line_str=line_str 

2034 ) 

2035 if all(is_line_short_enough(ln, mode=mode) for ln in second_opinion): 

2036 result = second_opinion 

2037 return result