Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/black/trans.py: 13%

935 statements  

1""" 

2String transformers that can split and merge strings. 

3""" 

4 

5import re 

6from abc import ABC, abstractmethod 

7from collections import defaultdict 

8from collections.abc import Callable, Collection, Iterable, Iterator, Sequence 

9from dataclasses import dataclass 

10from typing import Any, ClassVar, Final, Literal, TypeVar, Union 

11 

12from mypy_extensions import trait 

13 

14from black.comments import contains_pragma_comment 

15from black.lines import Line, append_leaves 

16from black.mode import Feature, Mode 

17from black.nodes import ( 

18 CLOSING_BRACKETS, 

19 OPENING_BRACKETS, 

20 STANDALONE_COMMENT, 

21 is_empty_lpar, 

22 is_empty_par, 

23 is_empty_rpar, 

24 is_part_of_annotation, 

25 parent_type, 

26 replace_child, 

27 syms, 

28) 

29from black.rusty import Err, Ok, Result 

30from black.strings import ( 

31 assert_is_leaf_string, 

32 count_chars_in_width, 

33 get_string_prefix, 

34 has_triple_quotes, 

35 normalize_string_quotes, 

36 str_width, 

37) 

38from blib2to3.pgen2 import token 

39from blib2to3.pytree import Leaf, Node 

40 

41 

42class CannotTransform(Exception): 

43 """Base class for errors raised by Transformers.""" 

44 

45 

46# types 

47T = TypeVar("T") 

48LN = Union[Leaf, Node] 

49Transformer = Callable[[Line, Collection[Feature], Mode], Iterator[Line]] 

50Index = int 

51NodeType = int 

52ParserState = int 

53StringID = int 

54TResult = Result[T, CannotTransform] # (T)ransform Result 

55TMatchResult = TResult[list[Index]] 

56 

57SPLIT_SAFE_CHARS = frozenset(["\u3001", "\u3002", "\uff0c"]) # East Asian stops 

58 

59 

60def TErr(err_msg: str) -> Err[CannotTransform]: 

61 """(T)ransform Err 

62 

63 Convenience function used when working with the TResult type. 

64 """ 

65 cant_transform = CannotTransform(err_msg) 

66 return Err(cant_transform) 
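
# A minimal sketch of how a TResult is consumed (hypothetical usage, not part
# of the original module):
#
#     result = TErr("no match")  # same as Err(CannotTransform("no match"))
#     if isinstance(result, Err):
#         raise CannotTransform("unable to transform") from result.err()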


# Remove when `simplify_power_operator_hugging` becomes stable.
def hug_power_op(
    line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
    """A transformer which normalizes spacing around power operators."""

    # Performance optimization to avoid unnecessary Leaf clones and other ops.
    for leaf in line.leaves:
        if leaf.type == token.DOUBLESTAR:
            break
    else:
        raise CannotTransform("No doublestar token was found in the line.")

    def is_simple_lookup(index: int, kind: Literal[1, -1]) -> bool:
        # Brackets and parentheses indicate calls, subscripts, etc. ...
        # basically stuff that doesn't count as "simple". Only a NAME lookup
        # or dotted lookup (e.g. NAME.NAME) is OK.
        if kind == -1:
            return handle_is_simple_look_up_prev(line, index, {token.RPAR, token.RSQB})
        else:
            return handle_is_simple_lookup_forward(
                line, index, {token.LPAR, token.LSQB}
            )

    def is_simple_operand(index: int, kind: Literal[1, -1]) -> bool:
        # An operand is considered "simple" if it's a NAME, a numeric CONSTANT, or a
        # simple lookup (see above), with or without a preceding unary operator.
        start = line.leaves[index]
        if start.type in {token.NAME, token.NUMBER}:
            return is_simple_lookup(index, kind)

        if start.type in {token.PLUS, token.MINUS, token.TILDE}:
            if line.leaves[index + 1].type in {token.NAME, token.NUMBER}:
                # kind is always one as bases with a preceding unary op will be checked
                # for simplicity starting from the next token (so it'll hit the check
                # above).
                return is_simple_lookup(index + 1, kind=1)

        return False

    new_line = line.clone()
    should_hug = False
    for idx, leaf in enumerate(line.leaves):
        new_leaf = leaf.clone()
        if should_hug:
            new_leaf.prefix = ""
            should_hug = False

        should_hug = (
            (0 < idx < len(line.leaves) - 1)
            and leaf.type == token.DOUBLESTAR
            and is_simple_operand(idx - 1, kind=-1)
            and line.leaves[idx - 1].value != "lambda"
            and is_simple_operand(idx + 1, kind=1)
        )
        if should_hug:
            new_leaf.prefix = ""

        # We have to be careful to make a new line properly:
        # - bracket related metadata must be maintained (handled by Line.append)
        # - comments need to be copied over, updating the leaf IDs they're attached to
        new_line.append(new_leaf, preformatted=True)
        for comment_leaf in line.comments_after(leaf):
            new_line.append(comment_leaf, preformatted=True)

    yield new_line
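
# A sketch of the intended effect (illustration only, not part of the module):
# with "simple" operands (names, numbers, dotted lookups, optionally preceded
# by a unary operator) the spaces around `**` are removed:
#
#     x = a ** 2       ->  x = a**2
#     y = a.b ** -c    ->  y = a.b**-c
#     z = f(x) ** 2    is left unchanged (a call result is not "simple").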


# Remove when `simplify_power_operator_hugging` becomes stable.
def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool:
    """
    Handles the determination of is_simple_lookup for the leaves prior to the
    doublestar token. This is required because we need to isolate the chained
    expression to determine whether a bracket or parenthesis belongs to a
    single expression.
    """
    contains_disallowed = False
    chain = []

    while 0 <= index < len(line.leaves):
        current = line.leaves[index]
        chain.append(current)
        if not contains_disallowed and current.type in disallowed:
            contains_disallowed = True
        if not is_expression_chained(chain):
            return not contains_disallowed

        index -= 1

    return True


# Remove when `simplify_power_operator_hugging` becomes stable.
def handle_is_simple_lookup_forward(
    line: Line, index: int, disallowed: set[int]
) -> bool:
    """
    Handles the is_simple_lookup decision for the leaves after the doublestar
    token. This function is simplified to stay consistent with the prior logic;
    the forward case is more straightforward and does not need to care about
    chained expressions.
    """
    while 0 <= index < len(line.leaves):
        current = line.leaves[index]
        if current.type in disallowed:
            return False
        if current.type not in {token.NAME, token.DOT} or (
            current.type == token.NAME and current.value == "for"
        ):
            # If the current token isn't disallowed, we'll assume this is simple as
            # only the disallowed tokens are semantically attached to this lookup
            # expression we're checking. Also, stop early if we hit the 'for' bit
            # of a comprehension.
            return True

        index += 1

    return True


# Remove when `simplify_power_operator_hugging` becomes stable.
def is_expression_chained(chained_leaves: list[Leaf]) -> bool:
    """
    Determine whether the given leaves form a chained call
    (e.g., foo.lookup, foo().lookup, and (foo.lookup()) are all recognized
    as chained calls).
    """
    if len(chained_leaves) < 2:
        return True

    current_leaf = chained_leaves[-1]
    past_leaf = chained_leaves[-2]

    if past_leaf.type == token.NAME:
        return current_leaf.type in {token.DOT}
    elif past_leaf.type in {token.RPAR, token.RSQB}:
        return current_leaf.type in {token.RSQB, token.RPAR}
    elif past_leaf.type in {token.LPAR, token.LSQB}:
        return current_leaf.type in {token.NAME, token.LPAR, token.LSQB}
    else:
        return False


class StringTransformer(ABC):
    """
    An implementation of the Transformer protocol that relies on its
    subclasses overriding the template methods `do_match(...)` and
    `do_transform(...)`.

    This Transformer works exclusively on strings (for example, by merging
    or splitting them).

    The following sections can be found among the docstrings of each concrete
    StringTransformer subclass.

    Requirements:
        Which requirements must be met of the given Line for this
        StringTransformer to be applied?

    Transformations:
        If the given Line meets all of the above requirements, which string
        transformations can you expect to be applied to it by this
        StringTransformer?

    Collaborations:
        What contractual agreements does this StringTransformer have with other
        StringTransformers? Such collaborations should be eliminated/minimized
        as much as possible.
    """

    __name__: Final = "StringTransformer"

    # Ideally this would be a dataclass, but unfortunately mypyc breaks when used with
    # `abc.ABC`.
    def __init__(self, line_length: int, normalize_strings: bool) -> None:
        self.line_length = line_length
        self.normalize_strings = normalize_strings

    @abstractmethod
    def do_match(self, line: Line) -> TMatchResult:
        """
        Returns:
            * Ok(string_indices) such that for each index, `line.leaves[index]`
              is our target string if a match was able to be made. For
              transformers that don't result in more lines (e.g. StringMerger,
              StringParenStripper), multiple matches and transforms are done at
              once to reduce the complexity.
            OR
            * Err(CannotTransform), if no match could be made.
        """

    @abstractmethod
    def do_transform(
        self, line: Line, string_indices: list[int]
    ) -> Iterator[TResult[Line]]:
        """
        Yields:
            * Ok(new_line) where new_line is the new transformed line.
            OR
            * Err(CannotTransform) if the transformation failed for some reason. The
              `do_match(...)` template method should usually be used to reject
              the form of the given Line, but in some cases it is difficult to
              know whether or not a Line meets the StringTransformer's
              requirements until the transformation is already midway.

        Side Effects:
            This method should NOT mutate @line directly, but it MAY mutate the
            Line's underlying Node structure. (WARNING: If the underlying Node
            structure IS altered, then this method should NOT be allowed to
            yield a CannotTransform after that point.)
        """

    def __call__(
        self, line: Line, _features: Collection[Feature], _mode: Mode
    ) -> Iterator[Line]:
        """
        StringTransformer instances have a call signature that mirrors that of
        the Transformer type.

        Raises:
            CannotTransform(...) if the concrete StringTransformer class is unable
            to transform @line.
        """
        # Optimization to avoid calling `self.do_match(...)` when the line does
        # not contain any string.
        if not any(leaf.type == token.STRING for leaf in line.leaves):
            raise CannotTransform("There are no strings in this line.")

        match_result = self.do_match(line)

        if isinstance(match_result, Err):
            cant_transform = match_result.err()
            raise CannotTransform(
                f"The string transformer {self.__class__.__name__} does not recognize"
                " this line as one that it can transform."
            ) from cant_transform

        string_indices = match_result.ok()

        for line_result in self.do_transform(line, string_indices):
            if isinstance(line_result, Err):
                cant_transform = line_result.err()
                raise CannotTransform(
                    "StringTransformer failed while attempting to transform string."
                ) from cant_transform
            line = line_result.ok()
            yield line


@dataclass
class CustomSplit:
    """A custom (i.e. manual) string split.

    A single CustomSplit instance represents a single substring.

    Examples:
        Consider the following string:
        ```
        "Hi there friend."
        " This is a custom"
        f" string {split}."
        ```

        This string will correspond to the following three CustomSplit instances:
        ```
        CustomSplit(False, 16)
        CustomSplit(False, 17)
        CustomSplit(True, 16)
        ```
    """

    has_prefix: bool
    break_idx: int


CustomSplitMapKey = tuple[StringID, str]


@trait
class CustomSplitMapMixin:
    """
    This mixin class is used to map merged strings to a sequence of
    CustomSplits, which will then be used to re-split the strings iff none of
    the resultant substrings go over the configured max line length.
    """

    _CUSTOM_SPLIT_MAP: ClassVar[dict[CustomSplitMapKey, tuple[CustomSplit, ...]]] = (
        defaultdict(tuple)
    )

    @staticmethod
    def _get_key(string: str) -> CustomSplitMapKey:
        """
        Returns:
            A unique identifier that is used internally to map @string to a
            group of custom splits.
        """
        return (id(string), string)

    def add_custom_splits(
        self, string: str, custom_splits: Iterable[CustomSplit]
    ) -> None:
        """Custom Split Map Setter Method

        Side Effects:
            Adds a mapping from @string to the custom splits @custom_splits.
        """
        key = self._get_key(string)
        self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)

    def pop_custom_splits(self, string: str) -> list[CustomSplit]:
        """Custom Split Map Getter Method

        Returns:
            * A list of the custom splits that are mapped to @string, if any
              exist.
            OR
            * [], otherwise.

        Side Effects:
            Deletes the mapping between @string and its associated custom
            splits (which are returned to the caller).
        """
        key = self._get_key(string)

        custom_splits = self._CUSTOM_SPLIT_MAP[key]
        del self._CUSTOM_SPLIT_MAP[key]

        return list(custom_splits)

    def has_custom_splits(self, string: str) -> bool:
        """
        Returns:
            True iff @string is associated with a set of custom splits.
        """
        key = self._get_key(string)
        return key in self._CUSTOM_SPLIT_MAP
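
    # Illustrative round trip (hypothetical usage, not part of the original
    # class): given any instance `m` of a class using this mixin,
    #
    #     merged = '"foo bar"'
    #     m.add_custom_splits(merged, [CustomSplit(False, 4)])
    #     m.pop_custom_splits(merged)  # -> [CustomSplit(False, 4)] (and unmaps it)
    #     m.has_custom_splits(merged)  # -> False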


class StringMerger(StringTransformer, CustomSplitMapMixin):
    """StringTransformer that merges strings together.

    Requirements:
        (A) The line contains adjacent strings such that ALL of the validation checks
        listed in StringMerger._validate_msg(...)'s docstring pass.
        OR
        (B) The line contains a string which uses line continuation backslashes.

    Transformations:
        Depending on which of the two requirements above were met, either:

        (A) The string group associated with the target string is merged.
        OR
        (B) All line-continuation backslashes are removed from the target string.

    Collaborations:
        StringMerger provides custom split information to StringSplitter.
    """

    def do_match(self, line: Line) -> TMatchResult:
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        string_indices = []
        idx = 0
        while is_valid_index(idx):
            leaf = LL[idx]
            if (
                leaf.type == token.STRING
                and is_valid_index(idx + 1)
                and LL[idx + 1].type == token.STRING
            ):
                # Let's check if the string group contains an inline comment.
                # If we have a comment inline, we don't merge the strings.
                contains_comment = False
                i = idx
                while is_valid_index(i):
                    if LL[i].type != token.STRING:
                        break
                    if line.comments_after(LL[i]):
                        contains_comment = True
                        break
                    i += 1

                if not contains_comment and not is_part_of_annotation(leaf):
                    string_indices.append(idx)

                # Advance to the next non-STRING leaf.
                idx += 2
                while is_valid_index(idx) and LL[idx].type == token.STRING:
                    idx += 1

            elif leaf.type == token.STRING and "\\\n" in leaf.value:
                string_indices.append(idx)
                # Advance to the next non-STRING leaf.
                idx += 1
                while is_valid_index(idx) and LL[idx].type == token.STRING:
                    idx += 1

            else:
                idx += 1

        if string_indices:
            return Ok(string_indices)
        else:
            return TErr("This line has no strings that need merging.")

    def do_transform(
        self, line: Line, string_indices: list[int]
    ) -> Iterator[TResult[Line]]:
        new_line = line

        rblc_result = self._remove_backslash_line_continuation_chars(
            new_line, string_indices
        )
        if isinstance(rblc_result, Ok):
            new_line = rblc_result.ok()

        msg_result = self._merge_string_group(new_line, string_indices)
        if isinstance(msg_result, Ok):
            new_line = msg_result.ok()

        if isinstance(rblc_result, Err) and isinstance(msg_result, Err):
            msg_cant_transform = msg_result.err()
            rblc_cant_transform = rblc_result.err()
            cant_transform = CannotTransform(
                "StringMerger failed to merge any strings in this line."
            )

            # Chain the errors together using `__cause__`.
            msg_cant_transform.__cause__ = rblc_cant_transform
            cant_transform.__cause__ = msg_cant_transform

            yield Err(cant_transform)
        else:
            yield Ok(new_line)

    @staticmethod
    def _remove_backslash_line_continuation_chars(
        line: Line, string_indices: list[int]
    ) -> TResult[Line]:
        """
        Merge strings that were split across multiple lines using
        line-continuation backslashes.

        Returns:
            Ok(new_line), if @line contains backslash line-continuation
            characters.
            OR
            Err(CannotTransform), otherwise.
        """
        LL = line.leaves

        indices_to_transform = []
        for string_idx in string_indices:
            string_leaf = LL[string_idx]
            if (
                string_leaf.type == token.STRING
                and "\\\n" in string_leaf.value
                and not has_triple_quotes(string_leaf.value)
            ):
                indices_to_transform.append(string_idx)

        if not indices_to_transform:
            return TErr(
                "Found no string leaves that contain backslash line continuation"
                " characters."
            )

        new_line = line.clone()
        new_line.comments = line.comments.copy()
        append_leaves(new_line, line, LL)

        for string_idx in indices_to_transform:
            new_string_leaf = new_line.leaves[string_idx]
            new_string_leaf.value = new_string_leaf.value.replace("\\\n", "")

        return Ok(new_line)

    def _merge_string_group(
        self, line: Line, string_indices: list[int]
    ) -> TResult[Line]:
        """
        Merges string groups (i.e. sets of adjacent strings).

        Each index from `string_indices` designates one string group's first
        leaf in `line.leaves`.

        Returns:
            Ok(new_line), if ALL of the validation checks found in
            _validate_msg(...) pass.
            OR
            Err(CannotTransform), otherwise.
        """
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        # A dict of {string_idx: tuple[num_of_strings, string_leaf]}.
        merged_string_idx_dict: dict[int, tuple[int, Leaf]] = {}
        for string_idx in string_indices:
            vresult = self._validate_msg(line, string_idx)
            if isinstance(vresult, Err):
                continue
            merged_string_idx_dict[string_idx] = self._merge_one_string_group(
                LL, string_idx, is_valid_index
            )

        if not merged_string_idx_dict:
            return TErr("No string group is merged")

        # Build the final line ('new_line') that this method will later return.
        new_line = line.clone()
        previous_merged_string_idx = -1
        previous_merged_num_of_strings = -1
        for i, leaf in enumerate(LL):
            if i in merged_string_idx_dict:
                previous_merged_string_idx = i
                previous_merged_num_of_strings, string_leaf = merged_string_idx_dict[i]
                new_line.append(string_leaf)

            if (
                previous_merged_string_idx
                <= i
                < previous_merged_string_idx + previous_merged_num_of_strings
            ):
                for comment_leaf in line.comments_after(leaf):
                    new_line.append(comment_leaf, preformatted=True)
                continue

            append_leaves(new_line, line, [leaf])

        return Ok(new_line)

    def _merge_one_string_group(
        self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
    ) -> tuple[int, Leaf]:
        """
        Merges one string group where the first string in the group is
        `LL[string_idx]`.

        Returns:
            A tuple of `(num_of_strings, leaf)` where `num_of_strings` is the
            number of strings merged and `leaf` is the newly merged string
            to be replaced in the new line.
        """
        # If the string group is wrapped inside an Atom node, we must make sure
        # to later replace that Atom with our new (merged) string leaf.
        atom_node = LL[string_idx].parent

        # We will place BREAK_MARK in between every two substrings that we
        # merge. We will then later go through our final result and use the
        # various instances of BREAK_MARK we find to add the right values to
        # the custom split map.
        BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"

        QUOTE = LL[string_idx].value[-1]

        def make_naked(string: str, string_prefix: str) -> str:
            """Strip @string (i.e. make it a "naked" string)

            Pre-conditions:
                * assert_is_leaf_string(@string)

            Returns:
                A string that is identical to @string except that
                @string_prefix has been stripped, the surrounding QUOTE
                characters have been removed, and any remaining QUOTE
                characters have been escaped.
            """
            assert_is_leaf_string(string)
            if "f" in string_prefix:
                f_expressions = [
                    string[span[0] + 1 : span[1] - 1]  # +-1 to get rid of curly braces
                    for span in iter_fexpr_spans(string)
                ]
                debug_expressions_contain_visible_quotes = any(
                    re.search(r".*[\'\"].*(?<![!:=])={1}(?!=)(?![^\s:])", expression)
                    for expression in f_expressions
                )
                if not debug_expressions_contain_visible_quotes:
                    # We don't want to toggle visible quotes in debug f-strings, as
                    # that would modify the AST
                    string = _toggle_fexpr_quotes(string, QUOTE)
                    # After quotes toggling, quotes in expressions won't be escaped
                    # because quotes can't be reused in f-strings. So we can simply
                    # let the escaping logic below run without knowing f-string
                    # expressions.

            RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
            naked_string = string[len(string_prefix) + 1 : -1]
            naked_string = re.sub(
                "(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
            )
            return naked_string

        # Holds the CustomSplit objects that will later be added to the custom
        # split map.
        custom_splits = []

        # Temporary storage for the 'has_prefix' part of the CustomSplit objects.
        prefix_tracker = []

        # Sets the 'prefix' variable. This is the prefix that the final merged
        # string will have.
        next_str_idx = string_idx
        prefix = ""
        while (
            not prefix
            and is_valid_index(next_str_idx)
            and LL[next_str_idx].type == token.STRING
        ):
            prefix = get_string_prefix(LL[next_str_idx].value).lower()
            next_str_idx += 1

        # The next loop merges the string group. The final string will be
        # contained in 'S'.
        #
        # The following convenience variables are used:
        #
        #   S: string
        #   NS: naked string
        #   SS: next string
        #   NSS: naked next string
        S = ""
        NS = ""
        num_of_strings = 0
        next_str_idx = string_idx
        while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
            num_of_strings += 1

            SS = LL[next_str_idx].value
            next_prefix = get_string_prefix(SS).lower()

            # If this is an f-string group but this substring is not prefixed
            # with 'f'...
            if "f" in prefix and "f" not in next_prefix:
                # Then we must escape any braces contained in this substring.
                SS = re.sub(r"(\{|\})", r"\1\1", SS)

            NSS = make_naked(SS, next_prefix)

            has_prefix = bool(next_prefix)
            prefix_tracker.append(has_prefix)

            S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
            NS = make_naked(S, prefix)

            next_str_idx += 1

        # Take a note on the index of the non-STRING leaf.
        non_string_idx = next_str_idx

        S_leaf = Leaf(token.STRING, S)
        if self.normalize_strings:
            S_leaf.value = normalize_string_quotes(S_leaf.value)

        # Fill the 'custom_splits' list with the appropriate CustomSplit objects.
        temp_string = S_leaf.value[len(prefix) + 1 : -1]
        for has_prefix in prefix_tracker:
            mark_idx = temp_string.find(BREAK_MARK)
            assert (
                mark_idx >= 0
            ), "Logic error while filling the custom string breakpoint cache."

            temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
            breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
            custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))

        string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))

        if atom_node is not None:
            # If not all children of the atom node are merged (this can happen
            # when there is a standalone comment in the middle) ...
            if non_string_idx - string_idx < len(atom_node.children):
                # We need to replace the old STRING leaves with the new string leaf.
                first_child_idx = LL[string_idx].remove()
                for idx in range(string_idx + 1, non_string_idx):
                    LL[idx].remove()
                if first_child_idx is not None:
                    atom_node.insert_child(first_child_idx, string_leaf)
            else:
                # Else replace the atom node with the new string leaf.
                replace_child(atom_node, string_leaf)

        self.add_custom_splits(string_leaf.value, custom_splits)
        return num_of_strings, string_leaf

    @staticmethod
    def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
        """Validate (M)erge (S)tring (G)roup

        Transform-time string validation logic for _merge_string_group(...).

        Returns:
            * Ok(None), if ALL validation checks (listed below) pass.
            OR
            * Err(CannotTransform), if any of the following are true:
                - The target string group contains stand-alone comments between
                  its strings.
                - The target string is not in a string group (i.e. it has no
                  adjacent strings).
                - The string group has more than one inline comment.
                - The string group has an inline comment that appears to be a pragma.
                - The set of all string prefixes in the string group is of
                  length greater than one and is not equal to {"", "f"}.
                - The string group consists of raw strings.
                - The string group would merge f-strings with different quote types
                  and internal quotes.
                - The string group is stringified type annotations. We don't want to
                  process stringified type annotations since pyright doesn't support
                  them spanning multiple string values. (NOTE: mypy, pytype, pyre do
                  support them, so we can change if pyright also gains support in the
                  future. See https://github.com/microsoft/pyright/issues/4359.)
        """
        # We first check for "inner" stand-alone comments (i.e. stand-alone
        # comments that have a string leaf before them AND after them).
        for inc in [1, -1]:
            i = string_idx
            found_sa_comment = False
            is_valid_index = is_valid_index_factory(line.leaves)
            while is_valid_index(i) and line.leaves[i].type in [
                token.STRING,
                STANDALONE_COMMENT,
            ]:
                if line.leaves[i].type == STANDALONE_COMMENT:
                    found_sa_comment = True
                elif found_sa_comment:
                    return TErr(
                        "StringMerger does NOT merge string groups which contain "
                        "stand-alone comments."
                    )

                i += inc

        QUOTE = line.leaves[string_idx].value[-1]

        num_of_inline_string_comments = 0
        set_of_prefixes = set()
        num_of_strings = 0
        for leaf in line.leaves[string_idx:]:
            if leaf.type != token.STRING:
                # If the string group is trailed by a comma, we count the
                # comments trailing the comma to be one of the string group's
                # comments.
                if leaf.type == token.COMMA and id(leaf) in line.comments:
                    num_of_inline_string_comments += 1
                break

            if has_triple_quotes(leaf.value):
                return TErr("StringMerger does NOT merge multiline strings.")

            num_of_strings += 1
            prefix = get_string_prefix(leaf.value).lower()
            if "r" in prefix:
                return TErr("StringMerger does NOT merge raw strings.")

            set_of_prefixes.add(prefix)

            if (
                "f" in prefix
                and leaf.value[-1] != QUOTE
                and (
                    "'" in leaf.value[len(prefix) + 1 : -1]
                    or '"' in leaf.value[len(prefix) + 1 : -1]
                )
            ):
                return TErr(
                    "StringMerger does NOT merge f-strings with different quote types"
                    " and internal quotes."
                )

            if id(leaf) in line.comments:
                num_of_inline_string_comments += 1
                if contains_pragma_comment(line.comments[id(leaf)]):
                    return TErr("Cannot merge strings which have pragma comments.")

        if num_of_strings < 2:
            return TErr(
                f"Not enough strings to merge (num_of_strings={num_of_strings})."
            )

        if num_of_inline_string_comments > 1:
            return TErr(
                f"Too many inline string comments ({num_of_inline_string_comments})."
            )

        if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
            return TErr(f"Too many different prefixes ({set_of_prefixes}).")

        return Ok(None)
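
# A sketch of StringMerger's effect (illustration only; simplified):
#
#     x = "Hello " "World"    ->  x = "Hello World"
#     y = f"ab {x}" "cd"      ->  y = f"ab {x}cd"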


class StringParenStripper(StringTransformer):
    """StringTransformer that strips surrounding parentheses from strings.

    Requirements:
        The line contains a string which is surrounded by parentheses and:
        - The target string is NOT the only argument to a function call.
        - The target string is NOT a "pointless" string.
        - The target string is NOT a dictionary value.
        - If the target string contains a PERCENT, the brackets are not
          preceded or followed by an operator with higher precedence than
          PERCENT.

    Transformations:
        The parentheses mentioned in the 'Requirements' section are stripped.

    Collaborations:
        StringParenStripper has its own inherent usefulness, but it is also
        relied on to clean up the parentheses created by StringParenWrapper (in
        the event that they are no longer needed).
    """

    def do_match(self, line: Line) -> TMatchResult:
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        string_indices = []

        idx = -1
        while True:
            idx += 1
            if idx >= len(LL):
                break
            leaf = LL[idx]

            # Should be a string...
            if leaf.type != token.STRING:
                continue

            # If this is a "pointless" string...
            if (
                leaf.parent
                and leaf.parent.parent
                and leaf.parent.parent.type == syms.simple_stmt
            ):
                continue

            # Should be preceded by a non-empty LPAR...
            if (
                not is_valid_index(idx - 1)
                or LL[idx - 1].type != token.LPAR
                or is_empty_lpar(LL[idx - 1])
            ):
                continue

            # That LPAR should NOT be preceded by a colon (which could be a
            # dictionary value), function name, or a closing bracket (which
            # could be a function returning a function or a list/dictionary
            # containing a function)...
            if is_valid_index(idx - 2) and (
                LL[idx - 2].type == token.COLON
                or LL[idx - 2].type == token.NAME
                or LL[idx - 2].type in CLOSING_BRACKETS
            ):
                continue

            string_idx = idx

            # Skip the string trailer, if one exists.
            string_parser = StringParser()
            next_idx = string_parser.parse(LL, string_idx)

            # if the leaves in the parsed string include a PERCENT, we need to
            # make sure the initial LPAR is NOT preceded by an operator with
            # higher or equal precedence to PERCENT
            if is_valid_index(idx - 2):
                # mypy can't quite follow unless we name this
                before_lpar = LL[idx - 2]
                if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and (
                    (
                        before_lpar.type
                        in {
                            token.STAR,
                            token.AT,
                            token.SLASH,
                            token.DOUBLESLASH,
                            token.PERCENT,
                            token.TILDE,
                            token.DOUBLESTAR,
                            token.AWAIT,
                            token.LSQB,
                            token.LPAR,
                        }
                    )
                    or (
                        # only unary PLUS/MINUS
                        before_lpar.parent
                        and before_lpar.parent.type == syms.factor
                        and (before_lpar.type in {token.PLUS, token.MINUS})
                    )
                ):
                    continue

            # Should be followed by a non-empty RPAR...
            if (
                is_valid_index(next_idx)
                and LL[next_idx].type == token.RPAR
                and not is_empty_rpar(LL[next_idx])
            ):
                # That RPAR should NOT be followed by anything with higher
                # precedence than PERCENT
                if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in {
                    token.DOUBLESTAR,
                    token.LSQB,
                    token.LPAR,
                    token.DOT,
                }:
                    continue

                string_indices.append(string_idx)
                idx = string_idx
                while idx < len(LL) - 1 and LL[idx + 1].type == token.STRING:
                    idx += 1

        if string_indices:
            return Ok(string_indices)
        return TErr("This line has no strings wrapped in parens.")

    def do_transform(
        self, line: Line, string_indices: list[int]
    ) -> Iterator[TResult[Line]]:
        LL = line.leaves

        string_and_rpar_indices: list[int] = []
        for string_idx in string_indices:
            string_parser = StringParser()
            rpar_idx = string_parser.parse(LL, string_idx)

            should_transform = True
            for leaf in (LL[string_idx - 1], LL[rpar_idx]):
                if line.comments_after(leaf):
                    # Should not strip parentheses which have comments attached
                    # to them.
                    should_transform = False
                    break
            if should_transform:
                string_and_rpar_indices.extend((string_idx, rpar_idx))

        if string_and_rpar_indices:
            yield Ok(self._transform_to_new_line(line, string_and_rpar_indices))
        else:
            yield Err(
                CannotTransform("All string groups have comments attached to them.")
            )

    def _transform_to_new_line(
        self, line: Line, string_and_rpar_indices: list[int]
    ) -> Line:
        LL = line.leaves

        new_line = line.clone()
        new_line.comments = line.comments.copy()

        previous_idx = -1
        # We need to sort the indices, since string_idx and its matching
        # rpar_idx may not come in order, e.g. in
        # `("outer" % ("inner".join(items)))`, the "inner" string's
        # string_idx is smaller than "outer" string's rpar_idx.
        for idx in sorted(string_and_rpar_indices):
            leaf = LL[idx]
            lpar_or_rpar_idx = idx - 1 if leaf.type == token.STRING else idx
            append_leaves(new_line, line, LL[previous_idx + 1 : lpar_or_rpar_idx])
            if leaf.type == token.STRING:
                string_leaf = Leaf(token.STRING, LL[idx].value)
                LL[lpar_or_rpar_idx].remove()  # Remove lpar.
                replace_child(LL[idx], string_leaf)
                new_line.append(string_leaf)
                # replace comments
                old_comments = new_line.comments.pop(id(LL[idx]), [])
                new_line.comments.setdefault(id(string_leaf), []).extend(old_comments)
            else:
                LL[lpar_or_rpar_idx].remove()  # This is a rpar.

            previous_idx = idx

        # Append the leaves after the last idx:
        append_leaves(new_line, line, LL[idx + 1 :])

        return new_line
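
# A sketch of StringParenStripper's effect (illustration only; simplified):
#
#     value = ("a parenthesized string")   ->   value = "a parenthesized string"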


class BaseStringSplitter(StringTransformer):
    """
    Abstract class for StringTransformers which transform a Line's strings by splitting
    them or placing them on their own lines where necessary to avoid going over
    the configured line length.

    Requirements:
        * The target string value is responsible for the line going over the
          line length limit. It follows that after all of black's other line
          split methods have been exhausted, this line (or one of the resulting
          lines after all line splits are performed) would still be over the
          line_length limit unless we split this string.
        AND

        * The target string is NOT a "pointless" string (i.e. a string that has
          no parent or siblings).
        AND

        * The target string is not followed by an inline comment that appears
          to be a pragma.
        AND

        * The target string is not a multiline (i.e. triple-quote) string.
    """

    STRING_OPERATORS: Final = [
        token.EQEQUAL,
        token.GREATER,
        token.GREATEREQUAL,
        token.LESS,
        token.LESSEQUAL,
        token.NOTEQUAL,
        token.PERCENT,
        token.PLUS,
        token.STAR,
    ]

    @abstractmethod
    def do_splitter_match(self, line: Line) -> TMatchResult:
        """
        BaseStringSplitter asks its clients to override this method instead of
        `StringTransformer.do_match(...)`.

        Follows the same protocol as `StringTransformer.do_match(...)`.

        Refer to `help(StringTransformer.do_match)` for more information.
        """

    def do_match(self, line: Line) -> TMatchResult:
        match_result = self.do_splitter_match(line)
        if isinstance(match_result, Err):
            return match_result

        string_indices = match_result.ok()
        assert len(string_indices) == 1, (
            f"{self.__class__.__name__} should only find one match at a time, found"
            f" {len(string_indices)}"
        )
        string_idx = string_indices[0]
        vresult = self._validate(line, string_idx)
        if isinstance(vresult, Err):
            return vresult

        return match_result

    def _validate(self, line: Line, string_idx: int) -> TResult[None]:
        """
        Checks that @line meets all of the requirements listed in this class's
        docstring. Refer to `help(BaseStringSplitter)` for a detailed
        description of those requirements.

        Returns:
            * Ok(None), if ALL of the requirements are met.
            OR
            * Err(CannotTransform), if ANY of the requirements are NOT met.
        """
        LL = line.leaves

        string_leaf = LL[string_idx]

        max_string_length = self._get_max_string_length(line, string_idx)
        if len(string_leaf.value) <= max_string_length:
            return TErr(
                "The string itself is not what is causing this line to be too long."
            )

        if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [
            token.STRING,
            token.NEWLINE,
        ]:
            return TErr(
                f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
                " no parent)."
            )

        if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment(
            line.comments[id(line.leaves[string_idx])]
        ):
            return TErr(
                "Line appears to end with an inline pragma comment. Splitting the line"
                " could modify the pragma's behavior."
            )

        if has_triple_quotes(string_leaf.value):
            return TErr("We cannot split multiline strings.")

        return Ok(None)

    def _get_max_string_length(self, line: Line, string_idx: int) -> int:
        """
        Calculates the max string length used when attempting to determine
        whether or not the target string is responsible for causing the line to
        go over the line length limit.

        WARNING: This method is tightly coupled to both StringSplitter and
        (especially) StringParenWrapper. There is probably a better way to
        accomplish what is being done here.

        Returns:
            max_string_length: such that `len(line.leaves[string_idx].value) >
            max_string_length` implies that the target string IS responsible
            for causing this line to exceed the line length limit.
        """
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        # We use the shorthand "WMA4" in comments to abbreviate "We must
        # account for". When giving examples, we use STRING to mean some/any
        # valid string.
        #
        # Finally, we use the following convenience variables:
        #
        #   P: The leaf that is before the target string leaf.
        #   N: The leaf that is after the target string leaf.
        #   NN: The leaf that is after N.

        # WMA4 the whitespace at the beginning of the line.
        offset = line.depth * 4

        if is_valid_index(string_idx - 1):
            p_idx = string_idx - 1
            if (
                LL[string_idx - 1].type == token.LPAR
                and LL[string_idx - 1].value == ""
                and string_idx >= 2
            ):
                # If the previous leaf is an empty LPAR placeholder, we should skip it.
                p_idx -= 1

            P = LL[p_idx]
            if P.type in self.STRING_OPERATORS:
                # WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`).
                offset += len(str(P)) + 1

            if P.type == token.COMMA:
                # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`].
                offset += 3

            if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]:
                # This conditional branch is meant to handle dictionary keys,
                # variable assignments, 'return STRING' statement lines, and
                # 'else STRING' ternary expression lines.

                # WMA4 a single space.
                offset += 1

                # WMA4 the lengths of any leaves that came before that space,
                # but after any closing bracket before that space.
                for leaf in reversed(LL[: p_idx + 1]):
                    offset += len(str(leaf))
                    if leaf.type in CLOSING_BRACKETS:
                        break

        if is_valid_index(string_idx + 1):
            N = LL[string_idx + 1]
            if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
                # If the next leaf is an empty RPAR placeholder, we should skip it.
                N = LL[string_idx + 2]

            if N.type == token.COMMA:
                # WMA4 a single comma at the end of the string (e.g. `STRING,`).
                offset += 1

            if is_valid_index(string_idx + 2):
                NN = LL[string_idx + 2]

                if N.type == token.DOT and NN.type == token.NAME:
                    # This conditional branch is meant to handle method calls invoked
                    # off of a string literal up to and including the LPAR character.

                    # WMA4 the '.' character.
                    offset += 1

                    if (
                        is_valid_index(string_idx + 3)
                        and LL[string_idx + 3].type == token.LPAR
                    ):
                        # WMA4 the left parenthesis character.
                        offset += 1

                    # WMA4 the length of the method's name.
                    offset += len(NN.value)

        has_comments = False
        for comment_leaf in line.comments_after(LL[string_idx]):
            if not has_comments:
                has_comments = True
                # WMA4 two spaces before the '#' character.
                offset += 2

            # WMA4 the length of the inline comment.
            offset += len(comment_leaf.value)

        max_string_length = count_chars_in_width(str(line), self.line_length - offset)
        return max_string_length

    @staticmethod
    def _prefer_paren_wrap_match(LL: list[Leaf]) -> int | None:
        """
        Returns:
            string_idx such that @LL[string_idx] is equal to our target (i.e.
            matched) string, if this line matches the "prefer paren wrap" statement
            requirements listed in the 'Requirements' section of the StringParenWrapper
            class's docstring.
            OR
            None, otherwise.
        """
        # The line must start with a string.
        if LL[0].type != token.STRING:
            return None

        matching_nodes = [
            syms.listmaker,
            syms.dictsetmaker,
            syms.testlist_gexp,
        ]
        # If the string is an immediate child of a list/set/tuple literal...
        if (
            parent_type(LL[0]) in matching_nodes
            or parent_type(LL[0].parent) in matching_nodes
        ):
            # And the string is surrounded by commas (or is the first/last child)...
            prev_sibling = LL[0].prev_sibling
            next_sibling = LL[0].next_sibling
            if (
                not prev_sibling
                and not next_sibling
                and parent_type(LL[0]) == syms.atom
            ):
                # If it's an atom string, we need to check the parent atom's siblings.
                parent = LL[0].parent
                assert parent is not None  # For type checkers.
                prev_sibling = parent.prev_sibling
                next_sibling = parent.next_sibling
            if (not prev_sibling or prev_sibling.type == token.COMMA) and (
                not next_sibling or next_sibling.type == token.COMMA
            ):
                return 0

        return None
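
# Assumed intent of the "prefer paren wrap" match above (illustration only):
# a long string that sits alone as an element of a list/set/tuple literal,
# e.g.
#
#     x = [
#         "some very long string ...",
#     ]
#
# is deferred to StringParenWrapper rather than split in place.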


def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]:
    """
    Yields spans corresponding to expressions in a given f-string.
    Spans are half-open ranges (left inclusive, right exclusive).
    Assumes the input string is a valid f-string, but will not crash if the input
    string is invalid.
    """
    stack: list[int] = []  # our curly paren stack
    i = 0
    while i < len(s):
        if s[i] == "{":
            # if we're in a string part of the f-string, ignore escaped curly braces
            if not stack and i + 1 < len(s) and s[i + 1] == "{":
                i += 2
                continue
            stack.append(i)
            i += 1
            continue

        if s[i] == "}":
            if not stack:
                i += 1
                continue
            j = stack.pop()
            # we've made it back out of the expression! yield the span
            if not stack:
                yield (j, i + 1)
            i += 1
            continue

        # if we're in an expression part of the f-string, fast-forward through strings
        # note that backslashes are not legal in the expression portion of f-strings
        if stack:
            delim = None
            if s[i : i + 3] in ("'''", '"""'):
                delim = s[i : i + 3]
            elif s[i] in ("'", '"'):
                delim = s[i]
            if delim:
                i += len(delim)
                while i < len(s) and s[i : i + len(delim)] != delim:
                    i += 1
                i += len(delim)
                continue
        i += 1
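
# Illustrative spans (not part of the original module):
#
#     list(iter_fexpr_spans('f"hi {name}!"'))  # -> [(5, 11)], i.e. "{name}"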


def fstring_contains_expr(s: str) -> bool:
    return any(iter_fexpr_spans(s))


def _toggle_fexpr_quotes(fstring: str, old_quote: str) -> str:
    """
    Toggles quotes used in f-string expressions that are `old_quote`.

    f-string expressions can't contain backslashes, so we need to toggle the
    quotes if the f-string itself will end up using the same quote. We can
    simply toggle without escaping because quotes can't be reused in f-string
    expressions. They will fail to parse.

    NOTE: If PEP 701 is accepted, the above statement will no longer be true.
    Though if quotes can be reused, we can simply reuse them without updates or
    escaping, once Black figures out how to parse the new grammar.
    """
    new_quote = "'" if old_quote == '"' else '"'
    parts = []
    previous_index = 0
    for start, end in iter_fexpr_spans(fstring):
        parts.append(fstring[previous_index:start])
        parts.append(fstring[start:end].replace(old_quote, new_quote))
        previous_index = end
    parts.append(fstring[previous_index:])
    return "".join(parts)
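
# Illustrative toggle (not part of the original module): if a merged string
# will end up using double quotes, the expression internals are single-quoted
# first:
#
#     _toggle_fexpr_quotes("f'{d[\"k\"]}'", '"')  # -> "f'{d['k']}'"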


class StringSplitter(BaseStringSplitter, CustomSplitMapMixin):
    """
    StringTransformer that splits "atom" strings (i.e. strings which exist on
    lines by themselves).

    Requirements:
        * The line consists ONLY of a single string (possibly prefixed by a
          string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE
          a trailing comma.
        AND
        * All of the requirements listed in BaseStringSplitter's docstring.

    Transformations:
        The string mentioned in the 'Requirements' section is split into as
        many substrings as necessary to adhere to the configured line length.

        In the final set of substrings, no substring should be smaller than
        MIN_SUBSTR_SIZE characters.

        The string will ONLY be split on spaces (i.e. each new substring should
        start with a space). Note that the string will NOT be split on a space
        which is escaped with a backslash.

        If the string is an f-string, it will NOT be split in the middle of an
        f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x
        else bar()} is an f-expression).

        If the string that is being split has an associated set of custom split
        records and those custom splits will NOT result in any line going over
        the configured line length, those custom splits are used. Otherwise the
        string is split as late as possible (from left-to-right) while still
        adhering to the transformation rules listed above.

    Collaborations:
        StringSplitter relies on StringMerger to construct the appropriate
        CustomSplit objects and add them to the custom split map.
    """

    MIN_SUBSTR_SIZE: Final = 6

    def do_splitter_match(self, line: Line) -> TMatchResult:
        LL = line.leaves

        if self._prefer_paren_wrap_match(LL) is not None:
            return TErr("Line needs to be wrapped in parens first.")

        # If the line is just STRING + COMMA (a one-item tuple) and not inside
        # brackets, we need to defer to StringParenWrapper to wrap it first.
        # Otherwise, splitting the string would create multiple expressions where
        # only the last has the comma, breaking AST equivalence. See issue #4912.
        if (
            not line.inside_brackets
            and len(LL) == 2
            and LL[0].type == token.STRING
            and LL[1].type == token.COMMA
        ):
            return TErr(
                "Line with trailing comma tuple needs to be wrapped in parens first."
            )

        is_valid_index = is_valid_index_factory(LL)

        idx = 0

        # The first two leaves MAY be the 'not in' keywords...
        if (
            is_valid_index(idx)
            and is_valid_index(idx + 1)
            and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME]
            and str(LL[idx]) + str(LL[idx + 1]) == "not in"
        ):
            idx += 2
        # Else the first leaf MAY be a string operator symbol or the 'in' keyword...
        elif is_valid_index(idx) and (
            LL[idx].type in self.STRING_OPERATORS
            or LL[idx].type == token.NAME
            and str(LL[idx]) == "in"
        ):
            idx += 1

        # The next/first leaf MAY be an empty LPAR...
        if is_valid_index(idx) and is_empty_lpar(LL[idx]):
            idx += 1

        # The next/first leaf MUST be a string...
        if not is_valid_index(idx) or LL[idx].type != token.STRING:
            return TErr("Line does not start with a string.")

        string_idx = idx

        # Skip the string trailer, if one exists.
        string_parser = StringParser()
        idx = string_parser.parse(LL, string_idx)

        # That string MAY be followed by an empty RPAR...
        if is_valid_index(idx) and is_empty_rpar(LL[idx]):
            idx += 1

        # That string / empty RPAR leaf MAY be followed by a comma...
        if is_valid_index(idx) and LL[idx].type == token.COMMA:
            idx += 1

        # But no more leaves are allowed...
        if is_valid_index(idx):
            return TErr("This line does not end with a string.")

        return Ok([string_idx])

    def do_transform(
        self, line: Line, string_indices: list[int]
    ) -> Iterator[TResult[Line]]:
        LL = line.leaves
        assert len(string_indices) == 1, (
            f"{self.__class__.__name__} should only find one match at a time, found"
            f" {len(string_indices)}"
        )
        string_idx = string_indices[0]

        QUOTE = LL[string_idx].value[-1]

        is_valid_index = is_valid_index_factory(LL)
        insert_str_child = insert_str_child_factory(LL[string_idx])

        prefix = get_string_prefix(LL[string_idx].value).lower()

        # We MAY choose to drop the 'f' prefix from substrings that don't
        # contain any f-expressions, but ONLY if the original f-string
        # contains at least one f-expression. Otherwise, we will alter the AST
        # of the program.
        drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr(
            LL[string_idx].value
        )

        first_string_line = True

        string_op_leaves = self._get_string_operator_leaves(LL)
        string_op_leaves_length = (
            sum(len(str(prefix_leaf)) for prefix_leaf in string_op_leaves) + 1
            if string_op_leaves
            else 0
        )

        def maybe_append_string_operators(new_line: Line) -> None:
            """
            Side Effects:
                If @line starts with a string operator and this is the first
                line we are constructing, this function appends the string
                operator to @new_line and replaces the old string operator leaf
                in the node structure. Otherwise this function does nothing.
            """
            maybe_prefix_leaves = string_op_leaves if first_string_line else []
            for i, prefix_leaf in enumerate(maybe_prefix_leaves):
                replace_child(LL[i], prefix_leaf)
                new_line.append(prefix_leaf)

        ends_with_comma = (
            is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA
        )

        def max_last_string_column() -> int:
            """
            Returns:
                The max allowed width of the string value used for the last
                line we will construct. Note that this value means the width
                rather than the number of characters (e.g., many East Asian
                characters expand to two columns).
            """
            result = self.line_length
            result -= line.depth * 4
            result -= 1 if ends_with_comma else 0
            result -= string_op_leaves_length
            return result

        # --- Calculate Max Break Width (for string value)
        # We start with the line length limit
        max_break_width = self.line_length
        # The last index of a string of length N is N-1.
        max_break_width -= 1
        # Leading whitespace is not present in the string value (e.g. Leaf.value).
        max_break_width -= line.depth * 4
        if max_break_width < 0:
            yield TErr(
                f"Unable to split {LL[string_idx].value} at such high of a line depth:"
                f" {line.depth}"
            )
            return

        # Check if StringMerger registered any custom splits.
        custom_splits = self.pop_custom_splits(LL[string_idx].value)
        # We use them ONLY if none of them would produce lines that exceed the
        # line limit.
        use_custom_breakpoints = bool(
            custom_splits
            and all(csplit.break_idx <= max_break_width for csplit in custom_splits)
        )

        # Temporary storage for the remaining chunk of the string line that
        # can't fit onto the line currently being constructed.
        rest_value = LL[string_idx].value

        def more_splits_should_be_made() -> bool:
            """
            Returns:
                True iff `rest_value` (the remaining string value from the last
                split) should be split again.
            """
            if use_custom_breakpoints:
                return len(custom_splits) > 1
            else:
                return str_width(rest_value) > max_last_string_column()

        string_line_results: list[Ok[Line]] = []
        while more_splits_should_be_made():
            if use_custom_breakpoints:
                # Custom User Split (manual)
                csplit = custom_splits.pop(0)
                break_idx = csplit.break_idx
            else:
                # Algorithmic Split (automatic)
                max_bidx = (
                    count_chars_in_width(rest_value, max_break_width)
                    - string_op_leaves_length
                )
                maybe_break_idx = self._get_break_idx(rest_value, max_bidx)
                if maybe_break_idx is None:
                    # If we are unable to algorithmically determine a good split
                    # and this string has custom splits registered to it, we
                    # fall back to using them--which means we have to start
                    # over from the beginning.
                    if custom_splits:
                        rest_value = LL[string_idx].value
                        string_line_results = []
                        first_string_line = True
                        use_custom_breakpoints = True
                        continue

                    # Otherwise, we stop splitting here.
                    break

                break_idx = maybe_break_idx

            # --- Construct `next_value`
            next_value = rest_value[:break_idx] + QUOTE

            # HACK: The following 'if' statement is a hack to fix the custom
            # breakpoint index in the case of either: (a) substrings that were
            # f-strings but will have the 'f' prefix removed OR (b) substrings
            # that were not f-strings but will now become f-strings because of
            # redundant use of the 'f' prefix (i.e. none of the substrings
            # contain f-expressions but one or more of them had the 'f' prefix
            # anyway; in which case, we will prepend 'f' to _all_ substrings).
            #
            # There is probably a better way to accomplish what is being done
            # here...
            #
            # If this substring is an f-string, we _could_ remove the 'f'
            # prefix, and the current custom split did NOT originally use a
            # prefix...
            if (
                use_custom_breakpoints
                and not csplit.has_prefix
                and (
                    # `next_value == prefix + QUOTE` happens when the custom
                    # split is an empty string.
                    next_value == prefix + QUOTE
                    or next_value != self._normalize_f_string(next_value, prefix)
                )
            ):
                # Then `csplit.break_idx` will be off by one after removing
                # the 'f' prefix.
                break_idx += 1
                next_value = rest_value[:break_idx] + QUOTE

            if drop_pointless_f_prefix:
                next_value = self._normalize_f_string(next_value, prefix)

            # --- Construct `next_leaf`
            next_leaf = Leaf(token.STRING, next_value)
            insert_str_child(next_leaf)
            self._maybe_normalize_string_quotes(next_leaf)

            # --- Construct `next_line`
            next_line = line.clone()
            maybe_append_string_operators(next_line)
            next_line.append(next_leaf)
            string_line_results.append(Ok(next_line))

            rest_value = prefix + QUOTE + rest_value[break_idx:]
            first_string_line = False

        yield from string_line_results

        if drop_pointless_f_prefix:
            rest_value = self._normalize_f_string(rest_value, prefix)

        rest_leaf = Leaf(token.STRING, rest_value)
        insert_str_child(rest_leaf)

        # NOTE: I could not find a test case that verifies that the following
        # line is actually necessary, but it seems to be. Otherwise we risk
        # not normalizing the last substring, right?
        self._maybe_normalize_string_quotes(rest_leaf)

        last_line = line.clone()
        maybe_append_string_operators(last_line)

        # If there are any leaves to the right of the target string...
        if is_valid_index(string_idx + 1):
            # We use `temp_value` here to determine how long the last line
            # would be if we were to append all the leaves to the right of the
            # target string to the last string line.
            temp_value = rest_value
            for leaf in LL[string_idx + 1 :]:
                temp_value += str(leaf)
                if leaf.type == token.LPAR:
                    break

            # Try to fit them all on the same line with the last substring...
            if (
                str_width(temp_value) <= max_last_string_column()
                or LL[string_idx + 1].type == token.COMMA
            ):
                last_line.append(rest_leaf)
                append_leaves(last_line, line, LL[string_idx + 1 :])
                yield Ok(last_line)
            # Otherwise, place the last substring on one line and everything
            # else on a line below that...
            else:
                last_line.append(rest_leaf)
                yield Ok(last_line)

1718 

1719 non_string_line = line.clone() 

1720 append_leaves(non_string_line, line, LL[string_idx + 1 :]) 

1721 yield Ok(non_string_line) 

1722 # Else the target string was the last leaf... 

1723 else: 

1724 last_line.append(rest_leaf) 

1725 last_line.comments = line.comments.copy() 

1726 yield Ok(last_line) 

1727 

1728 def _iter_nameescape_slices(self, string: str) -> Iterator[tuple[Index, Index]]: 

1729 r""" 

1730 Yields: 

1731 All ranges of @string which, if @string were to be split there, 

1732 would result in the splitting of an \N{...} expression (which is NOT 

1733 allowed). 

1734 """ 

1735 # True - the previous backslash was unescaped 

1736 # False - the previous backslash was escaped *or* there was no backslash 

1737 previous_was_unescaped_backslash = False 

1738 it = iter(enumerate(string)) 

1739 for idx, c in it: 

1740 if c == "\\": 

1741 previous_was_unescaped_backslash = not previous_was_unescaped_backslash 

1742 continue 

1743 if not previous_was_unescaped_backslash or c != "N": 

1744 previous_was_unescaped_backslash = False 

1745 continue 

1746 previous_was_unescaped_backslash = False 

1747 

1748 begin = idx - 1 # the position of backslash before \N{...} 

1749 for idx, c in it: 

1750 if c == "}": 

1751 end = idx 

1752 break 

1753 else: 

1754 # malformed nameescape expression? 

1755 # should have been detected by AST parsing earlier... 

1756 raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") 

1757 yield begin, end 

1758 
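# An illustrative trace (hypothetical, standalone): for a value whose
# characters are \N{BULLET} starting at index 0, the scan above finds the
# unescaped backslash at index 0 followed by 'N', then consumes up to the
# closing brace at index 9, yielding the single protected span (0, 9).
# In a real leaf value the prefix and quote characters shift the indices.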

1759 def _iter_fexpr_slices(self, string: str) -> Iterator[tuple[Index, Index]]: 

1760 """ 

1761 Yields: 

1762 All ranges of @string which, if @string were to be split there, 

1763 would result in the splitting of an f-expression (which is NOT 

1764 allowed). 

1765 """ 

1766 if "f" not in get_string_prefix(string).lower(): 

1767 return 

1768 yield from iter_fexpr_spans(string) 

1769 

1770 def _get_illegal_split_indices(self, string: str) -> set[Index]: 

1771 illegal_indices: set[Index] = set() 

1772 iterators = [ 

1773 self._iter_fexpr_slices(string), 

1774 self._iter_nameescape_slices(string), 

1775 ] 

1776 for it in iterators: 

1777 for begin, end in it: 

1778 illegal_indices.update(range(begin, end)) 

1779 return illegal_indices 

1780 
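# An illustrative use (hypothetical input): for a value containing both an
# f-expression and a name escape, e.g. f"{x} \N{BULLET}", the returned set
# (call it `illegal`) is the union of range(begin, end) over both kinds of
# span, so
#
#     ok_indices = [i for i in range(len(value)) if i not in illegal]
#
# contains only break candidates that split neither construct.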

1781 def _get_break_idx(self, string: str, max_break_idx: int) -> int | None: 

1782 """ 

1783 This method contains the algorithm that StringSplitter uses to 

1784 determine which character to split each string at. 

1785 

1786 Args: 

1787 @string: The substring that we are attempting to split. 

1788 @max_break_idx: The ideal break index. We will return this value if it 

1789 meets all the necessary conditions. In the likely event that it 

1790 doesn't, we will try to find the closest index BELOW @max_break_idx 

1791 that does. If that fails, we will expand our search by also 

1792 considering all valid indices ABOVE @max_break_idx. 

1793 

1794 Pre-Conditions: 

1795 * assert_is_leaf_string(@string) 

1796 * 0 <= @max_break_idx < len(@string) 

1797 

1798 Returns: 

1799 break_idx, if an index is able to be found that meets all of the 

1800 conditions listed in the 'Transformations' section of this class's 

1801 docstring. 

1802 OR 

1803 None, otherwise. 

1804 """ 

1805 is_valid_index = is_valid_index_factory(string) 

1806 

1807 assert is_valid_index(max_break_idx) 

1808 assert_is_leaf_string(string) 

1809 

1810 _illegal_split_indices = self._get_illegal_split_indices(string) 

1811 

1812 def breaks_unsplittable_expression(i: Index) -> bool: 

1813 """ 

1814 Returns: 

1815 True iff returning @i would result in the splitting of an 

1816 unsplittable expression (which is NOT allowed). 

1817 """ 

1818 return i in _illegal_split_indices 

1819 

1820 def passes_all_checks(i: Index) -> bool: 

1821 """ 

1822 Returns: 

1823 True iff ALL of the conditions listed in the 'Transformations' 

1824 section of this class's docstring would be met by returning @i. 

1825 """ 

1826 is_space = string[i] == " " 

1827 is_split_safe = is_valid_index(i - 1) and string[i - 1] in SPLIT_SAFE_CHARS 

1828 

1829 is_not_escaped = True 

1830 j = i - 1 

1831 while is_valid_index(j) and string[j] == "\\": 

1832 is_not_escaped = not is_not_escaped 

1833 j -= 1 

1834 

1835 is_big_enough = ( 

1836 len(string[i:]) >= self.MIN_SUBSTR_SIZE 

1837 and len(string[:i]) >= self.MIN_SUBSTR_SIZE 

1838 ) 

1839 return ( 

1840 (is_space or is_split_safe) 

1841 and is_not_escaped 

1842 and is_big_enough 

1843 and not breaks_unsplittable_expression(i) 

1844 ) 

1845 

1846 # First, we check all indices BELOW @max_break_idx. 

1847 break_idx = max_break_idx 

1848 while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx): 

1849 break_idx -= 1 

1850 

1851 if not passes_all_checks(break_idx): 

1852 # If that fails, we check all indices ABOVE @max_break_idx. 

1853 # 

1854 # If we are able to find a valid index here, the next line is going 

1855 # to be longer than the specified line length, but it's probably 

1856 # better than doing nothing at all. 

1857 break_idx = max_break_idx + 1 

1858 while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx): 

1859 break_idx += 1 

1860 

1861 if not is_valid_index(break_idx) or not passes_all_checks(break_idx): 

1862 return None 

1863 

1864 return break_idx 

1865 
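# A minimal sketch of the two-phase search above (hypothetical helper, not
# part of black); `ok` stands in for passes_all_checks and `n` for
# len(string):
#
#     def find_break(n: int, max_idx: int, ok) -> int | None:
#         i = max_idx
#         while i - 1 >= 0 and not ok(i):  # phase 1: scan downward
#             i -= 1
#         if not ok(i):
#             i = max_idx + 1
#             while i + 1 < n and not ok(i):  # phase 2: scan upward
#                 i += 1
#         return i if 0 <= i < n and ok(i) else None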

1866 def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None: 

1867 if self.normalize_strings: 

1868 leaf.value = normalize_string_quotes(leaf.value) 

1869 

1870 def _normalize_f_string(self, string: str, prefix: str) -> str: 

1871 """ 

1872 Pre-Conditions: 

1873 * assert_is_leaf_string(@string) 

1874 

1875 Returns: 

1876 * If @string is an f-string that contains no f-expressions, we 

1877 return a string identical to @string except that the 'f' prefix 

1878 has been stripped and all double braces (i.e. '{{' or '}}') have 

1879 been normalized (i.e. turned into '{' or '}'). 

1880 OR 

1881 * Otherwise, we return @string. 

1882 """ 

1883 assert_is_leaf_string(string) 

1884 

1885 if "f" in prefix and not fstring_contains_expr(string): 

1886 new_prefix = prefix.replace("f", "") 

1887 

1888 temp = string[len(prefix) :] 

1889 temp = re.sub(r"\{\{", "{", temp) 

1890 temp = re.sub(r"\}\}", "}", temp) 

1891 new_string = temp 

1892 

1893 return f"{new_prefix}{new_string}" 

1894 else: 

1895 return string 

1896 
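# Illustrative values (hypothetical): an f-string with no f-expressions has
# its prefix stripped and its doubled braces un-doubled,
#
#     _normalize_f_string('f"{{x}}"', "f")  ->  '"{x}"'
#
# while a value containing a real f-expression is returned unchanged:
#
#     _normalize_f_string('f"{x}"', "f")    ->  'f"{x}"'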

1897 def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> list[Leaf]: 

1898 LL = list(leaves) 

1899 

1900 string_op_leaves = [] 

1901 i = 0 

1902 while LL[i].type in self.STRING_OPERATORS + [token.NAME]: 

1903 prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip()) 

1904 string_op_leaves.append(prefix_leaf) 

1905 i += 1 

1906 return string_op_leaves 

1907 

1908 

1909class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin): 

1910 """ 

1911 StringTransformer that wraps strings in parens and then splits at the LPAR. 

1912 

1913 Requirements: 

1914 All of the requirements listed in BaseStringSplitter's docstring in 

1915 addition to the requirements listed below: 

1916 

1917 * The line is a return/yield statement, which returns/yields a string. 

1918 OR 

1919 * The line is part of a ternary expression (e.g. `x = y if cond else 

1920 z`) such that the line starts with `else <string>`, where <string> is 

1921 some string. 

1922 OR 

1923 * The line is an assert statement, which ends with a string. 

1924 OR 

1925 * The line is an assignment statement (e.g. `x = <string>` or `x += 

1926 <string>`) such that the variable is being assigned the value of some 

1927 string. 

1928 OR 

1929 * The line is a dictionary key assignment where some valid key is being 

1930 assigned the value of some string. 

1931 OR 

1932 * The line is a lambda expression and the value is a string. 

1933 OR 

1934 * The line starts with an "atom" string that prefers to be wrapped in 

1935 parens. It's preferred to be wrapped when it is an immediate child of 

1936 a list/set/tuple literal, AND the string is surrounded by commas (or is 

1937 the first/last child). 

1938 

1939 Transformations: 

1940 The chosen string is wrapped in parentheses and then split at the LPAR. 

1941 

1942 We then have one line which ends with an LPAR and another line that 

1943 starts with the chosen string. The latter line is then split again at 

1944 the RPAR. This results in the RPAR (and possibly a trailing comma) 

1945 being placed on its own line. 

1946 

1947 NOTE: If any leaves exist to the right of the chosen string (except 

1948 for a trailing comma, which would be placed after the RPAR), those 

1949 leaves are placed inside the parentheses. In effect, the chosen 

1950 string is not necessarily being "wrapped" by parentheses. We can, 

1951 however, count on the LPAR being placed directly before the chosen 

1952 string. 

1953 

1954 In other words, StringParenWrapper creates "atom" strings. These 

1955 can then be split again by StringSplitter, if necessary. 

1956 

1957 Collaborations: 

1958 In the event that a string line split by StringParenWrapper is 

1959 changed such that it no longer needs to be given its own line, 

1960 StringParenWrapper relies on StringParenStripper to clean up the 

1961 parentheses it created. 

1962 

1963 For "atom" strings that prefers to be wrapped in parens, it requires 

1964 StringSplitter to hold the split until the string is wrapped in parens. 

1965 """ 

1966 

1967 def do_splitter_match(self, line: Line) -> TMatchResult: 

1968 LL = line.leaves 

1969 

1970 if line.leaves[-1].type in OPENING_BRACKETS: 

1971 return TErr( 

1972 "Cannot wrap parens around a line that ends in an opening bracket." 

1973 ) 

1974 

1975 string_idx = ( 

1976 self._return_match(LL) 

1977 or self._else_match(LL) 

1978 or self._assert_match(LL) 

1979 or self._assign_match(LL) 

1980 or self._dict_or_lambda_match(LL) 

1981 ) 

1982 

1983 if string_idx is None: 

1984 string_idx = self._trailing_comma_tuple_match(line) 

1985 

1986 if string_idx is None: 

1987 string_idx = self._prefer_paren_wrap_match(LL) 

1988 

1989 if string_idx is not None: 

1990 string_value = line.leaves[string_idx].value 

1991 # If the string has neither spaces nor East Asian stops... 

1992 if not any( 

1993 char == " " or char in SPLIT_SAFE_CHARS for char in string_value 

1994 ): 

1995 # And will still violate the line length limit when split... 

1996 max_string_width = self.line_length - ((line.depth + 1) * 4) 

1997 if str_width(string_value) > max_string_width: 

1998 # And has no associated custom splits... 

1999 if not self.has_custom_splits(string_value): 

2000 # Then we should NOT put this string on its own line. 

2001 return TErr( 

2002 "We do not wrap long strings in parentheses when the" 

2003 " resultant line would still be over the specified line" 

2004 " length and can't be split further by StringSplitter." 

2005 ) 

2006 return Ok([string_idx]) 

2007 

2008 return TErr("This line does not contain any non-atomic strings.") 

2009 
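# Illustrative matches for do_splitter_match above (hypothetical inputs):
#
#     return "<long string>"
#     assert condition, "<long string>"
#     some_var = "<long string>"
#     {"key": "<long string>"}  # a dict entry
#
# each yields Ok([string_idx]) and is later wrapped so the string can sit
# alone inside parentheses.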

2010 @staticmethod 

2011 def _return_match(LL: list[Leaf]) -> int | None: 

2012 """ 

2013 Returns: 

2014 string_idx such that @LL[string_idx] is equal to our target (i.e. 

2015 matched) string, if this line matches the return/yield statement 

2016 requirements listed in the 'Requirements' section of this class's 

2017 docstring. 

2018 OR 

2019 None, otherwise. 

2020 """ 

2021 # If this line is a part of a return/yield statement and the first leaf 

2022 # contains either the "return" or "yield" keywords... 

2023 if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[ 

2024 0 

2025 ].value in ["return", "yield"]: 

2026 is_valid_index = is_valid_index_factory(LL) 

2027 

2028 idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1 

2029 # The next visible leaf MUST contain a string... 

2030 if is_valid_index(idx) and LL[idx].type == token.STRING: 

2031 return idx 

2032 

2033 return None 

2034 
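# An illustrative match (hypothetical): for `return "some string"`, the
# leaves are [NAME('return'), STRING]; idx resolves to 1, LL[1] is the
# string, and _return_match returns 1.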

2035 @staticmethod 

2036 def _else_match(LL: list[Leaf]) -> int | None: 

2037 """ 

2038 Returns: 

2039 string_idx such that @LL[string_idx] is equal to our target (i.e. 

2040 matched) string, if this line matches the ternary expression 

2041 requirements listed in the 'Requirements' section of this class's 

2042 docstring. 

2043 OR 

2044 None, otherwise. 

2045 """ 

2046 # If this line is a part of a ternary expression and the first leaf 

2047 # contains the "else" keyword... 

2048 if ( 

2049 parent_type(LL[0]) == syms.test 

2050 and LL[0].type == token.NAME 

2051 and LL[0].value == "else" 

2052 ): 

2053 is_valid_index = is_valid_index_factory(LL) 

2054 

2055 idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1 

2056 # The next visible leaf MUST contain a string... 

2057 if is_valid_index(idx) and LL[idx].type == token.STRING: 

2058 return idx 

2059 

2060 return None 

2061 
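# An illustrative match (hypothetical): in `x = y if cond else "default"`,
# the line fragment beginning with the NAME leaf 'else' has the STRING at
# index 1, so _else_match returns 1.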

2062 @staticmethod 

2063 def _assert_match(LL: list[Leaf]) -> int | None: 

2064 """ 

2065 Returns: 

2066 string_idx such that @LL[string_idx] is equal to our target (i.e. 

2067 matched) string, if this line matches the assert statement 

2068 requirements listed in the 'Requirements' section of this class's 

2069 docstring. 

2070 OR 

2071 None, otherwise. 

2072 """ 

2073 # If this line is a part of an assert statement and the first leaf 

2074 # contains the "assert" keyword... 

2075 if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert": 

2076 is_valid_index = is_valid_index_factory(LL) 

2077 

2078 for i, leaf in enumerate(LL): 

2079 # We MUST find a comma... 

2080 if leaf.type == token.COMMA: 

2081 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 

2082 

2083 # That comma MUST be followed by a string... 

2084 if is_valid_index(idx) and LL[idx].type == token.STRING: 

2085 string_idx = idx 

2086 

2087 # Skip the string trailer, if one exists. 

2088 string_parser = StringParser() 

2089 idx = string_parser.parse(LL, string_idx) 

2090 

2091 # But no more leaves are allowed... 

2092 if not is_valid_index(idx): 

2093 return string_idx 

2094 

2095 return None 

2096 
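# An illustrative match (hypothetical): for `assert cond, "message"`, the
# leaves are [NAME('assert'), NAME('cond'), COMMA, STRING]; the comma is
# found at i == 2, the string at idx == 3, nothing follows the string
# trailer, and _assert_match returns 3.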

2097 @staticmethod 

2098 def _assign_match(LL: list[Leaf]) -> int | None: 

2099 """ 

2100 Returns: 

2101 string_idx such that @LL[string_idx] is equal to our target (i.e. 

2102 matched) string, if this line matches the assignment statement 

2103 requirements listed in the 'Requirements' section of this class's 

2104 docstring. 

2105 OR 

2106 None, otherwise. 

2107 """ 

2108 # If this line is a part of an expression statement or is a function 

2109 # argument AND the first leaf contains a variable name... 

2110 if ( 

2111 parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power] 

2112 and LL[0].type == token.NAME 

2113 ): 

2114 is_valid_index = is_valid_index_factory(LL) 

2115 

2116 for i, leaf in enumerate(LL): 

2117 # We MUST find either an '=' or '+=' symbol... 

2118 if leaf.type in [token.EQUAL, token.PLUSEQUAL]: 

2119 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 

2120 

2121 # That symbol MUST be followed by a string... 

2122 if is_valid_index(idx) and LL[idx].type == token.STRING: 

2123 string_idx = idx 

2124 

2125 # Skip the string trailer, if one exists. 

2126 string_parser = StringParser() 

2127 idx = string_parser.parse(LL, string_idx) 

2128 

2129 # The next leaf MAY be a comma iff this line is a part 

2130 # of a function argument... 

2131 if ( 

2132 parent_type(LL[0]) == syms.argument 

2133 and is_valid_index(idx) 

2134 and LL[idx].type == token.COMMA 

2135 ): 

2136 idx += 1 

2137 

2138 # But no more leaves are allowed... 

2139 if not is_valid_index(idx): 

2140 return string_idx 

2141 

2142 return None 

2143 
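# An illustrative match (hypothetical): for `some_var = "some string"`,
# the leaves are [NAME, EQUAL, STRING]; the '=' is found at i == 1, the
# string at idx == 2, no leaves remain, and _assign_match returns 2.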

2144 @staticmethod 

2145 def _dict_or_lambda_match(LL: list[Leaf]) -> int | None: 

2146 """ 

2147 Returns: 

2148 string_idx such that @LL[string_idx] is equal to our target (i.e. 

2149 matched) string, if this line matches the dictionary key assignment 

2150 statement or lambda expression requirements listed in the 

2151 'Requirements' section of this class's docstring. 

2152 OR 

2153 None, otherwise. 

2154 """ 

2155 # If this line is a part of a dictionary key assignment or lambda expression... 

2156 parent_types = [parent_type(LL[0]), parent_type(LL[0].parent)] 

2157 if syms.dictsetmaker in parent_types or syms.lambdef in parent_types: 

2158 is_valid_index = is_valid_index_factory(LL) 

2159 

2160 for i, leaf in enumerate(LL): 

2161 # We MUST find a colon; it can be either the dict's or the lambda's colon... 

2162 if leaf.type == token.COLON and i < len(LL) - 1: 

2163 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 

2164 

2165 # That colon MUST be followed by a string... 

2166 if is_valid_index(idx) and LL[idx].type == token.STRING: 

2167 string_idx = idx 

2168 

2169 # Skip the string trailer, if one exists. 

2170 string_parser = StringParser() 

2171 idx = string_parser.parse(LL, string_idx) 

2172 

2173 # That string MAY be followed by a comma... 

2174 if is_valid_index(idx) and LL[idx].type == token.COMMA: 

2175 idx += 1 

2176 

2177 # But no more leaves are allowed... 

2178 if not is_valid_index(idx): 

2179 return string_idx 

2180 

2181 return None 

2182 
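# An illustrative match (hypothetical): for a dict entry line whose leaves
# are [STRING('"key"'), COLON, STRING, COMMA], the colon is found at
# i == 1, the string at idx == 2, the trailing comma is consumed, and the
# method returns 2.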

2183 @staticmethod 

2184 def _trailing_comma_tuple_match(line: Line) -> int | None: 

2185 """ 

2186 Returns: 

2187 string_idx such that @line.leaves[string_idx] is equal to our target 

2188 (i.e. matched) string, if the line is a bare trailing comma tuple 

2189 (STRING + COMMA) not inside brackets. 

2190 OR 

2191 None, otherwise. 

2192 

2193 This handles the case from issue #4912 where a long string with a 

2194 trailing comma (making it a one-item tuple) needs to be wrapped in 

2195 parentheses before splitting to preserve AST equivalence. 

2196 """ 

2197 LL = line.leaves 

2198 # Match: STRING followed by COMMA, not inside brackets 

2199 if ( 

2200 not line.inside_brackets 

2201 and len(LL) == 2 

2202 and LL[0].type == token.STRING 

2203 and LL[1].type == token.COMMA 

2204 ): 

2205 return 0 

2206 

2207 return None 

2208 
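# An illustrative match (hypothetical): a line whose only leaves are
#
#     "<very long string>",
#
# outside any brackets is a one-item tuple, so it is wrapped in
# parentheses before splitting; otherwise breaking up the string could
# change the AST (the concern behind issue #4912).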

2209 def do_transform( 

2210 self, line: Line, string_indices: list[int] 

2211 ) -> Iterator[TResult[Line]]: 

2212 LL = line.leaves 

2213 assert len(string_indices) == 1, ( 

2214 f"{self.__class__.__name__} should only find one match at a time, found" 

2215 f" {len(string_indices)}" 

2216 ) 

2217 string_idx = string_indices[0] 

2218 

2219 is_valid_index = is_valid_index_factory(LL) 

2220 insert_str_child = insert_str_child_factory(LL[string_idx]) 

2221 

2222 comma_idx = -1 

2223 ends_with_comma = False 

2224 if LL[comma_idx].type == token.COMMA: 

2225 ends_with_comma = True 

2226 

2227 leaves_to_steal_comments_from = [LL[string_idx]] 

2228 if ends_with_comma: 

2229 leaves_to_steal_comments_from.append(LL[comma_idx]) 

2230 

2231 # --- First Line 

2232 first_line = line.clone() 

2233 left_leaves = LL[:string_idx] 

2234 

2235 # We have to remember to account for (possibly invisible) LPAR and RPAR 

2236 # leaves that already wrapped the target string. If these leaves do 

2237 # exist, we will replace them with our own LPAR and RPAR leaves. 

2238 old_parens_exist = False 

2239 if left_leaves and left_leaves[-1].type == token.LPAR: 

2240 old_parens_exist = True 

2241 leaves_to_steal_comments_from.append(left_leaves[-1]) 

2242 left_leaves.pop() 

2243 

2244 append_leaves(first_line, line, left_leaves) 

2245 

2246 lpar_leaf = Leaf(token.LPAR, "(") 

2247 if old_parens_exist: 

2248 replace_child(LL[string_idx - 1], lpar_leaf) 

2249 else: 

2250 insert_str_child(lpar_leaf) 

2251 first_line.append(lpar_leaf) 

2252 

2253 # We throw inline comments that were originally to the right of the 

2254 # target string to the top line. They will now be shown to the right of 

2255 # the LPAR. 

2256 for leaf in leaves_to_steal_comments_from: 

2257 for comment_leaf in line.comments_after(leaf): 

2258 first_line.append(comment_leaf, preformatted=True) 

2259 

2260 yield Ok(first_line) 

2261 

2262 # --- Middle (String) Line 

2263 # We only need to yield one (possibly too long) string line, since the 

2264 # `StringSplitter` will break it down further if necessary. 

2265 string_value = LL[string_idx].value 

2266 string_line = Line( 

2267 mode=line.mode, 

2268 depth=line.depth + 1, 

2269 inside_brackets=True, 

2270 should_split_rhs=line.should_split_rhs, 

2271 magic_trailing_comma=line.magic_trailing_comma, 

2272 ) 

2273 string_leaf = Leaf(token.STRING, string_value) 

2274 insert_str_child(string_leaf) 

2275 string_line.append(string_leaf) 

2276 

2277 old_rpar_leaf = None 

2278 if is_valid_index(string_idx + 1): 

2279 right_leaves = LL[string_idx + 1 :] 

2280 if ends_with_comma: 

2281 right_leaves.pop() 

2282 

2283 if old_parens_exist: 

2284 assert right_leaves and right_leaves[-1].type == token.RPAR, ( 

2285 "Apparently, old parentheses do NOT exist?!" 

2286 f" (left_leaves={left_leaves}, right_leaves={right_leaves})" 

2287 ) 

2288 old_rpar_leaf = right_leaves.pop() 

2289 elif right_leaves and right_leaves[-1].type == token.RPAR: 

2290 # Special case for lambda expressions as dict's value, e.g.: 

2291 # my_dict = { 

2292 # "key": lambda x: f"formatted: {x}", 

2293 # } 

2294 # After wrapping the dict's value with parentheses, the string is 

2295 # followed by a RPAR but its opening bracket is lambda's, not 

2296 # the string's: 

2297 # "key": (lambda x: f"formatted: {x}"), 

2298 opening_bracket = right_leaves[-1].opening_bracket 

2299 if opening_bracket is not None and opening_bracket in left_leaves: 

2300 index = left_leaves.index(opening_bracket) 

2301 if ( 

2302 0 < index < len(left_leaves) - 1 

2303 and left_leaves[index - 1].type == token.COLON 

2304 and left_leaves[index + 1].value == "lambda" 

2305 ): 

2306 right_leaves.pop() 

2307 

2308 append_leaves(string_line, line, right_leaves) 

2309 

2310 yield Ok(string_line) 

2311 

2312 # --- Last Line 

2313 last_line = line.clone() 

2314 last_line.bracket_tracker = first_line.bracket_tracker 

2315 

2316 new_rpar_leaf = Leaf(token.RPAR, ")") 

2317 if old_rpar_leaf is not None: 

2318 replace_child(old_rpar_leaf, new_rpar_leaf) 

2319 else: 

2320 insert_str_child(new_rpar_leaf) 

2321 last_line.append(new_rpar_leaf) 

2322 

2323 # If the target string ended with a comma, we place this comma to the 

2324 # right of the RPAR on the last line. 

2325 if ends_with_comma: 

2326 comma_leaf = Leaf(token.COMMA, ",") 

2327 replace_child(LL[comma_idx], comma_leaf) 

2328 last_line.append(comma_leaf) 

2329 

2330 yield Ok(last_line) 

2331 
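# A sketch of the overall effect (hypothetical input and output): for
#
#     some_var = "<long string>"
#
# the three yields above produce, in order,
#
#     some_var = (
#         "<long string>"
#     )
#
# with any trailing comma re-attached to the right of the closing RPAR.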

2332 

2333class StringParser: 

2334 """ 

2335 A state machine that aids in parsing a string's "trailer", which can be 

2336 either non-existent, an old-style formatting sequence (e.g. `% varX` or `% 

2337 (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX, 

2338 varY)`). 

2339 

2340 NOTE: A new StringParser object MUST be instantiated for each string 

2341 trailer we need to parse. 

2342 

2343 Examples: 

2344 We shall assume that `line` equals the `Line` object that corresponds 

2345 to the following line of Python code: 

2346 ``` 

2347 x = "Some {}.".format("String") + some_other_string 

2348 ``` 

2349 

2350 Furthermore, we will assume that `string_idx` is some index such that: 

2351 ``` 

2352 assert line.leaves[string_idx].value == "Some {}." 

2353 ``` 

2354 

2355 The following code snippet then holds: 

2356 ``` 

2357 string_parser = StringParser() 

2358 idx = string_parser.parse(line.leaves, string_idx) 

2359 assert line.leaves[idx].type == token.PLUS 

2360 ``` 

2361 """ 

2362 

2363 DEFAULT_TOKEN: Final = 20210605 

2364 

2365 # String Parser States 

2366 START: Final = 1 

2367 DOT: Final = 2 

2368 NAME: Final = 3 

2369 PERCENT: Final = 4 

2370 SINGLE_FMT_ARG: Final = 5 

2371 LPAR: Final = 6 

2372 RPAR: Final = 7 

2373 DONE: Final = 8 

2374 

2375 # Lookup Table for Next State 

2376 _goto: Final[dict[tuple[ParserState, NodeType], ParserState]] = { 

2377 # A string trailer may start with '.' OR '%'. 

2378 (START, token.DOT): DOT, 

2379 (START, token.PERCENT): PERCENT, 

2380 (START, DEFAULT_TOKEN): DONE, 

2381 # A '.' MUST be followed by an attribute or method name. 

2382 (DOT, token.NAME): NAME, 

2383 # A method name MUST be followed by an '(', whereas an attribute name 

2384 # is the last symbol in the string trailer. 

2385 (NAME, token.LPAR): LPAR, 

2386 (NAME, DEFAULT_TOKEN): DONE, 

2387 # A '%' symbol can be followed by an '(' or a single argument (e.g. a 

2388 # string or variable name). 

2389 (PERCENT, token.LPAR): LPAR, 

2390 (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG, 

2391 # If a '%' symbol is followed by a single argument, that argument is 

2392 # the last leaf in the string trailer. 

2393 (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE, 

2394 # If present, a ')' symbol is the last symbol in a string trailer. 

2395 # (NOTE: LPARS and nested RPARS are not included in this lookup table, 

2396 # since they are treated as a special case by the parsing logic in this 

2397 class's implementation.) 

2398 (RPAR, DEFAULT_TOKEN): DONE, 

2399 } 

2400 
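# An illustrative trace (hypothetical) for the trailer `.format("x")`,
# using the states above:
#
#     START -- DOT    --> DOT
#     DOT   -- NAME   --> NAME
#     NAME  -- LPAR   --> LPAR   (unmatched lpars: 1)
#     LPAR  -- STRING --> LPAR   (special case: wait for the matching RPAR)
#     LPAR  -- RPAR   --> RPAR   (unmatched lpars: 0)
#     RPAR  -- <any>  --> DONE   (the next leaf is not part of the trailer)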

2401 def __init__(self) -> None: 

2402 self._state = self.START 

2403 self._unmatched_lpars = 0 

2404 

2405 def parse(self, leaves: list[Leaf], string_idx: int) -> int: 

2406 """ 

2407 Pre-conditions: 

2408 * @leaves[@string_idx].type == token.STRING 

2409 

2410 Returns: 

2411 The index directly after the last leaf which is a part of the string 

2412 trailer, if a "trailer" exists. 

2413 OR 

2414 @string_idx + 1, if no string "trailer" exists. 

2415 """ 

2416 assert leaves[string_idx].type == token.STRING 

2417 

2418 idx = string_idx + 1 

2419 while idx < len(leaves) and self._next_state(leaves[idx]): 

2420 idx += 1 

2421 return idx 

2422 

2423 def _next_state(self, leaf: Leaf) -> bool: 

2424 """ 

2425 Pre-conditions: 

2426 * On the first call to this function, @leaf MUST be the leaf that 

2427 was directly after the string leaf in question (e.g. if our target 

2428 string is `line.leaves[i]` then the first call to this method must 

2429 be passed `line.leaves[i + 1]`). 

2430 * On the next call to this function, the leaf parameter passed in 

2431 MUST be the leaf directly following @leaf. 

2432 

2433 Returns: 

2434 True iff @leaf is a part of the string's trailer. 

2435 """ 

2436 # We ignore empty LPAR or RPAR leaves. 

2437 if is_empty_par(leaf): 

2438 return True 

2439 

2440 next_token = leaf.type 

2441 if next_token == token.LPAR: 

2442 self._unmatched_lpars += 1 

2443 

2444 current_state = self._state 

2445 

2446 # The LPAR parser state is a special case. We will return True until we 

2447 # find the matching RPAR token. 

2448 if current_state == self.LPAR: 

2449 if next_token == token.RPAR: 

2450 self._unmatched_lpars -= 1 

2451 if self._unmatched_lpars == 0: 

2452 self._state = self.RPAR 

2453 # Otherwise, we use a lookup table to determine the next state. 

2454 else: 

2455 # If the lookup table matches the current state to the next 

2456 # token, we use the lookup table. 

2457 if (current_state, next_token) in self._goto: 

2458 self._state = self._goto[current_state, next_token] 

2459 else: 

2460 # Otherwise, we check if the current state was assigned a 

2461 # default. 

2462 if (current_state, self.DEFAULT_TOKEN) in self._goto: 

2463 self._state = self._goto[current_state, self.DEFAULT_TOKEN] 

2464 # If no default has been assigned, then this parser has a logic 

2465 # error. 

2466 else: 

2467 raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") 

2468 

2469 if self._state == self.DONE: 

2470 return False 

2471 

2472 return True 

2473 

2474 

2475def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]: 

2476 """ 

2477 Factory for a convenience function that is used to orphan @string_leaf 

2478 and then insert multiple new leaves into the same part of the node 

2479 structure that @string_leaf had originally occupied. 

2480 

2481 Examples: 

2482 Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N = 

2483 string_leaf.parent`. Assume the node `N` has the following 

2484 original structure: 

2485 

2486 Node( 

2487 expr_stmt, [ 

2488 Leaf(NAME, 'x'), 

2489 Leaf(EQUAL, '='), 

2490 Leaf(STRING, '"foo"'), 

2491 ] 

2492 ) 

2493 

2494 We then run the code snippet shown below. 

2495 ``` 

2496 insert_str_child = insert_str_child_factory(string_leaf) 

2497 

2498 lpar = Leaf(token.LPAR, '(') 

2499 insert_str_child(lpar) 

2500 

2501 bar = Leaf(token.STRING, '"bar"') 

2502 insert_str_child(bar) 

2503 

2504 rpar = Leaf(token.RPAR, ')') 

2505 insert_str_child(rpar) 

2506 ``` 

2507 

2508 After which point, it follows that `string_leaf.parent is None` and 

2509 the node `N` now has the following structure: 

2510 

2511 Node( 

2512 expr_stmt, [ 

2513 Leaf(NAME, 'x'), 

2514 Leaf(EQUAL, '='), 

2515 Leaf(LPAR, '('), 

2516 Leaf(STRING, '"bar"'), 

2517 Leaf(RPAR, ')'), 

2518 ] 

2519 ) 

2520 """ 

2521 string_parent = string_leaf.parent 

2522 string_child_idx = string_leaf.remove() 

2523 

2524 def insert_str_child(child: LN) -> None: 

2525 nonlocal string_child_idx 

2526 

2527 assert string_parent is not None 

2528 assert string_child_idx is not None 

2529 

2530 string_parent.insert_child(string_child_idx, child) 

2531 string_child_idx += 1 

2532 

2533 return insert_str_child 

2534 

2535 

2536def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]: 

2537 """ 

2538 Examples: 

2539 ``` 

2540 my_list = [1, 2, 3] 

2541 

2542 is_valid_index = is_valid_index_factory(my_list) 

2543 

2544 assert is_valid_index(0) 

2545 assert is_valid_index(2) 

2546 

2547 assert not is_valid_index(3) 

2548 assert not is_valid_index(-1) 

2549 ``` 

2550 """ 

2551 

2552 def is_valid_index(idx: int) -> bool: 

2553 """ 

2554 Returns: 

2555 True iff @idx is non-negative AND seq[@idx] does NOT raise an 

2556 IndexError. 

2557 """ 

2558 return 0 <= idx < len(seq) 

2559 

2560 return is_valid_index