Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/black/trans.py: 13%

1""" 

2String transformers that can split and merge strings. 

3""" 

4 

5import re 

6from abc import ABC, abstractmethod 

7from collections import defaultdict 

8from collections.abc import Callable, Collection, Iterable, Iterator, Sequence 

9from dataclasses import dataclass 

10from typing import Any, ClassVar, Final, Literal, Optional, TypeVar, Union 

11 

12from mypy_extensions import trait 

13 

14from black.comments import contains_pragma_comment 

15from black.lines import Line, append_leaves 

16from black.mode import Feature, Mode 

17from black.nodes import ( 

18 CLOSING_BRACKETS, 

19 OPENING_BRACKETS, 

20 STANDALONE_COMMENT, 

21 is_empty_lpar, 

22 is_empty_par, 

23 is_empty_rpar, 

24 is_part_of_annotation, 

25 parent_type, 

26 replace_child, 

27 syms, 

28) 

29from black.rusty import Err, Ok, Result 

30from black.strings import ( 

31 assert_is_leaf_string, 

32 count_chars_in_width, 

33 get_string_prefix, 

34 has_triple_quotes, 

35 normalize_string_quotes, 

36 str_width, 

37) 

38from blib2to3.pgen2 import token 

39from blib2to3.pytree import Leaf, Node 

40 

41 

42class CannotTransform(Exception): 

43 """Base class for errors raised by Transformers.""" 

44 

45 

46# types 

47T = TypeVar("T") 

48LN = Union[Leaf, Node] 

49Transformer = Callable[[Line, Collection[Feature], Mode], Iterator[Line]] 

50Index = int 

51NodeType = int 

52ParserState = int 

53StringID = int 

54TResult = Result[T, CannotTransform] # (T)ransform Result 

55TMatchResult = TResult[list[Index]] 

56 

57SPLIT_SAFE_CHARS = frozenset(["\u3001", "\u3002", "\uff0c"]) # East Asian stops 

58 

59 

60def TErr(err_msg: str) -> Err[CannotTransform]: 

61 """(T)ransform Err 

62 

63 Convenience function used when working with the TResult type. 

64 """ 

65 cant_transform = CannotTransform(err_msg) 

66 return Err(cant_transform) 
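
# For example (illustrative): `return TErr("no match")` is simply shorthand
# for `return Err(CannotTransform("no match"))`.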


def hug_power_op(
    line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
    """A transformer which normalizes spacing around power operators."""

    # Performance optimization to avoid unnecessary Leaf clones and other ops.
    for leaf in line.leaves:
        if leaf.type == token.DOUBLESTAR:
            break
    else:
        raise CannotTransform("No doublestar token was found in the line.")

    def is_simple_lookup(index: int, kind: Literal[1, -1]) -> bool:
        # Brackets and parentheses indicate calls, subscripts, etc. ...
        # basically stuff that doesn't count as "simple". Only a NAME lookup
        # or dotted lookup (e.g. NAME.NAME) is OK.
        if kind == -1:
            return handle_is_simple_look_up_prev(line, index, {token.RPAR, token.RSQB})
        else:
            return handle_is_simple_lookup_forward(
                line, index, {token.LPAR, token.LSQB}
            )

    def is_simple_operand(index: int, kind: Literal[1, -1]) -> bool:
        # An operand is considered "simple" if it's a NAME, a numeric CONSTANT, or a
        # simple lookup (see above), with or without a preceding unary operator.
        start = line.leaves[index]
        if start.type in {token.NAME, token.NUMBER}:
            return is_simple_lookup(index, kind)

        if start.type in {token.PLUS, token.MINUS, token.TILDE}:
            if line.leaves[index + 1].type in {token.NAME, token.NUMBER}:
                # kind is always one as bases with a preceding unary op will be checked
                # for simplicity starting from the next token (so it'll hit the check
                # above).
                return is_simple_lookup(index + 1, kind=1)

        return False

    new_line = line.clone()
    should_hug = False
    for idx, leaf in enumerate(line.leaves):
        new_leaf = leaf.clone()
        if should_hug:
            new_leaf.prefix = ""
            should_hug = False

        should_hug = (
            (0 < idx < len(line.leaves) - 1)
            and leaf.type == token.DOUBLESTAR
            and is_simple_operand(idx - 1, kind=-1)
            and line.leaves[idx - 1].value != "lambda"
            and is_simple_operand(idx + 1, kind=1)
        )
        if should_hug:
            new_leaf.prefix = ""

        # We have to be careful to make a new line properly:
        # - bracket related metadata must be maintained (handled by Line.append)
        # - comments need to be copied over, updating the leaf IDs they're attached to
        new_line.append(new_leaf, preformatted=True)
        for comment_leaf in line.comments_after(leaf):
            new_line.append(comment_leaf, preformatted=True)

    yield new_line


def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool:
    """
    Determine is_simple_lookup for the leaves that precede the doublestar
    token. This is needed to isolate the chained expression so we can tell
    whether each bracket or parenthesis belongs to that single expression.
    """
    contains_disallowed = False
    chain = []

    while 0 <= index < len(line.leaves):
        current = line.leaves[index]
        chain.append(current)
        if not contains_disallowed and current.type in disallowed:
            contains_disallowed = True
        if not is_expression_chained(chain):
            return not contains_disallowed

        index -= 1

    return True
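
# For example (illustrative): for `foo().bar ** 2` the scan walks left from
# `bar`, building the chain [bar, ., )]. The ")" is a disallowed bracket and
# the chain check fails at that point, so the base is NOT treated as a simple
# lookup and the spaces around `**` are kept.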


def handle_is_simple_lookup_forward(
    line: Line, index: int, disallowed: set[int]
) -> bool:
    """
    Determine is_simple_lookup for the leaves that follow the doublestar token.
    This function is kept simple to stay consistent with the prior logic; the
    forward case is more straightforward and does not need to care about
    chained expressions.
    """
    while 0 <= index < len(line.leaves):
        current = line.leaves[index]
        if current.type in disallowed:
            return False
        if current.type not in {token.NAME, token.DOT} or (
            current.type == token.NAME and current.value == "for"
        ):
            # If the current token isn't disallowed, we'll assume this is simple as
            # only the disallowed tokens are semantically attached to this lookup
            # expression we're checking. Also, stop early if we hit the 'for' bit
            # of a comprehension.
            return True

        index += 1

    return True


def is_expression_chained(chained_leaves: list[Leaf]) -> bool:
    """
    Determine whether the given leaves form a chained call
    (e.g., `foo.lookup`, `foo().lookup`, and `(foo.lookup())` are recognized
    as chained calls).
    """
    if len(chained_leaves) < 2:
        return True

    current_leaf = chained_leaves[-1]
    past_leaf = chained_leaves[-2]

    if past_leaf.type == token.NAME:
        return current_leaf.type in {token.DOT}
    elif past_leaf.type in {token.RPAR, token.RSQB}:
        return current_leaf.type in {token.RSQB, token.RPAR}
    elif past_leaf.type in {token.LPAR, token.LSQB}:
        return current_leaf.type in {token.NAME, token.LPAR, token.LSQB}
    else:
        return False


class StringTransformer(ABC):
    """
    An implementation of the Transformer protocol that relies on its
    subclasses overriding the template methods `do_match(...)` and
    `do_transform(...)`.

    This Transformer works exclusively on strings (for example, by merging
    or splitting them).

    The following sections can be found among the docstrings of each concrete
    StringTransformer subclass.

    Requirements:
        Which requirements must be met of the given Line for this
        StringTransformer to be applied?

    Transformations:
        If the given Line meets all of the above requirements, which string
        transformations can you expect to be applied to it by this
        StringTransformer?

    Collaborations:
        What contractual agreements does this StringTransformer have with other
        StringTransformers? Such collaborations should be eliminated/minimized
        as much as possible.
    """

    __name__: Final = "StringTransformer"

    # Ideally this would be a dataclass, but unfortunately mypyc breaks when used with
    # `abc.ABC`.
    def __init__(self, line_length: int, normalize_strings: bool) -> None:
        self.line_length = line_length
        self.normalize_strings = normalize_strings

    @abstractmethod
    def do_match(self, line: Line) -> TMatchResult:
        """
        Returns:
            * Ok(string_indices) such that for each index, `line.leaves[index]`
              is our target string if a match was able to be made. For
              transformers that don't result in more lines (e.g. StringMerger,
              StringParenStripper), multiple matches and transforms are done at
              once to reduce the complexity.
                OR
            * Err(CannotTransform), if no match could be made.
        """

    @abstractmethod
    def do_transform(
        self, line: Line, string_indices: list[int]
    ) -> Iterator[TResult[Line]]:
        """
        Yields:
            * Ok(new_line) where new_line is the new transformed line.
                OR
            * Err(CannotTransform) if the transformation failed for some reason. The
              `do_match(...)` template method should usually be used to reject
              the form of the given Line, but in some cases it is difficult to
              know whether or not a Line meets the StringTransformer's
              requirements until the transformation is already midway.

        Side Effects:
            This method should NOT mutate @line directly, but it MAY mutate the
            Line's underlying Node structure. (WARNING: If the underlying Node
            structure IS altered, then this method should NOT be allowed to
            yield a CannotTransform after that point.)
        """

    def __call__(
        self, line: Line, _features: Collection[Feature], _mode: Mode
    ) -> Iterator[Line]:
        """
        StringTransformer instances have a call signature that mirrors that of
        the Transformer type.

        Raises:
            CannotTransform(...) if the concrete StringTransformer class is unable
            to transform @line.
        """
        # Optimization to avoid calling `self.do_match(...)` when the line does
        # not contain any string.
        if not any(leaf.type == token.STRING for leaf in line.leaves):
            raise CannotTransform("There are no strings in this line.")

        match_result = self.do_match(line)

        if isinstance(match_result, Err):
            cant_transform = match_result.err()
            raise CannotTransform(
                f"The string transformer {self.__class__.__name__} does not recognize"
                " this line as one that it can transform."
            ) from cant_transform

        string_indices = match_result.ok()

        for line_result in self.do_transform(line, string_indices):
            if isinstance(line_result, Err):
                cant_transform = line_result.err()
                raise CannotTransform(
                    "StringTransformer failed while attempting to transform string."
                ) from cant_transform
            line = line_result.ok()
            yield line


@dataclass
class CustomSplit:
    """A custom (i.e. manual) string split.

    A single CustomSplit instance represents a single substring.

    Examples:
        Consider the following string:
        ```
        "Hi there friend."
        " This is a custom"
        f" string {split}."
        ```

        This string will correspond to the following three CustomSplit instances:
        ```
        CustomSplit(False, 16)
        CustomSplit(False, 17)
        CustomSplit(True, 16)
        ```
    """

    has_prefix: bool
    break_idx: int


@trait
class CustomSplitMapMixin:
    """
    This mixin class is used to map merged strings to a sequence of
    CustomSplits, which will then be used to re-split the strings iff none of
    the resultant substrings go over the configured max line length.
    """

    _Key: ClassVar = tuple[StringID, str]
    _CUSTOM_SPLIT_MAP: ClassVar[dict[_Key, tuple[CustomSplit, ...]]] = defaultdict(
        tuple
    )

    @staticmethod
    def _get_key(string: str) -> "CustomSplitMapMixin._Key":
        """
        Returns:
            A unique identifier that is used internally to map @string to a
            group of custom splits.
        """
        return (id(string), string)

    def add_custom_splits(
        self, string: str, custom_splits: Iterable[CustomSplit]
    ) -> None:
        """Custom Split Map Setter Method

        Side Effects:
            Adds a mapping from @string to the custom splits @custom_splits.
        """
        key = self._get_key(string)
        self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)

    def pop_custom_splits(self, string: str) -> list[CustomSplit]:
        """Custom Split Map Getter Method

        Returns:
            * A list of the custom splits that are mapped to @string, if any
              exist.
                OR
            * [], otherwise.

        Side Effects:
            Deletes the mapping between @string and its associated custom
            splits (which are returned to the caller).
        """
        key = self._get_key(string)

        custom_splits = self._CUSTOM_SPLIT_MAP[key]
        del self._CUSTOM_SPLIT_MAP[key]

        return list(custom_splits)

    def has_custom_splits(self, string: str) -> bool:
        """
        Returns:
            True iff @string is associated with a set of custom splits.
        """
        key = self._get_key(string)
        return key in self._CUSTOM_SPLIT_MAP


class StringMerger(StringTransformer, CustomSplitMapMixin):
    """StringTransformer that merges strings together.

    Requirements:
        (A) The line contains adjacent strings such that ALL of the validation checks
            listed in StringMerger._validate_msg(...)'s docstring pass.
            OR
        (B) The line contains a string which uses line continuation backslashes.

    Transformations:
        Depending on which of the two requirements above were met, either:

        (A) The string group associated with the target string is merged.
            OR
        (B) All line-continuation backslashes are removed from the target string.

    Collaborations:
        StringMerger provides custom split information to StringSplitter.
    """

    def do_match(self, line: Line) -> TMatchResult:
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        string_indices = []
        idx = 0
        while is_valid_index(idx):
            leaf = LL[idx]
            if (
                leaf.type == token.STRING
                and is_valid_index(idx + 1)
                and LL[idx + 1].type == token.STRING
            ):
                # Let's check if the string group contains an inline comment.
                # If we have a comment inline, we don't merge the strings.
                contains_comment = False
                i = idx
                while is_valid_index(i):
                    if LL[i].type != token.STRING:
                        break
                    if line.comments_after(LL[i]):
                        contains_comment = True
                        break
                    i += 1

                if not contains_comment and not is_part_of_annotation(leaf):
                    string_indices.append(idx)

                # Advance to the next non-STRING leaf.
                idx += 2
                while is_valid_index(idx) and LL[idx].type == token.STRING:
                    idx += 1

            elif leaf.type == token.STRING and "\\\n" in leaf.value:
                string_indices.append(idx)
                # Advance to the next non-STRING leaf.
                idx += 1
                while is_valid_index(idx) and LL[idx].type == token.STRING:
                    idx += 1

            else:
                idx += 1

        if string_indices:
            return Ok(string_indices)
        else:
            return TErr("This line has no strings that need merging.")

    def do_transform(
        self, line: Line, string_indices: list[int]
    ) -> Iterator[TResult[Line]]:
        new_line = line

        rblc_result = self._remove_backslash_line_continuation_chars(
            new_line, string_indices
        )
        if isinstance(rblc_result, Ok):
            new_line = rblc_result.ok()

        msg_result = self._merge_string_group(new_line, string_indices)
        if isinstance(msg_result, Ok):
            new_line = msg_result.ok()

        if isinstance(rblc_result, Err) and isinstance(msg_result, Err):
            msg_cant_transform = msg_result.err()
            rblc_cant_transform = rblc_result.err()
            cant_transform = CannotTransform(
                "StringMerger failed to merge any strings in this line."
            )

            # Chain the errors together using `__cause__`.
            msg_cant_transform.__cause__ = rblc_cant_transform
            cant_transform.__cause__ = msg_cant_transform

            yield Err(cant_transform)
        else:
            yield Ok(new_line)

    @staticmethod
    def _remove_backslash_line_continuation_chars(
        line: Line, string_indices: list[int]
    ) -> TResult[Line]:
        """
        Merge strings that were split across multiple lines using
        line-continuation backslashes.

        Returns:
            Ok(new_line), if @line contains backslash line-continuation
            characters.
                OR
            Err(CannotTransform), otherwise.
        """
        LL = line.leaves

        indices_to_transform = []
        for string_idx in string_indices:
            string_leaf = LL[string_idx]
            if (
                string_leaf.type == token.STRING
                and "\\\n" in string_leaf.value
                and not has_triple_quotes(string_leaf.value)
            ):
                indices_to_transform.append(string_idx)

        if not indices_to_transform:
            return TErr(
                "Found no string leaves that contain backslash line continuation"
                " characters."
            )

        new_line = line.clone()
        new_line.comments = line.comments.copy()
        append_leaves(new_line, line, LL)

        for string_idx in indices_to_transform:
            new_string_leaf = new_line.leaves[string_idx]
            new_string_leaf.value = new_string_leaf.value.replace("\\\n", "")

        return Ok(new_line)
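
    # For example (illustrative): a string leaf whose value contains a
    # backslash-newline, such as "abc \<newline>def", becomes "abc def".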

    def _merge_string_group(
        self, line: Line, string_indices: list[int]
    ) -> TResult[Line]:
        """
        Merges string groups (i.e. sets of adjacent strings).

        Each index from `string_indices` designates one string group's first
        leaf in `line.leaves`.

        Returns:
            Ok(new_line), if ALL of the validation checks found in
            _validate_msg(...) pass.
                OR
            Err(CannotTransform), otherwise.
        """
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        # A dict of {string_idx: tuple[num_of_strings, string_leaf]}.
        merged_string_idx_dict: dict[int, tuple[int, Leaf]] = {}
        for string_idx in string_indices:
            vresult = self._validate_msg(line, string_idx)
            if isinstance(vresult, Err):
                continue
            merged_string_idx_dict[string_idx] = self._merge_one_string_group(
                LL, string_idx, is_valid_index
            )

        if not merged_string_idx_dict:
            return TErr("No string group was merged.")

        # Build the final line ('new_line') that this method will later return.
        new_line = line.clone()
        previous_merged_string_idx = -1
        previous_merged_num_of_strings = -1
        for i, leaf in enumerate(LL):
            if i in merged_string_idx_dict:
                previous_merged_string_idx = i
                previous_merged_num_of_strings, string_leaf = merged_string_idx_dict[i]
                new_line.append(string_leaf)

            if (
                previous_merged_string_idx
                <= i
                < previous_merged_string_idx + previous_merged_num_of_strings
            ):
                for comment_leaf in line.comments_after(LL[i]):
                    new_line.append(comment_leaf, preformatted=True)
                continue

            append_leaves(new_line, line, [leaf])

        return Ok(new_line)

    def _merge_one_string_group(
        self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
    ) -> tuple[int, Leaf]:
        """
        Merges one string group where the first string in the group is
        `LL[string_idx]`.

        Returns:
            A tuple of `(num_of_strings, leaf)` where `num_of_strings` is the
            number of strings merged and `leaf` is the newly merged string
            to be replaced in the new line.
        """
        # If the string group is wrapped inside an Atom node, we must make sure
        # to later replace that Atom with our new (merged) string leaf.
        atom_node = LL[string_idx].parent

        # We will place BREAK_MARK in between every two substrings that we
        # merge. We will then later go through our final result and use the
        # various instances of BREAK_MARK we find to add the right values to
        # the custom split map.
        BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"

        QUOTE = LL[string_idx].value[-1]

        def make_naked(string: str, string_prefix: str) -> str:
            """Strip @string (i.e. make it a "naked" string)

            Pre-conditions:
                * assert_is_leaf_string(@string)

            Returns:
                A string that is identical to @string except that
                @string_prefix has been stripped, the surrounding QUOTE
                characters have been removed, and any remaining QUOTE
                characters have been escaped.
            """
            assert_is_leaf_string(string)
            if "f" in string_prefix:
                f_expressions = [
                    string[span[0] + 1 : span[1] - 1]  # +-1 to get rid of curly braces
                    for span in iter_fexpr_spans(string)
                ]
                debug_expressions_contain_visible_quotes = any(
                    re.search(r".*[\'\"].*(?<![!:=])={1}(?!=)(?![^\s:])", expression)
                    for expression in f_expressions
                )
                if not debug_expressions_contain_visible_quotes:
                    # We don't want to toggle visible quotes in debug f-strings, as
                    # that would modify the AST.
                    string = _toggle_fexpr_quotes(string, QUOTE)
                    # After quotes toggling, quotes in expressions won't be escaped
                    # because quotes can't be reused in f-strings. So we can simply
                    # let the escaping logic below run without knowing f-string
                    # expressions.

            RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
            naked_string = string[len(string_prefix) + 1 : -1]
            naked_string = re.sub(
                "(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
            )
            return naked_string

        # Holds the CustomSplit objects that will later be added to the custom
        # split map.
        custom_splits = []

        # Temporary storage for the 'has_prefix' part of the CustomSplit objects.
        prefix_tracker = []

        # Sets the 'prefix' variable. This is the prefix that the final merged
        # string will have.
        next_str_idx = string_idx
        prefix = ""
        while (
            not prefix
            and is_valid_index(next_str_idx)
            and LL[next_str_idx].type == token.STRING
        ):
            prefix = get_string_prefix(LL[next_str_idx].value).lower()
            next_str_idx += 1

        # The next loop merges the string group. The final string will be
        # contained in 'S'.
        #
        # The following convenience variables are used:
        #
        #   S: string
        #   NS: naked string
        #   SS: next string
        #   NSS: naked next string
        S = ""
        NS = ""
        num_of_strings = 0
        next_str_idx = string_idx
        while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
            num_of_strings += 1

            SS = LL[next_str_idx].value
            next_prefix = get_string_prefix(SS).lower()

            # If this is an f-string group but this substring is not prefixed
            # with 'f'...
            if "f" in prefix and "f" not in next_prefix:
                # Then we must escape any braces contained in this substring.
                SS = re.sub(r"(\{|\})", r"\1\1", SS)

            NSS = make_naked(SS, next_prefix)

            has_prefix = bool(next_prefix)
            prefix_tracker.append(has_prefix)

            S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
            NS = make_naked(S, prefix)

            next_str_idx += 1

        # Take note of the index of the non-STRING leaf.
        non_string_idx = next_str_idx

        S_leaf = Leaf(token.STRING, S)
        if self.normalize_strings:
            S_leaf.value = normalize_string_quotes(S_leaf.value)

        # Fill the 'custom_splits' list with the appropriate CustomSplit objects.
        temp_string = S_leaf.value[len(prefix) + 1 : -1]
        for has_prefix in prefix_tracker:
            mark_idx = temp_string.find(BREAK_MARK)
            assert (
                mark_idx >= 0
            ), "Logic error while filling the custom string breakpoint cache."

            temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
            breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
            custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))

        string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))

        if atom_node is not None:
            # If not all children of the atom node are merged (this can happen
            # when there is a standalone comment in the middle) ...
            if non_string_idx - string_idx < len(atom_node.children):
                # We need to replace the old STRING leaves with the new string leaf.
                first_child_idx = LL[string_idx].remove()
                for idx in range(string_idx + 1, non_string_idx):
                    LL[idx].remove()
                if first_child_idx is not None:
                    atom_node.insert_child(first_child_idx, string_leaf)
            else:
                # Else replace the atom node with the new string leaf.
                replace_child(atom_node, string_leaf)

        self.add_custom_splits(string_leaf.value, custom_splits)
        return num_of_strings, string_leaf

    @staticmethod
    def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
        """Validate (M)erge (S)tring (G)roup

        Transform-time string validation logic for _merge_string_group(...).

        Returns:
            * Ok(None), if ALL validation checks (listed below) pass.
                OR
            * Err(CannotTransform), if any of the following are true:
                - The target string group contains inner stand-alone comments.
                - The target string is not in a string group (i.e. it has no
                  adjacent strings).
                - The string group has more than one inline comment.
                - The string group has an inline comment that appears to be a pragma.
                - The set of all string prefixes in the string group is of
                  length greater than one and is not equal to {"", "f"}.
                - The string group consists of raw strings.
                - The string group would merge f-strings with different quote types
                  and internal quotes.
                - The string group is stringified type annotations. We don't want to
                  process stringified type annotations since pyright doesn't support
                  them spanning multiple string values. (NOTE: mypy, pytype, pyre do
                  support them, so we can change if pyright also gains support in the
                  future. See https://github.com/microsoft/pyright/issues/4359.)
        """
        # We first check for "inner" stand-alone comments (i.e. stand-alone
        # comments that have a string leaf before them AND after them).
        for inc in [1, -1]:
            i = string_idx
            found_sa_comment = False
            is_valid_index = is_valid_index_factory(line.leaves)
            while is_valid_index(i) and line.leaves[i].type in [
                token.STRING,
                STANDALONE_COMMENT,
            ]:
                if line.leaves[i].type == STANDALONE_COMMENT:
                    found_sa_comment = True
                elif found_sa_comment:
                    return TErr(
                        "StringMerger does NOT merge string groups which contain "
                        "stand-alone comments."
                    )

                i += inc

        QUOTE = line.leaves[string_idx].value[-1]

        num_of_inline_string_comments = 0
        set_of_prefixes = set()
        num_of_strings = 0
        for leaf in line.leaves[string_idx:]:
            if leaf.type != token.STRING:
                # If the string group is trailed by a comma, we count the
                # comments trailing the comma to be one of the string group's
                # comments.
                if leaf.type == token.COMMA and id(leaf) in line.comments:
                    num_of_inline_string_comments += 1
                break

            if has_triple_quotes(leaf.value):
                return TErr("StringMerger does NOT merge multiline strings.")

            num_of_strings += 1
            prefix = get_string_prefix(leaf.value).lower()
            if "r" in prefix:
                return TErr("StringMerger does NOT merge raw strings.")

            set_of_prefixes.add(prefix)

            if (
                "f" in prefix
                and leaf.value[-1] != QUOTE
                and (
                    "'" in leaf.value[len(prefix) + 1 : -1]
                    or '"' in leaf.value[len(prefix) + 1 : -1]
                )
            ):
                return TErr(
                    "StringMerger does NOT merge f-strings with different quote types"
                    " and internal quotes."
                )

            if id(leaf) in line.comments:
                num_of_inline_string_comments += 1
                if contains_pragma_comment(line.comments[id(leaf)]):
                    return TErr("Cannot merge strings which have pragma comments.")

        if num_of_strings < 2:
            return TErr(
                f"Not enough strings to merge (num_of_strings={num_of_strings})."
            )

        if num_of_inline_string_comments > 1:
            return TErr(
                f"Too many inline string comments ({num_of_inline_string_comments})."
            )

        if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
            return TErr(f"Too many different prefixes ({set_of_prefixes}).")

        return Ok(None)


class StringParenStripper(StringTransformer):
    """StringTransformer that strips surrounding parentheses from strings.

    Requirements:
        The line contains a string which is surrounded by parentheses and:
            - The target string is NOT the only argument to a function call.
            - The target string is NOT a "pointless" string.
            - The target string is NOT a dictionary value.
            - If the target string contains a PERCENT, the brackets are not
              preceded or followed by an operator with higher precedence than
              PERCENT.

    Transformations:
        The parentheses mentioned in the 'Requirements' section are stripped.

    Collaborations:
        StringParenStripper has its own inherent usefulness, but it is also
        relied on to clean up the parentheses created by StringParenWrapper (in
        the event that they are no longer needed).
    """

    def do_match(self, line: Line) -> TMatchResult:
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        string_indices = []

        idx = -1
        while True:
            idx += 1
            if idx >= len(LL):
                break
            leaf = LL[idx]

            # Should be a string...
            if leaf.type != token.STRING:
                continue

            # If this is a "pointless" string...
            if (
                leaf.parent
                and leaf.parent.parent
                and leaf.parent.parent.type == syms.simple_stmt
            ):
                continue

            # Should be preceded by a non-empty LPAR...
            if (
                not is_valid_index(idx - 1)
                or LL[idx - 1].type != token.LPAR
                or is_empty_lpar(LL[idx - 1])
            ):
                continue

            # That LPAR should NOT be preceded by a colon (which could be a
            # dictionary value), function name, or a closing bracket (which
            # could be a function returning a function or a list/dictionary
            # containing a function)...
            if is_valid_index(idx - 2) and (
                LL[idx - 2].type == token.COLON
                or LL[idx - 2].type == token.NAME
                or LL[idx - 2].type in CLOSING_BRACKETS
            ):
                continue

            string_idx = idx

            # Skip the string trailer, if one exists.
            string_parser = StringParser()
            next_idx = string_parser.parse(LL, string_idx)

            # if the leaves in the parsed string include a PERCENT, we need to
            # make sure the initial LPAR is NOT preceded by an operator with
            # higher or equal precedence to PERCENT
            if is_valid_index(idx - 2):
                # mypy can't quite follow unless we name this
                before_lpar = LL[idx - 2]
                if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and (
                    (
                        before_lpar.type
                        in {
                            token.STAR,
                            token.AT,
                            token.SLASH,
                            token.DOUBLESLASH,
                            token.PERCENT,
                            token.TILDE,
                            token.DOUBLESTAR,
                            token.AWAIT,
                            token.LSQB,
                            token.LPAR,
                        }
                    )
                    or (
                        # only unary PLUS/MINUS
                        before_lpar.parent
                        and before_lpar.parent.type == syms.factor
                        and (before_lpar.type in {token.PLUS, token.MINUS})
                    )
                ):
                    continue

            # Should be followed by a non-empty RPAR...
            if (
                is_valid_index(next_idx)
                and LL[next_idx].type == token.RPAR
                and not is_empty_rpar(LL[next_idx])
            ):
                # That RPAR should NOT be followed by anything with higher
                # precedence than PERCENT
                if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in {
                    token.DOUBLESTAR,
                    token.LSQB,
                    token.LPAR,
                    token.DOT,
                }:
                    continue

                string_indices.append(string_idx)
                idx = string_idx
                while idx < len(LL) - 1 and LL[idx + 1].type == token.STRING:
                    idx += 1

        if string_indices:
            return Ok(string_indices)
        return TErr("This line has no strings wrapped in parens.")

    def do_transform(
        self, line: Line, string_indices: list[int]
    ) -> Iterator[TResult[Line]]:
        LL = line.leaves

        string_and_rpar_indices: list[int] = []
        for string_idx in string_indices:
            string_parser = StringParser()
            rpar_idx = string_parser.parse(LL, string_idx)

            should_transform = True
            for leaf in (LL[string_idx - 1], LL[rpar_idx]):
                if line.comments_after(leaf):
                    # Should not strip parentheses which have comments attached
                    # to them.
                    should_transform = False
                    break
            if should_transform:
                string_and_rpar_indices.extend((string_idx, rpar_idx))

        if string_and_rpar_indices:
            yield Ok(self._transform_to_new_line(line, string_and_rpar_indices))
        else:
            yield Err(
                CannotTransform("All string groups have comments attached to them.")
            )

    def _transform_to_new_line(
        self, line: Line, string_and_rpar_indices: list[int]
    ) -> Line:
        LL = line.leaves

        new_line = line.clone()
        new_line.comments = line.comments.copy()

        previous_idx = -1
        # We need to sort the indices, since string_idx and its matching
        # rpar_idx may not come in order, e.g. in
        # `("outer" % ("inner".join(items)))`, the "inner" string's
        # string_idx is smaller than "outer" string's rpar_idx.
        for idx in sorted(string_and_rpar_indices):
            leaf = LL[idx]
            lpar_or_rpar_idx = idx - 1 if leaf.type == token.STRING else idx
            append_leaves(new_line, line, LL[previous_idx + 1 : lpar_or_rpar_idx])
            if leaf.type == token.STRING:
                string_leaf = Leaf(token.STRING, LL[idx].value)
                LL[lpar_or_rpar_idx].remove()  # Remove lpar.
                replace_child(LL[idx], string_leaf)
                new_line.append(string_leaf)
                # replace comments
                old_comments = new_line.comments.pop(id(LL[idx]), [])
                new_line.comments.setdefault(id(string_leaf), []).extend(old_comments)
            else:
                LL[lpar_or_rpar_idx].remove()  # This is a rpar.

            previous_idx = idx

        # Append the leaves after the last idx:
        append_leaves(new_line, line, LL[idx + 1 :])

        return new_line


class BaseStringSplitter(StringTransformer):
    """
    Abstract class for StringTransformers which transform a Line's strings by splitting
    them or placing them on their own lines where necessary to avoid going over
    the configured line length.

    Requirements:
        * The target string value is responsible for the line going over the
          line length limit. It follows that after all of black's other line
          split methods have been exhausted, this line (or one of the resulting
          lines after all line splits are performed) would still be over the
          line_length limit unless we split this string.
            AND

        * The target string is NOT a "pointless" string (i.e. a string that has
          no parent or siblings).
            AND

        * The target string is not followed by an inline comment that appears
          to be a pragma.
            AND

        * The target string is not a multiline (i.e. triple-quote) string.
    """

    STRING_OPERATORS: Final = [
        token.EQEQUAL,
        token.GREATER,
        token.GREATEREQUAL,
        token.LESS,
        token.LESSEQUAL,
        token.NOTEQUAL,
        token.PERCENT,
        token.PLUS,
        token.STAR,
    ]

    @abstractmethod
    def do_splitter_match(self, line: Line) -> TMatchResult:
        """
        BaseStringSplitter asks its clients to override this method instead of
        `StringTransformer.do_match(...)`.

        Follows the same protocol as `StringTransformer.do_match(...)`.

        Refer to `help(StringTransformer.do_match)` for more information.
        """

    def do_match(self, line: Line) -> TMatchResult:
        match_result = self.do_splitter_match(line)
        if isinstance(match_result, Err):
            return match_result

        string_indices = match_result.ok()
        assert len(string_indices) == 1, (
            f"{self.__class__.__name__} should only find one match at a time, found"
            f" {len(string_indices)}"
        )
        string_idx = string_indices[0]
        vresult = self._validate(line, string_idx)
        if isinstance(vresult, Err):
            return vresult

        return match_result

    def _validate(self, line: Line, string_idx: int) -> TResult[None]:
        """
        Checks that @line meets all of the requirements listed in this class's
        docstring. Refer to `help(BaseStringSplitter)` for a detailed
        description of those requirements.

        Returns:
            * Ok(None), if ALL of the requirements are met.
                OR
            * Err(CannotTransform), if ANY of the requirements are NOT met.
        """
        LL = line.leaves

        string_leaf = LL[string_idx]

        max_string_length = self._get_max_string_length(line, string_idx)
        if len(string_leaf.value) <= max_string_length:
            return TErr(
                "The string itself is not what is causing this line to be too long."
            )

        if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [
            token.STRING,
            token.NEWLINE,
        ]:
            return TErr(
                f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
                " no parent)."
            )

        if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment(
            line.comments[id(line.leaves[string_idx])]
        ):
            return TErr(
                "Line appears to end with an inline pragma comment. Splitting the line"
                " could modify the pragma's behavior."
            )

        if has_triple_quotes(string_leaf.value):
            return TErr("We cannot split multiline strings.")

        return Ok(None)

    def _get_max_string_length(self, line: Line, string_idx: int) -> int:
        """
        Calculates the max string length used when attempting to determine
        whether or not the target string is responsible for causing the line to
        go over the line length limit.

        WARNING: This method is tightly coupled to both StringSplitter and
        (especially) StringParenWrapper. There is probably a better way to
        accomplish what is being done here.

        Returns:
            max_string_length: such that `len(line.leaves[string_idx].value) >
            max_string_length` implies that the target string IS responsible
            for causing this line to exceed the line length limit.
        """
        LL = line.leaves

        is_valid_index = is_valid_index_factory(LL)

        # We use the shorthand "WMA4" in comments to abbreviate "We must
        # account for". When giving examples, we use STRING to mean some/any
        # valid string.
        #
        # Finally, we use the following convenience variables:
        #
        #   P: The leaf that is before the target string leaf.
        #   N: The leaf that is after the target string leaf.
        #   NN: The leaf that is after N.

        # WMA4 the whitespace at the beginning of the line.
        offset = line.depth * 4

        if is_valid_index(string_idx - 1):
            p_idx = string_idx - 1
            if (
                LL[string_idx - 1].type == token.LPAR
                and LL[string_idx - 1].value == ""
                and string_idx >= 2
            ):
                # If the previous leaf is an empty LPAR placeholder, we should skip it.
                p_idx -= 1

            P = LL[p_idx]
            if P.type in self.STRING_OPERATORS:
                # WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`).
                offset += len(str(P)) + 1

            if P.type == token.COMMA:
                # WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`].
                offset += 3

            if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]:
                # This conditional branch is meant to handle dictionary keys,
                # variable assignments, 'return STRING' statement lines, and
                # 'else STRING' ternary expression lines.

                # WMA4 a single space.
                offset += 1

                # WMA4 the lengths of any leaves that came before that space,
                # but after any closing bracket before that space.
                for leaf in reversed(LL[: p_idx + 1]):
                    offset += len(str(leaf))
                    if leaf.type in CLOSING_BRACKETS:
                        break

        if is_valid_index(string_idx + 1):
            N = LL[string_idx + 1]
            if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
                # If the next leaf is an empty RPAR placeholder, we should skip it.
                N = LL[string_idx + 2]

            if N.type == token.COMMA:
                # WMA4 a single comma at the end of the string (e.g `STRING,`).
                offset += 1

            if is_valid_index(string_idx + 2):
                NN = LL[string_idx + 2]

                if N.type == token.DOT and NN.type == token.NAME:
                    # This conditional branch is meant to handle method calls invoked
                    # off of a string literal up to and including the LPAR character.

                    # WMA4 the '.' character.
                    offset += 1

                    if (
                        is_valid_index(string_idx + 3)
                        and LL[string_idx + 3].type == token.LPAR
                    ):
                        # WMA4 the left parenthesis character.
                        offset += 1

                    # WMA4 the length of the method's name.
                    offset += len(NN.value)

        has_comments = False
        for comment_leaf in line.comments_after(LL[string_idx]):
            if not has_comments:
                has_comments = True
                # WMA4 two spaces before the '#' character.
                offset += 2

            # WMA4 the length of the inline comment.
            offset += len(comment_leaf.value)

        max_string_length = count_chars_in_width(str(line), self.line_length - offset)
        return max_string_length

    @staticmethod
    def _prefer_paren_wrap_match(LL: list[Leaf]) -> Optional[int]:
        """
        Returns:
            string_idx such that @LL[string_idx] is equal to our target (i.e.
            matched) string, if this line matches the "prefer paren wrap" statement
            requirements listed in the 'Requirements' section of the StringParenWrapper
            class's docstring.
                OR
            None, otherwise.
        """
        # The line must start with a string.
        if LL[0].type != token.STRING:
            return None

        matching_nodes = [
            syms.listmaker,
            syms.dictsetmaker,
            syms.testlist_gexp,
        ]
        # If the string is an immediate child of a list/set/tuple literal...
        if (
            parent_type(LL[0]) in matching_nodes
            or parent_type(LL[0].parent) in matching_nodes
        ):
            # And the string is surrounded by commas (or is the first/last child)...
            prev_sibling = LL[0].prev_sibling
            next_sibling = LL[0].next_sibling
            if (
                not prev_sibling
                and not next_sibling
                and parent_type(LL[0]) == syms.atom
            ):
                # If it's an atom string, we need to check the parent atom's siblings.
                parent = LL[0].parent
                assert parent is not None  # For type checkers.
                prev_sibling = parent.prev_sibling
                next_sibling = parent.next_sibling
            if (not prev_sibling or prev_sibling.type == token.COMMA) and (
                not next_sibling or next_sibling.type == token.COMMA
            ):
                return 0

        return None
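
    # For example (illustrative), in a list literal such as
    #     x = ["some very long string that overflows the line", other]
    # the string is an immediate child of a listmaker and is surrounded by
    # commas (or is the first/last child), so index 0 is returned.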


def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]:
    """
    Yields spans corresponding to expressions in a given f-string.
    Spans are half-open ranges (left inclusive, right exclusive).
    Assumes the input string is a valid f-string, but will not crash if the input
    string is invalid.
    """

    stack: list[int] = []  # our curly paren stack
    i = 0
    while i < len(s):
        if s[i] == "{":
            # if we're in a string part of the f-string, ignore escaped curly braces
            if not stack and i + 1 < len(s) and s[i + 1] == "{":
                i += 2
                continue
            stack.append(i)
            i += 1
            continue

        if s[i] == "}":
            if not stack:
                i += 1
                continue
            j = stack.pop()
            # we've made it back out of the expression! yield the span
            if not stack:
                yield (j, i + 1)
            i += 1
            continue

        # if we're in an expression part of the f-string, fast-forward through strings
        # note that backslashes are not legal in the expression portion of f-strings
        if stack:
            delim = None
            if s[i : i + 3] in ("'''", '"""'):
                delim = s[i : i + 3]
            elif s[i] in ("'", '"'):
                delim = s[i]
            if delim:
                i += len(delim)
                while i < len(s) and s[i : i + len(delim)] != delim:
                    i += 1
                i += len(delim)
                continue
        i += 1


def fstring_contains_expr(s: str) -> bool:
    return any(iter_fexpr_spans(s))


def _toggle_fexpr_quotes(fstring: str, old_quote: str) -> str:
    """
    Toggles quotes used in f-string expressions that are `old_quote`.

    f-string expressions can't contain backslashes, so we need to toggle the
    quotes if the f-string itself will end up using the same quote. We can
    simply toggle without escaping because quotes can't be reused in f-string
    expressions; they would fail to parse.

    NOTE: If PEP 701 is accepted, the above statement will no longer be true.
    Though if quotes can be reused, we can simply reuse them without updates or
    escaping, once Black figures out how to parse the new grammar.
    """

    new_quote = "'" if old_quote == '"' else '"'
    parts = []
    previous_index = 0
    for start, end in iter_fexpr_spans(fstring):
        parts.append(fstring[previous_index:start])
        parts.append(fstring[start:end].replace(old_quote, new_quote))
        previous_index = end
    parts.append(fstring[previous_index:])
    return "".join(parts)


class StringSplitter(BaseStringSplitter, CustomSplitMapMixin):
    """
    StringTransformer that splits "atom" strings (i.e. strings which exist on
    lines by themselves).

    Requirements:
        * The line consists ONLY of a single string (possibly prefixed by a
          string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE
          a trailing comma.
            AND
        * All of the requirements listed in BaseStringSplitter's docstring.

    Transformations:
        The string mentioned in the 'Requirements' section is split into as
        many substrings as necessary to adhere to the configured line length.

        In the final set of substrings, no substring should be smaller than
        MIN_SUBSTR_SIZE characters.

        The string will ONLY be split on spaces (i.e. each new substring should
        start with a space). Note that the string will NOT be split on a space
        which is escaped with a backslash.

        If the string is an f-string, it will NOT be split in the middle of an
        f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x
        else bar()} is an f-expression).

        If the string that is being split has an associated set of custom split
        records and those custom splits will NOT result in any line going over
        the configured line length, those custom splits are used. Otherwise the
        string is split as late as possible (from left-to-right) while still
        adhering to the transformation rules listed above.

    Collaborations:
        StringSplitter relies on StringMerger to construct the appropriate
        CustomSplit objects and add them to the custom split map.
    """

    MIN_SUBSTR_SIZE: Final = 6

    def do_splitter_match(self, line: Line) -> TMatchResult:
        LL = line.leaves

        if self._prefer_paren_wrap_match(LL) is not None:
            return TErr("Line needs to be wrapped in parens first.")

        is_valid_index = is_valid_index_factory(LL)

        idx = 0

        # The first two leaves MAY be the 'not in' keywords...
        if (
            is_valid_index(idx)
            and is_valid_index(idx + 1)
            and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME]
            and str(LL[idx]) + str(LL[idx + 1]) == "not in"
        ):
            idx += 2
        # Else the first leaf MAY be a string operator symbol or the 'in' keyword...
        elif is_valid_index(idx) and (
            LL[idx].type in self.STRING_OPERATORS
            or LL[idx].type == token.NAME
            and str(LL[idx]) == "in"
        ):
            idx += 1

        # The next/first leaf MAY be an empty LPAR...
        if is_valid_index(idx) and is_empty_lpar(LL[idx]):
            idx += 1

        # The next/first leaf MUST be a string...
        if not is_valid_index(idx) or LL[idx].type != token.STRING:
            return TErr("Line does not start with a string.")

        string_idx = idx

        # Skip the string trailer, if one exists.
        string_parser = StringParser()
        idx = string_parser.parse(LL, string_idx)

        # That string MAY be followed by an empty RPAR...
        if is_valid_index(idx) and is_empty_rpar(LL[idx]):
            idx += 1

        # That string / empty RPAR leaf MAY be followed by a comma...
        if is_valid_index(idx) and LL[idx].type == token.COMMA:
            idx += 1

        # But no more leaves are allowed...
        if is_valid_index(idx):
            return TErr("This line does not end with a string.")

        return Ok([string_idx])

    def do_transform(
        self, line: Line, string_indices: list[int]
    ) -> Iterator[TResult[Line]]:
        LL = line.leaves
        assert len(string_indices) == 1, (
            f"{self.__class__.__name__} should only find one match at a time, found"
            f" {len(string_indices)}"
        )
        string_idx = string_indices[0]

        QUOTE = LL[string_idx].value[-1]

        is_valid_index = is_valid_index_factory(LL)
        insert_str_child = insert_str_child_factory(LL[string_idx])

        prefix = get_string_prefix(LL[string_idx].value).lower()

        # We MAY choose to drop the 'f' prefix from substrings that don't
        # contain any f-expressions, but ONLY if the original f-string
        # contains at least one f-expression. Otherwise, we will alter the AST
        # of the program.
        drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr(
            LL[string_idx].value
        )

        first_string_line = True

        string_op_leaves = self._get_string_operator_leaves(LL)
        string_op_leaves_length = (
            sum(len(str(prefix_leaf)) for prefix_leaf in string_op_leaves) + 1
            if string_op_leaves
            else 0
        )

        def maybe_append_string_operators(new_line: Line) -> None:
            """
            Side Effects:
                If @line starts with a string operator and this is the first
                line we are constructing, this function appends the string
                operator to @new_line and replaces the old string operator leaf
                in the node structure. Otherwise this function does nothing.
            """
            maybe_prefix_leaves = string_op_leaves if first_string_line else []
            for i, prefix_leaf in enumerate(maybe_prefix_leaves):
                replace_child(LL[i], prefix_leaf)
                new_line.append(prefix_leaf)

        ends_with_comma = (
            is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA
        )

        def max_last_string_column() -> int:
            """
            Returns:
                The max allowed width of the string value used for the last
                line we will construct. Note that this value means the width
                rather than the number of characters (e.g., many East Asian
                characters expand to two columns).
            """
            result = self.line_length
            result -= line.depth * 4
            result -= 1 if ends_with_comma else 0
            result -= string_op_leaves_length
            return result

        # --- Calculate Max Break Width (for string value)
        # We start with the line length limit
        max_break_width = self.line_length
        # The last index of a string of length N is N-1.
        max_break_width -= 1
        # Leading whitespace is not present in the string value (e.g. Leaf.value).
        max_break_width -= line.depth * 4
        if max_break_width < 0:
            yield TErr(
                f"Unable to split {LL[string_idx].value} at such a high line depth:"
                f" {line.depth}"
            )
            return

        # Check if StringMerger registered any custom splits.
        custom_splits = self.pop_custom_splits(LL[string_idx].value)
        # We use them ONLY if none of them would produce lines that exceed the
        # line limit.
        use_custom_breakpoints = bool(
            custom_splits
            and all(csplit.break_idx <= max_break_width for csplit in custom_splits)
        )

        # Temporary storage for the remaining chunk of the string line that
        # can't fit onto the line currently being constructed.
        rest_value = LL[string_idx].value

        def more_splits_should_be_made() -> bool:
            """
            Returns:
                True iff `rest_value` (the remaining string value from the last
                split) should be split again.
            """
            if use_custom_breakpoints:
                return len(custom_splits) > 1
            else:
                return str_width(rest_value) > max_last_string_column()

        string_line_results: list[Ok[Line]] = []
        while more_splits_should_be_made():
            if use_custom_breakpoints:
                # Custom User Split (manual)
                csplit = custom_splits.pop(0)
                break_idx = csplit.break_idx
            else:
                # Algorithmic Split (automatic)
                max_bidx = (
                    count_chars_in_width(rest_value, max_break_width)
                    - string_op_leaves_length
                )
                maybe_break_idx = self._get_break_idx(rest_value, max_bidx)
                if maybe_break_idx is None:
                    # If we are unable to algorithmically determine a good split
                    # and this string has custom splits registered to it, we
                    # fall back to using them--which means we have to start
                    # over from the beginning.
                    if custom_splits:
                        rest_value = LL[string_idx].value
                        string_line_results = []
                        first_string_line = True
                        use_custom_breakpoints = True
                        continue

                    # Otherwise, we stop splitting here.
                    break

                break_idx = maybe_break_idx

            # --- Construct `next_value`
            next_value = rest_value[:break_idx] + QUOTE

            # HACK: The following 'if' statement is a hack to fix the custom
            # breakpoint index in the case of either: (a) substrings that were
            # f-strings but will have the 'f' prefix removed OR (b) substrings
            # that were not f-strings but will now become f-strings because of
            # redundant use of the 'f' prefix (i.e. none of the substrings
            # contain f-expressions but one or more of them had the 'f' prefix
            # anyway; in which case, we will prepend 'f' to _all_ substrings).
            #
            # There is probably a better way to accomplish what is being done
            # here...
            #
            # If this substring is an f-string, we _could_ remove the 'f'
            # prefix, and the current custom split did NOT originally use a
            # prefix...
            if (
                use_custom_breakpoints
                and not csplit.has_prefix
                and (
                    # `next_value == prefix + QUOTE` happens when the custom
                    # split is an empty string.
                    next_value == prefix + QUOTE
                    or next_value != self._normalize_f_string(next_value, prefix)
                )
            ):
                # Then `csplit.break_idx` will be off by one after removing
                # the 'f' prefix.
                break_idx += 1
                next_value = rest_value[:break_idx] + QUOTE

            if drop_pointless_f_prefix:
                next_value = self._normalize_f_string(next_value, prefix)

            # --- Construct `next_leaf`
            next_leaf = Leaf(token.STRING, next_value)
            insert_str_child(next_leaf)
            self._maybe_normalize_string_quotes(next_leaf)

            # --- Construct `next_line`
            next_line = line.clone()
            maybe_append_string_operators(next_line)
            next_line.append(next_leaf)
            string_line_results.append(Ok(next_line))

            rest_value = prefix + QUOTE + rest_value[break_idx:]
            first_string_line = False

        yield from string_line_results

        if drop_pointless_f_prefix:
            rest_value = self._normalize_f_string(rest_value, prefix)

        rest_leaf = Leaf(token.STRING, rest_value)
        insert_str_child(rest_leaf)

        # NOTE: I could not find a test case that verifies that the following
        # line is actually necessary, but it seems to be. Otherwise we risk
        # not normalizing the last substring, right?
        self._maybe_normalize_string_quotes(rest_leaf)

        last_line = line.clone()
        maybe_append_string_operators(last_line)

        # If there are any leaves to the right of the target string...
        if is_valid_index(string_idx + 1):
            # We use `temp_value` here to determine how long the last line
            # would be if we were to append all the leaves to the right of the
            # target string to the last string line.
            temp_value = rest_value
            for leaf in LL[string_idx + 1 :]:
                temp_value += str(leaf)
                if leaf.type == token.LPAR:
                    break

            # Try to fit them all on the same line with the last substring...
            if (
                str_width(temp_value) <= max_last_string_column()
                or LL[string_idx + 1].type == token.COMMA
            ):
                last_line.append(rest_leaf)
                append_leaves(last_line, line, LL[string_idx + 1 :])
                yield Ok(last_line)
            # Otherwise, place the last substring on one line and everything
            # else on a line below that...
            else:
                last_line.append(rest_leaf)
                yield Ok(last_line)

                non_string_line = line.clone()
                append_leaves(non_string_line, line, LL[string_idx + 1 :])
                yield Ok(non_string_line)
        # Else the target string was the last leaf...
        else:
            last_line.append(rest_leaf)
            last_line.comments = line.comments.copy()
            yield Ok(last_line)

1708 def _iter_nameescape_slices(self, string: str) -> Iterator[tuple[Index, Index]]: 

1709 """ 

1710 Yields: 

1711 All ranges of @string which, if @string were to be split there, 

1712 would result in the splitting of an \\N{...} expression (which is NOT 

1713 allowed). 

1714 """ 

1715 # True - the previous backslash was unescaped 

1716 # False - the previous backslash was escaped *or* there was no backslash 

1717 previous_was_unescaped_backslash = False 

1718 it = iter(enumerate(string)) 

1719 for idx, c in it: 

1720 if c == "\\": 

1721 previous_was_unescaped_backslash = not previous_was_unescaped_backslash 

1722 continue 

1723 if not previous_was_unescaped_backslash or c != "N": 

1724 previous_was_unescaped_backslash = False 

1725 continue 

1726 previous_was_unescaped_backslash = False 

1727 

1728 begin = idx - 1 # the position of the backslash before \N{...} 

1729 for idx, c in it: 

1730 if c == "}": 

1731 end = idx 

1732 break 

1733 else: 

1734 # malformed nameescape expression? 

1735 # should have been detected by AST parsing earlier... 

1736 raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") 

1737 yield begin, end 

1738 
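Why these spans are unsplittable is easy to demonstrate with plain Python (a hedged illustration; the method itself runs on full leaf values, so the indices it yields are offset by the string's prefix and opening quote):

```
# Splitting a literal inside a \N{...} escape yields invalid syntax:
# "\N{GREEK SMALL" " LETTER ALPHA}" fails to compile with a SyntaxError.
value = "\\N{GREEK SMALL LETTER ALPHA} is alpha"
backslash, closing_brace = value.index("\\"), value.index("}")
assert (backslash, closing_brace) == (0, 27)  # no legal split in between
```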

1739 def _iter_fexpr_slices(self, string: str) -> Iterator[tuple[Index, Index]]: 

1740 """ 

1741 Yields: 

1742 All ranges of @string which, if @string were to be split there, 

1743 would result in the splitting of an f-expression (which is NOT 

1744 allowed). 

1745 """ 

1746 if "f" not in get_string_prefix(string).lower(): 

1747 return 

1748 yield from iter_fexpr_spans(string) 

1749 
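`iter_fexpr_spans` is defined elsewhere in this module. The sketch below is a simplified stand-in that conveys the idea: track brace depth and skip doubled braces (unlike the real function, it ignores string literals nested inside the expression):

```
def fexpr_spans_sketch(s: str):
    # Yield half-open (start, end) spans of top-level {...} f-expressions.
    i = 0
    while i < len(s):
        if s[i] == "{" and s[i + 1 : i + 2] != "{":
            depth, start = 1, i
            i += 1
            while i < len(s) and depth:
                if s[i] == "{":
                    depth += 1
                elif s[i] == "}":
                    depth -= 1
                i += 1
            yield start, i
        else:
            # Skip escaped '{{'/'}}' pairs and ordinary characters.
            i += 2 if s[i : i + 2] in ("{{", "}}") else 1

assert list(fexpr_spans_sketch("a {x + 1} b {{literal}}")) == [(2, 9)]
```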

1750 def _get_illegal_split_indices(self, string: str) -> set[Index]: 

1751 illegal_indices: set[Index] = set() 

1752 iterators = [ 

1753 self._iter_fexpr_slices(string), 

1754 self._iter_nameescape_slices(string), 

1755 ] 

1756 for it in iterators: 

1757 for begin, end in it: 

1758 illegal_indices.update(range(begin, end)) 

1759 return illegal_indices 

1760 

1761 def _get_break_idx(self, string: str, max_break_idx: int) -> Optional[int]: 

1762 """ 

1763 This method contains the algorithm that StringSplitter uses to 

1764 determine which character to split each string at. 

1765 

1766 Args: 

1767 @string: The substring that we are attempting to split. 

1768 @max_break_idx: The ideal break index. We will return this value if it 

1769 meets all the necessary conditions. In the likely event that it 

1770 doesn't, we will try to find the closest index BELOW @max_break_idx 

1771 that does. If that fails, we will expand our search by also 

1772 considering all valid indices ABOVE @max_break_idx. 

1773 

1774 Pre-Conditions: 

1775 * assert_is_leaf_string(@string) 

1776 * 0 <= @max_break_idx < len(@string) 

1777 

1778 Returns: 

1779 break_idx, if an index can be found that meets all of the 

1780 conditions listed in the 'Transformations' section of this class's 

1781 docstring. 

1782 OR 

1783 None, otherwise. 

1784 """ 

1785 is_valid_index = is_valid_index_factory(string) 

1786 

1787 assert is_valid_index(max_break_idx) 

1788 assert_is_leaf_string(string) 

1789 

1790 _illegal_split_indices = self._get_illegal_split_indices(string) 

1791 

1792 def breaks_unsplittable_expression(i: Index) -> bool: 

1793 """ 

1794 Returns: 

1795 True iff returning @i would result in the splitting of an 

1796 unsplittable expression (which is NOT allowed). 

1797 """ 

1798 return i in _illegal_split_indices 

1799 

1800 def passes_all_checks(i: Index) -> bool: 

1801 """ 

1802 Returns: 

1803 True iff ALL of the conditions listed in the 'Transformations' 

1804 section of this class's docstring would be met by returning @i. 

1805 """ 

1806 is_space = string[i] == " " 

1807 is_split_safe = is_valid_index(i - 1) and string[i - 1] in SPLIT_SAFE_CHARS 

1808 

1809 is_not_escaped = True 

1810 j = i - 1 

1811 while is_valid_index(j) and string[j] == "\\": 

1812 is_not_escaped = not is_not_escaped 

1813 j -= 1 

1814 
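# Parity example for the loop above: if the leaf value contains two
# backslashes before the space (an escaped backslash, as in r'foo\\ bar'),
# is_not_escaped flips twice and the space stays a legal split point; a
# single backslash (r'foo\ bar') leaves the space escaped, so it is skipped.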

1815 is_big_enough = ( 

1816 len(string[i:]) >= self.MIN_SUBSTR_SIZE 

1817 and len(string[:i]) >= self.MIN_SUBSTR_SIZE 

1818 ) 

1819 return ( 

1820 (is_space or is_split_safe) 

1821 and is_not_escaped 

1822 and is_big_enough 

1823 and not breaks_unsplittable_expression(i) 

1824 ) 

1825 

1826 # First, we check all indices BELOW @max_break_idx. 

1827 break_idx = max_break_idx 

1828 while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx): 

1829 break_idx -= 1 

1830 

1831 if not passes_all_checks(break_idx): 

1832 # If that fails, we check all indices ABOVE @max_break_idx. 

1833 # 

1834 # If we are able to find a valid index here, the next line is going 

1835 # to be longer than the specified line length, but it's probably 

1836 # better than doing nothing at all. 

1837 break_idx = max_break_idx + 1 

1838 while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx): 

1839 break_idx += 1 

1840 

1841 if not is_valid_index(break_idx) or not passes_all_checks(break_idx): 

1842 return None 

1843 

1844 return break_idx 

1845 
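The two-phase search below/above `max_break_idx` is the heart of this method. Here is a minimal self-contained sketch of just that control flow, with an arbitrary `ok` predicate standing in for `passes_all_checks` (the names are illustrative, not from this module):

```
def find_break_sketch(length: int, max_idx: int, ok) -> int | None:
    i = max_idx
    while i - 1 >= 0 and not ok(i):  # phase 1: scan DOWN from max_idx
        i -= 1
    if not ok(i):
        i = max_idx + 1  # phase 2: scan UP past max_idx
        while i + 1 < length and not ok(i):
            i += 1
    return i if 0 <= i < length and ok(i) else None

s = "hello world"
assert find_break_sketch(len(s), 4, lambda i: s[i] == " ") == 5
assert find_break_sketch(len(s), 4, lambda i: False) is None
```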

1846 def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None: 

1847 if self.normalize_strings: 

1848 leaf.value = normalize_string_quotes(leaf.value) 

1849 

1850 def _normalize_f_string(self, string: str, prefix: str) -> str: 

1851 """ 

1852 Pre-Conditions: 

1853 * assert_is_leaf_string(@string) 

1854 

1855 Returns: 

1856 * If @string is an f-string that contains no f-expressions, we 

1857 return a string identical to @string except that the 'f' prefix 

1858 has been stripped and all double braces (i.e. '{{' or '}}') have 

1859 been normalized (i.e. turned into '{' or '}'). 

1860 OR 

1861 * Otherwise, we return @string. 

1862 """ 

1863 assert_is_leaf_string(string) 

1864 

1865 if "f" in prefix and not fstring_contains_expr(string): 

1866 new_prefix = prefix.replace("f", "") 

1867 

1868 temp = string[len(prefix) :] 

1869 temp = re.sub(r"\{\{", "{", temp) 

1870 temp = re.sub(r"\}\}", "}", temp) 

1871 new_string = temp 

1872 

1873 return f"{new_prefix}{new_string}" 

1874 else: 

1875 return string 

1876 
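A standalone sketch of the transformation this method performs (hedged: `has_expr` stands in for `fstring_contains_expr`, which is defined elsewhere in this module):

```
import re

def drop_pointless_f_sketch(string, prefix, has_expr):
    if "f" in prefix and not has_expr(string):
        body = string[len(prefix) :]
        body = re.sub(r"\{\{", "{", body)
        body = re.sub(r"\}\}", "}", body)
        return prefix.replace("f", "") + body
    return string

# The 'f' prefix is pointless here, so it is dropped and '{{'/'}}' collapse:
assert drop_pointless_f_sketch('f"{{x}} = 1"', "f", lambda s: False) == '"{x} = 1"'
# A real f-expression means the string is returned unchanged:
assert drop_pointless_f_sketch('f"{x} = 1"', "f", lambda s: True) == 'f"{x} = 1"'
```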

1877 def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> list[Leaf]: 

1878 LL = list(leaves) 

1879 

1880 string_op_leaves = [] 

1881 i = 0 

1882 while LL[i].type in self.STRING_OPERATORS + [token.NAME]: 

1883 prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip()) 

1884 string_op_leaves.append(prefix_leaf) 

1885 i += 1 

1886 return string_op_leaves 

1887 

1888 

1889class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin): 

1890 """ 

1891 StringTransformer that wraps strings in parens and then splits at the LPAR. 

1892 

1893 Requirements: 

1894 All of the requirements listed in BaseStringSplitter's docstring in 

1895 addition to the requirements listed below: 

1896 

1897 * The line is a return/yield statement, which returns/yields a string. 

1898 OR 

1899 * The line is part of a ternary expression (e.g. `x = y if cond else 

1900 z`) such that the line starts with `else <string>`, where <string> is 

1901 some string. 

1902 OR 

1903 * The line is an assert statement, which ends with a string. 

1904 OR 

1905 * The line is an assignment statement (e.g. `x = <string>` or `x += 

1906 <string>`) such that the variable is being assigned the value of some 

1907 string. 

1908 OR 

1909 * The line is a dictionary key assignment where some valid key is being 

1910 assigned the value of some string. 

1911 OR 

1912 * The line is a lambda expression and the value is a string. 

1913 OR 

1914 * The line starts with an "atom" string that prefers to be wrapped in 

1915 parens. It's preferred to be wrapped when it is an immediate child of 

1916 a list/set/tuple literal, AND the string is surrounded by commas (or is 

1917 the first/last child). 

1918 

1919 Transformations: 

1920 The chosen string is wrapped in parentheses and then split at the LPAR. 

1921 

1922 We then have one line which ends with an LPAR and another line that 

1923 starts with the chosen string. The latter line is then split again at 

1924 the RPAR. This results in the RPAR (and possibly a trailing comma) 

1925 being placed on its own line. 

1926 

1927 NOTE: If any leaves exist to the right of the chosen string (except 

1928 for a trailing comma, which would be placed after the RPAR), those 

1929 leaves are placed inside the parentheses. In effect, the chosen 

1930 string is not necessarily being "wrapped" by parentheses. We can, 

1931 however, count on the LPAR being placed directly before the chosen 

1932 string. 

1933 

1934 In other words, StringParenWrapper creates "atom" strings. These 

1935 can then be split again by StringSplitter, if necessary. 

1936 

1937 Collaborations: 

1938 In the event that a string line split by StringParenWrapper is 

1939 changed such that it no longer needs to be given its own line, 

1940 StringParenWrapper relies on StringParenStripper to clean up the 

1941 parentheses it created. 

1942 

1943 For "atom" strings that prefers to be wrapped in parens, it requires 

1944 StringSplitter to hold the split until the string is wrapped in parens. 

1945 """ 

1946 

1947 def do_splitter_match(self, line: Line) -> TMatchResult: 

1948 LL = line.leaves 

1949 

1950 if line.leaves[-1].type in OPENING_BRACKETS: 

1951 return TErr( 

1952 "Cannot wrap parens around a line that ends in an opening bracket." 

1953 ) 

1954 

1955 string_idx = ( 

1956 self._return_match(LL) 

1957 or self._else_match(LL) 

1958 or self._assert_match(LL) 

1959 or self._assign_match(LL) 

1960 or self._dict_or_lambda_match(LL) 

1961 or self._prefer_paren_wrap_match(LL) 

1962 ) 

1963 

1964 if string_idx is not None: 

1965 string_value = line.leaves[string_idx].value 

1966 # If the string has neither spaces nor East Asian stops... 

1967 if not any( 

1968 char == " " or char in SPLIT_SAFE_CHARS for char in string_value 

1969 ): 

1970 # And will still violate the line length limit when split... 

1971 max_string_width = self.line_length - ((line.depth + 1) * 4) 

1972 if str_width(string_value) > max_string_width: 

1973 # And has no associated custom splits... 

1974 if not self.has_custom_splits(string_value): 

1975 # Then we should NOT put this string on its own line. 

1976 return TErr( 

1977 "We do not wrap long strings in parentheses when the" 

1978 " resultant line would still be over the specified line" 

1979 " length and can't be split further by StringSplitter." 

1980 ) 

1981 return Ok([string_idx]) 

1982 

1983 return TErr("This line does not contain any non-atomic strings.") 

1984 
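A worked instance of the width check above (hedged, assuming the default line length of 88 and 4-space indents):

```
line_length, depth = 88, 1
max_string_width = line_length - ((depth + 1) * 4)  # 88 - 8 == 80
assert max_string_width == 80
# A 100-column string with no spaces or East Asian stops exceeds this width
# even on its own line, so wrapping it in parentheses would be pointless.
```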

1985 @staticmethod 

1986 def _return_match(LL: list[Leaf]) -> Optional[int]: 

1987 """ 

1988 Returns: 

1989 string_idx such that @LL[string_idx] is equal to our target (i.e. 

1990 matched) string, if this line matches the return/yield statement 

1991 requirements listed in the 'Requirements' section of this class's 

1992 docstring. 

1993 OR 

1994 None, otherwise. 

1995 """ 

1996 # If this line is a part of a return/yield statement and the first leaf 

1997 # contains either the "return" or "yield" keywords... 

1998 if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[ 

1999 0 

2000 ].value in ["return", "yield"]: 

2001 is_valid_index = is_valid_index_factory(LL) 

2002 

2003 idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1 

2004 # The next visible leaf MUST contain a string... 

2005 if is_valid_index(idx) and LL[idx].type == token.STRING: 

2006 return idx 

2007 

2008 return None 

2009 
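# Illustrative lines that this matcher accepts (hedged examples, not from
# the test suite):
#     return "some very long string that needs wrapping"
#     yield f"some {value!r} that needs wrapping"
# In both cases the string is the next visible leaf after the keyword.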

2010 @staticmethod 

2011 def _else_match(LL: list[Leaf]) -> Optional[int]: 

2012 """ 

2013 Returns: 

2014 string_idx such that @LL[string_idx] is equal to our target (i.e. 

2015 matched) string, if this line matches the ternary expression 

2016 requirements listed in the 'Requirements' section of this class's 

2017 docstring. 

2018 OR 

2019 None, otherwise. 

2020 """ 

2021 # If this line is a part of a ternary expression and the first leaf 

2022 # contains the "else" keyword... 

2023 if ( 

2024 parent_type(LL[0]) == syms.test 

2025 and LL[0].type == token.NAME 

2026 and LL[0].value == "else" 

2027 ): 

2028 is_valid_index = is_valid_index_factory(LL) 

2029 

2030 idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1 

2031 # The next visible leaf MUST contain a string... 

2032 if is_valid_index(idx) and LL[idx].type == token.STRING: 

2033 return idx 

2034 

2035 return None 

2036 

2037 @staticmethod 

2038 def _assert_match(LL: list[Leaf]) -> Optional[int]: 

2039 """ 

2040 Returns: 

2041 string_idx such that @LL[string_idx] is equal to our target (i.e. 

2042 matched) string, if this line matches the assert statement 

2043 requirements listed in the 'Requirements' section of this class's 

2044 docstring. 

2045 OR 

2046 None, otherwise. 

2047 """ 

2048 # If this line is a part of an assert statement and the first leaf 

2049 # contains the "assert" keyword... 

2050 if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert": 

2051 is_valid_index = is_valid_index_factory(LL) 

2052 

2053 for i, leaf in enumerate(LL): 

2054 # We MUST find a comma... 

2055 if leaf.type == token.COMMA: 

2056 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 

2057 

2058 # That comma MUST be followed by a string... 

2059 if is_valid_index(idx) and LL[idx].type == token.STRING: 

2060 string_idx = idx 

2061 

2062 # Skip the string trailer, if one exists. 

2063 string_parser = StringParser() 

2064 idx = string_parser.parse(LL, string_idx) 

2065 

2066 # But no more leaves are allowed... 

2067 if not is_valid_index(idx): 

2068 return string_idx 

2069 

2070 return None 

2071 

2072 @staticmethod 

2073 def _assign_match(LL: list[Leaf]) -> Optional[int]: 

2074 """ 

2075 Returns: 

2076 string_idx such that @LL[string_idx] is equal to our target (i.e. 

2077 matched) string, if this line matches the assignment statement 

2078 requirements listed in the 'Requirements' section of this class's 

2079 docstring. 

2080 OR 

2081 None, otherwise. 

2082 """ 

2083 # If this line is a part of an expression statement or is a function 

2084 # argument AND the first leaf contains a variable name... 

2085 if ( 

2086 parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power] 

2087 and LL[0].type == token.NAME 

2088 ): 

2089 is_valid_index = is_valid_index_factory(LL) 

2090 

2091 for i, leaf in enumerate(LL): 

2092 # We MUST find either an '=' or '+=' symbol... 

2093 if leaf.type in [token.EQUAL, token.PLUSEQUAL]: 

2094 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 

2095 

2096 # That symbol MUST be followed by a string... 

2097 if is_valid_index(idx) and LL[idx].type == token.STRING: 

2098 string_idx = idx 

2099 

2100 # Skip the string trailer, if one exists. 

2101 string_parser = StringParser() 

2102 idx = string_parser.parse(LL, string_idx) 

2103 

2104 # The next leaf MAY be a comma iff this line is a part 

2105 # of a function argument... 

2106 if ( 

2107 parent_type(LL[0]) == syms.argument 

2108 and is_valid_index(idx) 

2109 and LL[idx].type == token.COMMA 

2110 ): 

2111 idx += 1 

2112 

2113 # But no more leaves are allowed... 

2114 if not is_valid_index(idx): 

2115 return string_idx 

2116 

2117 return None 

2118 

2119 @staticmethod 

2120 def _dict_or_lambda_match(LL: list[Leaf]) -> Optional[int]: 

2121 """ 

2122 Returns: 

2123 string_idx such that @LL[string_idx] is equal to our target (i.e. 

2124 matched) string, if this line matches the dictionary key assignment 

2125 statement or lambda expression requirements listed in the 

2126 'Requirements' section of this class's docstring. 

2127 OR 

2128 None, otherwise. 

2129 """ 

2130 # If this line is a part of a dictionary key assignment or lambda expression... 

2131 parent_types = [parent_type(LL[0]), parent_type(LL[0].parent)] 

2132 if syms.dictsetmaker in parent_types or syms.lambdef in parent_types: 

2133 is_valid_index = is_valid_index_factory(LL) 

2134 

2135 for i, leaf in enumerate(LL): 

2136 # We MUST find a colon; it can be either the dict's or the lambda's colon... 

2137 if leaf.type == token.COLON and i < len(LL) - 1: 

2138 idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1 

2139 

2140 # That colon MUST be followed by a string... 

2141 if is_valid_index(idx) and LL[idx].type == token.STRING: 

2142 string_idx = idx 

2143 

2144 # Skip the string trailer, if one exists. 

2145 string_parser = StringParser() 

2146 idx = string_parser.parse(LL, string_idx) 

2147 

2148 # That string MAY be followed by a comma... 

2149 if is_valid_index(idx) and LL[idx].type == token.COMMA: 

2150 idx += 1 

2151 

2152 # But no more leaves are allowed... 

2153 if not is_valid_index(idx): 

2154 return string_idx 

2155 

2156 return None 

2157 
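# Illustrative lines that this matcher accepts (hedged examples, not from
# the test suite):
#     "key": "some very long value string",
#     "key": lambda x: "a very long formatted string: %s" % x,
# In both cases the target string directly follows a colon.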

2158 def do_transform( 

2159 self, line: Line, string_indices: list[int] 

2160 ) -> Iterator[TResult[Line]]: 

2161 LL = line.leaves 

2162 assert len(string_indices) == 1, ( 

2163 f"{self.__class__.__name__} should only find one match at a time, found" 

2164 f" {len(string_indices)}" 

2165 ) 

2166 string_idx = string_indices[0] 

2167 

2168 is_valid_index = is_valid_index_factory(LL) 

2169 insert_str_child = insert_str_child_factory(LL[string_idx]) 

2170 

2171 comma_idx = -1 

2172 ends_with_comma = False 

2173 if LL[comma_idx].type == token.COMMA: 

2174 ends_with_comma = True 

2175 

2176 leaves_to_steal_comments_from = [LL[string_idx]] 

2177 if ends_with_comma: 

2178 leaves_to_steal_comments_from.append(LL[comma_idx]) 

2179 

2180 # --- First Line 

2181 first_line = line.clone() 

2182 left_leaves = LL[:string_idx] 

2183 

2184 # We have to remember to account for (possibly invisible) LPAR and RPAR 

2185 # leaves that already wrapped the target string. If these leaves do 

2186 # exist, we will replace them with our own LPAR and RPAR leaves. 

2187 old_parens_exist = False 

2188 if left_leaves and left_leaves[-1].type == token.LPAR: 

2189 old_parens_exist = True 

2190 leaves_to_steal_comments_from.append(left_leaves[-1]) 

2191 left_leaves.pop() 

2192 

2193 append_leaves(first_line, line, left_leaves) 

2194 

2195 lpar_leaf = Leaf(token.LPAR, "(") 

2196 if old_parens_exist: 

2197 replace_child(LL[string_idx - 1], lpar_leaf) 

2198 else: 

2199 insert_str_child(lpar_leaf) 

2200 first_line.append(lpar_leaf) 

2201 

2202 # We move inline comments that were originally to the right of the 

2203 # target string up to the first line. They will now be shown to the 

2204 # right of the LPAR. 

2205 for leaf in leaves_to_steal_comments_from: 

2206 for comment_leaf in line.comments_after(leaf): 

2207 first_line.append(comment_leaf, preformatted=True) 

2208 

2209 yield Ok(first_line) 

2210 

2211 # --- Middle (String) Line 

2212 # We only need to yield one (possibly too long) string line, since the 

2213 # `StringSplitter` will break it down further if necessary. 

2214 string_value = LL[string_idx].value 

2215 string_line = Line( 

2216 mode=line.mode, 

2217 depth=line.depth + 1, 

2218 inside_brackets=True, 

2219 should_split_rhs=line.should_split_rhs, 

2220 magic_trailing_comma=line.magic_trailing_comma, 

2221 ) 

2222 string_leaf = Leaf(token.STRING, string_value) 

2223 insert_str_child(string_leaf) 

2224 string_line.append(string_leaf) 

2225 

2226 old_rpar_leaf = None 

2227 if is_valid_index(string_idx + 1): 

2228 right_leaves = LL[string_idx + 1 :] 

2229 if ends_with_comma: 

2230 right_leaves.pop() 

2231 

2232 if old_parens_exist: 

2233 assert right_leaves and right_leaves[-1].type == token.RPAR, ( 

2234 "Apparently, old parentheses do NOT exist?!" 

2235 f" (left_leaves={left_leaves}, right_leaves={right_leaves})" 

2236 ) 

2237 old_rpar_leaf = right_leaves.pop() 

2238 elif right_leaves and right_leaves[-1].type == token.RPAR: 

2239 # Special case for lambda expressions as dict's value, e.g.: 

2240 # my_dict = { 

2241 # "key": lambda x: f"formatted: {x}", 

2242 # } 

2243 # After wrapping the dict's value with parentheses, the string is 

2244 # followed by an RPAR, but its opening bracket is the lambda's, not 

2245 # the string's: 

2246 # "key": (lambda x: f"formatted: {x}"), 

2247 opening_bracket = right_leaves[-1].opening_bracket 

2248 if opening_bracket is not None and opening_bracket in left_leaves: 

2249 index = left_leaves.index(opening_bracket) 

2250 if ( 

2251 0 < index < len(left_leaves) - 1 

2252 and left_leaves[index - 1].type == token.COLON 

2253 and left_leaves[index + 1].value == "lambda" 

2254 ): 

2255 right_leaves.pop() 

2256 

2257 append_leaves(string_line, line, right_leaves) 

2258 

2259 yield Ok(string_line) 

2260 

2261 # --- Last Line 

2262 last_line = line.clone() 

2263 last_line.bracket_tracker = first_line.bracket_tracker 

2264 

2265 new_rpar_leaf = Leaf(token.RPAR, ")") 

2266 if old_rpar_leaf is not None: 

2267 replace_child(old_rpar_leaf, new_rpar_leaf) 

2268 else: 

2269 insert_str_child(new_rpar_leaf) 

2270 last_line.append(new_rpar_leaf) 

2271 

2272 # If the target string ended with a comma, we place this comma to the 

2273 # right of the RPAR on the last line. 

2274 if ends_with_comma: 

2275 comma_leaf = Leaf(token.COMMA, ",") 

2276 replace_child(LL[comma_idx], comma_leaf) 

2277 last_line.append(comma_leaf) 

2278 

2279 yield Ok(last_line) 

2280 
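# Hedged before/after illustration of the three lines yielded above:
#
#     x = "some very long string"    -->    x = (
#                                               "some very long string"
#                                           )
#
# Any trailing comma from the original line is re-emitted after the RPAR.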

2281 

2282class StringParser: 

2283 """ 

2284 A state machine that aids in parsing a string's "trailer", which can be 

2285 either non-existent, an old-style formatting sequence (e.g. `% varX` or `% 

2286 (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX, 

2287 varY)`). 

2288 

2289 NOTE: A new StringParser object MUST be instantiated for each string 

2290 trailer we need to parse. 

2291 

2292 Examples: 

2293 We shall assume that `line` equals the `Line` object that corresponds 

2294 to the following line of Python code: 

2295 ``` 

2296 x = "Some {}.".format("String") + some_other_string 

2297 ``` 

2298 

2299 Furthermore, we will assume that `string_idx` is some index such that: 

2300 ``` 

2301 assert line.leaves[string_idx].value == "Some {}." 

2302 ``` 

2303 

2304 The following code snippet then holds: 

2305 ``` 

2306 string_parser = StringParser() 

2307 idx = string_parser.parse(line.leaves, string_idx) 

2308 assert line.leaves[idx].type == token.PLUS 

2309 ``` 

2310 """ 

2311 

2312 DEFAULT_TOKEN: Final = 20210605 

2313 

2314 # String Parser States 

2315 START: Final = 1 

2316 DOT: Final = 2 

2317 NAME: Final = 3 

2318 PERCENT: Final = 4 

2319 SINGLE_FMT_ARG: Final = 5 

2320 LPAR: Final = 6 

2321 RPAR: Final = 7 

2322 DONE: Final = 8 

2323 

2324 # Lookup Table for Next State 

2325 _goto: Final[dict[tuple[ParserState, NodeType], ParserState]] = { 

2326 # A string trailer may start with '.' OR '%'. 

2327 (START, token.DOT): DOT, 

2328 (START, token.PERCENT): PERCENT, 

2329 (START, DEFAULT_TOKEN): DONE, 

2330 # A '.' MUST be followed by an attribute or method name. 

2331 (DOT, token.NAME): NAME, 

2332 # A method name MUST be followed by an '(', whereas an attribute name 

2333 # is the last symbol in the string trailer. 

2334 (NAME, token.LPAR): LPAR, 

2335 (NAME, DEFAULT_TOKEN): DONE, 

2336 # A '%' symbol can be followed by an '(' or a single argument (e.g. a 

2337 # string or variable name). 

2338 (PERCENT, token.LPAR): LPAR, 

2339 (PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG, 

2340 # If a '%' symbol is followed by a single argument, that argument is 

2341 # the last leaf in the string trailer. 

2342 (SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE, 

2343 # If present, a ')' symbol is the last symbol in a string trailer. 

2344 # (NOTE: LPARS and nested RPARS are not included in this lookup table, 

2345 # since they are treated as a special case by the parsing logic in this 

2346 # classes' implementation.) 

2347 (RPAR, DEFAULT_TOKEN): DONE, 

2348 } 

2349 

2350 def __init__(self) -> None: 

2351 self._state = self.START 

2352 self._unmatched_lpars = 0 

2353 

2354 def parse(self, leaves: list[Leaf], string_idx: int) -> int: 

2355 """ 

2356 Pre-conditions: 

2357 * @leaves[@string_idx].type == token.STRING 

2358 

2359 Returns: 

2360 The index directly after the last leaf which is a part of the string 

2361 trailer, if a "trailer" exists. 

2362 OR 

2363 @string_idx + 1, if no string "trailer" exists. 

2364 """ 

2365 assert leaves[string_idx].type == token.STRING 

2366 

2367 idx = string_idx + 1 

2368 while idx < len(leaves) and self._next_state(leaves[idx]): 

2369 idx += 1 

2370 return idx 

2371 
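A self-contained version of the docstring example above, using hand-built leaves (a hedged sketch; real callers pass `line.leaves`):

```
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf

# Leaves for: "Some {}.".format("String") + some_other_string
leaves = [
    Leaf(token.STRING, '"Some {}."'),
    Leaf(token.DOT, "."),
    Leaf(token.NAME, "format"),
    Leaf(token.LPAR, "("),
    Leaf(token.STRING, '"String"'),
    Leaf(token.RPAR, ")"),
    Leaf(token.PLUS, "+"),
    Leaf(token.NAME, "some_other_string"),
]
string_parser = StringParser()
idx = string_parser.parse(leaves, 0)
assert idx == 6 and leaves[idx].type == token.PLUS
```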

2372 def _next_state(self, leaf: Leaf) -> bool: 

2373 """ 

2374 Pre-conditions: 

2375 * On the first call to this function, @leaf MUST be the leaf that 

2376 was directly after the string leaf in question (e.g. if our target 

2377 string is `line.leaves[i]` then the first call to this method must 

2378 be `line.leaves[i + 1]`). 

2379 * On the next call to this function, the leaf parameter passed in 

2380 MUST be the leaf directly following @leaf. 

2381 

2382 Returns: 

2383 True iff @leaf is a part of the string's trailer. 

2384 """ 

2385 # We ignore empty LPAR or RPAR leaves. 

2386 if is_empty_par(leaf): 

2387 return True 

2388 

2389 next_token = leaf.type 

2390 if next_token == token.LPAR: 

2391 self._unmatched_lpars += 1 

2392 

2393 current_state = self._state 

2394 

2395 # The LPAR parser state is a special case. We will return True until we 

2396 # find the matching RPAR token. 

2397 if current_state == self.LPAR: 

2398 if next_token == token.RPAR: 

2399 self._unmatched_lpars -= 1 

2400 if self._unmatched_lpars == 0: 

2401 self._state = self.RPAR 

2402 # Otherwise, we use a lookup table to determine the next state. 

2403 else: 

2404 # If the lookup table matches the current state to the next 

2405 # token, we use the lookup table. 

2406 if (current_state, next_token) in self._goto: 

2407 self._state = self._goto[current_state, next_token] 

2408 else: 

2409 # Otherwise, we check if the current state was assigned a 

2410 # default. 

2411 if (current_state, self.DEFAULT_TOKEN) in self._goto: 

2412 self._state = self._goto[current_state, self.DEFAULT_TOKEN] 

2413 # If no default has been assigned, then this parser has a logic 

2414 # error. 

2415 else: 

2416 raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!") 

2417 

2418 if self._state == self.DONE: 

2419 return False 

2420 

2421 return True 

2422 

2423 

2424def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]: 

2425 """ 

2426 Factory for a convenience function that is used to orphan @string_leaf 

2427 and then insert multiple new leaves into the same part of the node 

2428 structure that @string_leaf had originally occupied. 

2429 

2430 Examples: 

2431 Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N = 

2432 string_leaf.parent`. Assume the node `N` has the following 

2433 original structure: 

2434 

2435 Node( 

2436 expr_stmt, [ 

2437 Leaf(NAME, 'x'), 

2438 Leaf(EQUAL, '='), 

2439 Leaf(STRING, '"foo"'), 

2440 ] 

2441 ) 

2442 

2443 We then run the code snippet shown below. 

2444 ``` 

2445 insert_str_child = insert_str_child_factory(string_leaf) 

2446 

2447 lpar = Leaf(token.LPAR, '(') 

2448 insert_str_child(lpar) 

2449 

2450 bar = Leaf(token.STRING, '"bar"') 

2451 insert_str_child(bar) 

2452 

2453 rpar = Leaf(token.RPAR, ')') 

2454 insert_str_child(rpar) 

2455 ``` 

2456 

2457 After which point, it follows that `string_leaf.parent is None` and 

2458 the node `N` now has the following structure: 

2459 

2460 Node( 

2461 expr_stmt, [ 

2462 Leaf(NAME, 'x'), 

2463 Leaf(EQUAL, '='), 

2464 Leaf(LPAR, '('), 

2465 Leaf(STRING, '"bar"'), 

2466 Leaf(RPAR, ')'), 

2467 ] 

2468 ) 

2469 """ 

2470 string_parent = string_leaf.parent 

2471 string_child_idx = string_leaf.remove() 

2472 

2473 def insert_str_child(child: LN) -> None: 

2474 nonlocal string_child_idx 

2475 

2476 assert string_parent is not None 

2477 assert string_child_idx is not None 

2478 

2479 string_parent.insert_child(string_child_idx, child) 

2480 string_child_idx += 1 

2481 

2482 return insert_str_child 

2483 

2484 

2485def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]: 

2486 """ 

2487 Examples: 

2488 ``` 

2489 my_list = [1, 2, 3] 

2490 

2491 is_valid_index = is_valid_index_factory(my_list) 

2492 

2493 assert is_valid_index(0) 

2494 assert is_valid_index(2) 

2495 

2496 assert not is_valid_index(3) 

2497 assert not is_valid_index(-1) 

2498 ``` 

2499 """ 

2500 

2501 def is_valid_index(idx: int) -> bool: 

2502 """ 

2503 Returns: 

2504 True iff @idx is non-negative AND seq[@idx] does NOT raise an 

2505 IndexError. 

2506 """ 

2507 return 0 <= idx < len(seq) 

2508 

2509 return is_valid_index