Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/markdown_it/parser_block.py: 100%

46 statements  

« prev     ^ index     » next       coverage.py v7.2.7, created at 2023-06-07 06:07 +0000

1"""Block-level tokenizer.""" 

2from __future__ import annotations 

3 

4import logging 

5 

6from . import rules_block 

7from .ruler import Ruler 

8from .rules_block.state_block import StateBlock 

9from .token import Token 

10 

11LOGGER = logging.getLogger(__name__) 

12 

13 

# Ordered table of block-level rules, consumed by ParserBlock.__init__.
# Each entry is either a 2-tuple ``(name, rule_func)`` or a 3-tuple
# ``(name, rule_func, alt)``; a missing ``alt`` means "terminates nothing".
_rules: list[tuple] = [
    # First 2 params - rule name & source. Secondary array - list of rules,
    # which can be terminated by this one.
    ("table", rules_block.table, ["paragraph", "reference"]),
    ("code", rules_block.code),
    ("fence", rules_block.fence, ["paragraph", "reference", "blockquote", "list"]),
    (
        "blockquote",
        rules_block.blockquote,
        ["paragraph", "reference", "blockquote", "list"],
    ),
    ("hr", rules_block.hr, ["paragraph", "reference", "blockquote", "list"]),
    ("list", rules_block.list_block, ["paragraph", "reference", "blockquote"]),
    ("reference", rules_block.reference),
    ("html_block", rules_block.html_block, ["paragraph", "reference", "blockquote"]),
    ("heading", rules_block.heading, ["paragraph", "reference", "blockquote"]),
    ("lheading", rules_block.lheading),
    ("paragraph", rules_block.paragraph),
]

33 

34 

class ParserBlock:
    """
    ParserBlock#ruler -> Ruler

    [[Ruler]] instance. Keep configuration of block rules.
    """

    def __init__(self) -> None:
        """Populate the ruler from the module-level ``_rules`` table."""
        self.ruler = Ruler()
        for data in _rules:
            # Entries are (name, rule) or (name, rule, alt): unpack instead of
            # indexing; ``alt`` defaults to [] (rule terminates nothing).
            name, rule, *rest = data
            self.ruler.push(name, rule, {"alt": rest[0] if rest else []})

    def tokenize(
        self, state: StateBlock, startLine: int, endLine: int, silent: bool = False
    ) -> None:
        """Generate tokens for input range.

        ``silent`` is accepted for interface compatibility with other
        tokenizers but is not used here: rules are always invoked with
        ``silent=False``.
        """
        rules = self.ruler.getRules("")
        line = startLine
        maxNesting = state.md.options.maxNesting
        hasEmptyLines = False

        while line < endLine:
            state.line = line = state.skipEmptyLines(line)
            if line >= endLine:
                break
            if state.sCount[line] < state.blkIndent:
                # Termination condition for nested calls.
                # Nested calls currently used for blockquotes & lists
                break
            if state.level >= maxNesting:
                # If nesting level exceeded - skip tail to the end.
                # That's not ordinary situation and we should not care about content.
                state.line = endLine
                break

            # Try all possible rules.
            # On success, rule should:
            # - update `state.line`
            # - update `state.tokens`
            # - return True
            for rule in rules:
                if rule(state, line, endLine, False):
                    break

            # set state.tight if we had an empty line before current tag
            # i.e. latest empty line should not count
            state.tight = not hasEmptyLines

            line = state.line

            # paragraph might "eat" one newline after it in nested lists
            if (line - 1) < endLine and state.isEmpty(line - 1):
                hasEmptyLines = True

            if line < endLine and state.isEmpty(line):
                hasEmptyLines = True
                line += 1
        state.line = line

    def parse(
        self,
        src: str,
        md,
        env,
        outTokens: list[Token],
        ords: tuple[int, ...] | None = None,
    ) -> list[Token] | None:
        """Process input string and push block tokens into `outTokens`.

        :param src: the source text to tokenize
        :param md: the parent MarkdownIt instance (untyped here to avoid an
            import cycle — TODO confirm against the caller)
        :param env: sandbox object shared across the parse
        :param outTokens: list that receives the generated block tokens
        :param ords: optional pre-computed code points of ``src``
        :returns: the token list, or ``None`` when ``src`` is empty
        """
        if not src:
            return None
        state = StateBlock(src, md, env, outTokens, ords)
        self.tokenize(state, state.line, state.lineMax)
        return state.tokens