Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/markdown_it/parser_block.py: 98%

49 statements  

coverage.py v7.2.7, created at 2023-06-07 06:15 +0000

1"""Block-level tokenizer.""" 

2from __future__ import annotations 

3 

4import logging 

5from typing import TYPE_CHECKING, Callable 

6 

7from . import rules_block 

8from .ruler import Ruler 

9from .rules_block.state_block import StateBlock 

10from .token import Token 

11from .utils import EnvType 

12 

13if TYPE_CHECKING: 

14 from markdown_it import MarkdownIt 

15 

16LOGGER = logging.getLogger(__name__) 

17 

18 

19RuleFuncBlockType = Callable[[StateBlock, int, int, bool], bool] 

20"""(state: StateBlock, startLine: int, endLine: int, silent: bool) -> matched: bool) 

21 

22`silent` disables token generation, useful for lookahead. 

23""" 

_rules: list[tuple[str, RuleFuncBlockType, list[str]]] = [
    # First two items: rule name & rule function. The third is a list of rules
    # which can be terminated by this one.
    ("table", rules_block.table, ["paragraph", "reference"]),
    ("code", rules_block.code, []),
    ("fence", rules_block.fence, ["paragraph", "reference", "blockquote", "list"]),
    (
        "blockquote",
        rules_block.blockquote,
        ["paragraph", "reference", "blockquote", "list"],
    ),
    ("hr", rules_block.hr, ["paragraph", "reference", "blockquote", "list"]),
    ("list", rules_block.list_block, ["paragraph", "reference", "blockquote"]),
    ("reference", rules_block.reference, []),
    ("html_block", rules_block.html_block, ["paragraph", "reference", "blockquote"]),
    ("heading", rules_block.heading, ["paragraph", "reference", "blockquote"]),
    ("lheading", rules_block.lheading, []),
    ("paragraph", rules_block.paragraph, []),
]
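

# A hedged sketch of how the third ("alt") element is used: rules that name
# "paragraph" there are returned by ruler.getRules("paragraph"), and the
# paragraph rule probes them with silent=True to decide where a paragraph
# ends. Registering the hypothetical _example_rule above so that it may
# interrupt paragraphs would be user code, roughly:
def _example_registration() -> None:
    # Local import only to avoid a circular import in this illustrative sketch.
    from markdown_it import MarkdownIt

    md = MarkdownIt()
    md.block.ruler.before("hr", "example", _example_rule, {"alt": ["paragraph"]})
    # A "%%%" line can now terminate the paragraph preceding it.
    md.parse("some text\n%%%\nmore text")
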


class ParserBlock:
    """
    ParserBlock#ruler -> Ruler

    [[Ruler]] instance. Keeps the configuration of block rules.
    """

    def __init__(self) -> None:
        self.ruler = Ruler[RuleFuncBlockType]()
        for name, rule, alt in _rules:
            self.ruler.push(name, rule, {"alt": alt})

    def tokenize(self, state: StateBlock, startLine: int, endLine: int) -> None:
        """Generate tokens for the input range."""
        rules = self.ruler.getRules("")
        line = startLine
        maxNesting = state.md.options.maxNesting
        hasEmptyLines = False

        while line < endLine:
            state.line = line = state.skipEmptyLines(line)
            if line >= endLine:
                break
            if state.sCount[line] < state.blkIndent:
                # Termination condition for nested calls.
                # Nested calls are currently used for blockquotes & lists.
                break
            if state.level >= maxNesting:
                # If the nesting level is exceeded, skip the tail to the end.
                # That's not an ordinary situation, and we should not care
                # about the content.
                state.line = endLine
                break

            # Try all possible rules.
            # On success, a rule should:
            # - update `state.line`
            # - update `state.tokens`
            # - return True
            for rule in rules:
                if rule(state, line, endLine, False):
                    break

            # Set state.tight if we had an empty line before the current tag,
            # i.e. the latest empty line should not count.
            state.tight = not hasEmptyLines

            line = state.line

            # A paragraph might "eat" one newline after it in nested lists.
            if (line - 1) < endLine and state.isEmpty(line - 1):
                hasEmptyLines = True

            if line < endLine and state.isEmpty(line):
                hasEmptyLines = True
                line += 1
                state.line = line

    def parse(
        self, src: str, md: MarkdownIt, env: EnvType, outTokens: list[Token]
    ) -> list[Token] | None:
        """Process input string and push block tokens into `outTokens`."""
        if not src:
            return None
        state = StateBlock(src, md, env, outTokens)
        self.tokenize(state, state.line, state.lineMax)
        return state.tokens
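

# A hedged usage sketch (user code, not part of this module): MarkdownIt.parse
# drives this block parser through the core "block" rule, so block tokens are
# normally obtained from the top-level API. The maxNesting limit read by
# tokenize above comes from the parser options and can be tuned via
# options_update.
def _example_usage() -> None:
    # Local import only to avoid a circular import in this illustrative sketch.
    from markdown_it import MarkdownIt

    md = MarkdownIt(options_update={"maxNesting": 20})
    for tok in md.parse("# Title\n\nA paragraph with *emphasis*."):
        print(tok.type, tok.tag, tok.map)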