######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <https://www.gnu.org/licenses/>.
######################### END LICENSE BLOCK #########################
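"""Single-byte charset prober.

Scores a byte stream against one SingleByteCharSetModel by counting how
often adjacent characters form sequences that the model considers likely,
then converts those counts into a confidence value.
"""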

import logging
from collections.abc import Mapping
from typing import NamedTuple, Optional, Union

from .charsetprober import CharSetProber
from .enums import CharacterCategory, ProbingState, SequenceLikelihood


class SingleByteCharSetModel(NamedTuple):
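    """Static description of a single-byte charset and its language model.

    char_to_order_map maps each byte value to a frequency order or a
    CharacterCategory for non-letters; language_model maps a pair of orders
    to a SequenceLikelihood; typical_positive_ratio is, roughly, the
    proportion of positive sequences expected in representative text for
    the language.
    """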
    charset_name: str
    language: str
    char_to_order_map: Mapping[int, Union[CharacterCategory, int]]
    language_model: Mapping[int, Mapping[int, Union[SequenceLikelihood, int]]]
    typical_positive_ratio: float
    keep_ascii_letters: bool
    alphabet: str


class SingleByteCharSetProber(CharSetProber):
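    """Prober for a single single-byte charset/language pair.

    Tallies two-character sequences from the fed bytes against the model's
    language_model; get_confidence() turns the tallies into a score and
    feed() may short-circuit the state to FOUND_IT or NOT_ME once enough
    sequences have been seen.

    Typically constructed by a group prober with one of the generated
    models, for example (constant name assumed from the sibling
    lang*model modules):

        prober = SingleByteCharSetProber(WINDOWS_1251_RUSSIAN_MODEL)
        prober.feed(buf)
        confidence = prober.get_confidence()
    """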
    SB_ENOUGH_REL_THRESHOLD = 1024  # 0.25 * SAMPLE_SIZE^2 (SAMPLE_SIZE was 64)
    POSITIVE_SHORTCUT_THRESHOLD = 0.95
    NEGATIVE_SHORTCUT_THRESHOLD = 0.05

    def __init__(
        self,
        model: SingleByteCharSetModel,
        is_reversed: bool = False,
        name_prober: Optional[CharSetProber] = None,
    ) -> None:
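        """Bind the prober to a model.

        is_reversed swaps the (previous, current) order pair used for the
        model lookup, which the Hebrew handling uses for visual-order text.
        name_prober, when given, supplies the reported charset_name and
        language instead of this prober's own model.
        """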
        super().__init__()
        self._model = model
        # TRUE if we need to reverse every pair in the model lookup
        self._reversed = is_reversed
        # Optional auxiliary prober for name decision
        self._name_prober = name_prober
        self._last_order = CharacterCategory.UNDEFINED
        self._seq_counters: list[int] = []
        self._total_seqs = 0
        self._total_char = 0
        self._control_char = 0
        self._freq_char = 0
        self.logger = logging.getLogger(__name__)
        self.reset()

    def reset(self) -> None:
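        """Clear all sequence and character counters for a fresh run."""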
        super().reset()
        # char order of last character
        self._last_order = CharacterCategory.UNDEFINED
        self._seq_counters = [0] * len(SequenceLikelihood)
        self._total_seqs = 0
        self._total_char = 0
        self._control_char = 0
        # characters that fall in our sampling range
        self._freq_char = 0

    @property
    def charset_name(self) -> Optional[str]:
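        """Reported charset name, deferring to the name prober when set."""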
        if self._name_prober:
            return self._name_prober.charset_name
        return self._model.charset_name

    @property
    def language(self) -> Optional[str]:
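        """Reported language, deferring to the name prober when set."""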
        if self._name_prober:
            return self._name_prober.language
        return self._model.language

    def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
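        """Consume a chunk of bytes and update the sequence statistics.

        The state flips to NOT_ME as soon as an unmapped byte is seen, and
        may flip to FOUND_IT or NOT_ME once enough sequences have been
        scored; the current ProbingState is returned either way.
        """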
        if self._model.keep_ascii_letters:
            byte_str = self.remove_xml_tags(byte_str)
        else:
            byte_str = self.filter_international_words(byte_str)
        if not byte_str:
            return self.state
        char_to_order_map = self._model.char_to_order_map
        language_model = self._model.language_model
        for char in byte_str:
            # Treat bytes missing from the mapping as UNDEFINED instead of
            # raising a KeyError.
            order = char_to_order_map.get(char, CharacterCategory.UNDEFINED)
            if order == CharacterCategory.UNDEFINED:
                # If we find a character that is undefined in the mapping,
                # this cannot be the right charset
                self._state = ProbingState.NOT_ME
                self._last_order = order
                break
            if order == CharacterCategory.CONTROL:
                # Count control characters so that get_confidence() can
                # discount them, following uchardet's lead.
                # See https://github.com/BYVoid/uchardet/commit/55b4f23971db61
                self._control_char += 1
            if order < CharacterCategory.DIGIT:
                self._total_char += 1
            # Only orders below the CONTROL category fall in our sampling
            # range and can form a scored sequence.
            if 0 < order < CharacterCategory.CONTROL:
                self._freq_char += 1
                if 0 < self._last_order < CharacterCategory.CONTROL:
                    self._total_seqs += 1
                    if not self._reversed:
                        lm_cat = language_model[self._last_order][order]
                    else:
                        lm_cat = language_model[order][self._last_order]
                    self._seq_counters[lm_cat] += 1
            self._last_order = order

        charset_name = self._model.charset_name
        if self.state == ProbingState.DETECTING:
            if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
                confidence = self.get_confidence()
                if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
                    self.logger.debug(
                        "%s confidence = %s, we have a winner", charset_name, confidence
                    )
                    self._state = ProbingState.FOUND_IT
                elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
                    self.logger.debug(
                        "%s confidence = %s, below negative shortcut threshold %s",
                        charset_name,
                        confidence,
                        self.NEGATIVE_SHORTCUT_THRESHOLD,
                    )
                    self._state = ProbingState.NOT_ME
            # Early termination: if we have enough data and very low
            # confidence, give up
            elif self._total_seqs > 512 and self._total_char > 1000:
                confidence = self.get_confidence()
                if confidence < 0.01:
                    self.logger.debug(
                        "%s confidence = %s, giving up early", charset_name, confidence
                    )
                    self._state = ProbingState.NOT_ME

        return self.state

    def get_confidence(self) -> float:
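        """Return a confidence estimate between 0.0 and 0.99.

        The base score is the positive-sequence ratio normalised by the
        model's typical_positive_ratio; it is then scaled by the ratio of
        positive sequences per character, the share of non-control
        characters, and the share of frequent characters. A floor value of
        0.01 is returned before any sequences have been counted.
        """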
        r = 0.01
        if self._total_seqs > 0 and self._total_char > 0:
            r = (
                self._seq_counters[SequenceLikelihood.POSITIVE]
                / self._total_seqs
                / self._model.typical_positive_ratio
            )
            # Multiply by the ratio of positive sequences per character.
            # This helps distinguish close winners by penalizing models
            # that have very few positive sequences relative to the
            # number of characters. If you add a letter, you'd expect
            # the positive sequence count to increase proportionally.
            # If it doesn't, the new character may not have been a letter
            # but a symbol, making the model less confident.
            r *= (
                self._seq_counters[SequenceLikelihood.POSITIVE]
                + self._seq_counters[SequenceLikelihood.LIKELY] / 4
            ) / self._total_char
            # The more control characters (proportionally to the size
            # of the text), the less confident we become in the current
            # charset.
            r *= (self._total_char - self._control_char) / self._total_char
            # The more frequent characters (proportionally to the size
            # of the text), the more confident we become in the current
            # charset.
            r *= self._freq_char / self._total_char
            if r >= 1.0:
                r = 0.99
        return r