Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/chardet/__init__.py: 37%

43 statements  

######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################

from typing import List, Union

from .charsetgroupprober import CharSetGroupProber
from .charsetprober import CharSetProber
from .enums import InputState
from .resultdict import ResultDict
from .universaldetector import UniversalDetector
from .version import VERSION, __version__

__all__ = ["UniversalDetector", "detect", "detect_all", "__version__", "VERSION"]


def detect(
    byte_str: Union[bytes, bytearray], should_rename_legacy: bool = False
) -> ResultDict:
    """
    Detect the encoding of the given byte string.

    :param byte_str:     The byte sequence to examine.
    :type byte_str:      ``bytes`` or ``bytearray``
    :param should_rename_legacy:  Should we rename legacy encodings
                                  to their more modern equivalents?
    :type should_rename_legacy:   ``bool``
    """
    if not isinstance(byte_str, bytearray):
        if not isinstance(byte_str, bytes):
            raise TypeError(
                f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
            )
        byte_str = bytearray(byte_str)
    detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
    detector.feed(byte_str)
    return detector.close()


def detect_all(
    byte_str: Union[bytes, bytearray],
    ignore_threshold: bool = False,
    should_rename_legacy: bool = False,
) -> List[ResultDict]:
    """
    Detect all the possible encodings of the given byte string.

    :param byte_str:          The byte sequence to examine.
    :type byte_str:           ``bytes`` or ``bytearray``
    :param ignore_threshold:  Include encodings that are below
                              ``UniversalDetector.MINIMUM_THRESHOLD``
                              in results.
    :type ignore_threshold:   ``bool``
    :param should_rename_legacy:  Should we rename legacy encodings
                                  to their more modern equivalents?
    :type should_rename_legacy:   ``bool``
    """
    if not isinstance(byte_str, bytearray):
        if not isinstance(byte_str, bytes):
            raise TypeError(
                f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
            )
        byte_str = bytearray(byte_str)

    detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
    detector.feed(byte_str)
    detector.close()

    if detector.input_state == InputState.HIGH_BYTE:
        results: List[ResultDict] = []
        probers: List[CharSetProber] = []
        for prober in detector.charset_probers:
            if isinstance(prober, CharSetGroupProber):
                probers.extend(p for p in prober.probers)
            else:
                probers.append(prober)
        for prober in probers:
            if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:
                charset_name = prober.charset_name or ""
                lower_charset_name = charset_name.lower()
                # Use Windows encoding name instead of ISO-8859 if we saw any
                # extra Windows-specific bytes
                if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes:
                    charset_name = detector.ISO_WIN_MAP.get(
                        lower_charset_name, charset_name
                    )
                # Rename legacy encodings with superset encodings if asked
                if should_rename_legacy:
                    charset_name = detector.LEGACY_MAP.get(
                        charset_name.lower(), charset_name
                    )
                results.append({
                    "encoding": charset_name,
                    "confidence": prober.get_confidence(),
                    "language": prober.language,
                })
        if len(results) > 0:
            return sorted(results, key=lambda result: -result["confidence"])

    return [detector.result]
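
A minimal usage sketch (not part of the covered module above), showing how the two public helpers are typically called; the sample byte string is illustrative only:

import chardet

# Bytes containing non-ASCII characters so the probers have something to examine.
data = "Überraschung".encode("windows-1252")

# Single best guess: a dict with "encoding", "confidence", and "language" keys.
print(chardet.detect(data, should_rename_legacy=True))

# All candidates, including ones below UniversalDetector.MINIMUM_THRESHOLD,
# sorted by descending confidence.
for candidate in chardet.detect_all(data, ignore_threshold=True):
    print(candidate["encoding"], candidate["confidence"], candidate["language"])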