# models.py
  1. import warnings
  2. from collections import Counter
  3. from encodings.aliases import aliases
  4. from hashlib import sha256
  5. from json import dumps
  6. from re import sub
  7. from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
  8. from .constant import NOT_PRINTABLE_PATTERN, TOO_BIG_SEQUENCE
  9. from .md import mess_ratio
  10. from .utils import iana_name, is_multi_byte_encoding, unicode_range
  11. class CharsetMatch:
  12. def __init__(
  13. self,
  14. payload: bytes,
  15. guessed_encoding: str,
  16. mean_mess_ratio: float,
  17. has_sig_or_bom: bool,
  18. languages: "CoherenceMatches",
  19. decoded_payload: Optional[str] = None,
  20. ):
  21. self._payload = payload # type: bytes
  22. self._encoding = guessed_encoding # type: str
  23. self._mean_mess_ratio = mean_mess_ratio # type: float
  24. self._languages = languages # type: CoherenceMatches
  25. self._has_sig_or_bom = has_sig_or_bom # type: bool
  26. self._unicode_ranges = None # type: Optional[List[str]]
  27. self._leaves = [] # type: List[CharsetMatch]
  28. self._mean_coherence_ratio = 0.0 # type: float
  29. self._output_payload = None # type: Optional[bytes]
  30. self._output_encoding = None # type: Optional[str]
  31. self._string = decoded_payload # type: Optional[str]
  32. def __eq__(self, other: object) -> bool:
  33. if not isinstance(other, CharsetMatch):
  34. raise TypeError(
  35. "__eq__ cannot be invoked on {} and {}.".format(
  36. str(other.__class__), str(self.__class__)
  37. )
  38. )
  39. return self.encoding == other.encoding and self.fingerprint == other.fingerprint
  40. def __lt__(self, other: object) -> bool:
  41. """
  42. Implemented to make sorted available upon CharsetMatches items.
  43. """
  44. if not isinstance(other, CharsetMatch):
  45. raise ValueError
  46. chaos_difference = abs(self.chaos - other.chaos) # type: float
  47. coherence_difference = abs(self.coherence - other.coherence) # type: float
  48. # Bellow 1% difference --> Use Coherence
  49. if chaos_difference < 0.01 and coherence_difference > 0.02:
  50. # When having a tough decision, use the result that decoded as many multi-byte as possible.
  51. if chaos_difference == 0.0 and self.coherence == other.coherence:
  52. return self.multi_byte_usage > other.multi_byte_usage
  53. return self.coherence > other.coherence
  54. return self.chaos < other.chaos
  55. @property
  56. def multi_byte_usage(self) -> float:
  57. return 1.0 - len(str(self)) / len(self.raw)
  58. @property
  59. def chaos_secondary_pass(self) -> float:
  60. """
  61. Check once again chaos in decoded text, except this time, with full content.
  62. Use with caution, this can be very slow.
  63. Notice: Will be removed in 3.0
  64. """
  65. warnings.warn(
  66. "chaos_secondary_pass is deprecated and will be removed in 3.0",
  67. DeprecationWarning,
  68. )
  69. return mess_ratio(str(self), 1.0)
  70. @property
  71. def coherence_non_latin(self) -> float:
  72. """
  73. Coherence ratio on the first non-latin language detected if ANY.
  74. Notice: Will be removed in 3.0
  75. """
  76. warnings.warn(
  77. "coherence_non_latin is deprecated and will be removed in 3.0",
  78. DeprecationWarning,
  79. )
  80. return 0.0
  81. @property
  82. def w_counter(self) -> Counter:
  83. """
  84. Word counter instance on decoded text.
  85. Notice: Will be removed in 3.0
  86. """
  87. warnings.warn(
  88. "w_counter is deprecated and will be removed in 3.0", DeprecationWarning
  89. )
  90. string_printable_only = sub(NOT_PRINTABLE_PATTERN, " ", str(self).lower())
  91. return Counter(string_printable_only.split())
  92. def __str__(self) -> str:
  93. # Lazy Str Loading
  94. if self._string is None:
  95. self._string = str(self._payload, self._encoding, "strict")
  96. return self._string
  97. def __repr__(self) -> str:
  98. return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)
  99. def add_submatch(self, other: "CharsetMatch") -> None:
  100. if not isinstance(other, CharsetMatch) or other == self:
  101. raise ValueError(
  102. "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
  103. other.__class__
  104. )
  105. )
  106. other._string = None # Unload RAM usage; dirty trick.
  107. self._leaves.append(other)
  108. @property
  109. def encoding(self) -> str:
  110. return self._encoding
  111. @property
  112. def encoding_aliases(self) -> List[str]:
  113. """
  114. Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855.
  115. """
  116. also_known_as = [] # type: List[str]
  117. for u, p in aliases.items():
  118. if self.encoding == u:
  119. also_known_as.append(p)
  120. elif self.encoding == p:
  121. also_known_as.append(u)
  122. return also_known_as
  123. @property
  124. def bom(self) -> bool:
  125. return self._has_sig_or_bom
  126. @property
  127. def byte_order_mark(self) -> bool:
  128. return self._has_sig_or_bom
  129. @property
  130. def languages(self) -> List[str]:
  131. """
  132. Return the complete list of possible languages found in decoded sequence.
  133. Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'.
  134. """
  135. return [e[0] for e in self._languages]
  136. @property
  137. def language(self) -> str:
  138. """
  139. Most probable language found in decoded sequence. If none were detected or inferred, the property will return
  140. "Unknown".
  141. """
  142. if not self._languages:
  143. # Trying to infer the language based on the given encoding
  144. # Its either English or we should not pronounce ourselves in certain cases.
  145. if "ascii" in self.could_be_from_charset:
  146. return "English"
  147. # doing it there to avoid circular import
  148. from charset_normalizer.cd import encoding_languages, mb_encoding_languages
  149. languages = (
  150. mb_encoding_languages(self.encoding)
  151. if is_multi_byte_encoding(self.encoding)
  152. else encoding_languages(self.encoding)
  153. )
  154. if len(languages) == 0 or "Latin Based" in languages:
  155. return "Unknown"
  156. return languages[0]
  157. return self._languages[0][0]
  158. @property
  159. def chaos(self) -> float:
  160. return self._mean_mess_ratio
  161. @property
  162. def coherence(self) -> float:
  163. if not self._languages:
  164. return 0.0
  165. return self._languages[0][1]
  166. @property
  167. def percent_chaos(self) -> float:
  168. return round(self.chaos * 100, ndigits=3)
  169. @property
  170. def percent_coherence(self) -> float:
  171. return round(self.coherence * 100, ndigits=3)
  172. @property
  173. def raw(self) -> bytes:
  174. """
  175. Original untouched bytes.
  176. """
  177. return self._payload
  178. @property
  179. def submatch(self) -> List["CharsetMatch"]:
  180. return self._leaves
  181. @property
  182. def has_submatch(self) -> bool:
  183. return len(self._leaves) > 0
  184. @property
  185. def alphabets(self) -> List[str]:
  186. if self._unicode_ranges is not None:
  187. return self._unicode_ranges
  188. # list detected ranges
  189. detected_ranges = [
  190. unicode_range(char) for char in str(self)
  191. ] # type: List[Optional[str]]
  192. # filter and sort
  193. self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
  194. return self._unicode_ranges
  195. @property
  196. def could_be_from_charset(self) -> List[str]:
  197. """
  198. The complete list of encoding that output the exact SAME str result and therefore could be the originating
  199. encoding.
  200. This list does include the encoding available in property 'encoding'.
  201. """
  202. return [self._encoding] + [m.encoding for m in self._leaves]
  203. def first(self) -> "CharsetMatch":
  204. """
  205. Kept for BC reasons. Will be removed in 3.0.
  206. """
  207. return self
  208. def best(self) -> "CharsetMatch":
  209. """
  210. Kept for BC reasons. Will be removed in 3.0.
  211. """
  212. return self
  213. def output(self, encoding: str = "utf_8") -> bytes:
  214. """
  215. Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
  216. Any errors will be simply ignored by the encoder NOT replaced.
  217. """
  218. if self._output_encoding is None or self._output_encoding != encoding:
  219. self._output_encoding = encoding
  220. self._output_payload = str(self).encode(encoding, "replace")
  221. return self._output_payload # type: ignore
  222. @property
  223. def fingerprint(self) -> str:
  224. """
  225. Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
  226. """
  227. return sha256(self.output()).hexdigest()
  228. class CharsetMatches:
  229. """
  230. Container with every CharsetMatch items ordered by default from most probable to the less one.
  231. Act like a list(iterable) but does not implements all related methods.
  232. """
  233. def __init__(self, results: List[CharsetMatch] = None):
  234. self._results = sorted(results) if results else [] # type: List[CharsetMatch]
  235. def __iter__(self) -> Iterator[CharsetMatch]:
  236. yield from self._results
  237. def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
  238. """
  239. Retrieve a single item either by its position or encoding name (alias may be used here).
  240. Raise KeyError upon invalid index or encoding not present in results.
  241. """
  242. if isinstance(item, int):
  243. return self._results[item]
  244. if isinstance(item, str):
  245. item = iana_name(item, False)
  246. for result in self._results:
  247. if item in result.could_be_from_charset:
  248. return result
  249. raise KeyError
  250. def __len__(self) -> int:
  251. return len(self._results)
  252. def __bool__(self) -> bool:
  253. return len(self._results) > 0
  254. def append(self, item: CharsetMatch) -> None:
  255. """
  256. Insert a single match. Will be inserted accordingly to preserve sort.
  257. Can be inserted as a submatch.
  258. """
  259. if not isinstance(item, CharsetMatch):
  260. raise ValueError(
  261. "Cannot append instance '{}' to CharsetMatches".format(
  262. str(item.__class__)
  263. )
  264. )
  265. # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
  266. if len(item.raw) <= TOO_BIG_SEQUENCE:
  267. for match in self._results:
  268. if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
  269. match.add_submatch(item)
  270. return
  271. self._results.append(item)
  272. self._results = sorted(self._results)
  273. def best(self) -> Optional["CharsetMatch"]:
  274. """
  275. Simply return the first match. Strict equivalent to matches[0].
  276. """
  277. if not self._results:
  278. return None
  279. return self._results[0]
  280. def first(self) -> Optional["CharsetMatch"]:
  281. """
  282. Redundant method, call the method best(). Kept for BC reasons.
  283. """
  284. return self.best()
  285. CoherenceMatch = Tuple[str, float]
  286. CoherenceMatches = List[CoherenceMatch]
  287. class CliDetectionResult:
  288. def __init__(
  289. self,
  290. path: str,
  291. encoding: Optional[str],
  292. encoding_aliases: List[str],
  293. alternative_encodings: List[str],
  294. language: str,
  295. alphabets: List[str],
  296. has_sig_or_bom: bool,
  297. chaos: float,
  298. coherence: float,
  299. unicode_path: Optional[str],
  300. is_preferred: bool,
  301. ):
  302. self.path = path # type: str
  303. self.unicode_path = unicode_path # type: Optional[str]
  304. self.encoding = encoding # type: Optional[str]
  305. self.encoding_aliases = encoding_aliases # type: List[str]
  306. self.alternative_encodings = alternative_encodings # type: List[str]
  307. self.language = language # type: str
  308. self.alphabets = alphabets # type: List[str]
  309. self.has_sig_or_bom = has_sig_or_bom # type: bool
  310. self.chaos = chaos # type: float
  311. self.coherence = coherence # type: float
  312. self.is_preferred = is_preferred # type: bool
  313. @property
  314. def __dict__(self) -> Dict[str, Any]: # type: ignore
  315. return {
  316. "path": self.path,
  317. "encoding": self.encoding,
  318. "encoding_aliases": self.encoding_aliases,
  319. "alternative_encodings": self.alternative_encodings,
  320. "language": self.language,
  321. "alphabets": self.alphabets,
  322. "has_sig_or_bom": self.has_sig_or_bom,
  323. "chaos": self.chaos,
  324. "coherence": self.coherence,
  325. "unicode_path": self.unicode_path,
  326. "is_preferred": self.is_preferred,
  327. }
  328. def to_json(self) -> str:
  329. return dumps(self.__dict__, ensure_ascii=True, indent=4)