# charsetprober.py
  1. ######################## BEGIN LICENSE BLOCK ########################
  2. # The Original Code is Mozilla Universal charset detector code.
  3. #
  4. # The Initial Developer of the Original Code is
  5. # Netscape Communications Corporation.
  6. # Portions created by the Initial Developer are Copyright (C) 2001
  7. # the Initial Developer. All Rights Reserved.
  8. #
  9. # Contributor(s):
  10. # Mark Pilgrim - port to Python
  11. # Shy Shalom - original C code
  12. #
  13. # This library is free software; you can redistribute it and/or
  14. # modify it under the terms of the GNU Lesser General Public
  15. # License as published by the Free Software Foundation; either
  16. # version 2.1 of the License, or (at your option) any later version.
  17. #
  18. # This library is distributed in the hope that it will be useful,
  19. # but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  21. # Lesser General Public License for more details.
  22. #
  23. # You should have received a copy of the GNU Lesser General Public
  24. # License along with this library; if not, write to the Free Software
  25. # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  26. # 02110-1301 USA
  27. ######################### END LICENSE BLOCK #########################
  28. import logging
  29. import re
  30. from typing import Optional, Union
  31. from .enums import LanguageFilter, ProbingState
  32. INTERNATIONAL_WORDS_PATTERN = re.compile(
  33. b"[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?"
  34. )
  35. class CharSetProber:
  36. SHORTCUT_THRESHOLD = 0.95
  37. def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
  38. self._state = ProbingState.DETECTING
  39. self.active = True
  40. self.lang_filter = lang_filter
  41. self.logger = logging.getLogger(__name__)
  42. def reset(self) -> None:
  43. self._state = ProbingState.DETECTING
  44. @property
  45. def charset_name(self) -> Optional[str]:
  46. return None
  47. @property
  48. def language(self) -> Optional[str]:
  49. raise NotImplementedError
  50. def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
  51. raise NotImplementedError
  52. @property
  53. def state(self) -> ProbingState:
  54. return self._state
  55. def get_confidence(self) -> float:
  56. return 0.0
  57. @staticmethod
  58. def filter_high_byte_only(buf: Union[bytes, bytearray]) -> bytes:
  59. buf = re.sub(b"([\x00-\x7F])+", b" ", buf)
  60. return buf
  61. @staticmethod
  62. def filter_international_words(buf: Union[bytes, bytearray]) -> bytearray:
  63. """
  64. We define three types of bytes:
  65. alphabet: english alphabets [a-zA-Z]
  66. international: international characters [\x80-\xFF]
  67. marker: everything else [^a-zA-Z\x80-\xFF]
  68. The input buffer can be thought to contain a series of words delimited
  69. by markers. This function works to filter all words that contain at
  70. least one international character. All contiguous sequences of markers
  71. are replaced by a single space ascii character.
  72. This filter applies to all scripts which do not use English characters.
  73. """
  74. filtered = bytearray()
  75. # This regex expression filters out only words that have at-least one
  76. # international character. The word may include one marker character at
  77. # the end.
  78. words = INTERNATIONAL_WORDS_PATTERN.findall(buf)
  79. for word in words:
  80. filtered.extend(word[:-1])
  81. # If the last character in the word is a marker, replace it with a
  82. # space as markers shouldn't affect our analysis (they are used
  83. # similarly across all languages and may thus have similar
  84. # frequencies).
  85. last_char = word[-1:]
  86. if not last_char.isalpha() and last_char < b"\x80":
  87. last_char = b" "
  88. filtered.extend(last_char)
  89. return filtered
  90. @staticmethod
  91. def remove_xml_tags(buf: Union[bytes, bytearray]) -> bytes:
  92. """
  93. Returns a copy of ``buf`` that retains only the sequences of English
  94. alphabet and high byte characters that are not between <> characters.
  95. This filter can be applied to all scripts which contain both English
  96. characters and extended ASCII characters, but is currently only used by
  97. ``Latin1Prober``.
  98. """
  99. filtered = bytearray()
  100. in_tag = False
  101. prev = 0
  102. buf = memoryview(buf).cast("c")
  103. for curr, buf_char in enumerate(buf):
  104. # Check if we're coming out of or entering an XML tag
  105. # https://github.com/python/typeshed/issues/8182
  106. if buf_char == b">": # type: ignore[comparison-overlap]
  107. prev = curr + 1
  108. in_tag = False
  109. # https://github.com/python/typeshed/issues/8182
  110. elif buf_char == b"<": # type: ignore[comparison-overlap]
  111. if curr > prev and not in_tag:
  112. # Keep everything after last non-extended-ASCII,
  113. # non-alphabetic character
  114. filtered.extend(buf[prev:curr])
  115. # Output a space to delimit stretch we kept
  116. filtered.extend(b" ")
  117. in_tag = True
  118. # If we're not in a tag...
  119. if not in_tag:
  120. # Keep everything after last non-extended-ASCII, non-alphabetic
  121. # character
  122. filtered.extend(buf[prev:])
  123. return filtered