# encoding: utf-8
import pytest
import logging
import bs4
from bs4 import BeautifulSoup
from bs4.dammit import (
    EntitySubstitution,
    EncodingDetector,
    UnicodeDammit,
)


class TestUnicodeDammit(object):
    """Standalone tests of UnicodeDammit."""

    def test_unicode_input(self):
        markup = "I'm already Unicode! \N{SNOWMAN}"
        dammit = UnicodeDammit(markup)
        assert dammit.unicode_markup == markup

    def test_smart_quotes_to_unicode(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup)
        assert dammit.unicode_markup == "<foo>\u2018\u2019\u201c\u201d</foo>"

    def test_smart_quotes_to_xml_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="xml")
        assert dammit.unicode_markup == "<foo>&#x2018;&#x2019;&#x201C;&#x201D;</foo>"

    def test_smart_quotes_to_html_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="html")
        assert dammit.unicode_markup == "<foo>&lsquo;&rsquo;&ldquo;&rdquo;</foo>"

    def test_smart_quotes_to_ascii(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
        assert dammit.unicode_markup == """<foo>''""</foo>"""

    def test_detect_utf8(self):
        utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
        dammit = UnicodeDammit(utf8)
        assert dammit.original_encoding.lower() == 'utf-8'
        assert dammit.unicode_markup == 'Sacr\xe9 bleu! \N{SNOWMAN}'

    def test_convert_hebrew(self):
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
        assert dammit.original_encoding.lower() == 'iso-8859-8'
        assert dammit.unicode_markup == '\u05dd\u05d5\u05dc\u05e9'

    def test_dont_see_smart_quotes_where_there_are_none(self):
        utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
        dammit = UnicodeDammit(utf_8)
        assert dammit.original_encoding.lower() == 'utf-8'
        assert dammit.unicode_markup.encode("utf-8") == utf_8

    def test_ignore_inappropriate_codecs(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
        assert dammit.original_encoding.lower() == 'utf-8'

    def test_ignore_invalid_codecs(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        for bad_encoding in ['.utf8', '...', 'utF---16.!']:
            dammit = UnicodeDammit(utf8_data, [bad_encoding])
            assert dammit.original_encoding.lower() == 'utf-8'

    def test_exclude_encodings(self):
        # This is UTF-8.
        utf8_data = "Räksmörgås".encode("utf-8")

        # But if we exclude UTF-8 from consideration, the guess is
        # Windows-1252.
        dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"])
        assert dammit.original_encoding.lower() == 'windows-1252'

        # And if we exclude that, there is no valid guess at all.
        dammit = UnicodeDammit(
            utf8_data, exclude_encodings=["utf-8", "windows-1252"])
        assert dammit.original_encoding is None
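

# A minimal usage sketch of the decoding flow exercised by the tests above:
# feed UnicodeDammit some bytes, then read original_encoding (the codec that
# was ultimately used) and unicode_markup (the decoded text). Purely
# illustrative; the leading underscore keeps pytest from collecting it.
def _example_basic_unicode_dammit_usage(data=b"Sacr\xc3\xa9 bleu!"):
    dammit = UnicodeDammit(data)
    # For this UTF-8 input the detector settles on UTF-8 (cf. test_detect_utf8).
    return dammit.original_encoding, dammit.unicode_markup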


class TestEncodingDetector(object):

    def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self):
        detected = EncodingDetector(
            b'<?xml version="1.0" encoding="UTF-\xdb" ?>')
        encodings = list(detected.encodings)
        assert 'utf-\N{REPLACEMENT CHARACTER}' in encodings

    def test_detect_html5_style_meta_tag(self):
        for data in (
                b'<html><meta charset="euc-jp" /></html>',
                b"<html><meta charset='euc-jp' /></html>",
                b"<html><meta charset=euc-jp /></html>",
                b"<html><meta charset=euc-jp/></html>"):
            dammit = UnicodeDammit(data, is_html=True)
            assert "euc-jp" == dammit.original_encoding

    def test_last_ditch_entity_replacement(self):
        # This is a UTF-8 document that contains bytestrings
        # completely incompatible with UTF-8 (i.e. encoded with some
        # other encoding).
        #
        # Since there is no consistent encoding for the document,
        # Unicode, Dammit will eventually decode it as UTF-8, replacing
        # the incompatible bytes with REPLACEMENT CHARACTER.
        #
        # If chardet is installed, it will detect that the document
        # can be converted into ISO-8859-1 without errors. This happens
        # to be the wrong encoding, but it is a consistent encoding, so the
        # code we're testing here won't run.
        #
        # So we temporarily disable chardet if it's present.
        doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(markup):
                return None
            bs4.dammit.chardet_dammit = noop

            dammit = UnicodeDammit(doc)
            assert True == dammit.contains_replacement_characters
            assert "\ufffd" in dammit.unicode_markup

            soup = BeautifulSoup(doc, "html.parser")
            assert soup.contains_replacement_characters
        finally:
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet
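
    # A minimal sketch of how calling code might use the
    # contains_replacement_characters flag checked above: when it is set, the
    # decoded text is lossy (some bytes became U+FFFD) and the original bytes
    # may deserve a second look. Illustrative only; the name doesn't start
    # with "test_", so pytest won't collect it.
    def _example_decode_or_flag_lossy(self, data):
        dammit = UnicodeDammit(data)
        if dammit.contains_replacement_characters:
            # No consistent encoding was found; some bytes were replaced.
            return None
        return dammit.unicode_markup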

    def test_byte_order_mark_removed(self):
        # A document written in UTF-16LE will have its byte order marker stripped.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)
        assert "<a>áé</a>" == dammit.unicode_markup
        assert "utf-16le" == dammit.original_encoding

    def test_known_definite_versus_user_encodings(self):
        # The known_definite_encodings are used before sniffing the
        # byte-order mark; the user_encodings are used afterwards.

        # Here's a document in UTF-16LE.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)

        # We can process it as UTF-16 by passing it in as a known
        # definite encoding.
        before = UnicodeDammit(data, known_definite_encodings=["utf-16"])
        assert "utf-16" == before.original_encoding

        # If we pass UTF-8 as a user encoding, it's not even
        # tried--the encoding sniffed from the byte-order mark takes
        # precedence.
        after = UnicodeDammit(data, user_encodings=["utf-8"])
        assert "utf-16le" == after.original_encoding
        assert ["utf-16le"] == [x[0] for x in after.tried_encodings]

        # Here's a document in ISO-8859-8.
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, known_definite_encodings=["utf-8"],
                               user_encodings=["iso-8859-8"])

        # The known_definite_encodings don't work, BOM sniffing does
        # nothing (it only works for a few UTF encodings), but one of
        # the user_encodings does work.
        assert "iso-8859-8" == dammit.original_encoding
        assert ["utf-8", "iso-8859-8"] == [x[0] for x in dammit.tried_encodings]
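
    # A minimal sketch of the precedence the test above relies on:
    # known_definite_encodings are tried before the byte-order mark is
    # sniffed, and user_encodings only after that. tried_encodings records
    # what was actually attempted, in order. Illustrative helper, not
    # collected by pytest.
    def _example_inspect_encoding_precedence(self, data):
        dammit = UnicodeDammit(
            data,
            known_definite_encodings=["utf-8"],
            user_encodings=["iso-8859-8"],
        )
        return [encoding for encoding, errors in dammit.tried_encodings]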

    def test_deprecated_override_encodings(self):
        # override_encodings is a deprecated alias for
        # known_definite_encodings.
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(
            hebrew,
            known_definite_encodings=["shift-jis"],
            override_encodings=["utf-8"],
            user_encodings=["iso-8859-8"],
        )
        assert "iso-8859-8" == dammit.original_encoding

        # known_definite_encodings and override_encodings were tried
        # before user_encodings.
        assert ["shift-jis", "utf-8", "iso-8859-8"] == (
            [x[0] for x in dammit.tried_encodings]
        )

    def test_detwingle(self):
        # Here's a UTF-8 document.
        utf8 = ("\N{SNOWMAN}" * 3).encode("utf8")

        # Here's a Windows-1252 document.
        windows_1252 = (
            "\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
            "\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")

        # Through some unholy alchemy, they've been stuck together.
        doc = utf8 + windows_1252 + utf8

        # The document can't be turned into UTF-8:
        with pytest.raises(UnicodeDecodeError):
            doc.decode("utf8")

        # Unicode, Dammit would think the whole document is Windows-1252
        # and decode it into mojibake ("â˜ƒâ˜ƒâ˜ƒ“Hi, I like Windows!”â˜ƒâ˜ƒâ˜ƒ").
        # But if we run it through detwingle() first, it's fixed:
        fixed = UnicodeDammit.detwingle(doc)
        assert "☃☃☃“Hi, I like Windows!”☃☃☃" == fixed.decode("utf8")
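
    # A minimal sketch of the workflow detwingle() supports: when a document
    # is suspected to mix UTF-8 with embedded Windows-1252, detwingle it
    # *before* handing it to BeautifulSoup so the parser sees consistent
    # UTF-8 bytes. Illustrative helper, not collected by pytest.
    def _example_detwingle_before_parsing(self, mixed_bytes):
        fixed = UnicodeDammit.detwingle(mixed_bytes)
        return BeautifulSoup(fixed, "html.parser")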

    def test_detwingle_ignores_multibyte_characters(self):
        # Each of these strings has a UTF-8 representation ending
        # in \x93. \x93 is a smart quote if interpreted as
        # Windows-1252. But our code knows to skip over multibyte
        # UTF-8 characters, so they'll survive the process unscathed.
        for tricky_unicode_char in (
                "\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
                "\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
                "\xf0\x90\x90\x93", # Four characters whose UTF-8 encoding also ends in '\x93'
        ):
            input_bytes = tricky_unicode_char.encode("utf8")
            assert input_bytes.endswith(b'\x93')
            output = UnicodeDammit.detwingle(input_bytes)
            assert output == input_bytes

    def test_find_declared_encoding(self):
        # Test our ability to find a declared encoding inside an
        # XML or HTML document.
        #
        # Even if the document comes in as Unicode, it may be
        # interesting to know what encoding was claimed
        # originally.
        html_unicode = '<html><head><meta charset="utf-8"></head></html>'
        html_bytes = html_unicode.encode("ascii")

        xml_unicode = '<?xml version="1.0" encoding="ISO-8859-1" ?>'
        xml_bytes = xml_unicode.encode("ascii")

        m = EncodingDetector.find_declared_encoding
        assert m(html_unicode, is_html=False) is None
        assert "utf-8" == m(html_unicode, is_html=True)
        assert "utf-8" == m(html_bytes, is_html=True)
        assert "iso-8859-1" == m(xml_unicode)
        assert "iso-8859-1" == m(xml_bytes)

        # Normally, only the first few kilobytes of a document are checked for
        # an encoding.
        spacer = b' ' * 5000
        assert m(spacer + html_bytes) is None
        assert m(spacer + xml_bytes) is None

        # But you can tell find_declared_encoding to search an entire
        # HTML document.
        assert (
            m(spacer + html_bytes, is_html=True, search_entire_document=True)
            == "utf-8"
        )

        # The XML encoding declaration has to be the very first thing
        # in the document. We'll allow whitespace before the document
        # starts, but nothing else.
        assert m(xml_bytes, search_entire_document=True) == "iso-8859-1"
        assert m(b' ' + xml_bytes, search_entire_document=True) == "iso-8859-1"
        assert m(b'a' + xml_bytes, search_entire_document=True) is None
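

# A minimal sketch of the use case behind find_declared_encoding: peek at a
# byte string for a declared encoding before committing to a codec, falling
# back to UTF-8 when nothing is declared. The UTF-8 fallback is an assumption
# made for illustration, not something the tests above require.
def _example_decode_using_declared_encoding(data, is_html=True):
    declared = EncodingDetector.find_declared_encoding(data, is_html=is_html)
    return data.decode(declared or "utf-8", errors="replace")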


class TestEntitySubstitution(object):
    """Standalone tests of the EntitySubstitution class."""

    def setup_method(self):
        self.sub = EntitySubstitution

    def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entities
        # are substituted, and no others.
        s = "foo\u2200\N{SNOWMAN}\u00f5bar"
        assert self.sub.substitute_html(s) == "foo&forall;\N{SNOWMAN}&otilde;bar"

    def test_smart_quote_substitution(self):
        # MS smart quotes are a common source of frustration, so we
        # give them a special test.
        quotes = b"\x91\x92foo\x93\x94"
        dammit = UnicodeDammit(quotes)
        assert self.sub.substitute_html(dammit.markup) == "&lsquo;&rsquo;foo&ldquo;&rdquo;"

    def test_html5_entity(self):
        # Some HTML5 entities correspond to single- or multi-character
        # Unicode sequences.
        for entity, u in (
                # A few spot checks of our ability to recognize
                # special character sequences and convert them
                # to named entities.
                ('&models;', '\u22a7'),
                ('&Nfr;', '\U0001d511'),
                ('&ngeqq;', '\u2267\u0338'),
                ('&not;', '\xac'),
                ('&Not;', '\u2aec'),

                # We _could_ convert | to &verbar;, but we don't, because
                # | is an ASCII character.
                ('|', '|'),

                # Similarly for the fj ligature, which we could convert to
                # &fjlig;, but we don't.
                ("fj", "fj"),

                # We do convert _these_ ASCII characters to HTML entities,
                # because that's required to generate valid HTML.
                ('&gt;', '>'),
                ('&lt;', '<'),
                ('&amp;', '&'),
        ):
            template = '3 %s 4'
            raw = template % u
            with_entities = template % entity
            assert self.sub.substitute_html(raw) == with_entities

    def test_html5_entity_with_variation_selector(self):
        # Some HTML5 entities correspond either to a single-character
        # Unicode sequence _or_ to the same character plus U+FE00,
        # VARIATION SELECTOR 1. We can handle this.
        data = "fjords \u2294 penguins"
        markup = "fjords &sqcup; penguins"
        assert self.sub.substitute_html(data) == markup

        data = "fjords \u2294\ufe00 penguins"
        markup = "fjords &sqcups; penguins"
        assert self.sub.substitute_html(data) == markup

    def test_xml_conversion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
        s = 'Welcome to "my bar"'
        assert self.sub.substitute_xml(s, False) == s

    def test_xml_attribute_quoting_normally_uses_double_quotes(self):
        assert self.sub.substitute_xml("Welcome", True) == '"Welcome"'
        assert self.sub.substitute_xml("Bob's Bar", True) == '"Bob\'s Bar"'

    def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
        s = 'Welcome to "my bar"'
        assert self.sub.substitute_xml(s, True) == "'Welcome to \"my bar\"'"

    def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
        s = 'Welcome to "Bob\'s Bar"'
        assert self.sub.substitute_xml(s, True) == '"Welcome to &quot;Bob\'s Bar&quot;"'

    def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
        quoted = 'Welcome to "Bob\'s Bar"'
        assert self.sub.substitute_xml(quoted) == quoted

    def test_xml_quoting_handles_angle_brackets(self):
        assert self.sub.substitute_xml("foo<bar>") == "foo&lt;bar&gt;"

    def test_xml_quoting_handles_ampersands(self):
        assert self.sub.substitute_xml("AT&T") == "AT&amp;T"

    def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
        assert self.sub.substitute_xml("&Aacute;T&T") == "&amp;Aacute;T&amp;T"

    def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
        assert self.sub.substitute_xml_containing_entities("&Aacute;T&T") == "&Aacute;T&amp;T"

    def test_quotes_not_html_substituted(self):
        """There's no need to do this except inside attribute values."""
        text = 'Bob\'s "bar"'
        assert self.sub.substitute_html(text) == text
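

# A closing sketch of the three EntitySubstitution entry points exercised
# above, applied to one string each. No new behavior is assumed beyond what
# the assertions in TestEntitySubstitution already cover; illustrative only.
def _example_entity_substitution_summary():
    text = 'AT&T says "hi" <here>'
    return {
        # Named HTML entities for &, <, > (quotes are left alone).
        "html": EntitySubstitution.substitute_html(text),
        # XML escaping only; pass True to also wrap the value as a quoted
        # attribute.
        "xml": EntitySubstitution.substitute_xml(text),
        "xml_attribute": EntitySubstitution.substitute_xml(text, True),
        # Like substitute_xml, but ampersands that begin entities are left
        # untouched.
        "xml_entities": EntitySubstitution.substitute_xml_containing_entities(
            "&Aacute;T&T"),
    }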