test_html5lib.py

  1. """Tests to ensure that the html5lib tree builder generates good trees."""
  2. import warnings
  3. try:
  4. from bs4.builder import HTML5TreeBuilder
  5. HTML5LIB_PRESENT = True
  6. except ImportError as e:
  7. HTML5LIB_PRESENT = False
  8. from bs4.element import SoupStrainer
  9. from . import (
  10. HTML5TreeBuilderSmokeTest,
  11. SoupTest,
  12. skipIf,
  13. )

@skipIf(
    not HTML5LIB_PRESENT,
    "html5lib seems not to be present, not testing its tree builder.")
class TestHTML5LibBuilder(SoupTest, HTML5TreeBuilderSmokeTest):
    """See ``HTML5TreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        return HTML5TreeBuilder

    def test_soupstrainer(self):
        # The html5lib tree builder does not support SoupStrainers.
        strainer = SoupStrainer("b")
        markup = "<p>A <b>bold</b> statement.</p>"
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup(markup, parse_only=strainer)
        assert soup.decode() == self.document_for(markup)
        assert "the html5lib tree builder doesn't support parse_only" in str(w[0].message)
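
    # For contrast, a minimal sketch (not part of the original suite) of what
    # parse_only does with a builder that does support SoupStrainer, assuming
    # html.parser is available:
    #
    #   from bs4 import BeautifulSoup
    #   BeautifulSoup(markup, "html.parser", parse_only=strainer).decode()
    #   # '<b>bold</b>' -- only the matching tag is kept.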

    def test_correctly_nested_tables(self):
        """html5lib inserts <tbody> tags where other parsers don't."""
        markup = ('<table id="1">'
                  '<tr>'
                  "<td>Here's another table:"
                  '<table id="2">'
                  '<tr><td>foo</td></tr>'
                  '</table></td>')

        self.assert_soup(
            markup,
            '<table id="1"><tbody><tr><td>Here\'s another table:'
            '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
            '</td></tr></tbody></table>')

        self.assert_soup(
            "<table><thead><tr><td>Foo</td></tr></thead>"
            "<tbody><tr><td>Bar</td></tr></tbody>"
            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")

    def test_xml_declaration_followed_by_doctype(self):
        markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<p>foo</p>
</body>
</html>'''
        soup = self.soup(markup)
        # Verify that we can reach the <p> tag; this means the tree is connected.
        assert b"<p>foo</p>" == soup.p.encode()

    def test_reparented_markup(self):
        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
        soup = self.soup(markup)
        assert "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>" == soup.body.decode()
        assert 2 == len(soup.find_all('p'))

    def test_reparented_markup_ends_with_whitespace(self):
        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
        soup = self.soup(markup)
        assert "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>" == soup.body.decode()
        assert 2 == len(soup.find_all('p'))

    def test_reparented_markup_containing_identical_whitespace_nodes(self):
        """Verify that we keep the two whitespace nodes in this
        document distinct when reparenting the adjacent <tbody> tags.
        """
        markup = '<table> <tbody><tbody><ims></tbody> </table>'
        soup = self.soup(markup)
        space1, space2 = soup.find_all(string=' ')
        tbody1, tbody2 = soup.find_all('tbody')
        assert space1.next_element is tbody1
        assert tbody2.next_element is space2

    def test_reparented_markup_containing_children(self):
        markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
        soup = self.soup(markup)
        noscript = soup.noscript
        assert "target" == noscript.next_element
        target = soup.find(string='target')

        # The 'aftermath' string was duplicated; we want the second one.
        final_aftermath = soup.find_all(string='aftermath')[-1]

        # The <noscript> tag was moved beneath a copy of the <a> tag,
        # but the 'target' string within is still connected to the
        # (second) 'aftermath' string.
        assert final_aftermath == target.next_element
        assert target == final_aftermath.previous_element

    def test_processing_instruction(self):
        """Processing instructions become comments."""
        markup = b"""<?PITarget PIContent?>"""
        soup = self.soup(markup)
        assert str(soup).startswith("<!--?PITarget PIContent?-->")
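
    # Background note: HTML has no processing instructions, so the HTML5
    # parsing algorithm treats "<?" as a parse error and emits a bogus
    # comment, which is why the PI round-trips as "<!--?...?-->" above.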

    def test_cloned_multivalue_node(self):
        markup = b"""<a class="my_class"><p></a>"""
        soup = self.soup(markup)
        a1, a2 = soup.find_all('a')
        assert a1 == a2
        assert a1 is not a2
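
    # Background note: to repair the mis-nested markup, html5lib closes the
    # original <a> and places a clone of it inside the <p>, which is why
    # find_all('a') returns two tags that compare equal (same name and
    # attributes, including the multi-valued "class") yet are distinct objects.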

    def test_foster_parenting(self):
        markup = b"""<table><td></tbody>A"""
        soup = self.soup(markup)
        assert "<body>A<table><tbody><tr><td></td></tr></tbody></table></body>" == soup.body.decode()

    def test_extraction(self):
        """
        Test that extraction does not destroy the tree.

        https://bugs.launchpad.net/beautifulsoup/+bug/1782928
        """
        markup = """
<html><head></head>
<style>
</style><script></script><body><p>hello</p></body></html>
"""
        soup = self.soup(markup)
        [s.extract() for s in soup('script')]
        [s.extract() for s in soup('style')]
        assert len(soup.find_all("p")) == 1

    def test_empty_comment(self):
        """
        Test that an empty comment does not break the document structure.

        https://bugs.launchpad.net/beautifulsoup/+bug/1806598
        """
        markup = """
<html>
<body>
<form>
<!----><input type="text">
</form>
</body>
</html>
"""
        soup = self.soup(markup)
        inputs = []
        for form in soup.find_all('form'):
            inputs.extend(form.find_all('input'))
        assert len(inputs) == 1

    def test_tracking_line_numbers(self):
        # The html5lib TreeBuilder keeps track of the line number and
        # position of each element.
        markup = "\n   <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
        soup = self.soup(markup)
        assert 2 == soup.p.sourceline
        assert 5 == soup.p.sourcepos
        assert "sourceline" == soup.p.find('sourceline').name

        # You can deactivate this behavior. When line numbers aren't stored,
        # .sourceline and .sourcepos no longer return positions; the lookups
        # below find the <sourceline> and <sourcepos> tags from the markup
        # instead.
        soup = self.soup(markup, store_line_numbers=False)
        assert "sourceline" == soup.p.sourceline.name
        assert "sourcepos" == soup.p.sourcepos.name

    def test_special_string_containers(self):
        # The html5lib tree builder doesn't support this standard feature,
        # because there's no way of knowing, when a string is created,
        # where in the tree it will eventually end up.
        pass

    def test_html5_attributes(self):
        # The html5lib TreeBuilder can convert any entity named in
        # the HTML5 spec to a sequence of Unicode characters, and
        # convert those Unicode characters to a (potentially
        # different) named entity on the way out.
        #
        # This is a copy of the same test from
        # HTMLParserTreeBuilderSmokeTest. It's not in the superclass
        # because the lxml HTML TreeBuilder _doesn't_ work this way.
        for input_element, output_unicode, output_element in (
                ("&RightArrowLeftArrow;", '\u21c4', b'&rlarr;'),
                ('&models;', '\u22a7', b'&models;'),
                ('&Nfr;', '\U0001d511', b'&Nfr;'),
                ('&ngeqq;', '\u2267\u0338', b'&ngeqq;'),
                ('&not;', '\xac', b'&not;'),
                ('&Not;', '\u2aec', b'&Not;'),
                ('&quot;', '"', b'"'),
                ('&there4;', '\u2234', b'&there4;'),
                ('&Therefore;', '\u2234', b'&there4;'),
                ('&therefore;', '\u2234', b'&there4;'),
                ("&fjlig;", 'fj', b'fj'),
                ("&sqcup;", '\u2294', b'&sqcup;'),
                ("&sqcups;", '\u2294\ufe00', b'&sqcups;'),
                ("&apos;", "'", b"'"),
                ("&verbar;", "|", b"|"),
        ):
            markup = '<div>%s</div>' % input_element
            div = self.soup(markup).div
            without_element = div.encode()
            expect = b"<div>%s</div>" % output_unicode.encode("utf8")
            assert without_element == expect

            with_element = div.encode(formatter="html")
            expect = b"<div>%s</div>" % output_element
            assert with_element == expect
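
# A minimal interactive sketch of the entity round-trip exercised by
# test_html5_attributes, assuming html5lib is installed (values taken from
# the table above):
#
#   from bs4 import BeautifulSoup
#   div = BeautifulSoup("<div>&Therefore;</div>", "html5lib").div
#   div.encode()                  # b'<div>\xe2\x88\xb4</div>'  (U+2234 as UTF-8)
#   div.encode(formatter="html")  # b'<div>&there4;</div>'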