I previously wrote about using phantomjs as a crawler to scrape web pages. This time the notes are on BeautifulSoup: once a page has been parsed into a soup object, you can walk the parse tree with attributes such as .contents, .children, .descendants and .string.

head_tag = soup.head
head_tag
# <head><title>The Dormouse's story</title></head>

head_tag.contents
# [<title>The Dormouse's story</title>]

title_tag = head_tag.contents[0]
title_tag
# <title>The Dormouse's story</title>
title_tag.contents
# [u'The Dormouse's story']

len(soup.contents)
# 1
soup.contents[0].name
# u'html'

# A NavigableString has no children of its own, so .contents fails on it.
text = title_tag.contents[0]
text.contents
# AttributeError: 'NavigableString' object has no attribute 'contents'

for child in title_tag.children:
    print(child)
# The Dormouse's story

head_tag.contents
# [<title>The Dormouse's story</title>]

for child in head_tag.descendants:
    print(child)
# <title>The Dormouse's story</title>
# The Dormouse's story

len(list(soup.children))
# 1
len(list(soup.descendants))
# 25

title_tag.string
# u'The Dormouse's story'
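The snippets above operate on a soup built from the "Dormouse's story" example document used in the BeautifulSoup documentation. A minimal setup sketch, assuming that markup and the built-in html.parser:

from bs4 import BeautifulSoup

# The "Dormouse's story" document from the BeautifulSoup docs.
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""

# html.parser is assumed here; lxml or html5lib could be used instead.
soup = BeautifulSoup(html_doc, 'html.parser')

soup.find_all('a')
# [<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
#  <a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
#  <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]

With this setup, each snippet above can be pasted into an interactive session and should produce the commented output, although the exact descendant count can vary slightly depending on the parser used.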
