需要转载的小伙伴转载后请注明转载的地址
需要用到的库
- from bs4 import BeautifulSoup
- import requests
- import time
class spiderstory(object):
    """Scrape a novel from the 365haoshu chapter catalog and save it to a text file.

    NOTE(review): the class header and the start of __init__ were lost when the
    source was pasted; they are reconstructed here. The base URL's host also
    appears to have been dropped in extraction (the paste reads
    'http:///Book/Chapter/') — confirm the real host before running.
    """

    def __init__(self):
        # Base chapter URL ("365好书" link). Host lost in the paste — TODO confirm.
        self.url = 'http:///Book/Chapter/'
        self.names = []   # chapter titles
        self.hrefs = []   # absolute chapter links

    def get_urlAndName(self):
        """Fetch the chapter index page and fill self.names / self.hrefs."""
        req = requests.get(url=self.url + 'List.aspx?NovelId=0326')
        time.sleep(1)  # be polite to the server between requests
        index_soup = BeautifulSoup(req.text, "html.parser")
        catalog_divs = index_soup.find_all('div', class_='user-catalog-ul-li')
        # Fix: the original re-parsed str(div) with BeautifulSoup and no explicit
        # parser (GuessedAtParserWarning, parser-dependent results). Search the
        # already-parsed tags directly instead.
        for div in catalog_divs:
            for a in div.find_all('a'):
                span = a.find('span')
                href = a.get('href')
                # Skip malformed anchors instead of crashing on None.
                if span is None or span.string is None or href is None:
                    continue
                self.names.append(span.string)
                self.hrefs.append(self.url + href)

    def get_text(self, url):
        """Download one chapter page and return its paragraphs as a list of str."""
        req = requests.get(url=url)
        soup = BeautifulSoup(req.text, "html.parser")
        containers = soup.find_all('div', class_='container user-reading-online pos-rel')
        text = []
        for container in containers:
            for p in container.find_all('p', class_='p-content'):
                # Fix: p.string is None for tags with nested markup; the original
                # appended None, which later made f.writelines() crash.
                if p.string is not None:
                    text.append(p.string)
        print(text)
        return text  # list of paragraph strings

    def writer(self, name, path, text):
        """Append one chapter (title line + paragraphs) to the file at `path`."""
        with open(path, 'a', encoding='utf-8') as f:
            f.write(name + '\n')
            f.writelines(text)
            f.write('\n\n')


if __name__ == "__main__":  # script entry point
    s = spiderstory()
    s.get_urlAndName()
    for name, href in zip(s.names, s.hrefs):
        # str() kept from the original: the file stores the list's repr,
        # preserving the original output format.
        text = str(s.get_text(href))
        s.writer(name, "我以月夜寄相思.txt", text)
    print('下载完毕!!!')
以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持。