python爬取廖雪峰教程存為PDF
首先感謝廖老師給我們大家提供的那麼好的教程,相信大部分童鞋都看過廖老師的python教程,我也是從這個教程入了門。後來又開始接著學JavaScript,不過每次都要用瀏覽器上網瀏覽太麻煩,所以就用爬蟲爬下來保存為PDF格式。不過缺點就是沒有目錄。廢話不多說,上代碼。
# coding=utf-8
"""Crawl Liao Xuefeng's Python tutorial and save it as a single PDF.

Downloads every chapter listed in the site's side navigation, saves each as
a local HTML file, renders each to an individual PDF with wkhtmltopdf (via
pdfkit), then merges them into one document with PyPDF2.
"""
import os
import re
import sys
import time

import pdfkit
import requests
from bs4 import BeautifulSoup
from PyPDF2 import PdfFileMerger, PdfFileReader, PdfFileWriter

# Per the post's update note: the site moved to HTTPS and requests need a
# browser-like User-Agent, otherwise the fetch fails.
BASE_URL = "https://www.liaoxuefeng.com"
HEADERS = {
    "User-Agent": ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                   "AppleWebKit/537.36 (KHTML, like Gecko) "
                   "Chrome/90.0 Safari/537.36"),
}

html_template = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
</head>
<body>
{content}
</body>
</html>
"""

# wkhtmltopdf install location (Windows); pdfkit needs the explicit path.
path_wk = r"C:\Program Files\wkhtmltopdf\bin\wkhtmltopdf.exe"
config = pdfkit.configuration(wkhtmltopdf=path_wk)


# ----------------------------------------------------------------------
def parse_url_to_html(url, name):
    """Fetch one tutorial page and save its article body as a local HTML file.

    :param url: page URL to fetch
    :param name: file name the HTML is written to
    :return: ``name`` on success, ``None`` on failure
    """
    try:
        response = requests.get(url, headers=HEADERS)
        soup = BeautifulSoup(response.content, "html.parser")
        # Article body
        body = soup.find_all(class_="x-wiki-content")[0]
        # Page title
        title = soup.find("h4").get_text()

        # Prepend the title to the body, centered.
        center_tag = soup.new_tag("center")
        title_tag = soup.new_tag("h1")
        title_tag.string = title
        center_tag.insert(1, title_tag)
        body.insert(1, center_tag)
        html = str(body)

        # Rewrite relative <img src> paths to absolute URLs.
        pattern = r'(<img .*?src=")(.*?)(")'

        def absolutize(m):
            # BUG FIX: the original tested m.group(3) — the closing quote,
            # which never starts with "http" — so the domain was prepended
            # to every src, corrupting already-absolute URLs. The URL is
            # group(2).
            if not m.group(2).startswith("http"):
                return m.group(1) + BASE_URL + m.group(2) + m.group(3)
            return m.group(1) + m.group(2) + m.group(3)

        html = re.compile(pattern).sub(absolutize, html)
        html = html_template.format(content=html)
        with open(name, "wb") as f:
            f.write(html.encode("utf-8"))
        return name
    except Exception as e:
        # Best-effort: report which page failed instead of a bare message.
        print("解析錯誤! %s: %s" % (url, e))
        return None


# ----------------------------------------------------------------------
def get_url_list():
    """Return the URL of every chapter listed in the tutorial's side menu."""
    index_url = BASE_URL + "/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000"
    response = requests.get(index_url, headers=HEADERS)
    soup = BeautifulSoup(response.content, "html.parser")
    # The second "uk-nav uk-nav-side" list is the chapter navigation.
    menu_tag = soup.find_all(class_="uk-nav uk-nav-side")[1]
    urls = []
    for li in menu_tag.find_all("div"):
        urls.append(BASE_URL + li.a.get("href"))
    return urls


# ----------------------------------------------------------------------
def save_pdf(htmls, file_name):
    """Render HTML file(s) into one PDF with wkhtmltopdf.

    :param htmls: an HTML file name (or list of file names) to convert
    :param file_name: output PDF file name
    """
    options = {
        "page-size": "Letter",
        "margin-top": "0.75in",
        "margin-right": "0.75in",
        "margin-bottom": "0.75in",
        "margin-left": "0.75in",
        "encoding": "UTF-8",
        "custom-header": [
            ("Accept-Encoding", "gzip"),
        ],
        "cookie": [
            ("cookie-name1", "cookie-value1"),
            ("cookie-name2", "cookie-value2"),
        ],
        "outline-depth": 10,
    }
    pdfkit.from_file(htmls, file_name, options=options, configuration=config)


# ----------------------------------------------------------------------
def main():
    """Download every chapter, convert each to a PDF, and merge the results."""
    start = time.time()
    file_name = u"liaoxuefeng_Python3_tutorial"
    urls = get_url_list()
    for index, url in enumerate(urls):
        parse_url_to_html(url, str(index) + ".html")

    htmls = []
    pdfs = []
    print(len(urls))
    for i in range(len(urls)):
        html = str(i) + ".html"
        pdf = file_name + str(i) + ".pdf"
        htmls.append(html)
        pdfs.append(pdf)
        save_pdf(html, pdf)
        print(u"轉換完成第" + str(i) + u"個html")

    print(pdfs)
    # Merge all per-chapter PDFs into one document. The source streams must
    # stay open until pdf_output.write() runs, because PdfFileReader reads
    # page content lazily — so the handles are closed only after writing
    # (the original leaked them by never closing at all).
    pdf_output = PdfFileWriter()
    open_files = []
    for pdf in pdfs:
        fh = open(pdf, "rb")
        open_files.append(fh)
        pdf_input = PdfFileReader(fh)
        page_count = pdf_input.getNumPages()
        print(page_count)
        for page in range(page_count):
            pdf_output.addPage(pdf_input.getPage(page))

    with open(u"廖雪峰Python_all.pdf", "wb") as output:
        pdf_output.write(output)
    for fh in open_files:
        fh.close()

    print(u"輸出PDF成功!")

    # Remove the per-chapter temporary files.
    for html in htmls:
        os.remove(html)
        print(u"刪除臨時文件" + html)
    for pdf in pdfs:
        os.remove(pdf)
        print(u"刪除臨時文件" + pdf)

    total_time = time.time() - start
    print(u"總共耗時:%f 秒" % total_time)


# ----------------------------------------------------------------------
def changeDir(dir_name):
    """Create ``dir_name`` if it does not exist, then chdir into it."""
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    os.chdir(dir_name)


# ----------------------------------------------------------------------
if __name__ == "__main__":
    # Working directory for temp files and the final PDF.
    # NOTE(review): the mangled source read "c:12" — presumably C:\12 with
    # the backslash stripped; confirm before running.
    dir_name = r"c:\12"
    changeDir(dir_name)
    main()
代碼很簡單,就是獲取所有博客左側導航欄對應的所有URL,然後將每個url解析出來保存成html,再將每個html保存成單個pdf文件,最後合併pdf文件。需要注意的是windows下需要安裝wkhtmltopdf.exe這個軟體,並在python代碼里指明這個程序的路徑。不然合併時會報錯。
圖1,下載html保存成pdf
合併成pdf
內容:
最後歡迎各位同學找我交流討論。
最新更新:按照這個代碼目前無法抓取,因為廖老師把網站改成https了。對應代碼要做修改。
而且Requests 請求里需要加入User-agent模擬瀏覽器請求,就可以了。
推薦閱讀:
※17個新手常見Python運行時錯誤
※python的函數式編程機制
※python中list, array的轉換
※Python 文本解析器
※Python課件的中文版