The resources on jb51 are fairly complete, so I decided to use Python to automatically collect the information and download the files.
Python has a rich set of powerful libraries; with urllib, re, and a few others you can easily put together a web information collector!
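To make the idea concrete, here is a minimal sketch of the approach (Python 2, same as the full script below): fetch a page with urllib2, then pull links out with re. The URL and the regex here are illustrative placeholders, not taken from the full script.

# -*- coding: utf-8 -*-
# Minimal sketch (Python 2): fetch a page and extract links.
import re
import urllib2

def fetch(url):
    html = urllib2.urlopen(urllib2.Request(url)).read()
    # jb51 pages are GBK-encoded; normalize to UTF-8 before matching
    return html.decode('gbk', 'ignore').encode('utf-8', 'ignore')

if __name__ == '__main__':
    html = fetch('http://www.jb51.net/books/')
    # hypothetical pattern: turn each /books/<id>.html link into an absolute URL
    for link in re.findall(r'href="(/books/\d+\.html)"', html):
        print 'http://www.jb51.net' + link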
Below is an example script I wrote that collects all the ebook resources from a particular column of a technical site and downloads them to local storage.
A screenshot of the software running is shown below:
While the script runs, it not only prints information to the shell window but also writes a log to a txt file, recording each collected page URL, the book's title and size, the server's direct download URL, and the Baidu Netdisk download URL.
This example collects and downloads the ebook resources from the Python column of 武林站長站 (jb51.net):
# -*- coding:utf-8 -*-
import re
import urllib2
import urllib
import sys
import os

# Python 2 hack so Chinese strings can be written without explicit encoding
reload(sys)
sys.setdefaultencoding('utf-8')

def getHtml(url):
    request = urllib2.Request(url)
    page = urllib2.urlopen(request)
    htmlcontent = page.read()
    # fix mojibake: the site is GBK-encoded, so re-encode to UTF-8
    htmlcontent = htmlcontent.decode('gbk', 'ignore').encode('utf8', 'ignore')
    return htmlcontent

def report(count, blockSize, totalSize):
    # progress callback for urllib.urlretrieve
    percent = int(count * blockSize * 100 / totalSize)
    sys.stdout.write("\r%d%%" % percent + ' complete')
    sys.stdout.flush()

def getBookInfo(url):
    htmlcontent = getHtml(url)
    #print "htmlcontent=", htmlcontent  # you should see the output html
    #<h1 class="h1user">crifan</h1>
    regex_title = r'<h1\s+?itemprop="name">(?P<title>.+?)</h1>'
    title = re.search(regex_title, htmlcontent)
    if title:
        title = title.group("title")
        print "Book title:", title
        file_object.write('Book title:' + title + '\r')
    else:
        title = ''  # fall back so the writes below don't crash
    #<li>書籍大小:<span itemprop="fileSize">27.2MB</span></li>
    filesize = re.search(r'<span\s+?itemprop="fileSize">(?P<filesize>.+?)</span>', htmlcontent)
    if filesize:
        filesize = filesize.group("filesize")
        print "File size:", filesize
        file_object.write('File size:' + filesize + '\r')
    #<div class="picthumb"><a target="_blank"
    bookimg = re.search(r'<div\s+?class="picthumb"><a href="(?P<bookimg>.+?)" rel="external nofollow" target="_blank"', htmlcontent)
    if bookimg:
        bookimg = bookimg.group("bookimg")
        print "Cover image:", bookimg
        file_object.write('Cover image:' + bookimg + '\r')
    #<li><a target="_blank">酷云中國電信下載</a></li>
    # the Chinese anchor text must stay as-is: it matches the page's HTML
    downurl1 = re.search(r'<li><a href="(?P<downurl1>.+?)" rel="external nofollow" target="_blank">酷云中國電信下載</a></li>', htmlcontent)
    if downurl1:
        downurl1 = downurl1.group("downurl1")
        print "Download URL 1:", downurl1
        file_object.write('Download URL 1:' + downurl1 + '\r')
        sys.stdout.write('\rFetching ' + title + '...\n')
        # strip characters that would break the save path
        title = title.replace(' ', '')
        title = title.replace('/', '')
        saveFile = '/Users/superl/Desktop/pythonbook/' + title + '.rar'
        if os.path.exists(saveFile):
            print "This file has already been downloaded!"
        else:
            urllib.urlretrieve(downurl1, saveFile, reporthook=report)
            sys.stdout.write("\rDownload complete, saved as %s" % (saveFile) + '\n\n')
            sys.stdout.flush()
            file_object.write('File downloaded successfully!\r')
    else:
        print "Download URL 1 does not exist"
        file_error.write(url + '\r')
        file_error.write(title + " download URL 1 does not exist! File was not downloaded!\r")
        file_error.write('\r')
    #<li><a rel="external nofollow" target="_blank">百度網盤下載2</a></li>
    downurl2 = re.search(r'</a></li><li><a href="(?P<downurl2>.+?)" rel="external nofollow" target="_blank">百度網盤下載2</a></li>', htmlcontent)
    if downurl2:
        downurl2 = downurl2.group("downurl2")
        print "Download URL 2:", downurl2
        file_object.write('Download URL 2:' + downurl2 + '\r')
    else:
        #file_error.write(url + '\r')
        print "Download URL 2 does not exist"
        file_error.write(title + " download URL 2 does not exist\r")
        file_error.write('\r')
    file_object.write('\r')
    print "\n"

def getBooksUrl(url):
    htmlcontent = getHtml(url)
    #<ul class="cur-cat-list"><a href="/books/438381.html" rel="external nofollow" class="tit"</ul></div><!--end #content -->
    urls = re.findall(r'<a href="(?P<urls>.+?)" rel="external nofollow" class="tit"', htmlcontent)
    for url in urls:
        url = "http://www.jb51.net" + url
        print url + "\n"
        file_object.write(url + '\r')
        getBookInfo(url)
        #print "url->", url

if __name__ == "__main__":
    # module-level handles, used as globals by the functions above
    file_object = open('/Users/superl/Desktop/python.txt', 'w+')
    file_error = open('/Users/superl/Desktop/pythonerror.txt', 'w+')
    pagenum = 3
    for pagevalue in range(1, pagenum + 1):
        listurl = "http://www.jb51.net/books/list476_%d.html" % pagevalue
        print listurl
        file_object.write(listurl + '\r')
        getBooksUrl(listurl)
    file_object.close()
    file_error.close()
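Note that the script targets Python 2 (urllib2, reload(sys), print statements). As a rough sketch only, not part of the original script, the getHtml step under Python 3 would look something like this, since urllib2 was folded into urllib.request:

# Rough Python 3 equivalent of getHtml (an assumption, not the author's code)
import urllib.request

def get_html(url):
    with urllib.request.urlopen(url) as page:
        # decode the GBK page straight to a unicode str
        return page.read().decode('gbk', 'ignore')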