Python 爬取所有51VOA网站的Learn a words文本及mp3音频

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Python 爬取所有51VOA网站的Learn a words文本及mp3音频
import os
import sys
import time
import urllib as req
from threading import Thread
import urllib2
import urllib
from threading import Thread
import xml
import re
class MyWorkThread(Thread, urllib.FancyURLopener):
    """Download one byte-range of a remote file in its own thread.

    Combines threading.Thread with urllib.FancyURLopener (Python 2) so
    each instance can issue its own HTTP request carrying a Range header.
    run() overrides the Thread hook method.
    """
    def __init__(self, threadname, url, filename, ranges = 0):
        # threadname: label for this worker thread
        # url:        remote file to fetch
        # filename:   local temp file holding this thread's part
        # ranges:     (start, end) inclusive byte offsets owned by this
        #             thread; the default 0 is a placeholder — callers in
        #             this file always pass a 2-tuple
        Thread.__init__(self, name = threadname)
        urllib.FancyURLopener.__init__(self)
        self.name = threadname
        self.url = url
        self.filename = filename
        self.ranges = ranges
        self.downloaded = 0  # bytes already written to the part file
    def run(self):
        """Thread entry point: download (or resume) this part."""
        # If the part file already exists, resume after what is on disk.
        try:
            self.downloaded = os.path.getsize(self.filename)
        except OSError:
            self.downloaded = 0
        # Rebuild the start point from the assigned range plus prior progress.
        self.startpoint = self.ranges[0] + self.downloaded
        # Nothing left to do if this part is already complete.
        if self.startpoint >= self.ranges[1]:
            print 'Part %s has been downloaded over.' % self.filename
            return
        self.oneTimeSize = 8 * 1024  # read 8 KB per iteration
        print 'task %s will download from %d to %d' %(self.name, self.startpoint, self.ranges[1])
        # Ask the server for just this thread's byte range.
        self.addheader('Range', 'bytes=%d-%d' %(self.startpoint, self.ranges[1]))
        self.urlhandle = self.open(self.url)
        data = self.urlhandle.read(self.oneTimeSize)
        while data:
            # Reopen in append mode for every chunk so progress written so
            # far survives a crash and the download can resume.
            filehandle = open(self.filename, 'ab+')
            filehandle.write(data)
            filehandle.close()
            self.downloaded += len(data)
            data = self.urlhandle.read(self.oneTimeSize)
def GetUrlFileSize(url):
    """Return the remote file size in bytes for *url*.

    Scans the raw response header lines for one containing 'Length'
    (i.e. Content-Length) and parses its value; returns 0 when no such
    header is present.
    """
    handle = urllib.urlopen(url)
    size = 0
    for line in handle.info().headers:
        # headers are raw "Name: value" strings in Python 2
        if 'Length' in line:
            size = int(line.split(':')[-1].strip())
    return size
def SpliteBlocks(totalsize, blocknumber):
    """Split *totalsize* bytes into *blocknumber* contiguous ranges.

    Returns a list of inclusive (start, end) tuples that exactly cover
    [0, totalsize - 1]; the last range absorbs any remainder bytes.
    """
    # Explicit floor division: Python 2's int `/` already floors, but a
    # plain `/` would produce a float under Python 3 and corrupt the
    # byte-offset arithmetic below.
    blocksize = totalsize // blocknumber
    ranges = []
    for i in range(blocknumber - 1):
        ranges.append((i * blocksize, i * blocksize + blocksize - 1))
    # The final block runs to the end of the file, picking up the remainder.
    ranges.append((blocksize * (blocknumber - 1), totalsize - 1))
    return ranges
def isLive(tasks):
    """Return True while at least one worker thread is still running.

    Uses Thread.is_alive() — the modern spelling, available since
    Python 2.6; the camel-case isAlive() was removed in Python 3.9 —
    and any() for an early exit instead of a manual loop.
    """
    return any(task.is_alive() for task in tasks)
def downLoadFile(url, output, blocks = 6):
    """Download *url* into file *output* using *blocks* parallel range threads.

    Each worker writes its byte range to a temp file (tmpfile_i); once all
    workers finish, the parts are concatenated into *output* in order and
    the temp files removed. Progress is printed roughly once per second.
    """
    sys.stdout.write('Begin to download from %s\n' %url )
    sys.stdout.flush()
    size = GetUrlFileSize(url)
    ranges = SpliteBlocks(size, blocks)
    threadname = ["thread_%d" %i for i in range(0, blocks)]
    filename = ["tmpfile_%d" %i for i in range(0, blocks)]
    tasks = []
    for i in range(0, blocks):
        task = MyWorkThread(threadname[i], url, filename[i], ranges[i])
        # daemon threads so an aborted main program does not hang
        task.setDaemon(True)
        task.start()
        tasks.append(task)
    time.sleep(2)
    while isLive(tasks):
        downloaded = sum([task.downloaded for task in tasks])
        process = downloaded / float(size) * 100
        show = u'\rFilesize: %d Downloaded:%d Completed: %.2f%%' %(size, downloaded, process)
        sys.stdout.write(show)
        # BUG FIX: `sys.stdout.flush` was referenced without parentheses,
        # so it was never actually called and progress could stall in the
        # output buffer.
        sys.stdout.flush()
        time.sleep(1)
    # Sanitise the target name, then stitch the parts together in order.
    output = formatFileName(output)
    filehandle = open(output, 'wb+')
    for i in filename:
        f = open(i, 'rb')
        filehandle.write(f.read())
        f.close()
        os.remove(i)
    filehandle.close()
    sys.stdout.write("Completed!\n")
    sys.stdout.flush()
def formatFileName(filename):
if isinstance(filename, str):
header, tail = os.path.split(filename)
if tail != '':
tuple = ('\\','/',':','*', '?', '"', '<', '>', '|')
for char in tuple:
if tail.find(char) != -1:
tail = tail.replace(char, ' ')
filename = os.path.join(header, tail)
#print filename
return filename
else:
return 'None' def remove_tags(raw_html):
cleanr =re.compile('<.*?>')
cleantext = re.sub(cleanr,'', raw_html)
return cleantext def saveword(url,name):
res=req.urlopen(url)
data=res.readlines()
res.close()
startag=r'id="mp3"'
endtag=r'</div>'
k=80
data2=''
data3=''
data4=''
while k<len(data)-10:
if(data[k].find(startag)!=-1):
data2=data[k]
if(data[k].find('<div id="content">')!=-1):
data3=data[k]
if(data[k+1].find('<p>')!=-1):
data4=data[k+1]
# if(data4.rfind('...')!=-1):
# endid = data4.find('...')+3
# else:
# endid = data4.find('</p>')
# data4 = data4[3:endid]
data4=remove_tags(data4)
k=k+1
# print data2
## data=str(data)
## data2=data[(data.find(startag)+14):data.lower().find(endtag)+3]
## data3=data[105]
# print data3
mp3url=data2[data2.find('http'):data2.find(''' title="''')-1]
if(data3.find(endtag)!=-1):
sent = data3[data3.find('今天我们要学'):data3.find(endtag)]
else:
sent = data3[data3.find('今天我们要学'):].strip('\n').strip('\r')+data4.strip('\n')
# sent = sent.replace('\n','. ')
# print mp3url,sent
f=open('LearningWord.txt','a+')
sent=remove_tags(sent)
f.write(name+'\n'+sent.strip('\r')+'\n')
f.close()
# print str(name)+'.mp3'
if(data2.find(startag)!=-1):
downLoadFile(mp3url,str(formatFileName(name.replace(':', ' ')))+'.mp3', blocks = 4) def savepage(url):
res=req.urlopen(url)
data=res.read()
res.close()
startag='''<ul><li>'''
endtag='''</li></ul>'''
data=str(data)
data2=data[data.find(startag)+12:data.find(endtag)]
linestart='href'
meddle = '''" target'''
lineend = '</a>'
urls=[]
words = []
i=data2.find(linestart)
while(i!=-1):
k = data2.find(meddle)
j = data2.find(lineend)
url = 'http://www.51voa.com/'+data2[i+6:k]
urls = urls+[url]
word = data2[k+16:j]
print i,k,j, word,url
words = words + [word]
data2=data2[j+3:]
saveword(url,word)
i=data2.find(linestart)
# break #下载所有单词
f=open('LearningWord.txt','w')
f.close()
i=53
while i<=54:
url = 'http://www.51voa.com/Learn_A_Word_'+str(i)+'.html'
savepage(url)
i=i+1 #下载指定单词
#url = "http://www.51voa.com/Voa_English_Learning/Learn_A_Word_21951.html"
#name ='9:pop up'
#saveword(url,name)

下载单词文本示例:(全部单词文本下载地址:http://pan.baidu.com/s/1o8pmojS)

2650 endorse
今天我们要学的词是 endorse. Endorse 作为动词,有支持的意思。Senator Ted Cruz endorsed Donald Trump, but later said the decision was “agonizing.” 美国联邦参议员克鲁兹支持川普,但是后来又表示,他做出这一决定十分痛苦。The New York Times endorsed Hillary Clinton for president in a Saturday editorial, and dismissed Donald Trump as “the worst nominee put forward by a major party in modern American history.” 纽约时报在星期六的社论中支持希拉里.克林顿当总统,并批评说,川普是“美国现代史上主要政党推举出的最差劲的候选人”。好的,我们今天学习的词是 endorse, endorse, endorse...
2649 deportation
今天我们要学的词是 deportation. Deportation 名词,驱逐出境,递解出境。The Obama administration said it would fully resume deportations of undocumented Haitian immigrants. 奥巴马政府表示,将全面恢复对无证海地移民的遣返工作。China and Canada have reached a new border agreement that would speed up the deportation of Chinese nationals inadmissible in Canada. 中国和加拿大达成新的边境协议,加快遣返那些本不该被允许进入加拿大的中国公民。好的,我们今天学习的词是 deportation, deportation, deportation...
2648 voluntarily
今天我们要学的词是 voluntarily. Voluntarily 副词,自愿地。The International Organization for Migrants says that more people are voluntarily returning to their home countries. 国际移民组织说,越来越多的人开始自愿返回自己的祖国。A high-tech diagnostic company voluntarily withdrew its Zika virus blood test from FDA approval. 一家高科技诊断公司自愿撤回递交美国食品药物管理局的寨卡病毒血液检测批准申请。好的,我们今天学习的词是 voluntarily, voluntarily, voluntarily...
2647 guerrilla
今天我们要学的词是 guerrilla. Guerrilla 形容词,游击队的。The Columbian government signed a peace agreement on Monday with the Revolutionary Armed Forces of Columbia (FARC), a national guerrilla movement. 哥伦比亚政府星期一跟全国游击队运动“哥伦比亚革命武装力量”签署了和平协议。The agreement needs to be approved by an Oct. 2 referendum before roughly 7,000 guerrilla fighters start their transition to civilian life. 这项协议还需经过10月2号全民公决批准,大约七千名游击队员才会开始向平民生活过渡。好的,我们今天学习的词是 guerrilla, guerrilla, guerrilla...
2646 curfew
今天我们要学的词是 curfew. Curfew 名词,宵禁。The city of Charlotte in North Carolina has lifted its midnight curfew, but the state of emergency remains in effect. 北卡罗来纳州夏洛特市取消了午夜宵禁,但是紧急状态依旧生效。Authorities in an Austrian city imposed a curfew on young immigrants following a series of sexual attacks at a local beer and wine festival. 奥地利一个城市的有关当局对未成年移民实施宵禁,此前当地一个啤酒葡萄酒节期间发生了一系列性侵事件。 好的,我们今天学习的词是 curfew, curfew, curfew...
2645 estimate
今天我们要学的词是 estimate. Estimate 动词,估计。A recent study estimates that the Indonesian forest fires that created a smoky haze last year may have caused more than 100,000 premature deaths. 一项最新研究估计,去年印尼山火引发的雾霾可能造成了10万人过早死亡。A new survey estimates that Americans own 265 million guns, but half of these guns are in the hands of only 3% of Americans. 最新调查估计,美国人拥有枪支总数2.65亿支,但其中半数都集中在3%的人手中。好的,我们今天学习的词是 estimate, estimate, estimate...
2644 mercy killing
今天我们要学的词是 mercy killing. Mercy killing 名词,安乐死。A terminally ill 17-year-old has become the first minor to be euthanized in Belgium since the age restrictions on such mercy killings were lifted in 2014. 比利时一个17岁绝症男孩安乐死,他是比利时2014年取消对安乐死年龄限制以来第一个安乐死的未成年人。The United Arab Emirates passed a new law banning all mercy killings. 阿联酋通过新法律,禁止安乐死。好的,我们今天学习的词是 mercy killing, mercy killing, mercy killing...

  

Python 爬取所有51VOA网站的Learn a words文本及mp3音频的更多相关文章

  1. [Python]爬取 游民星空网站 每周精选壁纸(1080高清壁纸) 网络爬虫

    一.检查 首先进入该网站的https://www.gamersky.com/robots.txt页面 给出提示: 弹出错误页面 注: 网络爬虫:自动或人工识别robots.txt,再进行内容爬取 约束 ...

  2. python爬取中国天气网站数据并对其进行数据可视化

    网址:http://www.weather.com.cn/textFC/hb.shtml 解析:BeautifulSoup4 爬取所有城市的最低天气   对爬取的数据进行可视化处理 按温度对城市进行排 ...

  3. python爬取网站数据

    开学前接了一个任务,内容是从网上爬取特定属性的数据.正好之前学了python,练练手. 编码问题 因为涉及到中文,所以必然地涉及到了编码的问题,这一次借这个机会算是彻底搞清楚了. 问题要从文字的编码讲 ...

  4. Python开发爬虫之BeautifulSoup解析网页篇:爬取安居客网站上北京二手房数据

    目标:爬取安居客网站上前10页北京二手房的数据,包括二手房源的名称.价格.几室几厅.大小.建造年份.联系人.地址.标签等. 网址为:https://beijing.anjuke.com/sale/ B ...

  5. 利用Python爬取电影网站

    #!/usr/bin/env python #coding = utf-8 ''' 本爬虫是用来爬取6V电影网站上的电影资源的一个小脚本程序,爬取到的电影链接会通过网页的形式显示出来 ''' impo ...

  6. python爬取网站数据保存使用的方法

    这篇文章主要介绍了使用Python从网上爬取特定属性数据保存的方法,其中解决了编码问题和如何使用正则匹配数据的方法,详情看下文     编码问题因为涉及到中文,所以必然地涉及到了编码的问题,这一次借这 ...

  7. python爬取某个网站的图片并保存到本地

    python爬取某个网站的图片并保存到本地 #coding:utf- import urllib import re import sys reload(sys) sys.setdefaultenco ...

  8. Python轻松爬取Rosimm写真网站全部图片

    RosimmImage 爬取Rosimm写真网站图片 有图有真相 def main_start(url): """ 爬虫入口,主要爬取操作 ""&qu ...

  9. 使用python爬取MedSci上的期刊信息

    使用python爬取medsci上的期刊信息,通过设定条件,然后获取相应的期刊的的影响因子排名,期刊名称,英文全称和影响因子.主要过程如下: 首先,通过分析网站http://www.medsci.cn ...

随机推荐

  1. poj3294 出现次数大于n/2 的公共子串

    Life Forms Time Limit: 5000MS   Memory Limit: 65536K Total Submissions: 13063   Accepted: 3670 Descr ...

  2. 洛谷 P1330 封锁阳光大学 Label:染色问题

    题目描述 曹是一只爱刷街的老曹,暑假期间,他每天都欢快地在阳光大学的校园里刷街.河蟹看到欢快的曹,感到不爽.河蟹决定封锁阳光大学,不让曹刷街. 阳光大学的校园是一张由N个点构成的无向图,N个点之间由M ...

  3. 元素重叠及position定位的z-index顺序

    元素位置重叠的背景常识 (x)html文档中的元素默认处于普通流(normal flow)中,也就是说其顺序由元素在文档中的先后位置决定,此时一般不会产生重叠(但指定负边距可能产生重叠).当我们用cs ...

  4. css滤镜(转载)

    STYLE="filter:filtername(fparameter1, fparameter2...)" (Filtername为滤镜的名称,fparameter1.fpara ...

  5. jQuery Mobile 入门基础教程

    jQuery Mobile是jQuery在手机上和平板设备上的版本.jQuery Mobile 不仅会给主流移动平台带来jQuery核心库,而且会发布一个完整统一的jQuery移动UI框架. jQue ...

  6. python3文字转语音

    #安装库(必须先安装pywin32) pip3 install pyttsx3 简单测试 import pyttsx3 engine = pyttsx3.init() text='name' engi ...

  7. TIDB资料收集

    https://github.com/pingcap/docs-cn https://github.com/pingcap/docs-cn/blob/master/op-guide/binary-de ...

  8. docker数据卷(转)

    原文地址:http://www.cnblogs.com/zydev/p/5809616.html Docker-数据卷和数据容器卷 容器中管理数据主要有两种方式: 数据卷(Data Volumes) ...

  9. Net WebAPI2

    SwaggerUI ASP.Net WebAPI2   目前在用ASP.NET的 WebAPI2来做后台接口开发,在与前台做测试的时候,总是需要发送一个demo给他,但是这样很麻烦的,他还有可能记不住 ...

  10. C语言程序设计I—第四周教学

    第四周教学安排 第四周是本课程的第三次课,依然是我来讲解,学生听讲,也依然继续在寻找大班授课(100人).条件有限(民办学校教学经费投入不够)的情况下如何更好的组织教学. 教学内容 第二章 用C语言编 ...