X-Git-Url: https://git.frykholm.com/svtplaydump.git/blobdiff_plain/84f7ef7d7584cc66c65abaa6c40f002dbd3aad47..d26e691949687bb63dd9177fc5e32a7b2816cff6:/svtplaydump.py

diff --git a/svtplaydump.py b/svtplaydump.py
index 5348f61..39bc9ff 100755
--- a/svtplaydump.py
+++ b/svtplaydump.py
@@ -22,7 +22,7 @@
 # 0.2 added python 2.4 urlparse compatibility
 # 0.1 initial release
 
-from bs4 import BeautifulSoup
+from bs4 import BeautifulSoup, Doctype
 from subprocess import *
 import re
 from Crypto.Cipher import AES
@@ -31,23 +31,42 @@ import argparse
 import requests
 import sys, os
 
 
-def scrape_player_page(url, title):
+class Video(dict):
+    def __init__(self, *args, **kwargs):
+        self.update(dict(*args, **kwargs)) # use the free update to set keys
+
+    def __setattr__(self, name, value):
+        return self.__setitem__(name,value)
+
+    def __getattr__(self, name):
+        return self.__getitem__(name)
+
+    def is_downloaded(self):
+        raise NotImplementedError
+
+def scrape_player_page(video):
     """ Try to scrape the site for video and download.
     """
-    if not url.startswith('http'):
-        url = "http://www.svtplay.se" + url
-    video = {}
-    soup = BeautifulSoup(requests.get(url).text)
+    if not video['url'].startswith('http'):
+        video['url'] = "http://www.svtplay.se" + video['url']
+    soup = BeautifulSoup(requests.get(video['url']).text)
     video_player = soup.body('a',{'data-json-href':True})[0]
-    if video_player.attrs['data-json-href'].startswith("/wd"):
-        flashvars = requests.get("http://www.svt.se/%s"%video_player.attrs['data-json-href']).json()
+    if 'oppetarkiv.se' in video['url']:
+        flashvars = requests.get("http://www.oppetarkiv.se/%s"%video_player.attrs['data-json-href']+"?output=json").json()
     else:
-        flashvars = requests.get("http://www.svtplay.se/%s"%video_player.attrs['data-json-href']+"?output=json").json()
+        if video_player.attrs['data-json-href'].startswith("/wd"):
+            flashvars = requests.get("http://www.svt.se/%s"%video_player.attrs['data-json-href']).json()
+        else:
+            flashvars = requests.get("http://www.svtplay.se/%s"%video_player.attrs['data-json-href']+"?output=json").json()
     video['duration'] = video_player.attrs.get('data-length',0)
-    video['title'] = title
-    if not title:
+    if not video.get('title'):
         video['title'] = soup.find('meta',{'property':'og:title'}).attrs['content'].replace('|','_').replace('/','_')
+    if not 'genre' in video:
+        if soup.find(text='Kategori:'):
+            video['genre'] = soup.find(text='Kategori:').parent.parent.a.text
+        else:
+            video['genre'] = 'Ingen Genre'
     if 'dynamicStreams' in flashvars:
         video['url'] = flashvars['dynamicStreams'][0].split('url:')[1].split('.mp4,')[0] +'.mp4'
         filename = video['title']+".mp4"
@@ -58,15 +77,15 @@ def scrape_player_page(url, title):
         print(Popen(["mplayer","-dumpstream","-dumpfile",filename, rtmp], stdout=PIPE).communicate()[0])
     if 'video' in flashvars:
         for reference in flashvars['video']['videoReferences']:
-            if reference['url'].endswith("m3u8"):
+            if 'm3u8' in reference['url']:
                 video['url']=reference['url']
                 video['filename'] = video['title']+'.ts'
         if 'statistics' in flashvars:
             video['category'] = flashvars['statistics']['category']
         download_from_playlist(video)
-    else:
+    if not 'url' in video:
         print("Could not find any streams")
-        return
+        return False
     return video
 
 def download_from_playlist(video):
@@ -74,7 +93,9 @@ def download_from_playlist(video):
     if not playlist:
         return
     videourl = sorted(playlist, key=lambda k: int(k['BANDWIDTH']))[-1]['url']
-    segments, metadata = parse_segment_playlist(requests.get(videourl).text)
+    if not videourl.startswith('http'): #if relative path
+        videourl = "{}/{}".format(os.path.dirname(video['url']), videourl)
+    segments, metadata = parse_segment_playlist(videourl)
     if "EXT-X-KEY" in metadata:
         key = requests.get(metadata["EXT-X-KEY"]['URI'].strip('"')).text
         decrypt=True
@@ -85,7 +106,7 @@ def download_from_playlist(video):
     size = 0
     for url in segments:
         ufile = requests.get(url, stream=True).raw
-        print("\r{} MB".format(size/1024/1024))
+        print("\r{0:.2f} MB".format(size/1024/1024))
         sys.stdout.flush()
         if decrypt:
             iv=struct.pack("IIII",segment,0,0,0)
@@ -100,15 +121,21 @@ def download_from_playlist(video):
             size += len(buf)
         segment += 1
 
+    if 'thumb-url' in video:
+        video['thumb'] = requests.get(video['thumb-url'],stream=True).raw
+
 def parse_playlist(playlist):
     if not playlist.startswith("#EXTM3U"):
         print(playlist)
         return False
-    playlist = playlist.splitlines()[1:]
+    playlist = playlist.splitlines()
+    while not 'EXT-X-STREAM-INF' in playlist[0]:
+        playlist = playlist[1:]
     items=[]
     for (metadata_string,url) in zip(playlist[0::2], playlist[1::2]):
-        md = dict()
-        assert 'EXT-X-STREAM-INF' in metadata_string.split(':')[0]
+        md = Video()
+        if not 'EXT-X-STREAM-INF' in metadata_string.split(':')[0]:
+            continue
        for item in metadata_string.split(':')[1].split(','):
            if '=' in item:
                md.update([item.split('='),])
@@ -116,7 +143,8 @@ def parse_playlist(playlist):
         items.append(md)
     return items
 
-def parse_segment_playlist(playlist):
+def parse_segment_playlist(playlisturl):
+    playlist = requests.get(playlisturl).text
     assert playlist.startswith("#EXTM3U")
     PATTERN = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
     segments = []
@@ -124,6 +152,8 @@
     metadata = {}
     for row in playlist.splitlines():
         if next_is_url:
+            if not row.startswith('http'): #if relative path
+                row = "{}/{}".format(os.path.dirname(playlisturl), row)
             segments.append(row)
             next_is_url=False
             continue
@@ -146,7 +176,7 @@ def parse_videolist():
     soup = BeautifulSoup(requests.get(base_url).text)
     for article in soup.findAll('article'):
         meta = dict(article.attrs)
-        video = {}
+        video = Video()
         video['title'] = meta['data-title']
         video['description'] = meta['data-description']
         video['url'] = dict(article.find('a').attrs)['href']
@@ -157,6 +187,57 @@ def parse_videolist():
             yield video
         page_num += 1
 
+def remux(video, xml=None):
+    basename = video['filename'].split('.ts')[0]
+    if 'genre' in video:
+        if not os.path.exists(video['genre']):
+            os.mkdir(video['genre'])
+        video['path'] = os.path.join(video['genre'],basename+'.mkv')
+    command = ["mkvmerge","-o",video['path'], '--title',video['title']]
+
+    if xml:
+        with open(basename+'.xml','w') as f:
+            f.write(xml)
+        command.extend(['--global-tags',basename+'.xml'])
+    if 'thumb' in video:
+        with open('thumbnail.jpg','wb') as f: #FIXME use title instead for many downloaders
+            f.write(video['thumb'].read())
+        command.extend(['--attachment-description', "Thumbnail",
+                        '--attachment-mime-type', 'image/jpeg',
+                        '--attach-file', 'thumbnail.jpg'])
+    command.append(video['filename'])
+    print(Popen(command, stdout=PIPE).communicate()[0])
+    for fname in (video['filename'], basename+'.xml','thumbnail.jpg'):
+        try:
+            os.unlink(fname)
+        except:
+            pass
+
+def mkv_metadata(video):
+    root = BeautifulSoup(features='xml')
+    root.append(Doctype('Tags SYSTEM "matroskatags.dtd"'))
+    tags = root.new_tag("Tags")
+    tag = root.new_tag("Tag")
+    tags.append(tag)
+    root.append(tags)
+    keep = ('title','description', 'url','genre')
+    targets = root.new_tag("Targets")
+    ttv = root.new_tag("TargetTypeValue")
+    ttv.string = str(50)
+    targets.append(ttv)
+    tag.append(targets)
+    for key in video:
+        if not key in keep:
+            continue
+        simple = root.new_tag('Simple')
+        name = root.new_tag('Name')
+        name.string=key.upper()
+        simple.append(name)
+        sstring = root.new_tag('String')
+        sstring.string=video[key]
+        simple.append(sstring)
+        tag.append(simple)
+    return str(root)
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
@@ -165,6 +246,8 @@ if __name__ == "__main__":
     group.add_argument("-u", "--url", help="Download video in url")
     group.add_argument("-m", "--mirror", help="Mirror all files", action="store_true")
     parser.add_argument("-n", "--no_act", help="Just print what would be done, don't do any downloading.", action="store_true")
+    parser.add_argument("--no_remux", help="Don't remux into mkv", action="store_true")
+
     args = parser.parse_args()
     if args.rss:
         import feedparser
@@ -173,28 +256,35 @@ if __name__ == "__main__":
             print(("Downloading: %s"%e.title))
             if args.no_act:
                 continue
-            filename = scrape_player_page(e.link, e.title)
-            print(Popen(["avconv","-i",filename,"-vcodec","copy","-acodec","copy", filename+'.mkv'], stdout=PIPE).communicate()[0])
+            video = scrape_player_page({'title':e.title,'url':e.link})
+            if args.no_remux:
+                continue
+            remux(video)
         #print(e.description)
     if args.mirror:
+        if not os.path.exists('.seen'):
+            os.mkdir('.seen')
         for video in parse_videolist():
             video['title'] = video['title'].replace('/','_')
             print(video['title']+'.mkv')
             print("{} of {}".format(video['num'], video['total']))
-            if os.path.exists(video['title']+'.mkv'):
+
+            if os.path.exists(os.path.join('.seen',video['title'])):
                 print("Skipping")
                 continue
             print("Downloading...")
             if args.no_act:
                 continue
-            ret = scrape_player_page(video['url'], video['title'])
-            print(ret)
-            print(Popen(["avconv","-i",video['title']+'.ts',"-vcodec","copy","-acodec","copy", video['title']+'.mkv'], stdout=PIPE).communicate()[0])
-            try:
-                os.unlink(video['title']+'.ts')
-            except:
-                import pdb;pdb.set_trace()
+            open(os.path.join('.seen',video['title']),'w').close() #touch
+            video = scrape_player_page(video)
+            if args.no_remux:
+                continue
+            xml = mkv_metadata(video)
+            remux(video, xml)
+
     else:
         if not args.no_act:
-            video = scrape_player_page(args.url, None)
+            video = scrape_player_page({'url':args.url})
+            if not args.no_remux:
+                remux(video)
         print(("Downloaded {}".format(args.url)))
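
Usage sketch for the reworked flow: scrape_player_page() now takes a dict-like Video carrying at least 'url' (and optionally 'title'), and remux()/mkv_metadata() replace the old avconv step. A minimal sketch, assuming svtplaydump.py is importable as a module; the programme URL below is made up:

    from svtplaydump import Video, scrape_player_page, mkv_metadata, remux

    # Hypothetical relative programme URL; scrape_player_page() prefixes it
    # with http://www.svtplay.se before fetching the page.
    video = Video(url='/video/1014438/some-programme', title='')
    video = scrape_player_page(video)      # fetches metadata and downloads the HLS segments to <title>.ts
    if video:                              # False is returned when no stream could be found
        remux(video, mkv_metadata(video))  # merges into <genre>/<title>.mkv with global tags attached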
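
Both download_from_playlist() and parse_segment_playlist() now resolve playlist entries given as relative paths against the directory part of the parent URL. A small illustration of that os.path.dirname() resolution, using made-up URLs:

    import os

    master = "http://example.com/hls/master.m3u8"  # made-up variant playlist URL
    segment = "segment_00001.ts"                   # relative entry found inside it
    print("{}/{}".format(os.path.dirname(master), segment))
    # -> http://example.com/hls/segment_00001.ts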