X-Git-Url: https://git.frykholm.com/svtplaydump.git/blobdiff_plain/2d8521d8ac50afd39e46134e206942c916c8c9e0..8cf70eae2b3cda25f903d9e0de716e8024473709:/svtplaydump.py

diff --git a/svtplaydump.py b/svtplaydump.py
index 90251dd..8b4a3c2 100755
--- a/svtplaydump.py
+++ b/svtplaydump.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python3.4
 # -*- coding: utf-8 -*-
 #
 # (C) Copyright 2010 Mikael Frykholm
@@ -22,7 +22,7 @@
 # 0.2 added python 2.4 urlparse compatibility
 # 0.1 initial release
 
-from bs4 import BeautifulSoup
+from bs4 import BeautifulSoup, Doctype
 from subprocess import *
 import re
 from Crypto.Cipher import AES
@@ -30,17 +30,33 @@ import struct
 import argparse
 import requests
 import sys, os
+import socket
+import feedparser
+from datetime import datetime, timezone
+from pathlib import Path
 
-def scrape_player_page(url, title):
+class Video(dict):
+    def __init__(self, *args, **kwargs):
+        self.update(dict(*args, **kwargs))  # use the free update to set keys
+
+    def __setattr__(self, name, value):
+        return self.__setitem__(name, value)
+
+    def __getattr__(self, name):
+        return self.__getitem__(name)
+
+    def is_downloaded(self):
+        raise NotImplementedError
+
+def scrape_player_page(video):
     """ Try to scrape the site for video and download.
     """
-    if not url.startswith('http'):
-        url = "http://www.svtplay.se" + url
-    video = {}
-    soup = BeautifulSoup(requests.get(url).text)
+    if not video['url'].startswith('http'):
+        video['url'] = "http://www.svtplay.se" + video['url']
+    soup = BeautifulSoup(requests.get(video['url']).text)
     video_player = soup.body('a',{'data-json-href':True})[0]
-    if 'oppetarkiv.se' in url:
+    if 'oppetarkiv.se' in video['url']:
         flashvars = requests.get("http://www.oppetarkiv.se/%s"%video_player.attrs['data-json-href']+"?output=json").json()
     else:
         if video_player.attrs['data-json-href'].startswith("/wd"):
@@ -48,32 +64,70 @@ def scrape_player_page(url, title):
         else:
             flashvars = requests.get("http://www.svtplay.se/%s"%video_player.attrs['data-json-href']+"?output=json").json()
     video['duration'] = video_player.attrs.get('data-length',0)
-    video['title'] = title
-    if not title:
+    if not 'title' in video:
         video['title'] = soup.find('meta',{'property':'og:title'}).attrs['content'].replace('|','_').replace('/','_')
+    if not 'genre' in video:
+        if soup.find(text='Kategori:'):
+            video['genre'] = soup.find(text='Kategori:').parent.parent.a.text
+        else:
+            video['genre'] = 'Ingen Genre'
     if 'dynamicStreams' in flashvars:
         video['url'] = flashvars['dynamicStreams'][0].split('url:')[1].split('.mp4,')[0] +'.mp4'
-        filename = video['title']+".mp4"
+        filename = Path(video['title']).with_suffix(".mp4")
         print(Popen(["rtmpdump","-o"+filename,"-r", url], stdout=PIPE).communicate()[0])
     if 'pathflv' in flashvars:
         rtmp = flashvars['pathflv'][0]
-        filename = video['title']+".flv"
+        filename = Path(video['title']).with_suffix(".flv")
         print(Popen(["mplayer","-dumpstream","-dumpfile",filename, rtmp], stdout=PIPE).communicate()[0])
+    if not 'timestamp' in video:
+        if soup.find_all(datetime=True):
+            xmldate_str = soup.find_all(datetime=True)[0].attrs['datetime']
+            if xmldate_str:
+                video['timestamp'] = datetime(*feedparser._parse_date_w3dtf(xmldate_str)[:6])  # naive, in utc
+                video['timestamp'] = video['timestamp'].replace(tzinfo=timezone.utc).astimezone(tz=None)  # convert to local time
     if 'video' in flashvars:
         for reference in flashvars['video']['videoReferences']:
             if 'm3u8' in reference['url']:
                 video['url']=reference['url']
-                video['filename'] = video['title']+'.ts'
+                video['filename'] = Path(video['title']).with_suffix('.ts')
         if 'statistics' in flashvars:
             video['category'] = flashvars['statistics']['category']
-    download_from_playlist(video)
+    if not download_from_playlist(video):
+        return False
     if not 'url' in video:
         print("Could not find any streams")
         return False
     return video
 
 def download_from_playlist(video):
-    playlist = parse_playlist(requests.get(video['url']).text)
+    params = requests.utils.urlparse(video['url']).query
+    print(params)
+    if 'cc1=' in params: #'cc1=name=Svenska~default=yes~forced=no~uri=http://media.svt.se/download/mcc/wp3/undertexter-wsrt/1134047/1134047-025A/C(sv)/index.m3u8~lang=sv'
+        video['subs'] = [dict([k.split('=') for k in params.split('cc1=')[1].split('~')])]  # make a dict from the param string
+    try:
+        req = requests.get(video['url']).text
+    except:
+        print("Error reading, skipping file")
+        print(sys.exc_info()[1])
+        return False
+    if 'subs' in video:
+        try:
+            segments = [item for item in requests.get(video['subs'][0]['uri']).text.split('\n') if 'vtt' in item]
+        except:
+            print("Error reading, skipping subtitle")
+            print(sys.exc_info()[1])
+            segments = []  # ugly FIXME
+        video['subs'][0]['download'] = []
+        for segment in segments:
+            if not segment.startswith('http'):
+                segment = "{}/{}".format(os.path.dirname(video['subs'][0]['uri']), segment)
+            try:
+                video['subs'][0]['download'].append(requests.get(segment).text)
+            except:
+                print("Error reading, skipping subtitle")
+                print(sys.exc_info()[1])
+                break
+    playlist = parse_playlist(req)
     if not playlist:
         return
     videourl = sorted(playlist, key=lambda k: int(k['BANDWIDTH']))[-1]['url']
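
The Video class introduced above is a dict subclass that forwards attribute access to item access, so v.title and v['title'] stay interchangeable while the object still behaves like a plain dict everywhere it is passed around. A minimal standalone sketch of that pattern, not part of the patch; the name AttrDict and the KeyError-to-AttributeError translation are mine, the latter so hasattr() behaves as expected:

    class AttrDict(dict):
        """Dict whose keys can also be read and written as attributes."""
        def __init__(self, *args, **kwargs):
            self.update(dict(*args, **kwargs))  # dict.update sets the keys

        def __setattr__(self, name, value):
            self[name] = value

        def __getattr__(self, name):
            # only called when normal attribute lookup fails
            try:
                return self[name]
            except KeyError:
                raise AttributeError(name)

    v = AttrDict(title='Rapport', url='/video/123')
    v.genre = 'Nyheter'
    assert v.genre == v['genre']
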
@@ -81,22 +135,42 @@
         videourl = "{}/{}".format(os.path.dirname(video['url']), videourl)
     segments, metadata = parse_segment_playlist(videourl)
     if "EXT-X-KEY" in metadata:
-        key = requests.get(metadata["EXT-X-KEY"]['URI'].strip('"')).text
+        try:
+            key = requests.get(metadata["EXT-X-KEY"]['URI'].strip('"')).text
+        except:
+            print("Error reading, skipping file")
+            print(sys.exc_info()[1])
+            return False
         decrypt=True
     else:
         decrypt=False
-    with open("%s"%video['filename'],"wb") as ofile:
+    with video['filename'].open("wb") as ofile:
         segment=0
         size = 0
         for url in segments:
-            ufile = requests.get(url, stream=True).raw
-            print("\r{} MB".format(size/1024/1024))
+            try:
+                ufile = requests.get(url, stream=True).raw
+            except:
+                print("Error reading, skipping file")
+                print(sys.exc_info()[1])
+                return False
+            print("\r{0:.2f} MB".format(size/1024/1024), end="")
             sys.stdout.flush()
             if decrypt:
                 iv=struct.pack("IIII",segment,0,0,0)
-                decryptor = AES.new(key, AES.MODE_CBC, iv)
+                try:
+                    decryptor = AES.new(key, AES.MODE_CBC, iv)  # ValueError: AES key must be either 16, 24, or 32 bytes long
+                except(ValueError) as e:
+                    print("Error using decryption key. Skipping")
+                    print(e)
+                    return False
             while(True):
-                buf = ufile.read(4096)
+                try:
+                    buf = ufile.read(4096)
+                except:
+                    print("Error reading, skipping file")  # FIXME mark file as failed
+                    print(sys.exc_info()[1])
+                    return False
                 if not buf:
                     break
                 if decrypt:
@@ -105,6 +179,15 @@
                 size += len(buf)
             segment += 1
 
+    if 'thumb-url' in video:
+        try:
+            video['thumb'] = requests.get(video['thumb-url'],stream=True).raw
+        except:
+            print("Error reading thumbnail")  # FIXME mark file as failed
+            print(sys.exc_info()[1])
+
+    return True
+
 def parse_playlist(playlist):
     if not playlist.startswith("#EXTM3U"):
         print(playlist)
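
For context on the decryption loop above: HLS (RFC 8216, section 5.2) encrypts each media segment with AES-128 in CBC mode, and when the EXT-X-KEY tag carries no IV attribute the IV defaults to the segment's media sequence number expressed as a 128-bit big-endian integer. The patch's struct.pack("IIII", segment, 0, 0, 0) puts the counter in the first four bytes in native byte order instead. A sketch of the spec-conformant form; the helper name and arguments are mine, and it assumes key is already raw bytes and seq is the segment's media sequence number:

    import struct
    from Crypto.Cipher import AES

    def decrypt_segment(data, key, seq):
        # default HLS IV: the media sequence number as a 16-byte big-endian value
        iv = struct.pack(">QQ", 0, seq)
        return AES.new(key, AES.MODE_CBC, iv).decrypt(data)
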
Skipping") + print(e) + return False while(True): - buf = ufile.read(4096) + try: + buf = ufile.read(4096) + except: + print("Error reading, skipping file") #FIXME mark file as failed + print(sys.exc_info()[1]) + return False if not buf: break if decrypt: @@ -105,6 +179,15 @@ def download_from_playlist(video): size += len(buf) segment += 1 + if 'thumb-url' in video: + try: + video['thumb'] = requests.get(video['thumb-url'],stream=True).raw + except: + print("Error reading thumbnail") #FIXME mark file as failed + print(sys.exc_info()[1]) + + return True + def parse_playlist(playlist): if not playlist.startswith("#EXTM3U"): print(playlist) @@ -114,7 +197,7 @@ def parse_playlist(playlist): playlist = playlist[1:] items=[] for (metadata_string,url) in zip(playlist[0::2], playlist[1::2]): - md = dict() + md = Video() if not 'EXT-X-STREAM-INF' in metadata_string.split(':')[0]: continue for item in metadata_string.split(':')[1].split(','): @@ -157,7 +240,7 @@ def parse_videolist(): soup = BeautifulSoup(requests.get(base_url).text) for article in soup.findAll('article'): meta = dict(article.attrs) - video = {} + video = Video() video['title'] = meta['data-title'] video['description'] = meta['data-description'] video['url'] = dict(article.find('a').attrs)['href'] @@ -168,13 +251,71 @@ def parse_videolist(): yield video page_num += 1 -def remux(video): - basename = video['filename'].split('.ts')[0] - print(Popen(["avconv","-i",video['filename'],"-vcodec","copy","-acodec","copy", basename+'.mkv'], stdout=PIPE).communicate()[0]) - try: - os.unlink(video['filename']) - except: - pass +def remux(video, xml=None): + if 'genre' in video: + if not os.path.exists(video['genre']): + os.mkdir(video['genre']) + video['path'] = Path(video['genre'] / video['filename']).with_suffix('.mkv') + else: + video['path'] = video['filename'].with_suffix('.mkv') + command = ["mkvmerge","-o",str(video['path']), '--title',video['title']] + + if xml: + with video['filename'].with_suffix('.xml').open('w') as f: + f.write(xml) + command.extend(['--global-tags',str(video['filename'].with_suffix('.xml'))]) + if 'thumb' in video: + with open('thumbnail.jpg','wb') as f: #FIXME use title instead for many downloaders + f.write(video['thumb'].read()) + command.extend(['--attachment-description', "Thumbnail", + '--attachment-mime-type', 'image/jpeg', + '--attach-file', 'thumbnail.jpg']) + # if 'subs' in video: + # for sub in video['subs']: + # if 'download' in sub: + # with open("{}.vtt".format(sub['lang']),'wb') as f: + # f.write(bytes("".join(sub['download']),'utf-8')) #FIXME + # command.extend(['--language 0:{} {}.vtt'.format(sub['lang'],sub['lang'])]) + + command.append(str(video['filename'])) + print(Popen(command, stdout=PIPE).communicate()[0]) + for fname in (video['filename'], video['filename'].with_suffix('.xml'),Path('thumbnail.jpg')): + try: + fname.unlink() + except: + pass + if 'timestamp' in video: + try: + os.utime(str(video['path']), times=(video['timestamp'].timestamp(),video['timestamp'].timestamp())) + except FileNotFoundError as e: + print(e) + + +def mkv_metadata(video): + root = BeautifulSoup(features='xml') + root.append(Doctype('Tags SYSTEM "matroskatags.dtd"')) + tags = root.new_tag("Tags") + tag = root.new_tag("Tag") + tags.append(tag) + root.append(tags) + keep = ('title','description', 'url','genre') + targets = root.new_tag("Targets") + ttv = root.new_tag("TargetTypeValue") + ttv.string = str(50) + targets.append(ttv) + tag.append(targets) + for key in video: + if not key in keep: + continue + simple = 
@@ -187,35 +328,46 @@ if __name__ == "__main__":
     args = parser.parse_args()
 
     if args.rss:
-        import feedparser
         d = feedparser.parse(args.rss)
         for e in d.entries:
             print(("Downloading: %s"%e.title))
             if args.no_act:
                 continue
-            filename = scrape_player_page(e.link, e.title)
+            video = scrape_player_page({'title':e.title,'url':e.link})
             if args.no_remux:
                 continue
-            self.remux({'title':e.title})
+            remux(video)
         #print(e.description)
     if args.mirror:
+        if not os.path.exists('.seen'):
+            os.mkdir('.seen')
         for video in parse_videolist():
             video['title'] = video['title'].replace('/','_')
             print(video['title']+'.mkv')
             print("{} of {}".format(video['num'], video['total']))
-            if os.path.exists(video['title']+'.mkv'):
+
+            if os.path.exists(os.path.join('.seen',video['title'])):
                 print("Skipping")
                 continue
             print("Downloading...")
             if args.no_act:
                 continue
-            video = scrape_player_page(video['url'], video['title'])
+            open(os.path.join('.seen',video['title']),'w').close()  # touch
+            ret = scrape_player_page(video)
+            if not ret:
+                if not os.path.exists('.failed'):
+                    os.mkdir('.failed')
+                open(os.path.join('.failed',video['title']),'w').close()  # touch
+                continue
+            video = ret
             if args.no_remux:
                 continue
-            remux(video)
+            xml = mkv_metadata(video)
+            remux(video, xml)
+
     else:
         if not args.no_act:
-            video = scrape_player_page(args.url, None)
+            video = scrape_player_page({'url':args.url})
        if not args.no_remux:
-            remux({'title':e.title})
+            remux(video)
         print(("Downloaded {}".format(args.url)))
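
The mirror branch above tracks progress with empty marker files (.seen/<title>, .failed/<title>) instead of testing for the finished .mkv, so a re-run skips completed titles even after the output has been moved elsewhere. The same touch pattern condensed into helpers; the names are hypothetical and not part of the patch:

    import os

    def touch_marker(dirname, title):
        # create the marker directory on first use, then drop an empty file
        os.makedirs(dirname, exist_ok=True)
        open(os.path.join(dirname, title), 'w').close()

    def marked(dirname, title):
        return os.path.exists(os.path.join(dirname, title))

    # usage mirroring the loop above:
    #   if marked('.seen', video['title']): continue
    #   touch_marker('.seen', video['title'])
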