-#!/usr/bin/env python
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# (C) Copyright 2010 Mikael Frykholm <mikael@frykholm.com>
# 0.2 added python 2.4 urlparse compatibility
# 0.1 initial release
-from BeautifulSoup import BeautifulSoup
+from bs4 import BeautifulSoup
-from subprocess import *
+from subprocess import Popen, PIPE
import re
-import json
from Crypto.Cipher import AES
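+# AES-128-CBC is what HLS uses for encrypted segments; Crypto.Cipher is
+# provided by PyCrypto (or its maintained fork, pycryptodome)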
import struct
import argparse
-import feedparser
-try:
- import urlparse
-except ImportError:
- pass
-import urllib2
-try:
- import urllib2.urlparse as urlparse
-except ImportError:
- pass
+import requests
import sys, os
-def main(url, title):
+def scrape_player_page(url, title):
"""
- Try to scrape the site for video and download.
+ Scrape the player page at `url` for video streams and download them.
+ Returns the video metadata dict, or None if no stream was found.
"""
if not url.startswith('http'):
url = "http://www.svtplay.se" + url
video = {}
- page = urllib2.urlopen(url).read()
- soup = BeautifulSoup(page,convertEntities=BeautifulSoup.HTML_ENTITIES)
+ soup = BeautifulSoup(requests.get(url).text, 'html.parser')
video_player = soup.body('a',{'data-json-href':True})[0]
- flashvars = json.loads(urllib2.urlopen("http://www.svtplay.se/%s"%video_player.attrMap['data-json-href']+"?output=json").read())
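+ # embedded /wd players fetch their JSON from svt.se; regular player pages
+ # expose their flashvars through svtplay.se's ?output=json API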
+ if video_player.attrs['data-json-href'].startswith("/wd"):
+ flashvars = requests.get("http://www.svt.se/%s"%video_player.attrs['data-json-href']).json()
+ else:
+ flashvars = requests.get("http://www.svtplay.se/%s"%video_player.attrs['data-json-href']+"?output=json").json()
+ video['duration'] = video_player.attrs.get('data-length',0)
video['title'] = title
if not title:
- video['title'] = soup.find('meta',{'property':'og:title'}).attrMap['content'].replace('|','_').replace('/','_')
+ video['title'] = soup.find('meta',{'property':'og:title'}).attrs['content'].replace('|','_').replace('/','_')
if 'dynamicStreams' in flashvars:
video['url'] = flashvars['dynamicStreams'][0].split('url:')[1].split('.mp4,')[0] +'.mp4'
filename = video['title']+".mp4"
- print Popen(["rtmpdump",u"-o"+filename,"-r", url], stdout=PIPE).communicate()[0]
+ print(Popen(["rtmpdump","-o"+filename,"-r", url], stdout=PIPE).communicate()[0])
if 'pathflv' in flashvars:
rtmp = flashvars['pathflv'][0]
filename = video['title']+".flv"
- print Popen(["mplayer","-dumpstream","-dumpfile",filename, rtmp], stdout=PIPE).communicate()[0]
+ print(Popen(["mplayer","-dumpstream","-dumpfile",filename, rtmp], stdout=PIPE).communicate()[0])
if 'video' in flashvars:
for reference in flashvars['video']['videoReferences']:
if reference['url'].endswith("m3u8"):
+ video['url'] = reference['url']
video['category'] = flashvars['statistics']['category']
download_from_playlist(video)
else:
- print "Could not find any streams"
+ print("Could not find any streams")
return
return video
def download_from_playlist(video):
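+ """
+ Fetch the HLS master playlist in video['url'], pick the highest-bandwidth
+ variant, then stream its segments into one <title>.ts file, decrypting
+ each segment when the playlist carries an EXT-X-KEY.
+ """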
- playlist = parse_playlist(urllib2.urlopen(video['url']).read())
+ playlist = parse_playlist(requests.get(video['url']).text)
+ if not playlist:
+ return
+ video['filename'] = video['title'] + '.ts' # the callers remux <title>.ts afterwards
videourl = sorted(playlist, key=lambda k: int(k['BANDWIDTH']))[-1]['url']
- segments, metadata = parse_segment_playlist(urllib2.urlopen(videourl).read())
+ segments, metadata = parse_segment_playlist(requests.get(videourl).text)
if "EXT-X-KEY" in metadata:
- key = urllib2.urlopen(metadata["EXT-X-KEY"]['URI'].strip('"')).read()
+ key = requests.get(metadata["EXT-X-KEY"]['URI'].strip('"')).content # the key is raw bytes, not text
decrypt=True
else:
decrypt=False
- with open("%s"%video['filename'],"w") as ofile:
+ with open("%s"%video['filename'],"wb") as ofile:
segment=0
size = 0
for url in segments:
- ufile = urllib2.urlopen(url)
- print "\r{} MB".format(size/1024/1024),
+ ufile = requests.get(url, stream=True).raw
+ print("\r{} MB".format(size/1024/1024))
sys.stdout.flush()
if decrypt:
iv=struct.pack("IIII",segment,0,0,0)
decryptor = AES.new(key, AES.MODE_CBC, iv)
- while(True):
+ while True:
- buf = ufile.read(1024)
- if buf:
- if decrypt:
- buf = decryptor.decrypt(buf)
- ofile.write(buf)
- size += len(buf)
- else:
- ufile.close()
+ buf = ufile.read(4096)
+ if not buf:
break
+ if decrypt:
+ buf = decryptor.decrypt(buf)
+ ofile.write(buf)
+ size += len(buf)
segment += 1
def parse_playlist(playlist):
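+ """
+ Parse an HLS master playlist into a list of variant dicts (the
+ EXT-X-STREAM-INF attributes such as BANDWIDTH, plus the variant 'url').
+ Returns False if the input is not an M3U playlist at all.
+ """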
if not playlist.startswith("#EXTM3U"):
- print playlist
+ print(playlist)
return False
playlist = playlist.splitlines()[1:]
items=[]
row = row.split(':',1)[1] #skip first part
parts = PATTERN.split(row)[1:-1] #do magic re split and keep quotes
metadata["EXT-X-KEY"] = dict([part.split('=',1) for part in parts if '=' in part]) #throw away the commas and make dict of the pairs
- return(segments, metadata)
+ return segments, metadata
+
def parse_videolist():
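+ """
+ Generator that pages through svtplay's AJAX video listing and yields one
+ metadata dict per video (title, description, url, thumb-url); 'num' and
+ 'total' give a rough progress count for mirror runs.
+ """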
- page = urllib2.urlopen("http://www.svtplay.se/ajax/videos?antal=100").read()
- soup = BeautifulSoup(page,convertEntities=BeautifulSoup.HTML_ENTITIES)
- videos = []
- for article in soup.findAll('article'):
- meta = dict(article.attrs)
- video = {}
- video['title'] = meta['data-title']
- video['description'] = meta['data-description']
- video['url'] = dict(article.find('a').attrs)['href']
- videos.append(video)
- return videos
+ page_num = 1
+ soup = BeautifulSoup(requests.get("http://www.svtplay.se/ajax/videospager").text)#this call does not work for getting the pages, we use it for the page totals only
+ page_tot = int(soup.find('a',{'data-currentpage':True}).attrs['data-lastpage'])
+ videos_per_page = 8
+ video_num = 0
+ while page_num <= page_tot:
+ base_url = "http://www.svtplay.se/ajax/videos?sida={}".format(page_num)
+ soup = BeautifulSoup(requests.get(base_url).text, 'html.parser')
+ for article in soup.findAll('article'):
+ meta = dict(article.attrs)
+ video = {}
+ video['title'] = meta['data-title']
+ video['description'] = meta['data-description']
+ video['url'] = article.find('a').attrs['href']
+ video['thumb-url'] = article.find('img').attrs['src']
+ video['num'] = video_num
+ video['total'] = page_tot * videos_per_page
+ video_num += 1
+ yield video
+ page_num += 1
+
if __name__ == "__main__":
parser = argparse.ArgumentParser()
group.add_argument("-r", "--rss", help="Download all files in rss")
group.add_argument("-u", "--url", help="Download video in url")
group.add_argument("-m", "--mirror", help="Mirror all files", action="store_true")
+ parser.add_argument("-n", "--no_act", help="Just print what would be done, don't do any downloading.", action="store_true")
args = parser.parse_args()
if args.rss:
- d = feedparser.parse(args.url)
+ import feedparser
+ d = feedparser.parse(args.rss)
for e in d.entries:
- print("Downloading: %s"%e.title)
- filename = main(e.link, e.title)
- print Popen(["avconv","-i",filename,"-vcodec","copy","-acodec","copy", filename+'.mkv'], stdout=PIPE).communicate()[0]
+ print(("Downloading: %s"%e.title))
+ if args.no_act:
+ continue
+ video = scrape_player_page(e.link, e.title)
+ # remux the downloaded .ts into an MKV container without re-encoding
+ print(Popen(["avconv","-i",video['title']+'.ts',"-vcodec","copy","-acodec","copy", video['title']+'.mkv'], stdout=PIPE).communicate()[0])
#print(e.description)
if args.mirror:
for video in parse_videolist():
video['title'] = video['title'].replace('/','_')
- print video['title']+'.mkv',
+ print(video['title']+'.mkv')
+ print("{} of {}".format(video['num'], video['total']))
if os.path.exists(video['title']+'.mkv'):
- print "Skipping"
+ print("Skipping")
continue
print("Downloading...")
- ret = main(video['url'], video['title'])
-
- print Popen(["avconv","-i",video['title']+'.ts',"-vcodec","copy","-acodec","copy", video['title']+'.mkv'], stdout=PIPE).communicate()[0]
+ if args.no_act:
+ continue
+ ret = scrape_player_page(video['url'], video['title'])
+ print(ret)
+ print(Popen(["avconv","-i",video['title']+'.ts',"-vcodec","copy","-acodec","copy", video['title']+'.mkv'], stdout=PIPE).communicate()[0])
try:
os.unlink(video['title']+'.ts')
- except:
- import pdb;pdb.set_trace()
+ except OSError:
+ pass # no .ts was written (no stream found), so nothing to clean up
else:
- video = main(args.url, None)
- print("Downloaded {}".format(video['title']))
\ No newline at end of file
+ if not args.no_act:
+ video = scrape_player_page(args.url, None)
+ print(("Downloaded {}".format(args.url)))