# -*- coding: utf-8 -*-
#
# (C) Copyright 2010 Mikael Frykholm <mikael@frykholm.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>
#
# Changelog:
# 0.4 added mirror mode.
# 0.3 added apple streaming playlist parsing and decryption
# 0.2 added python 2.4 urlparse compatibility
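"""Scrape and download videos from SVT Play.

Illustrative invocations (the script name below is assumed, not part of the source):

    python svtplay.py -u /video/12345          # download the video behind one player URL
    python svtplay.py -r http://host/feed.rss  # download every entry in an RSS feed
    python svtplay.py -m                       # mirror every video in the site listing
    python svtplay.py -m -n                    # mirror mode, only print what would be done
"""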
from bs4 import BeautifulSoup
from subprocess import Popen, PIPE
from Crypto.Cipher import AES
import argparse
import feedparser
import os
import re
import requests
import struct
def scrape_player_page(url, title):
    """
    Try to scrape the site for video and download.
    """
    if not url.startswith('http'):
        url = "http://www.svtplay.se" + url
    video = {}
    soup = BeautifulSoup(requests.get(url).text)
    video_player = soup.body('a', {'data-json-href': True})[0]
    # the player element points at a JSON blob describing the available streams
    if video_player.attrs['data-json-href'].startswith("/wd"):
        flashvars = requests.get("http://www.svt.se/%s" % video_player.attrs['data-json-href']).json()
    else:
        flashvars = requests.get("http://www.svtplay.se/%s" % video_player.attrs['data-json-href'] + "?output=json").json()
    video['duration'] = video_player.attrs.get('data-length', 0)
    video['title'] = title
    if not title:
        video['title'] = soup.find('meta', {'property': 'og:title'}).attrs['content'].replace('|', '_').replace('/', '_')
    if 'dynamicStreams' in flashvars:
        video['url'] = flashvars['dynamicStreams'][0].split('url:')[1].split('.mp4,')[0] + '.mp4'
        filename = video['title'] + ".mp4"
        print(Popen(["rtmpdump", "-o" + filename, "-r", url], stdout=PIPE).communicate()[0])
    if 'pathflv' in flashvars:
        rtmp = flashvars['pathflv'][0]
        filename = video['title'] + ".flv"
        print(Popen(["mplayer", "-dumpstream", "-dumpfile", filename, rtmp], stdout=PIPE).communicate()[0])
    if 'video' in flashvars:
        # Apple HTTP Live Streaming: pick the m3u8 reference and download segment by segment
        for reference in flashvars['video']['videoReferences']:
            if reference['url'].endswith("m3u8"):
                video['url'] = reference['url']
                video['filename'] = video['title'] + '.ts'
                if 'statistics' in flashvars:
                    video['category'] = flashvars['statistics']['category']
        download_from_playlist(video)
    else:
        print("Could not find any streams")
        return
    return video['filename']
def download_from_playlist(video):
    playlist = parse_playlist(requests.get(video['url']).text)
    if not playlist:
        return
    # pick the variant with the highest bandwidth
    videourl = sorted(playlist, key=lambda k: int(k['BANDWIDTH']))[-1]['url']
    segments, metadata = parse_segment_playlist(requests.get(videourl).text)
    if "EXT-X-KEY" in metadata:
        # the segments are AES encrypted; fetch the key as raw bytes
        key = requests.get(metadata["EXT-X-KEY"]['URI'].strip('"')).content
        decrypt = True
    else:
        decrypt = False
    with open("%s" % video['filename'], "wb") as ofile:
        segment = 0
        size = 0
        for url in segments:
            ufile = requests.get(url, stream=True).raw
            print("\r{} MB".format(size / 1024 / 1024))
            if decrypt:
                # the IV is built from the segment number
                iv = struct.pack("IIII", segment, 0, 0, 0)
                decryptor = AES.new(key, AES.MODE_CBC, iv)
            while True:
                buf = ufile.read(4096)
                if not buf:
                    break
                if decrypt:
                    buf = decryptor.decrypt(buf)
                ofile.write(buf)
                size += len(buf)
            segment += 1
def parse_playlist(playlist):
    if not playlist.startswith("#EXTM3U"):
        print(playlist)
        return False
    playlist = playlist.splitlines()[1:]
    items = []
    # the variant playlist alternates EXT-X-STREAM-INF metadata lines and URLs
    for (metadata_string, url) in zip(playlist[0::2], playlist[1::2]):
        md = dict()
        assert 'EXT-X-STREAM-INF' in metadata_string.split(':')[0]
        for item in metadata_string.split(':')[1].split(','):
            if '=' in item:
                md.update([item.split('='), ])
        md['url'] = url
        items.append(md)
    return items
def parse_segment_playlist(playlist):
    assert playlist.startswith("#EXTM3U")
    PATTERN = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
    segments = []
    next_is_url = False
    metadata = {}
    for row in playlist.splitlines():
        if next_is_url:
            segments.append(row)
            next_is_url = False
            continue
        if "EXTINF" in row:
            next_is_url = True
        if "EXT-X-KEY" in row:
            row = row.split(':', 1)[1]  # skip first part
            parts = PATTERN.split(row)[1:-1]  # do magic re split and keep quotes
            metadata["EXT-X-KEY"] = dict([part.split('=', 1) for part in parts if '=' in part])  # throw away the commas and make dict of the pairs
    return (segments, metadata)
def parse_videolist():
    page_num = 1
    videos_per_page = 8  # assumed size of one ajax listing page
    video_num = 0
    soup = BeautifulSoup(requests.get("http://www.svtplay.se/ajax/videospager").text)  # this call does not work for getting the pages, we use it for the page totals only
    page_tot = int(soup.find('a', {'data-currentpage': True}).attrs['data-lastpage'])
    while page_num <= page_tot:
        base_url = "http://www.svtplay.se/ajax/videos?sida={}".format(page_num)
        soup = BeautifulSoup(requests.get(base_url).text)
        for article in soup.findAll('article'):
            meta = dict(article.attrs)
            video = {}
            video['title'] = meta['data-title']
            video['description'] = meta['data-description']
            video['url'] = dict(article.find('a').attrs)['href']
            video['thumb-url'] = dict(article.find('img', {}).attrs)['src']
            video['num'] = video_num
            video['total'] = page_tot * videos_per_page
            video_num += 1
            yield video
        page_num += 1
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-r", "--rss", help="Download all files in rss")
    group.add_argument("-u", "--url", help="Download video in url")
    group.add_argument("-m", "--mirror", help="Mirror all files", action="store_true")
    parser.add_argument("-n", "--no_act", help="Just print what would be done, don't do any downloading.", action="store_true")
    args = parser.parse_args()
    if args.rss:
        d = feedparser.parse(args.rss)
        for e in d.entries:
            print("Downloading: %s" % e.title)
            if args.no_act:
                continue
            filename = scrape_player_page(e.link, e.title)
            # remux the downloaded stream into an mkv container without re-encoding
            print(Popen(["avconv", "-i", filename, "-vcodec", "copy", "-acodec", "copy", filename + '.mkv'], stdout=PIPE).communicate()[0])
            #print(e.description)
    elif args.mirror:
        for video in parse_videolist():
            video['title'] = video['title'].replace('/', '_')
            print(video['title'] + '.mkv')
            print("{} of {}".format(video['num'], video['total']))
            if os.path.exists(video['title'] + '.mkv'):
                print("Skipping")
                continue
            print("Downloading...")
            if args.no_act:
                continue
            ret = scrape_player_page(video['url'], video['title'])
            print(Popen(["avconv", "-i", video['title'] + '.ts', "-vcodec", "copy", "-acodec", "copy", video['title'] + '.mkv'], stdout=PIPE).communicate()[0])
            try:
                os.unlink(video['title'] + '.ts')
            except OSError:
                import pdb; pdb.set_trace()
    else:
        if not args.no_act:
            video = scrape_player_page(args.url, None)
        print("Downloaded {}".format(args.url))