#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# (C) Copyright 2010 Mikael Frykholm <mikael@frykholm.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# Changelog:
# 0.4 added mirror mode.
# 0.3 added apple streaming playlist parsing and decryption
# 0.2 added python 2.4 urlparse compatibility
# 0.1 initial release
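#
# Example invocations (illustrative only; the authoritative flag definitions
# are in the argparse setup at the bottom of this file):
#   ./svtplaydump.py -u /video/1234567                  # download one video (hypothetical path)
#   ./svtplaydump.py -m                                 # mirror the whole svtplay.se listing
#   ./svtplaydump.py -r http://example.com/feed.rss -n  # dry-run an RSS feed
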
from bs4 import BeautifulSoup, Doctype
from subprocess import *
import re
from Crypto.Cipher import AES
import struct
import argparse
import requests
import sys, os
import socket

class Video(dict):
    def __init__(self, *args, **kwargs):
        self.update(dict(*args, **kwargs))  # use the free update to set keys

    def __setattr__(self, name, value):
        return self.__setitem__(name, value)

    def __getattr__(self, name):
        return self.__getitem__(name)

    def is_downloaded(self):
        raise NotImplementedError

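# Video is a dict subclass with attribute-style access; illustratively,
# v = Video(title="foo") makes v.title and v['title'] refer to the same value.
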
def scrape_player_page(video):
    """
    Try to scrape the site for video and download.
    """
    if not video['url'].startswith('http'):
        video['url'] = "http://www.svtplay.se" + video['url']
    soup = BeautifulSoup(requests.get(video['url']).text)
    video_player = soup.body('a', {'data-json-href': True})[0]
    if 'oppetarkiv.se' in video['url']:
        flashvars = requests.get("http://www.oppetarkiv.se/%s" % video_player.attrs['data-json-href'] + "?output=json").json()
    else:
        if video_player.attrs['data-json-href'].startswith("/wd"):
            flashvars = requests.get("http://www.svt.se/%s" % video_player.attrs['data-json-href']).json()
        else:
            flashvars = requests.get("http://www.svtplay.se/%s" % video_player.attrs['data-json-href'] + "?output=json").json()
    video['duration'] = video_player.attrs.get('data-length', 0)
    if not video.get('title'):
        video['title'] = soup.find('meta', {'property': 'og:title'}).attrs['content'].replace('|', '_').replace('/', '_')
    if 'genre' not in video:
        if soup.find(text='Kategori:'):
            video['genre'] = soup.find(text='Kategori:').parent.parent.a.text
        else:
            video['genre'] = 'Ingen Genre'
    if 'dynamicStreams' in flashvars:
        video['url'] = flashvars['dynamicStreams'][0].split('url:')[1].split('.mp4,')[0] + '.mp4'
        filename = video['title'] + ".mp4"
        print(Popen(["rtmpdump", "-o" + filename, "-r", video['url']], stdout=PIPE).communicate()[0])
    if 'pathflv' in flashvars:
        rtmp = flashvars['pathflv'][0]
        filename = video['title'] + ".flv"
        print(Popen(["mplayer", "-dumpstream", "-dumpfile", filename, rtmp], stdout=PIPE).communicate()[0])
    if 'video' in flashvars:
        for reference in flashvars['video']['videoReferences']:
            if 'm3u8' in reference['url']:
                video['url'] = reference['url']
                video['filename'] = video['title'] + '.ts'
                if 'statistics' in flashvars:
                    video['category'] = flashvars['statistics']['category']
        download_from_playlist(video)
    if 'url' not in video:
        print("Could not find any streams")
        return False
    return video

def download_from_playlist(video):
    """Fetch the master playlist, pick the highest-bandwidth variant and
    download its segments to video['filename'], decrypting when needed."""
    playlist = parse_playlist(requests.get(video['url']).text)
    if not playlist:
        return
    videourl = sorted(playlist, key=lambda k: int(k['BANDWIDTH']))[-1]['url']
    if not videourl.startswith('http'):  # if relative path
        videourl = "{}/{}".format(os.path.dirname(video['url']), videourl)
    segments, metadata = parse_segment_playlist(videourl)
    if "EXT-X-KEY" in metadata:
        key = requests.get(metadata["EXT-X-KEY"]['URI'].strip('"')).content  # AES key must be raw bytes
        decrypt = True
    else:
        decrypt = False
    with open(video['filename'], "wb") as ofile:
        segment = 0
        size = 0
        for url in segments:
            ufile = requests.get(url, stream=True).raw
            print("\r{0:.2f} MB".format(size / 1024 / 1024), end="")
            sys.stdout.flush()
            if decrypt:
                iv = struct.pack("IIII", segment, 0, 0, 0)  # per-segment IV from the segment counter
                decryptor = AES.new(key, AES.MODE_CBC, iv)
            while True:
                try:
                    buf = ufile.read(4096)
                except socket.error as e:
                    print("Error reading, skipping file")
                    print(e)
                    return
                if not buf:
                    break
                if decrypt:
                    buf = decryptor.decrypt(buf)
                ofile.write(buf)
                size += len(buf)
            segment += 1

    if 'thumb-url' in video:
        video['thumb'] = requests.get(video['thumb-url'], stream=True).raw

def parse_playlist(playlist):
    """Parse an HLS master playlist into a list of variant dicts
    (BANDWIDTH etc. plus 'url')."""
    if not playlist.startswith("#EXTM3U"):
        print(playlist)
        return False
    playlist = playlist.splitlines()
    while 'EXT-X-STREAM-INF' not in playlist[0]:
        playlist = playlist[1:]
    items = []
    for (metadata_string, url) in zip(playlist[0::2], playlist[1::2]):
        md = Video()
        if 'EXT-X-STREAM-INF' not in metadata_string.split(':')[0]:
            continue
        for item in metadata_string.split(':')[1].split(','):
            if '=' in item:
                md.update([item.split('='), ])
        md['url'] = url
        items.append(md)
    return items

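# For reference, the master playlist handed to parse_playlist() looks roughly
# like this (illustrative excerpt, not real svtplay.se output):
#
#   #EXTM3U
#   #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=1000000,RESOLUTION=640x360
#   http://example.com/variant_1000.m3u8
#   #EXT-X-STREAM-INF:PROGRAM-ID=1,BANDWIDTH=2500000,RESOLUTION=1280x720
#   http://example.com/variant_2500.m3u8
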
def parse_segment_playlist(playlisturl):
    """Parse a variant playlist: return the list of segment URLs and a dict of
    metadata (currently just the EXT-X-KEY attributes, if present)."""
    playlist = requests.get(playlisturl).text
    assert playlist.startswith("#EXTM3U")
    PATTERN = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
    segments = []
    next_is_url = False
    metadata = {}
    for row in playlist.splitlines():
        if next_is_url:
            if not row.startswith('http'):  # if relative path
                row = "{}/{}".format(os.path.dirname(playlisturl), row)
            segments.append(row)
            next_is_url = False
            continue
        if 'EXTINF' in row:
            next_is_url = True
        if "EXT-X-KEY" in row:
            row = row.split(':', 1)[1]  # skip the tag name
            parts = PATTERN.split(row)[1:-1]  # re split that keeps quoted commas intact
            metadata["EXT-X-KEY"] = dict([part.split('=', 1) for part in parts if '=' in part])  # drop the commas and make a dict of the pairs
    return (segments, metadata)

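# A variant (segment) playlist, as consumed above, looks roughly like this
# (illustrative excerpt):
#
#   #EXTM3U
#   #EXT-X-KEY:METHOD=AES-128,URI="http://example.com/key.bin"
#   #EXTINF:10,
#   http://example.com/segment0.ts
#   #EXTINF:10,
#   http://example.com/segment1.ts
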
def parse_videolist():
    """Generator that walks the paginated svtplay.se AJAX listing and yields a
    Video for every programme found."""
    page_num = 1
    soup = BeautifulSoup(requests.get("http://www.svtplay.se/ajax/videospager").text)  # this call does not work for getting the pages, we use it for the page total only
    page_tot = int(soup.find('a', {'data-currentpage': True}).attrs['data-lastpage'])
    videos_per_page = 8
    video_num = 0
    while page_num <= page_tot:
        base_url = "http://www.svtplay.se/ajax/videos?sida={}".format(page_num)
        soup = BeautifulSoup(requests.get(base_url).text)
        for article in soup.findAll('article'):
            meta = dict(article.attrs)
            video = Video()
            video['title'] = meta['data-title']
            video['description'] = meta['data-description']
            video['url'] = dict(article.find('a').attrs)['href']
            video['thumb-url'] = dict(article.find('img', {}).attrs)['src']
            video['num'] = video_num
            video['total'] = page_tot * videos_per_page
            video_num += 1
            yield video
        page_num += 1

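# Illustrative use of the generator above:
#   for video in parse_videolist():
#       print(video['num'], video['title'])
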
def remux(video, xml=None):
    """Remux the downloaded .ts into an .mkv (sorted into a genre directory if
    known), attaching global tags and a thumbnail when available."""
    basename = video['filename'].split('.ts')[0]
    if 'genre' in video:
        if not os.path.exists(video['genre']):
            os.mkdir(video['genre'])
        video['path'] = os.path.join(video['genre'], basename + '.mkv')
    else:
        video['path'] = basename + '.mkv'
    command = ["mkvmerge", "-o", video['path'], '--title', video['title']]

    if xml:
        with open(basename + '.xml', 'w') as f:
            f.write(xml)
        command.extend(['--global-tags', basename + '.xml'])
    if 'thumb' in video:
        with open('thumbnail.jpg', 'wb') as f:  # FIXME use title instead for many downloaders
            f.write(video['thumb'].read())
        command.extend(['--attachment-description', "Thumbnail",
                        '--attachment-mime-type', 'image/jpeg',
                        '--attach-file', 'thumbnail.jpg'])
    command.append(video['filename'])
    print(Popen(command, stdout=PIPE).communicate()[0])
    for fname in (video['filename'], basename + '.xml', 'thumbnail.jpg'):
        try:
            os.unlink(fname)
        except OSError:
            pass

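# The mkvmerge invocation built above ends up looking roughly like (sketch):
#   mkvmerge -o Genre/Title.mkv --title Title --global-tags Title.xml \
#            --attachment-description Thumbnail --attachment-mime-type image/jpeg \
#            --attach-file thumbnail.jpg Title.ts
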
def mkv_metadata(video):
    """Build a Matroska global-tags XML document from the video metadata."""
    root = BeautifulSoup(features='xml')
    root.append(Doctype('Tags SYSTEM "matroskatags.dtd"'))
    tags = root.new_tag("Tags")
    tag = root.new_tag("Tag")
    tags.append(tag)
    root.append(tags)
    keep = ('title', 'description', 'url', 'genre')
    targets = root.new_tag("Targets")
    ttv = root.new_tag("TargetTypeValue")
    ttv.string = str(50)
    targets.append(ttv)
    tag.append(targets)
    for key in video:
        if key not in keep:
            continue
        simple = root.new_tag('Simple')
        name = root.new_tag('Name')
        name.string = key.upper()
        simple.append(name)
        sstring = root.new_tag('String')
        sstring.string = video[key]
        simple.append(sstring)
        tag.append(simple)
    return str(root)

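# mkv_metadata() produces a document along these lines (sketch; the Simple
# entries depend on which of title/description/url/genre are set):
#
#   <?xml version="1.0" encoding="utf-8"?>
#   <!DOCTYPE Tags SYSTEM "matroskatags.dtd">
#   <Tags><Tag>
#     <Targets><TargetTypeValue>50</TargetTypeValue></Targets>
#     <Simple><Name>TITLE</Name><String>Some title</String></Simple>
#   </Tag></Tags>
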
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-r", "--rss", help="Download all files in rss")
    group.add_argument("-u", "--url", help="Download video in url")
    group.add_argument("-m", "--mirror", help="Mirror all files", action="store_true")
    parser.add_argument("-n", "--no_act", help="Just print what would be done, don't do any downloading.", action="store_true")
    parser.add_argument("--no_remux", help="Don't remux into mkv", action="store_true")

    args = parser.parse_args()
    if args.rss:
        import feedparser
        d = feedparser.parse(args.rss)
        for e in d.entries:
            print("Downloading: %s" % e.title)
            if args.no_act:
                continue
            video = scrape_player_page({'title': e.title, 'url': e.link})
            if args.no_remux:
                continue
            remux(video)
            # print(e.description)
    elif args.mirror:
        if not os.path.exists('.seen'):
            os.mkdir('.seen')
        for video in parse_videolist():
            video['title'] = video['title'].replace('/', '_')
            print(video['title'] + '.mkv')
            print("{} of {}".format(video['num'], video['total']))

            if os.path.exists(os.path.join('.seen', video['title'])):
                print("Skipping")
                continue
            print("Downloading...")
            if args.no_act:
                continue
            open(os.path.join('.seen', video['title']), 'w').close()  # touch
            video = scrape_player_page(video)
            if args.no_remux:
                continue
            xml = mkv_metadata(video)
            remux(video, xml)

    else:
        if not args.no_act:
            video = scrape_player_page({'url': args.url})
            if not args.no_remux:
                remux(video)
        print("Downloaded {}".format(args.url))