# CLI entry point: download videos from an RSS feed (-r), mirror the whole
# video list (-m), or fetch a single URL (-u), optionally remuxing to MKV.
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-r", "--rss", help="Download all files in rss")
group.add_argument("-u", "--url", help="Download video in url")
group.add_argument("-m", "--mirror", help="Mirror all files", action="store_true")
parser.add_argument("-n", "--no_act", help="Just print what would be done, don't do any downloading.", action="store_true")
parser.add_argument("--no_remux", help="Don't remux into mkv", action="store_true")

args = parser.parse_args()
if args.rss:
    # Lazy import: feedparser is only needed for RSS mode.
    import feedparser
    feed = feedparser.parse(args.rss)
    for entry in feed.entries:
        print("Downloading: %s" % entry.title)
        if args.no_act:
            continue
        filename = scrape_player_page(entry.link, entry.title)
        if args.no_remux:
            continue
        # BUG FIX: was `self.remux(...)` — there is no `self` at module
        # level; the mirror branch calls plain `remux`, so do the same here.
        remux({'title': entry.title})
elif args.mirror:
    # BUG FIX: was a plain `if`, so after the RSS branch ran, the trailing
    # `else` (single-URL mode) also executed with args.url == None.
    for video in parse_videolist():
        video['title'] = video['title'].replace('/', '_')
        print(video['title'] + '.mkv')
        print("{} of {}".format(video['num'], video['total']))
        if os.path.exists(video['title'] + '.mkv'):
            print("Skipping")
            continue
        print("Downloading...")
        if args.no_act:
            continue
        video = scrape_player_page(video['url'], video['title'])
        if args.no_remux:
            continue
        remux(video)
else:
    # Single-URL mode: the mutually exclusive group is required, so
    # args.url is set whenever neither --rss nor --mirror was given.
    if not args.no_act:
        video = scrape_player_page(args.url, None)
        if not args.no_remux:
            # BUG FIX: was remux({'title': e.title}) — `e` only exists in
            # the RSS branch and raised NameError here; remux the scraped
            # video dict, matching the mirror branch.
            remux(video)
        print("Downloaded {}".format(args.url))