# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-

# pylint has issues with urlparse: "some types could not be inferred"
# pylint: disable=E1103
from __future__ import absolute_import
import sys
import re
import xml.etree.ElementTree as ET

from svtplay_dl.utils.urllib import urlparse
from svtplay_dl.service import Service
from svtplay_dl.utils import get_http_data, select_quality, check_redirect, is_py2_old
from svtplay_dl.log import log
from svtplay_dl.fetcher.rtmp import download_rtmp
from svtplay_dl.fetcher.http import download_http


class Justin(Service):
    # Justin and Twitch use language subdomains, e.g. en.www.twitch.tv. They
    # are usually two characters, but may have a country suffix as well (e.g.
    # zh-tw, zh-cn and pt-br).
    supported_domains_re = [
        r'^(?:(?:[a-z]{2}-)?[a-z]{2}\.)?(www\.)?twitch\.tv$',
        r'^(?:(?:[a-z]{2}-)?[a-z]{2}\.)?(www\.)?justin\.tv$']
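
    # For illustration only: hostnames these patterns are intended to accept
    # include "twitch.tv", "www.twitch.tv", "en.www.twitch.tv" and
    # "zh-tw.www.twitch.tv" (plus the justin.tv equivalents); the language
    # prefix also matches without "www.", e.g. "en.twitch.tv".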

    def get(self, options):
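        # Two URL forms are handled: /<channel>/c/<id> refers to an archived
        # broadcast ("chapter") and is downloaded over plain HTTP, while any
        # other path is treated as a channel name and fetched as a live RTMP
        # stream.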
        parse = urlparse(self.url)
        match = re.search(r"/[-a-zA-Z0-9_]+/c/(\d+)", parse.path)
        if match:
            url = "http://api.justin.tv/api/broadcast/by_chapter/%s.xml?onsite=true" % match.group(1)
            data = get_http_data(url)
            xml = ET.XML(data)
            url = xml.find("archive").find("video_file_url").text

            download_http(options, url)
        else:
            match = re.search(r"/(.*)", parse.path)
            if match:
                user = match.group(1)
                data = get_http_data(self.url)
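                # The channel page embeds the player via swfobject's
                # embedSWF(); its first argument is scraped to get the player
                # swf URL, which is used later when invoking the RTMP
                # downloader.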
                match = re.search(r"embedSWF\(\"(.*)\", \"live", data)
                if not match:
                    log.error("Can't find swf file.")
                    sys.exit(2)
                options.other = check_redirect(match.group(1))
                url = "http://usher.justin.tv/find/%s.xml?type=any&p=2321" % user
                options.live = True
                data = get_http_data(url)
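                # Some tags in the usher XML start with a digit (quality
                # labels such as <480p>, presumably), which is not a valid
                # XML element name, so prefix them with an underscore before
                # handing the document to ElementTree.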
                data = re.sub(r"<(\d+)", r"<_\g<1>", data)
                data = re.sub(r"</(\d+)", r"</_\g<1>", data)
                xml = ET.XML(data)
                if is_py2_old:
                    sa = list(xml)
                else:
                    sa = list(xml)
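                # Collect the advertised streams keyed on video height, so
                # select_quality() can pick one matching the requested
                # quality.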
                streams = {}
                for i in sa:
                    try:
                        stream = {}
                        stream["token"] = i.find("token").text
                        stream["url"] = "%s/%s" % (i.find("connect").text, i.find("play").text)
                        streams[int(i.find("video_height").text)] = stream
                    except AttributeError:
                        pass
                if len(streams) > 0:
                    test = select_quality(options, streams)
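                    # These are rtmpdump-style switches passed through via
                    # options.other: -W is the swf URL (swf verification) and
                    # -j the Justin.tv/Twitch access token; this presumably
                    # requires an rtmpdump build that understands the jtv
                    # token option.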
                    options.other = "-j '%s' -W %s" % (test["token"], options.other)
                    options.resume = False
                    download_rtmp(options, test["url"])
                else:
                    log.error("Can't find any streams")
                    sys.exit(2)