from __future__ import absolute_import
import re
import os
import json
import xml.etree.ElementTree as ET

from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils import is_py2_old
from svtplay_dl.error import ServiceError
from svtplay_dl.log import log
from svtplay_dl.fetcher.rtmp import RTMP
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.utils.urllib import urlparse


# This is _very_ similar to mtvservices..
class Mtvnn(Service, OpenGraphThumbMixin):
    supported_domains = ['nickelodeon.se', "nickelodeon.nl", "nickelodeon.no", "www.comedycentral.se", "nickelodeon.dk"]

    def get(self):
        data = self.get_urldata()
        parse = urlparse(self.url)
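
        # Swedish sites embed the video id in the player markup and are
        # resolved through the mtvnservices feed.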
        if parse.netloc.endswith("se"):
            match = re.search('<div class="video-player" (.*)>', data)
            if not match:
                yield ServiceError("Can't find video info")
                return
            match_id = re.search('data-id="([0-9a-fA-F|\-]+)" ', match.group(1))
            if not match_id:
                yield ServiceError("Can't find video info")
                return
            wanted_id = match_id.group(1)
            url_service = "http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed?mgid=mgid:arc:episode:nick.intl:{0}&arcEp=nickelodeon.se&imageEp=nickelodeon.se&stage=staging&accountOverride=intl.mtvi.com&ep=a9cc543c".format(wanted_id)
            service_asset = self.http.request("get", url_service)
            match_guid = re.search('<guid isPermaLink="false">(.*)</guid>', service_asset.text)
            if not match_guid:
                yield ServiceError("Can't find video info")
                return
            hls_url = "https://mediautilssvcs-a.akamaihd.net/services/MediaGenerator/{0}?arcStage=staging&accountOverride=intl.mtvi.com&billingSection=intl&ep=a9cc543c&acceptMethods=hls".format(match_guid.group(1))
            hls_asset = self.http.request("get", hls_url)
            xml = ET.XML(hls_asset.text)
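
            # The MediaGenerator response nests the stream URL under
            # video/item/rendition/src; guard each level before reading it.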
            if xml.find("./video") is not None and xml.find("./video").find("item") is not None and \
                    xml.find("./video").find("item").find("rendition") is not None and \
                    xml.find("./video").find("item").find("rendition").find("src") is not None:
                hls_url = xml.find("./video").find("item").find("rendition").find("src").text
                stream = hlsparse(self.options, self.http.request("get", hls_url), hls_url)
                if stream:
                    for key in list(stream.keys()):
                        yield stream[key]
            return
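
        # The other domains expose an MRSS feed URL in a data-mrss attribute.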
        match = re.search(r'data-mrss=[\'"](http://gakusei-cluster.mtvnn.com/v2/mrss.xml[^\'"]+)[\'"]', data)
        if not match:
            yield ServiceError("Can't find id for the video")
            return

        mrssxmlurl = match.group(1)
        data = self.http.request("get", mrssxmlurl).content
        xml = ET.XML(data)
        mediagen = xml.find("channel").find("item").find("{http://search.yahoo.com/mrss/}group")
        title = xml.find("channel").find("item").find("title").text
        if self.options.output_auto:
            directory = os.path.dirname(self.options.output)
            if len(directory):
                self.options.output = os.path.join(directory, title)
            else:
                self.options.output = title

        if self.exclude():
            yield ServiceError("Excluding video")
            return

        swfurl = mediagen.find("{http://search.yahoo.com/mrss/}player").attrib["url"]
        self.options.other = "-W {0}".format(self.http.check_redirect(swfurl))
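
        # The MRSS "content" element points at a mediagen document that lists
        # the available RTMP renditions.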
        contenturl = mediagen.find("{http://search.yahoo.com/mrss/}content").attrib["url"]
        content = self.http.request("get", contenturl).content
        xml = ET.XML(content)
        ss = xml.find("video").find("item")
        if is_py2_old:
            sa = list(ss.getiterator("rendition"))
        else:
            sa = list(ss.iter("rendition"))

        for i in sa:
            yield RTMP(self.options, i.find("src").text, i.attrib["bitrate"])
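
        # Some pages also carry HLS playlists via the mtvnn JSON API,
        # keyed by the country code found in gon.viacom_config.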
        match = re.search("gon.viacom_config=([^;]+);", self.get_urldata())
        if match:
            countrycode = json.loads(match.group(1))["country_code"].replace("_", "/")

            match = re.search("mtvnn.com:([^&]+)", mrssxmlurl)
            if match:
                # the API uses "playlists" here; unclear where that form comes from
                urlpart = match.group(1).replace("-", "/").replace("playlist", "playlists")
                hlsapi = "http://api.mtvnn.com/v2/{0}/{1}.json?video_format=m3u8&callback=&".format(countrycode, urlpart)
                data = self.http.request("get", hlsapi).text

                dataj = json.loads(data)
                for i in dataj["local_playlist_videos"]:
                    streams = hlsparse(self.options, self.http.request("get", i["url"]), i["url"])
                    if streams:
                        for n in list(streams.keys()):
                            yield streams[n]

    def find_all_episodes(self, options):
        match = re.search(r"data-franchise='([^']+)'", self.get_urldata())
        if match is None:
            log.error("Couldn't find program id")
            return
        programid = match.group(1)
        match = re.findall(r"<li class='([a-z]+ )?playlist-item( [a-z]+)*?'( data-[-a-z]+='[^']+')* data-item-id='([^']+)'",
                           self.get_urldata())
        if not match:
            log.error("Couldn't retrieve episode list")
            return
        episodNr = []
        for i in match:
            episodNr.append(i[3])
        episodes = []
        n = 0
        for i in sorted(episodNr):
            if n == options.all_last:
                break
            episodes.append("http://www.nickelodeon.se/serier/{0}-something/videos/{1}-something".format(programid, i))
            n += 1
        return episodes


class MtvMusic(Service, OpenGraphThumbMixin):
    supported_domains = ['mtv.se']

    def get(self):
        data = self.get_urldata()

        if self.exclude():
            yield ServiceError("Excluding video")
            return
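
        # The page playlist is embedded as a JSON blob in window.pagePlaylist.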
        match = re.search('window.pagePlaylist = (.*);', data)
        if not match:
            yield ServiceError("Can't find video info")
            return

        try:
            janson = json.loads(match.group(1))
        except Exception:
            yield ServiceError("Can't decode api request: {0}".format(match.group(1)))
            return

        parse = urlparse(self.url)
        wanted_id = parse.path.split("/")[-1].split("-")[0]

        for n in janson:
            if wanted_id == str(n["id"]):
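                # Resolve the video token to an HLS playlist via MediaGenerator.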
                mrssxmlurl = "http://media-utils.mtvnservices.com/services/MediaGenerator/mgid:arc:video:mtv.se:{0}?acceptMethods=hls".format(n["video_token"])
                hls_asset = self.http.request("get", mrssxmlurl)
                xml = ET.XML(hls_asset.text)

                if xml.find("./video") is not None and xml.find("./video").find("item") is not None and \
                        xml.find("./video").find("item").find("rendition") is not None and \
                        xml.find("./video").find("item").find("rendition").find("src") is not None:
                    hls_url = xml.find("./video").find("item").find("rendition").find("src").text
                    stream = hlsparse(self.options, self.http.request("get", hls_url), hls_url)
                    if stream:
                        for key in list(stream.keys()):
                            yield stream[key]