from __future__ import absolute_import
import re
import json
import xml.etree.ElementTree as ET
from urllib.parse import urlparse

from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.error import ServiceError
from svtplay_dl.log import log
from svtplay_dl.fetcher.hls import hlsparse


# This is _very_ similar to mtvservices..
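# Handles the Nordic Nickelodeon and Comedy Central sites listed in supported_domains.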
class Mtvnn(Service, OpenGraphThumbMixin):
    supported_domains = ['nickelodeon.se', "nickelodeon.nl", "nickelodeon.no", "www.comedycentral.se", "nickelodeon.dk"]

    def get(self):
        data = self.get_urldata()
        parse = urlparse(self.url)

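        # Swedish sites: the page embeds a video-player <div> whose data-id identifies
        # the episode; it is resolved below through the mtvnservices MRSS feed.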
        if parse.netloc.endswith("se"):
            match = re.search(r'<div class="video-player" (.*)>', data)
            if not match:
                yield ServiceError("Can't find video info")
                return

            match_id = re.search(r'data-id="([0-9a-fA-F|\-]+)" ', match.group(1))
            if not match_id:
                yield ServiceError("Can't find video info")
                return

            wanted_id = match_id.group(1)
            url_service = "http://feeds.mtvnservices.com/od/feed/intl-mrss-player-feed?mgid=mgid:arc:episode:nick.intl:{0}" \
                          "&arcEp=nickelodeon.se&imageEp=nickelodeon.se&stage=staging&accountOverride=intl.mtvi.com&ep=a9cc543c".format(wanted_id)
            service_asset = self.http.request("get", url_service)
            match_guid = re.search('<guid isPermaLink="false">(.*)</guid>', service_asset.text)
            if not match_guid:
                yield ServiceError("Can't find video info")
                return

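            # Exchange the feed GUID for an HLS manifest via the MediaGenerator service.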
            hls_url = "https://mediautilssvcs-a.akamaihd.net/services/MediaGenerator/{0}?arcStage=staging&accountOverride=intl.mtvi.com&" \
                      "billingSection=intl&ep=a9cc543c&acceptMethods=hls".format(match_guid.group(1))
            hls_asset = self.http.request("get", hls_url)
            xml = ET.XML(hls_asset.text)

            if xml.find("./video") is not None and xml.find("./video").find("item") is not None \
                    and xml.find("./video").find("item").find("rendition") is not None \
                    and xml.find("./video").find("item").find("rendition").find("src") is not None:
                hls_url = xml.find("./video").find("item").find("rendition").find("src").text
                stream = hlsparse(self.config, self.http.request("get", hls_url), hls_url, output=self.output)
                for key in list(stream.keys()):
                    yield stream[key]
            return

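        # Other (non-.se) sites: a data-mrss attribute on the page points at an MRSS feed
        # that supplies the title and the playlist id used against the mtvnn.com API.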
        match = re.search(r'data-mrss=[\'"](http://gakusei-cluster.mtvnn.com/v2/mrss.xml[^\'"]+)[\'"]', data)
        if not match:
            yield ServiceError("Can't find id for the video")
            return

        mrssxmlurl = match.group(1)
        data = self.http.request("get", mrssxmlurl).content
        xml = ET.XML(data)
        title = xml.find("channel").find("item").find("title").text
        self.output["title"] = title

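        # The country code from gon.viacom_config and the playlist id from the MRSS URL
        # together form the api.mtvnn.com request that lists the playable HLS videos.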
        match = re.search("gon.viacom_config=([^;]+);", self.get_urldata())
        if match:
            countrycode = json.loads(match.group(1))["country_code"].replace("_", "/")

            match = re.search("mtvnn.com:([^&]+)", mrssxmlurl)
            if match:
                urlpart = match.group(1).replace("-", "/").replace("playlist", "playlists")  # the API uses "playlists"; unclear where that naming comes from
                hlsapi = "http://api.mtvnn.com/v2/{0}/{1}.json?video_format=m3u8&callback=&".format(countrycode, urlpart)
                data = self.http.request("get", hlsapi).text

                dataj = json.loads(data)
                for i in dataj["local_playlist_videos"]:
                    streams = hlsparse(self.config, self.http.request("get", i["url"]), i["url"], output=self.output)
                    for n in list(streams.keys()):
                        yield streams[n]

    def find_all_episodes(self, config):
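        # Episode listing is scraped from the page markup: data-franchise carries the
        # program id and every playlist <li> carries a data-item-id.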
        match = re.search(r"data-franchise='([^']+)'", self.get_urldata())
        if match is None:
            log.error("Couldn't find program id")
            return
        programid = match.group(1)
        match = re.findall(r"<li class='([a-z]+ )?playlist-item( [a-z]+)*?'( data-[-a-z]+='[^']+')* data-item-id='([^']+)'",
                           self.get_urldata())
        if not match:
            log.error("Couldn't retrieve episode list")
            return
        episodNr = []
        for i in match:
            episodNr.append(i[3])
        episodes = []
        n = 0
        for i in sorted(episodNr):
            if n == config.get("all_last"):
                break
            episodes.append("http://www.nickelodeon.se/serier/{0}-something/videos/{1}-something".format(programid, i))
            n += 1
        return episodes


class MtvMusic(Service, OpenGraphThumbMixin):
    supported_domains = ['mtv.se']

    def get(self):
        data = self.get_urldata()

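        # mtv.se embeds its playlist as JSON in window.pagePlaylist; the wanted entry is
        # picked by the numeric id taken from the last part of the URL path.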
        match = re.search('window.pagePlaylist = (.*);', data)
        if not match:
            yield ServiceError("Can't find video info")
            return

        try:
            janson = json.loads(match.group(1))
        except Exception:
            yield ServiceError("Can't decode api request: {0}".format(match.group(1)))
            return

        parse = urlparse(self.url)
        wanted_id = parse.path.split("/")[-1].split("-")[0]

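        # The matching entry's video_token is exchanged for an HLS manifest through the
        # MediaGenerator service, as in Mtvnn.get() above.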
        for n in janson:
            if wanted_id == str(n["id"]):
                mrssxmlurl = "http://media-utils.mtvnservices.com/services/MediaGenerator/" \
                             "mgid:arc:video:mtv.se:{0}?acceptMethods=hls".format(n["video_token"])
                hls_asset = self.http.request("get", mrssxmlurl)
                xml = ET.XML(hls_asset.text)

                if xml.find("./video") is not None and xml.find("./video").find("item") is not None and \
                        xml.find("./video").find("item").find("rendition") is not None and \
                        xml.find("./video").find("item").find("rendition").find("src") is not None:
                    hls_url = xml.find("./video").find("item").find("rendition").find("src").text
                    stream = hlsparse(self.config, self.http.request("get", hls_url), hls_url, output=self.output)
                    if stream:
                        for key in list(stream.keys()):
                            yield stream[key]