2014-08-11 20:41:09 +02:00
|
|
|
from __future__ import absolute_import
|
|
|
|
import re
|
2014-08-11 21:25:10 +02:00
|
|
|
import os
|
2015-11-29 16:32:53 +01:00
|
|
|
import json
|
2014-08-11 20:41:09 +02:00
|
|
|
import xml.etree.ElementTree as ET
|
|
|
|
|
|
|
|
from svtplay_dl.service import Service, OpenGraphThumbMixin
|
2015-08-31 22:25:37 +02:00
|
|
|
from svtplay_dl.utils import is_py2_old
|
2015-09-06 14:19:10 +02:00
|
|
|
from svtplay_dl.error import ServiceError
|
2014-08-11 20:41:09 +02:00
|
|
|
from svtplay_dl.log import log
|
|
|
|
from svtplay_dl.fetcher.rtmp import RTMP
|
2015-11-29 16:32:53 +01:00
|
|
|
from svtplay_dl.fetcher.hls import hlsparse
|
2014-08-11 20:41:09 +02:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-08-11 20:41:09 +02:00
|
|
|
# This is _very_ similar to mtvservices..
|
|
|
|
class Mtvnn(Service, OpenGraphThumbMixin):
    """Stream extractor for mtvnn.com-backed sites (Nickelodeon, Comedy Central)."""

    supported_domains = ['nickelodeon.se', "nickelodeon.nl", "nickelodeon.no", "www.comedycentral.se", "nickelodeon.dk"]

    def get(self):
        """Yield stream objects (RTMP and HLS) for the video on the current page.

        Yields:
            ServiceError when the video id cannot be found on the page or
            the video is excluded by the user's filters; otherwise RTMP
            fetcher objects followed by any HLS streams.
        """
        data = self.get_urldata()
        # The page embeds a data-mrss attribute pointing at the MRSS feed.
        match = re.search(r'data-mrss=[\'"](http://gakusei-cluster.mtvnn.com/v2/mrss.xml[^\'"]+)[\'"]', data)
        if not match:
            yield ServiceError("Can't find id for the video")
            return

        # Fetch and parse the MRSS feed describing the video.
        data = self.http.request("get", match.group(1)).content
        xml = ET.XML(data)
        mediagen = xml.find("channel").find("item").find("{http://search.yahoo.com/mrss/}group")
        title = xml.find("channel").find("item").find("title").text

        # When the output name is auto-generated, use the feed title as the
        # filename, preserving any directory component already chosen.
        if self.options.output_auto:
            directory = os.path.dirname(self.options.output)
            if directory:  # idiomatic truthiness test; was len(directory)
                self.options.output = os.path.join(directory, title)
            else:
                self.options.output = title

        if self.exclude():
            yield ServiceError("Excluding video")
            return

        # RTMP download needs the player SWF URL for verification (-W flag);
        # resolve any redirect first.
        swfurl = mediagen.find("{http://search.yahoo.com/mrss/}player").attrib["url"]
        self.options.other = "-W %s" % self.http.check_redirect(swfurl)

        contenturl = mediagen.find("{http://search.yahoo.com/mrss/}content").attrib["url"]
        filename = os.path.basename(contenturl)
        # The v2 API returns JSON whose "src" field is the HLS master playlist.
        data = self.http.request("get", "http://videos.mtvnn.com/api/v2/%s.js?video_format=hls" % filename).text
        dataj = json.loads(data)

        content = self.http.request("get", contenturl).content
        xml = ET.XML(content)
        ss = xml.find("video").find("item")
        # getiterator() only exists on very old Python 2 ElementTree builds;
        # iter() is the modern equivalent.
        if is_py2_old:
            sa = list(ss.getiterator("rendition"))
        else:
            sa = list(ss.iter("rendition"))

        # One RTMP stream per rendition (bitrate variant).
        for i in sa:
            yield RTMP(self.options, i.find("src").text, i.attrib["bitrate"])

        streams = hlsparse(self.options, self.http.request("get", dataj["src"]), dataj["src"])
        if streams:
            for n in list(streams.keys()):
                yield streams[n]

    def find_all_episodes(self, options):
        """Return episode-page URLs for the franchise shown on the current page.

        Args:
            options: parsed command-line options; options.all_last caps how
                many episodes are returned (break when the count reaches it).

        Returns:
            A list of episode URLs, or None when the franchise id or the
            episode list cannot be scraped from the page.
        """
        match = re.search(r"data-franchise='([^']+)'", self.get_urldata())
        if match is None:
            # Fixed log message: original read "Couldn't program id".
            log.error("Couldn't find program id")
            return
        programid = match.group(1)
        match = re.findall(r"<li class='([a-z]+ )?playlist-item( [a-z]+)*?'( data-[-a-z]+='[^']+')* data-item-id='([^']+)'", self.get_urldata())
        if not match:
            log.error("Couldn't retrieve episode list")
            return
        # The fourth capture group of each tuple is the episode id.
        episodNr = [i[3] for i in match]
        episodes = []
        n = 0
        for i in sorted(episodNr):
            if n == options.all_last:
                break
            episodes.append("http://www.nickelodeon.se/serier/%s-something/videos/%s-something" % (programid, i))
            n += 1
        return episodes
|