# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-

# pylint has issues with urlparse: "some types could not be inferred"
# pylint: disable=E1103
from __future__ import absolute_import

import copy
import json
import re
from urllib.parse import urlparse

from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
from svtplay_dl.subtitle import subtitle

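
# Service for the MTG/Viafree family of sites (TV3/TV6/TV8/TV10 Play,
# Viasat 4 Play, TVPlay/Skaties, Viafree and friends; see supported_domains
# below). Video metadata and stream manifests are fetched from the
# playapi.mtgx.tv JSON API, and video IDs plus page state are scraped out of
# the HTML with regular expressions.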
class Viaplay(Service, OpenGraphThumbMixin):
    supported_domains = [
        "tv3play.se",
        "tv6play.se",
        "tv8play.se",
        "tv10play.se",
        "tv3play.no",
        "tv3play.dk",
        "tv6play.no",
        "viasat4play.no",
        "tv3play.ee",
        "tv3play.lv",
        "tv3play.lt",
        "tvplay.lv",
        "viagame.com",
        "juicyplay.se",
        "viafree.se",
        "viafree.dk",
        "viafree.no",
        "viafree.fi",
        "play.tv3.lt",
        "tv3play.tv3.ee",
        "tvplay.skaties.lv",
    ]

    def _get_video_id(self, url=None):
        """
        Extract the video id. It will try to avoid making an HTTP request
        if it can find the ID in the URL, but otherwise it will try
        to scrape it from the HTML document. Returns None in case it's
        unable to extract the ID at all.
        """
        if url:
            html_data = self.http.request("get", url).text
        else:
            html_data = self.get_urldata()
        match = re.search(r'data-video-id="([0-9]+)"', html_data)
        if match:
            return match.group(1)
        match = re.search(r'data-videoid="([0-9]+)"', html_data)
        if match:
            return match.group(1)
        match = re.search(r'"mediaGuid":"([0-9]+)"', html_data)
        if match:
            return match.group(1)

        clips = False
        slug = None
        match = re.search('params":({.*}),"query', self.get_urldata())
        if match:
            jansson = json.loads(match.group(1))
            if "seasonNumberOrVideoId" in jansson:
                season = jansson["seasonNumberOrVideoId"]
                match = re.search(r"\w-(\d+)$", season)
                if match:
                    season = match.group(1)
            else:
                match = self._conentpage(self.get_urldata())
                if match:  # this only happens on the program page?
                    janson2 = json.loads(match.group(1))
                    if janson2["formatPage"]["format"]:
                        season = janson2["formatPage"]["format"]["seasonNumber"]
                        return janson2["formatPage"]["format"]["videos"][str(season)]["program"][0]["id"]
                return None
            if "videoIdOrEpisodeNumber" in jansson:
                videp = jansson["videoIdOrEpisodeNumber"]
                match = re.search(r"(\w+)-(\d+)", videp)
                if match:
                    episodenr = match.group(2)
                else:
                    episodenr = videp
                    clips = True
                match = re.search(r"(s\w+)-(\d+)", season)
                if match:
                    season = match.group(2)
            else:
                # sometimes videoIdOrEpisodeNumber does not work; this is a workaround
                match = re.search(r"(episode|avsnitt)-(\d+)", self.url)
                if match:
                    episodenr = match.group(2)
                else:
                    episodenr = season
            if "slug" in jansson:
                slug = jansson["slug"]

            if clips:
                return episodenr
            else:
                match = self._conentpage(self.get_urldata())
                if match:
                    janson = json.loads(match.group(1))
                    for i in janson["formatPage"]["format"]["videos"].keys():
                        if "program" in janson["formatPage"]["format"]["videos"][str(i)]:
                            for n in janson["formatPage"]["format"]["videos"][i]["program"]:
                                if str(n["episodeNumber"]) and int(episodenr) == n["episodeNumber"] and int(season) == n["seasonNumber"]:
                                    if slug is None or slug == n["formatSlug"]:
                                        return n["id"]
                                elif n["id"] == episodenr:
                                    return episodenr

        parse = urlparse(self.url)
        match = re.search(r"/\w+/(\d+)", parse.path)
        if match:
            return match.group(1)
        match = re.search(r'iframe src="http://play.juicyplay.se[^\"]+id=(\d+)', html_data)
        if match:
            return match.group(1)

        match = re.search(r'<meta property="og:image" content="([\S]+)"', html_data)
        if match:
            return match.group(1).split("/")[-2]

        return None
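
    # Both video metadata and stream manifests come from the playapi.mtgx.tv
    # JSON API: /v3/videos/{id} (see _get_video_data) and /v3/videos/stream/{id}
    # (requested in get() below). The response keys relied on here ("type",
    # "sami_path", "subtitles_webvtt", "streams", "msg", ...) are what this
    # scraper has observed in practice, not a documented contract.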

    def get(self):
        parse = urlparse(self.url)
        vid = self._get_video_id()
        if vid is None:
            if parse.path[:6] == "/sport":
                result = self._sport()
                yield from result
                return
            else:
                yield ServiceError("Can't find video file for: {}".format(self.url))
                return

        data = self._get_video_data(vid)
        if data.status_code == 403:
            yield ServiceError("Can't play this because the video is geoblocked.")
            return
        dataj = json.loads(data.text)

        if "msg" in dataj:
            yield ServiceError(dataj["msg"])
            return

        if dataj["type"] == "live":
            self.config.set("live", True)

        self.output["id"] = vid
        self._autoname(dataj)

        streams = self.http.request("get", "http://playapi.mtgx.tv/v3/videos/stream/{}".format(vid))
        if streams.status_code == 403:
            yield ServiceError("Can't play this because the video is geoblocked.")
            return
        streamj = json.loads(streams.text)

        if "msg" in streamj:
            yield ServiceError("Can't play this because the video is either not found or geoblocked.")
            return

        if dataj["sami_path"]:
            if dataj["sami_path"].endswith("vtt"):
                subtype = "wrst"
            else:
                subtype = "sami"
            yield subtitle(copy.copy(self.config), subtype, dataj["sami_path"], output=self.output)
        if dataj["subtitles_webvtt"]:
            yield subtitle(copy.copy(self.config), "wrst", dataj["subtitles_webvtt"], output=self.output)
        if dataj["subtitles_for_hearing_impaired"]:
            if dataj["subtitles_for_hearing_impaired"].endswith("vtt"):
                subtype = "wrst"
            else:
                subtype = "sami"
            if self.config.get("get_all_subtitles"):
                yield subtitle(copy.copy(self.config), subtype, dataj["subtitles_for_hearing_impaired"], "-SDH", output=self.output)
            else:
                yield subtitle(copy.copy(self.config), subtype, dataj["subtitles_for_hearing_impaired"], output=self.output)

        if streamj["streams"]["medium"] and streamj["streams"]["medium"][:7] != "[empty]":
            filename = streamj["streams"]["medium"]
            if ".f4m" in filename:
                streams = hdsparse(self.config, self.http.request("get", filename, params={"hdcore": "3.7.0"}), filename, output=self.output)
                for n in list(streams.keys()):
                    yield streams[n]

        if streamj["streams"]["hls"]:
            streams = hlsparse(self.config, self.http.request("get", streamj["streams"]["hls"]), streamj["streams"]["hls"], output=self.output)
            for n in list(streams.keys()):
                yield streams[n]
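
    # Episode discovery: find_all_episodes() works out which seasons exist
    # (from the URL or from the page's embedded JSON) and _grab_episodes()
    # then fetches one listing page per season, e.g. "<baseurl>/sasong-2",
    # with the season slug chosen per market by _isswe(). Every episode's
    # sharingUrl is collected into the list of URLs to download.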

    def find_all_episodes(self, config):
        seasons = []
        match = re.search(r"(sasong|sesong)-(\d+)", urlparse(self.url).path)
        if match:
            seasons.append(match.group(2))
        else:
            match = self._conentpage(self.get_urldata())
            if match:
                janson = json.loads(match.group(1))
                for i in janson["formatPage"]["format"]["seasons"]:
                    seasons.append(i["seasonNumber"])

        episodes = self._grab_episodes(config, seasons)
        if config.get("all_last") > 0:
            return episodes[-config.get("all_last"):]
        return sorted(episodes)

    def _grab_episodes(self, config, seasons):
        episodes = []
        baseurl = self.url
        match = re.search(r"(saeson|sasong|sesong)-\d+", urlparse(self.url).path)
        if match:
            if re.search(r"(avsnitt|episode)", urlparse(baseurl).path):
                baseurl = baseurl[: baseurl.rfind("/")]
            baseurl = baseurl[: baseurl.rfind("/")]

        for i in seasons:
            url = "{}/{}-{}".format(baseurl, self._isswe(self.url), i)
            res = self.http.get(url)
            if res:
                match = self._conentpage(res.text)
                if match:
                    janson = json.loads(match.group(1))
                    if "program" in janson["formatPage"]["format"]["videos"][str(i)]:
                        for n in janson["formatPage"]["format"]["videos"][str(i)]["program"]:
                            episodes = self._videos_to_list(n["sharingUrl"], n["id"], episodes)
                    if config.get("include_clips"):
                        if "clip" in janson["formatPage"]["format"]["videos"][str(i)]:
                            for n in janson["formatPage"]["format"]["videos"][str(i)]["clip"]:
                                episodes = self._videos_to_list(n["sharingUrl"], n["id"], episodes)
        return episodes

    def _isswe(self, url):
        # The season slug differs per market: "sasong" (.se), "saeson" (.dk), "sesong" (everything else).
        if re.search(r"\.se$", urlparse(url).netloc):
            return "sasong"
        elif re.search(r"\.dk$", urlparse(url).netloc):
            return "saeson"
        else:
            return "sesong"

    def _conentpage(self, data):
        # Grabs the JSON state object the page embeds between
        # '={"sportsPlayer...' and '; window.__config'.
        return re.search(r'=({"sportsPlayer.*}); window.__config', data)

    def _videos_to_list(self, url, vid, episodes):
        dataj = json.loads(self._get_video_data(vid).text)
        if "msg" not in dataj:
            if url not in episodes:
                episodes.append(url)
        return episodes

    def _get_video_data(self, vid):
        url = "http://playapi.mtgx.tv/v3/videos/{}".format(vid)
        data = self.http.request("get", url)
        return data
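
    # _autoname() fills the naming fields that are later used to build the
    # output filename. For a regular episode the result is roughly
    # {"title": <format_slug>, "season": 7, "episode": 2, "episodename": None};
    # the numbers are only illustrative, the keys are the ones set below.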

    def _autoname(self, dataj):
        program = dataj["format_slug"]
        season = None
        episode = None
        title = None

        if "season" in dataj["format_position"]:
            if dataj["format_position"]["season"] and dataj["format_position"]["season"] > 0:
                season = dataj["format_position"]["season"]
        if season:
            if dataj["format_position"]["episode"] and len(dataj["format_position"]["episode"]) > 0:
                episode = dataj["format_position"]["episode"]

        if episode:
            try:
                episode = int(episode)
            except (TypeError, ValueError):
                title = episode
                episode = None
        else:
            title = dataj["summary"].replace("{} - ".format(dataj["format_title"]), "")
        if title and title[-1] == ".":
            title = title[: len(title) - 1]  # remove the last dot

        if dataj["type"] == "clip":
            # Remove the show name from the end of the filename,
            # e.g. Showname.S07.title instead of Showname.S07.title-showname
            match = re.search(r"(.+)-", dataj["title"])
            if match:
                title = match.group(1)
            else:
                title = dataj["title"]
            if "derived_from_id" in dataj:
                if dataj["derived_from_id"]:
                    parent_id = dataj["derived_from_id"]
                    parent_episode = self.http.request("get", "http://playapi.mtgx.tv/v3/videos/{}".format(parent_id))
                    if parent_episode.status_code != 403:  # if not geoblocked
                        datajparent = json.loads(parent_episode.text)
                        if not season and datajparent["format_position"]["season"] > 0:
                            season = datajparent["format_position"]["season"]
                        if len(datajparent["format_position"]["episode"]) > 0:
                            episode = datajparent["format_position"]["episode"]

        self.output["title"] = program
        self.output["season"] = season
        self.output["episode"] = episode
        self.output["episodename"] = title

        return True

    def _sport(self):
        content = self._conentpage(self.get_urldata())
        if not content:
            yield ServiceError("Can't find video file for: {}".format(self.url))
            return

        janson = json.loads(content.group(1))
        if not janson["sportsPlayer"]["currentVideo"]:
            yield ServiceError("Can't find video file for: {}".format(self.url))
            return

        self.output["title"] = janson["sportsPlayer"]["currentVideo"]["title"]

        res = self.http.request("get", janson["sportsPlayer"]["currentVideo"]["_links"]["streamLink"]["href"])
        if res.status_code == 403:
            yield ServiceError("Can't play this because the video is geoblocked.")
            return

        for i in res.json()["embedded"]["prioritizedStreams"]:
            streams = hlsparse(self.config, self.http.request("get", i["links"]["stream"]["href"]), i["links"]["stream"]["href"], output=self.output)
            if streams:
                for n in list(streams.keys()):
                    yield streams[n]
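

# Rough usage sketch (assumes the Service base class is constructed with
# (config, url), as elsewhere in svtplay_dl; that signature is an assumption
# and is not shown in this file):
#
#   service = Viaplay(config, "https://www.viafree.se/program/<show>/sasong-1/avsnitt-2")
#   for item in service.get():
#       ...  # subtitle objects, stream fetchers from hls/hds parsing, or ServiceError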