1
0
mirror of https://github.com/spaam/svtplay-dl.git synced 2024-11-30 23:24:16 +01:00
svtplay-dl/lib/svtplay_dl/service/viaplay.py

158 lines
6.0 KiB
Python
Raw Normal View History

2013-03-02 21:26:28 +01:00
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
# pylint has issues with urlparse: "some types could not be inferred"
# pylint: disable=E1103
from __future__ import absolute_import
import re
import json
2014-06-07 20:43:40 +02:00
import copy
2015-10-24 21:55:33 +02:00
import os
2015-10-24 21:55:33 +02:00
from svtplay_dl.utils import filenamify
2014-12-08 23:07:02 +01:00
from svtplay_dl.utils.urllib import urlparse
2014-01-26 01:54:20 +01:00
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.log import log
from svtplay_dl.fetcher.rtmp import RTMP
2014-05-01 16:52:05 +02:00
from svtplay_dl.fetcher.hds import hdsparse
2015-10-24 21:55:33 +02:00
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.subtitle import subtitle
from svtplay_dl.error import ServiceError
2015-09-15 20:10:32 +02:00
2014-01-26 01:54:20 +01:00
class Viaplay(Service, OpenGraphThumbMixin):
    """Service handler for MTG's Viaplay-family "play" sites.

    Resolves a numeric video id from the page or URL, queries the
    playapi.mtgx.tv JSON API for metadata and stream manifests, and
    yields subtitle/HDS/HLS/RTMP fetchers.
    """

    # Hostnames this service claims; the dispatcher matches the target
    # URL's domain against this list to select a handler.
    supported_domains = [
        'tv3play.se', 'tv6play.se', 'tv8play.se', 'tv10play.se',
        'tv3play.no', 'tv3play.dk', 'tv6play.no', 'viasat4play.no',
        'tv3play.ee', 'tv3play.lv', 'tv3play.lt', 'tvplay.lv', 'viagame.com',
        'juicyplay.se']
def _get_video_id(self):
"""
Extract video id. It will try to avoid making an HTTP request
if it can find the ID in the URL, but otherwise it will try
to scrape it from the HTML document. Returns None in case it's
unable to extract the ID at all.
"""
2015-08-31 17:14:08 +02:00
html_data = self.get_urldata()
match = re.search(r'data-video-id="([0-9]+)"', html_data)
if match:
return match.group(1)
match = re.search(r'data-videoid="([0-9]+)', html_data)
if match:
return match.group(1)
parse = urlparse(self.url)
match = re.search(r'/\w+/(\d+)', parse.path)
if match:
return match.group(1)
2016-02-22 20:43:57 +01:00
match = re.search(r'iframe src="http://play.juicyplay.se[^\"]+id=(\d+)', html_data)
if match:
return match.group(1)
return None
    def get(self):
        """Yield subtitle and stream fetchers (or ServiceError) for this URL.

        Generator: errors are yielded as ServiceError objects rather than
        raised, matching the other service handlers in this project.
        """
        vid = self._get_video_id()
        if vid is None:
            yield ServiceError("Can't find video file for: %s" % self.url)
            return

        # Video metadata (title, type, subtitle paths) from the play API.
        url = "http://playapi.mtgx.tv/v3/videos/%s" % vid
        self.options.other = ""
        data = self.http.request("get", url)
        if data.status_code == 403:
            yield ServiceError("Can't play this because the video is geoblocked.")
            return
        dataj = json.loads(data.text)
        if "msg" in dataj:
            # The API signals errors via a "msg" field in the JSON body.
            yield ServiceError(dataj["msg"])
            return

        if dataj["type"] == "live":
            self.options.live = True
        if self.exclude(self.options):
            yield ServiceError("Excluding video")
            return

        # Separate API call for the actual stream manifests.
        streams = self.http.request("get", "http://playapi.mtgx.tv/v3/videos/stream/%s" % vid)
        if streams.status_code == 403:
            yield ServiceError("Can't play this because the video is geoblocked.")
            return
        streamj = json.loads(streams.text)
        if "msg" in streamj:
            yield ServiceError("Can't play this because the video is either not found or geoblocked.")
            return

        # Derive the output filename from the metadata when auto-naming.
        if self.options.output_auto:
            directory = os.path.dirname(self.options.output)
            self.options.service = "tv3play"
            basename = self._autoname(dataj)
            title = "%s-%s-%s" % (basename, vid, self.options.service)
            if len(directory):
                self.options.output = os.path.join(directory, title)
            else:
                self.options.output = title

        # Subtitles: copy.copy so later option mutations don't leak into
        # fetchers already yielded.
        if dataj["sami_path"]:
            yield subtitle(copy.copy(self.options), "sami", dataj["sami_path"])
        if dataj["subtitles_for_hearing_impaired"]:
            yield subtitle(copy.copy(self.options), "sami", dataj["subtitles_for_hearing_impaired"])

        if streamj["streams"]["medium"]:
            filename = streamj["streams"]["medium"]
            if ".f4m" in filename:
                # Adobe HDS manifest.
                streams = hdsparse(self.options, self.http.request("get", filename, params={"hdcore": "3.7.0"}), filename)
                if streams:
                    for n in list(streams.keys()):
                        yield streams[n]
            else:
                # Otherwise treat the URL as an RTMP stream: split the path
                # into app (first segment) and playpath (rest).
                parse = urlparse(filename)
                match = re.search("^(/[^/]+)/(.*)", parse.path)
                if not match:
                    yield ServiceError("Can't get rtmpparse info")
                    return
                # NOTE(review): parse.port is None when the URL has no
                # explicit port — this would produce "...:None..."; confirm
                # upstream always includes a port here.
                filename = "%s://%s:%s%s" % (parse.scheme, parse.hostname, parse.port, match.group(1))
                path = "-y %s" % match.group(2)
                self.options.other = "-W http://flvplayer.viastream.viasat.tv/flvplayer/play/swf/player.swf %s" % path
                yield RTMP(copy.copy(self.options), filename, 800)

        if streamj["streams"]["hls"]:
            streams = hlsparse(self.options, self.http.request("get", streamj["streams"]["hls"]), streamj["streams"]["hls"])
            if streams:
                for n in list(streams.keys()):
                    yield streams[n]
def find_all_episodes(self, options):
2015-08-31 17:14:08 +02:00
format_id = re.search(r'data-format-id="(\d+)"', self.get_urldata())
if not format_id:
2014-11-25 21:46:33 +01:00
log.error("Can't find video info for all episodes")
return
2015-09-01 23:52:00 +02:00
data = self.http.request("get", "http://playapi.mtgx.tv/v1/sections?sections=videos.one,seasons.videolist&format=%s" % format_id.group(1)).text
jsondata = json.loads(data)
videos = jsondata["_embedded"]["sections"][1]["_embedded"]["seasons"][0]["_embedded"]["episodelist"]["_embedded"]["videos"]
2014-12-26 02:04:29 +01:00
n = 0
episodes = []
for i in videos:
if n == options.all_last:
break
episodes.append(i["sharing"]["url"])
n += 1
return episodes
2015-10-24 21:55:33 +02:00
def _autoname(self, dataj):
program = dataj["format_slug"]
season = dataj["format_position"]["season"]
episode = None
if season:
if len(dataj["format_position"]["episode"]) > 0:
episode = dataj["format_position"]["episode"]
name = filenamify(program)
if season:
name = "%s.s%s" % (name, season)
if episode:
name = "%se%s" % (name, episode)
return name