# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import, unicode_literals
import re
import xml.etree.ElementTree as ET
import json
import copy
from datetime import datetime, timedelta
from urllib.parse import urlparse, parse_qs, quote_plus
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.rtmp import RTMP
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.subtitle import subtitle
from svtplay_dl.error import ServiceError


class Tv4play(Service, OpenGraphThumbMixin):
    supported_domains = ['tv4play.se', 'tv4.se']

    def get(self):
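        """Yield streams, subtitles and ServiceError objects for a TV4/TV4 Play URL."""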
        parse = urlparse(self.url)
        if parse.path[:8] == "/kanaler":
            end_time_stamp = (datetime.utcnow() - timedelta(minutes=1, seconds=20)).replace(microsecond=0)
            start_time_stamp = end_time_stamp - timedelta(minutes=1)

            url = "https://bbr-l2v.akamaized.net/live/{0}/master.m3u8?in={1}&out={2}?".format(parse.path[9:],
                                                                                              start_time_stamp.isoformat(),
                                                                                              end_time_stamp.isoformat())

            self.config.set("live", True)
            self.options.hls_time_stamp = True
            streams = hlsparse(self.config, self.http.request("get", url), url, output=self.output)
            for n in list(streams.keys()):
                yield streams[n]
            return

        data = self.get_urldata()
        vid = findvid(self.url, data)
        if not vid:
            yield ServiceError("Can't find video id for {0}.".format(self.url))
            return
url = "http://prima.tv4play.se/api/web/asset/{0}/play".format(vid)
data = self.http.request("get", url, cookies=self.cookies)
2015-08-30 10:20:47 +02:00
if data.status_code == 401:
xml = ET.XML(data.content)
2014-09-07 22:51:44 +02:00
code = xml.find("code").text
if code == "SESSION_NOT_AUTHENTICATED":
yield ServiceError("Can't access premium content")
2014-09-28 19:34:56 +02:00
elif code == "ASSET_PLAYBACK_INVALID_GEO_LOCATION":
yield ServiceError("Can't download this video because of geoblock.")
2014-09-07 22:51:44 +02:00
else:
2017-10-07 15:06:39 +02:00
yield ServiceError("Can't find any info for that video.")
2014-09-07 22:51:44 +02:00
return
2015-08-30 11:27:31 +02:00
if data.status_code == 404:
2017-10-07 15:06:39 +02:00
yield ServiceError("Can't find the video api.")
2015-08-30 11:27:31 +02:00
return
        xml = ET.XML(data.content)
        ss = xml.find("items")
        sa = list(ss.iter("item"))

        if xml.find("live").text:
            self.config.set("live", (xml.find("live").text != "false"))
        if xml.find("drmProtected").text == "true":
            yield ServiceError("We can't download DRM protected content from this site.")
            return
        if xml.find("playbackStatus").text == "NOT_STARTED":
            yield ServiceError("Can't download something that is not started.")
            return

        basename = self._autoname(vid)
        if not basename:
            yield ServiceError("Can't find vid id for autonaming.")
            return
        for i in sa:
            if i.find("mediaFormat").text == "mp4":
                base = urlparse(i.find("base").text)
                parse = urlparse(i.find("url").text)
                if "rtmp" in base.scheme:
                    swf = "http://www.tv4play.se/flash/tv4playflashlets.swf"
                    yield RTMP(copy.copy(self.config), i.find("base").text, i.find("bitrate").text, output=self.output,
                               other="-W {0} -y {1}".format(swf, i.find("url").text))
                elif parse.path[len(parse.path) - 3:len(parse.path)] == "f4m":
                    streams = hdsparse(self.config, self.http.request("get", i.find("url").text,
                                                                      params={"hdcore": "3.7.0"}), i.find("url").text, output=self.output)
                    for n in list(streams.keys()):
                        yield streams[n]
            elif i.find("mediaFormat").text == "webvtt":
                yield subtitle(copy.copy(self.config), "wrst", i.find("url").text, output=self.output)
        url = "https://prima.tv4play.se/api/web/asset/{0}/play?protocol=hls3".format(vid)
        data = self.http.request("get", url, cookies=self.cookies).content
        xml = ET.XML(data)
        ss = xml.find("items")
        sa = list(ss.iter("item"))

        for i in sa:
            if i.find("mediaFormat").text == "mp4":
                parse = urlparse(i.find("url").text)
                if parse.path.endswith("m3u8"):
                    streams = hlsparse(self.config, self.http.request("get", i.find("url").text), i.find("url").text, output=self.output)
                    for n in list(streams.keys()):
                        yield streams[n]

    def _get_show_info(self):
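        """Fetch the episode listing for the current show from the TV4 web API."""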
        show = self._get_showname()
        live = str(self.config.get("live")).lower()
        data = self.http.request("get", "http://webapi.tv4play.se/play/video_assets?type=episode&is_live={0}&"
                                        "platform=web&node_nids={1}&per_page=99999".format(live, show)).text
        jsondata = json.loads(data)
        return jsondata

    def _get_clip_info(self, vid):
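        """Page through the show's clips and return the title of the clip matching vid, or None."""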
        show = self._get_showname()
        page = 1
        assets = page * 1000
        run = True
        live = str(self.config.get("live")).lower()
        while run:
            data = self.http.request("get", "http://webapi.tv4play.se/play/video_assets?type=clips&is_live={0}"
                                            "&platform=web&node_nids={1}&per_page=1000&page={2}".format(live, show, page)).text
            jsondata = json.loads(data)
            for i in jsondata["results"]:
                if vid == i["id"]:
                    return i["title"]
            if not run:
                return None
            total = jsondata["total_hits"]
            if assets > total:
                run = False
            page += 1
            assets = page * 1000
        return None

    def _get_showname(self):
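        """Derive the show's node id from the URL path, URL-quoted if necessary."""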
        parse = urlparse(self.url)
        show = None
        if parse.path.count("/") > 2:
            match = re.search("^/([^/]+)/", parse.path)
            if "program" == match.group(1):
                match = re.search("^/program/([^/]+)/", parse.path)
                if match:
                    show = match.group(1)
            else:
                show = match.group(1)
        else:
            show = parse.path[parse.path.find("/", 1) + 1:]
        if show and not re.search("%", show):
            show = quote_plus(show)
        return show

    def _seasoninfo(self, data):
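        """Copy season/episode numbers into self.output; return False when nothing usable is found."""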
if "season" in data and data["season"]:
season = "{:02d}".format(data["season"])
if "episode" in data:
episode = "{:02d}".format(data["episode"])
if int(season) == 0 and int(episode) == 0:
2018-05-13 13:06:45 +02:00
return False
self.output["season"] = season
self.output["episode"] = episode
return True
else:
2018-05-13 13:06:45 +02:00
self.output["season"] = season
return True
else:
2018-05-13 13:06:45 +02:00
return False
def _autoname(self, vid):
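        """Fill self.output with id, title and episode metadata for the given video id."""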
self.output["id"] = vid
2018-05-13 13:06:45 +02:00
jsondata = self._get_show_info()
for i in jsondata["results"]:
if vid == i["id"]:
season = self._seasoninfo(i)
if season:
index = len(i["program"]["name"])
self.output["title"] = i["title"][:index]
self.output["episodename"] = i["title"][index:]
return True
self.output["title"] = i["title"]
return True
aname = self._get_clip_info(vid)
if aname is not None:
self.output["title"] = aname
return True
2018-05-13 13:06:45 +02:00
aname = self._get_showname()
if aname is not None:
self.output["title"] = aname
return True
2018-05-13 13:06:45 +02:00
return "tv4Stream"
def _getdays(self, data, text):
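        """Return the remaining availability in days for an asset, or 999 if it can't be parsed."""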
        try:
            days = int(data["availability"][text])
        except (ValueError, TypeError):
            days = 999
        return days

    def find_all_episodes(self, config):
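        """Return URLs for all still-available episodes of the show, honouring the all_last limit."""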
        premium = False
        jsondata = self._get_show_info()

        episodes = []
        n = 1
        for i in jsondata["results"]:
            if premium:
                text = "availability_group_premium"
            else:
                text = "availability_group_free"
            days = self._getdays(i, text)
            if premium and days == 0:
                days = self._getdays(i, "availability_group_free")
            if days > 0:
                video_id = i["id"]
                url = "http://www.tv4play.se/program/{0}?video_id={1}".format(i["program"]["nid"], video_id)
                episodes.append(url)
                if n == config.get("all_last"):
                    break
                n += 1

        return episodes


def findvid(url, data):
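    """Extract the numeric video id from the URL query string or from the page markup."""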
    parse = urlparse(url)
    if "tv4play.se" in url:
        if "video_id" in parse_qs(parse.query):
            return parse_qs(parse.query)["video_id"][0]
        match = re.search(r'burtVmanId: "(\d+)"', data)
        if match:
            return match.group(1)
    else:
        match = re.search(r"\"vid\":\"(\d+)\",", data)
        if match:
            return match.group(1)
        match = re.search(r"-(\d+)$", url)
        if match:
            return match.group(1)
        match = re.search(r"meta content='([^']+)' property='og:video'", data)
        if match:
            match = re.search(r"vid=(\d+)&", match.group(1))
            if match:
                return match.group(1)
    return None