2013-03-02 21:26:28 +01:00
|
|
|
# ex:ts=4:sw=4:sts=4:et
|
|
|
|
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
2013-03-01 23:39:42 +01:00
|
|
|
from __future__ import absolute_import
|
2013-02-12 19:43:37 +01:00
|
|
|
import re
|
2014-08-27 22:58:37 +02:00
|
|
|
import os
|
2013-02-12 19:43:37 +01:00
|
|
|
import xml.etree.ElementTree as ET
|
2014-04-03 19:52:51 +02:00
|
|
|
import json
|
2014-06-07 20:43:40 +02:00
|
|
|
import copy
|
2013-02-12 19:43:37 +01:00
|
|
|
|
2015-08-31 21:54:10 +02:00
|
|
|
from svtplay_dl.utils.urllib import urlparse, parse_qs, quote_plus
|
2014-01-19 14:26:48 +01:00
|
|
|
from svtplay_dl.service import Service, OpenGraphThumbMixin
|
2016-01-27 19:28:37 +01:00
|
|
|
from svtplay_dl.utils import is_py2_old, is_py2, filenamify
|
2013-03-17 19:55:19 +01:00
|
|
|
from svtplay_dl.log import log
|
2015-10-04 14:37:16 +02:00
|
|
|
from svtplay_dl.fetcher.hls import hlsparse
|
2014-04-21 18:24:01 +02:00
|
|
|
from svtplay_dl.fetcher.rtmp import RTMP
|
2014-04-27 13:24:53 +02:00
|
|
|
from svtplay_dl.fetcher.hds import hdsparse
|
2014-08-31 01:20:36 +02:00
|
|
|
from svtplay_dl.subtitle import subtitle
|
2015-09-06 14:19:10 +02:00
|
|
|
from svtplay_dl.error import ServiceError
|
2013-02-12 19:43:37 +01:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-01-19 14:26:48 +01:00
|
|
|
class Tv4play(Service, OpenGraphThumbMixin):
    # URLs on these domains are dispatched to this service handler.
    supported_domains = ['tv4play.se', 'tv4.se']
2015-12-26 11:46:14 +01:00
|
|
|
    def get(self):
        """Yield stream fetchers (RTMP/HDS/HLS), subtitles and ServiceErrors for self.url.

        This is a generator: every failure path yields a ServiceError and
        returns early instead of raising.
        """
        data = self.get_urldata()
        vid = findvid(self.url, data)
        if vid is None:
            yield ServiceError("Can't find video id for %s" % self.url)
            return

        # Login support is currently disabled (see the commented _login below):
        # if self.options.username and self.options.password:
        #     work = self._login(self.options.username, self.options.password)
        #     if isinstance(work, Exception):
        #         yield work
        #         return

        # Ask the playback API for the asset's stream descriptions (XML body).
        url = "http://prima.tv4play.se/api/web/asset/%s/play" % vid
        data = self.http.request("get", url, cookies=self.cookies)
        if data.status_code == 401:
            # A 401 response carries an XML error document with a machine-readable code.
            xml = ET.XML(data.content)
            code = xml.find("code").text
            if code == "SESSION_NOT_AUTHENTICATED":
                yield ServiceError("Can't access premium content")
            elif code == "ASSET_PLAYBACK_INVALID_GEO_LOCATION":
                yield ServiceError("Can't download this video because of geoblock.")
            else:
                yield ServiceError("Can't find any info for that video")
            return
        if data.status_code == 404:
            yield ServiceError("Can't find the video api")
            return

        xml = ET.XML(data.content)
        ss = xml.find("items")
        # getiterator() is the old-Python-2 spelling of iter().
        if is_py2_old:
            sa = list(ss.getiterator("item"))
        else:
            sa = list(ss.iter("item"))

        # The <live> element holds a textual boolean; anything but "false" means live.
        if xml.find("live").text:
            if xml.find("live").text != "false":
                self.options.live = True
        if xml.find("drmProtected").text == "true":
            yield ServiceError("We cant download DRM protected content from this site.")
            return
        if xml.find("playbackStatus").text == "NOT_STARTED":
            yield ServiceError("Can't download something that is not started")
            return

        # Build an output filename from show metadata when auto-naming is on.
        if self.options.output_auto:
            directory = os.path.dirname(self.options.output)
            self.options.service = "tv4play"
            basename = self._autoname(vid)
            if basename is None:
                yield ServiceError("Cant find vid id for autonaming")
                return
            title = "%s-%s-%s" % (basename, vid, self.options.service)
            title = filenamify(title)
            if len(directory):
                self.options.output = os.path.join(directory, title)
            else:
                self.options.output = title

        if self.exclude():
            yield ServiceError("Excluding video")
            return

        # First pass over the items: RTMP and HDS (f4m) streams, plus webvtt subtitles.
        for i in sa:
            if i.find("mediaFormat").text == "mp4":
                base = urlparse(i.find("base").text)
                parse = urlparse(i.find("url").text)
                if "rtmp" in base.scheme:
                    swf = "http://www.tv4play.se/flash/tv4playflashlets.swf"
                    self.options.other = "-W %s -y %s" % (swf, i.find("url").text)
                    # copy.copy so later option mutations don't affect this fetcher.
                    yield RTMP(copy.copy(self.options), i.find("base").text, i.find("bitrate").text)
                elif parse.path[len(parse.path)-3:len(parse.path)] == "f4m":
                    streams = hdsparse(self.options, self.http.request("get", i.find("url").text, params={"hdcore": "3.7.0"}), i.find("url").text)
                    if streams:
                        for n in list(streams.keys()):
                            yield streams[n]
            elif i.find("mediaFormat").text == "webvtt":
                yield subtitle(copy.copy(self.options), "wrst", i.find("url").text)

        # Second pass: re-query the API with protocol=hls3 to also get HLS streams.
        url = "https://prima.tv4play.se/api/web/asset/%s/play?protocol=hls3" % vid
        data = self.http.request("get", url, cookies=self.cookies).content
        xml = ET.XML(data)
        ss = xml.find("items")
        if is_py2_old:
            sa = list(ss.getiterator("item"))
        else:
            sa = list(ss.iter("item"))
        for i in sa:
            if i.find("mediaFormat").text == "mp4":
                parse = urlparse(i.find("url").text)
                if parse.path.endswith("m3u8"):
                    streams = hlsparse(self.options, self.http.request("get", i.find("url").text), i.find("url").text)
                    if streams:
                        for n in list(streams.keys()):
                            yield streams[n]
|
2014-07-13 22:48:34 +02:00
|
|
|
|
2015-10-07 19:52:37 +02:00
|
|
|
def _get_show_info(self):
|
2016-01-03 15:10:24 +01:00
|
|
|
show = self._get_showname(self.url)
|
2016-12-05 22:09:20 +01:00
|
|
|
if self.options.live:
|
|
|
|
live = "true"
|
|
|
|
else:
|
|
|
|
live = "false"
|
|
|
|
data = self.http.request("get", "http://webapi.tv4play.se/play/video_assets?type=episode&is_live=%s&platform=web&node_nids=%s&per_page=99999" % (live, show)).text
|
2015-10-07 19:52:37 +02:00
|
|
|
jsondata = json.loads(data)
|
|
|
|
return jsondata
|
|
|
|
|
2015-12-15 00:30:40 +01:00
|
|
|
def _get_clip_info(self, vid):
|
2016-01-03 15:10:24 +01:00
|
|
|
show = self._get_showname(self.url)
|
2015-12-15 00:30:40 +01:00
|
|
|
page = 1
|
|
|
|
assets = page * 1000
|
|
|
|
run = True
|
2016-12-05 22:09:20 +01:00
|
|
|
if self.options.live:
|
|
|
|
live = "true"
|
|
|
|
else:
|
|
|
|
live = "false"
|
2015-12-15 00:30:40 +01:00
|
|
|
while run:
|
2016-12-05 22:09:20 +01:00
|
|
|
data = self.http.request("get", "http://webapi.tv4play.se/play/video_assets?type=clips&is_live=%s&platform=web&node_nids=%s&per_page=1000&page=%s" % (live, show, page)).text
|
2015-12-15 00:30:40 +01:00
|
|
|
jsondata = json.loads(data)
|
|
|
|
for i in jsondata["results"]:
|
|
|
|
if vid == i["id"]:
|
|
|
|
return i["title"]
|
|
|
|
if not run:
|
|
|
|
return None
|
|
|
|
total = jsondata["total_hits"]
|
|
|
|
if assets > total:
|
|
|
|
run = False
|
|
|
|
page += 1
|
|
|
|
assets = page * 1000
|
|
|
|
return None
|
|
|
|
|
2016-01-03 15:10:24 +01:00
|
|
|
def _get_showname(self, url):
|
|
|
|
parse = urlparse(self.url)
|
|
|
|
if parse.path.count("/") > 2:
|
|
|
|
match = re.search("^/([^/]+)/", parse.path)
|
2017-09-12 12:33:49 +02:00
|
|
|
if "program" == match.group(1):
|
|
|
|
match = re.search("^/program/([^/]+)/", parse.path)
|
|
|
|
if match:
|
|
|
|
show = match.group(1)
|
|
|
|
else:
|
|
|
|
show = match.group(1)
|
2016-01-03 15:10:24 +01:00
|
|
|
else:
|
|
|
|
show = parse.path[parse.path.find("/", 1)+1:]
|
|
|
|
if not re.search("%", show):
|
2016-04-30 13:54:26 +02:00
|
|
|
if is_py2 and isinstance(show, unicode):
|
|
|
|
show = show.encode("utf-8")
|
2016-01-03 15:10:24 +01:00
|
|
|
show = quote_plus(show)
|
|
|
|
return show
|
|
|
|
|
2017-02-19 11:50:33 +01:00
|
|
|
def _seasoninfo(self, data):
|
|
|
|
if "season" in data and data["season"]:
|
|
|
|
season = "{:02d}".format(data["season"])
|
2017-05-01 22:04:57 +02:00
|
|
|
if "episode" in data:
|
|
|
|
episode = "{:02d}".format(data["episode"])
|
|
|
|
if int(season) == 0 and int(episode) == 0:
|
|
|
|
return None
|
|
|
|
return "s%se%s" % (season, episode)
|
|
|
|
else:
|
|
|
|
return "s%s" % season
|
2017-02-19 11:50:33 +01:00
|
|
|
else:
|
|
|
|
return None
|
|
|
|
|
2015-10-07 19:52:37 +02:00
|
|
|
def _autoname(self, vid):
|
|
|
|
jsondata = self._get_show_info()
|
|
|
|
for i in jsondata["results"]:
|
|
|
|
if vid == i["id"]:
|
2017-02-19 11:50:33 +01:00
|
|
|
season = self._seasoninfo(i)
|
|
|
|
if season:
|
|
|
|
index = len(i["program"]["name"])
|
|
|
|
return i["title"][:index] + ".%s%s" % (season, i["title"][index:])
|
2015-10-07 19:52:37 +02:00
|
|
|
return i["title"]
|
2015-12-15 00:30:40 +01:00
|
|
|
return self._get_clip_info(vid)
|
2015-10-07 19:52:37 +02:00
|
|
|
|
2016-04-30 14:10:43 +02:00
|
|
|
def _getdays(self, data, text):
|
|
|
|
try:
|
|
|
|
days = int(data["availability"][text])
|
|
|
|
except (ValueError, TypeError):
|
|
|
|
days = 999
|
|
|
|
return days
|
|
|
|
|
2014-04-03 19:52:51 +02:00
|
|
|
def find_all_episodes(self, options):
|
2015-10-05 19:43:57 +02:00
|
|
|
premium = False
|
2017-04-21 18:18:29 +02:00
|
|
|
# if options.username and options.password:
|
|
|
|
# premium = self._login(options.username, options.password)
|
|
|
|
# if isinstance(premium, Exception):
|
|
|
|
# log.error(premium.message)
|
|
|
|
# return None
|
2015-10-05 19:43:57 +02:00
|
|
|
|
2015-10-07 19:52:37 +02:00
|
|
|
jsondata = self._get_show_info()
|
|
|
|
|
2014-04-03 19:52:51 +02:00
|
|
|
episodes = []
|
2014-12-21 13:01:28 +01:00
|
|
|
n = 1
|
2014-04-03 19:52:51 +02:00
|
|
|
for i in jsondata["results"]:
|
2015-10-05 19:43:57 +02:00
|
|
|
if premium:
|
|
|
|
text = "availability_group_premium"
|
|
|
|
else:
|
|
|
|
text = "availability_group_free"
|
|
|
|
|
2016-04-30 14:10:43 +02:00
|
|
|
days = self._getdays(i, text)
|
|
|
|
if premium and days == 0:
|
|
|
|
days = self._getdays(i, "availability_group_free")
|
|
|
|
|
2014-11-25 21:48:08 +01:00
|
|
|
if days > 0:
|
2014-07-22 10:13:49 +02:00
|
|
|
video_id = i["id"]
|
|
|
|
url = "http://www.tv4play.se/program/%s?video_id=%s" % (
|
2015-10-19 10:37:03 +02:00
|
|
|
i["program"]["nid"], video_id)
|
2014-04-03 19:52:51 +02:00
|
|
|
episodes.append(url)
|
2014-12-21 13:01:28 +01:00
|
|
|
if n == options.all_last:
|
|
|
|
break
|
|
|
|
n += 1
|
|
|
|
|
2014-12-21 12:27:16 +01:00
|
|
|
return episodes
|
2015-01-20 13:10:35 +01:00
|
|
|
|
2017-04-21 18:18:29 +02:00
|
|
|
# def _login(self, username, password):
|
|
|
|
# data = self.http.request("get", "https://www.tv4play.se/session/new?https=")
|
|
|
|
# url = "https://account.services.tv4play.se/session/authenticate"
|
|
|
|
# postdata = {"username" : username, "password": password, "https": "", "client": "web"}
|
2016-08-20 12:46:54 +02:00
|
|
|
|
2017-04-21 18:18:29 +02:00
|
|
|
# data = self.http.request("post", url, data=postdata, cookies=self.cookies)
|
|
|
|
# try:
|
|
|
|
# res = data.json()
|
|
|
|
# except ValueError:
|
|
|
|
# return ServiceError("Cant decode output from tv4play")
|
|
|
|
# if "errors" in res:
|
|
|
|
# message = res["errors"][0]
|
|
|
|
# if is_py2:
|
|
|
|
# message = message.encode("utf8")
|
|
|
|
# return ServiceError(message)
|
|
|
|
# self.cookies={"JSESSIONID": res["vimond_session_token"]}
|
2016-04-19 21:06:51 +02:00
|
|
|
|
2017-04-21 18:18:29 +02:00
|
|
|
# return True
|
2015-10-05 19:43:57 +02:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2015-01-20 13:10:35 +01:00
|
|
|
def findvid(url, data):
    """Extract the numeric video id for *url*, scraping *data* (page HTML)
    as needed.

    tv4play.se URLs carry the id in the video_id query parameter or a
    burtVmanId marker in the page; tv4.se pages are probed with several
    fallback patterns. Returns the id as a string, or None.
    """
    parsed = urlparse(url)
    if "tv4play.se" in url:
        query = parse_qs(parsed.query)
        if "video_id" in query:
            return query["video_id"][0]
        hit = re.search(r'burtVmanId: "(\d+)"', data)
        if hit:
            return hit.group(1)
    else:
        # Try the embedded vid marker, then a trailing -<digits> in the URL.
        for pattern, haystack in ((r"\"vid\":\"(\d+)\",", data), (r"-(\d+)$", url)):
            hit = re.search(pattern, haystack)
            if hit:
                return hit.group(1)
        # Last resort: the og:video meta tag's vid= query parameter.
        meta = re.search(r"meta content='([^']+)' property='og:video'", data)
        if meta:
            hit = re.search(r"vid=(\d+)&", meta.group(1))
            if hit:
                return hit.group(1)
    return None
|