2013-03-02 21:26:28 +01:00
|
|
|
# ex:ts=4:sw=4:sts=4:et
|
|
|
|
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
2013-03-01 23:39:42 +01:00
|
|
|
from __future__ import absolute_import
|
2013-01-17 00:21:47 +01:00
|
|
|
import re
|
2014-02-05 23:15:19 +01:00
|
|
|
import json
|
2013-01-17 00:21:47 +01:00
|
|
|
|
2013-04-21 12:44:31 +02:00
|
|
|
from svtplay_dl.service import Service
|
2015-08-30 00:06:20 +02:00
|
|
|
from svtplay_dl.utils import decode_html_entities
|
2017-06-05 13:14:22 +02:00
|
|
|
from svtplay_dl.utils.urllib import urlparse
|
2015-09-06 14:19:10 +02:00
|
|
|
from svtplay_dl.error import ServiceError
|
2015-10-04 14:37:16 +02:00
|
|
|
from svtplay_dl.fetcher.hls import hlsparse
|
2013-02-12 19:43:37 +01:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2017-09-01 20:53:04 +02:00
|
|
|
class Aftonbladettv(Service):
|
2017-06-05 13:14:22 +02:00
|
|
|
supported_domains = ['tv.aftonbladet.se', "svd.se"]
|
2013-01-17 00:21:47 +01:00
|
|
|
|
2015-12-26 11:46:14 +01:00
|
|
|
def get(self):
|
2015-08-30 00:06:20 +02:00
|
|
|
data = self.get_urldata()
|
2014-12-22 17:41:40 +01:00
|
|
|
|
2016-05-14 22:54:30 +02:00
|
|
|
if self.exclude():
|
2015-09-06 23:04:48 +02:00
|
|
|
yield ServiceError("Excluding video")
|
2014-12-22 17:41:40 +01:00
|
|
|
return
|
|
|
|
|
2016-10-06 22:46:40 +02:00
|
|
|
apiurl = None
|
|
|
|
match = re.search('data-player-config="([^"]+)"', data)
|
2013-01-17 00:21:47 +01:00
|
|
|
if not match:
|
2017-06-05 13:14:22 +02:00
|
|
|
match = re.search('data-svpPlayer-video="([^"]+)"', data)
|
|
|
|
if not match:
|
|
|
|
yield ServiceError("Can't find video info")
|
|
|
|
return
|
|
|
|
data = json.loads(decode_html_entities(match.group(1)))
|
|
|
|
if urlparse(self.url).netloc == "tv.aftonbadet.se":
|
|
|
|
videoId = data["playerOptions"]["id"]
|
|
|
|
apiurl = data["playerOptions"]["api"]
|
|
|
|
vendor = data["playerOptions"]["vendor"]
|
|
|
|
self.options.live = data["live"]
|
|
|
|
if not self.options.live:
|
|
|
|
dataurl = "{0}{1}/assets/{2}?appName=svp-player".format(apiurl, vendor, videoId)
|
|
|
|
data = self.http.request("get", dataurl).text
|
|
|
|
data = json.loads(data)
|
|
|
|
|
|
|
|
streams = hlsparse(self.options, self.http.request("get", data["streamUrls"]["hls"]), data["streamUrls"]["hls"])
|
|
|
|
if streams:
|
|
|
|
for n in list(streams.keys()):
|
|
|
|
yield streams[n]
|
2017-09-01 20:53:04 +02:00
|
|
|
|
|
|
|
class Aftonbladet(Service):
|
|
|
|
supported_domains = ["aftonbladet.se"]
|
|
|
|
|
|
|
|
|
|
|
|
def get(self):
|
|
|
|
data = self.get_urldata()
|
|
|
|
|
|
|
|
if self.exclude():
|
|
|
|
yield ServiceError("Excluding video")
|
|
|
|
return
|
|
|
|
|
|
|
|
match = re.search('window.FLUX_STATE = ({.*})</script>', data)
|
|
|
|
if not match:
|
|
|
|
yield ServiceError("Can't find video info")
|
|
|
|
return
|
|
|
|
|
|
|
|
janson = json.loads(match.group(1))
|
|
|
|
articleid = janson["article"]["currentArticleId"]
|
|
|
|
components = janson["articles"][articleid]["article"]["components"]
|
|
|
|
for i in components:
|
|
|
|
if "components" in i:
|
|
|
|
for n in i["components"]:
|
|
|
|
if "type" in n and n["type"] == "video":
|
2017-09-02 10:44:15 +02:00
|
|
|
streams = hlsparse(self.options, self.http.request("get", n["videoAsset"]["streamUrls"]["hls"]),
|
2017-09-01 20:53:04 +02:00
|
|
|
n["videoAsset"]["streamUrls"]["hls"])
|
2017-09-02 10:44:15 +02:00
|
|
|
if streams:
|
|
|
|
for n in list(streams.keys()):
|
|
|
|
yield streams[n]
|