# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import

import re
import copy
import os
import hashlib

from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.error import ServiceError
from svtplay_dl.log import log
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.dash import dashparse
from svtplay_dl.utils import ensure_unicode, filenamify, decode_html_entities
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils.urllib import urlparse, parse_qs

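
# oppetarkiv.se is SVT's archive of older programmes. This service looks up the
# page's video id, queries the videoplayer API for stream metadata and yields
# fetchers (and subtitles) for every available format.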
class OppetArkiv(Service, OpenGraphThumbMixin):
    supported_domains = ['oppetarkiv.se']
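
    # Resolve the page to stream and subtitle objects via SVT's videoplayer API.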
    def get(self):
        vid = self.find_video_id()
        if vid is None:
            yield ServiceError("Can't find video id for this video")
            return

        url = "http://api.svt.se/videoplayer-api/video/{0}".format(vid)
        data = self.http.request("get", url)
        if data.status_code == 404:
            yield ServiceError("Can't get the json file for {0}".format(url))
            return

        data = data.json()
        if "live" in data:
            self.options.live = data["live"]

        if self.options.output_auto:
            self.options.service = "svtplay"
            self.options.output = self.outputfilename(data, self.options.output, ensure_unicode(self.get_urldata()))

        if self.exclude():
            yield ServiceError("Excluding video")
            return

        if "subtitleReferences" in data:
            for i in data["subtitleReferences"]:
                if i["format"] == "websrt":
                    yield subtitle(copy.copy(self.options), "wrst", i["url"])

        if len(data["videoReferences"]) == 0:
            yield ServiceError("Media doesn't have any associated videos (yet?)")
            return
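
        # Walk every advertised container format (HLS, HDS, DASH) and yield the
        # streams the corresponding parser can extract.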
        for i in data["videoReferences"]:
            parse = urlparse(i["url"])
            query = parse_qs(parse.query)
            if i["format"] == "hls" or i["format"] == "ios":
                streams = hlsparse(self.options, self.http.request("get", i["url"]), i["url"])
                if streams:
                    for n in list(streams.keys()):
                        yield streams[n]
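                # An "alt" query parameter points at an alternative manifest URL;
                # fetch and parse that one as well.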
                if "alt" in query and len(query["alt"]) > 0:
                    alt = self.http.get(query["alt"][0])
                    if alt:
                        streams = hlsparse(self.options, self.http.request("get", alt.request.url), alt.request.url)
                        if streams:
                            for n in list(streams.keys()):
                                yield streams[n]
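
            # HDS/Flash: skip manifests under /se/secure/; otherwise request the
            # manifest with the hdcore query parameter and parse it.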
            if i["format"] == "hds" or i["format"] == "flash":
                match = re.search(r"\/se\/secure\/", i["url"])
                if not match:
                    streams = hdsparse(self.options, self.http.request("get", i["url"], params={"hdcore": "3.7.0"}),
                                       i["url"])
                    if streams:
                        for n in list(streams.keys()):
                            yield streams[n]
                if "alt" in query and len(query["alt"]) > 0:
                    alt = self.http.get(query["alt"][0])
                    if alt:
                        streams = hdsparse(self.options,
                                           self.http.request("get", alt.request.url, params={"hdcore": "3.7.0"}),
                                           alt.request.url)
                        if streams:
                            for n in list(streams.keys()):
                                yield streams[n]
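
            # DASH (dash264 / dashhbbtv) manifests, with the same "alt" fallback.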
            if i["format"] == "dash264" or i["format"] == "dashhbbtv":
                streams = dashparse(self.options, self.http.request("get", i["url"]), i["url"])
                if streams:
                    for n in list(streams.keys()):
                        yield streams[n]

                if "alt" in query and len(query["alt"]) > 0:
                    alt = self.http.get(query["alt"][0])
                    if alt:
                        streams = dashparse(self.options, self.http.request("get", alt.request.url), alt.request.url)
                        if streams:
                            for n in list(streams.keys()):
                                yield streams[n]
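
    # The video id is embedded in the page markup as a data-video-id attribute.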
    def find_video_id(self):
        match = re.search('data-video-id="([^"]+)"', self.get_urldata())
        if match:
            return match.group(1)
        return None
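
    # Scrape episode URLs from the paginated /etikett/titel/<program>/ listing.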
    def find_all_episodes(self, options):
        page = 1
        data = self.get_urldata()
        match = re.search(r'"/etikett/titel/([^"/]+)', data)
        if match is None:
            match = re.search(r'"http://www.oppetarkiv.se/etikett/titel/([^/]+)/', self.url)
            if match is None:
                log.error("Couldn't find title")
                return
        program = match.group(1)
        episodes = []

        n = 0
        if self.options.all_last > 0:
            sort = "tid_fallande"
        else:
            sort = "tid_stigande"

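        # Page through the listing until the site answers 404, collecting links
        # to the individual /video/ pages, stopping early once options.all_last
        # episodes have been gathered.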
        while True:
            url = "http://www.oppetarkiv.se/etikett/titel/{0}/?sida={1}&sort={2}&embed=true".format(program, page, sort)
            data = self.http.request("get", url)
            if data.status_code == 404:
                break

            data = data.text
            regex = re.compile(r'href="(/video/[^"]+)"')
            for match in regex.finditer(data):
                if n == self.options.all_last:
                    break
                episodes.append("http://www.oppetarkiv.se{0}".format(match.group(1)))
                n += 1
            page += 1

        return episodes
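
    # Build the output filename as <title>[.<sXXeYY>]-<hash>-svtplay, where the
    # hash is a short digest of the programVersionId.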
    def outputfilename(self, data, filename, raw):
        directory = os.path.dirname(filename)
        id = hashlib.sha256(data["programVersionId"].encode("utf-8")).hexdigest()[:7]

        datatitle = re.search('data-title="([^"]+)"', self.get_urldata())
        if not datatitle:
            return None
        datat = decode_html_entities(datatitle.group(1))
        name = self.name(datat)
        episode = self.seasoninfo(datat)

        if episode:
            title = "{0}.{1}-{2}-svtplay".format(name, episode, id)
        else:
            title = "{0}-{1}-svtplay".format(name, id)
        title = filenamify(title)
        if len(directory):
            output = os.path.join(directory, title)
        else:
            output = title
        return output
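
    # Parse "Säsong N - Avsnitt M" style titles into sNNeMM (or eNN when only
    # the episode number is present); the "S.song" pattern uses a wildcard so
    # the match does not depend on how the "ä" is encoded.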
    def seasoninfo(self, data):
        episode = None
        match = re.search(r"S.song (\d+) - Avsnitt (\d+)", data)
        if match:
            episode = "s{0:02d}e{1:02d}".format(int(match.group(1)), int(match.group(2)))
        else:
            match = re.search(r"Avsnitt (\d+)", data)
            if match:
                episode = "e{0:02d}".format(int(match.group(1)))
        return episode
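
    # The show name is whatever precedes the " - S.song"/" - Avsnitt" suffix in
    # the page title.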
    def name(self, data):
        if data.find(" - S.song") > 0:
            title = data[:data.find(" - S.song")]
        elif data.find(" - Avsnitt") > 0:
            title = data[:data.find(" - Avsnitt")]
        else:
            title = data
        return title