1
0
mirror of https://github.com/spaam/svtplay-dl.git synced 2024-11-28 06:04:17 +01:00
svtplay-dl/lib/svtplay_dl/service/__init__.py

259 lines
8.6 KiB
Python
Raw Normal View History

2013-03-02 21:26:28 +01:00
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
2019-08-25 00:40:39 +02:00
2018-01-30 21:48:55 +01:00
import logging
import os
2019-08-25 00:40:39 +02:00
import re
2018-01-30 22:07:21 +01:00
from urllib.parse import urlparse
2018-01-30 21:48:55 +01:00
2019-08-25 00:40:39 +02:00
from svtplay_dl.utils.http import download_thumbnails
from svtplay_dl.utils.http import HTTP
from svtplay_dl.utils.parser import merge
from svtplay_dl.utils.parser import readconfig
from svtplay_dl.utils.parser import setup_defaults
2014-08-17 10:57:08 +02:00
2013-03-23 16:11:36 +01:00
2019-08-25 00:33:51 +02:00
class Service:
    # Domains matched exactly against the URL's netloc (a "www." variant is
    # also accepted, see handles()).
    supported_domains = []
    # Regex patterns matched against the netloc, for 'dynamic' domains
    # (e.g. country-specific hosts).
    supported_domains_re = []

    def __init__(self, config, _url, http=None):
        """Create a service bound to *_url*.

        :param config: option container; merged with the config file when one
            is configured and exists on disk.
        :param _url: the URL this service instance operates on.
        :param http: optional pre-built HTTP client; a new one is created
            from *config* when omitted.
        """
        self._url = _url
        self._urldata = None  # lazy cache for the page body, see get_urldata()
        self._error = False
        self.subtitle = None
        self.cookies = {}
        self.auto_name = None
        # Metadata filled in by subclasses; consumed for output naming
        # and thumbnail downloads.
        self.output = {
            "title": None,
            "season": None,
            "episode": None,
            "episodename": None,
            "id": None,
            "service": self.__class__.__name__.lower(),
            "tvshow": None,
            "title_nice": None,
            "showdescription": None,
            "episodedescription": None,
            "showthumbnailurl": None,
            "episodethumbnailurl": None,
            "publishing_datetime": None,
        }
        if not http:
            self.http = HTTP(config)
        else:
            self.http = http

        # Config: merge the service-specific section of the config file with
        # the runtime options; runtime options take precedence via merge().
        if config.get("configfile") and os.path.isfile(config.get("configfile")):
            self.config = merge(
                readconfig(
                    setup_defaults(),
                    config.get("configfile"),
                    service=self.__class__.__name__.lower(),
                ).get_variable(),
                config.get_variable(),
            )
        else:
            self.config = config
        # Lazy %-formatting: the message is only rendered when debug
        # logging is actually enabled.
        logging.debug("service: %s", self.__class__.__name__.lower())

    @property
    def url(self):
        """The URL this service instance was created for."""
        return self._url

    def get_urldata(self):
        """Fetch the body of self.url once and return the cached text."""
        if self._urldata is None:
            self._urldata = self.http.request("get", self.url).text
        return self._urldata

    @classmethod
    def handles(cls, url):
        """Return True if this service recognises *url*'s domain."""
        urlp = urlparse(url)

        # Apply supported_domains_re regexp to the netloc. This
        # is meant for 'dynamic' domains, e.g. containing country
        # information etc.
        for domain_re in [re.compile(x) for x in cls.supported_domains_re]:
            if domain_re.match(urlp.netloc):
                return True

        if urlp.netloc in cls.supported_domains:
            return True

        # For every listed domain, try with www.subdomain as well.
        if urlp.netloc in ["www." + x for x in cls.supported_domains]:
            return True

        return False

    def get_subtitle(self, options):
        """Fetch subtitles for the current URL; default is a no-op."""
        pass

    # the options parameter is unused, but is part of the
    # interface, so we don't want to remove it. Thus, the
    # pylint ignore.
    def find_all_episodes(self, options):  # pylint: disable-msg=unused-argument
        """Return every episode URL; the default only knows the current URL."""
        logging.warning("--all-episodes not implemented for this service")
        return [self.url]
2018-01-30 20:11:37 +01:00
def opengraph_get(html, prop):
    """
    Extract specified OpenGraph property from html.
    >>> opengraph_get('<html><head><meta property="og:image" content="http://example.com/img.jpg"><meta ...', "image")
    'http://example.com/img.jpg'
    >>> opengraph_get('<html><head><meta content="http://example.com/img2.jpg" property="og:image"><meta ...', "image")
    'http://example.com/img2.jpg'
    >>> opengraph_get('<html><head><meta name="og:image" property="og:image" content="http://example.com/img3.jpg"><meta ...', "image")
    'http://example.com/img3.jpg'
    """
    # The meta tag's attributes can appear in either order; try
    # property-before-content first, then content-before-property.
    patterns = (
        '<meta [^>]*property="og:' + prop + '" content="([^"]*)"',
        '<meta [^>]*content="([^"]*)" property="og:' + prop + '"',
    )
    for pattern in patterns:
        found = re.search(pattern, html)
        if found is not None:
            return found.group(1)
    return None
2019-08-25 00:33:51 +02:00
class OpenGraphThumbMixin:
    """
    Mix this into the service class to grab thumbnail from OpenGraph properties.
    """

    def get_thumbnail(self, options):
        """Download the page's og:image thumbnail, if one is present."""
        url = opengraph_get(self.get_urldata(), "image")
        if url is None:
            return
        # Pass self.output as the first argument, matching the
        # download_thumbnails(output, options, urls) call signature used by
        # MetadataThumbMixin; the previous two-argument call dropped it.
        download_thumbnails(self.output, options, [(False, url)])
2019-08-25 00:33:51 +02:00
class MetadataThumbMixin:
    """
    Mix this into the service class to grab thumbnail from extracted metadata.
    """

    def get_thumbnail(self, options):
        """Download the show and/or episode thumbnails recorded in self.output."""
        candidates = (
            (True, "showthumbnailurl"),
            (False, "episodethumbnailurl"),
        )
        urls = [
            (is_show, self.output[key])
            for is_show, key in candidates
            if self.output[key] is not None
        ]
        if urls:
            download_thumbnails(self.output, options, urls)
2015-08-30 12:04:16 +02:00
class Generic(Service):
    """Videos embedded in other sites."""

    def get(self, sites):
        """Scan the page at self.url for known embedded video players.

        :param sites: iterable of Service subclasses to try.
        :return: a ``(url, service_instance)`` tuple for the first recognised
            embed, or ``(self.url, None)`` when nothing matched.
        """
        data = self.http.request("get", self.url).text

        match = re.search(r"src=(\"|\')(http://www.svt.se/wd[^\'\"]+)(\"|\')", data)
        stream = None
        if match:
            url = match.group(2)
            for i in sites:
                if i.handles(url):
                    # Undo HTML entity encoding in the embedded URL.
                    url = url.replace("&amp;", "&").replace("&#038;", "&")
                    return url, i(self.config, url)

        match = re.search(r"src=\"(http://player.vimeo.com/video/[0-9]+)\" ", data)
        if match:
            for i in sites:
                if i.handles(match.group(1)):
                    # Bugfix: this branch used to pass `url`, which is stale
                    # from the previous branch or undefined (NameError) when
                    # that branch didn't match; the vimeo URL is the one to use.
                    return match.group(1), i(self.config, match.group(1))

        match = re.search(r"tv4play.se/iframe/video/(\d+)?", data)
        if match:
            url = "http://www.tv4play.se/?video_id=%s" % match.group(1)
            for i in sites:
                if i.handles(url):
                    return url, i(self.config, url)

        match = re.search(r"embed.bambuser.com/broadcast/(\d+)", data)
        if match:
            url = "http://bambuser.com/v/%s" % match.group(1)
            for i in sites:
                if i.handles(url):
                    return url, i(self.config, url)

        match = re.search(r'src="(http://tv.aftonbladet[^"]*)"', data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return url, i(self.config, url)

        match = re.search(r'a href="(http://tv.aftonbladet[^"]*)" class="abVi', data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return url, i(self.config, url)

        match = re.search(r"iframe src='(http://www.svtplay[^']*)'", data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return url, i(self.config, url)

        match = re.search('src="(http://mm-resource-service.herokuapp.com[^"]*)"', data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, self.url)

        match = re.search(r'src="([^.]+\.solidtango.com[^"+]+)"', data)
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, url)

        # Some players are detected only by a marker string, not an URL;
        # a representative URL is used to select the handling service.
        match = re.search("(lemonwhale|lwcdn.com)", data)
        if match:
            url = "http://lemonwhale.com"
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, self.url)

        match = re.search(
            's.src="(https://csp-ssl.picsearch.com[^"]+|http://csp.picsearch.com/rest[^"]+)',
            data,
        )
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, self.url)

        match = re.search("(picsearch_ajax_auth|screen9-ajax-auth)", data)
        if match:
            url = "http://csp.picsearch.com"
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, self.url)

        match = re.search('iframe src="(//csp.screen9.com[^"]+)"', data)
        if match:
            url = "http:%s" % match.group(1)
            for i in sites:
                if i.handles(url):
                    return self.url, i(self.config, self.url)

        # Plain HLS <source> tags are handed to the Raw service directly.
        match = re.search('source src="([^"]+)" type="application/x-mpegURL"', data)
        if match:
            for i in sites:
                if i.__name__ == "Raw":
                    return self.url, i(self.config, match.group(1))

        return self.url, stream
2018-01-30 20:11:37 +01:00
def service_handler(sites, options, url):
    """Instantiate the first service in *sites* that handles *url*.

    Returns the constructed service, or None when no service matches.
    """
    for candidate in sites:
        if candidate.handles(url):
            return candidate(options, url)
    return None