2013-03-02 21:26:28 +01:00
|
|
|
# ex:ts=4:sw=4:sts=4:et
|
|
|
|
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
2013-03-01 23:39:42 +01:00
|
|
|
from __future__ import absolute_import
|
2013-03-23 16:11:36 +01:00
|
|
|
import re
|
2014-01-05 17:28:00 +01:00
|
|
|
from svtplay_dl.utils.urllib import urlparse
|
2015-08-30 00:06:20 +02:00
|
|
|
from svtplay_dl.utils import download_thumbnail, is_py2, HTTP
|
2014-08-17 10:57:08 +02:00
|
|
|
|
2014-01-19 14:26:48 +01:00
|
|
|
import logging
|
|
|
|
|
|
|
|
# Module-wide logger shared by all service implementations in this file.
log = logging.getLogger('svtplay_dl')
|
2013-03-23 16:11:36 +01:00
|
|
|
|
|
|
|
class Service(object):
    """Base class for a video-site service.

    Subclasses declare which URLs they accept via supported_domains
    (exact netloc matches, with and without a 'www.' prefix) and
    supported_domains_re (regexps matched against the netloc).
    """

    # Exact domain names this service accepts.
    supported_domains = []
    # Regexp patterns matched against the netloc, for 'dynamic'
    # domains, e.g. ones embedding country information.
    supported_domains_re = []

    def __init__(self, options, _url):
        self.options = options
        self._url = _url
        self._urldata = None   # lazily-fetched page body cache
        self._error = False
        self.subtitle = None
        self.cookies = {}
        self.http = HTTP(options)

    @property
    def url(self):
        """The URL this service instance was created for."""
        return self._url

    def get_urldata(self):
        """Fetch the page body once and memoize it."""
        if self._urldata is None:
            self._urldata = self.http.request("get", self.url).text
        return self._urldata

    @classmethod
    def handles(cls, url):
        """Return True if this service recognises the given URL."""
        netloc = urlparse(url).netloc

        # Regexp patterns first: these cover domains that vary at
        # runtime (country codes etc.).
        if any(re.match(pattern, netloc) for pattern in cls.supported_domains_re):
            return True

        if netloc in cls.supported_domains:
            return True

        # For every listed domain, also accept the www. subdomain.
        return netloc in ['www.' + domain for domain in cls.supported_domains]

    def get_subtitle(self, options):
        # Default: the service provides no subtitles.
        pass

    def exclude(self):
        """Return True if the chosen output name contains any --exclude token."""
        if self.options.exclude:
            for token in self.options.exclude:
                if is_py2:
                    token = token.decode("utf-8")
                if token in self.options.output:
                    return True
        return False

    # the options parameter is unused, but is part of the
    # interface, so we don't want to remove it. Thus, the
    # pylint ignore.
    def find_all_episodes(self, options):  # pylint: disable-msg=unused-argument
        """Fallback for services without --all-episodes support."""
        log.warning("--all-episodes not implemented for this service")
        return [self.url]
|
2014-01-19 14:26:48 +01:00
|
|
|
|
2014-02-18 16:17:02 +01:00
|
|
|
def opengraph_get(html, prop):
    """
    Extract specified OpenGraph property from html.

    Handles both attribute orders: property before content, and
    content before property. Returns None when the property is absent.

    >>> opengraph_get('<html><head><meta property="og:image" content="http://example.com/img.jpg"><meta ...', "image")
    'http://example.com/img.jpg'
    >>> opengraph_get('<html><head><meta content="http://example.com/img2.jpg" property="og:image"><meta ...', "image")
    'http://example.com/img2.jpg'
    >>> opengraph_get('<html><head><meta name="og:image" property="og:image" content="http://example.com/img3.jpg"><meta ...', "image")
    'http://example.com/img3.jpg'
    """
    patterns = [
        '<meta [^>]*property="og:' + prop + '" content="([^"]*)"',
        '<meta [^>]*content="([^"]*)" property="og:' + prop + '"',
    ]
    for pattern in patterns:
        found = re.search(pattern, html)
        if found is not None:
            return found.group(1)
    return None
|
|
|
|
|
|
|
|
|
2014-01-19 14:26:48 +01:00
|
|
|
class OpenGraphThumbMixin(object):
    """
    Mix this into the service class to grab thumbnail from OpenGraph properties.
    """
    def get_thumbnail(self, options):
        """Download the og:image thumbnail of the page, if one is advertised."""
        image_url = opengraph_get(self.get_urldata(), "image")
        if image_url is not None:
            download_thumbnail(options, image_url)
|
2014-01-19 14:26:48 +01:00
|
|
|
|
|
|
|
|
2015-08-30 12:04:16 +02:00
|
|
|
class Generic(Service):
    ''' Videos embed in sites '''

    @staticmethod
    def _matching_site(sites, url):
        # Return the first service class in sites that claims url, or None.
        for site in sites:
            if site.handles(url):
                return site
        return None

    def get(self, sites):
        """Scan the page at self.url for known embedded players.

        Returns a (url, service_instance) tuple; the instance is None
        when no known embed is found on the page.
        """
        data = self.http.request("get", self.url).text
        stream = None

        # Old svt.se widget embeds.
        match = re.search(r"src=(\"|\')(http://www.svt.se/wd[^\'\"]+)(\"|\')", data)
        if match:
            url = match.group(2)
            site = self._matching_site(sites, url)
            if site:
                # Undo HTML entity escaping done by the embedding page.
                # NOTE(review): the original line chained two no-op
                # replace("&", "&") calls, which looks like entity-garbled
                # source; restored to decoding &amp;/&#038; — TODO confirm
                # against upstream history.
                url = url.replace("&amp;", "&").replace("&#038;", "&")
                return url, site(self.options, url)

        match = re.search(r"src=\"(http://player.vimeo.com/video/[0-9]+)\" ", data)
        if match:
            # Bug fix: the original instantiated the handler with 'url',
            # which is unbound here unless the svt.se branch above matched
            # (NameError), and wrong even when it did. Use the vimeo URL.
            url = match.group(1)
            site = self._matching_site(sites, url)
            if site:
                return url, site(self.options, url)

        match = re.search(r"tv4play.se/iframe/video/(\d+)?", data)
        if match:
            url = "http://www.tv4play.se/?video_id=%s" % match.group(1)
            site = self._matching_site(sites, url)
            if site:
                return url, site(self.options, url)

        match = re.search(r"embed.bambuser.com/broadcast/(\d+)", data)
        if match:
            url = "http://bambuser.com/v/%s" % match.group(1)
            site = self._matching_site(sites, url)
            if site:
                return url, site(self.options, url)

        match = re.search(r'src="(http://tv.aftonbladet[^"]*)"', data)
        if match:
            url = match.group(1)
            site = self._matching_site(sites, url)
            if site:
                return url, site(self.options, url)

        match = re.search(r'a href="(http://tv.aftonbladet[^"]*)" class="abVi', data)
        if match:
            url = match.group(1)
            site = self._matching_site(sites, url)
            if site:
                return url, site(self.options, url)

        match = re.search(r"iframe src='(http://www.svtplay[^']*)'", data)
        if match:
            url = match.group(1)
            site = self._matching_site(sites, url)
            if site:
                return url, site(self.options, url)

        match = re.search('src="(http://mm-resource-service.herokuapp.com[^"]*)"', data)
        if match:
            site = self._matching_site(sites, match.group(1))
            if site:
                # This handler operates on the embedding page itself.
                return self.url, site(self.options, self.url)

        match = re.search(r'src="([^.]+\.solidtango.com[^"+]+)"', data)
        if match:
            url = match.group(1)
            site = self._matching_site(sites, url)
            if site:
                return self.url, site(self.options, url)

        if re.search('(lemonwhale|lwcdn.com)', data):
            # Probe handler support with a canonical lemonwhale URL; the
            # handler itself is given the embedding page.
            site = self._matching_site(sites, "http://lemonwhale.com")
            if site:
                return self.url, site(self.options, self.url)

        match = re.search('s.src="(https://csp-ssl.picsearch.com[^"]+|http://csp.picsearch.com/rest[^"]+)', data)
        if match:
            site = self._matching_site(sites, match.group(1))
            if site:
                return self.url, site(self.options, self.url)

        if re.search('(picsearch_ajax_auth|screen9-ajax-auth)', data):
            site = self._matching_site(sites, "http://csp.picsearch.com")
            if site:
                return self.url, site(self.options, self.url)

        match = re.search('iframe src="(//csp.screen9.com[^"]+)"', data)
        if match:
            url = "http:%s" % match.group(1)
            site = self._matching_site(sites, url)
            if site:
                return self.url, site(self.options, self.url)

        match = re.search('source src="([^"]+)" type="application/x-mpegURL"', data)
        if match:
            # Bare HLS source tag: hand the stream URL to the Raw service.
            for site in sites:
                if site.__name__ == "Raw":
                    return self.url, site(self.options, match.group(1))

        return self.url, stream
|
2013-03-10 13:28:31 +01:00
|
|
|
|
2015-12-26 11:46:14 +01:00
|
|
|
def service_handler(sites, options, url):
    """Instantiate and return the first service in sites that handles url.

    Returns None when no listed service claims the URL.
    """
    for site in sites:
        if site.handles(url):
            return site(options, url)
    return None
|