# svtplay-dl/lib/svtplay_dl/service/__init__.py
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
from svtplay_dl.utils.urllib import urlparse
from svtplay_dl.utils import download_thumbnail
import logging
log = logging.getLogger('svtplay_dl')

class Service(object):
    supported_domains = []
    supported_domains_re = []

    def __init__(self, _url):
        self._url = _url
        self._urldata = None

    @property
    def url(self):
        return self._url

    def get_urldata(self):
        if self._urldata is None:
            self._urldata = get_http_data(self.url)
        return self._urldata

    @classmethod
    def handles(cls, url):
        urlp = urlparse(url)

        # Apply supported_domains_re regexp to the netloc. This
        # is meant for 'dynamic' domains, e.g. containing country
        # information etc.
        for domain_re in [re.compile(x) for x in cls.supported_domains_re]:
            if domain_re.match(urlp.netloc):
                return True

        if urlp.netloc in cls.supported_domains:
            return True

        # For every listed domain, try with www. subdomain as well.
        if urlp.netloc in ['www.' + x for x in cls.supported_domains]:
            return True

        return False

    def get_subtitle(self, options):
        pass

    # the options parameter is unused, but is part of the
    # interface, so we don't want to remove it. Thus, the
    # pylint ignore.
    def find_all_episodes(self, options):  # pylint: disable-msg=unused-argument
        log.warning("--all-episodes not implemented for this service")
        return [self.url]
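
# Illustrative sketch of how Service.handles() matches URLs for a subclass;
# the Example class and the example.com domain are hypothetical and only show
# the matching rules (regexp netloc, exact netloc, implicit www. variant).
#
#     class Example(Service):
#         supported_domains = ['example.com']
#
#     Example.handles('http://example.com/video/1')      # True: exact netloc match
#     Example.handles('http://www.example.com/video/1')  # True: implicit www. variant
#     Example.handles('http://other.org/video/1')        # False: domain not listed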

def opengraph_get(html, prop):
    """
    Extract specified OpenGraph property from html.

    >>> opengraph_get('<html><head><meta property="og:image" content="http://example.com/img.jpg"><meta ...', "image")
    'http://example.com/img.jpg'
    >>> opengraph_get('<html><head><meta content="http://example.com/img2.jpg" property="og:image"><meta ...', "image")
    'http://example.com/img2.jpg'
    >>> opengraph_get('<html><head><meta name="og:image" property="og:image" content="http://example.com/img3.jpg"><meta ...', "image")
    'http://example.com/img3.jpg'
    """
    match = re.search('<meta [^>]*property="og:' + prop + '" content="([^"]*)"', html)
    if match is None:
        match = re.search('<meta [^>]*content="([^"]*)" property="og:' + prop + '"', html)
    if match is None:
        return None
    return match.group(1)

class OpenGraphThumbMixin(object):
    """
    Mix this into the service class to grab thumbnail from OpenGraph properties.
    """
    def get_thumbnail(self, options):
        url = opengraph_get(self.get_urldata(), "image")
        if url is None:
            return
        download_thumbnail(options, url)
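
# Illustrative sketch of the mixin in use; the Example class and domain are
# hypothetical. A service that also inherits OpenGraphThumbMixin gets a
# get_thumbnail() that pulls the og:image URL out of the downloaded page and
# hands it to download_thumbnail().
#
#     class Example(Service, OpenGraphThumbMixin):
#         supported_domains = ['example.com']
#
#     # with parsed command-line options in hand:
#     #     Example('http://example.com/video/1').get_thumbnail(options)
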
from svtplay_dl.service.aftonbladet import Aftonbladet
from svtplay_dl.service.dr import Dr
from svtplay_dl.service.expressen import Expressen
from svtplay_dl.service.hbo import Hbo
from svtplay_dl.service.justin import Justin
from svtplay_dl.service.kanal5 import Kanal5
from svtplay_dl.service.mtvservices import Mtvservices
from svtplay_dl.service.nrk import Nrk
from svtplay_dl.service.qbrick import Qbrick
from svtplay_dl.service.picsearch import Picsearch
from svtplay_dl.service.ruv import Ruv
from svtplay_dl.service.radioplay import Radioplay
from svtplay_dl.service.sr import Sr
from svtplay_dl.service.svtplay import Svtplay
from svtplay_dl.service.oppetarkiv import OppetArkiv
from svtplay_dl.service.tv4play import Tv4play
from svtplay_dl.service.urplay import Urplay
from svtplay_dl.service.viaplay import Viaplay
from svtplay_dl.service.vimeo import Vimeo
from svtplay_dl.service.bambuser import Bambuser
from svtplay_dl.service.lemonwhale import Lemonwhale
from svtplay_dl.utils import get_http_data

sites = [
    Aftonbladet,
    Bambuser,
    Dr,
    Expressen,
    Hbo,
    Justin,
    Lemonwhale,
    Kanal5,
    Mtvservices,
    Nrk,
    Qbrick,
    Picsearch,
    Ruv,
    Radioplay,
    Sr,
    Svtplay,
    OppetArkiv,
    Tv4play,
    Urplay,
    Viaplay,
    Vimeo]

class Generic(object):
    ''' Videos embedded in sites '''
    def get(self, url):
        data = get_http_data(url)

        match = re.search(r"src=\"(http://www.svt.se/wd.*)\" height", data)
        stream = None
        if match:
            url = match.group(1)
            for i in sites:
                if i.handles(url):
                    url = url.replace("&amp;", "&")
                    return url, i(url)
match = re.search(r"src=\"(http://player.vimeo.com/video/[0-9]+)\" ", data)
if match:
for i in sites:
if i.handles(match.group(1)):
2014-01-06 23:14:06 +01:00
return match.group(1), i(url)
match = re.search(r"tv4play.se/iframe/video/(\d+)?", data)
2013-03-24 14:55:14 +01:00
if match:
url = "http://www.tv4play.se/?video_id=%s" % match.group(1)
for i in sites:
if i.handles(url):
2014-01-06 23:14:06 +01:00
return url, i(url)
match = re.search(r"embed.bambuser.com/broadcast/(\d+)", data)
if match:
url = "http://bambuser.com/v/%s" % match.group(1)
for i in sites:
if i.handles(url):
return url, i(url)
2014-02-05 23:15:19 +01:00
match = re.search(r'iframe src="(http://tv.aftonbladet[^"]*)"', data)
if match:
url = match.group(1)
for i in sites:
if i.handles(url):
return url, i(url)
match = re.search(r"iframe src='(http://www.svtplay[^']*)'", data)
2014-02-05 23:15:19 +01:00
if match:
url = match.group(1)
for i in sites:
if i.handles(url):
return url, i(url)
return url, stream
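
# Illustrative usage sketch (the page URL is hypothetical): Generic().get()
# downloads the page, looks for a known embedded player, and returns the
# resolved media URL together with a matching service instance, or
# (url, None) when no embedded player was recognised.
#
#     url, stream = Generic().get('http://example.com/article-with-video')
#     if stream is None:
#         pass  # nothing usable was found on the page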

def service_handler(url):
    handler = None

    for i in sites:
        if i.handles(url):
            handler = i(url)
            break

    return handler
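
# Illustrative usage sketch (the URL is hypothetical): service_handler()
# returns an instance of the first service class in ``sites`` that claims the
# URL, or None if nothing matches, in which case a caller could fall back to
# Generic().get() to look for embedded players.
#
#     service = service_handler('http://www.svtplay.se/video/12345/some-show')
#     if service is None:
#         url, service = Generic().get('http://www.svtplay.se/video/12345/some-show')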