2013-03-02 21:26:28 +01:00
|
|
|
# ex:ts=4:sw=4:sts=4:et
|
|
|
|
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
2013-03-01 23:39:42 +01:00
|
|
|
from __future__ import absolute_import
|
2013-03-23 16:11:36 +01:00
|
|
|
import re
|
2014-01-05 17:28:00 +01:00
|
|
|
from svtplay_dl.utils.urllib import urlparse
|
2014-01-19 14:26:48 +01:00
|
|
|
from svtplay_dl.utils import download_thumbnail
|
|
|
|
import logging
|
|
|
|
|
|
|
|
log = logging.getLogger('svtplay_dl')
|
2013-03-23 16:11:36 +01:00
|
|
|
|
|
|
|
class Service(object):
    """Base class for all download services.

    Subclasses declare the domains they handle via supported_domains
    and/or supported_domains_re; dispatch happens through handles().
    """

    # Exact netlocs this service accepts (e.g. 'svtplay.se').
    supported_domains = []
    # Regexp strings matched against the netloc, for 'dynamic' domains
    # (e.g. ones carrying country information).
    supported_domains_re = []

    def __init__(self, url):
        # The page URL this service instance will operate on.
        self.url = url

    @classmethod
    def handles(cls, url):
        """Return True if this service class can handle *url*."""
        netloc = urlparse(url).netloc

        # Dynamic domains first: compile every declared pattern up
        # front (so a bad pattern always raises), then test the netloc.
        compiled = [re.compile(pattern) for pattern in cls.supported_domains_re]
        if any(regex.match(netloc) for regex in compiled):
            return True

        # Exact domain match.
        if netloc in cls.supported_domains:
            return True

        # For every listed domain, try with www. subdomain as well.
        return netloc in ['www.' + domain for domain in cls.supported_domains]

    def get_subtitle(self, options):
        # Default: service offers no subtitles; subclasses override.
        pass
|
|
|
|
|
2014-01-19 14:26:48 +01:00
|
|
|
|
|
|
|
class OpenGraphThumbMixin(object):
    """
    Mix this into the service class to grab thumbnail from OpenGraph properties.
    """
    def get_thumbnail(self, options):
        # Fetch the page and try each known og:image markup variant in
        # order; the first one that matches wins. If none match we
        # simply return without downloading anything.
        page = get_http_data(self.url)
        variants = (
            r'<meta property="og:image" content="([^"]*)"',
            r'<meta content="([^"]*)" property="og:image"',
            r'<meta name="og:image" property="og:image" content="([^"]*)" />',
        )
        for variant in variants:
            found = re.search(variant, page)
            if found:
                download_thumbnail(options, found.group(1))
                return
|
|
|
|
|
|
|
|
|
2013-03-23 15:56:25 +01:00
|
|
|
from svtplay_dl.service.aftonbladet import Aftonbladet
|
|
|
|
from svtplay_dl.service.dr import Dr
|
|
|
|
from svtplay_dl.service.expressen import Expressen
|
|
|
|
from svtplay_dl.service.hbo import Hbo
|
|
|
|
from svtplay_dl.service.justin import Justin
|
|
|
|
from svtplay_dl.service.kanal5 import Kanal5
|
2013-04-21 21:51:45 +02:00
|
|
|
from svtplay_dl.service.mtvservices import Mtvservices
|
2013-03-23 15:56:25 +01:00
|
|
|
from svtplay_dl.service.nrk import Nrk
|
|
|
|
from svtplay_dl.service.qbrick import Qbrick
|
|
|
|
from svtplay_dl.service.ruv import Ruv
|
|
|
|
from svtplay_dl.service.radioplay import Radioplay
|
|
|
|
from svtplay_dl.service.sr import Sr
|
|
|
|
from svtplay_dl.service.svtplay import Svtplay
|
|
|
|
from svtplay_dl.service.tv4play import Tv4play
|
|
|
|
from svtplay_dl.service.urplay import Urplay
|
|
|
|
from svtplay_dl.service.viaplay import Viaplay
|
|
|
|
from svtplay_dl.service.vimeo import Vimeo
|
2014-02-05 20:37:50 +01:00
|
|
|
from svtplay_dl.service.bambuser import Bambuser
|
2013-03-23 16:11:36 +01:00
|
|
|
from svtplay_dl.utils import get_http_data
|
2013-03-23 15:56:25 +01:00
|
|
|
|
2013-03-23 15:58:15 +01:00
|
|
|
# Registry of every supported service class. Generic.get() and
# service_handler() probe these in order via their handles() classmethod.
sites = [
    Aftonbladet,
    Bambuser,
    Dr,
    Expressen,
    Hbo,
    Justin,
    Kanal5,
    Mtvservices,
    Nrk,
    Qbrick,
    Ruv,
    Radioplay,
    Sr,
    Svtplay,
    Tv4play,
    Urplay,
    Viaplay,
    Vimeo]
|
2013-03-23 15:58:15 +01:00
|
|
|
|
2013-02-28 21:44:28 +01:00
|
|
|
|
2013-03-10 13:28:31 +01:00
|
|
|
class Generic(object):
    ''' Videos embed in sites '''

    def get(self, url):
        """Scan the page at *url* for embedded players from known services.

        Returns (embed_url, service_instance) when a supported embed is
        found; otherwise (url, None) — note that url may have been
        rewritten to the last matched embed URL even when no service
        claimed it, matching the original fall-through behaviour.
        """
        data = get_http_data(url)
        stream = None
        # (regexp, url template) pairs for known embed markup; the
        # template expands match.group(1) into the service's own URL.
        embeds = [
            (r"src=\"(http://www.svt.se/wd.*)\" height", "%s"),
            (r"src=\"(http://player.vimeo.com/video/[0-9]+)\" ", "%s"),
            (r"tv4play.se/iframe/video/(\d+)?", "http://www.tv4play.se/?video_id=%s"),
            (r"embed.bambuser.com/broadcast/(\d+)", "http://bambuser.com/v/%s"),
            (r'iframe src="(http://tv.aftonbladet[^"]*)"', "%s"),
            (r"iframe src='(http://www.svtplay[^']*)'", "%s"),
        ]
        for pattern, template in embeds:
            match = re.search(pattern, data)
            if match:
                url = template % match.group(1)
                for site in sites:
                    if site.handles(url):
                        # Bug fix: the vimeo branch used to instantiate
                        # the service with the outer page URL instead of
                        # the embed URL; every branch now consistently
                        # hands the embed URL to the service.
                        return url, site(url)
        return url, stream
|
|
|
|
|
2013-02-28 21:44:28 +01:00
|
|
|
def service_handler(url):
    """Return an instance of the first service that handles *url*.

    Returns None when no registered service claims the URL.
    """
    for site in sites:
        if site.handles(url):
            return site(url)
    return None
|