2013-03-02 21:26:28 +01:00
|
|
|
# ex:ts=4:sw=4:sts=4:et
|
|
|
|
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
2013-03-01 23:39:42 +01:00
|
|
|
from __future__ import absolute_import
|
2013-01-17 00:21:47 +01:00
|
|
|
import sys
|
|
|
|
import logging
|
2013-03-24 20:20:16 +01:00
|
|
|
import re
|
2014-01-05 17:02:48 +01:00
|
|
|
import unicodedata
|
2014-07-22 10:19:10 +02:00
|
|
|
|
2014-01-05 16:58:17 +01:00
|
|
|
try:
|
|
|
|
import HTMLParser
|
|
|
|
except ImportError:
|
2014-07-22 10:19:10 +02:00
|
|
|
# pylint: disable-msg=import-error
|
2014-01-05 16:58:17 +01:00
|
|
|
import html.parser as HTMLParser
|
2013-01-17 00:21:47 +01:00
|
|
|
|
2013-12-30 01:35:08 +01:00
|
|
|
# Interpreter-version flags used for py2/py3 compatibility branches below.
is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
# True on Python < 2.7 (e.g. 2.6), where some stdlib APIs differ.
is_py2_old = (sys.version_info < (2, 7))

# Used for UA spoofing in get_http_data()
FIREFOX_UA = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'

# Module-wide logger; handlers and level are configured by the application.
log = logging.getLogger('svtplay_dl')
# Progress output is written to stderr — presumably so stdout stays clean
# for piping; confirm against the callers that write to progress_stream.
progress_stream = sys.stderr

# requests is a hard dependency: fail early with a clear message instead of
# a later ImportError traceback.
try:
    from requests import Session
except ImportError:
    print("You need to install python-requests to use this script")
    sys.exit(3)
|
2015-08-30 00:06:20 +02:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2015-08-30 00:06:20 +02:00
|
|
|
class HTTP(Session):
    """requests.Session subclass that sends a spoofed desktop User-Agent."""

    # Single definition of the spoofed UA; the original duplicated this
    # literal in both branches of request().
    DEFAULT_UA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.3"

    def __init__(self, *args, **kwargs):
        Session.__init__(self, *args, **kwargs)

    def check_redirect(self, url):
        """Return the final URL after following any redirects for *url*.

        stream=True avoids downloading the body; the response is closed
        explicitly so the pooled connection is released instead of leaked.
        """
        res = self.get(url, stream=True)
        try:
            return res.url
        finally:
            res.close()

    def request(self, method, url, *args, **kwargs):
        """Issue a request with the spoofed User-Agent header applied.

        Any caller-supplied User-Agent is overwritten, matching the
        original behavior.
        """
        headers = kwargs.pop("headers", None)
        if not headers:
            headers = {}
        headers["User-Agent"] = self.DEFAULT_UA
        log.debug("HTTP getting %r", url)
        return Session.request(self, method, url, headers=headers, *args, **kwargs)
|
2013-04-14 21:08:12 +02:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2015-01-05 22:40:40 +01:00
|
|
|
def sort_quality(data):
    """Return [bitrate, method-name] pairs for *data*, best quality first.

    Streams are ordered by (bitrate, name()) descending.
    """
    ordered = sorted(data, key=lambda s: (s.bitrate, s.name()), reverse=True)
    return [[stream.bitrate, stream.name()] for stream in ordered]
|
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2015-01-05 22:40:40 +01:00
|
|
|
def list_quality(videos):
    """Log every available quality as a bitrate / download-method table."""
    log.info("Quality\tMethod")
    for bitrate, method in sort_quality(videos):
        log.info("%s\t%s", bitrate, method.upper())
|
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2013-01-17 00:21:47 +01:00
|
|
|
def select_quality(options, streams):
    """Pick the stream from *streams* matching the requested quality.

    options.quality is the wanted bitrate; options.flexibleq widens the
    match to quality +/- flexibleq. With no requested quality the highest
    available bitrate is chosen. Exits the process (code 4) on malformed
    options or when no stream matches.
    """
    available = sorted(int(x.bitrate) for x in streams)

    # TypeError is caught too so a None/non-numeric option produces the
    # clean error exit instead of an uncaught traceback.
    try:
        optq = int(options.quality)
    except (ValueError, TypeError):
        log.error("Requested quality need to be a number")
        sys.exit(4)

    if optq:
        try:
            optf = int(options.flexibleq)
        except (ValueError, TypeError):
            log.error("Flexible-quality need to be a number")
            sys.exit(4)

        if not optf:
            wanted = [optq]
        else:
            wanted = range(optq - optf, optq + optf + 1)
    else:
        # No explicit quality: take the best (highest) available bitrate.
        wanted = [available[-1]]

    selected = None
    for q in available:
        if q in wanted:
            selected = q
            break

    if not selected and selected != 0:
        data = sort_quality(streams)
        quality = ", ".join("%s (%s)" % (str(x), str(y)) for x, y in data)
        log.error("Can't find that quality. Try one of: %s (or try --flexible-quality)", quality)
        sys.exit(4)

    # No break here on purpose: with duplicate bitrates the LAST match wins,
    # matching the original behavior.
    for i in streams:
        if int(i.bitrate) == selected:
            stream = i

    return stream
|
2013-01-17 00:21:47 +01:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-01-05 16:58:17 +01:00
|
|
|
def ensure_unicode(s):
    """
    Ensure string is a unicode string. If it isn't it assumed it is
    utf-8 and decodes it to a unicode string.
    """
    # On Python 2 `str` IS `bytes`, so a single isinstance(s, bytes) check
    # is exactly equivalent to the old
    # (is_py2 and isinstance(s, str)) or (is_py3 and isinstance(s, bytes))
    # on both interpreters — and drops the module-global dependency.
    if isinstance(s, bytes):
        s = s.decode('utf-8', 'replace')
    return s
|
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-01-05 16:58:17 +01:00
|
|
|
def decode_html_entities(s):
    """
    Replaces html entities with the character they represent.

    >>> print(decode_html_entities(u'&lt;3 &amp;'))
    <3 &
    """
    # HTMLParser.unescape() was deprecated and removed in Python 3.9;
    # use html.unescape (3.4+) when available, keeping the old HTMLParser
    # path as the Python 2 fallback.
    try:
        from html import unescape
    except ImportError:
        unescape = HTMLParser.HTMLParser().unescape

    def unesc(m):
        return unescape(m.group())

    return re.sub(r'(&[^;]+;)', unesc, ensure_unicode(s))
|
2014-01-05 17:02:48 +01:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-01-05 17:02:48 +01:00
|
|
|
def filenamify(title):
    """
    Convert a string to something suitable as a file name.

    >>> print(filenamify(u'Matlagning del 1 av 10 - R\xe4ksm\xf6rg\xe5s | SVT Play'))
    matlagning.del.1.av.10.-.raksmorgas.svt.play
    """
    # Work on a unicode string so the normalization below behaves.
    name = ensure_unicode(title)

    # NFD splits accented characters into base char + combining mark, so
    # the ascii filter keeps the base letter (e.g. 'ä' -> 'a').
    name = unicodedata.normalize('NFD', name)

    # Keep only ascii letters/digits, space, dot and dash.
    name = re.sub(r'[^a-zA-Z0-9 .-]', '', name)

    # Remove " and '
    name = re.sub('[\"\']', '', name)

    # Trim stray whitespace, lowercase, then collapse whitespace to dots.
    name = name.strip().lower()
    return re.sub(r'\s+', '.', name)
|
2014-01-19 14:26:48 +01:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-01-19 14:26:48 +01:00
|
|
|
def download_thumbnail(options, url):
    """Fetch the thumbnail at *url* and save it next to options.output as .tbn."""
    # Session.get is an instance method; the original called it unbound on
    # the class (Session.get(url)), which raises TypeError. An instance is
    # required here.
    data = Session().get(url).content

    # Replace the output file's extension with .tbn; if the output name has
    # no recognizable extension, just append .tbn instead of crashing on a
    # failed match.
    match = re.search(r"(.*)\.[a-z0-9]{2,3}$", options.output)
    base = match.group(1) if match else options.output
    tbn = "%s.tbn" % base
    log.info("Thumbnail: %s", tbn)

    # Context manager guarantees the file is closed even if write fails.
    with open(tbn, "wb") as fd:
        fd.write(data)
|