# Scrape header (non-code): mirror of https://github.com/spaam/svtplay-dl.git
# file: svtplay-dl/lib/svtplay_dl/utils/__init__.py (181 lines, 5.3 KiB, Python)

# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# ex:ts=4:sw=4:sts=4:et:fenc=utf-8
from __future__ import absolute_import

import logging
import re
import sys
import unicodedata
from operator import itemgetter

try:
    import HTMLParser
except ImportError:
    # pylint: disable-msg=import-error
    import html.parser as HTMLParser

try:
    from requests import Session
except ImportError:
    print("You need to install python-requests to use this script")
    sys.exit(3)
is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
is_py2_old = (sys.version_info < (2, 7))
# Used for UA spoofing in get_http_data()
2015-12-26 13:35:55 +01:00
FIREFOX_UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.3'
log = logging.getLogger('svtplay_dl')
progress_stream = sys.stderr
2015-09-15 20:10:32 +02:00
class HTTP(Session):
2015-12-26 12:14:14 +01:00
def __init__(self, options, *args, **kwargs):
Session.__init__(self, *args, **kwargs)
self.verify = options.ssl_verify
if options.http_headers:
self.headers.update(self.split_header(options.http_headers))
self.headers.update({"User-Agent": FIREFOX_UA})
def check_redirect(self, url):
return self.get(url, stream=True).url
def request(self, method, url, *args, **kwargs):
2015-10-24 21:18:23 +02:00
headers = kwargs.pop("headers", None)
if headers:
for i in headers.keys():
self.headers[i] = headers[i]
log.debug("HTTP getting %r", url)
res = Session.request(self, method, url, verify=self.verify, *args, **kwargs)
return res
2013-04-14 21:08:12 +02:00
def split_header(self, headers):
return dict(x.split('=') for x in headers.split(';'))
2015-09-15 20:10:32 +02:00
2015-01-05 22:40:40 +01:00
def sort_quality(data):
data = sorted(data, key=lambda x: (x.bitrate, x.name()), reverse=True)
datas = []
for i in data:
datas.append([i.bitrate, i.name()])
return datas
2015-09-15 20:10:32 +02:00
2015-01-05 22:40:40 +01:00
def list_quality(videos):
data = sort_quality(videos)
log.info("Quality\tMethod")
for i in data:
log.info("%s\t%s" % (i[0], i[1].upper()))
def prio_streams(options, streams, selected):
protocol_prio = options.stream_prio
if protocol_prio is None:
protocol_prio = ["hls", "hds", "http", "rtmp"]
if isinstance(protocol_prio, str):
protocol_prio = protocol_prio.split(',')
# Map score's to the reverse of the list's index values
proto_score = dict(zip(protocol_prio, range(len(protocol_prio), 0, -1)))
# Build a tuple (bitrate, proto_score, stream), and use it
# for sorting.
prioritized = [(s.bitrate, proto_score[s.name()], s) for
s in streams if s.name() in proto_score]
return [x[2] for
x in sorted(prioritized, key=itemgetter(0,1), reverse=True)
if x[0] == selected]
def select_quality(options, streams):
available = sorted(int(x.bitrate) for x in streams)
2013-03-25 19:27:26 +01:00
try:
optq = int(options.quality)
except ValueError:
log.error("Requested quality need to be a number")
sys.exit(4)
2013-03-23 18:26:48 +01:00
if optq:
2013-03-25 19:27:26 +01:00
try:
optf = int(options.flexibleq)
except ValueError:
log.error("Flexible-quality need to be a number")
sys.exit(4)
2013-03-23 18:26:48 +01:00
if not optf:
wanted = [optq]
else:
wanted = range(optq-optf, optq+optf+1)
else:
2013-03-23 18:26:48 +01:00
wanted = [available[-1]]
2013-03-23 18:26:48 +01:00
selected = None
for q in available:
if q in wanted:
selected = q
break
if not selected and selected != 0:
2015-01-05 22:40:40 +01:00
data = sort_quality(streams)
quality = ", ".join("%s (%s)" % (str(x), str(y)) for x, y in data)
log.error("Can't find that quality. Try one of: %s (or try --flexible-quality)", quality)
sys.exit(4)
return prio_streams(options, streams, selected)[0]
2015-09-15 20:10:32 +02:00
2014-01-05 16:58:17 +01:00
def ensure_unicode(s):
"""
Ensure string is a unicode string. If it isn't it assumed it is
utf-8 and decodes it to a unicode string.
"""
2015-08-30 00:08:24 +02:00
if (is_py2 and isinstance(s, str)) or (is_py3 and isinstance(s, bytes)):
2014-01-05 16:58:17 +01:00
s = s.decode('utf-8', 'replace')
return s
2015-09-15 20:10:32 +02:00
2014-01-05 16:58:17 +01:00
def decode_html_entities(s):
"""
Replaces html entities with the character they represent.
>>> print(decode_html_entities("&lt;3 &amp;"))
<3 &
2014-01-05 16:58:17 +01:00
"""
parser = HTMLParser.HTMLParser()
def unesc(m):
return parser.unescape(m.group())
return re.sub(r'(&[^;]+;)', unesc, ensure_unicode(s))
2015-09-15 20:10:32 +02:00
def filenamify(title):
"""
2016-01-10 14:55:21 +01:00
Convert a string to something suitable as a file name. E.g.
Matlagning del 1 av 10 - Räksmörgås | SVT Play
-> matlagning.del.1.av.10.-.raksmorgas.svt.play
"""
# ensure it is unicode
title = ensure_unicode(title)
2016-01-10 14:55:21 +01:00
# NFD decomposes chars into base char and diacritical mark, which
# means that we will get base char when we strip out non-ascii.
title = unicodedata.normalize('NFD', title)
2016-01-10 14:55:21 +01:00
# Convert to lowercase
# Drop any non ascii letters/digits
# Drop any leading/trailing whitespace that may have appeared
2016-01-10 14:55:21 +01:00
title = re.sub(r'[^a-z0-9 .-]', '', title.lower().strip())
# Replace whitespace with dot
title = re.sub(r'\s+', '.', title)
return title
2015-09-15 20:10:32 +02:00
def download_thumbnail(options, url):
data = Session.get(url).content
filename = re.search(r"(.*)\.[a-z0-9]{2,3}$", options.output)
tbn = "%s.tbn" % filename.group(1)
log.info("Thumbnail: %s", tbn)
fd = open(tbn, "wb")
fd.write(data)
fd.close()