1
0
mirror of https://github.com/spaam/svtplay-dl.git synced 2024-11-28 06:04:17 +01:00
svtplay-dl/lib/svtplay_dl/fetcher/hls.py

171 lines
5.2 KiB
Python
Raw Normal View History

2013-03-02 21:26:28 +01:00
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
2013-02-12 19:39:52 +01:00
import sys
import os
import re
2015-10-04 14:33:54 +02:00
import copy
2013-02-12 19:39:52 +01:00
from svtplay_dl.output import progressbar, progress_stream, ETA, output
from svtplay_dl.log import log
from svtplay_dl.error import UIException, ServiceError
2014-04-21 16:50:24 +02:00
from svtplay_dl.fetcher import VideoRetriever
2017-02-18 23:51:26 +01:00
from svtplay_dl.utils import HTTP
2014-04-21 16:50:24 +02:00
class HLSException(UIException):
    """Base exception for HLS fetcher errors.

    Carries the URL of the playlist/segment that caused the failure so
    callers can report which stream was affected.
    """

    def __init__(self, url, message):
        # Keep the offending URL for error reporting.
        self.url = url
        super(HLSException, self).__init__(message)
class LiveHLSException(HLSException):
    """Raised when the playlist is a live HLS stream, which this fetcher
    does not support (see HLS.download, which raises this unless --force
    is given)."""

    def __init__(self, url):
        super(LiveHLSException, self).__init__(
            url, "This is a live HLS stream, and they are not supported.")
2013-02-12 19:39:52 +01:00
2014-02-08 16:08:39 +01:00
def _get_full_url(url, srcurl):
if url[:4] == 'http':
return url
if url[0] == '/':
baseurl = re.search(r'^(http[s]{0,1}://[^/]+)/', srcurl)
return "%s%s" % (baseurl.group(1), url)
2014-02-08 16:08:39 +01:00
# remove everything after last / in the path of the URL
baseurl = re.sub(r'^([^\?]+)/[^/]*(\?.*)?$', r'\1', srcurl)
returl = "%s/%s" % (baseurl, url)
return returl
2015-09-15 20:10:32 +02:00
2015-10-04 14:33:54 +02:00
def hlsparse(options, res, url):
    """Parse a master HLS playlist response into available streams.

    Returns a dict mapping int bitrate (kbit/s) -> HLS retriever, or a
    dict with a single ServiceError under key 0 on failure, or None when
    no response was given.
    """
    streams = {}

    if not res:
        return None

    # Treat 400 and up as errors. The previous ``> 400`` let a 400
    # response fall through to playlist parsing (and crash), which was
    # also inconsistent with the ``res2.status_code < 400`` probe below.
    if res.status_code >= 400:
        streams[0] = ServiceError("Can't read HLS playlist. {0}".format(res.status_code))
        return streams

    files = parsem3u(res.text)[1]
    http = HTTP(options)
    for entry in files:
        try:
            # BANDWIDTH is bits/s in the playlist; convert to kbit/s.
            bitrate = float(entry[1]["BANDWIDTH"]) / 1000
        except KeyError:
            streams[0] = ServiceError("Can't read HLS playlist")
            return streams

        urls = _get_full_url(entry[0], url)
        # Probe each variant playlist and skip the unreachable ones.
        res2 = http.get(urls, cookies=res.cookies)
        if res2.status_code < 400:
            streams[int(bitrate)] = HLS(copy.copy(options), urls, bitrate, cookies=res.cookies)
    return streams
2014-04-21 21:42:49 +02:00
2015-09-15 20:10:32 +02:00
2014-04-21 21:55:39 +02:00
class HLS(VideoRetriever):
    """Fetcher that downloads an HLS media playlist segment by segment."""

    def name(self):
        # Identifier used to select/report this fetcher.
        return "hls"

    def download(self):
        """Fetch every segment listed in the playlist and write them out.

        Refuses live streams unless --force is set. AES-128 encrypted
        playlists are supported via pycrypto, which is imported lazily so
        unencrypted downloads work without it installed.
        """
        if self.options.live and not self.options.force:
            raise LiveHLSException(self.url)

        cookies = self.kwargs["cookies"]
        m3u8 = self.http.request("get", self.url, cookies=cookies).text
        globaldata, files = parsem3u(m3u8)
        encrypted = False
        key = None
        if "KEY" in globaldata:
            # Presence of an #EXT-X-KEY tag means the segments are encrypted.
            keydata = globaldata["KEY"]
            encrypted = True

        if encrypted:
            try:
                from Crypto.Cipher import AES
            except ImportError:
                log.error("You need to install pycrypto to download encrypted HLS streams")
                sys.exit(2)

            # Fetch the AES key from the URI given in the KEY tag.
            match = re.search(r'URI="(https?://.*?)"', keydata)
            key = self.http.request("get", match.group(1)).content
            # NOTE(review): a *random* IV is used here instead of the IV
            # attribute from the playlist (or the segment sequence number,
            # as the HLS spec prescribes); this corrupts the first 16-byte
            # block of each segment -- confirm whether this is intentional.
            rand = os.urandom(16)
            decryptor = AES.new(key, AES.MODE_CBC, rand)

        file_d = output(self.options, "ts")
        # output() signals failure by returning something non-file-like.
        if hasattr(file_d, "read") is False:
            return

        n = 1
        eta = ETA(len(files))
        for i in files:
            item = _get_full_url(i[0], self.url)

            # Only show progress when writing to a real file, non-silently.
            if self.options.output != "-" and not self.options.silent:
                eta.increment()
                progressbar(len(files), n, ''.join(['ETA: ', str(eta)]))
                n += 1

            data = self.http.request("get", item, cookies=cookies)
            if data.status_code == 404:
                # Stop at the first missing segment.
                break
            data = data.content
            if encrypted:
                data = decryptor.decrypt(data)
            file_d.write(data)

        if self.options.output != "-":
            file_d.close()
        if not self.options.silent:
            progress_stream.write('\n')
        self.finished = True
2013-02-12 19:39:52 +01:00
2015-09-15 20:10:32 +02:00
2013-02-12 19:39:52 +01:00
def parsem3u(data):
    """Parse an (extended) m3u8 playlist.

    Returns a ``(globdata, files)`` tuple: *globdata* maps global
    ``#EXT-X-*`` tag names to their raw string values; *files* is a list
    of ``(uri, streaminfo)`` tuples where *streaminfo* holds
    BANDWIDTH/RESOLUTION for master playlists or duration/title for
    media playlists.

    Raises ValueError when *data* is not an extended m3u file.
    """
    if not data.startswith("#EXTM3U"):
        # Typo fixed: was "Does not apprear to be a ext m3u file".
        raise ValueError("Does not appear to be an ext m3u file")

    files = []
    streaminfo = {}
    globdata = {}

    # Normalize CR line endings, then walk the lines after the header.
    data = data.replace("\r", "\n")
    for l in data.split("\n")[1:]:
        if not l:
            continue
        if l.startswith("#EXT-X-STREAM-INF:"):
            # Not a proper attribute-list parser: splitting on "," only
            # copes with simple BANDWIDTH/RESOLUTION values.
            info = [x.strip().split("=", 1) for x in l[18:].split(",")]
            for pair in info:
                if pair[0] in ("BANDWIDTH", "RESOLUTION"):
                    streaminfo[pair[0]] = pair[1]
        elif l.startswith("#EXT-X-MAP:"):
            line = l[11:]
            if line.startswith("URI"):
                # Strip the 'URI="' prefix and the closing quote.
                files.append((line[5:].split("\"")[0], streaminfo))
        elif l.startswith("#EXT-X-ENDLIST") or l.startswith("#EXT-X-BYTERANGE:"):
            break
        elif l.startswith("#EXT-X-"):
            # Generic global tag: '#EXT-X-NAME:VALUE' -> {'NAME': 'VALUE'};
            # a tag with no value maps to the string "None".
            line = [l[7:].strip().split(":", 1)]
            if len(line[0]) == 1:
                line[0].append("None")
            globdata.update(dict(line))
        elif l.startswith("#EXTINF:"):
            try:
                dur, title = l[8:].strip().split(",", 1)
            except ValueError:
                # No comma: only a duration, no title. (Was a bare
                # except:, which also swallowed KeyboardInterrupt.)
                dur = l[8:].strip()
                title = None
            streaminfo['duration'] = dur
            streaminfo['title'] = title
        elif l[0] == '#':
            # Unknown comment/tag: ignore.
            pass
        else:
            # Plain URI line: emit it with the info collected so far.
            files.append((l, streaminfo))
            streaminfo = {}
    return globdata, files