# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
import copy
import math
import os
import re
import time
import xml.etree.ElementTree as ET
from datetime import datetime
from urllib.parse import urljoin

from svtplay_dl.error import ServiceError
from svtplay_dl.error import UIException
from svtplay_dl.fetcher import VideoRetriever
from svtplay_dl.subtitle import subtitle_probe
from svtplay_dl.utils.output import ETA
from svtplay_dl.utils.output import formatname
from svtplay_dl.utils.output import progress_stream
from svtplay_dl.utils.output import progressbar


class DASHException(UIException):
    def __init__(self, url, message):
        self.url = url
        super().__init__(message)


class LiveDASHException(DASHException):
    def __init__(self, url):
        super().__init__(url, "This is a live DASH stream, and live streams are not supported.")


class DASHAttributes:
    """Key/value store for manifest-level attributes; get() falls back to 0 for unset keys."""

    def __init__(self):
        self.default = {}

    def set(self, key, value):
        self.default[key] = value

    def get(self, key):
        if key in self.default:
            return self.default[key]
        return 0


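# Minimal usage sketch for DASHAttributes (values are made up for illustration):
#
#     attributes = DASHAttributes()
#     attributes.set("timescale", 90000.0)
#     attributes.get("timescale")  # -> 90000.0
#     attributes.get("duration")   # -> 0, the fallback for keys that were never set

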
def templateelement(attributes, element, filename, idnumber):
    """Expand a SegmentTemplate element into a list of segment URLs, init segment first."""
    files = []

    init = element.attrib["initialization"]
    media = element.attrib["media"]
    if "startNumber" in element.attrib:
        start = int(element.attrib["startNumber"])
    else:
        start = 1

    if "timescale" in element.attrib:
        attributes.set("timescale", float(element.attrib["timescale"]))
    else:
        attributes.set("timescale", 1)

    if "duration" in element.attrib:
        attributes.set("duration", float(element.attrib["duration"]))

    segments = []
    timeline = element.findall("{urn:mpeg:dash:schema:mpd:2011}SegmentTimeline/{urn:mpeg:dash:schema:mpd:2011}S")
    if timeline:
        t = -1
        for s in timeline:
            duration = int(s.attrib["d"])
            repeat = int(s.attrib["r"]) if "r" in s.attrib else 0
            segmenttime = int(s.attrib["t"]) if "t" in s.attrib else 0

            if t < 0:
                t = segmenttime
            count = repeat + 1

            end = start + len(segments) + count
            number = start + len(segments)
            while number < end:
                segments.append({"number": number, "duration": math.ceil(duration / attributes.get("timescale")), "time": t})
                t += duration
                number += 1
    else:
        if attributes.get("type") == "static":
            end = math.ceil(attributes.get("mediaPresentationDuration") / (attributes.get("duration") / attributes.get("timescale")))
        else:
            # Seen on dynamic live content: derive the available segment window
            # from the wall clock and the manifest's timing attributes.
            start = 0
            now = time.time()
            periodStartWC = time.mktime(attributes.get("availabilityStartTime").timetuple()) + start
            periodEndWC = now + attributes.get("minimumUpdatePeriod")
            periodDuration = periodEndWC - periodStartWC
            segmentCount = math.ceil(periodDuration * attributes.get("timescale") / attributes.get("duration"))
            availableStart = math.floor(
                (now - periodStartWC - attributes.get("timeShiftBufferDepth")) * attributes.get("timescale") / attributes.get("duration"),
            )
            availableEnd = math.floor((now - periodStartWC) * attributes.get("timescale") / attributes.get("duration"))
            start = max(0, availableStart)
            end = min(segmentCount, availableEnd)
        for number in range(start, end):
            segments.append({"number": number, "duration": int(attributes.get("duration") / attributes.get("timescale"))})

    name = media.replace("$RepresentationID$", idnumber).replace("$Bandwidth$", attributes.get("bandwidth"))
    files.append(urljoin(filename, init.replace("$RepresentationID$", idnumber).replace("$Bandwidth$", attributes.get("bandwidth"))))
    for segment in segments:
        new = name  # fallback when the template contains neither $Time$ nor $Number$
        if "$Time$" in media:
            new = name.replace("$Time$", str(segment["time"]))
        if "$Number" in name:
            if re.search(r"\$Number(\%\d+)d\$", name):
                vname = name.replace("$Number", "").replace("$", "")
                new = vname % segment["number"]
            else:
                new = name.replace("$Number$", str(segment["number"]))

        files.append(urljoin(filename, new))

    return files


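# How templateelement expands a template (hypothetical SegmentTemplate values):
# with media="seg_$RepresentationID$_$Number%05d$.m4s", idnumber="video1" and
# startNumber="1", the first two entries after the init segment become
#     seg_video1_00001.m4s
#     seg_video1_00002.m4s
# whereas a media string containing $Time$ is filled with each segment's "time"
# value from the SegmentTimeline instead of a running index.

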
def adaptationset(attributes, elements, url, baseurl=None):
    """Build stream dicts (bitrate, files, codec, ...) from AdaptationSet elements."""
    streams = []

    dirname = os.path.dirname(url) + "/"
    if baseurl:
        dirname = urljoin(dirname, baseurl)
    for element in elements:
        role = "main"
        template = element.find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate")
        representations = element.findall(".//{urn:mpeg:dash:schema:mpd:2011}Representation")
        role_elements = element.findall(".//{urn:mpeg:dash:schema:mpd:2011}Role")

        codecs = None
        if "codecs" in element.attrib:
            codecs = element.attrib["codecs"]
        lang = ""
        if "lang" in element.attrib:
            lang = element.attrib["lang"]
        if role_elements:
            role = role_elements[0].attrib["value"]

        resolution = ""
        if "maxWidth" in element.attrib and "maxHeight" in element.attrib:
            resolution = f'{element.attrib["maxWidth"]}x{element.attrib["maxHeight"]}'

        for i in representations:
            files = []
            segments = False
            filename = dirname
            mimetype = None
            attributes.set("bandwidth", i.attrib["bandwidth"])
            bitrate = int(i.attrib["bandwidth"]) / 1000
            if "mimeType" in element.attrib:
                mimetype = element.attrib["mimeType"]
            idnumber = i.attrib["id"]
            channels = None
            codec = None
            if codecs is None and "codecs" in i.attrib:
                codecs = i.attrib["codecs"]
            if codecs and codecs[:3] == "avc":
                codec = "h264"
            elif codecs and codecs[:3] == "hvc":
                codec = "hevc"
            elif codecs and codecs[:3] == "dvh":
                codec = "dvhevc"
            else:
                codec = codecs
            if not resolution and "maxWidth" in i.attrib and "maxHeight" in i.attrib:
                resolution = f'{i.attrib["maxWidth"]}x{i.attrib["maxHeight"]}'
            elif not resolution and "width" in i.attrib and "height" in i.attrib:
                resolution = f'{i.attrib["width"]}x{i.attrib["height"]}'
            if i.find("{urn:mpeg:dash:schema:mpd:2011}AudioChannelConfiguration") is not None:
                chan = i.find("{urn:mpeg:dash:schema:mpd:2011}AudioChannelConfiguration").attrib["value"]
                if chan == "6":
                    channels = "51"
                else:
                    channels = None
            if i.find("{urn:mpeg:dash:schema:mpd:2011}BaseURL") is not None:
                filename = urljoin(filename, i.find("{urn:mpeg:dash:schema:mpd:2011}BaseURL").text)

            if i.find("{urn:mpeg:dash:schema:mpd:2011}SegmentBase") is not None:
                segments = True
                files.append(filename)
            if template is not None:
                segments = True
                files = templateelement(attributes, template, filename, idnumber)
            elif i.find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate") is not None:
                segments = True
                files = templateelement(attributes, i.find("{urn:mpeg:dash:schema:mpd:2011}SegmentTemplate"), filename, idnumber)
            if mimetype == "text/vtt":
                files.append(filename)

            if files:
                streams.append(
                    {
                        "bitrate": bitrate,
                        "segments": segments,
                        "files": files,
                        "codecs": codec,
                        "channels": channels,
                        "lang": lang,
                        "mimetype": mimetype,
                        "resolution": resolution,
                        "role": role,
                    },
                )
            resolution = ""
    return streams


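# Shape of each dict adaptationset yields (matching the streams.append call
# above): "bitrate" in kbit/s, "segments" (bool: segmented or single file),
# "files" (list of URLs), plus "codecs", "channels", "lang", "mimetype",
# "resolution" and "role".

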
def dashparse(config, res, url, output, **kwargs):
    if not res:
        return

    if res.status_code >= 400:
        yield ServiceError(f"Can't read DASH playlist. {res.status_code}")
        return
    if len(res.text) < 1:
        yield ServiceError(f"Can't read DASH playlist. {res.status_code}, size: {len(res.text)}")
        return

    yield from _dashparse(config, res.text, url, output, cookies=res.cookies, **kwargs)


def _dashparse(config, text, url, output, cookies, **kwargs):
    baseurl = None
    loutput = copy.copy(output)
    loutput["ext"] = "mp4"
    attributes = DASHAttributes()

    # Escape bare ampersands so ElementTree can parse manifests with unescaped URLs.
    text = re.sub("&(?!amp;)", "&amp;", text)
    xml = ET.XML(text)

    if xml.find("./{urn:mpeg:dash:schema:mpd:2011}BaseURL") is not None:
        baseurl = xml.find("./{urn:mpeg:dash:schema:mpd:2011}BaseURL").text

    if "availabilityStartTime" in xml.attrib:
        attributes.set("availabilityStartTime", parse_dates(xml.attrib["availabilityStartTime"]))
        attributes.set("publishTime", parse_dates(xml.attrib["publishTime"]))

    if "mediaPresentationDuration" in xml.attrib:
        attributes.set("mediaPresentationDuration", parse_duration(xml.attrib["mediaPresentationDuration"]))
    if "timeShiftBufferDepth" in xml.attrib:
        attributes.set("timeShiftBufferDepth", parse_duration(xml.attrib["timeShiftBufferDepth"]))
    if "minimumUpdatePeriod" in xml.attrib:
        attributes.set("minimumUpdatePeriod", parse_duration(xml.attrib["minimumUpdatePeriod"]))

    attributes.set("type", xml.attrib["type"])
    temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@mimeType="audio/mp4"]')
    if len(temp) == 0:
        temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@contentType="audio"]')
    audiofiles = adaptationset(attributes, temp, url, baseurl)
    temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@mimeType="video/mp4"]')
    if len(temp) == 0:
        temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@contentType="video"]')
    videofiles = adaptationset(attributes, temp, url, baseurl)
    temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@contentType="text"]')
    if len(temp) == 0:
        temp = xml.findall('.//{urn:mpeg:dash:schema:mpd:2011}AdaptationSet[@mimeType="application/mp4"]')
    subtitles = adaptationset(attributes, temp, url, baseurl)

    if not audiofiles or not videofiles:
        yield ServiceError("Found no audio or video streams to download.")
        return
    if "channels" in kwargs:
        kwargs.pop("channels")
    if "codec" in kwargs:
        kwargs.pop("codec")

    for video in videofiles:
        for audio in audiofiles:
            bitrate = video["bitrate"] + audio["bitrate"]
            yield DASH(
                copy.copy(config),
                url,
                bitrate,
                cookies=cookies,
                audio=audio["files"],
                files=video["files"],
                output=loutput,
                segments=video["segments"],
                codec=video["codecs"],
                channels=audio["channels"],
                resolution=video["resolution"],
                language=audio["lang"],
                role=audio["role"],
                **kwargs,
            )
    for sub in subtitles:
        if len(subtitles) > 1:
            if sub["role"] and sub["role"] != "main" and sub["role"] != "subtitle":
                sub["lang"] = f'{sub["lang"]}-{sub["role"]}'
        yield from subtitle_probe(copy.copy(config), url, subfix=sub["lang"], output=copy.copy(loutput), files=sub["files"], **kwargs)


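# Illustration of the ampersand escaping near the top of _dashparse, using a
# hypothetical manifest fragment; bare "&" is escaped, existing "&amp;" is kept:
#
#     re.sub("&(?!amp;)", "&amp;", "media.mp4?a=1&b=2&amp;c=3")
#     # -> "media.mp4?a=1&amp;b=2&amp;c=3"

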
def parse_duration(duration):
    """Convert an ISO 8601 duration such as "PT1H30M" to seconds (months count as 30 days)."""
    match = re.search(r"P(?:(\d*)Y)?(?:(\d*)M)?(?:(\d*)D)?(?:T(?:(\d*)H)?(?:(\d*)M)?(?:([\d.]*)S)?)?", duration)
    if not match:
        return 0
    year = int(match.group(1)) * 365 * 24 * 60 * 60 if match.group(1) else 0
    month = int(match.group(2)) * 30 * 24 * 60 * 60 if match.group(2) else 0
    day = int(match.group(3)) * 24 * 60 * 60 if match.group(3) else 0
    hour = int(match.group(4)) * 60 * 60 if match.group(4) else 0
    minute = int(match.group(5)) * 60 if match.group(5) else 0
    second = float(match.group(6)) if match.group(6) else 0
    return year + month + day + hour + minute + second


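# Worked examples for parse_duration:
#
#     parse_duration("PT1H2M3.5S")  # -> 3723.5 (3600 + 120 + 3.5)
#     parse_duration("P1DT1H")      # -> 90000 (24 * 60 * 60 + 3600)

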
def parse_dates(date_str):
    # strptime's %f handles at most six sub-second digits, so divide longer
    # fractions (e.g. nanoseconds) by 1000 so they fit.
    match = re.search(r"(.*:.*)\.(\d{5,9})Z", date_str)
    if match:
        date_str = f"{match.group(1)}.{int(int(match.group(2)) / 1000)}Z"
    date_patterns = ["%Y-%m-%dT%H:%M:%S.%fZ", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%SZ"]
    dt = None
    for pattern in date_patterns:
        try:
            dt = datetime.strptime(date_str, pattern)
            break
        except Exception:
            pass
    if not dt:
        raise ValueError(f"Can't parse date format: {date_str}")

    return dt


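# Example for parse_dates: a nanosecond-precision timestamp is first cut down
# to microseconds, then parsed with the first pattern:
#
#     parse_dates("2021-02-28T21:05:15.123456789Z")
#     # regex rewrites it to "2021-02-28T21:05:15.123456Z"
#     # -> datetime(2021, 2, 28, 21, 5, 15, 123456)

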
class DASH(VideoRetriever):
    @property
    def name(self):
        return "dash"

    def download(self):
        self.output_extention = "mp4"
        if self.config.get("live") and not self.config.get("force"):
            raise LiveDASHException(self.url)

        if self.segments:
            if self.audio and not self.config.get("only_video"):
                self._download2(self.audio, audio=True)
            if not self.config.get("only_audio"):
                self._download2(self.files)
        else:
            if self.audio and not self.config.get("only_video"):
                self._download_url(self.audio, audio=True)
            if not self.config.get("only_audio"):
                self._download_url(self.url)

    def _download2(self, files, audio=False):
        cookies = self.kwargs["cookies"]

        if audio:
            self.output["ext"] = "m4a"
        else:
            self.output["ext"] = "mp4"

        filename = formatname(self.output, self.config)
        file_d = open(filename, "wb")

        eta = ETA(len(files))
        n = 1
        for i in files:
            if not self.config.get("silent"):
                eta.increment()
                progressbar(len(files), n, "".join(["ETA: ", str(eta)]))
                n += 1
            data = self.http.request("get", i, cookies=cookies)

            if data.status_code == 404:
                break
            data = data.content
            file_d.write(data)

        file_d.close()
        if not self.config.get("silent"):
            progress_stream.write("\n")
        self.finished = True

    def _download_url(self, url, audio=False, total_size=None):
        cookies = self.kwargs["cookies"]
        # Fetch the first 8 KiB and read the full size from the Content-Range
        # header, e.g. "bytes 0-8192/1000000" -> 1000000.
        data = self.http.request("get", url, cookies=cookies, headers={"Range": "bytes=0-8192"})
        if not total_size:
            try:
                total_size = data.headers["Content-Range"]
                total_size = total_size[total_size.find("/") + 1 :]
                total_size = int(total_size)
            except KeyError:
                raise KeyError("Can't get the total size.")

        bytes_so_far = 8192
        if audio:
            self.output["ext"] = "m4a"
        else:
            self.output["ext"] = "mp4"
        filename = formatname(self.output, self.config)
        file_d = open(filename, "wb")

        file_d.write(data.content)
        eta = ETA(total_size)
        while bytes_so_far < total_size:
            if not self.config.get("silent"):
                eta.update(bytes_so_far)
                progressbar(total_size, bytes_so_far, "".join(["ETA: ", str(eta)]))

            # Request the remainder of the file in a single ranged GET.
            old = bytes_so_far + 1
            bytes_so_far = total_size

            bytes_range = f"bytes={old}-{bytes_so_far}"

            data = self.http.request("get", url, cookies=cookies, headers={"Range": bytes_range})
            file_d.write(data.content)

        file_d.close()
        progressbar(bytes_so_far, total_size, "ETA: complete")
        # progress_stream.write('\n')
        self.finished = True
|