2023-03-16 22:24:10 +01:00
|
|
|
import binascii
|
2014-04-21 19:52:09 +02:00
|
|
|
import json
|
2018-11-18 12:47:19 +01:00
|
|
|
import logging
|
2019-08-25 00:40:39 +02:00
|
|
|
import re
|
|
|
|
import xml.etree.ElementTree as ET
|
2018-01-30 22:09:31 +01:00
|
|
|
from io import StringIO
|
|
|
|
|
2023-03-16 22:24:10 +01:00
|
|
|
from cryptography.hazmat.backends import default_backend
|
|
|
|
from cryptography.hazmat.primitives.ciphers import algorithms
|
|
|
|
from cryptography.hazmat.primitives.ciphers import Cipher
|
|
|
|
from cryptography.hazmat.primitives.ciphers import modes
|
2015-09-20 15:15:50 +02:00
|
|
|
from requests import __build__ as requests_version
|
2022-12-10 14:05:56 +01:00
|
|
|
from svtplay_dl.fetcher.m3u8 import M3U8
|
2022-06-04 00:45:55 +02:00
|
|
|
from svtplay_dl.utils.fetcher import filter_files
|
2019-08-25 00:40:39 +02:00
|
|
|
from svtplay_dl.utils.http import get_full_url
|
|
|
|
from svtplay_dl.utils.http import HTTP
|
2021-05-03 01:43:37 +02:00
|
|
|
from svtplay_dl.utils.output import find_dupes
|
|
|
|
from svtplay_dl.utils.output import formatname
|
2019-08-25 00:40:39 +02:00
|
|
|
from svtplay_dl.utils.text import decode_html_entities
|
2014-04-21 19:52:09 +02:00
|
|
|
|
2015-01-25 14:41:29 +01:00
|
|
|
|
2022-12-10 14:05:56 +01:00
|
|
|
def subtitle_probe(config, url, **kwargs):
    """Probe *url* and yield ``subtitle`` objects for the detected format.

    Detection is based on the response body: a WebVTT file ("WEBVTT"), an
    HLS playlist ("#EXTM3U"), or an XML document (DASH MPD or TTML "tt").
    """
    httpobject = kwargs.get("httpobject", None)
    if httpobject:
        http = httpobject
    else:
        http = HTTP(config)
    subdata = http.request("get", url, cookies=kwargs.get("cookies", None))

    if subdata.text.startswith("WEBVTT"):
        yield subtitle(config, "wrst", url, **kwargs)
    elif subdata.text.startswith("#EXTM3U"):
        m3u8 = M3U8(subdata.text)
        yield subtitle(config, "wrstsegment", url, **kwargs, m3u8=m3u8)
    elif "<?xml" in subdata.text or "<MPD" in subdata.text:
        # Escape bare "&" so the document parses as XML; existing "&amp;"
        # sequences are left alone.  (The replacement had degraded into a
        # no-op "&" -> "&".)
        text = re.sub("&(?!amp;)", "&amp;", subdata.text)
        xmldata = ET.fromstring(text)
        if xmldata.tag.endswith("MPD"):
            # DASH manifest: peek at the first media file to tell fMP4 TTML
            # ("stpp") apart from plain WebVTT files.
            data = http.get(kwargs.get("files")[0]).content
            if data.find(b"ftyp") > 0:
                yield subtitle(config, "stpp", url, **kwargs)
            elif data.startswith(b"WEBVTT"):
                yield subtitle(config, "wrst", kwargs.get("files")[0], **kwargs)
        elif xmldata.tag.endswith("tt"):
            yield subtitle(config, "tt", url, **kwargs)
|
|
|
|
|
|
|
|
|
2019-08-25 00:33:51 +02:00
|
|
|
class subtitle:
    """Fetch a subtitle from a URL and convert it to SRT (or keep it raw).

    ``subtype`` selects the parser used by :meth:`download`: "tt" (TTML),
    "json", "sami", "smi", "wrst" (WebVTT), "wrstsegment" (segmented WebVTT
    from an HLS playlist), "stpp" (TTML in fMP4 segments) or "raw".
    """

    def __init__(self, config, subtype, url, **kwargs):
        self.url = url
        self.subtitle = None
        self.config = config
        self.subtype = subtype
        self.http = HTTP(config)
        self.subfix = kwargs.get("subfix", None)  # extra filename suffix, e.g. a language tag
        self.bom = False  # True when the downloaded data starts with a UTF-8 BOM
        self.output = kwargs.pop("output", None)
        self.kwargs = kwargs

    def __repr__(self):
        # Fixed: the closing ")" was missing from the format string.
        return f"<Subtitle(type={self.subtype}, url={self.url} subfix={self.subfix})>"

    def download(self):
        """Download the subtitle, convert it according to ``subtype`` and save it.

        Logs a warning and returns early when the HTTP request fails or when
        the target file already exists (unless force_subtitle is set).
        """
        output_ext = "srt"
        if self.config.get("get_raw_subtitles"):
            output_ext = self.subtype

        if self.subfix and self.config.get("get_all_subtitles"):
            self.output["ext"] = f"{self.subfix}.{output_ext}"
        else:
            self.output["ext"] = output_ext

        subdata = self.http.request("get", self.url)
        if subdata.status_code != 200:
            logging.warning("Can't download subtitle file")
            return

        data = None
        # mtgx serves files with a UTF-8 BOM; force the encoding and remember it.
        if "mtgx" in self.url and subdata.content[:3] == b"\xef\xbb\xbf":
            subdata.encoding = "utf-8"
            self.bom = True

        if self.subtype == "tt":
            data = self.tt(subdata)
        if self.subtype == "json":
            data = self.json(subdata)
        if self.subtype == "sami":
            data = self.sami(subdata)
        if self.subtype == "smi":
            data = self.smi(subdata)
        if self.subtype == "wrst":
            if "tv4play" in self.url and subdata.content[:3] == b"\xef\xbb\xbf":
                self.bom = True
            subdata.encoding = subdata.apparent_encoding
            data = self.wrst(subdata)
        if self.subtype == "wrstsegment":
            data = self.wrstsegment(subdata)
        if self.subtype == "raw":
            data = self.raw(subdata)
        if self.subtype == "stpp":
            data = self.stpp(subdata)

        if self.config.get("get_raw_subtitles"):
            data = self.raw(subdata)

        # Fixed: was "if len(data) > 0", which raised TypeError when no
        # parser matched and data was still None.
        if data:
            dupe, filename = find_dupes(self.output, self.config, False)
            if dupe and not self.config.get("force_subtitle"):
                logging.warning("File (%s) already exists. Use --force-subtitle to overwrite", filename.name)
                return
            self.save_file(data)

    def save_file(self, data):
        """Write *data* to the formatted output filename as UTF-8."""
        filename = formatname(self.output, self.config)
        with open(filename, "w", encoding="utf-8") as file_d:
            file_d.write(data)

    def raw(self, subdata):
        """Return the subtitle body unchanged."""
        return subdata.text

    def tt(self, subdata):
        """Convert a TTML document to SRT."""
        i = 1
        subs = subdata.text
        return self._tt(subs, i)

    def _tt(self, subs, i):
        """Convert TTML markup in *subs* to SRT text, numbering cues from *i*."""
        data = ""
        subdata = re.sub(' xmlns="[^"]+"', "", subs, count=1)  # drop the default namespace
        tree = ET.XML(subdata)
        xml = tree.find("body")
        # Fixed: "if not xml" relies on Element truthiness (deprecated, and
        # also true for an element without children); test absence explicitly.
        if xml is None:
            return data
        xml = xml.find("div")
        if xml is None:
            return data
        plist = list(xml.findall("p"))
        for node in plist:
            tag = norm(node.tag)
            if tag in ("p", "span"):
                begin = node.attrib["begin"]
                if not ("dur" in node.attrib):
                    if "end" not in node.attrib:
                        duration = node.attrib["duration"]
                else:
                    duration = node.attrib["dur"]
                if not ("end" in node.attrib):
                    # No explicit end time: derive it from begin + duration.
                    begin2 = begin.split(":")
                    duration2 = duration.split(":")
                    try:
                        sec = float(begin2[2]) + float(duration2[2])
                    except ValueError:
                        sec = 0.000
                    end = f"{int(begin2[0]):02d}:{int(begin2[1]):02d}:{sec:06.3f}"
                else:
                    end = node.attrib["end"]
                data += f"{i}\n{begin.replace('.', ',')} --> {end.replace('.', ',')}\n"
                data = tt_text(node, data)
                data += "\n"
                i += 1

        return data

    def json(self, subdata):
        """Convert a JSON subtitle list (startMillis/endMillis/text) to SRT."""
        data = json.loads(subdata.text)
        number = 1
        subs = ""
        for i in data:
            subs += f"{number}\n{timestr(int(i['startMillis']))} --> {timestr(int(i['endMillis']))}\n"
            subs += f"{i['text']}\n\n"
            number += 1

        return subs

    def sami(self, subdata):
        """Convert a SAMI/DCSubtitle XML document to SRT."""
        text = subdata.text
        # Fixed: escape "&" so the document parses as XML (the substitution
        # had degraded into a no-op "&" -> "&").
        text = re.sub(r"&", "&amp;", text)
        tree = ET.fromstring(text)
        allsubs = tree.findall(".//Subtitle")
        subs = ""
        increase = 0
        for sub in allsubs:
            try:
                number = int(sub.attrib["SpotNumber"])
            except ValueError:
                # SpotNumber is not a plain integer; take its digits and shift
                # all following cue numbers up by one.
                number = int(re.search(r"(\d+)", sub.attrib["SpotNumber"]).group(1))
                increase += 1
            n = number + increase

            texts = sub.findall(".//Text")
            all = ""
            for text in texts:
                line = ""
                for txt in text.itertext():
                    line += f"{txt}"
                all += f"{decode_html_entities(line.lstrip())}\n"
            subs += f"{n}\n{timecolon(sub.attrib['TimeIn'])} --> {timecolon(sub.attrib['TimeOut'])}\n{all}\n"
        # Undo the "&amp;" escaping applied before parsing.
        subs = re.sub("&amp;", r"&", subs)
        return subs

    def smi(self, subdata):
        """Convert an SMI (SAMI HTML-style) document to SRT."""
        if requests_version < 0x20300:
            subdata = subdata.content.decode("latin")
        else:
            subdata.encoding = "ISO-8859-1"
            subdata = subdata.text
        ssubdata = StringIO(subdata)
        timea = 0
        number = 1
        data = None
        subs = ""
        TAG_RE = re.compile(r"<(?!\/?i).*?>")  # strip every tag except <i>/</i>
        bad_char = re.compile(r"\x96")
        for i in ssubdata.readlines():
            i = i.rstrip()
            sync = re.search(r"<SYNC Start=(\d+)>", i)
            if sync:
                if int(sync.group(1)) != int(timea):
                    # A "&nbsp;"-only cue just clears the screen; skip it.
                    if data and data != "&nbsp;":
                        subs += f"{number}\n{timestr(timea)} --> {timestr(sync.group(1))}\n"
                        text = decode_html_entities("%s\n" % TAG_RE.sub("", data.replace("<br>", "\n")))
                        if text[len(text) - 2] != "\n":
                            text += "\n"
                        subs += text
                        number += 1
                    timea = sync.group(1)
            text = re.search("<P Class=SVCC>(.*)", i)
            if text:
                data = text.group(1)
        recomp = re.compile(r"\r")
        text = bad_char.sub("-", recomp.sub("", subs))
        return text

    def wrst(self, subdata):
        """Convert a WebVTT document to SRT."""
        return self._wrst(subdata.text)

    def _wrst(self, data):
        """Line-based WebVTT to SRT conversion.

        Renumbers cues, rewrites cue timings to SRT form (comma decimal
        separator, always with an hour field) and converts/strips styling
        tags via _wsrt_colors.
        """
        ssubdata = StringIO(data)
        srt = ""
        subtract = False
        number_b = 1
        number = 0
        block = 0
        subnr = False
        cuetime = False

        for i in ssubdata.readlines():
            match = re.search(r"^[\r\n]+", i)  # blank line: cue separator
            match2 = re.search(r"([\d:\.]+ --> [\d:\.]+)", i)  # cue timing
            match3 = re.search(r"^(\d+)\s", i)  # existing cue number
            if match and number_b == 1 and self.bom:
                continue
            elif match and number_b > 1:
                block = 0
                srt += "\n"
                cuetime = False
            elif match2:
                cuetime = True
                if not subnr:
                    srt += f"{number_b}\n"
                matchx = re.search(r"(?P<h1>\d+):(?P<m1>\d+):(?P<s1>[\d\.]+) --> (?P<h2>\d+):(?P<m2>\d+):(?P<s2>[\d\.]+)", i)
                if matchx:
                    hour1 = int(matchx.group("h1"))
                    hour2 = int(matchx.group("h2"))
                    # Some streams start at a 10-hour wall-clock offset;
                    # detect it on the first cue and subtract throughout.
                    if int(number) == 1:
                        if hour1 > 9:
                            subtract = True
                    if subtract:
                        hour1 -= 10
                        hour2 -= 10
                else:
                    # MM:SS form without an hour field.
                    matchx = re.search(r"(?P<m1>\d+):(?P<s1>[\d\.]+) --> (?P<m2>\d+):(?P<s2>[\d\.]+)", i)
                    hour1 = 0
                    hour2 = 0
                time = (
                    f"{hour1:02d}:{matchx.group('m1')}:{matchx.group('s1').replace('.', ',')} --> "
                    f"{hour2:02d}:{matchx.group('m2')}:{matchx.group('s2').replace('.', ',')}\n"
                )
                srt += time
                block = 1
                subnr = False
                number_b += 1
            elif match3 and block == 0:
                number = match3.group(1)
                srt += f"{number}\n"
                subnr = True
            else:
                # Cue payload; ignore anything before the first cue timing
                # (headers, NOTE blocks, etc.).
                if not cuetime:
                    continue
                sub = _wsrt_colors(self.config.get("convert_subtitle_colors"), i)
                srt += sub.strip()
                srt += "\n"
        srt = decode_html_entities(srt)
        return srt

    def wrstsegment(self, subdata):
        """Download all WebVTT segments from the HLS playlist and merge to SRT."""
        pretext = []
        if self.kwargs.get("filter", False):
            self.kwargs["m3u8"] = filter_files(self.kwargs["m3u8"])

        for _, i in enumerate(self.kwargs["m3u8"].media_segment):
            itemurl = get_full_url(i["URI"], self.url)
            cont = self.http.get(itemurl)
            if self.kwargs["m3u8"].encrypted:
                # AES-128/CBC encrypted segment: fetch the key and decrypt.
                keyurl = get_full_url(i["EXT-X-KEY"]["URI"], self.url)
                key = self.http.request("get", keyurl).content
                # IV from the playlist ("0x..." hex) or all zeroes when absent.
                iv = binascii.unhexlify(i["EXT-X-KEY"]["IV"][2:].zfill(32)) if "IV" in i["EXT-X-KEY"] else b"\x00" * 16
                backend = default_backend()
                cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
                decryptor = cipher.decryptor()
                if decryptor:
                    data = decryptor.update(cont.content).decode("utf-8")
            else:
                cont.encoding = "utf-8"
                data = cont.text
            pretext.append(data)
        return _wrstsegments(pretext, self.config.get("convert_subtitle_colors"))

    def stpp(self, subdata):
        """Extract TTML from fMP4 segments (kwargs["files"]) and merge to SRT."""
        nr = 1
        entries = []

        for i in self.kwargs["files"]:
            res = self.http.get(i)
            # The TTML payload sits in the "mdat" box; skip past its tag.
            start = res.content.find(b"mdat") + 4
            if start > 3:
                _data = self._tt(res.content[start:].decode(), nr)
                if _data:
                    entries.append(_data.split("\n\n"))
                    nr += 1

        # Flatten per-segment cue lists into one list of [number, timing, text...].
        new_entries = []
        for entry in entries:
            for i in entry:
                if i:
                    new_entries.append(i.split("\n"))

        entries = new_entries
        # Repeatedly merge cues that continue each other until stable.
        changed = True
        while changed:
            changed, entries = _resolv(entries)

        nr = 1
        data = ""
        for entry in entries:
            for item in entry:
                data += f"{item}\n"
            data += "\n"

        return data
|
|
|
|
|
|
|
|
|
2022-12-22 09:06:34 +01:00
|
|
|
def _wrstsegments(entries: list, convert=False) -> str:
    """Merge a list of WebVTT segment bodies into one numbered SRT string.

    entries: decoded text of each HLS subtitle segment, in playlist order.
    convert: forwarded to _wsrt_colors - convert color tags to <font> when
    truthy, strip all tags otherwise.
    """
    # Offset (seconds) added to every cue time, taken from the segment's
    # X-TIMESTAMP-MAP header.
    time = 0
    subs = []  # list of cues; each cue is [timing-line, text-line, ...]
    for cont in entries:
        cont = re.sub(r"\n\n[-0-9a-f\d]+\n", "\n", cont)  # remove sequence numbers
        text = cont.split("\n")
        for t in text:  # is in text[1] for tv4play, but this should be more future proof
            if "X-TIMESTAMP-MAP=MPEGTS" in t:
                # MPEGTS ticks run at 90 kHz; convert to seconds.
                time = float(re.search(r"X-TIMESTAMP-MAP=MPEGTS:(\d+)", t).group(1)) / 90000
                if time > 0:
                    # NOTE(review): a fixed 10 s is subtracted from any
                    # non-zero offset - presumably a stream start offset;
                    # confirm against the providers this is used for.
                    time -= 10
        itmes = []
        if len(text) > 1:
            for n in text:
                if n:  # don't get the empty lines.
                    itmes.append(n)

        several_items = False  # True once this segment produced a cue
        skip = False  # True while swallowing a cue merged into subs[-1]
        pre_date_skip = True  # True until the first timing line is seen (skips headers)
        sub = []  # cue currently being built
        for x in range(len(itmes)):
            item = itmes[x].rstrip()
            if not item.rstrip():
                continue
            # A timing line whose text repeats the previous cue's text means
            # the previous cue continues into this segment: extend its end
            # time instead of emitting a duplicate.
            # NOTE(review): itmes[x + 1] assumes a timing line is never the
            # last line of a segment - IndexError otherwise; confirm.
            if strdate(item) and len(subs) > 0 and itmes[x + 1] == subs[-1][1]:
                ha = strdate(subs[-1][0])
                ha3 = strdate(item)
                second = str2sec(ha3.group(4)) + time
                subs[-1][0] = f"{ha.group(1).replace('.', ',')} --> {sec2str(second).replace('.', ',')}"
                skip = True
                pre_date_skip = False
                continue
            has_date = strdate(item)
            if has_date:
                # New timing line: flush the cue built so far and start a new
                # one with times shifted by the segment offset.
                if several_items:
                    subs.append(sub)
                    sub = []
                skip = False
                first = str2sec(has_date.group(1)) + time
                second = str2sec(has_date.group(4)) + time
                sub.append(f"{sec2str(first).replace('.', ',')} --> {sec2str(second).replace('.', ',')}")
                several_items = True
                pre_date_skip = False
            elif has_date is None and skip is False and pre_date_skip is False:
                # Ordinary cue text line.
                sub.append(_wsrt_colors(convert, item))
        if sub:
            subs.append(sub)
    # Render the collected cues with sequential numbering.
    string = ""
    nr = 1
    for sub in subs:
        string += "{}\n{}\n\n".format(nr, "\n".join(sub))
        nr += 1

    string = re.sub("\r", "", string)
    return string
|
|
|
|
|
|
|
|
|
2021-03-02 00:34:45 +01:00
|
|
|
def _resolv(entries):
    """One merge pass over stpp cues: join cues whose end/start times touch.

    Each entry is [number, "start --> end", text, ...].  Returns
    (changed, new_entries); the caller repeats until changed is False.
    """
    merged = []
    changed = False
    consume_next = False
    total = len(entries)
    for pos, entry in enumerate(entries):
        # This entry was already folded into the previous one.
        if consume_next:
            consume_next = False
            continue
        cue = strdate(entry[1].replace(",", "."))
        following = None
        if pos + 1 < total:
            following = strdate(entries[pos + 1][1].replace(",", "."))
        start = cue.group(1)
        end = cue.group(4)
        # When this cue ends exactly where the next one starts, extend it to
        # cover both and drop the next one.
        if following and cue.group(4) == following.group(1):
            end = following.group(4)
            consume_next = True
            changed = True
        rebuilt = [pos + 1, f"{start} --> {end}", *entry[2:]]
        merged.append(rebuilt)
    return changed, merged
|
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-04-21 19:52:09 +02:00
|
|
|
def timestr(msec):
    """Turn a millisecond count into an SRT timestamp, "HH:MM:SS,mmm".

    Accepts anything float() accepts - some callers pass the value as a
    string.  Note the comma decimal separator, as required by SRT.
    """
    total_seconds = float(msec) / 1000
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return f"{int(hours):02d}:{int(minutes):02d}:{seconds:06.3f}".replace(".", ",")
|
2014-04-21 19:52:09 +02:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-12-15 22:19:58 +01:00
|
|
|
def timecolon(data):
    """Replace the final colon of an "HH:MM:SS:FF" timestamp with a comma."""
    hms, frames = re.search(r"(\d+:\d+:\d+):(\d+)", data).groups()
    return f"{hms},{frames}"
|
2014-12-15 22:19:58 +01:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-04-21 19:52:09 +02:00
|
|
|
def norm(name):
    """Strip the "{namespace}" prefix from an ElementTree tag name, if any."""
    if name[0] != "{":
        return name
    _, tag = name[1:].split("}")
    return tag
|
2014-07-09 18:39:18 +02:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-07-09 18:39:18 +02:00
|
|
|
def tt_text(node, data):
    """Append the text content of *node* and its children to *data*.

    Each piece of text (the node's own text, each child's text, and each
    child's tail) becomes its own line, stripped of surrounding whitespace.
    """
    if node.text:
        data += node.text.strip(" \t\n\r") + "\n"
    for child in node:
        if child.text:
            data += child.text.strip(" \t\n\r") + "\n"
        if child.tail:
            tail = child.tail.strip(" \t\n\r")
            if tail:
                data += f"{tail}\n"
    return data
|
2018-07-05 01:26:33 +02:00
|
|
|
|
|
|
|
|
|
|
|
def strdate(datestring):
    """Match a cue timing line ("start --> end"); return the Match or None.

    Groups 1 and 4 hold the start and end timestamps.  Both HH:MM:SS and
    MM:SS forms are accepted, each with an optional fractional part.
    """
    cue_timing = r"^((\d+:\d+:\d+[\.,]*[0-9]*)?(\d+:\d+[\.,]*[0-9]*)?) --> ((\d+:\d+:\d+[\.,]*[0-9]*)?(\d+:\d+[\.,]*[0-9]*)?)$"
    return re.search(cue_timing, datestring)
|
|
|
|
|
|
|
|
|
|
|
|
def sec2str(seconds):
    """Format a second count as "HH:MM:SS.mmm" (dot decimal separator)."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return f"{int(hours):02d}:{int(minutes):02d}:{secs:06.3f}"
|
2018-07-05 01:26:33 +02:00
|
|
|
|
|
|
|
|
|
|
|
def str2sec(string):
    """Parse "[HH:]MM:SS" or plain "SS" (fractions allowed) into float seconds."""
    parts = string.split(":")
    weights = [3600, 60, 1][3 - len(parts):]
    return sum(weight * float(part) for weight, part in zip(weights, parts))
|
2022-12-22 09:06:34 +01:00
|
|
|
|
|
|
|
|
|
|
|
def _wsrt_colors(convert, text):
|
|
|
|
if convert:
|
|
|
|
colors = {
|
|
|
|
"30": "#000000",
|
|
|
|
"31": "#ff0000",
|
|
|
|
"32": "#00ff00",
|
|
|
|
"33": "#ffff00",
|
|
|
|
"34": "#0000ff",
|
|
|
|
"35": "#ff00ff",
|
|
|
|
"36": "#00ffff",
|
|
|
|
"37": "#ffffff",
|
|
|
|
"c.black": "#000000",
|
|
|
|
"c.red": "#ff0000",
|
|
|
|
"c.green": "#00ff00",
|
|
|
|
"c.yellow": "#ffff00",
|
|
|
|
"c.blue": "#0000ff",
|
|
|
|
"c.magenta": "#ff00ff",
|
|
|
|
"c.cyan": "#00ffff",
|
|
|
|
"c.gray": "#ffffff",
|
|
|
|
}
|
|
|
|
for tag, color in colors.items():
|
|
|
|
regex1 = "<" + tag + ">"
|
|
|
|
replace = '<font color="' + color + '">'
|
|
|
|
text = re.sub(regex1, replace, text)
|
|
|
|
text = re.sub(f'</{tag.split(".")[0]}>', "</font>", text)
|
|
|
|
else:
|
|
|
|
text = re.sub("<[^>]*>", "", text)
|
|
|
|
return text
|