2014-04-21 19:52:09 +02:00
|
|
|
import xml.etree.ElementTree as ET
|
|
|
|
import json
|
|
|
|
import re
|
2018-11-18 12:47:19 +01:00
|
|
|
import logging
|
2018-01-30 22:09:31 +01:00
|
|
|
from io import StringIO
|
|
|
|
|
2018-03-13 00:33:39 +01:00
|
|
|
from svtplay_dl.utils.text import decode_html_entities
|
2018-07-05 01:26:33 +02:00
|
|
|
from svtplay_dl.utils.http import HTTP, get_full_url
|
2018-05-13 13:06:45 +02:00
|
|
|
from svtplay_dl.utils.output import output
|
|
|
|
|
2018-07-05 01:26:33 +02:00
|
|
|
|
2015-09-20 15:15:50 +02:00
|
|
|
from requests import __build__ as requests_version
|
2015-10-25 17:19:16 +01:00
|
|
|
import platform
|
2014-04-21 19:52:09 +02:00
|
|
|
|
2015-01-25 14:41:29 +01:00
|
|
|
|
2014-07-28 15:53:23 +02:00
|
|
|
class subtitle(object):
|
2018-05-08 22:46:11 +02:00
|
|
|
def __init__(self, config, subtype, url, subfix=None, **kwargs):
|
2014-04-21 19:52:09 +02:00
|
|
|
self.url = url
|
|
|
|
self.subtitle = None
|
2018-05-08 22:46:11 +02:00
|
|
|
self.config = config
|
2014-08-31 01:20:36 +02:00
|
|
|
self.subtype = subtype
|
2018-05-08 22:46:11 +02:00
|
|
|
self.http = HTTP(config)
|
2016-04-27 10:37:47 +02:00
|
|
|
self.subfix = subfix
|
2016-12-05 20:45:14 +01:00
|
|
|
self.bom = False
|
2018-05-13 13:06:45 +02:00
|
|
|
self.output = kwargs.pop("output", None)
|
2018-07-05 01:26:33 +02:00
|
|
|
self.kwargs = kwargs
|
2014-04-21 19:52:09 +02:00
|
|
|
|
2018-05-12 15:12:37 +02:00
|
|
|
def __repr__(self):
|
|
|
|
return "<Subtitle(type={}, url={}>".format(self.subtype, self.url)
|
|
|
|
|
2014-08-31 01:20:36 +02:00
|
|
|
def download(self):
|
2018-05-13 01:46:51 +02:00
|
|
|
subdata = self.http.request("get", self.url)
|
2016-11-14 21:51:39 +01:00
|
|
|
if subdata.status_code != 200:
|
2018-11-18 12:47:19 +01:00
|
|
|
logging.warning("Can't download subtitle file")
|
2016-11-14 21:51:39 +01:00
|
|
|
return
|
|
|
|
|
2014-08-31 01:20:36 +02:00
|
|
|
data = None
|
2016-12-05 20:45:14 +01:00
|
|
|
if "mtgx" in self.url and subdata.content[:3] == b"\xef\xbb\xbf":
|
|
|
|
subdata.encoding = "utf-8"
|
|
|
|
self.bom = True
|
|
|
|
|
2014-08-31 01:20:36 +02:00
|
|
|
if self.subtype == "tt":
|
|
|
|
data = self.tt(subdata)
|
|
|
|
if self.subtype == "json":
|
|
|
|
data = self.json(subdata)
|
|
|
|
if self.subtype == "sami":
|
|
|
|
data = self.sami(subdata)
|
|
|
|
if self.subtype == "smi":
|
|
|
|
data = self.smi(subdata)
|
|
|
|
if self.subtype == "wrst":
|
2017-09-17 11:21:04 +02:00
|
|
|
if "tv4play" in self.url and subdata.content[:3] == b"\xef\xbb\xbf":
|
|
|
|
subdata.encoding = "utf-8"
|
|
|
|
self.bom = True
|
2018-02-22 03:29:21 +01:00
|
|
|
if "dplay" in self.url:
|
|
|
|
subdata.encoding = "utf-8"
|
2014-08-31 01:20:36 +02:00
|
|
|
data = self.wrst(subdata)
|
2018-07-05 01:26:33 +02:00
|
|
|
if self.subtype == "wrstsegment":
|
|
|
|
data = self.wrstsegment(subdata)
|
2015-10-30 00:51:35 +01:00
|
|
|
if self.subtype == "raw":
|
2016-04-27 13:12:30 +02:00
|
|
|
data = self.raw(subdata)
|
2018-01-30 20:11:37 +01:00
|
|
|
|
2016-04-27 19:41:23 +02:00
|
|
|
if self.subfix:
|
2018-05-27 15:55:25 +02:00
|
|
|
if self.config.get("get_all_subtitles"):
|
|
|
|
if self.output["episodename"]:
|
|
|
|
self.output["episodename"] = "{}-{}".format(self.output["episodename"], self.subfix)
|
|
|
|
else:
|
|
|
|
self.output["episodename"] = self.subfix
|
2018-01-30 20:11:37 +01:00
|
|
|
|
2018-05-13 01:46:17 +02:00
|
|
|
if self.config.get("get_raw_subtitles"):
|
2016-04-27 13:12:30 +02:00
|
|
|
subdata = self.raw(subdata)
|
|
|
|
self.save_file(subdata, self.subtype)
|
2018-01-30 20:11:37 +01:00
|
|
|
|
2016-04-27 13:12:30 +02:00
|
|
|
self.save_file(data, "srt")
|
2018-01-30 20:11:37 +01:00
|
|
|
|
2016-04-27 13:12:30 +02:00
|
|
|
def save_file(self, data, subtype):
|
2018-01-13 20:27:40 +01:00
|
|
|
if platform.system() == "Windows":
|
2018-05-13 01:46:17 +02:00
|
|
|
file_d = output(self.output, self.config, subtype, mode="wt", encoding="utf-8")
|
2015-10-25 17:19:16 +01:00
|
|
|
else:
|
2018-05-13 01:46:17 +02:00
|
|
|
file_d = output(self.output, self.config, subtype, mode="wt")
|
2014-12-30 21:20:03 +01:00
|
|
|
if hasattr(file_d, "read") is False:
|
|
|
|
return
|
|
|
|
file_d.write(data)
|
|
|
|
file_d.close()
|
2018-01-30 20:11:37 +01:00
|
|
|
|
|
|
|
def raw(self, subdata):
|
2018-01-13 20:27:40 +01:00
|
|
|
return subdata.text
|
2018-01-30 20:11:37 +01:00
|
|
|
|
2014-08-31 01:20:36 +02:00
|
|
|
def tt(self, subdata):
|
2014-04-21 19:52:09 +02:00
|
|
|
i = 1
|
|
|
|
data = ""
|
2018-01-13 20:27:40 +01:00
|
|
|
subs = subdata.text
|
2016-01-27 19:49:38 +01:00
|
|
|
|
2015-10-10 16:31:42 +02:00
|
|
|
subdata = re.sub(' xmlns="[^"]+"', '', subs, count=1)
|
|
|
|
tree = ET.XML(subdata)
|
|
|
|
xml = tree.find("body").find("div")
|
|
|
|
plist = list(xml.findall("p"))
|
2014-07-09 18:39:18 +02:00
|
|
|
for node in plist:
|
2014-04-21 19:52:09 +02:00
|
|
|
tag = norm(node.tag)
|
2014-07-09 18:39:18 +02:00
|
|
|
if tag == "p" or tag == "span":
|
2014-04-21 19:52:09 +02:00
|
|
|
begin = node.attrib["begin"]
|
|
|
|
if not ("dur" in node.attrib):
|
|
|
|
duration = node.attrib["duration"]
|
|
|
|
else:
|
|
|
|
duration = node.attrib["dur"]
|
|
|
|
if not ("end" in node.attrib):
|
|
|
|
begin2 = begin.split(":")
|
|
|
|
duration2 = duration.split(":")
|
2016-02-08 21:28:39 +01:00
|
|
|
try:
|
|
|
|
sec = float(begin2[2]) + float(duration2[2])
|
|
|
|
except ValueError:
|
|
|
|
sec = 0.000
|
2016-10-24 21:24:36 +02:00
|
|
|
end = "%02d:%02d:%06.3f" % (int(begin2[0]), int(begin2[1]), sec)
|
2014-04-21 19:52:09 +02:00
|
|
|
else:
|
|
|
|
end = node.attrib["end"]
|
2014-12-26 02:04:29 +01:00
|
|
|
data += '%s\n%s --> %s\n' % (i, begin.replace(".", ","), end.replace(".", ","))
|
2014-07-09 18:39:18 +02:00
|
|
|
data = tt_text(node, data)
|
|
|
|
data += "\n"
|
2014-04-21 19:52:09 +02:00
|
|
|
i += 1
|
2018-01-13 20:27:40 +01:00
|
|
|
|
2014-08-31 01:20:36 +02:00
|
|
|
return data
|
2014-04-21 19:52:09 +02:00
|
|
|
|
2014-08-31 01:20:36 +02:00
|
|
|
def json(self, subdata):
|
2015-09-20 15:15:50 +02:00
|
|
|
data = json.loads(subdata.text)
|
2014-04-21 19:52:09 +02:00
|
|
|
number = 1
|
|
|
|
subs = ""
|
|
|
|
for i in data:
|
|
|
|
subs += "%s\n%s --> %s\n" % (number, timestr(int(i["startMillis"])), timestr(int(i["endMillis"])))
|
2018-01-13 20:27:40 +01:00
|
|
|
subs += "%s\n\n" % i["text"]
|
2014-04-21 19:52:09 +02:00
|
|
|
number += 1
|
|
|
|
|
2014-08-31 01:20:36 +02:00
|
|
|
return subs
|
2014-04-21 19:52:09 +02:00
|
|
|
|
2014-08-31 01:20:36 +02:00
|
|
|
def sami(self, subdata):
|
2016-06-01 22:43:39 +02:00
|
|
|
text = subdata.text
|
|
|
|
text = re.sub(r'&', '&', text)
|
|
|
|
tree = ET.fromstring(text)
|
2018-10-28 14:17:27 +01:00
|
|
|
allsubs = tree.findall(".//Subtitle")
|
2014-04-21 19:52:09 +02:00
|
|
|
subs = ""
|
2018-10-28 14:17:27 +01:00
|
|
|
increase = 0
|
|
|
|
for sub in allsubs:
|
|
|
|
try:
|
|
|
|
number = int(sub.attrib["SpotNumber"])
|
|
|
|
except ValueError:
|
2018-10-28 23:16:00 +01:00
|
|
|
number = int(re.search(r"(\d+)", sub.attrib["SpotNumber"]).group(1))
|
2018-10-28 14:17:27 +01:00
|
|
|
increase += 1
|
|
|
|
n = number + increase
|
|
|
|
|
|
|
|
texts = sub.findall(".//Text")
|
|
|
|
all = ""
|
|
|
|
for text in texts:
|
2018-11-02 00:38:39 +01:00
|
|
|
line = ""
|
|
|
|
for txt in text.itertext():
|
|
|
|
line += "{}".format(txt)
|
|
|
|
all += "{}\n".format(decode_html_entities(line.lstrip()))
|
|
|
|
subs += "{}\n{} --> {}\n{}\n".format(n, timecolon(sub.attrib["TimeIn"]), timecolon(sub.attrib["TimeOut"]), all)
|
2016-06-01 22:43:39 +02:00
|
|
|
subs = re.sub('&', r'&', subs)
|
2014-08-31 01:20:36 +02:00
|
|
|
return subs
|
2014-04-21 19:52:09 +02:00
|
|
|
|
2014-08-31 01:20:36 +02:00
|
|
|
def smi(self, subdata):
|
2015-09-20 15:15:50 +02:00
|
|
|
if requests_version < 0x20300:
|
2018-01-13 20:27:40 +01:00
|
|
|
subdata = subdata.content.decode("latin")
|
2015-09-20 15:15:50 +02:00
|
|
|
else:
|
2015-10-19 23:35:57 +02:00
|
|
|
subdata.encoding = "ISO-8859-1"
|
2015-09-20 15:15:50 +02:00
|
|
|
subdata = subdata.text
|
2015-04-28 23:00:24 +02:00
|
|
|
ssubdata = StringIO(subdata)
|
|
|
|
timea = 0
|
2014-04-21 19:52:09 +02:00
|
|
|
number = 1
|
2015-04-28 23:00:24 +02:00
|
|
|
data = None
|
2014-04-21 19:52:09 +02:00
|
|
|
subs = ""
|
2017-03-21 09:49:07 +01:00
|
|
|
TAG_RE = re.compile(r'<(?!\/?i).*?>')
|
2014-11-23 13:02:14 +01:00
|
|
|
bad_char = re.compile(r'\x96')
|
2015-04-28 23:00:24 +02:00
|
|
|
for i in ssubdata.readlines():
|
2015-05-01 22:34:02 +02:00
|
|
|
i = i.rstrip()
|
2015-04-28 23:16:44 +02:00
|
|
|
sync = re.search(r"<SYNC Start=(\d+)>", i)
|
2015-04-28 23:00:24 +02:00
|
|
|
if sync:
|
|
|
|
if int(sync.group(1)) != int(timea):
|
2015-05-01 22:34:02 +02:00
|
|
|
if data and data != " ":
|
2015-04-28 23:00:24 +02:00
|
|
|
subs += "%s\n%s --> %s\n" % (number, timestr(timea), timestr(sync.group(1)))
|
2017-03-22 23:22:57 +01:00
|
|
|
text = "%s\n" % TAG_RE.sub('', data.replace("<br>", "\n"))
|
|
|
|
text = decode_html_entities(text)
|
2018-01-30 20:11:37 +01:00
|
|
|
if text[len(text) - 2] != "\n":
|
2015-05-01 22:34:02 +02:00
|
|
|
text += "\n"
|
2015-04-28 23:00:24 +02:00
|
|
|
subs += text
|
|
|
|
number += 1
|
|
|
|
timea = sync.group(1)
|
|
|
|
text = re.search("<P Class=SVCC>(.*)", i)
|
|
|
|
if text:
|
|
|
|
data = text.group(1)
|
2014-11-23 13:02:14 +01:00
|
|
|
recomp = re.compile(r'\r')
|
2017-03-22 23:22:57 +01:00
|
|
|
text = bad_char.sub('-', recomp.sub('', subs))
|
2014-08-31 01:20:36 +02:00
|
|
|
return text
|
2014-04-21 19:52:09 +02:00
|
|
|
|
2014-08-31 01:20:36 +02:00
|
|
|
def wrst(self, subdata):
|
2015-09-20 15:15:50 +02:00
|
|
|
ssubdata = StringIO(subdata.text)
|
2014-04-21 19:52:09 +02:00
|
|
|
srt = ""
|
2014-06-07 18:50:51 +02:00
|
|
|
subtract = False
|
2015-03-01 21:44:55 +01:00
|
|
|
number_b = 1
|
|
|
|
number = 0
|
|
|
|
block = 0
|
|
|
|
subnr = False
|
2016-12-05 20:45:14 +01:00
|
|
|
if self.bom:
|
|
|
|
ssubdata.read(1)
|
2015-03-01 21:44:55 +01:00
|
|
|
for i in ssubdata.readlines():
|
2015-04-28 23:16:44 +02:00
|
|
|
match = re.search(r"^[\r\n]+", i)
|
|
|
|
match2 = re.search(r"([\d:\.]+ --> [\d:\.]+)", i)
|
|
|
|
match3 = re.search(r"^(\d+)\s", i)
|
2015-03-01 21:44:55 +01:00
|
|
|
if i[:6] == "WEBVTT":
|
2016-12-05 20:45:14 +01:00
|
|
|
continue
|
|
|
|
elif "X-TIMESTAMP" in i:
|
|
|
|
continue
|
|
|
|
elif match and number_b == 1 and self.bom:
|
|
|
|
continue
|
2015-03-01 21:44:55 +01:00
|
|
|
elif match and number_b > 1:
|
|
|
|
block = 0
|
|
|
|
srt += "\n"
|
|
|
|
elif match2:
|
|
|
|
if not subnr:
|
|
|
|
srt += "%s\n" % number_b
|
2017-06-05 17:31:47 +02:00
|
|
|
matchx = re.search(r'(?P<h1>\d+):(?P<m1>\d+):(?P<s1>[\d\.]+) --> (?P<h2>\d+):(?P<m2>\d+):(?P<s2>[\d\.]+)', i)
|
|
|
|
if matchx:
|
|
|
|
hour1 = int(matchx.group("h1"))
|
|
|
|
hour2 = int(matchx.group("h2"))
|
|
|
|
if int(number) == 1:
|
|
|
|
if hour1 > 9:
|
|
|
|
subtract = True
|
|
|
|
if subtract:
|
|
|
|
hour1 -= 10
|
|
|
|
hour2 -= 10
|
|
|
|
else:
|
|
|
|
matchx = re.search(r'(?P<m1>\d+):(?P<s1>[\d\.]+) --> (?P<m2>\d+):(?P<s2>[\d\.]+)', i)
|
|
|
|
hour1 = 0
|
|
|
|
hour2 = 0
|
2018-01-30 20:11:37 +01:00
|
|
|
time = "{0:02d}:{1}:{2} --> {3:02d}:{4}:{5}\n".format(hour1, matchx.group("m1"), matchx.group("s1").replace(".", ","),
|
|
|
|
hour2, matchx.group("m2"), matchx.group("s2").replace(".", ","))
|
2015-03-01 21:44:55 +01:00
|
|
|
srt += time
|
|
|
|
block = 1
|
|
|
|
subnr = False
|
|
|
|
number_b += 1
|
|
|
|
|
|
|
|
elif match3 and block == 0:
|
|
|
|
number = match3.group(1)
|
|
|
|
srt += "%s\n" % number
|
|
|
|
subnr = True
|
|
|
|
else:
|
2018-05-13 01:46:17 +02:00
|
|
|
if self.config.get("convert_subtitle_colors"):
|
2018-10-28 20:47:41 +01:00
|
|
|
colors = {
|
|
|
|
'30': '#000000', '31': '#ff0000', '32': '#00ff00', '33': '#ffff00', '34': '#0000ff',
|
|
|
|
'35': '#ff00ff', '36': '#00ffff', '37': '#ffffff', 'c.black': '#000000', 'c.red': '#ff0000',
|
|
|
|
'c.green': '#00ff00', 'c.yellow': '#ffff00', 'c.blue': '#0000ff', 'c.magneta': '#ff00ff',
|
|
|
|
'c.cyan': '#00ffff', 'c.gray': '#ffffff',
|
|
|
|
}
|
2016-05-09 15:10:58 +02:00
|
|
|
sub = i
|
|
|
|
for tag, color in colors.items():
|
|
|
|
regex1 = '<' + tag + '>'
|
|
|
|
replace = '<font color="' + color + '">'
|
|
|
|
sub = re.sub(regex1, replace, sub)
|
2018-01-30 20:11:37 +01:00
|
|
|
|
|
|
|
sub = re.sub('</.+>', '</font>', sub)
|
2016-05-09 15:10:58 +02:00
|
|
|
else:
|
|
|
|
sub = re.sub('<[^>]*>', '', i)
|
2016-04-20 18:42:46 +02:00
|
|
|
srt += sub.strip()
|
2018-01-30 20:11:37 +01:00
|
|
|
srt += "\n"
|
2015-09-01 22:54:16 +02:00
|
|
|
srt = decode_html_entities(srt)
|
2015-04-28 22:59:07 +02:00
|
|
|
return srt
|
2014-04-27 15:33:05 +02:00
|
|
|
|
2018-07-05 01:26:33 +02:00
|
|
|
def wrstsegment(self, subdata):
|
|
|
|
time = 0
|
|
|
|
subs = []
|
|
|
|
for i in self.kwargs["m3u8"].media_segment:
|
|
|
|
itemurl = get_full_url(i["URI"], self.url)
|
|
|
|
cont = self.http.get(itemurl)
|
|
|
|
if "cmore" in self.url:
|
|
|
|
cont.encoding = "utf-8"
|
|
|
|
text = cont.text.split("\n")
|
2018-07-10 22:45:55 +02:00
|
|
|
for t in text: # is in text[1] for tv4play, but this should be more future proof
|
|
|
|
if 'X-TIMESTAMP-MAP=MPEGTS' in t:
|
2018-07-12 00:52:19 +02:00
|
|
|
time = float(re.search(r"X-TIMESTAMP-MAP=MPEGTS:(\d+)", t).group(1)) / 90000 - 10
|
2018-07-05 01:26:33 +02:00
|
|
|
text = text[3:len(text) - 2]
|
2019-04-08 23:21:59 +02:00
|
|
|
itmes = []
|
2018-07-05 01:26:33 +02:00
|
|
|
if len(text) > 1:
|
|
|
|
for n in text:
|
2019-04-08 23:21:59 +02:00
|
|
|
if n: # don't get the empty lines.
|
2018-07-05 01:26:33 +02:00
|
|
|
itmes.append(n)
|
|
|
|
|
2019-04-08 23:21:59 +02:00
|
|
|
itemsn = 0
|
|
|
|
several_items = False
|
|
|
|
sub = []
|
|
|
|
|
|
|
|
for x in range(len(itmes)):
|
|
|
|
item = itmes[itemsn]
|
|
|
|
if strdate(item) and len(subs) > 0 and itmes[itemsn + 1] == subs[-1][1]:
|
|
|
|
ha = strdate(subs[-1][0])
|
|
|
|
ha3 = strdate(item)
|
|
|
|
second = str2sec(ha3.group(2)) + time
|
|
|
|
subs[-1][0] = "{} --> {}".format(ha.group(1), sec2str(second))
|
|
|
|
continue
|
|
|
|
else:
|
|
|
|
has_date = strdate(item)
|
|
|
|
if has_date:
|
|
|
|
if several_items:
|
|
|
|
subs.append(sub)
|
|
|
|
sub = []
|
|
|
|
first = str2sec(has_date.group(1)) + time
|
|
|
|
second = str2sec(has_date.group(2)) + time
|
|
|
|
sub.append("{} --> {}".format(sec2str(first), sec2str(second)))
|
|
|
|
several_items = True
|
|
|
|
elif has_date is None:
|
|
|
|
sub.append(item)
|
|
|
|
itemsn += 1
|
|
|
|
if sub:
|
|
|
|
subs.append(sub)
|
2018-07-05 01:26:33 +02:00
|
|
|
string = ""
|
|
|
|
nr = 1
|
|
|
|
for sub in subs:
|
|
|
|
string += "{}\n{}\n\n".format(nr, '\n'.join(sub))
|
|
|
|
nr += 1
|
|
|
|
|
|
|
|
return string
|
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-04-21 19:52:09 +02:00
|
|
|
def timestr(msec):
    """
    Convert a millisecond value to a string of the following
    format:

        HH:MM:SS,mmm

    with millisecond precision. Note the "," separator before
    the milliseconds.
    """
    total = float(msec) / 1000

    hours = int(total / 3600)
    total -= hours * 3600

    minutes = int(total / 60)
    total -= minutes * 60

    formatted = "%02d:%02d:%06.3f" % (hours, minutes, total)
    return formatted.replace(".", ",")
|
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-12-15 22:19:58 +01:00
|
|
|
def timecolon(data):
    """Turn "HH:MM:SS:FF" into "HH:MM:SS,FF" (SRT uses a comma before the last field)."""
    match = re.search(r"(\d+:\d+:\d+):(\d+)", data)
    return "{},{}".format(match.group(1), match.group(2))
|
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-04-21 19:52:09 +02:00
|
|
|
def norm(name):
    """Strip a leading "{namespace}" qualifier from an XML tag name."""
    if name.startswith("{"):
        _, tag = name[1:].split("}")
        return tag
    return name
|
2014-07-09 18:39:18 +02:00
|
|
|
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2014-07-09 18:39:18 +02:00
|
|
|
def tt_text(node, data):
    """Append the text content of a TTML node (and its children's text/tails) to *data*."""
    if node.text:
        data += "%s\n" % node.text.strip(' \t\n\r')
    for child in node:
        if child.text:
            data += "%s\n" % child.text.strip(' \t\n\r')
        if child.tail:
            # Text following a child element (e.g. after a <br/>).
            tail = child.tail.strip(' \t\n\r')
            if tail:
                data += "%s\n" % tail
    return data
|
2018-07-05 01:26:33 +02:00
|
|
|
|
|
|
|
|
|
|
|
def strdate(datestring):
    """Return the regex match for a leading "start --> end" timestamp, or None."""
    return re.search(r"^(\d+:\d+:[\.0-9]+) --> (\d+:\d+:[\.0-9]+)", datestring)
|
|
|
|
|
|
|
|
|
|
|
|
def sec2str(seconds):
    """Format a second count as "HH:MM:SS.mmm"."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "{:02d}:{:02d}:{:06.3f}".format(int(hours), int(minutes), secs)
|
|
|
|
|
|
|
|
|
|
|
|
def str2sec(string):
    """Parse an "HH:MM:SS(.mmm)" timestamp into a float second count."""
    total = 0.0
    for weight, part in zip([3600, 60, 1], string.split(":")):
        total += weight * float(part)
    return total
|