2019-08-25 00:40:39 +02:00
|
|
|
import copy
|
|
|
|
import json
|
2020-07-30 22:39:27 +02:00
|
|
|
import logging
|
2019-08-25 00:40:39 +02:00
|
|
|
import re
|
2020-07-30 22:39:27 +02:00
|
|
|
import uuid
|
2022-03-19 19:24:32 +01:00
|
|
|
from urllib.parse import urlparse
|
2013-02-12 19:43:37 +01:00
|
|
|
|
2019-08-25 00:40:39 +02:00
|
|
|
from svtplay_dl.error import ServiceError
|
|
|
|
from svtplay_dl.fetcher.hls import hlsparse
|
|
|
|
from svtplay_dl.service import OpenGraphThumbMixin
|
|
|
|
from svtplay_dl.service import Service
|
2015-03-01 21:46:22 +01:00
|
|
|
from svtplay_dl.subtitle import subtitle
|
2015-09-15 20:10:32 +02:00
|
|
|
|
2018-01-30 20:11:37 +01:00
|
|
|
|
2014-01-26 01:51:53 +01:00
|
|
|
class Dr(Service, OpenGraphThumbMixin):
    """Service for dr.dk / DRTV (Danish public broadcaster).

    Handles three page variants:
      * modern DRTV pages embedding a ``__data = {...}`` JSON blob,
      * plain pages with a ``<source src="...">`` HLS link,
      * the Bonanza archive (episode listing only).
    """

    supported_domains = ["dr.dk"]

    def get(self):
        """Yield video streams (and subtitles) for the current URL.

        Yields:
            hlsparse results for playable HLS streams, ``subtitle`` objects
            when the API lists subtitles, or ``ServiceError`` on failure.
        """
        data = self.get_urldata()

        match = re.search("__data = ([^<]+)</script>", data)
        if not match:
            # No embedded JSON blob: fall back to a plain <source src="..."> link.
            match = re.search('source src="([^"]+)"', data)
            if not match:
                yield ServiceError("Cant find info for this video")
                return
            res = self.http.request("get", match.group(1))
            if res.status_code > 400:
                yield ServiceError("Can't play this because the video is geoblocked or not available.")
            else:
                yield from hlsparse(self.config, res, match.group(1), output=self.output)
            return

        janson = json.loads(match.group(1))
        # The page cache holds a single entry whose key is dynamic; take it.
        page = janson["cache"]["page"][next(iter(janson["cache"]["page"]))]

        # Fix: initialise `offers` so a page matching neither shape cannot raise
        # NameError below, and guard the entries lookup against a missing/empty
        # "entries" list (previously an unguarded page["entries"][0]).
        offers = []
        if "entries" in page and page["entries"] and "item" in page["entries"][0]:
            offers = page["entries"][0]["item"]["offers"]
        elif "item" in page:
            offers = page["item"]["offers"]

        # Keep only streamable offers as (scope-id, resolution) pairs.
        offerlist = [[i["scopes"][0], i["resolution"]] for i in offers if i["deliveryType"] == "Stream"]
        # Fix: bail out before requesting a token — the original made the SSO
        # round-trip even when there was nothing to play.
        if not offerlist:
            yield ServiceError("Can't find any videos")
            return

        # Anonymous SSO: any fresh device id is accepted and yields a bearer token.
        deviceid = uuid.uuid1()
        res = self.http.request(
            "post",
            "https://isl.dr-massive.com/api/authorization/anonymous-sso?device=web_browser&ff=idp%2Cldp&lang=da",
            json={"deviceId": str(deviceid), "scopes": ["Catalog"], "optout": True},
        )
        token = res.json()[0]["value"]

        for vid, resolution in offerlist:
            url = (
                f"https://isl.dr-massive.com/api/account/items/{vid}/videos?delivery=stream&device=web_browser&"
                f"ff=idp%2Cldp&lang=da&resolution={resolution}&sub=Anonymous"
            )
            res = self.http.request("get", url, headers={"authorization": f"Bearer {token}"})
            for video in res.json():
                if video["accessService"] == "StandardVideo" and video["format"] == "video/hls":
                    res = self.http.request("get", video["url"])
                    if res.status_code > 400:
                        yield ServiceError("Can't play this because the video is geoblocked or not available.")
                    else:
                        yield from hlsparse(self.config, res, video["url"], output=self.output)
                    if video["subtitles"]:
                        yield subtitle(copy.copy(self.config), "wrst", video["subtitles"][0]["link"], output=self.output)

    def find_all_episodes(self, config):
        """Return episode URLs for the series the current URL belongs to.

        Respects ``config["all_last"]`` (keep only the N newest episodes);
        otherwise the list is reversed so the oldest episode comes first.

        Returns:
            list[str]: absolute https://www.dr.dk episode URLs (possibly empty).
        """
        episodes = []
        data = self.get_urldata()
        match = re.search("__data = ([^<]+)</script>", data)
        if not match:
            # Guard clauses replace the original nested if/else pyramid.
            if "bonanza" not in self.url:
                logging.error("Can't find video info.")
                return episodes
            # Bonanza archive: scrape episode anchors relative to the series path.
            parse = urlparse(self.url)
            match = re.search(r"(\/bonanza\/serie\/[0-9]+\/[\-\w]+)", parse.path)
            if not match:
                logging.error("Can't find video info.")
                return episodes
            match = re.findall(rf"a href=\"({match.group(1)}\/\d+[^\"]+)\"", data)
            if not match:
                logging.error("Can't find video info.")
            for url in match:
                episodes.append(f"https://www.dr.dk{url}")
            return episodes

        janson = json.loads(match.group(1))
        page = janson["cache"]["page"][next(iter(janson["cache"]["page"]))]

        seasons = []
        if "show" in page["item"] and "seasons" in page["item"]["show"]:
            seasons = [f'https://www.dr.dk/drtv{i["path"]}' for i in page["item"]["show"]["seasons"]["items"]]

        if seasons:
            for season in seasons:
                data = self.http.get(season).text
                match = re.search("__data = ([^<]+)</script>", data)
                if not match:
                    # Fix: skip a season page missing the JSON blob instead of
                    # raising AttributeError on match.group(1).
                    logging.error("Can't find video info for %s", season)
                    continue
                janson = json.loads(match.group(1))
                page = janson["cache"]["page"][next(iter(janson["cache"]["page"]))]
                episodes.extend(self._get_episodes(page))
        else:
            episodes.extend(self._get_episodes(page))

        if config.get("all_last") != -1:
            episodes = episodes[: config.get("all_last")]
        else:
            episodes.reverse()

        return episodes

    def _get_episodes(self, page):
        """Return the watch URLs for every episode listed in a DRTV page dict."""
        if "episodes" not in page["item"]:
            return []
        return [f'https://www.dr.dk/drtv{i["watchPath"]}' for i in page["item"]["episodes"]["items"]]
|