# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import json
import copy
from urllib.parse import urlparse

from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.error import ServiceError


class Picsearch(Service, OpenGraphThumbMixin):
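    # Service for video embedded via the Picsearch/Screen9 CSP player
    # (csp.picsearch.com / csp.screen9.com) on the sites in supported_domains.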
    supported_domains = ['dn.se', 'mobil.dn.se', 'di.se', 'csp.picsearch.com', 'csp.screen9.com']

    def get(self):
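        # Resolve the ajax auth token and the media id from the embedding page,
        # then query the Screen9 player API for the available streams.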
|
2017-01-27 02:02:57 +01:00
|
|
|
self.backupapi = None
|
|
|
|
|
2016-03-16 22:50:43 +01:00
|
|
|
ajax_auth = self.get_auth()
|
2014-03-25 15:37:41 +01:00
|
|
|
if not ajax_auth:
|
2016-03-16 22:50:43 +01:00
|
|
|
yield ServiceError("Cant find token for video")
|
|
|
|
return
|
|
|
|
|
|
|
|
mediaid = self.get_mediaid()
|
2014-03-25 15:37:41 +01:00
|
|
|
if not mediaid:
|
2016-03-16 22:50:43 +01:00
|
|
|
yield ServiceError("Cant find media id")
|
|
|
|
return
|
|
|
|
if not isinstance(mediaid, str):
|
|
|
|
mediaid = mediaid.group(1)

        jsondata = self.http.request("get", "http://csp.screen9.com/player?eventParam=1&"
                                     "ajaxauth={0}&method=embed&mediaid={1}".format(ajax_auth.group(1), mediaid)).text
        jsondata = json.loads(jsondata)
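
        # The player response lists streams keyed by MIME type: HLS manifests are
        # expanded into individual quality streams, plain MP4 files are fetched over HTTP.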
        if "data" in jsondata:
            if "live" in jsondata["data"]["publishing_status"]:
                self.config.set("live", jsondata["data"]["publishing_status"]["live"])
            playlist = jsondata["data"]["streams"]
            for i in playlist:
                if "application/x-mpegurl" in i:
                    streams = hlsparse(self.config, self.http.request("get", i["application/x-mpegurl"]), i["application/x-mpegurl"])
                    if streams:
                        for n in list(streams.keys()):
                            yield streams[n]
                if "video/mp4" in i:
                    yield HTTP(copy.copy(self.config), i["video/mp4"], 800)
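
        # Fallback: the Picsearch/Screen9 REST embed wraps its JSON in a
        # ps.embedHandler(...) call; strip the wrapper and read the playlist from it.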
        if self.backupapi:
            res = self.http.get(self.backupapi.replace("i=", ""), params={"i": "object"})
            data = res.text.replace("ps.embedHandler(", "").replace('"");', '')
            data = data[:data.rfind(",")]
            jansson = json.loads(data)
            for i in jansson["media"]["playerconfig"]["playlist"]:
                if "provider" in i and i["provider"] == "httpstreaming":
                    streams = hlsparse(self.config, self.http.request("get", i["url"]), i["url"])
                    # Guard against an empty parse result, as in the branch above.
                    if streams:
                        for n in list(streams.keys()):
                            yield streams[n]

    def get_auth(self):
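        # The ajax auth token is embedded in the page markup in several different
        # ways depending on the site; try each known pattern in turn.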
        match = re.search(r"picsearch_ajax_auth[ ]*=[ ]*['\"]([^'\"]+)['\"]", self.get_urldata())
        if not match:
            match = re.search(r'screen9-ajax-auth="([^"]+)"', self.get_urldata())
        if not match:
            match = re.search('screen9"[ ]*:[ ]*"([^"]+)"', self.get_urldata())
        if not match:
            match = re.search('data-auth="([^"]+)"', self.get_urldata())
        if not match:
            match = re.search('s.src="(https://csp-ssl.picsearch.com[^"]+|http://csp.picsearch.com/rest[^"]+)', self.get_urldata())
            if match:
                data = self.http.request("get", match.group(1))
                self.backupapi = match.group(1)
                match = re.search(r'ajaxAuth": "([^"]+)"', data.text)
        if not match:
            match = re.search('iframe src="(//csp.screen9.com[^"]+)"', self.get_urldata())
            if match:
                url = "http:{0}".format(match.group(1))
                data = self.http.request("get", url)
                self.backupapi = url
                match = re.search(r"picsearch_ajax_auth = '([^']+)'", data.text)
                if not match:
                    match = re.search(r"screen9_ajax_auth = '([^']+)'", data.text)

        return match

    def get_mediaid(self):
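        # The media id likewise appears in several markup variants; as a last
        # resort, fall back to the fragment part of the page URL.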
        match = re.search(r"mediaId = '([^']+)';", self.get_urldata())
        if not match:
            match = re.search(r'media-id="([^"]+)"', self.get_urldata())
        if not match:
            match = re.search(r'screen9-mid="([^"]+)"', self.get_urldata())
        if not match:
            match = re.search(r'data-id="([^"]+)"', self.get_urldata())
        if not match:
            match = re.search(r'data-id=([^ ]+) ', self.get_urldata())
        if not match:
            match = re.search(r'data-videoid="([^"]+)"', self.get_urldata())
        if not match:
            match = re.search('s.src="(https://csp-ssl.picsearch.com[^"]+|http://csp.picsearch.com/rest[^"]+)', self.get_urldata())
            if match:
                data = self.http.request("get", match.group(1))
                match = re.search(r'mediaid": "([^"]+)"', data.text)
        if not match:
            match = re.search('iframe src="(//csp.screen9.com[^"]+)"', self.get_urldata())
            if match:
                url = "http:{0}".format(match.group(1))
                data = self.http.request("get", url)
                match = re.search(r"mediaid: '([^']+)'", data.text)
        if not match:
            urlp = urlparse(self.url)
            match = urlp.fragment
        return match