# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import json
import copy
import xml.etree.ElementTree as ET

from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.utils.urllib import urljoin
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.log import log
from svtplay_dl.error import ServiceError
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils import filenamify


class Urplay(Service, OpenGraphThumbMixin):
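    # Service plugin for UR Play and UR Skola, the streaming sites of the
    # Swedish educational broadcaster UR. Streams, subtitles and episode
    # lists are pulled from the urPlayer JSON configuration embedded in
    # each page.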
    supported_domains = ['urplay.se', 'ur.se', 'betaplay.ur.se', 'urskola.se']

    def get(self):
        data = self.get_urldata()
        match = re.search(r"urPlayer.init\((.*)\);", data)
        if not match:
            yield ServiceError("Can't find json info")
            return
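
        # Respect the user's exclude filter before fetching any streams.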
        if self.exclude():
            yield ServiceError("Excluding video")
            return

        data = match.group(1)
        jsondata = json.loads(data)
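        # The urPlayer.init() payload, as consumed below, looks roughly like
        # this (a sketch inferred from the fields accessed here, not a full
        # schema):
        #   {"subtitles": [{"label": ..., "file": "url1,url2"}, ...],
        #    "streaming_config": {"streamer": {"redirect": ...},
        #                         "loadbalancer": ...,
        #                         "http_streaming": {"hls_file": ...}},
        #    "file_http": ..., "file_http_hd": ...}
        # Each subtitle entry may list several files; use the first one and
        # pick the subtitle type from its extension: "wrst" for WebVTT-style
        # tracks, otherwise "tt" (Timed Text).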
        if len(jsondata["subtitles"]) > 0:
            for sub in jsondata["subtitles"]:
                if "label" in sub:
                    absurl = urljoin(self.url, sub["file"].split(",")[0])
                    if absurl.endswith("vtt"):
                        subtype = "wrst"
                    else:
                        subtype = "tt"
                    if self.options.get_all_subtitles:
                        yield subtitle(copy.copy(self.options), subtype, absurl, "-" + filenamify(sub["label"]))
                    else:
                        yield subtitle(copy.copy(self.options), subtype, absurl)

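        # The streaming host either comes straight from the config
        # ("streamer") or from a loadbalancer endpoint whose JSON response
        # names the host in its "redirect" field.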
if "streamer" in jsondata["streaming_config"]:
|
|
|
|
basedomain = jsondata["streaming_config"]["streamer"]["redirect"]
|
|
|
|
else:
|
|
|
|
lbjson = self.http.request("get", jsondata["streaming_config"]["loadbalancer"]).text
|
|
|
|
lbjson = json.loads(lbjson)
|
|
|
|
basedomain = lbjson["redirect"]
|
2015-03-21 17:34:22 +01:00
|
|
|
http = "http://%s/%s" % (basedomain, jsondata["file_http"])
        hd = None
        if len(jsondata["file_http_hd"]) > 0:
            http_hd = "http://%s/%s" % (basedomain, jsondata["file_http_hd"])
            hls_hd = "%s%s" % (http_hd, jsondata["streaming_config"]["http_streaming"]["hls_file"])
            hd = True
        hls = "%s%s" % (http, jsondata["streaming_config"]["http_streaming"]["hls_file"])
        streams = hlsparse(self.options, self.http.request("get", hls), hls)
        for n in list(streams.keys()):
            yield streams[n]
        if hd:
            streams = hlsparse(self.options, self.http.request("get", hls_hd), hls_hd)
            for n in list(streams.keys()):
                yield streams[n]

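    # Fallback episode listing: scrape episode links straight from the HTML
    # when the page does not advertise an RSS feed.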
    def scrape_episodes(self, options):
        res = []
        for relurl in re.findall(r'<a class="puff tv video"\s+title="[^"]*"\s+href="([^"]*)"',
                                 self.get_urldata()):
            res.append(urljoin(self.url, relurl.replace("&amp;", "&")))

        for relurl in re.findall(r'<a class="card program"\s+href="([^"]*)"',
                                 self.get_urldata()):
            res.append(urljoin(self.url, relurl.replace("&amp;", "&")))

        if options.all_last != -1:
            res = res[-options.all_last:]

        return res

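    # Episode discovery: prefer the RSS feed linked from the page and fall
    # back to HTML scraping when none is found.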
    def find_all_episodes(self, options):
        match = re.search(r'<link rel="alternate" type="application/rss\+xml" [^>]*href="([^"]+)"',
                          self.get_urldata())
        if match is None:
            log.info("Couldn't retrieve episode list as rss, trying to scrape")
            return self.scrape_episodes(options)

        url = "http://urplay.se%s" % match.group(1).replace("&amp;", "&")
        xml = ET.XML(self.http.request("get", url).content)

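        # Deduplicate the feed links while preserving their order, honoring
        # the options.all_last limit when it is set.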
        episodes = [x.text for x in xml.findall(".//item/link")]
        episodes_new = []
        n = 0
        for i in episodes:
            if n == options.all_last:
                break
            if i not in episodes_new:
                episodes_new.append(i)
            n += 1
        return episodes_new