# svtplay-dl (mirror of https://github.com/spaam/svtplay-dl.git)
# lib/svtplay_dl/service/kanal5.py
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import json
import copy
import os
from svtplay_dl.service import Service
from svtplay_dl.utils import filenamify
from svtplay_dl.log import log
from svtplay_dl.fetcher.rtmp import RTMP
from svtplay_dl.fetcher.hls import HLS, hlsparse
from svtplay_dl.subtitle import subtitle
from svtplay_dl.error import ServiceError
class Kanal5(Service):
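    # Kanal 5/9/11 Play: resolves a video page into RTMP (FLASH) and HLS (IPAD)
    # streams plus JSON subtitles via the kanal5play.se API.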
supported_domains = ['kanal5play.se', 'kanal9play.se', 'kanal11play.se']
def __init__(self, url):
Service.__init__(self, url)
self.cookies = {}
self.subtitle = None
def get(self, options):
match = re.search(r".*video/([0-9]+)", self.url)
if not match:
yield ServiceError("Can't find video file")
return
video_id = match.group(1)
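        # Optional login: the appspot endpoint answers with a JSONP payload; the
        # auth token in its userData is reused as the authToken cookie on later requests.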
if options.username and options.password:
# get session cookie
data = self.http.request("get", "http://www.kanal5play.se/", cookies=self.cookies)
authurl = "https://kanal5swe.appspot.com/api/user/login?callback=jQuery171029989&email=%s&password=%s&_=136250" % \
(options.username, options.password)
data = self.http.request("get", authurl, cookies=data.cookies).text
match = re.search(r"({.*})\);", data)
jsondata = json.loads(match.group(1))
if jsondata["success"] is False:
yield ServiceError(jsondata["message"])
return
authToken = jsondata["userData"]["auth"]
self.cookies = {"authToken": authToken}
options.cookies = self.cookies
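        # Ask the API for the FLASH playback description, which carries the RTMP streams.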
url = "http://www.kanal5play.se/api/getVideo?format=FLASH&videoId=%s" % video_id
data = self.http.request("get", url, cookies=self.cookies).text
data = json.loads(data)
options.cookies = self.cookies
if not options.live:
options.live = data["isLive"]
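        # Auto-generated output: name the file after programme, season, episode text,
        # video id and service.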
if options.output_auto:
directory = os.path.dirname(options.output)
options.service = "kanal5"
title = "%s-s%s-%s-%s-%s" % (data["program"]["name"], data["seasonNumber"], data["episodeText"], data["id"], options.service)
title = filenamify(title)
if len(directory):
options.output = os.path.join(directory, title)
else:
options.output = title
if self.exclude(options):
yield ServiceError("Excluding video")
return
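        # Subtitles are served as JSON from a separate endpoint.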
if data["hasSubtitle"]:
yield subtitle(copy.copy(options), "json", "http://www.kanal5play.se/api/subtitles/%s" % video_id)
if options.force_subtitle:
return
show = True
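        # FLASH response: every stream entry becomes an RTMP download, unless it is DRM protected.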
if "streams" in data.keys():
for i in data["streams"]:
if i["drmProtected"]:
yield ServiceError("We cant download drm files for this site.")
return
                streambaseurl = data["streamBaseUrl"]
bitrate = i["bitrate"]
if bitrate > 1000:
bitrate = bitrate / 1000
options2 = copy.copy(options)
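                # Extra rtmpdump arguments: -W (swfVfy) points at the player SWF, -y sets the playpath.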
options2.other = "-W %s -y %s " % ("http://www.kanal5play.se/flash/K5StandardPlayer.swf", i["source"])
options2.live = True
                yield RTMP(options2, streambaseurl, bitrate)
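        # IPAD response: the same video exposed as HLS playlists.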
url = "http://www.kanal5play.se/api/getVideo?format=IPAD&videoId=%s" % video_id
data = self.http.request("get", url, cookies=self.cookies)
data = json.loads(data.text)
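        # 'reasonsForNoStreams' is the API's explanation for why no streams are available.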
if "reasonsForNoStreams" in data:
show = False
if "streams" in data.keys():
for i in data["streams"]:
streams = hlsparse(i["source"], self.http.request("get", i["source"]).text)
for n in list(streams.keys()):
yield HLS(copy.copy(options), streams[n], n)
if "reasonsForNoStreams" in data and show:
yield ServiceError(data["reasonsForNoStreams"][0])
def find_all_episodes(self, options):
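        # Find the newest season on the programme page, then walk season pages downwards,
        # collecting episode links until a season page returns 404.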
        program = re.search(r".*/program/(\d+)", self.url)
if not program:
log.error("Can't find program id in url")
return None
baseurl = "http://www.kanal5play.se/content/program/%s" % program.group(1)
data = self.http.request("get", baseurl).text
        sasong = re.search(r"/program/\d+/sasong/(\d+)", data)
        if not sasong:
            log.error("Can't find season id")
            return None
        season = int(sasong.group(1))
episodes = []
n = 0
more = True
while more:
url = "%s/sasong/%s" % (baseurl, seasong)
data = self.http.request("get", url)
if data.status_code == 404:
more = False
else:
regex = re.compile(r'href="(/play/program/\d+/video/\d+)"')
for match in regex.finditer(data.text):
if n == options.all_last:
break
url2 = "http://www.kanal5play.se%s" % match.group(1)
if url2 not in episodes:
episodes.append(url2)
n += 1
            season -= 1
return episodes