# ytdl/youtube_dl/extractor/vimeo.py
# encoding: utf-8
from __future__ import unicode_literals

import json
import re
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
    compat_HTTPError,
    compat_urllib_parse,
    compat_urllib_request,
    clean_html,
    get_element_by_attribute,
    ExtractorError,
    RegexNotFoundError,
    std_headers,
    unsmuggle_url,
    urlencode_postdata,
    int_or_none,
)


class VimeoBaseInfoExtractor(InfoExtractor):
    _NETRC_MACHINE = 'vimeo'
    _LOGIN_REQUIRED = False
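
    # Shared login helper. Vimeo's login is form based: fetch /log_in, scrape
    # the 'xsrft' token from the page, then POST the credentials with that
    # token both as a form field and as an 'xsrft' cookie, mirroring what the
    # site's own login form does.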
    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return
        self.report_login()
        login_url = 'https://vimeo.com/log_in'
        webpage = self._download_webpage(login_url, None, False)
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = urlencode_postdata({
            'email': username,
            'password': password,
            'action': 'login',
            'service': 'vimeo',
            'token': token,
        })
        login_request = compat_urllib_request.Request(login_url, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(login_request, None, False, 'Wrong login info')


class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information extractor for vimeo.com."""

    # _VALID_URL matches Vimeo URLs
    _VALID_URL = r'''(?x)
        (?P<proto>(?:https?:)?//)?
        (?:(?:www|(?P<player>player))\.)?
        vimeo(?P<pro>pro)?\.com/
        (?:.*?/)?
        (?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
        (?:videos?/)?
        (?P<id>[0-9]+)
        /?(?:[?&].*)?(?:[#].*)?$'''
    IE_NAME = 'vimeo'
    _TESTS = [
        {
            'url': 'http://vimeo.com/56015672#at=0',
            'md5': '8879b6cc097e987f02484baf890129e5',
            'info_dict': {
                'id': '56015672',
                'ext': 'mp4',
                "upload_date": "20121220",
                "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "uploader_id": "user7108434",
                "uploader": "Filippo Valsorda",
                "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "duration": 10,
            },
        },
        {
            'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
            'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
            'note': 'Vimeo Pro video (#1197)',
            'info_dict': {
                'id': '68093876',
                'ext': 'mp4',
                'uploader_id': 'openstreetmapus',
                'uploader': 'OpenStreetMap US',
                'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
                'duration': 1595,
            },
        },
        {
            'url': 'http://player.vimeo.com/video/54469442',
            'md5': '619b811a4417aa4abe78dc653becf511',
            'note': 'Videos that embed the url in the player page',
            'info_dict': {
                'id': '54469442',
                'ext': 'mp4',
                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software',
                'uploader': 'The BLN & Business of Software',
                'uploader_id': 'theblnbusinessofsoftware',
                'duration': 3610,
            },
        },
        {
            'url': 'http://vimeo.com/68375962',
            'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
            'note': 'Video protected with password',
            'info_dict': {
                'id': '68375962',
                'ext': 'mp4',
                'title': 'youtube-dl password protected test video',
                'upload_date': '20130614',
                'uploader_id': 'user18948128',
                'uploader': 'Jaime Marquínez Ferrándiz',
                'duration': 10,
            },
            'params': {
                'videopassword': 'youtube-dl',
            },
        },
        {
            'url': 'http://vimeo.com/76979871',
            'md5': '3363dd6ffebe3784d56f4132317fd446',
            'note': 'Video with subtitles',
            'info_dict': {
                'id': '76979871',
                'ext': 'mp4',
                'title': 'The New Vimeo Player (You Know, For Videos)',
                'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
                'upload_date': '20131015',
                'uploader_id': 'staff',
                'uploader': 'Vimeo Staff',
                'duration': 62,
            }
        },
    ]

    @classmethod
    def suitable(cls, url):
        if VimeoChannelIE.suitable(url):
            # Otherwise channel urls like http://vimeo.com/channels/31259 would
            # match
            return False
        else:
            return super(VimeoIE, cls).suitable(url)
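
    # Password handling: vimeo.com pages expect the password (together with
    # the same 'xsrft' token used for login) POSTed to <video URL>/password,
    # while player.vimeo.com embeds use a JSON endpoint at <URL>/check-password.
    # The two helpers below cover those two cases.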
    def _verify_video_password(self, url, video_id, webpage):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = compat_urllib_parse.urlencode({
            'password': password,
            'token': token,
        })
        # I didn't manage to use the password with https
        if url.startswith('https'):
            pass_url = url.replace('https', 'http')
        else:
            pass_url = url
        password_request = compat_urllib_request.Request(pass_url + '/password', data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        password_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(password_request, video_id,
                               'Verifying the password',
                               'Wrong password')

    def _verify_player_video_password(self, url, video_id):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        data = compat_urllib_parse.urlencode({'password': password})
        pass_url = url + '/check-password'
        password_request = compat_urllib_request.Request(pass_url, data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        return self._download_json(
            password_request, video_id,
            'Verifying the password',
            'Wrong password')

    def _real_initialize(self):
        self._login()
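
    # The URL may arrive "smuggled" from another extractor (e.g. the page that
    # embeds the video); unsmuggle_url() splits off that extra data, which is
    # then sent as additional HTTP headers (typically a Referer) so that
    # embed-only videos can still be retrieved.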
    def _real_extract(self, url):
        url, data = unsmuggle_url(url)
        headers = std_headers
        if data is not None:
            headers = headers.copy()
            headers.update(data)

        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        if mobj.group('pro') or mobj.group('player'):
            url = 'http://player.vimeo.com/video/' + video_id
        else:
            url = 'https://vimeo.com/' + video_id

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url, None, headers)
        try:
            webpage = self._download_webpage(request, video_id)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                errmsg = ee.cause.read()
                if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                    raise ExtractorError(
                        'Cannot download embed-only video without embedding '
                        'URL. Please call youtube-dl with the URL of the page '
                        'that embeds this video.',
                        expected=True)
            raise

        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and later we extract those that are Vimeo specific.
        self.report_extraction(video_id)
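
        # The player configuration lives in one of two places: regular
        # vimeo.com pages point to it through a data-config-url attribute,
        # while Vimeo Pro and player.vimeo.com pages inline it as a
        # JavaScript object, hence the regex fallbacks below.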
        # Extract the config JSON
        try:
            try:
                config_url = self._html_search_regex(
                    r' data-config-url="(.+?)"', webpage, 'config URL')
                config_json = self._download_webpage(config_url, video_id)
                config = json.loads(config_json)
            except RegexNotFoundError:
                # For pro videos or player.vimeo.com urls
                # We try to find out to which variable the config dict is assigned
                m_variable_name = re.search('(\w)\.video\.id', webpage)
                if m_variable_name is not None:
                    config_re = r'%s=({.+?});' % re.escape(m_variable_name.group(1))
                else:
                    config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                config = self._search_regex(config_re, webpage, 'info section',
                    flags=re.DOTALL)
                config = json.loads(config)
        except Exception as e:
            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')

            if re.search('<form[^>]+?id="pw_form"', webpage) is not None:
                self._verify_video_password(url, video_id, webpage)
                return self._real_extract(url)
            else:
                raise ExtractorError('Unable to extract info section',
                                     cause=e)
        else:
            if config.get('view') == 4:
                config = self._verify_player_video_password(url, video_id)

        # Extract title
        video_title = config["video"]["title"]

        # Extract uploader and uploader_id
        video_uploader = config["video"]["owner"]["name"]
        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None

        # Extract video thumbnail
        video_thumbnail = config["video"].get("thumbnail")
        if video_thumbnail is None:
            video_thumbs = config["video"].get("thumbs")
            if video_thumbs and isinstance(video_thumbs, dict):
                _, video_thumbnail = sorted((int(width), t_url) for (width, t_url) in video_thumbs.items())[-1]

        # Extract video description
        video_description = None
        try:
            video_description = get_element_by_attribute("class", "description_wrapper", webpage)
            if video_description:
                video_description = clean_html(video_description)
        except AssertionError as err:
            # On some pages like (http://player.vimeo.com/video/54469442) the
            # html tags are not closed, python 2.6 cannot handle it
            if err.args[0] == 'we should not get here!':
                pass
            else:
                raise

        # Extract video duration
        video_duration = int_or_none(config["video"].get("duration"))

        # Extract upload date
        video_upload_date = None
        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)

        try:
            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
        except RegexNotFoundError:
            # This info is only available in vimeo.com/{id} urls
            view_count = None
            like_count = None
            comment_count = None
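
        # Format selection: the available files are bucketed by quality
        # ('hd', 'sd', everything else); within each codec a direct file URL
        # is used when present, otherwise a play_redirect URL is built from
        # the request signature and timestamp found in the config.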

        # Vimeo specific: extract request signature and timestamp
        sig = config['request']['signature']
        timestamp = config['request']['timestamp']

        # Vimeo specific: extract video codec and quality information
        # First consider quality, then codecs, then take everything
        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
        files = {'hd': [], 'sd': [], 'other': []}
        config_files = config["video"].get("files") or config["request"].get("files")
        for codec_name, codec_extension in codecs:
            for quality in config_files.get(codec_name, []):
                format_id = '-'.join((codec_name, quality)).lower()
                key = quality if quality in files else 'other'
                video_url = None
                if isinstance(config_files[codec_name], dict):
                    file_info = config_files[codec_name][quality]
                    video_url = file_info.get('url')
                else:
                    file_info = {}
                if video_url is None:
                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                        % (video_id, sig, timestamp, quality, codec_name.upper())
                files[key].append({
                    'ext': codec_extension,
                    'url': video_url,
                    'format_id': format_id,
                    'width': file_info.get('width'),
                    'height': file_info.get('height'),
                })
        formats = []
        for key in ('other', 'sd', 'hd'):
            formats += files[key]
        if len(formats) == 0:
            raise ExtractorError('No known codec found')

        subtitles = {}
        text_tracks = config['request'].get('text_tracks')
        if text_tracks:
            for tt in text_tracks:
                subtitles[tt['lang']] = 'http://vimeo.com' + tt['url']

        video_subtitles = self.extract_subtitles(video_id, subtitles)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return

        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': video_upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'duration': video_duration,
            'formats': formats,
            'webpage_url': url,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'subtitles': video_subtitles,
        }
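
# The list extractors below (channels, users, albums, groups and the watch
# later queue) share one pagination scheme: fetch successive page:N URLs,
# collect the id="clip_..." entries, and stop when the rel="next" link
# disappears from the page.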


class VimeoChannelIE(InfoExtractor):
    IE_NAME = 'vimeo:channel'
    _VALID_URL = r'(?:https?://)?vimeo\.com/channels/(?P<id>[^/]+)/?(\?.*)?$'
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'

    def _page_url(self, base_url, pagenum):
        return '%s/videos/page:%d/' % (base_url, pagenum)

    def _extract_list_title(self, webpage):
        return self._html_search_regex(self._TITLE_RE, webpage, 'list title')

    def _extract_videos(self, list_id, base_url):
        video_ids = []
        for pagenum in itertools.count(1):
            webpage = self._download_webpage(
                self._page_url(base_url, pagenum), list_id,
                'Downloading page %s' % pagenum)
            video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break

        entries = [self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
                   for video_id in video_ids]
        return {'_type': 'playlist',
                'id': list_id,
                'title': self._extract_list_title(webpage),
                'entries': entries,
                }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        return self._extract_videos(channel_id, 'http://vimeo.com/channels/%s' % channel_id)


class VimeoUserIE(VimeoChannelIE):
    IE_NAME = 'vimeo:user'
    _VALID_URL = r'(?:https?://)?vimeo\.com/(?P<name>[^/]+)(?:/videos|[#?]|$)'
    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
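
    # _VALID_URL above matches almost any vimeo.com/<something> URL, so
    # suitable() explicitly defers to the more specific extractors first.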
    @classmethod
    def suitable(cls, url):
        if VimeoChannelIE.suitable(url) or VimeoIE.suitable(url) or VimeoAlbumIE.suitable(url) or VimeoGroupsIE.suitable(url):
            return False
        return super(VimeoUserIE, cls).suitable(url)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/%s' % name)


class VimeoAlbumIE(VimeoChannelIE):
    IE_NAME = 'vimeo:album'
    _VALID_URL = r'(?:https?://)?vimeo\.com/album/(?P<id>\d+)'
    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'

    def _page_url(self, base_url, pagenum):
        return '%s/page:%d/' % (base_url, pagenum)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        album_id = mobj.group('id')
        return self._extract_videos(album_id, 'http://vimeo.com/album/%s' % album_id)


class VimeoGroupsIE(VimeoAlbumIE):
    IE_NAME = 'vimeo:group'
    _VALID_URL = r'(?:https?://)?vimeo\.com/groups/(?P<name>[^/]+)'

    def _extract_list_title(self, webpage):
        return self._og_search_title(webpage)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/groups/%s' % name)


class VimeoReviewIE(InfoExtractor):
    IE_NAME = 'vimeo:review'
    IE_DESC = 'Review pages on vimeo'
    _VALID_URL = r'(?:https?://)?vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
    _TEST = {
        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
        'file': '75524534.mp4',
        'md5': 'c507a72f780cacc12b2248bb4006d253',
        'info_dict': {
            'title': "DICK HARDWICK 'Comedian'",
            'uploader': 'Richard Hardwick',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        player_url = 'https://player.vimeo.com/player/' + video_id
        return self.url_result(player_url, 'Vimeo', video_id)


class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
    IE_NAME = 'vimeo:watchlater'
    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://vimeo\.com/home/watchlater|:vimeowatchlater'
    _LOGIN_REQUIRED = True
    _TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'

    def _real_initialize(self):
        self._login()

    def _page_url(self, base_url, pagenum):
        url = '%s/page:%d/' % (base_url, pagenum)
        request = compat_urllib_request.Request(url)
        # Set the header to get a partial html page with the ids,
        # the normal page doesn't contain them.
        request.add_header('X-Requested-With', 'XMLHttpRequest')
        return request

    def _real_extract(self, url):
        return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')
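

# A minimal usage sketch (not a definitive reference): these extractors are
# normally driven through youtube_dl.YoutubeDL rather than instantiated
# directly, e.g.
#
#   from youtube_dl import YoutubeDL
#
#   ydl = YoutubeDL({'skip_download': True})
#   info = ydl.extract_info('http://vimeo.com/56015672', download=False)
#   print(info['title'])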