diff --git a/.gitignore b/.gitignore index 1bea26359e..564bde1d19 100644 --- a/.gitignore +++ b/.gitignore @@ -1,17 +1,19 @@ *.pyc *.pyo *~ +*.DS_Store wine-py2exe/ py2exe.log -youtube-dl +*.kate-swp +build/ +dist/ +MANIFEST +README.txt youtube-dl.1 -LATEST_VERSION - -#OS X -.DS_Store -.AppleDouble -.LSOverride -Icon -._* -.Spotlight-V100 -.Trashes +youtube-dl.bash-completion +youtube-dl +youtube-dl.exe +youtube-dl.tar.gz +.coverage +cover/ +updates_key.pem diff --git a/.tarignore b/.tarignore new file mode 100644 index 0000000000..986afeed65 --- /dev/null +++ b/.tarignore @@ -0,0 +1,17 @@ +updates_key.pem +*.pyc +*.pyo +youtube-dl.exe +wine-py2exe/ +py2exe.log +*.kate-swp +build/ +dist/ +MANIFEST +*.DS_Store +youtube-dl.tar.gz +.coverage +cover/ +__pycache__/ +.git/ +*~ diff --git a/.travis.yml b/.travis.yml index 03947b1eb7..31eea852c8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,9 +1,14 @@ language: python -#specify the python version python: - "2.6" - "2.7" -#command to install the setup -install: -# command to run tests -script: nosetests test --nocapture + - "3.3" +script: nosetests test --verbose +notifications: + email: + - filippo.valsorda@gmail.com + - phihag@phihag.de + irc: + channels: + - "irc.freenode.org#youtube-dl" + skip_join: true diff --git a/CHANGELOG b/CHANGELOG new file mode 100644 index 0000000000..3fa1167331 --- /dev/null +++ b/CHANGELOG @@ -0,0 +1,14 @@ +2013.01.02 Codename: GIULIA + + * Add support for ComedyCentral clips + * Corrected Vimeo description fetching + * Added the --no-post-overwrites argument + * --verbose offers more environment info + * New info_dict field: uploader_id + * New updates system, with signature checking + * New IEs: NBA, JustinTV, FunnyOrDie, TweetReel, Steam, Ustream + * Fixed IEs: BlipTv + * Fixed for Python 3 IEs: Xvideo, Youku, XNXX, Dailymotion, Vimeo, InfoQ + * Simplified IEs and test code + * Various (Python 3 and other) fixes + * Revamped and expanded tests diff --git a/LATEST_VERSION b/LATEST_VERSION new file mode 100644 index 0000000000..d070c6ea3d --- /dev/null +++ b/LATEST_VERSION @@ -0,0 +1 @@ +2012.10.09 diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..68a49daad8 --- /dev/null +++ b/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. 
+ +For more information, please refer to <http://unlicense.org/> diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000..81f8e05cd9 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,3 @@ +include README.md +include test/*.py +include test/*.json \ No newline at end of file diff --git a/Makefile b/Makefile index 040c3b3f1b..0069e79753 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,7 @@ -all: youtube-dl README.md youtube-dl.1 youtube-dl.bash-completion LATEST_VERSION -# TODO: re-add youtube-dl.exe, and make sure it's 1. safe and 2. doesn't need sudo +all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion clean: - rm -f youtube-dl youtube-dl.exe youtube-dl.1 LATEST_VERSION youtube_dl/*.pyc + rm -rf youtube-dl youtube-dl.exe youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ PREFIX=/usr/local BINDIR=$(PREFIX)/bin @@ -17,43 +16,32 @@ install: youtube-dl youtube-dl.1 youtube-dl.bash-completion install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl -.PHONY: all clean install youtube-dl.bash-completion -# TODO un-phony README.md and youtube-dl.bash_completion by reading from .in files and generating from them +test: + #nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test + nosetests --verbose test + +.PHONY: all clean install test youtube-dl: youtube_dl/*.py - zip --quiet --junk-paths youtube-dl youtube_dl/*.py + zip --quiet youtube-dl youtube_dl/*.py + zip --quiet --junk-paths youtube-dl youtube_dl/__main__.py echo '#!/usr/bin/env python' > youtube-dl cat youtube-dl.zip >> youtube-dl rm youtube-dl.zip chmod a+x youtube-dl -youtube-dl.exe: youtube_dl/*.py - bash devscripts/wine-py2exe.sh build_exe.py - README.md: youtube_dl/*.py - @options=$$(COLUMNS=80 python -m youtube_dl --help | sed -e '1,/.*General Options.*/ d' -e 's/^\W\{2\}\(\w\)/## \1/') && \ - header=$$(sed -e '/.*# OPTIONS/,$$ d' README.md) && \ - footer=$$(sed -e '1,/.*# FAQ/ d' README.md) && \ - echo "$${header}" > README.md && \ - echo >> README.md && \ - echo '# OPTIONS' >> README.md && \ - echo "$${options}" >> README.md&& \ - echo >> README.md && \ - echo '# FAQ' >> README.md && \ - echo "$${footer}" >> README.md + COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py -youtube-dl.1: - pandoc -s -w man README.md -o youtube-dl.1 +README.txt: README.md + pandoc -f markdown -t plain README.md -o README.txt -youtube-dl.bash-completion: - @options=`egrep -o '(--[a-z-]+) ' README.md | sort -u | xargs echo` && \ - content=`sed "s/opts=\"[^\"]*\"/opts=\"$${options}\"/g" youtube-dl.bash-completion` && \ - echo "$${content}" > youtube-dl.bash-completion +youtube-dl.1: README.md + pandoc -s -f markdown -t man README.md -o youtube-dl.1 -LATEST_VERSION: youtube_dl/__init__.py - python -m youtube_dl --version > LATEST_VERSION +youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in + python devscripts/bash-completion.py -test: - nosetests2 --nocapture test - -.PHONY: default compile update update-latest update-readme test clean +youtube-dl.tar.gz: all + tar -cvzf youtube-dl.tar.gz -s "|^./|./youtube-dl/|" \ + --exclude-from=".tarignore" -- . 
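(Aside on the `youtube-dl` target above: it works because Python can execute a zip archive that has a `__main__.py` at its root, and a zip file still parses after arbitrary data is prepended, since its central directory sits at the end of the file. A minimal Python sketch of the same build step; the file names mirror the Makefile rule, and the script itself is illustrative, not part of this patch:

    # Illustrative sketch only: reproduces the Makefile's youtube-dl
    # target using just the standard library.
    import glob
    import os
    import zipfile

    with zipfile.ZipFile('youtube-dl.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
        for src in glob.glob('youtube_dl/*.py'):
            zf.write(src)  # keep the youtube_dl/ package layout
        # put the entry point at the archive root, like zip --junk-paths
        zf.write('youtube_dl/__main__.py', '__main__.py')

    with open('youtube-dl', 'wb') as out:
        out.write(b'#!/usr/bin/env python\n')  # shebang line first...
        with open('youtube-dl.zip', 'rb') as zin:
            out.write(zin.read())  # ...then the raw zip bytes
    os.remove('youtube-dl.zip')
    os.chmod('youtube-dl', 0o755)  # chmod a+x

Running `./youtube-dl` then hands the whole file to the interpreter, which finds the embedded `__main__.py`, exactly as the FAQ entry further down describes.)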
diff --git a/README.md b/README.md index 0ac75f5d7f..4b0de1b889 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -% youtube-dl(1) +% YOUTUBE-DL(1) # NAME youtube-dl @@ -20,6 +20,11 @@ # OPTIONS -i, --ignore-errors continue on download errors -r, --rate-limit LIMIT download rate limit (e.g. 50k or 44.6m) -R, --retries RETRIES number of retries (default is 10) + --buffer-size SIZE size of download buffer (e.g. 1024 or 16k) (default + is 1024) + --no-resize-buffer do not automatically adjust the buffer size. By + default, the buffer size is automatically resized + from an initial value of SIZE. --dump-user-agent display the current browser identification --user-agent UA specify a custom user agent --list-extractors List all supported extractors and the URLs they @@ -37,16 +42,22 @@ # OPTIONS Filesystem Options: -t, --title use title in file name --id use video ID in file name - -l, --literal use literal title in file name + -l, --literal [deprecated] alias of --title -A, --auto-number number downloaded files starting from 00000 - -o, --output TEMPLATE output filename template. Use %(stitle)s to get the + -o, --output TEMPLATE output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, - %(autonumber)s to get an automatically incremented - number, %(ext)s for the filename extension, - %(upload_date)s for the upload date (YYYYMMDD), - %(extractor)s for the provider (youtube, metacafe, - etc), %(id)s for the video id and %% for a literal - percent. Use - to output to stdout. + %(uploader_id)s for the uploader nickname if + different, %(autonumber)s to get an automatically + incremented number, %(ext)s for the filename + extension, %(upload_date)s for the upload date + (YYYYMMDD), %(extractor)s for the provider + (youtube, metacafe, etc), %(id)s for the video id + and %% for a literal percent. Use - to output to + stdout. Can also be used to download to a different + directory, for example with -o '/my/downloads/%(upl + oader)s/%(title)s-%(id)s.%(ext)s' . + --restrict-filenames Restrict filenames to only ASCII characters, and + avoid "&" and spaces in filenames -a, --batch-file FILE file containing URLs to download ('-' for stdin) -w, --no-overwrites do not overwrite files -c, --continue resume partially downloaded files @@ -101,6 +112,34 @@ # OPTIONS specific bitrate like 128K (default 5) -k, --keep-video keeps the video file on disk after the post- processing; the video is erased by default + --no-post-overwrites do not overwrite post-processed files; the post- + processed files are overwritten by default + +# CONFIGURATION + +You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.local/config/youtube-dl.conf`. + +# OUTPUT TEMPLATE + +The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are: + + - `id`: The sequence will be replaced by the video identifier. + - `url`: The sequence will be replaced by the video URL. 
+ - `uploader`: The sequence will be replaced by the nickname of the person who uploaded the video. + - `upload_date`: The sequence will be replaced by the upload date in YYYYMMDD format. + - `title`: The sequence will be replaced by the video title. + - `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4). + - `epoch`: The sequence will be replaced by the Unix epoch when creating the file. + - `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero. + +The current default template is `%(id)s.%(ext)s`, but that will be switched to `%(title)s-%(id)s.%(ext)s` (which can be requested with `-t` at the moment). + +In some cases, you don't want special characters such as 中, spaces, or &, for example when transferring the downloaded filename to a Windows system or passing it through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title: + + $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc + youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters + $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames + youtube-dl_test_video_.mp4 # A simple file name # FAQ @@ -137,17 +176,9 @@ ### SyntaxError: Non-ASCII character ### means you're using an outdated version of Python. Please update to Python 2.6 or 2.7. -To run youtube-dl under Python 2.5, you'll have to manually check it out like this: - - git clone git://github.com/rg3/youtube-dl.git - cd youtube-dl - python -m youtube_dl --help - -Please note that Python 2.5 is not supported anymore. - ### What is this binary file? Where has the code gone? -Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repo to see the code. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make compile`. +Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`. ### The exe throws a *Runtime error from Visual C++* @@ -166,6 +197,9 @@ # BUGS Please include: * Your exact command line, like `youtube-dl -t "http://www.youtube.com/watch?v=uHlDtZ6Oc3s&feature=channel_video_title"`. A common mistake is not to escape the `&`. Putting URLs in quotes should solve this problem. +* If possible, re-run the command with `--verbose` and include the full output; it is really helpful to us. * The output of `youtube-dl --version` * The output of `python --version` * The name and version of your Operating System ("Ubuntu 11.04 x64" or "Windows 7 x64" is usually enough). + +For discussions, join us in the IRC channel #youtube-dl on freenode. 
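(Aside: the `%(NAME)s` sequences documented above are plain Python string interpolation against the video's info dictionary, so a template's expansion can be previewed without downloading anything. A small illustrative sketch; the dictionary values come from the test video used elsewhere in this patch, and the template is the example from the option help:

    # Illustrative only: expanding an output template by hand.
    info_dict = {
        'id': 'BaW_jenozKc',
        'title': 'youtube-dl test video',
        'uploader': 'Philipp Hagemeister',
        'upload_date': '20121002',
        'ext': 'mp4',
    }
    template = '/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s'
    print(template % info_dict)
    # /my/downloads/Philipp Hagemeister/youtube-dl test video-BaW_jenozKc.mp4

A `%%` in the template survives as a literal percent sign, which is why the option help singles it out.)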
diff --git a/bin/youtube-dl b/bin/youtube-dl new file mode 100755 index 0000000000..fc3cc8ad88 --- /dev/null +++ b/bin/youtube-dl @@ -0,0 +1,6 @@ +#!/usr/bin/env python + +import youtube_dl + +if __name__ == '__main__': + youtube_dl.main() diff --git a/build_exe.py b/build_exe.py deleted file mode 100644 index 9fa8186cbf..0000000000 --- a/build_exe.py +++ /dev/null @@ -1,48 +0,0 @@ -from distutils.core import setup -import py2exe -import sys, os - -"""This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package""" - -# If run without args, build executables -if len(sys.argv) == 1: - sys.argv.append("py2exe") - -# os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) # conflict with wine-py2exe.sh -sys.path.append('./youtube_dl') - -options = { - "bundle_files": 1, - "compressed": 1, - "optimize": 2, - "dist_dir": '.', - "dll_excludes": ['w9xpopen.exe'] -} - -console = [{ - "script":"./youtube_dl/__main__.py", - "dest_base": "youtube-dl", -}] - -init_file = open('./youtube_dl/__init__.py') -for line in init_file.readlines(): - if line.startswith('__version__'): - version = line[11:].strip(" ='\n") - break -else: - version = '' - -setup(name='youtube-dl', - version=version, - description='Small command-line program to download videos from YouTube.com and other video sites', - url='https://github.com/rg3/youtube-dl', - packages=['youtube_dl'], - - console = console, - options = {"py2exe": options}, - zipfile = None, -) - -import shutil -shutil.rmtree("build") - diff --git a/devscripts/bash-completion.in b/devscripts/bash-completion.in new file mode 100644 index 0000000000..3b99a96145 --- /dev/null +++ b/devscripts/bash-completion.in @@ -0,0 +1,14 @@ +__youtube-dl() +{ + local cur prev opts + COMPREPLY=() + cur="${COMP_WORDS[COMP_CWORD]}" + opts="{{flags}}" + + if [[ ${cur} == * ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + fi +} + +complete -F __youtube-dl youtube-dl diff --git a/devscripts/bash-completion.py b/devscripts/bash-completion.py new file mode 100755 index 0000000000..49287724d6 --- /dev/null +++ b/devscripts/bash-completion.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python +import os +from os.path import dirname as dirn +import sys + +sys.path.append(dirn(dirn((os.path.abspath(__file__))))) +import youtube_dl + +BASH_COMPLETION_FILE = "youtube-dl.bash-completion" +BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in" + +def build_completion(opt_parser): + opts_flag = [] + for group in opt_parser.option_groups: + for option in group.option_list: + #for every long flag + opts_flag.append(option.get_opt_string()) + with open(BASH_COMPLETION_TEMPLATE) as f: + template = f.read() + with open(BASH_COMPLETION_FILE, "w") as f: + #just using the special char + filled_template = template.replace("{{flags}}", " ".join(opts_flag)) + f.write(filled_template) + +parser = youtube_dl.parseOpts()[0] +build_completion(parser) diff --git a/devscripts/gh-pages/add-version.py b/devscripts/gh-pages/add-version.py new file mode 100755 index 0000000000..6af8bb9d84 --- /dev/null +++ b/devscripts/gh-pages/add-version.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 + +import json +import sys +import hashlib +import urllib.request + +if len(sys.argv) <= 1: + print('Specify the version number as parameter') + sys.exit() +version = sys.argv[1] + +with open('update/LATEST_VERSION', 'w') as f: + f.write(version) + +versions_info = json.load(open('update/versions.json')) +if 'signature' in versions_info: + del versions_info['signature'] + +new_version = {} + 
+filenames = {'bin': 'youtube-dl', 'exe': 'youtube-dl.exe', 'tar': 'youtube-dl-%s.tar.gz' % version} +for key, filename in filenames.items(): + print('Downloading and checksumming %s...' % filename) + url = 'http://youtube-dl.org/downloads/%s/%s' % (version, filename) + data = urllib.request.urlopen(url).read() + sha256sum = hashlib.sha256(data).hexdigest() + new_version[key] = (url, sha256sum) + +versions_info['versions'][version] = new_version +versions_info['latest'] = version + +json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True) \ No newline at end of file diff --git a/devscripts/gh-pages/generate-download.py b/devscripts/gh-pages/generate-download.py new file mode 100755 index 0000000000..55912e12c4 --- /dev/null +++ b/devscripts/gh-pages/generate-download.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +import hashlib +import shutil +import subprocess +import tempfile +import urllib.request +import json + +versions_info = json.load(open('update/versions.json')) +version = versions_info['latest'] +URL = versions_info['versions'][version]['bin'][0] + +data = urllib.request.urlopen(URL).read() + +# Read template page +with open('download.html.in', 'r', encoding='utf-8') as tmplf: + template = tmplf.read() + +md5sum = hashlib.md5(data).hexdigest() +sha1sum = hashlib.sha1(data).hexdigest() +sha256sum = hashlib.sha256(data).hexdigest() +template = template.replace('@PROGRAM_VERSION@', version) +template = template.replace('@PROGRAM_URL@', URL) +template = template.replace('@PROGRAM_MD5SUM@', md5sum) +template = template.replace('@PROGRAM_SHA1SUM@', sha1sum) +template = template.replace('@PROGRAM_SHA256SUM@', sha256sum) +template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0]) +template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1]) +template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0]) +template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1]) +with open('download.html', 'w', encoding='utf-8') as dlf: + dlf.write(template) diff --git a/devscripts/gh-pages/sign-versions.py b/devscripts/gh-pages/sign-versions.py new file mode 100755 index 0000000000..dd126df52e --- /dev/null +++ b/devscripts/gh-pages/sign-versions.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 + +import rsa +import json +from binascii import hexlify + +versions_info = json.load(open('update/versions.json')) +if 'signature' in versions_info: + del versions_info['signature'] + +print('Enter the PKCS1 private key, followed by a blank line:') +privkey = '' +while True: + try: + line = input() + except EOFError: + break + if line == '': + break + privkey += line + '\n' +privkey = bytes(privkey, 'ascii') +privkey = rsa.PrivateKey.load_pkcs1(privkey) + +signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode() +print('signature: ' + signature) + +versions_info['signature'] = signature +json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True) \ No newline at end of file diff --git a/devscripts/gh-pages/update-copyright.py b/devscripts/gh-pages/update-copyright.py new file mode 100755 index 0000000000..12c2a91949 --- /dev/null +++ b/devscripts/gh-pages/update-copyright.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +# coding: utf-8 + +from __future__ import with_statement + +import datetime +import glob +import io # For Python 2 compatibility +import os +import re + +year = 
str(datetime.datetime.now().year) +for fn in glob.glob('*.html*'): + with io.open(fn, encoding='utf-8') as f: + content = f.read() + newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content) + if content != newc: + tmpFn = fn + '.part' + with io.open(tmpFn, 'wt', encoding='utf-8') as outf: + outf.write(newc) + os.rename(tmpFn, fn) diff --git a/devscripts/make_readme.py b/devscripts/make_readme.py new file mode 100755 index 0000000000..7f2ea319cc --- /dev/null +++ b/devscripts/make_readme.py @@ -0,0 +1,20 @@ +import sys +import re + +README_FILE = 'README.md' +helptext = sys.stdin.read() + +with open(README_FILE) as f: + oldreadme = f.read() + +header = oldreadme[:oldreadme.index('# OPTIONS')] +footer = oldreadme[oldreadme.index('# CONFIGURATION'):] + +options = helptext[helptext.index(' General Options:')+19:] +options = re.sub(r'^ (\w.+)$', r'## \1', options, flags=re.M) +options = '# OPTIONS\n' + options + '\n' + +with open(README_FILE, 'w') as f: + f.write(header) + f.write(options) + f.write(footer) diff --git a/devscripts/release.sh b/devscripts/release.sh index 963a6c22bc..cf5784e8c9 100755 --- a/devscripts/release.sh +++ b/devscripts/release.sh @@ -1,11 +1,85 @@ #!/bin/sh +# IMPORTANT: the following assumptions are made +# * the GH repo is on the origin remote +# * the gh-pages branch is named so locally +# * the git config user.signingkey is properly set + +# You will need +# pip install coverage nose rsa + +# TODO +# release notes +# make hash on local files + +set -e + if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi version="$1" if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi -if [ ! -z "`git status --porcelain`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi -sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/__init__.py -make all -git add -A +if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi +if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi + +echo "\n### First of all, testing..." +make clean +nosetests --with-coverage --cover-package=youtube_dl --cover-html test || exit 1 + +echo "\n### Changing version in version.py..." +sed -i~ "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py + +echo "\n### Committing CHANGELOG README.md and youtube_dl/version.py..." +make README.md +git add CHANGELOG README.md youtube_dl/version.py git commit -m "release $version" + +echo "\n### Now tagging, signing and pushing..." +git tag -s -m "Release $version" "$version" +git show "$version" +read -p "Is it good, can I push? (y/n) " -n 1 +if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi +echo +MASTER=$(git rev-parse --abbrev-ref HEAD) +git push origin $MASTER:master +git push origin "$version" + +echo "\n### OK, now it is time to build the binaries..." 
+REV=$(git rev-parse HEAD) +make youtube-dl youtube-dl.tar.gz +wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \ + wget "http://jeromelaheurte.net:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe +mkdir -p "update_staging/$version" +mv youtube-dl youtube-dl.exe "update_staging/$version" +mv youtube-dl.tar.gz "update_staging/$version/youtube-dl-$version.tar.gz" +RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz" +(cd update_staging/$version/ && md5sum $RELEASE_FILES > MD5SUMS) +(cd update_staging/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS) +(cd update_staging/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS) +(cd update_staging/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS) +git checkout HEAD -- youtube-dl youtube-dl.exe + +echo "\n### Signing and uploading the new binaries to youtube-dl.org..." +for f in $RELEASE_FILES; do gpg --detach-sig "update_staging/$version/$f"; done +scp -r "update_staging/$version" ytdl@youtube-dl.org:html/downloads/ +rm -r update_staging + +echo "\n### Now switching to gh-pages..." +git checkout gh-pages +git checkout "$MASTER" -- devscripts/gh-pages/ +git reset devscripts/gh-pages/ +devscripts/gh-pages/add-version.py $version +devscripts/gh-pages/sign-versions.py < updates_key.pem +devscripts/gh-pages/generate-download.py +devscripts/gh-pages/update-copyright.py +rm -r test_coverage +mv cover test_coverage +git add *.html *.html.in update test_coverage +git commit -m "release $version" +git show HEAD +read -p "Is it good, can I push? (y/n) " -n 1 +if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi +echo +git push origin gh-pages + +echo "\n### DONE!" +rm -r devscripts +git checkout $MASTER diff --git a/devscripts/transition_helper.py b/devscripts/transition_helper.py new file mode 100644 index 0000000000..d5ca2d4bac --- /dev/null +++ b/devscripts/transition_helper.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python + +import sys, os + +try: + import urllib.request as compat_urllib_request +except ImportError: # Python 2 + import urllib2 as compat_urllib_request + +sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n') +sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n') +sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n') + +try: + raw_input() +except NameError: # Python 3 + input() + +filename = sys.argv[0] + +API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads" +BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl" + +if not os.access(filename, os.W_OK): + sys.exit('ERROR: no write permissions on %s' % filename) + +try: + urlh = compat_urllib_request.urlopen(BIN_URL) + newcontent = urlh.read() + urlh.close() +except (IOError, OSError) as err: + sys.exit('ERROR: unable to download latest version') + +try: + with open(filename, 'wb') as outf: + outf.write(newcontent) +except (IOError, OSError) as err: + sys.exit('ERROR: unable to overwrite current version') + +sys.stderr.write(u'Done! 
Now you can run youtube-dl.\n') diff --git a/devscripts/transition_helper_exe/setup.py b/devscripts/transition_helper_exe/setup.py new file mode 100644 index 0000000000..aaf5c2983e --- /dev/null +++ b/devscripts/transition_helper_exe/setup.py @@ -0,0 +1,12 @@ +from distutils.core import setup +import py2exe + +py2exe_options = { + "bundle_files": 1, + "compressed": 1, + "optimize": 2, + "dist_dir": '.', + "dll_excludes": ['w9xpopen.exe'] +} + +setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None) \ No newline at end of file diff --git a/devscripts/transition_helper_exe/youtube-dl.py b/devscripts/transition_helper_exe/youtube-dl.py new file mode 100644 index 0000000000..dbb4c99e15 --- /dev/null +++ b/devscripts/transition_helper_exe/youtube-dl.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python + +import sys, os +import urllib2 +import json, hashlib + +def rsa_verify(message, signature, key): + from struct import pack + from hashlib import sha256 + from sys import version_info + def b(x): + if version_info[0] == 2: return x + else: return x.encode('latin1') + assert(type(message) == type(b(''))) + block_size = 0 + n = key[0] + while n: + block_size += 1 + n >>= 8 + signature = pow(int(signature, 16), key[1], key[0]) + raw_bytes = [] + while signature: + raw_bytes.insert(0, pack("B", signature & 0xFF)) + signature >>= 8 + signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes) + if signature[0:2] != b('\x00\x01'): return False + signature = signature[2:] + if not b('\x00') in signature: return False + signature = signature[signature.index(b('\x00'))+1:] + if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False + signature = signature[19:] + if signature != sha256(message).digest(): return False + return True + +sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n') +sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n') +sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n') + +raw_input() + +filename = sys.argv[0] + +UPDATE_URL = "http://rg3.github.com/youtube-dl/update/" +VERSION_URL = UPDATE_URL + 'LATEST_VERSION' +JSON_URL = UPDATE_URL + 'versions.json' +UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537) + +if not os.access(filename, os.W_OK): + sys.exit('ERROR: no write permissions on %s' % filename) + +exe = os.path.abspath(filename) +directory = os.path.dirname(exe) +if not os.access(directory, os.W_OK): + sys.exit('ERROR: no write permissions on %s' % directory) + +try: + versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8') + versions_info = json.loads(versions_info) +except: + sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.') +if not 'signature' in versions_info: + sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.') +signature = versions_info['signature'] +del versions_info['signature'] +if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY): + sys.exit(u'ERROR: the versions file signature is invalid. 
Aborting.') + +version = versions_info['versions'][versions_info['latest']] + +try: + urlh = urllib2.urlopen(version['exe'][0]) + newcontent = urlh.read() + urlh.close() +except (IOError, OSError) as err: + sys.exit('ERROR: unable to download latest version') + +newcontent_hash = hashlib.sha256(newcontent).hexdigest() +if newcontent_hash != version['exe'][1]: + sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.') + +try: + with open(exe + '.new', 'wb') as outf: + outf.write(newcontent) +except (IOError, OSError) as err: + sys.exit(u'ERROR: unable to write the new version') + +try: + bat = os.path.join(directory, 'youtube-dl-updater.bat') + b = open(bat, 'w') + b.write(""" +echo Updating youtube-dl... +ping 127.0.0.1 -n 5 -w 1000 > NUL +move /Y "%s.new" "%s" +del "%s" + \n""" %(exe, exe, bat)) + b.close() + + os.startfile(bat) +except (IOError, OSError) as err: + sys.exit('ERROR: unable to overwrite current version') + +sys.stderr.write(u'Done! Now you can run youtube-dl.\n') diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000..6d019dcbb6 --- /dev/null +++ b/setup.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import print_function +from distutils.core import setup +import pkg_resources +import sys + +try: + import py2exe + """This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package""" +except ImportError: + if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe': + print("Cannot import py2exe", file=sys.stderr) + exit(1) + +py2exe_options = { + "bundle_files": 1, + "compressed": 1, + "optimize": 2, + "dist_dir": '.', + "dll_excludes": ['w9xpopen.exe'] +} +py2exe_console = [{ + "script": "./youtube_dl/__main__.py", + "dest_base": "youtube-dl", +}] +py2exe_params = { + 'console': py2exe_console, + 'options': { "py2exe": py2exe_options }, + 'zipfile': None +} + +if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe': + params = py2exe_params +else: + params = { + 'scripts': ['bin/youtube-dl'], + 'data_files': [('etc/bash_completion.d', ['youtube-dl.bash-completion']), # Installing system-wide would require sudo... + ('share/doc/youtube_dl', ['README.txt']), + ('share/man/man1/', ['youtube-dl.1'])] + } + +# Get the version from youtube_dl/version.py without importing the package +exec(compile(open('youtube_dl/version.py').read(), 'youtube_dl/version.py', 'exec')) + +setup( + name = 'youtube_dl', + version = __version__, + description = 'YouTube video downloader', + long_description = 'Small command-line program to download videos from YouTube.com and other video sites.', + url = 'https://github.com/rg3/youtube-dl', + author = 'Ricardo Garcia', + maintainer = 'Philipp Hagemeister', + maintainer_email = 'phihag@phihag.de', + packages = ['youtube_dl'], + + # Provokes warning on most systems (why?!) 
+ #test_suite = 'nose.collector', + #test_requires = ['nosetest'], + + classifiers = [ + "Topic :: Multimedia :: Video", + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "License :: Public Domain", + "Programming Language :: Python :: 2.6", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.3" + ], + + **params +) diff --git a/test/parameters.json b/test/parameters.json index cc2b017ebb..8215d25c5c 100644 --- a/test/parameters.json +++ b/test/parameters.json @@ -1 +1,40 @@ -{"username": null, "listformats": null, "skip_download": false, "usenetrc": false, "max_downloads": null, "noprogress": false, "forcethumbnail": false, "forceformat": false, "format_limit": null, "ratelimit": null, "nooverwrites": false, "forceurl": false, "writeinfojson": false, "simulate": false, "playliststart": 1, "continuedl": true, "password": null, "prefer_free_formats": false, "nopart": false, "retries": 10, "updatetime": true, "consoletitle": false, "verbose": true, "forcefilename": false, "ignoreerrors": false, "logtostderr": false, "format": null, "subtitleslang": null, "quiet": false, "outtmpl": "%(id)s.%(ext)s", "rejecttitle": null, "playlistend": -1, "writedescription": false, "forcetitle": false, "forcedescription": false, "writesubtitles": false, "matchtitle": null} \ No newline at end of file +{ + "consoletitle": false, + "continuedl": true, + "forcedescription": false, + "forcefilename": false, + "forceformat": false, + "forcethumbnail": false, + "forcetitle": false, + "forceurl": false, + "format": null, + "format_limit": null, + "ignoreerrors": false, + "listformats": null, + "logtostderr": false, + "matchtitle": null, + "max_downloads": null, + "nooverwrites": false, + "nopart": false, + "noprogress": false, + "outtmpl": "%(id)s.%(ext)s", + "password": null, + "playlistend": -1, + "playliststart": 1, + "prefer_free_formats": false, + "quiet": false, + "ratelimit": null, + "rejecttitle": null, + "retries": 10, + "simulate": false, + "skip_download": false, + "subtitleslang": null, + "test": true, + "updatetime": true, + "usenetrc": false, + "username": null, + "verbose": true, + "writedescription": false, + "writeinfojson": true, + "writesubtitles": false +} \ No newline at end of file diff --git a/test/test_all_urls.py b/test/test_all_urls.py new file mode 100644 index 0000000000..06de8e7b8e --- /dev/null +++ b/test/test_all_urls.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python + +import sys +import unittest + +# Allow direct execution +import os +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from youtube_dl.InfoExtractors import YoutubeIE, YoutubePlaylistIE + +class TestAllURLsMatching(unittest.TestCase): + def test_youtube_playlist_matching(self): + self.assertTrue(YoutubePlaylistIE().suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')) + self.assertTrue(YoutubePlaylistIE().suitable(u'PL63F0C78739B09958')) + self.assertFalse(YoutubePlaylistIE().suitable(u'PLtS2H6bU1M')) + + def test_youtube_matching(self): + self.assertTrue(YoutubeIE().suitable(u'PLtS2H6bU1M')) + + def test_youtube_extract(self): + self.assertEqual(YoutubeIE()._extract_id('http://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc') + self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc') + self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc'), 'BaW_jenozKc') + +if __name__ == '__main__': + 
unittest.main() diff --git a/test/test_download.py b/test/test_download.py index 545afb9221..40d596fdf1 100644 --- a/test/test_download.py +++ b/test/test_download.py @@ -1,93 +1,125 @@ -#!/usr/bin/env python2 -import unittest +#!/usr/bin/env python + +import errno import hashlib +import io import os import json +import unittest +import sys +import hashlib +import socket -from youtube_dl.FileDownloader import FileDownloader -from youtube_dl.InfoExtractors import YoutubeIE, DailymotionIE -from youtube_dl.InfoExtractors import MetacafeIE, BlipTVIE +# Allow direct execution +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +import youtube_dl.FileDownloader +import youtube_dl.InfoExtractors +from youtube_dl.utils import * + +DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json') +PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json") + +# General configuration (from __init__, not very elegant...) +jar = compat_cookiejar.CookieJar() +cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar) +proxy_handler = compat_urllib_request.ProxyHandler() +opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler()) +compat_urllib_request.install_opener(opener) + +def _try_rm(filename): + """ Remove a file if it exists """ + try: + os.remove(filename) + except OSError as ose: + if ose.errno != errno.ENOENT: + raise + +class FileDownloader(youtube_dl.FileDownloader): + def __init__(self, *args, **kwargs): + self.to_stderr = self.to_screen + self.processed_info_dicts = [] + return youtube_dl.FileDownloader.__init__(self, *args, **kwargs) + def process_info(self, info_dict): + self.processed_info_dicts.append(info_dict) + return youtube_dl.FileDownloader.process_info(self, info_dict) + +def _file_md5(fn): + with open(fn, 'rb') as f: + return hashlib.md5(f.read()).hexdigest() + +with io.open(DEF_FILE, encoding='utf-8') as deff: + defs = json.load(deff) +with io.open(PARAMETERS_FILE, encoding='utf-8') as pf: + parameters = json.load(pf) -class DownloadTest(unittest.TestCase): - PARAMETERS_FILE = "test/parameters.json" - #calculated with md5sum: - #md5sum (GNU coreutils) 8.19 +class TestDownload(unittest.TestCase): + def setUp(self): + self.parameters = parameters + self.defs = defs - YOUTUBE_SIZE = 1993883 - YOUTUBE_URL = "http://www.youtube.com/watch?v=BaW_jenozKc" - YOUTUBE_FILE = "BaW_jenozKc.mp4" +### Dynamically generate tests +def generator(test_case): - DAILYMOTION_MD5 = "d363a50e9eb4f22ce90d08d15695bb47" - DAILYMOTION_URL = "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech" - DAILYMOTION_FILE = "x33vw9.mp4" + def test_template(self): + ie = getattr(youtube_dl.InfoExtractors, test_case['name'] + 'IE') + if not ie._WORKING: + print('Skipping: IE marked as not _WORKING') + return + if 'playlist' not in test_case and not test_case['file']: + print('Skipping: No output file specified') + return + if 'skip' in test_case: + print('Skipping: {0}'.format(test_case['skip'])) + return - METACAFE_SIZE = 5754305 - METACAFE_URL = "http://www.metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/" - METACAFE_FILE = "_aUehQsCQtM.flv" + params = self.parameters.copy() + params.update(test_case.get('params', {})) - BLIP_MD5 = "93c24d2f4e0782af13b8a7606ea97ba7" - BLIP_URL = "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352" - BLIP_FILE = "5779306.m4v" + fd = FileDownloader(params) + 
fd.add_info_extractor(ie()) + for ien in test_case.get('add_ie', []): + fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')()) - XVIDEO_MD5 = "" - XVIDEO_URL = "" - XVIDEO_FILE = "" + test_cases = test_case.get('playlist', [test_case]) + for tc in test_cases: + _try_rm(tc['file']) + _try_rm(tc['file'] + '.part') + _try_rm(tc['file'] + '.info.json') + try: + fd.download([test_case['url']]) + + for tc in test_cases: + if not test_case.get('params', {}).get('skip_download', False): + self.assertTrue(os.path.exists(tc['file'])) + self.assertTrue(os.path.exists(tc['file'] + '.info.json')) + if 'md5' in tc: + md5_for_file = _file_md5(tc['file']) + self.assertEqual(md5_for_file, tc['md5']) + with io.open(tc['file'] + '.info.json', encoding='utf-8') as infof: + info_dict = json.load(infof) + for (info_field, value) in tc.get('info_dict', {}).items(): + if value.startswith('md5:'): + md5_info_value = hashlib.md5(info_dict.get(info_field, '')).hexdigest() + self.assertEqual(value[3:], md5_info_value) + else: + self.assertEqual(value, info_dict.get(info_field)) + finally: + for tc in test_cases: + _try_rm(tc['file']) + _try_rm(tc['file'] + '.part') + _try_rm(tc['file'] + '.info.json') + + return test_template + +### And add them to TestDownload +for test_case in defs: + test_method = generator(test_case) + test_method.__name__ = "test_{0}".format(test_case["name"]) + setattr(TestDownload, test_method.__name__, test_method) + del test_method - def test_youtube(self): - #let's download a file from youtube - with open(DownloadTest.PARAMETERS_FILE) as f: - fd = FileDownloader(json.load(f)) - fd.add_info_extractor(YoutubeIE()) - fd.download([DownloadTest.YOUTUBE_URL]) - self.assertTrue(os.path.exists(DownloadTest.YOUTUBE_FILE)) - self.assertEqual(os.path.getsize(DownloadTest.YOUTUBE_FILE), DownloadTest.YOUTUBE_SIZE) - - def test_dailymotion(self): - with open(DownloadTest.PARAMETERS_FILE) as f: - fd = FileDownloader(json.load(f)) - fd.add_info_extractor(DailymotionIE()) - fd.download([DownloadTest.DAILYMOTION_URL]) - self.assertTrue(os.path.exists(DownloadTest.DAILYMOTION_FILE)) - md5_down_file = md5_for_file(DownloadTest.DAILYMOTION_FILE) - self.assertEqual(md5_down_file, DownloadTest.DAILYMOTION_MD5) - - def test_metacafe(self): - #this emulate a skip,to be 2.6 compatible - with open(DownloadTest.PARAMETERS_FILE) as f: - fd = FileDownloader(json.load(f)) - fd.add_info_extractor(MetacafeIE()) - fd.add_info_extractor(YoutubeIE()) - fd.download([DownloadTest.METACAFE_URL]) - self.assertTrue(os.path.exists(DownloadTest.METACAFE_FILE)) - self.assertEqual(os.path.getsize(DownloadTest.METACAFE_FILE), DownloadTest.METACAFE_SIZE) - - def test_blip(self): - with open(DownloadTest.PARAMETERS_FILE) as f: - fd = FileDownloader(json.load(f)) - fd.add_info_extractor(BlipTVIE()) - fd.download([DownloadTest.BLIP_URL]) - self.assertTrue(os.path.exists(DownloadTest.BLIP_FILE)) - md5_down_file = md5_for_file(DownloadTest.BLIP_FILE) - self.assertEqual(md5_down_file, DownloadTest.BLIP_MD5) - - def tearDown(self): - if os.path.exists(DownloadTest.YOUTUBE_FILE): - os.remove(DownloadTest.YOUTUBE_FILE) - if os.path.exists(DownloadTest.DAILYMOTION_FILE): - os.remove(DownloadTest.DAILYMOTION_FILE) - if os.path.exists(DownloadTest.METACAFE_FILE): - os.remove(DownloadTest.METACAFE_FILE) - if os.path.exists(DownloadTest.BLIP_FILE): - os.remove(DownloadTest.BLIP_FILE) - -def md5_for_file(filename, block_size=2**20): - with open(filename) as f: - md5 = hashlib.md5() - while True: - data = f.read(block_size) - if not 
data: - break - md5.update(data) - return md5.hexdigest() +if __name__ == '__main__': + unittest.main() diff --git a/test/test_execution.py b/test/test_execution.py new file mode 100644 index 0000000000..2b115fb31a --- /dev/null +++ b/test/test_execution.py @@ -0,0 +1,26 @@ +import unittest + +import sys +import os +import subprocess + +rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +try: + _DEV_NULL = subprocess.DEVNULL +except AttributeError: + _DEV_NULL = open(os.devnull, 'wb') + +class TestExecution(unittest.TestCase): + def test_import(self): + subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir) + + def test_module_exec(self): + if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution + subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL) + + def test_main_exec(self): + subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL) + +if __name__ == '__main__': + unittest.main() diff --git a/test/test_utils.py b/test/test_utils.py index e7c6d5b3d7..eeaaa7fad7 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -1,47 +1,100 @@ -# -*- coding: utf-8 -*- +#!/usr/bin/env python # Various small unit tests +import sys import unittest +# Allow direct execution +import os +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + #from youtube_dl.utils import htmlentity_transform from youtube_dl.utils import timeconvert from youtube_dl.utils import sanitize_filename from youtube_dl.utils import unescapeHTML from youtube_dl.utils import orderedSet +if sys.version_info < (3, 0): + _compat_str = lambda b: b.decode('unicode-escape') +else: + _compat_str = lambda s: s + class TestUtil(unittest.TestCase): - def test_timeconvert(self): - self.assertTrue(timeconvert('') is None) - self.assertTrue(timeconvert('bougrg') is None) + def test_timeconvert(self): + self.assertTrue(timeconvert('') is None) + self.assertTrue(timeconvert('bougrg') is None) - def test_sanitize_filename(self): - self.assertEqual(sanitize_filename(u'abc'), u'abc') - self.assertEqual(sanitize_filename(u'abc_d-e'), u'abc_d-e') + def test_sanitize_filename(self): + self.assertEqual(sanitize_filename('abc'), 'abc') + self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e') - self.assertEqual(sanitize_filename(u'123'), u'123') + self.assertEqual(sanitize_filename('123'), '123') - self.assertEqual(u'abc-de', sanitize_filename(u'abc/de')) - self.assertFalse(u'/' in sanitize_filename(u'abc/de///')) + self.assertEqual('abc_de', sanitize_filename('abc/de')) + self.assertFalse('/' in sanitize_filename('abc/de///')) - self.assertEqual(u'abc-de', sanitize_filename(u'abc/<>\\*|de')) - self.assertEqual(u'xxx', sanitize_filename(u'xxx/<>\\*|')) - self.assertEqual(u'yes no', sanitize_filename(u'yes? no')) - self.assertEqual(u'this - that', sanitize_filename(u'this: that')) + self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de')) + self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|')) + self.assertEqual('yes no', sanitize_filename('yes? 
no')) + self.assertEqual('this - that', sanitize_filename('this: that')) - self.assertEqual(sanitize_filename(u'ä'), u'ä') - self.assertEqual(sanitize_filename(u'кириллица'), u'кириллица') + self.assertEqual(sanitize_filename('AT&T'), 'AT&T') + aumlaut = _compat_str('\xe4') + self.assertEqual(sanitize_filename(aumlaut), aumlaut) + tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430') + self.assertEqual(sanitize_filename(tests), tests) - for forbidden in u'"\0\\/': - self.assertTrue(forbidden not in sanitize_filename(forbidden)) + forbidden = '"\0\\/' + for fc in forbidden: + for fbc in forbidden: + self.assertTrue(fbc not in sanitize_filename(fc)) - def test_ordered_set(self): - self.assertEqual(orderedSet([1,1,2,3,4,4,5,6,7,3,5]), [1,2,3,4,5,6,7]) - self.assertEqual(orderedSet([]), []) - self.assertEqual(orderedSet([1]), [1]) - #keep the list ordered - self.assertEqual(orderedSet([135,1,1,1]), [135,1]) + def test_sanitize_filename_restricted(self): + self.assertEqual(sanitize_filename('abc', restricted=True), 'abc') + self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e') - def test_unescape_html(self): - self.assertEqual(unescapeHTML(u"%20;"), u"%20;") + self.assertEqual(sanitize_filename('123', restricted=True), '123') + + self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True)) + self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True)) + + self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True)) + self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True)) + self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True)) + self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True)) + + tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c') + self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c') + self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '') # No empty filename + + forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#' + for fc in forbidden: + for fbc in forbidden: + self.assertTrue(fbc not in sanitize_filename(fc, restricted=True)) + + # Handle a common case more neatly + self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song') + self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech') + # .. 
but make sure the file name is never empty + self.assertTrue(sanitize_filename('-', restricted=True) != '') + self.assertTrue(sanitize_filename(':', restricted=True) != '') + + def test_sanitize_ids(self): + self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw') + self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw') + self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI') + + def test_ordered_set(self): + self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7]) + self.assertEqual(orderedSet([]), []) + self.assertEqual(orderedSet([1]), [1]) + #keep the list ordered + self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1]) + + def test_unescape_html(self): + self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;')) + +if __name__ == '__main__': + unittest.main() diff --git a/test/test_write_info_json.py b/test/test_write_info_json.py new file mode 100644 index 0000000000..8134dda371 --- /dev/null +++ b/test/test_write_info_json.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# coding: utf-8 + +import json +import os +import sys +import unittest + +# Allow direct execution +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +import youtube_dl.FileDownloader +import youtube_dl.InfoExtractors +from youtube_dl.utils import * + +PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json") + +# General configuration (from __init__, not very elegant...) +jar = compat_cookiejar.CookieJar() +cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar) +proxy_handler = compat_urllib_request.ProxyHandler() +opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler()) +compat_urllib_request.install_opener(opener) + +class FileDownloader(youtube_dl.FileDownloader): + def __init__(self, *args, **kwargs): + youtube_dl.FileDownloader.__init__(self, *args, **kwargs) + self.to_stderr = self.to_screen + +with io.open(PARAMETERS_FILE, encoding='utf-8') as pf: + params = json.load(pf) +params['writeinfojson'] = True +params['skip_download'] = True +params['writedescription'] = True + +TEST_ID = 'BaW_jenozKc' +INFO_JSON_FILE = TEST_ID + '.mp4.info.json' +DESCRIPTION_FILE = TEST_ID + '.mp4.description' +EXPECTED_DESCRIPTION = u'''test chars: "'/\ä↭𝕐 + +This is a test video for youtube-dl. 
+ +For more information, contact phihag@phihag.de .''' + +class TestInfoJSON(unittest.TestCase): + def setUp(self): + # Clear old files + self.tearDown() + + def test_info_json(self): + ie = youtube_dl.InfoExtractors.YoutubeIE() + fd = FileDownloader(params) + fd.add_info_extractor(ie) + fd.download([TEST_ID]) + self.assertTrue(os.path.exists(INFO_JSON_FILE)) + with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf: + jd = json.load(jsonf) + self.assertEqual(jd['upload_date'], u'20121002') + self.assertEqual(jd['description'], EXPECTED_DESCRIPTION) + self.assertEqual(jd['id'], TEST_ID) + self.assertEqual(jd['extractor'], 'youtube') + self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''') + self.assertEqual(jd['uploader'], 'Philipp Hagemeister') + + self.assertTrue(os.path.exists(DESCRIPTION_FILE)) + with io.open(DESCRIPTION_FILE, 'r', encoding='utf-8') as descf: + descr = descf.read() + self.assertEqual(descr, EXPECTED_DESCRIPTION) + + def tearDown(self): + if os.path.exists(INFO_JSON_FILE): + os.remove(INFO_JSON_FILE) + if os.path.exists(DESCRIPTION_FILE): + os.remove(DESCRIPTION_FILE) + +if __name__ == '__main__': + unittest.main() diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py new file mode 100644 index 0000000000..3044e0852a --- /dev/null +++ b/test/test_youtube_lists.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python + +import sys +import unittest +import json + +# Allow direct execution +import os +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from youtube_dl.InfoExtractors import YoutubeUserIE,YoutubePlaylistIE +from youtube_dl.utils import * + +PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json") +with io.open(PARAMETERS_FILE, encoding='utf-8') as pf: + parameters = json.load(pf) + +# General configuration (from __init__, not very elegant...) +jar = compat_cookiejar.CookieJar() +cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar) +proxy_handler = compat_urllib_request.ProxyHandler() +opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler()) +compat_urllib_request.install_opener(opener) + +class FakeDownloader(object): + def __init__(self): + self.result = [] + self.params = parameters + def to_screen(self, s): + print(s) + def trouble(self, s): + raise Exception(s) + def download(self, x): + self.result.append(x) + +class TestYoutubeLists(unittest.TestCase): + def test_youtube_playlist(self): + DL = FakeDownloader() + IE = YoutubePlaylistIE(DL) + IE.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re') + self.assertEqual(DL.result, [ + ['http://www.youtube.com/watch?v=bV9L5Ht9LgY'], + ['http://www.youtube.com/watch?v=FXxLjLQi3Fg'], + ['http://www.youtube.com/watch?v=tU3Bgo5qJZE'] + ]) + + def test_youtube_playlist_long(self): + DL = FakeDownloader() + IE = YoutubePlaylistIE(DL) + IE.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q') + self.assertTrue(len(DL.result) >= 799) + + def test_youtube_course(self): + DL = FakeDownloader() + IE = YoutubePlaylistIE(DL) + # TODO find a > 100 (paginating?) 
videos course + IE.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8') + self.assertEqual(DL.result[0], ['http://www.youtube.com/watch?v=j9WZyLZCBzs']) + self.assertEqual(len(DL.result), 25) + self.assertEqual(DL.result[-1], ['http://www.youtube.com/watch?v=rYefUsYuEp0']) + + def test_youtube_channel(self): + # I give up, please find a channel that does paginate and test this like test_youtube_playlist_long + pass # TODO + + def test_youtube_user(self): + DL = FakeDownloader() + IE = YoutubeUserIE(DL) + IE.extract('https://www.youtube.com/user/TheLinuxFoundation') + self.assertTrue(len(DL.result) >= 320) + +if __name__ == '__main__': + unittest.main() diff --git a/test/test_youtube_subtitles.py b/test/test_youtube_subtitles.py new file mode 100644 index 0000000000..5d3566a35f --- /dev/null +++ b/test/test_youtube_subtitles.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +import sys +import unittest +import json +import io +import hashlib + +# Allow direct execution +import os +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from youtube_dl.InfoExtractors import YoutubeIE +from youtube_dl.utils import * + +PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json") +with io.open(PARAMETERS_FILE, encoding='utf-8') as pf: + parameters = json.load(pf) + +# General configuration (from __init__, not very elegant...) +jar = compat_cookiejar.CookieJar() +cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar) +proxy_handler = compat_urllib_request.ProxyHandler() +opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler()) +compat_urllib_request.install_opener(opener) + +class FakeDownloader(object): + def __init__(self): + self.result = [] + self.params = parameters + def to_screen(self, s): + print(s) + def trouble(self, s): + raise Exception(s) + def download(self, x): + self.result.append(x) + +md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest() + +class TestYoutubeSubtitles(unittest.TestCase): + def test_youtube_subtitles(self): + DL = FakeDownloader() + DL.params['writesubtitles'] = True + IE = YoutubeIE(DL) + info_dict = IE.extract('QRS8MkLhQmM') + self.assertEqual(md5(info_dict[0]['subtitles']), 'c3228550d59116f3c29fba370b55d033') + + def test_youtube_subtitles_it(self): + DL = FakeDownloader() + DL.params['writesubtitles'] = True + DL.params['subtitleslang'] = 'it' + IE = YoutubeIE(DL) + info_dict = IE.extract('QRS8MkLhQmM') + self.assertEqual(md5(info_dict[0]['subtitles']), '132a88a0daf8e1520f393eb58f1f646a') + +if __name__ == '__main__': + unittest.main() diff --git a/test/tests.json b/test/tests.json new file mode 100644 index 0000000000..cd72a88e35 --- /dev/null +++ b/test/tests.json @@ -0,0 +1,164 @@ +[ + { + "name": "Youtube", + "url": "http://www.youtube.com/watch?v=BaW_jenozKc", + "file": "BaW_jenozKc.mp4", + "info_dict": { + "title": "youtube-dl test video \"'/\\ä↭𝕐", + "uploader": "Philipp Hagemeister", + "uploader_id": "phihag", + "upload_date": "20121002", + "description": "test chars: \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ." 
+ } + }, + { + "name": "Dailymotion", + "md5": "392c4b85a60a90dc4792da41ce3144eb", + "url": "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech", + "file": "x33vw9.mp4" + }, + { + "name": "Metacafe", + "add_ie": ["Youtube"], + "url": "http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/", + "file": "_aUehQsCQtM.flv" + }, + { + "name": "BlipTV", + "md5": "b2d849efcf7ee18917e4b4d9ff37cafe", + "url": "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352", + "file": "5779306.m4v" + }, + { + "name": "XVideos", + "md5": "1d0c835822f0a71a7bf011855db929d0", + "url": "http://www.xvideos.com/video939581/funny_porns_by_s_-1", + "file": "939581.flv" + }, + { + "name": "Vimeo", + "md5": "8879b6cc097e987f02484baf890129e5", + "url": "http://vimeo.com/56015672", + "file": "56015672.mp4", + "info_dict": { + "title": "youtube-dl test video - ★ \" ' 幸 / \\ ä ↭ 𝕐", + "uploader": "Filippo Valsorda", + "uploader_id": "user7108434", + "upload_date": "20121220", + "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: ★ \" ' 幸 / \\ ä ↭ 𝕐" + } + }, + { + "name": "Soundcloud", + "md5": "ebef0a451b909710ed1d7787dddbf0d7", + "url": "http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy", + "file": "62986583.mp3" + }, + { + "name": "StanfordOpenClassroom", + "md5": "544a9468546059d4e80d76265b0443b8", + "url": "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100", + "file": "PracticalUnix_intro-environment.mp4" + }, + { + "name": "XNXX", + "md5": "0831677e2b4761795f68d417e0b7b445", + "url": "http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_", + "file": "1135332.flv" + }, + { + "name": "Youku", + "url": "http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html", + "file": "XNDgyMDQ2NTQw_part00.flv", + "md5": "ffe3f2e435663dc2d1eea34faeff5b5b", + "params": { "test": false } + }, + { + "name": "NBA", + "url": "http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html", + "file": "0021200253-okc-bkn-recap.nba.mp4", + "md5": "c0edcfc37607344e2ff8f13c378c88a4" + }, + { + "name": "JustinTV", + "url": "http://www.twitch.tv/thegamedevhub/b/296128360", + "file": "296128360.flv", + "md5": "ecaa8a790c22a40770901460af191c9a" + }, + { + "name": "MyVideo", + "url": "http://www.myvideo.de/watch/8229274/bowling_fail_or_win", + "file": "8229274.flv", + "md5": "2d2753e8130479ba2cb7e0a37002053e" + }, + { + "name": "Escapist", + "url": "http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate", + "file": "6618-Breaking-Down-Baldurs-Gate.flv", + "md5": "c6793dbda81388f4264c1ba18684a74d", + "skip": "Fails with timeout on Travis" + }, + { + "name": "GooglePlus", + "url": "https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH", + "file": "ZButuJc6CtH.flv" + }, + { + "name": "FunnyOrDie", + "url": "http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version", + "file": "0732f586d7.mp4", + "md5": "f647e9e90064b53b6e046e75d0241fbd" + }, + { + "name": "TweetReel", + "url": "http://tweetreel.com/?77smq", + "file": "77smq.mov", + "md5": "56b4d9ca9de467920f3f99a6d91255d6", + "info_dict": { + "uploader": "itszero", + "uploader_id": "itszero", + "upload_date": "20091225", + "description": "Installing Gentoo Linux on Powerbook G4, it turns out the sleep indicator becomes HDD activity indicator :D" + } + }, + { + "name": 
"Steam", + "url": "http://store.steampowered.com/video/105600/", + "playlist": [ + { + "file": "81300.flv", + "md5": "f870007cee7065d7c76b88f0a45ecc07", + "info_dict": { + "title": "Terraria 1.1 Trailer" + } + }, + { + "file": "80859.flv", + "md5": "61aaf31a5c5c3041afb58fb83cbb5751", + "info_dict": { + "title": "Terraria Trailer" + } + } + ] + }, + { + "name": "Ustream", + "url": "http://www.ustream.tv/recorded/20274954", + "file": "20274954.flv", + "md5": "088f151799e8f572f84eb62f17d73e5c", + "info_dict": { + "title": "Young Americans for Liberty February 7, 2012 2:28 AM" + } + }, + { + "name": "InfoQ", + "url": "http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things", + "file": "12-jan-pythonthings.mp4", + "info_dict": { + "title": "A Few of My Favorite [Python] Things" + }, + "params": { + "skip_download": true + } + } +] diff --git a/youtube-dl b/youtube-dl new file mode 100755 index 0000000000..6ba864a14c Binary files /dev/null and b/youtube-dl differ diff --git a/youtube-dl.bash-completion b/youtube-dl.bash-completion deleted file mode 100644 index 76451a2b27..0000000000 --- a/youtube-dl.bash-completion +++ /dev/null @@ -1,14 +0,0 @@ -__youtube-dl() -{ - local cur prev opts - COMPREPLY=() - cur="${COMP_WORDS[COMP_CWORD]}" - opts="--all-formats --audio-format --audio-quality --auto-number --batch-file --console-title --continue --cookies --dump-user-agent --extract-audio --format --get-description --get-filename --get-format --get-thumbnail --get-title --get-url --help --id --ignore-errors --keep-video --list-extractors --list-formats --literal --match-title --max-downloads --max-quality --netrc --no-continue --no-mtime --no-overwrites --no-part --no-progress --output --password --playlist-end --playlist-start --prefer-free-formats --quiet --rate-limit --reject-title --retries --simulate --skip-download --srt-lang --title --update --user-agent --username --verbose --version --write-description --write-info-json --write-srt" - - if [[ ${cur} == * ]] ; then - COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) - return 0 - fi -} - -complete -F __youtube-dl youtube-dl diff --git a/youtube-dl.exe b/youtube-dl.exe new file mode 100644 index 0000000000..45eee04bbd Binary files /dev/null and b/youtube-dl.exe differ diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py index 4449fe711f..be9e4918ec 100644 --- a/youtube_dl/FileDownloader.py +++ b/youtube_dl/FileDownloader.py @@ -1,693 +1,738 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -import httplib +from __future__ import absolute_import + import math +import io import os import re import socket import subprocess import sys import time -import urllib2 +import traceback if os.name == 'nt': - import ctypes + import ctypes -from utils import * +from .utils import * class FileDownloader(object): - """File Downloader class. - - File downloader objects are the ones responsible of downloading the - actual video file and writing it to disk if the user has requested - it, among some other tasks. In most cases there should be one per - program. As, given a video URL, the downloader doesn't know how to - extract all the needed information, task that InfoExtractors do, it - has to pass the URL to one of them. - - For this, file downloader objects have a method that allows - InfoExtractors to be registered in a given order. When it is passed - a URL, the file downloader handles it to the first InfoExtractor it - finds that reports being able to handle it. 
The InfoExtractor extracts - all the information about the video or videos the URL refers to, and - asks the FileDownloader to process the video information, possibly - downloading the video. - - File downloaders accept a lot of parameters. In order not to saturate - the object constructor with arguments, it receives a dictionary of - options instead. These options are available through the params - attribute for the InfoExtractors to use. The FileDownloader also - registers itself as the downloader in charge for the InfoExtractors - that are added to it, so this is a "mutual registration". - - Available options: - - username: Username for authentication purposes. - password: Password for authentication purposes. - usenetrc: Use netrc for authentication instead. - quiet: Do not print messages to stdout. - forceurl: Force printing final URL. - forcetitle: Force printing title. - forcethumbnail: Force printing thumbnail URL. - forcedescription: Force printing description. - forcefilename: Force printing final filename. - simulate: Do not download the video files. - format: Video format code. - format_limit: Highest quality format to try. - outtmpl: Template for output names. - ignoreerrors: Do not stop on download errors. - ratelimit: Download speed limit, in bytes/sec. - nooverwrites: Prevent overwriting files. - retries: Number of times to retry for HTTP error 5xx - continuedl: Try to continue downloads if possible. - noprogress: Do not print the progress bar. - playliststart: Playlist item to start at. - playlistend: Playlist item to end at. - matchtitle: Download only matching titles. - rejecttitle: Reject downloads for matching titles. - logtostderr: Log messages to stderr instead of stdout. - consoletitle: Display progress in console window's titlebar. - nopart: Do not use temporary .part files. - updatetime: Use the Last-modified header to set output file timestamps. 
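For reference, the updatetime behavior reduces to parsing the RFC 2822 date from the Last-modified header and applying it with os.utime. A minimal sketch (the helper name is hypothetical; in this codebase try_utime does this via timeconvert()):

    import email.utils
    import os
    import time

    def set_mtime_from_header(path, last_modified):
        # e.g. last_modified = 'Tue, 02 Oct 2012 09:00:00 GMT'
        timetuple = email.utils.parsedate_tz(last_modified)
        if timetuple is None:
            return
        filetime = email.utils.mktime_tz(timetuple)  # Unix timestamp
        os.utime(path, (time.time(), filetime))      # atime = now, mtime = header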
- writedescription: Write the video description to a .description file - writeinfojson: Write the video description to a .info.json file - writesubtitles: Write the video subtitles to a .srt file - subtitleslang: Language of the subtitles to download - """ - - params = None - _ies = [] - _pps = [] - _download_retcode = None - _num_downloads = None - _screen_file = None - - def __init__(self, params): - """Create a FileDownloader object with the given options.""" - self._ies = [] - self._pps = [] - self._download_retcode = 0 - self._num_downloads = 0 - self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] - self.params = params - - @staticmethod - def format_bytes(bytes): - if bytes is None: - return 'N/A' - if type(bytes) is str: - bytes = float(bytes) - if bytes == 0.0: - exponent = 0 - else: - exponent = long(math.log(bytes, 1024.0)) - suffix = 'bkMGTPEZY'[exponent] - converted = float(bytes) / float(1024 ** exponent) - return '%.2f%s' % (converted, suffix) - - @staticmethod - def calc_percent(byte_counter, data_len): - if data_len is None: - return '---.-%' - return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0)) - - @staticmethod - def calc_eta(start, now, total, current): - if total is None: - return '--:--' - dif = now - start - if current == 0 or dif < 0.001: # One millisecond - return '--:--' - rate = float(current) / dif - eta = long((float(total) - float(current)) / rate) - (eta_mins, eta_secs) = divmod(eta, 60) - if eta_mins > 99: - return '--:--' - return '%02d:%02d' % (eta_mins, eta_secs) - - @staticmethod - def calc_speed(start, now, bytes): - dif = now - start - if bytes == 0 or dif < 0.001: # One millisecond - return '%10s' % '---b/s' - return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif)) - - @staticmethod - def best_block_size(elapsed_time, bytes): - new_min = max(bytes / 2.0, 1.0) - new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB - if elapsed_time < 0.001: - return long(new_max) - rate = bytes / elapsed_time - if rate > new_max: - return long(new_max) - if rate < new_min: - return long(new_min) - return long(rate) - - @staticmethod - def parse_bytes(bytestr): - """Parse a string indicating a byte quantity into a long integer.""" - matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr) - if matchobj is None: - return None - number = float(matchobj.group(1)) - multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower()) - return long(round(number * multiplier)) - - def add_info_extractor(self, ie): - """Add an InfoExtractor object to the end of the list.""" - self._ies.append(ie) - ie.set_downloader(self) - - def add_post_processor(self, pp): - """Add a PostProcessor object to the end of the chain.""" - self._pps.append(pp) - pp.set_downloader(self) - - def to_screen(self, message, skip_eol=False): - """Print message to stdout if not in quiet mode.""" - assert type(message) == type(u'') - if not self.params.get('quiet', False): - terminator = [u'\n', u''][skip_eol] - output = message + terminator - if 'b' not in self._screen_file.mode or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr - output = output.encode(preferredencoding(), 'ignore') - self._screen_file.write(output) - self._screen_file.flush() - - def to_stderr(self, message): - """Print message to stderr.""" - print >>sys.stderr, message.encode(preferredencoding()) - - def to_cons_title(self, message): - """Set console/terminal window title to message.""" - if not 
self.params.get('consoletitle', False): - return - if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): - # c_wchar_p() might not be necessary if `message` is - # already of type unicode() - ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) - elif 'TERM' in os.environ: - sys.stderr.write('\033]0;%s\007' % message.encode(preferredencoding())) - - def fixed_template(self): - """Checks if the output template is fixed.""" - return (re.search(ur'(?u)%\(.+?\)s', self.params['outtmpl']) is None) - - def trouble(self, message=None): - """Determine action to take when a download problem appears. - - Depending on if the downloader has been configured to ignore - download errors or not, this method may throw an exception or - not when errors are found, after printing the message. - """ - if message is not None: - self.to_stderr(message) - if not self.params.get('ignoreerrors', False): - raise DownloadError(message) - self._download_retcode = 1 - - def slow_down(self, start_time, byte_counter): - """Sleep if the download speed is over the rate limit.""" - rate_limit = self.params.get('ratelimit', None) - if rate_limit is None or byte_counter == 0: - return - now = time.time() - elapsed = now - start_time - if elapsed <= 0.0: - return - speed = float(byte_counter) / elapsed - if speed > rate_limit: - time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit) - - def temp_name(self, filename): - """Returns a temporary filename for the given filename.""" - if self.params.get('nopart', False) or filename == u'-' or \ - (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))): - return filename - return filename + u'.part' - - def undo_temp_name(self, filename): - if filename.endswith(u'.part'): - return filename[:-len(u'.part')] - return filename - - def try_rename(self, old_filename, new_filename): - try: - if old_filename == new_filename: - return - os.rename(encodeFilename(old_filename), encodeFilename(new_filename)) - except (IOError, OSError), err: - self.trouble(u'ERROR: unable to rename file') - - def try_utime(self, filename, last_modified_hdr): - """Try to set the last-modified time of the given file.""" - if last_modified_hdr is None: - return - if not os.path.isfile(encodeFilename(filename)): - return - timestr = last_modified_hdr - if timestr is None: - return - filetime = timeconvert(timestr) - if filetime is None: - return filetime - try: - os.utime(filename, (time.time(), filetime)) - except: - pass - return filetime - - def report_writedescription(self, descfn): - """ Report that the description file is being written """ - self.to_screen(u'[info] Writing video description to: ' + descfn) - - def report_writesubtitles(self, srtfn): - """ Report that the subtitles file is being written """ - self.to_screen(u'[info] Writing video subtitles to: ' + srtfn) - - def report_writeinfojson(self, infofn): - """ Report that the metadata file has been written """ - self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn) - - def report_destination(self, filename): - """Report destination filename.""" - self.to_screen(u'[download] Destination: ' + filename) - - def report_progress(self, percent_str, data_len_str, speed_str, eta_str): - """Report download progress.""" - if self.params.get('noprogress', False): - return - self.to_screen(u'\r[download] %s of %s at %s ETA %s' % - (percent_str, data_len_str, speed_str, eta_str), skip_eol=True) - self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' % - 
(percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip())) - - def report_resuming_byte(self, resume_len): - """Report attempt to resume at given byte.""" - self.to_screen(u'[download] Resuming download at byte %s' % resume_len) - - def report_retry(self, count, retries): - """Report retry in case of HTTP error 5xx""" - self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries)) - - def report_file_already_downloaded(self, file_name): - """Report file has already been fully downloaded.""" - try: - self.to_screen(u'[download] %s has already been downloaded' % file_name) - except (UnicodeEncodeError), err: - self.to_screen(u'[download] The file has already been downloaded') - - def report_unable_to_resume(self): - """Report it was impossible to resume download.""" - self.to_screen(u'[download] Unable to resume') - - def report_finish(self): - """Report download finished.""" - if self.params.get('noprogress', False): - self.to_screen(u'[download] Download completed') - else: - self.to_screen(u'') - - def increment_downloads(self): - """Increment the ordinal that assigns a number to each file.""" - self._num_downloads += 1 - - def prepare_filename(self, info_dict): - """Generate the output filename.""" - try: - template_dict = dict(info_dict) - template_dict['epoch'] = unicode(long(time.time())) - template_dict['autonumber'] = unicode('%05d' % self._num_downloads) - filename = self.params['outtmpl'] % template_dict - return filename - except (ValueError, KeyError), err: - self.trouble(u'ERROR: invalid system charset or erroneous output template') - return None - - def _match_entry(self, info_dict): - """ Returns None iff the file should be downloaded """ - - title = info_dict['title'] - matchtitle = self.params.get('matchtitle', False) - if matchtitle: - matchtitle = matchtitle.decode('utf8') - if not re.search(matchtitle, title, re.IGNORECASE): - return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"' - rejecttitle = self.params.get('rejecttitle', False) - if rejecttitle: - rejecttitle = rejecttitle.decode('utf8') - if re.search(rejecttitle, title, re.IGNORECASE): - return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"' - return None - - def process_info(self, info_dict): - """Process a single dictionary returned by an InfoExtractor.""" - - info_dict['stitle'] = sanitize_filename(info_dict['title']) - - reason = self._match_entry(info_dict) - if reason is not None: - self.to_screen(u'[download] ' + reason) - return - - max_downloads = self.params.get('max_downloads') - if max_downloads is not None: - if self._num_downloads > int(max_downloads): - raise MaxDownloadsReached() - - filename = self.prepare_filename(info_dict) - - # Forced printings - if self.params.get('forcetitle', False): - print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace') - if self.params.get('forceurl', False): - print info_dict['url'].encode(preferredencoding(), 'xmlcharrefreplace') - if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict: - print info_dict['thumbnail'].encode(preferredencoding(), 'xmlcharrefreplace') - if self.params.get('forcedescription', False) and 'description' in info_dict: - print info_dict['description'].encode(preferredencoding(), 'xmlcharrefreplace') - if self.params.get('forcefilename', False) and filename is not None: - print filename.encode(preferredencoding(), 'xmlcharrefreplace') - if self.params.get('forceformat', False): - print 
info_dict['format'].encode(preferredencoding(), 'xmlcharrefreplace') - - # Do nothing else if in simulate mode - if self.params.get('simulate', False): - return - - if filename is None: - return - - try: - dn = os.path.dirname(encodeFilename(filename)) - if dn != '' and not os.path.exists(dn): # dn is already encoded - os.makedirs(dn) - except (OSError, IOError), err: - self.trouble(u'ERROR: unable to create directory ' + unicode(err)) - return - - if self.params.get('writedescription', False): - try: - descfn = filename + u'.description' - self.report_writedescription(descfn) - descfile = open(encodeFilename(descfn), 'wb') - try: - descfile.write(info_dict['description'].encode('utf-8')) - finally: - descfile.close() - except (OSError, IOError): - self.trouble(u'ERROR: Cannot write description file ' + descfn) - return - - if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']: - # subtitles download errors are already managed as troubles in relevant IE - # that way it will silently go on when used with unsupporting IE - try: - srtfn = filename.rsplit('.', 1)[0] + u'.srt' - self.report_writesubtitles(srtfn) - srtfile = open(encodeFilename(srtfn), 'wb') - try: - srtfile.write(info_dict['subtitles'].encode('utf-8')) - finally: - srtfile.close() - except (OSError, IOError): - self.trouble(u'ERROR: Cannot write subtitles file ' + descfn) - return - - if self.params.get('writeinfojson', False): - infofn = filename + u'.info.json' - self.report_writeinfojson(infofn) - try: - json.dump - except (NameError,AttributeError): - self.trouble(u'ERROR: No JSON encoder found. Update to Python 2.6+, setup a json module, or leave out --write-info-json.') - return - try: - infof = open(encodeFilename(infofn), 'wb') - try: - json_info_dict = dict((k,v) for k,v in info_dict.iteritems() if not k in ('urlhandle',)) - json.dump(json_info_dict, infof) - finally: - infof.close() - except (OSError, IOError): - self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn) - return - - if not self.params.get('skip_download', False): - if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)): - success = True - else: - try: - success = self._do_download(filename, info_dict) - except (OSError, IOError), err: - raise UnavailableVideoError - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self.trouble(u'ERROR: unable to download video data: %s' % str(err)) - return - except (ContentTooShortError, ), err: - self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) - return - - if success: - try: - self.post_process(filename, info_dict) - except (PostProcessingError), err: - self.trouble(u'ERROR: postprocessing: %s' % str(err)) - return - - def download(self, url_list): - """Download a given list of URLs.""" - if len(url_list) > 1 and self.fixed_template(): - raise SameFileError(self.params['outtmpl']) - - for url in url_list: - suitable_found = False - for ie in self._ies: - # Go to next InfoExtractor if not suitable - if not ie.suitable(url): - continue - - # Suitable InfoExtractor found - suitable_found = True - - # Extract information from URL and process it - videos = ie.extract(url) - for video in videos or []: - video['extractor'] = ie.IE_NAME - try: - self.increment_downloads() - self.process_info(video) - except UnavailableVideoError: - self.trouble(u'\nERROR: unable to download video') - - # Suitable InfoExtractor had been found; go to next URL - break - - if 
not suitable_found: - self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url) - - return self._download_retcode - - def post_process(self, filename, ie_info): - """Run the postprocessing chain on the given file.""" - info = dict(ie_info) - info['filepath'] = filename - for pp in self._pps: - info = pp.run(info) - if info is None: - break - - def _download_with_rtmpdump(self, filename, url, player_url): - self.report_destination(filename) - tmpfilename = self.temp_name(filename) - - # Check for rtmpdump first - try: - subprocess.call(['rtmpdump', '-h'], stdout=(file(os.path.devnull, 'w')), stderr=subprocess.STDOUT) - except (OSError, IOError): - self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run') - return False - - # Download using rtmpdump. rtmpdump returns exit code 2 when - # the connection was interrumpted and resuming appears to be - # possible. This is part of rtmpdump's normal usage, AFAIK. - basic_args = ['rtmpdump', '-q'] + [[], ['-W', player_url]][player_url is not None] + ['-r', url, '-o', tmpfilename] - args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)] - if self.params.get('verbose', False): - try: - import pipes - shell_quote = lambda args: ' '.join(map(pipes.quote, args)) - except ImportError: - shell_quote = repr - self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args)) - retval = subprocess.call(args) - while retval == 2 or retval == 1: - prevsize = os.path.getsize(encodeFilename(tmpfilename)) - self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True) - time.sleep(5.0) # This seems to be needed - retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1]) - cursize = os.path.getsize(encodeFilename(tmpfilename)) - if prevsize == cursize and retval == 1: - break - # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those - if prevsize == cursize and retval == 2 and cursize > 1024: - self.to_screen(u'\r[rtmpdump] Could not download the whole video. 
This can happen for some advertisements.') - retval = 0 - break - if retval == 0: - self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(encodeFilename(tmpfilename))) - self.try_rename(tmpfilename, filename) - return True - else: - self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval) - return False - - def _do_download(self, filename, info_dict): - url = info_dict['url'] - player_url = info_dict.get('player_url', None) - - # Check file already present - if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False): - self.report_file_already_downloaded(filename) - return True - - # Attempt to download using rtmpdump - if url.startswith('rtmp'): - return self._download_with_rtmpdump(filename, url, player_url) - - tmpfilename = self.temp_name(filename) - stream = None - - # Do not include the Accept-Encoding header - headers = {'Youtubedl-no-compression': 'True'} - basic_request = urllib2.Request(url, None, headers) - request = urllib2.Request(url, None, headers) - - # Establish possible resume length - if os.path.isfile(encodeFilename(tmpfilename)): - resume_len = os.path.getsize(encodeFilename(tmpfilename)) - else: - resume_len = 0 - - open_mode = 'wb' - if resume_len != 0: - if self.params.get('continuedl', False): - self.report_resuming_byte(resume_len) - request.add_header('Range','bytes=%d-' % resume_len) - open_mode = 'ab' - else: - resume_len = 0 - - count = 0 - retries = self.params.get('retries', 0) - while count <= retries: - # Establish connection - try: - if count == 0 and 'urlhandle' in info_dict: - data = info_dict['urlhandle'] - data = urllib2.urlopen(request) - break - except (urllib2.HTTPError, ), err: - if (err.code < 500 or err.code >= 600) and err.code != 416: - # Unexpected HTTP error - raise - elif err.code == 416: - # Unable to resume (requested range not satisfiable) - try: - # Open the connection again without the range header - data = urllib2.urlopen(basic_request) - content_length = data.info()['Content-Length'] - except (urllib2.HTTPError, ), err: - if err.code < 500 or err.code >= 600: - raise - else: - # Examine the reported length - if (content_length is not None and - (resume_len - 100 < long(content_length) < resume_len + 100)): - # The file had already been fully downloaded. - # Explanation to the above condition: in issue #175 it was revealed that - # YouTube sometimes adds or removes a few bytes from the end of the file, - # changing the file size slightly and causing problems for some users. So - # I decided to implement a suggested change and consider the file - # completely downloaded if the file size differs less than 100 bytes from - # the one in the hard drive. 
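The tolerance described in that comment is compact enough to restate as a standalone predicate (a sketch only; the function name is invented for illustration):

    def looks_fully_downloaded(resume_len, content_length):
        # Treat the file as complete when the server-reported length is
        # within 100 bytes of what is already on disk (see issue #175).
        if content_length is None:
            return False
        return resume_len - 100 < int(content_length) < resume_len + 100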
-                        self.report_file_already_downloaded(filename)
-                        self.try_rename(tmpfilename, filename)
-                        return True
-                    else:
-                        # The length does not match, we start the download over
-                        self.report_unable_to_resume()
-                        open_mode = 'wb'
-                        break
-            # Retry
-            count += 1
-            if count <= retries:
-                self.report_retry(count, retries)
-
-        if count > retries:
-            self.trouble(u'ERROR: giving up after %s retries' % retries)
-            return False
-
-        data_len = data.info().get('Content-length', None)
-        if data_len is not None:
-            data_len = long(data_len) + resume_len
-        data_len_str = self.format_bytes(data_len)
-        byte_counter = 0 + resume_len
-        block_size = 1024
-        start = time.time()
-        while True:
-            # Download and write
-            before = time.time()
-            data_block = data.read(block_size)
-            after = time.time()
-            if len(data_block) == 0:
-                break
-            byte_counter += len(data_block)
-
-            # Open file just in time
-            if stream is None:
-                try:
-                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
-                    assert stream is not None
-                    filename = self.undo_temp_name(tmpfilename)
-                    self.report_destination(filename)
-                except (OSError, IOError), err:
-                    self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
-                    return False
-            try:
-                stream.write(data_block)
-            except (IOError, OSError), err:
-                self.trouble(u'\nERROR: unable to write data: %s' % str(err))
-                return False
-            block_size = self.best_block_size(after - before, len(data_block))
-
-            # Progress message
-            speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
-            if data_len is None:
-                self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
-            else:
-                percent_str = self.calc_percent(byte_counter, data_len)
-                eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
-                self.report_progress(percent_str, data_len_str, speed_str, eta_str)
-
-            # Apply rate limit
-            self.slow_down(start, byte_counter - resume_len)
-
-        if stream is None:
-            self.trouble(u'\nERROR: Did not get any data blocks')
-            return False
-        stream.close()
-        self.report_finish()
-        if data_len is not None and byte_counter != data_len:
-            raise ContentTooShortError(byte_counter, long(data_len))
-        self.try_rename(tmpfilename, filename)
-
-        # Update file modification time
-        if self.params.get('updatetime', True):
-            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))
-
-        return True
+    """File Downloader class.
+
+    File downloader objects are the ones responsible for downloading the
+    actual video file and writing it to disk if the user has requested
+    it, among some other tasks. In most cases there should be one per
+    program. Given a video URL, the downloader doesn't know how to
+    extract all the needed information (a task that the InfoExtractors
+    do), so it has to pass the URL to one of them.
+
+    For this, file downloader objects have a method that allows
+    InfoExtractors to be registered in a given order. When it is passed
+    a URL, the file downloader hands it to the first InfoExtractor it
+    finds that reports being able to handle it. The InfoExtractor extracts
+    all the information about the video or videos the URL refers to, and
+    asks the FileDownloader to process the video information, possibly
+    downloading the video.
+
+    File downloaders accept a lot of parameters. In order not to saturate
+    the object constructor with arguments, it receives a dictionary of
+    options instead. These options are available through the params
+    attribute for the InfoExtractors to use.
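Concretely, a caller wires this together roughly as follows (a minimal sketch; the option keys come from the list below, and the test URL is the one used in test/tests.json):

    fd = FileDownloader({
        'outtmpl': u'%(id)s.%(ext)s',  # output filename template
        'retries': 3,                  # retry count for HTTP 5xx errors
        'continuedl': True,            # try to resume .part files
        'quiet': False,
    })
    fd.add_info_extractor(YoutubeIE())
    fd.download([u'http://www.youtube.com/watch?v=BaW_jenozKc'])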
The FileDownloader also + registers itself as the downloader in charge for the InfoExtractors + that are added to it, so this is a "mutual registration". + + Available options: + + username: Username for authentication purposes. + password: Password for authentication purposes. + usenetrc: Use netrc for authentication instead. + quiet: Do not print messages to stdout. + forceurl: Force printing final URL. + forcetitle: Force printing title. + forcethumbnail: Force printing thumbnail URL. + forcedescription: Force printing description. + forcefilename: Force printing final filename. + simulate: Do not download the video files. + format: Video format code. + format_limit: Highest quality format to try. + outtmpl: Template for output names. + restrictfilenames: Do not allow "&" and spaces in file names + ignoreerrors: Do not stop on download errors. + ratelimit: Download speed limit, in bytes/sec. + nooverwrites: Prevent overwriting files. + retries: Number of times to retry for HTTP error 5xx + buffersize: Size of download buffer in bytes. + noresizebuffer: Do not automatically resize the download buffer. + continuedl: Try to continue downloads if possible. + noprogress: Do not print the progress bar. + playliststart: Playlist item to start at. + playlistend: Playlist item to end at. + matchtitle: Download only matching titles. + rejecttitle: Reject downloads for matching titles. + logtostderr: Log messages to stderr instead of stdout. + consoletitle: Display progress in console window's titlebar. + nopart: Do not use temporary .part files. + updatetime: Use the Last-modified header to set output file timestamps. + writedescription: Write the video description to a .description file + writeinfojson: Write the video description to a .info.json file + writesubtitles: Write the video subtitles to a .srt file + subtitleslang: Language of the subtitles to download + test: Download only first bytes to test the downloader. + """ + + params = None + _ies = [] + _pps = [] + _download_retcode = None + _num_downloads = None + _screen_file = None + + def __init__(self, params): + """Create a FileDownloader object with the given options.""" + self._ies = [] + self._pps = [] + self._download_retcode = 0 + self._num_downloads = 0 + self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)] + self.params = params + + if '%(stitle)s' in self.params['outtmpl']: + self.to_stderr(u'WARNING: %(stitle)s is deprecated. 
Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.') + + @staticmethod + def format_bytes(bytes): + if bytes is None: + return 'N/A' + if type(bytes) is str: + bytes = float(bytes) + if bytes == 0.0: + exponent = 0 + else: + exponent = int(math.log(bytes, 1024.0)) + suffix = 'bkMGTPEZY'[exponent] + converted = float(bytes) / float(1024 ** exponent) + return '%.2f%s' % (converted, suffix) + + @staticmethod + def calc_percent(byte_counter, data_len): + if data_len is None: + return '---.-%' + return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0)) + + @staticmethod + def calc_eta(start, now, total, current): + if total is None: + return '--:--' + dif = now - start + if current == 0 or dif < 0.001: # One millisecond + return '--:--' + rate = float(current) / dif + eta = int((float(total) - float(current)) / rate) + (eta_mins, eta_secs) = divmod(eta, 60) + if eta_mins > 99: + return '--:--' + return '%02d:%02d' % (eta_mins, eta_secs) + + @staticmethod + def calc_speed(start, now, bytes): + dif = now - start + if bytes == 0 or dif < 0.001: # One millisecond + return '%10s' % '---b/s' + return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif)) + + @staticmethod + def best_block_size(elapsed_time, bytes): + new_min = max(bytes / 2.0, 1.0) + new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB + if elapsed_time < 0.001: + return int(new_max) + rate = bytes / elapsed_time + if rate > new_max: + return int(new_max) + if rate < new_min: + return int(new_min) + return int(rate) + + @staticmethod + def parse_bytes(bytestr): + """Parse a string indicating a byte quantity into an integer.""" + matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr) + if matchobj is None: + return None + number = float(matchobj.group(1)) + multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower()) + return int(round(number * multiplier)) + + def add_info_extractor(self, ie): + """Add an InfoExtractor object to the end of the list.""" + self._ies.append(ie) + ie.set_downloader(self) + + def add_post_processor(self, pp): + """Add a PostProcessor object to the end of the chain.""" + self._pps.append(pp) + pp.set_downloader(self) + + def to_screen(self, message, skip_eol=False): + """Print message to stdout if not in quiet mode.""" + assert type(message) == type(u'') + if not self.params.get('quiet', False): + terminator = [u'\n', u''][skip_eol] + output = message + terminator + if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr + output = output.encode(preferredencoding(), 'ignore') + self._screen_file.write(output) + self._screen_file.flush() + + def to_stderr(self, message): + """Print message to stderr.""" + assert type(message) == type(u'') + output = message + u'\n' + if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3: # Python 2 lies about the mode of sys.stdout/sys.stderr + output = output.encode(preferredencoding()) + sys.stderr.write(output) + + def to_cons_title(self, message): + """Set console/terminal window title to message.""" + if not self.params.get('consoletitle', False): + return + if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow(): + # c_wchar_p() might not be necessary if `message` is + # already of type unicode() + ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) + elif 'TERM' in os.environ: + sys.stderr.write('\033]0;%s\007' % 
message.encode(preferredencoding())) + + def fixed_template(self): + """Checks if the output template is fixed.""" + return (re.search(u'(?u)%\\(.+?\\)s', self.params['outtmpl']) is None) + + def trouble(self, message=None, tb=None): + """Determine action to take when a download problem appears. + + Depending on if the downloader has been configured to ignore + download errors or not, this method may throw an exception or + not when errors are found, after printing the message. + + tb, if given, is additional traceback information. + """ + if message is not None: + self.to_stderr(message) + if self.params.get('verbose'): + if tb is None: + tb_data = traceback.format_list(traceback.extract_stack()) + tb = u''.join(tb_data) + self.to_stderr(tb) + if not self.params.get('ignoreerrors', False): + raise DownloadError(message) + self._download_retcode = 1 + + def slow_down(self, start_time, byte_counter): + """Sleep if the download speed is over the rate limit.""" + rate_limit = self.params.get('ratelimit', None) + if rate_limit is None or byte_counter == 0: + return + now = time.time() + elapsed = now - start_time + if elapsed <= 0.0: + return + speed = float(byte_counter) / elapsed + if speed > rate_limit: + time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit) + + def temp_name(self, filename): + """Returns a temporary filename for the given filename.""" + if self.params.get('nopart', False) or filename == u'-' or \ + (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))): + return filename + return filename + u'.part' + + def undo_temp_name(self, filename): + if filename.endswith(u'.part'): + return filename[:-len(u'.part')] + return filename + + def try_rename(self, old_filename, new_filename): + try: + if old_filename == new_filename: + return + os.rename(encodeFilename(old_filename), encodeFilename(new_filename)) + except (IOError, OSError) as err: + self.trouble(u'ERROR: unable to rename file') + + def try_utime(self, filename, last_modified_hdr): + """Try to set the last-modified time of the given file.""" + if last_modified_hdr is None: + return + if not os.path.isfile(encodeFilename(filename)): + return + timestr = last_modified_hdr + if timestr is None: + return + filetime = timeconvert(timestr) + if filetime is None: + return filetime + try: + os.utime(filename, (time.time(), filetime)) + except: + pass + return filetime + + def report_writedescription(self, descfn): + """ Report that the description file is being written """ + self.to_screen(u'[info] Writing video description to: ' + descfn) + + def report_writesubtitles(self, srtfn): + """ Report that the subtitles file is being written """ + self.to_screen(u'[info] Writing video subtitles to: ' + srtfn) + + def report_writeinfojson(self, infofn): + """ Report that the metadata file has been written """ + self.to_screen(u'[info] Video description metadata as JSON to: ' + infofn) + + def report_destination(self, filename): + """Report destination filename.""" + self.to_screen(u'[download] Destination: ' + filename) + + def report_progress(self, percent_str, data_len_str, speed_str, eta_str): + """Report download progress.""" + if self.params.get('noprogress', False): + return + self.to_screen(u'\r[download] %s of %s at %s ETA %s' % + (percent_str, data_len_str, speed_str, eta_str), skip_eol=True) + self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' % + (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip())) + + def report_resuming_byte(self, 
resume_len): + """Report attempt to resume at given byte.""" + self.to_screen(u'[download] Resuming download at byte %s' % resume_len) + + def report_retry(self, count, retries): + """Report retry in case of HTTP error 5xx""" + self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries)) + + def report_file_already_downloaded(self, file_name): + """Report file has already been fully downloaded.""" + try: + self.to_screen(u'[download] %s has already been downloaded' % file_name) + except (UnicodeEncodeError) as err: + self.to_screen(u'[download] The file has already been downloaded') + + def report_unable_to_resume(self): + """Report it was impossible to resume download.""" + self.to_screen(u'[download] Unable to resume') + + def report_finish(self): + """Report download finished.""" + if self.params.get('noprogress', False): + self.to_screen(u'[download] Download completed') + else: + self.to_screen(u'') + + def increment_downloads(self): + """Increment the ordinal that assigns a number to each file.""" + self._num_downloads += 1 + + def prepare_filename(self, info_dict): + """Generate the output filename.""" + try: + template_dict = dict(info_dict) + + template_dict['epoch'] = int(time.time()) + template_dict['autonumber'] = u'%05d' % self._num_downloads + + sanitize = lambda k,v: sanitize_filename( + u'NA' if v is None else compat_str(v), + restricted=self.params.get('restrictfilenames'), + is_id=(k==u'id')) + template_dict = dict((k, sanitize(k, v)) for k,v in template_dict.items()) + + filename = self.params['outtmpl'] % template_dict + return filename + except (ValueError, KeyError) as err: + self.trouble(u'ERROR: invalid system charset or erroneous output template') + return None + + def _match_entry(self, info_dict): + """ Returns None iff the file should be downloaded """ + + title = info_dict['title'] + matchtitle = self.params.get('matchtitle', False) + if matchtitle: + matchtitle = matchtitle.decode('utf8') + if not re.search(matchtitle, title, re.IGNORECASE): + return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"' + rejecttitle = self.params.get('rejecttitle', False) + if rejecttitle: + rejecttitle = rejecttitle.decode('utf8') + if re.search(rejecttitle, title, re.IGNORECASE): + return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"' + return None + + def process_info(self, info_dict): + """Process a single dictionary returned by an InfoExtractor.""" + + # Keep for backwards compatibility + info_dict['stitle'] = info_dict['title'] + + if not 'format' in info_dict: + info_dict['format'] = info_dict['ext'] + + reason = self._match_entry(info_dict) + if reason is not None: + self.to_screen(u'[download] ' + reason) + return + + max_downloads = self.params.get('max_downloads') + if max_downloads is not None: + if self._num_downloads > int(max_downloads): + raise MaxDownloadsReached() + + filename = self.prepare_filename(info_dict) + + # Forced printings + if self.params.get('forcetitle', False): + compat_print(info_dict['title']) + if self.params.get('forceurl', False): + compat_print(info_dict['url']) + if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict: + compat_print(info_dict['thumbnail']) + if self.params.get('forcedescription', False) and 'description' in info_dict: + compat_print(info_dict['description']) + if self.params.get('forcefilename', False) and filename is not None: + compat_print(filename) + if self.params.get('forceformat', False): + 
compat_print(info_dict['format']) + + # Do nothing else if in simulate mode + if self.params.get('simulate', False): + return + + if filename is None: + return + + try: + dn = os.path.dirname(encodeFilename(filename)) + if dn != '' and not os.path.exists(dn): # dn is already encoded + os.makedirs(dn) + except (OSError, IOError) as err: + self.trouble(u'ERROR: unable to create directory ' + compat_str(err)) + return + + if self.params.get('writedescription', False): + try: + descfn = filename + u'.description' + self.report_writedescription(descfn) + with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: + descfile.write(info_dict['description']) + except (OSError, IOError): + self.trouble(u'ERROR: Cannot write description file ' + descfn) + return + + if self.params.get('writesubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']: + # subtitles download errors are already managed as troubles in relevant IE + # that way it will silently go on when used with unsupporting IE + try: + srtfn = filename.rsplit('.', 1)[0] + u'.srt' + self.report_writesubtitles(srtfn) + with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile: + srtfile.write(info_dict['subtitles']) + except (OSError, IOError): + self.trouble(u'ERROR: Cannot write subtitles file ' + descfn) + return + + if self.params.get('writeinfojson', False): + infofn = filename + u'.info.json' + self.report_writeinfojson(infofn) + try: + json_info_dict = dict((k, v) for k,v in info_dict.items() if not k in ['urlhandle']) + write_json_file(json_info_dict, encodeFilename(infofn)) + except (OSError, IOError): + self.trouble(u'ERROR: Cannot write metadata to JSON file ' + infofn) + return + + if not self.params.get('skip_download', False): + if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)): + success = True + else: + try: + success = self._do_download(filename, info_dict) + except (OSError, IOError) as err: + raise UnavailableVideoError() + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self.trouble(u'ERROR: unable to download video data: %s' % str(err)) + return + except (ContentTooShortError, ) as err: + self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) + return + + if success: + try: + self.post_process(filename, info_dict) + except (PostProcessingError) as err: + self.trouble(u'ERROR: postprocessing: %s' % str(err)) + return + + def download(self, url_list): + """Download a given list of URLs.""" + if len(url_list) > 1 and self.fixed_template(): + raise SameFileError(self.params['outtmpl']) + + for url in url_list: + suitable_found = False + for ie in self._ies: + # Go to next InfoExtractor if not suitable + if not ie.suitable(url): + continue + + # Warn if the _WORKING attribute is False + if not ie.working(): + self.to_stderr(u'WARNING: the program functionality for this site has been marked as broken, ' + u'and will probably not work. 
If you want to go on, use the -i option.')
+
+                # Suitable InfoExtractor found
+                suitable_found = True
+
+                # Extract information from URL and process it
+                try:
+                    videos = ie.extract(url)
+                except ExtractorError as de: # An error we somewhat expected
+                    self.trouble(u'ERROR: ' + compat_str(de), de.format_traceback())
+                    break
+                except Exception as e:
+                    if self.params.get('ignoreerrors', False):
+                        self.trouble(u'ERROR: ' + compat_str(e), tb=compat_str(traceback.format_exc()))
+                        break
+                    else:
+                        raise
+
+                if len(videos or []) > 1 and self.fixed_template():
+                    raise SameFileError(self.params['outtmpl'])
+
+                for video in videos or []:
+                    video['extractor'] = ie.IE_NAME
+                    try:
+                        self.increment_downloads()
+                        self.process_info(video)
+                    except UnavailableVideoError:
+                        self.trouble(u'\nERROR: unable to download video')
+
+                # Suitable InfoExtractor had been found; go to next URL
+                break
+
+            if not suitable_found:
+                self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
+
+        return self._download_retcode
+
+    def post_process(self, filename, ie_info):
+        """Run the postprocessing chain on the given file."""
+        info = dict(ie_info)
+        info['filepath'] = filename
+        for pp in self._pps:
+            info = pp.run(info)
+            if info is None:
+                break
+
+    def _download_with_rtmpdump(self, filename, url, player_url, page_url):
+        self.report_destination(filename)
+        tmpfilename = self.temp_name(filename)
+
+        # Check for rtmpdump first
+        try:
+            subprocess.call(['rtmpdump', '-h'], stdout=open(os.path.devnull, 'w'), stderr=subprocess.STDOUT)
+        except (OSError, IOError):
+            self.trouble(u'ERROR: RTMP download detected but "rtmpdump" could not be run')
+            return False
+
+        # Download using rtmpdump. rtmpdump returns exit code 2 when
+        # the connection was interrupted and resuming appears to be
+        # possible. This is part of rtmpdump's normal usage, AFAIK.
+        basic_args = ['rtmpdump', '-q', '-r', url, '-o', tmpfilename]
+        if player_url is not None:
+            basic_args += ['-W', player_url]
+        if page_url is not None:
+            basic_args += ['--pageUrl', page_url]
+        args = basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)]
+        if self.params.get('verbose', False):
+            try:
+                import pipes
+                shell_quote = lambda args: ' '.join(map(pipes.quote, args))
+            except ImportError:
+                shell_quote = repr
+            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
+        retval = subprocess.call(args)
+        while retval == 2 or retval == 1:
+            prevsize = os.path.getsize(encodeFilename(tmpfilename))
+            self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
+            time.sleep(5.0) # This seems to be needed
+            retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
+            cursize = os.path.getsize(encodeFilename(tmpfilename))
+            if prevsize == cursize and retval == 1:
+                break
+            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
+            if prevsize == cursize and retval == 2 and cursize > 1024:
+                self.to_screen(u'\r[rtmpdump] Could not download the whole video.
This can happen for some advertisements.') + retval = 0 + break + if retval == 0: + self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(encodeFilename(tmpfilename))) + self.try_rename(tmpfilename, filename) + return True + else: + self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval) + return False + + def _do_download(self, filename, info_dict): + url = info_dict['url'] + + # Check file already present + if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False): + self.report_file_already_downloaded(filename) + return True + + # Attempt to download using rtmpdump + if url.startswith('rtmp'): + return self._download_with_rtmpdump(filename, url, + info_dict.get('player_url', None), + info_dict.get('page_url', None)) + + tmpfilename = self.temp_name(filename) + stream = None + + # Do not include the Accept-Encoding header + headers = {'Youtubedl-no-compression': 'True'} + basic_request = compat_urllib_request.Request(url, None, headers) + request = compat_urllib_request.Request(url, None, headers) + + if self.params.get('test', False): + request.add_header('Range','bytes=0-10240') + + # Establish possible resume length + if os.path.isfile(encodeFilename(tmpfilename)): + resume_len = os.path.getsize(encodeFilename(tmpfilename)) + else: + resume_len = 0 + + open_mode = 'wb' + if resume_len != 0: + if self.params.get('continuedl', False): + self.report_resuming_byte(resume_len) + request.add_header('Range','bytes=%d-' % resume_len) + open_mode = 'ab' + else: + resume_len = 0 + + count = 0 + retries = self.params.get('retries', 0) + while count <= retries: + # Establish connection + try: + if count == 0 and 'urlhandle' in info_dict: + data = info_dict['urlhandle'] + data = compat_urllib_request.urlopen(request) + break + except (compat_urllib_error.HTTPError, ) as err: + if (err.code < 500 or err.code >= 600) and err.code != 416: + # Unexpected HTTP error + raise + elif err.code == 416: + # Unable to resume (requested range not satisfiable) + try: + # Open the connection again without the range header + data = compat_urllib_request.urlopen(basic_request) + content_length = data.info()['Content-Length'] + except (compat_urllib_error.HTTPError, ) as err: + if err.code < 500 or err.code >= 600: + raise + else: + # Examine the reported length + if (content_length is not None and + (resume_len - 100 < int(content_length) < resume_len + 100)): + # The file had already been fully downloaded. + # Explanation to the above condition: in issue #175 it was revealed that + # YouTube sometimes adds or removes a few bytes from the end of the file, + # changing the file size slightly and causing problems for some users. So + # I decided to implement a suggested change and consider the file + # completely downloaded if the file size differs less than 100 bytes from + # the one in the hard drive. 
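Pulled out of context, the resume handshake above amounts to the following (a sketch reusing the compat aliases from youtube_dl.utils; under Python 3 they map to urllib.request and urllib.error):

    resume_len = os.path.getsize(tmpfilename) if os.path.isfile(tmpfilename) else 0
    headers = {'Youtubedl-no-compression': 'True'}
    request = compat_urllib_request.Request(url, None, headers)
    open_mode = 'wb'
    if resume_len:
        request.add_header('Range', 'bytes=%d-' % resume_len)
        open_mode = 'ab'                   # append to the existing .part file
    try:
        data = compat_urllib_request.urlopen(request)
    except compat_urllib_error.HTTPError as err:
        if err.code != 416:                # 416: requested range not satisfiable
            raise
        # The server cannot resume; re-request without Range and start over.
        data = compat_urllib_request.urlopen(compat_urllib_request.Request(url, None, headers))
        open_mode = 'wb'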
+ self.report_file_already_downloaded(filename) + self.try_rename(tmpfilename, filename) + return True + else: + # The length does not match, we start the download over + self.report_unable_to_resume() + open_mode = 'wb' + break + # Retry + count += 1 + if count <= retries: + self.report_retry(count, retries) + + if count > retries: + self.trouble(u'ERROR: giving up after %s retries' % retries) + return False + + data_len = data.info().get('Content-length', None) + if data_len is not None: + data_len = int(data_len) + resume_len + data_len_str = self.format_bytes(data_len) + byte_counter = 0 + resume_len + block_size = self.params.get('buffersize', 1024) + start = time.time() + while True: + # Download and write + before = time.time() + data_block = data.read(block_size) + after = time.time() + if len(data_block) == 0: + break + byte_counter += len(data_block) + + # Open file just in time + if stream is None: + try: + (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode) + assert stream is not None + filename = self.undo_temp_name(tmpfilename) + self.report_destination(filename) + except (OSError, IOError) as err: + self.trouble(u'ERROR: unable to open for writing: %s' % str(err)) + return False + try: + stream.write(data_block) + except (IOError, OSError) as err: + self.trouble(u'\nERROR: unable to write data: %s' % str(err)) + return False + if not self.params.get('noresizebuffer', False): + block_size = self.best_block_size(after - before, len(data_block)) + + # Progress message + speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len) + if data_len is None: + self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA') + else: + percent_str = self.calc_percent(byte_counter, data_len) + eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len) + self.report_progress(percent_str, data_len_str, speed_str, eta_str) + + # Apply rate limit + self.slow_down(start, byte_counter - resume_len) + + if stream is None: + self.trouble(u'\nERROR: Did not get any data blocks') + return False + stream.close() + self.report_finish() + if data_len is not None and byte_counter != data_len: + raise ContentTooShortError(byte_counter, int(data_len)) + self.try_rename(tmpfilename, filename) + + # Update file modification time + if self.params.get('updatetime', True): + info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None)) + + return True diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py old mode 100644 new mode 100755 index d30de69435..e380f62a1f --- a/youtube_dl/InfoExtractors.py +++ b/youtube_dl/InfoExtractors.py @@ -1,3696 +1,3782 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +from __future__ import absolute_import + +import base64 import datetime -import HTMLParser -import httplib import netrc import os import re import socket import time -import urllib -import urllib2 import email.utils import xml.etree.ElementTree import random import math -from urlparse import parse_qs, urlparse -try: - import cStringIO as StringIO -except ImportError: - import StringIO - -from utils import * +from .utils import * class InfoExtractor(object): - """Information Extractor class. + """Information Extractor class. - Information extractors are the classes that, given a URL, extract - information from the video (or videos) the URL refers to. This - information includes the real video URL, the video title and simplified - title, author and others. 
The information is stored in a dictionary - which is then passed to the FileDownloader. The FileDownloader - processes this information possibly downloading the video to the file - system, among other possible outcomes. The dictionaries must include - the following fields: + Information extractors are the classes that, given a URL, extract + information about the video (or videos) the URL refers to. This + information includes the real video URL, the video title, author and + others. The information is stored in a dictionary which is then + passed to the FileDownloader. The FileDownloader processes this + information possibly downloading the video to the file system, among + other possible outcomes. - id: Video identifier. - url: Final video URL. - uploader: Nickname of the video uploader. - title: Literal title. - ext: Video filename extension. - format: Video format. - player_url: SWF Player URL (may be None). + The dictionaries must include the following fields: - The following fields are optional. Their primary purpose is to allow - youtube-dl to serve as the backend for a video search function, such - as the one in youtube2mp3. They are only used when their respective - forced printing functions are called: + id: Video identifier. + url: Final video URL. + title: Video title, unescaped. + ext: Video filename extension. + uploader: Full name of the video uploader. + upload_date: Video upload date (YYYYMMDD). - thumbnail: Full URL to a video thumbnail image. - description: One-line video description. + The following fields are optional: - Subclasses of this one should re-define the _real_initialize() and - _real_extract() methods and define a _VALID_URL regexp. - Probably, they should also be added to the list of extractors. - """ + format: The video format, defaults to ext (used for --get-format) + thumbnail: Full URL to a video thumbnail image. + description: One-line video description. + uploader_id: Nickname or id of the video uploader. + player_url: SWF Player URL (used for rtmpdump). + subtitles: The .srt file contents. + urlhandle: [internal] The urlHandle to be used to download the file, + like returned by urllib.request.urlopen - _ready = False - _downloader = None + The fields should all be Unicode strings. - def __init__(self, downloader=None): - """Constructor. Receives an optional downloader.""" - self._ready = False - self.set_downloader(downloader) + Subclasses of this one should re-define the _real_initialize() and + _real_extract() methods and define a _VALID_URL regexp. + Probably, they should also be added to the list of extractors. - def suitable(self, url): - """Receives a URL and returns True if suitable for this IE.""" - return re.match(self._VALID_URL, url) is not None + _real_extract() must return a *list* of information dictionaries as + described above. - def initialize(self): - """Initializes an instance (authentication, etc).""" - if not self._ready: - self._real_initialize() - self._ready = True + Finally, the _WORKING attribute should be set to False for broken IEs + in order to warn the users and skip the tests. + """ - def extract(self, url): - """Extracts URL information and returns it in list of dicts.""" - self.initialize() - return self._real_extract(url) + _ready = False + _downloader = None + _WORKING = True - def set_downloader(self, downloader): - """Sets the downloader for this IE.""" - self._downloader = downloader + def __init__(self, downloader=None): + """Constructor. 
Receives an optional downloader.""" + self._ready = False + self.set_downloader(downloader) - def _real_initialize(self): - """Real initialization process. Redefine in subclasses.""" - pass + def suitable(self, url): + """Receives a URL and returns True if suitable for this IE.""" + return re.match(self._VALID_URL, url) is not None - def _real_extract(self, url): - """Real extraction process. Redefine in subclasses.""" - pass + def working(self): + """Getter method for _WORKING.""" + return self._WORKING + def initialize(self): + """Initializes an instance (authentication, etc).""" + if not self._ready: + self._real_initialize() + self._ready = True + def extract(self, url): + """Extracts URL information and returns it in list of dicts.""" + self.initialize() + return self._real_extract(url) + + def set_downloader(self, downloader): + """Sets the downloader for this IE.""" + self._downloader = downloader + + def _real_initialize(self): + """Real initialization process. Redefine in subclasses.""" + pass + + def _real_extract(self, url): + """Real extraction process. Redefine in subclasses.""" + pass + + @property + def IE_NAME(self): + return type(self).__name__[:-2] + + def _download_webpage(self, url_or_request, video_id, note=None, errnote=None): + if note is None: + note = u'Downloading video webpage' + self._downloader.to_screen(u'[%s] %s: %s' % (self.IE_NAME, video_id, note)) + try: + urlh = compat_urllib_request.urlopen(url_or_request) + webpage_bytes = urlh.read() + return webpage_bytes.decode('utf-8', 'replace') + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + if errnote is None: + errnote = u'Unable to download webpage' + raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2]) class YoutubeIE(InfoExtractor): - """Information extractor for youtube.com.""" + """Information extractor for youtube.com.""" - _VALID_URL = r"""^ - ( - (?:https?://)? # http(s):// (optional) - (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/| - tube\.majestyc\.net/) # the various hostnames, with wildcard subdomains - (?:.*?\#/)? # handle anchor (#/) redirect urls - (?!view_play_list|my_playlists|artist|playlist) # ignore playlist URLs - (?: # the various things that can precede the ID: - (?:(?:v|embed|e)/) # v/ or embed/ or e/ - |(?: # or the v= param in all its forms - (?:watch(?:_popup)?(?:\.php)?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) - (?:\?|\#!?) # the params delimiter ? or # or #! - (?:.+&)? # any other preceding param (like /?s=tuff&v=xxxx) - v= - ) - )? # optional -> youtube.com/xxxx is OK - )? # all until now is optional -> you can pass the naked ID - ([0-9A-Za-z_-]+) # here is it! the YouTube video ID - (?(1).+)? 
# if we found the ID, everything can follow - $""" - _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1' - _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en' - _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en' - _NEXT_URL_RE = r'[\?&]next_url=([^&]+)' - _NETRC_MACHINE = 'youtube' - # Listed in order of quality - _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13'] - _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13'] - _video_extensions = { - '13': '3gp', - '17': 'mp4', - '18': 'mp4', - '22': 'mp4', - '37': 'mp4', - '38': 'video', # You actually don't know if this will be MOV, AVI or whatever - '43': 'webm', - '44': 'webm', - '45': 'webm', - '46': 'webm', - } - _video_dimensions = { - '5': '240x400', - '6': '???', - '13': '???', - '17': '144x176', - '18': '360x640', - '22': '720x1280', - '34': '360x640', - '35': '480x854', - '37': '1080x1920', - '38': '3072x4096', - '43': '360x640', - '44': '480x854', - '45': '720x1280', - '46': '1080x1920', - } - IE_NAME = u'youtube' + _VALID_URL = r"""^ + ( + (?:https?://)? # http(s):// (optional) + (?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/| + tube\.majestyc\.net/) # the various hostnames, with wildcard subdomains + (?:.*?\#/)? # handle anchor (#/) redirect urls + (?!view_play_list|my_playlists|artist|playlist) # ignore playlist URLs + (?: # the various things that can precede the ID: + (?:(?:v|embed|e)/) # v/ or embed/ or e/ + |(?: # or the v= param in all its forms + (?:watch(?:_popup)?(?:\.php)?)? # preceding watch(_popup|.php) or nothing (like /?v=xxxx) + (?:\?|\#!?) # the params delimiter ? or # or #! + (?:.*?&)? # any other preceding param (like /?s=tuff&v=xxxx) + v= + ) + )? # optional -> youtube.com/xxxx is OK + )? # all until now is optional -> you can pass the naked ID + ([0-9A-Za-z_-]+) # here is it! the YouTube video ID + (?(1).+)? 
# if we found the ID, everything can follow + $""" + _LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1' + _LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en' + _AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en' + _NEXT_URL_RE = r'[\?&]next_url=([^&]+)' + _NETRC_MACHINE = 'youtube' + # Listed in order of quality + _available_formats = ['38', '37', '46', '22', '45', '35', '44', '34', '18', '43', '6', '5', '17', '13'] + _available_formats_prefer_free = ['38', '46', '37', '45', '22', '44', '35', '43', '34', '18', '6', '5', '17', '13'] + _video_extensions = { + '13': '3gp', + '17': 'mp4', + '18': 'mp4', + '22': 'mp4', + '37': 'mp4', + '38': 'video', # You actually don't know if this will be MOV, AVI or whatever + '43': 'webm', + '44': 'webm', + '45': 'webm', + '46': 'webm', + } + _video_dimensions = { + '5': '240x400', + '6': '???', + '13': '???', + '17': '144x176', + '18': '360x640', + '22': '720x1280', + '34': '360x640', + '35': '480x854', + '37': '1080x1920', + '38': '3072x4096', + '43': '360x640', + '44': '480x854', + '45': '720x1280', + '46': '1080x1920', + } + IE_NAME = u'youtube' - def suitable(self, url): - """Receives a URL and returns True if suitable for this IE.""" - return re.match(self._VALID_URL, url, re.VERBOSE) is not None + def suitable(self, url): + """Receives a URL and returns True if suitable for this IE.""" + return re.match(self._VALID_URL, url, re.VERBOSE) is not None - def report_lang(self): - """Report attempt to set language.""" - self._downloader.to_screen(u'[youtube] Setting language') + def report_lang(self): + """Report attempt to set language.""" + self._downloader.to_screen(u'[youtube] Setting language') - def report_login(self): - """Report attempt to log in.""" - self._downloader.to_screen(u'[youtube] Logging in') + def report_login(self): + """Report attempt to log in.""" + self._downloader.to_screen(u'[youtube] Logging in') - def report_age_confirmation(self): - """Report attempt to confirm age.""" - self._downloader.to_screen(u'[youtube] Confirming age') + def report_age_confirmation(self): + """Report attempt to confirm age.""" + self._downloader.to_screen(u'[youtube] Confirming age') - def report_video_webpage_download(self, video_id): - """Report attempt to download video webpage.""" - self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id) + def report_video_webpage_download(self, video_id): + """Report attempt to download video webpage.""" + self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id) - def report_video_info_webpage_download(self, video_id): - """Report attempt to download video info webpage.""" - self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id) + def report_video_info_webpage_download(self, video_id): + """Report attempt to download video info webpage.""" + self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id) - def report_video_subtitles_download(self, video_id): - """Report attempt to download video info webpage.""" - self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id) + def report_video_subtitles_download(self, video_id): + """Report attempt to download video info webpage.""" + self._downloader.to_screen(u'[youtube] %s: Downloading video subtitles' % video_id) - def report_information_extraction(self, video_id): - """Report attempt to extract video information.""" - self._downloader.to_screen(u'[youtube] %s: 
Extracting video information' % video_id) + def report_information_extraction(self, video_id): + """Report attempt to extract video information.""" + self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id) - def report_unavailable_format(self, video_id, format): - """Report extracted video URL.""" - self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format)) + def report_unavailable_format(self, video_id, format): + """Report extracted video URL.""" + self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format)) - def report_rtmp_download(self): - """Indicate the download will use the RTMP protocol.""" - self._downloader.to_screen(u'[youtube] RTMP download detected') + def report_rtmp_download(self): + """Indicate the download will use the RTMP protocol.""" + self._downloader.to_screen(u'[youtube] RTMP download detected') - def _closed_captions_xml_to_srt(self, xml_string): - srt = '' - texts = re.findall(r'([^<]+)', xml_string, re.MULTILINE) - # TODO parse xml instead of regex - for n, (start, dur_tag, dur, caption) in enumerate(texts): - if not dur: dur = '4' - start = float(start) - end = start + float(dur) - start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000) - end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000) - caption = unescapeHTML(caption) - caption = unescapeHTML(caption) # double cycle, intentional - srt += str(n+1) + '\n' - srt += start + ' --> ' + end + '\n' - srt += caption + '\n\n' - return srt + def _closed_captions_xml_to_srt(self, xml_string): + srt = '' + texts = re.findall(r'([^<]+)', xml_string, re.MULTILINE) + # TODO parse xml instead of regex + for n, (start, dur_tag, dur, caption) in enumerate(texts): + if not dur: dur = '4' + start = float(start) + end = start + float(dur) + start = "%02i:%02i:%02i,%03i" %(start/(60*60), start/60%60, start%60, start%1*1000) + end = "%02i:%02i:%02i,%03i" %(end/(60*60), end/60%60, end%60, end%1*1000) + caption = unescapeHTML(caption) + caption = unescapeHTML(caption) # double cycle, intentional + srt += str(n+1) + '\n' + srt += start + ' --> ' + end + '\n' + srt += caption + '\n\n' + return srt - def _print_formats(self, formats): - print 'Available formats:' - for x in formats: - print '%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???')) + def _extract_subtitles(self, video_id): + self.report_video_subtitles_download(video_id) + request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id) + try: + srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8') + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None) + srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list) + srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list) + if not srt_lang_list: + return (u'WARNING: video has no closed captions', None) + if self._downloader.params.get('subtitleslang', False): + srt_lang = self._downloader.params.get('subtitleslang') + elif 'en' in srt_lang_list: + srt_lang = 'en' + else: + srt_lang = list(srt_lang_list.keys())[0] + if not srt_lang in srt_lang_list: + return (u'WARNING: no closed captions found in the specified language', None) + request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % 
(srt_lang, srt_lang_list[srt_lang], video_id)) + try: + srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8') + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None) + if not srt_xml: + return (u'WARNING: unable to download video subtitles', None) + return (None, self._closed_captions_xml_to_srt(srt_xml)) - def _real_initialize(self): - if self._downloader is None: - return + def _print_formats(self, formats): + print('Available formats:') + for x in formats: + print('%s\t:\t%s\t[%s]' %(x, self._video_extensions.get(x, 'flv'), self._video_dimensions.get(x, '???'))) - username = None - password = None - downloader_params = self._downloader.params + def _real_initialize(self): + if self._downloader is None: + return - # Attempt to use provided username and password or .netrc data - if downloader_params.get('username', None) is not None: - username = downloader_params['username'] - password = downloader_params['password'] - elif downloader_params.get('usenetrc', False): - try: - info = netrc.netrc().authenticators(self._NETRC_MACHINE) - if info is not None: - username = info[0] - password = info[2] - else: - raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) - except (IOError, netrc.NetrcParseError), err: - self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % str(err)) - return + username = None + password = None + downloader_params = self._downloader.params - # Set language - request = urllib2.Request(self._LANG_URL) - try: - self.report_lang() - urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.to_stderr(u'WARNING: unable to set language: %s' % str(err)) - return + # Attempt to use provided username and password or .netrc data + if downloader_params.get('username', None) is not None: + username = downloader_params['username'] + password = downloader_params['password'] + elif downloader_params.get('usenetrc', False): + try: + info = netrc.netrc().authenticators(self._NETRC_MACHINE) + if info is not None: + username = info[0] + password = info[2] + else: + raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE) + except (IOError, netrc.NetrcParseError) as err: + self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err)) + return - # No authentication to be performed - if username is None: - return + # Set language + request = compat_urllib_request.Request(self._LANG_URL) + try: + self.report_lang() + compat_urllib_request.urlopen(request).read() + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err)) + return - # Log in - login_form = { - 'current_form': 'loginForm', - 'next': '/', - 'action_login': 'Log In', - 'username': username, - 'password': password, - } - request = urllib2.Request(self._LOGIN_URL, urllib.urlencode(login_form)) - try: - self.report_login() - login_results = urllib2.urlopen(request).read() - if re.search(r'(?i)]* name="loginForm"', login_results) is not None: - self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password') - return - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.to_stderr(u'WARNING: unable to log in: %s' % str(err)) - return + # No authentication to be performed + if username is None: 
+ return - # Confirm age - age_form = { - 'next_url': '/', - 'action_confirm': 'Confirm', - } - request = urllib2.Request(self._AGE_URL, urllib.urlencode(age_form)) - try: - self.report_age_confirmation() - age_results = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err)) - return + # Log in + login_form = { + 'current_form': 'loginForm', + 'next': '/', + 'action_login': 'Log In', + 'username': username, + 'password': password, + } + request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form)) + try: + self.report_login() + login_results = compat_urllib_request.urlopen(request).read().decode('utf-8') + if re.search(r'(?i)]* name="loginForm"', login_results) is not None: + self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password') + return + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err)) + return - def _real_extract(self, url): - # Extract original video URL from URL with redirection, like age verification, using next_url parameter - mobj = re.search(self._NEXT_URL_RE, url) - if mobj: - url = 'http://www.youtube.com/' + urllib.unquote(mobj.group(1)).lstrip('/') + # Confirm age + age_form = { + 'next_url': '/', + 'action_confirm': 'Confirm', + } + request = compat_urllib_request.Request(self._AGE_URL, compat_urllib_parse.urlencode(age_form)) + try: + self.report_age_confirmation() + age_results = compat_urllib_request.urlopen(request).read().decode('utf-8') + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err)) + return - # Extract video id from URL - mobj = re.match(self._VALID_URL, url, re.VERBOSE) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return - video_id = mobj.group(2) + def _extract_id(self, url): + mobj = re.match(self._VALID_URL, url, re.VERBOSE) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return + video_id = mobj.group(2) + return video_id - # Get video webpage - self.report_video_webpage_download(video_id) - request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id) - try: - video_webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % str(err)) - return + def _real_extract(self, url): + # Extract original video URL from URL with redirection, like age verification, using next_url parameter + mobj = re.search(self._NEXT_URL_RE, url) + if mobj: + url = 'http://www.youtube.com/' + compat_urllib_parse.unquote(mobj.group(1)).lstrip('/') + video_id = self._extract_id(url) - # Attempt to extract SWF player URL - mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage) - if mobj is not None: - player_url = re.sub(r'\\(.)', r'\1', mobj.group(1)) - else: - player_url = None + # Get video webpage + self.report_video_webpage_download(video_id) + url = 'http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id + request = compat_urllib_request.Request(url) + try: + video_webpage_bytes = compat_urllib_request.urlopen(request).read() + except (compat_urllib_error.URLError, 
compat_http_client.HTTPException, socket.error) as err: + self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err)) + return - # Get video info - self.report_video_info_webpage_download(video_id) - for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']: - video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en' - % (video_id, el_type)) - request = urllib2.Request(video_info_url) - try: - video_info_webpage = urllib2.urlopen(request).read() - video_info = parse_qs(video_info_webpage) - if 'token' in video_info: - break - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err)) - return - if 'token' not in video_info: - if 'reason' in video_info: - self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8')) - else: - self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason') - return + video_webpage = video_webpage_bytes.decode('utf-8', 'ignore') - # Check for "rental" videos - if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info: - self._downloader.trouble(u'ERROR: "rental" videos not supported') - return + # Attempt to extract SWF player URL + mobj = re.search(r'swfConfig.*?"(http:\\/\\/.*?watch.*?-.*?\.swf)"', video_webpage) + if mobj is not None: + player_url = re.sub(r'\\(.)', r'\1', mobj.group(1)) + else: + player_url = None - # Start extracting information - self.report_information_extraction(video_id) + # Get video info + self.report_video_info_webpage_download(video_id) + for el_type in ['&el=embedded', '&el=detailpage', '&el=vevo', '']: + video_info_url = ('http://www.youtube.com/get_video_info?&video_id=%s%s&ps=default&eurl=&gl=US&hl=en' + % (video_id, el_type)) + request = compat_urllib_request.Request(video_info_url) + try: + video_info_webpage_bytes = compat_urllib_request.urlopen(request).read() + video_info_webpage = video_info_webpage_bytes.decode('utf-8', 'ignore') + video_info = compat_parse_qs(video_info_webpage) + if 'token' in video_info: + break + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err)) + return + if 'token' not in video_info: + if 'reason' in video_info: + self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0]) + else: + self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason') + return - # uploader - if 'author' not in video_info: - self._downloader.trouble(u'ERROR: unable to extract uploader nickname') - return - video_uploader = urllib.unquote_plus(video_info['author'][0]) + # Check for "rental" videos + if 'ypc_video_rental_bar_text' in video_info and 'author' not in video_info: + self._downloader.trouble(u'ERROR: "rental" videos not supported') + return - # title - if 'title' not in video_info: - self._downloader.trouble(u'ERROR: unable to extract video title') - return - video_title = urllib.unquote_plus(video_info['title'][0]) - video_title = video_title.decode('utf-8') + # Start extracting information + self.report_information_extraction(video_id) - # thumbnail image - if 'thumbnail_url' not in video_info: - self._downloader.trouble(u'WARNING: unable to extract video thumbnail') - video_thumbnail = '' - else: # don't panic if we can't find it - video_thumbnail = 
urllib.unquote_plus(video_info['thumbnail_url'][0]) + # uploader + if 'author' not in video_info: + self._downloader.trouble(u'ERROR: unable to extract uploader name') + return + video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0]) - # upload date - upload_date = u'NA' - mobj = re.search(r'id="eow-date.*?>(.*?)', video_webpage, re.DOTALL) - if mobj is not None: - upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split()) - format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y'] - for expression in format_expressions: - try: - upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d') - except: - pass + # uploader_id + video_uploader_id = None + mobj = re.search(r'', video_webpage) + if mobj is not None: + video_uploader_id = mobj.group(1) + else: + self._downloader.trouble(u'WARNING: unable to extract uploader nickname') - # description - video_description = get_element_by_id("eow-description", video_webpage.decode('utf8')) - if video_description: video_description = clean_html(video_description) - else: video_description = '' - - # closed captions - video_subtitles = None - if self._downloader.params.get('writesubtitles', False): - try: - self.report_video_subtitles_download(video_id) - request = urllib2.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id) - try: - srt_list = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err)) - srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list) - srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list) - if not srt_lang_list: - raise Trouble(u'WARNING: video has no closed captions') - if self._downloader.params.get('subtitleslang', False): - srt_lang = self._downloader.params.get('subtitleslang') - elif 'en' in srt_lang_list: - srt_lang = 'en' - else: - srt_lang = srt_lang_list.keys()[0] - if not srt_lang in srt_lang_list: - raise Trouble(u'WARNING: no closed captions found in the specified language') - request = urllib2.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id)) - try: - srt_xml = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - raise Trouble(u'WARNING: unable to download video subtitles: %s' % str(err)) - if not srt_xml: - raise Trouble(u'WARNING: unable to download video subtitles') - video_subtitles = self._closed_captions_xml_to_srt(srt_xml.decode('utf-8')) - except Trouble as trouble: - self._downloader.trouble(trouble[0]) + # title + if 'title' not in video_info: + self._downloader.trouble(u'ERROR: unable to extract video title') + return + video_title = compat_urllib_parse.unquote_plus(video_info['title'][0]) - if 'length_seconds' not in video_info: - self._downloader.trouble(u'WARNING: unable to extract video duration') - video_duration = '' - else: - video_duration = urllib.unquote_plus(video_info['length_seconds'][0]) + # thumbnail image + if 'thumbnail_url' not in video_info: + self._downloader.trouble(u'WARNING: unable to extract video thumbnail') + video_thumbnail = '' + else: # don't panic if we can't find it + video_thumbnail = compat_urllib_parse.unquote_plus(video_info['thumbnail_url'][0]) - # token - video_token = urllib.unquote_plus(video_info['token'][0]) + # upload date + upload_date = None + mobj = re.search(r'id="eow-date.*?>(.*?)', video_webpage, re.DOTALL) + 
if mobj is not None: + upload_date = ' '.join(re.sub(r'[/,-]', r' ', mobj.group(1)).split()) + format_expressions = ['%d %B %Y', '%B %d %Y', '%b %d %Y'] + for expression in format_expressions: + try: + upload_date = datetime.datetime.strptime(upload_date, expression).strftime('%Y%m%d') + except: + pass - # Decide which formats to download - req_format = self._downloader.params.get('format', None) + # description + video_description = get_element_by_id("eow-description", video_webpage) + if video_description: + video_description = clean_html(video_description) + else: + video_description = '' - if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'): - self.report_rtmp_download() - video_url_list = [(None, video_info['conn'][0])] - elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1: - url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',') - url_data = [parse_qs(uds) for uds in url_data_strs] - url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data) - url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data) + # closed captions + video_subtitles = None + if self._downloader.params.get('writesubtitles', False): + (srt_error, video_subtitles) = self._extract_subtitles(video_id) + if srt_error: + self._downloader.trouble(srt_error) - format_limit = self._downloader.params.get('format_limit', None) - available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats - if format_limit is not None and format_limit in available_formats: - format_list = available_formats[available_formats.index(format_limit):] - else: - format_list = available_formats - existing_formats = [x for x in format_list if x in url_map] - if len(existing_formats) == 0: - self._downloader.trouble(u'ERROR: no known formats available for video') - return - if self._downloader.params.get('listformats', None): - self._print_formats(existing_formats) - return - if req_format is None or req_format == 'best': - video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality - elif req_format == 'worst': - video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality - elif req_format in ('-1', 'all'): - video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats - else: - # Specific formats. We pick the first in a slash-delimeted sequence. - # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'. 
- req_formats = req_format.split('/') - video_url_list = None - for rf in req_formats: - if rf in url_map: - video_url_list = [(rf, url_map[rf])] - break - if video_url_list is None: - self._downloader.trouble(u'ERROR: requested format not available') - return - else: - self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info') - return + if 'length_seconds' not in video_info: + self._downloader.trouble(u'WARNING: unable to extract video duration') + video_duration = '' + else: + video_duration = compat_urllib_parse.unquote_plus(video_info['length_seconds'][0]) - results = [] - for format_param, video_real_url in video_url_list: - # Extension - video_extension = self._video_extensions.get(format_param, 'flv') + # token + video_token = compat_urllib_parse.unquote_plus(video_info['token'][0]) - results.append({ - 'id': video_id.decode('utf-8'), - 'url': video_real_url.decode('utf-8'), - 'uploader': video_uploader.decode('utf-8'), - 'upload_date': upload_date, - 'title': video_title, - 'ext': video_extension.decode('utf-8'), - 'format': (format_param is None and u'NA' or format_param.decode('utf-8')), - 'thumbnail': video_thumbnail.decode('utf-8'), - 'description': video_description, - 'player_url': player_url, - 'subtitles': video_subtitles, - 'duration': video_duration - }) - return results + # Decide which formats to download + req_format = self._downloader.params.get('format', None) + + if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'): + self.report_rtmp_download() + video_url_list = [(None, video_info['conn'][0])] + elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1: + url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',') + url_data = [compat_parse_qs(uds) for uds in url_data_strs] + url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud] + url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data) + + format_limit = self._downloader.params.get('format_limit', None) + available_formats = self._available_formats_prefer_free if self._downloader.params.get('prefer_free_formats', False) else self._available_formats + if format_limit is not None and format_limit in available_formats: + format_list = available_formats[available_formats.index(format_limit):] + else: + format_list = available_formats + existing_formats = [x for x in format_list if x in url_map] + if len(existing_formats) == 0: + self._downloader.trouble(u'ERROR: no known formats available for video') + return + if self._downloader.params.get('listformats', None): + self._print_formats(existing_formats) + return + if req_format is None or req_format == 'best': + video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality + elif req_format == 'worst': + video_url_list = [(existing_formats[len(existing_formats)-1], url_map[existing_formats[len(existing_formats)-1]])] # worst quality + elif req_format in ('-1', 'all'): + video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats + else: + # Specific formats. We pick the first in a slash-delimeted sequence. + # For example, if '1/2/3/4' is requested and '2' and '4' are available, we pick '2'. 
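# --- Illustrative sketch, not part of the patch: the format-selection rules
# implemented above, pulled out in isolation. `url_map` maps itag -> URL and
# `existing_formats` lists the offered itags in quality order; the helper
# name `pick_formats` is hypothetical.
def pick_formats(req_format, existing_formats, url_map):
    if req_format is None or req_format == 'best':
        return [(existing_formats[0], url_map[existing_formats[0]])]
    if req_format == 'worst':
        return [(existing_formats[-1], url_map[existing_formats[-1]])]
    if req_format in ('-1', 'all'):
        return [(f, url_map[f]) for f in existing_formats]
    # Specific formats: take the first available entry of a slash-delimited
    # preference list, e.g. '1/2/3/4' with '2' and '4' available picks '2'.
    for rf in req_format.split('/'):
        if rf in url_map:
            return [(rf, url_map[rf])]
    return None  # requested format not available; caller reports an error

# Example: pick_formats('38/22/18', ['22', '18'], {'22': 'u22', '18': 'u18'})
# returns [('22', 'u22')].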
+ req_formats = req_format.split('/') + video_url_list = None + for rf in req_formats: + if rf in url_map: + video_url_list = [(rf, url_map[rf])] + break + if video_url_list is None: + self._downloader.trouble(u'ERROR: requested format not available') + return + else: + self._downloader.trouble(u'ERROR: no conn or url_encoded_fmt_stream_map information found in video info') + return + + results = [] + for format_param, video_real_url in video_url_list: + # Extension + video_extension = self._video_extensions.get(format_param, 'flv') + + video_format = '{0} - {1}'.format(format_param if format_param else video_extension, + self._video_dimensions.get(format_param, '???')) + + results.append({ + 'id': video_id, + 'url': video_real_url, + 'uploader': video_uploader, + 'uploader_id': video_uploader_id, + 'upload_date': upload_date, + 'title': video_title, + 'ext': video_extension, + 'format': video_format, + 'thumbnail': video_thumbnail, + 'description': video_description, + 'player_url': player_url, + 'subtitles': video_subtitles, + 'duration': video_duration + }) + return results class MetacafeIE(InfoExtractor): - """Information Extractor for metacafe.com.""" + """Information Extractor for metacafe.com.""" - _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*' - _DISCLAIMER = 'http://www.metacafe.com/family_filter/' - _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user' - IE_NAME = u'metacafe' + _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*' + _DISCLAIMER = 'http://www.metacafe.com/family_filter/' + _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user' + IE_NAME = u'metacafe' - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) + def __init__(self, downloader=None): + InfoExtractor.__init__(self, downloader) - def report_disclaimer(self): - """Report disclaimer retrieval.""" - self._downloader.to_screen(u'[metacafe] Retrieving disclaimer') + def report_disclaimer(self): + """Report disclaimer retrieval.""" + self._downloader.to_screen(u'[metacafe] Retrieving disclaimer') - def report_age_confirmation(self): - """Report attempt to confirm age.""" - self._downloader.to_screen(u'[metacafe] Confirming age') + def report_age_confirmation(self): + """Report attempt to confirm age.""" + self._downloader.to_screen(u'[metacafe] Confirming age') - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id) + def report_download_webpage(self, video_id): + """Report webpage download.""" + self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id) - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id) + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id) - def _real_initialize(self): - # Retrieve disclaimer - request = urllib2.Request(self._DISCLAIMER) - try: - self.report_disclaimer() - disclaimer = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % str(err)) - return + def _real_initialize(self): + # Retrieve disclaimer + request = compat_urllib_request.Request(self._DISCLAIMER) + try: + 
self.report_disclaimer() + disclaimer = compat_urllib_request.urlopen(request).read() + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err)) + return - # Confirm age - disclaimer_form = { - 'filters': '0', - 'submit': "Continue - I'm over 18", - } - request = urllib2.Request(self._FILTER_POST, urllib.urlencode(disclaimer_form)) - try: - self.report_age_confirmation() - disclaimer = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable to confirm age: %s' % str(err)) - return + # Confirm age + disclaimer_form = { + 'filters': '0', + 'submit': "Continue - I'm over 18", + } + request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form)) + try: + self.report_age_confirmation() + disclaimer = compat_urllib_request.urlopen(request).read() + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err)) + return - def _real_extract(self, url): - # Extract id and simplified title from URL - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return + def _real_extract(self, url): + # Extract id and simplified title from URL + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return - video_id = mobj.group(1) + video_id = mobj.group(1) - # Check if video comes from YouTube - mobj2 = re.match(r'^yt-(.*)$', video_id) - if mobj2 is not None: - self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)]) - return + # Check if video comes from YouTube + mobj2 = re.match(r'^yt-(.*)$', video_id) + if mobj2 is not None: + self._downloader.download(['http://www.youtube.com/watch?v=%s' % mobj2.group(1)]) + return - # Retrieve video webpage to extract further information - request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id) - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % str(err)) - return + # Retrieve video webpage to extract further information + request = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id) + try: + self.report_download_webpage(video_id) + webpage = compat_urllib_request.urlopen(request).read() + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err)) + return - # Extract URL, uploader and title from webpage - self.report_extraction(video_id) - mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage) - if mobj is not None: - mediaURL = urllib.unquote(mobj.group(1)) - video_extension = mediaURL[-3:] + # Extract URL, uploader and title from webpage + self.report_extraction(video_id) + mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage) + if mobj is not None: + mediaURL = compat_urllib_parse.unquote(mobj.group(1)) + video_extension = mediaURL[-3:] - # Extract gdaKey if available - mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage) - if mobj is None: - video_url = mediaURL - else: - gdaKey = mobj.group(1) - video_url 
= '%s?__gda__=%s' % (mediaURL, gdaKey) - else: - mobj = re.search(r' name="flashvars" value="(.*?)"', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - vardict = parse_qs(mobj.group(1)) - if 'mediaData' not in vardict: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0]) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - mediaURL = mobj.group(1).replace('\\/', '/') - video_extension = mediaURL[-3:] - video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2)) + # Extract gdaKey if available + mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage) + if mobj is None: + video_url = mediaURL + else: + gdaKey = mobj.group(1) + video_url = '%s?__gda__=%s' % (mediaURL, gdaKey) + else: + mobj = re.search(r' name="flashvars" value="(.*?)"', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract media URL') + return + vardict = compat_parse_qs(mobj.group(1)) + if 'mediaData' not in vardict: + self._downloader.trouble(u'ERROR: unable to extract media URL') + return + mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0]) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract media URL') + return + mediaURL = mobj.group(1).replace('\\/', '/') + video_extension = mediaURL[-3:] + video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2)) - mobj = re.search(r'(?im)(.*) - Video', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - video_title = mobj.group(1).decode('utf-8') + mobj = re.search(r'(?im)(.*) - Video', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract title') + return + video_title = mobj.group(1).decode('utf-8') - mobj = re.search(r'submitter=(.*?);', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract uploader nickname') - return - video_uploader = mobj.group(1) + mobj = re.search(r'submitter=(.*?);', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract uploader nickname') + return + video_uploader = mobj.group(1) - return [{ - 'id': video_id.decode('utf-8'), - 'url': video_url.decode('utf-8'), - 'uploader': video_uploader.decode('utf-8'), - 'upload_date': u'NA', - 'title': video_title, - 'ext': video_extension.decode('utf-8'), - 'format': u'NA', - 'player_url': None, - }] + return [{ + 'id': video_id.decode('utf-8'), + 'url': video_url.decode('utf-8'), + 'uploader': video_uploader.decode('utf-8'), + 'upload_date': None, + 'title': video_title, + 'ext': video_extension.decode('utf-8'), + }] class DailymotionIE(InfoExtractor): - """Information Extractor for Dailymotion""" + """Information Extractor for Dailymotion""" - _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)' - IE_NAME = u'dailymotion' + _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)' + IE_NAME = u'dailymotion' - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) + def __init__(self, downloader=None): + InfoExtractor.__init__(self, downloader) - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id) + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[dailymotion] %s: Extracting 
information' % video_id) - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id) + def _real_extract(self, url): + # Extract id and simplified title from URL + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: invalid URL: %s' % url) + return - def _real_extract(self, url): - # Extract id and simplified title from URL - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: invalid URL: %s' % url) - return + video_id = mobj.group(1).split('_')[0].split('?')[0] - video_id = mobj.group(1).split('_')[0].split('?')[0] + video_extension = 'mp4' - video_extension = 'mp4' + # Retrieve video webpage to extract further information + request = compat_urllib_request.Request(url) + request.add_header('Cookie', 'family_filter=off') + webpage = self._download_webpage(request, video_id) - # Retrieve video webpage to extract further information - request = urllib2.Request(url) - request.add_header('Cookie', 'family_filter=off') - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % str(err)) - return + # Extract URL, uploader and title from webpage + self.report_extraction(video_id) + mobj = re.search(r'\s*var flashvars = (.*)', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract media URL') + return + flashvars = compat_urllib_parse.unquote(mobj.group(1)) - # Extract URL, uploader and title from webpage - self.report_extraction(video_id) - mobj = re.search(r'\s*var flashvars = (.*)', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - flashvars = urllib.unquote(mobj.group(1)) + for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']: + if key in flashvars: + max_quality = key + self._downloader.to_screen(u'[dailymotion] Using %s' % key) + break + else: + self._downloader.trouble(u'ERROR: unable to extract video URL') + return - for key in ['hd1080URL', 'hd720URL', 'hqURL', 'sdURL', 'ldURL', 'video_url']: - if key in flashvars: - max_quality = key - self._downloader.to_screen(u'[dailymotion] Using %s' % key) - break - else: - self._downloader.trouble(u'ERROR: unable to extract video URL') - return + mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract video URL') + return - mobj = re.search(r'"' + max_quality + r'":"(.+?)"', flashvars) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video URL') - return + video_url = compat_urllib_parse.unquote(mobj.group(1)).replace('\\/', '/') - video_url = urllib.unquote(mobj.group(1)).replace('\\/', '/') + # TODO: support choosing qualities - # TODO: support choosing qualities + mobj = re.search(r'', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract title') + return + video_title = unescapeHTML(mobj.group('title')) - mobj = re.search(r'', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - video_title = unescapeHTML(mobj.group('title').decode('utf-8')) + video_uploader = None + mobj = re.search(r'(?im)[^<]+?]+?>([^<]+?)', webpage) + if mobj is None: + # lookin for official user + mobj_official = re.search(r'', 
webpage) + if mobj_official is None: + self._downloader.trouble(u'WARNING: unable to extract uploader nickname') + else: + video_uploader = mobj_official.group(1) + else: + video_uploader = mobj.group(1) - video_uploader = u'NA' - mobj = re.search(r'(?im)[^<]+?]+?>([^<]+?)', webpage) - if mobj is None: - # lookin for official user - mobj_official = re.search(r'', webpage) - if mobj_official is None: - self._downloader.trouble(u'WARNING: unable to extract uploader nickname') - else: - video_uploader = mobj_official.group(1) - else: - video_uploader = mobj.group(1) + video_upload_date = None + mobj = re.search(r'
<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>
', webpage) + if mobj is not None: + video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1) - video_upload_date = u'NA' - mobj = re.search(r'
<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>
', webpage) - if mobj is not None: - video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1) - - return [{ - 'id': video_id.decode('utf-8'), - 'url': video_url.decode('utf-8'), - 'uploader': video_uploader.decode('utf-8'), - 'upload_date': video_upload_date, - 'title': video_title, - 'ext': video_extension.decode('utf-8'), - 'format': u'NA', - 'player_url': None, - }] - - -class GoogleIE(InfoExtractor): - """Information extractor for video.google.com.""" - - _VALID_URL = r'(?:http://)?video\.google\.(?:com(?:\.au)?|co\.(?:uk|jp|kr|cr)|ca|de|es|fr|it|nl|pl)/videoplay\?docid=([^\&]+).*' - IE_NAME = u'video.google' - - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) - - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id) - - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id) - - def _real_extract(self, url): - # Extract id from URL - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) - return - - video_id = mobj.group(1) - - video_extension = 'mp4' - - # Retrieve video webpage to extract further information - request = urllib2.Request('http://video.google.com/videoplay?docid=%s&hl=en&oe=utf-8' % video_id) - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return - - # Extract URL, uploader, and title from webpage - self.report_extraction(video_id) - mobj = re.search(r"download_url:'([^']+)'", webpage) - if mobj is None: - video_extension = 'flv' - mobj = re.search(r"(?i)videoUrl\\x3d(.+?)\\x26", webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - mediaURL = urllib.unquote(mobj.group(1)) - mediaURL = mediaURL.replace('\\x3d', '\x3d') - mediaURL = mediaURL.replace('\\x26', '\x26') - - video_url = mediaURL - - mobj = re.search(r'(.*)', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - video_title = mobj.group(1).decode('utf-8') - - # Extract video description - mobj = re.search(r'([^<]*)', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video description') - return - video_description = mobj.group(1).decode('utf-8') - if not video_description: - video_description = 'No description available.' 
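# --- Illustrative sketch, not part of the patch: the upload_date values the
# extractors above return are normalized to YYYYMMDD. Dailymotion displays
# dates as DD-MM-YYYY, so the regex groups are simply reassembled in reverse,
# as in `mobj.group(3) + mobj.group(2) + mobj.group(1)` above. The helper
# name `normalize_dmy` is hypothetical.
import re

def normalize_dmy(date_str):
    mobj = re.match(r'([0-9]{2})-([0-9]{2})-([0-9]{4})', date_str)
    if mobj is None:
        return None
    return mobj.group(3) + mobj.group(2) + mobj.group(1)

assert normalize_dmy('02-01-2013') == '20130102'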
- - # Extract video thumbnail - if self._downloader.params.get('forcethumbnail', False): - request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id))) - try: - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return - mobj = re.search(r'', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract video thumbnail') - return - video_thumbnail = mobj.group(1) - else: # we need something to pass to process_info - video_thumbnail = '' - - return [{ - 'id': video_id.decode('utf-8'), - 'url': video_url.decode('utf-8'), - 'uploader': u'NA', - 'upload_date': u'NA', - 'title': video_title, - 'ext': video_extension.decode('utf-8'), - 'format': u'NA', - 'player_url': None, - }] + return [{ + 'id': video_id, + 'url': video_url, + 'uploader': video_uploader, + 'upload_date': video_upload_date, + 'title': video_title, + 'ext': video_extension, + }] class PhotobucketIE(InfoExtractor): - """Information extractor for photobucket.com.""" + """Information extractor for photobucket.com.""" - _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)' - IE_NAME = u'photobucket' + _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)' + IE_NAME = u'photobucket' - def __init__(self, downloader=None): - InfoExtractor.__init__(self, downloader) + def __init__(self, downloader=None): + InfoExtractor.__init__(self, downloader) - def report_download_webpage(self, video_id): - """Report webpage download.""" - self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id) + def report_download_webpage(self, video_id): + """Report webpage download.""" + self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id) - def report_extraction(self, video_id): - """Report information extraction.""" - self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id) + def report_extraction(self, video_id): + """Report information extraction.""" + self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id) - def _real_extract(self, url): - # Extract id from URL - mobj = re.match(self._VALID_URL, url) - if mobj is None: - self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) - return + def _real_extract(self, url): + # Extract id from URL + mobj = re.match(self._VALID_URL, url) + if mobj is None: + self._downloader.trouble(u'ERROR: Invalid URL: %s' % url) + return - video_id = mobj.group(1) + video_id = mobj.group(1) - video_extension = 'flv' + video_extension = 'flv' - # Retrieve video webpage to extract further information - request = urllib2.Request(url) - try: - self.report_download_webpage(video_id) - webpage = urllib2.urlopen(request).read() - except (urllib2.URLError, httplib.HTTPException, socket.error), err: - self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err)) - return + # Retrieve video webpage to extract further information + request = compat_urllib_request.Request(url) + try: + self.report_download_webpage(video_id) + webpage = compat_urllib_request.urlopen(request).read() + except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err: + self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err)) + return - # Extract URL, uploader, and title from webpage - 
self.report_extraction(video_id) - mobj = re.search(r'', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract media URL') - return - mediaURL = urllib.unquote(mobj.group(1)) + # Extract URL, uploader, and title from webpage + self.report_extraction(video_id) + mobj = re.search(r'', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract media URL') + return + mediaURL = compat_urllib_parse.unquote(mobj.group(1)) - video_url = mediaURL + video_url = mediaURL - mobj = re.search(r'(.*) video by (.*) - Photobucket', webpage) - if mobj is None: - self._downloader.trouble(u'ERROR: unable to extract title') - return - video_title = mobj.group(1).decode('utf-8') + mobj = re.search(r'(.*) video by (.*) - Photobucket', webpage) + if mobj is None: + self._downloader.trouble(u'ERROR: unable to extract title') + return + video_title = mobj.group(1).decode('utf-8') - video_uploader = mobj.group(2).decode('utf-8') + video_uploader = mobj.group(2).decode('utf-8') - return [{ - 'id': video_id.decode('utf-8'), - 'url': video_url.decode('utf-8'), - 'uploader': video_uploader, - 'upload_date': u'NA', - 'title': video_title, - 'ext': video_extension.decode('utf-8'), - 'format': u'NA', - 'player_url': None, - }] + return [{ + 'id': video_id.decode('utf-8'), + 'url': video_url.decode('utf-8'), + 'uploader': video_uploader, + 'upload_date': None, + 'title': video_title, + 'ext': video_extension.decode('utf-8'), + }] class YahooIE(InfoExtractor): - """Information extractor for video.yahoo.com.""" + """Information extractor for video.yahoo.com.""" - # _VALID_URL matches all Yahoo! Video URLs - # _VPAGE_URL matches only the extractable '/watch/' URLs - _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?' - _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?' - IE_NAME = u'video.yahoo' + _WORKING = False + # _VALID_URL matches all Yahoo! Video URLs + # _VPAGE_URL matches only the extractable '/watch/' URLs + _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?' + _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?' 
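# --- Illustrative sketch, not part of the patch: the Yahoo extractor below
# works in two steps. A valid URL that is not an extractable /watch/ page is
# first rewritten into one by scraping the ("id", ...) and ("vid", ...)
# values from the page, and extraction then recurses on the rewritten URL
# (new_video=False). A condensed view of that rewrite, with the hypothetical
# helper name `to_watch_url`:
import re

_VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'

def to_watch_url(url, webpage):
    if re.match(_VPAGE_URL, url) is not None:
        return url  # already an extractable /watch/ URL
    # The page embeds the canonical ids as ("id", "...") / ("vid", "...").
    yahoo_id = re.search(r'\("id", "([0-9]+)"\);', webpage).group(1)
    yahoo_vid = re.search(r'\("vid", "([0-9]+)"\);', webpage).group(1)
    return 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)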
 class YahooIE(InfoExtractor):
-	"""Information extractor for video.yahoo.com."""
+    """Information extractor for video.yahoo.com."""

-	# _VALID_URL matches all Yahoo! Video URLs
-	# _VPAGE_URL matches only the extractable '/watch/' URLs
-	_VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
-	_VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
-	IE_NAME = u'video.yahoo'
+    _WORKING = False
+    # _VALID_URL matches all Yahoo! Video URLs
+    # _VPAGE_URL matches only the extractable '/watch/' URLs
+    _VALID_URL = r'(?:http://)?(?:[a-z]+\.)?video\.yahoo\.com/(?:watch|network)/([0-9]+)(?:/|\?v=)([0-9]+)(?:[#\?].*)?'
+    _VPAGE_URL = r'(?:http://)?video\.yahoo\.com/watch/([0-9]+)/([0-9]+)(?:[#\?].*)?'
+    IE_NAME = u'video.yahoo'

-	def __init__(self, downloader=None):
-		InfoExtractor.__init__(self, downloader)
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)

-	def report_download_webpage(self, video_id):
-		"""Report webpage download."""
-		self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)

-	def report_extraction(self, video_id):
-		"""Report information extraction."""
-		self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)

-	def _real_extract(self, url, new_video=True):
-		# Extract ID from URL
-		mobj = re.match(self._VALID_URL, url)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
-			return
+    def _real_extract(self, url, new_video=True):
+        # Extract ID from URL
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return

-		video_id = mobj.group(2)
-		video_extension = 'flv'
+        video_id = mobj.group(2)
+        video_extension = 'flv'

-		# Rewrite valid but non-extractable URLs as
-		# extractable English language /watch/ URLs
-		if re.match(self._VPAGE_URL, url) is None:
-			request = urllib2.Request(url)
-			try:
-				webpage = urllib2.urlopen(request).read()
-			except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-				self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-				return
+        # Rewrite valid but non-extractable URLs as
+        # extractable English language /watch/ URLs
+        if re.match(self._VPAGE_URL, url) is None:
+            request = compat_urllib_request.Request(url)
+            try:
+                webpage = compat_urllib_request.urlopen(request).read()
+            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+                self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+                return

-			mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
-			if mobj is None:
-				self._downloader.trouble(u'ERROR: Unable to extract id field')
-				return
-			yahoo_id = mobj.group(1)
+            mobj = re.search(r'\("id", "([0-9]+)"\);', webpage)
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: Unable to extract id field')
+                return
+            yahoo_id = mobj.group(1)

-			mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
-			if mobj is None:
-				self._downloader.trouble(u'ERROR: Unable to extract vid field')
-				return
-			yahoo_vid = mobj.group(1)
+            mobj = re.search(r'\("vid", "([0-9]+)"\);', webpage)
+            if mobj is None:
+                self._downloader.trouble(u'ERROR: Unable to extract vid field')
+                return
+            yahoo_vid = mobj.group(1)

-			url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
-			return self._real_extract(url, new_video=False)
+            url = 'http://video.yahoo.com/watch/%s/%s' % (yahoo_vid, yahoo_id)
+            return self._real_extract(url, new_video=False)

-		# Retrieve video webpage to extract further information
-		request = urllib2.Request(url)
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
+        # Retrieve video webpage to extract further information
+        request = compat_urllib_request.Request(url)
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return

-		# Extract uploader and title from webpage
-		self.report_extraction(video_id)
-		mobj = re.search(r'', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video title')
-			return
-		video_title = mobj.group(1).decode('utf-8')
+        # Extract uploader and title from webpage
+        self.report_extraction(video_id)
+        mobj = re.search(r'', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video title')
+            return
+        video_title = mobj.group(1).decode('utf-8')

-		mobj = re.search(r'(.*)', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video uploader')
-			return
-		video_uploader = mobj.group(1).decode('utf-8')
+        mobj = re.search(r'(.*)', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video uploader')
+            return
+        video_uploader = mobj.group(1).decode('utf-8')

-		# Extract video thumbnail
-		mobj = re.search(r'', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
-			return
-		video_thumbnail = mobj.group(1).decode('utf-8')
+        # Extract video thumbnail
+        mobj = re.search(r'', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video thumbnail')
+            return
+        video_thumbnail = mobj.group(1).decode('utf-8')

-		# Extract video description
-		mobj = re.search(r'', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video description')
-			return
-		video_description = mobj.group(1).decode('utf-8')
-		if not video_description:
-			video_description = 'No description available.'
+        # Extract video description
+        mobj = re.search(r'', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video description')
+            return
+        video_description = mobj.group(1).decode('utf-8')
+        if not video_description:
+            video_description = 'No description available.'

-		# Extract video height and width
-		mobj = re.search(r'', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video height')
-			return
-		yv_video_height = mobj.group(1)
+        # Extract video height and width
+        mobj = re.search(r'', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video height')
+            return
+        yv_video_height = mobj.group(1)

-		mobj = re.search(r'', webpage)
-		if mobj is None:
-			self._downloader.trouble(u'ERROR: unable to extract video width')
-			return
-		yv_video_width = mobj.group(1)
+        mobj = re.search(r'', webpage)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: unable to extract video width')
+            return
+        yv_video_width = mobj.group(1)

-		# Retrieve video playlist to extract media URL
-		# I'm not completely sure what all these options are, but we
-		# seem to need most of them, otherwise the server sends a 401.
-		yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
-		yv_bitrate = '700'  # according to Wikipedia this is hard-coded
-		request = urllib2.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
-			'&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
-			'&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
-		try:
-			self.report_download_webpage(video_id)
-			webpage = urllib2.urlopen(request).read()
-		except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-			self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % str(err))
-			return
+        # Retrieve video playlist to extract media URL
+        # I'm not completely sure what all these options are, but we
+        # seem to need most of them, otherwise the server sends a 401.
+        yv_lg = 'R0xx6idZnW2zlrKP8xxAIR'  # not sure what this represents
+        yv_bitrate = '700'  # according to Wikipedia this is hard-coded
+        request = compat_urllib_request.Request('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?node_id=' + video_id +
+            '&tech=flash&mode=playlist&lg=' + yv_lg + '&bitrate=' + yv_bitrate + '&vidH=' + yv_video_height +
+            '&vidW=' + yv_video_width + '&swf=as3&rd=video.yahoo.com&tk=null&adsupported=v1,v2,&eventid=1301797')
+        try:
+            self.report_download_webpage(video_id)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return

-		# Extract media URL from playlist XML
-		mobj = re.search(r'[^:]*: (.*?)( \([^\(]*\))?', webpage)
-		if mobj is not None:
-			video_upload_date = mobj.group(1)
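The getPlaylistFOP request above carries its dozen query parameters by plain string concatenation, unchanged from the old code apart from the compat_ rename. The same URL can be assembled with urlencode, which also handles escaping; a sketch outside the patch, with the dynamic values invented:

    import urllib.parse  # what compat_urllib_parse aliases on Python 3

    params = [
        ('node_id', '12345678'),            # video_id
        ('tech', 'flash'),
        ('mode', 'playlist'),
        ('lg', 'R0xx6idZnW2zlrKP8xxAIR'),   # yv_lg
        ('bitrate', '700'),                 # yv_bitrate
        ('vidH', '360'),                    # yv_video_height
        ('vidW', '640'),                    # yv_video_width
        ('swf', 'as3'),
        ('rd', 'video.yahoo.com'),
        ('tk', 'null'),
        ('adsupported', 'v1,v2,'),
        ('eventid', '1301797'),
    ]
    url = ('http://cosmos.bcst.yahoo.com/up/yep/process/getPlaylistFOP.php?'
           + urllib.parse.urlencode(params))
    print(url)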
+        # Extract video description
+        video_description = get_element_by_attribute("itemprop", "description", webpage)
+        if video_description: video_description = clean_html(video_description)
+        else: video_description = ''

-		# Vimeo specific: extract request signature and timestamp
-		sig = config['request']['signature']
-		timestamp = config['request']['timestamp']
+        # Extract upload date
+        video_upload_date = None
+        mobj = re.search(r'', webpage)
+        if mobj is not None:
+            video_upload_date = mobj.group(1)

+        # Vimeo specific: extract request signature and timestamp
+        sig = config['request']['signature']
+        timestamp = config['request']['timestamp']

+        for quality in ('hd', 'sd', 'other'):
+            if len(files[quality]) > 0:
+                video_quality = files[quality][0][2]
+                video_codec = files[quality][0][0]
+                video_extension = files[quality][0][1]
+                self._downloader.to_screen(u'[vimeo] %s: Downloading %s file at %s quality' % (video_id, video_codec.upper(), video_quality))
+                break
+        else:
+            self._downloader.trouble(u'ERROR: no known codec found')
+            return

+        video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
+            %(video_id, sig, timestamp, video_quality, video_codec.upper())

+        return [{
+            'id': video_id,
+            'url': video_url,
+            'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
+            'upload_date': video_upload_date,
+            'title': video_title,
+            'ext': video_extension,
+            'thumbnail': video_thumbnail,
+            'description': video_description,
+        }]
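The new Vimeo code selects a format with a for/else: the loop breaks on the first quality tier that contains any files, and the else branch runs only if no tier matched. A self-contained illustration of the pattern, with an invented files mapping shaped like the (codec, extension, quality) tuples the extractor builds from the player config:

    # 'hd' is empty here, so the loop settles on the first 'sd' entry.
    files = {'hd': [], 'sd': [('h264', 'mp4', 'sd')], 'other': []}

    for quality in ('hd', 'sd', 'other'):
        if len(files[quality]) > 0:
            video_codec, video_extension, video_quality = files[quality][0]
            print('downloading %s file at %s quality' % (video_codec.upper(), video_quality))
            break
    else:
        # Runs only when the loop finished without hitting break.
        raise SystemExit('ERROR: no known codec found')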
+
+
+class ArteTvIE(InfoExtractor):
+    """arte.tv information extractor."""
+
+    _VALID_URL = r'(?:http://)?videos\.arte\.tv/(?:fr|de)/videos/.*'
+    _LIVE_URL = r'index-[0-9]+\.html$'
+
+    IE_NAME = u'arte.tv'
+
+    def __init__(self, downloader=None):
+        InfoExtractor.__init__(self, downloader)
+
+    def report_download_webpage(self, video_id):
+        """Report webpage download."""
+        self._downloader.to_screen(u'[arte.tv] %s: Downloading webpage' % video_id)
+
+    def report_extraction(self, video_id):
+        """Report information extraction."""
+        self._downloader.to_screen(u'[arte.tv] %s: Extracting information' % video_id)
+
+    def fetch_webpage(self, url):
+        request = compat_urllib_request.Request(url)
+        try:
+            self.report_download_webpage(url)
+            webpage = compat_urllib_request.urlopen(request).read()
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
+            return
+        except ValueError as err:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+        return webpage
+
+    def grep_webpage(self, url, regex, regexFlags, matchTuples):
+        page = self.fetch_webpage(url)
+        mobj = re.search(regex, page, regexFlags)
+        info = {}
+
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
+            return
+
+        for (i, key, err) in matchTuples:
+            if mobj.group(i) is None:
+                self._downloader.trouble(err)
+                return
+            else:
+                info[key] = mobj.group(i)
+
+        return info
+
+    def extractLiveStream(self, url):
+        video_lang = url.split('/')[-4]
+        info = self.grep_webpage(
+            url,
+            r'src="(.*?/videothek_js.*?\.js)',
+            0,
+            [
+                (1, 'url', u'ERROR: Invalid URL: %s' % url)
+            ]
+        )
+        http_host = url.split('/')[2]
+        next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
+        info = self.grep_webpage(
+            next_url,
+            r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
+                '(http://.*?\.swf).*?' +
+                '(rtmp://.*?)\'',
+            re.DOTALL,
+            [
+                (1, 'path', u'ERROR: could not extract video path: %s' % url),
+                (2, 'player', u'ERROR: could not extract video player: %s' % url),
+                (3, 'url', u'ERROR: could not extract video url: %s' % url)
+            ]
+        )
+        video_url = u'%s/%s' % (info.get('url'), info.get('path'))
+
+    def extractPlus7Stream(self, url):
+        video_lang = url.split('/')[-3]
+        info = self.grep_webpage(
+            url,
+            r'param name="movie".*?videorefFileUrl=(http[^\'"&]*)',
+            0,
+            [
+                (1, 'url', u'ERROR: Invalid URL: %s' % url)
+            ]
+        )
+        next_url = compat_urllib_parse.unquote(info.get('url'))
+        info = self.grep_webpage(
+            next_url,
+            r'
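grep_webpage, used throughout ArteTvIE, fetches a page, applies a single regex, and returns the requested groups as a dict keyed by the names in matchTuples, reporting a per-group error message when a group is missing. (Note that extractLiveStream, as committed above, assembles video_url but never returns it.) A standalone sketch of the helper's contract, with the HTTP fetch and downloader plumbing stripped out and an invented input page:

    import re

    def grep_webpage(page, regex, regexFlags, matchTuples):
        # Same contract as ArteTvIE.grep_webpage, minus the fetch and the
        # self._downloader error reporting.
        mobj = re.search(regex, page, regexFlags)
        if mobj is None:
            raise ValueError('regex did not match')
        info = {}
        for (i, key, err) in matchTuples:
            if mobj.group(i) is None:
                raise ValueError(err)
            info[key] = mobj.group(i)
        return info

    page = 'param name="movie" value="player.swf" videorefFileUrl=http%3A//example.invalid/ref.xml&'
    print(grep_webpage(page, r'videorefFileUrl=(http[^\'"&]*)', 0,
                       [(1, 'url', 'could not extract url')]))
    # -> {'url': 'http%3A//example.invalid/ref.xml'}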