Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-02 06:27:21 +00:00)

Merge branch 'master' into fix-dailywire
Commit 2db43bdb0e

.github/PULL_REQUEST_TEMPLATE.md (1 line changed)

```diff
@@ -28,7 +28,6 @@ # PLEASE FOLLOW THE GUIDE BELOW
 ### Before submitting a *pull request* make sure you have:
 - [ ] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
 - [ ] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
-- [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
 
 ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
 - [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)
```

.github/workflows/build.yml (42 lines changed)

```diff
@@ -237,7 +237,7 @@ jobs:
   macos:
     needs: process
     if: inputs.macos
-    runs-on: macos-11
+    runs-on: macos-12
 
     steps:
       - uses: actions/checkout@v4
```

```diff
@@ -260,11 +260,23 @@ jobs:
               --pre -d curl_cffi_whls \
               -r requirements.txt
           done
+          ( # Overwrite x86_64-only libs with fat/universal2 libs or else Pyinstaller will do the opposite
+            # See https://github.com/yt-dlp/yt-dlp/pull/10069
+            cd curl_cffi_whls
+            mkdir -p curl_cffi/.dylibs
+            python_libdir=$(python3 -c 'import sys; from pathlib import Path; print(Path(sys.path[1]).parent)')
+            for dylib in lib{ssl,crypto}.3.dylib; do
+              cp "${python_libdir}/${dylib}" "curl_cffi/.dylibs/${dylib}"
+              for wheel in curl_cffi*macos*x86_64.whl; do
+                zip "${wheel}" "curl_cffi/.dylibs/${dylib}"
+              done
+            done
+          )
           python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/curl_cffi*.whl -w curl_cffi_universal2
           python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/cffi*.whl -w curl_cffi_universal2
           cd curl_cffi_universal2
-          for wheel in *cffi*.whl; do mv -n -- "${wheel}" "${wheel/x86_64/universal2}"; done
-          python3 -m pip install -U --user *cffi*.whl
+          for wheel in ./*cffi*.whl; do mv -n -- "${wheel}" "${wheel/x86_64/universal2}"; done
+          python3 -m pip install -U --user ./*cffi*.whl
 
       - name: Prepare
         run: |
```

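The parenthesized block added above copies the Python framework's fat `libssl`/`libcrypto` into each x86_64 wheel before `delocate_fuse` merges the architecture-specific wheels. A quick way one might sanity-check the fused result (a sketch with hypothetical scratch paths, not part of the workflow):

```shell
# Extract the bundled dylibs from a fused wheel and confirm both
# architectures are present in the resulting universal2 binary
unzip -o curl_cffi_universal2/curl_cffi-*universal2.whl 'curl_cffi/.dylibs/*' -d /tmp/wheelcheck
lipo -archs /tmp/wheelcheck/curl_cffi/.dylibs/libssl.3.dylib   # expect: x86_64 arm64
```
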
```diff
@@ -311,7 +323,7 @@ jobs:
           # Hack to get the latest patch version. Uncomment if needed
           #brew install python@3.10
           #export PYTHON_VERSION=$( $(brew --prefix)/opt/python@3.10/bin/python3 --version | cut -d ' ' -f 2 )
-          curl https://www.python.org/ftp/python/${PYTHON_VERSION}/python-${PYTHON_VERSION}-macos11.pkg -o "python.pkg"
+          curl "https://www.python.org/ftp/python/${PYTHON_VERSION}/python-${PYTHON_VERSION}-macos11.pkg" -o "python.pkg"
           sudo installer -pkg python.pkg -target /
           python3 --version
       - name: Install Requirements
```

```diff
@@ -360,8 +372,8 @@ jobs:
       - name: Install Requirements
         run: |  # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
           python devscripts/install_deps.py -o --include build
-          python devscripts/install_deps.py --include py2exe --include curl-cffi
-          python -m pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-5.8.0-py3-none-any.whl"
+          python devscripts/install_deps.py --include curl-cffi
+          python -m pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-6.7.0-py3-none-any.whl"
 
       - name: Prepare
         run: |
```

```diff
@@ -369,12 +381,20 @@ jobs:
           python devscripts/make_lazy_extractors.py
       - name: Build
         run: |
-          python -m bundle.py2exe
-          Move-Item ./dist/yt-dlp.exe ./dist/yt-dlp_min.exe
           python -m bundle.pyinstaller
           python -m bundle.pyinstaller --onedir
+          Move-Item ./dist/yt-dlp.exe ./dist/yt-dlp_real.exe
           Compress-Archive -Path ./dist/yt-dlp/* -DestinationPath ./dist/yt-dlp_win.zip
 
+      - name: Install Requirements (py2exe)
+        run: |
+          python devscripts/install_deps.py --include py2exe
+      - name: Build (py2exe)
+        run: |
+          python -m bundle.py2exe
+          Move-Item ./dist/yt-dlp.exe ./dist/yt-dlp_min.exe
+          Move-Item ./dist/yt-dlp_real.exe ./dist/yt-dlp.exe
+
       - name: Verify --update-to
         if: vars.UPDATE_TO_VERIFICATION
         run: |
```

```diff
@@ -413,7 +433,7 @@ jobs:
         run: |
           python devscripts/install_deps.py -o --include build
           python devscripts/install_deps.py
-          python -m pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-5.8.0-py3-none-any.whl"
+          python -m pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-6.7.0-py3-none-any.whl"
 
       - name: Prepare
         run: |
```

```diff
@@ -467,8 +487,8 @@ jobs:
         run: |
           cd ./artifact/
           # make sure SHA sums are also printed to stdout
-          sha256sum * | tee ../SHA2-256SUMS
-          sha512sum * | tee ../SHA2-512SUMS
+          sha256sum -- * | tee ../SHA2-256SUMS
+          sha512sum -- * | tee ../SHA2-512SUMS
 
       - name: Make Update spec
         run: |
```

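The `--` added to the checksum invocations is the conventional end-of-options marker; it keeps an artifact whose name begins with a dash from being parsed as a flag. A minimal illustration (hypothetical filename, not from the workflow):

```shell
touch ./-c        # a filename that looks like an option
sha256sum *       # '-c' is consumed as the --check flag and the command fails
sha256sum -- *    # after '--', every expanded glob is treated as a filename
```
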
.github/workflows/core.yml (2 lines changed)

```diff
@@ -53,7 +53,7 @@ jobs:
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install test requirements
-        run: python3 ./devscripts/install_deps.py --include dev --include curl-cffi
+        run: python3 ./devscripts/install_deps.py --include test --include curl-cffi
       - name: Run tests
         continue-on-error: False
         run: |
```

.github/workflows/quick-test.yml (16 lines changed)

```diff
@@ -15,13 +15,13 @@ jobs:
         with:
           python-version: '3.8'
       - name: Install test requirements
-        run: python3 ./devscripts/install_deps.py --include dev
+        run: python3 ./devscripts/install_deps.py --include test
       - name: Run tests
         run: |
           python3 -m yt_dlp -v || true
           python3 ./devscripts/run_tests.py core
-  flake8:
-    name: Linter
+  check:
+    name: Code check
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
     runs-on: ubuntu-latest
     steps:
```

```diff
@@ -29,9 +29,11 @@ jobs:
       - uses: actions/setup-python@v5
         with:
           python-version: '3.8'
-      - name: Install flake8
-        run: python3 ./devscripts/install_deps.py -o --include dev
+      - name: Install dev dependencies
+        run: python3 ./devscripts/install_deps.py -o --include static-analysis
       - name: Make lazy extractors
         run: python3 ./devscripts/make_lazy_extractors.py
-      - name: Run flake8
-        run: flake8 .
+      - name: Run ruff
+        run: ruff check --output-format github .
+      - name: Run autopep8
+        run: autopep8 --diff .
```

.gitignore (2 lines changed)

```diff
@@ -67,7 +67,7 @@ cookies
 # Python
 *.pyc
 *.pyo
-.pytest_cache
+.*_cache
 wine-py2exe/
 py2exe.log
 build/
```

.pre-commit-config.yaml (new file, 14 lines)

```diff
@@ -0,0 +1,14 @@
+repos:
+- repo: local
+  hooks:
+  - id: linter
+    name: Apply linter fixes
+    entry: ruff check --fix .
+    language: system
+    types: [python]
+    require_serial: true
+  - id: format
+    name: Apply formatting fixes
+    entry: autopep8 --in-place .
+    language: system
+    types: [python]
```

.pre-commit-hatch.yaml (new file, 9 lines)

```diff
@@ -0,0 +1,9 @@
+repos:
+- repo: local
+  hooks:
+  - id: fix
+    name: Apply code fixes
+    entry: hatch fmt
+    language: system
+    types: [python]
+    require_serial: true
```

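Both new files define local hooks only; which one is active depends on how the git hook is installed. Presumably the hatch variant is selected by pointing `pre-commit` at the alternate config file, along these lines (a sketch, not text from this commit):

```shell
# Default: installs the git hook against .pre-commit-config.yaml (ruff + autopep8)
pre-commit install

# Hatch-based variant: same hook mechanism, but running `hatch fmt` instead
pre-commit install --config .pre-commit-hatch.yaml
```
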
CONTRIBUTING.md

````diff
@@ -134,18 +134,53 @@ ### Is the website primarily used for piracy?
 
 # DEVELOPER INSTRUCTIONS
 
-Most users do not need to build yt-dlp and can [download the builds](https://github.com/yt-dlp/yt-dlp/releases) or get them via [the other installation methods](README.md#installation).
+Most users do not need to build yt-dlp and can [download the builds](https://github.com/yt-dlp/yt-dlp/releases), get them via [the other installation methods](README.md#installation) or directly run it using `python -m yt_dlp`.
 
-To run yt-dlp as a developer, you don't need to build anything either. Simply execute
+`yt-dlp` uses [`hatch`](<https://hatch.pypa.io>) as a project management tool.
+You can easily install it using [`pipx`](<https://pipx.pypa.io>) via `pipx install hatch`, or else via `pip` or your package manager of choice. Make sure you are using at least version `1.10.0`, otherwise some functionality might not work as expected.
 
-    python3 -m yt_dlp
+If you plan on contributing to `yt-dlp`, best practice is to start by running the following command:
 
-To run all the available core tests, use:
+```shell
+$ hatch run setup
+```
 
-    python3 devscripts/run_tests.py
+The above command will install a `pre-commit` hook so that required checks/fixes (linting, formatting) will run automatically before each commit. If any code needs to be linted or formatted, then the commit will be blocked and the necessary changes will be made; you should review all edits and re-commit the fixed version.
+
+After this you can use `hatch shell` to enable a virtual environment that has `yt-dlp` and its development dependencies installed.
+
+In addition, the following script commands can be used to run simple tasks such as linting or testing (without having to run `hatch shell` first):
+* `hatch fmt`: Automatically fix linter violations and apply required code formatting changes
+    * See `hatch fmt --help` for more info
+* `hatch test`: Run extractor or core tests
+    * See `hatch test --help` for more info
 
 See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
 
+While it is strongly recommended to use `hatch` for yt-dlp development, if you are unable to do so, alternatively you can manually create a virtual environment and use the following commands:
+
+```shell
+# To only install development dependencies:
+$ python -m devscripts.install_deps --include dev
+
+# Or, for an editable install plus dev dependencies:
+$ python -m pip install -e ".[default,dev]"
+
+# To setup the pre-commit hook:
+$ pre-commit install
+
+# To be used in place of `hatch test`:
+$ python -m devscripts.run_tests
+
+# To be used in place of `hatch fmt`:
+$ ruff check --fix .
+$ autopep8 --in-place .
+
+# To only check code instead of applying fixes:
+$ ruff check .
+$ autopep8 --diff .
+```
+
 If you want to create a build of yt-dlp yourself, you can follow the instructions [here](README.md#compile).
````

````diff
@@ -165,12 +200,16 @@ ## Adding support for a new site
 1. [Fork this repository](https://github.com/yt-dlp/yt-dlp/fork)
 1. Check out the source code with:
 
-        git clone git@github.com:YOUR_GITHUB_USERNAME/yt-dlp.git
+    ```shell
+    $ git clone git@github.com:YOUR_GITHUB_USERNAME/yt-dlp.git
+    ```
 
 1. Start a new git branch with
 
-        cd yt-dlp
-        git checkout -b yourextractor
+    ```shell
+    $ cd yt-dlp
+    $ git checkout -b yourextractor
+    ```
 
 1. Start with this simple template and save it to `yt_dlp/extractor/yourextractor.py`:
 
````

````diff
@@ -217,21 +256,27 @@ ## Adding support for a new site
             # TODO more properties (see yt_dlp/extractor/common.py)
         }
     ```
-1. Add an import in [`yt_dlp/extractor/_extractors.py`](yt_dlp/extractor/_extractors.py). Note that the class name must end with `IE`.
-1. Run `python3 devscripts/run_tests.py YourExtractor`. This *may fail* at first, but you can continually re-run it until you're done. Upon failure, it will output the missing fields and/or correct values which you can copy. If you decide to add more than one test, the tests will then be named `YourExtractor`, `YourExtractor_1`, `YourExtractor_2`, etc. Note that tests with an `only_matching` key in the test's dict are not included in the count. You can also run all the tests in one go with `YourExtractor_all`
+1. Add an import in [`yt_dlp/extractor/_extractors.py`](yt_dlp/extractor/_extractors.py). Note that the class name must end with `IE`. Also note that when adding a parenthesized import group, the last import in the group must have a trailing comma in order for this formatting to be respected by our code formatter.
+1. Run `hatch test YourExtractor`. This *may fail* at first, but you can continually re-run it until you're done. Upon failure, it will output the missing fields and/or correct values which you can copy. If you decide to add more than one test, the tests will then be named `YourExtractor`, `YourExtractor_1`, `YourExtractor_2`, etc. Note that tests with an `only_matching` key in the test's dict are not included in the count. You can also run all the tests in one go with `YourExtractor_all`
 1. Make sure you have at least one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the particular test is disabled from running.
 1. Have a look at [`yt_dlp/extractor/common.py`](yt_dlp/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](yt_dlp/extractor/common.py#L119-L440). Add tests and code for as many as you want.
-1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
+1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions), passes [ruff](https://docs.astral.sh/ruff/tutorial/#getting-started) code checks and is properly formatted:
 
-        $ flake8 yt_dlp/extractor/yourextractor.py
+    ```shell
+    $ hatch fmt --check
+    ```
+
+    You can use `hatch fmt` to automatically fix problems. Rules that the linter/formatter enforces should not be disabled with `# noqa` unless a maintainer requests it. The only exception allowed is for old/printf-style string formatting in GraphQL query templates (use `# noqa: UP031`).
 
 1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.8 and above. Backward compatibility is not required for even older versions of Python.
 1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
 
-        $ git add yt_dlp/extractor/_extractors.py
-        $ git add yt_dlp/extractor/yourextractor.py
-        $ git commit -m '[yourextractor] Add extractor'
-        $ git push origin yourextractor
+    ```shell
+    $ git add yt_dlp/extractor/_extractors.py
+    $ git add yt_dlp/extractor/yourextractor.py
+    $ git commit -m '[yourextractor] Add extractor'
+    $ git push origin yourextractor
+    ```
 
 1. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
````

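A few of the notes in the updated tutorial above are easier to see in code. The snippets below are illustrative sketches with hypothetical names, not text from this commit.

The trailing-comma rule for parenthesized import groups in `_extractors.py`:

```python
# Hypothetical import group: the trailing comma after the last entry
# keeps the formatter from collapsing the group onto a single line
from .yourextractor import (
    YourExtractorIE,
    YourExtractorPlaylistIE,
)
```

A test entry that stays disabled but documents why, via the `skip` parameter:

```python
# Hypothetical test dict for an extractor whose videos need a login
_TESTS = [{
    'url': 'https://yourextractor.com/watch/42',
    'info_dict': {
        'id': '42',
        'ext': 'mp4',
        'title': 'Video title goes here',
    },
    'skip': 'Requires an account; not accessible to automated testing',
}]
```

And the one tolerated `# noqa` case, printf-style formatting inside a GraphQL query template:

```python
# Hypothetical GraphQL template; UP031 (printf-style formatting) is
# suppressed only because %-formatting keeps the braces unescaped
query = '''{
  video(id: "%s") {
    title
  }
}''' % video_id  # noqa: UP031
```
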
CONTRIBUTORS (21 lines changed)

```diff
@@ -610,3 +610,24 @@ Offert4324
 sta1us
 Tomoka1
 trwstin
+alexhuot1
+clienthax
+DaPotato69
+emqi
+hugohaa
+imanoreotwe
+JakeFinley96
+lostfictions
+minamotorin
+ocococococ
+Podiumnoche
+RasmusAntons
+roeniss
+shoxie007
+Szpachlarz
+The-MAGI
+TuxCoder
+voidful
+vtexier
+WyohKnott
+trueauracoral
```

Changelog.md (121 lines changed)

```diff
@@ -4,6 +4,127 @@ # Changelog
 # To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
 -->
 
+### 2024.05.27
+
+#### Extractor changes
+- [Fix parsing of base URL in SMIL manifest](https://github.com/yt-dlp/yt-dlp/commit/26603d0b34898818992bee4598e0607c07059511) ([#9225](https://github.com/yt-dlp/yt-dlp/issues/9225)) by [seproDev](https://github.com/seproDev)
+- **peertube**: [Support livestreams](https://github.com/yt-dlp/yt-dlp/commit/12b248ce60be1aa1362edd839d915bba70dbee4b) ([#10044](https://github.com/yt-dlp/yt-dlp/issues/10044)) by [bashonly](https://github.com/bashonly), [trueauracoral](https://github.com/trueauracoral)
+- **piksel**: [Update domain](https://github.com/yt-dlp/yt-dlp/commit/ae2194e1dd4a99d32eb3cab7c48a0ff03101ef3b) ([#9223](https://github.com/yt-dlp/yt-dlp/issues/9223)) by [seproDev](https://github.com/seproDev)
+- **tiktok**: user: [Fix extraction loop](https://github.com/yt-dlp/yt-dlp/commit/c53c2e40fde8f2e15c7c62f8ca1a5d9e90ddc079) ([#10035](https://github.com/yt-dlp/yt-dlp/issues/10035)) by [bashonly](https://github.com/bashonly)
+
+#### Misc. changes
+- **cleanup**: Miscellaneous: [5e3e19c](https://github.com/yt-dlp/yt-dlp/commit/5e3e19c93c52830da98d9d1ed84ea7a559efefbd) by [bashonly](https://github.com/bashonly)
+
+### 2024.05.26
+
+#### Core changes
+- [Better warning when requested subs format not found](https://github.com/yt-dlp/yt-dlp/commit/7e4259dff0b681a3f0e8a930799ce0394328c86e) ([#9873](https://github.com/yt-dlp/yt-dlp/issues/9873)) by [DaPotato69](https://github.com/DaPotato69)
+- [Merged with youtube-dl a08f2b7](https://github.com/yt-dlp/yt-dlp/commit/a4da9db87b6486b270c15dfa07ab5bfedc83f6bd) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
+- [Warn if lack of ffmpeg alters format selection](https://github.com/yt-dlp/yt-dlp/commit/96da9525043f78aca4544d01761b13b2140e9ae6) ([#9805](https://github.com/yt-dlp/yt-dlp/issues/9805)) by [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
+- **cookies**
+    - [Add `--cookies-from-browser` support for Whale](https://github.com/yt-dlp/yt-dlp/commit/dd9ad97b1fbdd36c086b8ba82328a4d954f78f8e) ([#9649](https://github.com/yt-dlp/yt-dlp/issues/9649)) by [roeniss](https://github.com/roeniss)
+    - [Get chrome session cookies with `--cookies-from-browser`](https://github.com/yt-dlp/yt-dlp/commit/f1f158976e38d38a260762accafe7bbe6d451151) ([#9747](https://github.com/yt-dlp/yt-dlp/issues/9747)) by [StefanLobbenmeier](https://github.com/StefanLobbenmeier)
+- **windows**: [Improve shell quoting and tests](https://github.com/yt-dlp/yt-dlp/commit/64766459e37451b665c1464073c28361fbcf1c25) ([#9802](https://github.com/yt-dlp/yt-dlp/issues/9802)) by [Grub4K](https://github.com/Grub4K) (With fixes in [7e26bd5](https://github.com/yt-dlp/yt-dlp/commit/7e26bd53f9c5893518fde81dfd0079ec08dd841e))
+
+#### Extractor changes
+- [Add POST data hash to `--write-pages` filenames](https://github.com/yt-dlp/yt-dlp/commit/61b17437dc14a1c7e90ff48a6198df77828c6df4) ([#9879](https://github.com/yt-dlp/yt-dlp/issues/9879)) by [minamotorin](https://github.com/minamotorin) (With fixes in [c999bac](https://github.com/yt-dlp/yt-dlp/commit/c999bac02c5a4f755b2a82488a975e91c988ffd8) by [bashonly](https://github.com/bashonly))
+- [Make `_search_nextjs_data` non fatal](https://github.com/yt-dlp/yt-dlp/commit/3ee1194288981c4f2c4abd8315326de0c424d2ce) ([#8937](https://github.com/yt-dlp/yt-dlp/issues/8937)) by [Grub4K](https://github.com/Grub4K)
+- **afreecatv**: live: [Add `cdn` extractor-arg](https://github.com/yt-dlp/yt-dlp/commit/315b3544296bb83012e20ee3af9d3cbf5600dd1c) ([#9666](https://github.com/yt-dlp/yt-dlp/issues/9666)) by [bashonly](https://github.com/bashonly)
+- **alura**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/fc2879ecb05aaad36869609d154e4321362c1f63) ([#9658](https://github.com/yt-dlp/yt-dlp/issues/9658)) by [hugohaa](https://github.com/hugohaa)
+- **artetv**: [Label forced subtitles](https://github.com/yt-dlp/yt-dlp/commit/7b5674949fd03a33b47b67b31d56a5adf1c48c91) ([#9945](https://github.com/yt-dlp/yt-dlp/issues/9945)) by [vtexier](https://github.com/vtexier)
+- **bbc**: [Fix and extend extraction](https://github.com/yt-dlp/yt-dlp/commit/7975ddf245d22af034d5b983eeb1c5ec6c2ce053) ([#9705](https://github.com/yt-dlp/yt-dlp/issues/9705)) by [dirkf](https://github.com/dirkf), [kylegustavo](https://github.com/kylegustavo), [pukkandan](https://github.com/pukkandan)
+- **bilibili**: [Fix `--geo-verification-proxy` support](https://github.com/yt-dlp/yt-dlp/commit/2338827072dacab0f15348b70aec8685feefc8d1) ([#9817](https://github.com/yt-dlp/yt-dlp/issues/9817)) by [fireattack](https://github.com/fireattack)
+- **bilibilispacevideo**
+    - [Better error message](https://github.com/yt-dlp/yt-dlp/commit/06d52c87314e0bbc16c43c405090843885577b88) ([#9839](https://github.com/yt-dlp/yt-dlp/issues/9839)) by [fireattack](https://github.com/fireattack)
+    - [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/4cc99d7b6cce8b39506ead01407445d576b63ee4) ([#9905](https://github.com/yt-dlp/yt-dlp/issues/9905)) by [c-basalt](https://github.com/c-basalt)
+- **boosty**: [Add cookies support](https://github.com/yt-dlp/yt-dlp/commit/145dc6f6563e80d2da1b3e9aea2ffa795b71622c) ([#9522](https://github.com/yt-dlp/yt-dlp/issues/9522)) by [RasmusAntons](https://github.com/RasmusAntons)
+- **brilliantpala**: [Fix login](https://github.com/yt-dlp/yt-dlp/commit/eead3bbc01f6529862bdad1f0b2adeabda4f006e) ([#9788](https://github.com/yt-dlp/yt-dlp/issues/9788)) by [pzhlkj6612](https://github.com/pzhlkj6612)
+- **canalalpha**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/00a9f2e1f7fa69499221f2e8dd73a08efeef79bc) ([#9675](https://github.com/yt-dlp/yt-dlp/issues/9675)) by [kclauhk](https://github.com/kclauhk)
+- **cbc.ca**: player: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/c8bf48f3a8fa29587e7c73ef5a7710385a5ea725) ([#9866](https://github.com/yt-dlp/yt-dlp/issues/9866)) by [carusocr](https://github.com/carusocr)
+- **cda**: [Fix age-gated web extraction](https://github.com/yt-dlp/yt-dlp/commit/6d8a53d870ff6795f509085bfbf3981417999038) ([#9939](https://github.com/yt-dlp/yt-dlp/issues/9939)) by [dirkf](https://github.com/dirkf), [emqi](https://github.com/emqi), [Podiumnoche](https://github.com/Podiumnoche), [Szpachlarz](https://github.com/Szpachlarz)
+- **commonmistakes**: [Raise error on blob URLs](https://github.com/yt-dlp/yt-dlp/commit/98d71d8c5e5dab08b561ee6f137e968d2a004262) ([#9897](https://github.com/yt-dlp/yt-dlp/issues/9897)) by [seproDev](https://github.com/seproDev)
+- **crunchyroll**
+    - [Always make metadata available](https://github.com/yt-dlp/yt-dlp/commit/cb2fb4a643949322adba561ca73bcba3221ec0c5) ([#9772](https://github.com/yt-dlp/yt-dlp/issues/9772)) by [bashonly](https://github.com/bashonly)
+    - [Fix auth and remove cookies support](https://github.com/yt-dlp/yt-dlp/commit/ff38a011d57b763f3a69bebd25a5dc9044a717ce) ([#9749](https://github.com/yt-dlp/yt-dlp/issues/9749)) by [bashonly](https://github.com/bashonly)
+    - [Fix stream extraction](https://github.com/yt-dlp/yt-dlp/commit/f2816634e3be88fe158b342ee33918de3c272a54) ([#10005](https://github.com/yt-dlp/yt-dlp/issues/10005)) by [bashonly](https://github.com/bashonly)
+    - [Support browser impersonation](https://github.com/yt-dlp/yt-dlp/commit/5904853ae5788509fdc4892cb7ecdfa9ae7f78e6) ([#9857](https://github.com/yt-dlp/yt-dlp/issues/9857)) by [bashonly](https://github.com/bashonly)
+- **dangalplay**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/0d067e77c3f5527946fb0c22ee1c7011994cba40) ([#10021](https://github.com/yt-dlp/yt-dlp/issues/10021)) by [bashonly](https://github.com/bashonly)
+- **discoveryplus**: [Fix dmax.de and related extractors](https://github.com/yt-dlp/yt-dlp/commit/90d2da311bbb5dc06f385ee428c7e4590936e995) ([#10020](https://github.com/yt-dlp/yt-dlp/issues/10020)) by [bashonly](https://github.com/bashonly)
+- **eplus**: [Handle URLs without videos](https://github.com/yt-dlp/yt-dlp/commit/351dc0bc334c4e1b5f00c152818c3ec0ed71f788) ([#9855](https://github.com/yt-dlp/yt-dlp/issues/9855)) by [pzhlkj6612](https://github.com/pzhlkj6612)
+- **europarlwebstream**: [Support new URL format](https://github.com/yt-dlp/yt-dlp/commit/800a43983e5fb719526ce4cb3956216085c63268) ([#9647](https://github.com/yt-dlp/yt-dlp/issues/9647)) by [seproDev](https://github.com/seproDev), [voidful](https://github.com/voidful)
+- **facebook**: [Fix DASH formats extraction](https://github.com/yt-dlp/yt-dlp/commit/e3b42d8b1b8bcfff7ba146c19fc3f6f6ba843cea) ([#9734](https://github.com/yt-dlp/yt-dlp/issues/9734)) by [bashonly](https://github.com/bashonly)
+- **godresource**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/65e709d23530959075816e966c42179ad46e8e3b) ([#9629](https://github.com/yt-dlp/yt-dlp/issues/9629)) by [HobbyistDev](https://github.com/HobbyistDev)
+- **googledrive**: [Fix formats extraction](https://github.com/yt-dlp/yt-dlp/commit/85ec2a337ac325cf6427cbafd56f0a034c1a5218) ([#9908](https://github.com/yt-dlp/yt-dlp/issues/9908)) by [WyohKnott](https://github.com/WyohKnott)
+- **hearthisat**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/5bbfdb7c999b22f1aeca0c3489c167d6eb73013b) ([#9949](https://github.com/yt-dlp/yt-dlp/issues/9949)) by [bohwaz](https://github.com/bohwaz), [seproDev](https://github.com/seproDev)
+- **hytale**: [Use `CloudflareStreamIE` explicitly](https://github.com/yt-dlp/yt-dlp/commit/31b417e1d1ccc67d5c027bf8878f483dc34cb118) ([#9672](https://github.com/yt-dlp/yt-dlp/issues/9672)) by [llamasblade](https://github.com/llamasblade)
+- **instagram**: [Support `/reels/` URLs](https://github.com/yt-dlp/yt-dlp/commit/06cb0638392b607b47d3c2ac48eb2ebecb0f060d) ([#9539](https://github.com/yt-dlp/yt-dlp/issues/9539)) by [amir16yp](https://github.com/amir16yp)
+- **jiocinema**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/1463945ae5fb05986a0bd1aa02e41d1a08d93a02) ([#10026](https://github.com/yt-dlp/yt-dlp/issues/10026)) by [bashonly](https://github.com/bashonly)
+- **jiosaavn**: [Extract via API and fix playlists](https://github.com/yt-dlp/yt-dlp/commit/0c21c53885cf03f4040467ae8c44d7ff51016116) ([#9656](https://github.com/yt-dlp/yt-dlp/issues/9656)) by [bashonly](https://github.com/bashonly)
+- **lci**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/5a2eebc76770fca91ffabeff658d560f716fec80) ([#10025](https://github.com/yt-dlp/yt-dlp/issues/10025)) by [ocococococ](https://github.com/ocococococ)
+- **mixch**: [Extract comments](https://github.com/yt-dlp/yt-dlp/commit/b38018b781b062d5169d104ab430489aef8e7f1e) ([#9860](https://github.com/yt-dlp/yt-dlp/issues/9860)) by [pzhlkj6612](https://github.com/pzhlkj6612)
+- **moviepilot**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/296df0da1d38a44d34c99b60a18066c301774537) ([#9366](https://github.com/yt-dlp/yt-dlp/issues/9366)) by [panatexxa](https://github.com/panatexxa)
+- **netease**: program: [Improve `--no-playlist` message](https://github.com/yt-dlp/yt-dlp/commit/73f12119b52d98281804b0c072b2ed6aa841ec88) ([#9488](https://github.com/yt-dlp/yt-dlp/issues/9488)) by [pzhlkj6612](https://github.com/pzhlkj6612)
+- **nfb**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/0a1a8e3005f66c44bf67633dccd4df19c3fccd1a) ([#9650](https://github.com/yt-dlp/yt-dlp/issues/9650)) by [rrgomes](https://github.com/rrgomes)
+- **ntslive**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/be7db1a5a8c483726c511c30ea4689cbb8b27962) ([#9641](https://github.com/yt-dlp/yt-dlp/issues/9641)) by [lostfictions](https://github.com/lostfictions)
+- **orf**: on: [Improve extraction](https://github.com/yt-dlp/yt-dlp/commit/0dd53faeca2ba0ce138e4092d07b5f2dbf2422f9) ([#9677](https://github.com/yt-dlp/yt-dlp/issues/9677)) by [TuxCoder](https://github.com/TuxCoder)
+- **orftvthek**: [Remove extractor](https://github.com/yt-dlp/yt-dlp/commit/3779f2a307ba3ef1d28e107cdd71b221dfb4eb36) ([#10011](https://github.com/yt-dlp/yt-dlp/issues/10011)) by [seproDev](https://github.com/seproDev)
+- **patreon**
+    - [Extract multiple embeds](https://github.com/yt-dlp/yt-dlp/commit/036e0d92c6052465673d459678322ea03e61483d) ([#9850](https://github.com/yt-dlp/yt-dlp/issues/9850)) by [bashonly](https://github.com/bashonly)
+    - [Fix Vimeo embed extraction](https://github.com/yt-dlp/yt-dlp/commit/c9ce57d9bf51541da2381d99bc096a9d0ddf1f27) ([#9712](https://github.com/yt-dlp/yt-dlp/issues/9712)) by [bashonly](https://github.com/bashonly)
+- **piapro**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/3ba8de62d61d782256f5c1e9939a0762039657de) ([#9311](https://github.com/yt-dlp/yt-dlp/issues/9311)) by [FinnRG](https://github.com/FinnRG), [seproDev](https://github.com/seproDev)
+- **pornhub**: [Fix login by email address](https://github.com/yt-dlp/yt-dlp/commit/518c1afc1592cae3e4eb39dc646b5bc059333112) ([#9914](https://github.com/yt-dlp/yt-dlp/issues/9914)) by [feederbox826](https://github.com/feederbox826)
+- **qub**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/6b54cccdcb892bca3e55993480d8b86f1c7e6da6) ([#7019](https://github.com/yt-dlp/yt-dlp/issues/7019)) by [alexhuot1](https://github.com/alexhuot1), [dirkf](https://github.com/dirkf)
+- **reddit**: [Fix subtitles extraction](https://github.com/yt-dlp/yt-dlp/commit/82f4f4444e26daf35b7302c406fe2312f78f619e) ([#10006](https://github.com/yt-dlp/yt-dlp/issues/10006)) by [kclauhk](https://github.com/kclauhk)
+- **soundcloud**
+    - [Add `formats` extractor-arg](https://github.com/yt-dlp/yt-dlp/commit/beaf832c7a9d57833f365ce18f6115b88071b296) ([#10004](https://github.com/yt-dlp/yt-dlp/issues/10004)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
+    - [Extract `genres`](https://github.com/yt-dlp/yt-dlp/commit/231c2eacc41b06b65c63edf94c0d04768a5da607) ([#9821](https://github.com/yt-dlp/yt-dlp/issues/9821)) by [bashonly](https://github.com/bashonly)
+- **taptap**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/63b569bc5e7d461753637a20ad84a575adee4c0a) ([#9776](https://github.com/yt-dlp/yt-dlp/issues/9776)) by [c-basalt](https://github.com/c-basalt)
+- **tele5**: [Overhaul extractor](https://github.com/yt-dlp/yt-dlp/commit/c92e4e625e9e6bbbbf8e3b20c3e7ebe57c16072d) ([#10024](https://github.com/yt-dlp/yt-dlp/issues/10024)) by [bashonly](https://github.com/bashonly)
+- **theatercomplextown**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/8056a3026ed6ec6a6d0ed56fdd7ebcd16e928341) ([#9754](https://github.com/yt-dlp/yt-dlp/issues/9754)) by [bashonly](https://github.com/bashonly)
+- **tiktok**
+    - [Add `device_id` extractor-arg](https://github.com/yt-dlp/yt-dlp/commit/3584b8390bd21c0393a3079eeee71aed56a1c1d8) ([#9951](https://github.com/yt-dlp/yt-dlp/issues/9951)) by [bashonly](https://github.com/bashonly)
+    - [Extract all web formats](https://github.com/yt-dlp/yt-dlp/commit/4ccd73fea0f6f4be343e1ec7f22dd03799addcf8) ([#9960](https://github.com/yt-dlp/yt-dlp/issues/9960)) by [bashonly](https://github.com/bashonly)
+    - [Extract via mobile API only if extractor-arg is passed](https://github.com/yt-dlp/yt-dlp/commit/41ba4a808b597a3afed78c89675a30deb6844450) ([#9938](https://github.com/yt-dlp/yt-dlp/issues/9938)) by [bashonly](https://github.com/bashonly)
+    - [Fix subtitles extraction](https://github.com/yt-dlp/yt-dlp/commit/eef1e9f44ff14c5e65b759bb1eafa3946cdaf719) ([#9961](https://github.com/yt-dlp/yt-dlp/issues/9961)) by [bashonly](https://github.com/bashonly)
+    - collection: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/119d41f27061d220d276a2d38cfc8d873437452a) ([#9986](https://github.com/yt-dlp/yt-dlp/issues/9986)) by [bashonly](https://github.com/bashonly), [imanoreotwe](https://github.com/imanoreotwe)
+    - user: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/347f13dd9bccc2b4db3ea25689410d45d8370ed4) ([#9661](https://github.com/yt-dlp/yt-dlp/issues/9661)) by [bashonly](https://github.com/bashonly)
+- **tv5monde**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/6db96268c521e945d42649607db1574f5d92e082) ([#9143](https://github.com/yt-dlp/yt-dlp/issues/9143)) by [alard](https://github.com/alard), [seproDev](https://github.com/seproDev)
+- **twitter**
+    - [Fix auth for x.com migration](https://github.com/yt-dlp/yt-dlp/commit/3e35aa32c74bc108375be8c8b6b3bfc90dfff1b4) ([#9952](https://github.com/yt-dlp/yt-dlp/issues/9952)) by [bashonly](https://github.com/bashonly)
+    - [Support x.com URLs](https://github.com/yt-dlp/yt-dlp/commit/4813173e4544f125d6f2afc31e600727d761b8dd) ([#9926](https://github.com/yt-dlp/yt-dlp/issues/9926)) by [bashonly](https://github.com/bashonly)
+- **vk**: [Improve format extraction](https://github.com/yt-dlp/yt-dlp/commit/df5c9e733aaba703cf285c0372b6d61629330c82) ([#9885](https://github.com/yt-dlp/yt-dlp/issues/9885)) by [seproDev](https://github.com/seproDev)
+- **wrestleuniverse**: [Avoid partial stream formats](https://github.com/yt-dlp/yt-dlp/commit/c4853655cb9a793129280806af643de43c48f4d5) ([#9800](https://github.com/yt-dlp/yt-dlp/issues/9800)) by [bashonly](https://github.com/bashonly)
+- **xiaohongshu**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a2e9031605d87c469be9ce98dbbdf4960b727338) ([#9646](https://github.com/yt-dlp/yt-dlp/issues/9646)) by [HobbyistDev](https://github.com/HobbyistDev)
+- **xvideos**: quickies: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/b207d26f83fb8ab0ce56df74dff43ff583a3264f) ([#9834](https://github.com/yt-dlp/yt-dlp/issues/9834)) by [JakeFinley96](https://github.com/JakeFinley96)
+- **youporn**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/351368cb9a6731b886a58f5a10fd6b302bbe47be) ([#8827](https://github.com/yt-dlp/yt-dlp/issues/8827)) by [The-MAGI](https://github.com/The-MAGI)
+- **youtube**
+    - [Add `mediaconnect` client](https://github.com/yt-dlp/yt-dlp/commit/cf212d0a331aba05c32117573f760cdf3af8c62f) ([#9546](https://github.com/yt-dlp/yt-dlp/issues/9546)) by [clienthax](https://github.com/clienthax)
+    - [Extract upload timestamp if available](https://github.com/yt-dlp/yt-dlp/commit/96a134dea6397a5f2131947c427aac52c8b4e677) ([#9856](https://github.com/yt-dlp/yt-dlp/issues/9856)) by [coletdjnz](https://github.com/coletdjnz)
+    - [Fix comments extraction](https://github.com/yt-dlp/yt-dlp/commit/8e15177b4113c355989881e4e030f695a9b59c3a) ([#9775](https://github.com/yt-dlp/yt-dlp/issues/9775)) by [bbilly1](https://github.com/bbilly1), [jakeogh](https://github.com/jakeogh), [minamotorin](https://github.com/minamotorin), [shoxie007](https://github.com/shoxie007)
+    - [Remove `android` from default clients](https://github.com/yt-dlp/yt-dlp/commit/12d8ea8246fa901de302ff5cc748caddadc82f41) ([#9553](https://github.com/yt-dlp/yt-dlp/issues/9553)) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz)
+- **zenyandex**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/c4b87dd885ee5391e5f481e7c8bd550a7c543623) ([#9813](https://github.com/yt-dlp/yt-dlp/issues/9813)) by [src-tinkerer](https://github.com/src-tinkerer)
+
+#### Networking changes
+- [Add `extensions` attribute to `Response`](https://github.com/yt-dlp/yt-dlp/commit/bec9a59e8ec82c18e3bf9268eaa436793dd52e35) ([#9756](https://github.com/yt-dlp/yt-dlp/issues/9756)) by [bashonly](https://github.com/bashonly)
+- **Request Handler**
+    - requests
+        - [Patch support for `requests` 2.32.2+](https://github.com/yt-dlp/yt-dlp/commit/3f7999533ebe41c2a579d91b4e4cb211cfcd3bc0) ([#9992](https://github.com/yt-dlp/yt-dlp/issues/9992)) by [Grub4K](https://github.com/Grub4K)
+        - [Update to `requests` 2.32.0](https://github.com/yt-dlp/yt-dlp/commit/c36513f1be2ef3d3cec864accbffda1afaa06ffd) ([#9980](https://github.com/yt-dlp/yt-dlp/issues/9980)) by [coletdjnz](https://github.com/coletdjnz)
+
+#### Misc. changes
+- [Add `hatch`, `ruff`, `pre-commit` and improve dev docs](https://github.com/yt-dlp/yt-dlp/commit/e897bd8292a41999cf51dba91b390db5643c72db) ([#7409](https://github.com/yt-dlp/yt-dlp/issues/7409)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
+- **build**
+    - [Migrate `linux_exe` to static musl builds](https://github.com/yt-dlp/yt-dlp/commit/ac817bc83efd939dca3e40c4b527d0ccfc77172b) ([#9811](https://github.com/yt-dlp/yt-dlp/issues/9811)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
+    - [Normalize `curl_cffi` group to `curl-cffi`](https://github.com/yt-dlp/yt-dlp/commit/02483bea1c4dbe1bace8ca4d19700104fbb8a00f) ([#9698](https://github.com/yt-dlp/yt-dlp/issues/9698)) by [bashonly](https://github.com/bashonly) (With fixes in [89f535e](https://github.com/yt-dlp/yt-dlp/commit/89f535e2656964b4061c25a7739d4d6ba0a30568))
+    - [Run `macos_legacy` job on `macos-12`](https://github.com/yt-dlp/yt-dlp/commit/1a366403d9c26b992faa77e00f4d02ead57559e3) ([#9804](https://github.com/yt-dlp/yt-dlp/issues/9804)) by [bashonly](https://github.com/bashonly)
+    - [`macos` job requires `setuptools<70`](https://github.com/yt-dlp/yt-dlp/commit/78c57cc0e0998b8ed90e4306f410aa4be4115cd7) ([#9993](https://github.com/yt-dlp/yt-dlp/issues/9993)) by [bashonly](https://github.com/bashonly)
+- **cleanup**
+    - [Remove questionable extractors](https://github.com/yt-dlp/yt-dlp/commit/01395a34345d1c6ba1b73ca92f94dd200dc45341) ([#9911](https://github.com/yt-dlp/yt-dlp/issues/9911)) by [seproDev](https://github.com/seproDev)
+    - Miscellaneous: [5c019f6](https://github.com/yt-dlp/yt-dlp/commit/5c019f6328ad40d66561eac3c4de0b3cd070d0f6), [ae2af11](https://github.com/yt-dlp/yt-dlp/commit/ae2af1104f80caf2f47544763a33db2c17a3e1de) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
+- **test**
+    - [Add HTTP proxy tests](https://github.com/yt-dlp/yt-dlp/commit/3c7a287e281d9f9a353dce8902ff78a84c24a040) ([#9578](https://github.com/yt-dlp/yt-dlp/issues/9578)) by [coletdjnz](https://github.com/coletdjnz)
+    - [Fix connect timeout test](https://github.com/yt-dlp/yt-dlp/commit/53b4d44f55cca66ac33dab092ef2a30b1164b684) ([#9906](https://github.com/yt-dlp/yt-dlp/issues/9906)) by [coletdjnz](https://github.com/coletdjnz)
+
 ### 2024.04.09
 
 #### Important changes
```

Makefile (11 lines changed)

```diff
@@ -27,7 +27,7 @@ clean-dist:
 	yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS
 clean-cache:
 	find . \( \
-		-type d -name .pytest_cache -o -type d -name __pycache__ -o -name "*.pyc" -o -name "*.class" \
+		-type d -name ".*_cache" -o -type d -name __pycache__ -o -name "*.pyc" -o -name "*.class" \
 	\) -prune -exec rm -rf {} \;
 
 completion-bash: completions/bash/yt-dlp
```

```diff
@@ -70,14 +70,15 @@ uninstall:
 	rm -f $(DESTDIR)$(SHAREDIR)/fish/vendor_completions.d/yt-dlp.fish
 
 codetest:
-	flake8 .
+	ruff check .
+	autopep8 --diff .
 
 test:
-	$(PYTHON) -m pytest
+	$(PYTHON) -m pytest -Werror
 	$(MAKE) codetest
 
 offlinetest: codetest
-	$(PYTHON) -m pytest -k "not download"
+	$(PYTHON) -m pytest -Werror -m "not download"
 
 CODE_FOLDERS_CMD = find yt_dlp -type f -name '__init__.py' | sed 's,/__init__.py,,' | grep -v '/__' | sort
 CODE_FOLDERS != $(CODE_FOLDERS_CMD)
```

```diff
@@ -151,7 +152,7 @@ yt-dlp.tar.gz: all
 	--exclude '*.pyo' \
 	--exclude '*~' \
 	--exclude '__pycache__' \
-	--exclude '.pytest_cache' \
+	--exclude '.*_cache' \
 	--exclude '.git' \
 	-- \
 	README.md supportedsites.md Changelog.md LICENSE \
```

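The `offlinetest` change swaps pytest's `-k` (test-name substring matching) for `-m` (marker expression matching), so deselection now keys off an explicit marker rather than whatever happens to be in a test's name, and `-Werror` additionally promotes warnings to failures. A sketch of how such a marker is used (hypothetical tests, not from the repo):

```python
import pytest

@pytest.mark.download          # deselected by `pytest -m "not download"`
def test_fetches_real_video():
    ...

def test_format_sorting():     # unmarked, still runs offline
    assert sorted([3, 1, 2]) == [1, 2, 3]
```
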
README.md (49 lines changed)

```diff
@@ -108,7 +108,6 @@ #### Alternatives
 [yt-dlp_x86.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_x86.exe)|Windows (Win7 SP1+) standalone x86 (32-bit) binary
 [yt-dlp_min.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_min.exe)|Windows (Win7 SP1+) standalone x64 binary built with `py2exe`<br/> ([Not recommended](#standalone-py2exe-builds-windows))
 [yt-dlp_linux](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux)|Linux standalone x64 binary
-[yt-dlp_linux.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux.zip)|Unpackaged Linux executable (no auto-update)
 [yt-dlp_linux_armv7l](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux_armv7l)|Linux standalone armv7l (32-bit) binary
 [yt-dlp_linux_aarch64](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux_aarch64)|Linux standalone aarch64 (64-bit) binary
 [yt-dlp_win.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_win.zip)|Unpackaged Windows executable (no auto-update)
```

````diff
@@ -170,7 +169,7 @@ # To update to nightly from stable executable/binary:
 yt-dlp --update-to nightly
 
 # To install nightly with pip:
-python3 -m pip install -U --pre yt-dlp[default]
+python3 -m pip install -U --pre "yt-dlp[default]"
 ```
 
 ## DEPENDENCIES
````

```diff
@@ -202,7 +201,7 @@ #### Impersonation
 The following provide support for impersonating browser requests. This may be required for some sites that employ TLS fingerprinting.
 
 * [**curl_cffi**](https://github.com/yifeikong/curl_cffi) (recommended) - Python binding for [curl-impersonate](https://github.com/lwthiker/curl-impersonate). Provides impersonation targets for Chrome, Edge and Safari. Licensed under [MIT](https://github.com/yifeikong/curl_cffi/blob/main/LICENSE)
-    * Can be installed with the `curl-cffi` group, e.g. `pip install yt-dlp[default,curl-cffi]`
+    * Can be installed with the `curl-cffi` group, e.g. `pip install "yt-dlp[default,curl-cffi]"`
     * Currently only included in `yt-dlp.exe` and `yt-dlp_macos` builds
```

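The quoting added around the pip extras here (and in the nightly-install hunk above) is presumably for shells like zsh, where unquoted square brackets are treated as glob patterns and can abort the command with "no matches found":

```shell
pip install yt-dlp[default,curl-cffi]      # may fail under zsh (bracket globbing)
pip install "yt-dlp[default,curl-cffi]"    # quoted: passed to pip verbatim
```
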
```diff
@@ -263,7 +262,7 @@ ### Platform-independent Binary (UNIX)
 
 ### Standalone Py2Exe Builds (Windows)
 
-While we provide the option to build with [py2exe](https://www.py2exe.org), it is recommended to build [using PyInstaller](#standalone-pyinstaller-builds) instead since the py2exe builds **cannot contain `pycryptodomex`/`certifi` and needs VC++14** on the target computer to run.
+While we provide the option to build with [py2exe](https://www.py2exe.org), it is recommended to build [using PyInstaller](#standalone-pyinstaller-builds) instead since the py2exe builds **cannot contain `pycryptodomex`/`certifi`/`requests` and need VC++14** on the target computer to run.
 
 If you wish to build it anyway, install Python (if it is not already installed) and you can run the following commands:
```

```diff
@@ -402,6 +401,9 @@ ## Network Options:
     --impersonate CLIENT[:OS]        Client to impersonate for requests. E.g.
                                      chrome, chrome-110, chrome:windows-10. Pass
                                      --impersonate="" to impersonate any client.
+                                     Note that forcing impersonation for all
+                                     requests may have a detrimental impact on
+                                     download speed and stability
     --list-impersonate-targets       List available clients to impersonate.
     -4, --force-ipv4                 Make all connections via IPv4
     -6, --force-ipv6                 Make all connections via IPv6
```

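Going by the help text above, typical usage of the option looks something like this (hypothetical URL):

```shell
yt-dlp --list-impersonate-targets                            # see what's available
yt-dlp --impersonate chrome:windows-10 "https://example.com/video"
yt-dlp --impersonate="" "https://example.com/video"          # any available client
```
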
```diff
@@ -666,16 +668,17 @@ ## Filesystem Options:
                                      The name of the browser to load cookies
                                      from. Currently supported browsers are:
                                      brave, chrome, chromium, edge, firefox,
-                                     opera, safari, vivaldi. Optionally, the
-                                     KEYRING used for decrypting Chromium cookies
-                                     on Linux, the name/path of the PROFILE to
-                                     load cookies from, and the CONTAINER name
-                                     (if Firefox) ("none" for no container) can
-                                     be given with their respective seperators.
-                                     By default, all containers of the most
-                                     recently accessed profile are used.
-                                     Currently supported keyrings are: basictext,
-                                     gnomekeyring, kwallet, kwallet5, kwallet6
+                                     opera, safari, vivaldi, whale. Optionally,
+                                     the KEYRING used for decrypting Chromium
+                                     cookies on Linux, the name/path of the
+                                     PROFILE to load cookies from, and the
+                                     CONTAINER name (if Firefox) ("none" for no
+                                     container) can be given with their
+                                     respective seperators. By default, all
+                                     containers of the most recently accessed
+                                     profile are used. Currently supported
+                                     keyrings are: basictext, gnomekeyring,
+                                     kwallet, kwallet5, kwallet6
     --no-cookies-from-browser        Do not load cookies from browser (default)
     --cache-dir DIR                  Location in the filesystem where yt-dlp can
                                      store some downloaded information (such as
```

```diff
@@ -1751,7 +1754,7 @@ # Replace all spaces and "_" in title and uploader with a `-`
 
 # EXTRACTOR ARGUMENTS
 
-Some extractors accept additional arguments which can be passed using `--extractor-args KEY:ARGS`. `ARGS` is a `;` (semicolon) separated string of `ARG=VAL1,VAL2`. E.g. `--extractor-args "youtube:player-client=android_embedded,web;include_live_dash" --extractor-args "funimation:version=uncut"`
+Some extractors accept additional arguments which can be passed using `--extractor-args KEY:ARGS`. `ARGS` is a `;` (semicolon) separated string of `ARG=VAL1,VAL2`. E.g. `--extractor-args "youtube:player-client=android_embedded,web;formats=incomplete" --extractor-args "funimation:version=uncut"`
 
 Note: In CLI, `ARG` can use `-` instead of `_`; e.g. `youtube:player-client"` becomes `youtube:player_client"`
 
```

@ -1760,7 +1763,7 @@ # EXTRACTOR ARGUMENTS
|
||||||
#### youtube
|
#### youtube
|
||||||
* `lang`: Prefer translated metadata (`title`, `description` etc) of this language code (case-sensitive). By default, the video primary language metadata is preferred, with a fallback to `en` translated. See [youtube.py](https://github.com/yt-dlp/yt-dlp/blob/c26f9b991a0681fd3ea548d535919cec1fbbd430/yt_dlp/extractor/youtube.py#L381-L390) for list of supported content language codes
|
* `lang`: Prefer translated metadata (`title`, `description` etc) of this language code (case-sensitive). By default, the video primary language metadata is preferred, with a fallback to `en` translated. See [youtube.py](https://github.com/yt-dlp/yt-dlp/blob/c26f9b991a0681fd3ea548d535919cec1fbbd430/yt_dlp/extractor/youtube.py#L381-L390) for list of supported content language codes
|
||||||
* `skip`: One or more of `hls`, `dash` or `translated_subs` to skip extraction of the m3u8 manifests, dash manifests and [auto-translated subtitles](https://github.com/yt-dlp/yt-dlp/issues/4090#issuecomment-1158102032) respectively
|
* `skip`: One or more of `hls`, `dash` or `translated_subs` to skip extraction of the m3u8 manifests, dash manifests and [auto-translated subtitles](https://github.com/yt-dlp/yt-dlp/issues/4090#issuecomment-1158102032) respectively
|
||||||
* `player_client`: Clients to extract video data from. The main clients are `web`, `android` and `ios` with variants `_music`, `_embedded`, `_embedscreen`, `_creator` (e.g. `web_embedded`); and `mweb`, `mweb_embedscreen` and `tv_embedded` (agegate bypass) with no variants. By default, `ios,android,web` is used, but `tv_embedded` and `creator` variants are added as required for age-gated videos. Similarly, the music variants are added for `music.youtube.com` urls. You can use `all` to use all the clients, and `default` for the default clients.
|
* `player_client`: Clients to extract video data from. The main clients are `web`, `ios` and `android`, with variants `_music`, `_embedded`, `_embedscreen`, `_creator` (e.g. `web_embedded`); and `mweb`, `mweb_embedscreen` and `tv_embedded` (agegate bypass) with no variants. By default, `ios,web` is used, but `tv_embedded` and `creator` variants are added as required for age-gated videos. Similarly, the music variants are added for `music.youtube.com` urls. The `android` clients will always be given lowest priority since their formats are broken. You can use `all` to use all the clients, and `default` for the default clients.
|
||||||
* `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause some issues. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) for more details
|
* `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause some issues. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) for more details
|
||||||
* `player_params`: YouTube player parameters to use for player requests. Will overwrite any default ones set by yt-dlp.
|
* `player_params`: YouTube player parameters to use for player requests. Will overwrite any default ones set by yt-dlp.
|
||||||
* `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side)
|
* `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side)
|
||||||
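
Both sides of this hunk describe only the CLI syntax, so a short sketch of the equivalent embedded use may help. It assumes the documented `extractor_args` option of `yt_dlp.YoutubeDL`, where keys are extractor names and each `ARG` maps to a list mirroring the comma-separated CLI values; the URL is only a placeholder:

```python
# Minimal sketch: passing extractor arguments via the Python API instead of
# `--extractor-args youtube:player-client=ios,web;comment-sort=top`
import yt_dlp

ydl_opts = {
    'extractor_args': {
        'youtube': {
            'player_client': ['ios', 'web'],  # values are lists, like the CLI's VAL1,VAL2
            'comment_sort': ['top'],
        },
    },
}

with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
    print(info.get('title'))
```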
@@ -1813,8 +1816,9 @@ #### tiktok
 * `app_name`: Default app name to use with mobile API calls, e.g. `trill`
 * `app_version`: Default app version to use with mobile API calls - should be set along with `manifest_app_version`, e.g. `34.1.2`
 * `manifest_app_version`: Default numeric app version to use with mobile API calls, e.g. `2023401020`
-* `aid`: Default app ID to use with API calls, e.g. `1180`
-* `app_info`: One or more app info strings in the format of `<iid>/[app_name]/[app_version]/[manifest_app_version]/[aid]`, where `iid` is the unique app install ID. `iid` is the only required value; all other values and their `/` separators can be omitted, e.g. `tiktok:app_info=1234567890123456789` or `tiktok:app_info=123,456/trill///1180,789//34.0.1/340001`
+* `aid`: Default app ID to use with mobile API calls, e.g. `1180`
+* `app_info`: Enable mobile API extraction with one or more app info strings in the format of `<iid>/[app_name]/[app_version]/[manifest_app_version]/[aid]`, where `iid` is the unique app install ID. `iid` is the only required value; all other values and their `/` separators can be omitted, e.g. `tiktok:app_info=1234567890123456789` or `tiktok:app_info=123,456/trill///1180,789//34.0.1/340001`
+* `device_id`: Enable mobile API extraction with a genuine device ID to be used with mobile API calls. Default is a random 19-digit string

 #### rokfinchannel
 * `tab`: Which tab to download - one of `new`, `top`, `videos`, `podcasts`, `streams`, `stacks`
@@ -1834,12 +1838,18 @@ #### nhkradirulive (NHK らじる★らじる LIVE)
 #### nflplusreplay
 * `type`: Type(s) of game replays to extract. Valid types are: `full_game`, `full_game_spanish`, `condensed_game` and `all_22`. You can use `all` to extract all available replay types, which is the default

+#### jiocinema
+* `refresh_token`: The `refreshToken` UUID from browser local storage can be passed to extend the life of your login session when logging in with `token` as username and the `accessToken` from browser local storage as password
+
 #### jiosaavn
 * `bitrate`: Audio bitrates to request. One or more of `16`, `32`, `64`, `128`, `320`. Default is `128,320`

 #### afreecatvlive
 * `cdn`: One or more CDN IDs to use with the API call for stream URLs, e.g. `gcp_cdn`, `gs_cdn_pc_app`, `gs_cdn_mobile_web`, `gs_cdn_pc_web`

+#### soundcloud
+* `formats`: Formats to request from the API. Requested values should be in the format of `{protocol}_{extension}` (omitting the bitrate), e.g. `hls_opus,http_aac`. The `*` character functions as a wildcard, e.g. `*_mp3`, and can be passed by itself to request all formats. Known protocols include `http`, `hls` and `hls-aes`; known extensions include `aac`, `opus` and `mp3`. Original `download` formats are always extracted. Default is `http_aac,hls_aac,http_opus,hls_opus,http_mp3,hls_mp3`
+
 **Note**: These options may be changed/removed in the future without concern for backward compatibility

 <!-- MANPAGE: MOVE "INSTALLATION" SECTION HERE -->
@@ -2116,7 +2126,7 @@ # CHANGES FROM YOUTUBE-DL

 ### New features

-* Forked from [**yt-dlc@f9401f2**](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee) and merged with [**youtube-dl@be008e6**](https://github.com/ytdl-org/youtube-dl/commit/be008e657d79832642e2158557c899249c9e31cd) ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))
+* Forked from [**yt-dlc@f9401f2**](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee) and merged with [**youtube-dl@a08f2b7**](https://github.com/ytdl-org/youtube-dl/commit/a08f2b7e4567cdc50c0614ee0a4ffdff49b8b6e6) ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))

 * **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API

@@ -2326,6 +2336,7 @@ #### No longer supported
     --write-annotations              No supported site has annotations now
     --no-write-annotations           Default
     --compat-options seperate-video-versions    No longer needed
+    --compat-options no-youtube-prefer-utc-upload-date    No longer supported

 #### Removed
 These options were deprecated since 2014 and have now been entirely removed
@@ -42,9 +42,9 @@ def main():
             # py2exe cannot import Crypto
             'Crypto',
             'Cryptodome',
-            # py2exe appears to confuse this with our socks library.
-            # We don't use pysocks and urllib3.contrib.socks would fail to import if tried.
-            'urllib3.contrib.socks'
+            # requests >=2.32.0 breaks py2exe builds due to certifi dependency
+            'requests',
+            'urllib3',
         ],
         'dll_excludes': ['w9xpopen.exe', 'crypt32.dll'],
         # Modules that are only imported dynamically must be added here
@@ -68,7 +68,7 @@ def exe(onedir):
         'dist/',
         onedir and f'{name}/',
         name,
-        OS_NAME == 'win32' and '.exe'
+        OS_NAME == 'win32' and '.exe',
     )))
@@ -113,7 +113,7 @@ def windows_set_version(exe, version):
         ),
         kids=[
             StringFileInfo([StringTable('040904B0', [
-                StringStruct('Comments', 'yt-dlp%s Command Line Interface' % suffix),
+                StringStruct('Comments', f'yt-dlp{suffix} Command Line Interface'),
                 StringStruct('CompanyName', 'https://github.com/yt-dlp'),
                 StringStruct('FileDescription', 'yt-dlp%s' % (MACHINE and f' ({MACHINE})')),
                 StringStruct('FileVersion', version),
@@ -123,8 +123,8 @@ def windows_set_version(exe, version):
                 StringStruct('ProductName', f'yt-dlp{suffix}'),
                 StringStruct(
                     'ProductVersion', f'{version}{suffix} on Python {platform.python_version()}'),
-            ])]), VarFileInfo([VarStruct('Translation', [0, 1200])])
-        ]
+            ])]), VarFileInfo([VarStruct('Translation', [0, 1200])]),
+        ],
     ))
@@ -9,8 +9,8 @@

 import yt_dlp

-BASH_COMPLETION_FILE = "completions/bash/yt-dlp"
-BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
+BASH_COMPLETION_FILE = 'completions/bash/yt-dlp'
+BASH_COMPLETION_TEMPLATE = 'devscripts/bash-completion.in'


 def build_completion(opt_parser):
@@ -21,9 +21,9 @@ def build_completion(opt_parser):
         opts_flag.append(option.get_opt_string())
     with open(BASH_COMPLETION_TEMPLATE) as f:
         template = f.read()
-    with open(BASH_COMPLETION_FILE, "w") as f:
+    with open(BASH_COMPLETION_FILE, 'w') as f:
         # just using the special char
-        filled_template = template.replace("{{flags}}", " ".join(opts_flag))
+        filled_template = template.replace('{{flags}}', ' '.join(opts_flag))
         f.write(filled_template)
@@ -147,5 +147,27 @@
         "action": "add",
         "when": "9590cc6b4768e190183d7d071a6c78170889116a",
         "short": "[priority] Security: [[CVE-2024-22423](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-22423)] [Prevent RCE when using `--exec` with `%q` on Windows](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-hjq6-52gw-2g7p)\n - The shell escape function now properly escapes `%`, `\\` and `\\n`.\n - `utils.Popen` has been patched accordingly."
+    },
+    {
+        "action": "change",
+        "when": "41ba4a808b597a3afed78c89675a30deb6844450",
+        "short": "[ie/tiktok] Extract via mobile API only if extractor-arg is passed (#9938)",
+        "authors": ["bashonly"]
+    },
+    {
+        "action": "remove",
+        "when": "6e36d17f404556f0e3a43f441c477a71a91877d9"
+    },
+    {
+        "action": "change",
+        "when": "beaf832c7a9d57833f365ce18f6115b88071b296",
+        "short": "[ie/soundcloud] Add `formats` extractor-arg (#10004)",
+        "authors": ["bashonly", "Grub4K"]
+    },
+    {
+        "action": "change",
+        "when": "5c019f6328ad40d66561eac3c4de0b3cd070d0f6",
+        "short": "[cleanup] Misc (#9765)",
+        "authors": ["bashonly", "Grub4K", "seproDev"]
     }
 ]
@@ -42,17 +42,25 @@ def parse_args():
 def main():
     args = parse_args()
     project_table = parse_toml(read_file(args.input))['project']
+    recursive_pattern = re.compile(rf'{project_table["name"]}\[(?P<group_name>[\w-]+)\]')
     optional_groups = project_table['optional-dependencies']
     excludes = args.exclude or []

+    def yield_deps(group):
+        for dep in group:
+            if mobj := recursive_pattern.fullmatch(dep):
+                yield from optional_groups.get(mobj.group('group_name'), [])
+            else:
+                yield dep
+
     targets = []
     if not args.only_optional:  # `-o` should exclude 'dependencies' and the 'default' group
         targets.extend(project_table['dependencies'])
     if 'default' not in excludes:  # `--exclude default` should exclude entire 'default' group
-        targets.extend(optional_groups['default'])
+        targets.extend(yield_deps(optional_groups['default']))

     for include in filter(None, map(optional_groups.get, args.include or [])):
-        targets.extend(include)
+        targets.extend(yield_deps(include))

     targets = [t for t in targets if re.match(r'[\w-]+', t).group(0).lower() not in excludes]
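
The new `yield_deps` helper lets one optional-dependency group reference another through a self-referencing extra such as `yt-dlp[static-analysis]`. A standalone sketch of the same expansion logic; the group names and contents below are illustrative only:

```python
import re

# Hypothetical optional-dependency table, mirroring the pyproject.toml shape
optional_groups = {
    'static-analysis': ['autopep8~=2.0', 'ruff~=0.4.4'],
    'test': ['pytest~=8.1'],
    'dev': ['pre-commit', 'yt-dlp[static-analysis]', 'yt-dlp[test]'],
}
recursive_pattern = re.compile(r'yt-dlp\[(?P<group_name>[\w-]+)\]')

def yield_deps(group):
    # Expand self-referencing extras into their underlying requirements
    for dep in group:
        if mobj := recursive_pattern.fullmatch(dep):
            yield from optional_groups.get(mobj.group('group_name'), [])
        else:
            yield dep

print(list(yield_deps(optional_groups['dev'])))
# ['pre-commit', 'autopep8~=2.0', 'ruff~=0.4.4', 'pytest~=8.1']
```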
@@ -223,10 +223,10 @@ def format_single_change(self, info: CommitInfo):

         return message if not sep else f'{message}{sep}{rest}'

-    def _format_message_link(self, message, hash):
-        assert message or hash, 'Improperly defined commit message or override'
-        message = message if message else hash[:HASH_LENGTH]
-        return f'[{message}]({self.repo_url}/commit/{hash})' if hash else message
+    def _format_message_link(self, message, commit_hash):
+        assert message or commit_hash, 'Improperly defined commit message or override'
+        message = message if message else commit_hash[:HASH_LENGTH]
+        return f'[{message}]({self.repo_url}/commit/{commit_hash})' if commit_hash else message

     def _format_issues(self, issues):
         return ', '.join(f'[#{issue}]({self.repo_url}/issues/{issue})' for issue in issues)

@@ -356,7 +356,7 @@ def apply_overrides(self, overrides):
             logger.info(f'CHANGE {self._commits[commit.hash]} -> {commit}')
             self._commits[commit.hash] = commit

-        self._commits = {key: value for key, value in reversed(self._commits.items())}
+        self._commits = dict(reversed(self._commits.items()))

     def groups(self):
         group_dict = defaultdict(list)
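
The rename from `hash` to `commit_hash` avoids shadowing the `hash` builtin; the behavior is unchanged. A standalone sketch of the link formatting, assuming a `HASH_LENGTH` of 7 (the repo URL and commit hash are placeholders):

```python
HASH_LENGTH = 7  # assumed abbreviation length for changelog links

def format_message_link(message, commit_hash, repo_url='https://github.com/yt-dlp/yt-dlp'):
    # Fall back to the abbreviated hash when there is no override message
    assert message or commit_hash, 'Improperly defined commit message or override'
    message = message if message else commit_hash[:HASH_LENGTH]
    return f'[{message}]({repo_url}/commit/{commit_hash})' if commit_hash else message

print(format_message_link(None, '41ba4a808b597a3afed78c89675a30deb6844450'))
# [41ba4a8](https://github.com/yt-dlp/yt-dlp/commit/41ba4a808b597a3afed78c89675a30deb6844450)
```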
@@ -51,7 +51,7 @@ def apply_patch(text, patch):
     ),
     (  # Headings
         r'(?m)^ (\w.+\n)( (?=\w))?',
-        r'## \1'
+        r'## \1',
     ),
     (  # Fixup `--date` formatting
         rf'(?m)( --date DATE.+({delim}[^\[]+)*)\[.+({delim}.+)*$',
@@ -61,26 +61,26 @@ def apply_patch(text, patch):
     ),
     (  # Do not split URLs
         rf'({delim[:-1]})? (?P<label>\[\S+\] )?(?P<url>https?({delim})?:({delim})?/({delim})?/(({delim})?\S+)+)\s',
-        lambda mobj: ''.join((delim, mobj.group('label') or '', re.sub(r'\s+', '', mobj.group('url')), '\n'))
+        lambda mobj: ''.join((delim, mobj.group('label') or '', re.sub(r'\s+', '', mobj.group('url')), '\n')),
     ),
     (  # Do not split "words"
         rf'(?m)({delim}\S+)+$',
-        lambda mobj: ''.join((delim, mobj.group(0).replace(delim, '')))
+        lambda mobj: ''.join((delim, mobj.group(0).replace(delim, ''))),
     ),
     (  # Allow overshooting last line
         rf'(?m)^(?P<prev>.+)${delim}(?P<current>.+)$(?!{delim})',
         lambda mobj: (mobj.group().replace(delim, ' ')
                       if len(mobj.group()) - len(delim) + 1 <= max_width + ALLOWED_OVERSHOOT
-                      else mobj.group())
+                      else mobj.group()),
     ),
     (  # Avoid newline when a space is available b/w switch and description
         DISABLE_PATCH,  # This creates issues with prepare_manpage
         r'(?m)^(\s{4}-.{%d})(%s)' % (switch_col_width - 6, delim),
-        r'\1 '
+        r'\1 ',
     ),
     (  # Replace brackets with a Markdown link
         r'SponsorBlock API \((http.+)\)',
-        r'[SponsorBlock API](\1)'
+        r'[SponsorBlock API](\1)',
     ),
 )
@@ -1,4 +0,0 @@
-@echo off
-
->&2 echo run_tests.bat is deprecated. Please use `devscripts/run_tests.py` instead
-python %~dp0run_tests.py %~1
@@ -4,6 +4,7 @@
 import functools
 import os
 import re
+import shlex
 import subprocess
 import sys
 from pathlib import Path
@@ -18,6 +19,8 @@ def parse_args():
         'test', help='a extractor tests, or one of "core" or "download"', nargs='*')
     parser.add_argument(
         '-k', help='run a test matching EXPRESSION. Same as "pytest -k"', metavar='EXPRESSION')
+    parser.add_argument(
+        '--pytest-args', help='arguments to passthrough to pytest')
     return parser.parse_args()


@@ -26,15 +29,16 @@ def run_tests(*tests, pattern=None, ci=False):
     run_download = 'download' in tests
     tests = list(map(fix_test_name, tests))

-    arguments = ['pytest', '-Werror', '--tb=short']
+    pytest_args = args.pytest_args or os.getenv('HATCH_TEST_ARGS', '')
+    arguments = ['pytest', '-Werror', '--tb=short', *shlex.split(pytest_args)]
     if ci:
         arguments.append('--color=yes')
+    if pattern:
+        arguments.extend(['-k', pattern])
     if run_core:
         arguments.extend(['-m', 'not download'])
     elif run_download:
         arguments.extend(['-m', 'download'])
-    elif pattern:
-        arguments.extend(['-k', pattern])
     else:
         arguments.extend(
             f'test/test_download.py::TestDownload::test_{test}' for test in tests)
@@ -46,13 +50,13 @@ def run_tests(*tests, pattern=None, ci=False):
         pass

     arguments = [sys.executable, '-Werror', '-m', 'unittest']
+    if pattern:
+        arguments.extend(['-k', pattern])
     if run_core:
         print('"pytest" needs to be installed to run core tests', file=sys.stderr, flush=True)
         return 1
     elif run_download:
         arguments.append('test.test_download')
-    elif pattern:
-        arguments.extend(['-k', pattern])
     else:
         arguments.extend(
             f'test.test_download.TestDownload.test_{test}' for test in tests)
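
The new `--pytest-args` passthrough is split with `shlex.split` so that shell-style quoting survives intact. A small illustration of why a plain `str.split` would not be enough; the argument values are made up:

```python
import shlex

pytest_args = '--reruns 2 -k "not download and websocket"'

# shlex respects the quoting around the -k expression...
print(shlex.split(pytest_args))
# ['--reruns', '2', '-k', 'not download and websocket']

# ...whereas a naive split would hand pytest four broken fragments
print(pytest_args.split())
# ['--reruns', '2', '-k', '"not', 'download', 'and', 'websocket"']
```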
@@ -1,4 +0,0 @@
-#!/usr/bin/env sh
-
->&2 echo 'run_tests.sh is deprecated. Please use `devscripts/run_tests.py` instead'
-python3 devscripts/run_tests.py "$1"
@@ -30,7 +30,7 @@ def property_setter(name, value):
 opts = parse_options()
 transform = compose_functions(
     property_setter('VARIANT', opts.variant),
-    property_setter('UPDATE_HINT', opts.update_message)
+    property_setter('UPDATE_HINT', opts.update_message),
 )

 write_file(VERSION_FILE, transform(read_file(VERSION_FILE)))
@@ -24,7 +24,7 @@ def get_new_version(version, revision):
     else:
         old_version = read_version().split('.')
         if version.split('.') == old_version[:3]:
-            revision = str(int((old_version + [0])[3]) + 1)
+            revision = str(int(([*old_version, 0])[3]) + 1)

     return f'{version}.{revision}' if revision else version
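
The `[*old_version, 0]` spread is just a modern spelling of list concatenation: it pads the parsed version so a missing fourth component defaults to `0` before being incremented. A quick sketch with made-up version strings:

```python
def next_revision(old_version_str):
    # Pad with a trailing 0 so '2024.05.26' and '2024.05.26.1' both index safely
    old_version = old_version_str.split('.')
    return str(int(([*old_version, 0])[3]) + 1)

print(next_revision('2024.05.26'))    # '1' - first same-day rebuild
print(next_revision('2024.05.26.1'))  # '2' - subsequent rebuild
```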
@@ -9,15 +9,15 @@

 import yt_dlp

-ZSH_COMPLETION_FILE = "completions/zsh/_yt-dlp"
-ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
+ZSH_COMPLETION_FILE = 'completions/zsh/_yt-dlp'
+ZSH_COMPLETION_TEMPLATE = 'devscripts/zsh-completion.in'


 def build_completion(opt_parser):
     opts = [opt for group in opt_parser.option_groups
             for opt in group.option_list]
-    opts_file = [opt for opt in opts if opt.metavar == "FILE"]
-    opts_dir = [opt for opt in opts if opt.metavar == "DIR"]
+    opts_file = [opt for opt in opts if opt.metavar == 'FILE']
+    opts_dir = [opt for opt in opts if opt.metavar == 'DIR']

     fileopts = []
     for opt in opts_file:
@@ -38,11 +38,11 @@ def build_completion(opt_parser):
     with open(ZSH_COMPLETION_TEMPLATE) as f:
         template = f.read()

-    template = template.replace("{{fileopts}}", "|".join(fileopts))
-    template = template.replace("{{diropts}}", "|".join(diropts))
-    template = template.replace("{{flags}}", " ".join(flags))
+    template = template.replace('{{fileopts}}', '|'.join(fileopts))
+    template = template.replace('{{diropts}}', '|'.join(diropts))
+    template = template.replace('{{flags}}', ' '.join(flags))

-    with open(ZSH_COMPLETION_FILE, "w") as f:
+    with open(ZSH_COMPLETION_FILE, 'w') as f:
         f.write(template)
17 pyinst.py
@@ -1,17 +0,0 @@
-#!/usr/bin/env python3
-
-# Allow execution from anywhere
-import os
-import sys
-
-sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
-
-import warnings
-
-from bundle.pyinstaller import main
-
-warnings.warn(DeprecationWarning('`pyinst.py` is deprecated and will be removed in a future version. '
-                                 'Use `bundle.pyinstaller` instead'))
-
-if __name__ == '__main__':
-    main()
273 pyproject.toml
@@ -46,7 +46,7 @@ dependencies = [
     "certifi",
     "mutagen",
     "pycryptodomex",
-    "requests>=2.31.0,<3",
+    "requests>=2.32.2,<3",
     "urllib3>=1.26.17,<3",
     "websockets>=12.0",
 ]
@@ -62,18 +62,27 @@ build = [
     "build",
     "hatchling",
     "pip",
+    "setuptools",
     "wheel",
 ]
 dev = [
-    "flake8",
-    "isort",
-    "pytest",
+    "pre-commit",
+    "yt-dlp[static-analysis]",
+    "yt-dlp[test]",
+]
+static-analysis = [
+    "autopep8~=2.0",
+    "ruff~=0.4.4",
+]
+test = [
+    "pytest~=8.1",
 ]
 pyinstaller = [
-    "pyinstaller>=6.3; sys_platform!='darwin'",
-    "pyinstaller==5.13.2; sys_platform=='darwin'",  # needed for curl_cffi
+    "pyinstaller>=6.7.0",  # for compat with setuptools>=70
+]
+py2exe = [
+    "py2exe>=0.12",
 ]
-py2exe = ["py2exe>=0.12"]

 [project.urls]
 Documentation = "https://github.com/yt-dlp/yt-dlp#readme"
@@ -122,3 +131,253 @@ artifacts = ["/yt_dlp/extractor/lazy_extractors.py"]
 [tool.hatch.version]
 path = "yt_dlp/version.py"
 pattern = "_pkg_version = '(?P<version>[^']+)'"
+
+[tool.hatch.envs.default]
+features = ["curl-cffi", "default"]
+dependencies = ["pre-commit"]
+path = ".venv"
+installer = "uv"
+
+[tool.hatch.envs.default.scripts]
+setup = "pre-commit install --config .pre-commit-hatch.yaml"
+yt-dlp = "python -Werror -Xdev -m yt_dlp {args}"
+
+[tool.hatch.envs.hatch-static-analysis]
+detached = true
+features = ["static-analysis"]
+dependencies = []  # override hatch ruff version
+config-path = "pyproject.toml"
+
+[tool.hatch.envs.hatch-static-analysis.scripts]
+format-check = "autopep8 --diff {args:.}"
+format-fix = "autopep8 --in-place {args:.}"
+lint-check = "ruff check {args:.}"
+lint-fix = "ruff check --fix {args:.}"
+
+[tool.hatch.envs.hatch-test]
+features = ["test"]
+dependencies = [
+    "pytest-randomly~=3.15",
+    "pytest-rerunfailures~=14.0",
+    "pytest-xdist[psutil]~=3.5",
+]
+
+[tool.hatch.envs.hatch-test.scripts]
+run = "python -m devscripts.run_tests {args}"
+run-cov = "echo Code coverage not implemented && exit 1"
+
+[[tool.hatch.envs.hatch-test.matrix]]
+python = [
+    "3.8",
+    "3.9",
+    "3.10",
+    "3.11",
+    "3.12",
+    "pypy3.8",
+    "pypy3.9",
+    "pypy3.10",
+]
+
+[tool.ruff]
+line-length = 120
+
+[tool.ruff.lint]
+ignore = [
+    "E402",  # module-import-not-at-top-of-file
+    "E501",  # line-too-long
+    "E731",  # lambda-assignment
+    "E741",  # ambiguous-variable-name
+    "UP036",  # outdated-version-block
+    "B006",  # mutable-argument-default
+    "B008",  # function-call-in-default-argument
+    "B011",  # assert-false
+    "B017",  # assert-raises-exception
+    "B023",  # function-uses-loop-variable (false positives)
+    "B028",  # no-explicit-stacklevel
+    "B904",  # raise-without-from-inside-except
+    "C401",  # unnecessary-generator-set
+    "C402",  # unnecessary-generator-dict
+    "PIE790",  # unnecessary-placeholder
+    "SIM102",  # collapsible-if
+    "SIM108",  # if-else-block-instead-of-if-exp
+    "SIM112",  # uncapitalized-environment-variables
+    "SIM113",  # enumerate-for-loop
+    "SIM114",  # if-with-same-arms
+    "SIM115",  # open-file-with-context-handler
+    "SIM117",  # multiple-with-statements
+    "SIM223",  # expr-and-false
+    "SIM300",  # yoda-conditions
+    "TD001",  # invalid-todo-tag
+    "TD002",  # missing-todo-author
+    "TD003",  # missing-todo-link
+    "PLE0604",  # invalid-all-object (false positives)
+    "PLW0603",  # global-statement
+    "PLW1510",  # subprocess-run-without-check
+    "PLW2901",  # redefined-loop-name
+    "RUF001",  # ambiguous-unicode-character-string
+    "RUF012",  # mutable-class-default
+    "RUF100",  # unused-noqa (flake8 has slightly different behavior)
+]
+select = [
+    "E",  # pycodestyle Error
+    "W",  # pycodestyle Warning
+    "F",  # Pyflakes
+    "I",  # isort
+    "Q",  # flake8-quotes
+    "N803",  # invalid-argument-name
+    "N804",  # invalid-first-argument-name-for-class-method
+    "UP",  # pyupgrade
+    "B",  # flake8-bugbear
+    "A",  # flake8-builtins
+    "COM",  # flake8-commas
+    "C4",  # flake8-comprehensions
+    "FA",  # flake8-future-annotations
+    "ISC",  # flake8-implicit-str-concat
+    "ICN003",  # banned-import-from
+    "PIE",  # flake8-pie
+    "T20",  # flake8-print
+    "RSE",  # flake8-raise
+    "RET504",  # unnecessary-assign
+    "SIM",  # flake8-simplify
+    "TID251",  # banned-api
+    "TD",  # flake8-todos
+    "PLC",  # Pylint Convention
+    "PLE",  # Pylint Error
+    "PLW",  # Pylint Warning
+    "RUF",  # Ruff-specific rules
+]
+
+[tool.ruff.lint.per-file-ignores]
+"devscripts/lazy_load_template.py" = [
+    "F401",  # unused-import
+]
+"!yt_dlp/extractor/**.py" = [
+    "I",  # isort
+    "ICN003",  # banned-import-from
+    "T20",  # flake8-print
+    "A002",  # builtin-argument-shadowing
+    "C408",  # unnecessary-collection-call
+]
+"yt_dlp/jsinterp.py" = [
+    "UP031",  # printf-string-formatting
+]
+
+[tool.ruff.lint.isort]
+known-first-party = [
+    "bundle",
+    "devscripts",
+    "test",
+]
+relative-imports-order = "closest-to-furthest"
+
+[tool.ruff.lint.flake8-quotes]
+docstring-quotes = "double"
+multiline-quotes = "single"
+inline-quotes = "single"
+avoid-escape = false
+
+[tool.ruff.lint.pep8-naming]
+classmethod-decorators = [
+    "yt_dlp.utils.classproperty",
+]
+
+[tool.ruff.lint.flake8-import-conventions]
+banned-from = [
+    "base64",
+    "datetime",
+    "functools",
+    "glob",
+    "hashlib",
+    "itertools",
+    "json",
+    "math",
+    "os",
+    "pathlib",
+    "random",
+    "re",
+    "string",
+    "sys",
+    "time",
+    "urllib",
+    "uuid",
+    "xml",
+]
+
+[tool.ruff.lint.flake8-tidy-imports.banned-api]
+"yt_dlp.compat.compat_str".msg = "Use `str` instead."
+"yt_dlp.compat.compat_b64decode".msg = "Use `base64.b64decode` instead."
+"yt_dlp.compat.compat_urlparse".msg = "Use `urllib.parse` instead."
+"yt_dlp.compat.compat_parse_qs".msg = "Use `urllib.parse.parse_qs` instead."
+"yt_dlp.compat.compat_urllib_parse_unquote".msg = "Use `urllib.parse.unquote` instead."
+"yt_dlp.compat.compat_urllib_parse_urlencode".msg = "Use `urllib.parse.urlencode` instead."
+"yt_dlp.compat.compat_urllib_parse_urlparse".msg = "Use `urllib.parse.urlparse` instead."
+"yt_dlp.compat.compat_shlex_quote".msg = "Use `yt_dlp.utils.shell_quote` instead."
+"yt_dlp.utils.error_to_compat_str".msg = "Use `str` instead."
+
+[tool.autopep8]
+max_line_length = 120
+recursive = true
+exit-code = true
+jobs = 0
+select = [
+    "E101",
+    "E112",
+    "E113",
+    "E115",
+    "E116",
+    "E117",
+    "E121",
+    "E122",
+    "E123",
+    "E124",
+    "E125",
+    "E126",
+    "E127",
+    "E128",
+    "E129",
+    "E131",
+    "E201",
+    "E202",
+    "E203",
+    "E211",
+    "E221",
+    "E222",
+    "E223",
+    "E224",
+    "E225",
+    "E226",
+    "E227",
+    "E228",
+    "E231",
+    "E241",
+    "E242",
+    "E251",
+    "E252",
+    "E261",
+    "E262",
+    "E265",
+    "E266",
+    "E271",
+    "E272",
+    "E273",
+    "E274",
+    "E275",
+    "E301",
+    "E302",
+    "E303",
+    "E304",
+    "E305",
+    "E306",
+    "E502",
+    "E701",
+    "E702",
+    "E704",
+    "W391",
+    "W504",
+]
+
+[tool.pytest.ini_options]
+addopts = "-ra -v --strict-markers"
+markers = [
+    "download",
+]
@@ -14,12 +14,6 @@ remove-duplicate-keys = true
 remove-unused-variables = true


-[tool:pytest]
-addopts = -ra -v --strict-markers
-markers =
-    download
-
-
 [tox:tox]
 skipsdist = true
 envlist = py{38,39,310,311,312},pypy{38,39,310}
36 setup.py
@@ -1,36 +0,0 @@
-#!/usr/bin/env python3
-
-# Allow execution from anywhere
-import os
-import sys
-
-sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
-
-import warnings
-
-
-if sys.argv[1:2] == ['py2exe']:
-    warnings.warn(DeprecationWarning('`setup.py py2exe` is deprecated and will be removed in a future version. '
-                                     'Use `bundle.py2exe` instead'))
-
-    import bundle.py2exe
-
-    bundle.py2exe.main()
-
-elif 'build_lazy_extractors' in sys.argv:
-    warnings.warn(DeprecationWarning('`setup.py build_lazy_extractors` is deprecated and will be removed in a future version. '
-                                     'Use `devscripts.make_lazy_extractors` instead'))
-
-    import subprocess
-
-    os.chdir(sys.path[0])
-    print('running build_lazy_extractors')
-    subprocess.run([sys.executable, 'devscripts/make_lazy_extractors.py'])
-
-else:
-
-    print(
-        'ERROR: Building by calling `setup.py` is deprecated. '
-        'Use a build frontend like `build` instead. ',
-        'Refer to https://build.pypa.io for more info', file=sys.stderr)
-    sys.exit(1)
@@ -14,7 +14,6 @@ # Supported sites
 - **6play**
 - **7plus**
 - **8tracks**
-- **91porn**
 - **9c9media**
 - **9gag**: 9GAG
 - **9News**
@@ -220,7 +219,7 @@ # Supported sites
 - **BusinessInsider**
 - **BuzzFeed**
 - **BYUtv**: (**Currently broken**)
-- **CableAV**
+- **CaffeineTV**
 - **Callin**
 - **Caltrans**
 - **CAM4**
@@ -333,6 +332,8 @@ # Supported sites
 - **DailyWirePodcast**
 - **damtomo:record**
 - **damtomo:video**
+- **dangalplay**: [*dangalplay*](## "netrc machine")
+- **dangalplay:season**: [*dangalplay*](## "netrc machine")
 - **daum.net**
 - **daum.net:clip**
 - **daum.net:playlist**
@@ -396,7 +397,6 @@ # Supported sites
 - **EinsUndEinsTV**: [*1und1tv*](## "netrc machine")
 - **EinsUndEinsTVLive**: [*1und1tv*](## "netrc machine")
 - **EinsUndEinsTVRecordings**: [*1und1tv*](## "netrc machine")
-- **Einthusan**
 - **eitb.tv**
 - **ElementorEmbed**
 - **Elonet**
@@ -498,6 +498,7 @@ # Supported sites
 - **GameStar**
 - **Gaskrank**
 - **Gazeta**: (**Currently broken**)
+- **GBNews**: GB News clips, features and live streams
 - **GDCVault**: [*gdcvault*](## "netrc machine") (**Currently broken**)
 - **GediDigital**
 - **gem.cbc.ca**: [*cbcgem*](## "netrc machine")
@@ -527,6 +528,7 @@ # Supported sites
 - **GMANetworkVideo**
 - **Go**
 - **GoDiscovery**
+- **GodResource**
 - **GodTube**: (**Currently broken**)
 - **Gofile**
 - **Golem**
@@ -630,11 +632,11 @@ # Supported sites
 - **iwara:user**: [*iwara*](## "netrc machine")
 - **Ixigua**
 - **Izlesene**
-- **Jable**
-- **JablePlaylist**
 - **Jamendo**
 - **JamendoAlbum**
 - **JeuxVideo**: (**Currently broken**)
+- **jiocinema**: [*jiocinema*](## "netrc machine")
+- **jiocinema:series**: [*jiocinema*](## "netrc machine")
 - **jiosaavn:album**
 - **jiosaavn:playlist**
 - **jiosaavn:song**
@@ -974,6 +976,7 @@ # Supported sites
 - **NRKTVSeason**
 - **NRKTVSeries**
 - **NRLTV**: (**Currently broken**)
+- **nts.live**
 - **ntv.ru**
 - **NubilesPorn**: [*nubiles-porn*](## "netrc machine")
 - **nuum:live**
@@ -1015,7 +1018,6 @@ # Supported sites
 - **orf:on**
 - **orf:podcast**
 - **orf:radio**
-- **orf:tvthek**: ORF TVthek
 - **OsnatelTV**: [*osnateltv*](## "netrc machine")
 - **OsnatelTVLive**: [*osnateltv*](## "netrc machine")
 - **OsnatelTVRecordings**: [*osnateltv*](## "netrc machine")
@@ -1394,6 +1396,10 @@ # Supported sites
 - **SztvHu**
 - **t-online.de**: (**Currently broken**)
 - **Tagesschau**: (**Currently broken**)
+- **TapTapApp**
+- **TapTapAppIntl**
+- **TapTapMoment**
+- **TapTapPostIntl**
 - **Tass**: (**Currently broken**)
 - **TBS**
 - **TBSJPEpisode**
@@ -1412,7 +1418,7 @@ # Supported sites
 - **TedSeries**
 - **TedTalk**
 - **Tele13**
-- **Tele5**: (**Currently broken**)
+- **Tele5**
 - **TeleBruxelles**
 - **TelecaribePlay**
 - **Telecinco**: telecinco.es, cuatro.com and mediaset.es
@@ -1452,11 +1458,12 @@ # Supported sites
 - **ThreeSpeak**
 - **ThreeSpeakUser**
 - **TikTok**
+- **tiktok:collection**
 - **tiktok:effect**: (**Currently broken**)
 - **tiktok:live**
 - **tiktok:sound**: (**Currently broken**)
 - **tiktok:tag**: (**Currently broken**)
-- **tiktok:user**: (**Currently broken**)
+- **tiktok:user**
 - **TLC**
 - **TMZ**
 - **TNAFlix**
@@ -1501,7 +1508,7 @@ # Supported sites
 - **tv2play.hu**
 - **tv2playseries.hu**
 - **TV4**: tv4.se and tv4play.se
-- **TV5MondePlus**: TV5MONDE+
+- **TV5MONDE**
 - **tv5unis**
 - **tv5unis:video**
 - **tv8.it**
@@ -1639,8 +1646,6 @@ # Supported sites
 - **voicy**: (**Currently broken**)
 - **voicy:channel**: (**Currently broken**)
 - **VolejTV**
-- **Voot**: [*voot*](## "netrc machine") (**Currently broken**)
-- **VootSeries**: [*voot*](## "netrc machine") (**Currently broken**)
 - **VoxMedia**
 - **VoxMediaVolume**
 - **vpro**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
@@ -1715,10 +1720,10 @@ # Supported sites
 - **wykop:post:comment**
 - **Xanimu**
 - **XboxClips**
-- **XFileShare**: XFileShare based sites: Aparat, ClipWatching, GoUnlimited, GoVid, HolaVid, Streamty, TheVideoBee, Uqload, VidBom, vidlo, VidLocker, VidShare, VUp, WolfStream, XVideoSharing
 - **XHamster**
 - **XHamsterEmbed**
 - **XHamsterUser**
+- **XiaoHongShu**: 小红书
 - **ximalaya**: 喜马拉雅FM
 - **ximalaya:album**: 喜马拉雅FM 专辑
 - **xinpianchang**: xinpianchang.com (**Currently broken**)
@@ -1749,8 +1754,12 @@ # Supported sites
 - **YouNowLive**
 - **YouNowMoment**
 - **YouPorn**
-- **YourPorn**
-- **YourUpload**
+- **YouPornCategory**: YouPorn category, with sorting, filtering and pagination
+- **YouPornChannel**: YouPorn channel, with sorting and pagination
+- **YouPornCollection**: YouPorn collection (user playlist), with sorting and pagination
+- **YouPornStar**: YouPorn Pornstar, with description, sorting and pagination
+- **YouPornTag**: YouPorn tag (porntags), with sorting, filtering and pagination
+- **YouPornVideos**: YouPorn video (browse) playlists, with sorting, filtering and pagination
 - **youtube**: YouTube
 - **youtube:clip**
 - **youtube:favorites**: YouTube liked videos; ":ytfav" keyword (requires cookies)
@@ -1,4 +1,3 @@
-import functools
 import inspect

 import pytest
@@ -10,7 +9,9 @@

 @pytest.fixture
 def handler(request):
-    RH_KEY = request.param
+    RH_KEY = getattr(request, 'param', None)
+    if not RH_KEY:
+        return
     if inspect.isclass(RH_KEY) and issubclass(RH_KEY, RequestHandler):
         handler = RH_KEY
     elif RH_KEY in _REQUEST_HANDLERS:
@@ -18,9 +19,46 @@ def handler(request):
     else:
         pytest.skip(f'{RH_KEY} request handler is not available')

-    return functools.partial(handler, logger=FakeLogger)
+    class HandlerWrapper(handler):
+        RH_KEY = handler.RH_KEY
+
+        def __init__(self, **kwargs):
+            super().__init__(logger=FakeLogger, **kwargs)
+
+    return HandlerWrapper
+
+
+@pytest.fixture(autouse=True)
+def skip_handler(request, handler):
+    """usage: pytest.mark.skip_handler('my_handler', 'reason')"""
+    for marker in request.node.iter_markers('skip_handler'):
+        if marker.args[0] == handler.RH_KEY:
+            pytest.skip(marker.args[1] if len(marker.args) > 1 else '')
+
+
+@pytest.fixture(autouse=True)
+def skip_handler_if(request, handler):
+    """usage: pytest.mark.skip_handler_if('my_handler', lambda request: True, 'reason')"""
+    for marker in request.node.iter_markers('skip_handler_if'):
+        if marker.args[0] == handler.RH_KEY and marker.args[1](request):
+            pytest.skip(marker.args[2] if len(marker.args) > 2 else '')
+
+
+@pytest.fixture(autouse=True)
+def skip_handlers_if(request, handler):
+    """usage: pytest.mark.skip_handlers_if(lambda request, handler: True, 'reason')"""
+    for marker in request.node.iter_markers('skip_handlers_if'):
+        if handler and marker.args[0](request, handler):
+            pytest.skip(marker.args[1] if len(marker.args) > 1 else '')
+
+
-def validate_and_send(rh, req):
-    rh.validate(req)
-    return rh.send(req)
+def pytest_configure(config):
+    config.addinivalue_line(
+        'markers', 'skip_handler(handler): skip test for the given handler',
+    )
+    config.addinivalue_line(
+        'markers', 'skip_handler_if(handler): skip test for the given handler if condition is true',
+    )
+    config.addinivalue_line(
+        'markers', 'skip_handlers_if(handler): skip test for handlers when the condition is true',
+    )
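
For context, a sketch of how a test might consume the new auto-applied skip fixtures; the handler name and condition below are illustrative only:

```python
import pytest

# Skip this test entirely for one request handler
@pytest.mark.skip_handler('Websockets', 'handler does not support this feature')
def test_feature(handler):
    ...

# Skip conditionally, based on the test's own parametrization
@pytest.mark.skip_handler_if(
    'Requests', lambda request: 'proxy' in request.node.name, 'proxies unsupported here')
def test_other_feature(handler):
    ...
```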
@ -16,8 +16,8 @@
|
||||||
import pytest
|
import pytest
|
||||||
is_download_test = pytest.mark.download
|
is_download_test = pytest.mark.download
|
||||||
else:
|
else:
|
||||||
def is_download_test(testClass):
|
def is_download_test(test_class):
|
||||||
return testClass
|
return test_class
|
||||||
|
|
||||||
|
|
||||||
def get_params(override=None):
|
def get_params(override=None):
|
||||||
|
@ -45,10 +45,10 @@ def try_rm(filename):
|
||||||
|
|
||||||
|
|
||||||
def report_warning(message, *args, **kwargs):
|
def report_warning(message, *args, **kwargs):
|
||||||
'''
|
"""
|
||||||
Print the message to stderr, it will be prefixed with 'WARNING:'
|
Print the message to stderr, it will be prefixed with 'WARNING:'
|
||||||
If stderr is a tty file the 'WARNING:' will be colored
|
If stderr is a tty file the 'WARNING:' will be colored
|
||||||
'''
|
"""
|
||||||
if sys.stderr.isatty() and compat_os_name != 'nt':
|
if sys.stderr.isatty() and compat_os_name != 'nt':
|
||||||
_msg_header = '\033[0;33mWARNING:\033[0m'
|
_msg_header = '\033[0;33mWARNING:\033[0m'
|
||||||
else:
|
else:
|
||||||
|
@ -138,15 +138,14 @@ def expect_value(self, got, expected, field):
|
||||||
elif isinstance(expected, list) and isinstance(got, list):
|
elif isinstance(expected, list) and isinstance(got, list):
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
len(expected), len(got),
|
len(expected), len(got),
|
||||||
'Expect a list of length %d, but got a list of length %d for field %s' % (
|
f'Expect a list of length {len(expected)}, but got a list of length {len(got)} for field {field}')
|
||||||
len(expected), len(got), field))
|
|
||||||
for index, (item_got, item_expected) in enumerate(zip(got, expected)):
|
for index, (item_got, item_expected) in enumerate(zip(got, expected)):
|
||||||
type_got = type(item_got)
|
type_got = type(item_got)
|
||||||
type_expected = type(item_expected)
|
type_expected = type(item_expected)
|
||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
type_expected, type_got,
|
type_expected, type_got,
|
||||||
'Type mismatch for list item at index %d for field %s, expected %r, got %r' % (
|
test/helper.py

@@ (hunk continued)
-                        index, field, type_expected, type_got))
+                    f'Type mismatch for list item at index {index} for field {field}, '
+                    f'expected {type_expected!r}, got {type_got!r}')
             expect_value(self, item_got, item_expected, field)
     else:
         if isinstance(expected, str) and expected.startswith('md5:'):
@@ -224,7 +223,7 @@ def sanitize(key, value):
     test_info_dict.pop('display_id')

     # Remove deprecated fields
-    for old in YoutubeDL._deprecated_multivalue_fields.keys():
+    for old in YoutubeDL._deprecated_multivalue_fields:
         test_info_dict.pop(old, None)

     # release_year may be generated from release_date
@@ -246,11 +245,11 @@ def expect_info_dict(self, got_dict, expected_dict):
     if expected_dict.get('ext'):
         mandatory_fields.extend(('url', 'ext'))
     for key in mandatory_fields:
-        self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
+        self.assertTrue(got_dict.get(key), f'Missing mandatory field {key}')
     # Check for mandatory fields that are automatically set by YoutubeDL
     if got_dict.get('_type', 'video') == 'video':
         for key in ['webpage_url', 'extractor', 'extractor_key']:
-            self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
+            self.assertTrue(got_dict.get(key), f'Missing field: {key}')

     test_info_dict = sanitize_got_info_dict(got_dict)

@@ -258,7 +257,7 @@ def expect_info_dict(self, got_dict, expected_dict):
     if missing_keys:
         def _repr(v):
             if isinstance(v, str):
-                return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
+                return "'{}'".format(v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n'))
             elif isinstance(v, type):
                 return v.__name__
             else:
@@ -275,8 +274,7 @@ def _repr(v):
         write_string(info_dict_str.replace('\n', '\n        '), out=sys.stderr)
         self.assertFalse(
             missing_keys,
-            'Missing keys in test definition: %s' % (
-                ', '.join(sorted(missing_keys))))
+            'Missing keys in test definition: {}'.format(', '.join(sorted(missing_keys))))


def assertRegexpMatches(self, text, regexp, msg=None):
@@ -285,9 +283,9 @@ def assertRegexpMatches(self, text, regexp, msg=None):
    else:
        m = re.match(regexp, text)
        if not m:
-            note = 'Regexp didn\'t match: %r not found' % (regexp)
+            note = f'Regexp didn\'t match: {regexp!r} not found'
            if len(text) < 1000:
-                note += ' in %r' % text
+                note += f' in {text!r}'
            if msg is None:
                msg = note
            else:
@@ -310,7 +308,7 @@ def assertLessEqual(self, got, expected, msg=None):


def assertEqual(self, got, expected, msg=None):
-    if not (got == expected):
+    if got != expected:
        if msg is None:
            msg = f'{got!r} not equal to {expected!r}'
        self.assertTrue(got == expected, msg)
@@ -338,3 +336,8 @@ def http_server_port(httpd):
def verify_address_availability(address):
    if find_available_port(address) is None:
        pytest.skip(f'Unable to bind to source address {address} (address may not exist)')
+
+
+def validate_and_send(rh, req):
+    rh.validate(req)
+    return rh.send(req)
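The helper hunks above replace `%`-interpolation (and some `str.format()` calls) with f-strings, and add a small `validate_and_send` wrapper for network tests. A standalone sketch (plain Python, not yt-dlp code) of why the three formatting styles are interchangeable here:

```python
# Standalone sketch of the refactor pattern applied throughout these tests.
key, port = 'webpage_url', 8080

old_style = 'Missing mandatory field %s' % key
new_style = f'Missing mandatory field {key}'
assert old_style == new_style

# The !r conversion inside an f-string replaces the older '%r' specifier:
assert '%r not found' % key == f'{key!r} not found'

# Numeric specifiers like '%d' need no special casing with f-strings:
assert 'http://127.0.0.1:%d/teapot' % port == f'http://127.0.0.1:{port}/teapot'
```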
test/test_InfoExtractor.py

@@ -262,19 +262,19 @@ def test_search_json_ld_realworld(self):
            ''',
            {
                'chapters': [
-                    {"title": "Explosie Turnhout", "start_time": 70, "end_time": 440},
-                    {"title": "Jaarwisseling", "start_time": 440, "end_time": 1179},
-                    {"title": "Natuurbranden Colorado", "start_time": 1179, "end_time": 1263},
-                    {"title": "Klimaatverandering", "start_time": 1263, "end_time": 1367},
-                    {"title": "Zacht weer", "start_time": 1367, "end_time": 1383},
-                    {"title": "Financiële balans", "start_time": 1383, "end_time": 1484},
-                    {"title": "Club Brugge", "start_time": 1484, "end_time": 1575},
-                    {"title": "Mentale gezondheid bij topsporters", "start_time": 1575, "end_time": 1728},
-                    {"title": "Olympische Winterspelen", "start_time": 1728, "end_time": 1873},
-                    {"title": "Sober oudjaar in Nederland", "start_time": 1873, "end_time": 2079.23}
+                    {'title': 'Explosie Turnhout', 'start_time': 70, 'end_time': 440},
+                    {'title': 'Jaarwisseling', 'start_time': 440, 'end_time': 1179},
+                    {'title': 'Natuurbranden Colorado', 'start_time': 1179, 'end_time': 1263},
+                    {'title': 'Klimaatverandering', 'start_time': 1263, 'end_time': 1367},
+                    {'title': 'Zacht weer', 'start_time': 1367, 'end_time': 1383},
+                    {'title': 'Financiële balans', 'start_time': 1383, 'end_time': 1484},
+                    {'title': 'Club Brugge', 'start_time': 1484, 'end_time': 1575},
+                    {'title': 'Mentale gezondheid bij topsporters', 'start_time': 1575, 'end_time': 1728},
+                    {'title': 'Olympische Winterspelen', 'start_time': 1728, 'end_time': 1873},
+                    {'title': 'Sober oudjaar in Nederland', 'start_time': 1873, 'end_time': 2079.23},
                ],
-                'title': 'Het journaal - Aflevering 365 (Seizoen 2021)'
-            }, {}
+                'title': 'Het journaal - Aflevering 365 (Seizoen 2021)',
+            }, {},
        ),
        (
            # test multiple thumbnails in a list
@@ -301,13 +301,13 @@ def test_search_json_ld_realworld(self):
                'thumbnails': [{'url': 'https://www.rainews.it/cropgd/640x360/dl/img/2021/12/30/1640886376927_GettyImages.jpg'}],
            },
            {},
-        )
+        ),
    ]
    for html, expected_dict, search_json_ld_kwargs in _TESTS:
        expect_dict(
            self,
            self.ie._search_json_ld(html, None, **search_json_ld_kwargs),
-            expected_dict
+            expected_dict,
        )

def test_download_json(self):
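Most of the remaining hunks in this file are mechanical style changes: double-quoted literals become single-quoted, and multi-line collections gain trailing commas. These match the rules a formatter such as ruff enforces (the exact rule names are an inference; the diff itself does not cite them). A hypothetical sketch of why the trailing comma is worth having:

```python
# Hypothetical example: with a trailing comma after the last element,
# appending another chapter later changes only one line in a diff.
chapters = [
    {'title': 'Explosie Turnhout', 'start_time': 70, 'end_time': 440},
    {'title': 'Jaarwisseling', 'start_time': 440, 'end_time': 1179},  # trailing comma stays put
]
chapters.append({'title': 'Zacht weer', 'start_time': 1367, 'end_time': 1383})
assert len(chapters) == 3
```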
@@ -366,7 +366,7 @@ def test_parse_html5_media_entries(self):
                'height': 740,
                'tbr': 1500,
            }],
-            'thumbnail': '//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg'
+            'thumbnail': '//pics.r18.com/digital/amateur/mgmr105/mgmr105jp.jpg',
        })

        # from https://www.csfd.cz/
@@ -419,9 +419,9 @@ def test_parse_html5_media_entries(self):
                'height': 1080,
            }],
            'subtitles': {
-                'cs': [{'url': 'https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt'}]
+                'cs': [{'url': 'https://video.csfd.cz/files/subtitles/163/344/163344115_4c388b.srt'}],
            },
-            'thumbnail': 'https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360'
+            'thumbnail': 'https://img.csfd.cz/files/images/film/video/preview/163/344/163344118_748d20.png?h360',
        })

        # from https://tamasha.com/v/Kkdjw
@@ -452,7 +452,7 @@ def test_parse_html5_media_entries(self):
                'ext': 'mp4',
                'format_id': '144p',
                'height': 144,
-            }]
+            }],
        })

        # from https://www.directvnow.com
@@ -470,7 +470,7 @@ def test_parse_html5_media_entries(self):
            'formats': [{
                'ext': 'mp4',
                'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
-            }]
+            }],
        })

        # from https://www.directvnow.com
@@ -488,7 +488,7 @@ def test_parse_html5_media_entries(self):
            'formats': [{
                'url': 'https://cdn.directv.com/content/dam/dtv/prod/website_directvnow-international/videos/DTVN_hdr_HBO_v3.mp4',
                'ext': 'mp4',
-            }]
+            }],
        })

        # from https://www.klarna.com/uk/
@@ -547,8 +547,8 @@ def test_extract_jwplayer_data_realworld(self):
            'id': 'XEgvuql4',
            'formats': [{
                'url': 'rtmp://192.138.214.154/live/sjclive',
-                'ext': 'flv'
-            }]
+                'ext': 'flv',
+            }],
        })

        # from https://www.pornoxo.com/videos/7564/striptease-from-sexy-secretary/
@@ -588,8 +588,8 @@ def test_extract_jwplayer_data_realworld(self):
            'thumbnail': 'https://t03.vipstreamservice.com/thumbs/pxo-full/2009-12/14/a4b2157147afe5efa93ce1978e0265289c193874e02597.flv-full-13.jpg',
            'formats': [{
                'url': 'https://cdn.pornoxo.com/key=MF+oEbaxqTKb50P-w9G3nA,end=1489689259,ip=104.199.146.27/ip=104.199.146.27/speed=6573765/buffer=3.0/2009-12/4b2157147afe5efa93ce1978e0265289c193874e02597.flv',
-                'ext': 'flv'
-            }]
+                'ext': 'flv',
+            }],
        })

        # from http://www.indiedb.com/games/king-machine/videos
@@ -610,12 +610,12 @@ def test_extract_jwplayer_data_realworld(self):
            'formats': [{
                'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode_mp4/king-machine-trailer.mp4',
                'height': 360,
-                'ext': 'mp4'
+                'ext': 'mp4',
            }, {
                'url': 'http://cdn.dbolical.com/cache/videos/games/1/50/49678/encode720p_mp4/king-machine-trailer.mp4',
                'height': 720,
-                'ext': 'mp4'
-            }]
+                'ext': 'mp4',
+            }],
        })

    def test_parse_m3u8_formats(self):
@@ -866,7 +866,7 @@ def test_parse_m3u8_formats(self):
                'height': 1080,
                'vcodec': 'avc1.64002a',
            }],
-            {}
+            {},
        ),
        (
            'bipbop_16x9',
@@ -990,45 +990,45 @@ def test_parse_m3u8_formats(self):
                'en': [{
                    'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/eng/prog_index.m3u8',
                    'ext': 'vtt',
-                    'protocol': 'm3u8_native'
+                    'protocol': 'm3u8_native',
                }, {
                    'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/eng_forced/prog_index.m3u8',
                    'ext': 'vtt',
-                    'protocol': 'm3u8_native'
+                    'protocol': 'm3u8_native',
                }],
                'fr': [{
                    'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/fra/prog_index.m3u8',
                    'ext': 'vtt',
-                    'protocol': 'm3u8_native'
+                    'protocol': 'm3u8_native',
                }, {
                    'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/fra_forced/prog_index.m3u8',
                    'ext': 'vtt',
-                    'protocol': 'm3u8_native'
+                    'protocol': 'm3u8_native',
                }],
                'es': [{
                    'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/spa/prog_index.m3u8',
                    'ext': 'vtt',
-                    'protocol': 'm3u8_native'
+                    'protocol': 'm3u8_native',
                }, {
                    'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/spa_forced/prog_index.m3u8',
                    'ext': 'vtt',
-                    'protocol': 'm3u8_native'
+                    'protocol': 'm3u8_native',
                }],
                'ja': [{
                    'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/jpn/prog_index.m3u8',
                    'ext': 'vtt',
-                    'protocol': 'm3u8_native'
+                    'protocol': 'm3u8_native',
                }, {
                    'url': 'https://devstreaming-cdn.apple.com/videos/streaming/examples/bipbop_16x9/subtitles/jpn_forced/prog_index.m3u8',
                    'ext': 'vtt',
-                    'protocol': 'm3u8_native'
+                    'protocol': 'm3u8_native',
                }],
-            }
+            },
        ),
    ]

    for m3u8_file, m3u8_url, expected_formats, expected_subs in _TEST_CASES:
-        with open('./test/testdata/m3u8/%s.m3u8' % m3u8_file, encoding='utf-8') as f:
+        with open(f'./test/testdata/m3u8/{m3u8_file}.m3u8', encoding='utf-8') as f:
            formats, subs = self.ie._parse_m3u8_formats_and_subtitles(
                f.read(), m3u8_url, ext='mp4')
            self.ie._sort_formats(formats)
@@ -1366,14 +1366,14 @@ def test_parse_mpd_formats(self):
                    'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
                    'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
                    'protocol': 'http_dash_segments',
-                }
-            ]
+                },
+            ],
            },
-        )
+        ),
    ]

    for mpd_file, mpd_url, mpd_base_url, expected_formats, expected_subtitles in _TEST_CASES:
-        with open('./test/testdata/mpd/%s.mpd' % mpd_file, encoding='utf-8') as f:
+        with open(f'./test/testdata/mpd/{mpd_file}.mpd', encoding='utf-8') as f:
            formats, subtitles = self.ie._parse_mpd_formats_and_subtitles(
                compat_etree_fromstring(f.read().encode()),
                mpd_base_url=mpd_base_url, mpd_url=mpd_url)
@@ -1408,7 +1408,7 @@ def test_parse_ism_formats(self):
                    'sampling_rate': 48000,
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video-100',
@@ -1431,7 +1431,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D401FDA0544EFFC2D002CBC40000003004000000C03C60CA80000000168EF32C8',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video-326',
@@ -1454,7 +1454,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D401FDA0241FE23FFC3BC83BA44000003000400000300C03C60CA800000000168EF32C8',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video-698',
@@ -1477,7 +1477,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D401FDA0350BFB97FF06AF06AD1000003000100000300300F1832A00000000168EF32C8',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video-1493',
@@ -1500,7 +1500,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D401FDA011C3DE6FFF0D890D871000003000100000300300F1832A00000000168EF32C8',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video-4482',
@@ -1523,7 +1523,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D401FDA01A816F97FFC1ABC1AB440000003004000000C03C60CA80000000168EF32C8',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }],
            {
@@ -1538,10 +1538,10 @@ def test_parse_ism_formats(self):
                    'duration': 8880746666,
                    'timescale': 10000000,
                    'fourcc': 'TTML',
-                    'codec_private_data': ''
-                }
-            }
-        ]
+                    'codec_private_data': '',
+                },
+            },
+        ],
            },
        ),
        (
@@ -1571,7 +1571,7 @@ def test_parse_ism_formats(self):
                    'sampling_rate': 48000,
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'audio_deu_1-224',
@@ -1597,7 +1597,7 @@ def test_parse_ism_formats(self):
                    'sampling_rate': 48000,
                    'channels': 6,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video_deu-23',
@@ -1622,7 +1622,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '000000016742C00CDB06077E5C05A808080A00000300020000030009C0C02EE0177CC6300F142AE00000000168CA8DC8',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video_deu-403',
@@ -1647,7 +1647,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D4014E98323B602D4040405000003000100000300320F1429380000000168EAECF2',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video_deu-680',
@@ -1672,7 +1672,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D401EE981405FF2E02D4040405000000300100000030320F162D3800000000168EAECF2',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video_deu-1253',
@@ -1698,7 +1698,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D401EE981405FF2E02D4040405000000300100000030320F162D3800000000168EAECF2',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video_deu-2121',
@@ -1723,7 +1723,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D401EECA0601BD80B50101014000003000400000300C83C58B6580000000168E93B3C80',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video_deu-3275',
@@ -1748,7 +1748,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D4020ECA02802DD80B501010140000003004000000C83C60C65800000000168E93B3C80',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video_deu-5300',
@@ -1773,7 +1773,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D4028ECA03C0113F2E02D4040405000000300100000030320F18319600000000168E93B3C80',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }, {
                'format_id': 'video_deu-8079',
@@ -1798,7 +1798,7 @@ def test_parse_ism_formats(self):
                    'codec_private_data': '00000001674D4028ECA03C0113F2E02D4040405000000300100000030320F18319600000000168E93B3C80',
                    'channels': 2,
                    'bits_per_sample': 16,
-                    'nal_unit_length_field': 4
+                    'nal_unit_length_field': 4,
                },
            }],
            {},
@@ -1806,7 +1806,7 @@ def test_parse_ism_formats(self):
    ]

    for ism_file, ism_url, expected_formats, expected_subtitles in _TEST_CASES:
-        with open('./test/testdata/ism/%s.Manifest' % ism_file, encoding='utf-8') as f:
+        with open(f'./test/testdata/ism/{ism_file}.Manifest', encoding='utf-8') as f:
            formats, subtitles = self.ie._parse_ism_formats_and_subtitles(
                compat_etree_fromstring(f.read().encode()), ism_url=ism_url)
            self.ie._sort_formats(formats)
@@ -1827,12 +1827,12 @@ def test_parse_f4m_formats(self):
                'tbr': 2148,
                'width': 1280,
                'height': 720,
-            }]
+            }],
        ),
    ]

    for f4m_file, f4m_url, expected_formats in _TEST_CASES:
-        with open('./test/testdata/f4m/%s.f4m' % f4m_file, encoding='utf-8') as f:
+        with open(f'./test/testdata/f4m/{f4m_file}.f4m', encoding='utf-8') as f:
            formats = self.ie._parse_f4m_formats(
                compat_etree_fromstring(f.read().encode()),
                f4m_url, None)
@@ -1873,13 +1873,13 @@ def test_parse_xspf(self):
            }, {
                'manifest_url': 'https://example.org/src/foo_xspf.xspf',
                'url': 'https://example.com/track3.mp3',
-            }]
-        }]
+            }],
+        }],
        ),
    ]

    for xspf_file, xspf_url, expected_entries in _TEST_CASES:
-        with open('./test/testdata/xspf/%s.xspf' % xspf_file, encoding='utf-8') as f:
+        with open(f'./test/testdata/xspf/{xspf_file}.xspf', encoding='utf-8') as f:
            entries = self.ie._parse_xspf(
                compat_etree_fromstring(f.read().encode()),
                xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url)
@@ -1902,7 +1902,7 @@ def test_response_with_expected_status_returns_content(self):
        server_thread.start()

        (content, urlh) = self.ie._download_webpage_handle(
-            'http://127.0.0.1:%d/teapot' % port, None,
+            f'http://127.0.0.1:{port}/teapot', None,
            expected_status=TEAPOT_RESPONSE_STATUS)
        self.assertEqual(content, TEAPOT_RESPONSE_BODY)

@@ -1912,7 +1912,7 @@ def test_search_nextjs_data(self):
        self.assertEqual(self.ie._search_nextjs_data('', None, fatal=False), {})
        self.assertEqual(self.ie._search_nextjs_data('', None, default=None), None)
        self.assertEqual(self.ie._search_nextjs_data('', None, default={}), {})
-        with self.assertRaises(DeprecationWarning):
+        with self.assertWarns(DeprecationWarning):
            self.assertEqual(self.ie._search_nextjs_data('', None, default='{}'), {})

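The last hunk above is a behavioural fix rather than a style change: `assertRaises(DeprecationWarning)` only passes if the warning is actually raised as an exception, whereas `assertWarns` catches a warning emitted through the normal `warnings` machinery. A self-contained illustration of the difference:

```python
import unittest
import warnings


class WarnDemo(unittest.TestCase):
    def test_warns(self):
        # assertWarns passes: the callee merely *emits* a warning.
        with self.assertWarns(DeprecationWarning):
            warnings.warn('use the new default instead', DeprecationWarning)
        # assertRaises would fail on the same code, because warnings.warn()
        # does not raise unless the warnings filter escalates it to an error.


if __name__ == '__main__':
    unittest.main()
```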
test/test_YoutubeDL.py

@@ -8,6 +8,7 @@
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


+import contextlib
 import copy
 import json

@@ -129,8 +130,8 @@ def test(inp, *expected, multi=False):
            'allow_multiple_audio_streams': multi,
        })
        ydl.process_ie_result(info_dict.copy())
-        downloaded = map(lambda x: x['format_id'], ydl.downloaded_info_dicts)
-        self.assertEqual(list(downloaded), list(expected))
+        downloaded = [x['format_id'] for x in ydl.downloaded_info_dicts]
+        self.assertEqual(downloaded, list(expected))

    test('20/47', '47')
    test('20/71/worst', '35')
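Swapping `map(lambda ...)` for a list comprehension also removes a subtlety: `map` returns a single-use iterator, so consuming it twice silently yields an empty sequence the second time. A small standalone sketch:

```python
infos = [{'format_id': '137'}, {'format_id': '251'}]

lazy = map(lambda x: x['format_id'], infos)
assert list(lazy) == ['137', '251']
assert list(lazy) == []  # the iterator is already exhausted

eager = [x['format_id'] for x in infos]
assert eager == ['137', '251']
assert eager == ['137', '251']  # a list can be inspected repeatedly
```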
@@ -515,10 +516,8 @@ def test_format_filtering(self):
        self.assertEqual(downloaded_ids, ['D', 'C', 'B'])

        ydl = YDL({'format': 'best[height<40]'})
-        try:
+        with contextlib.suppress(ExtractorError):
            ydl.process_ie_result(info_dict)
-        except ExtractorError:
-            pass
        self.assertEqual(ydl.downloaded_info_dicts, [])

    def test_default_format_spec(self):
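`contextlib.suppress` condenses a try/except/pass block into one line; the two forms are equivalent as long as the except clause truly does nothing. Sketch with a stand-in exception type (not the real `yt_dlp.utils.ExtractorError`):

```python
import contextlib


class ExtractorError(Exception):  # stand-in for yt_dlp.utils.ExtractorError
    pass


def process():
    raise ExtractorError('requested format not available')


# Equivalent to: try: process() / except ExtractorError: pass
with contextlib.suppress(ExtractorError):
    process()
print('execution continues past the suppressed error')
```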
@@ -652,8 +651,8 @@ def test_add_extra_info(self):
            'formats': [
                {'id': 'id 1', 'height': 1080, 'width': 1920},
                {'id': 'id 2', 'height': 720},
-                {'id': 'id 3'}
-            ]
+                {'id': 'id 3'},
+            ],
        }

    def test_prepare_outtmpl_and_filename(self):
@@ -773,7 +772,7 @@ def expect_same_infodict(out):
        test('%(formats)j', (json.dumps(FORMATS), None))
        test('%(formats)#j', (
            json.dumps(FORMATS, indent=4),
-            json.dumps(FORMATS, indent=4).replace(':', ':').replace('"', """).replace('\n', ' ')
+            json.dumps(FORMATS, indent=4).replace(':', ':').replace('"', '"').replace('\n', ' '),
        ))
        test('%(title5).3B', 'á')
        test('%(title5)U', 'áéí 𝐀')
@@ -843,8 +842,8 @@ def gen():

        # Empty filename
        test('%(foo|)s-%(bar|)s.%(ext)s', '-.mp4')
-        # test('%(foo|)s.%(ext)s', ('.mp4', '_.mp4')) # fixme
-        # test('%(foo|)s', ('', '_')) # fixme
+        # test('%(foo|)s.%(ext)s', ('.mp4', '_.mp4'))  # FIXME: ?
+        # test('%(foo|)s', ('', '_'))  # FIXME: ?

        # Environment variable expansion for prepare_filename
        os.environ['__yt_dlp_var'] = 'expanded'
@@ -861,7 +860,7 @@ def gen():
        test('Hello %(title1)s', 'Hello $PATH')
        test('Hello %(title2)s', 'Hello %PATH%')
        test('%(title3)s', ('foo/bar\\test', 'foo⧸bar⧹test'))
-        test('folder/%(title3)s', ('folder/foo/bar\\test', 'folder%sfoo⧸bar⧹test' % os.path.sep))
+        test('folder/%(title3)s', ('folder/foo/bar\\test', f'folder{os.path.sep}foo⧸bar⧹test'))

    def test_format_note(self):
        ydl = YoutubeDL()
@@ -883,22 +882,22 @@ def run(self, info):
            f.write('EXAMPLE')
            return [info['filepath']], info

-        def run_pp(params, PP):
+        def run_pp(params, pp):
            with open(filename, 'w') as f:
                f.write('EXAMPLE')
            ydl = YoutubeDL(params)
-            ydl.add_post_processor(PP())
+            ydl.add_post_processor(pp())
            ydl.post_process(filename, {'filepath': filename})

        run_pp({'keepvideo': True}, SimplePP)
-        self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
-        self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
+        self.assertTrue(os.path.exists(filename), f'{filename} doesn\'t exist')
+        self.assertTrue(os.path.exists(audiofile), f'{audiofile} doesn\'t exist')
        os.unlink(filename)
        os.unlink(audiofile)

        run_pp({'keepvideo': False}, SimplePP)
-        self.assertFalse(os.path.exists(filename), '%s exists' % filename)
-        self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
+        self.assertFalse(os.path.exists(filename), f'{filename} exists')
+        self.assertTrue(os.path.exists(audiofile), f'{audiofile} doesn\'t exist')
        os.unlink(audiofile)

        class ModifierPP(PostProcessor):
@@ -908,7 +907,7 @@ def run(self, info):
            return [], info

        run_pp({'keepvideo': False}, ModifierPP)
-        self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
+        self.assertTrue(os.path.exists(filename), f'{filename} doesn\'t exist')
        os.unlink(filename)

    def test_match_filter(self):
@@ -920,7 +919,7 @@ def test_match_filter(self):
            'duration': 30,
            'filesize': 10 * 1024,
            'playlist_id': '42',
-            'uploader': "變態妍字幕版 太妍 тест",
+            'uploader': '變態妍字幕版 太妍 тест',
            'creator': "тест ' 123 ' тест--",
            'webpage_url': 'http://example.com/watch?v=shenanigans',
        }
@@ -933,7 +932,7 @@ def test_match_filter(self):
            'description': 'foo',
            'filesize': 5 * 1024,
            'playlist_id': '43',
-            'uploader': "тест 123",
+            'uploader': 'тест 123',
            'webpage_url': 'http://example.com/watch?v=SHENANIGANS',
        }
        videos = [first, second]
@@ -1180,7 +1179,7 @@ def _real_extract(self, url):
                })
            return {
                'id': video_id,
-                'title': 'Video %s' % video_id,
+                'title': f'Video {video_id}',
                'formats': formats,
            }

@@ -1194,8 +1193,8 @@ def _entries(self):
                    '_type': 'url_transparent',
                    'ie_key': VideoIE.ie_key(),
                    'id': video_id,
-                    'url': 'video:%s' % video_id,
-                    'title': 'Video Transparent %s' % video_id,
+                    'url': f'video:{video_id}',
+                    'title': f'Video Transparent {video_id}',
                }

            def _real_extract(self, url):

test/test_aes.py

@@ -87,7 +87,7 @@ def test_decrypt_text(self):
        password = intlist_to_bytes(self.key).decode()
        encrypted = base64.b64encode(
            intlist_to_bytes(self.iv[:8])
-            + b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
+            + b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae',
        ).decode()
        decrypted = (aes_decrypt_text(encrypted, password, 16))
        self.assertEqual(decrypted, self.secret_msg)
@@ -95,7 +95,7 @@ def test_decrypt_text(self):
        password = intlist_to_bytes(self.key).decode()
        encrypted = base64.b64encode(
            intlist_to_bytes(self.iv[:8])
-            + b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
+            + b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83',
        ).decode()
        decrypted = (aes_decrypt_text(encrypted, password, 32))
        self.assertEqual(decrypted, self.secret_msg)
@@ -132,16 +132,16 @@ def test_pad_block(self):
        block = [0x21, 0xA0, 0x43, 0xFF]

        self.assertEqual(pad_block(block, 'pkcs7'),
-                         block + [0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C])
+                         [*block, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C, 0x0C])

        self.assertEqual(pad_block(block, 'iso7816'),
-                         block + [0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
+                         [*block, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])

        self.assertEqual(pad_block(block, 'whitespace'),
-                         block + [0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20])
+                         [*block, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20])

        self.assertEqual(pad_block(block, 'zero'),
-                         block + [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])
+                         [*block, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])

        block = list(range(16))
        for mode in ('pkcs7', 'iso7816', 'whitespace', 'zero'):

|
||||||
from yt_dlp.compat import (
|
from yt_dlp.compat import (
|
||||||
compat_etree_fromstring,
|
compat_etree_fromstring,
|
||||||
compat_expanduser,
|
compat_expanduser,
|
||||||
compat_urllib_parse_unquote,
|
compat_urllib_parse_unquote, # noqa: TID251
|
||||||
compat_urllib_parse_urlencode,
|
compat_urllib_parse_urlencode, # noqa: TID251
|
||||||
)
|
)
|
||||||
from yt_dlp.compat.urllib.request import getproxies
|
from yt_dlp.compat.urllib.request import getproxies
|
||||||
|
|
||||||
|
@ -24,15 +24,15 @@
|
||||||
class TestCompat(unittest.TestCase):
|
class TestCompat(unittest.TestCase):
|
||||||
def test_compat_passthrough(self):
|
def test_compat_passthrough(self):
|
||||||
with self.assertWarns(DeprecationWarning):
|
with self.assertWarns(DeprecationWarning):
|
||||||
compat.compat_basestring
|
_ = compat.compat_basestring
|
||||||
|
|
||||||
with self.assertWarns(DeprecationWarning):
|
with self.assertWarns(DeprecationWarning):
|
||||||
compat.WINDOWS_VT_MODE
|
_ = compat.WINDOWS_VT_MODE
|
||||||
|
|
||||||
self.assertEqual(urllib.request.getproxies, getproxies)
|
self.assertEqual(urllib.request.getproxies, getproxies)
|
||||||
|
|
||||||
with self.assertWarns(DeprecationWarning):
|
with self.assertWarns(DeprecationWarning):
|
||||||
compat.compat_pycrypto_AES # Must not raise error
|
_ = compat.compat_pycrypto_AES # Must not raise error
|
||||||
|
|
||||||
def test_compat_expanduser(self):
|
def test_compat_expanduser(self):
|
||||||
old_home = os.environ.get('HOME')
|
old_home = os.environ.get('HOME')
|
||||||
|
|
|
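A bare attribute access such as `compat.compat_basestring` looks like a useless statement to a linter (ruff's B018 flags it; the rule name is an inference here), even though in these tests the lookup itself is the behaviour under test: it must emit a DeprecationWarning. Assigning to `_` keeps the side effect while making the intent explicit. A sketch with a minimal stand-in for the compat shim:

```python
import warnings


class CompatShim:
    """Minimal stand-in: any attribute lookup emits a DeprecationWarning."""

    def __getattr__(self, name):
        warnings.warn(f'{name} is deprecated', DeprecationWarning, stacklevel=2)
        return None


compat = CompatShim()
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    _ = compat.compat_basestring  # the lookup is the side effect under test
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```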
test/test_config.py

@@ -71,7 +71,7 @@ def _generate_expected_groups():
        Path('/etc/yt-dlp.conf'),
        Path('/etc/yt-dlp/config'),
        Path('/etc/yt-dlp/config.txt'),
-    ]
+    ],
 }

test/test_cookies.py

@@ -106,7 +106,7 @@ def test_chrome_cookie_decryptor_linux_v11(self):

    def test_chrome_cookie_decryptor_windows_v10(self):
        with MonkeyPatch(cookies, {
-            '_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&'
+            '_get_windows_v10_key': lambda *args, **kwargs: b'Y\xef\xad\xad\xeerp\xf0Y\xe6\x9b\x12\xc2<z\x16]\n\xbb\xb8\xcb\xd7\x9bA\xc3\x14e\x99{\xd6\xf4&',
        }):
            encrypted_value = b'v10T\xb8\xf3\xb8\x01\xa7TtcV\xfc\x88\xb8\xb8\xef\x05\xb5\xfd\x18\xc90\x009\xab\xb1\x893\x85)\x87\xe1\xa9-\xa3\xad='
            value = '32101439'
@@ -121,17 +121,17 @@ def test_chrome_cookie_decryptor_mac_v10(self):
        self.assertEqual(decryptor.decrypt(encrypted_value), value)

    def test_safari_cookie_parsing(self):
-        cookies = \
-            b'cook\x00\x00\x00\x01\x00\x00\x00i\x00\x00\x01\x00\x01\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00Y' \
-            b'\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x008\x00\x00\x00B\x00\x00\x00F\x00\x00\x00H' \
-            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x03\xa5>\xc3A\x00\x00\x80\xc3\x07:\xc3A' \
-            b'localhost\x00foo\x00/\x00test%20%3Bcookie\x00\x00\x00\x054\x07\x17 \x05\x00\x00\x00Kbplist00\xd1\x01' \
-            b'\x02_\x10\x18NSHTTPCookieAcceptPolicy\x10\x02\x08\x0b&\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00\x00' \
-            b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00('
+        cookies = (
+            b'cook\x00\x00\x00\x01\x00\x00\x00i\x00\x00\x01\x00\x01\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00Y'
+            b'\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x008\x00\x00\x00B\x00\x00\x00F\x00\x00\x00H'
+            b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x03\xa5>\xc3A\x00\x00\x80\xc3\x07:\xc3A'
+            b'localhost\x00foo\x00/\x00test%20%3Bcookie\x00\x00\x00\x054\x07\x17 \x05\x00\x00\x00Kbplist00\xd1\x01'
+            b'\x02_\x10\x18NSHTTPCookieAcceptPolicy\x10\x02\x08\x0b&\x00\x00\x00\x00\x00\x00\x01\x01\x00\x00\x00'
+            b'\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00(')

        jar = parse_safari_cookies(cookies)
        self.assertEqual(len(jar), 1)
-        cookie = list(jar)[0]
+        cookie = next(iter(jar))
        self.assertEqual(cookie.domain, 'localhost')
        self.assertEqual(cookie.port, None)
        self.assertEqual(cookie.path, '/')
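Two idioms appear in the safari-cookie hunk above: adjacent byte-string literals inside parentheses concatenate implicitly (no backslash line continuations needed), and `next(iter(jar))` fetches the first element of any iterable without building a throwaway list. A standalone sketch:

```python
blob = (
    b'cook'      # adjacent literals inside parentheses...
    b'\x00\x00'  # ...are concatenated at compile time
)
assert blob == b'cook\x00\x00'

jar = {'localhost'}  # any iterable works, not just sequences
assert next(iter(jar)) == 'localhost'  # no intermediate list(jar)[0]
```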
@@ -164,7 +164,7 @@ def _run_tests(self, *cases):
            attributes = {
                key: value
                for key, value in dict(morsel).items()
-                if value != ""
+                if value != ''
            }
            self.assertEqual(attributes, expected_attributes, message)

@@ -174,133 +174,133 @@ def test_parsing(self):
        self._run_tests(
            # Copied from https://github.com/python/cpython/blob/v3.10.7/Lib/test/test_http_cookies.py
            (
-                "Test basic cookie",
-                "chips=ahoy; vienna=finger",
-                {"chips": "ahoy", "vienna": "finger"},
+                'Test basic cookie',
+                'chips=ahoy; vienna=finger',
+                {'chips': 'ahoy', 'vienna': 'finger'},
            ),
            (
-                "Test quoted cookie",
+                'Test quoted cookie',
                'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
-                {"keebler": 'E=mc2; L="Loves"; fudge=\012;'},
+                {'keebler': 'E=mc2; L="Loves"; fudge=\012;'},
            ),
            (
                "Allow '=' in an unquoted value",
-                "keebler=E=mc2",
-                {"keebler": "E=mc2"},
+                'keebler=E=mc2',
+                {'keebler': 'E=mc2'},
            ),
            (
                "Allow cookies with ':' in their name",
-                "key:term=value:term",
-                {"key:term": "value:term"},
+                'key:term=value:term',
+                {'key:term': 'value:term'},
            ),
            (
                "Allow '[' and ']' in cookie values",
-                "a=b; c=[; d=r; f=h",
-                {"a": "b", "c": "[", "d": "r", "f": "h"},
+                'a=b; c=[; d=r; f=h',
+                {'a': 'b', 'c': '[', 'd': 'r', 'f': 'h'},
            ),
            (
-                "Test basic cookie attributes",
+                'Test basic cookie attributes',
                'Customer="WILE_E_COYOTE"; Version=1; Path=/acme',
-                {"Customer": ("WILE_E_COYOTE", {"version": "1", "path": "/acme"})},
+                {'Customer': ('WILE_E_COYOTE', {'version': '1', 'path': '/acme'})},
            ),
            (
-                "Test flag only cookie attributes",
+                'Test flag only cookie attributes',
                'Customer="WILE_E_COYOTE"; HttpOnly; Secure',
-                {"Customer": ("WILE_E_COYOTE", {"httponly": True, "secure": True})},
+                {'Customer': ('WILE_E_COYOTE', {'httponly': True, 'secure': True})},
            ),
            (
-                "Test flag only attribute with values",
-                "eggs=scrambled; httponly=foo; secure=bar; Path=/bacon",
-                {"eggs": ("scrambled", {"httponly": "foo", "secure": "bar", "path": "/bacon"})},
+                'Test flag only attribute with values',
+                'eggs=scrambled; httponly=foo; secure=bar; Path=/bacon',
+                {'eggs': ('scrambled', {'httponly': 'foo', 'secure': 'bar', 'path': '/bacon'})},
            ),
            (
                "Test special case for 'expires' attribute, 4 digit year",
                'Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT',
-                {"Customer": ("W", {"expires": "Wed, 01 Jan 2010 00:00:00 GMT"})},
+                {'Customer': ('W', {'expires': 'Wed, 01 Jan 2010 00:00:00 GMT'})},
            ),
            (
                "Test special case for 'expires' attribute, 2 digit year",
                'Customer="W"; expires=Wed, 01 Jan 98 00:00:00 GMT',
-                {"Customer": ("W", {"expires": "Wed, 01 Jan 98 00:00:00 GMT"})},
+                {'Customer': ('W', {'expires': 'Wed, 01 Jan 98 00:00:00 GMT'})},
            ),
            (
-                "Test extra spaces in keys and values",
-                "eggs = scrambled ; secure ; path = bar ; foo=foo ",
-                {"eggs": ("scrambled", {"secure": True, "path": "bar"}), "foo": "foo"},
+                'Test extra spaces in keys and values',
+                'eggs = scrambled ; secure ; path = bar ; foo=foo ',
+                {'eggs': ('scrambled', {'secure': True, 'path': 'bar'}), 'foo': 'foo'},
            ),
            (
-                "Test quoted attributes",
+                'Test quoted attributes',
                'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"',
-                {"Customer": ("WILE_E_COYOTE", {"version": "1", "path": "/acme"})}
+                {'Customer': ('WILE_E_COYOTE', {'version': '1', 'path': '/acme'})},
            ),
            # Our own tests that CPython passes
            (
                "Allow ';' in quoted value",
                'chips="a;hoy"; vienna=finger',
-                {"chips": "a;hoy", "vienna": "finger"},
+                {'chips': 'a;hoy', 'vienna': 'finger'},
            ),
            (
-                "Keep only the last set value",
-                "a=c; a=b",
-                {"a": "b"},
+                'Keep only the last set value',
+                'a=c; a=b',
+                {'a': 'b'},
            ),
        )

    def test_lenient_parsing(self):
        self._run_tests(
            (
-                "Ignore and try to skip invalid cookies",
+                'Ignore and try to skip invalid cookies',
                'chips={"ahoy;": 1}; vienna="finger;"',
-                {"vienna": "finger;"},
+                {'vienna': 'finger;'},
            ),
            (
-                "Ignore cookies without a name",
-                "a=b; unnamed; c=d",
-                {"a": "b", "c": "d"},
+                'Ignore cookies without a name',
+                'a=b; unnamed; c=d',
+                {'a': 'b', 'c': 'd'},
            ),
            (
                "Ignore '\"' cookie without name",
                'a=b; "; c=d',
-                {"a": "b", "c": "d"},
+                {'a': 'b', 'c': 'd'},
            ),
            (
-                "Skip all space separated values",
-                "x a=b c=d x; e=f",
-                {"a": "b", "c": "d", "e": "f"},
+                'Skip all space separated values',
+                'x a=b c=d x; e=f',
+                {'a': 'b', 'c': 'd', 'e': 'f'},
            ),
            (
-                "Skip all space separated values",
+                'Skip all space separated values',
                'x a=b; data={"complex": "json", "with": "key=value"}; x c=d x',
-                {"a": "b", "c": "d"},
+                {'a': 'b', 'c': 'd'},
            ),
            (
-                "Expect quote mending",
+                'Expect quote mending',
                'a=b; invalid="; c=d',
-                {"a": "b", "c": "d"},
+                {'a': 'b', 'c': 'd'},
            ),
            (
-                "Reset morsel after invalid to not capture attributes",
-                "a=b; invalid; Version=1; c=d",
-                {"a": "b", "c": "d"},
+                'Reset morsel after invalid to not capture attributes',
+                'a=b; invalid; Version=1; c=d',
+                {'a': 'b', 'c': 'd'},
            ),
            (
-                "Reset morsel after invalid to not capture attributes",
-                "a=b; $invalid; $Version=1; c=d",
-                {"a": "b", "c": "d"},
+                'Reset morsel after invalid to not capture attributes',
+                'a=b; $invalid; $Version=1; c=d',
+                {'a': 'b', 'c': 'd'},
            ),
            (
-                "Continue after non-flag attribute without value",
-                "a=b; path; Version=1; c=d",
-                {"a": "b", "c": "d"},
+                'Continue after non-flag attribute without value',
+                'a=b; path; Version=1; c=d',
+                {'a': 'b', 'c': 'd'},
            ),
            (
-                "Allow cookie attributes with `$` prefix",
+                'Allow cookie attributes with `$` prefix',
                'Customer="WILE_E_COYOTE"; $Version=1; $Secure; $Path=/acme',
-                {"Customer": ("WILE_E_COYOTE", {"version": "1", "secure": True, "path": "/acme"})},
+                {'Customer': ('WILE_E_COYOTE', {'version': '1', 'secure': True, 'path': '/acme'})},
            ),
            (
-                "Invalid Morsel keys should not result in an error",
-                "Key=Value; [Invalid]=Value; Another=Value",
-                {"Key": "Value", "Another": "Value"},
+                'Invalid Morsel keys should not result in an error',
+                'Key=Value; [Invalid]=Value; Another=Value',
+                {'Key': 'Value', 'Another': 'Value'},
            ),
        )
test/test_download.py

@@ -94,7 +94,7 @@ def test_template(self):
            'playlist', [] if is_playlist else [test_case])

        def print_skipping(reason):
-            print('Skipping %s: %s' % (test_case['name'], reason))
+            print('Skipping {}: {}'.format(test_case['name'], reason))
            self.skipTest(reason)

        if not ie.working():
@@ -117,7 +117,7 @@ def print_skipping(reason):

        for other_ie in other_ies:
            if not other_ie.working():
-                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())
+                print_skipping(f'test depends on {other_ie.ie_key()}IE, marked as not WORKING')

        params = get_params(test_case.get('params', {}))
        params['outtmpl'] = tname + '_' + params['outtmpl']
@@ -148,10 +148,7 @@ def match_exception(err):
                return False
            if err.__class__.__name__ == expected_exception:
                return True
-            for exc in err.exc_info:
-                if exc.__class__.__name__ == expected_exception:
-                    return True
-            return False
+            return any(exc.__class__.__name__ == expected_exception for exc in err.exc_info)

        def try_rm_tcs_files(tcs=None):
            if tcs is None:
@@ -181,7 +178,7 @@ def try_rm_tcs_files(tcs=None):
                    raise

                if try_num == RETRIES:
-                    report_warning('%s failed due to network errors, skipping...' % tname)
+                    report_warning(f'{tname} failed due to network errors, skipping...')
                    return

                print(f'Retrying: {try_num} failed tries\n\n##########\n\n')
@@ -244,9 +241,8 @@ def try_rm_tcs_files(tcs=None):
                    got_fsize = os.path.getsize(tc_filename)
                    assertGreaterEqual(
                        self, got_fsize, expected_minsize,
-                        'Expected %s to be at least %s, but it\'s only %s ' %
-                        (tc_filename, format_bytes(expected_minsize),
-                         format_bytes(got_fsize)))
+                        f'Expected {tc_filename} to be at least {format_bytes(expected_minsize)}, '
+                        f'but it\'s only {format_bytes(got_fsize)} ')
                if 'md5' in tc:
                    md5_for_file = _file_md5(tc_filename)
                    self.assertEqual(tc['md5'], md5_for_file)
@@ -255,7 +251,7 @@ def try_rm_tcs_files(tcs=None):
                info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
                self.assertTrue(
                    os.path.exists(info_json_fn),
-                    'Missing info file %s' % info_json_fn)
+                    f'Missing info file {info_json_fn}')
                with open(info_json_fn, encoding='utf-8') as infof:
                    info_dict = json.load(infof)
                expect_info_dict(self, info_dict, tc.get('info_dict', {}))
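Collapsing the loop in `match_exception` into `any()` preserves its behaviour: iteration still stops at the first matching class name, and an empty sequence yields `False`, exactly like the original loop's fall-through `return False`. A condensed stand-in (the real helper lives in test/test_download.py; `exc_info` here is a simplified attribute, not the real `ExtractorError` field):

```python
def match_exception(err, expected_exception):
    # Condensed stand-in for the test_download helper of the same name.
    if err.__class__.__name__ == expected_exception:
        return True
    exc_info = getattr(err, 'exc_info', None) or ()
    return any(exc.__class__.__name__ == expected_exception for exc in exc_info)


class WrappedError(Exception):
    def __init__(self, *causes):
        self.exc_info = causes


assert match_exception(WrappedError(ValueError()), 'ValueError')
assert not match_exception(WrappedError(), 'ValueError')
```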
test/test_downloader_http.py

@@ -38,9 +38,9 @@ def send_content_range(self, total=None):
            end = int(mobj.group(2))
        valid_range = start is not None and end is not None
        if valid_range:
-            content_range = 'bytes %d-%d' % (start, end)
+            content_range = f'bytes {start}-{end}'
            if total:
-                content_range += '/%d' % total
+                content_range += f'/{total}'
            self.send_header('Content-Range', content_range)
        return (end - start + 1) if valid_range else total

@@ -84,7 +84,7 @@ def download(self, params, ep):
        filename = 'testfile.mp4'
        try_rm(encodeFilename(filename))
        self.assertTrue(downloader.real_download(filename, {
-            'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
+            'url': f'http://127.0.0.1:{self.port}/{ep}',
        }), ep)
        self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE, ep)
        try_rm(encodeFilename(filename))
380
test/test_http_proxy.py
Normal file
380
test/test_http_proxy.py
Normal file
|
@ -0,0 +1,380 @@
import abc
import base64
import contextlib
import functools
import json
import os
import random
import ssl
import threading
from http.server import BaseHTTPRequestHandler
from socketserver import ThreadingTCPServer

import pytest

from test.helper import http_server_port, verify_address_availability
from test.test_networking import TEST_DIR
from test.test_socks import IPv6ThreadingTCPServer
from yt_dlp.dependencies import urllib3
from yt_dlp.networking import Request
from yt_dlp.networking.exceptions import HTTPError, ProxyError, SSLError


class HTTPProxyAuthMixin:
    def proxy_auth_error(self):
        self.send_response(407)
        self.send_header('Proxy-Authenticate', 'Basic realm="test http proxy"')
        self.end_headers()
        return False

    def do_proxy_auth(self, username, password):
        if username is None and password is None:
            return True

        proxy_auth_header = self.headers.get('Proxy-Authorization', None)
        if proxy_auth_header is None:
            return self.proxy_auth_error()

        if not proxy_auth_header.startswith('Basic '):
            return self.proxy_auth_error()

        auth = proxy_auth_header[6:]

        try:
            auth_username, auth_password = base64.b64decode(auth).decode().split(':', 1)
        except Exception:
            return self.proxy_auth_error()

        if auth_username != (username or '') or auth_password != (password or ''):
            return self.proxy_auth_error()
        return True
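Aside (illustration, not part of the diff): the mixin above expects the standard Basic scheme, i.e. a 'Proxy-Authorization: Basic base64(username:password)' header. A minimal sketch of the client side it decodes, with hypothetical credentials:

import base64

username, password = 'test', 'test'
token = base64.b64encode(f'{username}:{password}'.encode()).decode()
headers = {'Proxy-Authorization': f'Basic {token}'}
# The server side reverses this, as do_proxy_auth() does above
assert base64.b64decode(token).decode().split(':', 1) == [username, password]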


class HTTPProxyHandler(BaseHTTPRequestHandler, HTTPProxyAuthMixin):
    def __init__(self, *args, proxy_info=None, username=None, password=None, request_handler=None, **kwargs):
        self.username = username
        self.password = password
        self.proxy_info = proxy_info
        super().__init__(*args, **kwargs)

    def do_GET(self):
        if not self.do_proxy_auth(self.username, self.password):
            self.server.close_request(self.request)
            return
        if self.path.endswith('/proxy_info'):
            payload = json.dumps(self.proxy_info or {
                'client_address': self.client_address,
                'connect': False,
                'connect_host': None,
                'connect_port': None,
                'headers': dict(self.headers),
                'path': self.path,
                'proxy': ':'.join(str(y) for y in self.connection.getsockname()),
            })
            self.send_response(200)
            self.send_header('Content-Type', 'application/json; charset=utf-8')
            self.send_header('Content-Length', str(len(payload)))
            self.end_headers()
            self.wfile.write(payload.encode())
        else:
            self.send_response(404)
            self.end_headers()

        self.server.close_request(self.request)


if urllib3:
    import urllib3.util.ssltransport

    class SSLTransport(urllib3.util.ssltransport.SSLTransport):
        """
        Modified version of urllib3 SSLTransport to support server side SSL

        This allows us to chain multiple TLS connections.
        """

        def __init__(self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True, server_side=False):
            self.incoming = ssl.MemoryBIO()
            self.outgoing = ssl.MemoryBIO()

            self.suppress_ragged_eofs = suppress_ragged_eofs
            self.socket = socket

            self.sslobj = ssl_context.wrap_bio(
                self.incoming,
                self.outgoing,
                server_hostname=server_hostname,
                server_side=server_side,
            )
            self._ssl_io_loop(self.sslobj.do_handshake)

        @property
        def _io_refs(self):
            return self.socket._io_refs

        @_io_refs.setter
        def _io_refs(self, value):
            self.socket._io_refs = value

        def shutdown(self, *args, **kwargs):
            self.socket.shutdown(*args, **kwargs)
else:
    SSLTransport = None
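Aside (illustration, not part of the diff): ssl.SSLContext.wrap_bio() is what makes the TLS-in-TLS chaining mentioned in the docstring possible - the handshake runs against in-memory BIOs rather than a socket, so the transport underneath can itself already be a TLS connection. A minimal sketch of the pattern, assuming the peer transport is wired up separately:

import ssl

ctx = ssl.create_default_context()
incoming, outgoing = ssl.MemoryBIO(), ssl.MemoryBIO()
sslobj = ctx.wrap_bio(incoming, outgoing, server_hostname='example.com')
try:
    sslobj.do_handshake()
except ssl.SSLWantReadError:
    # Expected on the first call: the ClientHello now sits in `outgoing`
    # and must be shuttled to the peer over whatever transport is in use.
    pass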


class HTTPSProxyHandler(HTTPProxyHandler):
    def __init__(self, request, *args, **kwargs):
        certfn = os.path.join(TEST_DIR, 'testcert.pem')
        sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslctx.load_cert_chain(certfn, None)
        if isinstance(request, ssl.SSLSocket):
            request = SSLTransport(request, ssl_context=sslctx, server_side=True)
        else:
            request = sslctx.wrap_socket(request, server_side=True)
        super().__init__(request, *args, **kwargs)


class HTTPConnectProxyHandler(BaseHTTPRequestHandler, HTTPProxyAuthMixin):
    protocol_version = 'HTTP/1.1'
    default_request_version = 'HTTP/1.1'

    def __init__(self, *args, username=None, password=None, request_handler=None, **kwargs):
        self.username = username
        self.password = password
        self.request_handler = request_handler
        super().__init__(*args, **kwargs)

    def do_CONNECT(self):
        if not self.do_proxy_auth(self.username, self.password):
            self.server.close_request(self.request)
            return
        self.send_response(200)
        self.end_headers()
        proxy_info = {
            'client_address': self.client_address,
            'connect': True,
            'connect_host': self.path.split(':')[0],
            'connect_port': int(self.path.split(':')[1]),
            'headers': dict(self.headers),
            'path': self.path,
            'proxy': ':'.join(str(y) for y in self.connection.getsockname()),
        }
        self.request_handler(self.request, self.client_address, self.server, proxy_info=proxy_info)
        self.server.close_request(self.request)


class HTTPSConnectProxyHandler(HTTPConnectProxyHandler):
    def __init__(self, request, *args, **kwargs):
        certfn = os.path.join(TEST_DIR, 'testcert.pem')
        sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        sslctx.load_cert_chain(certfn, None)
        request = sslctx.wrap_socket(request, server_side=True)
        self._original_request = request
        super().__init__(request, *args, **kwargs)

    def do_CONNECT(self):
        super().do_CONNECT()
        self.server.close_request(self._original_request)


@contextlib.contextmanager
def proxy_server(proxy_server_class, request_handler, bind_ip=None, **proxy_server_kwargs):
    server = server_thread = None
    try:
        bind_address = bind_ip or '127.0.0.1'
        server_type = ThreadingTCPServer if '.' in bind_address else IPv6ThreadingTCPServer
        server = server_type(
            (bind_address, 0), functools.partial(proxy_server_class, request_handler=request_handler, **proxy_server_kwargs))
        server_port = http_server_port(server)
        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()
        if '.' not in bind_address:
            yield f'[{bind_address}]:{server_port}'
        else:
            yield f'{bind_address}:{server_port}'
    finally:
        server.shutdown()
        server.server_close()
        server_thread.join(2.0)
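Aside (illustration, not part of the diff): on the wire, the exchange that do_CONNECT() answers looks roughly like the following; after the 200 response, bytes flow through the tunnel unmodified. Proxy address and target are hypothetical:

import socket

with socket.create_connection(('127.0.0.1', 8080)) as sock:  # the proxy
    sock.sendall(b'CONNECT example.com:443 HTTP/1.1\r\nHost: example.com:443\r\n\r\n')
    reply = sock.recv(4096)  # expect a status line like b'HTTP/1.1 200 ...'
    # From here on the client would start its end-to-end protocol,
    # e.g. a TLS handshake with example.com, through `sock`.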


class HTTPProxyTestContext(abc.ABC):
    REQUEST_HANDLER_CLASS = None
    REQUEST_PROTO = None

    def http_server(self, server_class, *args, **kwargs):
        return proxy_server(server_class, self.REQUEST_HANDLER_CLASS, *args, **kwargs)

    @abc.abstractmethod
    def proxy_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs) -> dict:
        """return a dict of proxy_info"""


class HTTPProxyHTTPTestContext(HTTPProxyTestContext):
    # Standard HTTP Proxy for http requests
    REQUEST_HANDLER_CLASS = HTTPProxyHandler
    REQUEST_PROTO = 'http'

    def proxy_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs):
        request = Request(f'http://{target_domain or "127.0.0.1"}:{target_port or "40000"}/proxy_info', **req_kwargs)
        handler.validate(request)
        return json.loads(handler.send(request).read().decode())


class HTTPProxyHTTPSTestContext(HTTPProxyTestContext):
    # HTTP Connect proxy, for https requests
    REQUEST_HANDLER_CLASS = HTTPSProxyHandler
    REQUEST_PROTO = 'https'

    def proxy_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs):
        request = Request(f'https://{target_domain or "127.0.0.1"}:{target_port or "40000"}/proxy_info', **req_kwargs)
        handler.validate(request)
        return json.loads(handler.send(request).read().decode())


CTX_MAP = {
    'http': HTTPProxyHTTPTestContext,
    'https': HTTPProxyHTTPSTestContext,
}


@pytest.fixture(scope='module')
def ctx(request):
    return CTX_MAP[request.param]()
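Aside (illustration, not part of the diff): a usage sketch of the helpers above - proxy_server() yields a 'host:port' (or '[v6-host]:port') string, and the context classes probe /proxy_info through it:

ctx = HTTPProxyHTTPTestContext()
with ctx.http_server(HTTPProxyHandler) as server_address:
    # A request handler configured with proxies={'http': f'http://{server_address}'}
    # would now be routed through this throwaway proxy.
    print(server_address)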


@pytest.mark.parametrize(
    'handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
@pytest.mark.parametrize('ctx', ['http'], indirect=True)  # pure http proxy can only support http
class TestHTTPProxy:
    def test_http_no_auth(self, handler, ctx):
        with ctx.http_server(HTTPProxyHandler) as server_address:
            with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['connect'] is False
                assert 'Proxy-Authorization' not in proxy_info['headers']

    def test_http_auth(self, handler, ctx):
        with ctx.http_server(HTTPProxyHandler, username='test', password='test') as server_address:
            with handler(proxies={ctx.REQUEST_PROTO: f'http://test:test@{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert 'Proxy-Authorization' in proxy_info['headers']

    def test_http_bad_auth(self, handler, ctx):
        with ctx.http_server(HTTPProxyHandler, username='test', password='test') as server_address:
            with handler(proxies={ctx.REQUEST_PROTO: f'http://test:bad@{server_address}'}) as rh:
                with pytest.raises(HTTPError) as exc_info:
                    ctx.proxy_info_request(rh)
                assert exc_info.value.response.status == 407
                exc_info.value.response.close()

    def test_http_source_address(self, handler, ctx):
        with ctx.http_server(HTTPProxyHandler) as server_address:
            source_address = f'127.0.0.{random.randint(5, 255)}'
            verify_address_availability(source_address)
            with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'},
                         source_address=source_address) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['client_address'][0] == source_address

    @pytest.mark.skip_handler('Urllib', 'urllib does not support https proxies')
    def test_https(self, handler, ctx):
        with ctx.http_server(HTTPSProxyHandler) as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['connect'] is False
                assert 'Proxy-Authorization' not in proxy_info['headers']

    @pytest.mark.skip_handler('Urllib', 'urllib does not support https proxies')
    def test_https_verify_failed(self, handler, ctx):
        with ctx.http_server(HTTPSProxyHandler) as server_address:
            with handler(verify=True, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
                # Accept SSLError as may not be feasible to tell if it is proxy or request error.
                # note: if request proto also does ssl verification, this may also be the error of the request.
                # Until we can support passing custom cacerts to handlers, we cannot properly test this for all cases.
                with pytest.raises((ProxyError, SSLError)):
                    ctx.proxy_info_request(rh)

    def test_http_with_idn(self, handler, ctx):
        with ctx.http_server(HTTPProxyHandler) as server_address:
            with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh, target_domain='中文.tw')
                assert proxy_info['proxy'] == server_address
                assert proxy_info['path'].startswith('http://xn--fiq228c.tw')
                assert proxy_info['headers']['Host'].split(':', 1)[0] == 'xn--fiq228c.tw'
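Aside (illustration, not part of the diff): the xn--fiq228c assertions above follow directly from IDNA encoding of the target domain:

assert '中文'.encode('idna') == b'xn--fiq228c'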


@pytest.mark.parametrize(
    'handler,ctx', [
        ('Requests', 'https'),
        ('CurlCFFI', 'https'),
    ], indirect=True)
class TestHTTPConnectProxy:
    def test_http_connect_no_auth(self, handler, ctx):
        with ctx.http_server(HTTPConnectProxyHandler) as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'http://{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['connect'] is True
                assert 'Proxy-Authorization' not in proxy_info['headers']

    def test_http_connect_auth(self, handler, ctx):
        with ctx.http_server(HTTPConnectProxyHandler, username='test', password='test') as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'http://test:test@{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert 'Proxy-Authorization' in proxy_info['headers']

    @pytest.mark.skip_handler(
        'Requests',
        'bug in urllib3 causes unclosed socket: https://github.com/urllib3/urllib3/issues/3374',
    )
    def test_http_connect_bad_auth(self, handler, ctx):
        with ctx.http_server(HTTPConnectProxyHandler, username='test', password='test') as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'http://test:bad@{server_address}'}) as rh:
                with pytest.raises(ProxyError):
                    ctx.proxy_info_request(rh)

    def test_http_connect_source_address(self, handler, ctx):
        with ctx.http_server(HTTPConnectProxyHandler) as server_address:
            source_address = f'127.0.0.{random.randint(5, 255)}'
            verify_address_availability(source_address)
            with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'},
                         source_address=source_address,
                         verify=False) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['client_address'][0] == source_address

    @pytest.mark.skipif(urllib3 is None, reason='requires urllib3 to test')
    def test_https_connect_proxy(self, handler, ctx):
        with ctx.http_server(HTTPSConnectProxyHandler) as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert proxy_info['connect'] is True
                assert 'Proxy-Authorization' not in proxy_info['headers']

    @pytest.mark.skipif(urllib3 is None, reason='requires urllib3 to test')
    def test_https_connect_verify_failed(self, handler, ctx):
        with ctx.http_server(HTTPSConnectProxyHandler) as server_address:
            with handler(verify=True, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
                # Accept SSLError as may not be feasible to tell if it is proxy or request error.
                # note: if request proto also does ssl verification, this may also be the error of the request.
                # Until we can support passing custom cacerts to handlers, we cannot properly test this for all cases.
                with pytest.raises((ProxyError, SSLError)):
                    ctx.proxy_info_request(rh)

    @pytest.mark.skipif(urllib3 is None, reason='requires urllib3 to test')
    def test_https_connect_proxy_auth(self, handler, ctx):
        with ctx.http_server(HTTPSConnectProxyHandler, username='test', password='test') as server_address:
            with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'https://test:test@{server_address}'}) as rh:
                proxy_info = ctx.proxy_info_request(rh)
                assert proxy_info['proxy'] == server_address
                assert 'Proxy-Authorization' in proxy_info['headers']
@ -29,11 +29,11 @@ def error(self, msg):
 @is_download_test
 class TestIqiyiSDKInterpreter(unittest.TestCase):
     def test_iqiyi_sdk_interpreter(self):
-        '''
+        """
         Test the functionality of IqiyiSDKInterpreter by trying to log in

         If `sign` is incorrect, /validate call throws an HTTP 556 error
-        '''
+        """
         logger = WarningLogger()
         ie = IqiyiIE(FakeYDL({'logger': logger}))
         ie._perform_login('foo', 'bar')
@ -21,7 +21,7 @@ def test_netrc_present(self):
                 continue
             self.assertTrue(
                 ie._NETRC_MACHINE,
-                'Extractor %s supports login, but is missing a _NETRC_MACHINE property' % ie.IE_NAME)
+                f'Extractor {ie.IE_NAME} supports login, but is missing a _NETRC_MACHINE property')


 if __name__ == '__main__':
@ -6,6 +6,8 @@

 import pytest

+from yt_dlp.networking.common import Features, DEFAULT_TIMEOUT
+
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

 import gzip

@ -27,8 +29,12 @@
 from email.message import Message
 from http.cookiejar import CookieJar

-from test.conftest import validate_and_send
-from test.helper import FakeYDL, http_server_port, verify_address_availability
+from test.helper import (
+    FakeYDL,
+    http_server_port,
+    validate_and_send,
+    verify_address_availability,
+)
 from yt_dlp.cookies import YoutubeDLCookieJar
 from yt_dlp.dependencies import brotli, curl_cffi, requests, urllib3
 from yt_dlp.networking import (

@ -62,21 +68,6 @@
 TEST_DIR = os.path.dirname(os.path.abspath(__file__))


-def _build_proxy_handler(name):
-    class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
-        proxy_name = name
-
-        def log_message(self, format, *args):
-            pass
-
-        def do_GET(self):
-            self.send_response(200)
-            self.send_header('Content-Type', 'text/plain; charset=utf-8')
-            self.end_headers()
-            self.wfile.write(f'{self.proxy_name}: {self.path}'.encode())
-    return HTTPTestRequestHandler
-
-
 class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
     protocol_version = 'HTTP/1.1'
     default_request_version = 'HTTP/1.1'
@ -317,8 +308,9 @@ def setup_class(cls):
         cls.https_server_thread.start()


+@pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
 class TestHTTPRequestHandler(TestRequestHandlerBase):
-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_verify_cert(self, handler):
         with handler() as rh:
             with pytest.raises(CertificateVerifyError):

@ -329,7 +321,6 @@ def test_verify_cert(self, handler):
             assert r.status == 200
             r.close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_ssl_error(self, handler):
         # HTTPS server with too old TLS version
         # XXX: is there a better way to test this than to create a new server?

@ -347,7 +338,6 @@ def test_ssl_error(self, handler):
                 validate_and_send(rh, Request(f'https://127.0.0.1:{https_port}/headers'))
             assert not issubclass(exc_info.type, CertificateVerifyError)

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_percent_encode(self, handler):
         with handler() as rh:
             # Unicode characters should be encoded with uppercase percent-encoding

@ -359,7 +349,6 @@ def test_percent_encode(self, handler):
             assert res.status == 200
             res.close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     @pytest.mark.parametrize('path', [
         '/a/b/./../../headers',
         '/redirect_dotsegments',

@ -375,25 +364,22 @@ def test_remove_dot_segments(self, handler, path):
             assert res.url == f'http://127.0.0.1:{self.http_port}/headers'
             res.close()

-    # Not supported by CurlCFFI (non-standard)
-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.skip_handler('CurlCFFI', 'not supported by curl-cffi (non-standard)')
     def test_unicode_path_redirection(self, handler):
         with handler() as rh:
             r = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/302-non-ascii-redirect'))
             assert r.url == f'http://127.0.0.1:{self.http_port}/%E4%B8%AD%E6%96%87.html'
             r.close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_raise_http_error(self, handler):
         with handler() as rh:
             for bad_status in (400, 500, 599, 302):
                 with pytest.raises(HTTPError):
-                    validate_and_send(rh, Request('http://127.0.0.1:%d/gen_%d' % (self.http_port, bad_status)))
+                    validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_{bad_status}'))

             # Should not raise an error
-            validate_and_send(rh, Request('http://127.0.0.1:%d/gen_200' % self.http_port)).close()
+            validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/gen_200')).close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_response_url(self, handler):
         with handler() as rh:
             # Response url should be that of the last url in redirect chain
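Aside (illustration, not part of the diff): the refactor above relies on pytest applying a class-level parametrize mark to every test method in the class, which is why the per-method marks become redundant:

import pytest

@pytest.mark.parametrize('value', [1, 2])
class TestExample:
    # Each method runs once per parametrized value
    def test_double(self, value):
        assert value * 2 == value + value

    def test_positive(self, value):
        assert value > 0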
@ -405,7 +391,6 @@ def test_response_url(self, handler):
             res2.close()

     # Covers some basic cases we expect some level of consistency between request handlers for
-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     @pytest.mark.parametrize('redirect_status,method,expected', [
         # A 303 must either use GET or HEAD for subsequent request
         (303, 'POST', ('', 'GET', False)),

@ -447,7 +432,6 @@ def test_redirect(self, handler, redirect_status, method, expected):
             assert expected[1] == res.headers.get('method')
             assert expected[2] == ('content-length' in headers.decode().lower())

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_request_cookie_header(self, handler):
         # We should accept a Cookie header being passed as in normal headers and handle it appropriately.
         with handler() as rh:

@ -480,19 +464,16 @@ def test_request_cookie_header(self, handler):
             assert b'cookie: test=ytdlp' not in data.lower()
             assert b'cookie: test=test3' in data.lower()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_redirect_loop(self, handler):
         with handler() as rh:
             with pytest.raises(HTTPError, match='redirect loop'):
                 validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_loop'))

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_incompleteread(self, handler):
         with handler(timeout=2) as rh:
             with pytest.raises(IncompleteRead, match='13 bytes read, 234221 more expected'):
-                validate_and_send(rh, Request('http://127.0.0.1:%d/incompleteread' % self.http_port)).read()
+                validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/incompleteread')).read()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_cookies(self, handler):
         cookiejar = YoutubeDLCookieJar()
         cookiejar.set_cookie(http.cookiejar.Cookie(

@ -509,7 +490,6 @@ def test_cookies(self, handler):
                 rh, Request(f'http://127.0.0.1:{self.http_port}/headers', extensions={'cookiejar': cookiejar})).read()
             assert b'cookie: test=ytdlp' in data.lower()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_headers(self, handler):

         with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:

@ -525,7 +505,6 @@ def test_headers(self, handler):
             assert b'test2: test2' not in data
             assert b'test3: test3' in data

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_read_timeout(self, handler):
         with handler() as rh:
             # Default timeout is 20 seconds, so this should go through

@ -541,26 +520,21 @@ def test_read_timeout(self, handler):
             validate_and_send(
                 rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1', extensions={'timeout': 4}))

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_connect_timeout(self, handler):
         # nothing should be listening on this port
         connect_timeout_url = 'http://10.255.255.255'
-        with handler(timeout=0.01) as rh:
+        with handler(timeout=0.01) as rh, pytest.raises(TransportError):
             now = time.time()
-            with pytest.raises(TransportError):
-                validate_and_send(
-                    rh, Request(connect_timeout_url))
-            assert 0.01 <= time.time() - now < 20
+            validate_and_send(rh, Request(connect_timeout_url))
+        assert time.time() - now < DEFAULT_TIMEOUT

-        with handler() as rh:
-            with pytest.raises(TransportError):
-                # Per request timeout, should override handler timeout
-                now = time.time()
-                validate_and_send(
-                    rh, Request(connect_timeout_url, extensions={'timeout': 0.01}))
-            assert 0.01 <= time.time() - now < 20
+        # Per request timeout, should override handler timeout
+        request = Request(connect_timeout_url, extensions={'timeout': 0.01})
+        with handler() as rh, pytest.raises(TransportError):
+            now = time.time()
+            validate_and_send(rh, request)
+        assert time.time() - now < DEFAULT_TIMEOUT

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_source_address(self, handler):
         source_address = f'127.0.0.{random.randint(5, 255)}'
         # on some systems these loopback addresses we need for testing may not be available
@ -572,13 +546,13 @@ def test_source_address(self, handler):
             assert source_address == data

     # Not supported by CurlCFFI
-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.skip_handler('CurlCFFI', 'not supported by curl-cffi')
     def test_gzip_trailing_garbage(self, handler):
         with handler() as rh:
             data = validate_and_send(rh, Request(f'http://localhost:{self.http_port}/trailing_garbage')).read().decode()
             assert data == '<html><video src="/vid.mp4" /></html>'

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.skip_handler('CurlCFFI', 'not applicable to curl-cffi')
     @pytest.mark.skipif(not brotli, reason='brotli support is not installed')
     def test_brotli(self, handler):
         with handler() as rh:

@ -589,7 +563,6 @@ def test_brotli(self, handler):
             assert res.headers.get('Content-Encoding') == 'br'
             assert res.read() == b'<html><video src="/vid.mp4" /></html>'

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_deflate(self, handler):
         with handler() as rh:
             res = validate_and_send(

@ -599,7 +572,6 @@ def test_deflate(self, handler):
             assert res.headers.get('Content-Encoding') == 'deflate'
             assert res.read() == b'<html><video src="/vid.mp4" /></html>'

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_gzip(self, handler):
         with handler() as rh:
             res = validate_and_send(

@ -609,7 +581,6 @@ def test_gzip(self, handler):
             assert res.headers.get('Content-Encoding') == 'gzip'
             assert res.read() == b'<html><video src="/vid.mp4" /></html>'

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_multiple_encodings(self, handler):
         with handler() as rh:
             for pair in ('gzip,deflate', 'deflate, gzip', 'gzip, gzip', 'deflate, deflate'):

@ -620,8 +591,7 @@ def test_multiple_encodings(self, handler):
                 assert res.headers.get('Content-Encoding') == pair
                 assert res.read() == b'<html><video src="/vid.mp4" /></html>'

-    # Not supported by curl_cffi
-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.skip_handler('CurlCFFI', 'not supported by curl-cffi')
     def test_unsupported_encoding(self, handler):
         with handler() as rh:
             res = validate_and_send(

@ -631,7 +601,6 @@ def test_unsupported_encoding(self, handler):
             assert res.headers.get('Content-Encoding') == 'unsupported'
             assert res.read() == b'raw'

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_read(self, handler):
         with handler() as rh:
             res = validate_and_send(
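Aside (illustration, not part of the diff): skip_handler and skip_handlers_if are custom marks defined in this test suite's conftest, not built-in pytest marks. One hedged sketch of how such a mark can be wired up — this is not necessarily yt-dlp's actual implementation:

import pytest

def pytest_collection_modifyitems(config, items):
    for item in items:
        for marker in item.iter_markers(name='skip_handler'):
            handler_name, reason = marker.args
            # Assumes `handler` is the indirect fixture parameter of the test
            if item.callspec.params.get('handler') == handler_name:
                item.add_marker(pytest.mark.skip(reason=reason))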
@ -642,83 +611,48 @@ def test_read(self, handler):
             assert res.read().decode().endswith('\n\n')
             assert res.read() == b''

+    def test_request_disable_proxy(self, handler):
+        for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['http']:
+            # Given the handler is configured with a proxy
+            with handler(proxies={'http': f'{proxy_proto}://10.255.255.255'}, timeout=5) as rh:
+                # When a proxy is explicitly set to None for the request
+                res = validate_and_send(
+                    rh, Request(f'http://127.0.0.1:{self.http_port}/headers', proxies={'http': None}))
+                # Then no proxy should be used
+                res.close()
+                assert res.status == 200

-class TestHTTPProxy(TestRequestHandlerBase):
-    # Note: this only tests http urls over non-CONNECT proxy
-    @classmethod
-    def setup_class(cls):
-        super().setup_class()
-        # HTTP Proxy server
-        cls.proxy = http.server.ThreadingHTTPServer(
-            ('127.0.0.1', 0), _build_proxy_handler('normal'))
-        cls.proxy_port = http_server_port(cls.proxy)
-        cls.proxy_thread = threading.Thread(target=cls.proxy.serve_forever)
-        cls.proxy_thread.daemon = True
-        cls.proxy_thread.start()
-
-        # Geo proxy server
-        cls.geo_proxy = http.server.ThreadingHTTPServer(
-            ('127.0.0.1', 0), _build_proxy_handler('geo'))
-        cls.geo_port = http_server_port(cls.geo_proxy)
-        cls.geo_proxy_thread = threading.Thread(target=cls.geo_proxy.serve_forever)
-        cls.geo_proxy_thread.daemon = True
-        cls.geo_proxy_thread.start()
-
-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
-    def test_http_proxy(self, handler):
-        http_proxy = f'http://127.0.0.1:{self.proxy_port}'
-        geo_proxy = f'http://127.0.0.1:{self.geo_port}'
-
-        # Test global http proxy
-        # Test per request http proxy
-        # Test per request http proxy disables proxy
-        url = 'http://foo.com/bar'
-
-        # Global HTTP proxy
-        with handler(proxies={'http': http_proxy}) as rh:
-            res = validate_and_send(rh, Request(url)).read().decode()
-            assert res == f'normal: {url}'
-
-            # Per request proxy overrides global
-            res = validate_and_send(rh, Request(url, proxies={'http': geo_proxy})).read().decode()
-            assert res == f'geo: {url}'
-
-            # and setting to None disables all proxies for that request
-            real_url = f'http://127.0.0.1:{self.http_port}/headers'
-            res = validate_and_send(
-                rh, Request(real_url, proxies={'http': None})).read().decode()
-            assert res != f'normal: {real_url}'
-            assert 'Accept' in res
-
-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
+    @pytest.mark.skip_handlers_if(
+        lambda _, handler: Features.NO_PROXY not in handler._SUPPORTED_FEATURES, 'handler does not support NO_PROXY')
     def test_noproxy(self, handler):
-        with handler(proxies={'proxy': f'http://127.0.0.1:{self.proxy_port}'}) as rh:
-            # NO_PROXY
-            for no_proxy in (f'127.0.0.1:{self.http_port}', '127.0.0.1', 'localhost'):
-                nop_response = validate_and_send(
-                    rh, Request(f'http://127.0.0.1:{self.http_port}/headers', proxies={'no': no_proxy})).read().decode(
-                    'utf-8')
-                assert 'Accept' in nop_response
+        for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['http']:
+            # Given the handler is configured with a proxy
+            with handler(proxies={'http': f'{proxy_proto}://10.255.255.255'}, timeout=5) as rh:
+                for no_proxy in (f'127.0.0.1:{self.http_port}', '127.0.0.1', 'localhost'):
+                    # When request no proxy includes the request url host
+                    nop_response = validate_and_send(
+                        rh, Request(f'http://127.0.0.1:{self.http_port}/headers', proxies={'no': no_proxy}))
+                    # Then the proxy should not be used
+                    assert nop_response.status == 200
+                    nop_response.close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
+    @pytest.mark.skip_handlers_if(
+        lambda _, handler: Features.ALL_PROXY not in handler._SUPPORTED_FEATURES, 'handler does not support ALL_PROXY')
     def test_allproxy(self, handler):
-        url = 'http://foo.com/bar'
-        with handler() as rh:
-            response = validate_and_send(rh, Request(url, proxies={'all': f'http://127.0.0.1:{self.proxy_port}'})).read().decode(
-                'utf-8')
-            assert response == f'normal: {url}'
+        # This is a bit of a hacky test, but it should be enough to check whether the handler is using the proxy.
+        # 0.1s might not be enough of a timeout if proxy is not used in all cases, but should still get failures.
+        with handler(proxies={'all': 'http://10.255.255.255'}, timeout=0.1) as rh:
+            with pytest.raises(TransportError):
+                validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
-    def test_http_proxy_with_idn(self, handler):
-        with handler(proxies={
-            'http': f'http://127.0.0.1:{self.proxy_port}',
-        }) as rh:
-            url = 'http://中文.tw/'
-            response = rh.send(Request(url)).read().decode()
-            # b'xn--fiq228c' is '中文'.encode('idna')
-            assert response == 'normal: http://xn--fiq228c.tw/'
+        with handler(timeout=0.1) as rh:
+            with pytest.raises(TransportError):
+                validate_and_send(
+                    rh, Request(
+                        f'http://127.0.0.1:{self.http_port}/headers', proxies={'all': 'http://10.255.255.255'})).close()


+@pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
 class TestClientCertificate:
     @classmethod
     def setup_class(cls):
@ -745,27 +679,23 @@ def _run_test(self, handler, **handler_kwargs):
         ) as rh:
             validate_and_send(rh, Request(f'https://127.0.0.1:{self.port}/video.html')).read().decode()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_certificate_combined_nopass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'clientwithkey.crt'),
         })

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_certificate_nocombined_nopass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'client.crt'),
             'client_certificate_key': os.path.join(self.certdir, 'client.key'),
         })

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_certificate_combined_pass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'clientwithencryptedkey.crt'),
             'client_certificate_password': 'foobar',
         })

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_certificate_nocombined_pass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'client.crt'),
@ -810,7 +740,7 @@ class TestRequestHandlerMisc:
     @pytest.mark.parametrize('handler,logger_name', [
         ('Requests', 'urllib3'),
         ('Websockets', 'websockets.client'),
-        ('Websockets', 'websockets.server')
+        ('Websockets', 'websockets.server'),
     ], indirect=['handler'])
     def test_remove_logging_handler(self, handler, logger_name):
         # Ensure any logging handlers, which may contain a YoutubeDL instance,
@ -824,8 +754,8 @@ def test_remove_logging_handler(self, handler, logger_name):
         assert len(logging_handlers) == before_count


+@pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
 class TestUrllibRequestHandler(TestRequestHandlerBase):
-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
     def test_file_urls(self, handler):
         # See https://github.com/ytdl-org/youtube-dl/issues/8227
         tf = tempfile.NamedTemporaryFile(delete=False)
@ -847,7 +777,6 @@ def test_file_urls(self, handler):

         os.unlink(tf.name)

-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
     def test_http_error_returns_content(self, handler):
         # urllib HTTPError will try close the underlying response if reference to the HTTPError object is lost
         def get_response():
@ -860,31 +789,29 @@ def get_response():

         assert get_response().read() == b'<html></html>'

-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
     def test_verify_cert_error_text(self, handler):
         # Check the output of the error message
         with handler() as rh:
             with pytest.raises(
                 CertificateVerifyError,
-                match=r'\[SSL: CERTIFICATE_VERIFY_FAILED\] certificate verify failed: self.signed certificate'
+                match=r'\[SSL: CERTIFICATE_VERIFY_FAILED\] certificate verify failed: self.signed certificate',
             ):
                 validate_and_send(rh, Request(f'https://127.0.0.1:{self.https_port}/headers'))

-    @pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
     @pytest.mark.parametrize('req,match,version_check', [
         # https://github.com/python/cpython/blob/987b712b4aeeece336eed24fcc87a950a756c3e2/Lib/http/client.py#L1256
         # bpo-39603: Check implemented in 3.7.9+, 3.8.5+
         (
             Request('http://127.0.0.1', method='GET\n'),
             'method can\'t contain control characters',
-            lambda v: v < (3, 7, 9) or (3, 8, 0) <= v < (3, 8, 5)
+            lambda v: v < (3, 7, 9) or (3, 8, 0) <= v < (3, 8, 5),
         ),
         # https://github.com/python/cpython/blob/987b712b4aeeece336eed24fcc87a950a756c3e2/Lib/http/client.py#L1265
         # bpo-38576: Check implemented in 3.7.8+, 3.8.3+
         (
             Request('http://127.0.0. 1', method='GET'),
             'URL can\'t contain control characters',
-            lambda v: v < (3, 7, 8) or (3, 8, 0) <= v < (3, 8, 3)
+            lambda v: v < (3, 7, 8) or (3, 8, 0) <= v < (3, 8, 3),
         ),
         # https://github.com/python/cpython/blob/987b712b4aeeece336eed24fcc87a950a756c3e2/Lib/http/client.py#L1288C31-L1288C50
         (Request('http://127.0.0.1', headers={'foo\n': 'bar'}), 'Invalid header name', None),
@ -913,7 +840,7 @@ class TestRequestsRequestHandler(TestRequestHandlerBase):
         (lambda: requests.exceptions.InvalidHeader(), RequestError),
         # catch-all: https://github.com/psf/requests/blob/main/src/requests/adapters.py#L535
         (lambda: urllib3.exceptions.HTTPError(), TransportError),
-        (lambda: requests.exceptions.RequestException(), RequestError)
+        (lambda: requests.exceptions.RequestException(), RequestError),
         # (lambda: requests.exceptions.TooManyRedirects(), HTTPError) - Needs a response object
     ])
     def test_request_error_mapping(self, handler, monkeypatch, raised, expected):
@ -941,12 +868,12 @@ def request(self, *args, **kwargs):
         (
             lambda: urllib3.exceptions.ProtocolError('error', http.client.IncompleteRead(partial=b'abc', expected=4)),
             IncompleteRead,
-            '3 bytes read, 4 more expected'
+            '3 bytes read, 4 more expected',
         ),
         (
             lambda: urllib3.exceptions.ProtocolError('error', urllib3.exceptions.IncompleteRead(partial=3, expected=5)),
             IncompleteRead,
-            '3 bytes read, 5 more expected'
+            '3 bytes read, 5 more expected',
         ),
     ])
     def test_response_error_mapping(self, handler, monkeypatch, raised, expected, match):
@ -1198,11 +1125,11 @@ class HTTPSupportedRH(ValidationRH):
         ('https', False, {}),
     ]),
     (NoCheckRH, [('http', False, {})]),
-    (ValidationRH, [('http', UnsupportedRequest, {})])
+    (ValidationRH, [('http', UnsupportedRequest, {})]),
 ]

 PROXY_SCHEME_TESTS = [
-    # scheme, expected to fail
+    # proxy scheme, expected to fail
     ('Urllib', 'http', [
         ('http', False),
         ('https', UnsupportedRequest),
@ -1228,30 +1155,41 @@ class HTTPSupportedRH(ValidationRH):
         ('socks5', False),
         ('socks5h', False),
     ]),
-    ('Websockets', 'ws', [('http', UnsupportedRequest)]),
+    ('Websockets', 'ws', [
+        ('http', UnsupportedRequest),
+        ('https', UnsupportedRequest),
+        ('socks4', False),
+        ('socks4a', False),
+        ('socks5', False),
+        ('socks5h', False),
+    ]),
     (NoCheckRH, 'http', [('http', False)]),
     (HTTPSupportedRH, 'http', [('http', UnsupportedRequest)]),
 ]

 PROXY_KEY_TESTS = [
-    # key, expected to fail
-    ('Urllib', [
-        ('all', False),
-        ('unrelated', False),
+    # proxy key, proxy scheme, expected to fail
+    ('Urllib', 'http', [
+        ('all', 'http', False),
+        ('unrelated', 'http', False),
     ]),
-    ('Requests', [
-        ('all', False),
-        ('unrelated', False),
+    ('Requests', 'http', [
+        ('all', 'http', False),
+        ('unrelated', 'http', False),
     ]),
-    ('CurlCFFI', [
-        ('all', False),
-        ('unrelated', False),
+    ('CurlCFFI', 'http', [
+        ('all', 'http', False),
+        ('unrelated', 'http', False),
     ]),
-    (NoCheckRH, [('all', False)]),
-    (HTTPSupportedRH, [('all', UnsupportedRequest)]),
-    (HTTPSupportedRH, [('no', UnsupportedRequest)]),
+    ('Websockets', 'ws', [
+        ('all', 'socks5', False),
+        ('unrelated', 'socks5', False),
+    ]),
+    (NoCheckRH, 'http', [('all', 'http', False)]),
+    (HTTPSupportedRH, 'http', [('all', 'http', UnsupportedRequest)]),
+    (HTTPSupportedRH, 'http', [('no', 'http', UnsupportedRequest)]),
 ]

 EXTENSION_TESTS = [
@ -1281,7 +1219,7 @@ class HTTPSupportedRH(ValidationRH):
         ({'impersonate': ImpersonateTarget('chrome', None, None, None)}, False),
         ({'impersonate': ImpersonateTarget(None, None, None, None)}, False),
         ({'impersonate': ImpersonateTarget()}, False),
-        ({'impersonate': 'chrome'}, AssertionError)
+        ({'impersonate': 'chrome'}, AssertionError),
     ]),
     (NoCheckRH, 'http', [
         ({'cookiejar': 'notacookiejar'}, False),
|
||||||
]),
|
]),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('handler,fail,scheme', [
|
||||||
|
('Urllib', False, 'http'),
|
||||||
|
('Requests', False, 'http'),
|
||||||
|
('CurlCFFI', False, 'http'),
|
||||||
|
('Websockets', False, 'ws'),
|
||||||
|
], indirect=['handler'])
|
||||||
|
def test_no_proxy(self, handler, fail, scheme):
|
||||||
|
run_validation(handler, fail, Request(f'{scheme}://', proxies={'no': '127.0.0.1,github.com'}))
|
||||||
|
run_validation(handler, fail, Request(f'{scheme}://'), proxies={'no': '127.0.0.1,github.com'})
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('handler,scheme', [
|
||||||
|
('Urllib', 'http'),
|
||||||
|
(HTTPSupportedRH, 'http'),
|
||||||
|
('Requests', 'http'),
|
||||||
|
('CurlCFFI', 'http'),
|
||||||
|
('Websockets', 'ws'),
|
||||||
|
], indirect=['handler'])
|
||||||
|
def test_empty_proxy(self, handler, scheme):
|
||||||
|
run_validation(handler, False, Request(f'{scheme}://', proxies={scheme: None}))
|
||||||
|
run_validation(handler, False, Request(f'{scheme}://'), proxies={scheme: None})
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('proxy_url', ['//example.com', 'example.com', '127.0.0.1', '/a/b/c'])
|
||||||
|
@pytest.mark.parametrize('handler,scheme', [
|
||||||
|
('Urllib', 'http'),
|
||||||
|
(HTTPSupportedRH, 'http'),
|
||||||
|
('Requests', 'http'),
|
||||||
|
('CurlCFFI', 'http'),
|
||||||
|
('Websockets', 'ws'),
|
||||||
|
], indirect=['handler'])
|
||||||
|
def test_invalid_proxy_url(self, handler, scheme, proxy_url):
|
||||||
|
run_validation(handler, UnsupportedRequest, Request(f'{scheme}://', proxies={scheme: proxy_url}))
|
||||||
|
|
||||||
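The tests added above rely on indirect=['handler']: pytest routes each parametrized value through the fixture of the same name, so a string like 'Urllib' reaches the test as whatever the fixture builds from it. A minimal sketch of the mechanism, independent of this test suite (the fixture body is illustrative):

import pytest

@pytest.fixture
def handler(request):
    # with indirect, the parametrized value lands in request.param;
    # the test receives this return value instead of the raw string
    return f'<handler for {request.param}>'

@pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=['handler'])
def test_handler_resolved_by_fixture(handler):
    assert handler.startswith('<handler for ')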
    @pytest.mark.parametrize('handler,scheme,fail,handler_kwargs', [
        (handler_tests[0], scheme, fail, handler_kwargs)
        for handler_tests in URL_SCHEME_TESTS
        for scheme, fail, handler_kwargs in handler_tests[1]
    ], indirect=['handler'])
    def test_url_scheme(self, handler, scheme, fail, handler_kwargs):
        run_validation(handler, fail, Request(f'{scheme}://'), **(handler_kwargs or {}))

-    @pytest.mark.parametrize('handler,fail', [('Urllib', False), ('Requests', False), ('CurlCFFI', False)], indirect=['handler'])
-    def test_no_proxy(self, handler, fail):
-        run_validation(handler, fail, Request('http://', proxies={'no': '127.0.0.1,github.com'}))
-        run_validation(handler, fail, Request('http://'), proxies={'no': '127.0.0.1,github.com'})
-
-    @pytest.mark.parametrize('handler,proxy_key,fail', [
-        (handler_tests[0], proxy_key, fail)
+    @pytest.mark.parametrize('handler,scheme,proxy_key,proxy_scheme,fail', [
+        (handler_tests[0], handler_tests[1], proxy_key, proxy_scheme, fail)
        for handler_tests in PROXY_KEY_TESTS
-        for proxy_key, fail in handler_tests[1]
+        for proxy_key, proxy_scheme, fail in handler_tests[2]
    ], indirect=['handler'])
-    def test_proxy_key(self, handler, proxy_key, fail):
-        run_validation(handler, fail, Request('http://', proxies={proxy_key: 'http://example.com'}))
-        run_validation(handler, fail, Request('http://'), proxies={proxy_key: 'http://example.com'})
+    def test_proxy_key(self, handler, scheme, proxy_key, proxy_scheme, fail):
+        run_validation(handler, fail, Request(f'{scheme}://', proxies={proxy_key: f'{proxy_scheme}://example.com'}))
+        run_validation(handler, fail, Request(f'{scheme}://'), proxies={proxy_key: f'{proxy_scheme}://example.com'})

    @pytest.mark.parametrize('handler,req_scheme,scheme,fail', [
        (handler_tests[0], handler_tests[1], scheme, fail)
@@ -1325,16 +1289,6 @@ def test_proxy_scheme(self, handler, req_scheme, scheme, fail):
        run_validation(handler, fail, Request(f'{req_scheme}://', proxies={req_scheme: f'{scheme}://example.com'}))
        run_validation(handler, fail, Request(f'{req_scheme}://'), proxies={req_scheme: f'{scheme}://example.com'})

-    @pytest.mark.parametrize('handler', ['Urllib', HTTPSupportedRH, 'Requests', 'CurlCFFI'], indirect=True)
-    def test_empty_proxy(self, handler):
-        run_validation(handler, False, Request('http://', proxies={'http': None}))
-        run_validation(handler, False, Request('http://'), proxies={'http': None})
-
-    @pytest.mark.parametrize('proxy_url', ['//example.com', 'example.com', '127.0.0.1', '/a/b/c'])
-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
-    def test_invalid_proxy_url(self, handler, proxy_url):
-        run_validation(handler, UnsupportedRequest, Request('http://', proxies={'http': proxy_url}))
-
    @pytest.mark.parametrize('handler,scheme,extensions,fail', [
        (handler_tests[0], handler_tests[1], extensions, fail)
        for handler_tests in EXTENSION_TESTS
@@ -1520,7 +1474,7 @@ def test_compat_opener(self):
    @pytest.mark.parametrize('proxy,expected', [
        ('http://127.0.0.1:8080', {'all': 'http://127.0.0.1:8080'}),
        ('', {'all': '__noproxy__'}),
-        (None, {'http': 'http://127.0.0.1:8081', 'https': 'http://127.0.0.1:8081'})  # env, set https
+        (None, {'http': 'http://127.0.0.1:8081', 'https': 'http://127.0.0.1:8081'}),  # env, set https
    ])
    def test_proxy(self, proxy, expected, monkeypatch):
        monkeypatch.setenv('HTTP_PROXY', 'http://127.0.0.1:8081')
@@ -1592,7 +1546,7 @@ def _send(self, request: Request):
        with FakeImpersonationRHYDL() as ydl:
            with pytest.raises(
                RequestError,
-                match=r'Impersonate target "test" is not available'
+                match=r'Impersonate target "test" is not available',
            ):
                ydl.urlopen(Request('http://', extensions={'impersonate': ImpersonateTarget('test', None, None, None)}))

@@ -1604,7 +1558,7 @@ def _send(self, request: Request):
                pass

            _SUPPORTED_URL_SCHEMES = ('http',)
-            _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget('abc',): 'test'}
+            _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget('abc'): 'test'}
            _SUPPORTED_PROXY_SCHEMES = None

            super().__init__(*args, **kwargs)
@@ -1613,14 +1567,14 @@ def _send(self, request: Request):
        with FakeHTTPRHYDL() as ydl:
            with pytest.raises(
                RequestError,
-                match=r'Impersonate target "test" is not available'
+                match=r'Impersonate target "test" is not available',
            ):
                ydl.urlopen(Request('http://', extensions={'impersonate': ImpersonateTarget('test', None, None, None)}))

    def test_raise_impersonate_error(self):
        with pytest.raises(
            YoutubeDLError,
-            match=r'Impersonate target "test" is not available'
+            match=r'Impersonate target "test" is not available',
        ):
            FakeYDL({'impersonate': ImpersonateTarget('test', None, None, None)})

@@ -1638,7 +1592,7 @@ def _send(self, request: Request):
        monkeypatch.setattr(FakeYDL, 'build_request_director', lambda cls, handlers, preferences=None: brh(cls, handlers=[IRH]))

        with FakeYDL({
-            'impersonate': ImpersonateTarget('abc', None, None, None)
+            'impersonate': ImpersonateTarget('abc', None, None, None),
        }) as ydl:
            rh = self.build_handler(ydl, IRH)
            assert rh.impersonate == ImpersonateTarget('abc', None, None, None)
@@ -1650,7 +1604,7 @@ class TestRH(ImpersonateRequestHandler):
            def _send(self, request: Request):
                pass
            _SUPPORTED_URL_SCHEMES = ('http',)
-            _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget(target_client,): 'test'}
+            _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget(target_client): 'test'}
            RH_KEY = target_client
            RH_NAME = target_client
            handlers.append(TestRH)
@@ -1660,7 +1614,7 @@ def _send(self, request: Request):
        assert set(ydl._get_available_impersonate_targets()) == {
            (ImpersonateTarget('xyz'), 'xyz'),
            (ImpersonateTarget('abc'), 'abc'),
-            (ImpersonateTarget('asd'), 'asd')
+            (ImpersonateTarget('asd'), 'asd'),
        }
        assert ydl._impersonate_target_available(ImpersonateTarget('abc'))
        assert ydl._impersonate_target_available(ImpersonateTarget())
@@ -1883,7 +1837,7 @@ def test_copy(self):
            extensions={'cookiejar': CookieJar()},
            headers={'Accept-Encoding': 'br'},
            proxies={'http': 'http://127.0.0.1'},
-            data=[b'123']
+            data=[b'123'],
        )
        req_copy = req.copy()
        assert req_copy is not req
@@ -1909,7 +1863,7 @@ class AnotherRequest(Request):
        assert isinstance(req.copy(), AnotherRequest)

    def test_url(self):
-        req = Request(url='https://фtest.example.com/ some spaceв?ä=c',)
+        req = Request(url='https://фtest.example.com/ some spaceв?ä=c')
        assert req.url == 'https://xn--test-z6d.example.com/%20some%20space%D0%B2?%C3%A4=c'

        assert Request(url='//example.com').url == 'http://example.com'
@@ -1924,7 +1878,7 @@ class TestResponse:
        ('custom', 200, 'custom'),
        (None, 404, 'Not Found'),  # fallback status
        ('', 403, 'Forbidden'),
-        (None, 999, None)
+        (None, 999, None),
    ])
    def test_reason(self, reason, status, expected):
        res = Response(io.BytesIO(b''), url='test://', headers={}, status=status, reason=reason)
@@ -1979,7 +1933,7 @@ def test_target_from_str(self, target_str, expected):

    @pytest.mark.parametrize('target_str', [
        '-120', ':-12.0', '-12:-12', '-:-',
-        '::', 'a-c-d:', 'a-c-d:e-f-g', 'a:b:'
+        '::', 'a-c-d:', 'a-c-d:e-f-g', 'a:b:',
    ])
    def test_target_from_invalid_str(self, target_str):
        with pytest.raises(ValueError):
@@ -1995,7 +1949,7 @@ def test_target_from_invalid_str(self, target_str):
        (ImpersonateTarget('abc', '120', 'xyz', None), 'abc-120:xyz'),
        (ImpersonateTarget('abc', None, 'xyz'), 'abc:xyz'),
        (ImpersonateTarget(None, None, 'xyz', '6.5'), ':xyz-6.5'),
-        (ImpersonateTarget('abc', ), 'abc'),
+        (ImpersonateTarget('abc'), 'abc'),
        (ImpersonateTarget(None, None, None, None), ''),
    ])
    def test_str(self, target, expected):
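Several fixes above drop a trailing comma inside a call, e.g. ImpersonateTarget('abc',) becoming ImpersonateTarget('abc'). Inside a call the comma is a no-op, but the same tokens outside a call build a 1-tuple, which is why linters flag the pattern as confusing. A quick standalone check:

def f(x):
    return x

assert f('abc',) == f('abc')  # trailing comma in a call changes nothing
assert ('abc',) != 'abc'      # a bare parenthesized value plus a comma is a tuple
assert ('abc') == 'abc'       # without the comma, parentheses are just grouping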
test/test_networking_utils.py
@@ -39,7 +39,7 @@ def test_select_proxy(self):
        proxies = {
            'all': 'socks5://example.com',
            'http': 'http://example.com:1080',
-            'no': 'bypass.example.com,yt-dl.org'
+            'no': 'bypass.example.com,yt-dl.org',
        }

        assert select_proxy('https://example.com', proxies) == proxies['all']
@@ -54,7 +54,7 @@ def test_select_proxy(self):
            'port': 1080,
            'rdns': True,
            'username': None,
-            'password': None
+            'password': None,
        }),
        ('socks5://user:@example.com:5555', {
            'proxytype': ProxyType.SOCKS5,
@@ -62,7 +62,7 @@ def test_select_proxy(self):
            'port': 5555,
            'rdns': False,
            'username': 'user',
-            'password': ''
+            'password': '',
        }),
        ('socks4://u%40ser:pa%20ss@127.0.0.1:1080', {
            'proxytype': ProxyType.SOCKS4,
@@ -70,7 +70,7 @@ def test_select_proxy(self):
            'port': 1080,
            'rdns': False,
            'username': 'u@ser',
-            'password': 'pa ss'
+            'password': 'pa ss',
        }),
        ('socks4a://:pa%20ss@127.0.0.1', {
            'proxytype': ProxyType.SOCKS4A,
@@ -78,8 +78,8 @@ def test_select_proxy(self):
            'port': 1080,
            'rdns': True,
            'username': '',
-            'password': 'pa ss'
-        })
+            'password': 'pa ss',
+        }),
    ])
    def test_make_socks_proxy_opts(self, socks_proxy, expected):
        assert make_socks_proxy_opts(socks_proxy) == expected
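The parametrized cases above double as documentation for make_socks_proxy_opts: it parses a proxy URL into the options the SOCKS layer needs, decoding percent-encoded credentials and deriving rdns from the scheme (socks4a/socks5h resolve hostnames remotely). A hedged usage sketch, with the import path assumed from this test module:

from yt_dlp.networking._helper import make_socks_proxy_opts  # import path assumed

opts = make_socks_proxy_opts('socks5://user:@example.com:5555')
# per the expected dict above: ProxyType.SOCKS5, port 5555,
# rdns False, username 'user', password ''
print(opts['port'], opts['rdns'])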
test/test_overwrites.py
@@ -27,7 +27,7 @@ def test_default_overwrites(self):
            [
                sys.executable, 'yt_dlp/__main__.py',
                '-o', 'test.webm',
-                'https://www.youtube.com/watch?v=jNQXAC9IVRw'
+                'https://www.youtube.com/watch?v=jNQXAC9IVRw',
            ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = outp.communicate()
        self.assertTrue(b'has already been downloaded' in sout)
@@ -39,7 +39,7 @@ def test_yes_overwrites(self):
            [
                sys.executable, 'yt_dlp/__main__.py', '--yes-overwrites',
                '-o', 'test.webm',
-                'https://www.youtube.com/watch?v=jNQXAC9IVRw'
+                'https://www.youtube.com/watch?v=jNQXAC9IVRw',
            ], cwd=root_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        sout, serr = outp.communicate()
        self.assertTrue(b'has already been downloaded' not in sout)

test/test_plugins.py
@@ -31,7 +31,7 @@ def test_extractor_classes(self):

        # don't load modules with underscore prefix
        self.assertFalse(
-            f'{PACKAGE_NAME}.extractor._ignore' in sys.modules.keys(),
+            f'{PACKAGE_NAME}.extractor._ignore' in sys.modules,
            'loaded module beginning with underscore')
        self.assertNotIn('IgnorePluginIE', plugins_ie.keys())

test/test_post_hooks.py
@@ -59,7 +59,7 @@ def hook_two(self, filename):

    def hook_three(self, filename):
        self.files.append(filename)
-        raise Exception('Test exception for \'%s\'' % filename)
+        raise Exception(f'Test exception for \'{filename}\'')

    def tearDown(self):
        for f in self.files:

test/test_postprocessors.py
@@ -9,7 +9,7 @@


from yt_dlp import YoutubeDL
-from yt_dlp.compat import compat_shlex_quote
+from yt_dlp.utils import shell_quote
from yt_dlp.postprocessor import (
    ExecPP,
    FFmpegThumbnailsConvertorPP,
@@ -65,7 +65,7 @@ class TestExec(unittest.TestCase):
    def test_parse_cmd(self):
        pp = ExecPP(YoutubeDL(), '')
        info = {'filepath': 'file name'}
-        cmd = 'echo %s' % compat_shlex_quote(info['filepath'])
+        cmd = 'echo {}'.format(shell_quote(info['filepath']))

        self.assertEqual(pp.parse_cmd('echo', info), cmd)
        self.assertEqual(pp.parse_cmd('echo {}', info), cmd)
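The swap from compat_shlex_quote to shell_quote keeps the property this test depends on: a path containing spaces comes back quoted so the shell treats it as one argument. A small sketch (output shown for POSIX; Windows quoting differs):

from yt_dlp.utils import shell_quote

print('echo {}'.format(shell_quote('file name')))  # echo 'file name'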
@@ -125,7 +125,8 @@ def test_remove_marked_arrange_sponsors_CanGetThroughUnaltered(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, chapters, [])

    def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
-        chapters = self._chapters([70], ['c']) + [
+        chapters = [
+            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(30, 40, 'preview'),
            self._sponsor_chapter(50, 60, 'filler')]
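The recurring rewrite in these chapter tests replaces list concatenation with unpacking inside a single literal (a + [b] becomes [*a, b]), which avoids building an intermediate list and keeps every element at the same indentation level. The two forms are equivalent for lists:

base = [1, 2]
assert base + [3, 4] == [*base, 3, 4]  # same elements, same order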
@@ -136,7 +137,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithSponsors(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_SponsorBlockChapters(self):
-        chapters = self._chapters([70], ['c']) + [
+        chapters = [
+            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 20, 'chapter', title='sb c1'),
            self._sponsor_chapter(15, 16, 'chapter', title='sb c2'),
            self._sponsor_chapter(30, 40, 'preview'),
@@ -149,10 +151,14 @@ def test_remove_marked_arrange_sponsors_SponsorBlockChapters(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_UniqueNamesForOverlappingSponsors(self):
-        chapters = self._chapters([120], ['c']) + [
-            self._sponsor_chapter(10, 45, 'sponsor'), self._sponsor_chapter(20, 40, 'selfpromo'),
-            self._sponsor_chapter(50, 70, 'sponsor'), self._sponsor_chapter(60, 85, 'selfpromo'),
-            self._sponsor_chapter(90, 120, 'selfpromo'), self._sponsor_chapter(100, 110, 'sponsor')]
+        chapters = [
+            *self._chapters([120], ['c']),
+            self._sponsor_chapter(10, 45, 'sponsor'),
+            self._sponsor_chapter(20, 40, 'selfpromo'),
+            self._sponsor_chapter(50, 70, 'sponsor'),
+            self._sponsor_chapter(60, 85, 'selfpromo'),
+            self._sponsor_chapter(90, 120, 'selfpromo'),
+            self._sponsor_chapter(100, 110, 'sponsor')]
        expected = self._chapters(
            [10, 20, 40, 45, 50, 60, 70, 85, 90, 100, 110, 120],
            ['c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Sponsor, Unpaid/Self Promotion',
@@ -172,7 +178,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithCuts(self):
            chapters, self._chapters([40], ['c']), cuts)

    def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self):
-        chapters = self._chapters([70], ['c']) + [
+        chapters = [
+            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(30, 40, 'selfpromo', remove=True),
            self._sponsor_chapter(50, 60, 'interaction')]
@@ -185,24 +192,29 @@ def test_remove_marked_arrange_sponsors_ChapterWithSponsorsAndCuts(self):
    def test_remove_marked_arrange_sponsors_ChapterWithSponsorCutInTheMiddle(self):
        cuts = [self._sponsor_chapter(20, 30, 'selfpromo', remove=True),
                self._chapter(40, 50, remove=True)]
-        chapters = self._chapters([70], ['c']) + [self._sponsor_chapter(10, 60, 'sponsor')] + cuts
+        chapters = [
+            *self._chapters([70], ['c']),
+            self._sponsor_chapter(10, 60, 'sponsor'),
+            *cuts]
        expected = self._chapters(
            [10, 40, 50], ['c', '[SponsorBlock]: Sponsor', 'c'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_ChapterWithCutHidingSponsor(self):
        cuts = [self._sponsor_chapter(20, 50, 'selfpromo', remove=True)]
-        chapters = self._chapters([60], ['c']) + [
+        chapters = [
+            *self._chapters([60], ['c']),
            self._sponsor_chapter(10, 20, 'intro'),
            self._sponsor_chapter(30, 40, 'sponsor'),
            self._sponsor_chapter(50, 60, 'outro'),
-        ] + cuts
+            *cuts]
        expected = self._chapters(
            [10, 20, 30], ['c', '[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self):
-        chapters = self._chapters([70], ['c']) + [
+        chapters = [
+            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(20, 30, 'selfpromo'),
            self._sponsor_chapter(30, 40, 'interaction')]
@@ -213,7 +225,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithAdjacentSponsors(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self):
-        chapters = self._chapters([70], ['c']) + [
+        chapters = [
+            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 20, 'sponsor'),
            self._sponsor_chapter(20, 30, 'interaction', remove=True),
            self._chapter(30, 40, remove=True),
@@ -226,7 +239,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithAdjacentCuts(self):
            chapters, expected, [self._chapter(20, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self):
-        chapters = self._chapters([70], ['c']) + [
+        chapters = [
+            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 30, 'sponsor'),
            self._sponsor_chapter(20, 50, 'selfpromo'),
            self._sponsor_chapter(40, 60, 'interaction')]
@@ -238,7 +252,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithOverlappingSponsors(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self):
-        chapters = self._chapters([70], ['c']) + [
+        chapters = [
+            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 30, 'sponsor', remove=True),
            self._sponsor_chapter(20, 50, 'selfpromo', remove=True),
            self._sponsor_chapter(40, 60, 'interaction', remove=True)]
@@ -246,7 +261,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithOverlappingCuts(self):
            chapters, self._chapters([20], ['c']), [self._chapter(10, 60, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(self):
-        chapters = self._chapters([170], ['c']) + [
+        chapters = [
+            *self._chapters([170], ['c']),
            self._sponsor_chapter(0, 30, 'intro'),
            self._sponsor_chapter(20, 50, 'sponsor'),
            self._sponsor_chapter(40, 60, 'selfpromo'),
@@ -267,7 +283,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsors(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self):
-        chapters = self._chapters([170], ['c']) + [
+        chapters = [
+            *self._chapters([170], ['c']),
            self._chapter(0, 30, remove=True),
            self._sponsor_chapter(20, 50, 'sponsor', remove=True),
            self._chapter(40, 60, remove=True),
@@ -284,7 +301,8 @@ def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingCuts(self):
            chapters, self._chapters([20], ['c']), expected_cuts)

    def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterCut(self):
-        chapters = self._chapters([60], ['c']) + [
+        chapters = [
+            *self._chapters([60], ['c']),
            self._sponsor_chapter(10, 60, 'sponsor'),
            self._sponsor_chapter(10, 40, 'intro'),
            self._sponsor_chapter(30, 50, 'interaction'),
@@ -297,7 +315,8 @@ def test_remove_marked_arrange_sponsors_OverlappingSponsorsDifferentTitlesAfterCut(self):
            chapters, expected, [self._chapter(30, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self):
-        chapters = self._chapters([70], ['c']) + [
+        chapters = [
+            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 30, 'sponsor'),
            self._sponsor_chapter(20, 50, 'interaction'),
            self._sponsor_chapter(30, 50, 'selfpromo', remove=True),
@@ -310,7 +329,8 @@ def test_remove_marked_arrange_sponsors_SponsorsNoLongerOverlapAfterCut(self):
            chapters, expected, [self._chapter(30, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self):
-        chapters = self._chapters([70], ['c']) + [
+        chapters = [
+            *self._chapters([70], ['c']),
            self._sponsor_chapter(10, 60, 'sponsor'),
            self._sponsor_chapter(20, 60, 'interaction'),
            self._sponsor_chapter(30, 50, 'selfpromo', remove=True)]
@@ -321,7 +341,8 @@ def test_remove_marked_arrange_sponsors_SponsorsStillOverlapAfterCut(self):
            chapters, expected, [self._chapter(30, 50, remove=True)])

    def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndCuts(self):
-        chapters = self._chapters([200], ['c']) + [
+        chapters = [
+            *self._chapters([200], ['c']),
            self._sponsor_chapter(10, 40, 'sponsor'),
            self._sponsor_chapter(10, 30, 'intro'),
            self._chapter(20, 30, remove=True),
@@ -347,8 +368,9 @@ def test_remove_marked_arrange_sponsors_ChapterWithRunsOfOverlappingSponsorsAndCuts(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, expected_cuts)

    def test_remove_marked_arrange_sponsors_SponsorOverlapsMultipleChapters(self):
-        chapters = (self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5'])
-                    + [self._sponsor_chapter(10, 90, 'sponsor')])
+        chapters = [
+            *self._chapters([20, 40, 60, 80, 100], ['c1', 'c2', 'c3', 'c4', 'c5']),
+            self._sponsor_chapter(10, 90, 'sponsor')]
        expected = self._chapters([10, 90, 100], ['c1', '[SponsorBlock]: Sponsor', 'c5'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

@@ -359,9 +381,10 @@ def test_remove_marked_arrange_sponsors_CutOverlapsMultipleChapters(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorsWithinSomeChaptersAndOverlappingOthers(self):
-        chapters = (self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4'])
-                    + [self._sponsor_chapter(20, 30, 'sponsor'),
-                       self._sponsor_chapter(50, 70, 'selfpromo')])
+        chapters = [
+            *self._chapters([10, 40, 60, 80], ['c1', 'c2', 'c3', 'c4']),
+            self._sponsor_chapter(20, 30, 'sponsor'),
+            self._sponsor_chapter(50, 70, 'selfpromo')]
        expected = self._chapters([10, 20, 30, 40, 50, 70, 80],
                                  ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c2', 'c3',
                                   '[SponsorBlock]: Unpaid/Self Promotion', 'c4'])
@@ -374,8 +397,9 @@ def test_remove_marked_arrange_sponsors_CutsWithinSomeChaptersAndOverlappingOthers(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_ChaptersAfterLastSponsor(self):
-        chapters = (self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4'])
-                    + [self._sponsor_chapter(10, 30, 'music_offtopic')])
+        chapters = [
+            *self._chapters([20, 40, 50, 60], ['c1', 'c2', 'c3', 'c4']),
+            self._sponsor_chapter(10, 30, 'music_offtopic')]
        expected = self._chapters(
            [10, 30, 40, 50, 60],
            ['c1', '[SponsorBlock]: Non-Music Section', 'c2', 'c3', 'c4'])
@@ -388,8 +412,9 @@ def test_remove_marked_arrange_sponsors_ChaptersAfterLastCut(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorStartsAtChapterStart(self):
-        chapters = (self._chapters([10, 20, 40], ['c1', 'c2', 'c3'])
-                    + [self._sponsor_chapter(20, 30, 'sponsor')])
+        chapters = [
+            *self._chapters([10, 20, 40], ['c1', 'c2', 'c3']),
+            self._sponsor_chapter(20, 30, 'sponsor')]
        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

@@ -400,8 +425,9 @@ def test_remove_marked_arrange_sponsors_CutStartsAtChapterStart(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorEndsAtChapterEnd(self):
-        chapters = (self._chapters([10, 30, 40], ['c1', 'c2', 'c3'])
-                    + [self._sponsor_chapter(20, 30, 'sponsor')])
+        chapters = [
+            *self._chapters([10, 30, 40], ['c1', 'c2', 'c3']),
+            self._sponsor_chapter(20, 30, 'sponsor')]
        expected = self._chapters([10, 20, 30, 40], ['c1', 'c2', '[SponsorBlock]: Sponsor', 'c3'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

@@ -412,8 +438,9 @@ def test_remove_marked_arrange_sponsors_CutEndsAtChapterEnd(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorCoincidesWithChapters(self):
-        chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
-                    + [self._sponsor_chapter(10, 30, 'sponsor')])
+        chapters = [
+            *self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']),
+            self._sponsor_chapter(10, 30, 'sponsor')]
        expected = self._chapters([10, 30, 40], ['c1', '[SponsorBlock]: Sponsor', 'c4'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

@@ -424,8 +451,9 @@ def test_remove_marked_arrange_sponsors_CutCoincidesWithChapters(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorsAtVideoBoundaries(self):
-        chapters = (self._chapters([20, 40, 60], ['c1', 'c2', 'c3'])
-                    + [self._sponsor_chapter(0, 10, 'intro'), self._sponsor_chapter(50, 60, 'outro')])
+        chapters = [
+            *self._chapters([20, 40, 60], ['c1', 'c2', 'c3']),
+            self._sponsor_chapter(0, 10, 'intro'), self._sponsor_chapter(50, 60, 'outro')]
        expected = self._chapters(
            [10, 20, 40, 50, 60], ['[SponsorBlock]: Intermission/Intro Animation', 'c1', 'c2', 'c3', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
@@ -437,8 +465,10 @@ def test_remove_marked_arrange_sponsors_CutsAtVideoBoundaries(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_SponsorsOverlapChaptersAtVideoBoundaries(self):
-        chapters = (self._chapters([10, 40, 50], ['c1', 'c2', 'c3'])
-                    + [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(30, 50, 'outro')])
+        chapters = [
+            *self._chapters([10, 40, 50], ['c1', 'c2', 'c3']),
+            self._sponsor_chapter(0, 20, 'intro'),
+            self._sponsor_chapter(30, 50, 'outro')]
        expected = self._chapters(
            [20, 30, 50], ['[SponsorBlock]: Intermission/Intro Animation', 'c2', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])
@@ -450,8 +480,10 @@ def test_remove_marked_arrange_sponsors_CutsOverlapChaptersAtVideoBoundaries(self):
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, cuts)

    def test_remove_marked_arrange_sponsors_EverythingSponsored(self):
-        chapters = (self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4'])
-                    + [self._sponsor_chapter(0, 20, 'intro'), self._sponsor_chapter(20, 40, 'outro')])
+        chapters = [
+            *self._chapters([10, 20, 30, 40], ['c1', 'c2', 'c3', 'c4']),
+            self._sponsor_chapter(0, 20, 'intro'),
+            self._sponsor_chapter(20, 40, 'outro')]
        expected = self._chapters([20, 40], ['[SponsorBlock]: Intermission/Intro Animation', '[SponsorBlock]: Endcards/Credits'])
        self._remove_marked_arrange_sponsors_test_impl(chapters, expected, [])

@@ -491,38 +523,39 @@ def test_remove_marked_arrange_sponsors_TinyChapterAtTheStartPrependedToTheNext(self):
            chapters, self._chapters([2.5], ['c2']), cuts)

    def test_remove_marked_arrange_sponsors_TinyChaptersResultingFromSponsorOverlapAreIgnored(self):
-        chapters = self._chapters([1, 3, 4], ['c1', 'c2', 'c3']) + [
+        chapters = [
+            *self._chapters([1, 3, 4], ['c1', 'c2', 'c3']),
            self._sponsor_chapter(1.5, 2.5, 'sponsor')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([1.5, 2.5, 4], ['c1', '[SponsorBlock]: Sponsor', 'c3']), [])

    def test_remove_marked_arrange_sponsors_TinySponsorsOverlapsAreIgnored(self):
-        chapters = self._chapters([2, 3, 5], ['c1', 'c2', 'c3']) + [
+        chapters = [
+            *self._chapters([2, 3, 5], ['c1', 'c2', 'c3']),
            self._sponsor_chapter(1, 3, 'sponsor'),
-            self._sponsor_chapter(2.5, 4, 'selfpromo')
-        ]
+            self._sponsor_chapter(2.5, 4, 'selfpromo')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([1, 3, 4, 5], [
                'c1', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion', 'c3']), [])

    def test_remove_marked_arrange_sponsors_TinySponsorsPrependedToTheNextSponsor(self):
-        chapters = self._chapters([4], ['c']) + [
+        chapters = [
+            *self._chapters([4], ['c']),
            self._sponsor_chapter(1.5, 2, 'sponsor'),
-            self._sponsor_chapter(2, 4, 'selfpromo')
-        ]
+            self._sponsor_chapter(2, 4, 'selfpromo')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([1.5, 4], ['c', '[SponsorBlock]: Unpaid/Self Promotion']), [])

    def test_remove_marked_arrange_sponsors_SmallestSponsorInTheOverlapGetsNamed(self):
        self._pp._sponsorblock_chapter_title = '[SponsorBlock]: %(name)s'
-        chapters = self._chapters([10], ['c']) + [
+        chapters = [
+            *self._chapters([10], ['c']),
            self._sponsor_chapter(2, 8, 'sponsor'),
-            self._sponsor_chapter(4, 6, 'selfpromo')
-        ]
+            self._sponsor_chapter(4, 6, 'selfpromo')]
        self._remove_marked_arrange_sponsors_test_impl(
            chapters, self._chapters([2, 4, 6, 8, 10], [
                'c', '[SponsorBlock]: Sponsor', '[SponsorBlock]: Unpaid/Self Promotion',
-                '[SponsorBlock]: Sponsor', 'c'
+                '[SponsorBlock]: Sponsor', 'c',
            ]), [])

    def test_make_concat_opts_CommonCase(self):
test/test_socks.py
@@ -95,7 +95,7 @@ def handle(self):
                return

        elif Socks5Auth.AUTH_USER_PASS in methods:
-            self.connection.sendall(struct.pack("!BB", SOCKS5_VERSION, Socks5Auth.AUTH_USER_PASS))
+            self.connection.sendall(struct.pack('!BB', SOCKS5_VERSION, Socks5Auth.AUTH_USER_PASS))

            _, user_len = struct.unpack('!BB', self.connection.recv(2))
            username = self.connection.recv(user_len).decode()
@@ -174,7 +174,7 @@ def handle(self):
        if 0x0 < dest_ip <= 0xFF:
            use_remote_dns = True
        else:
-            socks_info['ipv4_address'] = socket.inet_ntoa(struct.pack("!I", dest_ip))
+            socks_info['ipv4_address'] = socket.inet_ntoa(struct.pack('!I', dest_ip))

        user_id = self._read_until_null().decode()
        if user_id != (self.socks_kwargs.get('user_id') or ''):
@@ -291,7 +291,7 @@ def ctx(request):
    ('Urllib', 'http'),
    ('Requests', 'http'),
    ('Websockets', 'ws'),
-    ('CurlCFFI', 'http')
+    ('CurlCFFI', 'http'),
], indirect=True)
class TestSocks4Proxy:
    def test_socks4_no_auth(self, handler, ctx):
@@ -366,7 +366,7 @@ def test_timeout(self, handler, ctx):
    ('Urllib', 'http'),
    ('Requests', 'http'),
    ('Websockets', 'ws'),
-    ('CurlCFFI', 'http')
+    ('CurlCFFI', 'http'),
], indirect=True)
class TestSocks5Proxy:

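The SOCKS4 handler above turns the 32-bit destination address into dotted-quad form with struct.pack('!I', ...) plus socket.inet_ntoa; the diff only normalizes the quote style. For reference, a standalone round-trip (value illustrative):

import socket
import struct

dest_ip = 0x7F000001  # 127.0.0.1 as a big-endian 32-bit integer
assert socket.inet_ntoa(struct.pack('!I', dest_ip)) == '127.0.0.1'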
test/test_subtitles.py
@@ -40,12 +40,11 @@ def setUp(self):
        self.ie = self.IE()
        self.DL.add_info_extractor(self.ie)
        if not self.IE.working():
-            print('Skipping: %s marked as not _WORKING' % self.IE.ie_key())
+            print(f'Skipping: {self.IE.ie_key()} marked as not _WORKING')
            self.skipTest('IE marked as not _WORKING')

    def getInfoDict(self):
-        info_dict = self.DL.extract_info(self.url, download=False)
-        return info_dict
+        return self.DL.extract_info(self.url, download=False)

    def getSubtitles(self):
        info_dict = self.getInfoDict()
@@ -87,7 +86,7 @@ def test_youtube_allsubtitles(self):
        self.assertEqual(md5(subtitles['en']), 'ae1bd34126571a77aabd4d276b28044d')
        self.assertEqual(md5(subtitles['it']), '0e0b667ba68411d88fd1c5f4f4eab2f9')
        for lang in ['fr', 'de']:
-            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')

    def _test_subtitles_format(self, fmt, md5_hash, lang='en'):
        self.DL.params['writesubtitles'] = True
@@ -157,7 +156,7 @@ def test_allsubtitles(self):
        self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
        self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
        for lang in ['es', 'fr', 'de']:
-            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')

    def test_nosubtitles(self):
        self.DL.expect_warning('video doesn\'t have subtitles')
@@ -182,7 +181,7 @@ def test_allsubtitles(self):
        self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
        self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
        for lang in ['es', 'fr', 'de']:
-            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
+            self.assertTrue(subtitles.get(lang) is not None, f'Subtitles for \'{lang}\' not extracted')


@is_download_test

test/test_traversal.py
@@ -31,7 +31,7 @@ def test_traversal_base(self):
            'allow tuple path'
        assert traverse_obj(_TEST_DATA, ['str']) == 'str', \
            'allow list path'
-        assert traverse_obj(_TEST_DATA, (value for value in ("str",))) == 'str', \
+        assert traverse_obj(_TEST_DATA, (value for value in ('str',))) == 'str', \
            'allow iterable path'
        assert traverse_obj(_TEST_DATA, 'str') == 'str', \
            'single items should be treated as a path'
@@ -70,7 +70,7 @@ def test_traversal_function(self):

    def test_traversal_set(self):
        # transformation/type, like `expected_type`
-        assert traverse_obj(_TEST_DATA, (..., {str.upper}, )) == ['STR'], \
+        assert traverse_obj(_TEST_DATA, (..., {str.upper})) == ['STR'], \
            'Function in set should be a transformation'
        assert traverse_obj(_TEST_DATA, (..., {str})) == ['str'], \
            'Type in set should be a type filter'
@@ -276,7 +276,7 @@ def test_traversal_traverse_string(self):
            '`...` should result in string (same value) if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)), traverse_string=True) == 'sr', \
            '`slice` should result in string if `traverse_string`'
-        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == "s"), traverse_string=True) == 'str', \
+        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == 's'), traverse_string=True) == 'str', \
            'function should result in string if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), traverse_string=True) == ['s', 'r'], \
            'branching should result in list if `traverse_string`'
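As the asserts above document, a set inside a traverse_obj path is special-cased: a callable in the set transforms each matched value, while a type filters by isinstance. A short sketch mirroring those asserts (import path assumed from this test module):

from yt_dlp.utils.traversal import traverse_obj  # import path assumed

data = {'str': 'str'}
assert traverse_obj(data, (..., {str.upper})) == ['STR']  # callable: transformation
assert traverse_obj(data, (..., {str})) == ['str']        # type: isinstance filter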
test/test_update.py
@@ -78,11 +78,11 @@

TEST_LOCKFILE_COMMENT = '# This file is used for regulating self-update'

-TEST_LOCKFILE_V1 = r'''%s
+TEST_LOCKFILE_V1 = rf'''{TEST_LOCKFILE_COMMENT}
lock 2022.08.18.36 .+ Python 3\.6
lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
-''' % TEST_LOCKFILE_COMMENT
+'''

TEST_LOCKFILE_V2_TMPL = r'''%s
lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6
@@ -98,12 +98,12 @@

TEST_LOCKFILE_ACTUAL = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_V1.rstrip('\n')

-TEST_LOCKFILE_FORK = r'''%s# Test if a fork blocks updates to non-numeric tags
+TEST_LOCKFILE_FORK = rf'''{TEST_LOCKFILE_ACTUAL}# Test if a fork blocks updates to non-numeric tags
lockV2 fork/yt-dlp pr0000 .+ Python 3.6
lockV2 fork/yt-dlp pr1234 (?!win_x86_exe).+ Python 3\.7
lockV2 fork/yt-dlp pr1234 win_x86_exe .+ Windows-(?:Vista|2008Server)
lockV2 fork/yt-dlp pr9999 .+ Python 3.11
-''' % TEST_LOCKFILE_ACTUAL
+'''


class FakeUpdater(Updater):
|
|
|
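The two lockfile rewrites above swap deferred %-interpolation for immediate f-string interpolation. Since both spellings are raw strings, the regex escapes survive unchanged, which a short self-contained check confirms (template shortened here for illustration):

    COMMENT = '# This file is used for regulating self-update'

    old_style = r'''%s
    lock 2022.08.18.36 .+ Python 3\.6
    ''' % COMMENT

    new_style = rf'''{COMMENT}
    lock 2022.08.18.36 .+ Python 3\.6
    '''

    assert old_style == new_style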
@@ -5,6 +5,7 @@
 import sys
 import unittest
 import warnings
+import datetime as dt

 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

@@ -27,6 +28,7 @@
     ExtractorError,
     InAdvancePagedList,
     LazyList,
+    NO_DEFAULT,
     OnDemandPagedList,
     Popen,
     age_restricted,
@@ -274,8 +276,8 @@ def env(var):
             self.assertEqual(expand_path(env('HOME')), os.getenv('HOME'))
             self.assertEqual(expand_path('~'), os.getenv('HOME'))
             self.assertEqual(
-                expand_path('~/%s' % env('yt_dlp_EXPATH_PATH')),
-                '%s/expanded' % os.getenv('HOME'))
+                expand_path('~/{}'.format(env('yt_dlp_EXPATH_PATH'))),
+                '{}/expanded'.format(os.getenv('HOME')))
         finally:
             os.environ['HOME'] = old_home or ''

@@ -354,12 +356,12 @@ def test_datetime_from_str(self):
         self.assertEqual(datetime_from_str('now+23hours', precision='hour'), datetime_from_str('now+23hours', precision='auto'))

     def test_daterange(self):
-        _20century = DateRange("19000101", "20000101")
-        self.assertFalse("17890714" in _20century)
-        _ac = DateRange("00010101")
-        self.assertTrue("19690721" in _ac)
-        _firstmilenium = DateRange(end="10000101")
-        self.assertTrue("07110427" in _firstmilenium)
+        _20century = DateRange('19000101', '20000101')
+        self.assertFalse('17890714' in _20century)
+        _ac = DateRange('00010101')
+        self.assertTrue('19690721' in _ac)
+        _firstmilenium = DateRange(end='10000101')
+        self.assertTrue('07110427' in _firstmilenium)

     def test_unified_dates(self):
         self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
@@ -504,7 +506,7 @@ def test_xpath_attr(self):
         self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)

     def test_smuggle_url(self):
-        data = {"ö": "ö", "abc": [3]}
+        data = {'ö': 'ö', 'abc': [3]}
         url = 'https://foo.bar/baz?x=y#a'
         smug_url = smuggle_url(url, data)
         unsmug_url, unsmug_data = unsmuggle_url(smug_url)
@@ -768,6 +770,11 @@ def test_encode_compat_str(self):

     def test_parse_iso8601(self):
         self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
+        self.assertEqual(parse_iso8601('2014-03-23T23:04:26-07:00'), 1395641066)
+        self.assertEqual(parse_iso8601('2014-03-23T23:04:26', timezone=dt.timedelta(hours=-7)), 1395641066)
+        self.assertEqual(parse_iso8601('2014-03-23T23:04:26', timezone=NO_DEFAULT), None)
+        # default does not override timezone in date_str
+        self.assertEqual(parse_iso8601('2014-03-23T23:04:26-07:00', timezone=dt.timedelta(hours=-10)), 1395641066)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
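The new assertions fully determine how the `timezone` argument behaves; the same values also work as a standalone sketch (imports as in the test):

    import datetime as dt

    from yt_dlp.utils import NO_DEFAULT, parse_iso8601

    # The argument only supplies a default UTC offset for naive timestamps:
    assert parse_iso8601('2014-03-23T23:04:26', timezone=dt.timedelta(hours=-7)) == 1395641066
    # An explicit offset in the string always wins over the default:
    assert parse_iso8601('2014-03-23T23:04:26-07:00', timezone=dt.timedelta(hours=-10)) == 1395641066
    # NO_DEFAULT disables the fallback, so a naive timestamp yields None:
    assert parse_iso8601('2014-03-23T23:04:26', timezone=NO_DEFAULT) is None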
@@ -777,7 +784,7 @@ def test_parse_iso8601(self):
     def test_strip_jsonp(self):
         stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
         d = json.loads(stripped)
-        self.assertEqual(d, [{"id": "532cb", "x": 3}])
+        self.assertEqual(d, [{'id': '532cb', 'x': 3}])

         stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
         d = json.loads(stripped)
@@ -915,19 +922,19 @@ def test_escape_rfc3986(self):
     def test_normalize_url(self):
         self.assertEqual(
             normalize_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
-            'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
+            'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4',
         )
         self.assertEqual(
             normalize_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
-            'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
+            'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290',
         )
         self.assertEqual(
             normalize_url('http://тест.рф/фрагмент'),
-            'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
+            'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82',
         )
         self.assertEqual(
             normalize_url('http://тест.рф/абв?абв=абв#абв'),
-            'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
+            'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2',
         )
         self.assertEqual(normalize_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')

@@ -972,7 +979,7 @@ def test_js_to_json_vars_strings(self):
                     'e': 'false',
                     'f': '"false"',
                     'g': 'var',
-                }
+                },
             )),
             {
                 'null': None,
@@ -981,8 +988,8 @@ def test_js_to_json_vars_strings(self):
                 'trueStr': 'true',
                 'false': False,
                 'falseStr': 'false',
-                'unresolvedVar': 'var'
-            }
+                'unresolvedVar': 'var',
+            },
         )

         self.assertDictEqual(
@@ -998,14 +1005,14 @@ def test_js_to_json_vars_strings(self):
                     'b': '"123"',
                     'c': '1.23',
                     'd': '"1.23"',
-                }
+                },
             )),
             {
                 'int': 123,
                 'intStr': '123',
                 'float': 1.23,
                 'floatStr': '1.23',
-            }
+            },
         )

         self.assertDictEqual(
@@ -1021,14 +1028,14 @@ def test_js_to_json_vars_strings(self):
                     'b': '"{}"',
                     'c': '[]',
                     'd': '"[]"',
-                }
+                },
             )),
             {
                 'object': {},
                 'objectStr': '{}',
                 'array': [],
                 'arrayStr': '[]',
-            }
+            },
         )

     def test_js_to_json_realworld(self):
@@ -1074,7 +1081,7 @@ def test_js_to_json_realworld(self):

     def test_js_to_json_edgecases(self):
         on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
-        self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
+        self.assertEqual(json.loads(on), {'abc_def': "1'\\2\\'3\"4"})

         on = js_to_json('{"abc": true}')
         self.assertEqual(json.loads(on), {'abc': True})
@@ -1106,9 +1113,9 @@ def test_js_to_json_edgecases(self):
             'c': 0,
             'd': 42.42,
             'e': [],
-            'f': "abc",
-            'g': "",
-            '42': 42
+            'f': 'abc',
+            'g': '',
+            '42': 42,
         })

         on = js_to_json('["abc", "def",]')
@@ -1202,8 +1209,8 @@ def test_js_to_json_common_constructors(self):
         self.assertEqual(json.loads(js_to_json('Array(5, 10)')), [5, 10])
         self.assertEqual(json.loads(js_to_json('new Array(15,5)')), [15, 5])
         self.assertEqual(json.loads(js_to_json('new Map([Array(5, 10),new Array(15,5)])')), {'5': 10, '15': 5})
-        self.assertEqual(json.loads(js_to_json('new Date("123")')), "123")
-        self.assertEqual(json.loads(js_to_json('new Date(\'2023-10-19\')')), "2023-10-19")
+        self.assertEqual(json.loads(js_to_json('new Date("123")')), '123')
+        self.assertEqual(json.loads(js_to_json('new Date(\'2023-10-19\')')), '2023-10-19')

     def test_extract_attributes(self):
         self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
@@ -1258,7 +1265,7 @@ def test_intlist_to_bytes(self):
     def test_args_to_str(self):
         self.assertEqual(
             args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
-            'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""'
+            'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""',
         )

     def test_parse_filesize(self):
@@ -1341,10 +1348,10 @@ def test_is_html(self):
         self.assertTrue(is_html(  # UTF-8 with BOM
             b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
         self.assertTrue(is_html(  # UTF-16-LE
-            b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
+            b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00',
         ))
         self.assertTrue(is_html(  # UTF-16-BE
-            b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
+            b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4',
         ))
         self.assertTrue(is_html(  # UTF-32-BE
             b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
@@ -1928,7 +1935,7 @@ def test_locked_file(self):
                 with locked_file(FILE, test_mode, False):
                     pass
             except (BlockingIOError, PermissionError):
-                if not testing_write:  # FIXME
+                if not testing_write:  # FIXME: blocked read access
                     print(f'Known issue: Exclusive lock ({lock_mode}) blocks read access ({test_mode})')
                     continue
                 self.assertTrue(testing_write, f'{test_mode} is blocked by {lock_mode}')
@@ -1996,7 +2003,7 @@ def total(*x, **kwargs):
                         msg='int fn with expected_type int should give int')
        self.assertEqual(try_call(lambda: 1, expected_type=dict), None,
                         msg='int fn with wrong expected_type should give None')
-        self.assertEqual(try_call(total, args=(0, 1, 0, ), expected_type=int), 1,
+        self.assertEqual(try_call(total, args=(0, 1, 0), expected_type=int), 1,
                         msg='fn should accept arglist')
        self.assertEqual(try_call(total, kwargs={'a': 0, 'b': 1, 'c': 0}, expected_type=int), 1,
                         msg='fn should accept kwargs')

@@ -3,10 +3,12 @@
 # Allow direct execution
 import os
 import sys
+import time

 import pytest

 from test.helper import verify_address_availability
+from yt_dlp.networking.common import Features, DEFAULT_TIMEOUT

 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

@@ -18,7 +20,7 @@
 import ssl
 import threading

-from yt_dlp import socks
+from yt_dlp import socks, traverse_obj
 from yt_dlp.cookies import YoutubeDLCookieJar
 from yt_dlp.dependencies import websockets
 from yt_dlp.networking import Request
@@ -114,6 +116,7 @@ def ws_validate_and_send(rh, req):


 @pytest.mark.skipif(not websockets, reason='websockets must be installed to test websocket request handlers')
+@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
 class TestWebsSocketRequestHandlerConformance:
     @classmethod
     def setup_class(cls):
@@ -129,7 +132,6 @@ def setup_class(cls):
         cls.mtls_wss_thread, cls.mtls_wss_port = create_mtls_wss_websocket_server()
         cls.mtls_wss_base_url = f'wss://127.0.0.1:{cls.mtls_wss_port}'

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_basic_websockets(self, handler):
         with handler() as rh:
             ws = ws_validate_and_send(rh, Request(self.ws_base_url))
@@ -141,7 +143,6 @@ def test_basic_websockets(self, handler):

     # https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
     @pytest.mark.parametrize('msg,opcode', [('str', 1), (b'bytes', 2)])
-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_send_types(self, handler, msg, opcode):
         with handler() as rh:
             ws = ws_validate_and_send(rh, Request(self.ws_base_url))
@@ -149,7 +150,6 @@ def test_send_types(self, handler, msg, opcode):
         assert int(ws.recv()) == opcode
         ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_verify_cert(self, handler):
         with handler() as rh:
             with pytest.raises(CertificateVerifyError):
@@ -160,14 +160,12 @@ def test_verify_cert(self, handler):
         assert ws.status == 101
         ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_ssl_error(self, handler):
         with handler(verify=False) as rh:
             with pytest.raises(SSLError, match=r'ssl(?:v3|/tls) alert handshake failure') as exc_info:
                 ws_validate_and_send(rh, Request(self.bad_wss_host))
             assert not issubclass(exc_info.type, CertificateVerifyError)

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     @pytest.mark.parametrize('path,expected', [
         # Unicode characters should be encoded with uppercase percent-encoding
         ('/中文', '/%E4%B8%AD%E6%96%87'),
@@ -182,7 +180,6 @@ def test_percent_encode(self, handler, path, expected):
         assert ws.status == 101
         ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_remove_dot_segments(self, handler):
         with handler() as rh:
             # This isn't a comprehensive test,
@@ -195,7 +192,6 @@ def test_remove_dot_segments(self, handler):

     # We are restricted to known HTTP status codes in http.HTTPStatus
     # Redirects are not supported for websockets
-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     @pytest.mark.parametrize('status', (200, 204, 301, 302, 303, 400, 500, 511))
     def test_raise_http_error(self, handler, status):
         with handler() as rh:
@@ -203,17 +199,30 @@ def test_raise_http_error(self, handler, status):
             ws_validate_and_send(rh, Request(f'{self.ws_base_url}/gen_{status}'))
         assert exc_info.value.status == status

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     @pytest.mark.parametrize('params,extensions', [
         ({'timeout': sys.float_info.min}, {}),
         ({}, {'timeout': sys.float_info.min}),
     ])
-    def test_timeout(self, handler, params, extensions):
+    def test_read_timeout(self, handler, params, extensions):
         with handler(**params) as rh:
             with pytest.raises(TransportError):
                 ws_validate_and_send(rh, Request(self.ws_base_url, extensions=extensions))

+    def test_connect_timeout(self, handler):
+        # nothing should be listening on this port
+        connect_timeout_url = 'ws://10.255.255.255'
+        with handler(timeout=0.01) as rh, pytest.raises(TransportError):
+            now = time.time()
+            ws_validate_and_send(rh, Request(connect_timeout_url))
+        assert time.time() - now < DEFAULT_TIMEOUT
+
+        # Per request timeout, should override handler timeout
+        request = Request(connect_timeout_url, extensions={'timeout': 0.01})
+        with handler() as rh, pytest.raises(TransportError):
+            now = time.time()
+            ws_validate_and_send(rh, request)
+        assert time.time() - now < DEFAULT_TIMEOUT
+
     def test_cookies(self, handler):
         cookiejar = YoutubeDLCookieJar()
         cookiejar.set_cookie(http.cookiejar.Cookie(
@@ -239,7 +248,6 @@ def test_cookies(self, handler):
         assert json.loads(ws.recv())['cookie'] == 'test=ytdlp'
         ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_source_address(self, handler):
         source_address = f'127.0.0.{random.randint(5, 255)}'
         verify_address_availability(source_address)
@@ -249,7 +257,6 @@ def test_source_address(self, handler):
         assert source_address == ws.recv()
         ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_response_url(self, handler):
         with handler() as rh:
             url = f'{self.ws_base_url}/something'
@@ -257,7 +264,6 @@ def test_response_url(self, handler):
         assert ws.url == url
         ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_request_headers(self, handler):
         with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:
             # Global Headers
@@ -291,18 +297,55 @@ def test_request_headers(self, handler):
             'client_certificate': os.path.join(MTLS_CERT_DIR, 'client.crt'),
             'client_certificate_key': os.path.join(MTLS_CERT_DIR, 'clientencrypted.key'),
             'client_certificate_password': 'foobar',
-        }
+        },
     ))
-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_mtls(self, handler, client_cert):
         with handler(
             # Disable client-side validation of unacceptable self-signed testcert.pem
             # The test is of a check on the server side, so unaffected
             verify=False,
-            client_cert=client_cert
+            client_cert=client_cert,
         ) as rh:
             ws_validate_and_send(rh, Request(self.mtls_wss_base_url)).close()

+    def test_request_disable_proxy(self, handler):
+        for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['ws']:
+            # Given handler is configured with a proxy
+            with handler(proxies={'ws': f'{proxy_proto}://10.255.255.255'}, timeout=5) as rh:
+                # When a proxy is explicitly set to None for the request
+                ws = ws_validate_and_send(rh, Request(self.ws_base_url, proxies={'http': None}))
+                # Then no proxy should be used
+                assert ws.status == 101
+                ws.close()
+
+    @pytest.mark.skip_handlers_if(
+        lambda _, handler: Features.NO_PROXY not in handler._SUPPORTED_FEATURES, 'handler does not support NO_PROXY')
+    def test_noproxy(self, handler):
+        for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['ws']:
+            # Given the handler is configured with a proxy
+            with handler(proxies={'ws': f'{proxy_proto}://10.255.255.255'}, timeout=5) as rh:
+                for no_proxy in (f'127.0.0.1:{self.ws_port}', '127.0.0.1', 'localhost'):
+                    # When request no proxy includes the request url host
+                    ws = ws_validate_and_send(rh, Request(self.ws_base_url, proxies={'no': no_proxy}))
+                    # Then the proxy should not be used
+                    assert ws.status == 101
+                    ws.close()
+
+    @pytest.mark.skip_handlers_if(
+        lambda _, handler: Features.ALL_PROXY not in handler._SUPPORTED_FEATURES, 'handler does not support ALL_PROXY')
+    def test_allproxy(self, handler):
+        supported_proto = traverse_obj(handler._SUPPORTED_PROXY_SCHEMES, 0, default='ws')
+        # This is a bit of a hacky test, but it should be enough to check whether the handler is using the proxy.
+        # 0.1s might not be enough of a timeout if proxy is not used in all cases, but should still get failures.
+        with handler(proxies={'all': f'{supported_proto}://10.255.255.255'}, timeout=0.1) as rh:
+            with pytest.raises(TransportError):
+                ws_validate_and_send(rh, Request(self.ws_base_url)).close()
+
+        with handler(timeout=0.1) as rh:
+            with pytest.raises(TransportError):
+                ws_validate_and_send(
+                    rh, Request(self.ws_base_url, proxies={'all': f'{supported_proto}://10.255.255.255'})).close()
+

 def create_fake_ws_connection(raised):
     import websockets.sync.client

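Illustrative usage of the request options these tests rely on; a sketch only (Request is the public class from yt_dlp.networking, the URLs here are placeholders):

    from yt_dlp.networking import Request

    # A per-request 'timeout' extension overrides whatever timeout the
    # handler was constructed with:
    req = Request('ws://127.0.0.1:8765', extensions={'timeout': 0.01})

    # Likewise, per-request proxies override handler-level proxies; a value
    # of None disables proxying for that request entirely:
    req_direct = Request('ws://127.0.0.1:8765', proxies={'http': None})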
@@ -13,7 +13,7 @@

 class TestYoutubeMisc(unittest.TestCase):
     def test_youtube_extract(self):
-        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
+        assertExtractId = lambda url, video_id: self.assertEqual(YoutubeIE.extract_id(url), video_id)
         assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
         assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
         assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')

@@ -46,17 +46,17 @@
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
         84,
-        '123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
+        '123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
         83,
-        '123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
+        '123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
         '4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
-        '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
+        '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B',
     ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
@@ -207,7 +207,7 @@ def tearDown(self):
 def t_factory(name, sig_func, url_pattern):
     def make_tfunc(url, sig_input, expected_sig):
         m = url_pattern.match(url)
-        assert m, '%r should follow URL format' % url
+        assert m, f'{url!r} should follow URL format'
         test_id = m.group('id')

         def test_func(self):

@@ -109,7 +109,6 @@
     determine_protocol,
     encode_compat_str,
     encodeFilename,
-    error_to_compat_str,
     escapeHTML,
     expand_path,
     extract_basic_auth,
@@ -583,7 +582,7 @@ class YoutubeDL:
         'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data',
         'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
         'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
-        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
+        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time',
     }
     _deprecated_multivalue_fields = {
         'album_artist': 'album_artists',
@@ -594,7 +593,7 @@ class YoutubeDL:
     }
     _format_selection_exts = {
         'audio': set(MEDIA_EXTENSIONS.common_audio),
-        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
+        'video': {*MEDIA_EXTENSIONS.common_video, '3gp'},
         'storyboards': set(MEDIA_EXTENSIONS.storyboards),
     }
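The last change above is pure notation: a set display with iterable unpacking builds the same set without the throwaway tuple concatenation (stand-in value used here instead of the real MEDIA_EXTENSIONS constant):

    common_video = ('mp4', 'mov', 'webm')  # stand-in, not the real constant
    assert set(common_video + ('3gp',)) == {*common_video, '3gp'}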
@@ -628,7 +627,7 @@ def __init__(self, params=None, auto_init=True):
             error=sys.stderr,
             screen=sys.stderr if self.params.get('quiet') else stdout,
             console=None if compat_os_name == 'nt' else next(
-                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
+                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None),
         )

         try:
@@ -679,9 +678,9 @@ def process_color_policy(stream):
                 width_args = [] if width is None else ['-w', str(width)]
                 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                 try:
-                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
+                    self._output_process = Popen(['bidiv', *width_args], **sp_kwargs)
                 except OSError:
-                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
+                    self._output_process = Popen(['fribidi', '-c', 'UTF-8', *width_args], **sp_kwargs)
                 self._output_channel = os.fdopen(master, 'rb')
             except OSError as ose:
                 if ose.errno == errno.ENOENT:
@@ -822,8 +821,7 @@ def warn_if_short_id(self, argv):
         )
         self.report_warning(
             'Long argument string detected. '
-            'Use -- to separate parameters and URLs, like this:\n%s' %
-            shell_quote(correct_argv))
+            f'Use -- to separate parameters and URLs, like this:\n{shell_quote(correct_argv)}')

     def add_info_extractor(self, ie):
         """Add an InfoExtractor object to the end of the list."""
@@ -922,7 +920,7 @@ def to_screen(self, message, skip_eol=False, quiet=None, only_once=False):
         if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
             return
         self._write_string(
-            '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
+            '{}{}'.format(self._bidi_workaround(message), ('' if skip_eol else '\n')),
             self._out_files.screen, only_once=only_once)

     def to_stderr(self, message, only_once=False):
@@ -1045,10 +1043,10 @@ def _format_err(self, *args, **kwargs):
         return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)

     def report_warning(self, message, only_once=False):
-        '''
+        """
         Print the message to stderr, it will be prefixed with 'WARNING:'
         If stderr is a tty file the 'WARNING:' will be colored
-        '''
+        """
         if self.params.get('logger') is not None:
             self.params['logger'].warning(message)
         else:
@@ -1066,14 +1064,14 @@ def deprecated_feature(self, message):
         self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True)

     def report_error(self, message, *args, **kwargs):
-        '''
+        """
         Do the same as trouble, but prefixes the message with 'ERROR:', colored
         in red if stderr is a tty file.
-        '''
+        """
         self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)

     def write_debug(self, message, only_once=False):
-        '''Log debug message or Print message to stderr'''
+        """Log debug message or Print message to stderr"""
         if not self.params.get('verbose', False):
             return
         message = f'[debug] {message}'
@@ -1085,14 +1083,14 @@ def write_debug(self, message, only_once=False):
     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
         try:
-            self.to_screen('[download] %s has already been downloaded' % file_name)
+            self.to_screen(f'[download] {file_name} has already been downloaded')
         except UnicodeEncodeError:
             self.to_screen('[download] The file has already been downloaded')

     def report_file_delete(self, file_name):
         """Report that existing file will be deleted."""
         try:
-            self.to_screen('Deleting existing file %s' % file_name)
+            self.to_screen(f'Deleting existing file {file_name}')
         except UnicodeEncodeError:
             self.to_screen('Deleting existing file')

@@ -1147,7 +1145,7 @@ def _outtmpl_expandpath(outtmpl):

     @staticmethod
     def escape_outtmpl(outtmpl):
-        ''' Escape any remaining strings like %s, %abc% etc. '''
+        """ Escape any remaining strings like %s, %abc% etc. """
         return re.sub(
             STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
             lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
@@ -1155,7 +1153,7 @@ def escape_outtmpl(outtmpl):

     @classmethod
     def validate_outtmpl(cls, outtmpl):
-        ''' @return None or Exception object '''
+        """ @return None or Exception object """
         outtmpl = re.sub(
             STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
             lambda mobj: f'{mobj.group(0)[:-1]}s',
@@ -1208,13 +1206,13 @@ def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
         }
         # Field is of the form key1.key2...
         # where keys (except first) can be string, int, slice or "{field, ...}"
-        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
-        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
+        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}  # noqa: UP031
+        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {  # noqa: UP031
             'inner': FIELD_INNER_RE,
-            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
+            'field': rf'\w*(?:\.{FIELD_INNER_RE})*',
         }
         MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
-        MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
+        MATH_OPERATORS_RE = r'(?:{})'.format('|'.join(map(re.escape, MATH_FUNCTIONS.keys())))
         INTERNAL_FORMAT_RE = re.compile(rf'''(?xs)
             (?P<negate>-)?
             (?P<fields>{FIELD_RE})
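A note on the two `# noqa: UP031` markers above, which is my reading rather than anything stated in the diff: these patterns keep printf-style `%` interpolation because the regex text itself is full of literal braces, which `str.format` would force you to double. Compare:

    field = r'\w+'
    printf_style = r'{%(field)s}' % {'field': field}    # braces stay literal
    format_style = r'{{{field}}}'.format(field=field)   # every literal brace doubled
    assert printf_style == format_style == r'{\w+}'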
@@ -1337,7 +1335,7 @@ def create_key(outer_mobj):
                 value, default = None, na

             fmt = outer_mobj.group('format')
-            if fmt == 's' and last_field in field_size_compat_map.keys() and isinstance(value, int):
+            if fmt == 's' and last_field in field_size_compat_map and isinstance(value, int):
                 fmt = f'0{field_size_compat_map[last_field]:d}d'

             flags = outer_mobj.group('conversion') or ''
@@ -1362,7 +1360,7 @@ def create_key(outer_mobj):
             elif fmt[-1] == 'U':  # unicode normalized
                 value, fmt = unicodedata.normalize(
                     # "+" = compatibility equivalence, "#" = NFD
-                    'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
+                    'NF{}{}'.format('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                     value), str_fmt
             elif fmt[-1] == 'D':  # decimal suffix
                 num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
@@ -1390,7 +1388,7 @@ def create_key(outer_mobj):
             if fmt[-1] in 'csra':
                 value = sanitizer(last_field, value)

-            key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
+            key = '{}\0{}'.format(key.replace('%', '%\0'), outer_mobj.group('format'))
             TMPL_DICT[key] = value
             return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

@@ -1479,9 +1477,9 @@ def check_filter():

             date = info_dict.get('upload_date')
             if date is not None:
-                dateRange = self.params.get('daterange', DateRange())
-                if date not in dateRange:
-                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
+                date_range = self.params.get('daterange', DateRange())
+                if date not in date_range:
+                    return f'{date_from_str(date).isoformat()} upload date is not in range {date_range}'
             view_count = info_dict.get('view_count')
             if view_count is not None:
                 min_views = self.params.get('min_views')
@@ -1491,7 +1489,7 @@ def check_filter():
                 if max_views is not None and view_count > max_views:
                     return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
             if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
-                return 'Skipping "%s" because it is age restricted' % video_title
+                return f'Skipping "{video_title}" because it is age restricted'

             match_filter = self.params.get('match_filter')
             if match_filter is None:
@@ -1544,7 +1542,7 @@ def check_filter():

     @staticmethod
     def add_extra_info(info_dict, extra_info):
-        '''Set the keys from extra_info in info dict if they are missing'''
+        """Set the keys from extra_info in info dict if they are missing"""
         for key, value in extra_info.items():
             info_dict.setdefault(key, value)

@@ -1590,7 +1588,7 @@ def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                     self.to_screen(f'[download] {self._format_screen(temp_id, self.Styles.ID)}: '
                                    'has already been recorded in the archive')
                     if self.params.get('break_on_existing', False):
-                        raise ExistingVideoReached()
+                        raise ExistingVideoReached
                     break
             return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
         else:
@@ -1616,8 +1614,8 @@ def wrapper(self, *args, **kwargs):
         except GeoRestrictedError as e:
             msg = e.msg
             if e.countries:
-                msg += '\nThis video is available in %s.' % ', '.join(
-                    map(ISO3166Utils.short2full, e.countries))
+                msg += '\nThis video is available in {}.'.format(', '.join(
+                    map(ISO3166Utils.short2full, e.countries)))
             msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
             self.report_error(msg)
         except ExtractorError as e:  # An error we somewhat expected
@@ -1826,8 +1824,8 @@ def process_ie_result(self, ie_result, download=True, extra_info=None):
             if isinstance(additional_urls, str):
                 additional_urls = [additional_urls]
             self.to_screen(
-                '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
-            self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
+                '[info] {}: {} additional URL(s) requested'.format(ie_result['id'], len(additional_urls)))
+            self.write_debug('Additional URLs: "{}"'.format('", "'.join(additional_urls)))
             ie_result['additional_entries'] = [
                 self.extract_info(
                     url, download, extra_info=extra_info,
@@ -1879,8 +1877,8 @@ def process_ie_result(self, ie_result, download=True, extra_info=None):
             webpage_url = ie_result.get('webpage_url')  # Playlists maynot have webpage_url
             if webpage_url and webpage_url in self._playlist_urls:
                 self.to_screen(
-                    '[download] Skipping already downloaded playlist: %s'
-                    % ie_result.get('title') or ie_result.get('id'))
+                    '[download] Skipping already downloaded playlist: {}'.format(
+                        ie_result.get('title')) or ie_result.get('id'))
                 return

             self._playlist_level += 1
@@ -1895,8 +1893,8 @@ def process_ie_result(self, ie_result, download=True, extra_info=None):
                 self._playlist_urls.clear()
         elif result_type == 'compat_list':
             self.report_warning(
-                'Extractor %s returned a compat_list result. '
-                'It needs to be updated.' % ie_result.get('extractor'))
+                'Extractor {} returned a compat_list result. '
+                'It needs to be updated.'.format(ie_result.get('extractor')))

             def _fixup(r):
                 self.add_extra_info(r, {
@@ -1913,7 +1911,7 @@ def _fixup(r):
             ]
             return ie_result
         else:
-            raise Exception('Invalid result type: %s' % result_type)
+            raise Exception(f'Invalid result type: {result_type}')

     def _ensure_dir_exists(self, path):
         return make_dir(path, self.report_error)
@@ -2029,8 +2027,9 @@ def __process_playlist(self, ie_result, download):
                 resolved_entries[i] = (playlist_index, NO_DEFAULT)
                 continue

-            self.to_screen('[download] Downloading item %s of %s' % (
-                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
+            self.to_screen(
+                f'[download] Downloading item {self._format_screen(i + 1, self.Styles.ID)} '
+                f'of {self._format_screen(n_entries, self.Styles.EMPHASIS)}')

             entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
                 'playlist_index': playlist_index,
@@ -2080,9 +2079,9 @@ def _build_format_filter(self, filter_spec):
         }
         operator_rex = re.compile(r'''(?x)\s*
             (?P<key>[\w.-]+)\s*
-            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
+            (?P<op>{})(?P<none_inclusive>\s*\?)?\s*
             (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
-            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
+            '''.format('|'.join(map(re.escape, OPERATORS.keys()))))
         m = operator_rex.fullmatch(filter_spec)
         if m:
             try:
@@ -2093,7 +2092,7 @@ def _build_format_filter(self, filter_spec):
                 comparison_value = parse_filesize(m.group('value') + 'B')
                 if comparison_value is None:
                     raise ValueError(
-                        'Invalid value %r in format specification %r' % (
+                        'Invalid value {!r} in format specification {!r}'.format(
                             m.group('value'), filter_spec))
                 op = OPERATORS[m.group('op')]

@@ -2103,15 +2102,15 @@ def _build_format_filter(self, filter_spec):
             '^=': lambda attr, value: attr.startswith(value),
             '$=': lambda attr, value: attr.endswith(value),
             '*=': lambda attr, value: value in attr,
-            '~=': lambda attr, value: value.search(attr) is not None
+            '~=': lambda attr, value: value.search(attr) is not None,
         }
         str_operator_rex = re.compile(r'''(?x)\s*
             (?P<key>[a-zA-Z0-9._-]+)\s*
-            (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
+            (?P<negation>!\s*)?(?P<op>{})\s*(?P<none_inclusive>\?\s*)?
             (?P<quote>["'])?
             (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
             (?(quote)(?P=quote))\s*
-            ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
+            '''.format('|'.join(map(re.escape, STR_OPERATORS.keys()))))
         m = str_operator_rex.fullmatch(filter_spec)
         if m:
             if m.group('op') == '~=':
@@ -2125,7 +2124,7 @@ def _build_format_filter(self, filter_spec):
                 op = str_op

         if not m:
-            raise SyntaxError('Invalid filter specification %r' % filter_spec)
+            raise SyntaxError(f'Invalid filter specification {filter_spec!r}')

         def _filter(f):
             actual_value = f.get(m.group('key'))
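For orientation, these two regexes back yt-dlp's bracketed format-filter syntax; a usage sketch with a filter string of my own choosing (standard public API):

    import yt_dlp

    # Numeric clause (filesize<100M) and string clause (ext=mp4), parsed by
    # operator_rex and str_operator_rex respectively:
    ydl = yt_dlp.YoutubeDL({'format': 'best[filesize<100M][ext=mp4]'})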
@@ -2141,7 +2140,7 @@ def _check_formats(self, formats):
             if working:
                 yield f
                 continue
-            self.to_screen('[info] Testing format %s' % f['format_id'])
+            self.to_screen('[info] Testing format {}'.format(f['format_id']))
             path = self.get_output_path('temp')
             if not self._ensure_dir_exists(f'{path}/'):
                 continue
@@ -2149,19 +2148,19 @@ def _check_formats(self, formats):
             temp_file.close()
             try:
                 success, _ = self.dl(temp_file.name, f, test=True)
-            except (DownloadError, OSError, ValueError) + network_exceptions:
+            except (DownloadError, OSError, ValueError, *network_exceptions):
                 success = False
             finally:
                 if os.path.exists(temp_file.name):
                     try:
                         os.remove(temp_file.name)
                     except OSError:
-                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
+                        self.report_warning(f'Unable to delete temporary file "{temp_file.name}"')
             f['__working'] = success
             if success:
                 yield f
             else:
-                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
+                self.to_screen('[info] Unable to download format {}. Skipping...'.format(f['format_id']))

     def _select_formats(self, formats, selector):
         return list(selector({
|
@ -2214,8 +2213,8 @@ def syntax_error(note, start):
|
||||||
|
|
||||||
def _parse_filter(tokens):
|
def _parse_filter(tokens):
|
||||||
filter_parts = []
|
filter_parts = []
|
||||||
for type, string_, start, _, _ in tokens:
|
for type_, string_, _start, _, _ in tokens:
|
||||||
if type == tokenize.OP and string_ == ']':
|
if type_ == tokenize.OP and string_ == ']':
|
||||||
return ''.join(filter_parts)
|
return ''.join(filter_parts)
|
||||||
else:
|
else:
|
||||||
filter_parts.append(string_)
|
filter_parts.append(string_)
|
||||||
|
@ -2225,23 +2224,23 @@ def _remove_unused_ops(tokens):
|
||||||
# E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
|
# E.g. 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
|
||||||
ALLOWED_OPS = ('/', '+', ',', '(', ')')
|
ALLOWED_OPS = ('/', '+', ',', '(', ')')
|
||||||
last_string, last_start, last_end, last_line = None, None, None, None
|
last_string, last_start, last_end, last_line = None, None, None, None
|
||||||
for type, string_, start, end, line in tokens:
|
for type_, string_, start, end, line in tokens:
|
||||||
if type == tokenize.OP and string_ == '[':
|
if type_ == tokenize.OP and string_ == '[':
|
||||||
if last_string:
|
if last_string:
|
||||||
yield tokenize.NAME, last_string, last_start, last_end, last_line
|
yield tokenize.NAME, last_string, last_start, last_end, last_line
|
||||||
last_string = None
|
last_string = None
|
||||||
yield type, string_, start, end, line
|
yield type_, string_, start, end, line
|
||||||
# everything inside brackets will be handled by _parse_filter
|
# everything inside brackets will be handled by _parse_filter
|
||||||
for type, string_, start, end, line in tokens:
|
for type_, string_, start, end, line in tokens:
|
||||||
yield type, string_, start, end, line
|
yield type_, string_, start, end, line
|
||||||
if type == tokenize.OP and string_ == ']':
|
if type_ == tokenize.OP and string_ == ']':
|
||||||
break
|
break
|
||||||
elif type == tokenize.OP and string_ in ALLOWED_OPS:
|
elif type_ == tokenize.OP and string_ in ALLOWED_OPS:
|
||||||
if last_string:
|
if last_string:
|
||||||
yield tokenize.NAME, last_string, last_start, last_end, last_line
|
yield tokenize.NAME, last_string, last_start, last_end, last_line
|
||||||
last_string = None
|
last_string = None
|
||||||
yield type, string_, start, end, line
|
yield type_, string_, start, end, line
|
||||||
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
|
elif type_ in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
|
||||||
if not last_string:
|
if not last_string:
|
||||||
last_string = string_
|
last_string = string_
|
||||||
last_start = start
|
last_start = start
|
||||||
|
@ -2254,13 +2253,13 @@ def _remove_unused_ops(tokens):
|
||||||
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
|
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
|
||||||
selectors = []
|
selectors = []
|
||||||
current_selector = None
|
current_selector = None
|
||||||
for type, string_, start, _, _ in tokens:
|
for type_, string_, start, _, _ in tokens:
|
||||||
# ENCODING is only defined in Python 3.x
|
# ENCODING is only defined in Python 3.x
|
||||||
if type == getattr(tokenize, 'ENCODING', None):
|
if type_ == getattr(tokenize, 'ENCODING', None):
|
||||||
continue
|
continue
|
||||||
elif type in [tokenize.NAME, tokenize.NUMBER]:
|
elif type_ in [tokenize.NAME, tokenize.NUMBER]:
|
||||||
current_selector = FormatSelector(SINGLE, string_, [])
|
current_selector = FormatSelector(SINGLE, string_, [])
|
||||||
elif type == tokenize.OP:
|
elif type_ == tokenize.OP:
|
||||||
if string_ == ')':
|
if string_ == ')':
|
||||||
if not inside_group:
|
if not inside_group:
|
||||||
# ')' will be handled by the parentheses group
|
# ')' will be handled by the parentheses group
|
||||||
|
@ -2303,7 +2302,7 @@ def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, ins
|
||||||
current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
|
current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
|
||||||
else:
|
else:
|
||||||
raise syntax_error(f'Operator not recognized: "{string_}"', start)
|
raise syntax_error(f'Operator not recognized: "{string_}"', start)
|
||||||
elif type == tokenize.ENDMARKER:
|
elif type_ == tokenize.ENDMARKER:
|
||||||
break
|
break
|
||||||
if current_selector:
|
if current_selector:
|
||||||
selectors.append(current_selector)
|
selectors.append(current_selector)
|
||||||
|
@ -2378,7 +2377,7 @@ def _merge(formats_pair):
|
||||||
'acodec': the_only_audio.get('acodec'),
|
'acodec': the_only_audio.get('acodec'),
|
||||||
'abr': the_only_audio.get('abr'),
|
'abr': the_only_audio.get('abr'),
|
||||||
'asr': the_only_audio.get('asr'),
|
'asr': the_only_audio.get('asr'),
|
||||||
'audio_channels': the_only_audio.get('audio_channels')
|
'audio_channels': the_only_audio.get('audio_channels'),
|
||||||
})
|
})
|
||||||
|
|
||||||
return new_dict
|
return new_dict
|
||||||
|
@ -2459,9 +2458,9 @@ def selector_function(ctx):
|
||||||
|
|
||||||
format_fallback = not format_type and not format_modified # for b, w
|
format_fallback = not format_type and not format_modified # for b, w
|
||||||
_filter_f = (
|
_filter_f = (
|
||||||
(lambda f: f.get('%scodec' % format_type) != 'none')
|
(lambda f: f.get(f'{format_type}codec') != 'none')
|
||||||
if format_type and format_modified # bv*, ba*, wv*, wa*
|
if format_type and format_modified # bv*, ba*, wv*, wa*
|
||||||
else (lambda f: f.get('%scodec' % not_format_type) == 'none')
|
else (lambda f: f.get(f'{not_format_type}codec') == 'none')
|
||||||
if format_type # bv, ba, wv, wa
|
if format_type # bv, ba, wv, wa
|
||||||
else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
|
else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
|
||||||
if not format_modified # b, w
|
if not format_modified # b, w
|
||||||
|
@ -2529,7 +2528,7 @@ def __iter__(self):
|
||||||
|
|
||||||
def __next__(self):
|
def __next__(self):
|
||||||
if self.counter >= len(self.tokens):
|
if self.counter >= len(self.tokens):
|
||||||
raise StopIteration()
|
raise StopIteration
|
||||||
value = self.tokens[self.counter]
|
value = self.tokens[self.counter]
|
||||||
self.counter += 1
|
self.counter += 1
|
||||||
return value
|
return value
|
||||||
|
@ -2612,7 +2611,7 @@ def check_thumbnails(thumbnails):
|
||||||
self._sort_thumbnails(thumbnails)
|
self._sort_thumbnails(thumbnails)
|
||||||
for i, t in enumerate(thumbnails):
|
for i, t in enumerate(thumbnails):
|
||||||
if t.get('id') is None:
|
if t.get('id') is None:
|
||||||
t['id'] = '%d' % i
|
t['id'] = str(i)
|
||||||
if t.get('width') and t.get('height'):
|
if t.get('width') and t.get('height'):
|
||||||
t['resolution'] = '%dx%d' % (t['width'], t['height'])
|
t['resolution'] = '%dx%d' % (t['width'], t['height'])
|
||||||
t['url'] = sanitize_url(t['url'])
|
t['url'] = sanitize_url(t['url'])
|
||||||
|
@ -2673,8 +2672,8 @@ def _fill_common_fields(self, info_dict, final=True):
|
||||||
# Auto generate title fields corresponding to the *_number fields when missing
|
# Auto generate title fields corresponding to the *_number fields when missing
|
||||||
# in order to always have clean titles. This is very common for TV series.
|
# in order to always have clean titles. This is very common for TV series.
|
||||||
for field in ('chapter', 'season', 'episode'):
|
for field in ('chapter', 'season', 'episode'):
|
||||||
if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
|
if final and info_dict.get(f'{field}_number') is not None and not info_dict.get(field):
|
||||||
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
|
info_dict[field] = '%s %d' % (field.capitalize(), info_dict[f'{field}_number'])
|
||||||
|
|
||||||
for old_key, new_key in self._deprecated_multivalue_fields.items():
|
for old_key, new_key in self._deprecated_multivalue_fields.items():
|
||||||
if new_key in info_dict and old_key in info_dict:
|
if new_key in info_dict and old_key in info_dict:
|
||||||
|
@ -2706,8 +2705,8 @@ def process_video_result(self, info_dict, download=True):
|
||||||
|
|
||||||
def report_force_conversion(field, field_not, conversion):
|
def report_force_conversion(field, field_not, conversion):
|
||||||
self.report_warning(
|
self.report_warning(
|
||||||
'"%s" field is not %s - forcing %s conversion, there is an error in extractor'
|
f'"{field}" field is not {field_not} - forcing {conversion} conversion, '
|
||||||
% (field, field_not, conversion))
|
'there is an error in extractor')
|
||||||
|
|
||||||
def sanitize_string_field(info, string_field):
|
def sanitize_string_field(info, string_field):
|
||||||
field = info.get(string_field)
|
field = info.get(string_field)
|
||||||
|
@ -2824,28 +2823,28 @@ def is_wellformed(f):
|
||||||
if not formats:
|
if not formats:
|
||||||
self.raise_no_formats(info_dict)
|
self.raise_no_formats(info_dict)
|
||||||
|
|
||||||
for format in formats:
|
for fmt in formats:
|
||||||
sanitize_string_field(format, 'format_id')
|
sanitize_string_field(fmt, 'format_id')
|
||||||
sanitize_numeric_fields(format)
|
sanitize_numeric_fields(fmt)
|
||||||
format['url'] = sanitize_url(format['url'])
|
fmt['url'] = sanitize_url(fmt['url'])
|
||||||
if format.get('ext') is None:
|
if fmt.get('ext') is None:
|
||||||
format['ext'] = determine_ext(format['url']).lower()
|
fmt['ext'] = determine_ext(fmt['url']).lower()
|
||||||
if format['ext'] in ('aac', 'opus', 'mp3', 'flac', 'vorbis'):
|
if fmt['ext'] in ('aac', 'opus', 'mp3', 'flac', 'vorbis'):
|
||||||
if format.get('acodec') is None:
|
if fmt.get('acodec') is None:
|
||||||
format['acodec'] = format['ext']
|
fmt['acodec'] = fmt['ext']
|
||||||
if format.get('protocol') is None:
|
if fmt.get('protocol') is None:
|
||||||
format['protocol'] = determine_protocol(format)
|
fmt['protocol'] = determine_protocol(fmt)
|
||||||
if format.get('resolution') is None:
|
if fmt.get('resolution') is None:
|
||||||
format['resolution'] = self.format_resolution(format, default=None)
|
fmt['resolution'] = self.format_resolution(fmt, default=None)
|
||||||
if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
|
if fmt.get('dynamic_range') is None and fmt.get('vcodec') != 'none':
|
||||||
format['dynamic_range'] = 'SDR'
|
fmt['dynamic_range'] = 'SDR'
|
||||||
if format.get('aspect_ratio') is None:
|
if fmt.get('aspect_ratio') is None:
|
||||||
format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
|
fmt['aspect_ratio'] = try_call(lambda: round(fmt['width'] / fmt['height'], 2))
|
||||||
# For fragmented formats, "tbr" is often max bitrate and not average
|
# For fragmented formats, "tbr" is often max bitrate and not average
|
||||||
if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
|
if (('manifest-filesize-approx' in self.params['compat_opts'] or not fmt.get('manifest_url'))
|
||||||
and not format.get('filesize') and not format.get('filesize_approx')):
|
and not fmt.get('filesize') and not fmt.get('filesize_approx')):
|
||||||
format['filesize_approx'] = filesize_from_tbr(format.get('tbr'), info_dict.get('duration'))
|
fmt['filesize_approx'] = filesize_from_tbr(fmt.get('tbr'), info_dict.get('duration'))
|
||||||
format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict), load_cookies=True)
|
fmt['http_headers'] = self._calc_headers(collections.ChainMap(fmt, info_dict), load_cookies=True)
|
||||||
|
|
||||||
# Safeguard against old/insecure infojson when using --load-info-json
|
# Safeguard against old/insecure infojson when using --load-info-json
|
||||||
if info_dict.get('http_headers'):
|
if info_dict.get('http_headers'):
|
||||||
|
@ -2858,36 +2857,36 @@ def is_wellformed(f):
|
||||||
|
|
||||||
self.sort_formats({
|
self.sort_formats({
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'_format_sort_fields': info_dict.get('_format_sort_fields')
|
'_format_sort_fields': info_dict.get('_format_sort_fields'),
|
||||||
})
|
})
|
||||||
|
|
||||||
# Sanitize and group by format_id
|
# Sanitize and group by format_id
|
||||||
formats_dict = {}
|
formats_dict = {}
|
||||||
for i, format in enumerate(formats):
|
for i, fmt in enumerate(formats):
|
||||||
if not format.get('format_id'):
|
if not fmt.get('format_id'):
|
||||||
format['format_id'] = str(i)
|
fmt['format_id'] = str(i)
|
||||||
else:
|
else:
|
||||||
# Sanitize format_id from characters used in format selector expression
|
# Sanitize format_id from characters used in format selector expression
|
||||||
format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
|
fmt['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', fmt['format_id'])
|
||||||
formats_dict.setdefault(format['format_id'], []).append(format)
|
formats_dict.setdefault(fmt['format_id'], []).append(fmt)
|
||||||
|
|
||||||
# Make sure all formats have unique format_id
|
# Make sure all formats have unique format_id
|
||||||
common_exts = set(itertools.chain(*self._format_selection_exts.values()))
|
common_exts = set(itertools.chain(*self._format_selection_exts.values()))
|
||||||
for format_id, ambiguous_formats in formats_dict.items():
|
for format_id, ambiguous_formats in formats_dict.items():
|
||||||
ambigious_id = len(ambiguous_formats) > 1
|
ambigious_id = len(ambiguous_formats) > 1
|
||||||
for i, format in enumerate(ambiguous_formats):
|
for i, fmt in enumerate(ambiguous_formats):
|
||||||
if ambigious_id:
|
if ambigious_id:
|
||||||
format['format_id'] = '%s-%d' % (format_id, i)
|
fmt['format_id'] = f'{format_id}-{i}'
|
||||||
# Ensure there is no conflict between id and ext in format selection
|
# Ensure there is no conflict between id and ext in format selection
|
||||||
# See https://github.com/yt-dlp/yt-dlp/issues/1282
|
# See https://github.com/yt-dlp/yt-dlp/issues/1282
|
||||||
if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
|
if fmt['format_id'] != fmt['ext'] and fmt['format_id'] in common_exts:
|
||||||
format['format_id'] = 'f%s' % format['format_id']
|
fmt['format_id'] = 'f{}'.format(fmt['format_id'])
|
||||||
|
|
||||||
if format.get('format') is None:
|
if fmt.get('format') is None:
|
||||||
format['format'] = '{id} - {res}{note}'.format(
|
fmt['format'] = '{id} - {res}{note}'.format(
|
||||||
id=format['format_id'],
|
id=fmt['format_id'],
|
||||||
res=self.format_resolution(format),
|
res=self.format_resolution(fmt),
|
||||||
note=format_field(format, 'format_note', ' (%s)'),
|
note=format_field(fmt, 'format_note', ' (%s)'),
|
||||||
)
|
)
|
||||||
|
|
||||||
if self.params.get('check_formats') is True:
|
if self.params.get('check_formats') is True:
|
||||||
|
@ -3009,7 +3008,7 @@ def to_screen(*msg):
|
||||||
info_dict['requested_downloads'] = downloaded_formats
|
info_dict['requested_downloads'] = downloaded_formats
|
||||||
info_dict = self.run_all_pps('after_video', info_dict)
|
info_dict = self.run_all_pps('after_video', info_dict)
|
||||||
if max_downloads_reached:
|
if max_downloads_reached:
|
||||||
raise MaxDownloadsReached()
|
raise MaxDownloadsReached
|
||||||
|
|
||||||
# We update the info dict with the selected best quality format (backwards compatibility)
|
# We update the info dict with the selected best quality format (backwards compatibility)
|
||||||
info_dict.update(best_format)
|
info_dict.update(best_format)
|
||||||
|
@ -3070,8 +3069,8 @@ def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
|
||||||
else:
|
else:
|
||||||
f = formats[-1]
|
f = formats[-1]
|
||||||
self.report_warning(
|
self.report_warning(
|
||||||
'No subtitle format found matching "%s" for language %s, '
|
'No subtitle format found matching "{}" for language {}, '
|
||||||
'using %s' % (formats_query, lang, f['ext']))
|
'using {}. Use --list-subs for a list of available subtitles'.format(formats_query, lang, f['ext']))
|
||||||
subs[lang] = f
|
subs[lang] = f
|
||||||
return subs
|
return subs
|
||||||
|
|
||||||
|
@ -3226,7 +3225,7 @@ def replace_info_dict(new_info):
|
||||||
|
|
||||||
def check_max_downloads():
|
def check_max_downloads():
|
||||||
if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
|
if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
|
||||||
raise MaxDownloadsReached()
|
raise MaxDownloadsReached
|
||||||
|
|
||||||
if self.params.get('simulate'):
|
if self.params.get('simulate'):
|
||||||
info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
|
info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
|
||||||
|
@ -3400,7 +3399,7 @@ def correct_ext(filename, ext=new_ext):
|
||||||
for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
|
for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
|
||||||
f['filepath'] = fname = prepend_extension(
|
f['filepath'] = fname = prepend_extension(
|
||||||
correct_ext(temp_filename, info_dict['ext']),
|
correct_ext(temp_filename, info_dict['ext']),
|
||||||
'f%s' % f['format_id'], info_dict['ext'])
|
'f{}'.format(f['format_id']), info_dict['ext'])
|
||||||
downloaded.append(fname)
|
downloaded.append(fname)
|
||||||
info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
|
info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
|
||||||
success, real_download = self.dl(temp_filename, info_dict)
|
success, real_download = self.dl(temp_filename, info_dict)
|
||||||
|
@ -3433,7 +3432,7 @@ def correct_ext(filename, ext=new_ext):
|
||||||
if temp_filename != '-':
|
if temp_filename != '-':
|
||||||
fname = prepend_extension(
|
fname = prepend_extension(
|
||||||
correct_ext(temp_filename, new_info['ext']),
|
correct_ext(temp_filename, new_info['ext']),
|
||||||
'f%s' % f['format_id'], new_info['ext'])
|
'f{}'.format(f['format_id']), new_info['ext'])
|
||||||
if not self._ensure_dir_exists(fname):
|
if not self._ensure_dir_exists(fname):
|
||||||
return
|
return
|
||||||
f['filepath'] = fname
|
f['filepath'] = fname
|
||||||
|
@ -3465,11 +3464,11 @@ def correct_ext(filename, ext=new_ext):
|
||||||
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
|
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
|
||||||
|
|
||||||
except network_exceptions as err:
|
except network_exceptions as err:
|
||||||
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
|
self.report_error(f'unable to download video data: {err}')
|
||||||
return
|
return
|
||||||
except OSError as err:
|
except OSError as err:
|
||||||
raise UnavailableVideoError(err)
|
raise UnavailableVideoError(err)
|
||||||
except (ContentTooShortError, ) as err:
|
except ContentTooShortError as err:
|
||||||
self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
|
self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -3536,13 +3535,13 @@ def ffmpeg_fixup(cndn, msg, cls):
|
||||||
try:
|
try:
|
||||||
replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
|
replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
|
||||||
except PostProcessingError as err:
|
except PostProcessingError as err:
|
||||||
self.report_error('Postprocessing: %s' % str(err))
|
self.report_error(f'Postprocessing: {err}')
|
||||||
return
|
return
|
||||||
try:
|
try:
|
||||||
for ph in self._post_hooks:
|
for ph in self._post_hooks:
|
||||||
ph(info_dict['filepath'])
|
ph(info_dict['filepath'])
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
self.report_error('post hooks: %s' % str(err))
|
self.report_error(f'post hooks: {err}')
|
||||||
return
|
return
|
||||||
info_dict['__write_download_archive'] = True
|
info_dict['__write_download_archive'] = True
|
||||||
|
|
||||||
|
@ -3609,7 +3608,7 @@ def download_with_info_file(self, info_filename):
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def sanitize_info(info_dict, remove_private_keys=False):
|
def sanitize_info(info_dict, remove_private_keys=False):
|
||||||
''' Sanitize the infodict for converting to json '''
|
""" Sanitize the infodict for converting to json """
|
||||||
if info_dict is None:
|
if info_dict is None:
|
||||||
return info_dict
|
return info_dict
|
||||||
info_dict.setdefault('epoch', int(time.time()))
|
info_dict.setdefault('epoch', int(time.time()))
|
||||||
|
@ -3644,7 +3643,7 @@ def filter_fn(obj):
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def filter_requested_info(info_dict, actually_filter=True):
|
def filter_requested_info(info_dict, actually_filter=True):
|
||||||
''' Alias of sanitize_info for backward compatibility '''
|
""" Alias of sanitize_info for backward compatibility """
|
||||||
return YoutubeDL.sanitize_info(info_dict, actually_filter)
|
return YoutubeDL.sanitize_info(info_dict, actually_filter)
|
||||||
|
|
||||||
def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
|
def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
|
||||||
|
@ -3666,7 +3665,7 @@ def actual_post_extract(info_dict):
|
||||||
actual_post_extract(video_dict or {})
|
actual_post_extract(video_dict or {})
|
||||||
return
|
return
|
||||||
|
|
||||||
post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
|
post_extractor = info_dict.pop('__post_extractor', None) or dict
|
||||||
info_dict.update(post_extractor())
|
info_dict.update(post_extractor())
|
||||||
|
|
||||||
actual_post_extract(info_dict or {})
|
actual_post_extract(info_dict or {})
|
||||||
|
@ -3771,7 +3770,7 @@ def format_resolution(format, default='unknown'):
|
||||||
if format.get('width') and format.get('height'):
|
if format.get('width') and format.get('height'):
|
||||||
return '%dx%d' % (format['width'], format['height'])
|
return '%dx%d' % (format['width'], format['height'])
|
||||||
elif format.get('height'):
|
elif format.get('height'):
|
||||||
return '%sp' % format['height']
|
return '{}p'.format(format['height'])
|
||||||
elif format.get('width'):
|
elif format.get('width'):
|
||||||
return '%dx?' % format['width']
|
return '%dx?' % format['width']
|
||||||
return default
|
return default
|
||||||
|
@ -3788,7 +3787,7 @@ def _format_note(self, fdict):
|
||||||
if fdict.get('language'):
|
if fdict.get('language'):
|
||||||
if res:
|
if res:
|
||||||
res += ' '
|
res += ' '
|
||||||
res += '[%s]' % fdict['language']
|
res += '[{}]'.format(fdict['language'])
|
||||||
if fdict.get('format_note') is not None:
|
if fdict.get('format_note') is not None:
|
||||||
if res:
|
if res:
|
||||||
res += ' '
|
res += ' '
|
||||||
|
@ -3800,7 +3799,7 @@ def _format_note(self, fdict):
|
||||||
if fdict.get('container') is not None:
|
if fdict.get('container') is not None:
|
||||||
if res:
|
if res:
|
||||||
res += ', '
|
res += ', '
|
||||||
res += '%s container' % fdict['container']
|
res += '{} container'.format(fdict['container'])
|
||||||
if (fdict.get('vcodec') is not None
|
if (fdict.get('vcodec') is not None
|
||||||
and fdict.get('vcodec') != 'none'):
|
and fdict.get('vcodec') != 'none'):
|
||||||
if res:
|
if res:
|
||||||
|
@ -3815,7 +3814,7 @@ def _format_note(self, fdict):
|
||||||
if fdict.get('fps') is not None:
|
if fdict.get('fps') is not None:
|
||||||
if res:
|
if res:
|
||||||
res += ', '
|
res += ', '
|
||||||
res += '%sfps' % fdict['fps']
|
res += '{}fps'.format(fdict['fps'])
|
||||||
if fdict.get('acodec') is not None:
|
if fdict.get('acodec') is not None:
|
||||||
if res:
|
if res:
|
||||||
res += ', '
|
res += ', '
|
||||||
|
@ -3858,7 +3857,7 @@ def render_formats_table(self, info_dict):
|
||||||
format_field(f, 'format_id'),
|
format_field(f, 'format_id'),
|
||||||
format_field(f, 'ext'),
|
format_field(f, 'ext'),
|
||||||
self.format_resolution(f),
|
self.format_resolution(f),
|
||||||
self._format_note(f)
|
self._format_note(f),
|
||||||
] for f in formats if (f.get('preference') or 0) >= -1000]
|
] for f in formats if (f.get('preference') or 0) >= -1000]
|
||||||
return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
|
return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
|
||||||
|
|
||||||
|
@ -3964,11 +3963,11 @@ def print_debug_header(self):
|
||||||
from .extractor.extractors import _LAZY_LOADER
|
from .extractor.extractors import _LAZY_LOADER
|
||||||
from .extractor.extractors import (
|
from .extractor.extractors import (
|
||||||
_PLUGIN_CLASSES as plugin_ies,
|
_PLUGIN_CLASSES as plugin_ies,
|
||||||
_PLUGIN_OVERRIDES as plugin_ie_overrides
|
_PLUGIN_OVERRIDES as plugin_ie_overrides,
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_encoding(stream):
|
def get_encoding(stream):
|
||||||
ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
|
ret = str(getattr(stream, 'encoding', f'missing ({type(stream).__name__})'))
|
||||||
additional_info = []
|
additional_info = []
|
||||||
if os.environ.get('TERM', '').lower() == 'dumb':
|
if os.environ.get('TERM', '').lower() == 'dumb':
|
||||||
additional_info.append('dumb')
|
additional_info.append('dumb')
|
||||||
|
@ -3979,13 +3978,13 @@ def get_encoding(stream):
|
||||||
ret = f'{ret} ({",".join(additional_info)})'
|
ret = f'{ret} ({",".join(additional_info)})'
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
|
encoding_str = 'Encodings: locale {}, fs {}, pref {}, {}'.format(
|
||||||
locale.getpreferredencoding(),
|
locale.getpreferredencoding(),
|
||||||
sys.getfilesystemencoding(),
|
sys.getfilesystemencoding(),
|
||||||
self.get_encoding(),
|
self.get_encoding(),
|
||||||
', '.join(
|
', '.join(
|
||||||
f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
|
f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
|
||||||
if stream is not None and key != 'console')
|
if stream is not None and key != 'console'),
|
||||||
)
|
)
|
||||||
|
|
||||||
logger = self.params.get('logger')
|
logger = self.params.get('logger')
|
||||||
|
@ -4017,7 +4016,7 @@ def get_encoding(stream):
|
||||||
else:
|
else:
|
||||||
write_debug('Lazy loading extractors is disabled')
|
write_debug('Lazy loading extractors is disabled')
|
||||||
if self.params['compat_opts']:
|
if self.params['compat_opts']:
|
||||||
write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
|
write_debug('Compatibility options: {}'.format(', '.join(self.params['compat_opts'])))
|
||||||
|
|
||||||
if current_git_head():
|
if current_git_head():
|
||||||
write_debug(f'Git HEAD: {current_git_head()}')
|
write_debug(f'Git HEAD: {current_git_head()}')
|
||||||
|
@ -4026,14 +4025,14 @@ def get_encoding(stream):
|
||||||
exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
|
exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
|
||||||
ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
|
ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
|
||||||
if ffmpeg_features:
|
if ffmpeg_features:
|
||||||
exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
|
exe_versions['ffmpeg'] += ' ({})'.format(','.join(sorted(ffmpeg_features)))
|
||||||
|
|
||||||
exe_versions['rtmpdump'] = rtmpdump_version()
|
exe_versions['rtmpdump'] = rtmpdump_version()
|
||||||
exe_versions['phantomjs'] = PhantomJSwrapper._version()
|
exe_versions['phantomjs'] = PhantomJSwrapper._version()
|
||||||
exe_str = ', '.join(
|
exe_str = ', '.join(
|
||||||
f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
|
f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
|
||||||
) or 'none'
|
) or 'none'
|
||||||
write_debug('exe versions: %s' % exe_str)
|
write_debug(f'exe versions: {exe_str}')
|
||||||
|
|
||||||
from .compat.compat_utils import get_package_info
|
from .compat.compat_utils import get_package_info
|
||||||
from .dependencies import available_dependencies
|
from .dependencies import available_dependencies
|
||||||
|
@ -4045,7 +4044,7 @@ def get_encoding(stream):
|
||||||
write_debug(f'Proxy map: {self.proxies}')
|
write_debug(f'Proxy map: {self.proxies}')
|
||||||
write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')
|
write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}')
|
||||||
for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
|
for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
|
||||||
display_list = ['%s%s' % (
|
display_list = ['{}{}'.format(
|
||||||
klass.__name__, '' if klass.__name__ == name else f' as {name}')
|
klass.__name__, '' if klass.__name__ == name else f' as {name}')
|
||||||
for name, klass in plugins.items()]
|
for name, klass in plugins.items()]
|
||||||
if plugin_type == 'Extractor':
|
if plugin_type == 'Extractor':
|
||||||
|
@ -4062,14 +4061,13 @@ def get_encoding(stream):
|
||||||
# Not implemented
|
# Not implemented
|
||||||
if False and self.params.get('call_home'):
|
if False and self.params.get('call_home'):
|
||||||
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
|
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
|
||||||
write_debug('Public IP address: %s' % ipaddr)
|
write_debug(f'Public IP address: {ipaddr}')
|
||||||
latest_version = self.urlopen(
|
latest_version = self.urlopen(
|
||||||
'https://yt-dl.org/latest/version').read().decode()
|
'https://yt-dl.org/latest/version').read().decode()
|
||||||
if version_tuple(latest_version) > version_tuple(__version__):
|
if version_tuple(latest_version) > version_tuple(__version__):
|
||||||
self.report_warning(
|
self.report_warning(
|
||||||
'You are using an outdated version (newest version: %s)! '
|
f'You are using an outdated version (newest version: {latest_version})! '
|
||||||
'See https://yt-dl.org/update if you need help updating.' %
|
'See https://yt-dl.org/update if you need help updating.')
|
||||||
latest_version)
|
|
||||||
|
|
||||||
@functools.cached_property
|
@functools.cached_property
|
||||||
def proxies(self):
|
def proxies(self):
|
||||||
|
@ -4103,7 +4101,7 @@ def _opener(self):
|
||||||
return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)
|
return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)
|
||||||
|
|
||||||
def _get_available_impersonate_targets(self):
|
def _get_available_impersonate_targets(self):
|
||||||
# todo(future): make available as public API
|
# TODO(future): make available as public API
|
||||||
return [
|
return [
|
||||||
(target, rh.RH_NAME)
|
(target, rh.RH_NAME)
|
||||||
for rh in self._request_director.handlers.values()
|
for rh in self._request_director.handlers.values()
|
||||||
|
@ -4112,7 +4110,7 @@ def _get_available_impersonate_targets(self):
|
||||||
]
|
]
|
||||||
|
|
||||||
def _impersonate_target_available(self, target):
|
def _impersonate_target_available(self, target):
|
||||||
# todo(future): make available as public API
|
# TODO(future): make available as public API
|
||||||
return any(
|
return any(
|
||||||
rh.is_supported_target(target)
|
rh.is_supported_target(target)
|
||||||
for rh in self._request_director.handlers.values()
|
for rh in self._request_director.handlers.values()
|
||||||
|
@ -4238,7 +4236,7 @@ def get_encoding(self):
|
||||||
return encoding
|
return encoding
|
||||||
|
|
||||||
def _write_info_json(self, label, ie_result, infofn, overwrite=None):
|
def _write_info_json(self, label, ie_result, infofn, overwrite=None):
|
||||||
''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
|
""" Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error """
|
||||||
if overwrite is None:
|
if overwrite is None:
|
||||||
overwrite = self.params.get('overwrites', True)
|
overwrite = self.params.get('overwrites', True)
|
||||||
if not self.params.get('writeinfojson'):
|
if not self.params.get('writeinfojson'):
|
||||||
|
@ -4261,7 +4259,7 @@ def _write_info_json(self, label, ie_result, infofn, overwrite=None):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def _write_description(self, label, ie_result, descfn):
|
def _write_description(self, label, ie_result, descfn):
|
||||||
''' Write description and returns True = written, False = skip, None = error '''
|
""" Write description and returns True = written, False = skip, None = error """
|
||||||
if not self.params.get('writedescription'):
|
if not self.params.get('writedescription'):
|
||||||
return False
|
return False
|
||||||
elif not descfn:
|
elif not descfn:
|
||||||
|
@ -4285,7 +4283,7 @@ def _write_description(self, label, ie_result, descfn):
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def _write_subtitles(self, info_dict, filename):
|
def _write_subtitles(self, info_dict, filename):
|
||||||
''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
|
""" Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error"""
|
||||||
ret = []
|
ret = []
|
||||||
subtitles = info_dict.get('requested_subtitles')
|
subtitles = info_dict.get('requested_subtitles')
|
||||||
if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
|
if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
|
||||||
|
@ -4331,7 +4329,7 @@ def _write_subtitles(self, info_dict, filename):
|
||||||
self.dl(sub_filename, sub_copy, subtitle=True)
|
self.dl(sub_filename, sub_copy, subtitle=True)
|
||||||
sub_info['filepath'] = sub_filename
|
sub_info['filepath'] = sub_filename
|
||||||
ret.append((sub_filename, sub_filename_final))
|
ret.append((sub_filename, sub_filename_final))
|
||||||
except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
|
except (DownloadError, ExtractorError, OSError, ValueError, *network_exceptions) as err:
|
||||||
msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
|
msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
|
||||||
if self.params.get('ignoreerrors') is not True: # False or 'only_download'
|
if self.params.get('ignoreerrors') is not True: # False or 'only_download'
|
||||||
if not self.params.get('ignoreerrors'):
|
if not self.params.get('ignoreerrors'):
|
||||||
|
@ -4341,7 +4339,7 @@ def _write_subtitles(self, info_dict, filename):
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
|
def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
|
||||||
''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error '''
|
""" Write thumbnails to file and return list of (thumb_filename, final_thumb_filename); or None if error """
|
||||||
write_all = self.params.get('write_all_thumbnails', False)
|
write_all = self.params.get('write_all_thumbnails', False)
|
||||||
thumbnails, ret = [], []
|
thumbnails, ret = [], []
|
||||||
if write_all or self.params.get('writethumbnail', False):
|
if write_all or self.params.get('writethumbnail', False):
|
||||||
|
@ -4368,8 +4366,8 @@ def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None
|
||||||
|
|
||||||
existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
|
existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
|
||||||
if existing_thumb:
|
if existing_thumb:
|
||||||
self.to_screen('[info] %s is already present' % (
|
self.to_screen('[info] {} is already present'.format((
|
||||||
thumb_display_id if multiple else f'{label} thumbnail').capitalize())
|
thumb_display_id if multiple else f'{label} thumbnail').capitalize()))
|
||||||
t['filepath'] = existing_thumb
|
t['filepath'] = existing_thumb
|
||||||
ret.append((existing_thumb, thumb_filename_final))
|
ret.append((existing_thumb, thumb_filename_final))
|
||||||
else:
|
else:
|
||||||
|
|
|
@@ -14,7 +14,7 @@
 import re
 import traceback

-from .compat import compat_os_name, compat_shlex_quote
+from .compat import compat_os_name
 from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
 from .downloader.external import get_external_downloader
 from .extractor import list_extractor_classes
@@ -58,6 +58,7 @@
 read_stdin,
 render_table,
 setproctitle,
+shell_quote,
 traverse_obj,
 variadic,
 write_string,
@@ -115,9 +116,9 @@ def print_extractor_information(opts, urls):
 ie.description(markdown=False, search_examples=_SEARCHES)
 for ie in list_extractor_classes(opts.age_limit) if ie.working() and ie.IE_DESC is not False)
 elif opts.ap_list_mso:
-out = 'Supported TV Providers:\n%s\n' % render_table(
+out = 'Supported TV Providers:\n{}\n'.format(render_table(
 ['mso', 'mso name'],
-[[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()])
+[[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]))
 else:
 return False
 write_string(out, out=sys.stdout)
@@ -129,7 +130,7 @@ def _unused_compat_opt(name):
 if name not in opts.compat_opts:
 return False
 opts.compat_opts.discard(name)
-opts.compat_opts.update(['*%s' % name])
+opts.compat_opts.update([f'*{name}'])
 return True

 def set_default_compat(compat_name, opt_name, default=True, remove_compat=True):
@@ -222,7 +223,7 @@ def validate_minmax(min_val, max_val, min_name, max_name=None):
 validate_minmax(opts.sleep_interval, opts.max_sleep_interval, 'sleep interval')

 if opts.wait_for_video is not None:
-min_wait, max_wait, *_ = map(parse_duration, opts.wait_for_video.split('-', 1) + [None])
+min_wait, max_wait, *_ = map(parse_duration, [*opts.wait_for_video.split('-', 1), None])
 validate(min_wait is not None and not (max_wait is None and '-' in opts.wait_for_video),
 'time range to wait for video', opts.wait_for_video)
 validate_minmax(min_wait, max_wait, 'time range to wait for video')
@@ -264,9 +265,9 @@ def parse_retries(name, value):
 # Retry sleep function
 def parse_sleep_func(expr):
 NUMBER_RE = r'\d+(?:\.\d+)?'
-op, start, limit, step, *_ = tuple(re.fullmatch(
+op, start, limit, step, *_ = (*tuple(re.fullmatch(
 rf'(?:(linear|exp)=)?({NUMBER_RE})(?::({NUMBER_RE})?)?(?::({NUMBER_RE}))?',
-expr.strip()).groups()) + (None, None)
+expr.strip()).groups()), None, None)

 if op == 'exp':
 return lambda n: min(float(start) * (float(step or 2) ** n), float(limit or 'inf'))
@@ -396,13 +397,13 @@ def parse_chapters(name, value, advanced=False):
 # MetadataParser
 def metadataparser_actions(f):
 if isinstance(f, str):
-cmd = '--parse-metadata %s' % compat_shlex_quote(f)
+cmd = f'--parse-metadata {shell_quote(f)}'
 try:
 actions = [MetadataFromFieldPP.to_action(f)]
 except Exception as err:
 raise ValueError(f'{cmd} is invalid; {err}')
 else:
-cmd = '--replace-in-metadata %s' % ' '.join(map(compat_shlex_quote, f))
+cmd = f'--replace-in-metadata {shell_quote(f)}'
 actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(','))

 for action in actions:
@@ -413,7 +414,7 @@ def metadataparser_actions(f):
 yield action

 if opts.metafromtitle is not None:
-opts.parse_metadata.setdefault('pre_process', []).append('title:%s' % opts.metafromtitle)
+opts.parse_metadata.setdefault('pre_process', []).append(f'title:{opts.metafromtitle}')
 opts.parse_metadata = {
 k: list(itertools.chain(*map(metadataparser_actions, v)))
 for k, v in opts.parse_metadata.items()
@@ -602,7 +603,7 @@ def get_postprocessors(opts):
 yield {
 'key': 'MetadataParser',
 'actions': actions,
-'when': when
+'when': when,
 }
 sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove
 if sponsorblock_query:
@@ -610,19 +611,19 @@ def get_postprocessors(opts):
 'key': 'SponsorBlock',
 'categories': sponsorblock_query,
 'api': opts.sponsorblock_api,
-'when': 'after_filter'
+'when': 'after_filter',
 }
 if opts.convertsubtitles:
 yield {
 'key': 'FFmpegSubtitlesConvertor',
 'format': opts.convertsubtitles,
-'when': 'before_dl'
+'when': 'before_dl',
 }
 if opts.convertthumbnails:
 yield {
 'key': 'FFmpegThumbnailsConvertor',
 'format': opts.convertthumbnails,
-'when': 'before_dl'
+'when': 'before_dl',
 }
 if opts.extractaudio:
 yield {
@@ -647,7 +648,7 @@ def get_postprocessors(opts):
 yield {
 'key': 'FFmpegEmbedSubtitle',
 # already_have_subtitle = True prevents the file from being deleted after embedding
-'already_have_subtitle': opts.writesubtitles and keep_subs
+'already_have_subtitle': opts.writesubtitles and keep_subs,
 }
 if not opts.writeautomaticsub and keep_subs:
 opts.writesubtitles = True
@@ -660,7 +661,7 @@ def get_postprocessors(opts):
 'remove_sponsor_segments': opts.sponsorblock_remove,
 'remove_ranges': opts.remove_ranges,
 'sponsorblock_chapter_title': opts.sponsorblock_chapter_title,
-'force_keyframes': opts.force_keyframes_at_cuts
+'force_keyframes': opts.force_keyframes_at_cuts,
 }
 # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
 # FFmpegExtractAudioPP as containers before conversion may not support
@@ -694,7 +695,7 @@ def get_postprocessors(opts):
 yield {
 'key': 'EmbedThumbnail',
 # already_have_thumbnail = True prevents the file from being deleted after embedding
-'already_have_thumbnail': opts.writethumbnail
+'already_have_thumbnail': opts.writethumbnail,
 }
 if not opts.writethumbnail:
 opts.writethumbnail = True
@@ -741,7 +742,7 @@ def parse_options(argv=None):
 print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:])
 any_getting = any(getattr(opts, k) for k in (
 'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename',
-'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl'
+'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl',
 ))
 if opts.quiet is None:
 opts.quiet = any_getting or opts.print_json or bool(opts.forceprint)
@@ -1002,7 +1003,7 @@ def _real_main(argv=None):
 def make_row(target, handler):
 return [
 join_nonempty(target.client.title(), target.version, delim='-') or '-',
-join_nonempty((target.os or "").title(), target.os_version, delim='-') or '-',
+join_nonempty((target.os or '').title(), target.os_version, delim='-') or '-',
 handler,
 ]

@@ -68,7 +68,7 @@ def pad_block(block, padding_mode):
 raise NotImplementedError(f'Padding mode {padding_mode} is not implemented')

 if padding_mode == 'iso7816' and padding_size:
-block = block + [0x80]  # NB: += mutates list
+block = [*block, 0x80]  # NB: += mutates list
 padding_size -= 1

 return block + [PADDING_BYTE[padding_mode]] * padding_size
@@ -110,9 +110,7 @@ def aes_ecb_decrypt(data, key, iv=None):
 for i in range(block_count):
 block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
 encrypted_data += aes_decrypt(block, expanded_key)
-encrypted_data = encrypted_data[:len(data)]
-
-return encrypted_data
+return encrypted_data[:len(data)]


 def aes_ctr_decrypt(data, key, iv):
@@ -148,9 +146,7 @@ def aes_ctr_encrypt(data, key, iv):

 cipher_counter_block = aes_encrypt(counter_block, expanded_key)
 encrypted_data += xor(block, cipher_counter_block)
-encrypted_data = encrypted_data[:len(data)]
-
-return encrypted_data
+return encrypted_data[:len(data)]


 def aes_cbc_decrypt(data, key, iv):
@@ -174,9 +170,7 @@ def aes_cbc_decrypt(data, key, iv):
 decrypted_block = aes_decrypt(block, expanded_key)
 decrypted_data += xor(decrypted_block, previous_cipher_block)
 previous_cipher_block = block
-decrypted_data = decrypted_data[:len(data)]
-
-return decrypted_data
+return decrypted_data[:len(data)]


 def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'):
@@ -224,7 +218,7 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
 hash_subkey = aes_encrypt([0] * BLOCK_SIZE_BYTES, key_expansion(key))

 if len(nonce) == 12:
-j0 = nonce + [0, 0, 0, 1]
+j0 = [*nonce, 0, 0, 0, 1]
 else:
 fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8
 ghash_in = nonce + [0] * fill + bytes_to_intlist((8 * len(nonce)).to_bytes(8, 'big'))
@@ -242,11 +236,11 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
 data
 + [0] * (BLOCK_SIZE_BYTES - len(data) + pad_len)  # pad
 + bytes_to_intlist((0 * 8).to_bytes(8, 'big')  # length of associated data
-+ ((len(data) * 8).to_bytes(8, 'big')))  # length of data
++ ((len(data) * 8).to_bytes(8, 'big'))),  # length of data
 )

 if tag != aes_ctr_encrypt(s_tag, key, j0):
-raise ValueError("Mismatching authentication tag")
+raise ValueError('Mismatching authentication tag')

 return decrypted_data

@@ -288,9 +282,7 @@ def aes_decrypt(data, expanded_key):
 data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX_INV))
 data = shift_rows_inv(data)
 data = sub_bytes_inv(data)
-data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-
-return data
+return xor(data, expanded_key[:BLOCK_SIZE_BYTES])


 def aes_decrypt_text(data, password, key_size_bytes):
@@ -318,9 +310,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
 cipher = data[NONCE_LENGTH_BYTES:]

 decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES))
-plaintext = intlist_to_bytes(decrypted_data)
-
-return plaintext
+return intlist_to_bytes(decrypted_data)


 RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
@@ -428,9 +418,7 @@ def key_expansion(data):
 for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
 temp = data[-4:]
 data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
-data = data[:expanded_key_size_bytes]
-
-return data
+return data[:expanded_key_size_bytes]


 def iter_vector(iv):
@@ -511,7 +499,7 @@ def block_product(block_x, block_y):
 # NIST SP 800-38D, Algorithm 1

 if len(block_x) != BLOCK_SIZE_BYTES or len(block_y) != BLOCK_SIZE_BYTES:
-raise ValueError("Length of blocks need to be %d bytes" % BLOCK_SIZE_BYTES)
+raise ValueError(f'Length of blocks need to be {BLOCK_SIZE_BYTES} bytes')

 block_r = [0xE1] + [0] * (BLOCK_SIZE_BYTES - 1)
 block_v = block_y[:]
@@ -534,7 +522,7 @@ def ghash(subkey, data):
 # NIST SP 800-38D, Algorithm 2

 if len(data) % BLOCK_SIZE_BYTES:
-raise ValueError("Length of data should be %d bytes" % BLOCK_SIZE_BYTES)
+raise ValueError(f'Length of data should be {BLOCK_SIZE_BYTES} bytes')

 last_y = [0] * BLOCK_SIZE_BYTES
 for i in range(0, len(data), BLOCK_SIZE_BYTES):

@@ -81,10 +81,10 @@ def remove(self):

 cachedir = self._get_root_dir()
 if not any((term in cachedir) for term in ('cache', 'tmp')):
-raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
+raise Exception(f'Not removing directory {cachedir} - this does not look like a cache dir')

 self._ydl.to_screen(
-'Removing cache dir %s .' % cachedir, skip_eol=True)
+f'Removing cache dir {cachedir} .', skip_eol=True)
 if os.path.exists(cachedir):
 self._ydl.to_screen('.', skip_eol=True)
 shutil.rmtree(cachedir)

@@ -35,7 +35,7 @@
 from ..dependencies import brotli as compat_brotli  # noqa: F401
 from ..dependencies import websockets as compat_websockets  # noqa: F401
 from ..dependencies.Cryptodome import AES as compat_pycrypto_AES  # noqa: F401
-from ..networking.exceptions import HTTPError as compat_HTTPError  # noqa: F401
+from ..networking.exceptions import HTTPError as compat_HTTPError

 passthrough_module(__name__, '...utils', ('WINDOWS_VT_MODE', 'windows_enable_vt_mode'))

@ -7,6 +7,6 @@
|
||||||
del passthrough_module
|
del passthrough_module
|
||||||
|
|
||||||
try:
|
try:
|
||||||
cache # >= 3.9
|
_ = cache # >= 3.9
|
||||||
except NameError:
|
except NameError:
|
||||||
cache = lru_cache(maxsize=None)
|
cache = lru_cache(maxsize=None)
|
||||||
|
|
|
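The functools shim above reduces to a common pattern: use functools.cache where it exists (Python >= 3.9) and fall back to an unbounded lru_cache otherwise; the new `_ = cache` spelling only silences the useless-expression lint while keeping the NameError probe. A standalone sketch of the same idea:

    from functools import lru_cache

    try:
        from functools import cache  # Python >= 3.9
    except ImportError:
        cache = lru_cache(maxsize=None)

    @cache
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    print(fib(90))  # memoised recursion; completes instantly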
@@ -46,7 +46,7 @@
 from .utils._utils import _YDLLogger
 from .utils.networking import normalize_url
 
-CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
+CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi', 'whale'}
 SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}
 
 
@@ -146,7 +146,7 @@ def _extract_firefox_cookies(profile, container, logger):
             identities = json.load(containers).get('identities', [])
         container_id = next((context.get('userContextId') for context in identities if container in (
             context.get('name'),
-            try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group())
+            try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group()),
         )), None)
         if not isinstance(container_id, int):
             raise ValueError(f'could not find firefox container "{container}" in containers.json')
@@ -219,6 +219,7 @@ def _get_chromium_based_browser_settings(browser_name):
             'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'),
             'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'),
             'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'),
+            'whale': os.path.join(appdata_local, R'Naver\Naver Whale\User Data'),
         }[browser_name]
 
     elif sys.platform == 'darwin':
@@ -230,6 +231,7 @@ def _get_chromium_based_browser_settings(browser_name):
             'edge': os.path.join(appdata, 'Microsoft Edge'),
             'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
             'vivaldi': os.path.join(appdata, 'Vivaldi'),
+            'whale': os.path.join(appdata, 'Naver/Whale'),
         }[browser_name]
 
     else:
@@ -241,6 +243,7 @@ def _get_chromium_based_browser_settings(browser_name):
             'edge': os.path.join(config, 'microsoft-edge'),
             'opera': os.path.join(config, 'opera'),
             'vivaldi': os.path.join(config, 'vivaldi'),
+            'whale': os.path.join(config, 'naver-whale'),
         }[browser_name]
 
     # Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
@@ -252,6 +255,7 @@ def _get_chromium_based_browser_settings(browser_name):
         'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
         'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
         'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
+        'whale': 'Whale',
     }[browser_name]
 
     browsers_without_profiles = {'opera'}
@@ -259,7 +263,7 @@ def _get_chromium_based_browser_settings(browser_name):
     return {
         'browser_dir': browser_dir,
         'keyring_name': keyring_name,
-        'supports_profiles': browser_name not in browsers_without_profiles
+        'supports_profiles': browser_name not in browsers_without_profiles,
     }
 
 
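Condensed, the per-platform lookup these hunks extend resolves a user-data directory like this (paths taken from the diff; the helper name and base-directory resolution are illustrative, not yt-dlp's actual code):

    import os
    import sys

    def whale_user_data_dir():
        if sys.platform in ('win32', 'cygwin'):
            return os.path.join(os.environ['LOCALAPPDATA'], R'Naver\Naver Whale\User Data')
        if sys.platform == 'darwin':
            return os.path.join(os.path.expanduser('~/Library/Application Support'), 'Naver/Whale')
        config = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
        return os.path.join(config, 'naver-whale')

The matching keyring name is simply 'Whale' on every platform, per the last hunk above.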
@@ -347,6 +351,11 @@ def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, pa
     if value is None:
         return is_encrypted, None
 
+    # In chrome, session cookies have expires_utc set to 0
+    # In our cookie-store, cookies that do not expire should have expires set to None
+    if not expires_utc:
+        expires_utc = None
+
     return is_encrypted, http.cookiejar.Cookie(
         version=0, name=name, value=value, port=None, port_specified=False,
         domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
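The normalization added above matters because Chromium stores 0 in expires_utc for session cookies, while http.cookiejar expects None for cookies that never expire. A self-contained sketch of the conversion (helper name illustrative; the real code also decrypts the value first):

    import http.cookiejar

    def make_cookie(host_key, name, value, expires_utc, path='/', secure=False):
        if not expires_utc:  # 0 (or None) -> session cookie
            expires_utc = None
        return http.cookiejar.Cookie(
            version=0, name=name, value=value, port=None, port_specified=False,
            domain=host_key, domain_specified=bool(host_key),
            domain_initial_dot=host_key.startswith('.'),
            path=path, path_specified=bool(path), secure=secure,
            expires=expires_utc, discard=False, comment=None, comment_url=None, rest={})

    assert make_cookie('.example.com', 'sid', 'abc123', expires_utc=0).expires is None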
@@ -817,7 +826,7 @@ def _choose_linux_keyring(logger):
     elif desktop_environment == _LinuxDesktopEnvironment.KDE6:
         linux_keyring = _LinuxKeyring.KWALLET6
     elif desktop_environment in (
-        _LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER
+        _LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER,
     ):
         linux_keyring = _LinuxKeyring.BASICTEXT
     else:
@@ -852,7 +861,7 @@ def _get_kwallet_network_wallet(keyring, logger):
         'dbus-send', '--session', '--print-reply=literal',
         f'--dest={service_name}',
         wallet_path,
-        'org.kde.KWallet.networkWallet'
+        'org.kde.KWallet.networkWallet',
     ], text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
 
     if returncode:
@@ -882,7 +891,7 @@ def _get_kwallet_password(browser_keyring_name, keyring, logger):
         'kwallet-query',
         '--read-password', f'{browser_keyring_name} Safe Storage',
         '--folder', f'{browser_keyring_name} Keys',
-        network_wallet
+        network_wallet,
     ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
 
     if returncode:
@@ -922,9 +931,8 @@ def _get_gnome_keyring_password(browser_keyring_name, logger):
     for item in col.get_all_items():
         if item.get_label() == f'{browser_keyring_name} Safe Storage':
             return item.get_secret()
-    else:
-        logger.error('failed to read from keyring')
-    return b''
+    logger.error('failed to read from keyring')
+    return b''
 
 
 def _get_linux_keyring_password(browser_keyring_name, keyring, logger):
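The gnome-keyring hunk removes a misleading for/else: Python runs the else block whenever the loop ends without break, and since this loop returns instead of breaking, the error was logged on every unsuccessful scan anyway -- the else clause added nothing. The fixed idiom, in isolation:

    def find(items, target):
        for item in items:
            if item == target:
                return item
        # reaching this line already means "not found"; no else: clause needed
        return None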
@@ -1044,7 +1052,7 @@ class DATA_BLOB(ctypes.Structure):
         None,  # pvReserved: must be NULL
         None,  # pPromptStruct: information about prompts to display
         0,  # dwFlags
-        ctypes.byref(blob_out)  # pDataOut
+        ctypes.byref(blob_out),  # pDataOut
     )
     if not ret:
         logger.warning('failed to decrypt with DPAPI', only_once=True)
@@ -1120,24 +1128,24 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
     _LEGAL_VALUE_CHARS = _LEGAL_KEY_CHARS + re.escape('(),/<=>?@[]{}')
 
     _RESERVED = {
-        "expires",
-        "path",
-        "comment",
-        "domain",
-        "max-age",
-        "secure",
-        "httponly",
-        "version",
-        "samesite",
+        'expires',
+        'path',
+        'comment',
+        'domain',
+        'max-age',
+        'secure',
+        'httponly',
+        'version',
+        'samesite',
     }
 
-    _FLAGS = {"secure", "httponly"}
+    _FLAGS = {'secure', 'httponly'}
 
     # Added 'bad' group to catch the remaining value
-    _COOKIE_PATTERN = re.compile(r"""
+    _COOKIE_PATTERN = re.compile(r'''
         \s*  # Optional whitespace at start of cookie
         (?P<key>  # Start of group 'key'
-        [""" + _LEGAL_KEY_CHARS + r"""]+?# Any word of at least one letter
+        [''' + _LEGAL_KEY_CHARS + r''']+?# Any word of at least one letter
         )  # End of group 'key'
         (  # Optional group: there may not be a value.
         \s*=\s*  # Equal Sign
@@ -1147,7 +1155,7 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
         |  # or
         \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT  # Special case for "expires" attr
         |  # or
-        [""" + _LEGAL_VALUE_CHARS + r"""]*  # Any word or empty string
+        [''' + _LEGAL_VALUE_CHARS + r''']*  # Any word or empty string
         )  # End of group 'val'
         |  # or
         (?P<bad>(?:\\;|[^;])*?)  # 'bad' group fallback for invalid values
@@ -1155,7 +1163,7 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
         )?  # End of optional value group
         \s*  # Any number of spaces.
         (\s+|;|$)  # Ending either at space, semicolon, or EOS.
-        """, re.ASCII | re.VERBOSE)
+        ''', re.ASCII | re.VERBOSE)
 
     def load(self, data):
         # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4776
@@ -1251,14 +1259,14 @@ def _really_save(self, f, ignore_discard, ignore_expires):
                 # with no name, whereas http.cookiejar regards it as a
                 # cookie with no value.
                 name, value = '', name
-            f.write('%s\n' % '\t'.join((
+            f.write('{}\n'.format('\t'.join((
                 cookie.domain,
                 self._true_or_false(cookie.domain.startswith('.')),
                 cookie.path,
                 self._true_or_false(cookie.secure),
                 str_or_none(cookie.expires, default=''),
-                name, value
-            )))
+                name, value,
+            ))))
 
     def save(self, filename=None, ignore_discard=True, ignore_expires=True):
         """
@@ -1297,10 +1305,10 @@ def prepare_line(line):
                 return line
             cookie_list = line.split('\t')
             if len(cookie_list) != self._ENTRY_LEN:
-                raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list))
+                raise http.cookiejar.LoadError(f'invalid length {len(cookie_list)}')
             cookie = self._CookieFileEntry(*cookie_list)
             if cookie.expires_at and not cookie.expires_at.isdigit():
-                raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
+                raise http.cookiejar.LoadError(f'invalid expires at {cookie.expires_at}')
             return line
 
         cf = io.StringIO()
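For reference, the writer above emits the Netscape cookies.txt format: seven tab-separated fields per line. A sketch of one record, mirroring the join in the diff (values illustrative):

    fields = (
        '.example.com',  # domain
        'TRUE',          # include subdomains (domain starts with '.')
        '/',             # path
        'FALSE',         # secure
        '1735689600',    # expiry as a unix timestamp ('' for session cookies)
        'sid',           # name
        'abc123',        # value
    )
    line = '{}\n'.format('\t'.join(fields))
    assert len(line.rstrip('\n').split('\t')) == 7  # the _ENTRY_LEN check on load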
@@ -404,7 +404,7 @@ def with_fields(*tups, default=''):
 
     def report_resuming_byte(self, resume_len):
         """Report attempt to resume at given byte."""
-        self.to_screen('[download] Resuming download at byte %s' % resume_len)
+        self.to_screen(f'[download] Resuming download at byte {resume_len}')
 
     def report_retry(self, err, count, retries, frag_index=NO_DEFAULT, fatal=True):
         """Report retry"""
@@ -55,7 +55,7 @@ def real_download(self, filename, info_dict):
             # correct and expected termination thus all postprocessing
             # should take place
             retval = 0
-            self.to_screen('[%s] Interrupted by user' % self.get_basename())
+            self.to_screen(f'[{self.get_basename()}] Interrupted by user')
         finally:
             if self._cookies_tempfile:
                 self.try_remove(self._cookies_tempfile)
@@ -172,7 +172,7 @@ def _call_downloader(self, tmpfilename, info_dict):
             decrypt_fragment = self.decrypter(info_dict)
             dest, _ = self.sanitize_open(tmpfilename, 'wb')
             for frag_index, fragment in enumerate(info_dict['fragments']):
-                fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
+                fragment_filename = f'{tmpfilename}-Frag{frag_index}'
                 try:
                     src, _ = self.sanitize_open(fragment_filename, 'rb')
                 except OSError as err:
@@ -186,7 +186,7 @@ def _call_downloader(self, tmpfilename, info_dict):
                 if not self.params.get('keep_fragments', False):
                     self.try_remove(encodeFilename(fragment_filename))
             dest.close()
-            self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename))
+            self.try_remove(encodeFilename(f'{tmpfilename}.frag.urls'))
         return 0
 
     def _call_process(self, cmd, info_dict):
@@ -336,11 +336,11 @@ def _make_cmd(self, tmpfilename, info_dict):
 
         if 'fragments' in info_dict:
             cmd += ['--uri-selector=inorder']
-            url_list_file = '%s.frag.urls' % tmpfilename
+            url_list_file = f'{tmpfilename}.frag.urls'
             url_list = []
             for frag_index, fragment in enumerate(info_dict['fragments']):
-                fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
-                url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename)))
+                fragment_filename = f'{os.path.basename(tmpfilename)}-Frag{frag_index}'
+                url_list.append('{}\n\tout={}'.format(fragment['url'], self._aria2c_filename(fragment_filename)))
             stream, _ = self.sanitize_open(url_list_file, 'wb')
             stream.write('\n'.join(url_list).encode())
             stream.close()
@@ -357,7 +357,7 @@ def aria2c_rpc(self, rpc_port, rpc_secret, method, params=()):
             'id': sanitycheck,
             'method': method,
             'params': [f'token:{rpc_secret}', *params],
-        }).encode('utf-8')
+        }).encode()
         request = Request(
             f'http://localhost:{rpc_port}/jsonrpc',
             data=d, headers={
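The aria2c_rpc() helper above speaks plain JSON-RPC 2.0 to a local aria2 daemon (started with --enable-rpc); the secret always rides first in params as 'token:...'. A standalone sketch of one such call (port and secret illustrative):

    import json
    import urllib.request

    rpc_port, rpc_secret = 6800, 'hunter2'
    payload = json.dumps({
        'jsonrpc': '2.0',
        'id': 'sanitycheck',
        'method': 'aria2.getVersion',
        'params': [f'token:{rpc_secret}'],
    }).encode()
    req = urllib.request.Request(
        f'http://localhost:{rpc_port}/jsonrpc',
        data=payload, headers={'Content-Type': 'application/json'})
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp)['result'])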
@@ -416,7 +416,7 @@ def get_stat(key, *obj, average=False):
                     'total_bytes_estimate': total,
                     'eta': (total - downloaded) / (speed or 1),
                     'fragment_index': min(frag_count, len(completed) + 1) if fragmented else None,
-                    'elapsed': time.time() - started
+                    'elapsed': time.time() - started,
                 })
                 self._hook_progress(status, info_dict)
 
@@ -509,12 +509,12 @@ def _call_downloader(self, tmpfilename, info_dict):
         proxy = self.params.get('proxy')
         if proxy:
             if not re.match(r'^[\da-zA-Z]+://', proxy):
-                proxy = 'http://%s' % proxy
+                proxy = f'http://{proxy}'
 
             if proxy.startswith('socks'):
                 self.report_warning(
-                    '%s does not support SOCKS proxies. Downloading is likely to fail. '
-                    'Consider adding --hls-prefer-native to your command.' % self.get_basename())
+                    f'{self.get_basename()} does not support SOCKS proxies. Downloading is likely to fail. '
+                    'Consider adding --hls-prefer-native to your command.')
 
             # Since December 2015 ffmpeg supports -http_proxy option (see
             # http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
@@ -575,7 +575,7 @@ def _call_downloader(self, tmpfilename, info_dict):
             if end_time:
                 args += ['-t', str(end_time - start_time)]
 
-            args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', fmt['url']]
+            args += [*self._configuration_args((f'_i{i + 1}', '_i')), '-i', fmt['url']]
 
         if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'):
             args += ['-c', 'copy']
@@ -67,12 +67,12 @@ def read_asrt(self):
         self.read_bytes(3)
         quality_entry_count = self.read_unsigned_char()
         # QualityEntryCount
-        for i in range(quality_entry_count):
+        for _ in range(quality_entry_count):
             self.read_string()
 
         segment_run_count = self.read_unsigned_int()
         segments = []
-        for i in range(segment_run_count):
+        for _ in range(segment_run_count):
             first_segment = self.read_unsigned_int()
             fragments_per_segment = self.read_unsigned_int()
             segments.append((first_segment, fragments_per_segment))
@@ -91,12 +91,12 @@ def read_afrt(self):
 
         quality_entry_count = self.read_unsigned_char()
         # QualitySegmentUrlModifiers
-        for i in range(quality_entry_count):
+        for _ in range(quality_entry_count):
             self.read_string()
 
         fragments_count = self.read_unsigned_int()
         fragments = []
-        for i in range(fragments_count):
+        for _ in range(fragments_count):
             first = self.read_unsigned_int()
             first_ts = self.read_unsigned_long_long()
             duration = self.read_unsigned_int()
@@ -135,11 +135,11 @@ def read_abst(self):
         self.read_string()  # MovieIdentifier
         server_count = self.read_unsigned_char()
         # ServerEntryTable
-        for i in range(server_count):
+        for _ in range(server_count):
             self.read_string()
         quality_count = self.read_unsigned_char()
         # QualityEntryTable
-        for i in range(quality_count):
+        for _ in range(quality_count):
             self.read_string()
         # DrmData
         self.read_string()
@@ -148,14 +148,14 @@ def read_abst(self):
 
         segments_count = self.read_unsigned_char()
         segments = []
-        for i in range(segments_count):
+        for _ in range(segments_count):
             box_size, box_type, box_data = self.read_box_info()
             assert box_type == b'asrt'
             segment = FlvReader(box_data).read_asrt()
             segments.append(segment)
         fragments_run_count = self.read_unsigned_char()
         fragments = []
-        for i in range(fragments_run_count):
+        for _ in range(fragments_run_count):
             box_size, box_type, box_data = self.read_box_info()
             assert box_type == b'afrt'
             fragments.append(FlvReader(box_data).read_afrt())
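The reader these hunks touch walks ISO-BMFF-style boxes inside the F4M bootstrap info: a 32-bit big-endian size (which includes the 8-byte header), a 4-byte type, then the payload. A minimal standalone sketch of the framing (ignoring the 64-bit extended-size case the real reader also handles):

    import struct

    def read_box_info(data, offset=0):
        size, = struct.unpack('>I', data[offset:offset + 4])
        box_type = data[offset + 4:offset + 8]
        payload = data[offset + 8:offset + size]
        return size, box_type, payload

    blob = struct.pack('>I', 12) + b'asrt' + b'\x00' * 4
    assert read_box_info(blob) == (12, b'asrt', b'\x00' * 4)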
@@ -309,7 +309,7 @@ def _parse_bootstrap_node(self, node, base_url):
     def real_download(self, filename, info_dict):
         man_url = info_dict['url']
         requested_bitrate = info_dict.get('tbr')
-        self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
+        self.to_screen(f'[{self.FD_NAME}] Downloading f4m manifest')
 
         urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
         man_url = urlh.url
@@ -326,8 +326,8 @@ def real_download(self, filename, info_dict):
             formats = sorted(formats, key=lambda f: f[0])
             rate, media = formats[-1]
         else:
-            rate, media = list(filter(
-                lambda f: int(f[0]) == requested_bitrate, formats))[0]
+            rate, media = next(filter(
+                lambda f: int(f[0]) == requested_bitrate, formats))
 
         # Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
         man_base_url = get_base_url(doc) or man_url
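The selection fix above is behaviour-preserving but lazier: next(filter(...)) stops at the first matching (rate, media) pair instead of materialising the whole list, and raises StopIteration rather than IndexError when nothing matches. In isolation:

    formats = [(1000, 'low'), (2000, 'mid'), (4000, 'high')]
    requested_bitrate = 2000
    rate, media = next(filter(lambda f: int(f[0]) == requested_bitrate, formats))
    assert (rate, media) == (2000, 'mid')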
@@ -199,7 +199,7 @@ def _prepare_frag_download(self, ctx):
                     '.ytdl file is corrupt' if is_corrupt else
                     'Inconsistent state of incomplete fragment download')
                 self.report_warning(
-                    '%s. Restarting from the beginning ...' % message)
+                    f'{message}. Restarting from the beginning ...')
                 ctx['fragment_index'] = resume_len = 0
                 if 'ytdl_corrupt' in ctx:
                     del ctx['ytdl_corrupt']
@@ -366,10 +366,10 @@ def decrypt_fragment(fragment, frag_content):
         return decrypt_fragment
 
     def download_and_append_fragments_multiple(self, *args, **kwargs):
-        '''
+        """
        @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
        all args must be either tuple or list
-        '''
+        """
         interrupt_trigger = [True]
         max_progress = len(args)
         if max_progress == 1:
@@ -424,7 +424,7 @@ def interrupt_trigger_iter(fg):
             finally:
                 tpe.shutdown(wait=True)
         if not interrupt_trigger[0] and not is_live:
-            raise KeyboardInterrupt()
+            raise KeyboardInterrupt
         # we expect the user wants to stop and DO WANT the preceding postprocessors to run;
         # so returning a intermediate result here instead of KeyboardInterrupt on live
         return result
@@ -72,7 +72,7 @@ def check_results():
 
     def real_download(self, filename, info_dict):
         man_url = info_dict['url']
-        self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
+        self.to_screen(f'[{self.FD_NAME}] Downloading m3u8 manifest')
 
         urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
         man_url = urlh.url
@@ -228,7 +228,7 @@ def is_ad_fragment_end(s):
                     'url': frag_url,
                     'decrypt_info': decrypt_info,
                     'byte_range': byte_range,
-                    'media_sequence': media_sequence
+                    'media_sequence': media_sequence,
                 })
                 media_sequence += 1
 
@@ -350,9 +350,8 @@ def pack_fragment(frag_content, frag_index):
                         # XXX: this should probably be silent as well
                         # or verify that all segments contain the same data
                         self.report_warning(bug_reports_message(
-                            'Discarding a %s block found in the middle of the stream; '
-                            'if the subtitles display incorrectly,'
-                            % (type(block).__name__)))
+                            f'Discarding a {type(block).__name__} block found in the middle of the stream; '
+                            'if the subtitles display incorrectly,'))
                         continue
                     block.write_into(output)
 
@@ -176,7 +176,7 @@ def establish_connection():
                         'downloaded_bytes': ctx.resume_len,
                         'total_bytes': ctx.resume_len,
                     }, info_dict)
-                    raise SucceedDownload()
+                    raise SucceedDownload
                 else:
                     # The length does not match, we start the download over
                     self.report_unable_to_resume()
@@ -194,7 +194,7 @@ def establish_connection():
 
         def close_stream():
             if ctx.stream is not None:
-                if not ctx.tmpfilename == '-':
+                if ctx.tmpfilename != '-':
                     ctx.stream.close()
                 ctx.stream = None
 
@@ -268,20 +268,20 @@ def retry(e):
                     ctx.filename = self.undo_temp_name(ctx.tmpfilename)
                     self.report_destination(ctx.filename)
                 except OSError as err:
-                    self.report_error('unable to open for writing: %s' % str(err))
+                    self.report_error(f'unable to open for writing: {err}')
                     return False
 
                 if self.params.get('xattr_set_filesize', False) and data_len is not None:
                     try:
                         write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode())
                     except (XAttrUnavailableError, XAttrMetadataError) as err:
-                        self.report_error('unable to set filesize xattr: %s' % str(err))
+                        self.report_error(f'unable to set filesize xattr: {err}')
 
             try:
                 ctx.stream.write(data_block)
             except OSError as err:
                 self.to_stderr('\n')
-                self.report_error('unable to write data: %s' % str(err))
+                self.report_error(f'unable to write data: {err}')
                 return False
 
             # Apply rate limit
@@ -327,7 +327,7 @@ def retry(e):
                 elif now - ctx.throttle_start > 3:
                     if ctx.stream is not None and ctx.tmpfilename != '-':
                         ctx.stream.close()
-                    raise ThrottledDownload()
+                    raise ThrottledDownload
                 elif speed:
                     ctx.throttle_start = None
 
@@ -338,7 +338,7 @@ def retry(e):
 
         if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len:
             ctx.resume_len = byte_counter
-            raise NextFragment()
+            raise NextFragment
 
         if ctx.tmpfilename != '-':
             ctx.stream.close()
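The `raise SucceedDownload()` -> `raise SucceedDownload` changes above (and ThrottledDownload, NextFragment, KeyboardInterrupt earlier) are pure style: raising an exception class instantiates it with no arguments, so both spellings are equivalent. Demonstrated with a stand-in class (yt-dlp's real one lives in its downloader internals):

    class SucceedDownload(Exception):
        pass

    try:
        raise SucceedDownload  # the raise itself instantiates the class
    except SucceedDownload as e:
        assert isinstance(e, SucceedDownload)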
@@ -251,7 +251,7 @@ def real_download(self, filename, info_dict):
         skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
 
         frag_index = 0
-        for i, segment in enumerate(segments):
+        for segment in segments:
             frag_index += 1
             if frag_index <= ctx['fragment_index']:
                 continue
@@ -10,7 +10,7 @@
 
 
 class MhtmlFD(FragmentFD):
-    _STYLESHEET = """\
+    _STYLESHEET = '''\
 html, body {
     margin: 0;
     padding: 0;
@@ -45,7 +45,7 @@ class MhtmlFD(FragmentFD):
     max-width: 100%;
     max-height: calc(100vh - 5em);
 }
-"""
+'''
     _STYLESHEET = re.sub(r'\s+', ' ', _STYLESHEET)
     _STYLESHEET = re.sub(r'\B \B|(?<=[\w\-]) (?=[^\w\-])|(?<=[^\w\-]) (?=[\w\-])', '', _STYLESHEET)
 
@@ -57,24 +57,19 @@ def _escape_mime(s):
         )).decode('us-ascii') + '?='
 
     def _gen_cid(self, i, fragment, frag_boundary):
-        return '%u.%s@yt-dlp.github.io.invalid' % (i, frag_boundary)
+        return f'{i}.{frag_boundary}@yt-dlp.github.io.invalid'
 
     def _gen_stub(self, *, fragments, frag_boundary, title):
         output = io.StringIO()
 
-        output.write((
+        output.write(
             '<!DOCTYPE html>'
             '<html>'
             '<head>'
-            '' '<meta name="generator" content="yt-dlp {version}">'
-            '' '<title>{title}</title>'
-            '' '<style>{styles}</style>'
-            '<body>'
-        ).format(
-            version=escapeHTML(YT_DLP_VERSION),
-            styles=self._STYLESHEET,
-            title=escapeHTML(title)
-        ))
+            f'<meta name="generator" content="yt-dlp {escapeHTML(YT_DLP_VERSION)}">'
+            f'<title>{escapeHTML(title)}</title>'
+            f'<style>{self._STYLESHEET}</style>'
+            '<body>')
 
         t0 = 0
         for i, frag in enumerate(fragments):
@@ -87,15 +82,12 @@ def _gen_stub(self, *, fragments, frag_boundary, title):
                     num=i + 1,
                     t0=srt_subtitles_timecode(t0),
                     t1=srt_subtitles_timecode(t1),
-                    duration=formatSeconds(frag['duration'], msec=True)
+                    duration=formatSeconds(frag['duration'], msec=True),
                 ))
             except (KeyError, ValueError, TypeError):
                 t1 = None
-                output.write((
-                    '<figcaption>Slide #{num}</figcaption>'
-                ).format(num=i + 1))
-            output.write('<img src="cid:{cid}">'.format(
-                cid=self._gen_cid(i, frag, frag_boundary)))
+                output.write(f'<figcaption>Slide #{i + 1}</figcaption>')
+            output.write(f'<img src="cid:{self._gen_cid(i, frag, frag_boundary)}">')
             output.write('</figure>')
             t0 = t1
 
@@ -126,31 +118,24 @@ def real_download(self, filename, info_dict):
         stub = self._gen_stub(
             fragments=fragments,
             frag_boundary=frag_boundary,
-            title=title
+            title=title,
         )
 
         ctx['dest_stream'].write((
             'MIME-Version: 1.0\r\n'
             'From: <nowhere@yt-dlp.github.io.invalid>\r\n'
             'To: <nowhere@yt-dlp.github.io.invalid>\r\n'
-            'Subject: {title}\r\n'
+            f'Subject: {self._escape_mime(title)}\r\n'
             'Content-type: multipart/related; '
-            '' 'boundary="{boundary}"; '
-            '' 'type="text/html"\r\n'
-            'X.yt-dlp.Origin: {origin}\r\n'
+            f'boundary="{frag_boundary}"; '
+            'type="text/html"\r\n'
+            f'X.yt-dlp.Origin: {origin}\r\n'
             '\r\n'
-            '--{boundary}\r\n'
+            f'--{frag_boundary}\r\n'
             'Content-Type: text/html; charset=utf-8\r\n'
-            'Content-Length: {length}\r\n'
+            f'Content-Length: {len(stub)}\r\n'
             '\r\n'
-            '{stub}\r\n'
-        ).format(
-            origin=origin,
-            boundary=frag_boundary,
-            length=len(stub),
-            title=self._escape_mime(title),
-            stub=stub
-        ).encode())
+            f'{stub}\r\n').encode())
         extra_state['header_written'] = True
 
         for i, fragment in enumerate(fragments):
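Assembled, the rewritten f-string block above produces a multipart/related MIME envelope like the following (values illustrative; the real code additionally RFC 2047-encodes the title via _escape_mime):

    frag_boundary = 'yt-dlp-boundary'
    title, origin = 'Example slides', 'https://example.com/talk'
    stub = '<!DOCTYPE html><html>...</html>'

    header = (
        'MIME-Version: 1.0\r\n'
        'From: <nowhere@yt-dlp.github.io.invalid>\r\n'
        'To: <nowhere@yt-dlp.github.io.invalid>\r\n'
        f'Subject: {title}\r\n'
        'Content-type: multipart/related; '
        f'boundary="{frag_boundary}"; '
        'type="text/html"\r\n'
        f'X.yt-dlp.Origin: {origin}\r\n'
        '\r\n'
        f'--{frag_boundary}\r\n'
        'Content-Type: text/html; charset=utf-8\r\n'
        f'Content-Length: {len(stub)}\r\n'
        '\r\n'
        f'{stub}\r\n').encode()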
@@ -15,7 +15,7 @@ class NiconicoDmcFD(FileDownloader):
     def real_download(self, filename, info_dict):
         from ..extractor.niconico import NiconicoIE
 
-        self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)
+        self.to_screen(f'[{self.FD_NAME}] Downloading from DMC')
         ie = NiconicoIE(self.ydl)
         info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)
 
@@ -34,7 +34,7 @@ def heartbeat():
             try:
                 self.ydl.urlopen(request).read()
             except Exception:
-                self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
+                self.to_screen(f'[{self.FD_NAME}] Heartbeat failed')
 
             with heartbeat_lock:
                 if not download_complete:
@@ -85,14 +85,14 @@ def communicate_ws(reconnect):
                         'quality': live_quality,
                         'protocol': 'hls+fmp4',
                         'latency': live_latency,
-                        'chasePlay': False
+                        'chasePlay': False,
                     },
                     'room': {
                         'protocol': 'webSocket',
-                        'commentable': True
+                        'commentable': True,
                     },
                     'reconnect': True,
-                }
+                },
             }))
         else:
             ws = ws_extractor
@@ -118,7 +118,7 @@ def communicate_ws(reconnect):
             elif self.ydl.params.get('verbose', False):
                 if len(recv) > 100:
                     recv = recv[:100] + '...'
-                self.to_screen('[debug] Server said: %s' % recv)
+                self.to_screen(f'[debug] Server said: {recv}')
 
     def ws_main():
         reconnect = False
@@ -128,7 +128,7 @@ def ws_main():
                 if ret is True:
                     return
             except BaseException as e:
-                self.to_screen('[%s] %s: Connection error occured, reconnecting after 10 seconds: %s' % ('niconico:live', video_id, str_or_none(e)))
+                self.to_screen('[{}] {}: Connection error occured, reconnecting after 10 seconds: {}'.format('niconico:live', video_id, str_or_none(e)))
                 time.sleep(10)
                 continue
             finally:
@@ -180,9 +180,9 @@ def run_rtmpdump(args):
 
         while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live:
             prevsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen('[rtmpdump] Downloaded %s bytes' % prevsize)
+            self.to_screen(f'[rtmpdump] Downloaded {prevsize} bytes')
             time.sleep(5.0)  # This seems to be needed
-            args = basic_args + ['--resume']
+            args = [*basic_args, '--resume']
             if retval == RD_FAILED:
                 args += ['--skip', '1']
             args = [encodeArgument(a) for a in args]
@@ -197,7 +197,7 @@ def run_rtmpdump(args):
                 break
         if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen('[rtmpdump] Downloaded %s bytes' % fsize)
+            self.to_screen(f'[rtmpdump] Downloaded {fsize} bytes')
             self.try_rename(tmpfilename, filename)
             self._hook_progress({
                 'downloaded_bytes': fsize,
@@ -18,7 +18,7 @@ class YoutubeLiveChatFD(FragmentFD):
 
     def real_download(self, filename, info_dict):
         video_id = info_dict['video_id']
-        self.to_screen('[%s] Downloading live chat' % self.FD_NAME)
+        self.to_screen(f'[{self.FD_NAME}] Downloading live chat')
         if not self.params.get('skip_download') and info_dict['protocol'] == 'youtube_live_chat':
             self.report_warning('Live chat download runs until the livestream ends. '
                                 'If you wish to download the video simultaneously, run a separate yt-dlp instance')

(File diff suppressed because it is too large)
@@ -4,12 +4,11 @@
 import time
 
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
-    dict_get,
     ExtractorError,
-    js_to_json,
+    dict_get,
     int_or_none,
+    js_to_json,
     parse_iso8601,
     str_or_none,
     traverse_obj,
@@ -67,7 +66,7 @@ class ABCIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'WWI Centenary',
             'description': 'md5:c2379ec0ca84072e86b446e536954546',
-        }
+        },
     }, {
         'url': 'https://www.abc.net.au/news/programs/the-world/2020-06-10/black-lives-matter-protests-spawn-support-for/12342074',
         'info_dict': {
@@ -75,7 +74,7 @@ class ABCIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Black Lives Matter protests spawn support for Papuans in Indonesia',
             'description': 'md5:2961a17dc53abc558589ccd0fb8edd6f',
-        }
+        },
     }, {
         'url': 'https://www.abc.net.au/btn/newsbreak/btn-newsbreak-20200814/12560476',
         'info_dict': {
@@ -86,7 +85,7 @@ class ABCIE(InfoExtractor):
             'upload_date': '20200813',
             'uploader': 'Behind the News',
             'uploader_id': 'behindthenews',
-        }
+        },
     }, {
         'url': 'https://www.abc.net.au/news/2023-06-25/wagner-boss-orders-troops-back-to-bases-to-avoid-bloodshed/102520540',
         'info_dict': {
@@ -95,7 +94,7 @@ class ABCIE(InfoExtractor):
             'ext': 'mp4',
             'description': 'Wagner troops leave Rostov-on-Don and\xa0Yevgeny Prigozhin will move to Belarus under a deal brokered by Belarusian President Alexander Lukashenko to end the mutiny.',
             'thumbnail': 'https://live-production.wcms.abc-cdn.net.au/0c170f5b57f0105c432f366c0e8e267b?impolicy=wcms_crop_resize&cropH=2813&cropW=5000&xPos=0&yPos=249&width=862&height=485',
-        }
+        },
     }]
 
     def _real_extract(self, url):
@@ -126,7 +125,7 @@ def _real_extract(self, url):
         if mobj is None:
             expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
             if expired:
-                raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)
+                raise ExtractorError(f'{self.IE_NAME} said: {expired}', expected=True)
             raise ExtractorError('Unable to extract video urls')
 
         urls_info = self._parse_json(
@@ -164,7 +163,7 @@ def _real_extract(self, url):
                 'height': height,
                 'tbr': bitrate,
                 'filesize': int_or_none(url_info.get('filesize')),
-                'format_id': format_id
+                'format_id': format_id,
             })
 
         return {
@@ -288,13 +287,12 @@ def _real_extract(self, url):
         stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream'))
 
         house_number = video_params.get('episodeHouseNumber') or video_id
-        path = '/auth/hls/sign?ts={0}&hn={1}&d=android-tablet'.format(
-            int(time.time()), house_number)
+        path = f'/auth/hls/sign?ts={int(time.time())}&hn={house_number}&d=android-tablet'
         sig = hmac.new(
             b'android.content.res.Resources',
-            path.encode('utf-8'), hashlib.sha256).hexdigest()
+            path.encode(), hashlib.sha256).hexdigest()
         token = self._download_webpage(
-            'http://iview.abc.net.au{0}&sig={1}'.format(path, sig), video_id)
+            f'http://iview.abc.net.au{path}&sig={sig}', video_id)
 
         def tokenize_url(url, token):
             return update_url_query(url, {
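The token exchange rewritten above signs the request path with HMAC-SHA256 under a static key taken straight from the diff; extracted as a standalone sketch (house number illustrative):

    import hashlib
    import hmac
    import time

    house_number = 'ZX9735A025S00'
    path = f'/auth/hls/sign?ts={int(time.time())}&hn={house_number}&d=android-tablet'
    sig = hmac.new(
        b'android.content.res.Resources',
        path.encode(), hashlib.sha256).hexdigest()
    signed_url = f'http://iview.abc.net.au{path}&sig={sig}'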
@@ -303,7 +301,7 @@ def tokenize_url(url, token):
 
         for sd in ('1080', '720', 'sd', 'sd-low'):
             sd_url = try_get(
-                stream, lambda x: x['streams']['hls'][sd], compat_str)
+                stream, lambda x: x['streams']['hls'][sd], str)
             if not sd_url:
                 continue
             formats = self._extract_m3u8_formats(
@@ -358,7 +356,7 @@ class ABCIViewShowSeriesIE(InfoExtractor):
             'description': 'md5:93119346c24a7c322d446d8eece430ff',
             'series': 'Upper Middle Bogan',
             'season': 'Series 1',
-            'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$'
+            'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$',
         },
         'playlist_count': 8,
     }, {
@@ -386,7 +384,7 @@ class ABCIViewShowSeriesIE(InfoExtractor):
             'description': 'Satirist Mark Humphries brings his unique perspective on current political events for 7.30.',
             'series': '7.30 Mark Humphries Satire',
             'season': 'Episodes',
-            'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$'
+            'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$',
         },
         'playlist_count': 15,
     }]
@@ -398,7 +396,7 @@ def _real_extract(self, url):
             r'window\.__INITIAL_STATE__\s*=\s*[\'"](.+?)[\'"]\s*;',
             webpage, 'initial state')
         video_data = self._parse_json(
-            unescapeHTML(webpage_data).encode('utf-8').decode('unicode_escape'), show_id)
+            unescapeHTML(webpage_data).encode().decode('unicode_escape'), show_id)
         video_data = video_data['route']['pageData']['_embedded']
 
         highlight = try_get(video_data, lambda x: x['highlightVideo']['shareUrl'])
@@ -58,7 +58,7 @@ def _real_extract(self, url):
         display_id = mobj.group('display_id')
         video_id = mobj.group('id')
         info_dict = self._extract_feed_info(
-            'http://abcnews.go.com/video/itemfeed?id=%s' % video_id)
+            f'http://abcnews.go.com/video/itemfeed?id={video_id}')
         info_dict.update({
             'id': video_id,
             'display_id': display_id,
@@ -1,5 +1,4 @@
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     dict_get,
     int_or_none,
@@ -57,11 +56,11 @@ def _real_extract(self, url):
         data = self._download_json(
             'https://api.abcotvs.com/v2/content', display_id, query={
                 'id': video_id,
-                'key': 'otv.web.%s.story' % station,
+                'key': f'otv.web.{station}.story',
                 'station': station,
             })['data']
         video = try_get(data, lambda x: x['featuredMedia']['video'], dict) or data
-        video_id = compat_str(dict_get(video, ('id', 'publishedKey'), video_id))
+        video_id = str(dict_get(video, ('id', 'publishedKey'), video_id))
         title = video.get('title') or video['linkText']
 
         formats = []
@@ -12,20 +12,21 @@
 import urllib.request
 import urllib.response
 import uuid
-from ..utils.networking import clean_proxies
 from .common import InfoExtractor
 from ..aes import aes_ecb_decrypt
 from ..utils import (
     ExtractorError,
+    OnDemandPagedList,
     bytes_to_intlist,
     decode_base_n,
     int_or_none,
     intlist_to_bytes,
-    OnDemandPagedList,
     time_seconds,
     traverse_obj,
     update_url_query,
 )
+from ..utils.networking import clean_proxies
 
 
 def add_opener(ydl, handler):  # FIXME: Create proper API in .networking
@@ -65,8 +66,8 @@ def _get_videokey_from_ticket(self, ticket):
             query={'t': media_token},
             data=json.dumps({
                 'kv': 'a',
-                'lt': ticket
-            }).encode('utf-8'),
+                'lt': ticket,
+            }).encode(),
             headers={
                 'Content-Type': 'application/json',
             })
@@ -76,7 +77,7 @@ def _get_videokey_from_ticket(self, ticket):
 
         h = hmac.new(
             binascii.unhexlify(self.HKEY),
-            (license_response['cid'] + self.ie._DEVICE_ID).encode('utf-8'),
+            (license_response['cid'] + self.ie._DEVICE_ID).encode(),
             digestmod=hashlib.sha256)
         enckey = bytes_to_intlist(h.digest())
 
@@ -102,11 +103,11 @@ class AbemaTVBaseIE(InfoExtractor):
 
     @classmethod
     def _generate_aks(cls, deviceid):
-        deviceid = deviceid.encode('utf-8')
+        deviceid = deviceid.encode()
         # add 1 hour and then drop minute and secs
         ts_1hour = int((time_seconds() // 3600 + 1) * 3600)
         time_struct = time.gmtime(ts_1hour)
-        ts_1hour_str = str(ts_1hour).encode('utf-8')
+        ts_1hour_str = str(ts_1hour).encode()
 
         tmp = None
 
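The repeated .encode('utf-8') -> .encode() simplification above is safe because str.encode() has defaulted to UTF-8 since Python 3; the hour-bucketed timestamp feeding _generate_aks is likewise plain arithmetic (time.time() standing in here for yt-dlp's time_seconds()):

    import time

    assert 'アベマ'.encode() == 'アベマ'.encode('utf-8')

    # round up to the next hour boundary, dropping minutes and seconds
    ts_1hour = int((time.time() // 3600 + 1) * 3600)
    assert ts_1hour % 3600 == 0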
@@ -118,7 +119,7 @@ def mix_once(nonce):
 
         def mix_tmp(count):
             nonlocal tmp
-            for i in range(count):
+            for _ in range(count):
                 mix_once(tmp)
 
         def mix_twist(nonce):
@@ -159,7 +160,7 @@ def _get_device_token(self):
             data=json.dumps({
                 'deviceId': self._DEVICE_ID,
                 'applicationKeySecret': aks,
-            }).encode('utf-8'),
+            }).encode(),
             headers={
                 'Content-Type': 'application/json',
             })
@@ -179,7 +180,7 @@ def _get_media_token(self, invalidate=False, to_show=True):
                 'osLang': 'ja_JP',
                 'osTimezone': 'Asia/Tokyo',
                 'appId': 'tv.abema',
-                'appVersion': '3.27.1'
+                'appVersion': '3.27.1',
             }, headers={
                 'Authorization': f'bearer {self._get_device_token()}',
             })['token']
@@ -201,8 +202,8 @@ def _perform_login(self, username, password):
             f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in',
             data=json.dumps({
                 method: username,
-                'password': password
-            }).encode('utf-8'), headers={
+                'password': password,
+            }).encode(), headers={
                 'Authorization': f'bearer {self._get_device_token()}',
                 'Origin': 'https://abema.tv',
                 'Referer': 'https://abema.tv/',
@@ -343,7 +344,7 @@ def _real_extract(self, url):
 
         description = self._html_search_regex(
             (r'<p\s+class="com-video-EpisodeDetailsBlock__content"><span\s+class=".+?">(.+?)</span></p><div',
-             r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div',),
+             r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div'),
             webpage, 'description', default=None, group=1)
         if not description:
             og_desc = self._html_search_meta(
@@ -67,7 +67,7 @@ class ACastIE(ACastBaseIE):
             'display_id': '2.raggarmordet-rosterurdetforflutna',
             'season_number': 4,
             'season': 'Season 4',
-        }
+        },
     }, {
         'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015',
         'only_matching': True,
@@ -93,13 +93,13 @@ class ACastIE(ACastBaseIE):
             'series': 'Democracy Sausage with Mark Kenny',
             'timestamp': 1684826362,
             'description': 'md5:feabe1fc5004c78ee59c84a46bf4ba16',
-        }
+        },
     }]
 
     def _real_extract(self, url):
         channel, display_id = self._match_valid_url(url).groups()
         episode = self._call_api(
-            '%s/episodes/%s' % (channel, display_id),
+            f'{channel}/episodes/{display_id}',
             display_id, {'showInfo': 'true'})
         return self._extract_episode(
             episode, self._extract_show_info(episode.get('show') or {}))
@@ -130,7 +130,7 @@ class ACastChannelIE(ACastBaseIE):
 
     @classmethod
     def suitable(cls, url):
-        return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
+        return False if ACastIE.suitable(url) else super().suitable(url)
 
     def _real_extract(self, url):
         show_slug = self._match_id(url)
@@ -3,10 +3,10 @@
     float_or_none,
     format_field,
     int_or_none,
-    str_or_none,
-    traverse_obj,
     parse_codecs,
     parse_qs,
+    str_or_none,
+    traverse_obj,
 )
@@ -25,7 +25,7 @@ def _extract_metadata(self, video_id, video_info):
                     'width': int_or_none(video.get('width')),
                     'height': int_or_none(video.get('height')),
                     'tbr': float_or_none(video.get('avgBitrate')),
-                    **parse_codecs(video.get('codecs', ''))
+                    **parse_codecs(video.get('codecs', '')),
                 })
 
         return {
@@ -77,7 +77,7 @@ class AcFunVideoIE(AcFunVideoBaseIE):
             'comment_count': int,
             'thumbnail': r're:^https?://.*\.(jpg|jpeg)',
             'description': 'md5:67583aaf3a0f933bd606bc8a2d3ebb17',
-        }
+        },
     }]
 
     def _real_extract(self, url):
@@ -7,21 +7,20 @@
 
 from .common import InfoExtractor
 from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
-from ..compat import compat_b64decode
 from ..networking.exceptions import HTTPError
 from ..utils import (
+    ExtractorError,
     ass_subtitles_timecode,
     bytes_to_intlist,
     bytes_to_long,
-    ExtractorError,
     float_or_none,
     int_or_none,
     intlist_to_bytes,
     long_to_bytes,
     parse_iso8601,
     pkcs1pad,
-    strip_or_none,
     str_or_none,
+    strip_or_none,
     try_get,
     unified_strdate,
     urlencode_postdata,
@@ -111,9 +110,9 @@ def _get_subtitles(self, sub_url, video_id):
 
         # http://animationdigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
         dec_subtitles = unpad_pkcs7(aes_cbc_decrypt_bytes(
-            compat_b64decode(enc_subtitles[24:]),
+            base64.b64decode(enc_subtitles[24:]),
             binascii.unhexlify(self._K + '7fac1178830cfe0c'),
-            compat_b64decode(enc_subtitles[:24])))
+            base64.b64decode(enc_subtitles[:24])))
         subtitles_json = self._parse_json(dec_subtitles.decode(), None, fatal=False)
         if not subtitles_json:
             return None
@@ -136,7 +135,7 @@ def _get_subtitles(self, sub_url, video_id):
             if start is None or end is None or text is None:
                 continue
             alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)
-            ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % (
+            ssa += os.linesep + 'Dialogue: Marked=0,{},{},Default,,0,0,0,,{}{}'.format(
                 ass_subtitles_timecode(start),
                 ass_subtitles_timecode(end),
                 '{\\a%d}' % alignment if alignment != 2 else '',
@@ -178,7 +177,7 @@ def _perform_login(self, username, password):
 
     def _real_extract(self, url):
         lang, video_id = self._match_valid_url(url).group('lang', 'id')
-        video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id
+        video_base_url = self._PLAYER_BASE_URL + f'video/{video_id}/'
         player = self._download_json(
             video_base_url + 'configuration', video_id,
             'Downloading player config JSON metadata',
@@ -219,12 +218,12 @@ def _real_extract(self, url):
                         links_url, video_id, 'Downloading links JSON metadata', headers={
                             'X-Player-Token': authorization,
                             'X-Target-Distribution': lang,
-                            **self._HEADERS
+                            **self._HEADERS,
                         }, query={
                             'freeWithAds': 'true',
                             'adaptive': 'false',
                             'withMetadata': 'true',
-                            'source': 'Web'
+                            'source': 'Web',
                         })
                     break
                 except ExtractorError as e:
@@ -256,7 +255,7 @@ def _real_extract(self, url):
         for quality, load_balancer_url in qualities.items():
             load_balancer_data = self._download_json(
                 load_balancer_url, video_id,
-                'Downloading %s %s JSON metadata' % (format_id, quality),
+                f'Downloading {format_id} {quality} JSON metadata',
                 fatal=False) or {}
             m3u8_url = load_balancer_data.get('location')
             if not m3u8_url:
@@ -276,7 +275,7 @@ def _real_extract(self, url):
             self.raise_login_required('This video requires a subscription', method='password')
 
         video = (self._download_json(
-            self._API_BASE_URL + 'video/%s' % video_id, video_id,
+            self._API_BASE_URL + f'video/{video_id}', video_id,
             'Downloading additional video metadata', fatal=False) or {}).get('video') or {}
         show = video.get('show') or {}
 
@@ -320,7 +319,7 @@ def _real_extract(self, url):
             f'{self._API_BASE_URL}video/show/{show_id}', video_show_slug,
             'Downloading episode list', headers={
                 'X-Target-Distribution': lang,
-                **self._HEADERS
+                **self._HEADERS,
             }, query={
                 'order': 'asc',
                 'limit': '-1',
@@ -1,8 +1,6 @@
+import urllib.parse
+
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urlparse,
-)
 
 
 class AdobeConnectIE(InfoExtractor):
@@ -12,13 +10,13 @@ def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         title = self._html_extract_title(webpage)
-        qs = compat_parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1])
+        qs = urllib.parse.parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1])
         is_live = qs.get('isLive', ['false'])[0] == 'true'
         formats = []
         for con_string in qs['conStrings'][0].split(','):
             formats.append({
                 'format_id': con_string.split('://')[0],
-                'app': compat_urlparse.quote('?' + con_string.split('?')[1] + 'flvplayerapp/' + qs['appInstance'][0]),
+                'app': urllib.parse.quote('?' + con_string.split('?')[1] + 'flvplayerapp/' + qs['appInstance'][0]),
                 'ext': 'flv',
                 'play_path': 'mp4:' + qs['streamName'][0],
                 'rtmp_conn': 'S:' + qs['ticket'][0],
File diff suppressed because it is too large
@@ -2,13 +2,12 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
+    ISO639Utils,
+    OnDemandPagedList,
     float_or_none,
     int_or_none,
-    ISO639Utils,
     join_nonempty,
-    OnDemandPagedList,
     parse_duration,
     str_or_none,
     str_to_int,
@@ -36,7 +35,7 @@ def _parse_subtitles(self, video_data, url_key):
         return subtitles
 
     def _parse_video_data(self, video_data):
-        video_id = compat_str(video_data['id'])
+        video_id = str(video_data['id'])
         title = video_data['title']
 
         s3_extracted = False
@@ -151,7 +150,7 @@ def _fetch_page(self, display_id, query, page):
         page += 1
         query['page'] = page
         for element_data in self._call_api(
-                self._RESOURCE, display_id, query, 'Download Page %d' % page):
+                self._RESOURCE, display_id, query, f'Download Page {page}'):
             yield self._process_data(element_data)
 
     def _extract_playlist_entries(self, display_id, query):
@@ -91,7 +91,7 @@ def _real_extract(self, url):
   getShowBySlug(slug:"%s") {
     %%s
   }
-}''' % show_path
+}''' % show_path  # noqa: UP031
         if episode_path:
             query = query % '''title
               getVideoBySlug(slug:"%s") {
@@ -128,7 +128,7 @@ def _real_extract(self, url):
             episode_title = title = video_data['title']
             series = show_data.get('title')
             if series:
-                title = '%s - %s' % (series, title)
+                title = f'{series} - {title}'
             info = {
                 'id': video_id,
                 'title': title,
@@ -191,7 +191,7 @@ def _real_extract(self, url):
                 if not slug:
                     continue
                 entries.append(self.url_result(
-                    'http://adultswim.com/videos/%s/%s' % (show_path, slug),
+                    f'http://adultswim.com/videos/{show_path}/{slug}',
                     'AdultSwim', video.get('_id')))
             return self.playlist_result(
                 entries, show_path, show_data.get('title'),
@@ -73,8 +73,8 @@ def _extract_aen_smil(self, smil_url, video_id, auth=None):
     def _extract_aetn_info(self, domain, filter_key, filter_value, url):
         requestor_id, brand = self._DOMAIN_MAP[domain]
         result = self._download_json(
-            'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand,
-            filter_value, query={'filter[%s]' % filter_key: filter_value})
+            f'https://feeds.video.aetnd.com/api/v2/{brand}/videos',
+            filter_value, query={f'filter[{filter_key}]': filter_value})
         result = traverse_obj(
             result, ('results',
                      lambda k, v: k == 0 and v[filter_key] == filter_value),
@@ -142,7 +142,7 @@ class AENetworksIE(AENetworksBaseIE):
             'skip_download': True,
         },
         'add_ie': ['ThePlatform'],
-        'skip': 'Geo-restricted - This content is not available in your location.'
+        'skip': 'Geo-restricted - This content is not available in your location.',
     }, {
         'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1',
         'info_dict': {
@@ -171,28 +171,28 @@ class AENetworksIE(AENetworksBaseIE):
         'skip': 'This video is only available for users of participating TV providers.',
     }, {
         'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8',
-        'only_matching': True
+        'only_matching': True,
     }, {
         'url': 'http://www.mylifetime.com/shows/project-runway-junior/season-1/episode-6',
-        'only_matching': True
+        'only_matching': True,
     }, {
         'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie',
-        'only_matching': True
+        'only_matching': True,
     }, {
         'url': 'https://watch.lifetimemovieclub.com/movies/10-year-reunion/full-movie',
-        'only_matching': True
+        'only_matching': True,
     }, {
         'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special',
-        'only_matching': True
+        'only_matching': True,
     }, {
         'url': 'https://www.aetv.com/specials/hunting-jonbenets-killer-the-untold-story/preview-hunting-jonbenets-killer-the-untold-story',
-        'only_matching': True
+        'only_matching': True,
     }, {
         'url': 'http://www.history.com/videos/history-of-valentines-day',
-        'only_matching': True
+        'only_matching': True,
     }, {
         'url': 'https://play.aetv.com/shows/duck-dynasty/videos/best-of-duck-dynasty-getting-quack-in-shape',
-        'only_matching': True
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -209,14 +209,14 @@ def _call_api(self, resource, slug, brand, fields):
   %s(slug: "%s") {
     %s
   }
-}''' % (resource, slug, fields),
+}''' % (resource, slug, fields),  # noqa: UP031
             }))['data'][resource]
 
     def _real_extract(self, url):
         domain, slug = self._match_valid_url(url).groups()
         _, brand = self._DOMAIN_MAP[domain]
         playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS)
-        base_url = 'http://watch.%s' % domain
+        base_url = f'http://watch.{domain}'
 
         entries = []
         for item in (playlist.get(self._ITEMS_KEY) or []):
@@ -248,10 +248,10 @@ class AENetworksCollectionIE(AENetworksListBaseIE):
         'playlist_mincount': 12,
     }, {
         'url': 'https://watch.historyvault.com/shows/america-the-story-of-us-2/season-1/list/america-the-story-of-us',
-        'only_matching': True
+        'only_matching': True,
     }, {
         'url': 'https://www.historyvault.com/collections/mysteryquest',
-        'only_matching': True
+        'only_matching': True,
     }]
     _RESOURCE = 'list'
     _ITEMS_KEY = 'items'
@@ -309,7 +309,7 @@ class HistoryTopicIE(AENetworksBaseIE):
         'info_dict': {
             'id': '40700995724',
             'ext': 'mp4',
-            'title': "History of Valentine’s Day",
+            'title': 'History of Valentine’s Day',
             'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
             'timestamp': 1375819729,
             'upload_date': '20130806',
@@ -364,6 +364,6 @@ def _real_extract(self, url):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id)
         player_url = self._search_regex(
-            r'<phoenix-iframe[^>]+src="(%s)' % HistoryPlayerIE._VALID_URL,
+            rf'<phoenix-iframe[^>]+src="({HistoryPlayerIE._VALID_URL})',
             webpage, 'player URL')
         return self.url_result(player_url, HistoryPlayerIE.ie_key())
@@ -16,8 +16,8 @@ class AeonCoIE(InfoExtractor):
             'uploader': 'Semiconductor',
             'uploader_id': 'semiconductor',
             'uploader_url': 'https://vimeo.com/semiconductor',
-            'duration': 348
-        }
+            'duration': 348,
+        },
     }, {
         'url': 'https://aeon.co/videos/dazzling-timelapse-shows-how-microbes-spoil-our-food-and-sometimes-enrich-it',
         'md5': '03582d795382e49f2fd0b427b55de409',
@@ -29,8 +29,8 @@ class AeonCoIE(InfoExtractor):
             'uploader': 'Aeon Video',
             'uploader_id': 'aeonvideo',
             'uploader_url': 'https://vimeo.com/aeonvideo',
-            'duration': 1344
-        }
+            'duration': 1344,
+        },
     }, {
         'url': 'https://aeon.co/videos/chew-over-the-prisoners-dilemma-and-see-if-you-can-find-the-rational-path-out',
         'md5': '1cfda0bf3ae24df17d00f2c0cb6cc21b',
@@ -55,7 +55,7 @@ def _perform_login(self, username, password):
         if result != 1:
             error = _ERRORS.get(result, 'You have failed to log in.')
             raise ExtractorError(
-                'Unable to login: %s said: %s' % (self.IE_NAME, error),
+                f'Unable to login: {self.IE_NAME} said: {error}',
                 expected=True)
 
 
@@ -227,7 +227,7 @@ def _real_extract(self, url):
                 **traverse_obj(file_element, {
                     'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
                     'timestamp': ('file_start', {unified_timestamp}),
-                })
+                }),
             })
 
         if traverse_obj(data, ('adult_status', {str})) == 'notLogin':
@@ -168,7 +168,7 @@ def _real_extract(self, url):
         for ext in ('aac', 'mp3'):
             url_data = self._download_json(
                 f'https://api.podcast.radioagora.pl/api4/getSongUrl?podcast_id={media_id}&device_id={uuid.uuid4()}&ppre=false&audio={ext}',
-                media_id, 'Downloading podcast %s URL' % ext)
+                media_id, f'Downloading podcast {ext} URL')
             # prevents inserting the mp3 (default) multiple times
             if 'link_ssl' in url_data and f'.{ext}' in url_data['link_ssl']:
                 formats.append({
@@ -206,8 +206,8 @@ class TokFMAuditionIE(InfoExtractor):
     }
 
     @staticmethod
-    def _create_url(id):
-        return f'https://audycje.tokfm.pl/audycja/{id}'
+    def _create_url(video_id):
+        return f'https://audycje.tokfm.pl/audycja/{video_id}'
 
     def _real_extract(self, url):
         audition_id = self._match_id(url)
@@ -5,7 +5,7 @@
     int_or_none,
     mimetype2ext,
     parse_iso8601,
-    traverse_obj
+    traverse_obj,
 )
 
 
@@ -26,7 +26,7 @@ class AirTVIE(InfoExtractor):
             'view_count': int,
             'thumbnail': 'https://cdn-sp-gcs.air.tv/videos/W/8/W87jcWleSn2hXZN47zJZsQ/b13fc56464f47d9d62a36d110b9b5a72-4096x2160_9.jpg',
             'timestamp': 1664792603,
-        }
+        },
     }, {
         # with youtube_id
         'url': 'https://www.air.tv/watch?v=sv57EC8tRXG6h8dNXFUU1Q',
@@ -54,7 +54,7 @@ class AirTVIE(InfoExtractor):
             'channel': 'Newsflare',
             'duration': 37,
             'upload_date': '20180511',
-        }
+        },
     }]
 
     def _get_formats_and_subtitle(self, json_data, video_id):
@@ -22,7 +22,7 @@ class AitubeKZVideoIE(InfoExtractor):
             'timestamp': 1667370519,
             'title': 'Ангел хранитель 1 серия',
             'channel_follower_count': int,
-        }
+        },
     }, {
         # embed url
         'url': 'https://aitube.kz/embed/?id=9291d29b-c038-49a1-ad42-3da2051d353c',
@@ -1,5 +1,4 @@
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     float_or_none,
     try_get,
@@ -44,7 +43,7 @@ def _real_extract(self, url):
             'title': title,
             'thumbnail': data.get('coverUrl'),
             'uploader': try_get(
-                data, lambda x: x['followBar']['name'], compat_str),
+                data, lambda x: x['followBar']['name'], str),
             'timestamp': float_or_none(data.get('startTimeLong'), scale=1000),
             'formats': formats,
         }
@@ -18,7 +18,7 @@ class AlJazeeraIE(InfoExtractor):
             'timestamp': 1636219149,
             'description': 'U sarajevskim naseljima Rajlovac i Reljevo stambeni objekti, ali i industrijska postrojenja i dalje su pod vodom.',
             'upload_date': '20211106',
-        }
+        },
     }, {
         'url': 'https://balkans.aljazeera.net/videos/2021/11/6/djokovic-usao-u-finale-mastersa-u-parizu',
         'info_dict': {
@@ -33,7 +33,7 @@ class AlJazeeraIE(InfoExtractor):
     BRIGHTCOVE_URL_RE = r'https?://players.brightcove.net/(?P<account>\d+)/(?P<player_id>[a-zA-Z0-9]+)_(?P<embed>[^/]+)/index.html\?videoId=(?P<id>\d+)'
 
     def _real_extract(self, url):
-        base, post_type, id = self._match_valid_url(url).groups()
+        base, post_type, display_id = self._match_valid_url(url).groups()
         wp = {
             'balkans.aljazeera.net': 'ajb',
             'chinese.aljazeera.net': 'chinese',
@@ -47,11 +47,11 @@ def _real_extract(self, url):
             'news': 'news',
         }[post_type.split('/')[0]]
         video = self._download_json(
-            f'https://{base}/graphql', id, query={
+            f'https://{base}/graphql', display_id, query={
                 'wp-site': wp,
                 'operationName': 'ArchipelagoSingleArticleQuery',
                 'variables': json.dumps({
-                    'name': id,
+                    'name': display_id,
                     'postType': post_type,
                 }),
             }, headers={
@@ -64,7 +64,7 @@ def _real_extract(self, url):
             embed = 'default'
 
         if video_id is None:
-            webpage = self._download_webpage(url, id)
+            webpage = self._download_webpage(url, display_id)
 
             account, player_id, embed, video_id = self._search_regex(self.BRIGHTCOVE_URL_RE, webpage, 'video id',
                                                                      group=(1, 2, 3, 4), default=(None, None, None, None))
@@ -73,11 +73,11 @@ def _real_extract(self, url):
             return {
                 '_type': 'url_transparent',
                 'url': url,
-                'ie_key': 'Generic'
+                'ie_key': 'Generic',
             }
 
         return {
             '_type': 'url_transparent',
             'url': f'https://players.brightcove.net/{account}/{player_id}_{embed}/index.html?videoId={video_id}',
-            'ie_key': 'BrightcoveNew'
+            'ie_key': 'BrightcoveNew',
         }
@@ -1,5 +1,4 @@
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     int_or_none,
     qualities,
@@ -95,11 +94,11 @@ def _real_extract(self, url):
             duration = int_or_none(video.get('duration'))
             view_count = int_or_none(video.get('view_count'))
             timestamp = unified_timestamp(try_get(
-                video, lambda x: x['added_at']['date'], compat_str))
+                video, lambda x: x['added_at']['date'], str))
         else:
             video_id = display_id
             media_data = self._download_json(
-                'http://www.allocine.fr/ws/AcVisiondataV5.ashx?media=%s' % video_id, display_id)
+                f'http://www.allocine.fr/ws/AcVisiondataV5.ashx?media={video_id}', display_id)
             title = remove_end(strip_or_none(self._html_extract_title(webpage), ' - AlloCiné'))
             for key, value in media_data['video'].items():
                 if not key.endswith('Path'):
@@ -12,7 +12,6 @@
 )
 from ..utils.traversal import traverse_obj
 
-
 _FIELDS = '''
     _id
     clipImageSource
@@ -34,27 +33,27 @@
         video: getClip(clipIdentifier: $id) {
             %s %s
         }
-    }''' % (_FIELDS, _EXTRA_FIELDS),
+    }''' % (_FIELDS, _EXTRA_FIELDS),  # noqa: UP031
     'montage': '''query ($id: String!) {
         video: getMontage(clipIdentifier: $id) {
             %s
         }
-    }''' % _FIELDS,
+    }''' % _FIELDS,  # noqa: UP031
    'Clips': '''query ($page: Int!, $user: String!, $game: Int) {
         videos: clips(search: createdDate, page: $page, user: $user, mobile: false, game: $game) {
             data { %s %s }
         }
-    }''' % (_FIELDS, _EXTRA_FIELDS),
+    }''' % (_FIELDS, _EXTRA_FIELDS),  # noqa: UP031
     'Montages': '''query ($page: Int!, $user: String!) {
         videos: montages(search: createdDate, page: $page, user: $user) {
             data { %s }
         }
-    }''' % _FIELDS,
+    }''' % _FIELDS,  # noqa: UP031
     'Mobile Clips': '''query ($page: Int!, $user: String!) {
         videos: clips(search: createdDate, page: $page, user: $user, mobile: true) {
             data { %s %s }
         }
-    }''' % (_FIELDS, _EXTRA_FIELDS),
+    }''' % (_FIELDS, _EXTRA_FIELDS),  # noqa: UP031
 }
 
 
@@ -122,7 +121,7 @@ class AllstarIE(AllstarBaseIE):
             'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
             'upload_date': '20230425',
             'view_count': int,
-        }
+        },
     }, {
         'url': 'https://allstar.gg/clip?clip=8LJLY4JKB',
         'info_dict': {
@@ -140,7 +139,7 @@ class AllstarIE(AllstarBaseIE):
             'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
             'upload_date': '20230702',
             'view_count': int,
-        }
+        },
     }, {
         'url': 'https://allstar.gg/montage?montage=643e64089da7e9363e1fa66c',
         'info_dict': {
@@ -156,7 +155,7 @@ class AllstarIE(AllstarBaseIE):
             'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
             'upload_date': '20230418',
             'view_count': int,
-        }
+        },
     }, {
         'url': 'https://allstar.gg/montage?montage=RILJMH6QOS',
         'info_dict': {
@@ -172,7 +171,7 @@ class AllstarIE(AllstarBaseIE):
             'uploader_url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d',
             'upload_date': '20230703',
             'view_count': int,
-        }
+        },
     }]
 
     def _real_extract(self, url):
@@ -192,28 +191,28 @@ class AllstarProfileIE(AllstarBaseIE):
             'id': '62b8bdfc9021052f7905882d-clips',
             'title': 'cherokee - Clips',
         },
-        'playlist_mincount': 15
+        'playlist_mincount': 15,
     }, {
         'url': 'https://allstar.gg/u/cherokee?game=730&view=Clips',
         'info_dict': {
             'id': '62b8bdfc9021052f7905882d-clips-730',
             'title': 'cherokee - Clips - 730',
         },
-        'playlist_mincount': 15
+        'playlist_mincount': 15,
     }, {
         'url': 'https://allstar.gg/u/62b8bdfc9021052f7905882d?view=Montages',
         'info_dict': {
             'id': '62b8bdfc9021052f7905882d-montages',
             'title': 'cherokee - Montages',
         },
-        'playlist_mincount': 4
+        'playlist_mincount': 4,
     }, {
         'url': 'https://allstar.gg/profile?user=cherokee&view=Mobile Clips',
         'info_dict': {
             'id': '62b8bdfc9021052f7905882d-mobile',
             'title': 'cherokee - Mobile Clips',
         },
-        'playlist_mincount': 1
+        'playlist_mincount': 1,
     }]
 
     _PAGE_SIZE = 10
@@ -1,9 +1,9 @@
 from .common import InfoExtractor
 from ..utils import (
-    parse_iso8601,
+    int_or_none,
     parse_duration,
     parse_filesize,
-    int_or_none,
+    parse_iso8601,
 )
 
 
@@ -25,7 +25,7 @@ class AlphaPornoIE(InfoExtractor):
             'tbr': 1145,
             'categories': list,
             'age_limit': 18,
-        }
+        },
     }
 
     def _real_extract(self, url):
@@ -12,7 +12,7 @@
 class Alsace20TVBaseIE(InfoExtractor):
     def _extract_video(self, video_id, url=None):
         info = self._download_json(
-            'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key=%s&habillage=0&mode=html' % (video_id, ),
+            f'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key={video_id}&habillage=0&mode=html',
             video_id) or {}
         title = info.get('titre')
 
@@ -24,9 +24,9 @@ def _extract_video(self, video_id, url=None):
                 else self._extract_mpd_formats(fmt_url, video_id, mpd_id=res, fatal=False))
 
         webpage = (url and self._download_webpage(url, video_id, fatal=False)) or ''
-        thumbnail = url_or_none(dict_get(info, ('image', 'preview', )) or self._og_search_thumbnail(webpage))
+        thumbnail = url_or_none(dict_get(info, ('image', 'preview')) or self._og_search_thumbnail(webpage))
         upload_date = self._search_regex(r'/(\d{6})_', thumbnail, 'upload_date', default=None)
-        upload_date = unified_strdate('20%s-%s-%s' % (upload_date[:2], upload_date[2:4], upload_date[4:])) if upload_date else None
+        upload_date = unified_strdate(f'20{upload_date[:2]}-{upload_date[2:4]}-{upload_date[4:]}') if upload_date else None
         return {
             'id': video_id,
             'title': title,
@@ -34,7 +34,7 @@ class AltCensoredIE(InfoExtractor):
             'thumbnail': 'https://archive.org/download/youtube-k0srjLSkga8/youtube-k0srjLSkga8.thumbs/k0srjLSkga8_000925.jpg',
             'view_count': int,
             'categories': ['News & Politics'],
-        }
+        },
     }]
 
     def _real_extract(self, url):
Some files were not shown because too many files have changed in this diff