Mirror of https://github.com/yt-dlp/yt-dlp.git
Synced 2024-11-19 06:39:16 +00:00

Commit 50c943e8a0: Merge branch 'yt-dlp:master' into pr/yt-live-from-start-range
.github/banner.svg (vendored, 10 changes)

File diff suppressed because one or more lines are too long
Size before: 24 KiB, after: 15 KiB
.github/workflows/build.yml (vendored, 25 changes)

@@ -107,6 +108,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0  # Needed for changelog
       - uses: actions/setup-python@v5
         with:
           python-version: "3.10"
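For context on the `fetch-depth: 0` addition (an editorial aside, not part of the diff): `actions/checkout` performs a shallow, depth-1 clone by default, so history-walking tools such as the changelog generator invoked later in this workflow would see only a single commit. A minimal bash sketch of the difference:

```bash
# Shallow clone, the actions/checkout default: exactly one commit is visible.
git clone --depth 1 https://github.com/yt-dlp/yt-dlp.git shallow
git -C shallow rev-list --count HEAD    # -> 1

# Full-history clone, what fetch-depth: 0 requests: all commits and tags are
# available, so commit ranges between releases can be enumerated.
git clone https://github.com/yt-dlp/yt-dlp.git full
git -C full rev-list --count HEAD       # -> many thousands
```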
@@ -133,6 +135,7 @@ jobs:
       - name: Prepare
         run: |
           python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
+          python devscripts/update_changelog.py -vv
           python devscripts/make_lazy_extractors.py
       - name: Build Unix platform-independent binary
         run: |
@@ -244,9 +247,25 @@ jobs:
         run: |
           brew install coreutils
           python3 devscripts/install_deps.py --user -o --include build
-          python3 devscripts/install_deps.py --print --include pyinstaller > requirements.txt
+          python3 devscripts/install_deps.py --print --include pyinstaller_macos > requirements.txt
           # We need to ignore wheels otherwise we break universal2 builds
           python3 -m pip install -U --user --no-binary :all: -r requirements.txt
+          # We need to fuse our own universal2 wheels for curl_cffi
+          python3 -m pip install -U --user delocate
+          mkdir curl_cffi_whls curl_cffi_universal2
+          python3 devscripts/install_deps.py --print -o --include curl_cffi > requirements.txt
+          for platform in "macosx_11_0_arm64" "macosx_11_0_x86_64"; do
+            python3 -m pip download \
+              --only-binary=:all: \
+              --platform "${platform}" \
+              --pre -d curl_cffi_whls \
+              -r requirements.txt
+          done
+          python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/curl_cffi*.whl -w curl_cffi_universal2
+          python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/cffi*.whl -w curl_cffi_universal2
+          cd curl_cffi_universal2
+          for wheel in *cffi*.whl; do mv -n -- "${wheel}" "${wheel/x86_64/universal2}"; done
+          python3 -m pip install -U --user *cffi*.whl

       - name: Prepare
         run: |
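Background on the wheel-fusing block above (a sketch of the mechanics, not part of the diff): PyPI ships `curl_cffi` and `cffi` for macOS as separate x86_64 and arm64 wheels, while this job produces a single universal2 binary, so the per-arch wheels are downloaded and merged with delocate's `delocate_fuse`. The fused wheel keeps a single-arch file name, which the final rename fixes via bash pattern substitution:

```bash
# ${var/pattern/replacement} replaces the first match of "pattern" in $var.
# The wheel name below is hypothetical, for illustration only:
wheel="curl_cffi-1.0-cp38-abi3-macosx_11_0_x86_64.whl"
echo "${wheel/x86_64/universal2}"
# -> curl_cffi-1.0-cp38-abi3-macosx_11_0_universal2.whl

# mv -n (no-clobber, as used above) refuses to overwrite an existing wheel.
```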
@@ -300,7 +319,7 @@ jobs:
         run: |
           brew install coreutils
           python3 devscripts/install_deps.py --user -o --include build
-          python3 devscripts/install_deps.py --user --include pyinstaller
+          python3 devscripts/install_deps.py --user --include pyinstaller_macos --include curl_cffi

       - name: Prepare
         run: |
@@ -342,7 +361,7 @@ jobs:
       - name: Install Requirements
         run: |  # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
           python devscripts/install_deps.py -o --include build
-          python devscripts/install_deps.py --include py2exe
+          python devscripts/install_deps.py --include py2exe --include curl_cffi
           python -m pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-5.8.0-py3-none-any.whl"

       - name: Prepare
.github/workflows/core.yml (vendored, 2 changes)

@@ -53,7 +53,7 @@ jobs:
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install test requirements
-        run: python3 ./devscripts/install_deps.py --include dev
+        run: python3 ./devscripts/install_deps.py --include dev --include curl_cffi
       - name: Run tests
         continue-on-error: False
         run: |
.github/workflows/release.yml (vendored, 22 changes)

@@ -189,13 +189,8 @@ jobs:
         if: |
           !inputs.prerelease && env.target_repo == github.repository
         run: |
+          python devscripts/update_changelog.py -vv
           make doc
-          sed '/### /Q' Changelog.md >> ./CHANGELOG
-          echo '### ${{ env.version }}' >> ./CHANGELOG
-          python ./devscripts/make_changelog.py -vv -c >> ./CHANGELOG
-          echo >> ./CHANGELOG
-          grep -Poz '(?s)### \d+\.\d+\.\d+.+' 'Changelog.md' | head -n -1 >> ./CHANGELOG
-          cat ./CHANGELOG > Changelog.md

       - name: Push to release
         id: push_release
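For reference, the removed pipeline assembled release notes into Changelog.md by hand: `sed '/### /Q'` prints everything before the first release heading (GNU sed's `Q` quits without printing the matching line), and `grep -Poz` extracts the earlier release sections as one multi-line match. A rough reconstruction on a toy file (contents hypothetical):

```bash
printf '# Changelog\n\n### 2024.03.10\n- some fix\n' > Changelog.md

# Header portion: everything above the first '### ' release heading.
sed '/### /Q' Changelog.md

# All release sections as a single match: -P enables Perl regex so (?s)
# lets '.' span newlines; -z makes the output NUL-terminated.
grep -Poz '(?s)### \d+\.\d+\.\d+.+' Changelog.md
```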
@@ -266,6 +261,7 @@ jobs:
           pypi_project: ${{ needs.prepare.outputs.pypi_project }}
         run: |
           python devscripts/update-version.py -c "${{ env.channel }}" -r "${{ env.target_repo }}" -s "${{ env.suffix }}" "${{ env.version }}"
+          python devscripts/update_changelog.py -vv
           python devscripts/make_lazy_extractors.py
           sed -i -E '0,/(name = ")[^"]+(")/s//\1${{ env.pypi_project }}\2/' pyproject.toml

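The `sed` expression above is a first-occurrence-only replace: the GNU address range `0,/regex/` applies the substitution only through the first line matching the regex, and the empty pattern in `s//.../` reuses that same regex. A self-contained illustration with hypothetical file contents:

```bash
printf 'name = "yt-dlp"\ndescription = "demo"\nname = "unrelated"\n' > demo.toml

# \1 and \2 restore the captured 'name = "' prefix and closing quote
# around the substituted project name.
sed -i -E '0,/(name = ")[^"]+(")/s//\1yt-dlp-test\2/' demo.toml

cat demo.toml
# name = "yt-dlp-test"
# description = "demo"
# name = "unrelated"
```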
@@ -312,19 +308,19 @@ jobs:
           target_tag: ${{ needs.prepare.outputs.target_tag }}
         run: |
           printf '%s' \
-            '[![Installation](https://img.shields.io/badge/-Which%20file%20should%20I%20download%3F-white.svg?style=for-the-badge)]' \
+            '[![Installation](https://img.shields.io/badge/-Which%20file%20to%20download%3F-white.svg?style=for-the-badge)]' \
             '(https://github.com/${{ github.repository }}#installation "Installation instructions") ' \
+            '[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)]' \
+            '(https://discord.gg/H5MNcFW63r "Discord") ' \
+            '[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)]' \
+            '(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
             '[![Documentation](https://img.shields.io/badge/-Docs-brightgreen.svg?style=for-the-badge&logo=GitBook&labelColor=555555)]' \
             '(https://github.com/${{ github.repository }}' \
             '${{ env.target_repo == github.repository && format('/tree/{0}', env.target_tag) || '' }}#readme "Documentation") ' \
-            '[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)]' \
-            '(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
-            '[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)]' \
-            '(https://discord.gg/H5MNcFW63r "Discord") ' \
             ${{ env.target_repo == 'yt-dlp/yt-dlp' && '\
-            "[![Nightly](https://img.shields.io/badge/Get%20nightly%20builds-purple.svg?style=for-the-badge)]" \
+            "[![Nightly](https://img.shields.io/badge/Nightly%20builds-purple.svg?style=for-the-badge)]" \
             "(https://github.com/yt-dlp/yt-dlp-nightly-builds/releases/latest \"Nightly builds\") " \
-            "[![Master](https://img.shields.io/badge/Get%20master%20builds-lightblue.svg?style=for-the-badge)]" \
+            "[![Master](https://img.shields.io/badge/Master%20builds-lightblue.svg?style=for-the-badge)]" \
             "(https://github.com/yt-dlp/yt-dlp-master-builds/releases/latest \"Master builds\")"' || '' }} > ./RELEASE_NOTES
           printf '\n\n' >> ./RELEASE_NOTES
           cat >> ./RELEASE_NOTES << EOF
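One mechanism worth noting in this step: `printf '%s'` reapplies its single `%s` format to every argument and inserts no separators, so the fragments are concatenated exactly as written, which is why each badge link carries its own trailing space. A quick illustration with placeholder strings:

```bash
# printf reuses the format string for each argument; no separators are added.
printf '%s' '[![Badge](img)]' '(https://example.com "Title") ' 'next...'
# -> [![Badge](img)](https://example.com "Title") next...
```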
.gitignore (vendored, 3 changes)

@@ -33,6 +33,7 @@ cookies
 *.gif
 *.jpeg
 *.jpg
+*.lrc
 *.m4a
 *.m4v
 *.mhtml
@@ -40,6 +41,7 @@ cookies
 *.mov
 *.mp3
 *.mp4
+*.mpg
 *.mpga
 *.oga
 *.ogg
@@ -47,6 +49,7 @@ cookies
 *.png
 *.sbv
 *.srt
+*.ssa
 *.swf
 *.swp
 *.tt
CONTRIBUTING.md

@@ -79,7 +79,7 @@ ### Are you using the latest version?

 ### Is the issue already documented?

-Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/yt-dlp/yt-dlp/search?type=Issues) of this repository. If there is an issue, subcribe to it to be notified when there is any progress. Unless you have something useful to add to the converation, please refrain from commenting.
+Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/yt-dlp/yt-dlp/search?type=Issues) of this repository. If there is an issue, subscribe to it to be notified when there is any progress. Unless you have something useful to add to the conversation, please refrain from commenting.

 Additionally, it is also helpful to see if the issue has already been documented in the [youtube-dl issue tracker](https://github.com/ytdl-org/youtube-dl/issues). If similar issues have already been reported in youtube-dl (but not in our issue tracker), links to them can be included in your issue report here.

@@ -138,11 +138,11 @@ # DEVELOPER INSTRUCTIONS

 To run yt-dlp as a developer, you don't need to build anything either. Simply execute

-    python -m yt_dlp
+    python3 -m yt_dlp

 To run all the available core tests, use:

-    python devscripts/run_tests.py
+    python3 devscripts/run_tests.py

 See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.

@@ -151,7 +151,7 @@ # DEVELOPER INSTRUCTIONS

 ## Adding new feature or making overarching changes

-Before you start writing code for implementing a new feature, open an issue explaining your feature request and atleast one use case. This allows the maintainers to decide whether such a feature is desired for the project in the first place, and will provide an avenue to discuss some implementation details. If you open a pull request for a new feature without discussing with us first, do not be surprised when we ask for large changes to the code, or even reject it outright.
+Before you start writing code for implementing a new feature, open an issue explaining your feature request and at least one use case. This allows the maintainers to decide whether such a feature is desired for the project in the first place, and will provide an avenue to discuss some implementation details. If you open a pull request for a new feature without discussing with us first, do not be surprised when we ask for large changes to the code, or even reject it outright.

 The same applies for changes to the documentation, code style, or overarching changes to the architecture

@@ -218,7 +218,7 @@ ## Adding support for a new site
     }
     ```
 1. Add an import in [`yt_dlp/extractor/_extractors.py`](yt_dlp/extractor/_extractors.py). Note that the class name must end with `IE`.
-1. Run `python devscripts/run_tests.py YourExtractor`. This *may fail* at first, but you can continually re-run it until you're done. Upon failure, it will output the missing fields and/or correct values which you can copy. If you decide to add more than one test, the tests will then be named `YourExtractor`, `YourExtractor_1`, `YourExtractor_2`, etc. Note that tests with an `only_matching` key in the test's dict are not included in the count. You can also run all the tests in one go with `YourExtractor_all`
+1. Run `python3 devscripts/run_tests.py YourExtractor`. This *may fail* at first, but you can continually re-run it until you're done. Upon failure, it will output the missing fields and/or correct values which you can copy. If you decide to add more than one test, the tests will then be named `YourExtractor`, `YourExtractor_1`, `YourExtractor_2`, etc. Note that tests with an `only_matching` key in the test's dict are not included in the count. You can also run all the tests in one go with `YourExtractor_all`
 1. Make sure you have at least one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the particular test is disabled from running.
 1. Have a look at [`yt_dlp/extractor/common.py`](yt_dlp/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](yt_dlp/extractor/common.py#L119-L440). Add tests and code for as many as you want.
 1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
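The last context line above points at flake8's quickstart; a typical invocation against a single extractor module (file name illustrative) would be:

```bash
python3 -m flake8 yt_dlp/extractor/yourextractor.py
```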
@@ -237,7 +237,7 @@ ## Adding support for a new site

 In any case, thank you very much for your contributions!

-**Tip:** To test extractors that require login information, create a file `test/local_parameters.json` and add `"usenetrc": true` or your username and password in it:
+**Tip:** To test extractors that require login information, create a file `test/local_parameters.json` and add `"usenetrc": true` or your `username`&`password` or `cookiefile`/`cookiesfrombrowser` in it:
 ```json
 {
     "username": "your user name",
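As an illustration of the options the new wording mentions (not part of the diff; the cookie path is a placeholder), a cookie-based `test/local_parameters.json` could be created like this:

```bash
# Cookie-based test parameters instead of username/password credentials.
cat > test/local_parameters.json << 'EOF'
{
    "cookiefile": "cookies.txt"
}
EOF
```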
@@ -264,7 +264,7 @@ ### Mandatory and optional metafields

 For pornographic sites, appropriate `age_limit` must also be returned.

-The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract usefull information with `--ignore-no-formats-error` - e.g. when the video is a live stream that has not started yet.
+The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract useful information with `--ignore-no-formats-error` - e.g. when the video is a live stream that has not started yet.

 [Any field](yt_dlp/extractor/common.py#219-L426) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.

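A usage note on the flag mentioned above (an editorial sketch; the URL is a placeholder): `--ignore-no-formats-error` downgrades the missing-formats error so metadata-only extraction can still succeed, e.g. for a live stream that has not started yet:

```bash
# Write the info dict without downloading any media.
yt-dlp --ignore-no-formats-error --write-info-json --skip-download \
    "https://example.com/upcoming-livestream"
```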
CONTRIBUTORS (58 changes)

@@ -542,3 +542,61 @@ prettykool
 S-Aarab
 sonmezberkay
 TSRBerry
+114514ns
+agibson-fl
+alard
+alien-developers
+antonkesy
+ArnauvGilotra
+Arthurszzz
+Bibhav48
+Bl4Cc4t
+boredzo
+Caesim404
+chkuendig
+chtk
+Danish-H
+dasidiot
+diman8
+divStar
+DmitryScaletta
+feederbox826
+gmes78
+gonzalezjo
+hui1601
+infanf
+jazz1611
+jingtra
+jkmartindale
+johnvictorfs
+llistochek
+marcdumais
+martinxyz
+michal-repo
+mrmedieval
+nbr23
+Nicals
+Noor-5
+NurTasin
+pompos02
+Pranaxcau
+pwaldhauer
+RaduManole
+RalphORama
+rrgomes
+ruiminggu
+rvsit
+sefidel
+shmohawk
+Snack-X
+src-tinkerer
+stilor
+syntaxsurge
+t-nil
+ufukk
+vista-narvas
+x11x
+xpadev-net
+Xpl0itU
+YoshichikaAAA
+zhijinwuu
Changelog.md (230 changes)

@@ -4,6 +4,228 @@ # Changelog
 # To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
 -->

+### 2024.03.10
+
+#### Core changes
+- [Add `--compat-options 2023`](https://github.com/yt-dlp/yt-dlp/commit/3725b4f0c93ca3943e6300013a9670e4ab757fda) ([#9084](https://github.com/yt-dlp/yt-dlp/issues/9084)) by [Grub4K](https://github.com/Grub4K) (With fixes in [ffff1bc](https://github.com/yt-dlp/yt-dlp/commit/ffff1bc6598fc7a9258e51bc153cab812467f9f9) by [pukkandan](https://github.com/pukkandan))
+- [Create `ydl._request_director` when needed](https://github.com/yt-dlp/yt-dlp/commit/069b2aedae2279668b6051627a81fc4fbd9c146a) by [pukkandan](https://github.com/pukkandan) (With fixes in [dbd8b1b](https://github.com/yt-dlp/yt-dlp/commit/dbd8b1bff9afd8f05f982bcd52c20bc173c266ca) by [Grub4k](https://github.com/Grub4k))
+- [Don't select storyboard formats as fallback](https://github.com/yt-dlp/yt-dlp/commit/d63eae7e7ffb1f3e733e552b9e5e82355bfba214) by [bashonly](https://github.com/bashonly)
+- [Handle `--load-info-json` format selection errors](https://github.com/yt-dlp/yt-dlp/commit/263a4b55ac17a796e8991ca8d2d86a3c349f8a60) ([#9392](https://github.com/yt-dlp/yt-dlp/issues/9392)) by [bashonly](https://github.com/bashonly)
+- [Warn user when not launching through shell on Windows](https://github.com/yt-dlp/yt-dlp/commit/6a6cdcd1824a14e3b336332c8f31f65497b8c4b8) ([#9250](https://github.com/yt-dlp/yt-dlp/issues/9250)) by [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
+- **cookies**
+    - [Fix `--cookies-from-browser` for `snap` Firefox](https://github.com/yt-dlp/yt-dlp/commit/cbed249aaa053a3f425b9bafc97f8dbd71c44487) ([#9016](https://github.com/yt-dlp/yt-dlp/issues/9016)) by [Grub4K](https://github.com/Grub4K)
+    - [Fix `--cookies-from-browser` with macOS Firefox profiles](https://github.com/yt-dlp/yt-dlp/commit/85b33f5c163f60dbd089a6b9bc2ba1366d3ddf93) ([#8909](https://github.com/yt-dlp/yt-dlp/issues/8909)) by [RalphORama](https://github.com/RalphORama)
+    - [Improve error message for Windows `--cookies-from-browser chrome` issue](https://github.com/yt-dlp/yt-dlp/commit/2792092afd367e39251ace1fb2819c855ab8919f) ([#9080](https://github.com/yt-dlp/yt-dlp/issues/9080)) by [Grub4K](https://github.com/Grub4K)
+- **plugins**: [Handle `PermissionError`](https://github.com/yt-dlp/yt-dlp/commit/9a8afadd172b7cab143f0049959fa64973589d94) ([#9229](https://github.com/yt-dlp/yt-dlp/issues/9229)) by [pukkandan](https://github.com/pukkandan), [syntaxsurge](https://github.com/syntaxsurge)
+- **utils**
+    - [Improve `repr` of `DateRange`, `match_filter_func`](https://github.com/yt-dlp/yt-dlp/commit/45491a2a30da4d1723cfa9288cb664813bb09afb) by [pukkandan](https://github.com/pukkandan)
+    - `traverse_obj`: [Support `xml.etree.ElementTree.Element`](https://github.com/yt-dlp/yt-dlp/commit/ffbd4f2a02fee387ea5e0a267ce32df5259111ac) ([#8911](https://github.com/yt-dlp/yt-dlp/issues/8911)) by [Grub4K](https://github.com/Grub4K)
+- **webvtt**: [Don't parse single fragment files](https://github.com/yt-dlp/yt-dlp/commit/f24e44e8cbd88ce338d52f594a19330f64d38b50) ([#9034](https://github.com/yt-dlp/yt-dlp/issues/9034)) by [seproDev](https://github.com/seproDev)
+
+#### Extractor changes
+- [Migrate commonly plural fields to lists](https://github.com/yt-dlp/yt-dlp/commit/104a7b5a46dc1805157fb4cc11c05876934d37c1) ([#8917](https://github.com/yt-dlp/yt-dlp/issues/8917)) by [llistochek](https://github.com/llistochek), [pukkandan](https://github.com/pukkandan) (With fixes in [b136e2a](https://github.com/yt-dlp/yt-dlp/commit/b136e2af341f7a88028aea4c5cd50efe2fa9b182) by [bashonly](https://github.com/bashonly))
+- [Support multi-period MPD streams](https://github.com/yt-dlp/yt-dlp/commit/4ce57d3b873c2887814cbec03d029533e82f7db5) ([#6654](https://github.com/yt-dlp/yt-dlp/issues/6654)) by [alard](https://github.com/alard), [pukkandan](https://github.com/pukkandan)
+- **abematv**
+    - [Fix extraction with cache](https://github.com/yt-dlp/yt-dlp/commit/c51316f8a69fbd0080f2720777d42ab438e254a3) ([#8895](https://github.com/yt-dlp/yt-dlp/issues/8895)) by [sefidel](https://github.com/sefidel)
+    - [Support login for playlists](https://github.com/yt-dlp/yt-dlp/commit/8226a3818f804478c756cf460baa9bf3a3b062a5) ([#8901](https://github.com/yt-dlp/yt-dlp/issues/8901)) by [sefidel](https://github.com/sefidel)
+- **adn**
+    - [Add support for German site](https://github.com/yt-dlp/yt-dlp/commit/5eb1458be4767385a9bf1d570ff08e46100cbaa2) ([#8708](https://github.com/yt-dlp/yt-dlp/issues/8708)) by [infanf](https://github.com/infanf)
+    - [Improve auth error handling](https://github.com/yt-dlp/yt-dlp/commit/9526b1f179d19f75284eceaa5e0ee381af18cf19) ([#9068](https://github.com/yt-dlp/yt-dlp/issues/9068)) by [infanf](https://github.com/infanf)
+- **aenetworks**: [Rating should be optional for AP extraction](https://github.com/yt-dlp/yt-dlp/commit/014cb5774d7afe624b6eb4e07f7be924b9e5e186) ([#9005](https://github.com/yt-dlp/yt-dlp/issues/9005)) by [agibson-fl](https://github.com/agibson-fl)
+- **altcensored**: channel: [Fix playlist extraction](https://github.com/yt-dlp/yt-dlp/commit/e28e135d6fd6a430fed3e20dfe1a8c8bbc5f9185) ([#9297](https://github.com/yt-dlp/yt-dlp/issues/9297)) by [marcdumais](https://github.com/marcdumais)
+- **amadeustv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/e641aab7a61df7406df60ebfe0c77bd5186b2b41) ([#8744](https://github.com/yt-dlp/yt-dlp/issues/8744)) by [ArnauvGilotra](https://github.com/ArnauvGilotra)
+- **ant1newsgrembed**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/1ed5ee2f045f717e814f84ba461dadc58e712266) ([#9191](https://github.com/yt-dlp/yt-dlp/issues/9191)) by [seproDev](https://github.com/seproDev)
+- **archiveorg**: [Fix format URL encoding](https://github.com/yt-dlp/yt-dlp/commit/3894ab9574748188bbacbd925a3971eda6fa2bb0) ([#9279](https://github.com/yt-dlp/yt-dlp/issues/9279)) by [bashonly](https://github.com/bashonly)
+- **ard**
+    - mediathek
+        - [Revert to using old id](https://github.com/yt-dlp/yt-dlp/commit/b6951271ac014761c9c317b9cecd5e8e139cfa7c) ([#8916](https://github.com/yt-dlp/yt-dlp/issues/8916)) by [Grub4K](https://github.com/Grub4K)
+        - [Support cookies to verify age](https://github.com/yt-dlp/yt-dlp/commit/c099ec9392b0283dde34b290d1a04158ad8eb882) ([#9037](https://github.com/yt-dlp/yt-dlp/issues/9037)) by [StefanLobbenmeier](https://github.com/StefanLobbenmeier)
+- **art19**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/999ea80beb053491089d256104c4188aced3110f) ([#9099](https://github.com/yt-dlp/yt-dlp/issues/9099)) by [seproDev](https://github.com/seproDev)
+- **artetv**: [Separate closed captions](https://github.com/yt-dlp/yt-dlp/commit/393b487a4ea391c44e811505ec98531031d7e81e) ([#8231](https://github.com/yt-dlp/yt-dlp/issues/8231)) by [Nicals](https://github.com/Nicals), [seproDev](https://github.com/seproDev)
+- **asobichannel**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/12f042740550c06552819374e2251deb7a519bab) ([#8700](https://github.com/yt-dlp/yt-dlp/issues/8700)) by [Snack-X](https://github.com/Snack-X)
+- **bigo**: [Fix JSON extraction](https://github.com/yt-dlp/yt-dlp/commit/85a2d07c1f82c2082b568963d1c32ad3fc848f61) ([#8893](https://github.com/yt-dlp/yt-dlp/issues/8893)) by [DmitryScaletta](https://github.com/DmitryScaletta)
+- **bilibili**
+    - [Add referer header and fix metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/1713c882730a928ac344c099874d2093fc2c8b51) ([#8832](https://github.com/yt-dlp/yt-dlp/issues/8832)) by [SirElderling](https://github.com/SirElderling) (With fixes in [f1570ab](https://github.com/yt-dlp/yt-dlp/commit/f1570ab84d5f49564256c620063d2d3e9ed4acf0) by [TobiX](https://github.com/TobiX))
+    - [Support `--no-playlist`](https://github.com/yt-dlp/yt-dlp/commit/e439693f729daf6fb15457baea1bca10ef5da34d) ([#9139](https://github.com/yt-dlp/yt-dlp/issues/9139)) by [c-basalt](https://github.com/c-basalt)
+- **bilibilisearch**: [Set cookie to fix extraction](https://github.com/yt-dlp/yt-dlp/commit/ffa017cfc5973b265c92248546fcf5020dc43eaf) ([#9119](https://github.com/yt-dlp/yt-dlp/issues/9119)) by [c-basalt](https://github.com/c-basalt)
+- **biliintl**: [Fix and improve subtitles extraction](https://github.com/yt-dlp/yt-dlp/commit/cf6413e840476c15e5b166dc2f7cc2a90a4a9aad) ([#7077](https://github.com/yt-dlp/yt-dlp/issues/7077)) by [dirkf](https://github.com/dirkf), [HobbyistDev](https://github.com/HobbyistDev), [itachi-19](https://github.com/itachi-19), [seproDev](https://github.com/seproDev)
+- **boosty**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/540b68298192874c75ad5ee4589bed64d02a7d55) ([#9144](https://github.com/yt-dlp/yt-dlp/issues/9144)) by [un-def](https://github.com/un-def)
+- **ccma**: [Extract 1080p DASH formats](https://github.com/yt-dlp/yt-dlp/commit/4253e3b7f483127bd812bdac02466f4a5b47ff34) ([#9130](https://github.com/yt-dlp/yt-dlp/issues/9130)) by [seproDev](https://github.com/seproDev)
+- **cctv**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/6ad11fef65474bcf70f3a8556850d93c141e44a2) ([#9325](https://github.com/yt-dlp/yt-dlp/issues/9325)) by [src-tinkerer](https://github.com/src-tinkerer)
+- **chzzk**
+    - [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/ba6b0c8261e9f0a6373885736ff90a89dd1fb614) ([#8887](https://github.com/yt-dlp/yt-dlp/issues/8887)) by [DmitryScaletta](https://github.com/DmitryScaletta)
+    - live: [Support `--wait-for-video`](https://github.com/yt-dlp/yt-dlp/commit/804f2366117b7065552a1c3cddb9ec19b688a5c1) ([#9309](https://github.com/yt-dlp/yt-dlp/issues/9309)) by [hui1601](https://github.com/hui1601)
+- **cineverse**: [Detect when login required](https://github.com/yt-dlp/yt-dlp/commit/fc2cc626f07328a6c71b5e21853e4cfa7b1e6256) ([#9081](https://github.com/yt-dlp/yt-dlp/issues/9081)) by [garret1317](https://github.com/garret1317)
+- **cloudflarestream**
+    - [Extract subtitles](https://github.com/yt-dlp/yt-dlp/commit/4d9dc0abe24ad5d9d22a16f40fc61137dcd103f7) ([#9007](https://github.com/yt-dlp/yt-dlp/issues/9007)) by [Bibhav48](https://github.com/Bibhav48)
+    - [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/f3d5face83f948c24bcb91e06d4fa6e8622d7d79) ([#9280](https://github.com/yt-dlp/yt-dlp/issues/9280)) by [bashonly](https://github.com/bashonly)
+    - [Improve embed detection](https://github.com/yt-dlp/yt-dlp/commit/464c919ea82aefdf35f138a1ab2dd0bb8fb7fd0e) ([#9287](https://github.com/yt-dlp/yt-dlp/issues/9287)) by [bashonly](https://github.com/bashonly)
+- **cloudycdn, lsm**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/5dda3b291f59f388f953337e9fb09a94b64aaf34) ([#8643](https://github.com/yt-dlp/yt-dlp/issues/8643)) by [Caesim404](https://github.com/Caesim404)
+- **cnbc**: [Overhaul extractors](https://github.com/yt-dlp/yt-dlp/commit/998dffb5a2343ec709b3d6bbf2bf019649080239) ([#8741](https://github.com/yt-dlp/yt-dlp/issues/8741)) by [gonzalezjo](https://github.com/gonzalezjo), [Noor-5](https://github.com/Noor-5), [ruiminggu](https://github.com/ruiminggu), [seproDev](https://github.com/seproDev), [zhijinwuu](https://github.com/zhijinwuu)
+- **craftsy**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/96f3924bac174f2fd401f86f78e77d7e0c5ee008) ([#9384](https://github.com/yt-dlp/yt-dlp/issues/9384)) by [bashonly](https://github.com/bashonly)
+- **crooksandliars**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/03536126d32bd861e38536371f0cd5f1b71dcb7a) ([#9192](https://github.com/yt-dlp/yt-dlp/issues/9192)) by [seproDev](https://github.com/seproDev)
+- **crtvg**: [Fix `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/785ab1af7f131e73444634ad57b39478651a43d3) ([#9404](https://github.com/yt-dlp/yt-dlp/issues/9404)) by [Xpl0itU](https://github.com/Xpl0itU)
+- **dailymotion**: [Support search](https://github.com/yt-dlp/yt-dlp/commit/11ffa92a61e5847b3dfa8975f91ecb3ac2178841) ([#8292](https://github.com/yt-dlp/yt-dlp/issues/8292)) by [drzraf](https://github.com/drzraf), [seproDev](https://github.com/seproDev)
+- **douyin**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/9ff946645568e71046487571eefa9cb524a5189b) ([#9239](https://github.com/yt-dlp/yt-dlp/issues/9239)) by [114514ns](https://github.com/114514ns), [bashonly](https://github.com/bashonly) (With fixes in [e546e5d](https://github.com/yt-dlp/yt-dlp/commit/e546e5d3b33a50075e574a2e7b8eda7ea874d21e) by [bashonly](https://github.com/bashonly))
+- **duboku**: [Fix m3u8 formats extraction](https://github.com/yt-dlp/yt-dlp/commit/d3d4187da90a6b85f4ebae4bb07693cc9b412d75) ([#9161](https://github.com/yt-dlp/yt-dlp/issues/9161)) by [DmitryScaletta](https://github.com/DmitryScaletta)
+- **dumpert**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/eedb38ce4093500e19279d50b708fb9c18bf4dbf) ([#9320](https://github.com/yt-dlp/yt-dlp/issues/9320)) by [rvsit](https://github.com/rvsit)
+- **elementorembed**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/6171b050d70435008e64fa06aa6f19c4e5bec75f) ([#8948](https://github.com/yt-dlp/yt-dlp/issues/8948)) by [pompos02](https://github.com/pompos02), [seproDev](https://github.com/seproDev)
+- **eporner**: [Extract AV1 formats](https://github.com/yt-dlp/yt-dlp/commit/96d0f8c1cb8aec250c5614bfde6b5fb95f10819b) ([#9028](https://github.com/yt-dlp/yt-dlp/issues/9028)) by [michal-repo](https://github.com/michal-repo)
+- **errjupiter**
+    - [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a514cc2feb1c3b265b19acab11487acad8bb3ab0) ([#8549](https://github.com/yt-dlp/yt-dlp/issues/8549)) by [glensc](https://github.com/glensc)
+    - [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/80ed8bdeba5a945f127ef9ab055a4823329a1210) ([#9218](https://github.com/yt-dlp/yt-dlp/issues/9218)) by [glensc](https://github.com/glensc)
+- **facebook**
+    - [Add new ID format](https://github.com/yt-dlp/yt-dlp/commit/cf9af2c7f1fedd881a157b3fbe725e5494b00924) ([#3824](https://github.com/yt-dlp/yt-dlp/issues/3824)) by [kclauhk](https://github.com/kclauhk), [Wikidepia](https://github.com/Wikidepia)
+    - [Improve extraction](https://github.com/yt-dlp/yt-dlp/commit/2e30b5567b5c6113d46b39163db5b044aea8667e) by [jingtra](https://github.com/jingtra), [ringus1](https://github.com/ringus1)
+    - [Improve thumbnail extraction](https://github.com/yt-dlp/yt-dlp/commit/3c4d3ee491b0ec22ed3cade51d943d3d27141ba7) ([#9060](https://github.com/yt-dlp/yt-dlp/issues/9060)) by [kclauhk](https://github.com/kclauhk)
+    - [Set format HTTP chunk size](https://github.com/yt-dlp/yt-dlp/commit/5b68c478fb0b93ea6b8fac23f50e12217fa063db) ([#9058](https://github.com/yt-dlp/yt-dlp/issues/9058)) by [bashonly](https://github.com/bashonly), [kclauhk](https://github.com/kclauhk)
+    - [Support events](https://github.com/yt-dlp/yt-dlp/commit/9b5efaf86b99a2664fff9fc725d275f766c3221d) ([#9055](https://github.com/yt-dlp/yt-dlp/issues/9055)) by [kclauhk](https://github.com/kclauhk)
+    - [Support permalink URLs](https://github.com/yt-dlp/yt-dlp/commit/87286e93af949c4e6a0f8ba34af6a1ab5aa102b6) ([#9061](https://github.com/yt-dlp/yt-dlp/issues/9061)) by [kclauhk](https://github.com/kclauhk)
+    - ads: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a40b0070c2a00d3ed839897462171a82323aa875) ([#8870](https://github.com/yt-dlp/yt-dlp/issues/8870)) by [kclauhk](https://github.com/kclauhk)
+- **flextv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/4f043479090dc8a7e06e0bb53691e5414320dfb2) ([#9178](https://github.com/yt-dlp/yt-dlp/issues/9178)) by [DmitryScaletta](https://github.com/DmitryScaletta)
+- **floatplane**: [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/9cd90447907a59c8a2727583f4a755fb23ed8cd3) ([#8934](https://github.com/yt-dlp/yt-dlp/issues/8934)) by [chtk](https://github.com/chtk)
+- **francetv**
+    - [Fix DAI livestreams](https://github.com/yt-dlp/yt-dlp/commit/e4fbe5f886a6693f2466877c12e99c30c5442ace) ([#9380](https://github.com/yt-dlp/yt-dlp/issues/9380)) by [bashonly](https://github.com/bashonly)
+    - [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/9749ac7fecbfda391afbadf2870797ce0e382622) ([#9333](https://github.com/yt-dlp/yt-dlp/issues/9333)) by [bashonly](https://github.com/bashonly)
+    - [Fix m3u8 formats extraction](https://github.com/yt-dlp/yt-dlp/commit/ede624d1db649f5a4b61f8abbb746f365322de27) ([#9347](https://github.com/yt-dlp/yt-dlp/issues/9347)) by [bashonly](https://github.com/bashonly)
+- **funk**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/cd0443fb14e2ed805abb02792473457553a123d1) ([#9194](https://github.com/yt-dlp/yt-dlp/issues/9194)) by [seproDev](https://github.com/seproDev)
+- **generic**: [Follow https redirects properly](https://github.com/yt-dlp/yt-dlp/commit/c8c9039e640495700f76a13496e3418bdd4382ba) ([#9121](https://github.com/yt-dlp/yt-dlp/issues/9121)) by [seproDev](https://github.com/seproDev)
+- **getcourseru**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/4310b6650eeb5630295f4591b37720877878c57a) ([#8873](https://github.com/yt-dlp/yt-dlp/issues/8873)) by [divStar](https://github.com/divStar), [seproDev](https://github.com/seproDev)
+- **gofile**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/77c2472ca1ef9050a66aa68bc5fa1bee88706c66) ([#9074](https://github.com/yt-dlp/yt-dlp/issues/9074)) by [jazz1611](https://github.com/jazz1611)
+- **googledrive**: [Fix source file extraction](https://github.com/yt-dlp/yt-dlp/commit/5498729c59b03a9511c64552da3ba2f802166f8d) ([#8990](https://github.com/yt-dlp/yt-dlp/issues/8990)) by [jazz1611](https://github.com/jazz1611)
+- **goplay**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/7e90e34fa4617b53f8c8a9e69f460508cb1f51b0) ([#6654](https://github.com/yt-dlp/yt-dlp/issues/6654)) by [alard](https://github.com/alard)
+- **gopro**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/4a07a455bbf7acf87550053bbba949c828e350ba) ([#9019](https://github.com/yt-dlp/yt-dlp/issues/9019)) by [stilor](https://github.com/stilor)
+- **ilpost**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/aa5dcc4ee65916a36cbe1b1b5b29b9110c3163ed) ([#9001](https://github.com/yt-dlp/yt-dlp/issues/9001)) by [CapacitorSet](https://github.com/CapacitorSet)
+- **jiosaavnsong**: [Support more bitrates](https://github.com/yt-dlp/yt-dlp/commit/5154dc0a687528f995cde22b5ff63f82c740e98a) ([#8834](https://github.com/yt-dlp/yt-dlp/issues/8834)) by [alien-developers](https://github.com/alien-developers), [bashonly](https://github.com/bashonly)
+- **kukululive**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/20cdad5a2c0499d5a6746f5466a2ab0c97b75884) ([#8877](https://github.com/yt-dlp/yt-dlp/issues/8877)) by [DmitryScaletta](https://github.com/DmitryScaletta)
+- **lefigarovideoembed**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/9401736fd08767c58af45a1e36ff5929c5fa1ac9) ([#9198](https://github.com/yt-dlp/yt-dlp/issues/9198)) by [seproDev](https://github.com/seproDev)
+- **linkedin**: [Fix metadata and extract subtitles](https://github.com/yt-dlp/yt-dlp/commit/017adb28e7fe7b8c8fc472332d86740f31141519) ([#9056](https://github.com/yt-dlp/yt-dlp/issues/9056)) by [barsnick](https://github.com/barsnick)
+- **magellantv**: [Support episodes](https://github.com/yt-dlp/yt-dlp/commit/3dc9232e1aa58fe3c2d8cafb50e8162d6f0e891e) ([#9199](https://github.com/yt-dlp/yt-dlp/issues/9199)) by [seproDev](https://github.com/seproDev)
+- **magentamusik**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/5e2e24b2c5795756d81785b06b10723ddb6db7b2) ([#7790](https://github.com/yt-dlp/yt-dlp/issues/7790)) by [pwaldhauer](https://github.com/pwaldhauer), [seproDev](https://github.com/seproDev)
+- **medaltv**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/02e343f6ef6d7b3f9087ff69e4a1db0b4b4a5c5d) ([#9098](https://github.com/yt-dlp/yt-dlp/issues/9098)) by [Danish-H](https://github.com/Danish-H)
+- **mlbarticle**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/50e06e21a68e336198198bda332b8e7d2314f201) ([#9021](https://github.com/yt-dlp/yt-dlp/issues/9021)) by [HobbyistDev](https://github.com/HobbyistDev)
+- **motherless**: [Support uploader playlists](https://github.com/yt-dlp/yt-dlp/commit/9f1e9dab21bbe651544c8f4663b0e615dc450e4d) ([#8994](https://github.com/yt-dlp/yt-dlp/issues/8994)) by [dasidiot](https://github.com/dasidiot)
+- **mujrozhlas**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/4170b3d7120e06db3391eef39c5add18a1ddf2c3) ([#9306](https://github.com/yt-dlp/yt-dlp/issues/9306)) by [bashonly](https://github.com/bashonly)
+- **mx3**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/5a63454b3637b3603434026cddfeac509218b90e) ([#8736](https://github.com/yt-dlp/yt-dlp/issues/8736)) by [martinxyz](https://github.com/martinxyz)
+- **naver**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/a281beba8d8f007cf220f96dd1d9412bb070c7d8) ([#8883](https://github.com/yt-dlp/yt-dlp/issues/8883)) by [seproDev](https://github.com/seproDev)
+- **nebula**: [Support podcasts](https://github.com/yt-dlp/yt-dlp/commit/0de09c5b9ed619d4a93d7c451c6ddff0381de808) ([#9140](https://github.com/yt-dlp/yt-dlp/issues/9140)) by [c-basalt](https://github.com/c-basalt), [seproDev](https://github.com/seproDev)
+- **nerdcubedfeed**: [Overhaul extractor](https://github.com/yt-dlp/yt-dlp/commit/29a74a6126101aabaa1726ae41b1ca55cf26e7a7) ([#9269](https://github.com/yt-dlp/yt-dlp/issues/9269)) by [seproDev](https://github.com/seproDev)
+- **newgrounds**
+    - [Fix login and clean up extraction](https://github.com/yt-dlp/yt-dlp/commit/0fcefb92f3ebfc5cada19c1e85a715f020d0f333) ([#9356](https://github.com/yt-dlp/yt-dlp/issues/9356)) by [Grub4K](https://github.com/Grub4K), [mrmedieval](https://github.com/mrmedieval)
+    - user: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/3e083191cdc34dd8c482da9a9b4bc682f824cb9d) ([#9046](https://github.com/yt-dlp/yt-dlp/issues/9046)) by [u-spec-png](https://github.com/u-spec-png)
+- **nfb**: [Add support for onf.ca and series](https://github.com/yt-dlp/yt-dlp/commit/4b8b0dded8c65cd5b2ab2e858058ba98c9bf49ff) ([#8997](https://github.com/yt-dlp/yt-dlp/issues/8997)) by [bashonly](https://github.com/bashonly), [rrgomes](https://github.com/rrgomes)
+- **nhkradiru**: [Extract extended description](https://github.com/yt-dlp/yt-dlp/commit/4392447d9404e3c25cfeb8f5bdfff31b0448da39) ([#9162](https://github.com/yt-dlp/yt-dlp/issues/9162)) by [garret1317](https://github.com/garret1317)
+- **nhkradirulive**: [Make metadata extraction non-fatal](https://github.com/yt-dlp/yt-dlp/commit/5af1f19787f7d652fce72dd3ab9536cdd980fe85) ([#8956](https://github.com/yt-dlp/yt-dlp/issues/8956)) by [garret1317](https://github.com/garret1317)
+- **niconico**
+    - [Remove legacy danmaku extraction](https://github.com/yt-dlp/yt-dlp/commit/974d444039c8bbffb57265c6792cd52d169fe1b9) ([#9209](https://github.com/yt-dlp/yt-dlp/issues/9209)) by [pzhlkj6612](https://github.com/pzhlkj6612)
+    - [Support DMS formats](https://github.com/yt-dlp/yt-dlp/commit/aa13a8e3dd3b698cc40ec438988b1ad834e11a41) ([#9282](https://github.com/yt-dlp/yt-dlp/issues/9282)) by [pzhlkj6612](https://github.com/pzhlkj6612), [xpadev-net](https://github.com/xpadev-net) (With fixes in [40966e8](https://github.com/yt-dlp/yt-dlp/commit/40966e8da27bbf770dacf9be9363fcc3ad72cc9f) by [pzhlkj6612](https://github.com/pzhlkj6612))
+- **ninaprotocol**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/62c65bfaf81e04e6746f6fdbafe384eb3edddfbc) ([#8946](https://github.com/yt-dlp/yt-dlp/issues/8946)) by [RaduManole](https://github.com/RaduManole), [seproDev](https://github.com/seproDev)
+- **ninenews**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/43694ce13c5a9f1afca8b02b8b2b9b1576d6503d) ([#8840](https://github.com/yt-dlp/yt-dlp/issues/8840)) by [SirElderling](https://github.com/SirElderling)
+- **nova**: [Fix embed extraction](https://github.com/yt-dlp/yt-dlp/commit/c168d8791d0974a8a8fcb3b4a4bc2d830df51622) ([#9221](https://github.com/yt-dlp/yt-dlp/issues/9221)) by [seproDev](https://github.com/seproDev)
+- **ntvru**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/7a29cbbd5fd7363e7e8535ee1506b7052465d13f) ([#9276](https://github.com/yt-dlp/yt-dlp/issues/9276)) by [bashonly](https://github.com/bashonly), [dirkf](https://github.com/dirkf)
+- **nuum**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/acaf806c15f0a802ba286c23af02a10cf4bd4731) ([#8868](https://github.com/yt-dlp/yt-dlp/issues/8868)) by [DmitryScaletta](https://github.com/DmitryScaletta), [seproDev](https://github.com/seproDev)
+- **nytimes**
+    - [Extract timestamp](https://github.com/yt-dlp/yt-dlp/commit/05420227aaab60a39c0f9ade069c5862be36b1fa) ([#9142](https://github.com/yt-dlp/yt-dlp/issues/9142)) by [SirElderling](https://github.com/SirElderling)
+    - [Overhaul extractors](https://github.com/yt-dlp/yt-dlp/commit/07256b9fee23960799024b95d5972abc7174aa81) ([#9075](https://github.com/yt-dlp/yt-dlp/issues/9075)) by [SirElderling](https://github.com/SirElderling)
+- **onefootball**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/644738ddaa45428cb0babd41ead22454e5a2545e) ([#9222](https://github.com/yt-dlp/yt-dlp/issues/9222)) by [seproDev](https://github.com/seproDev)
+- **openrec**: [Pass referer for m3u8 formats](https://github.com/yt-dlp/yt-dlp/commit/f591e605dfee4085ec007d6d056c943cbcacc429) ([#9253](https://github.com/yt-dlp/yt-dlp/issues/9253)) by [fireattack](https://github.com/fireattack)
+- **orf**: on: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a0d50aabc5462aee302bd3f2663d3a3554875789) ([#9113](https://github.com/yt-dlp/yt-dlp/issues/9113)) by [HobbyistDev](https://github.com/HobbyistDev)
+- **patreon**: [Fix embedded HLS extraction](https://github.com/yt-dlp/yt-dlp/commit/f0e8bc7c60b61fe18b63116c975609d76b904771) ([#8993](https://github.com/yt-dlp/yt-dlp/issues/8993)) by [johnvictorfs](https://github.com/johnvictorfs)
+- **peertube**: [Update instances](https://github.com/yt-dlp/yt-dlp/commit/35d96982f1033e36215d323317981ee17e8ab0d5) ([#9070](https://github.com/yt-dlp/yt-dlp/issues/9070)) by [Chocobozzz](https://github.com/Chocobozzz)
+- **piapro**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/8e6e3651727b0b85764857fc6329fe5e0a3f00de) ([#8999](https://github.com/yt-dlp/yt-dlp/issues/8999)) by [FinnRG](https://github.com/FinnRG)
+- **playsuisse**: [Add login support](https://github.com/yt-dlp/yt-dlp/commit/cae6e461073fb7c32fd32052a3e6721447c469bc) ([#9077](https://github.com/yt-dlp/yt-dlp/issues/9077)) by [chkuendig](https://github.com/chkuendig)
+- **pornhub**: [Fix login support](https://github.com/yt-dlp/yt-dlp/commit/de954c1b4d3a6db8a6525507e65303c7bb03f39f) ([#9227](https://github.com/yt-dlp/yt-dlp/issues/9227)) by [feederbox826](https://github.com/feederbox826)
+- **pr0gramm**: [Enable POL filter and provide tags without login](https://github.com/yt-dlp/yt-dlp/commit/5f25f348f9eb5db842b1ec6799f95bebb7ba35a7) ([#9051](https://github.com/yt-dlp/yt-dlp/issues/9051)) by [Grub4K](https://github.com/Grub4K)
+- **prankcastpost**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a2bac6b7adb7b0e955125838e20bb39eece630ce) ([#8933](https://github.com/yt-dlp/yt-dlp/issues/8933)) by [columndeeply](https://github.com/columndeeply)
+- **radiko**: [Extract more metadata](https://github.com/yt-dlp/yt-dlp/commit/e3ce2b385ec1f03fac9d4210c57fda77134495fc) ([#9115](https://github.com/yt-dlp/yt-dlp/issues/9115)) by [YoshichikaAAA](https://github.com/YoshichikaAAA)
+- **rai**
+    - [Filter unavailable formats](https://github.com/yt-dlp/yt-dlp/commit/f78814923748277e7067b796f25870686fb46205) ([#9189](https://github.com/yt-dlp/yt-dlp/issues/9189)) by [nixxo](https://github.com/nixxo)
+    - [Fix m3u8 formats extraction](https://github.com/yt-dlp/yt-dlp/commit/8f423cf8051fbfeedd57cca00d106012e6e86a97) ([#9291](https://github.com/yt-dlp/yt-dlp/issues/9291)) by [nixxo](https://github.com/nixxo)
+- **redcdnlivx, sejm**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/fcaa2e735b00b15a2b0d9f55f4187c654b4b5b39) ([#8676](https://github.com/yt-dlp/yt-dlp/issues/8676)) by [selfisekai](https://github.com/selfisekai)
+- **redtube**
+    - [Fix formats extraction](https://github.com/yt-dlp/yt-dlp/commit/c91d8b1899403daff6fc15206ad32de8db17fb8f) ([#9076](https://github.com/yt-dlp/yt-dlp/issues/9076)) by [jazz1611](https://github.com/jazz1611)
+    - [Support redtube.com.br URLs](https://github.com/yt-dlp/yt-dlp/commit/4a6ff0b47a700dee3ee5c54804c31965308479ae) ([#9103](https://github.com/yt-dlp/yt-dlp/issues/9103)) by [jazz1611](https://github.com/jazz1611)
+- **ridehome**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/cd7086c0d54ec1d7e02a30bd5bd934bdb2c54642) ([#8875](https://github.com/yt-dlp/yt-dlp/issues/8875)) by [SirElderling](https://github.com/SirElderling)
+- **rinsefmartistplaylist**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/1a36dbad712d359ec1c5b73d9bbbe562c03e9660) ([#8794](https://github.com/yt-dlp/yt-dlp/issues/8794)) by [SirElderling](https://github.com/SirElderling)
+- **roosterteeth**
+    - [Add Brightcove fallback](https://github.com/yt-dlp/yt-dlp/commit/b2cc150ad83ba20ceb2d6e73d09854eed3c2d05c) ([#9403](https://github.com/yt-dlp/yt-dlp/issues/9403)) by [bashonly](https://github.com/bashonly)
+    - [Extract ad-free streams](https://github.com/yt-dlp/yt-dlp/commit/dd29e6e5fdf0f3758cb0829e73749832768f1a4e) ([#9355](https://github.com/yt-dlp/yt-dlp/issues/9355)) by [jkmartindale](https://github.com/jkmartindale)
+    - [Extract release date and timestamp](https://github.com/yt-dlp/yt-dlp/commit/dfd8c0b69683b1c11beea039a96dd2949026c1d7) ([#9393](https://github.com/yt-dlp/yt-dlp/issues/9393)) by [bashonly](https://github.com/bashonly)
+    - [Support bonus features](https://github.com/yt-dlp/yt-dlp/commit/8993721ecb34867b52b79f6e92b233008d1cbe78) ([#9406](https://github.com/yt-dlp/yt-dlp/issues/9406)) by [Bl4Cc4t](https://github.com/Bl4Cc4t)
+- **rule34video**
+    - [Extract `creators`](https://github.com/yt-dlp/yt-dlp/commit/3d9dc2f3590e10abf1561ebdaed96734a740587c) ([#9258](https://github.com/yt-dlp/yt-dlp/issues/9258)) by [gmes78](https://github.com/gmes78)
+    - [Extract more metadata](https://github.com/yt-dlp/yt-dlp/commit/fee2d8d9c38f9b5f0a8df347c1e698983339c34d) ([#7416](https://github.com/yt-dlp/yt-dlp/issues/7416)) by [gmes78](https://github.com/gmes78)
+    - [Fix `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/c0ecceeefe6ebd27452d9d8f20658f83ae121d04) ([#9044](https://github.com/yt-dlp/yt-dlp/issues/9044)) by [gmes78](https://github.com/gmes78)
+- **rumblechannel**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/0023af81fbce01984f35b34ecaf8562739831227) ([#9092](https://github.com/yt-dlp/yt-dlp/issues/9092)) by [Pranaxcau](https://github.com/Pranaxcau), [vista-narvas](https://github.com/vista-narvas)
+- **screencastify**: [Update `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/0bee29493ca8f91a0055a3706c7c94f5860188df) ([#9232](https://github.com/yt-dlp/yt-dlp/issues/9232)) by [seproDev](https://github.com/seproDev)
+- **svtpage**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/ddd4b5e10a653bee78e656107710021c1b82934c) ([#8938](https://github.com/yt-dlp/yt-dlp/issues/8938)) by [diman8](https://github.com/diman8)
+- **swearnet**: [Raise for login required](https://github.com/yt-dlp/yt-dlp/commit/b05640d532c43a52c0a0da096bb2dbd51e105ec0) ([#9281](https://github.com/yt-dlp/yt-dlp/issues/9281)) by [bashonly](https://github.com/bashonly)
+- **tiktok**: [Fix webpage extraction](https://github.com/yt-dlp/yt-dlp/commit/d9b4154cbcb979d7e30af3a73b1bee422aae5aa3) ([#9327](https://github.com/yt-dlp/yt-dlp/issues/9327)) by [bashonly](https://github.com/bashonly)
+- **trtworld**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/8ab84650837e58046430c9f4b615c56a8886e071) ([#8701](https://github.com/yt-dlp/yt-dlp/issues/8701)) by [ufukk](https://github.com/ufukk)
+- **tvp**: [Support livestreams](https://github.com/yt-dlp/yt-dlp/commit/882e3b753c79c7799ce135c3a5edb72494b576af) ([#8860](https://github.com/yt-dlp/yt-dlp/issues/8860)) by [selfisekai](https://github.com/selfisekai)
+- **twitch**: [Fix m3u8 extraction](https://github.com/yt-dlp/yt-dlp/commit/5b8c69ae04444a4c80a5a99917e40f75a116c3b8) ([#8960](https://github.com/yt-dlp/yt-dlp/issues/8960)) by [DmitryScaletta](https://github.com/DmitryScaletta)
+- **twitter**
+    - [Extract bitrate for HLS audio formats](https://github.com/yt-dlp/yt-dlp/commit/28e53d60df9b8aadd52a93504e30e885c9c35262) ([#9257](https://github.com/yt-dlp/yt-dlp/issues/9257)) by [bashonly](https://github.com/bashonly)
+    - [Extract numeric `channel_id`](https://github.com/yt-dlp/yt-dlp/commit/55f1833376505ed1e4be0516b09bb3ea4425e8a4) ([#9263](https://github.com/yt-dlp/yt-dlp/issues/9263)) by [bashonly](https://github.com/bashonly)
+- **txxx**: [Extract thumbnails](https://github.com/yt-dlp/yt-dlp/commit/d79c7e9937c388c68b722ab7450960e43ef776d6) ([#9063](https://github.com/yt-dlp/yt-dlp/issues/9063)) by [shmohawk](https://github.com/shmohawk)
+- **utreon**: [Support playeur.com](https://github.com/yt-dlp/yt-dlp/commit/41d6b61e9852a5b97f47cc8a7718b31fb23f0aea) ([#9182](https://github.com/yt-dlp/yt-dlp/issues/9182)) by [DmitryScaletta](https://github.com/DmitryScaletta)
+- **vbox7**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/67bb70cd700c8d4c3149cd9e0539a5f32c3d1ce6) ([#9100](https://github.com/yt-dlp/yt-dlp/issues/9100)) by [seproDev](https://github.com/seproDev)
+- **viewlift**: [Add support for chorki.com](https://github.com/yt-dlp/yt-dlp/commit/41b6cdb4197aaf7ad82bdad6885eb5d5c64acd74) ([#9095](https://github.com/yt-dlp/yt-dlp/issues/9095)) by [NurTasin](https://github.com/NurTasin)
+- **vimeo**
+    - [Extract `live_status` and `release_timestamp`](https://github.com/yt-dlp/yt-dlp/commit/f0426e9ca57dd14b82e6c13afc17947614f1e8eb) ([#9290](https://github.com/yt-dlp/yt-dlp/issues/9290)) by [pzhlkj6612](https://github.com/pzhlkj6612)
+    - [Fix API headers](https://github.com/yt-dlp/yt-dlp/commit/8e765755f7f4909e1b535e61b7376b2d66e1ba6a) ([#9125](https://github.com/yt-dlp/yt-dlp/issues/9125)) by [bashonly](https://github.com/bashonly)
+    - [Fix login](https://github.com/yt-dlp/yt-dlp/commit/2e8de097ad82da378e97005e8f1ff7e5aebca585) ([#9274](https://github.com/yt-dlp/yt-dlp/issues/9274)) by [bashonly](https://github.com/bashonly)
+- **viously**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/95e82347b398d8bb160767cdd975edecd62cbabd) ([#8927](https://github.com/yt-dlp/yt-dlp/issues/8927)) by [nbr23](https://github.com/nbr23), [seproDev](https://github.com/seproDev)
+- **youtube**
+    - [Better error when all player responses are skipped](https://github.com/yt-dlp/yt-dlp/commit/5eedc208ec89d6284777060c94aadd06502338b9) ([#9083](https://github.com/yt-dlp/yt-dlp/issues/9083)) by [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
+    - [Bump Android and iOS client versions](https://github.com/yt-dlp/yt-dlp/commit/413d3675804599bc8fe419c19e36490fd8f0b30f) ([#9317](https://github.com/yt-dlp/yt-dlp/issues/9317)) by [bashonly](https://github.com/bashonly)
+    - [Further bump client versions](https://github.com/yt-dlp/yt-dlp/commit/7aad06541e543fa3452d3d2513e6f079aad1f99b) ([#9395](https://github.com/yt-dlp/yt-dlp/issues/9395)) by [bashonly](https://github.com/bashonly)
+    - tab: [Fix `tags` extraction](https://github.com/yt-dlp/yt-dlp/commit/8828f4576bd862438d4fbf634f1d6ab18a217b0e) ([#9413](https://github.com/yt-dlp/yt-dlp/issues/9413)) by [x11x](https://github.com/x11x)
+- **zenporn**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/f00c0def7434fac3c88503c2a77c4b2419b8e5ca) ([#8509](https://github.com/yt-dlp/yt-dlp/issues/8509)) by [SirElderling](https://github.com/SirElderling)
+- **zetland**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/2f4b57594673035a59d72f7667588da848820034) ([#9116](https://github.com/yt-dlp/yt-dlp/issues/9116)) by [HobbyistDev](https://github.com/HobbyistDev)
+
+#### Downloader changes
+- **http**: [Reset resume length to handle `FileNotFoundError`](https://github.com/yt-dlp/yt-dlp/commit/2d91b9845621639c53dca7ee9d3d954f3624ba18) ([#8399](https://github.com/yt-dlp/yt-dlp/issues/8399)) by [boredzo](https://github.com/boredzo)
+
+#### Networking changes
+- [Remove `_CompatHTTPError`](https://github.com/yt-dlp/yt-dlp/commit/811d298b231cfa29e75c321b23a91d1c2b17602c) ([#8871](https://github.com/yt-dlp/yt-dlp/issues/8871)) by [coletdjnz](https://github.com/coletdjnz)
+- **Request Handler**
+    - [Remove additional logging handlers on close](https://github.com/yt-dlp/yt-dlp/commit/0085e2bab8465ee7d46d16fcade3ed5e96cc8a48) ([#9032](https://github.com/yt-dlp/yt-dlp/issues/9032)) by [coletdjnz](https://github.com/coletdjnz)
+    - requests: [Apply `remove_dot_segments` to absolute redirect locations](https://github.com/yt-dlp/yt-dlp/commit/35f4f764a786685ea45d84abe1cf1ad3847f4c97) by [coletdjnz](https://github.com/coletdjnz)
+
+#### Misc. changes
+- **build**
+    - [Add `default` optional dependency group](https://github.com/yt-dlp/yt-dlp/commit/cf91400a1dd6cc99b11a6d163e1af73b64d618c9) ([#9295](https://github.com/yt-dlp/yt-dlp/issues/9295)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
+    - [Add transitional `setup.py` and `pyinst.py`](https://github.com/yt-dlp/yt-dlp/commit/0abf2f1f153ab47990edbeee3477dc55f74c7f89) ([#9296](https://github.com/yt-dlp/yt-dlp/issues/9296)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
|
||||||
|
- [Bump `actions/upload-artifact` to v4 and adjust workflows](https://github.com/yt-dlp/yt-dlp/commit/3876429d72afb35247f4b2531eb9b16cfc7e0968) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Bump `conda-incubator/setup-miniconda` to v3](https://github.com/yt-dlp/yt-dlp/commit/b0059f0413a6ba6ab0a3aec1f00188ce083cd8bf) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix `secretstorage` for ARM builds](https://github.com/yt-dlp/yt-dlp/commit/920397634d1e84e76d2cb897bd6d69ba0c6bd5ca) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Migrate to `pyproject.toml` and `hatchling`](https://github.com/yt-dlp/yt-dlp/commit/775cde82dc5b1dc64ab0539a92dd8c7ba6c0ad33) by [bashonly](https://github.com/bashonly) (With fixes in [43cfd46](https://github.com/yt-dlp/yt-dlp/commit/43cfd462c0d01eff22c1d4290aeb96eb1ea2c0e1))
|
||||||
|
- [Move bundle scripts into `bundle` submodule](https://github.com/yt-dlp/yt-dlp/commit/a1b778428991b1779203bac243ef4e9b6baea90c) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Support failed build job re-runs](https://github.com/yt-dlp/yt-dlp/commit/eabbccc439720fba381919a88be4fe4d96464cbd) ([#9277](https://github.com/yt-dlp/yt-dlp/issues/9277)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- Makefile
|
||||||
|
- [Add automated `CODE_FOLDERS` and `CODE_FILES`](https://github.com/yt-dlp/yt-dlp/commit/868d2f60a7cb59b410c8cbfb452cbdb072687b81) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Ensure compatibility with BSD `make`](https://github.com/yt-dlp/yt-dlp/commit/beaa1a44554d04d9fe63a743a5bb4431ca778f28) ([#9210](https://github.com/yt-dlp/yt-dlp/issues/9210)) by [bashonly](https://github.com/bashonly) (With fixes in [73fcfa3](https://github.com/yt-dlp/yt-dlp/commit/73fcfa39f59113a8728249de2c4cee3025f17dc2))
|
||||||
|
- [Fix man pages generated by `pandoc>=3`](https://github.com/yt-dlp/yt-dlp/commit/fb44020fa98e47620b3aa1dab94b4c5b7bfb40bd) ([#7047](https://github.com/yt-dlp/yt-dlp/issues/7047)) by [t-nil](https://github.com/t-nil)
|
||||||
|
- **ci**: [Bump `actions/setup-python` to v5](https://github.com/yt-dlp/yt-dlp/commit/b14e818b37f62e3224da157b3ad768b3f0815fcd) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **cleanup**
|
||||||
|
- [Build files cleanup](https://github.com/yt-dlp/yt-dlp/commit/867f637b95b342e1cb9f1dc3c6cf0ffe727187ce) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix infodict returned fields](https://github.com/yt-dlp/yt-dlp/commit/f4f9f6d00edcac6d4eb2b3fb78bf81326235d492) ([#8906](https://github.com/yt-dlp/yt-dlp/issues/8906)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Fix typo in README.md](https://github.com/yt-dlp/yt-dlp/commit/292d60b1ed3b9fe5bcb2775a894cca99b0f9473e) ([#8894](https://github.com/yt-dlp/yt-dlp/issues/8894)) by [antonkesy](https://github.com/antonkesy)
|
||||||
|
- [Mark broken and remove dead extractors](https://github.com/yt-dlp/yt-dlp/commit/df773c3d5d1cc1f877cf8582f0072e386fc49318) ([#9238](https://github.com/yt-dlp/yt-dlp/issues/9238)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Match both `http` and `https` in `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/a687226b48f71b874fa18b0165ec528d591f53fb) ([#8968](https://github.com/yt-dlp/yt-dlp/issues/8968)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Remove unused code](https://github.com/yt-dlp/yt-dlp/commit/ed3bb2b0a12c44334e0d09481752dabf2ca1dc13) ([#8968](https://github.com/yt-dlp/yt-dlp/issues/8968)) by [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- Miscellaneous
|
||||||
|
- [93240fc](https://github.com/yt-dlp/yt-dlp/commit/93240fc1848de4a94f25844c96e0dcd282ef1d3b) by [bashonly](https://github.com/bashonly), [Grub4k](https://github.com/Grub4k), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- [615a844](https://github.com/yt-dlp/yt-dlp/commit/615a84447e8322720be77a0e64298d7f42848693) by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- **devscripts**
|
||||||
|
- `install_deps`: [Add script and migrate to it](https://github.com/yt-dlp/yt-dlp/commit/b8a433aaca86b15cb9f1a451b0f69371d2fc22a9) by [bashonly](https://github.com/bashonly)
|
||||||
|
- `tomlparse`: [Add makeshift toml parser](https://github.com/yt-dlp/yt-dlp/commit/fd647775e27e030ab17387c249e2ebeba68f8ff0) by [Grub4K](https://github.com/Grub4K)
|
||||||
|
- **docs**: [Misc Cleanup](https://github.com/yt-dlp/yt-dlp/commit/47ab66db0f083a76c7fba0f6e136b21dd5a93e3b) ([#8977](https://github.com/yt-dlp/yt-dlp/issues/8977)) by [Arthurszzz](https://github.com/Arthurszzz), [bashonly](https://github.com/bashonly), [Grub4k](https://github.com/Grub4k), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- **test**
|
||||||
|
- [Skip source address tests if the address cannot be bound to](https://github.com/yt-dlp/yt-dlp/commit/69d31914952dd33082ac7019c6f76b43c45b9d06) ([#8900](https://github.com/yt-dlp/yt-dlp/issues/8900)) by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
- websockets: [Fix timeout test on Windows](https://github.com/yt-dlp/yt-dlp/commit/ac340d0745a9de5d494033e3507ef624ba25add3) ([#9344](https://github.com/yt-dlp/yt-dlp/issues/9344)) by [seproDev](https://github.com/seproDev)
|
||||||
|
|
||||||

### 2023.12.30

#### Core changes

@@ -1936,7 +2158,7 @@ ### 2022.04.08
* [utils] `format_decimal_suffix`: Fix for very large numbers by [s0u1h](https://github.com/s0u1h)
* [utils] `traverse_obj`: Allow filtering by value
* [utils] Add `filter_dict`, `get_first`, `try_call`
-* [utils] ExtractorError: Fix for older python versions
+* [utils] ExtractorError: Fix for older Python versions
* [utils] WebSocketsWrapper: Allow omitting `__enter__` invocation by [Lesmiscore](https://github.com/Lesmiscore)
* [docs] Add an `.editorconfig` file by [fstirlitz](https://github.com/fstirlitz)
* [docs] Clarify the exact `BSD` license of dependencies by [MrRawes](https://github.com/MrRawes)

@@ -3400,7 +3622,7 @@ ### 2021.05.20
* [cleanup] code formatting, youtube tests and readme

### 2021.05.11
-* **Deprecate support for python versions < 3.6**
+* **Deprecate support for Python versions < 3.6**
* **Subtitle extraction from manifests** by [fstirlitz](https://github.com/fstirlitz). See [be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
* **Improve output template:**
    * Allow slicing lists/strings using `field.start:end:step`

@@ -3690,7 +3912,7 @@ ### 2021.02.19
* Remove unnecessary `field_preference` and misuse of `preference` from extractors
* Build improvements:
    * Fix hash output by [shirt](https://github.com/shirt-dev)
-    * Lock python package versions for x86 and use `wheels` by [shirt](https://github.com/shirt-dev)
+    * Lock Python package versions for x86 and use `wheels` by [shirt](https://github.com/shirt-dev)
    * Exclude `vcruntime140.dll` from UPX by [jbruchon](https://github.com/jbruchon)
    * Set version number based on UTC time, not local time
    * Publish on PyPi only if token is set

@@ -3757,7 +3979,7 @@ ### 2021.02.04
* Fix "Default format spec" appearing in quiet mode
* [FormatSort] Allow user to prefer av01 over vp9 (The default is still vp9)
* [FormatSort] fix bug where `quality` had more priority than `hasvid`
-* [pyinst] Automatically detect python architecture and working directory
+* [pyinst] Automatically detect Python architecture and working directory
* Strip out internal fields such as `_filename` from infojson
Makefile

@@ -2,7 +2,7 @@ all: lazy-extractors yt-dlp doc pypi-files
clean: clean-test clean-dist
clean-all: clean clean-cache
completions: completion-bash completion-fish completion-zsh
-doc: README.md CONTRIBUTING.md issuetemplates supportedsites
+doc: README.md CONTRIBUTING.md CONTRIBUTORS issuetemplates supportedsites
ot: offlinetest
tar: yt-dlp.tar.gz

@@ -17,8 +17,8 @@ pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites \
clean-test:
	rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
	*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.meta *.part* *.tmp *.temp *.unknown_video *.ytdl \
-	*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.gif *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 \
-	*.mp4 *.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
+	*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.gif *.jpeg *.jpg *.lrc *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 *.mp4 \
+	*.mpg *.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.ssa *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
clean-dist:
	rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
	yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS

@@ -156,5 +156,14 @@ yt-dlp.tar.gz: all
	Makefile yt-dlp.1 README.txt completions .gitignore \
	setup.cfg yt-dlp yt_dlp pyproject.toml devscripts test

-AUTHORS:
-	git shortlog -s -n HEAD | cut -f2 | sort > AUTHORS
+AUTHORS: Changelog.md
+	@if [ -d '.git' ] && command -v git > /dev/null ; then \
+		echo 'Generating $@ from git commit history' ; \
+		git shortlog -s -n HEAD | cut -f2 | sort > $@ ; \
+	fi
+
+CONTRIBUTORS: Changelog.md
+	@if [ -d '.git' ] && command -v git > /dev/null ; then \
+		echo 'Updating $@ from git commit history' ; \
+		$(PYTHON) devscripts/make_changelog.py -v -c > /dev/null ; \
+	fi
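
The guarded recipes added above follow a simple pattern: the file is only regenerated when running from a git checkout (release tarballs ship without `.git`, so the targets quietly become no-ops there). The same logic as a standalone shell sketch, derived directly from the diff:

```
# Regenerate AUTHORS only when inside a git checkout
if [ -d '.git' ] && command -v git > /dev/null; then
    echo 'Generating AUTHORS from git commit history'
    git shortlog -s -n HEAD | cut -f2 | sort > AUTHORS
fi
```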
README.md

@@ -17,17 +17,15 @@
</div>
<!-- MANPAGE: END EXCLUDED SECTION -->

-yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on the now inactive [youtube-dlc](https://github.com/blackjack4494/yt-dlc). The main focus of this project is adding new features and patches while also keeping up to date with the original project
+yt-dlp is a feature-rich command-line audio/video downloader with support for [thousands of sites](supportedsites.md). The project is a fork of [youtube-dl](https://github.com/ytdl-org/youtube-dl) based on the now inactive [youtube-dlc](https://github.com/blackjack4494/yt-dlc).

<!-- MANPAGE: MOVE "USAGE AND OPTIONS" SECTION HERE -->

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
-* [NEW FEATURES](#new-features)
-* [Differences in default behavior](#differences-in-default-behavior)
* [INSTALLATION](#installation)
    * [Detailed instructions](https://github.com/yt-dlp/yt-dlp/wiki/Installation)
-    * [Update](#update)
    * [Release Files](#release-files)
+    * [Update](#update)
    * [Dependencies](#dependencies)
    * [Compile](#compile)
* [USAGE AND OPTIONS](#usage-and-options)

@@ -65,7 +63,10 @@
    * [Developing Plugins](#developing-plugins)
* [EMBEDDING YT-DLP](#embedding-yt-dlp)
    * [Embedding examples](#embedding-examples)
-* [DEPRECATED OPTIONS](#deprecated-options)
+* [CHANGES FROM YOUTUBE-DL](#changes-from-youtube-dl)
+    * [New features](#new-features)
+    * [Differences in default behavior](#differences-in-default-behavior)
+    * [Deprecated options](#deprecated-options)
* [CONTRIBUTING](CONTRIBUTING.md#contributing-to-yt-dlp)
    * [Opening an Issue](CONTRIBUTING.md#opening-an-issue)
    * [Developer Instructions](CONTRIBUTING.md#developer-instructions)
@@ -74,103 +75,6 @@
<!-- MANPAGE: END EXCLUDED SECTION -->

-# NEW FEATURES

-* Forked from [**yt-dlc@f9401f2**](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee) and merged with [**youtube-dl@be008e6**](https://github.com/ytdl-org/youtube-dl/commit/be008e657d79832642e2158557c899249c9e31cd) ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))

-* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API

-* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will be now preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples))

-* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that NicoNico livestreams are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.

-* **YouTube improvements**:
-    * Supports Clips, Stories (`ytstories:<channel UCID>`), Search (including filters)**\***, YouTube Music Search, Channel-specific search, Search prefixes (`ytsearch:`, `ytsearchdate:`)**\***, Mixes, and Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`, `:ytnotif`)
-    * Fix for [n-sig based throttling](https://github.com/ytdl-org/youtube-dl/issues/29326) **\***
-    * Supports some (but not all) age-gated content without cookies
-    * Download livestreams from the start using `--live-from-start` (*experimental*)
-    * Channel URLs download all uploads of the channel, including shorts and live

-* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]`

-* **Download time range**: Videos can be downloaded partially based on either timestamps or chapters using `--download-sections`

-* **Split video by chapters**: Videos can be split into multiple files based on chapters using `--split-chapters`

-* **Multi-threaded fragment downloads**: Download multiple fragments of m3u8/mpd videos in parallel. Use `--concurrent-fragments` (`-N`) option to set the number of threads used

-* **Aria2c with HLS/DASH**: You can use `aria2c` as the external downloader for DASH(mpd) and HLS(m3u8) formats

-* **New and fixed extractors**: Many new extractors have been added and a lot of existing ones have been fixed. See the [changelog](Changelog.md) or the [list of supported sites](supportedsites.md)

-* **New MSOs**: Philo, Spectrum, SlingTV, Cablevision, RCN etc.

-* **Subtitle extraction from manifests**: Subtitles can be extracted from streaming media manifests. See [commit/be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details

-* **Multiple paths and output templates**: You can give different [output templates](#output-template) and download paths for different types of files. You can also set a temporary path where intermediary files are downloaded to using `--paths` (`-P`)

-* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [CONFIGURATION](#configuration) for details

-* **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata`

-* **Other new options**: Many new options have been added such as `--alias`, `--print`, `--concat-playlist`, `--wait-for-video`, `--retry-sleep`, `--sleep-requests`, `--convert-thumbnails`, `--force-download-archive`, `--force-overwrites`, `--break-match-filter` etc

-* **Improvements**: Regex and other operators in `--format`/`--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio, multiple `--config-locations`, `--exec` at different stages, etc

-* **Plugins**: Extractors and PostProcessors can be loaded from an external file. See [plugins](#plugins) for details

-* **Self updater**: The releases can be updated using `yt-dlp -U`, and downgraded using `--update-to` if required

-* **Automated builds**: [Nightly/master builds](#update-channels) can be used with `--update-to nightly` and `--update-to master`

-See [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/commits) for the full list of changes

-Features marked with a **\*** have been back-ported to youtube-dl

-### Differences in default behavior

-Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc:

-* yt-dlp supports only [Python 3.8+](## "Windows 7"), and *may* remove support for more versions as they [become EOL](https://devguide.python.org/versions/#python-release-cycle); while [youtube-dl still supports Python 2.6+ and 3.2+](https://github.com/ytdl-org/youtube-dl/issues/30568#issue-1118238743)
-* The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details
-* `avconv` is not supported as an alternative to `ffmpeg`
-* yt-dlp stores config files in slightly different locations to youtube-dl. See [CONFIGURATION](#configuration) for a list of correct locations
-* The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s-%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
-* The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
-* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
-* Unlike youtube-dlc, yt-dlp does not allow merging multiple audio/video streams into one file by default (since this conflicts with the use of `-f bv*+ba`). If needed, this feature must be enabled using `--audio-multistreams` and `--video-multistreams`. You can also use `--compat-options multistreams` to enable both
-* `--no-abort-on-error` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead
-* When writing metadata files such as thumbnails, description or infojson, the same information (if available) is also written for playlists. Use `--no-write-playlist-metafiles` or `--compat-options no-playlist-metafiles` to not write these files
-* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-info-json`. Use `--no-embed-info-json` or `--compat-options no-attach-info-json` to revert this
-* Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this
-* `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
-* The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
-* Live chats (if available) are considered as subtitles. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent any live chat/danmaku from downloading
-* YouTube channel URLs download all uploads of the channel. To download only the videos in a specific tab, pass the tab's URL. If the channel does not show the requested tab, an error will be raised. Also, `/live` URLs raise an error if there are no live videos instead of silently downloading the entire channel. You may use `--compat-options no-youtube-channel-redirect` to revert all these redirections
-* Unavailable videos are also listed for YouTube playlists. Use `--compat-options no-youtube-unavailable-videos` to remove this
-* The upload dates extracted from YouTube are in UTC [when available](https://github.com/yt-dlp/yt-dlp/blob/89e4d86171c7b7c997c77d4714542e0383bf0db0/yt_dlp/extractor/youtube.py#L3898-L3900). Use `--compat-options no-youtube-prefer-utc-upload-date` to prefer the non-UTC upload date.
-* If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
-* Thumbnail embedding in `mp4` is done with mutagen if possible. Use `--compat-options embed-thumbnail-atomicparsley` to force the use of AtomicParsley instead
-* Some internal metadata such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this
-* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this
-* `certifi` will be used for SSL root certificates, if installed. If you want to use system certificates (e.g. self-signed), use `--compat-options no-certifi`
-* yt-dlp's sanitization of invalid characters in filenames is different/smarter than in youtube-dl. You can use `--compat-options filename-sanitization` to revert to youtube-dl's behavior
-* yt-dlp tries to parse the external downloader outputs into the standard progress output if possible (Currently implemented: [~~aria2c~~](https://github.com/yt-dlp/yt-dlp/issues/5931)). You can use `--compat-options no-external-downloader-progress` to get the downloader output as-is
-* yt-dlp versions between 2021.09.01 and 2023.01.02 applies `--match-filter` to nested playlists. This was an unintentional side-effect of [8f18ac](https://github.com/yt-dlp/yt-dlp/commit/8f18aca8717bb0dd49054555af8d386e5eda3a88) and is fixed in [d7b460](https://github.com/yt-dlp/yt-dlp/commit/d7b460d0e5fc710950582baed2e3fc616ed98a80). Use `--compat-options playlist-match-filter` to revert this
-* yt-dlp versions between 2021.11.10 and 2023.06.21 estimated `filesize_approx` values for fragmented/manifest formats. This was added for convenience in [f2fe69](https://github.com/yt-dlp/yt-dlp/commit/f2fe69c7b0d208bdb1f6292b4ae92bc1e1a7444a), but was reverted in [0dff8e](https://github.com/yt-dlp/yt-dlp/commit/0dff8e4d1e6e9fb938f4256ea9af7d81f42fd54f) due to the potentially extreme inaccuracy of the estimated values. Use `--compat-options manifest-filesize-approx` to keep extracting the estimated values
-* yt-dlp uses modern http client backends such as `requests`. Use `--compat-options prefer-legacy-http-handler` to prefer the legacy http handler (`urllib`) to be used for standard http requests.
-* The sub-module `swfinterp` is removed.

-For ease of use, a few more compat options are available:

-* `--compat-options all`: Use all compat options (Do NOT use)
-* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter,-manifest-filesize-approx`
-* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter,-manifest-filesize-approx`
-* `--compat-options 2021`: Same as `--compat-options 2022,no-certifi,filename-sanitization,no-youtube-prefer-utc-upload-date`
-* `--compat-options 2022`: Same as `--compat-options 2023,playlist-match-filter,no-external-downloader-progress,prefer-legacy-http-handler,manifest-filesize-approx`
-* `--compat-options 2023`: Currently does nothing. Use this to enable all future compat options

# INSTALLATION

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
@@ -186,41 +90,6 @@ # INSTALLATION
You can install yt-dlp using [the binaries](#release-files), [pip](https://pypi.org/project/yt-dlp) or one using a third-party package manager. See [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation) for detailed instructions

-## UPDATE
-You can use `yt-dlp -U` to update if you are using the [release binaries](#release-files)

-If you [installed with pip](https://github.com/yt-dlp/yt-dlp/wiki/Installation#with-pip), simply re-run the same command that was used to install the program

-For other third-party package managers, see [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation#third-party-package-managers) or refer their documentation

-<a id="update-channels"/>

-There are currently three release channels for binaries: `stable`, `nightly` and `master`.

-* `stable` is the default channel, and many of its changes have been tested by users of the `nightly` and `master` channels.
-* The `nightly` channel has releases scheduled to build every day around midnight UTC, for a snapshot of the project's new patches and changes. This is the **recommended channel for regular users** of yt-dlp. The `nightly` releases are available from [yt-dlp/yt-dlp-nightly-builds](https://github.com/yt-dlp/yt-dlp-nightly-builds/releases) or as development releases of the `yt-dlp` PyPI package (which can be installed with pip's `--pre` flag).
-* The `master` channel features releases that are built after each push to the master branch, and these will have the very latest fixes and additions, but may also be more prone to regressions. They are available from [yt-dlp/yt-dlp-master-builds](https://github.com/yt-dlp/yt-dlp-master-builds/releases).

-When using `--update`/`-U`, a release binary will only update to its current channel.
-`--update-to CHANNEL` can be used to switch to a different channel when a newer version is available. `--update-to [CHANNEL@]TAG` can also be used to upgrade or downgrade to specific tags from a channel.

-You may also use `--update-to <repository>` (`<owner>/<repository>`) to update to a channel on a completely different repository. Be careful with what repository you are updating to though, there is no verification done for binaries from different repositories.

-Example usage:
-* `yt-dlp --update-to master` switch to the `master` channel and update to its latest release
-* `yt-dlp --update-to stable@2023.07.06` upgrade/downgrade to release to `stable` channel tag `2023.07.06`
-* `yt-dlp --update-to 2023.10.07` upgrade/downgrade to tag `2023.10.07` if it exists on the current channel
-* `yt-dlp --update-to example/yt-dlp@2023.09.24` upgrade/downgrade to the release from the `example/yt-dlp` repository, tag `2023.09.24`

-**Important**: Any user experiencing an issue with the `stable` release should install or update to the `nightly` release before submitting a bug report:
-```
-# To update to nightly from stable executable/binary:
-yt-dlp --update-to nightly
-
-# To install nightly with pip:
-python -m pip install -U --pre yt-dlp
-```

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
## RELEASE FILES
@@ -236,7 +105,7 @@ #### Alternatives
File|Description
:---|:---
-[yt-dlp_x86.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_x86.exe)|Windows (Vista SP2+) standalone x86 (32-bit) binary
+[yt-dlp_x86.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_x86.exe)|Windows (Win7 SP1+) standalone x86 (32-bit) binary
[yt-dlp_min.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_min.exe)|Windows (Win7 SP1+) standalone x64 binary built with `py2exe`<br/> ([Not recommended](#standalone-py2exe-builds-windows))
[yt-dlp_linux](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux)|Linux standalone x64 binary
[yt-dlp_linux.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux.zip)|Unpackaged Linux executable (no auto-update)
@@ -267,6 +136,42 @@ #### Misc
**Note**: The manpages, shell completion (autocomplete) files etc. are available inside the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)

+## UPDATE
+You can use `yt-dlp -U` to update if you are using the [release binaries](#release-files)

+If you [installed with pip](https://github.com/yt-dlp/yt-dlp/wiki/Installation#with-pip), simply re-run the same command that was used to install the program

+For other third-party package managers, see [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation#third-party-package-managers) or refer their documentation

+<a id="update-channels"></a>

+There are currently three release channels for binaries: `stable`, `nightly` and `master`.

+* `stable` is the default channel, and many of its changes have been tested by users of the `nightly` and `master` channels.
+* The `nightly` channel has releases scheduled to build every day around midnight UTC, for a snapshot of the project's new patches and changes. This is the **recommended channel for regular users** of yt-dlp. The `nightly` releases are available from [yt-dlp/yt-dlp-nightly-builds](https://github.com/yt-dlp/yt-dlp-nightly-builds/releases) or as development releases of the `yt-dlp` PyPI package (which can be installed with pip's `--pre` flag).
+* The `master` channel features releases that are built after each push to the master branch, and these will have the very latest fixes and additions, but may also be more prone to regressions. They are available from [yt-dlp/yt-dlp-master-builds](https://github.com/yt-dlp/yt-dlp-master-builds/releases).

+When using `--update`/`-U`, a release binary will only update to its current channel.
+`--update-to CHANNEL` can be used to switch to a different channel when a newer version is available. `--update-to [CHANNEL@]TAG` can also be used to upgrade or downgrade to specific tags from a channel.

+You may also use `--update-to <repository>` (`<owner>/<repository>`) to update to a channel on a completely different repository. Be careful with what repository you are updating to though, there is no verification done for binaries from different repositories.

+Example usage:
+* `yt-dlp --update-to master` switch to the `master` channel and update to its latest release
+* `yt-dlp --update-to stable@2023.07.06` upgrade/downgrade to release to `stable` channel tag `2023.07.06`
+* `yt-dlp --update-to 2023.10.07` upgrade/downgrade to tag `2023.10.07` if it exists on the current channel
+* `yt-dlp --update-to example/yt-dlp@2023.09.24` upgrade/downgrade to the release from the `example/yt-dlp` repository, tag `2023.09.24`

+**Important**: Any user experiencing an issue with the `stable` release should install or update to the `nightly` release before submitting a bug report:
+```
+# To update to nightly from stable executable/binary:
+yt-dlp --update-to nightly
+
+# To install nightly with pip:
+python3 -m pip install -U --pre yt-dlp[default]
+```

## DEPENDENCIES
Python versions 3.8+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly.
@@ -283,7 +188,7 @@ ### Strongly recommended
There are bugs in ffmpeg that cause various issues when used alongside yt-dlp. Since ffmpeg is such an important dependency, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds#ffmpeg-static-auto-builds) with patches for some of these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specific issues solved by these builds

-**Important**: What you need is ffmpeg *binary*, **NOT** [the python package of the same name](https://pypi.org/project/ffmpeg)
+**Important**: What you need is ffmpeg *binary*, **NOT** [the Python package of the same name](https://pypi.org/project/ffmpeg)

### Networking
* [**certifi**](https://github.com/certifi/python-certifi)\* - Provides Mozilla's root certificate bundle. Licensed under [MPLv2](https://github.com/certifi/python-certifi/blob/master/LICENSE)
@@ -291,6 +196,15 @@ ### Networking
* [**websockets**](https://github.com/aaugustin/websockets)\* - For downloading over websocket. Licensed under [BSD-3-Clause](https://github.com/aaugustin/websockets/blob/main/LICENSE)
* [**requests**](https://github.com/psf/requests)\* - HTTP library. For HTTPS proxy and persistent connections support. Licensed under [Apache-2.0](https://github.com/psf/requests/blob/main/LICENSE)

+#### Impersonation
+
+The following provide support for impersonating browser requests. This may be required for some sites that employ TLS fingerprinting.
+
+* [**curl_cffi**](https://github.com/yifeikong/curl_cffi) (recommended) - Python binding for [curl-impersonate](https://github.com/lwthiker/curl-impersonate). Provides impersonation targets for Chrome, Edge and Safari. Licensed under [MIT](https://github.com/yifeikong/curl_cffi/blob/main/LICENSE)
+    * Can be installed with the `curl_cffi` group, e.g. `pip install yt-dlp[default,curl_cffi]`
+    * Only included in `yt-dlp.exe`, `yt-dlp_macos` and `yt-dlp_macos_legacy` builds

### Metadata

* [**mutagen**](https://github.com/quodlibet/mutagen)\* - For `--embed-thumbnail` in certain formats. Licensed under [GPLv2+](https://github.com/quodlibet/mutagen/blob/master/COPYING)
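
For a pip-based install, pulling in the impersonation dependency is a one-liner; a minimal sketch using the extras group documented above (quoting guards against shell glob expansion of the brackets):

```
python3 -m pip install -U "yt-dlp[default,curl_cffi]"
```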
@@ -321,7 +235,9 @@ ### Deprecated
## COMPILE

### Standalone PyInstaller Builds
-To build the standalone executable, you must have Python and `pyinstaller` (plus any of yt-dlp's [optional dependencies](#dependencies) if needed). The executable will be built for the same architecture (x86/ARM, 32/64 bit) as the Python used. You can run the following commands:
+To build the standalone executable, you must have Python and `pyinstaller` (plus any of yt-dlp's [optional dependencies](#dependencies) if needed). The executable will be built for the same CPU architecture as the Python used.
+
+You can run the following commands:

```
python3 devscripts/install_deps.py --include pyinstaller

@@ -331,11 +247,11 @@ ### Standalone PyInstaller Builds

On some systems, you may need to use `py` or `python` instead of `python3`.

-`bundle/pyinstaller.py` accepts any arguments that can be passed to `pyinstaller`, such as `--onefile/-F` or `--onedir/-D`, which is further [documented here](https://pyinstaller.org/en/stable/usage.html#what-to-generate).
+`python -m bundle.pyinstaller` accepts any arguments that can be passed to `pyinstaller`, such as `--onefile/-F` or `--onedir/-D`, which is further [documented here](https://pyinstaller.org/en/stable/usage.html#what-to-generate).

**Note**: Pyinstaller versions below 4.4 [do not support](https://github.com/pyinstaller/pyinstaller#requirements-and-tested-platforms) Python installed from the Windows store without using a virtual environment.

-**Important**: Running `pyinstaller` directly **without** using `bundle/pyinstaller.py` is **not** officially supported. This may or may not work correctly.
+**Important**: Running `pyinstaller` directly **instead of** using `python -m bundle.pyinstaller` is **not** officially supported. This may or may not work correctly.

### Platform-independent Binary (UNIX)
You will need the build tools `python` (3.8+), `zip`, `make` (GNU), `pandoc`\* and `pytest`\*.
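
As an illustration, a one-file build via the bundle module might look like the following sketch, assuming the `install_deps.py` step shown above has already run (`-F` is the pyinstaller one-file flag the text mentions):

```
# Build a single-file executable for the current CPU architecture
python -m bundle.pyinstaller -F
```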
@@ -418,7 +334,7 @@ ## General Options:
                                     URLs, but emits an error if this is not
                                     possible instead of searching
    --ignore-config                  Don't load any more configuration files
-                                    except those given by --config-locations.
+                                    except those given to --config-locations.
                                     For backward compatibility, if this option
                                     is found inside the system configuration
                                     file, the user configuration is not loaded.
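
A quick illustration of how the two options interact (hypothetical path and placeholder URL): load exactly one configuration file and nothing else:

```
yt-dlp --ignore-config --config-locations /path/to/portable.conf "URL"
```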
@@ -482,6 +398,10 @@ ## Network Options:
                                     direct connection
    --socket-timeout SECONDS         Time to wait before giving up, in seconds
    --source-address IP              Client-side IP address to bind to
+   --impersonate CLIENT[:OS]        Client to impersonate for requests. E.g.
+                                    chrome, chrome-110, chrome:windows-10. Pass
+                                    --impersonate="" to impersonate any client.
+   --list-impersonate-targets       List available clients to impersonate.
    -4, --force-ipv4                 Make all connections via IPv4
    -6, --force-ipv6                 Make all connections via IPv6
    --enable-file-urls               Enable file:// URLs. This is disabled by
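
A minimal sketch of the new impersonation flags in use (placeholder URL; this requires an impersonation backend such as `curl_cffi` from the Dependencies section):

```
# List the impersonation targets the installed backends provide
yt-dlp --list-impersonate-targets

# Download while impersonating Chrome on Windows 10
yt-dlp --impersonate chrome:windows-10 "URL"
```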
@@ -683,7 +603,7 @@ ## Filesystem Options:
    -o, --output [TYPES:]TEMPLATE    Output filename template; see "OUTPUT
                                     TEMPLATE" for details
    --output-na-placeholder TEXT     Placeholder for unavailable fields in
-                                    "OUTPUT TEMPLATE" (default: "NA")
+                                    --output (default: "NA")
    --restrict-filenames             Restrict filenames to only ASCII characters,
                                     and avoid "&" and spaces in filenames
    --no-restrict-filenames          Allow Unicode characters, "&" and spaces in
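
For instance (placeholder URL), substituting a custom marker for any field a site does not provide:

```
yt-dlp -o "%(uploader)s - %(title)s.%(ext)s" --output-na-placeholder "unknown" "URL"
```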
@@ -1172,12 +1092,12 @@ # CONFIGURATION
You can configure yt-dlp by placing any supported command line option to a configuration file. The configuration is loaded from the following locations:

1. **Main Configuration**:
-    * The file given by `--config-location`
+    * The file given to `--config-location`
1. **Portable Configuration**: (Recommended for portable installations)
    * If using a binary, `yt-dlp.conf` in the same directory as the binary
    * If running from source-code, `yt-dlp.conf` in the parent directory of `yt_dlp`
1. **Home Configuration**:
-    * `yt-dlp.conf` in the home path given by `-P`
+    * `yt-dlp.conf` in the home path given to `-P`
    * If `-P` is not given, the current directory is searched
1. **User Configuration**:
    * `${XDG_CONFIG_HOME}/yt-dlp.conf`
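
A small `yt-dlp.conf` sketch (illustrative options only), since the file format is simply one command-line option per line with `#` comments:

```
# Save into per-uploader folders under ~/Downloads
-P ~/Downloads
-o "%(uploader)s/%(title)s [%(id)s].%(ext)s"

# Embed metadata into the downloaded file
--embed-metadata
```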
@@ -1296,7 +1216,7 @@ # OUTPUT TEMPLATE

Additionally, you can set different output templates for the various metadata files separately from the general output template by specifying the type of file followed by the template separated by a colon `:`. The different file types supported are `subtitle`, `thumbnail`, `description`, `annotation` (deprecated), `infojson`, `link`, `pl_thumbnail`, `pl_description`, `pl_infojson`, `chapter`, `pl_video`. E.g. `-o "%(title)s.%(ext)s" -o "thumbnail:%(title)s\%(title)s.%(ext)s"` will put the thumbnails in a folder with the same name as the video. If any of the templates is empty, that type of file will not be written. E.g. `--write-thumbnail -o "thumbnail:"` will write thumbnails only for playlists and not for video.

-<a id="outtmpl-postprocess-note"/>
+<a id="outtmpl-postprocess-note"></a>

**Note**: Due to post-processing (i.e. merging etc.), the actual output filename might differ. Use `--print after_move:filepath` to get the name after all post-processing is complete.
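
For example (placeholder URL), downloading and then printing the final location of each file once all post-processing is done:

```
yt-dlp -o "%(title)s.%(ext)s" --print after_move:filepath "URL"
```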
@@ -1310,6 +1230,8 @@ # OUTPUT TEMPLATE
 - `description` (string): The description of the video
 - `display_id` (string): An alternative identifier for the video
 - `uploader` (string): Full name of the video uploader
+- `uploader_id` (string): Nickname or id of the video uploader
+- `uploader_url` (string): URL to the video uploader's profile
 - `license` (string): License name the video is licensed under
 - `creators` (list): The creators of the video
 - `creator` (string): The creators of the video; comma-separated

@@ -1320,9 +1242,9 @@ # OUTPUT TEMPLATE
 - `release_year` (numeric): Year (YYYY) when the video or album was released
 - `modified_timestamp` (numeric): UNIX timestamp of the moment the video was last modified
 - `modified_date` (string): The date (YYYYMMDD) when the video was last modified in UTC
-- `uploader_id` (string): Nickname or id of the video uploader
 - `channel` (string): Full name of the channel the video is uploaded on
 - `channel_id` (string): Id of the channel
+- `channel_url` (string): URL of the channel
 - `channel_follower_count` (numeric): Number of followers of the channel
 - `channel_is_verified` (boolean): Whether the channel is verified on the platform
 - `location` (string): Physical location where the video was filmed

@@ -1362,6 +1284,9 @@ # OUTPUT TEMPLATE
 - `webpage_url_basename` (string): The basename of the webpage URL
 - `webpage_url_domain` (string): The domain of the webpage URL
 - `original_url` (string): The URL given by the user (or same as `webpage_url` for playlist entries)
+- `categories` (list): List of categories the video belongs to
+- `tags` (list): List of tags assigned to the video
+- `cast` (list): List of cast members

 All the fields in [Filtering Formats](#filtering-formats) can also be used

@@ -1374,6 +1299,7 @@ # OUTPUT TEMPLATE
 Available for the video that is an episode of some series or programme:

 - `series` (string): Title of the series or programme the video episode belongs to
+- `series_id` (string): Id of the series or programme the video episode belongs to
 - `season` (string): Title of the season the video episode belongs to
 - `season_number` (numeric): Number of the season the video episode belongs to
 - `season_id` (string): Id of the season the video episode belongs to
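As an illustrative sketch (the URL is a placeholder, and these fields are only populated when the extractor provides them), the episode fields can drive a season-based directory layout:

```
# %(episode_number)s is assumed here from the same field family as %(season_number)s
yt-dlp -o "%(series)s/Season %(season_number)s/%(series)s S%(season_number)02dE%(episode_number)02d - %(title)s.%(ext)s" URL
```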
@@ -1750,9 +1676,9 @@ # MODIFYING METADATA

 The metadata obtained by the extractors can be modified by using `--parse-metadata` and `--replace-in-metadata`

-`--replace-in-metadata FIELDS REGEX REPLACE` is used to replace text in any metadata field using [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax). [Backreferences](https://docs.python.org/3/library/re.html?highlight=backreferences#re.sub) can be used in the replace string for advanced use.
+`--replace-in-metadata FIELDS REGEX REPLACE` is used to replace text in any metadata field using a [Python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax). [Backreferences](https://docs.python.org/3/library/re.html?highlight=backreferences#re.sub) can be used in the replace string for advanced use.

-The general syntax of `--parse-metadata FROM:TO` is to give the name of a field or an [output template](#output-template) to extract data from, and the format to interpret it as, separated by a colon `:`. Either a [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax) with named capture groups, a single field name, or a similar syntax to the [output template](#output-template) (only `%(field)s` formatting is supported) can be used for `TO`. The option can be used multiple times to parse and modify various fields.
+The general syntax of `--parse-metadata FROM:TO` is to give the name of a field or an [output template](#output-template) to extract data from, and the format to interpret it as, separated by a colon `:`. Either a [Python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax) with named capture groups, a single field name, or a similar syntax to the [output template](#output-template) (only `%(field)s` formatting is supported) can be used for `TO`. The option can be used multiple times to parse and modify various fields.

 Note that these options preserve their relative order, allowing replacements to be made in parsed fields and vice versa. Also, any field thus created can be used in the [output template](#output-template) and will also affect the media file's metadata added when using `--embed-metadata`.
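Two hedged examples of the syntax just described (the "Artist - Title" naming convention and the bracketed quality tag are assumptions about the input, not rules):

```
# Parse "Artist - Title" style video titles into separate fields
yt-dlp --parse-metadata "title:%(artist)s - %(title)s" URL

# Strip a trailing " [4K]" or " [HD]" tag from the title field
yt-dlp --replace-in-metadata "title" " \[(?:4K|HD)\]$" "" URL
```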
@@ -2174,9 +2100,106 @@ #### Use a custom format selector
     ydl.download(URLS)
 ```

-<!-- MANPAGE: MOVE "NEW FEATURES" SECTION HERE -->
-
-# DEPRECATED OPTIONS
+# CHANGES FROM YOUTUBE-DL
+
+### New features
+
+* Forked from [**yt-dlc@f9401f2**](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee) and merged with [**youtube-dl@be008e6**](https://github.com/ytdl-org/youtube-dl/commit/be008e657d79832642e2158557c899249c9e31cd) ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))
+
+* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API
+
+* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will be now preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples))
+
+* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that NicoNico livestreams are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.
+
+* **YouTube improvements**:
+    * Supports Clips, Stories (`ytstories:<channel UCID>`), Search (including filters)**\***, YouTube Music Search, Channel-specific search, Search prefixes (`ytsearch:`, `ytsearchdate:`)**\***, Mixes, and Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`, `:ytnotif`)
+    * Fix for [n-sig based throttling](https://github.com/ytdl-org/youtube-dl/issues/29326) **\***
+    * Supports some (but not all) age-gated content without cookies
+    * Download livestreams from the start using `--live-from-start` (*experimental*)
+    * Channel URLs download all uploads of the channel, including shorts and live
+
+* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]`
+
+* **Download time range**: Videos can be downloaded partially based on either timestamps or chapters using `--download-sections`
+
+* **Split video by chapters**: Videos can be split into multiple files based on chapters using `--split-chapters`
+
+* **Multi-threaded fragment downloads**: Download multiple fragments of m3u8/mpd videos in parallel. Use `--concurrent-fragments` (`-N`) option to set the number of threads used
+
+* **Aria2c with HLS/DASH**: You can use `aria2c` as the external downloader for DASH(mpd) and HLS(m3u8) formats
+
+* **New and fixed extractors**: Many new extractors have been added and a lot of existing ones have been fixed. See the [changelog](Changelog.md) or the [list of supported sites](supportedsites.md)
+
+* **New MSOs**: Philo, Spectrum, SlingTV, Cablevision, RCN etc.
+
+* **Subtitle extraction from manifests**: Subtitles can be extracted from streaming media manifests. See [commit/be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
+
+* **Multiple paths and output templates**: You can give different [output templates](#output-template) and download paths for different types of files. You can also set a temporary path where intermediary files are downloaded to using `--paths` (`-P`)
+
+* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [CONFIGURATION](#configuration) for details
+
+* **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata`
+
+* **Other new options**: Many new options have been added such as `--alias`, `--print`, `--concat-playlist`, `--wait-for-video`, `--retry-sleep`, `--sleep-requests`, `--convert-thumbnails`, `--force-download-archive`, `--force-overwrites`, `--break-match-filter` etc
+
+* **Improvements**: Regex and other operators in `--format`/`--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio, multiple `--config-locations`, `--exec` at different stages, etc
+
+* **Plugins**: Extractors and PostProcessors can be loaded from an external file. See [plugins](#plugins) for details
+
+* **Self updater**: The releases can be updated using `yt-dlp -U`, and downgraded using `--update-to` if required
+
+* **Automated builds**: [Nightly/master builds](#update-channels) can be used with `--update-to nightly` and `--update-to master`
+
+See [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/commits) for the full list of changes
+
+Features marked with a **\*** have been back-ported to youtube-dl
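A compact, hedged illustration combining several of the options named in the list above (the URL and the section range are placeholders):

```
# 4 concurrent fragment downloads, only the 10:15-15:00 section,
# then split the result by chapters
yt-dlp -N 4 --download-sections "*10:15-15:00" --split-chapters URL

# Reuse Firefox cookies and download a livestream from its start
yt-dlp --cookies-from-browser firefox --live-from-start URL
```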
+
+### Differences in default behavior
+
+Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc:
+
+* yt-dlp supports only [Python 3.8+](## "Windows 7"), and *may* remove support for more versions as they [become EOL](https://devguide.python.org/versions/#python-release-cycle); while [youtube-dl still supports Python 2.6+ and 3.2+](https://github.com/ytdl-org/youtube-dl/issues/30568#issue-1118238743)
+* The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`) no longer work. See [removed options](#Removed) for details
+* `avconv` is not supported as an alternative to `ffmpeg`
+* yt-dlp stores config files in slightly different locations to youtube-dl. See [CONFIGURATION](#configuration) for a list of correct locations
+* The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s-%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
+* The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
+* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
+* Unlike youtube-dlc, yt-dlp does not allow merging multiple audio/video streams into one file by default (since this conflicts with the use of `-f bv*+ba`). If needed, this feature must be enabled using `--audio-multistreams` and `--video-multistreams`. You can also use `--compat-options multistreams` to enable both
+* `--no-abort-on-error` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead
+* When writing metadata files such as thumbnails, description or infojson, the same information (if available) is also written for playlists. Use `--no-write-playlist-metafiles` or `--compat-options no-playlist-metafiles` to not write these files
+* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-info-json`. Use `--no-embed-info-json` or `--compat-options no-attach-info-json` to revert this
+* Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this
+* `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
+* The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
+* Live chats (if available) are considered as subtitles. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent any live chat/danmaku from downloading
+* YouTube channel URLs download all uploads of the channel. To download only the videos in a specific tab, pass the tab's URL. If the channel does not show the requested tab, an error will be raised. Also, `/live` URLs raise an error if there are no live videos instead of silently downloading the entire channel. You may use `--compat-options no-youtube-channel-redirect` to revert all these redirections
+* Unavailable videos are also listed for YouTube playlists. Use `--compat-options no-youtube-unavailable-videos` to remove this
+* The upload dates extracted from YouTube are in UTC [when available](https://github.com/yt-dlp/yt-dlp/blob/89e4d86171c7b7c997c77d4714542e0383bf0db0/yt_dlp/extractor/youtube.py#L3898-L3900). Use `--compat-options no-youtube-prefer-utc-upload-date` to prefer the non-UTC upload date.
+* If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
+* Thumbnail embedding in `mp4` is done with mutagen if possible. Use `--compat-options embed-thumbnail-atomicparsley` to force the use of AtomicParsley instead
+* Some internal metadata such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this
+* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this
+* `certifi` will be used for SSL root certificates, if installed. If you want to use system certificates (e.g. self-signed), use `--compat-options no-certifi`
+* yt-dlp's sanitization of invalid characters in filenames is different/smarter than in youtube-dl. You can use `--compat-options filename-sanitization` to revert to youtube-dl's behavior
+* ~~yt-dlp tries to parse the external downloader outputs into the standard progress output if possible (Currently implemented: [aria2c](https://github.com/yt-dlp/yt-dlp/issues/5931)). You can use `--compat-options no-external-downloader-progress` to get the downloader output as-is~~
+* yt-dlp versions between 2021.09.01 and 2023.01.02 applied `--match-filter` to nested playlists. This was an unintentional side-effect of [8f18ac](https://github.com/yt-dlp/yt-dlp/commit/8f18aca8717bb0dd49054555af8d386e5eda3a88) and is fixed in [d7b460](https://github.com/yt-dlp/yt-dlp/commit/d7b460d0e5fc710950582baed2e3fc616ed98a80). Use `--compat-options playlist-match-filter` to revert this
+* yt-dlp versions between 2021.11.10 and 2023.06.21 estimated `filesize_approx` values for fragmented/manifest formats. This was added for convenience in [f2fe69](https://github.com/yt-dlp/yt-dlp/commit/f2fe69c7b0d208bdb1f6292b4ae92bc1e1a7444a), but was reverted in [0dff8e](https://github.com/yt-dlp/yt-dlp/commit/0dff8e4d1e6e9fb938f4256ea9af7d81f42fd54f) due to the potentially extreme inaccuracy of the estimated values. Use `--compat-options manifest-filesize-approx` to keep extracting the estimated values
+* yt-dlp uses modern http client backends such as `requests`. Use `--compat-options prefer-legacy-http-handler` to use the legacy http handler (`urllib`) for standard http requests.
+* The sub-modules `swfinterp` and `casefold` have been removed.
+
+For ease of use, a few more compat options are available:
+
+* `--compat-options all`: Use all compat options (Do NOT use)
+* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter,-manifest-filesize-approx`
+* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter,-manifest-filesize-approx`
+* `--compat-options 2021`: Same as `--compat-options 2022,no-certifi,filename-sanitization,no-youtube-prefer-utc-upload-date`
+* `--compat-options 2022`: Same as `--compat-options 2023,playlist-match-filter,no-external-downloader-progress,prefer-legacy-http-handler,manifest-filesize-approx`
+* `--compat-options 2023`: Currently does nothing. Use this to enable all future compat options
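For reference, a hedged example of how these compat options combine on the command line:

```
# Revert (almost) all behavior changes at once
yt-dlp --compat-options youtube-dl URL

# Or revert individual defaults, e.g. the filename and format-selection changes
yt-dlp --compat-options filename,format-spec URL
```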
+
+### Deprecated options
+
 These are all the deprecated options and the current alternative to achieve the same effect

@@ -2212,7 +2235,6 @@ #### Redundant options
     --no-playlist-reverse            Default
     --no-colors                      --color no_color
-

 #### Not recommended
 While these options still work, their use is not recommended since there are other alternatives to achieve the same

@@ -2239,7 +2261,6 @@ #### Not recommended
     --geo-bypass-country CODE        --xff CODE
     --geo-bypass-ip-block IP_BLOCK   --xff IP_BLOCK
-

 #### Developer options
 These options are not intended to be used by the end-user

@@ -2249,7 +2270,6 @@ #### Developer options
     --allow-unplayable-formats       List unplayable formats also
     --no-allow-unplayable-formats    Default
-

 #### Old aliases
 These are aliases that are no longer documented for various reasons

@@ -2302,6 +2322,7 @@ #### Removed
     -A, --auto-number                -o "%(autonumber)s-%(id)s.%(ext)s"
     -t, -l, --title, --literal      -o "%(title)s-%(id)s.%(ext)s"
+

 # CONTRIBUTING
 See [CONTRIBUTING.md](CONTRIBUTING.md#contributing-to-yt-dlp) for instructions on [Opening an Issue](CONTRIBUTING.md#opening-an-issue) and [Contributing code to the project](CONTRIBUTING.md#developer-instructions)
@@ -1 +0,0 @@
-# Empty file
@@ -20,7 +20,7 @@ def main():
         'py2exe builds do not support pycryptodomex and needs VC++14 to run. '
         'It is recommended to run "pyinst.py" to build using pyinstaller instead')

-    return freeze(
+    freeze(
         console=[{
             'script': './yt_dlp/__main__.py',
             'dest_base': 'yt-dlp',

@@ -28,7 +28,7 @@ def main():
         }],
         version_info={
             'version': VERSION,
-            'description': 'A youtube-dl fork with additional features and patches',
+            'description': 'A feature-rich command-line audio/video downloader',
             'comments': 'Official repository: <https://github.com/yt-dlp/yt-dlp>',
             'product_name': 'yt-dlp',
             'product_version': VERSION,
Binary file not shown.

@@ -1 +0,0 @@
-# Empty file needed to make devscripts.utils properly importable from outside
@@ -120,5 +120,11 @@
         "when": "15f22b4880b6b3f71f350c64d70976ae65b9f1ca",
         "short": "[webvtt] Allow spaces before newlines for CueBlock (#7681)",
         "authors": ["TSRBerry"]
+    },
+    {
+        "action": "change",
+        "when": "4ce57d3b873c2887814cbec03d029533e82f7db5",
+        "short": "[ie] Support multi-period MPD streams (#6654)",
+        "authors": ["alard", "pukkandan"]
     }
 ]
@@ -19,7 +19,7 @@ def parse_args():
     parser.add_argument(
         'input', nargs='?', metavar='TOMLFILE', default='pyproject.toml', help='Input file (default: %(default)s)')
     parser.add_argument(
-        '-e', '--exclude', metavar='REQUIREMENT', action='append', help='Exclude a required dependency')
+        '-e', '--exclude', metavar='DEPENDENCY', action='append', help='Exclude a dependency')
     parser.add_argument(
         '-i', '--include', metavar='GROUP', action='append', help='Include an optional dependency group')
     parser.add_argument(

@@ -33,21 +33,28 @@ def parse_args():

 def main():
     args = parse_args()
-    toml_data = parse_toml(read_file(args.input))
-    deps = toml_data['project']['dependencies']
-    targets = deps.copy() if not args.only_optional else []
-
-    for exclude in args.exclude or []:
-        for dep in deps:
-            simplified_dep = re.match(r'[\w-]+', dep)[0]
-            if dep in targets and (exclude.lower() == simplified_dep.lower() or exclude == dep):
-                targets.remove(dep)
-
-    optional_deps = toml_data['project']['optional-dependencies']
-    for include in args.include or []:
-        group = optional_deps.get(include)
-        if group:
-            targets.extend(group)
+    project_table = parse_toml(read_file(args.input))['project']
+    optional_groups = project_table['optional-dependencies']
+    excludes = args.exclude or []
+
+    deps = []
+    if not args.only_optional:  # `-o` should exclude 'dependencies' and the 'default' group
+        deps.extend(project_table['dependencies'])
+        if 'default' not in excludes:  # `--exclude default` should exclude entire 'default' group
+            deps.extend(optional_groups['default'])
+
+    def name(dependency):
+        return re.match(r'[\w-]+', dependency)[0].lower()
+
+    target_map = {name(dep): dep for dep in deps}
+
+    for include in filter(None, map(optional_groups.get, args.include or [])):
+        target_map.update(zip(map(name, include), include))
+
+    for exclude in map(name, excludes):
+        target_map.pop(exclude, None)
+
+    targets = list(target_map.values())

     if args.print:
         for target in targets:
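Hedged examples of driving the reworked script, matching the new group semantics above (run from the repository root):

```
# Core dependencies plus the 'default' group, as a requirements list
python devscripts/install_deps.py --print > requirements.txt

# Only an optional group; `-o` now drops core deps and the 'default' group
python devscripts/install_deps.py --print -o --include curl_cffi

# The special-cased exclude that removes the whole 'default' group
python devscripts/install_deps.py --print --exclude default
```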
@@ -253,7 +253,7 @@ class CommitRange:
        ''', re.VERBOSE | re.DOTALL)
     EXTRACTOR_INDICATOR_RE = re.compile(r'(?:Fix|Add)\s+Extractors?', re.IGNORECASE)
     REVERT_RE = re.compile(r'(?:\[[^\]]+\]\s+)?(?i:Revert)\s+([\da-f]{40})')
-    FIXES_RE = re.compile(r'(?i:Fix(?:es)?(?:\s+bugs?)?(?:\s+in|\s+for)?|Revert)\s+([\da-f]{40})')
+    FIXES_RE = re.compile(r'(?i:Fix(?:es)?(?:\s+bugs?)?(?:\s+in|\s+for)?|Revert|Improve)\s+([\da-f]{40})')
     UPSTREAM_MERGE_RE = re.compile(r'Update to ytdl-commit-([\da-f]+)')

     def __init__(self, start, end, default_author=None):
@@ -445,7 +445,32 @@ def get_new_contributors(contributors_path, commits):
     return sorted(new_contributors, key=str.casefold)


-if __name__ == '__main__':
+def create_changelog(args):
+    logging.basicConfig(
+        datefmt='%Y-%m-%d %H-%M-%S', format='{asctime} | {levelname:<8} | {message}',
+        level=logging.WARNING - 10 * args.verbosity, style='{', stream=sys.stderr)
+
+    commits = CommitRange(None, args.commitish, args.default_author)
+
+    if not args.no_override:
+        if args.override_path.exists():
+            overrides = json.loads(read_file(args.override_path))
+            commits.apply_overrides(overrides)
+        else:
+            logger.warning(f'File {args.override_path.as_posix()} does not exist')
+
+    logger.info(f'Loaded {len(commits)} commits')
+
+    new_contributors = get_new_contributors(args.contributors_path, commits)
+    if new_contributors:
+        if args.contributors:
+            write_file(args.contributors_path, '\n'.join(new_contributors) + '\n', mode='a')
+        logger.info(f'New contributors: {", ".join(new_contributors)}')
+
+    return Changelog(commits.groups(), args.repo, args.collapsible)
+
+
+def create_parser():
     import argparse

     parser = argparse.ArgumentParser(

@@ -477,27 +502,9 @@ def get_new_contributors(contributors_path, commits):
     parser.add_argument(
         '--collapsible', action='store_true',
         help='make changelog collapsible (default: %(default)s)')
-    args = parser.parse_args()
-
-    logging.basicConfig(
-        datefmt='%Y-%m-%d %H-%M-%S', format='{asctime} | {levelname:<8} | {message}',
-        level=logging.WARNING - 10 * args.verbosity, style='{', stream=sys.stderr)
-
-    commits = CommitRange(None, args.commitish, args.default_author)
-
-    if not args.no_override:
-        if args.override_path.exists():
-            overrides = json.loads(read_file(args.override_path))
-            commits.apply_overrides(overrides)
-        else:
-            logger.warning(f'File {args.override_path.as_posix()} does not exist')
-
-    logger.info(f'Loaded {len(commits)} commits')
-
-    new_contributors = get_new_contributors(args.contributors_path, commits)
-    if new_contributors:
-        if args.contributors:
-            write_file(args.contributors_path, '\n'.join(new_contributors) + '\n', mode='a')
-        logger.info(f'New contributors: {", ".join(new_contributors)}')
-
-    print(Changelog(commits.groups(), args.repo, args.collapsible))
+
+    return parser
+
+
+if __name__ == '__main__':
+    print(create_changelog(create_parser().parse_args()))
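With the `__main__` block reduced to a thin wrapper, the module can now be imported by other devscripts while the old standalone invocation keeps working; presumably:

```
# Standalone, as before: print the generated changelog entry to stdout
python devscripts/make_changelog.py -vv --collapsible
```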
@@ -24,7 +24,7 @@

 # NAME

-yt\-dlp \- A youtube-dl fork with additional features and patches
+yt\-dlp \- A feature\-rich command\-line audio/video downloader

 # SYNOPSIS

devscripts/update_changelog.py (new executable file, 26 lines)

@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+
+# Allow direct execution
+import os
+import sys
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from pathlib import Path
+
+from devscripts.make_changelog import create_changelog, create_parser
+from devscripts.utils import read_file, read_version, write_file
+
+# Always run after devscripts/update-version.py, and run before `make doc|pypi-files|tar|all`
+
+if __name__ == '__main__':
+    parser = create_parser()
+    parser.description = 'Update an existing changelog file with an entry for a new release'
+    parser.add_argument(
+        '--changelog-path', type=Path, default=Path(__file__).parent.parent / 'Changelog.md',
+        help='path to the Changelog file')
+    args = parser.parse_args()
+    new_entry = create_changelog(args)
+
+    header, sep, changelog = read_file(args.changelog_path).partition('\n### ')
+    write_file(args.changelog_path, f'{header}{sep}{read_version()}\n{new_entry}\n{sep}{changelog}')
pyinst.py (new executable file, 17 lines)

@@ -0,0 +1,17 @@
+#!/usr/bin/env python3
+
+# Allow execution from anywhere
+import os
+import sys
+
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+import warnings
+
+from bundle.pyinstaller import main
+
+warnings.warn(DeprecationWarning('`pyinst.py` is deprecated and will be removed in a future version. '
+                                 'Use `bundle.pyinstaller` instead'))
+
+if __name__ == '__main__':
+    main()
@@ -8,8 +8,9 @@ maintainers = [
     {name = "pukkandan", email = "pukkandan.ytdlp@gmail.com"},
     {name = "Grub4K", email = "contact@grub4k.xyz"},
     {name = "bashonly", email = "bashonly@protonmail.com"},
+    {name = "coletdjnz", email = "coletdjnz@protonmail.com"},
 ]
-description = "A youtube-dl fork with additional features and patches"
+description = "A feature-rich command-line audio/video downloader"
 readme = "README.md"
 requires-python = ">=3.8"
 keywords = [

@@ -51,6 +52,8 @@ dependencies = [
 ]

 [project.optional-dependencies]
+default = []
+curl_cffi = ["curl-cffi==0.5.10; implementation_name=='cpython'"]
 secretstorage = [
     "cffi",
     "secretstorage",

@@ -67,6 +70,7 @@ dev = [
     "pytest",
 ]
 pyinstaller = ["pyinstaller>=6.3"]
+pyinstaller_macos = ["pyinstaller==5.13.2"]  # needed for curl_cffi builds
 py2exe = ["py2exe>=0.12"]

 [project.urls]
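Assuming a local checkout of this source tree, the new optional-dependency groups install like any other extras (note that `default` is still an empty group at this point):

```
python -m pip install -e ".[default,curl_cffi]"
```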
setup.py (new executable file, 36 lines)

@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+
+# Allow execution from anywhere
+import os
+import sys
+
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+import warnings
+
+
+if sys.argv[1:2] == ['py2exe']:
+    warnings.warn(DeprecationWarning('`setup.py py2exe` is deprecated and will be removed in a future version. '
+                                     'Use `bundle.py2exe` instead'))
+
+    import bundle.py2exe
+
+    bundle.py2exe.main()
+
+elif 'build_lazy_extractors' in sys.argv:
+    warnings.warn(DeprecationWarning('`setup.py build_lazy_extractors` is deprecated and will be removed in a future version. '
+                                     'Use `devscripts.make_lazy_extractors` instead'))
+
+    import subprocess
+
+    os.chdir(sys.path[0])
+    print('running build_lazy_extractors')
+    subprocess.run([sys.executable, 'devscripts/make_lazy_extractors.py'])
+
+else:
+
+    print(
+        'ERROR: Building by calling `setup.py` is deprecated. '
+        'Use a build frontend like `build` instead. ',
+        'Refer to https://build.pypa.io for more info', file=sys.stderr)
+    sys.exit(1)
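The replacements the shim points to are, presumably (whether `bundle.py2exe` is directly runnable as a module is an assumption here):

```
python -m bundle.py2exe                      # instead of `setup.py py2exe`
python devscripts/make_lazy_extractors.py    # instead of `setup.py build_lazy_extractors`
python -m build                              # instead of other `setup.py` builds
```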
@@ -5,7 +5,7 @@ # Supported sites
 - **1tv**: Первый канал
 - **20min**
 - **23video**
-- **247sports**
+- **247sports**: (**Currently broken**)
 - **24tv.ua**
 - **3qsdn**: 3Q SDN
 - **3sat**

@@ -17,6 +17,7 @@ # Supported sites
 - **91porn**
 - **9c9media**
 - **9gag**: 9GAG
+- **9News**
 - **9now.com.au**
 - **abc.net.au**
 - **abc.net.au:iview**

@@ -26,13 +27,14 @@ # Supported sites
 - **abcotvs**: ABC Owned Television Stations
 - **abcotvs:clips**
 - **AbemaTV**: [*abematv*](## "netrc machine")
-- **AbemaTVTitle**
+- **AbemaTVTitle**: [*abematv*](## "netrc machine")
 - **AcademicEarth:Course**
 - **acast**
 - **acast:channel**
 - **AcFunBangumi**
 - **AcFunVideo**
 - **ADN**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
+- **ADNSeason**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
 - **AdobeConnect**
 - **adobetv**
 - **adobetv:channel**

@@ -61,6 +63,7 @@ # Supported sites
 - **altcensored:channel**
 - **Alura**: [*alura*](## "netrc machine")
 - **AluraCourse**: [*aluracourse*](## "netrc machine")
+- **AmadeusTV**
 - **Amara**
 - **AmazonMiniTV**
 - **amazonminitv:season**: Amazon MiniTV Season, "minitv:season:" prefix

@@ -93,11 +96,15 @@ # Supported sites
 - **ARDMediathek**
 - **ARDMediathekCollection**
 - **Arkena**
+- **Art19**
+- **Art19Show**
 - **arte.sky.it**
 - **ArteTV**
 - **ArteTVCategory**
 - **ArteTVEmbed**
 - **ArteTVPlaylist**
+- **asobichannel**: ASOBI CHANNEL
+- **asobichannel:tag**: ASOBI CHANNEL
 - **AtresPlayer**: [*atresplayer*](## "netrc machine")
 - **AtScaleConfEvent**
 - **ATVAt**

@@ -180,13 +187,14 @@ # Supported sites
 - **BitChute**
 - **BitChuteChannel**
 - **BlackboardCollaborate**
-- **BleacherReport**
-- **BleacherReportCMS**
+- **BleacherReport**: (**Currently broken**)
+- **BleacherReportCMS**: (**Currently broken**)
 - **blerp**
 - **blogger.com**
 - **Bloomberg**
 - **BokeCC**
 - **BongaCams**
+- **Boosty**
 - **BostonGlobe**
 - **Box**
 - **BoxCastVideo**

@@ -231,8 +239,7 @@ # Supported sites
 - **cbc.ca**
 - **cbc.ca:player**
 - **cbc.ca:player:playlist**
-- **CBS**
-- **CBSInteractive**
+- **CBS**: (**Currently broken**)
 - **CBSLocal**
 - **CBSLocalArticle**
 - **CBSLocalLive**

@@ -240,8 +247,8 @@ # Supported sites
 - **cbsnews:embed**
 - **cbsnews:live**: CBS News Livestream
 - **cbsnews:livevideo**: CBS News Live Videos
-- **cbssports**
-- **cbssports:embed**
+- **cbssports**: (**Currently broken**)
+- **cbssports:embed**: (**Currently broken**)
 - **CCMA**
 - **CCTV**: 央视网
 - **CDA**: [*cdapl*](## "netrc machine")

@@ -251,10 +258,10 @@ # Supported sites
 - **CharlieRose**
 - **Chaturbate**
 - **Chilloutzone**
-- **Chingari**
-- **ChingariUser**
+- **chzzk:live**
+- **chzzk:video**
 - **cielotv.it**
-- **Cinemax**
+- **Cinemax**: (**Currently broken**)
 - **CinetecaMilano**
 - **Cineverse**
 - **CineverseDetails**

@@ -263,16 +270,15 @@ # Supported sites
 - **ciscowebex**: Cisco Webex
 - **CJSW**
 - **Clipchamp**
-- **cliphunter**
 - **Clippit**
-- **ClipRs**
+- **ClipRs**: (**Currently broken**)
 - **ClipYouEmbed**
-- **CloserToTruth**
+- **CloserToTruth**: (**Currently broken**)
 - **CloudflareStream**
+- **CloudyCDN**
 - **Clubic**: (**Currently broken**)
 - **Clyp**
 - **cmt.com**: (**Currently broken**)
-- **CNBC**
 - **CNBCVideo**
 - **CNN**
 - **CNNArticle**

@@ -320,6 +326,7 @@ # Supported sites
 - **DailyMail**
 - **dailymotion**: [*dailymotion*](## "netrc machine")
 - **dailymotion:playlist**: [*dailymotion*](## "netrc machine")
+- **dailymotion:search**: [*dailymotion*](## "netrc machine")
 - **dailymotion:user**: [*dailymotion*](## "netrc machine")
 - **DailyWire**
 - **DailyWirePodcast**

@@ -340,7 +347,6 @@ # Supported sites
 - **DeuxM**
 - **DeuxMNews**
 - **DHM**: Filmarchiv - Deutsches Historisches Museum (**Currently broken**)
-- **Digg**
 - **DigitalConcertHall**: [*digitalconcerthall*](## "netrc machine") DigitalConcertHall extractor
 - **DigitallySpeaking**
 - **Digiteka**

@@ -373,14 +379,14 @@ # Supported sites
 - **drtv:live**
 - **drtv:season**
 - **drtv:series**
-- **DTube**
+- **DTube**: (**Currently broken**)
 - **duboku**: www.duboku.io
 - **duboku:list**: www.duboku.io entire series
 - **Dumpert**
 - **Duoplay**
 - **dvtv**: http://video.aktualne.cz/
-- **dw**
-- **dw:article**
+- **dw**: (**Currently broken**)
+- **dw:article**: (**Currently broken**)
 - **EaglePlatform**
 - **EbaumsWorld**
 - **Ebay**

@@ -391,6 +397,7 @@ # Supported sites
 - **EinsUndEinsTVRecordings**: [*1und1tv*](## "netrc machine")
 - **Einthusan**
 - **eitb.tv**
+- **ElementorEmbed**
 - **Elonet**
 - **ElPais**: El País
 - **ElTreceTV**: El Trece TV (Argentina)

@@ -405,6 +412,7 @@ # Supported sites
 - **Erocast**
 - **EroProfile**: [*eroprofile*](## "netrc machine")
 - **EroProfile:album**
+- **ERRJupiter**
 - **ertflix**: ERTFLIX videos
 - **ertflix:codename**: ERTFLIX videos by codename
 - **ertwebtv:embed**: ert.gr webtv embedded videos

@@ -412,7 +420,7 @@ # Supported sites
 - **ESPNArticle**
 - **ESPNCricInfo**
 - **EttuTv**
-- **Europa**
+- **Europa**: (**Currently broken**)
 - **EuroParlWebstream**
 - **EuropeanTour**
 - **Eurosport**

@@ -423,22 +431,23 @@ # Supported sites
 - **Expressen**
 - **EyedoTV**
 - **facebook**: [*facebook*](## "netrc machine")
+- **facebook:ads**
 - **facebook:reel**
 - **FacebookPluginsVideo**
-- **fancode:live**: [*fancode*](## "netrc machine")
-- **fancode:vod**: [*fancode*](## "netrc machine")
+- **fancode:live**: [*fancode*](## "netrc machine") (**Currently broken**)
+- **fancode:vod**: [*fancode*](## "netrc machine") (**Currently broken**)
 - **faz.net**
 - **fc2**: [*fc2*](## "netrc machine")
 - **fc2:embed**
 - **fc2:live**
 - **Fczenit**
 - **Fifa**
-- **Filmmodu**
 - **filmon**
 - **filmon:channel**
 - **Filmweb**
 - **FiveThirtyEight**
 - **FiveTV**
+- **FlexTV**
 - **Flickr**
 - **Floatplane**
 - **FloatplaneChannel**

@@ -477,7 +486,6 @@ # Supported sites
 - **Gab**
 - **GabTV**
 - **Gaia**: [*gaia*](## "netrc machine")
-- **GameInformer**
 - **GameJolt**
 - **GameJoltCommunity**
 - **GameJoltGame**

@@ -487,18 +495,19 @@ # Supported sites
 - **GameSpot**
 - **GameStar**
 - **Gaskrank**
-- **Gazeta**
-- **GDCVault**: [*gdcvault*](## "netrc machine")
+- **Gazeta**: (**Currently broken**)
+- **GDCVault**: [*gdcvault*](## "netrc machine") (**Currently broken**)
 - **GediDigital**
 - **gem.cbc.ca**: [*cbcgem*](## "netrc machine")
 - **gem.cbc.ca:live**
 - **gem.cbc.ca:playlist**
 - **Genius**
 - **GeniusLyrics**
+- **GetCourseRu**: [*getcourseru*](## "netrc machine")
+- **GetCourseRuPlayer**
 - **Gettr**
 - **GettrStreaming**
 - **GiantBomb**
-- **Giga**
 - **GlattvisionTV**: [*glattvisiontv*](## "netrc machine")
 - **GlattvisionTVLive**: [*glattvisiontv*](## "netrc machine")
 - **GlattvisionTVRecordings**: [*glattvisiontv*](## "netrc machine")

@@ -516,7 +525,7 @@ # Supported sites
 - **GMANetworkVideo**
 - **Go**
 - **GoDiscovery**
-- **GodTube**
+- **GodTube**: (**Currently broken**)
 - **Gofile**
 - **Golem**
 - **goodgame:stream**

@@ -551,7 +560,7 @@ # Supported sites
 - **HollywoodReporter**
 - **HollywoodReporterPlaylist**
 - **Holodex**
-- **HotNewHipHop**
+- **HotNewHipHop**: (**Currently broken**)
 - **hotstar**
 - **hotstar:playlist**
 - **hotstar:season**

@@ -579,6 +588,7 @@ # Supported sites
 - **IGNVideo**
 - **iheartradio**
 - **iheartradio:podcast**
+- **IlPost**
 - **Iltalehti**
 - **imdb**: Internet Movie Database trailers
 - **imdb:list**: Internet Movie Database lists

@@ -592,7 +602,7 @@ # Supported sites
 - **Instagram**: [*instagram*](## "netrc machine")
 - **instagram:story**: [*instagram*](## "netrc machine")
 - **instagram:tag**: [*instagram*](## "netrc machine") Instagram hashtag search URLs
-- **instagram:user**: [*instagram*](## "netrc machine") Instagram user profile
+- **instagram:user**: [*instagram*](## "netrc machine") Instagram user profile (**Currently broken**)
 - **InstagramIOS**: IOS instagram:// URL
 - **Internazionale**
 - **InternetVideoArchive**

@@ -622,7 +632,7 @@ # Supported sites
 - **JablePlaylist**
 - **Jamendo**
 - **JamendoAlbum**
-- **JeuxVideo**
+- **JeuxVideo**: (**Currently broken**)
 - **JioSaavnAlbum**
 - **JioSaavnSong**
 - **Joj**

@@ -634,12 +644,10 @@ # Supported sites
 - **JWPlatform**
 - **Kakao**
 - **Kaltura**
-- **Kanal2**
-- **KankaNews**
+- **KankaNews**: (**Currently broken**)
 - **Karaoketv**
-- **KarriereVideos**
-- **Katsomo**
-- **KelbyOne**
+- **Katsomo**: (**Currently broken**)
+- **KelbyOne**: (**Currently broken**)
 - **Ketnet**
 - **khanacademy**
 - **khanacademy:unit**

@@ -651,18 +659,17 @@ # Supported sites
 - **KinoPoisk**
 - **Kommunetv**
 - **KompasVideo**
-- **KonserthusetPlay**
-- **Koo**
-- **KrasView**: Красвью
+- **Koo**: (**Currently broken**)
+- **KrasView**: Красвью (**Currently broken**)
 - **KTH**
 - **Ku6**
-- **KUSI**
-- **kuwo:album**: 酷我音乐 - 专辑
-- **kuwo:category**: 酷我音乐 - 分类
-- **kuwo:chart**: 酷我音乐 - 排行榜
-- **kuwo:mv**: 酷我音乐 - MV
-- **kuwo:singer**: 酷我音乐 - 歌手
-- **kuwo:song**: 酷我音乐
+- **KukuluLive**
+- **kuwo:album**: 酷我音乐 - 专辑 (**Currently broken**)
+- **kuwo:category**: 酷我音乐 - 分类 (**Currently broken**)
+- **kuwo:chart**: 酷我音乐 - 排行榜 (**Currently broken**)
+- **kuwo:mv**: 酷我音乐 - MV (**Currently broken**)
+- **kuwo:singer**: 酷我音乐 - 歌手 (**Currently broken**)
+- **kuwo:song**: 酷我音乐 (**Currently broken**)
 - **la7.it**
 - **la7.it:pod:episode**
 - **la7.it:podcast**

@@ -677,7 +684,7 @@ # Supported sites
 - **Lcp**
 - **LcpPlay**
 - **Le**: 乐视网
-- **Lecture2Go**
+- **Lecture2Go**: (**Currently broken**)
 - **Lecturio**: [*lecturio*](## "netrc machine")
 - **LecturioCourse**: [*lecturio*](## "netrc machine")
 - **LecturioDeCourse**: [*lecturio*](## "netrc machine")

@@ -685,7 +692,7 @@ # Supported sites
 - **LeFigaroVideoSection**
 - **LEGO**
 - **Lemonde**
-- **Lenta**
+- **Lenta**: (**Currently broken**)
 - **LePlaylist**
 - **LetvCloud**: 乐视云
 - **Libsyn**

@@ -709,31 +716,32 @@ # Supported sites
 - **Lnk**
 - **LnkGo**
 - **loc**: Library of Congress
-- **LocalNews8**
 - **LoveHomePorn**
 - **LRTStream**
 - **LRTVOD**
+- **LSMLREmbed**
+- **LSMLTVEmbed**
+- **LSMReplay**
 - **Lumni**
 - **lynda**: [*lynda*](## "netrc machine") lynda.com videos
 - **lynda:course**: [*lynda*](## "netrc machine") lynda.com online courses
 - **maariv.co.il**
 - **MagellanTV**
-- **MagentaMusik360**
+- **MagentaMusik**
 - **mailru**: Видео@Mail.Ru
 - **mailru:music**: Музыка@Mail.Ru
 - **mailru:music:search**: Музыка@Mail.Ru
 - **MainStreaming**: MainStreaming Player
-- **MallTV**
 - **mangomolo:live**
 - **mangomolo:video**
 - **MangoTV**: 芒果TV
 - **ManotoTV**: Manoto TV (Episode)
 - **ManotoTVLive**: Manoto TV (Live)
 - **ManotoTVShow**: Manoto TV (Show)
-- **ManyVids**
+- **ManyVids**: (**Currently broken**)
 - **MaoriTV**
-- **Markiza**
-- **MarkizaPage**
+- **Markiza**: (**Currently broken**)
+- **MarkizaPage**: (**Currently broken**)
 - **massengeschmack.tv**
 - **Masters**
 - **MatchTV**

@@ -760,7 +768,6 @@ # Supported sites
 - **MelonVOD**
 - **Metacritic**
 - **mewatch**
-- **MiaoPai**
 - **MicrosoftEmbed**
 - **microsoftstream**: Microsoft Stream
 - **mildom**: Record ongoing live by specific user in Mildom

@@ -770,7 +777,6 @@ # Supported sites
 - **minds**
 - **minds:channel**
 - **minds:group**
-- **MinistryGrid**
 - **Minoto**
 - **mirrativ**
 - **mirrativ:user**

@@ -793,11 +799,11 @@ # Supported sites
 - **Mojvideo**
 - **Monstercat**
 - **MonsterSirenHypergryphMusic**
-- **Morningstar**: morningstar.com
 - **Motherless**
 - **MotherlessGallery**
 - **MotherlessGroup**
-- **Motorsport**: motorsport.com
+- **MotherlessUploader**
+- **Motorsport**: motorsport.com (**Currently broken**)
 - **MotorTrend**
 - **MotorTrendOnDemand**
 - **MovieFap**

@@ -808,17 +814,17 @@ # Supported sites
 - **MSN**: (**Currently broken**)
 - **mtg**: MTG services
 - **mtv**
-- **mtv.de**
+- **mtv.de**: (**Currently broken**)
 - **mtv.it**
 - **mtv.it:programma**
 - **mtv:video**
 - **mtvjapan**
 - **mtvservices:embedded**
-- **MTVUutisetArticle**
-- **MuenchenTV**: münchen.tv
+- **MTVUutisetArticle**: (**Currently broken**)
+- **MuenchenTV**: münchen.tv (**Currently broken**)
 - **MujRozhlas**
-- **Murrtube**
-- **MurrtubeUser**: Murrtube user profile
+- **Murrtube**: (**Currently broken**)
+- **MurrtubeUser**: Murrtube user profile (**Currently broken**)
 - **MuseAI**
 - **MuseScore**
 - **MusicdexAlbum**

@@ -827,6 +833,9 @@ # Supported sites
 - **MusicdexSong**
 - **mva**: Microsoft Virtual Academy videos
 - **mva:course**: Microsoft Virtual Academy courses
+- **Mx3**
+- **Mx3Neo**
+- **Mx3Volksmusik**
 - **Mxplayer**
 - **MxplayerShow**
 - **MySpace**

@@ -862,11 +871,11 @@ # Supported sites
 - **ndr**: NDR.de - Norddeutscher Rundfunk
 - **ndr:embed**
 - **ndr:embed:base**
-- **NDTV**
-- **Nebula**: [*watchnebula*](## "netrc machine")
+- **NDTV**: (**Currently broken**)
 - **nebula:channel**: [*watchnebula*](## "netrc machine")
-- **nebula:class**: [*watchnebula*](## "netrc machine")
+- **nebula:media**: [*watchnebula*](## "netrc machine")
 - **nebula:subscriptions**: [*watchnebula*](## "netrc machine")
+- **nebula:video**: [*watchnebula*](## "netrc machine")
 - **NekoHacker**
 - **NerdCubedFeed**
 - **netease:album**: 网易云音乐 - 专辑

@@ -882,18 +891,19 @@ # Supported sites
 - **Netverse**
 - **NetversePlaylist**
 - **NetverseSearch**: "netsearch:" prefix
-- **Netzkino**
-- **Newgrounds**
+- **Netzkino**: (**Currently broken**)
+- **Newgrounds**: [*newgrounds*](## "netrc machine")
 - **Newgrounds:playlist**
 - **Newgrounds:user**
 - **NewsPicks**
 - **Newsy**
 - **NextMedia**: 蘋果日報
 - **NextMediaActionNews**: 蘋果日報 - 動新聞
-- **NextTV**: 壹電視
+- **NextTV**: 壹電視 (**Currently broken**)
 - **Nexx**
 - **NexxEmbed**
-- **NFB**
+- **nfb**: nfb.ca and onf.ca films and episodes
+- **nfb:series**: nfb.ca and onf.ca series
- **NFHSNetwork**
|
- **NFHSNetwork**
|
||||||
- **nfl.com**
|
- **nfl.com**
|
||||||
- **nfl.com:article**
|
- **nfl.com:article**
|
||||||
|
@ -925,11 +935,12 @@ # Supported sites
|
||||||
- **nicovideo:search**: Nico video search; "nicosearch:" prefix
|
- **nicovideo:search**: Nico video search; "nicosearch:" prefix
|
||||||
- **nicovideo:search:date**: Nico video search, newest first; "nicosearchdate:" prefix
|
- **nicovideo:search:date**: Nico video search, newest first; "nicosearchdate:" prefix
|
||||||
- **nicovideo:search_url**: Nico video search URLs
|
- **nicovideo:search_url**: Nico video search URLs
|
||||||
|
- **NinaProtocol**
|
||||||
- **Nintendo**
|
- **Nintendo**
|
||||||
- **Nitter**
|
- **Nitter**
|
||||||
- **njoy**: N-JOY
|
- **njoy**: N-JOY
|
||||||
- **njoy:embed**
|
- **njoy:embed**
|
||||||
- **NobelPrize**
|
- **NobelPrize**: (**Currently broken**)
|
||||||
- **NoicePodcast**
|
- **NoicePodcast**
|
||||||
- **NonkTube**
|
- **NonkTube**
|
||||||
- **NoodleMagazine**
|
- **NoodleMagazine**
|
||||||
|
@ -941,7 +952,7 @@ # Supported sites
|
||||||
- **nowness**
|
- **nowness**
|
||||||
- **nowness:playlist**
|
- **nowness:playlist**
|
||||||
- **nowness:series**
|
- **nowness:series**
|
||||||
- **Noz**
|
- **Noz**: (**Currently broken**)
|
||||||
- **npo**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
- **npo**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||||
- **npo.nl:live**
|
- **npo.nl:live**
|
||||||
- **npo.nl:radio**
|
- **npo.nl:radio**
|
||||||
|
@ -960,15 +971,18 @@ # Supported sites
|
||||||
- **NRLTV**: (**Currently broken**)
|
- **NRLTV**: (**Currently broken**)
|
||||||
- **ntv.ru**
|
- **ntv.ru**
|
||||||
- **NubilesPorn**: [*nubiles-porn*](## "netrc machine")
|
- **NubilesPorn**: [*nubiles-porn*](## "netrc machine")
|
||||||
|
- **nuum:live**
|
||||||
|
- **nuum:media**
|
||||||
|
- **nuum:tab**
|
||||||
- **Nuvid**
|
- **Nuvid**
|
||||||
- **NYTimes**
|
- **NYTimes**
|
||||||
- **NYTimesArticle**
|
- **NYTimesArticle**
|
||||||
- **NYTimesCooking**
|
- **NYTimesCookingGuide**
|
||||||
|
- **NYTimesCookingRecipe**
|
||||||
- **nzherald**
|
- **nzherald**
|
||||||
- **NZOnScreen**
|
- **NZOnScreen**
|
||||||
- **NZZ**
|
- **NZZ**
|
||||||
- **ocw.mit.edu**
|
- **ocw.mit.edu**
|
||||||
- **OdaTV**
|
|
||||||
- **Odnoklassniki**
|
- **Odnoklassniki**
|
||||||
- **OfTV**
|
- **OfTV**
|
||||||
- **OfTVPlaylist**
|
- **OfTVPlaylist**
|
||||||
|
@ -993,6 +1007,7 @@ # Supported sites
|
||||||
- **OraTV**
|
- **OraTV**
|
||||||
- **orf:fm4:story**: fm4.orf.at stories
|
- **orf:fm4:story**: fm4.orf.at stories
|
||||||
- **orf:iptv**: iptv.ORF.at
|
- **orf:iptv**: iptv.ORF.at
|
||||||
|
- **orf:on**
|
||||||
- **orf:podcast**
|
- **orf:podcast**
|
||||||
- **orf:radio**
|
- **orf:radio**
|
||||||
- **orf:tvthek**: ORF TVthek
|
- **orf:tvthek**: ORF TVthek
|
||||||
|
@ -1015,7 +1030,7 @@ # Supported sites
|
||||||
- **ParamountPressExpress**
|
- **ParamountPressExpress**
|
||||||
- **Parler**: Posts on parler.com
|
- **Parler**: Posts on parler.com
|
||||||
- **parliamentlive.tv**: UK parliament videos
|
- **parliamentlive.tv**: UK parliament videos
|
||||||
- **Parlview**
|
- **Parlview**: (**Currently broken**)
|
||||||
- **Patreon**
|
- **Patreon**
|
||||||
- **PatreonCampaign**
|
- **PatreonCampaign**
|
||||||
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
|
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
|
||||||
|
@ -1049,19 +1064,19 @@ # Supported sites
|
||||||
- **Platzi**: [*platzi*](## "netrc machine")
|
- **Platzi**: [*platzi*](## "netrc machine")
|
||||||
- **PlatziCourse**: [*platzi*](## "netrc machine")
|
- **PlatziCourse**: [*platzi*](## "netrc machine")
|
||||||
- **player.sky.it**
|
- **player.sky.it**
|
||||||
|
- **playeur**
|
||||||
- **PlayPlusTV**: [*playplustv*](## "netrc machine")
|
- **PlayPlusTV**: [*playplustv*](## "netrc machine")
|
||||||
- **PlayStuff**
|
- **PlaySuisse**: [*playsuisse*](## "netrc machine")
|
||||||
- **PlaySuisse**
|
|
||||||
- **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
|
- **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
|
||||||
- **PlayVids**
|
- **PlayVids**
|
||||||
- **Playwire**
|
- **Playwire**
|
||||||
- **pluralsight**: [*pluralsight*](## "netrc machine")
|
- **pluralsight**: [*pluralsight*](## "netrc machine")
|
||||||
- **pluralsight:course**
|
- **pluralsight:course**
|
||||||
- **PlutoTV**
|
- **PlutoTV**: (**Currently broken**)
|
||||||
- **PodbayFM**
|
- **PodbayFM**
|
||||||
- **PodbayFMChannel**
|
- **PodbayFMChannel**
|
||||||
- **Podchaser**
|
- **Podchaser**
|
||||||
- **podomatic**
|
- **podomatic**: (**Currently broken**)
|
||||||
- **Pokemon**
|
- **Pokemon**
|
||||||
- **PokemonWatch**
|
- **PokemonWatch**
|
||||||
- **PokerGo**: [*pokergo*](## "netrc machine")
|
- **PokerGo**: [*pokergo*](## "netrc machine")
|
||||||
|
@ -1085,15 +1100,16 @@ # Supported sites
|
||||||
- **PornHubUser**: [*pornhub*](## "netrc machine")
|
- **PornHubUser**: [*pornhub*](## "netrc machine")
|
||||||
- **PornHubUserVideosUpload**: [*pornhub*](## "netrc machine")
|
- **PornHubUserVideosUpload**: [*pornhub*](## "netrc machine")
|
||||||
- **Pornotube**
|
- **Pornotube**
|
||||||
- **PornoVoisines**
|
- **PornoVoisines**: (**Currently broken**)
|
||||||
- **PornoXO**
|
- **PornoXO**: (**Currently broken**)
|
||||||
- **PornTop**
|
- **PornTop**
|
||||||
- **PornTube**
|
- **PornTube**
|
||||||
- **Pr0gramm**
|
- **Pr0gramm**
|
||||||
- **PrankCast**
|
- **PrankCast**
|
||||||
|
- **PrankCastPost**
|
||||||
- **PremiershipRugby**
|
- **PremiershipRugby**
|
||||||
- **PressTV**
|
- **PressTV**
|
||||||
- **ProjectVeritas**
|
- **ProjectVeritas**: (**Currently broken**)
|
||||||
- **prosiebensat1**: ProSiebenSat.1 Digital
|
- **prosiebensat1**: ProSiebenSat.1 Digital
|
||||||
- **PRXAccount**
|
- **PRXAccount**
|
||||||
- **PRXSeries**
|
- **PRXSeries**
|
||||||
|
@ -1115,11 +1131,11 @@ # Supported sites
|
||||||
- **QuantumTVLive**: [*quantumtv*](## "netrc machine")
|
- **QuantumTVLive**: [*quantumtv*](## "netrc machine")
|
||||||
- **QuantumTVRecordings**: [*quantumtv*](## "netrc machine")
|
- **QuantumTVRecordings**: [*quantumtv*](## "netrc machine")
|
||||||
- **Qub**
|
- **Qub**
|
||||||
- **R7**
|
- **R7**: (**Currently broken**)
|
||||||
- **R7Article**
|
- **R7Article**: (**Currently broken**)
|
||||||
- **Radiko**
|
- **Radiko**
|
||||||
- **RadikoRadio**
|
- **RadikoRadio**
|
||||||
- **radio.de**
|
- **radio.de**: (**Currently broken**)
|
||||||
- **radiocanada**
|
- **radiocanada**
|
||||||
- **radiocanada:audiovideo**
|
- **radiocanada:audiovideo**
|
||||||
- **RadioComercial**
|
- **RadioComercial**
|
||||||
|
@ -1129,7 +1145,7 @@ # Supported sites
|
||||||
- **RadioFrancePodcast**
|
- **RadioFrancePodcast**
|
||||||
- **RadioFranceProfile**
|
- **RadioFranceProfile**
|
||||||
- **RadioFranceProgramSchedule**
|
- **RadioFranceProgramSchedule**
|
||||||
- **RadioJavan**
|
- **RadioJavan**: (**Currently broken**)
|
||||||
- **radiokapital**
|
- **radiokapital**
|
||||||
- **radiokapital:show**
|
- **radiokapital:show**
|
||||||
- **RadioZetPodcast**
|
- **RadioZetPodcast**
|
||||||
|
@ -1151,33 +1167,34 @@ # Supported sites
|
||||||
- **RbgTum**
|
- **RbgTum**
|
||||||
- **RbgTumCourse**
|
- **RbgTumCourse**
|
||||||
- **RbgTumNewCourse**
|
- **RbgTumNewCourse**
|
||||||
- **RBMARadio**
|
|
||||||
- **RCS**
|
- **RCS**
|
||||||
- **RCSEmbeds**
|
- **RCSEmbeds**
|
||||||
- **RCSVarious**
|
- **RCSVarious**
|
||||||
- **RCTIPlus**
|
- **RCTIPlus**
|
||||||
- **RCTIPlusSeries**
|
- **RCTIPlusSeries**
|
||||||
- **RCTIPlusTV**
|
- **RCTIPlusTV**
|
||||||
- **RDS**: RDS.ca
|
- **RDS**: RDS.ca (**Currently broken**)
|
||||||
- **RedBull**
|
- **RedBull**
|
||||||
- **RedBullEmbed**
|
- **RedBullEmbed**
|
||||||
- **RedBullTV**
|
- **RedBullTV**
|
||||||
- **RedBullTVRrnContent**
|
- **RedBullTVRrnContent**
|
||||||
|
- **redcdnlivx**
|
||||||
- **Reddit**: [*reddit*](## "netrc machine")
|
- **Reddit**: [*reddit*](## "netrc machine")
|
||||||
- **RedGifs**
|
- **RedGifs**
|
||||||
- **RedGifsSearch**: Redgifs search
|
- **RedGifsSearch**: Redgifs search
|
||||||
- **RedGifsUser**: Redgifs user
|
- **RedGifsUser**: Redgifs user
|
||||||
- **RedTube**
|
- **RedTube**
|
||||||
- **RegioTV**
|
- **RENTV**: (**Currently broken**)
|
||||||
- **RENTV**
|
- **RENTVArticle**: (**Currently broken**)
|
||||||
- **RENTVArticle**
|
- **Restudy**: (**Currently broken**)
|
||||||
- **Restudy**
|
- **Reuters**: (**Currently broken**)
|
||||||
- **Reuters**
|
|
||||||
- **ReverbNation**
|
- **ReverbNation**
|
||||||
- **RheinMainTV**
|
- **RheinMainTV**
|
||||||
|
- **RideHome**
|
||||||
- **RinseFM**
|
- **RinseFM**
|
||||||
|
- **RinseFMArtistPlaylist**
|
||||||
- **RMCDecouverte**
|
- **RMCDecouverte**
|
||||||
- **RockstarGames**
|
- **RockstarGames**: (**Currently broken**)
|
||||||
- **Rokfin**: [*rokfin*](## "netrc machine")
|
- **Rokfin**: [*rokfin*](## "netrc machine")
|
||||||
- **rokfin:channel**: Rokfin Channels
|
- **rokfin:channel**: Rokfin Channels
|
||||||
- **rokfin:search**: Rokfin Search; "rkfnsearch:" prefix
|
- **rokfin:search**: Rokfin Search; "rkfnsearch:" prefix
|
||||||
|
@ -1187,7 +1204,7 @@ # Supported sites
|
||||||
- **RottenTomatoes**
|
- **RottenTomatoes**
|
||||||
- **Rozhlas**
|
- **Rozhlas**
|
||||||
- **RozhlasVltava**
|
- **RozhlasVltava**
|
||||||
- **RTBF**: [*rtbf*](## "netrc machine")
|
- **RTBF**: [*rtbf*](## "netrc machine") (**Currently broken**)
|
||||||
- **RTDocumentry**
|
- **RTDocumentry**
|
||||||
- **RTDocumentryPlaylist**
|
- **RTDocumentryPlaylist**
|
||||||
- **rte**: Raidió Teilifís Éireann TV
|
- **rte**: Raidió Teilifís Éireann TV
|
||||||
|
@ -1201,7 +1218,7 @@ # Supported sites
|
||||||
- **RTNews**
|
- **RTNews**
|
||||||
- **RTP**
|
- **RTP**
|
||||||
- **RTRFM**
|
- **RTRFM**
|
||||||
- **RTS**: RTS.ch
|
- **RTS**: RTS.ch (**Currently broken**)
|
||||||
- **RTVCKaltura**
|
- **RTVCKaltura**
|
||||||
- **RTVCPlay**
|
- **RTVCPlay**
|
||||||
- **RTVCPlayEmbed**
|
- **RTVCPlayEmbed**
|
||||||
|
@ -1234,7 +1251,7 @@ # Supported sites
|
||||||
- **safari**: [*safari*](## "netrc machine") safaribooksonline.com online video
|
- **safari**: [*safari*](## "netrc machine") safaribooksonline.com online video
|
||||||
- **safari:api**: [*safari*](## "netrc machine")
|
- **safari:api**: [*safari*](## "netrc machine")
|
||||||
- **safari:course**: [*safari*](## "netrc machine") safaribooksonline.com online courses
|
- **safari:course**: [*safari*](## "netrc machine") safaribooksonline.com online courses
|
||||||
- **Saitosan**
|
- **Saitosan**: (**Currently broken**)
|
||||||
- **SAKTV**: [*saktv*](## "netrc machine")
|
- **SAKTV**: [*saktv*](## "netrc machine")
|
||||||
- **SAKTVLive**: [*saktv*](## "netrc machine")
|
- **SAKTVLive**: [*saktv*](## "netrc machine")
|
||||||
- **SAKTVRecordings**: [*saktv*](## "netrc machine")
|
- **SAKTVRecordings**: [*saktv*](## "netrc machine")
|
||||||
|
@ -1244,7 +1261,6 @@ # Supported sites
|
||||||
- **SampleFocus**
|
- **SampleFocus**
|
||||||
- **Sangiin**: 参議院インターネット審議中継 (archive)
|
- **Sangiin**: 参議院インターネット審議中継 (archive)
|
||||||
- **Sapo**: SAPO Vídeos
|
- **Sapo**: SAPO Vídeos
|
||||||
- **savefrom.net**
|
|
||||||
- **SBS**: sbs.com.au
|
- **SBS**: sbs.com.au
|
||||||
- **sbs.co.kr**
|
- **sbs.co.kr**
|
||||||
- **sbs.co.kr:allvod_program**
|
- **sbs.co.kr:allvod_program**
|
||||||
|
@ -1261,13 +1277,13 @@ # Supported sites
|
||||||
- **Scrolller**
|
- **Scrolller**
|
||||||
- **SCTE**: [*scte*](## "netrc machine") (**Currently broken**)
|
- **SCTE**: [*scte*](## "netrc machine") (**Currently broken**)
|
||||||
- **SCTECourse**: [*scte*](## "netrc machine") (**Currently broken**)
|
- **SCTECourse**: [*scte*](## "netrc machine") (**Currently broken**)
|
||||||
- **Seeker**
|
- **sejm**
|
||||||
- **SenalColombiaLive**
|
- **SenalColombiaLive**: (**Currently broken**)
|
||||||
- **SenateGov**
|
- **SenateGov**
|
||||||
- **SenateISVP**
|
- **SenateISVP**
|
||||||
- **SendtoNews**
|
- **SendtoNews**: (**Currently broken**)
|
||||||
- **Servus**
|
- **Servus**
|
||||||
- **Sexu**
|
- **Sexu**: (**Currently broken**)
|
||||||
- **SeznamZpravy**
|
- **SeznamZpravy**
|
||||||
- **SeznamZpravyArticle**
|
- **SeznamZpravyArticle**
|
||||||
- **Shahid**: [*shahid*](## "netrc machine")
|
- **Shahid**: [*shahid*](## "netrc machine")
|
||||||
|
@ -1289,9 +1305,9 @@ # Supported sites
|
||||||
- **sky:news:story**
|
- **sky:news:story**
|
||||||
- **sky:sports**
|
- **sky:sports**
|
||||||
- **sky:sports:news**
|
- **sky:sports:news**
|
||||||
- **SkylineWebcams**
|
- **SkylineWebcams**: (**Currently broken**)
|
||||||
- **skynewsarabia:article**
|
- **skynewsarabia:article**: (**Currently broken**)
|
||||||
- **skynewsarabia:video**
|
- **skynewsarabia:video**: (**Currently broken**)
|
||||||
- **SkyNewsAU**
|
- **SkyNewsAU**
|
||||||
- **Slideshare**
|
- **Slideshare**
|
||||||
- **SlidesLive**
|
- **SlidesLive**
|
||||||
|
@ -1342,7 +1358,7 @@ # Supported sites
|
||||||
- **StacommuVOD**: [*stacommu*](## "netrc machine")
|
- **StacommuVOD**: [*stacommu*](## "netrc machine")
|
||||||
- **StagePlusVODConcert**: [*stageplus*](## "netrc machine")
|
- **StagePlusVODConcert**: [*stageplus*](## "netrc machine")
|
||||||
- **stanfordoc**: Stanford Open ClassRoom
|
- **stanfordoc**: Stanford Open ClassRoom
|
||||||
- **StarTrek**
|
- **StarTrek**: (**Currently broken**)
|
||||||
- **startv**
|
- **startv**
|
||||||
- **Steam**
|
- **Steam**
|
||||||
- **SteamCommunityBroadcast**
|
- **SteamCommunityBroadcast**
|
||||||
|
@ -1353,7 +1369,6 @@ # Supported sites
|
||||||
- **StoryFireUser**
|
- **StoryFireUser**
|
||||||
- **Streamable**
|
- **Streamable**
|
||||||
- **StreamCZ**
|
- **StreamCZ**
|
||||||
- **StreamFF**
|
|
||||||
- **StreetVoice**
|
- **StreetVoice**
|
||||||
- **StretchInternet**
|
- **StretchInternet**
|
||||||
- **Stripchat**
|
- **Stripchat**
|
||||||
|
@ -1367,22 +1382,21 @@ # Supported sites
|
||||||
- **SVTPlay**: SVT Play and Öppet arkiv
|
- **SVTPlay**: SVT Play and Öppet arkiv
|
||||||
- **SVTSeries**
|
- **SVTSeries**
|
||||||
- **SwearnetEpisode**
|
- **SwearnetEpisode**
|
||||||
- **Syfy**
|
- **Syfy**: (**Currently broken**)
|
||||||
- **SYVDK**
|
- **SYVDK**
|
||||||
- **SztvHu**
|
- **SztvHu**
|
||||||
- **t-online.de**
|
- **t-online.de**: (**Currently broken**)
|
||||||
- **Tagesschau**
|
- **Tagesschau**: (**Currently broken**)
|
||||||
- **Tass**
|
- **Tass**: (**Currently broken**)
|
||||||
- **TBS**
|
- **TBS**
|
||||||
- **TBSJPEpisode**
|
- **TBSJPEpisode**
|
||||||
- **TBSJPPlaylist**
|
- **TBSJPPlaylist**
|
||||||
- **TBSJPProgram**
|
- **TBSJPProgram**
|
||||||
- **TDSLifeway**
|
- **Teachable**: [*teachable*](## "netrc machine") (**Currently broken**)
|
||||||
- **Teachable**: [*teachable*](## "netrc machine")
|
|
||||||
- **TeachableCourse**: [*teachable*](## "netrc machine")
|
- **TeachableCourse**: [*teachable*](## "netrc machine")
|
||||||
- **teachertube**: teachertube.com videos
|
- **teachertube**: teachertube.com videos (**Currently broken**)
|
||||||
- **teachertube:user:collection**: teachertube.com user and collection videos
|
- **teachertube:user:collection**: teachertube.com user and collection videos (**Currently broken**)
|
||||||
- **TeachingChannel**
|
- **TeachingChannel**: (**Currently broken**)
|
||||||
- **Teamcoco**
|
- **Teamcoco**
|
||||||
- **TeamTreeHouse**: [*teamtreehouse*](## "netrc machine")
|
- **TeamTreeHouse**: [*teamtreehouse*](## "netrc machine")
|
||||||
- **techtv.mit.edu**
|
- **techtv.mit.edu**
|
||||||
|
@ -1391,20 +1405,20 @@ # Supported sites
|
||||||
- **TedSeries**
|
- **TedSeries**
|
||||||
- **TedTalk**
|
- **TedTalk**
|
||||||
- **Tele13**
|
- **Tele13**
|
||||||
- **Tele5**
|
- **Tele5**: (**Currently broken**)
|
||||||
- **TeleBruxelles**
|
- **TeleBruxelles**
|
||||||
- **TelecaribePlay**
|
- **TelecaribePlay**
|
||||||
- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
|
- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
|
||||||
- **Telegraaf**
|
- **Telegraaf**
|
||||||
- **telegram:embed**
|
- **telegram:embed**
|
||||||
- **TeleMB**
|
- **TeleMB**: (**Currently broken**)
|
||||||
- **Telemundo**
|
- **Telemundo**: (**Currently broken**)
|
||||||
- **TeleQuebec**
|
- **TeleQuebec**
|
||||||
- **TeleQuebecEmission**
|
- **TeleQuebecEmission**
|
||||||
- **TeleQuebecLive**
|
- **TeleQuebecLive**
|
||||||
- **TeleQuebecSquat**
|
- **TeleQuebecSquat**
|
||||||
- **TeleQuebecVideo**
|
- **TeleQuebecVideo**
|
||||||
- **TeleTask**
|
- **TeleTask**: (**Currently broken**)
|
||||||
- **Telewebion**
|
- **Telewebion**
|
||||||
- **Tempo**
|
- **Tempo**
|
||||||
- **TennisTV**: [*tennistv*](## "netrc machine")
|
- **TennisTV**: [*tennistv*](## "netrc machine")
|
||||||
|
@ -1458,6 +1472,7 @@ # Supported sites
|
||||||
- **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
|
- **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
|
||||||
- **TrovoVod**
|
- **TrovoVod**
|
||||||
- **TrtCocukVideo**
|
- **TrtCocukVideo**
|
||||||
|
- **TrtWorld**
|
||||||
- **TrueID**
|
- **TrueID**
|
||||||
- **TruNews**
|
- **TruNews**
|
||||||
- **Truth**
|
- **Truth**
|
||||||
|
@ -1471,7 +1486,6 @@ # Supported sites
|
||||||
- **TuneInPodcast**
|
- **TuneInPodcast**
|
||||||
- **TuneInPodcastEpisode**
|
- **TuneInPodcastEpisode**
|
||||||
- **TuneInStation**
|
- **TuneInStation**
|
||||||
- **Turbo**
|
|
||||||
- **tv.dfb.de**
|
- **tv.dfb.de**
|
||||||
- **TV2**
|
- **TV2**
|
||||||
- **TV2Article**
|
- **TV2Article**
|
||||||
|
@ -1493,8 +1507,8 @@ # Supported sites
|
||||||
- **tvigle**: Интернет-телевидение Tvigle.ru
|
- **tvigle**: Интернет-телевидение Tvigle.ru
|
||||||
- **TVIPlayer**
|
- **TVIPlayer**
|
||||||
- **tvland.com**
|
- **tvland.com**
|
||||||
- **TVN24**
|
- **TVN24**: (**Currently broken**)
|
||||||
- **TVNoe**
|
- **TVNoe**: (**Currently broken**)
|
||||||
- **tvopengr:embed**: tvopen.gr embedded videos
|
- **tvopengr:embed**: tvopen.gr embedded videos
|
||||||
- **tvopengr:watch**: tvopen.gr (and ethnos.gr) videos
|
- **tvopengr:watch**: tvopen.gr (and ethnos.gr) videos
|
||||||
- **tvp**: Telewizja Polska
|
- **tvp**: Telewizja Polska
|
||||||
|
@ -1527,15 +1541,15 @@ # Supported sites
|
||||||
- **UDNEmbed**: 聯合影音
|
- **UDNEmbed**: 聯合影音
|
||||||
- **UFCArabia**: [*ufcarabia*](## "netrc machine")
|
- **UFCArabia**: [*ufcarabia*](## "netrc machine")
|
||||||
- **UFCTV**: [*ufctv*](## "netrc machine")
|
- **UFCTV**: [*ufctv*](## "netrc machine")
|
||||||
- **ukcolumn**
|
- **ukcolumn**: (**Currently broken**)
|
||||||
- **UKTVPlay**
|
- **UKTVPlay**
|
||||||
- **umg:de**: Universal Music Deutschland
|
- **umg:de**: Universal Music Deutschland (**Currently broken**)
|
||||||
- **Unistra**
|
- **Unistra**
|
||||||
- **Unity**
|
- **Unity**: (**Currently broken**)
|
||||||
- **uol.com.br**
|
- **uol.com.br**
|
||||||
- **uplynk**
|
- **uplynk**
|
||||||
- **uplynk:preplay**
|
- **uplynk:preplay**
|
||||||
- **Urort**: NRK P3 Urørt
|
- **Urort**: NRK P3 Urørt (**Currently broken**)
|
||||||
- **URPlay**
|
- **URPlay**
|
||||||
- **USANetwork**
|
- **USANetwork**
|
||||||
- **USAToday**
|
- **USAToday**
|
||||||
|
@ -1543,13 +1557,12 @@ # Supported sites
|
||||||
- **ustream:channel**
|
- **ustream:channel**
|
||||||
- **ustudio**
|
- **ustudio**
|
||||||
- **ustudio:embed**
|
- **ustudio:embed**
|
||||||
- **Utreon**
|
- **Varzesh3**: (**Currently broken**)
|
||||||
- **Varzesh3**
|
|
||||||
- **Vbox7**
|
- **Vbox7**
|
||||||
- **Veo**
|
- **Veo**
|
||||||
- **Veoh**
|
- **Veoh**
|
||||||
- **veoh:user**
|
- **veoh:user**
|
||||||
- **Vesti**: Вести.Ru
|
- **Vesti**: Вести.Ru (**Currently broken**)
|
||||||
- **Vevo**
|
- **Vevo**
|
||||||
- **VevoPlaylist**
|
- **VevoPlaylist**
|
||||||
- **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
|
- **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
|
||||||
|
@ -1565,7 +1578,7 @@ # Supported sites
|
||||||
- **video.sky.it**
|
- **video.sky.it**
|
||||||
- **video.sky.it:live**
|
- **video.sky.it:live**
|
||||||
- **VideoDetective**
|
- **VideoDetective**
|
||||||
- **videofy.me**
|
- **videofy.me**: (**Currently broken**)
|
||||||
- **VideoKen**
|
- **VideoKen**
|
||||||
- **VideoKenCategory**
|
- **VideoKenCategory**
|
||||||
- **VideoKenPlayer**
|
- **VideoKenPlayer**
|
||||||
|
@ -1601,7 +1614,8 @@ # Supported sites
|
||||||
- **ViMP:Playlist**
|
- **ViMP:Playlist**
|
||||||
- **Vine**
|
- **Vine**
|
||||||
- **vine:user**
|
- **vine:user**
|
||||||
- **Viqeo**
|
- **Viously**
|
||||||
|
- **Viqeo**: (**Currently broken**)
|
||||||
- **Viu**
|
- **Viu**
|
||||||
- **viu:ott**: [*viu*](## "netrc machine")
|
- **viu:ott**: [*viu*](## "netrc machine")
|
||||||
- **viu:playlist**
|
- **viu:playlist**
|
||||||
|
@ -1615,8 +1629,8 @@ # Supported sites
|
||||||
- **Vocaroo**
|
- **Vocaroo**
|
||||||
- **VODPl**
|
- **VODPl**
|
||||||
- **VODPlatform**
|
- **VODPlatform**
|
||||||
- **voicy**
|
- **voicy**: (**Currently broken**)
|
||||||
- **voicy:channel**
|
- **voicy:channel**: (**Currently broken**)
|
||||||
- **VolejTV**
|
- **VolejTV**
|
||||||
- **Voot**: [*voot*](## "netrc machine") (**Currently broken**)
|
- **Voot**: [*voot*](## "netrc machine") (**Currently broken**)
|
||||||
- **VootSeries**: [*voot*](## "netrc machine") (**Currently broken**)
|
- **VootSeries**: [*voot*](## "netrc machine") (**Currently broken**)
|
||||||
|
@ -1627,7 +1641,7 @@ # Supported sites
|
||||||
- **vqq:video**
|
- **vqq:video**
|
||||||
- **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
|
- **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
|
||||||
- **VrtNU**: [*vrtnu*](## "netrc machine") VRT MAX
|
- **VrtNU**: [*vrtnu*](## "netrc machine") VRT MAX
|
||||||
- **VTM**
|
- **VTM**: (**Currently broken**)
|
||||||
- **VTXTV**: [*vtxtv*](## "netrc machine")
|
- **VTXTV**: [*vtxtv*](## "netrc machine")
|
||||||
- **VTXTVLive**: [*vtxtv*](## "netrc machine")
|
- **VTXTVLive**: [*vtxtv*](## "netrc machine")
|
||||||
- **VTXTVRecordings**: [*vtxtv*](## "netrc machine")
|
- **VTXTVRecordings**: [*vtxtv*](## "netrc machine")
|
||||||
|
@ -1638,9 +1652,6 @@ # Supported sites
|
||||||
- **WalyTV**: [*walytv*](## "netrc machine")
|
- **WalyTV**: [*walytv*](## "netrc machine")
|
||||||
- **WalyTVLive**: [*walytv*](## "netrc machine")
|
- **WalyTVLive**: [*walytv*](## "netrc machine")
|
||||||
- **WalyTVRecordings**: [*walytv*](## "netrc machine")
|
- **WalyTVRecordings**: [*walytv*](## "netrc machine")
|
||||||
- **wasdtv:clip**
|
|
||||||
- **wasdtv:record**
|
|
||||||
- **wasdtv:stream**
|
|
||||||
- **washingtonpost**
|
- **washingtonpost**
|
||||||
- **washingtonpost:article**
|
- **washingtonpost:article**
|
||||||
- **wat.tv**
|
- **wat.tv**
|
||||||
|
@ -1658,7 +1669,7 @@ # Supported sites
|
||||||
- **Weibo**
|
- **Weibo**
|
||||||
- **WeiboUser**
|
- **WeiboUser**
|
||||||
- **WeiboVideo**
|
- **WeiboVideo**
|
||||||
- **WeiqiTV**: WQTV
|
- **WeiqiTV**: WQTV (**Currently broken**)
|
||||||
- **wetv:episode**
|
- **wetv:episode**
|
||||||
- **WeTvSeries**
|
- **WeTvSeries**
|
||||||
- **Weverse**: [*weverse*](## "netrc machine")
|
- **Weverse**: [*weverse*](## "netrc machine")
|
||||||
|
@ -1703,8 +1714,8 @@ # Supported sites
|
||||||
- **XHamsterUser**
|
- **XHamsterUser**
|
||||||
- **ximalaya**: 喜马拉雅FM
|
- **ximalaya**: 喜马拉雅FM
|
||||||
- **ximalaya:album**: 喜马拉雅FM 专辑
|
- **ximalaya:album**: 喜马拉雅FM 专辑
|
||||||
- **xinpianchang**: xinpianchang.com
|
- **xinpianchang**: xinpianchang.com (**Currently broken**)
|
||||||
- **XMinus**
|
- **XMinus**: (**Currently broken**)
|
||||||
- **XNXX**
|
- **XNXX**
|
||||||
- **Xstream**
|
- **Xstream**
|
||||||
- **XVideos**
|
- **XVideos**
|
||||||
|
@ -1720,8 +1731,8 @@ # Supported sites
|
||||||
- **yandexmusic:track**: Яндекс.Музыка - Трек
|
- **yandexmusic:track**: Яндекс.Музыка - Трек
|
||||||
- **YandexVideo**
|
- **YandexVideo**
|
||||||
- **YandexVideoPreview**
|
- **YandexVideoPreview**
|
||||||
- **YapFiles**
|
- **YapFiles**: (**Currently broken**)
|
||||||
- **Yappy**
|
- **Yappy**: (**Currently broken**)
|
||||||
- **YappyProfile**
|
- **YappyProfile**
|
||||||
- **YleAreena**
|
- **YleAreena**
|
||||||
- **YouJizz**
|
- **YouJizz**
|
||||||
|
@ -1762,9 +1773,11 @@ # Supported sites
|
||||||
- **ZDFChannel**
|
- **ZDFChannel**
|
||||||
- **Zee5**: [*zee5*](## "netrc machine")
|
- **Zee5**: [*zee5*](## "netrc machine")
|
||||||
- **zee5:series**
|
- **zee5:series**
|
||||||
- **ZeeNews**
|
- **ZeeNews**: (**Currently broken**)
|
||||||
|
- **ZenPorn**
|
||||||
- **ZenYandex**
|
- **ZenYandex**
|
||||||
- **ZenYandexChannel**
|
- **ZenYandexChannel**
|
||||||
|
- **ZetlandDKArticle**
|
||||||
- **Zhihu**
|
- **Zhihu**
|
||||||
- **zingmp3**: zingmp3.vn
|
- **zingmp3**: zingmp3.vn
|
||||||
- **zingmp3:album**
|
- **zingmp3:album**
|
||||||
|
|
|
@@ -45,7 +45,7 @@ def test_lazy_extractors(self):
         self.assertTrue(os.path.exists(LAZY_EXTRACTORS))

         _, stderr = self.run_yt_dlp(opts=('-s', 'test:'))
-        # `MIN_RECOMMENDED` emits a deprecated feature warning for deprecated python versions
+        # `MIN_RECOMMENDED` emits a deprecated feature warning for deprecated Python versions
         if stderr and stderr.startswith('Deprecated Feature: Support for Python'):
             stderr = ''
         self.assertFalse(stderr)

@@ -27,9 +27,10 @@
 from email.message import Message
 from http.cookiejar import CookieJar

+from test.conftest import validate_and_send
 from test.helper import FakeYDL, http_server_port, verify_address_availability
 from yt_dlp.cookies import YoutubeDLCookieJar
-from yt_dlp.dependencies import brotli, requests, urllib3
+from yt_dlp.dependencies import brotli, curl_cffi, requests, urllib3
 from yt_dlp.networking import (
     HEADRequest,
     PUTRequest,
@@ -50,10 +51,13 @@
     TransportError,
     UnsupportedRequest,
 )
+from yt_dlp.networking.impersonate import (
+    ImpersonateRequestHandler,
+    ImpersonateTarget,
+)
+from yt_dlp.utils import YoutubeDLError
 from yt_dlp.utils._utils import _YDLLogger as FakeLogger
-from yt_dlp.utils.networking import HTTPHeaderDict
+from yt_dlp.utils.networking import HTTPHeaderDict, std_headers

-from test.conftest import validate_and_send

 TEST_DIR = os.path.dirname(os.path.abspath(__file__))
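Note: the new `curl_cffi` dependency imported above backs the `CurlCFFI` request handler that the tests below are parametrized over. For context, curl_cffi's standalone API looks roughly like this (a sketch, assuming a recent curl_cffi release; the set of impersonation target names varies by version):

    # Sketch only: curl_cffi used directly, outside yt-dlp's handler wrapper.
    from curl_cffi import requests

    # Unlike urllib/requests, the TLS and HTTP/2 fingerprint of this request
    # mimics a real browser build instead of a Python HTTP stack.
    r = requests.get('https://example.com', impersonate='chrome110')
    print(r.status_code)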
@@ -69,12 +73,13 @@ def do_GET(self):
             self.send_response(200)
             self.send_header('Content-Type', 'text/plain; charset=utf-8')
             self.end_headers()
-            self.wfile.write('{self.proxy_name}: {self.path}'.format(self=self).encode())
+            self.wfile.write(f'{self.proxy_name}: {self.path}'.encode())
     return HTTPTestRequestHandler


 class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
     protocol_version = 'HTTP/1.1'
+    default_request_version = 'HTTP/1.1'

     def log_message(self, format, *args):
         pass
@@ -112,6 +117,8 @@ def _status(self, status):
     def _read_data(self):
         if 'Content-Length' in self.headers:
             return self.rfile.read(int(self.headers['Content-Length']))
+        else:
+            return b''

     def do_POST(self):
         data = self._read_data() + str(self.headers).encode()
@@ -195,7 +202,8 @@ def do_GET(self):
             self._headers()
         elif self.path.startswith('/308-to-headers'):
             self.send_response(308)
-            self.send_header('Location', '/headers')
+            # redirect to "localhost" for testing cookie redirection handling
+            self.send_header('Location', f'http://localhost:{self.connection.getsockname()[1]}/headers')
             self.send_header('Content-Length', '0')
             self.end_headers()
         elif self.path == '/trailing_garbage':
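Note: the redirect target above deliberately moves from `127.0.0.1` to `localhost`, a different host name on the same loopback interface, so the cookie tests further down can observe whether an explicitly passed `Cookie` header leaks across a redirect. The expected behaviour can be sketched with a hypothetical helper (not part of the codebase):

    def headers_for_redirect(request_headers):
        # The tests expect an explicitly passed Cookie header to be dropped on
        # any redirect; redirecting to a different host name makes a leak
        # observable even for handlers that only strip cookies cross-host.
        return {k: v for k, v in request_headers.items() if k.lower() != 'cookie'}

    assert 'Cookie' not in headers_for_redirect({'Cookie': 'test=test2', 'User-Agent': 'yt-dlp'})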
@@ -310,7 +318,7 @@ def setup_class(cls):


 class TestHTTPRequestHandler(TestRequestHandlerBase):
-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_verify_cert(self, handler):
         with handler() as rh:
             with pytest.raises(CertificateVerifyError):
@@ -321,7 +329,7 @@ def test_verify_cert(self, handler):
             assert r.status == 200
             r.close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_ssl_error(self, handler):
         # HTTPS server with too old TLS version
         # XXX: is there a better way to test this than to create a new server?
@@ -335,11 +343,11 @@ def test_ssl_error(self, handler):
         https_server_thread.start()

         with handler(verify=False) as rh:
-            with pytest.raises(SSLError, match=r'ssl(?:v3|/tls) alert handshake failure') as exc_info:
+            with pytest.raises(SSLError, match=r'(?i)ssl(?:v3|/tls).alert.handshake.failure') as exc_info:
                 validate_and_send(rh, Request(f'https://127.0.0.1:{https_port}/headers'))
             assert not issubclass(exc_info.type, CertificateVerifyError)

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_percent_encode(self, handler):
         with handler() as rh:
             # Unicode characters should be encoded with uppercase percent-encoding
@@ -351,7 +359,7 @@ def test_percent_encode(self, handler):
             assert res.status == 200
             res.close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     @pytest.mark.parametrize('path', [
         '/a/b/./../../headers',
         '/redirect_dotsegments',
@@ -367,6 +375,7 @@ def test_remove_dot_segments(self, handler, path):
             assert res.url == f'http://127.0.0.1:{self.http_port}/headers'
             res.close()

+    # Not supported by CurlCFFI (non-standard)
     @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_unicode_path_redirection(self, handler):
         with handler() as rh:
@@ -374,7 +383,7 @@ def test_unicode_path_redirection(self, handler):
             assert r.url == f'http://127.0.0.1:{self.http_port}/%E4%B8%AD%E6%96%87.html'
             r.close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
    def test_raise_http_error(self, handler):
         with handler() as rh:
             for bad_status in (400, 500, 599, 302):
@@ -384,7 +393,7 @@ def test_raise_http_error(self, handler):
             # Should not raise an error
             validate_and_send(rh, Request('http://127.0.0.1:%d/gen_200' % self.http_port)).close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_response_url(self, handler):
         with handler() as rh:
             # Response url should be that of the last url in redirect chain
@@ -395,62 +404,50 @@ def test_response_url(self, handler):
             assert res2.url == f'http://127.0.0.1:{self.http_port}/gen_200'
             res2.close()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
-    def test_redirect(self, handler):
+    # Covers some basic cases we expect some level of consistency between request handlers for
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
+    @pytest.mark.parametrize('redirect_status,method,expected', [
+        # A 303 must either use GET or HEAD for subsequent request
+        (303, 'POST', ('', 'GET', False)),
+        (303, 'HEAD', ('', 'HEAD', False)),
+
+        # 301 and 302 turn POST only into a GET
+        (301, 'POST', ('', 'GET', False)),
+        (301, 'HEAD', ('', 'HEAD', False)),
+        (302, 'POST', ('', 'GET', False)),
+        (302, 'HEAD', ('', 'HEAD', False)),
+
+        # 307 and 308 should not change method
+        (307, 'POST', ('testdata', 'POST', True)),
+        (308, 'POST', ('testdata', 'POST', True)),
+        (307, 'HEAD', ('', 'HEAD', False)),
+        (308, 'HEAD', ('', 'HEAD', False)),
+    ])
+    def test_redirect(self, handler, redirect_status, method, expected):
         with handler() as rh:
-            def do_req(redirect_status, method, assert_no_content=False):
-                data = b'testdata' if method in ('POST', 'PUT') else None
-                res = validate_and_send(
-                    rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_{redirect_status}', method=method, data=data))
+            data = b'testdata' if method == 'POST' else None
+            headers = {}
+            if data is not None:
+                headers['Content-Type'] = 'application/test'
+            res = validate_and_send(
+                rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_{redirect_status}', method=method, data=data,
+                            headers=headers))

-                headers = b''
-                data_sent = b''
-                if data is not None:
-                    data_sent += res.read(len(data))
-                    if data_sent != data:
-                        headers += data_sent
-                        data_sent = b''
+            headers = b''
+            data_recv = b''
+            if data is not None:
+                data_recv += res.read(len(data))
+                if data_recv != data:
+                    headers += data_recv
+                    data_recv = b''

-                headers += res.read()
+            headers += res.read()

-                if assert_no_content or data is None:
-                    assert b'Content-Type' not in headers
-                    assert b'Content-Length' not in headers
-                else:
-                    assert b'Content-Type' in headers
-                    assert b'Content-Length' in headers
-
-                return data_sent.decode(), res.headers.get('method', '')
-
-            # A 303 must either use GET or HEAD for subsequent request
-            assert do_req(303, 'POST', True) == ('', 'GET')
-            assert do_req(303, 'HEAD') == ('', 'HEAD')
-
-            assert do_req(303, 'PUT', True) == ('', 'GET')
-
-            # 301 and 302 turn POST only into a GET
-            assert do_req(301, 'POST', True) == ('', 'GET')
-            assert do_req(301, 'HEAD') == ('', 'HEAD')
-            assert do_req(302, 'POST', True) == ('', 'GET')
-            assert do_req(302, 'HEAD') == ('', 'HEAD')
-
-            assert do_req(301, 'PUT') == ('testdata', 'PUT')
-            assert do_req(302, 'PUT') == ('testdata', 'PUT')
-
-            # 307 and 308 should not change method
-            for m in ('POST', 'PUT'):
-                assert do_req(307, m) == ('testdata', m)
-                assert do_req(308, m) == ('testdata', m)
-
-            assert do_req(307, 'HEAD') == ('', 'HEAD')
-            assert do_req(308, 'HEAD') == ('', 'HEAD')
-
-            # These should not redirect and instead raise an HTTPError
-            for code in (300, 304, 305, 306):
-                with pytest.raises(HTTPError):
-                    do_req(code, 'GET')
+            assert expected[0] == data_recv.decode()
+            assert expected[1] == res.headers.get('method')
+            assert expected[2] == ('content-length' in headers.decode().lower())

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
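Note: the rewrite above replaces the ad-hoc `do_req` helper with a parametrized table of (status, method, expected) cases. The expected method mapping follows standard HTTP redirect semantics, which can be sketched as (illustrative only):

    def redirected_method(status, method):
        # 303 See Other: everything except HEAD becomes GET.
        # 301/302: historically, only POST is rewritten to GET.
        # 307/308: the method (and request body) must be preserved.
        if status == 303:
            return method if method == 'HEAD' else 'GET'
        if status in (301, 302) and method == 'POST':
            return 'GET'
        return method

    assert redirected_method(303, 'POST') == 'GET'
    assert redirected_method(301, 'HEAD') == 'HEAD'
    assert redirected_method(308, 'POST') == 'POST'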
     def test_request_cookie_header(self, handler):
         # We should accept a Cookie header being passed as in normal headers and handle it appropriately.
         with handler() as rh:
@@ -459,16 +456,17 @@ def test_request_cookie_header(self, handler):
                 rh, Request(
                     f'http://127.0.0.1:{self.http_port}/headers',
                     headers={'Cookie': 'test=test'})).read().decode()
-            assert 'Cookie: test=test' in res
+            assert 'cookie: test=test' in res.lower()

             # Specified Cookie header should be removed on any redirect
             res = validate_and_send(
                 rh, Request(
                     f'http://127.0.0.1:{self.http_port}/308-to-headers',
-                    headers={'Cookie': 'test=test'})).read().decode()
-            assert 'Cookie: test=test' not in res
+                    headers={'Cookie': 'test=test2'})).read().decode()
+            assert 'cookie: test=test2' not in res.lower()

         # Specified Cookie header should override global cookiejar for that request
+        # Whether cookies from the cookiejar is applied on the redirect is considered undefined for now
         cookiejar = YoutubeDLCookieJar()
         cookiejar.set_cookie(http.cookiejar.Cookie(
             version=0, name='test', value='ytdlp', port=None, port_specified=False,
@@ -478,23 +476,23 @@ def test_request_cookie_header(self, handler):

         with handler(cookiejar=cookiejar) as rh:
             data = validate_and_send(
-                rh, Request(f'http://127.0.0.1:{self.http_port}/headers', headers={'cookie': 'test=test'})).read()
+                rh, Request(f'http://127.0.0.1:{self.http_port}/headers', headers={'cookie': 'test=test3'})).read()
-            assert b'Cookie: test=ytdlp' not in data
+            assert b'cookie: test=ytdlp' not in data.lower()
-            assert b'Cookie: test=test' in data
+            assert b'cookie: test=test3' in data.lower()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_redirect_loop(self, handler):
         with handler() as rh:
             with pytest.raises(HTTPError, match='redirect loop'):
                 validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/redirect_loop'))

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_incompleteread(self, handler):
         with handler(timeout=2) as rh:
-            with pytest.raises(IncompleteRead):
+            with pytest.raises(IncompleteRead, match='13 bytes read, 234221 more expected'):
                 validate_and_send(rh, Request('http://127.0.0.1:%d/incompleteread' % self.http_port)).read()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_cookies(self, handler):
         cookiejar = YoutubeDLCookieJar()
         cookiejar.set_cookie(http.cookiejar.Cookie(
@@ -503,47 +501,66 @@ def test_cookies(self, handler):

         with handler(cookiejar=cookiejar) as rh:
             data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).read()
-            assert b'Cookie: test=ytdlp' in data
+            assert b'cookie: test=ytdlp' in data.lower()

         # Per request
         with handler() as rh:
             data = validate_and_send(
                 rh, Request(f'http://127.0.0.1:{self.http_port}/headers', extensions={'cookiejar': cookiejar})).read()
-            assert b'Cookie: test=ytdlp' in data
+            assert b'cookie: test=ytdlp' in data.lower()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_headers(self, handler):

         with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:
             # Global Headers
-            data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).read()
+            data = validate_and_send(rh, Request(f'http://127.0.0.1:{self.http_port}/headers')).read().lower()
-            assert b'Test1: test' in data
+            assert b'test1: test' in data

             # Per request headers, merged with global
             data = validate_and_send(rh, Request(
-                f'http://127.0.0.1:{self.http_port}/headers', headers={'test2': 'changed', 'test3': 'test3'})).read()
+                f'http://127.0.0.1:{self.http_port}/headers', headers={'test2': 'changed', 'test3': 'test3'})).read().lower()
-            assert b'Test1: test' in data
+            assert b'test1: test' in data
-            assert b'Test2: changed' in data
+            assert b'test2: changed' in data
-            assert b'Test2: test2' not in data
+            assert b'test2: test2' not in data
-            assert b'Test3: test3' in data
+            assert b'test3: test3' in data

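Note: the assertions above are lower-cased because some backends (notably HTTP/2-capable ones) normalize header-name casing on the wire. The merge behaviour itself, handler-level defaults overlaid by per-request headers, can be sketched as (illustrative only; the handlers use `HTTPHeaderDict` for this):

    def merged_headers(global_headers, request_headers):
        # Header names are case-insensitive; per-request values win over
        # handler-level defaults.
        merged = {k.lower(): v for k, v in global_headers.items()}
        merged.update((k.lower(), v) for k, v in request_headers.items())
        return merged

    assert merged_headers(
        {'test1': 'test', 'test2': 'test2'},
        {'test2': 'changed', 'test3': 'test3'},
    ) == {'test1': 'test', 'test2': 'changed', 'test3': 'test3'}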
@pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
|
@pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
|
||||||
def test_timeout(self, handler):
|
def test_read_timeout(self, handler):
|
||||||
with handler() as rh:
|
with handler() as rh:
|
||||||
# Default timeout is 20 seconds, so this should go through
|
# Default timeout is 20 seconds, so this should go through
|
||||||
validate_and_send(
|
validate_and_send(
|
||||||
rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_3'))
|
rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1'))
|
||||||
|
|
||||||
with handler(timeout=0.5) as rh:
|
with handler(timeout=0.1) as rh:
|
||||||
with pytest.raises(TransportError):
|
with pytest.raises(TransportError):
|
||||||
validate_and_send(
|
validate_and_send(
|
||||||
rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1'))
|
rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_5'))
|
||||||
|
|
||||||
# Per request timeout, should override handler timeout
|
# Per request timeout, should override handler timeout
|
||||||
validate_and_send(
|
validate_and_send(
|
||||||
rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1', extensions={'timeout': 4}))
|
rh, Request(f'http://127.0.0.1:{self.http_port}/timeout_1', extensions={'timeout': 4}))
|
||||||
|
|
||||||
@pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
|
@pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
|
||||||
|
def test_connect_timeout(self, handler):
|
||||||
|
# nothing should be listening on this port
|
||||||
|
connect_timeout_url = 'http://10.255.255.255'
|
||||||
|
with handler(timeout=0.01) as rh:
|
||||||
|
now = time.time()
|
||||||
|
with pytest.raises(TransportError):
|
||||||
|
validate_and_send(
|
||||||
|
rh, Request(connect_timeout_url))
|
||||||
|
assert 0.01 <= time.time() - now < 20
|
||||||
|
|
||||||
|
with handler() as rh:
|
||||||
|
with pytest.raises(TransportError):
|
||||||
|
# Per request timeout, should override handler timeout
|
||||||
|
now = time.time()
|
||||||
|
validate_and_send(
|
||||||
|
rh, Request(connect_timeout_url, extensions={'timeout': 0.01}))
|
||||||
|
assert 0.01 <= time.time() - now < 20
|
||||||
|
|
||||||
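The connect-timeout test added here relies on 10.255.255.255 acting as a blackhole: packets to it are typically dropped rather than refused, so connect() blocks until the timeout fires. A standalone sketch of the same timing check, standard library only (illustrative; not part of this commit):

    import socket
    import time

    start = time.time()
    try:
        # a blackhole address: connect() should block until the timeout fires
        socket.create_connection(('10.255.255.255', 80), timeout=0.01)
    except OSError:
        pass
    # mirrors the test's bounds: slower than the timeout, far faster than the default
    assert 0.01 <= time.time() - start < 20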
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_source_address(self, handler):
         source_address = f'127.0.0.{random.randint(5, 255)}'
         # on some systems these loopback addresses we need for testing may not be available
@@ -554,6 +571,7 @@ def test_source_address(self, handler):
             rh, Request(f'http://127.0.0.1:{self.http_port}/source_address')).read().decode()
         assert source_address == data

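As the comment above notes, loopback aliases other than 127.0.0.1 are not bindable everywhere (Linux routes all of 127.0.0.0/8 to loopback; some other systems configure only 127.0.0.1). A small probe for this, as an illustrative sketch:

    import socket

    def loopback_alias_available(address):
        # bind() fails with EADDRNOTAVAIL when the alias is not configured
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                sock.bind((address, 0))
            return True
        except OSError:
            return False

    print(loopback_alias_available('127.0.0.42'))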
+    # Not supported by CurlCFFI
     @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_gzip_trailing_garbage(self, handler):
         with handler() as rh:
@@ -571,7 +589,7 @@ def test_brotli(self, handler):
             assert res.headers.get('Content-Encoding') == 'br'
             assert res.read() == b'<html><video src="/vid.mp4" /></html>'

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_deflate(self, handler):
         with handler() as rh:
             res = validate_and_send(
@@ -581,7 +599,7 @@ def test_deflate(self, handler):
             assert res.headers.get('Content-Encoding') == 'deflate'
             assert res.read() == b'<html><video src="/vid.mp4" /></html>'

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_gzip(self, handler):
         with handler() as rh:
             res = validate_and_send(
@@ -591,7 +609,7 @@ def test_gzip(self, handler):
             assert res.headers.get('Content-Encoding') == 'gzip'
             assert res.read() == b'<html><video src="/vid.mp4" /></html>'

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_multiple_encodings(self, handler):
         with handler() as rh:
             for pair in ('gzip,deflate', 'deflate, gzip', 'gzip, gzip', 'deflate, deflate'):
@@ -602,17 +620,18 @@ def test_multiple_encodings(self, handler):
                 assert res.headers.get('Content-Encoding') == pair
                 assert res.read() == b'<html><video src="/vid.mp4" /></html>'

+    # Not supported by curl_cffi
     @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
     def test_unsupported_encoding(self, handler):
         with handler() as rh:
             res = validate_and_send(
                 rh, Request(
                     f'http://127.0.0.1:{self.http_port}/content-encoding',
-                    headers={'ytdl-encoding': 'unsupported'}))
+                    headers={'ytdl-encoding': 'unsupported', 'Accept-Encoding': '*'}))
             assert res.headers.get('Content-Encoding') == 'unsupported'
             assert res.read() == b'raw'

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_read(self, handler):
         with handler() as rh:
             res = validate_and_send(
@@ -620,9 +639,12 @@ def test_read(self, handler):
             assert res.readable()
             assert res.read(1) == b'H'
             assert res.read(3) == b'ost'
+            assert res.read().decode().endswith('\n\n')
+            assert res.read() == b''


 class TestHTTPProxy(TestRequestHandlerBase):
+    # Note: this only tests http urls over non-CONNECT proxy
     @classmethod
     def setup_class(cls):
         super().setup_class()
@@ -642,7 +664,7 @@ def setup_class(cls):
         cls.geo_proxy_thread.daemon = True
         cls.geo_proxy_thread.start()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_http_proxy(self, handler):
         http_proxy = f'http://127.0.0.1:{self.proxy_port}'
         geo_proxy = f'http://127.0.0.1:{self.geo_port}'
@@ -668,7 +690,7 @@ def test_http_proxy(self, handler):
             assert res != f'normal: {real_url}'
             assert 'Accept' in res

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_noproxy(self, handler):
         with handler(proxies={'proxy': f'http://127.0.0.1:{self.proxy_port}'}) as rh:
             # NO_PROXY
@@ -678,7 +700,7 @@ def test_noproxy(self, handler):
                 'utf-8')
             assert 'Accept' in nop_response

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_allproxy(self, handler):
         url = 'http://foo.com/bar'
         with handler() as rh:
@@ -686,7 +708,7 @@ def test_allproxy(self, handler):
                 'utf-8')
             assert response == f'normal: {url}'

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_http_proxy_with_idn(self, handler):
         with handler(proxies={
             'http': f'http://127.0.0.1:{self.proxy_port}',
@@ -698,7 +720,6 @@ def test_http_proxy_with_idn(self, handler):


 class TestClientCertificate:

     @classmethod
     def setup_class(cls):
         certfn = os.path.join(TEST_DIR, 'testcert.pem')
@@ -724,27 +745,27 @@ def _run_test(self, handler, **handler_kwargs):
         ) as rh:
             validate_and_send(rh, Request(f'https://127.0.0.1:{self.port}/video.html')).read().decode()

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_certificate_combined_nopass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'clientwithkey.crt'),
         })

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_certificate_nocombined_nopass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'client.crt'),
             'client_certificate_key': os.path.join(self.certdir, 'client.key'),
         })

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_certificate_combined_pass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'clientwithencryptedkey.crt'),
             'client_certificate_password': 'foobar',
         })

-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_certificate_nocombined_pass(self, handler):
         self._run_test(handler, client_cert={
             'client_certificate': os.path.join(self.certdir, 'client.crt'),
@@ -753,6 +774,18 @@ def test_certificate_nocombined_pass(self, handler):
         })


+@pytest.mark.parametrize('handler', ['CurlCFFI'], indirect=True)
+class TestHTTPImpersonateRequestHandler(TestRequestHandlerBase):
+    def test_supported_impersonate_targets(self, handler):
+        with handler(headers=std_headers) as rh:
+            # note: this assumes the impersonate request handler supports the impersonate extension
+            for target in rh.supported_targets:
+                res = validate_and_send(rh, Request(
+                    f'http://127.0.0.1:{self.http_port}/headers', extensions={'impersonate': target}))
+                assert res.status == 200
+                assert std_headers['user-agent'].lower() not in res.read().decode().lower()
+
+
 class TestRequestHandlerMisc:
     """Misc generic tests for request handlers, not related to request or validation testing"""
     @pytest.mark.parametrize('handler,logger_name', [
@@ -931,6 +964,172 @@ def mock_close(*args, **kwargs):
         assert called


+@pytest.mark.parametrize('handler', ['CurlCFFI'], indirect=True)
+class TestCurlCFFIRequestHandler(TestRequestHandlerBase):
+
+    @pytest.mark.parametrize('params,extensions', [
+        ({}, {'impersonate': ImpersonateTarget('chrome')}),
+        ({'impersonate': ImpersonateTarget('chrome', '110')}, {}),
+        ({'impersonate': ImpersonateTarget('chrome', '99')}, {'impersonate': ImpersonateTarget('chrome', '110')}),
+    ])
+    def test_impersonate(self, handler, params, extensions):
+        with handler(headers=std_headers, **params) as rh:
+            res = validate_and_send(
+                rh, Request(f'http://127.0.0.1:{self.http_port}/headers', extensions=extensions)).read().decode()
+            assert 'sec-ch-ua: "Chromium";v="110"' in res
+            # Check that user agent is added over ours
+            assert 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36' in res
+
+    def test_headers(self, handler):
+        with handler(headers=std_headers) as rh:
+            # Ensure curl-impersonate overrides our standard headers (usually added
+            res = validate_and_send(
+                rh, Request(f'http://127.0.0.1:{self.http_port}/headers', extensions={
+                    'impersonate': ImpersonateTarget('safari')}, headers={'x-custom': 'test', 'sec-fetch-mode': 'custom'})).read().decode().lower()
+
+            assert std_headers['user-agent'].lower() not in res
+            assert std_headers['accept-language'].lower() not in res
+            assert std_headers['sec-fetch-mode'].lower() not in res
+            # other than UA, custom headers that differ from std_headers should be kept
+            assert 'sec-fetch-mode: custom' in res
+            assert 'x-custom: test' in res
+            # but when not impersonating don't remove std_headers
+            res = validate_and_send(
+                rh, Request(f'http://127.0.0.1:{self.http_port}/headers', headers={'x-custom': 'test'})).read().decode().lower()
+            # std_headers should be present
+            for k, v in std_headers.items():
+                assert f'{k}: {v}'.lower() in res
+
+    @pytest.mark.parametrize('raised,expected,match', [
+        (lambda: curl_cffi.requests.errors.RequestsError(
+            '', code=curl_cffi.const.CurlECode.PARTIAL_FILE), IncompleteRead, None),
+        (lambda: curl_cffi.requests.errors.RequestsError(
+            '', code=curl_cffi.const.CurlECode.OPERATION_TIMEDOUT), TransportError, None),
+        (lambda: curl_cffi.requests.errors.RequestsError(
+            '', code=curl_cffi.const.CurlECode.RECV_ERROR), TransportError, None),
+    ])
+    def test_response_error_mapping(self, handler, monkeypatch, raised, expected, match):
+        import curl_cffi.requests
+
+        from yt_dlp.networking._curlcffi import CurlCFFIResponseAdapter
+        curl_res = curl_cffi.requests.Response()
+        res = CurlCFFIResponseAdapter(curl_res)
+
+        def mock_read(*args, **kwargs):
+            try:
+                raise raised()
+            except Exception as e:
+                e.response = curl_res
+                raise
+        monkeypatch.setattr(res.fp, 'read', mock_read)
+
+        with pytest.raises(expected, match=match) as exc_info:
+            res.read()
+
+        assert exc_info.type is expected
+
+    @pytest.mark.parametrize('raised,expected,match', [
+        (lambda: curl_cffi.requests.errors.RequestsError(
+            '', code=curl_cffi.const.CurlECode.OPERATION_TIMEDOUT), TransportError, None),
+        (lambda: curl_cffi.requests.errors.RequestsError(
+            '', code=curl_cffi.const.CurlECode.PEER_FAILED_VERIFICATION), CertificateVerifyError, None),
+        (lambda: curl_cffi.requests.errors.RequestsError(
+            '', code=curl_cffi.const.CurlECode.SSL_CONNECT_ERROR), SSLError, None),
+        (lambda: curl_cffi.requests.errors.RequestsError(
+            '', code=curl_cffi.const.CurlECode.TOO_MANY_REDIRECTS), HTTPError, None),
+        (lambda: curl_cffi.requests.errors.RequestsError(
+            '', code=curl_cffi.const.CurlECode.PROXY), ProxyError, None),
+    ])
+    def test_request_error_mapping(self, handler, monkeypatch, raised, expected, match):
+        import curl_cffi.requests
+        curl_res = curl_cffi.requests.Response()
+        curl_res.status_code = 301

+        with handler() as rh:
+            original_get_instance = rh._get_instance
+
+            def mock_get_instance(*args, **kwargs):
+                instance = original_get_instance(*args, **kwargs)
+
+                def request(*_, **__):
+                    try:
+                        raise raised()
+                    except Exception as e:
+                        e.response = curl_res
+                        raise
+                monkeypatch.setattr(instance, 'request', request)
+                return instance
+
+            monkeypatch.setattr(rh, '_get_instance', mock_get_instance)
+
+            with pytest.raises(expected) as exc_info:
+                rh.send(Request('http://fake'))
+
+            assert exc_info.type is expected

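Taken together, the two mapping tests above pin down how libcurl error codes surface as yt-dlp networking exceptions. Collected in one place (only the codes exercised by these tests; the real handler may map more):

    import curl_cffi.const

    from yt_dlp.networking.exceptions import (
        CertificateVerifyError,
        HTTPError,
        IncompleteRead,
        ProxyError,
        SSLError,
        TransportError,
    )

    # CurlECode -> exception type, as asserted by the tests above
    CURL_ERROR_MAP = {
        curl_cffi.const.CurlECode.PARTIAL_FILE: IncompleteRead,
        curl_cffi.const.CurlECode.OPERATION_TIMEDOUT: TransportError,
        curl_cffi.const.CurlECode.RECV_ERROR: TransportError,
        curl_cffi.const.CurlECode.PEER_FAILED_VERIFICATION: CertificateVerifyError,
        curl_cffi.const.CurlECode.SSL_CONNECT_ERROR: SSLError,
        curl_cffi.const.CurlECode.TOO_MANY_REDIRECTS: HTTPError,
        curl_cffi.const.CurlECode.PROXY: ProxyError,
    }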
+    def test_response_reader(self, handler):
+        class FakeResponse:
+            def __init__(self, raise_error=False):
+                self.raise_error = raise_error
+                self.closed = False
+
+            def iter_content(self):
+                yield b'foo'
+                yield b'bar'
+                yield b'z'
+                if self.raise_error:
+                    raise Exception('test')
+
+            def close(self):
+                self.closed = True
+
+        from yt_dlp.networking._curlcffi import CurlCFFIResponseReader
+
+        res = CurlCFFIResponseReader(FakeResponse())
+        assert res.readable
+        assert res.bytes_read == 0
+        assert res.read(1) == b'f'
+        assert res.bytes_read == 3
+        assert res._buffer == b'oo'
+
+        assert res.read(2) == b'oo'
+        assert res.bytes_read == 3
+        assert res._buffer == b''
+
+        assert res.read(2) == b'ba'
+        assert res.bytes_read == 6
+        assert res._buffer == b'r'
+
+        assert res.read(3) == b'rz'
+        assert res.bytes_read == 7
+        assert res._buffer == b''
+        assert res.closed
+        assert res._response.closed
+
+        # should handle no size param
+        res2 = CurlCFFIResponseReader(FakeResponse())
+        assert res2.read() == b'foobarz'
+        assert res2.bytes_read == 7
+        assert res2._buffer == b''
+        assert res2.closed
+
+        # should close on an exception
+        res3 = CurlCFFIResponseReader(FakeResponse(raise_error=True))
+        with pytest.raises(Exception, match='test'):
+            res3.read()
+        assert res3._buffer == b''
+        assert res3.bytes_read == 7
+        assert res3.closed
+
+        # buffer should be cleared on close
+        res4 = CurlCFFIResponseReader(FakeResponse())
+        res4.read(2)
+        assert res4._buffer == b'o'
+        res4.close()
+        assert res4.closed
+        assert res4._buffer == b''


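The reader assertions above encode a chunked-buffer contract: read() pulls whole chunks from iter_content(), returns at most size bytes, and keeps the remainder buffered, so bytes_read counts chunks consumed rather than bytes handed back. A generic sketch of that pattern (illustrative; not the actual CurlCFFIResponseReader implementation):

    class BufferedChunkReader:
        def __init__(self, chunks):
            self._it = iter(chunks)
            self._buffer = b''
            self.bytes_read = 0

        def read(self, size=None):
            # pull whole chunks until the request can be satisfied (or data runs out)
            while size is None or len(self._buffer) < size:
                chunk = next(self._it, None)
                if chunk is None:
                    break
                self._buffer += chunk
                self.bytes_read += len(chunk)
            data = self._buffer[:size]
            self._buffer = self._buffer[len(data):]
            return data

    reader = BufferedChunkReader([b'foo', b'bar', b'z'])
    assert reader.read(1) == b'f' and reader.bytes_read == 3   # whole chunk consumed
    assert reader.read(2) == b'oo' and reader.bytes_read == 3  # served from the buffer
    assert reader.read() == b'barz' and reader.bytes_read == 7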
 def run_validation(handler, error, req, **handler_kwargs):
     with handler(**handler_kwargs) as rh:
         if error:
@@ -975,6 +1174,10 @@ class HTTPSupportedRH(ValidationRH):
             ('ws', False, {}),
             ('wss', False, {}),
         ]),
+        ('CurlCFFI', [
+            ('http', False, {}),
+            ('https', False, {}),
+        ]),
         (NoCheckRH, [('http', False, {})]),
         (ValidationRH, [('http', UnsupportedRequest, {})])
     ]
@@ -998,6 +1201,14 @@ class HTTPSupportedRH(ValidationRH):
             ('socks5', False),
             ('socks5h', False),
         ]),
+        ('CurlCFFI', 'http', [
+            ('http', False),
+            ('https', False),
+            ('socks4', False),
+            ('socks4a', False),
+            ('socks5', False),
+            ('socks5h', False),
+        ]),
         (NoCheckRH, 'http', [('http', False)]),
         (HTTPSupportedRH, 'http', [('http', UnsupportedRequest)]),
         ('Websockets', 'ws', [('http', UnsupportedRequest)]),
@@ -1015,6 +1226,10 @@ class HTTPSupportedRH(ValidationRH):
             ('all', False),
             ('unrelated', False),
         ]),
+        ('CurlCFFI', [
+            ('all', False),
+            ('unrelated', False),
+        ]),
         (NoCheckRH, [('all', False)]),
         (HTTPSupportedRH, [('all', UnsupportedRequest)]),
         (HTTPSupportedRH, [('no', UnsupportedRequest)]),
@@ -1036,6 +1251,19 @@ class HTTPSupportedRH(ValidationRH):
             ({'timeout': 'notatimeout'}, AssertionError),
             ({'unsupported': 'value'}, UnsupportedRequest),
         ]),
+        ('CurlCFFI', 'http', [
+            ({'cookiejar': 'notacookiejar'}, AssertionError),
+            ({'cookiejar': YoutubeDLCookieJar()}, False),
+            ({'timeout': 1}, False),
+            ({'timeout': 'notatimeout'}, AssertionError),
+            ({'unsupported': 'value'}, UnsupportedRequest),
+            ({'impersonate': ImpersonateTarget('badtarget', None, None, None)}, UnsupportedRequest),
+            ({'impersonate': 123}, AssertionError),
+            ({'impersonate': ImpersonateTarget('chrome', None, None, None)}, False),
+            ({'impersonate': ImpersonateTarget(None, None, None, None)}, False),
+            ({'impersonate': ImpersonateTarget()}, False),
+            ({'impersonate': 'chrome'}, AssertionError)
+        ]),
         (NoCheckRH, 'http', [
             ({'cookiejar': 'notacookiejar'}, False),
             ({'somerandom': 'test'}, False),  # but any extension is allowed through
@@ -1055,7 +1283,7 @@ class HTTPSupportedRH(ValidationRH):
     def test_url_scheme(self, handler, scheme, fail, handler_kwargs):
         run_validation(handler, fail, Request(f'{scheme}://'), **(handler_kwargs or {}))

-    @pytest.mark.parametrize('handler,fail', [('Urllib', False), ('Requests', False)], indirect=['handler'])
+    @pytest.mark.parametrize('handler,fail', [('Urllib', False), ('Requests', False), ('CurlCFFI', False)], indirect=['handler'])
     def test_no_proxy(self, handler, fail):
         run_validation(handler, fail, Request('http://', proxies={'no': '127.0.0.1,github.com'}))
         run_validation(handler, fail, Request('http://'), proxies={'no': '127.0.0.1,github.com'})
@@ -1078,13 +1306,13 @@ def test_proxy_scheme(self, handler, req_scheme, scheme, fail):
         run_validation(handler, fail, Request(f'{req_scheme}://', proxies={req_scheme: f'{scheme}://example.com'}))
         run_validation(handler, fail, Request(f'{req_scheme}://'), proxies={req_scheme: f'{scheme}://example.com'})

-    @pytest.mark.parametrize('handler', ['Urllib', HTTPSupportedRH, 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', HTTPSupportedRH, 'Requests', 'CurlCFFI'], indirect=True)
     def test_empty_proxy(self, handler):
         run_validation(handler, False, Request('http://', proxies={'http': None}))
         run_validation(handler, False, Request('http://'), proxies={'http': None})

     @pytest.mark.parametrize('proxy_url', ['//example.com', 'example.com', '127.0.0.1', '/a/b/c'])
-    @pytest.mark.parametrize('handler', ['Urllib', 'Requests'], indirect=True)
+    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
     def test_invalid_proxy_url(self, handler, proxy_url):
         run_validation(handler, UnsupportedRequest, Request('http://', proxies={'http': proxy_url}))

@@ -1113,6 +1341,10 @@ def __init__(self, request):

 class FakeRH(RequestHandler):

+    def __init__(self, *args, **params):
+        self.params = params
+        super().__init__(*args, **params)
+
     def _validate(self, request):
         return

@@ -1271,15 +1503,10 @@ def test_compat_opener(self):
         ('', {'all': '__noproxy__'}),
         (None, {'http': 'http://127.0.0.1:8081', 'https': 'http://127.0.0.1:8081'})  # env, set https
     ])
-    def test_proxy(self, proxy, expected):
-        old_http_proxy = os.environ.get('HTTP_PROXY')
-        try:
-            os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8081'  # ensure that provided proxies override env
-            with FakeYDL({'proxy': proxy}) as ydl:
-                assert ydl.proxies == expected
-        finally:
-            if old_http_proxy:
-                os.environ['HTTP_PROXY'] = old_http_proxy
+    def test_proxy(self, proxy, expected, monkeypatch):
+        monkeypatch.setenv('HTTP_PROXY', 'http://127.0.0.1:8081')
+        with FakeYDL({'proxy': proxy}) as ydl:
+            assert ydl.proxies == expected

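The rewrite above (and the matching one in test_clean_proxy further down) swaps a hand-rolled save/set/restore of environment variables for pytest's monkeypatch fixture, which records the old value and undoes setenv at teardown; it also fixes a leak in the old version, where an initially unset variable was never removed. A minimal standalone illustration:

    import os

    def test_proxy_env_is_isolated(monkeypatch):
        monkeypatch.setenv('HTTP_PROXY', 'http://127.0.0.1:8081')
        assert os.environ['HTTP_PROXY'] == 'http://127.0.0.1:8081'
        # pytest restores (or removes) HTTP_PROXY after the test; no try/finally needed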
     def test_compat_request(self):
         with FakeRHYDL() as ydl:
@@ -1331,6 +1558,95 @@ def test_legacy_server_connect_error(self):
             with pytest.raises(SSLError, match='testerror'):
                 ydl.urlopen('ssl://testerror')

+    def test_unsupported_impersonate_target(self):
+        class FakeImpersonationRHYDL(FakeYDL):
+            def __init__(self, *args, **kwargs):
+                class HTTPRH(RequestHandler):
+                    def _send(self, request: Request):
+                        pass
+                    _SUPPORTED_URL_SCHEMES = ('http',)
+                    _SUPPORTED_PROXY_SCHEMES = None
+
+                super().__init__(*args, **kwargs)
+                self._request_director = self.build_request_director([HTTPRH])
+
+        with FakeImpersonationRHYDL() as ydl:
+            with pytest.raises(
+                RequestError,
+                match=r'Impersonate target "test" is not available'
+            ):
+                ydl.urlopen(Request('http://', extensions={'impersonate': ImpersonateTarget('test', None, None, None)}))
+
+    def test_unsupported_impersonate_extension(self):
+        class FakeHTTPRHYDL(FakeYDL):
+            def __init__(self, *args, **kwargs):
+                class IRH(ImpersonateRequestHandler):
+                    def _send(self, request: Request):
+                        pass
+
+                    _SUPPORTED_URL_SCHEMES = ('http',)
+                    _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget('abc',): 'test'}
+                    _SUPPORTED_PROXY_SCHEMES = None
+
+                super().__init__(*args, **kwargs)
+                self._request_director = self.build_request_director([IRH])
+
+        with FakeHTTPRHYDL() as ydl:
+            with pytest.raises(
+                RequestError,
+                match=r'Impersonate target "test" is not available'
+            ):
+                ydl.urlopen(Request('http://', extensions={'impersonate': ImpersonateTarget('test', None, None, None)}))
+
+    def test_raise_impersonate_error(self):
+        with pytest.raises(
+            YoutubeDLError,
+            match=r'Impersonate target "test" is not available'
+        ):
+            FakeYDL({'impersonate': ImpersonateTarget('test', None, None, None)})
+
+    def test_pass_impersonate_param(self, monkeypatch):
+
+        class IRH(ImpersonateRequestHandler):
+            def _send(self, request: Request):
+                pass
+
+            _SUPPORTED_URL_SCHEMES = ('http',)
+            _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget('abc'): 'test'}
+
+        # Bypass the check on initialize
+        brh = FakeYDL.build_request_director
+        monkeypatch.setattr(FakeYDL, 'build_request_director', lambda cls, handlers, preferences=None: brh(cls, handlers=[IRH]))
+
+        with FakeYDL({
+            'impersonate': ImpersonateTarget('abc', None, None, None)
+        }) as ydl:
+            rh = self.build_handler(ydl, IRH)
+            assert rh.impersonate == ImpersonateTarget('abc', None, None, None)
+
+    def test_get_impersonate_targets(self):
+        handlers = []
+        for target_client in ('abc', 'xyz', 'asd'):
+            class TestRH(ImpersonateRequestHandler):
+                def _send(self, request: Request):
+                    pass
+                _SUPPORTED_URL_SCHEMES = ('http',)
+                _SUPPORTED_IMPERSONATE_TARGET_MAP = {ImpersonateTarget(target_client,): 'test'}
+                RH_KEY = target_client
+                RH_NAME = target_client
+            handlers.append(TestRH)
+
+        with FakeYDL() as ydl:
+            ydl._request_director = ydl.build_request_director(handlers)
+            assert set(ydl._get_available_impersonate_targets()) == {
+                (ImpersonateTarget('xyz'), 'xyz'),
+                (ImpersonateTarget('abc'), 'abc'),
+                (ImpersonateTarget('asd'), 'asd')
+            }
+            assert ydl._impersonate_target_available(ImpersonateTarget('abc'))
+            assert ydl._impersonate_target_available(ImpersonateTarget())
+            assert not ydl._impersonate_target_available(ImpersonateTarget('zxy'))
+
     @pytest.mark.parametrize('proxy_key,proxy_url,expected', [
         ('http', '__noproxy__', None),
         ('no', '127.0.0.1,foo.bar', '127.0.0.1,foo.bar'),
@@ -1341,23 +1657,17 @@ def test_legacy_server_connect_error(self):
         ('http', 'socks4://example.com', 'socks4://example.com'),
         ('unrelated', '/bad/proxy', '/bad/proxy'),  # clean_proxies should ignore bad proxies
     ])
-    def test_clean_proxy(self, proxy_key, proxy_url, expected):
+    def test_clean_proxy(self, proxy_key, proxy_url, expected, monkeypatch):
         # proxies should be cleaned in urlopen()
         with FakeRHYDL() as ydl:
             req = ydl.urlopen(Request('test://', proxies={proxy_key: proxy_url})).request
             assert req.proxies[proxy_key] == expected

         # and should also be cleaned when building the handler
-        env_key = f'{proxy_key.upper()}_PROXY'
-        old_env_proxy = os.environ.get(env_key)
-        try:
-            os.environ[env_key] = proxy_url  # ensure that provided proxies override env
-            with FakeYDL() as ydl:
-                rh = self.build_handler(ydl)
-                assert rh.proxies[proxy_key] == expected
-        finally:
-            if old_env_proxy:
-                os.environ[env_key] = old_env_proxy
+        monkeypatch.setenv(f'{proxy_key.upper()}_PROXY', proxy_url)
+        with FakeYDL() as ydl:
+            rh = self.build_handler(ydl)
+            assert rh.proxies[proxy_key] == expected

     def test_clean_proxy_header(self):
         with FakeRHYDL() as ydl:
@@ -1629,3 +1939,71 @@ def test_compat(self):
         assert res.geturl() == res.url
         assert res.info() is res.headers
         assert res.getheader('test') == res.get_header('test')
+
+
+class TestImpersonateTarget:
+    @pytest.mark.parametrize('target_str,expected', [
+        ('abc', ImpersonateTarget('abc', None, None, None)),
+        ('abc-120_esr', ImpersonateTarget('abc', '120_esr', None, None)),
+        ('abc-120:xyz', ImpersonateTarget('abc', '120', 'xyz', None)),
+        ('abc-120:xyz-5.6', ImpersonateTarget('abc', '120', 'xyz', '5.6')),
+        ('abc:xyz', ImpersonateTarget('abc', None, 'xyz', None)),
+        ('abc:', ImpersonateTarget('abc', None, None, None)),
+        ('abc-120:', ImpersonateTarget('abc', '120', None, None)),
+        (':xyz', ImpersonateTarget(None, None, 'xyz', None)),
+        (':xyz-6.5', ImpersonateTarget(None, None, 'xyz', '6.5')),
+        (':', ImpersonateTarget(None, None, None, None)),
+        ('', ImpersonateTarget(None, None, None, None)),
+    ])
+    def test_target_from_str(self, target_str, expected):
+        assert ImpersonateTarget.from_str(target_str) == expected
+
+    @pytest.mark.parametrize('target_str', [
+        '-120', ':-12.0', '-12:-12', '-:-',
+        '::', 'a-c-d:', 'a-c-d:e-f-g', 'a:b:'
+    ])
+    def test_target_from_invalid_str(self, target_str):
+        with pytest.raises(ValueError):
+            ImpersonateTarget.from_str(target_str)
+
+    @pytest.mark.parametrize('target,expected', [
+        (ImpersonateTarget('abc', None, None, None), 'abc'),
+        (ImpersonateTarget('abc', '120', None, None), 'abc-120'),
+        (ImpersonateTarget('abc', '120', 'xyz', None), 'abc-120:xyz'),
+        (ImpersonateTarget('abc', '120', 'xyz', '5'), 'abc-120:xyz-5'),
+        (ImpersonateTarget('abc', None, 'xyz', None), 'abc:xyz'),
+        (ImpersonateTarget('abc', '120', None, None), 'abc-120'),
+        (ImpersonateTarget('abc', '120', 'xyz', None), 'abc-120:xyz'),
+        (ImpersonateTarget('abc', None, 'xyz'), 'abc:xyz'),
+        (ImpersonateTarget(None, None, 'xyz', '6.5'), ':xyz-6.5'),
+        (ImpersonateTarget('abc', ), 'abc'),
+        (ImpersonateTarget(None, None, None, None), ''),
+    ])
+    def test_str(self, target, expected):
+        assert str(target) == expected
+
+    @pytest.mark.parametrize('args', [
+        ('abc', None, None, '5'),
+        ('abc', '120', None, '5'),
+        (None, '120', None, None),
+        (None, '120', None, '5'),
+        (None, None, None, '5'),
+        (None, '120', 'xyz', '5'),
+    ])
+    def test_invalid_impersonate_target(self, args):
+        with pytest.raises(ValueError):
+            ImpersonateTarget(*args)
+
+    @pytest.mark.parametrize('target1,target2,is_in,is_eq', [
+        (ImpersonateTarget('abc', None, None, None), ImpersonateTarget('abc', None, None, None), True, True),
+        (ImpersonateTarget('abc', None, None, None), ImpersonateTarget('abc', '120', None, None), True, False),
+        (ImpersonateTarget('abc', None, 'xyz', 'test'), ImpersonateTarget('abc', '120', 'xyz', None), True, False),
+        (ImpersonateTarget('abc', '121', 'xyz', 'test'), ImpersonateTarget('abc', '120', 'xyz', 'test'), False, False),
+        (ImpersonateTarget('abc'), ImpersonateTarget('abc', '120', 'xyz', 'test'), True, False),
+        (ImpersonateTarget('abc', '120', 'xyz', 'test'), ImpersonateTarget('abc'), True, False),
+        (ImpersonateTarget(), ImpersonateTarget('abc', '120', 'xyz'), True, False),
+        (ImpersonateTarget(), ImpersonateTarget(), True, True),
+    ])
+    def test_impersonate_target_in(self, target1, target2, is_in, is_eq):
+        assert (target1 in target2) is is_in
+        assert (target1 == target2) is is_eq
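The TestImpersonateTarget cases above fix the target string grammar as CLIENT[-VERSION][:OS[-OS_VERSION]], with the `in` operator treating two targets as matching unless an explicitly-set field conflicts. A short usage sketch (assuming ImpersonateTarget is importable from yt_dlp.networking.impersonate):

    from yt_dlp.networking.impersonate import ImpersonateTarget

    target = ImpersonateTarget.from_str('chrome-110:windows-10')
    assert target.client == 'chrome' and target.version == '110'
    assert target.os == 'windows' and target.os_version == '10'
    assert str(target) == 'chrome-110:windows-10'

    # targets match in either direction unless a set field conflicts
    assert target in ImpersonateTarget('chrome')
    assert ImpersonateTarget('chrome') in target
    assert target not in ImpersonateTarget('chrome', '99')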
@@ -286,8 +286,14 @@ def ctx(request):
     return CTX_MAP[request.param]()


+@pytest.mark.parametrize(
+    'handler,ctx', [
+        ('Urllib', 'http'),
+        ('Requests', 'http'),
+        ('Websockets', 'ws'),
+        ('CurlCFFI', 'http')
+    ], indirect=True)
 class TestSocks4Proxy:
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks4_no_auth(self, handler, ctx):
         with handler() as rh:
             with ctx.socks_server(Socks4ProxyHandler) as server_address:
@@ -295,7 +301,6 @@ def test_socks4_no_auth(self, handler, ctx):
                     rh, proxies={'all': f'socks4://{server_address}'})
             assert response['version'] == 4

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks4_auth(self, handler, ctx):
         with handler() as rh:
             with ctx.socks_server(Socks4ProxyHandler, user_id='user') as server_address:
@@ -305,7 +310,6 @@ def test_socks4_auth(self, handler, ctx):
                     rh, proxies={'all': f'socks4://user:@{server_address}'})
             assert response['version'] == 4

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks4a_ipv4_target(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:
@@ -313,7 +317,6 @@ def test_socks4a_ipv4_target(self, handler, ctx):
                 assert response['version'] == 4
                 assert (response['ipv4_address'] == '127.0.0.1') != (response['domain_address'] == '127.0.0.1')

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks4a_domain_target(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:
@@ -322,7 +325,6 @@ def test_socks4a_domain_target(self, handler, ctx):
                 assert response['ipv4_address'] is None
                 assert response['domain_address'] == 'localhost'

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_ipv4_client_source_address(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler) as server_address:
             source_address = f'127.0.0.{random.randint(5, 255)}'
@@ -333,7 +335,6 @@ def test_ipv4_client_source_address(self, handler, ctx):
             assert response['client_address'][0] == source_address
             assert response['version'] == 4

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     @pytest.mark.parametrize('reply_code', [
         Socks4CD.REQUEST_REJECTED_OR_FAILED,
         Socks4CD.REQUEST_REJECTED_CANNOT_CONNECT_TO_IDENTD,
@@ -345,7 +346,6 @@ def test_socks4_errors(self, handler, ctx, reply_code):
                 with pytest.raises(ProxyError):
                     ctx.socks_info_request(rh)

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_ipv6_socks4_proxy(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler, bind_ip='::1') as server_address:
             with handler(proxies={'all': f'socks4://{server_address}'}) as rh:
@@ -354,7 +354,6 @@ def test_ipv6_socks4_proxy(self, handler, ctx):
                 assert response['ipv4_address'] == '127.0.0.1'
                 assert response['version'] == 4

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_timeout(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler, sleep=2) as server_address:
             with handler(proxies={'all': f'socks4://{server_address}'}, timeout=0.5) as rh:
@@ -362,9 +361,15 @@ def test_timeout(self, handler, ctx):
                 ctx.socks_info_request(rh)


+@pytest.mark.parametrize(
+    'handler,ctx', [
+        ('Urllib', 'http'),
+        ('Requests', 'http'),
+        ('Websockets', 'ws'),
+        ('CurlCFFI', 'http')
+    ], indirect=True)
 class TestSocks5Proxy:

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5_no_auth(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -372,7 +377,6 @@ def test_socks5_no_auth(self, handler, ctx):
                 assert response['auth_methods'] == [0x0]
                 assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5_user_pass(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler, auth=('test', 'testpass')) as server_address:
             with handler() as rh:
@@ -385,7 +389,6 @@ def test_socks5_user_pass(self, handler, ctx):
                 assert response['auth_methods'] == [Socks5Auth.AUTH_NONE, Socks5Auth.AUTH_USER_PASS]
                 assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5_ipv4_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -393,7 +396,6 @@ def test_socks5_ipv4_target(self, handler, ctx):
                 assert response['ipv4_address'] == '127.0.0.1'
                 assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5_domain_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -401,7 +403,6 @@ def test_socks5_domain_target(self, handler, ctx):
                 assert (response['ipv4_address'] == '127.0.0.1') != (response['ipv6_address'] == '::1')
                 assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5h_domain_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5h://{server_address}'}) as rh:
@@ -410,7 +411,6 @@ def test_socks5h_domain_target(self, handler, ctx):
                 assert response['domain_address'] == 'localhost'
                 assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5h_ip_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5h://{server_address}'}) as rh:
@@ -419,7 +419,6 @@ def test_socks5h_ip_target(self, handler, ctx):
                 assert response['domain_address'] is None
                 assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5_ipv6_destination(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -427,7 +426,6 @@ def test_socks5_ipv6_destination(self, handler, ctx):
                 assert response['ipv6_address'] == '::1'
                 assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_ipv6_socks5_proxy(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler, bind_ip='::1') as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@@ -438,7 +436,6 @@ def test_ipv6_socks5_proxy(self, handler, ctx):

     # XXX: is there any feasible way of testing IPv6 source addresses?
     # Same would go for non-proxy source_address test...
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_ipv4_client_source_address(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             source_address = f'127.0.0.{random.randint(5, 255)}'
@@ -448,7 +445,6 @@ def test_ipv4_client_source_address(self, handler, ctx):
             assert response['client_address'][0] == source_address
             assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     @pytest.mark.parametrize('reply_code', [
         Socks5Reply.GENERAL_FAILURE,
         Socks5Reply.CONNECTION_NOT_ALLOWED,
@@ -465,7 +461,6 @@ def test_socks5_errors(self, handler, ctx, reply_code):
                 with pytest.raises(ProxyError):
                     ctx.socks_info_request(rh)

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_timeout(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler, sleep=2) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}, timeout=1) as rh:

@@ -2391,7 +2391,7 @@ def test_traverse_obj(self):
         self.assertEqual(traverse_obj(etree, '//year/text()'), ['2008', '2011', '2011'],
                          msg='`text()` at end of path should give the inner text')
         self.assertEqual(traverse_obj(etree, '//*[@direction]/@direction'), ['E', 'W', 'N', 'W', 'E'],
-                         msg='full python xpath features should be supported')
+                         msg='full Python xpath features should be supported')
         self.assertEqual(traverse_obj(etree, (0, '@name')), 'Liechtenstein',
                          msg='special transformations should act on current element')
         self.assertEqual(traverse_obj(etree, ('country', 0, ..., 'text()', {int_or_none})), [1, 2008, 141100],

@@ -32,8 +32,6 @@
 )
 from yt_dlp.utils.networking import HTTPHeaderDict

-from test.conftest import validate_and_send
-
 TEST_DIR = os.path.dirname(os.path.abspath(__file__))


@@ -66,7 +64,9 @@ def process_request(self, request):

 def create_websocket_server(**ws_kwargs):
     import websockets.sync.server
-    wsd = websockets.sync.server.serve(websocket_handler, '127.0.0.1', 0, process_request=process_request, **ws_kwargs)
+    wsd = websockets.sync.server.serve(
+        websocket_handler, '127.0.0.1', 0,
+        process_request=process_request, open_timeout=2, **ws_kwargs)
     ws_port = wsd.socket.getsockname()[1]
     ws_server_thread = threading.Thread(target=wsd.serve_forever)
     ws_server_thread.daemon = True
@@ -100,6 +100,19 @@ def create_mtls_wss_websocket_server():
     return create_websocket_server(ssl_context=sslctx)


+def ws_validate_and_send(rh, req):
+    rh.validate(req)
+    max_tries = 3
+    for i in range(max_tries):
+        try:
+            return rh.send(req)
+        except TransportError as e:
+            if i < (max_tries - 1) and 'connection closed during handshake' in str(e):
+                # websockets server sometimes hangs on new connections
+                continue
+            raise
+
@pytest.mark.skipif(not websockets, reason='websockets must be installed to test websocket request handlers')
|
@pytest.mark.skipif(not websockets, reason='websockets must be installed to test websocket request handlers')
|
||||||
class TestWebsSocketRequestHandlerConformance:
|
class TestWebsSocketRequestHandlerConformance:
|
||||||
@classmethod
|
@classmethod
|
||||||
|
@@ -119,7 +132,7 @@ def setup_class(cls):
     @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_basic_websockets(self, handler):
         with handler() as rh:
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             assert 'upgrade' in ws.headers
             assert ws.status == 101
             ws.send('foo')
@@ -131,7 +144,7 @@ def test_basic_websockets(self, handler):
     @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_send_types(self, handler, msg, opcode):
         with handler() as rh:
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             ws.send(msg)
             assert int(ws.recv()) == opcode
             ws.close()
@@ -140,10 +153,10 @@ def test_send_types(self, handler, msg, opcode):
     def test_verify_cert(self, handler):
         with handler() as rh:
             with pytest.raises(CertificateVerifyError):
-                validate_and_send(rh, Request(self.wss_base_url))
+                ws_validate_and_send(rh, Request(self.wss_base_url))

         with handler(verify=False) as rh:
-            ws = validate_and_send(rh, Request(self.wss_base_url))
+            ws = ws_validate_and_send(rh, Request(self.wss_base_url))
             assert ws.status == 101
             ws.close()

@@ -151,7 +164,7 @@ def test_verify_cert(self, handler):
     def test_ssl_error(self, handler):
         with handler(verify=False) as rh:
             with pytest.raises(SSLError, match=r'ssl(?:v3|/tls) alert handshake failure') as exc_info:
-                validate_and_send(rh, Request(self.bad_wss_host))
+                ws_validate_and_send(rh, Request(self.bad_wss_host))
             assert not issubclass(exc_info.type, CertificateVerifyError)

     @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
@@ -163,7 +176,7 @@ def test_ssl_error(self, handler):
     ])
     def test_percent_encode(self, handler, path, expected):
         with handler() as rh:
-            ws = validate_and_send(rh, Request(f'{self.ws_base_url}{path}'))
+            ws = ws_validate_and_send(rh, Request(f'{self.ws_base_url}{path}'))
             ws.send('path')
             assert ws.recv() == expected
             assert ws.status == 101
@@ -174,7 +187,7 @@ def test_remove_dot_segments(self, handler):
         with handler() as rh:
             # This isn't a comprehensive test,
             # but it should be enough to check whether the handler is removing dot segments
-            ws = validate_and_send(rh, Request(f'{self.ws_base_url}/a/b/./../../test'))
+            ws = ws_validate_and_send(rh, Request(f'{self.ws_base_url}/a/b/./../../test'))
             assert ws.status == 101
             ws.send('path')
             assert ws.recv() == '/test'
@@ -187,18 +200,18 @@ def test_remove_dot_segments(self, handler):
     def test_raise_http_error(self, handler, status):
         with handler() as rh:
             with pytest.raises(HTTPError) as exc_info:
-                validate_and_send(rh, Request(f'{self.ws_base_url}/gen_{status}'))
+                ws_validate_and_send(rh, Request(f'{self.ws_base_url}/gen_{status}'))
             assert exc_info.value.status == status

     @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     @pytest.mark.parametrize('params,extensions', [
-        ({'timeout': 0.00001}, {}),
-        ({}, {'timeout': 0.00001}),
+        ({'timeout': sys.float_info.min}, {}),
+        ({}, {'timeout': sys.float_info.min}),
     ])
     def test_timeout(self, handler, params, extensions):
         with handler(**params) as rh:
             with pytest.raises(TransportError):
-                validate_and_send(rh, Request(self.ws_base_url, extensions=extensions))
+                ws_validate_and_send(rh, Request(self.ws_base_url, extensions=extensions))

     @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_cookies(self, handler):
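A note on the timeout values swapped in above: `sys.float_info.min` is the smallest positive normalized double (about 2.2e-308), so the deadline can never be met and the test fails deterministically, whereas a literal `0.00001` could occasionally be beaten by a fast loopback round-trip. A quick illustration:

```python
import sys

# The smallest positive normalized float -- far below any achievable socket
# round-trip, so a timeout this small is guaranteed to fire.
print(sys.float_info.min)  # 2.2250738585072014e-308
```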
@@ -210,18 +223,18 @@ def test_cookies(self, handler):
                                               comment_url=None, rest={}))

         with handler(cookiejar=cookiejar) as rh:
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             ws.send('headers')
             assert json.loads(ws.recv())['cookie'] == 'test=ytdlp'
             ws.close()

         with handler() as rh:
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             ws.send('headers')
             assert 'cookie' not in json.loads(ws.recv())
             ws.close()

-            ws = validate_and_send(rh, Request(self.ws_base_url, extensions={'cookiejar': cookiejar}))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url, extensions={'cookiejar': cookiejar}))
             ws.send('headers')
             assert json.loads(ws.recv())['cookie'] == 'test=ytdlp'
             ws.close()
@@ -231,7 +244,7 @@ def test_source_address(self, handler):
         source_address = f'127.0.0.{random.randint(5, 255)}'
         verify_address_availability(source_address)
         with handler(source_address=source_address) as rh:
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             ws.send('source_address')
             assert source_address == ws.recv()
             ws.close()
@@ -240,7 +253,7 @@ def test_response_url(self, handler):
     def test_response_url(self, handler):
         with handler() as rh:
             url = f'{self.ws_base_url}/something'
-            ws = validate_and_send(rh, Request(url))
+            ws = ws_validate_and_send(rh, Request(url))
             assert ws.url == url
             ws.close()

@@ -248,14 +261,14 @@ def test_response_url(self, handler):
     def test_request_headers(self, handler):
         with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:
             # Global Headers
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             ws.send('headers')
             headers = HTTPHeaderDict(json.loads(ws.recv()))
             assert headers['test1'] == 'test'
             ws.close()

             # Per request headers, merged with global
-            ws = validate_and_send(rh, Request(
+            ws = ws_validate_and_send(rh, Request(
                 self.ws_base_url, headers={'test2': 'changed', 'test3': 'test3'}))
             ws.send('headers')
             headers = HTTPHeaderDict(json.loads(ws.recv()))
@@ -288,7 +301,7 @@ def test_mtls(self, handler, client_cert):
             verify=False,
             client_cert=client_cert
         ) as rh:
-            validate_and_send(rh, Request(self.mtls_wss_base_url)).close()
+            ws_validate_and_send(rh, Request(self.mtls_wss_base_url)).close()


 def create_fake_ws_connection(raised):
@@ -47,6 +47,7 @@
     SSLError,
     network_exceptions,
 )
+from .networking.impersonate import ImpersonateRequestHandler
 from .plugins import directories as plugin_directories
 from .postprocessor import _PLUGIN_CLASSES as plugin_pps
 from .postprocessor import (
@@ -104,6 +105,7 @@
     SameFileError,
     UnavailableVideoError,
     UserNotLive,
+    YoutubeDLError,
     age_restricted,
     args_to_str,
     bug_reports_message,
@@ -407,6 +409,8 @@ class YoutubeDL:
                        - "detect_or_warn": check whether we can do anything
                          about it, warn otherwise (default)
    source_address:    Client-side IP address to bind to.
+   impersonate:       Client to impersonate for requests.
+                      An ImpersonateTarget (from yt_dlp.networking.impersonate)
    sleep_interval_requests: Number of seconds to sleep between requests
                       during extraction
    sleep_interval:    Number of seconds to sleep before each download when
@@ -580,7 +584,7 @@ class YoutubeDL:
         'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
         'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
         'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
-        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
+        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data',
         'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
         'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
         'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
@@ -718,6 +722,13 @@ def check_deprecated(param, option, suggestion):
         for msg in self.params.get('_deprecation_warnings', []):
             self.deprecated_feature(msg)

+        if impersonate_target := self.params.get('impersonate'):
+            if not self._impersonate_target_available(impersonate_target):
+                raise YoutubeDLError(
+                    f'Impersonate target "{impersonate_target}" is not available. '
+                    f'Use --list-impersonate-targets to see available targets. '
+                    f'You may be missing dependencies required to support this target.')
+
         if 'list-formats' in self.params['compat_opts']:
             self.params['listformats_table'] = False

@@ -967,6 +978,7 @@ def __exit__(self, *args):

     def close(self):
         self.save_cookies()
+        if '_request_director' in self.__dict__:
             self._request_director.close()
             del self._request_director
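The `'_request_director' in self.__dict__` guard above makes sense if `_request_director` is a `functools.cached_property` (its cached value lives in the instance `__dict__`): the membership test asks whether the director was ever built, without building one just to close it. A minimal sketch of that pattern, under that assumption and with hypothetical names:

```python
import functools


class Client:
    @functools.cached_property
    def director(self):
        # Expensive object, built lazily on first attribute access;
        # cached_property then stores it in self.__dict__['director'].
        return object()

    def close(self):
        # Membership test instead of attribute access: never triggers
        # a lazy build only to immediately tear the object down again.
        if 'director' in self.__dict__:
            del self.director  # drops the cached instance
```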
@@ -2231,7 +2243,7 @@ def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, ins
         selectors = []
         current_selector = None
         for type, string_, start, _, _ in tokens:
-            # ENCODING is only defined in python 3.x
+            # ENCODING is only defined in Python 3.x
             if type == getattr(tokenize, 'ENCODING', None):
                 continue
             elif type in [tokenize.NAME, tokenize.NUMBER]:
@@ -2654,6 +2666,7 @@ def _fill_common_fields(self, info_dict, final=True):

         for old_key, new_key in self._deprecated_multivalue_fields.items():
             if new_key in info_dict and old_key in info_dict:
+                if '_version' not in info_dict:  # HACK: Do not warn when using --load-info-json
                     self.deprecation_warning(f'Do not return {old_key!r} when {new_key!r} is present')
             elif old_value := info_dict.get(old_key):
                 info_dict[new_key] = old_value.split(', ')
@@ -3581,6 +3594,8 @@ def download_with_info_file(self, info_filename):
                     raise
                 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                 self.download([webpage_url])
+        except ExtractorError as e:
+            self.report_error(e)
         return self._download_retcode

     @staticmethod
@@ -4078,6 +4093,22 @@ def _opener(self):
         handler = self._request_director.handlers['Urllib']
         return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)

+    def _get_available_impersonate_targets(self):
+        # todo(future): make available as public API
+        return [
+            (target, rh.RH_NAME)
+            for rh in self._request_director.handlers.values()
+            if isinstance(rh, ImpersonateRequestHandler)
+            for target in rh.supported_targets
+        ]
+
+    def _impersonate_target_available(self, target):
+        # todo(future): make available as public API
+        return any(
+            rh.is_supported_target(target)
+            for rh in self._request_director.handlers.values()
+            if isinstance(rh, ImpersonateRequestHandler))
+
     def urlopen(self, req):
         """ Start an HTTP download """
         if isinstance(req, str):
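Both helpers are private (note the `todo(future)` markers), but their intended use is straightforward. A sketch of how a caller could enumerate and check targets, assuming a default-constructed `YoutubeDL`:

```python
from yt_dlp import YoutubeDL
from yt_dlp.networking.impersonate import ImpersonateTarget

with YoutubeDL() as ydl:
    # Every (target, handler name) pair the installed request handlers offer
    for target, rh_name in ydl._get_available_impersonate_targets():
        print(target, rh_name)

    # The same check __init__ now uses to fail fast on an unusable --impersonate
    if not ydl._impersonate_target_available(ImpersonateTarget('chrome')):
        print('chrome impersonation needs an extra dependency (e.g. curl_cffi)')
```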
@@ -4109,9 +4140,13 @@ def urlopen(self, req):
                 raise RequestError(
                     'file:// URLs are disabled by default in yt-dlp for security reasons. '
                     'Use --enable-file-urls to enable at your own risk.', cause=ue) from ue
-            if 'unsupported proxy type: "https"' in ue.msg.lower():
+            if (
+                'unsupported proxy type: "https"' in ue.msg.lower()
+                and 'requests' not in self._request_director.handlers
+                and 'curl_cffi' not in self._request_director.handlers
+            ):
                 raise RequestError(
-                    'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests')
+                    'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests, curl_cffi')

             elif (
                 re.match(r'unsupported url scheme: "wss?"', ue.msg.lower())
@@ -4121,6 +4156,13 @@ def urlopen(self, req):
                     'This request requires WebSocket support. '
                     'Ensure one of the following dependencies are installed: websockets',
                     cause=ue) from ue
+
+            elif re.match(r'unsupported (?:extensions: impersonate|impersonate target)', ue.msg.lower()):
+                raise RequestError(
+                    f'Impersonate target "{req.extensions["impersonate"]}" is not available.'
+                    f' See --list-impersonate-targets for available targets.'
+                    f' This request requires browser impersonation, however you may be missing dependencies'
+                    f' required to support this target.')
             raise
         except SSLError as e:
             if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
@@ -4153,6 +4195,7 @@ def build_request_director(self, handlers, preferences=None):
             'timeout': 'socket_timeout',
             'legacy_ssl_support': 'legacyserverconnect',
             'enable_file_urls': 'enable_file_urls',
+            'impersonate': 'impersonate',
             'client_cert': {
                 'client_certificate': 'client_certificate',
                 'client_certificate_key': 'client_certificate_key',
@@ -4,7 +4,7 @@
     raise ImportError(
         f'You are using an unsupported version of Python. Only Python versions 3.8 and above are supported by yt-dlp')  # noqa: F541

-__license__ = 'Public Domain'
+__license__ = 'The Unlicense'

 import collections
 import getpass
@@ -20,6 +20,7 @@
 from .downloader.external import get_external_downloader
 from .extractor import list_extractor_classes
 from .extractor.adobepass import MSO_INFO
+from .networking.impersonate import ImpersonateTarget
 from .options import parseOpts
 from .postprocessor import (
     FFmpegExtractAudioPP,
@@ -49,6 +50,7 @@
     float_or_none,
     format_field,
     int_or_none,
+    join_nonempty,
     match_filter_func,
     parse_bytes,
     parse_duration,
@@ -395,6 +397,9 @@ def parse_chapters(name, value, advanced=False):
                 f'Supported keyrings are: {", ".join(sorted(SUPPORTED_KEYRINGS))}')
         opts.cookiesfrombrowser = (browser_name, profile, keyring, container)

+    if opts.impersonate is not None:
+        opts.impersonate = ImpersonateTarget.from_str(opts.impersonate.lower())
+
     # MetadataParser
     def metadataparser_actions(f):
         if isinstance(f, str):
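For context on `ImpersonateTarget.from_str`: per the accompanying `--impersonate` option, a target string has the shape `CLIENT[:OS]`, each part optionally version-suffixed. A sketch (the exact values accepted depend on the installed handlers; these examples are assumptions):

```python
from yt_dlp.networking.impersonate import ImpersonateTarget

ImpersonateTarget.from_str('chrome')                 # any Chrome version, any OS
ImpersonateTarget.from_str('chrome-110')             # pinned client version
ImpersonateTarget.from_str('chrome-110:windows-10')  # client and OS both pinned
```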
@@ -918,6 +923,7 @@ def parse_options(argv=None):
         'postprocessors': postprocessors,
         'fixup': opts.fixup,
         'source_address': opts.source_address,
+        'impersonate': opts.impersonate,
         'call_home': opts.call_home,
         'sleep_interval_requests': opts.sleep_interval_requests,
         'sleep_interval': opts.sleep_interval,
@@ -987,6 +993,41 @@ def _real_main(argv=None):
                 traceback.print_exc()
                 ydl._download_retcode = 100

+        if opts.list_impersonate_targets:
+
+            known_targets = [
+                # List of simplified targets we know are supported,
+                # to help users know what dependencies may be required.
+                (ImpersonateTarget('chrome'), 'curl_cffi'),
+                (ImpersonateTarget('edge'), 'curl_cffi'),
+                (ImpersonateTarget('safari'), 'curl_cffi'),
+            ]
+
+            available_targets = ydl._get_available_impersonate_targets()
+
+            def make_row(target, handler):
+                return [
+                    join_nonempty(target.client.title(), target.version, delim='-') or '-',
+                    join_nonempty((target.os or "").title(), target.os_version, delim='-') or '-',
+                    handler,
+                ]
+
+            rows = [make_row(target, handler) for target, handler in available_targets]
+
+            for known_target, known_handler in known_targets:
+                if not any(
+                    known_target in target and handler == known_handler
+                    for target, handler in available_targets
+                ):
+                    rows.append([
+                        ydl._format_out(text, ydl.Styles.SUPPRESS)
+                        for text in make_row(known_target, f'{known_handler} (not available)')
+                    ])
+
+            ydl.to_screen('[info] Available impersonate targets')
+            ydl.to_stdout(render_table(['Client', 'OS', 'Source'], rows, extra_gap=2, delim='-'))
+            return
+
         if not actual_use:
             if pre_process:
                 return ydl._download_retcode
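How `make_row()` above shapes each table cell: `join_nonempty` drops empty parts and joins the rest with the delimiter, and the trailing `or '-'` fills a fully-empty cell with a placeholder:

```python
from yt_dlp.utils import join_nonempty

join_nonempty('Chrome', '110', delim='-')    # 'Chrome-110'
join_nonempty('Chrome', None, delim='-')     # 'Chrome' (empty parts dropped)
join_nonempty(None, None, delim='-') or '-'  # '-' (placeholder cell)
```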
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3

 # Execute with
-# $ python -m yt_dlp
+# $ python3 -m yt_dlp

 import sys
@@ -1,6 +1,6 @@
 import sys

-from PyInstaller.utils.hooks import collect_submodules
+from PyInstaller.utils.hooks import collect_submodules, collect_data_files


 def pycryptodome_module():
@@ -10,7 +10,7 @@ def pycryptodome_module():
     try:
         import Crypto  # noqa: F401
         print('WARNING: Using Crypto since Cryptodome is not available. '
-              'Install with: pip install pycryptodomex', file=sys.stderr)
+              'Install with: python3 -m pip install pycryptodomex', file=sys.stderr)
         return 'Crypto'
     except ImportError:
         pass
@@ -25,10 +25,12 @@ def get_hidden_imports():
     for module in ('websockets', 'requests', 'urllib3'):
         yield from collect_submodules(module)
     # These are auto-detected, but explicitly add them just in case
-    yield from ('mutagen', 'brotli', 'certifi', 'secretstorage')
+    yield from ('mutagen', 'brotli', 'certifi', 'secretstorage', 'curl_cffi')


 hiddenimports = list(get_hidden_imports())
 print(f'Adding imports: {hiddenimports}')

 excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts', 'bundle']
+
+datas = collect_data_files('curl_cffi', includes=['cacert.pem'])
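On the new `datas` variable: PyInstaller's `collect_data_files` returns `(source_file, dest_dir)` pairs for a package's non-Python files, and a hook exposes them through the module-level `datas` name; here it ships the CA bundle curl_cffi needs for TLS verification at runtime. Illustration (the printed path is an assumption about one particular environment):

```python
from PyInstaller.utils.hooks import collect_data_files

datas = collect_data_files('curl_cffi', includes=['cacert.pem'])
print(datas)  # e.g. [('/path/to/site-packages/curl_cffi/cacert.pem', 'curl_cffi')]
```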
@@ -1,5 +0,0 @@
-import warnings
-
-warnings.warn(DeprecationWarning(f'{__name__} is deprecated'))
-
-casefold = str.casefold
@@ -10,10 +10,10 @@
 from .. import compat_os_name

 if compat_os_name == 'nt':
-    # On older python versions, proxies are extracted from Windows registry erroneously. [1]
+    # On older Python versions, proxies are extracted from Windows registry erroneously. [1]
     # If the https proxy in the registry does not have a scheme, urllib will incorrectly add https:// to it. [2]
     # It is unlikely that the user has actually set it to be https, so we should be fine to safely downgrade
-    # it to http on these older python versions to avoid issues
+    # it to http on these older Python versions to avoid issues
     # This also applies for ftp proxy type, as ftp:// proxy scheme is not supported.
     # 1: https://github.com/python/cpython/issues/86793
     # 2: https://github.com/python/cpython/blob/51f1ae5ceb0673316c4e4b0175384e892e33cc6e/Lib/urllib/request.py#L2683-L2698
@@ -121,7 +121,7 @@ def _extract_firefox_cookies(profile, container, logger):
     logger.info('Extracting cookies from firefox')
     if not sqlite3:
         logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
-                       'Please use a python interpreter compiled with sqlite3 support')
+                       'Please use a Python interpreter compiled with sqlite3 support')
         return YoutubeDLCookieJar()

     if profile is None:
@@ -264,7 +264,7 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):

     if not sqlite3:
         logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
-                       'Please use a python interpreter compiled with sqlite3 support')
+                       'Please use a Python interpreter compiled with sqlite3 support')
         return YoutubeDLCookieJar()

     config = _get_chromium_based_browser_settings(browser_name)
@@ -46,16 +46,14 @@
     # We need to get the underlying `sqlite` version, see https://github.com/yt-dlp/yt-dlp/issues/8152
     sqlite3._yt_dlp__version = sqlite3.sqlite_version
 except ImportError:
-    # although sqlite3 is part of the standard library, it is possible to compile python without
+    # although sqlite3 is part of the standard library, it is possible to compile Python without
     # sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544
     sqlite3 = None


 try:
     import websockets
-except (ImportError, SyntaxError):
-    # websockets 3.10 on python 3.6 causes SyntaxError
-    # See https://github.com/yt-dlp/yt-dlp/issues/2633
+except ImportError:
     websockets = None

 try:
@@ -76,6 +74,10 @@
 if hasattr(xattr, 'set'):  # pyxattr
     xattr._yt_dlp__identifier = 'pyxattr'

+try:
+    import curl_cffi
+except ImportError:
+    curl_cffi = None

 from . import Cryptodome
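The new `curl_cffi` import follows the module's existing convention: the name is bound to the module when importable and to `None` otherwise, so feature gates elsewhere reduce to a truthiness check. A sketch of the consuming side:

```python
from yt_dlp.dependencies import curl_cffi

if curl_cffi is None:
    # Dependency missing: features backed by curl_cffi (e.g. the
    # impersonation targets it provides) simply do not get registered.
    print('curl_cffi not available')
```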
@@ -237,8 +237,13 @@ def download():

         def retry(e):
             close_stream()
-            ctx.resume_len = (byte_counter if ctx.tmpfilename == '-'
-                              else os.path.getsize(encodeFilename(ctx.tmpfilename)))
+            if ctx.tmpfilename == '-':
+                ctx.resume_len = byte_counter
+            else:
+                try:
+                    ctx.resume_len = os.path.getsize(encodeFilename(ctx.tmpfilename))
+                except FileNotFoundError:
+                    ctx.resume_len = 0
             raise RetryDownload(e)

         while True:
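The reworked `retry()` closes a crash window: between attempts the partfile can disappear (cleanup, antivirus, a concurrent process), and the old one-expression version let `os.path.getsize` raise `FileNotFoundError` straight through instead of retrying. The same logic restated standalone (function name hypothetical):

```python
import os

def resume_length(tmpfilename, byte_counter):
    if tmpfilename == '-':          # streaming to stdout: no file to measure
        return byte_counter
    try:
        return os.path.getsize(tmpfilename)
    except FileNotFoundError:       # partfile vanished between retries
        return 0                    # restart the download from the beginning
```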
@@ -320,7 +320,6 @@
     CBSIE,
     ParamountPressExpressIE,
 )
-from .cbsinteractive import CBSInteractiveIE
 from .cbsnews import (
     CBSNewsEmbedIE,
     CBSNewsIE,
@@ -348,10 +347,6 @@
 from .charlierose import CharlieRoseIE
 from .chaturbate import ChaturbateIE
 from .chilloutzone import ChilloutzoneIE
-from .chingari import (
-    ChingariIE,
-    ChingariUserIE,
-)
 from .chzzk import (
     CHZZKLiveIE,
     CHZZKVideoIE,
@@ -369,7 +364,6 @@
 from .ciscowebex import CiscoWebexIE
 from .cjsw import CJSWIE
 from .clipchamp import ClipchampIE
-from .cliphunter import CliphunterIE
 from .clippit import ClippitIE
 from .cliprs import ClipRsIE
 from .closertotruth import CloserToTruthIE
@@ -444,6 +438,7 @@
 from .dailymotion import (
     DailymotionIE,
     DailymotionPlaylistIE,
+    DailymotionSearchIE,
     DailymotionUserIE,
 )
 from .dailywire import (
@@ -475,7 +470,6 @@
 )
 from .dfb import DFBIE
 from .dhm import DHMIE
-from .digg import DiggIE
 from .douyutv import (
     DouyuShowIE,
     DouyuTVIE,
@@ -609,7 +603,6 @@
 )
 from .fczenit import FczenitIE
 from .fifa import FifaIE
-from .filmmodu import FilmmoduIE
 from .filmon import (
     FilmOnIE,
     FilmOnChannelIE,
@@ -675,7 +668,6 @@
     GabIE,
 )
 from .gaia import GaiaIE
-from .gameinformer import GameInformerIE
 from .gamejolt import (
     GameJoltIE,
     GameJoltUserIE,
@@ -704,7 +696,6 @@
     GettrStreamingIE,
 )
 from .giantbomb import GiantBombIE
-from .giga import GigaIE
 from .glide import GlideIE
 from .globalplayer import (
     GlobalPlayerLiveIE,
@@ -895,10 +886,8 @@
 from .jwplatform import JWPlatformIE
 from .kakao import KakaoIE
 from .kaltura import KalturaIE
-from .kanal2 import Kanal2IE
 from .kankanews import KankaNewsIE
 from .karaoketv import KaraoketvIE
-from .karrierevideos import KarriereVideosIE
 from .kelbyone import KelbyOneIE
 from .khanacademy import (
     KhanAcademyIE,
@@ -914,13 +903,11 @@
 from .kinopoisk import KinoPoiskIE
 from .kommunetv import KommunetvIE
 from .kompas import KompasVideoIE
-from .konserthusetplay import KonserthusetPlayIE
 from .koo import KooIE
 from .kth import KTHIE
 from .krasview import KrasViewIE
 from .ku6 import Ku6IE
 from .kukululive import KukuluLiveIE
-from .kusi import KUSIIE
 from .kuwo import (
     KuwoIE,
     KuwoAlbumIE,
@@ -1002,7 +989,6 @@
     LnkGoIE,
     LnkIE,
 )
-from .localnews8 import LocalNews8IE
 from .lovehomeporn import LoveHomePornIE
 from .lrt import (
     LRTVODIE,
@@ -1029,7 +1015,6 @@
     MailRuMusicSearchIE,
 )
 from .mainstreaming import MainStreamingIE
-from .malltv import MallTVIE
 from .mangomolo import (
     MangomoloVideoIE,
     MangomoloLiveIE,
@@ -1073,7 +1058,6 @@
 from .melonvod import MelonVODIE
 from .metacritic import MetacriticIE
 from .mgtv import MGTVIE
-from .miaopai import MiaoPaiIE
 from .microsoftstream import MicrosoftStreamIE
 from .microsoftvirtualacademy import (
     MicrosoftVirtualAcademyIE,
@@ -1091,7 +1075,6 @@
     MindsChannelIE,
     MindsGroupIE,
 )
-from .ministrygrid import MinistryGridIE
 from .minoto import MinotoIE
 from .mirrativ import (
     MirrativIE,
@@ -1119,7 +1102,6 @@
 from .mocha import MochaVideoIE
 from .mojvideo import MojvideoIE
 from .monstercat import MonstercatIE
-from .morningstar import MorningstarIE
 from .motherless import (
     MotherlessIE,
     MotherlessGroupIE,
@@ -1364,7 +1346,6 @@
 from .nzherald import NZHeraldIE
 from .nzonscreen import NZOnScreenIE
 from .nzz import NZZIE
-from .odatv import OdaTVIE
 from .odkmedia import OnDemandChinaEpisodeIE
 from .odnoklassniki import OdnoklassnikiIE
 from .oftv import (
@@ -1476,7 +1457,6 @@
     PlatziCourseIE,
 )
 from .playplustv import PlayPlusTVIE
-from .playstuff import PlayStuffIE
 from .playsuisse import PlaySuisseIE
 from .playtvak import PlaytvakIE
 from .playwire import PlaywireIE
@@ -1598,7 +1578,6 @@
     RayWenderlichIE,
     RayWenderlichCourseIE,
 )
-from .rbmaradio import RBMARadioIE
 from .rbgtum import (
     RbgTumIE,
     RbgTumCourseIE,
@@ -1630,7 +1609,6 @@
     RedGifsUserIE,
 )
 from .redtube import RedTubeIE
-from .regiotv import RegioTVIE
 from .rentv import (
     RENTVIE,
     RENTVArticleIE,
@@ -1639,6 +1617,7 @@
 from .reuters import ReutersIE
 from .reverbnation import ReverbNationIE
 from .rheinmaintv import RheinMainTVIE
+from .ridehome import RideHomeIE
 from .rinsefm import (
     RinseFMIE,
     RinseFMArtistPlaylistIE,
@@ -1737,7 +1716,6 @@
 from .saitosan import SaitosanIE
 from .samplefocus import SampleFocusIE
 from .sapo import SapoIE
-from .savefrom import SaveFromIE
 from .sbs import SBSIE
 from .sbscokr import (
     SBSCoKrIE,
@@ -1757,7 +1735,6 @@
     SCTECourseIE,
 )
 from .scrolller import ScrolllerIE
-from .seeker import SeekerIE
 from .sejmpl import SejmIE
 from .senalcolombia import SenalColombiaLiveIE
 from .senategov import SenateISVPIE, SenateGovIE
@@ -1900,7 +1877,6 @@
 )
 from .streamable import StreamableIE
 from .streamcz import StreamCZIE
-from .streamff import StreamFFIE
 from .streetvoice import StreetVoiceIE
 from .stretchinternet import StretchInternetIE
 from .stripchat import StripchatIE
@@ -1929,7 +1905,6 @@
     TBSJPProgramIE,
     TBSJPPlaylistIE,
 )
-from .tdslifeway import TDSLifewayIE
 from .teachable import (
     TeachableIE,
     TeachableCourseIE,
@@ -2499,6 +2474,7 @@
     Zee5SeriesIE,
 )
 from .zeenews import ZeeNewsIE
+from .zenporn import ZenPornIE
 from .zetland import ZetlandDKArticleIE
 from .zhihu import ZhihuIE
 from .zingmp3 import (
@@ -245,7 +245,6 @@ class ABCIViewIE(InfoExtractor):
             'episode_id': 'NC2203H039S00',
             'season_number': 2022,
             'season': 'Season 2022',
-            'episode_number': None,
             'episode': 'Locking Up Kids',
             'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/nc/NC2203H039S00_636d8a0944a22_1920.jpg',
             'timestamp': 1668460497,
@@ -271,8 +270,6 @@ class ABCIViewIE(InfoExtractor):
             'episode_id': 'RF2004Q043S00',
             'season_number': 2021,
             'season': 'Season 2021',
-            'episode_number': None,
-            'episode': None,
             'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/rf/RF2004Q043S00_61a950639dbc0_1920.jpg',
             'timestamp': 1638710705,
@@ -53,7 +53,7 @@ def __init__(self, ie: 'AbemaTVIE'):
         # the protocol that this should really handle is 'abematv-license://'
         # abematv_license_open is just a placeholder for development purposes
         # ref. https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/urllib/request.py#L510
-        setattr(self, 'abematv-license_open', getattr(self, 'abematv_license_open'))
+        setattr(self, 'abematv-license_open', getattr(self, 'abematv_license_open', None))
         self.ie = ie

     def _get_videokey_from_ticket(self, ticket):
@@ -259,7 +259,7 @@ class AbemaTVIE(AbemaTVBaseIE):
             'title': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
             'series': 'ゆるキャン△ SEASON2',
             'episode': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
-            'series_number': 2,
+            'season_number': 2,
             'episode_number': 1,
             'description': 'md5:9c5a3172ae763278f9303922f0ea5b17',
         },
@@ -3,6 +3,7 @@
     float_or_none,
     format_field,
     int_or_none,
+    str_or_none,
     traverse_obj,
     parse_codecs,
     parse_qs,
@@ -129,7 +130,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
         'title': '红孩儿之趴趴蛙寻石记 第5话 ',
         'duration': 760.0,
         'season': '红孩儿之趴趴蛙寻石记',
-        'season_id': 5023171,
+        'season_id': '5023171',
         'season_number': 1,  # series has only 1 season
         'episode': 'Episode 5',
         'episode_number': 5,
@@ -146,7 +147,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
         'title': '叽歪老表(第二季) 第5话 坚不可摧',
         'season': '叽歪老表(第二季)',
         'season_number': 2,
-        'season_id': 6065485,
+        'season_id': '6065485',
         'episode': '坚不可摧',
         'episode_number': 5,
         'upload_date': '20220324',
@@ -191,7 +192,7 @@ def _real_extract(self, url):
             'title': json_bangumi_data.get('showTitle'),
             'thumbnail': json_bangumi_data.get('image'),
             'season': json_bangumi_data.get('bangumiTitle'),
-            'season_id': season_id,
+            'season_id': str_or_none(season_id),
             'season_number': season_number,
             'episode': json_bangumi_data.get('title'),
             'episode_number': episode_number,
@@ -107,7 +107,6 @@ def _real_extract(self, url):
                     title
                     tvRating
                 }''' % episode_path
-            ['getVideoBySlug']
         else:
             query = query % '''metaDescription
                 title
@@ -4,6 +4,7 @@
 from .common import InfoExtractor
 from ..utils import (
     InAdvancePagedList,
+    clean_html,
     int_or_none,
     orderedSet,
     str_to_int,
@@ -32,13 +33,15 @@ class AltCensoredIE(InfoExtractor):
             'duration': 926.09,
             'thumbnail': 'https://archive.org/download/youtube-k0srjLSkga8/youtube-k0srjLSkga8.thumbs/k0srjLSkga8_000925.jpg',
             'view_count': int,
-            'categories': ['News & Politics'],  # FIXME
+            'categories': ['News & Politics'],
         }
     }]

     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
+        category = clean_html(self._html_search_regex(
+            r'<a href="/category/\d+">([^<]+)</a>', webpage, 'category', default=None))

         return {
             '_type': 'url_transparent',
@@ -46,9 +49,7 @@ def _real_extract(self, url):
             'ie_key': ArchiveOrgIE.ie_key(),
             'view_count': str_to_int(self._html_search_regex(
                 r'YouTube Views:(?:\s|&nbsp;)*([\d,]+)', webpage, 'view count', default=None)),
-            'categories': self._html_search_regex(
-                r'<a href="/category/\d+">\s*\n?\s*([^<]+)</a>',
-                webpage, 'category', default='').split() or None,
+            'categories': [category] if category else None,
         }
@@ -67,7 +67,7 @@ def _real_extract(self, url):
         webpage = self._download_webpage(url, video_id)
         info = self._download_and_extract_api_data(video_id, netloc)
         info['description'] = self._og_search_description(webpage, default=None)
-        info['_old_archive_ids'] = [make_archive_id('Ant1NewsGrWatch', video_id)],
+        info['_old_archive_ids'] = [make_archive_id('Ant1NewsGrWatch', video_id)]
         return info
@@ -31,6 +31,7 @@
     unified_timestamp,
     url_or_none,
     urlhandle_detect_ext,
+    variadic,
 )


@@ -49,7 +50,7 @@ class ArchiveOrgIE(InfoExtractor):
         'release_date': '19681210',
         'timestamp': 1268695290,
         'upload_date': '20100315',
-        'creator': 'SRI International',
+        'creators': ['SRI International'],
         'uploader': 'laura@archive.org',
         'thumbnail': r're:https://archive\.org/download/.*\.jpg',
         'display_id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.cdr',
@@ -109,7 +110,7 @@ class ArchiveOrgIE(InfoExtractor):
             'title': 'Turning',
             'ext': 'flac',
             'track': 'Turning',
-            'creator': 'Grateful Dead',
+            'creators': ['Grateful Dead'],
             'display_id': 'gd1977-05-08d01t01.flac',
             'track_number': 1,
             'album': '1977-05-08 - Barton Hall - Cornell University',
@@ -129,7 +130,7 @@ class ArchiveOrgIE(InfoExtractor):
             'location': 'Barton Hall - Cornell University',
             'duration': 438.68,
             'track': 'Deal',
-            'creator': 'Grateful Dead',
+            'creators': ['Grateful Dead'],
             'album': '1977-05-08 - Barton Hall - Cornell University',
             'release_date': '19770508',
             'display_id': 'gd1977-05-08d01t07.flac',
@@ -167,7 +168,7 @@ class ArchiveOrgIE(InfoExtractor):
         'upload_date': '20160610',
         'description': 'md5:f70956a156645a658a0dc9513d9e78b7',
         'uploader': 'dimitrios@archive.org',
-        'creator': ['British Broadcasting Corporation', 'Time-Life Films'],
+        'creators': ['British Broadcasting Corporation', 'Time-Life Films'],
         'timestamp': 1465594947,
     },
     'playlist': [
@@ -257,7 +258,7 @@ def _real_extract(self, url):
             'title': m['title'],
             'description': clean_html(m.get('description')),
             'uploader': dict_get(m, ['uploader', 'adder']),
-            'creator': m.get('creator'),
+            'creators': traverse_obj(m, ('creator', {variadic}, {lambda x: x[0] and list(x)})),
             'license': m.get('licenseurl'),
             'release_date': unified_strdate(m.get('date')),
             'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
@@ -272,7 +273,7 @@ def _real_extract(self, url):
             'title': f.get('title') or f['name'],
             'display_id': f['name'],
             'description': clean_html(f.get('description')),
-            'creator': f.get('creator'),
+            'creators': traverse_obj(f, ('creator', {variadic}, {lambda x: x[0] and list(x)})),
             'duration': parse_duration(f.get('length')),
             'track_number': int_or_none(f.get('track')),
             'album': f.get('album'),
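The new `creators` expression is dense; here is what each stage does, assuming archive.org metadata where `creator` may be a bare string, a list, or absent. `{variadic}` wraps a lone string into a tuple, and the final lambda rejects empty values while normalizing the rest to a list:

```python
from yt_dlp.utils import traverse_obj, variadic

path = ('creator', {variadic}, {lambda x: x[0] and list(x)})

traverse_obj({'creator': 'Grateful Dead'}, path)       # ['Grateful Dead']
traverse_obj({'creator': ['BBC', 'Time-Life']}, path)  # ['BBC', 'Time-Life']
traverse_obj({}, path)                                 # None (no 'creator' key)
```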
@@ -142,10 +142,10 @@ class ArteTVIE(ArteTVBaseIE):
     def _fix_accessible_subs_locale(subs):
         updated_subs = {}
         for lang, sub_formats in subs.items():
-            for format in sub_formats:
-                if format.get('url', '').endswith('-MAL.m3u8'):
+            for fmt in sub_formats:
+                if fmt.get('url', '').endswith('-MAL.m3u8'):
                     lang += '-acc'
-                updated_subs.setdefault(lang, []).append(format)
+                updated_subs.setdefault(lang, []).append(fmt)
         return updated_subs

     def _real_extract(self, url):
@@ -24,7 +24,8 @@ class AxsIE(InfoExtractor):
             'timestamp': 1685729564,
             'duration': 1284.216,
             'series': 'Rock & Roll Road Trip with Sammy Hagar',
-            'season': 2,
+            'season': 'Season 2',
+            'season_number': 2,
             'episode': '3',
             'thumbnail': 'https://images.dotstudiopro.com/5f4e9d330a0c3b295a7e8394',
         },
@@ -41,7 +42,8 @@ class AxsIE(InfoExtractor):
             'timestamp': 1676403615,
             'duration': 2570.668,
             'series': 'The Big Interview with Dan Rather',
-            'season': 3,
+            'season': 'Season 3',
+            'season_number': 3,
             'episode': '5',
             'thumbnail': 'https://images.dotstudiopro.com/5f4d1901f340b50d937cec32',
         },
@@ -77,7 +79,7 @@ def _real_extract(self, url):
             'title': ('title', {str}),
             'description': ('description', {str}),
             'series': ('seriestitle', {str}),
-            'season': ('season', {int}),
+            'season_number': ('season', {int}),
             'episode': ('episode', {str}),
             'duration': ('duration', {float_or_none}),
             'timestamp': ('updated_at', {parse_iso8601}),

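Editor's note: the AXS hunks above split the raw integer season into the two metadata fields yt-dlp expects, a display string and a number. A hedged sketch of that convention (`season_fields` is a made-up helper, not part of the codebase):

    def season_fields(season):
        # An integer season yields both the display string and the number.
        if season is None:
            return {}
        return {'season': f'Season {season}', 'season_number': season}

    assert season_fields(2) == {'season': 'Season 2', 'season_number': 2}
    assert season_fields(None) == {}
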
@@ -3,7 +3,7 @@


 class BeatBumpVideoIE(InfoExtractor):
-    _VALID_URL = r'https://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
+    _VALID_URL = r'https?://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://beatbump.ml/listen?id=MgNrAu2pzNs',
         'md5': '5ff3fff41d3935b9810a9731e485fe66',
@@ -48,7 +48,7 @@ def _real_extract(self, url):


 class BeatBumpPlaylistIE(InfoExtractor):
-    _VALID_URL = r'https://beatbump\.(?:ml|io)/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
+    _VALID_URL = r'https?://beatbump\.(?:ml|io)/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://beatbump.ml/release?id=MPREb_gTAcphH99wE',
         'playlist_count': 50,

@@ -2,6 +2,7 @@

 from ..utils import (
     int_or_none,
+    str_or_none,
     traverse_obj,
     try_get,
     unified_timestamp,
@@ -22,7 +23,7 @@ class BeegIE(InfoExtractor):
             'age_limit': 18,
             'upload_date': '20220131',
             'timestamp': 1643656455,
-            'display_id': 2540839,
+            'display_id': '2540839',
         }
     }, {
         'url': 'https://beeg.com/-0599050563103750?t=4-861',
@@ -36,7 +37,7 @@ class BeegIE(InfoExtractor):
             'age_limit': 18,
             'description': 'md5:b4fc879a58ae6c604f8f259155b7e3b9',
             'timestamp': 1643623200,
-            'display_id': 2569965,
+            'display_id': '2569965',
             'upload_date': '20220131',
         }
     }, {
@@ -78,7 +79,7 @@ def _real_extract(self, url):

         return {
             'id': video_id,
-            'display_id': first_fact.get('id'),
+            'display_id': str_or_none(first_fact.get('id')),
             'title': traverse_obj(video, ('file', 'stuff', 'sf_name')),
             'description': traverse_obj(video, ('file', 'stuff', 'sf_story')),
             'timestamp': unified_timestamp(first_fact.get('fc_created')),

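Editor's note: several hunks in this commit (Beeg, BellMedia, BleacherReport, Cybrary) coerce numeric IDs to strings so they match yt-dlp's string-typed metadata fields. `str_or_none` is essentially the one-liner below; it is shown here as a stand-in to make the intent concrete:

    def str_or_none(v, default=None):
        return default if v is None else str(v)

    assert str_or_none(2540839) == '2540839'
    assert str_or_none(None) is None
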
@@ -32,7 +32,7 @@ class BellMediaIE(InfoExtractor):
             'description': 'md5:810f7f8c6a83ad5b48677c3f8e5bb2c3',
             'upload_date': '20180525',
             'timestamp': 1527288600,
-            'season_id': 73997,
+            'season_id': '73997',
             'season': '2018',
             'thumbnail': 'http://images2.9c9media.com/image_asset/2018_5_25_baf30cbd-b28d-4a18-9903-4bb8713b00f5_PNG_956x536.jpg',
             'tags': [],

@@ -93,7 +93,6 @@ class BFMTVArticleIE(BFMTVBaseIE):
             'id': '6318445464112',
             'ext': 'mp4',
             'title': 'Le plein de bioéthanol fait de plus en plus mal à la pompe',
-            'description': None,
             'uploader_id': '876630703001',
             'upload_date': '20230110',
             'timestamp': 1673341692,

@@ -1965,6 +1965,7 @@ class BiliIntlIE(BiliIntlBaseIE):
         'only_matching': True,
     }]

+    @staticmethod
     def _make_url(video_id, series_id=None):
         if series_id:
             return f'https://www.bilibili.tv/en/play/{series_id}/{video_id}'

@@ -185,7 +185,6 @@ class BitChuteChannelIE(InfoExtractor):
         'info_dict': {
             'id': 'UGlrF9o9b-Q',
             'ext': 'mp4',
-            'filesize': None,
             'title': 'This is the first video on #BitChute !',
             'description': 'md5:a0337e7b1fe39e32336974af8173a034',
             'thumbnail': r're:^https?://.*\.jpg$',

@@ -4,10 +4,12 @@
     ExtractorError,
     int_or_none,
     parse_iso8601,
+    str_or_none,
 )


 class BleacherReportIE(InfoExtractor):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/articles/(?P<id>\d+)'
     _TESTS = [{
         'url': 'http://bleacherreport.com/articles/2496438-fsu-stat-projections-is-jalen-ramsey-best-defensive-player-in-college-football',
@@ -16,7 +18,7 @@ class BleacherReportIE(InfoExtractor):
             'id': '2496438',
             'ext': 'mp4',
             'title': 'FSU Stat Projections: Is Jalen Ramsey Best Defensive Player in College Football?',
-            'uploader_id': 3992341,
+            'uploader_id': '3992341',
             'description': 'CFB, ACC, Florida State',
             'timestamp': 1434380212,
             'upload_date': '20150615',
@@ -33,7 +35,7 @@ class BleacherReportIE(InfoExtractor):
             'timestamp': 1446839961,
             'uploader': 'Sean Fay',
             'description': 'md5:b1601e2314c4d8eec23b6eafe086a757',
-            'uploader_id': 6466954,
+            'uploader_id': '6466954',
             'upload_date': '20151011',
         },
         'add_ie': ['Youtube'],
@@ -58,7 +60,7 @@ def _real_extract(self, url):
             'id': article_id,
             'title': article_data['title'],
             'uploader': article_data.get('author', {}).get('name'),
-            'uploader_id': article_data.get('authorId'),
+            'uploader_id': str_or_none(article_data.get('authorId')),
             'timestamp': parse_iso8601(article_data.get('createdAt')),
             'thumbnails': thumbnails,
             'comment_count': int_or_none(article_data.get('commentsCount')),
@@ -82,6 +84,7 @@ def _real_extract(self, url):


 class BleacherReportCMSIE(AMPIE):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36}|\d{5})'
     _TESTS = [{
         'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1&library=video-cms',

@@ -2,7 +2,7 @@


 class CableAVIE(InfoExtractor):
-    _VALID_URL = r'https://cableav\.tv/(?P<id>[a-zA-Z0-9]+)'
+    _VALID_URL = r'https?://cableav\.tv/(?P<id>[a-zA-Z0-9]+)'
     _TESTS = [{
         'url': 'https://cableav.tv/lS4iR9lWjN8/',
         'md5': '7e3fe5e49d61c4233b7f5b0f69b15e18',

@@ -13,7 +13,7 @@


 class CamFMShowIE(InfoExtractor):
-    _VALID_URL = r'https://(?:www\.)?camfm\.co\.uk/shows/(?P<id>[^/]+)'
+    _VALID_URL = r'https?://(?:www\.)?camfm\.co\.uk/shows/(?P<id>[^/]+)'
     _TESTS = [{
         'playlist_mincount': 5,
         'url': 'https://camfm.co.uk/shows/soul-mining/',
@@ -42,7 +42,7 @@ def _real_extract(self, url):


 class CamFMEpisodeIE(InfoExtractor):
-    _VALID_URL = r'https://(?:www\.)?camfm\.co\.uk/player/(?P<id>[^/]+)'
+    _VALID_URL = r'https?://(?:www\.)?camfm\.co\.uk/player/(?P<id>[^/]+)'
     _TESTS = [{
         'url': 'https://camfm.co.uk/player/43336',
         'skip': 'Episode will expire - don\'t actually know when, but it will go eventually',

@@ -76,6 +76,7 @@ def _real_extract(self, url):


 class CBSIE(CBSBaseIE):
+    _WORKING = False
     _VALID_URL = r'''(?x)
         (?:
             cbs:|

@@ -1,98 +0,0 @@
-from .cbs import CBSIE
-from ..utils import int_or_none
-
-
-class CBSInteractiveIE(CBSIE):  # XXX: Do not subclass from concrete IE
-    _VALID_URL = r'https?://(?:www\.)?(?P<site>cnet|zdnet)\.com/(?:videos|video(?:/share)?)/(?P<id>[^/?]+)'
-    _TESTS = [{
-        'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/',
-        'info_dict': {
-            'id': 'R49SYt__yAfmlXR85z4f7gNmCBDcN_00',
-            'display_id': 'hands-on-with-microsofts-windows-8-1-update',
-            'ext': 'mp4',
-            'title': 'Hands-on with Microsoft Windows 8.1 Update',
-            'description': 'The new update to the Windows 8 OS brings improved performance for mouse and keyboard users.',
-            'uploader_id': '6085384d-619e-11e3-b231-14feb5ca9861',
-            'uploader': 'Sarah Mitroff',
-            'duration': 70,
-            'timestamp': 1396479627,
-            'upload_date': '20140402',
-        },
-        'params': {
-            # m3u8 download
-            'skip_download': True,
-        },
-    }, {
-        'url': 'http://www.cnet.com/videos/whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187/',
-        'md5': 'f11d27b2fa18597fbf92444d2a9ed386',
-        'info_dict': {
-            'id': 'kjOJd_OoVJqbg_ZD8MZCOk8Wekb9QccK',
-            'display_id': 'whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187',
-            'ext': 'mp4',
-            'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)',
-            'description': 'md5:d2b9a95a5ffe978ae6fbd4cf944d618f',
-            'uploader_id': 'b163284d-6b73-44fc-b3e6-3da66c392d40',
-            'uploader': 'Ashley Esqueda',
-            'duration': 1482,
-            'timestamp': 1433289889,
-            'upload_date': '20150603',
-        },
-    }, {
-        'url': 'http://www.zdnet.com/video/share/video-keeping-android-smartphones-and-tablets-secure/',
-        'info_dict': {
-            'id': 'k0r4T_ehht4xW_hAOqiVQPuBDPZ8SRjt',
-            'display_id': 'video-keeping-android-smartphones-and-tablets-secure',
-            'ext': 'mp4',
-            'title': 'Video: Keeping Android smartphones and tablets secure',
-            'description': 'Here\'s the best way to keep Android devices secure, and what you do when they\'ve come to the end of their lives.',
-            'uploader_id': 'f2d97ea2-8175-11e2-9d12-0018fe8a00b0',
-            'uploader': 'Adrian Kingsley-Hughes',
-            'duration': 731,
-            'timestamp': 1449129925,
-            'upload_date': '20151203',
-        },
-        'params': {
-            # m3u8 download
-            'skip_download': True,
-        },
-    }, {
-        'url': 'http://www.zdnet.com/video/huawei-matebook-x-video/',
-        'only_matching': True,
-    }]
-
-    MPX_ACCOUNTS = {
-        'cnet': 2198311517,
-        'zdnet': 2387448114,
-    }
-
-    def _real_extract(self, url):
-        site, display_id = self._match_valid_url(url).groups()
-        webpage = self._download_webpage(url, display_id)
-
-        data_json = self._html_search_regex(
-            r"data(?:-(?:cnet|zdnet))?-video(?:-(?:uvp(?:js)?|player))?-options='([^']+)'",
-            webpage, 'data json')
-        data = self._parse_json(data_json, display_id)
-        vdata = data.get('video') or (data.get('videos') or data.get('playlist'))[0]
-
-        video_id = vdata['mpxRefId']
-
-        title = vdata['title']
-        author = vdata.get('author')
-        if author:
-            uploader = '%s %s' % (author['firstName'], author['lastName'])
-            uploader_id = author.get('id')
-        else:
-            uploader = None
-            uploader_id = None
-
-        info = self._extract_video_info(video_id, site, self.MPX_ACCOUNTS[site])
-        info.update({
-            'id': video_id,
-            'display_id': display_id,
-            'title': title,
-            'duration': int_or_none(vdata.get('duration')),
-            'uploader': uploader,
-            'uploader_id': uploader_id,
-        })
-        return info

@@ -8,6 +8,7 @@

 # class CBSSportsEmbedIE(CBSBaseIE):
 class CBSSportsEmbedIE(InfoExtractor):
+    _WORKING = False
     IE_NAME = 'cbssports:embed'
     _VALID_URL = r'''(?ix)https?://(?:(?:www\.)?cbs|embed\.247)sports\.com/player/embed.+?
         (?:
@@ -75,6 +76,7 @@ def _real_extract(self, url):


 class CBSSportsIE(CBSSportsBaseIE):
+    _WORKING = False
     IE_NAME = 'cbssports'
     _VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/video/(?P<id>[^/?#&]+)'
     _TESTS = [{
@@ -92,6 +94,7 @@ class CBSSportsIE(CBSSportsBaseIE):


 class TwentyFourSevenSportsIE(CBSSportsBaseIE):
+    _WORKING = False
     IE_NAME = '247sports'
     _VALID_URL = r'https?://(?:www\.)?247sports\.com/Video/(?:[^/?#&]+-)?(?P<id>\d+)'
     _TESTS = [{

@@ -88,6 +88,20 @@ class CCTVIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
+    }, {
+        # videoCenterId: "id"
+        'url': 'http://news.cctv.com/2024/02/21/ARTIcU5tKIOIF2myEGCATkLo240221.shtml',
+        'info_dict': {
+            'id': '5c846c0518444308ba32c4159df3b3e0',
+            'ext': 'mp4',
+            'title': '《平“语”近人——习近平喜欢的典故》第三季 第5集:风物长宜放眼量',
+            'uploader': 'yangjuan',
+            'timestamp': 1708554940,
+            'upload_date': '20240221',
+        },
+        'params': {
+            'skip_download': True,
+        },
     }, {
         # var ids = ["id"]
         'url': 'http://www.ncpa-classic.com/clt/more/416/index.shtml',
@@ -128,7 +142,7 @@ def _real_extract(self, url):

         video_id = self._search_regex(
             [r'var\s+guid\s*=\s*["\']([\da-fA-F]+)',
-             r'videoCenterId["\']\s*,\s*["\']([\da-fA-F]+)',
+             r'videoCenterId(?:["\']\s*,|:)\s*["\']([\da-fA-F]+)',
              r'changePlayer\s*\(\s*["\']([\da-fA-F]+)',
              r'load[Vv]ideo\s*\(\s*["\']([\da-fA-F]+)',
              r'var\s+initMyAray\s*=\s*["\']([\da-fA-F]+)',

@@ -51,7 +51,7 @@ class CeskaTelevizeIE(InfoExtractor):
         'url': 'http://www.ceskatelevize.cz/ivysilani/zive/ct4/',
         'only_matching': True,
         'info_dict': {
-            'id': 402,
+            'id': '402',
             'ext': 'mp4',
             'title': r're:^ČT Sport \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
             'is_live': True,

@@ -17,6 +17,7 @@ class CGTNIE(InfoExtractor):
             'thumbnail': r're:^https?://.*\.jpg$',
             'timestamp': 1615295940,
             'upload_date': '20210309',
+            'categories': ['Video'],
         },
         'params': {
             'skip_download': True
@@ -29,8 +30,8 @@ class CGTNIE(InfoExtractor):
             'title': 'China, Indonesia vow to further deepen maritime cooperation',
             'thumbnail': r're:^https?://.*\.png$',
             'description': 'China and Indonesia vowed to upgrade their cooperation into the maritime sector and also for political security, economy, and cultural and people-to-people exchanges.',
-            'author': 'CGTN',
-            'category': 'China',
+            'creators': ['CGTN'],
+            'categories': ['China'],
             'timestamp': 1622950200,
             'upload_date': '20210606',
         },
@@ -45,7 +46,12 @@ def _real_extract(self, url):
         webpage = self._download_webpage(url, video_id)

         download_url = self._html_search_regex(r'data-video ="(?P<url>.+m3u8)"', webpage, 'download_url')
-        datetime_str = self._html_search_regex(r'<span class="date">\s*(.+?)\s*</span>', webpage, 'datetime_str', fatal=False)
+        datetime_str = self._html_search_regex(
+            r'<span class="date">\s*(.+?)\s*</span>', webpage, 'datetime_str', fatal=False)
+        category = self._html_search_regex(
+            r'<span class="section">\s*(.+?)\s*</span>', webpage, 'category', fatal=False)
+        author = self._search_regex(
+            r'<div class="news-author-name">\s*(.+?)\s*</div>', webpage, 'author', default=None)

         return {
             'id': video_id,
@@ -53,9 +59,7 @@ def _real_extract(self, url):
             'description': self._og_search_description(webpage, default=None),
             'thumbnail': self._og_search_thumbnail(webpage),
             'formats': self._extract_m3u8_formats(download_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls'),
-            'category': self._html_search_regex(r'<span class="section">\s*(.+?)\s*</span>',
-                                                webpage, 'category', fatal=False),
-            'author': self._html_search_regex(r'<div class="news-author-name">\s*(.+?)\s*</div>',
-                                              webpage, 'author', default=None, fatal=False),
+            'categories': [category] if category else None,
+            'creators': [author] if author else None,
             'timestamp': try_get(unified_timestamp(datetime_str), lambda x: x - 8 * 3600),
         }

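Editor's note: the CGTN page prints Beijing local time, and the extractor keeps subtracting 8 hours from the parsed value to obtain a UTC epoch. A minimal sketch of that shift, assuming (as the `try_get(..., lambda x: x - 8 * 3600)` line implies) that the parsed value is a naive UTC+8 time read as if it were UTC:

    def beijing_to_utc_epoch(local_epoch):
        # unified_timestamp() parses the on-page time as if it were UTC;
        # the page actually shows UTC+8, so shift back by 8 hours.
        return local_epoch - 8 * 3600 if local_epoch is not None else None

    assert beijing_to_utc_epoch(1622950200 + 8 * 3600) == 1622950200
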
@@ -1,207 +0,0 @@
-import itertools
-import json
-import urllib.parse
-
-from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    clean_html,
-    int_or_none,
-    str_to_int,
-    url_or_none,
-)
-
-
-class ChingariBaseIE(InfoExtractor):
-    def _get_post(self, id, post_data):
-        media_data = post_data['mediaLocation']
-        base_url = media_data['base']
-        author_data = post_data.get('authorData', {})
-        song_data = post_data.get('song', {})  # revist this in future for differentiating b/w 'art' and 'author'
-
-        formats = [{
-            'format_id': frmt,
-            'width': str_to_int(frmt[1:]),
-            'url': base_url + frmt_path,
-        } for frmt, frmt_path in media_data.get('transcoded', {}).items()]
-
-        if media_data.get('path'):
-            formats.append({
-                'format_id': 'original',
-                'format_note': 'Direct video.',
-                'url': base_url + '/apipublic' + media_data['path'],
-                'quality': 10,
-            })
-        timestamp = str_to_int(post_data.get('created_at'))
-        if timestamp:
-            timestamp = int_or_none(timestamp, 1000)
-
-        thumbnail, uploader_url = None, None
-        if media_data.get('thumbnail'):
-            thumbnail = base_url + media_data.get('thumbnail')
-        if author_data.get('username'):
-            uploader_url = 'https://chingari.io/' + author_data.get('username')
-
-        return {
-            'id': id,
-            'extractor_key': ChingariIE.ie_key(),
-            'extractor': 'Chingari',
-            'title': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
-            'description': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
-            'duration': media_data.get('duration'),
-            'thumbnail': url_or_none(thumbnail),
-            'like_count': post_data.get('likeCount'),
-            'view_count': post_data.get('viewsCount'),
-            'comment_count': post_data.get('commentCount'),
-            'repost_count': post_data.get('shareCount'),
-            'timestamp': timestamp,
-            'uploader_id': post_data.get('userId') or author_data.get('_id'),
-            'uploader': author_data.get('name'),
-            'uploader_url': url_or_none(uploader_url),
-            'track': song_data.get('title'),
-            'artist': song_data.get('author'),
-            'formats': formats,
-        }
-
-
-class ChingariIE(ChingariBaseIE):
-    _VALID_URL = r'https?://(?:www\.)?chingari\.io/share/post\?id=(?P<id>[^&/#?]+)'
-    _TESTS = [{
-        'url': 'https://chingari.io/share/post?id=612f8f4ce1dc57090e8a7beb',
-        'info_dict': {
-            'id': '612f8f4ce1dc57090e8a7beb',
-            'ext': 'mp4',
-            'title': 'Happy birthday Srila Prabhupada',
-            'description': 'md5:c7080ebfdfeb06016e638c286d6bc3fa',
-            'duration': 0,
-            'thumbnail': 'https://media.chingari.io/uploads/c41d30e2-06b6-4e3b-9b4b-edbb929cec06-1630506826911/thumbnail/198f993f-ce87-4623-82c6-cd071bd6d4f4-1630506828016.jpg',
-            'like_count': int,
-            'view_count': int,
-            'comment_count': int,
-            'repost_count': int,
-            'timestamp': 1630506828,
-            'upload_date': '20210901',
-            'uploader_id': '5f0403982c8bd344f4813f8c',
-            'uploader': 'ISKCON,Inc.',
-            'uploader_url': 'https://chingari.io/iskcon,inc',
-            'track': None,
-            'artist': None,
-        },
-        'params': {'skip_download': True}
-    }]
-
-    def _real_extract(self, url):
-        id = self._match_id(url)
-        post_json = self._download_json(f'https://api.chingari.io/post/post_details/{id}', id)
-        if post_json['code'] != 200:
-            raise ExtractorError(post_json['message'], expected=True)
-        post_data = post_json['data']
-        return self._get_post(id, post_data)
-
-
-class ChingariUserIE(ChingariBaseIE):
-    _VALID_URL = r'https?://(?:www\.)?chingari\.io/(?!share/post)(?P<id>[^/?]+)'
-    _TESTS = [{
-        'url': 'https://chingari.io/dada1023',
-        'info_dict': {
-            'id': 'dada1023',
-        },
-        'params': {'playlistend': 3},
-        'playlist': [{
-            'url': 'https://chingari.io/share/post?id=614781f3ade60b3a0bfff42a',
-            'info_dict': {
-                'id': '614781f3ade60b3a0bfff42a',
-                'ext': 'mp4',
-                'title': '#chingaribappa ',
-                'description': 'md5:d1df21d84088770468fa63afe3b17857',
-                'duration': 7,
-                'thumbnail': 'https://media.chingari.io/uploads/346d86d4-abb2-474e-a164-ffccf2bbcb72-1632076273717/thumbnail/b0b3aac2-2b86-4dd1-909d-9ed6e57cf77c-1632076275552.jpg',
-                'like_count': int,
-                'view_count': int,
-                'comment_count': int,
-                'repost_count': int,
-                'timestamp': 1632076275,
-                'upload_date': '20210919',
-                'uploader_id': '5efc4b12cca35c3d1794c2d3',
-                'uploader': 'dada (girish) dhawale',
-                'uploader_url': 'https://chingari.io/dada1023',
-                'track': None,
-                'artist': None
-            },
-            'params': {'skip_download': True}
-        }, {
-            'url': 'https://chingari.io/share/post?id=6146b132bcbf860959e12cba',
-            'info_dict': {
-                'id': '6146b132bcbf860959e12cba',
-                'ext': 'mp4',
-                'title': 'Tactor harvesting',
-                'description': 'md5:8403f12dce68828b77ecee7eb7e887b7',
-                'duration': 59.3,
-                'thumbnail': 'https://media.chingari.io/uploads/b353ca70-7a87-400d-93a6-fa561afaec86-1632022814584/thumbnail/c09302e3-2043-41b1-a2fe-77d97e5bd676-1632022834260.jpg',
-                'like_count': int,
-                'view_count': int,
-                'comment_count': int,
-                'repost_count': int,
-                'timestamp': 1632022834,
-                'upload_date': '20210919',
-                'uploader_id': '5efc4b12cca35c3d1794c2d3',
-                'uploader': 'dada (girish) dhawale',
-                'uploader_url': 'https://chingari.io/dada1023',
-                'track': None,
-                'artist': None
-            },
-            'params': {'skip_download': True}
-        }, {
-            'url': 'https://chingari.io/share/post?id=6145651b74cb030a64c40b82',
-            'info_dict': {
-                'id': '6145651b74cb030a64c40b82',
-                'ext': 'mp4',
-                'title': '#odiabhajan ',
-                'description': 'md5:687ea36835b9276cf2af90f25e7654cb',
-                'duration': 56.67,
-                'thumbnail': 'https://media.chingari.io/uploads/6cbf216b-babc-4cce-87fe-ceaac8d706ac-1631937782708/thumbnail/8855754f-6669-48ce-b269-8cc0699ed6da-1631937819522.jpg',
-                'like_count': int,
-                'view_count': int,
-                'comment_count': int,
-                'repost_count': int,
-                'timestamp': 1631937819,
-                'upload_date': '20210918',
-                'uploader_id': '5efc4b12cca35c3d1794c2d3',
-                'uploader': 'dada (girish) dhawale',
-                'uploader_url': 'https://chingari.io/dada1023',
-                'track': None,
-                'artist': None
-            },
-            'params': {'skip_download': True}
-        }],
-    }, {
-        'url': 'https://chingari.io/iskcon%2Cinc',
-        'playlist_mincount': 1025,
-        'info_dict': {
-            'id': 'iskcon%2Cinc',
-        },
-    }]
-
-    def _entries(self, id):
-        skip = 0
-        has_more = True
-        for page in itertools.count():
-            posts = self._download_json('https://api.chingari.io/users/getPosts', id,
-                                        data=json.dumps({'userId': id, 'ownerId': id, 'skip': skip, 'limit': 20}).encode(),
-                                        headers={'content-type': 'application/json;charset=UTF-8'},
-                                        note='Downloading page %s' % page)
-            for post in posts.get('data', []):
-                post_data = post['post']
-                yield self._get_post(post_data['_id'], post_data)
-            skip += 20
-            has_more = posts['hasMoreData']
-            if not has_more:
-                break
-
-    def _real_extract(self, url):
-        alt_id = self._match_id(url)
-        post_json = self._download_json(f'https://api.chingari.io/user/{alt_id}', alt_id)
-        if post_json['code'] != 200:
-            raise ExtractorError(post_json['message'], expected=True)
-        id = post_json['data']['_id']
-        return self.playlist_result(self._entries(id), playlist_id=alt_id)

@@ -2,7 +2,7 @@

 from .common import InfoExtractor
 from ..utils import (
-    ExtractorError,
+    UserNotLive,
     float_or_none,
     int_or_none,
     parse_iso8601,
@@ -40,7 +40,7 @@ def _real_extract(self, url):
             note='Downloading channel info', errnote='Unable to download channel info')['content']

         if live_detail.get('status') == 'CLOSE':
-            raise ExtractorError('The channel is not currently live', expected=True)
+            raise UserNotLive(video_id=channel_id)

         live_playback = self._parse_json(live_detail['livePlaybackJson'], channel_id)

@@ -2,6 +2,7 @@


 class CinemaxIE(HBOBaseIE):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?cinemax\.com/(?P<path>[^/]+/video/[0-9a-z-]+-(?P<id>\d+))'
     _TESTS = [{
         'url': 'https://www.cinemax.com/warrior/video/s1-ep-1-recap-20126903',

@@ -13,7 +13,7 @@


 class CineverseBaseIE(InfoExtractor):
-    _VALID_URL_BASE = r'https://www\.(?P<host>%s)' % '|'.join(map(re.escape, (
+    _VALID_URL_BASE = r'https?://www\.(?P<host>%s)' % '|'.join(map(re.escape, (
         'cineverse.com',
         'asiancrush.com',
         'dovechannel.com',

@@ -1,76 +0,0 @@
-from .common import InfoExtractor
-from ..utils import (
-    int_or_none,
-    url_or_none,
-)
-
-
-class CliphunterIE(InfoExtractor):
-    IE_NAME = 'cliphunter'
-
-    _VALID_URL = r'''(?x)https?://(?:www\.)?cliphunter\.com/w/
-        (?P<id>[0-9]+)/
-        (?P<seo>.+?)(?:$|[#\?])
-    '''
-    _TESTS = [{
-        'url': 'http://www.cliphunter.com/w/1012420/Fun_Jynx_Maze_solo',
-        'md5': 'b7c9bbd4eb3a226ab91093714dcaa480',
-        'info_dict': {
-            'id': '1012420',
-            'ext': 'flv',
-            'title': 'Fun Jynx Maze solo',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'age_limit': 18,
-        },
-        'skip': 'Video gone',
-    }, {
-        'url': 'http://www.cliphunter.com/w/2019449/ShesNew__My_booty_girlfriend_Victoria_Paradices_pussy_filled_with_jizz',
-        'md5': '55a723c67bfc6da6b0cfa00d55da8a27',
-        'info_dict': {
-            'id': '2019449',
-            'ext': 'mp4',
-            'title': 'ShesNew - My booty girlfriend, Victoria Paradice\'s pussy filled with jizz',
-            'thumbnail': r're:^https?://.*\.jpg$',
-            'age_limit': 18,
-        },
-    }]
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-
-        video_title = self._search_regex(
-            r'mediaTitle = "([^"]+)"', webpage, 'title')
-
-        gexo_files = self._parse_json(
-            self._search_regex(
-                r'var\s+gexoFiles\s*=\s*({.+?});', webpage, 'gexo files'),
-            video_id)
-
-        formats = []
-        for format_id, f in gexo_files.items():
-            video_url = url_or_none(f.get('url'))
-            if not video_url:
-                continue
-            fmt = f.get('fmt')
-            height = f.get('h')
-            format_id = '%s_%sp' % (fmt, height) if fmt and height else format_id
-            formats.append({
-                'url': video_url,
-                'format_id': format_id,
-                'width': int_or_none(f.get('w')),
-                'height': int_or_none(height),
-                'tbr': int_or_none(f.get('br')),
-            })
-
-        thumbnail = self._search_regex(
-            r"var\s+mov_thumb\s*=\s*'([^']+)';",
-            webpage, 'thumbnail', fatal=False)
-
-        return {
-            'id': video_id,
-            'title': video_title,
-            'formats': formats,
-            'age_limit': self._rta_search(webpage),
-            'thumbnail': thumbnail,
-        }

@@ -2,6 +2,7 @@


 class ClipRsIE(OnetBaseIE):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?clip\.rs/(?P<id>[^/]+)/\d+'
     _TEST = {
         'url': 'http://www.clip.rs/premijera-frajle-predstavljaju-novi-spot-za-pesmu-moli-me-moli/3732',

@@ -4,6 +4,7 @@


 class CloserToTruthIE(InfoExtractor):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?closertotruth\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'http://closertotruth.com/series/solutions-the-mind-body-problem#video-3688',

@@ -21,7 +21,7 @@ class CNBCVideoIE(InfoExtractor):
             'modified_date': '20231208',
             'release_date': '20231207',
             'duration': 65,
-            'author': 'Sean Conlon',
+            'creators': ['Sean Conlon'],
             'title': 'Here\'s a first look at McDonald\'s new spinoff brand, CosMc\'s',
             'thumbnail': 'https://image.cnbcfm.com/api/v1/image/107344192-1701894812493-CosMcsskyHero_2336x1040_hero-desktop.jpg?v=1701894855',
         },
@@ -29,7 +29,7 @@ class CNBCVideoIE(InfoExtractor):
     }, {
         'url': 'https://www.cnbc.com/video/2023/12/08/jim-cramer-shares-his-take-on-seattles-tech-scene.html',
         'info_dict': {
-            'author': 'Jim Cramer',
+            'creators': ['Jim Cramer'],
             'channel': 'Mad Money with Jim Cramer',
             'description': 'md5:72925be21b952e95eba51178dddf4e3e',
             'duration': 299.0,
@@ -49,7 +49,7 @@ class CNBCVideoIE(InfoExtractor):
     }, {
         'url': 'https://www.cnbc.com/video/2023/12/08/the-epicenter-of-ai-is-in-seattle-says-jim-cramer.html',
         'info_dict': {
-            'author': 'Jim Cramer',
+            'creators': ['Jim Cramer'],
             'channel': 'Mad Money with Jim Cramer',
             'description': 'md5:72925be21b952e95eba51178dddf4e3e',
             'duration': 113.0,
@@ -86,12 +86,12 @@ def _real_extract(self, url):
             'id': ('id', {str_or_none}),
             'title': ('title', {str}),
             'description': ('description', {str}),
-            'author': ('author', ..., 'name', {str}),
+            'creators': ('author', ..., 'name', {str}),
             'timestamp': ('datePublished', {parse_iso8601}),
             'release_timestamp': ('uploadDate', {parse_iso8601}),
             'modified_timestamp': ('dateLastPublished', {parse_iso8601}),
             'thumbnail': ('thumbnail', {url_or_none}),
             'duration': ('duration', {int_or_none}),
             'channel': ('section', 'title', {str}),
-        }, get_all=False),
+        }),
     }

@@ -262,7 +262,7 @@ class InfoExtractor:

     direct: True if a direct video file was given (must only be set by GenericIE)
     alt_title: A secondary title of the video.
-    display_id An alternative identifier for the video, not necessarily
+    display_id: An alternative identifier for the video, not necessarily
                 unique, but available before title. Typically, id is
                 something like "4234987", title "Dancing naked mole rats",
                 and display_id "dancing-naked-mole-rats"
@@ -747,7 +747,7 @@ def extract(self, url):
                 raise
             except ExtractorError as e:
                 e.video_id = e.video_id or self.get_temp_id(url)
-                e.ie = e.ie or self.IE_NAME,
+                e.ie = e.ie or self.IE_NAME
                 e.traceback = e.traceback or sys.exc_info()[2]
                 raise
             except IncompleteRead as e:
@@ -1339,7 +1339,10 @@ def _get_netrc_login_info(self, netrc_machine=None):
         else:
             return None, None
         if not info:
-            raise netrc.NetrcParseError(f'No authenticators for {netrc_machine}')
+            self.to_screen(f'No authenticators for {netrc_machine}')
+            return None, None
+
+        self.write_debug(f'Using netrc for {netrc_machine} authentication')
         return info[0], info[2]

     def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):

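Editor's note: the `_get_netrc_login_info` hunk above downgrades a missing netrc entry from a hard `NetrcParseError` to a logged message plus a `(None, None)` fallback. A rough standalone sketch of the new behavior, using only the standard library (plain `print` stands in for `self.to_screen`):

    import netrc

    def get_netrc_login(machine, path=None):
        try:
            info = netrc.netrc(path).authenticators(machine)
        except (OSError, netrc.NetrcParseError):
            return None, None
        if not info:
            print(f'No authenticators for {machine}')  # was: raise NetrcParseError
            return None, None
        return info[0], info[2]  # login, password
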
@@ -65,7 +65,7 @@ def is_live(v_type):
             'title': title,
             'description': str_or_none(content['details'].get('description_%s_t' % (url_lang, ))),
             'timestamp': unified_timestamp(content['details'].get('liveDateTime')),
-            'category': [category] if category else None,
+            'categories': [category] if category else None,
             'thumbnail': urljoin(url, str_or_none(content['details'].get('image_%s_s' % (url_lang, )))),
             'is_live': is_live(content['details'].get('type')),
         }

@@ -1,12 +1,13 @@
+import json
+
 from .brightcove import BrightcoveNewIE
 from .common import InfoExtractor
 from ..utils import (
-    dict_get,
-    get_element_by_id,
-    js_to_json,
-    traverse_obj,
+    extract_attributes,
+    get_element_html_by_class,
+    get_element_text_and_html_by_tag,
 )
+from ..utils.traversal import traverse_obj


 class CraftsyIE(InfoExtractor):
@@ -41,28 +42,34 @@ def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)

-        video_data = self._parse_json(self._search_regex(
-            r'class_video_player_vars\s*=\s*({.*})\s*;',
-            get_element_by_id('vidstore-classes_class-video-player-js-extra', webpage),
-            'video data'), video_id, transform_source=js_to_json)
+        video_player = get_element_html_by_class('class-video-player', webpage)
+        video_data = traverse_obj(video_player, (
+            {extract_attributes}, 'wire:snapshot', {json.loads}, 'data', {dict})) or {}
+        video_js = traverse_obj(video_player, (
+            {lambda x: get_element_text_and_html_by_tag('video-js', x)}, 1, {extract_attributes})) or {}

-        account_id = traverse_obj(video_data, ('video_player', 'bc_account_id'))
+        has_access = video_data.get('userHasAccess')
+        lessons = traverse_obj(video_data, ('lessons', ..., ..., lambda _, v: v['video_id']))

-        entries = []
-        class_preview = traverse_obj(video_data, ('video_player', 'class_preview'))
-        if class_preview:
-            v_id = class_preview.get('video_id')
-            entries.append(self.url_result(
-                f'http://players.brightcove.net/{account_id}/default_default/index.html?videoId={v_id}',
-                BrightcoveNewIE, v_id, class_preview.get('title')))
+        preview_id = video_js.get('data-video-id')
+        if preview_id and preview_id not in traverse_obj(lessons, (..., 'video_id')):
+            if not lessons and not has_access:
+                self.report_warning(
+                    'Only extracting preview. For the full class, pass cookies '
+                    + f'from an account that has access. {self._login_hint()}')
+            lessons.append({'video_id': preview_id})

-        if dict_get(video_data, ('is_free', 'user_has_access')):
-            entries += [
-                self.url_result(
+        if not lessons and not has_access:
+            self.raise_login_required('You do not have access to this class')
+
+        account_id = video_data.get('accountId') or video_js['data-account']
+
+        def entries(lessons):
+            for lesson in lessons:
+                yield self.url_result(
                     f'http://players.brightcove.net/{account_id}/default_default/index.html?videoId={lesson["video_id"]}',
                     BrightcoveNewIE, lesson['video_id'], lesson.get('title'))
-                for lesson in video_data['lessons']]

         return self.playlist_result(
-            entries, video_id, video_data.get('class_title'),
+            entries(lessons), video_id, self._html_search_meta(('og:title', 'twitter:title'), webpage),
             self._html_search_meta(('og:description', 'description'), webpage, default=None))

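Editor's note: the rewritten Craftsy extractor reads the Livewire `wire:snapshot` attribute off the player element and decodes the JSON payload inside it. A rough standard-library sketch of that path (the attribute handling is simplified; yt-dlp's `extract_attributes` also handles quoting and entity edge cases this toy regex does not):

    import html
    import json
    import re

    def parse_wire_snapshot(element_html):
        # Grab the raw attribute value, unescape HTML entities, decode JSON.
        mobj = re.search(r'wire:snapshot="([^"]*)"', element_html)
        if not mobj:
            return {}
        snapshot = json.loads(html.unescape(mobj.group(1)))
        return snapshot.get('data') or {}
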
@@ -1,18 +1,32 @@
+import re
+
 from .common import InfoExtractor
-from ..utils import remove_end
+from ..utils import make_archive_id, remove_end


 class CrtvgIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?crtvg\.es/tvg/a-carta/[^/#?]+-(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?crtvg\.es/tvg/a-carta/(?P<id>[^/#?]+)'
     _TESTS = [{
         'url': 'https://www.crtvg.es/tvg/a-carta/os-caimans-do-tea-5839623',
         'md5': 'c0958d9ff90e4503a75544358758921d',
         'info_dict': {
-            'id': '5839623',
+            'id': 'os-caimans-do-tea-5839623',
             'title': 'Os caimáns do Tea',
             'ext': 'mp4',
             'description': 'md5:f71cfba21ae564f0a6f415b31de1f842',
             'thumbnail': r're:^https?://.*\.(?:jpg|png)',
+            '_old_archive_ids': ['crtvg 5839623'],
+        },
+        'params': {'skip_download': 'm3u8'}
+    }, {
+        'url': 'https://www.crtvg.es/tvg/a-carta/a-parabolica-love-story',
+        'md5': '9a47b95a1749db7b7eb3214904624584',
+        'info_dict': {
+            'id': 'a-parabolica-love-story',
+            'title': 'A parabólica / Trabuco, o can mordedor / Love Story',
+            'ext': 'mp4',
+            'description': 'md5:f71cfba21ae564f0a6f415b31de1f842',
+            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
         },
         'params': {'skip_download': 'm3u8'}
     }]
@@ -24,8 +38,13 @@ def _real_extract(self, url):
         formats = self._extract_m3u8_formats(video_url + '/playlist.m3u8', video_id, fatal=False)
         formats.extend(self._extract_mpd_formats(video_url + '/manifest.mpd', video_id, fatal=False))

+        old_video_id = None
+        if mobj := re.fullmatch(r'[^/#?]+-(?P<old_id>\d{7})', video_id):
+            old_video_id = [make_archive_id(self, mobj.group('old_id'))]
+
         return {
             'id': video_id,
+            '_old_archive_ids': old_video_id,
             'formats': formats,
             'title': remove_end(self._html_search_meta(
                 ['og:title', 'twitter:title'], webpage, 'title', default=None), ' | CRTVG'),

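Editor's note: with the Crtvg `_VALID_URL` widened to slug-style IDs, `_old_archive_ids` keeps download archives from re-fetching videos that were saved under the old 7-digit numeric IDs. A hedged sketch of the mapping, assuming (as the test expectation `['crtvg 5839623']` above shows) that `make_archive_id` produces the lowercase extractor name plus the ID:

    import re

    def old_archive_ids(video_id, ie_name='Crtvg'):
        mobj = re.fullmatch(r'[^/#?]+-(?P<old_id>\d{7})', video_id)
        return [f'{ie_name.lower()} {mobj.group("old_id")}'] if mobj else None

    assert old_archive_ids('os-caimans-do-tea-5839623') == ['crtvg 5839623']
    assert old_archive_ids('a-parabolica-love-story') is None
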
@@ -136,7 +136,7 @@ def _call_api(self, path, internal_id, lang, note='api', query={}):
         return result

     def _extract_formats(self, stream_response, display_id=None):
-        requested_formats = self._configuration_arg('format') or ['adaptive_hls']
+        requested_formats = self._configuration_arg('format') or ['vo_adaptive_hls']
         available_formats = {}
         for stream_type, streams in traverse_obj(
                 stream_response, (('streams', ('data', 0)), {dict.items}, ...)):
@@ -514,7 +514,7 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
             'track': 'Egaono Hana',
             'artist': 'Goose house',
             'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
-            'genre': ['J-Pop'],
+            'genres': ['J-Pop'],
         },
         'params': {'skip_download': 'm3u8'},
     }, {
@@ -527,7 +527,7 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
             'track': 'Crossing Field',
             'artist': 'LiSA',
             'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
-            'genre': ['Anime'],
+            'genres': ['Anime'],
         },
         'params': {'skip_download': 'm3u8'},
     }, {
@@ -541,7 +541,7 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
             'artist': 'LiSA',
             'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
             'description': 'md5:747444e7e6300907b7a43f0a0503072e',
-            'genre': ['J-Pop'],
+            'genres': ['J-Pop'],
         },
         'params': {'skip_download': 'm3u8'},
     }, {
@@ -594,7 +594,7 @@ def _transform_music_response(data):
                 'width': ('width', {int_or_none}),
                 'height': ('height', {int_or_none}),
             }),
-            'genre': ('genres', ..., 'displayValue'),
+            'genres': ('genres', ..., 'displayValue'),
             'age_limit': ('maturity_ratings', -1, {parse_age_limit}),
         }),
     }
@@ -611,7 +611,7 @@ class CrunchyrollArtistIE(CrunchyrollBaseIE):
         'info_dict': {
             'id': 'MA179CB50D',
             'title': 'LiSA',
-            'genre': ['J-Pop', 'Anime', 'Rock'],
+            'genres': ['J-Pop', 'Anime', 'Rock'],
             'description': 'md5:16d87de61a55c3f7d6c454b73285938e',
         },
         'playlist_mincount': 83,
@@ -645,6 +645,6 @@ def _transform_artist_response(data):
                 'width': ('width', {int_or_none}),
                 'height': ('height', {int_or_none}),
             }),
-            'genre': ('genres', ..., 'displayValue'),
+            'genres': ('genres', ..., 'displayValue'),
         }),
     }

@@ -110,11 +110,11 @@ def _real_extract(self, url):


 class CybraryCourseIE(CybraryBaseIE):
-    _VALID_URL = r'https://app\.cybrary\.it/browse/course/(?P<id>[\w-]+)/?(?:$|[#?])'
+    _VALID_URL = r'https?://app\.cybrary\.it/browse/course/(?P<id>[\w-]+)/?(?:$|[#?])'
     _TESTS = [{
         'url': 'https://app.cybrary.it/browse/course/az-500-microsoft-azure-security-technologies',
         'info_dict': {
-            'id': 898,
+            'id': '898',
             'title': 'AZ-500: Microsoft Azure Security Technologies',
             'description': 'md5:69549d379c0fc1dec92926d4e8b6fbd4'
         },
@@ -122,7 +122,7 @@ class CybraryCourseIE(CybraryBaseIE):
     }, {
         'url': 'https://app.cybrary.it/browse/course/cybrary-orientation',
         'info_dict': {
-            'id': 1245,
+            'id': '1245',
             'title': 'Cybrary Orientation',
             'description': 'md5:9e69ff66b32fe78744e0ad4babe2e88e'
         },

@@ -1,6 +1,7 @@
 import functools
 import json
 import re
+import urllib.parse

 from .common import InfoExtractor
 from ..networking.exceptions import HTTPError
@@ -44,11 +45,12 @@ def _real_initialize(self):
         self._FAMILY_FILTER = ff == 'on' if ff else age_restricted(18, self.get_param('age_limit'))
         self._set_dailymotion_cookie('ff', 'on' if self._FAMILY_FILTER else 'off')

-    def _call_api(self, object_type, xid, object_fields, note, filter_extra=None):
-        if not self._HEADERS.get('Authorization'):
+    def _get_token(self, xid):
         cookies = self._get_dailymotion_cookies()
         token = self._get_cookie_value(cookies, 'access_token') or self._get_cookie_value(cookies, 'client_token')
-        if not token:
+        if token:
+            return token
+
         data = {
             'client_id': 'f1a362d288c1b98099c7',
             'client_secret': 'eea605b96e01c796ff369935357eca920c5da4c5',
@@ -73,7 +75,11 @@ def _call_api(self, object_type, xid, object_fields, note, filter_extra=None):
                 e.cause.response.read().decode(), xid)['error_description'], expected=True)
             raise
         self._set_dailymotion_cookie('access_token' if username else 'client_token', token)
-        self._HEADERS['Authorization'] = 'Bearer ' + token
+        return token
+
+    def _call_api(self, object_type, xid, object_fields, note, filter_extra=None):
+        if not self._HEADERS.get('Authorization'):
+            self._HEADERS['Authorization'] = f'Bearer {self._get_token(xid)}'

         resp = self._download_json(
             'https://graphql.api.dailymotion.com/', xid, note, data=json.dumps({
@@ -393,9 +399,55 @@ def _extract_embed_urls(cls, url, webpage):
             yield '//dailymotion.com/playlist/%s' % p


+class DailymotionSearchIE(DailymotionPlaylistBaseIE):
+    IE_NAME = 'dailymotion:search'
+    _VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/search/(?P<id>[^/?#]+)/videos'
+    _PAGE_SIZE = 20
+    _TESTS = [{
+        'url': 'http://www.dailymotion.com/search/king of turtles/videos',
+        'info_dict': {
+            'id': 'king of turtles',
+            'title': 'king of turtles',
+        },
+        'playlist_mincount': 90,
+    }]
+    _SEARCH_QUERY = 'query SEARCH_QUERY( $query: String! $page: Int $limit: Int ) { search { videos( query: $query first: $limit page: $page ) { edges { node { xid } } } } } '
+
+    def _call_search_api(self, term, page, note):
+        if not self._HEADERS.get('Authorization'):
+            self._HEADERS['Authorization'] = f'Bearer {self._get_token(term)}'
+        resp = self._download_json(
+            'https://graphql.api.dailymotion.com/', None, note, data=json.dumps({
+                'operationName': 'SEARCH_QUERY',
+                'query': self._SEARCH_QUERY,
+                'variables': {
+                    'limit': 20,
+                    'page': page,
+                    'query': term,
+                }
+            }).encode(), headers=self._HEADERS)
+        obj = traverse_obj(resp, ('data', 'search', {dict}))
+        if not obj:
+            raise ExtractorError(
+                traverse_obj(resp, ('errors', 0, 'message', {str})) or 'Could not fetch search data')
+
+        return obj
+
+    def _fetch_page(self, term, page):
+        page += 1
+        response = self._call_search_api(term, page, f'Searching "{term}" page {page}')

|
for xid in traverse_obj(response, ('videos', 'edges', ..., 'node', 'xid')):
|
||||||
|
yield self.url_result(f'https://www.dailymotion.com/video/{xid}', DailymotionIE, xid)
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
term = urllib.parse.unquote_plus(self._match_id(url))
|
||||||
|
return self.playlist_result(
|
||||||
|
OnDemandPagedList(functools.partial(self._fetch_page, term), self._PAGE_SIZE), term, term)
|
||||||
|
|
||||||
|
|
||||||
class DailymotionUserIE(DailymotionPlaylistBaseIE):
|
class DailymotionUserIE(DailymotionPlaylistBaseIE):
|
||||||
IE_NAME = 'dailymotion:user'
|
IE_NAME = 'dailymotion:user'
|
||||||
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist)/)(?:(?:old/)?user/)?(?P<id>[^/]+)'
|
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist|search)/)(?:(?:old/)?user/)?(?P<id>[^/?#]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.dailymotion.com/user/nqtv',
|
'url': 'https://www.dailymotion.com/user/nqtv',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
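
The new search extractor pages through results with `OnDemandPagedList`, which invokes the page callback lazily, only for the slices a consumer actually requests (`_fetch_page` bumps the index first because the GraphQL API counts pages from 1). A minimal sketch of the pattern, with a stubbed page function standing in for the real API call:

    import functools
    from yt_dlp.utils import OnDemandPagedList

    PAGE_SIZE = 20

    def fetch_page(term, page):  # zero-based page index supplied by the paginator
        # a real implementation would query the search API for `term` here
        return [f'{term}-{page * PAGE_SIZE + i}' for i in range(PAGE_SIZE)]

    pl = OnDemandPagedList(functools.partial(fetch_page, 'turtles'), PAGE_SIZE)
    print(pl.getslice(0, 3))  # only page 0 is fetched: ['turtles-0', 'turtles-1', 'turtles-2']
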

@@ -83,7 +83,6 @@ class DamtomoRecordIE(DamtomoBaseIE):
         'info_dict': {
             'id': '27376862',
             'title': 'イカSUMMER [良音]',
-            'description': None,
             'uploader': 'NANA',
             'uploader_id': 'MzAyMDExNTY',
             'upload_date': '20210721',

@@ -27,7 +27,7 @@ class DaumIE(DaumBaseIE):
             'duration': 2117,
             'view_count': int,
             'comment_count': int,
-            'uploader_id': 186139,
+            'uploader_id': '186139',
             'uploader': '콘간지',
             'timestamp': 1387310323,
         },

@@ -44,7 +44,7 @@ class DaumIE(DaumBaseIE):
             'view_count': int,
             'comment_count': int,
             'uploader': 'MBC 예능',
-            'uploader_id': 132251,
+            'uploader_id': '132251',
             'timestamp': 1421604228,
         },
     }, {

@@ -63,7 +63,7 @@ class DaumIE(DaumBaseIE):
             'view_count': int,
             'comment_count': int,
             'uploader': '까칠한 墮落始祖 황비홍님의',
-            'uploader_id': 560824,
+            'uploader_id': '560824',
             'timestamp': 1203770745,
         },
     }, {

@@ -77,7 +77,7 @@ class DaumIE(DaumBaseIE):
             'description': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)\r\n\r\n[쇼! 음악중심] 20160611, 507회',
             'upload_date': '20170129',
             'uploader': '쇼! 음악중심',
-            'uploader_id': 2653210,
+            'uploader_id': '2653210',
             'timestamp': 1485684628,
         },
     }]

@@ -107,7 +107,7 @@ class DaumClipIE(DaumBaseIE):
             'duration': 3868,
             'view_count': int,
             'uploader': 'GOMeXP',
-            'uploader_id': 6667,
+            'uploader_id': '6667',
             'timestamp': 1377911092,
         },
     }, {
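
These test updates track yt-dlp's convention that ID-like metadata fields (`id`, `uploader_id`) are strings even when an upstream API returns integers; extractors typically coerce such values with `str_or_none`, which also leaves missing fields absent. For example:

    from yt_dlp.utils import str_or_none

    api_owner_id = 186139  # hypothetical numeric id, as the Daum API returns it
    print(str_or_none(api_owner_id))  # '186139'
    print(str_or_none(None))          # None; missing fields stay missing
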

@@ -1,54 +0,0 @@
-from .common import InfoExtractor
-from ..utils import js_to_json
-
-
-class DiggIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?digg\.com/video/(?P<id>[^/?#&]+)'
-    _TESTS = [{
-        # JWPlatform via provider
-        'url': 'http://digg.com/video/sci-fi-short-jonah-daniel-kaluuya-get-out',
-        'info_dict': {
-            'id': 'LcqvmS0b',
-            'ext': 'mp4',
-            'title': "'Get Out' Star Daniel Kaluuya Goes On 'Moby Dick'-Like Journey In Sci-Fi Short 'Jonah'",
-            'description': 'md5:541bb847648b6ee3d6514bc84b82efda',
-            'upload_date': '20180109',
-            'timestamp': 1515530551,
-        },
-        'params': {
-            'skip_download': True,
-        },
-    }, {
-        # Youtube via provider
-        'url': 'http://digg.com/video/dog-boat-seal-play',
-        'only_matching': True,
-    }, {
-        # vimeo as regular embed
-        'url': 'http://digg.com/video/dream-girl-short-film',
-        'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        display_id = self._match_id(url)
-
-        webpage = self._download_webpage(url, display_id)
-
-        info = self._parse_json(
-            self._search_regex(
-                r'(?s)video_info\s*=\s*({.+?});\n', webpage, 'video info',
-                default='{}'), display_id, transform_source=js_to_json,
-            fatal=False)
-
-        video_id = info.get('video_id')
-
-        if video_id:
-            provider = info.get('provider_name')
-            if provider == 'youtube':
-                return self.url_result(
-                    video_id, ie='Youtube', video_id=video_id)
-            elif provider == 'jwplayer':
-                return self.url_result(
-                    'jwplatform:%s' % video_id, ie='JWPlatform',
-                    video_id=video_id)
-
-        return self.url_result(url, 'Generic')

@@ -9,6 +9,7 @@


 class DTubeIE(InfoExtractor):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?d\.tube/(?:#!/)?v/(?P<uploader_id>[0-9a-z.-]+)/(?P<id>[0-9a-z]{8})'
     _TEST = {
         'url': 'https://d.tube/#!/v/broncnutz/x380jtr1',

@@ -1,4 +1,6 @@
+import base64
 import re
+import urllib.parse

 from .common import InfoExtractor
 from ..compat import compat_urlparse

@@ -129,11 +131,15 @@ def _real_extract(self, url):
         data_url = player_data.get('url')
         if not data_url:
             raise ExtractorError('Cannot find url in player_data')
-        data_from = player_data.get('from')
+        player_encrypt = player_data.get('encrypt')
+        if player_encrypt == 1:
+            data_url = urllib.parse.unquote(data_url)
+        elif player_encrypt == 2:
+            data_url = urllib.parse.unquote(base64.b64decode(data_url).decode('ascii'))

         # if it is an embedded iframe, maybe it's an external source
         headers = {'Referer': webpage_url}
-        if data_from == 'iframe':
+        if player_data.get('from') == 'iframe':
             # use _type url_transparent to retain the meaningful details
             # of the video.
             return {
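
The two `encrypt` schemes are plain percent-encoding and base64-wrapped percent-encoding. A self-contained sketch of the decoding logic (the helper name and sample URL are illustrative, not part of the extractor):

    import base64
    import urllib.parse

    def decode_data_url(data_url, player_encrypt):
        # mirrors the two schemes above; other values pass through unchanged
        if player_encrypt == 1:
            return urllib.parse.unquote(data_url)
        if player_encrypt == 2:
            return urllib.parse.unquote(base64.b64decode(data_url).decode('ascii'))
        return data_url

    encoded = base64.b64encode(urllib.parse.quote('https://example.com/v.m3u8').encode()).decode()
    print(decode_data_url(encoded, 2))  # https://example.com/v.m3u8
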

@@ -8,9 +8,9 @@

 class DumpertIE(InfoExtractor):
     _VALID_URL = r'''(?x)
-        (?P<protocol>https?)://(?:(?:www|legacy)\.)?dumpert\.nl(?:
-            /(?:mediabase|embed|item)/|
-            (?:/toppers|/latest|/?)\?selectedId=
+        (?P<protocol>https?)://(?:(?:www|legacy)\.)?dumpert\.nl/(?:
+            (?:mediabase|embed|item)/|
+            [^#]*[?&]selectedId=
         )(?P<id>[0-9]+[/_][0-9a-zA-Z]+)'''
     _TESTS = [{
         'url': 'https://www.dumpert.nl/item/6646981_951bc60f',

@@ -56,6 +56,9 @@ class DumpertIE(InfoExtractor):
     }, {
         'url': 'https://www.dumpert.nl/?selectedId=100031688_b317a185',
         'only_matching': True,
+    }, {
+        'url': 'https://www.dumpert.nl/toppers/dag?selectedId=100086074_f5cef3ac',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
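
The loosened `[^#]*[?&]selectedId=` branch accepts a `selectedId` query parameter after any path, not just `/toppers`, `/latest` or the site root. A quick check against the new test URL, using a simplified copy of the pattern (the `(?P<protocol>...)` group is omitted here):

    import re

    pattern = r'''(?x)
        https?://(?:(?:www|legacy)\.)?dumpert\.nl/(?:
            (?:mediabase|embed|item)/|
            [^#]*[?&]selectedId=
        )(?P<id>[0-9]+[/_][0-9a-zA-Z]+)'''

    for url in ('https://www.dumpert.nl/item/6646981_951bc60f',
                'https://www.dumpert.nl/toppers/dag?selectedId=100086074_f5cef3ac'):
        print(re.match(pattern, url).group('id'))  # 6646981_951bc60f, then 100086074_f5cef3ac
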

@@ -13,7 +13,7 @@


 class DuoplayIE(InfoExtractor):
-    _VALID_URL = r'https://duoplay\.ee/(?P<id>\d+)/[\w-]+/?(?:\?(?:[^#]+&)?ep=(?P<ep>\d+))?'
+    _VALID_URL = r'https?://duoplay\.ee/(?P<id>\d+)/[\w-]+/?(?:\?(?:[^#]+&)?ep=(?P<ep>\d+))?'
     _TESTS = [{
         'note': 'Siberi võmm S02E12',
         'url': 'https://duoplay.ee/4312/siberi-vomm?ep=24',

@@ -32,7 +32,7 @@ class DuoplayIE(InfoExtractor):
             'season_number': 2,
             'episode': 'Operatsioon "Öö"',
             'episode_number': 12,
-            'episode_id': 24,
+            'episode_id': '24',
         },
     }, {
         'note': 'Empty title',

@@ -50,7 +50,7 @@ class DuoplayIE(InfoExtractor):
             'series_id': '17',
             'season': 'Season 2',
             'season_number': 2,
-            'episode_id': 14,
+            'episode_id': '14',
             'release_year': 2010,
         },
     }, {

@@ -99,6 +99,6 @@ def _real_extract(self, url):
                 'season_number': ('season_id', {int_or_none}),
                 'episode': 'subtitle',
                 'episode_number': ('episode_nr', {int_or_none}),
-                'episode_id': ('episode_id', {int_or_none}),
+                'episode_id': ('episode_id', {str_or_none}),
             }, get_all=False) if episode_attr.get('category') != 'movies' else {}),
         }

@@ -8,6 +8,8 @@


 class DWIE(InfoExtractor):
+    _WORKING = False
+    _ENABLED = None  # XXX: pass through to GenericIE
     IE_NAME = 'dw'
     _VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+(?:av|e)-(?P<id>\d+)'
     _TESTS = [{

@@ -82,6 +84,8 @@ def _real_extract(self, url):


 class DWArticleIE(InfoExtractor):
+    _WORKING = False
+    _ENABLED = None  # XXX: pass through to GenericIE
     IE_NAME = 'dw:article'
     _VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+a-(?P<id>\d+)'
     _TEST = {

@@ -19,7 +19,7 @@ def _call_api(self, path, video_id, resource, fatal=True):
 class EggheadCourseIE(EggheadBaseIE):
     IE_DESC = 'egghead.io course'
     IE_NAME = 'egghead:course'
-    _VALID_URL = r'https://(?:app\.)?egghead\.io/(?:course|playlist)s/(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://(?:app\.)?egghead\.io/(?:course|playlist)s/(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'https://egghead.io/courses/professor-frisby-introduces-composable-functional-javascript',
         'playlist_count': 29,

@@ -65,7 +65,7 @@ def _real_extract(self, url):
 class EggheadLessonIE(EggheadBaseIE):
     IE_DESC = 'egghead.io lesson'
     IE_NAME = 'egghead:lesson'
-    _VALID_URL = r'https://(?:app\.)?egghead\.io/(?:api/v1/)?lessons/(?P<id>[^/?#&]+)'
+    _VALID_URL = r'https?://(?:app\.)?egghead\.io/(?:api/v1/)?lessons/(?P<id>[^/?#&]+)'
     _TESTS = [{
         'url': 'https://egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box',
         'info_dict': {

@@ -42,7 +42,6 @@ class EplusIbIE(InfoExtractor):
             'live_status': 'was_live',
             'release_date': '20210719',
             'release_timestamp': 1626703200,
-            'description': None,
         },
         'params': {
             'skip_download': True,

@@ -13,6 +13,7 @@


 class EuropaIE(InfoExtractor):
+    _WORKING = False
     _VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
     _TESTS = [{
         'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',

@@ -10,6 +10,7 @@


 class FancodeVodIE(InfoExtractor):
+    _WORKING = False
     IE_NAME = 'fancode:vod'

     _VALID_URL = r'https?://(?:www\.)?fancode\.com/video/(?P<id>[0-9]+)\b'

@@ -126,6 +127,7 @@ def _real_extract(self, url):


 class FancodeLiveIE(FancodeVodIE):  # XXX: Do not subclass from concrete IE
+    _WORKING = False
     IE_NAME = 'fancode:live'

     _VALID_URL = r'https?://(www\.)?fancode\.com/match/(?P<id>[0-9]+).+'

@@ -1,69 +0,0 @@
-from .common import InfoExtractor
-from ..utils import int_or_none
-
-
-class FilmmoduIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?filmmodu\.org/(?P<id>[^/]+-(?:turkce-dublaj-izle|altyazili-izle))'
-    _TESTS = [{
-        'url': 'https://www.filmmodu.org/f9-altyazili-izle',
-        'md5': 'aeefd955c2a508a5bdaa3bcec8eeb0d4',
-        'info_dict': {
-            'id': '10804',
-            'ext': 'mp4',
-            'title': 'F9',
-            'description': 'md5:2713f584a4d65afa2611e2948d0b953c',
-            'subtitles': {
-                'tr': [{
-                    'ext': 'vtt',
-                }],
-            },
-            'thumbnail': r're:https://s[0-9]+.filmmodu.org/uploads/movie/cover/10804/xXHZeb1yhJvnSHPzZDqee0zfMb6.jpg',
-        },
-    }, {
-        'url': 'https://www.filmmodu.org/the-godfather-turkce-dublaj-izle',
-        'md5': '109f2fcb9c941330eed133971c035c00',
-        'info_dict': {
-            'id': '3646',
-            'ext': 'mp4',
-            'title': 'Baba',
-            'description': 'md5:d43fd651937cd75cc650883ebd8d8461',
-            'thumbnail': r're:https://s[0-9]+.filmmodu.org/uploads/movie/cover/3646/6xKCYgH16UuwEGAyroLU6p8HLIn.jpg',
-        },
-    }]
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-        title = self._og_search_title(webpage, fatal=True)
-        description = self._og_search_description(webpage)
-        thumbnail = self._og_search_thumbnail(webpage)
-        real_video_id = self._search_regex(r'var\s*videoId\s*=\s*\'([0-9]+)\'', webpage, 'video_id')
-        video_type = self._search_regex(r'var\s*videoType\s*=\s*\'([a-z]+)\'', webpage, 'video_type')
-        data = self._download_json('https://www.filmmodu.org/get-source', real_video_id, query={
-            'movie_id': real_video_id,
-            'type': video_type,
-        })
-        formats = [{
-            'url': source['src'],
-            'ext': 'mp4',
-            'format_id': source['label'],
-            'height': int_or_none(source.get('res')),
-            'protocol': 'm3u8_native',
-        } for source in data['sources']]
-
-        subtitles = {}
-
-        if data.get('subtitle'):
-            subtitles['tr'] = [{
-                'url': data['subtitle'],
-            }]
-
-        return {
-            'id': real_video_id,
-            'display_id': video_id,
-            'title': title,
-            'description': description,
-            'formats': formats,
-            'subtitles': subtitles,
-            'thumbnail': thumbnail,
-        }

@@ -1,60 +1,49 @@
+import re
+import urllib.parse

 from .common import InfoExtractor
 from .dailymotion import DailymotionIE
+from ..networking import HEADRequest
 from ..utils import (
-    ExtractorError,
     determine_ext,
+    filter_dict,
     format_field,
     int_or_none,
     join_nonempty,
     parse_iso8601,
-    parse_qs,
+    smuggle_url,
+    unsmuggle_url,
+    url_or_none,
 )
+from ..utils.traversal import traverse_obj


 class FranceTVBaseInfoExtractor(InfoExtractor):
-    def _make_url_result(self, video_or_full_id, catalog=None):
-        full_id = 'francetv:%s' % video_or_full_id
-        if '@' not in video_or_full_id and catalog:
-            full_id += '@%s' % catalog
-        return self.url_result(
-            full_id, ie=FranceTVIE.ie_key(),
-            video_id=video_or_full_id.split('@')[0])
+    def _make_url_result(self, video_id, url=None):
+        video_id = video_id.split('@')[0]  # for compat with old @catalog IDs
+        full_id = f'francetv:{video_id}'
+        if url:
+            full_id = smuggle_url(full_id, {'hostname': urllib.parse.urlparse(url).hostname})
+        return self.url_result(full_id, FranceTVIE, video_id)


 class FranceTVIE(InfoExtractor):
-    _VALID_URL = r'''(?x)
-        (?:
-            https?://
-                sivideo\.webservices\.francetelevisions\.fr/tools/getInfosOeuvre/v2/\?
-                .*?\bidDiffusion=[^&]+|
-            (?:
-                https?://videos\.francetv\.fr/video/|
-                francetv:
-            )
-            (?P<id>[^@]+)(?:@(?P<catalog>.+))?
-        )
-    '''
-    _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1']
+    _VALID_URL = r'francetv:(?P<id>[^@#]+)'
+    _GEO_COUNTRIES = ['FR']
+    _GEO_BYPASS = False

     _TESTS = [{
-        # without catalog
-        'url': 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=162311093&callback=_jsonp_loader_callback_request_0',
-        'md5': 'c2248a8de38c4e65ea8fae7b5df2d84f',
+        'url': 'francetv:ec217ecc-0733-48cf-ac06-af1347b849d1',
         'info_dict': {
-            'id': '162311093',
+            'id': 'ec217ecc-0733-48cf-ac06-af1347b849d1',
             'ext': 'mp4',
             'title': '13h15, le dimanche... - Les mystères de Jésus',
-            'description': 'md5:75efe8d4c0a8205e5904498ffe1e1a42',
             'timestamp': 1502623500,
+            'duration': 2580,
+            'thumbnail': r're:^https?://.*\.jpg$',
             'upload_date': '20170813',
         },
+        'params': {'skip_download': 'm3u8'},
     }, {
-        # with catalog
-        'url': 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=NI_1004933&catalogue=Zouzous&callback=_jsonp_loader_callback_request_4',
-        'only_matching': True,
-    }, {
-        'url': 'http://videos.francetv.fr/video/NI_657393@Regions',
-        'only_matching': True,
-    }, {
         'url': 'francetv:162311093',
         'only_matching': True,
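
`smuggle_url`/`unsmuggle_url` carry the originating hostname across the `francetv:` indirection by appending a JSON fragment to the URL, which `_real_extract` later peels off. A minimal round-trip sketch (URL and ID are taken from the tests above):

    import urllib.parse
    from yt_dlp.utils import smuggle_url, unsmuggle_url

    page_url = 'https://www.france.tv/france-2/direct.html'
    full_id = smuggle_url('francetv:ec217ecc-0733-48cf-ac06-af1347b849d1',
                          {'hostname': urllib.parse.urlparse(page_url).hostname})
    url, data = unsmuggle_url(full_id, {})
    print(url)   # francetv:ec217ecc-0733-48cf-ac06-af1347b849d1
    print(data)  # {'hostname': 'www.france.tv'}
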

@@ -76,10 +65,7 @@ class FranceTVIE(InfoExtractor):
         'only_matching': True,
     }]

-    def _extract_video(self, video_id, catalogue=None):
-        # Videos are identified by idDiffusion so catalogue part is optional.
-        # However when provided, some extra formats may be returned so we pass
-        # it if available.
+    def _extract_video(self, video_id, hostname=None):
         is_live = None
         videos = []
         title = None

@@ -91,18 +77,20 @@ def _extract_video(self, video_id, catalogue=None):
         timestamp = None
         spritesheets = None

-        for device_type in ('desktop', 'mobile'):
+        # desktop+chrome returns dash; mobile+safari returns hls
+        for device_type, browser in [('desktop', 'chrome'), ('mobile', 'safari')]:
             dinfo = self._download_json(
-                'https://player.webservices.francetelevisions.fr/v1/videos/%s' % video_id,
-                video_id, 'Downloading %s video JSON' % device_type, query={
+                f'https://k7.ftven.fr/videos/{video_id}', video_id,
+                f'Downloading {device_type} {browser} video JSON', query=filter_dict({
                     'device_type': device_type,
-                    'browser': 'chrome',
-                }, fatal=False)
+                    'browser': browser,
+                    'domain': hostname,
+                }), fatal=False)

             if not dinfo:
                 continue

-            video = dinfo.get('video')
+            video = traverse_obj(dinfo, ('video', {dict}))
             if video:
                 videos.append(video)
                 if duration is None:
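
`filter_dict` keeps the query clean when no hostname was smuggled in: with its default predicate it drops `None`-valued keys, so `domain` is only sent when actually known. A small sketch:

    from yt_dlp.utils import filter_dict

    hostname = None  # unknown when the ID came from a bare francetv: URL
    print(filter_dict({
        'device_type': 'mobile',
        'browser': 'safari',
        'domain': hostname,
    }))  # {'device_type': 'mobile', 'browser': 'safari'}; the None value is dropped
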
@@ -112,7 +100,7 @@ def _extract_video(self, video_id, catalogue=None):
                 if spritesheets is None:
                     spritesheets = video.get('spritesheets')

-            meta = dinfo.get('meta')
+            meta = traverse_obj(dinfo, ('meta', {dict}))
             if meta:
                 if title is None:
                     title = meta.get('title')

@@ -126,43 +114,46 @@ def _extract_video(self, video_id, catalogue=None):
                 if timestamp is None:
                     timestamp = parse_iso8601(meta.get('broadcasted_at'))

-        formats = []
-        subtitles = {}
-        for video in videos:
+        formats, subtitles, video_url = [], {}, None
+        for video in traverse_obj(videos, lambda _, v: url_or_none(v['url'])):
+            video_url = video['url']
             format_id = video.get('format')

-            video_url = None
-            if video.get('workflow') == 'token-akamai':
-                token_url = video.get('token')
-                if token_url:
-                    token_json = self._download_json(
-                        token_url, video_id,
-                        'Downloading signed %s manifest URL' % format_id)
-                    if token_json:
-                        video_url = token_json.get('url')
-            if not video_url:
-                video_url = video.get('url')
+            if token_url := url_or_none(video.get('token')):
+                tokenized_url = traverse_obj(self._download_json(
+                    token_url, video_id, f'Downloading signed {format_id} manifest URL',
+                    fatal=False, query={
+                        'format': 'json',
+                        'url': video_url,
+                    }), ('url', {url_or_none}))
+                if tokenized_url:
+                    video_url = tokenized_url

             ext = determine_ext(video_url)
             if ext == 'f4m':
                 formats.extend(self._extract_f4m_formats(
-                    video_url, video_id, f4m_id=format_id, fatal=False))
+                    video_url, video_id, f4m_id=format_id or ext, fatal=False))
             elif ext == 'm3u8':
+                format_id = format_id or 'hls'
                 fmts, subs = self._extract_m3u8_formats_and_subtitles(
-                    video_url, video_id, 'mp4',
-                    entry_protocol='m3u8_native', m3u8_id=format_id,
-                    fatal=False)
+                    video_url, video_id, 'mp4', m3u8_id=format_id, fatal=False)
+                for f in traverse_obj(fmts, lambda _, v: v['vcodec'] == 'none' and v.get('tbr') is None):
+                    if mobj := re.match(rf'{format_id}-[Aa]udio-\w+-(?P<bitrate>\d+)', f['format_id']):
+                        f.update({
+                            'tbr': int_or_none(mobj.group('bitrate')),
+                            'acodec': 'mp4a',
+                        })
                 formats.extend(fmts)
                 self._merge_subtitles(subs, target=subtitles)
             elif ext == 'mpd':
                 fmts, subs = self._extract_mpd_formats_and_subtitles(
-                    video_url, video_id, mpd_id=format_id, fatal=False)
+                    video_url, video_id, mpd_id=format_id or 'dash', fatal=False)
                 formats.extend(fmts)
                 self._merge_subtitles(subs, target=subtitles)
             elif video_url.startswith('rtmp'):
                 formats.append({
                     'url': video_url,
-                    'format_id': 'rtmp-%s' % format_id,
+                    'format_id': join_nonempty('rtmp', format_id),
                     'ext': 'flv',
                 })
             else:
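
The m3u8 post-processing backfills the audio bitrate: HLS audio-only renditions come back with `tbr` unset, but their format IDs appear to encode the bitrate, which the walrus-pattern match recovers. A sketch with a hypothetical format dict:

    import re
    from yt_dlp.utils import int_or_none

    m3u8_id = 'hls'
    f = {'format_id': 'hls-Audio-fra-96', 'vcodec': 'none', 'tbr': None}  # hypothetical rendition
    if mobj := re.match(rf'{m3u8_id}-[Aa]udio-\w+-(?P<bitrate>\d+)', f['format_id']):
        f.update({'tbr': int_or_none(mobj.group('bitrate')), 'acodec': 'mp4a'})
    print(f['tbr'], f['acodec'])  # 96 mp4a
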
@@ -174,6 +165,13 @@ def _extract_video(self, video_id, catalogue=None):

         # XXX: what is video['captions']?

+        if not formats and video_url:
+            urlh = self._request_webpage(
+                HEADRequest(video_url), video_id, 'Checking for geo-restriction',
+                fatal=False, expected_status=403)
+            if urlh and urlh.headers.get('x-errortype') == 'geo':
+                self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True)
+
         for f in formats:
             if f.get('acodec') != 'none' and f.get('language') in ('qtz', 'qad'):
                 f['language_preference'] = -10

@@ -194,7 +192,7 @@ def _extract_video(self, video_id, catalogue=None):
                 # a 10×10 grid of thumbnails corresponding to approximately
                 # 2 seconds of the video; the last spritesheet may be shorter
                 'duration': 200,
-            } for sheet in spritesheets]
+            } for sheet in traverse_obj(spritesheets, (..., {url_or_none}))]
         })

         return {

@@ -210,21 +208,15 @@ def _extract_video(self, video_id, catalogue=None):
             'series': title if episode_number else None,
             'episode_number': int_or_none(episode_number),
             'season_number': int_or_none(season_number),
+            '_format_sort_fields': ('res', 'tbr', 'proto'),  # prioritize m3u8 over dash
         }

     def _real_extract(self, url):
-        mobj = self._match_valid_url(url)
-        video_id = mobj.group('id')
-        catalog = mobj.group('catalog')
-
-        if not video_id:
-            qs = parse_qs(url)
-            video_id = qs.get('idDiffusion', [None])[0]
-            catalog = qs.get('catalogue', [None])[0]
-            if not video_id:
-                raise ExtractorError('Invalid URL', expected=True)
-
-        return self._extract_video(video_id, catalog)
+        url, smuggled_data = unsmuggle_url(url, {})
+        video_id = self._match_id(url)
+        hostname = smuggled_data.get('hostname') or 'www.france.tv'
+
+        return self._extract_video(video_id, hostname=hostname)


 class FranceTVSiteIE(FranceTVBaseInfoExtractor):
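
The geo probe relies on `HEADRequest`, an ordinary `Request` whose method is forced to HEAD, so the check costs one header round-trip; `expected_status=403` keeps the blocked response from raising before the `x-errortype` header can be inspected. A tiny sketch:

    from yt_dlp.networking import HEADRequest

    req = HEADRequest('https://example.com/manifest.m3u8')  # hypothetical manifest URL
    print(req.method)  # HEAD; the response body is never fetched
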

@@ -246,6 +238,7 @@ class FranceTVSiteIE(FranceTVBaseInfoExtractor):
         },
         'add_ie': [FranceTVIE.ie_key()],
     }, {
+        # geo-restricted
         'url': 'https://www.france.tv/enfants/six-huit-ans/foot2rue/saison-1/3066387-duel-au-vieux-port.html',
         'info_dict': {
             'id': 'a9050959-eedd-4b4a-9b0d-de6eeaa73e44',

@@ -261,6 +254,26 @@ class FranceTVSiteIE(FranceTVBaseInfoExtractor):
             'thumbnail': r're:^https?://.*\.jpg$',
             'duration': 1441,
         },
+    }, {
+        # geo-restricted livestream (workflow == 'token-akamai')
+        'url': 'https://www.france.tv/france-4/direct.html',
+        'info_dict': {
+            'id': '9a6a7670-dde9-4264-adbc-55b89558594b',
+            'ext': 'mp4',
+            'title': r're:France 4 en direct .+',
+            'live_status': 'is_live',
+        },
+        'skip': 'geo-restricted livestream',
+    }, {
+        # livestream (workflow == 'dai')
+        'url': 'https://www.france.tv/france-2/direct.html',
+        'info_dict': {
+            'id': '006194ea-117d-4bcf-94a9-153d999c59ae',
+            'ext': 'mp4',
+            'title': r're:France 2 en direct .+',
+            'live_status': 'is_live',
+        },
+        'params': {'skip_download': 'livestream'},
     }, {
         # france3
         'url': 'https://www.france.tv/france-3/des-chiffres-et-des-lettres/139063-emission-du-mardi-9-mai-2017.html',

@@ -277,10 +290,6 @@ class FranceTVSiteIE(FranceTVBaseInfoExtractor):
         # franceo
         'url': 'https://www.france.tv/france-o/archipels/132249-mon-ancetre-l-esclave.html',
         'only_matching': True,
-    }, {
-        # france2 live
-        'url': 'https://www.france.tv/france-2/direct.html',
-        'only_matching': True,
     }, {
         'url': 'https://www.france.tv/documentaires/histoire/136517-argentine-les-500-bebes-voles-de-la-dictature.html',
         'only_matching': True,
@@ -304,17 +313,16 @@ def _real_extract(self, url):

         webpage = self._download_webpage(url, display_id)

-        catalogue = None
         video_id = self._search_regex(
             r'(?:data-main-video\s*=|videoId["\']?\s*[:=])\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
             webpage, 'video id', default=None, group='id')

         if not video_id:
-            video_id, catalogue = self._html_search_regex(
-                r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
-                webpage, 'video ID').split('@')
+            video_id = self._html_search_regex(
+                r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@"]+@[^"]+)"',
+                webpage, 'video ID')

-        return self._make_url_result(video_id, catalogue)
+        return self._make_url_result(video_id, url=url)


 class FranceTVInfoIE(FranceTVBaseInfoExtractor):
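
The extra `"` in the first character class keeps the fallback regex from running past the closing quote of the href when no `@catalog` suffix is present; previously `[^@]+` could swallow markup up to an unrelated `@` later in the page. A quick demonstration on hypothetical markup:

    import re

    html = '<a href="http://videos.francetv.fr/video/NI_657393"> <img src="cdn@2x.png">'
    pattern_old = r'"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"'
    pattern_new = r'"http://videos?\.francetv\.fr/video/([^@"]+@[^"]+)"'
    print(re.search(pattern_old, html))  # spurious match spanning both attributes
    print(re.search(pattern_new, html))  # None; no false positive
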
@@ -328,8 +336,9 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
             'ext': 'mp4',
             'title': 'Soir 3',
             'upload_date': '20190822',
-            'timestamp': 1566510900,
-            'description': 'md5:72d167097237701d6e8452ff03b83c00',
+            'timestamp': 1566510730,
+            'thumbnail': r're:^https?://.*\.jpe?g$',
+            'duration': 1637,
             'subtitles': {
                 'fr': 'mincount:2',
             },

@@ -344,8 +353,8 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
         'info_dict': {
             'id': '7d204c9e-a2d3-11eb-9e4c-000d3a23d482',
             'ext': 'mp4',
-            'title': 'Covid-19 : une situation catastrophique à New Dehli',
-            'thumbnail': str,
+            'title': 'Covid-19 : une situation catastrophique à New Dehli - Édition du mercredi 21 avril 2021',
+            'thumbnail': r're:^https?://.*\.jpe?g$',
             'duration': 76,
             'timestamp': 1619028518,
             'upload_date': '20210421',

@@ -371,11 +380,17 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
             'id': 'x4iiko0',
             'ext': 'mp4',
             'title': 'NDDL, référendum, Brexit : Cécile Duflot répond à Patrick Cohen',
-            'description': 'Au lendemain de la victoire du "oui" au référendum sur l\'aéroport de Notre-Dame-des-Landes, l\'ancienne ministre écologiste est l\'invitée de Patrick Cohen. Plus d\'info : https://www.franceinter.fr/emissions/le-7-9/le-7-9-27-juin-2016',
+            'description': 'md5:fdcb582c370756293a65cdfbc6ecd90e',
             'timestamp': 1467011958,
-            'upload_date': '20160627',
             'uploader': 'France Inter',
             'uploader_id': 'x2q2ez',
+            'upload_date': '20160627',
+            'view_count': int,
+            'tags': ['Politique', 'France Inter', '27 juin 2016', 'Linvité de 8h20', 'Cécile Duflot', 'Patrick Cohen'],
+            'age_limit': 0,
+            'duration': 640,
+            'like_count': int,
+            'thumbnail': r're:https://[^/?#]+/v/[^/?#]+/x1080',
         },
         'add_ie': ['Dailymotion'],
     }, {

@@ -405,4 +420,4 @@ def _real_extract(self, url):
             r'(?:data-id|<figure[^<]+\bid)=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'),
             webpage, 'video id')

-        return self._make_url_result(video_id)
+        return self._make_url_result(video_id, url=url)

@@ -301,7 +301,7 @@ class FunimationShowIE(FunimationBaseIE):
     _TESTS = [{
         'url': 'https://www.funimation.com/en/shows/sk8-the-infinity',
         'info_dict': {
-            'id': 1315000,
+            'id': '1315000',
             'title': 'SK8 the Infinity'
         },
         'playlist_count': 13,

@@ -312,7 +312,7 @@ class FunimationShowIE(FunimationBaseIE):
         # without lang code
         'url': 'https://www.funimation.com/shows/ouran-high-school-host-club/',
         'info_dict': {
-            'id': 39643,
+            'id': '39643',
             'title': 'Ouran High School Host Club'
         },
         'playlist_count': 26,

@@ -339,7 +339,7 @@ def _real_extract(self, url):

         return {
             '_type': 'playlist',
-            'id': show_info['id'],
+            'id': str_or_none(show_info['id']),
             'title': show_info['name'],
             'entries': orderedSet(
                 self.url_result(

@@ -19,7 +19,6 @@ class GabTVIE(InfoExtractor):
             'id': '61217eacea5665de450d0488',
             'ext': 'mp4',
             'title': 'WHY WAS AMERICA IN AFGHANISTAN - AMERICA FIRST AGAINST AMERICAN OLIGARCHY',
-            'description': None,
             'uploader': 'Wurzelroot',
             'uploader_id': '608fb0a85738fd1974984f7d',
             'thumbnail': 'https://tv.gab.com/image/61217eacea5665de450d0488',

@@ -1,46 +0,0 @@
-from .brightcove import BrightcoveNewIE
-from .common import InfoExtractor
-from ..utils import (
-    clean_html,
-    get_element_by_class,
-    get_element_by_id,
-)
-
-
-class GameInformerIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?gameinformer\.com/(?:[^/]+/)*(?P<id>[^.?&#]+)'
-    _TESTS = [{
-        # normal Brightcove embed code extracted with BrightcoveNewIE._extract_url
-        'url': 'http://www.gameinformer.com/b/features/archive/2015/09/26/replay-animal-crossing.aspx',
-        'md5': '292f26da1ab4beb4c9099f1304d2b071',
-        'info_dict': {
-            'id': '4515472681001',
-            'ext': 'mp4',
-            'title': 'Replay - Animal Crossing',
-            'description': 'md5:2e211891b215c85d061adc7a4dd2d930',
-            'timestamp': 1443457610,
-            'upload_date': '20150928',
-            'uploader_id': '694940074001',
-        },
-    }, {
-        # Brightcove id inside unique element with field--name-field-brightcove-video-id class
-        'url': 'https://www.gameinformer.com/video-feature/new-gameplay-today/2019/07/09/new-gameplay-today-streets-of-rogue',
-        'info_dict': {
-            'id': '6057111913001',
-            'ext': 'mp4',
-            'title': 'New Gameplay Today – Streets Of Rogue',
-            'timestamp': 1562699001,
-            'upload_date': '20190709',
-            'uploader_id': '694940074001',
-
-        },
-    }]
-    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/694940074001/default_default/index.html?videoId=%s'
-
-    def _real_extract(self, url):
-        display_id = self._match_id(url)
-        webpage = self._download_webpage(
-            url, display_id, headers=self.geo_verification_headers())
-        brightcove_id = clean_html(get_element_by_class('field--name-field-brightcove-video-id', webpage) or get_element_by_id('video-source-content', webpage))
-        brightcove_url = self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id if brightcove_id else BrightcoveNewIE._extract_url(self, webpage)
-        return self.url_result(brightcove_url, 'BrightcoveNew', brightcove_id)
@@ -88,7 +88,7 @@ def _parse_post(self, post_data):
             'uploader_id': user_data.get('username'),
             'uploader_url': format_field(user_data, 'url', 'https://gamejolt.com%s'),
             'categories': [try_get(category, lambda x: '%s - %s' % (x['community']['name'], x['channel'].get('display_title') or x['channel']['title']))
-                           for category in post_data.get('communities' or [])],
+                           for category in post_data.get('communities') or []],
             'tags': traverse_obj(
                 lead_content, ('content', ..., 'content', ..., 'marks', ..., 'attrs', 'tag'), expected_type=str_or_none),
             'like_count': int_or_none(post_data.get('like_count')),

@@ -267,9 +267,9 @@ class GameJoltIE(GameJoltBaseIE):
                 'id': 'dszyjnwi',
                 'ext': 'webm',
                 'title': 'gif-presentacion-mejorado-dszyjnwi',
-                'n_entries': 1,
             }
-        }]
+        }],
+        'playlist_count': 1,
     }, {
         # Multiple GIFs
         'url': 'https://gamejolt.com/p/gif-yhsqkumq',

@@ -374,7 +374,6 @@ class GameJoltGameSoundtrackIE(GameJoltBaseIE):
         'info_dict': {
             'id': '657899',
             'title': 'Friday Night Funkin\': Vs Oswald',
-            'n_entries': None,
         },
         'playlist': [{
             'info_dict': {

@@ -384,7 +383,6 @@ class GameJoltGameSoundtrackIE(GameJoltBaseIE):
                 'url': r're:^https://.+vs-oswald-menu-music\.mp3$',
                 'release_timestamp': 1635190816,
                 'release_date': '20211025',
-                'n_entries': 3,
             }
         }, {
             'info_dict': {

@@ -394,7 +392,6 @@ class GameJoltGameSoundtrackIE(GameJoltBaseIE):
                 'url': r're:^https://.+rabbit-s-luck--full-version-\.mp3$',
                 'release_timestamp': 1635190841,
                 'release_date': '20211025',
-                'n_entries': 3,
             }
         }, {
             'info_dict': {

@@ -404,9 +401,9 @@ class GameJoltGameSoundtrackIE(GameJoltBaseIE):
                 'url': r're:^https://.+last-straw\.mp3$',
                 'release_timestamp': 1635881104,
                 'release_date': '20211102',
-                'n_entries': 3,
             }
-        }]
+        }],
+        'playlist_count': 3,
     }]

     def _real_extract(self, url):
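
The one-character move in `_parse_post` fixes an operator-precedence bug: in `post_data.get('communities' or [])` the `or` applies to the key, which is always truthy, so the fallback never took effect and `.get()` could still return `None` and break the list comprehension. Compare:

    post_data = {}  # a post with no communities
    print(post_data.get('communities' or []))   # None; 'communities' is truthy, so `or` is a no-op on the key
    print(post_data.get('communities') or [])   # []; the fallback now applies to the lookup result
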

@@ -21,7 +21,6 @@ class GaskrankIE(InfoExtractor):
             'display_id': 'strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden',
             'uploader_id': 'Bikefun',
             'upload_date': '20170110',
-            'uploader_url': None,
         }
     }, {
         'url': 'http://www.gaskrank.tv/tv/racing/isle-of-man-tt-2011-michael-du-15920.htm',
@@ -2,6 +2,7 @@


 class GazetaIE(InfoExtractor):
+    _WORKING = False
     _VALID_URL = r'(?P<url>https?://(?:www\.)?gazeta\.ru/(?:[^/]+/)?video/(?:main/)*(?:\d{4}/\d{2}/\d{2}/)?(?P<id>[A-Za-z0-9-_.]+)\.s?html)'
     _TESTS = [{
         'url': 'http://www.gazeta.ru/video/main/zadaite_vopros_vladislavu_yurevichu.shtml',
@@ -7,6 +7,7 @@


 class GDCVaultIE(InfoExtractor):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)(?:/(?P<name>[\w-]+))?'
     _NETRC_MACHINE = 'gdcvault'
     _TESTS = [
@@ -2394,7 +2394,6 @@ def _real_extract(self, url):
                 'Referer': smuggled_data.get('referer'),
             }))
         new_url = full_response.url
-        url = urllib.parse.urlparse(url)._replace(scheme=urllib.parse.urlparse(new_url).scheme).geturl()
         if new_url != extract_basic_auth(url)[0]:
             self.report_following_redirect(new_url)
             if force_videoid:
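
For context, the removed line rewrote the requested URL's scheme to the final response's scheme before the redirect comparison, so a bare http-to-https upgrade compared equal and went unreported; without it, the comparison sees the URL exactly as given. What that line did, in isolation:

    import urllib.parse

    url = 'http://example.com/video'
    new_url = 'https://example.com/video'  # what the server redirected to
    patched = urllib.parse.urlparse(url)._replace(
        scheme=urllib.parse.urlparse(new_url).scheme).geturl()
    print(patched)  # https://example.com/video, now equal to new_url
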
Some files were not shown because too many files have changed in this diff.