Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-04 15:33:16 +00:00)

commit 05b22ed99c: Merge branch 'master' into master

1 .github/PULL_REQUEST_TEMPLATE.md (vendored)

@@ -28,7 +28,6 @@ # PLEASE FOLLOW THE GUIDE BELOW
 ### Before submitting a *pull request* make sure you have:
 - [ ] At least skimmed through [contributing guidelines](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) including [yt-dlp coding conventions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#yt-dlp-coding-conventions)
 - [ ] [Searched](https://github.com/yt-dlp/yt-dlp/search?q=is%3Apr&type=Issues) the bugtracker for similar pull requests
-- [ ] Checked the code with [flake8](https://pypi.python.org/pypi/flake8) and [ran relevant tests](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions)
 
 ### In order to be accepted and merged into yt-dlp each piece of code must be in public domain or released under [Unlicense](http://unlicense.org/). Check all of the following options that apply:
 - [ ] I am the original author of this code and I am willing to release it under [Unlicense](http://unlicense.org/)

10 .github/banner.svg (vendored)

File diff suppressed because one or more lines are too long. Size before: 24 KiB; after: 15 KiB.

138 .github/workflows/build.yml (vendored)

@@ -12,6 +12,9 @@ on:
       unix:
         default: true
         type: boolean
+      linux_static:
+        default: true
+        type: boolean
       linux_arm:
         default: true
         type: boolean

@@ -27,9 +30,6 @@ on:
       windows32:
         default: true
         type: boolean
-      meta_files:
-        default: true
-        type: boolean
       origin:
         required: false
         default: ''

@@ -52,7 +52,11 @@ on:
         default: stable
         type: string
       unix:
-        description: yt-dlp, yt-dlp.tar.gz, yt-dlp_linux, yt-dlp_linux.zip
+        description: yt-dlp, yt-dlp.tar.gz
+        default: true
+        type: boolean
+      linux_static:
+        description: yt-dlp_linux
         default: true
         type: boolean
       linux_arm:

@@ -75,10 +79,6 @@ on:
         description: yt-dlp_x86.exe
         default: true
         type: boolean
-      meta_files:
-        description: SHA2-256SUMS, SHA2-512SUMS, _update_spec
-        default: true
-        type: boolean
       origin:
         description: Origin
         required: false

@@ -107,60 +107,31 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
+        with:
+          fetch-depth: 0  # Needed for changelog
       - uses: actions/setup-python@v5
         with:
           python-version: "3.10"
-      - uses: conda-incubator/setup-miniconda@v3
-        with:
-          miniforge-variant: Mambaforge
-          use-mamba: true
-          channels: conda-forge
-          auto-update-conda: true
-          activate-environment: ""
-          auto-activate-base: false
       - name: Install Requirements
         run: |
           sudo apt -y install zip pandoc man sed
-          cat > ./requirements.txt << EOF
-          python=3.10.*
-          brotli-python
-          EOF
-          python devscripts/install_deps.py --print \
-            --exclude brotli --exclude brotlicffi \
-            --include secretstorage --include pyinstaller >> ./requirements.txt
-          mamba create -n build --file ./requirements.txt
-
       - name: Prepare
         run: |
           python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
+          python devscripts/update_changelog.py -vv
           python devscripts/make_lazy_extractors.py
       - name: Build Unix platform-independent binary
         run: |
           make all tar
-      - name: Build Unix standalone binary
-        shell: bash -l {0}
-        run: |
-          unset LD_LIBRARY_PATH  # Harmful; set by setup-python
-          conda activate build
-          python -m bundle.pyinstaller --onedir
-          (cd ./dist/yt-dlp_linux && zip -r ../yt-dlp_linux.zip .)
-          python -m bundle.pyinstaller
-          mv ./dist/yt-dlp_linux ./yt-dlp_linux
-          mv ./dist/yt-dlp_linux.zip ./yt-dlp_linux.zip
-
       - name: Verify --update-to
         if: vars.UPDATE_TO_VERIFICATION
         run: |
-          binaries=("yt-dlp" "yt-dlp_linux")
-          for binary in "${binaries[@]}"; do
-            chmod +x ./${binary}
-            cp ./${binary} ./${binary}_downgraded
-            version="$(./${binary} --version)"
-            ./${binary}_downgraded -v --update-to yt-dlp/yt-dlp@2023.03.04
-            downgraded_version="$(./${binary}_downgraded --version)"
-            [[ "$version" != "$downgraded_version" ]]
-          done
-
+          chmod +x ./yt-dlp
+          cp ./yt-dlp ./yt-dlp_downgraded
+          version="$(./yt-dlp --version)"
+          ./yt-dlp_downgraded -v --update-to yt-dlp/yt-dlp@2023.03.04
+          downgraded_version="$(./yt-dlp_downgraded --version)"
+          [[ "$version" != "$downgraded_version" ]]
       - name: Upload artifacts
         uses: actions/upload-artifact@v4
         with:
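
Note on the added `fetch-depth: 0` (see the `# Needed for changelog` comment above): actions/checkout defaults to a shallow, single-commit clone, and the changelog script presumably needs the full tag and commit history. A minimal sketch of the local equivalent, assuming an existing shallow clone:

```shell
# Turn a shallow clone into a full one (what fetch-depth: 0 gives the job):
git fetch --unshallow
```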

@@ -168,8 +139,39 @@ jobs:
           path: |
             yt-dlp
             yt-dlp.tar.gz
-            yt-dlp_linux
-            yt-dlp_linux.zip
+          compression-level: 0
+
+  linux_static:
+    needs: process
+    if: inputs.linux_static
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Build static executable
+        env:
+          channel: ${{ inputs.channel }}
+          origin: ${{ needs.process.outputs.origin }}
+          version: ${{ inputs.version }}
+        run: |
+          mkdir ~/build
+          cd bundle/docker
+          docker compose up --build static
+          sudo chown "${USER}:docker" ~/build/yt-dlp_linux
+      - name: Verify --update-to
+        if: vars.UPDATE_TO_VERIFICATION
+        run: |
+          chmod +x ~/build/yt-dlp_linux
+          cp ~/build/yt-dlp_linux ~/build/yt-dlp_linux_downgraded
+          version="$(~/build/yt-dlp_linux --version)"
+          ~/build/yt-dlp_linux_downgraded -v --update-to yt-dlp/yt-dlp@2023.03.04
+          downgraded_version="$(~/build/yt-dlp_linux_downgraded --version)"
+          [[ "$version" != "$downgraded_version" ]]
+      - name: Upload artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: build-bin-${{ github.job }}
+          path: |
+            ~/build/yt-dlp_linux
           compression-level: 0
 
   linux_arm:
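
For context, the new `linux_static` job delegates the entire build to Docker. A rough local sketch of the same steps, assuming Docker with the compose plugin and the compose file in `bundle/docker` that the job uses:

```shell
mkdir -p ~/build
cd bundle/docker
docker compose up --build static
ls -l ~/build/yt-dlp_linux  # the static binary the job uploads as an artifact
```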

@@ -247,6 +249,22 @@ jobs:
           python3 devscripts/install_deps.py --print --include pyinstaller > requirements.txt
           # We need to ignore wheels otherwise we break universal2 builds
           python3 -m pip install -U --user --no-binary :all: -r requirements.txt
+          # We need to fuse our own universal2 wheels for curl_cffi
+          python3 -m pip install -U --user delocate
+          mkdir curl_cffi_whls curl_cffi_universal2
+          python3 devscripts/install_deps.py --print -o --include curl-cffi > requirements.txt
+          for platform in "macosx_11_0_arm64" "macosx_11_0_x86_64"; do
+            python3 -m pip download \
+              --only-binary=:all: \
+              --platform "${platform}" \
+              --pre -d curl_cffi_whls \
+              -r requirements.txt
+          done
+          python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/curl_cffi*.whl -w curl_cffi_universal2
+          python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/cffi*.whl -w curl_cffi_universal2
+          cd curl_cffi_universal2
+          for wheel in *cffi*.whl; do mv -n -- "${wheel}" "${wheel/x86_64/universal2}"; done
+          python3 -m pip install -U --user *cffi*.whl
 
       - name: Prepare
         run: |
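
A note on the wheel-renaming line above: after `delocate_fuse` merges each arm64/x86_64 wheel pair, the fused wheel still carries a single-arch filename, and `${wheel/x86_64/universal2}` is plain bash pattern substitution that retags it. A minimal sketch with a hypothetical wheel name:

```shell
wheel="curl_cffi-0.7.0-cp38-abi3-macosx_11_0_x86_64.whl"  # hypothetical name
echo "${wheel/x86_64/universal2}"
# -> curl_cffi-0.7.0-cp38-abi3-macosx_11_0_universal2.whl
```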

@@ -280,7 +298,7 @@ jobs:
   macos_legacy:
     needs: process
     if: inputs.macos_legacy
-    runs-on: macos-latest
+    runs-on: macos-12
 
     steps:
       - uses: actions/checkout@v4

@@ -342,7 +360,7 @@ jobs:
       - name: Install Requirements
         run: |  # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
           python devscripts/install_deps.py -o --include build
-          python devscripts/install_deps.py --include py2exe
+          python devscripts/install_deps.py --include curl-cffi
           python -m pip install -U "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-5.8.0-py3-none-any.whl"
 
       - name: Prepare

@@ -351,12 +369,20 @@ jobs:
           python devscripts/make_lazy_extractors.py
       - name: Build
         run: |
-          python -m bundle.py2exe
-          Move-Item ./dist/yt-dlp.exe ./dist/yt-dlp_min.exe
           python -m bundle.pyinstaller
           python -m bundle.pyinstaller --onedir
+          Move-Item ./dist/yt-dlp.exe ./dist/yt-dlp_real.exe
           Compress-Archive -Path ./dist/yt-dlp/* -DestinationPath ./dist/yt-dlp_win.zip
 
+      - name: Install Requirements (py2exe)
+        run: |
+          python devscripts/install_deps.py --include py2exe
+      - name: Build (py2exe)
+        run: |
+          python -m bundle.py2exe
+          Move-Item ./dist/yt-dlp.exe ./dist/yt-dlp_min.exe
+          Move-Item ./dist/yt-dlp_real.exe ./dist/yt-dlp.exe
+
       - name: Verify --update-to
         if: vars.UPDATE_TO_VERIFICATION
         run: |

@@ -427,10 +453,11 @@ jobs:
           compression-level: 0
 
   meta_files:
-    if: inputs.meta_files && always() && !cancelled()
+    if: always() && !cancelled()
     needs:
       - process
       - unix
+      - linux_static
       - linux_arm
       - macos
       - macos_legacy

@@ -447,8 +474,9 @@ jobs:
       - name: Make SHA2-SUMS files
         run: |
           cd ./artifact/
-          sha256sum * > ../SHA2-256SUMS
-          sha512sum * > ../SHA2-512SUMS
+          # make sure SHA sums are also printed to stdout
+          sha256sum * | tee ../SHA2-256SUMS
+          sha512sum * | tee ../SHA2-512SUMS
 
       - name: Make Update spec
         run: |
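
The switch from `>` to `| tee` only changes where the output goes, as the new comment says: `tee` writes the file and also passes the checksums through to stdout, so they appear in the job log:

```shell
sha256sum * > ../SHA2-256SUMS      # file only; nothing in the log
sha256sum * | tee ../SHA2-256SUMS  # same file, plus the sums on stdout
```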

2 .github/workflows/core.yml (vendored)

@@ -53,7 +53,7 @@ jobs:
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install test requirements
-        run: python3 ./devscripts/install_deps.py --include dev
+        run: python3 ./devscripts/install_deps.py --include test --include curl-cffi
       - name: Run tests
         continue-on-error: False
         run: |

18 .github/workflows/quick-test.yml (vendored)

@@ -15,21 +15,25 @@ jobs:
         with:
           python-version: '3.8'
       - name: Install test requirements
-        run: python3 ./devscripts/install_deps.py --include dev
+        run: python3 ./devscripts/install_deps.py --include test
       - name: Run tests
         run: |
           python3 -m yt_dlp -v || true
           python3 ./devscripts/run_tests.py core
-  flake8:
-    name: Linter
+  check:
+    name: Code check
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
     runs-on: ubuntu-latest
     steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
-      - name: Install flake8
-        run: python3 ./devscripts/install_deps.py -o --include dev
+        with:
+          python-version: '3.8'
+      - name: Install dev dependencies
+        run: python3 ./devscripts/install_deps.py -o --include static-analysis
       - name: Make lazy extractors
         run: python3 ./devscripts/make_lazy_extractors.py
-      - name: Run flake8
-        run: flake8 .
+      - name: Run ruff
+        run: ruff check --output-format github .
+      - name: Run autopep8
+        run: autopep8 --diff .
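
For reference, a rough local equivalent of the reworked `check` job, using only commands that appear in this diff:

```shell
python3 ./devscripts/install_deps.py -o --include static-analysis
python3 ./devscripts/make_lazy_extractors.py
ruff check --output-format github .  # lint, with GitHub-annotation output
autopep8 --diff .                    # show (but don't apply) formatting fixes
```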

22 .github/workflows/release.yml (vendored)

@@ -189,13 +189,8 @@ jobs:
         if: |
           !inputs.prerelease && env.target_repo == github.repository
         run: |
+          python devscripts/update_changelog.py -vv
           make doc
-          sed '/### /Q' Changelog.md >> ./CHANGELOG
-          echo '### ${{ env.version }}' >> ./CHANGELOG
-          python ./devscripts/make_changelog.py -vv -c >> ./CHANGELOG
-          echo >> ./CHANGELOG
-          grep -Poz '(?s)### \d+\.\d+\.\d+.+' 'Changelog.md' | head -n -1 >> ./CHANGELOG
-          cat ./CHANGELOG > Changelog.md
 
       - name: Push to release
         id: push_release

@@ -266,6 +261,7 @@ jobs:
           pypi_project: ${{ needs.prepare.outputs.pypi_project }}
         run: |
           python devscripts/update-version.py -c "${{ env.channel }}" -r "${{ env.target_repo }}" -s "${{ env.suffix }}" "${{ env.version }}"
+          python devscripts/update_changelog.py -vv
           python devscripts/make_lazy_extractors.py
           sed -i -E '0,/(name = ")[^"]+(")/s//\1${{ env.pypi_project }}\2/' pyproject.toml
 
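
The `sed` invocation above (unchanged context, but easy to misread) relies on the GNU sed `0,/regex/` address, which applies the substitution only up to and including the first matching line, so just the first `name = "..."` in pyproject.toml is rewritten; the empty `s//.../` pattern reuses the address regex. A minimal sketch:

```shell
printf 'name = "yt-dlp"\nname = "other"\n' \
    | sed -E '0,/(name = ")[^"]+(")/s//\1example\2/'
# -> name = "example"
#    name = "other"
```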

@@ -312,19 +308,19 @@ jobs:
           target_tag: ${{ needs.prepare.outputs.target_tag }}
         run: |
           printf '%s' \
-            '[![Installation](https://img.shields.io/badge/-Which%20file%20should%20I%20download%3F-white.svg?style=for-the-badge)]' \
+            '[![Installation](https://img.shields.io/badge/-Which%20file%20to%20download%3F-white.svg?style=for-the-badge)]' \
             '(https://github.com/${{ github.repository }}#installation "Installation instructions") ' \
+            '[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)]' \
+            '(https://discord.gg/H5MNcFW63r "Discord") ' \
+            '[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)]' \
+            '(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
             '[![Documentation](https://img.shields.io/badge/-Docs-brightgreen.svg?style=for-the-badge&logo=GitBook&labelColor=555555)]' \
             '(https://github.com/${{ github.repository }}' \
             '${{ env.target_repo == github.repository && format('/tree/{0}', env.target_tag) || '' }}#readme "Documentation") ' \
-            '[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)]' \
-            '(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
-            '[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)]' \
-            '(https://discord.gg/H5MNcFW63r "Discord") ' \
             ${{ env.target_repo == 'yt-dlp/yt-dlp' && '\
-            "[![Nightly](https://img.shields.io/badge/Get%20nightly%20builds-purple.svg?style=for-the-badge)]" \
+            "[![Nightly](https://img.shields.io/badge/Nightly%20builds-purple.svg?style=for-the-badge)]" \
             "(https://github.com/yt-dlp/yt-dlp-nightly-builds/releases/latest \"Nightly builds\") " \
-            "[![Master](https://img.shields.io/badge/Get%20master%20builds-lightblue.svg?style=for-the-badge)]" \
+            "[![Master](https://img.shields.io/badge/Master%20builds-lightblue.svg?style=for-the-badge)]" \
             "(https://github.com/yt-dlp/yt-dlp-master-builds/releases/latest \"Master builds\")"' || '' }} > ./RELEASE_NOTES
           printf '\n\n' >> ./RELEASE_NOTES
           cat >> ./RELEASE_NOTES << EOF

5 .gitignore (vendored)

@@ -33,6 +33,7 @@ cookies
 *.gif
 *.jpeg
 *.jpg
+*.lrc
 *.m4a
 *.m4v
 *.mhtml
@@ -40,6 +41,7 @@ cookies
 *.mov
 *.mp3
 *.mp4
+*.mpg
 *.mpga
 *.oga
 *.ogg
@@ -47,6 +49,7 @@ cookies
 *.png
 *.sbv
 *.srt
+*.ssa
 *.swf
 *.swp
 *.tt
@@ -64,7 +67,7 @@ cookies
 # Python
 *.pyc
 *.pyo
-.pytest_cache
+.*_cache
 wine-py2exe/
 py2exe.log
 build/

14 .pre-commit-config.yaml (Normal file)

@@ -0,0 +1,14 @@
+repos:
+- repo: local
+  hooks:
+  - id: linter
+    name: Apply linter fixes
+    entry: ruff check --fix .
+    language: system
+    types: [python]
+    require_serial: true
+  - id: format
+    name: Apply formatting fixes
+    entry: autopep8 --in-place .
+    language: system
+    types: [python]
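
Both hooks in the new config are `repo: local`, so nothing is fetched from remote hook repositories; activating them is the usual pre-commit flow (assuming `pre-commit` itself is installed, e.g. via pipx):

```shell
pre-commit install          # register the git hook for this clone
pre-commit run --all-files  # optionally run both hooks over the whole tree once
```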

9 .pre-commit-hatch.yaml (Normal file)

@@ -0,0 +1,9 @@
+repos:
+- repo: local
+  hooks:
+  - id: fix
+    name: Apply code fixes
+    entry: hatch fmt
+    language: system
+    types: [python]
+    require_serial: true
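
`.pre-commit-hatch.yaml` appears to be an alternative hook set that delegates everything to `hatch fmt`; since it is not the default config filename, pre-commit has to be pointed at it explicitly, e.g. with `-c`/`--config`:

```shell
pre-commit run -c .pre-commit-hatch.yaml --all-files
```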

CONTRIBUTING.md

@@ -79,7 +79,7 @@ ### Are you using the latest version?
 
 ### Is the issue already documented?
 
-Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/yt-dlp/yt-dlp/search?type=Issues) of this repository. If there is an issue, subcribe to it to be notified when there is any progress. Unless you have something useful to add to the converation, please refrain from commenting.
+Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/yt-dlp/yt-dlp/search?type=Issues) of this repository. If there is an issue, subscribe to it to be notified when there is any progress. Unless you have something useful to add to the conversation, please refrain from commenting.
 
 Additionally, it is also helpful to see if the issue has already been documented in the [youtube-dl issue tracker](https://github.com/ytdl-org/youtube-dl/issues). If similar issues have already been reported in youtube-dl (but not in our issue tracker), links to them can be included in your issue report here.
 
@@ -134,24 +134,59 @@ ### Is the website primarily used for piracy?
 
 # DEVELOPER INSTRUCTIONS
 
-Most users do not need to build yt-dlp and can [download the builds](https://github.com/yt-dlp/yt-dlp/releases) or get them via [the other installation methods](README.md#installation).
+Most users do not need to build yt-dlp and can [download the builds](https://github.com/yt-dlp/yt-dlp/releases), get them via [the other installation methods](README.md#installation) or directly run it using `python -m yt_dlp`.
 
-To run yt-dlp as a developer, you don't need to build anything either. Simply execute
+`yt-dlp` uses [`hatch`](<https://hatch.pypa.io>) as a project management tool.
+You can easily install it using [`pipx`](<https://pipx.pypa.io>) via `pipx install hatch`, or else via `pip` or your package manager of choice. Make sure you are using at least version `1.10.0`, otherwise some functionality might not work as expected.
 
-    python -m yt_dlp
+If you plan on contributing to `yt-dlp`, best practice is to start by running the following command:
 
-To run all the available core tests, use:
+```shell
+$ hatch run setup
+```
 
-    python devscripts/run_tests.py
+The above command will install a `pre-commit` hook so that required checks/fixes (linting, formatting) will run automatically before each commit. If any code needs to be linted or formatted, then the commit will be blocked and the necessary changes will be made; you should review all edits and re-commit the fixed version.
+
+After this you can use `hatch shell` to enable a virtual environment that has `yt-dlp` and its development dependencies installed.
+
+In addition, the following script commands can be used to run simple tasks such as linting or testing (without having to run `hatch shell` first):
+* `hatch fmt`: Automatically fix linter violations and apply required code formatting changes
+  * See `hatch fmt --help` for more info
+* `hatch test`: Run extractor or core tests
+  * See `hatch test --help` for more info
 
 See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
 
+While it is strongly recommended to use `hatch` for yt-dlp development, if you are unable to do so, alternatively you can manually create a virtual environment and use the following commands:
+
+```shell
+# To only install development dependencies:
+$ python -m devscripts.install_deps --include dev
+
+# Or, for an editable install plus dev dependencies:
+$ python -m pip install -e ".[default,dev]"
+
+# To setup the pre-commit hook:
+$ pre-commit install
+
+# To be used in place of `hatch test`:
+$ python -m devscripts.run_tests
+
+# To be used in place of `hatch fmt`:
+$ ruff check --fix .
+$ autopep8 --in-place .
+
+# To only check code instead of applying fixes:
+$ ruff check .
+$ autopep8 --diff .
+```
+
 If you want to create a build of yt-dlp yourself, you can follow the instructions [here](README.md#compile).
 
 
 ## Adding new feature or making overarching changes
 
-Before you start writing code for implementing a new feature, open an issue explaining your feature request and atleast one use case. This allows the maintainers to decide whether such a feature is desired for the project in the first place, and will provide an avenue to discuss some implementation details. If you open a pull request for a new feature without discussing with us first, do not be surprised when we ask for large changes to the code, or even reject it outright.
+Before you start writing code for implementing a new feature, open an issue explaining your feature request and at least one use case. This allows the maintainers to decide whether such a feature is desired for the project in the first place, and will provide an avenue to discuss some implementation details. If you open a pull request for a new feature without discussing with us first, do not be surprised when we ask for large changes to the code, or even reject it outright.
 
 The same applies for changes to the documentation, code style, or overarching changes to the architecture
 
@@ -165,12 +200,16 @@ ## Adding support for a new site
 1. [Fork this repository](https://github.com/yt-dlp/yt-dlp/fork)
 1. Check out the source code with:
 
-        git clone git@github.com:YOUR_GITHUB_USERNAME/yt-dlp.git
+    ```shell
+    $ git clone git@github.com:YOUR_GITHUB_USERNAME/yt-dlp.git
+    ```
 
 1. Start a new git branch with
 
-        cd yt-dlp
-        git checkout -b yourextractor
+    ```shell
+    $ cd yt-dlp
+    $ git checkout -b yourextractor
+    ```
 
 1. Start with this simple template and save it to `yt_dlp/extractor/yourextractor.py`:
 
@@ -217,27 +256,33 @@ ## Adding support for a new site
             # TODO more properties (see yt_dlp/extractor/common.py)
         }
     ```
-1. Add an import in [`yt_dlp/extractor/_extractors.py`](yt_dlp/extractor/_extractors.py). Note that the class name must end with `IE`.
-1. Run `python devscripts/run_tests.py YourExtractor`. This *may fail* at first, but you can continually re-run it until you're done. Upon failure, it will output the missing fields and/or correct values which you can copy. If you decide to add more than one test, the tests will then be named `YourExtractor`, `YourExtractor_1`, `YourExtractor_2`, etc. Note that tests with an `only_matching` key in the test's dict are not included in the count. You can also run all the tests in one go with `YourExtractor_all`
+1. Add an import in [`yt_dlp/extractor/_extractors.py`](yt_dlp/extractor/_extractors.py). Note that the class name must end with `IE`. Also note that when adding a parenthesized import group, the last import in the group must have a trailing comma in order for this formatting to be respected by our code formatter.
+1. Run `hatch test YourExtractor`. This *may fail* at first, but you can continually re-run it until you're done. Upon failure, it will output the missing fields and/or correct values which you can copy. If you decide to add more than one test, the tests will then be named `YourExtractor`, `YourExtractor_1`, `YourExtractor_2`, etc. Note that tests with an `only_matching` key in the test's dict are not included in the count. You can also run all the tests in one go with `YourExtractor_all`
 1. Make sure you have at least one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the particular test is disabled from running.
 1. Have a look at [`yt_dlp/extractor/common.py`](yt_dlp/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](yt_dlp/extractor/common.py#L119-L440). Add tests and code for as many as you want.
-1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
+1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions), passes [ruff](https://docs.astral.sh/ruff/tutorial/#getting-started) code checks and is properly formatted:
 
-        $ flake8 yt_dlp/extractor/yourextractor.py
+    ```shell
+    $ hatch fmt --check
+    ```
+
+    You can use `hatch fmt` to automatically fix problems.
 
 1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.8 and above. Backward compatibility is not required for even older versions of Python.
 1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:
 
-        $ git add yt_dlp/extractor/_extractors.py
-        $ git add yt_dlp/extractor/yourextractor.py
-        $ git commit -m '[yourextractor] Add extractor'
-        $ git push origin yourextractor
+    ```shell
+    $ git add yt_dlp/extractor/_extractors.py
+    $ git add yt_dlp/extractor/yourextractor.py
+    $ git commit -m '[yourextractor] Add extractor'
+    $ git push origin yourextractor
+    ```
 
 1. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
 
 In any case, thank you very much for your contributions!
 
-**Tip:** To test extractors that require login information, create a file `test/local_parameters.json` and add `"usenetrc": true` or your username and password in it:
+**Tip:** To test extractors that require login information, create a file `test/local_parameters.json` and add `"usenetrc": true` or your `username`&`password` or `cookiefile`/`cookiesfrombrowser` in it:
 ```json
 {
     "username": "your user name",
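
Following the updated tip, one way to create `test/local_parameters.json`, here with a cookies file rather than credentials (`cookiefile` is a regular yt-dlp option; the path is just an example):

```shell
cat > test/local_parameters.json << 'EOF'
{
    "cookiefile": "cookies.txt"
}
EOF
```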

@@ -264,7 +309,7 @@ ### Mandatory and optional metafields
 
 For pornographic sites, appropriate `age_limit` must also be returned.
 
-The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract usefull information with `--ignore-no-formats-error` - e.g. when the video is a live stream that has not started yet.
+The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract useful information with `--ignore-no-formats-error` - e.g. when the video is a live stream that has not started yet.
 
 [Any field](yt_dlp/extractor/common.py#219-L426) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
 

89 CONTRIBUTORS

@@ -542,3 +542,92 @@ prettykool
 S-Aarab
 sonmezberkay
 TSRBerry
+114514ns
+agibson-fl
+alard
+alien-developers
+antonkesy
+ArnauvGilotra
+Arthurszzz
+Bibhav48
+Bl4Cc4t
+boredzo
+Caesim404
+chkuendig
+chtk
+Danish-H
+dasidiot
+diman8
+divStar
+DmitryScaletta
+feederbox826
+gmes78
+gonzalezjo
+hui1601
+infanf
+jazz1611
+jingtra
+jkmartindale
+johnvictorfs
+llistochek
+marcdumais
+martinxyz
+michal-repo
+mrmedieval
+nbr23
+Nicals
+Noor-5
+NurTasin
+pompos02
+Pranaxcau
+pwaldhauer
+RaduManole
+RalphORama
+rrgomes
+ruiminggu
+rvsit
+sefidel
+shmohawk
+Snack-X
+src-tinkerer
+stilor
+syntaxsurge
+t-nil
+ufukk
+vista-narvas
+x11x
+xpadev-net
+Xpl0itU
+YoshichikaAAA
+zhijinwuu
+alb
+hruzgar
+kasper93
+leoheitmannruiz
+luiso1979
+nipotan
+Offert4324
+sta1us
+Tomoka1
+trwstin
+alexhuot1
+clienthax
+DaPotato69
+emqi
+hugohaa
+imanoreotwe
+JakeFinley96
+lostfictions
+minamotorin
+ocococococ
+Podiumnoche
+RasmusAntons
+roeniss
+shoxie007
+Szpachlarz
+The-MAGI
+TuxCoder
+voidful
+vtexier
+WyohKnott
+trueauracoral
446
Changelog.md
446
Changelog.md
|
@ -4,6 +4,444 @@ # Changelog
|
||||||
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
|
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
|
||||||
-->
|
-->
|
||||||
|
|
||||||
|
### 2024.05.27
|
||||||
|
|
||||||
|
#### Extractor changes
|
||||||
|
- [Fix parsing of base URL in SMIL manifest](https://github.com/yt-dlp/yt-dlp/commit/26603d0b34898818992bee4598e0607c07059511) ([#9225](https://github.com/yt-dlp/yt-dlp/issues/9225)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **peertube**: [Support livestreams](https://github.com/yt-dlp/yt-dlp/commit/12b248ce60be1aa1362edd839d915bba70dbee4b) ([#10044](https://github.com/yt-dlp/yt-dlp/issues/10044)) by [bashonly](https://github.com/bashonly), [trueauracoral](https://github.com/trueauracoral)
|
||||||
|
- **piksel**: [Update domain](https://github.com/yt-dlp/yt-dlp/commit/ae2194e1dd4a99d32eb3cab7c48a0ff03101ef3b) ([#9223](https://github.com/yt-dlp/yt-dlp/issues/9223)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **tiktok**: user: [Fix extraction loop](https://github.com/yt-dlp/yt-dlp/commit/c53c2e40fde8f2e15c7c62f8ca1a5d9e90ddc079) ([#10035](https://github.com/yt-dlp/yt-dlp/issues/10035)) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
#### Misc. changes
|
||||||
|
- **cleanup**: Miscellaneous: [5e3e19c](https://github.com/yt-dlp/yt-dlp/commit/5e3e19c93c52830da98d9d1ed84ea7a559efefbd) by [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
|
### 2024.05.26
|
||||||
|
|
||||||
|
#### Core changes
|
||||||
|
- [Better warning when requested subs format not found](https://github.com/yt-dlp/yt-dlp/commit/7e4259dff0b681a3f0e8a930799ce0394328c86e) ([#9873](https://github.com/yt-dlp/yt-dlp/issues/9873)) by [DaPotato69](https://github.com/DaPotato69)
|
||||||
|
- [Merged with youtube-dl a08f2b7](https://github.com/yt-dlp/yt-dlp/commit/a4da9db87b6486b270c15dfa07ab5bfedc83f6bd) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
|
||||||
|
- [Warn if lack of ffmpeg alters format selection](https://github.com/yt-dlp/yt-dlp/commit/96da9525043f78aca4544d01761b13b2140e9ae6) ([#9805](https://github.com/yt-dlp/yt-dlp/issues/9805)) by [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- **cookies**
|
||||||
|
- [Add `--cookies-from-browser` support for Whale](https://github.com/yt-dlp/yt-dlp/commit/dd9ad97b1fbdd36c086b8ba82328a4d954f78f8e) ([#9649](https://github.com/yt-dlp/yt-dlp/issues/9649)) by [roeniss](https://github.com/roeniss)
|
||||||
|
- [Get chrome session cookies with `--cookies-from-browser`](https://github.com/yt-dlp/yt-dlp/commit/f1f158976e38d38a260762accafe7bbe6d451151) ([#9747](https://github.com/yt-dlp/yt-dlp/issues/9747)) by [StefanLobbenmeier](https://github.com/StefanLobbenmeier)
|
||||||
|
- **windows**: [Improve shell quoting and tests](https://github.com/yt-dlp/yt-dlp/commit/64766459e37451b665c1464073c28361fbcf1c25) ([#9802](https://github.com/yt-dlp/yt-dlp/issues/9802)) by [Grub4K](https://github.com/Grub4K) (With fixes in [7e26bd5](https://github.com/yt-dlp/yt-dlp/commit/7e26bd53f9c5893518fde81dfd0079ec08dd841e))
|
||||||
|
|
||||||
|
#### Extractor changes
|
||||||
|
- [Add POST data hash to `--write-pages` filenames](https://github.com/yt-dlp/yt-dlp/commit/61b17437dc14a1c7e90ff48a6198df77828c6df4) ([#9879](https://github.com/yt-dlp/yt-dlp/issues/9879)) by [minamotorin](https://github.com/minamotorin) (With fixes in [c999bac](https://github.com/yt-dlp/yt-dlp/commit/c999bac02c5a4f755b2a82488a975e91c988ffd8) by [bashonly](https://github.com/bashonly))
|
||||||
|
- [Make `_search_nextjs_data` non fatal](https://github.com/yt-dlp/yt-dlp/commit/3ee1194288981c4f2c4abd8315326de0c424d2ce) ([#8937](https://github.com/yt-dlp/yt-dlp/issues/8937)) by [Grub4K](https://github.com/Grub4K)
|
||||||
|
- **afreecatv**: live: [Add `cdn` extractor-arg](https://github.com/yt-dlp/yt-dlp/commit/315b3544296bb83012e20ee3af9d3cbf5600dd1c) ([#9666](https://github.com/yt-dlp/yt-dlp/issues/9666)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **alura**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/fc2879ecb05aaad36869609d154e4321362c1f63) ([#9658](https://github.com/yt-dlp/yt-dlp/issues/9658)) by [hugohaa](https://github.com/hugohaa)
|
||||||
|
- **artetv**: [Label forced subtitles](https://github.com/yt-dlp/yt-dlp/commit/7b5674949fd03a33b47b67b31d56a5adf1c48c91) ([#9945](https://github.com/yt-dlp/yt-dlp/issues/9945)) by [vtexier](https://github.com/vtexier)
|
||||||
|
- **bbc**: [Fix and extend extraction](https://github.com/yt-dlp/yt-dlp/commit/7975ddf245d22af034d5b983eeb1c5ec6c2ce053) ([#9705](https://github.com/yt-dlp/yt-dlp/issues/9705)) by [dirkf](https://github.com/dirkf), [kylegustavo](https://github.com/kylegustavo), [pukkandan](https://github.com/pukkandan)
|
||||||
|
- **bilibili**: [Fix `--geo-verification-proxy` support](https://github.com/yt-dlp/yt-dlp/commit/2338827072dacab0f15348b70aec8685feefc8d1) ([#9817](https://github.com/yt-dlp/yt-dlp/issues/9817)) by [fireattack](https://github.com/fireattack)
|
||||||
|
- **bilibilispacevideo**
|
||||||
|
- [Better error message](https://github.com/yt-dlp/yt-dlp/commit/06d52c87314e0bbc16c43c405090843885577b88) ([#9839](https://github.com/yt-dlp/yt-dlp/issues/9839)) by [fireattack](https://github.com/fireattack)
|
||||||
|
- [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/4cc99d7b6cce8b39506ead01407445d576b63ee4) ([#9905](https://github.com/yt-dlp/yt-dlp/issues/9905)) by [c-basalt](https://github.com/c-basalt)
|
||||||
|
- **boosty**: [Add cookies support](https://github.com/yt-dlp/yt-dlp/commit/145dc6f6563e80d2da1b3e9aea2ffa795b71622c) ([#9522](https://github.com/yt-dlp/yt-dlp/issues/9522)) by [RasmusAntons](https://github.com/RasmusAntons)
|
||||||
|
- **brilliantpala**: [Fix login](https://github.com/yt-dlp/yt-dlp/commit/eead3bbc01f6529862bdad1f0b2adeabda4f006e) ([#9788](https://github.com/yt-dlp/yt-dlp/issues/9788)) by [pzhlkj6612](https://github.com/pzhlkj6612)
|
||||||
|
- **canalalpha**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/00a9f2e1f7fa69499221f2e8dd73a08efeef79bc) ([#9675](https://github.com/yt-dlp/yt-dlp/issues/9675)) by [kclauhk](https://github.com/kclauhk)
|
||||||
|
- **cbc.ca**: player: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/c8bf48f3a8fa29587e7c73ef5a7710385a5ea725) ([#9866](https://github.com/yt-dlp/yt-dlp/issues/9866)) by [carusocr](https://github.com/carusocr)
|
||||||
|
- **cda**: [Fix age-gated web extraction](https://github.com/yt-dlp/yt-dlp/commit/6d8a53d870ff6795f509085bfbf3981417999038) ([#9939](https://github.com/yt-dlp/yt-dlp/issues/9939)) by [dirkf](https://github.com/dirkf), [emqi](https://github.com/emqi), [Podiumnoche](https://github.com/Podiumnoche), [Szpachlarz](https://github.com/Szpachlarz)
|
||||||
|
- **commonmistakes**: [Raise error on blob URLs](https://github.com/yt-dlp/yt-dlp/commit/98d71d8c5e5dab08b561ee6f137e968d2a004262) ([#9897](https://github.com/yt-dlp/yt-dlp/issues/9897)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **crunchyroll**
|
||||||
|
- [Always make metadata available](https://github.com/yt-dlp/yt-dlp/commit/cb2fb4a643949322adba561ca73bcba3221ec0c5) ([#9772](https://github.com/yt-dlp/yt-dlp/issues/9772)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix auth and remove cookies support](https://github.com/yt-dlp/yt-dlp/commit/ff38a011d57b763f3a69bebd25a5dc9044a717ce) ([#9749](https://github.com/yt-dlp/yt-dlp/issues/9749)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix stream extraction](https://github.com/yt-dlp/yt-dlp/commit/f2816634e3be88fe158b342ee33918de3c272a54) ([#10005](https://github.com/yt-dlp/yt-dlp/issues/10005)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Support browser impersonation](https://github.com/yt-dlp/yt-dlp/commit/5904853ae5788509fdc4892cb7ecdfa9ae7f78e6) ([#9857](https://github.com/yt-dlp/yt-dlp/issues/9857)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **dangalplay**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/0d067e77c3f5527946fb0c22ee1c7011994cba40) ([#10021](https://github.com/yt-dlp/yt-dlp/issues/10021)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **discoveryplus**: [Fix dmax.de and related extractors](https://github.com/yt-dlp/yt-dlp/commit/90d2da311bbb5dc06f385ee428c7e4590936e995) ([#10020](https://github.com/yt-dlp/yt-dlp/issues/10020)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **eplus**: [Handle URLs without videos](https://github.com/yt-dlp/yt-dlp/commit/351dc0bc334c4e1b5f00c152818c3ec0ed71f788) ([#9855](https://github.com/yt-dlp/yt-dlp/issues/9855)) by [pzhlkj6612](https://github.com/pzhlkj6612)
|
||||||
|
- **europarlwebstream**: [Support new URL format](https://github.com/yt-dlp/yt-dlp/commit/800a43983e5fb719526ce4cb3956216085c63268) ([#9647](https://github.com/yt-dlp/yt-dlp/issues/9647)) by [seproDev](https://github.com/seproDev), [voidful](https://github.com/voidful)
|
||||||
|
- **facebook**: [Fix DASH formats extraction](https://github.com/yt-dlp/yt-dlp/commit/e3b42d8b1b8bcfff7ba146c19fc3f6f6ba843cea) ([#9734](https://github.com/yt-dlp/yt-dlp/issues/9734)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **godresource**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/65e709d23530959075816e966c42179ad46e8e3b) ([#9629](https://github.com/yt-dlp/yt-dlp/issues/9629)) by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
- **googledrive**: [Fix formats extraction](https://github.com/yt-dlp/yt-dlp/commit/85ec2a337ac325cf6427cbafd56f0a034c1a5218) ([#9908](https://github.com/yt-dlp/yt-dlp/issues/9908)) by [WyohKnott](https://github.com/WyohKnott)
|
||||||
|
- **hearthisat**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/5bbfdb7c999b22f1aeca0c3489c167d6eb73013b) ([#9949](https://github.com/yt-dlp/yt-dlp/issues/9949)) by [bohwaz](https://github.com/bohwaz), [seproDev](https://github.com/seproDev)
|
||||||
|
- **hytale**: [Use `CloudflareStreamIE` explicitly](https://github.com/yt-dlp/yt-dlp/commit/31b417e1d1ccc67d5c027bf8878f483dc34cb118) ([#9672](https://github.com/yt-dlp/yt-dlp/issues/9672)) by [llamasblade](https://github.com/llamasblade)
|
||||||
|
- **instagram**: [Support `/reels/` URLs](https://github.com/yt-dlp/yt-dlp/commit/06cb0638392b607b47d3c2ac48eb2ebecb0f060d) ([#9539](https://github.com/yt-dlp/yt-dlp/issues/9539)) by [amir16yp](https://github.com/amir16yp)
|
||||||
|
- **jiocinema**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/1463945ae5fb05986a0bd1aa02e41d1a08d93a02) ([#10026](https://github.com/yt-dlp/yt-dlp/issues/10026)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **jiosaavn**: [Extract via API and fix playlists](https://github.com/yt-dlp/yt-dlp/commit/0c21c53885cf03f4040467ae8c44d7ff51016116) ([#9656](https://github.com/yt-dlp/yt-dlp/issues/9656)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **lci**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/5a2eebc76770fca91ffabeff658d560f716fec80) ([#10025](https://github.com/yt-dlp/yt-dlp/issues/10025)) by [ocococococ](https://github.com/ocococococ)
|
||||||
|
- **mixch**: [Extract comments](https://github.com/yt-dlp/yt-dlp/commit/b38018b781b062d5169d104ab430489aef8e7f1e) ([#9860](https://github.com/yt-dlp/yt-dlp/issues/9860)) by [pzhlkj6612](https://github.com/pzhlkj6612)
|
||||||
|
- **moviepilot**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/296df0da1d38a44d34c99b60a18066c301774537) ([#9366](https://github.com/yt-dlp/yt-dlp/issues/9366)) by [panatexxa](https://github.com/panatexxa)
|
||||||
|
- **netease**: program: [Improve `--no-playlist` message](https://github.com/yt-dlp/yt-dlp/commit/73f12119b52d98281804b0c072b2ed6aa841ec88) ([#9488](https://github.com/yt-dlp/yt-dlp/issues/9488)) by [pzhlkj6612](https://github.com/pzhlkj6612)
|
||||||
|
- **nfb**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/0a1a8e3005f66c44bf67633dccd4df19c3fccd1a) ([#9650](https://github.com/yt-dlp/yt-dlp/issues/9650)) by [rrgomes](https://github.com/rrgomes)
|
||||||
|
- **ntslive**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/be7db1a5a8c483726c511c30ea4689cbb8b27962) ([#9641](https://github.com/yt-dlp/yt-dlp/issues/9641)) by [lostfictions](https://github.com/lostfictions)
|
||||||
|
- **orf**: on: [Improve extraction](https://github.com/yt-dlp/yt-dlp/commit/0dd53faeca2ba0ce138e4092d07b5f2dbf2422f9) ([#9677](https://github.com/yt-dlp/yt-dlp/issues/9677)) by [TuxCoder](https://github.com/TuxCoder)
|
||||||
|
- **orftvthek**: [Remove extractor](https://github.com/yt-dlp/yt-dlp/commit/3779f2a307ba3ef1d28e107cdd71b221dfb4eb36) ([#10011](https://github.com/yt-dlp/yt-dlp/issues/10011)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **patreon**
|
||||||
|
- [Extract multiple embeds](https://github.com/yt-dlp/yt-dlp/commit/036e0d92c6052465673d459678322ea03e61483d) ([#9850](https://github.com/yt-dlp/yt-dlp/issues/9850)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix Vimeo embed extraction](https://github.com/yt-dlp/yt-dlp/commit/c9ce57d9bf51541da2381d99bc096a9d0ddf1f27) ([#9712](https://github.com/yt-dlp/yt-dlp/issues/9712)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **piapro**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/3ba8de62d61d782256f5c1e9939a0762039657de) ([#9311](https://github.com/yt-dlp/yt-dlp/issues/9311)) by [FinnRG](https://github.com/FinnRG), [seproDev](https://github.com/seproDev)
|
||||||
|
- **pornhub**: [Fix login by email address](https://github.com/yt-dlp/yt-dlp/commit/518c1afc1592cae3e4eb39dc646b5bc059333112) ([#9914](https://github.com/yt-dlp/yt-dlp/issues/9914)) by [feederbox826](https://github.com/feederbox826)
|
||||||
|
- **qub**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/6b54cccdcb892bca3e55993480d8b86f1c7e6da6) ([#7019](https://github.com/yt-dlp/yt-dlp/issues/7019)) by [alexhuot1](https://github.com/alexhuot1), [dirkf](https://github.com/dirkf)
|
||||||
|
- **reddit**: [Fix subtitles extraction](https://github.com/yt-dlp/yt-dlp/commit/82f4f4444e26daf35b7302c406fe2312f78f619e) ([#10006](https://github.com/yt-dlp/yt-dlp/issues/10006)) by [kclauhk](https://github.com/kclauhk)
|
||||||
|
- **soundcloud**
|
||||||
|
- [Add `formats` extractor-arg](https://github.com/yt-dlp/yt-dlp/commit/beaf832c7a9d57833f365ce18f6115b88071b296) ([#10004](https://github.com/yt-dlp/yt-dlp/issues/10004)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
|
||||||
|
- [Extract `genres`](https://github.com/yt-dlp/yt-dlp/commit/231c2eacc41b06b65c63edf94c0d04768a5da607) ([#9821](https://github.com/yt-dlp/yt-dlp/issues/9821)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **taptap**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/63b569bc5e7d461753637a20ad84a575adee4c0a) ([#9776](https://github.com/yt-dlp/yt-dlp/issues/9776)) by [c-basalt](https://github.com/c-basalt)
|
||||||
|
- **tele5**: [Overhaul extractor](https://github.com/yt-dlp/yt-dlp/commit/c92e4e625e9e6bbbbf8e3b20c3e7ebe57c16072d) ([#10024](https://github.com/yt-dlp/yt-dlp/issues/10024)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **theatercomplextown**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/8056a3026ed6ec6a6d0ed56fdd7ebcd16e928341) ([#9754](https://github.com/yt-dlp/yt-dlp/issues/9754)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **tiktok**
|
||||||
|
- [Add `device_id` extractor-arg](https://github.com/yt-dlp/yt-dlp/commit/3584b8390bd21c0393a3079eeee71aed56a1c1d8) ([#9951](https://github.com/yt-dlp/yt-dlp/issues/9951)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Extract all web formats](https://github.com/yt-dlp/yt-dlp/commit/4ccd73fea0f6f4be343e1ec7f22dd03799addcf8) ([#9960](https://github.com/yt-dlp/yt-dlp/issues/9960)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Extract via mobile API only if extractor-arg is passed](https://github.com/yt-dlp/yt-dlp/commit/41ba4a808b597a3afed78c89675a30deb6844450) ([#9938](https://github.com/yt-dlp/yt-dlp/issues/9938)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix subtitles extraction](https://github.com/yt-dlp/yt-dlp/commit/eef1e9f44ff14c5e65b759bb1eafa3946cdaf719) ([#9961](https://github.com/yt-dlp/yt-dlp/issues/9961)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- collection: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/119d41f27061d220d276a2d38cfc8d873437452a) ([#9986](https://github.com/yt-dlp/yt-dlp/issues/9986)) by [bashonly](https://github.com/bashonly), [imanoreotwe](https://github.com/imanoreotwe)
|
||||||
|
- user: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/347f13dd9bccc2b4db3ea25689410d45d8370ed4) ([#9661](https://github.com/yt-dlp/yt-dlp/issues/9661)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **tv5monde**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/6db96268c521e945d42649607db1574f5d92e082) ([#9143](https://github.com/yt-dlp/yt-dlp/issues/9143)) by [alard](https://github.com/alard), [seproDev](https://github.com/seproDev)
|
||||||
|
- **twitter**
|
||||||
|
- [Fix auth for x.com migration](https://github.com/yt-dlp/yt-dlp/commit/3e35aa32c74bc108375be8c8b6b3bfc90dfff1b4) ([#9952](https://github.com/yt-dlp/yt-dlp/issues/9952)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Support x.com URLs](https://github.com/yt-dlp/yt-dlp/commit/4813173e4544f125d6f2afc31e600727d761b8dd) ([#9926](https://github.com/yt-dlp/yt-dlp/issues/9926)) by [bashonly](https://github.com/bashonly)
- **vk**: [Improve format extraction](https://github.com/yt-dlp/yt-dlp/commit/df5c9e733aaba703cf285c0372b6d61629330c82) ([#9885](https://github.com/yt-dlp/yt-dlp/issues/9885)) by [seproDev](https://github.com/seproDev)
- **wrestleuniverse**: [Avoid partial stream formats](https://github.com/yt-dlp/yt-dlp/commit/c4853655cb9a793129280806af643de43c48f4d5) ([#9800](https://github.com/yt-dlp/yt-dlp/issues/9800)) by [bashonly](https://github.com/bashonly)
- **xiaohongshu**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a2e9031605d87c469be9ce98dbbdf4960b727338) ([#9646](https://github.com/yt-dlp/yt-dlp/issues/9646)) by [HobbyistDev](https://github.com/HobbyistDev)
- **xvideos**: quickies: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/b207d26f83fb8ab0ce56df74dff43ff583a3264f) ([#9834](https://github.com/yt-dlp/yt-dlp/issues/9834)) by [JakeFinley96](https://github.com/JakeFinley96)
- **youporn**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/351368cb9a6731b886a58f5a10fd6b302bbe47be) ([#8827](https://github.com/yt-dlp/yt-dlp/issues/8827)) by [The-MAGI](https://github.com/The-MAGI)
- **youtube**
- [Add `mediaconnect` client](https://github.com/yt-dlp/yt-dlp/commit/cf212d0a331aba05c32117573f760cdf3af8c62f) ([#9546](https://github.com/yt-dlp/yt-dlp/issues/9546)) by [clienthax](https://github.com/clienthax) (usage sketch at the end of this section)
- [Extract upload timestamp if available](https://github.com/yt-dlp/yt-dlp/commit/96a134dea6397a5f2131947c427aac52c8b4e677) ([#9856](https://github.com/yt-dlp/yt-dlp/issues/9856)) by [coletdjnz](https://github.com/coletdjnz)
- [Fix comments extraction](https://github.com/yt-dlp/yt-dlp/commit/8e15177b4113c355989881e4e030f695a9b59c3a) ([#9775](https://github.com/yt-dlp/yt-dlp/issues/9775)) by [bbilly1](https://github.com/bbilly1), [jakeogh](https://github.com/jakeogh), [minamotorin](https://github.com/minamotorin), [shoxie007](https://github.com/shoxie007)
- [Remove `android` from default clients](https://github.com/yt-dlp/yt-dlp/commit/12d8ea8246fa901de302ff5cc748caddadc82f41) ([#9553](https://github.com/yt-dlp/yt-dlp/issues/9553)) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz)
- **zenyandex**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/c4b87dd885ee5391e5f481e7c8bd550a7c543623) ([#9813](https://github.com/yt-dlp/yt-dlp/issues/9813)) by [src-tinkerer](https://github.com/src-tinkerer)
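
A hedged usage sketch for the extractor-args added above. The SoundCloud `formats` values are assumptions based on the documented `{protocol}_{codec}` convention, and both URLs are placeholders; the README's extractor-args section is authoritative:

```shell
# Request specific SoundCloud formats via the new `formats` extractor-arg
yt-dlp --extractor-args "soundcloud:formats=hls_opus,http_aac" "https://soundcloud.com/ARTIST/TRACK"

# Opt in to the new `mediaconnect` YouTube client
yt-dlp --extractor-args "youtube:player_client=mediaconnect" "https://www.youtube.com/watch?v=VIDEO_ID"
```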
#### Networking changes
- [Add `extensions` attribute to `Response`](https://github.com/yt-dlp/yt-dlp/commit/bec9a59e8ec82c18e3bf9268eaa436793dd52e35) ([#9756](https://github.com/yt-dlp/yt-dlp/issues/9756)) by [bashonly](https://github.com/bashonly)
- **Request Handler**
- requests
- [Patch support for `requests` 2.32.2+](https://github.com/yt-dlp/yt-dlp/commit/3f7999533ebe41c2a579d91b4e4cb211cfcd3bc0) ([#9992](https://github.com/yt-dlp/yt-dlp/issues/9992)) by [Grub4K](https://github.com/Grub4K)
- [Update to `requests` 2.32.0](https://github.com/yt-dlp/yt-dlp/commit/c36513f1be2ef3d3cec864accbffda1afaa06ffd) ([#9980](https://github.com/yt-dlp/yt-dlp/issues/9980)) by [coletdjnz](https://github.com/coletdjnz)
#### Misc. changes
- [Add `hatch`, `ruff`, `pre-commit` and improve dev docs](https://github.com/yt-dlp/yt-dlp/commit/e897bd8292a41999cf51dba91b390db5643c72db) ([#7409](https://github.com/yt-dlp/yt-dlp/issues/7409)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
- **build**
- [Migrate `linux_exe` to static musl builds](https://github.com/yt-dlp/yt-dlp/commit/ac817bc83efd939dca3e40c4b527d0ccfc77172b) ([#9811](https://github.com/yt-dlp/yt-dlp/issues/9811)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
- [Normalize `curl_cffi` group to `curl-cffi`](https://github.com/yt-dlp/yt-dlp/commit/02483bea1c4dbe1bace8ca4d19700104fbb8a00f) ([#9698](https://github.com/yt-dlp/yt-dlp/issues/9698)) by [bashonly](https://github.com/bashonly) (With fixes in [89f535e](https://github.com/yt-dlp/yt-dlp/commit/89f535e2656964b4061c25a7739d4d6ba0a30568))
- [Run `macos_legacy` job on `macos-12`](https://github.com/yt-dlp/yt-dlp/commit/1a366403d9c26b992faa77e00f4d02ead57559e3) ([#9804](https://github.com/yt-dlp/yt-dlp/issues/9804)) by [bashonly](https://github.com/bashonly)
- [`macos` job requires `setuptools<70`](https://github.com/yt-dlp/yt-dlp/commit/78c57cc0e0998b8ed90e4306f410aa4be4115cd7) ([#9993](https://github.com/yt-dlp/yt-dlp/issues/9993)) by [bashonly](https://github.com/bashonly)
- **cleanup**
- [Remove questionable extractors](https://github.com/yt-dlp/yt-dlp/commit/01395a34345d1c6ba1b73ca92f94dd200dc45341) ([#9911](https://github.com/yt-dlp/yt-dlp/issues/9911)) by [seproDev](https://github.com/seproDev)
- Miscellaneous: [5c019f6](https://github.com/yt-dlp/yt-dlp/commit/5c019f6328ad40d66561eac3c4de0b3cd070d0f6), [ae2af11](https://github.com/yt-dlp/yt-dlp/commit/ae2af1104f80caf2f47544763a33db2c17a3e1de) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
- **test**
- [Add HTTP proxy tests](https://github.com/yt-dlp/yt-dlp/commit/3c7a287e281d9f9a353dce8902ff78a84c24a040) ([#9578](https://github.com/yt-dlp/yt-dlp/issues/9578)) by [coletdjnz](https://github.com/coletdjnz)
- [Fix connect timeout test](https://github.com/yt-dlp/yt-dlp/commit/53b4d44f55cca66ac33dab092ef2a30b1164b684) ([#9906](https://github.com/yt-dlp/yt-dlp/issues/9906)) by [coletdjnz](https://github.com/coletdjnz)
### 2024.04.09
#### Important changes
- Security: [[CVE-2024-22423](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-22423)] [Prevent RCE when using `--exec` with `%q` on Windows](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-hjq6-52gw-2g7p)
- The shell escape function now properly escapes `%`, `\` and `\n`.
- `utils.Popen` has been patched accordingly.
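
For context, the `%q` in question is the output-template quoting conversion commonly passed to `--exec`; a minimal sketch of the affected pattern, with the URL as a placeholder:

```shell
# %(title)q expands to the video title, quoted for the shell;
# the fix ensures `%`, `\` and `\n` are escaped properly on Windows
yt-dlp --exec 'echo %(title)q' "https://example.com/watch/VIDEO_ID"
```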
#### Core changes
- [Add new option `--progress-delta`](https://github.com/yt-dlp/yt-dlp/commit/9590cc6b4768e190183d7d071a6c78170889116a) ([#9082](https://github.com/yt-dlp/yt-dlp/issues/9082)) by [Grub4K](https://github.com/Grub4K) (usage sketch after this list)
- [Add new options `--impersonate` and `--list-impersonate-targets`](https://github.com/yt-dlp/yt-dlp/commit/0b81d4d252bd065ccd352722987ea34fe17f9244) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan) (usage sketch after this list)
- [Add option `--no-break-on-existing`](https://github.com/yt-dlp/yt-dlp/commit/16be117729150b2784f3b17755c886cb0cf73374) ([#9610](https://github.com/yt-dlp/yt-dlp/issues/9610)) by [bashonly](https://github.com/bashonly)
- [Fix `filesize_approx` calculation](https://github.com/yt-dlp/yt-dlp/commit/86e3b82261e8ebc6c6707c09544c9dfb8907c0fd) ([#9560](https://github.com/yt-dlp/yt-dlp/issues/9560)) by [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
- [Infer `acodec` for single-codec containers](https://github.com/yt-dlp/yt-dlp/commit/86a972033e05fea80e5fe7f2aff6723dbe2f3952) by [pukkandan](https://github.com/pukkandan)
- [Prevent RCE when using `--exec` with `%q` (CVE-2024-22423)](https://github.com/yt-dlp/yt-dlp/commit/ff07792676f404ffff6ee61b5638c9dc1a33a37a) by [Grub4K](https://github.com/Grub4K)
- **cookies**: [Add `--cookies-from-browser` support for Firefox Flatpak](https://github.com/yt-dlp/yt-dlp/commit/2ab2651a4a7be18939e2b4cb21be79fe477c797a) ([#9619](https://github.com/yt-dlp/yt-dlp/issues/9619)) by [un-def](https://github.com/un-def)
- **utils**
- `traverse_obj`
- [Allow unbranching using `all` and `any`](https://github.com/yt-dlp/yt-dlp/commit/3699eeb67cad333272b14a42dd3843d93fda1a2e) ([#9571](https://github.com/yt-dlp/yt-dlp/issues/9571)) by [Grub4K](https://github.com/Grub4K)
- [Convenience improvements](https://github.com/yt-dlp/yt-dlp/commit/32abfb00bdbd119ca675fdc6d1719331f0a2741a) ([#9577](https://github.com/yt-dlp/yt-dlp/issues/9577)) by [Grub4K](https://github.com/Grub4K)
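
Hedged usage sketches for the two new options above. `chrome` is only one possible target and requires a suitable impersonation backend such as `curl_cffi` to be installed; the URLs are placeholders:

```shell
# Limit progress output to at most one update every 5 seconds
yt-dlp --progress-delta 5 "https://example.com/video"

# List the impersonation targets available in this install...
yt-dlp --list-impersonate-targets

# ...then pick one to use for all requests
yt-dlp --impersonate chrome "https://example.com/video"
```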
#### Extractor changes
- [Add extractor impersonate API](https://github.com/yt-dlp/yt-dlp/commit/50c29352312f5662acf9a64b0012766f5c40af61) ([#9474](https://github.com/yt-dlp/yt-dlp/issues/9474)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
- **afreecatv**
- [Overhaul extractor](https://github.com/yt-dlp/yt-dlp/commit/9415f1a5ef88482ebafe3083e8bcb778ac512df7) ([#9566](https://github.com/yt-dlp/yt-dlp/issues/9566)) by [bashonly](https://github.com/bashonly), [Tomoka1](https://github.com/Tomoka1)
- live: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/9073ae6458f4c6a832aa832c67174c61852869be) ([#9348](https://github.com/yt-dlp/yt-dlp/issues/9348)) by [hui1601](https://github.com/hui1601)
- **asobistage**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/0284f1fee202302a78888420f933deae19d9f4e1) ([#8735](https://github.com/yt-dlp/yt-dlp/issues/8735)) by [pzhlkj6612](https://github.com/pzhlkj6612)
- **box**: [Support URLs without file IDs](https://github.com/yt-dlp/yt-dlp/commit/07f5b2f7570fd9ac85aed17f4c0118f6eac77beb) ([#9504](https://github.com/yt-dlp/yt-dlp/issues/9504)) by [shreyasminocha](https://github.com/shreyasminocha)
- **cbc.ca**: player: [Support new URL format](https://github.com/yt-dlp/yt-dlp/commit/b49d5ffc53a72d8245ba319ff07bdc5b8c6a4f0c) ([#9561](https://github.com/yt-dlp/yt-dlp/issues/9561)) by [trainman261](https://github.com/trainman261)
- **crunchyroll**
- [Extract `vo_adaptive_hls` formats by default](https://github.com/yt-dlp/yt-dlp/commit/be77923ffe842f667971019460f6005f3cad01eb) ([#9447](https://github.com/yt-dlp/yt-dlp/issues/9447)) by [bashonly](https://github.com/bashonly)
- [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/954e57e405f79188450eb30103a9308732cd318f) ([#9615](https://github.com/yt-dlp/yt-dlp/issues/9615)) by [bytedream](https://github.com/bytedream)
- **dropbox**: [Fix formats extraction](https://github.com/yt-dlp/yt-dlp/commit/a48cc86d6f6b20427553620c2ddb990ede6a4b41) ([#9627](https://github.com/yt-dlp/yt-dlp/issues/9627)) by [bashonly](https://github.com/bashonly)
- **fathom**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/bc2b8c0596fd6b75af24822c4f0f1da6783d71f7) ([#9495](https://github.com/yt-dlp/yt-dlp/issues/9495)) by [src-tinkerer](https://github.com/src-tinkerer)
- **gofile**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/0da66980d3193cad3dae0120cddddbfcabddf7a1) ([#9446](https://github.com/yt-dlp/yt-dlp/issues/9446)) by [jazz1611](https://github.com/jazz1611)
- **imgur**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/86d2f4d24849af0d1f3af7c0e2ac43bf8a058f74) ([#9471](https://github.com/yt-dlp/yt-dlp/issues/9471)) by [trwstin](https://github.com/trwstin)
- **jiosaavn**
- [Extract artists](https://github.com/yt-dlp/yt-dlp/commit/0ae16ceb1846cc4e609b70ce7c5d8e7458efceb2) ([#9612](https://github.com/yt-dlp/yt-dlp/issues/9612)) by [bashonly](https://github.com/bashonly)
- [Fix format extensions](https://github.com/yt-dlp/yt-dlp/commit/443e206ec41e64ca2aef61d8ef91640fb69b3113) ([#9609](https://github.com/yt-dlp/yt-dlp/issues/9609)) by [bashonly](https://github.com/bashonly)
- [Support playlists](https://github.com/yt-dlp/yt-dlp/commit/2e94602f241f6e41bdc48576c61089435529339b) ([#9622](https://github.com/yt-dlp/yt-dlp/issues/9622)) by [bashonly](https://github.com/bashonly)
- **joqrag**: [Fix live status detection](https://github.com/yt-dlp/yt-dlp/commit/f2fd449b46c4058222e1744f7a35caa20b2d003d) ([#9624](https://github.com/yt-dlp/yt-dlp/issues/9624)) by [pzhlkj6612](https://github.com/pzhlkj6612)
- **kick**: [Support browser impersonation](https://github.com/yt-dlp/yt-dlp/commit/c8a61a910096c77ce08dad5e1b2fbda5eb964156) ([#9611](https://github.com/yt-dlp/yt-dlp/issues/9611)) by [bashonly](https://github.com/bashonly)
- **loom**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/f859ed3ba1e8b129ae6a467592c65687e73fbca1) ([#8686](https://github.com/yt-dlp/yt-dlp/issues/8686)) by [bashonly](https://github.com/bashonly), [hruzgar](https://github.com/hruzgar)
- **medici**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/4cd9e251b9abada107b10830de997bf4d79ca369) ([#9518](https://github.com/yt-dlp/yt-dlp/issues/9518)) by [Offert4324](https://github.com/Offert4324)
- **mixch**
- [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/4c3b7a0769706f7f0ea24adf1f219d5ae82d2b07) ([#9608](https://github.com/yt-dlp/yt-dlp/issues/9608)) by [bashonly](https://github.com/bashonly), [nipotan](https://github.com/nipotan)
- archive: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/c59de48e2bb4c681b03b93b584a05f52609ce4a0) ([#8761](https://github.com/yt-dlp/yt-dlp/issues/8761)) by [pzhlkj6612](https://github.com/pzhlkj6612)
- **nhk**: [Fix NHK World extractors](https://github.com/yt-dlp/yt-dlp/commit/4af9d5c2f6aa81403ae2a8a5ae3cc824730f0b86) ([#9623](https://github.com/yt-dlp/yt-dlp/issues/9623)) by [bashonly](https://github.com/bashonly)
- **patreon**: [Do not extract dead embed URLs](https://github.com/yt-dlp/yt-dlp/commit/36b240f9a72af57eb2c9d927ebb7fd1c917ebf18) ([#9613](https://github.com/yt-dlp/yt-dlp/issues/9613)) by [johnvictorfs](https://github.com/johnvictorfs)
- **radio1be**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/36baaa10e06715ccba06b78885b2042c4844c826) ([#9122](https://github.com/yt-dlp/yt-dlp/issues/9122)) by [HobbyistDev](https://github.com/HobbyistDev)
- **sharepoint**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/ff349ff94aae0b2b148bd3670f7c91d39c2f1d8e) ([#6531](https://github.com/yt-dlp/yt-dlp/issues/6531)) by [bashonly](https://github.com/bashonly), [C0D3D3V](https://github.com/C0D3D3V)
- **sonylivseries**: [Fix season extraction](https://github.com/yt-dlp/yt-dlp/commit/f2868b26e917354203f82a370ad2396646edb813) ([#9423](https://github.com/yt-dlp/yt-dlp/issues/9423)) by [bashonly](https://github.com/bashonly)
- **soundcloud**
- [Adjust format sorting](https://github.com/yt-dlp/yt-dlp/commit/a2d0840739cddd585d24e0ce4796394fc8a4fa2e) ([#9584](https://github.com/yt-dlp/yt-dlp/issues/9584)) by [bashonly](https://github.com/bashonly)
- [Support cookies](https://github.com/yt-dlp/yt-dlp/commit/97362712a1f2b04e735bdf54f749ad99165a62fe) ([#9586](https://github.com/yt-dlp/yt-dlp/issues/9586)) by [bashonly](https://github.com/bashonly)
- [Support retries for API rate-limit](https://github.com/yt-dlp/yt-dlp/commit/246571ae1d867df8bf31a056bdf3bbbfd398366a) ([#9585](https://github.com/yt-dlp/yt-dlp/issues/9585)) by [bashonly](https://github.com/bashonly)
- **thisoldhouse**: [Support Brightcove embeds](https://github.com/yt-dlp/yt-dlp/commit/0df63cce69026d2f4c0cbb4dd36163e83eac93dc) ([#9576](https://github.com/yt-dlp/yt-dlp/issues/9576)) by [bashonly](https://github.com/bashonly)
- **tiktok**
- [Fix API extraction](https://github.com/yt-dlp/yt-dlp/commit/cb61e20c266facabb7a30f9ce53bd79dfc158475) ([#9548](https://github.com/yt-dlp/yt-dlp/issues/9548)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
- [Prefer non-bytevc2 formats](https://github.com/yt-dlp/yt-dlp/commit/63f685f341f35f6f02b0368d1ba53bdb5b520410) ([#9575](https://github.com/yt-dlp/yt-dlp/issues/9575)) by [bashonly](https://github.com/bashonly)
- [Restore `carrier_region` API parameter](https://github.com/yt-dlp/yt-dlp/commit/fc53ec13ff1ee926a3e533a68cfca8acc887b661) ([#9637](https://github.com/yt-dlp/yt-dlp/issues/9637)) by [bashonly](https://github.com/bashonly)
- [Update API hostname](https://github.com/yt-dlp/yt-dlp/commit/8c05b3ebae23c5b444857549a85b84004c01a536) ([#9444](https://github.com/yt-dlp/yt-dlp/issues/9444)) by [bashonly](https://github.com/bashonly)
- **twitch**: [Extract AV1 and HEVC formats](https://github.com/yt-dlp/yt-dlp/commit/02f93ff51b3ff9436d60c4993562b366eaae8851) ([#9158](https://github.com/yt-dlp/yt-dlp/issues/9158)) by [kasper93](https://github.com/kasper93)
- **vkplay**: [Fix `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/b15b0c1d2106437ec61a5c436c543e8760eac160) ([#9636](https://github.com/yt-dlp/yt-dlp/issues/9636)) by [bashonly](https://github.com/bashonly)
- **xvideos**: [Support new URL format](https://github.com/yt-dlp/yt-dlp/commit/aa7e9ae4f48276bd5d0173966c77db9484f65a0a) ([#9502](https://github.com/yt-dlp/yt-dlp/issues/9502)) by [sta1us](https://github.com/sta1us)
- **youtube**
- [Calculate more accurate `filesize`](https://github.com/yt-dlp/yt-dlp/commit/a25a424323267e3f6f9f63c0b62df499bd7b8d46) by [pukkandan](https://github.com/pukkandan)
- [Update `android` params](https://github.com/yt-dlp/yt-dlp/commit/e7b17fce14775bd2448695c8eb7379b8d31d3537) by [pukkandan](https://github.com/pukkandan)
- search: [Fix params for uncensored results](https://github.com/yt-dlp/yt-dlp/commit/17d248a58781e2588d18a5ebe00c441d10011fcd) ([#9456](https://github.com/yt-dlp/yt-dlp/issues/9456)) by [alb](https://github.com/alb), [pukkandan](https://github.com/pukkandan)
#### Downloader changes
- **ffmpeg**: [Accept output args from info dict](https://github.com/yt-dlp/yt-dlp/commit/9c42b7eef547e826e9fcc7beb6706a2523949d05) ([#9278](https://github.com/yt-dlp/yt-dlp/issues/9278)) by [bashonly](https://github.com/bashonly)
#### Networking changes
- [Respect `SSLKEYLOGFILE` environment variable](https://github.com/yt-dlp/yt-dlp/commit/79a451e5763eda8b10d00684d5d3378f3255ee01) ([#9543](https://github.com/yt-dlp/yt-dlp/issues/9543)) by [luiso1979](https://github.com/luiso1979) (usage sketch after this list)
- **Request Handler**
- curlcffi: [Add support for `curl_cffi`](https://github.com/yt-dlp/yt-dlp/commit/52f5be1f1e0dc45bb397ab950f564721976a39bf) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
- websockets: [Workaround race condition causing issues on PyPy](https://github.com/yt-dlp/yt-dlp/commit/e5d4f11104ce7ea1717a90eea82c0f7d230ea5d5) ([#9514](https://github.com/yt-dlp/yt-dlp/issues/9514)) by [coletdjnz](https://github.com/coletdjnz)
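
A sketch of the `SSLKEYLOGFILE` support noted above, assuming the standard NSS key-log workflow; the path and URL are placeholders:

```shell
# Write TLS session keys in NSS key-log format so the captured
# traffic can later be decrypted in a tool such as Wireshark
SSLKEYLOGFILE=./tls-keys.txt yt-dlp "https://example.com/video"
```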
#### Misc. changes
- **build**
- [Do not include `curl_cffi` in `macos_legacy`](https://github.com/yt-dlp/yt-dlp/commit/b19ae095fdddd43c2a2c67d10fbe0d9a645bb98f) ([#9653](https://github.com/yt-dlp/yt-dlp/issues/9653)) by [bashonly](https://github.com/bashonly)
- [Optional dependencies cleanup](https://github.com/yt-dlp/yt-dlp/commit/58dd0f8d1eee6bc9fdc57f1923bed772fa3c946d) ([#9550](https://github.com/yt-dlp/yt-dlp/issues/9550)) by [bashonly](https://github.com/bashonly)
- [Print SHA sums to GHA logs](https://github.com/yt-dlp/yt-dlp/commit/e8032503b9517465b0e86d776fc1e60d8795d673) ([#9582](https://github.com/yt-dlp/yt-dlp/issues/9582)) by [bashonly](https://github.com/bashonly)
- [Update changelog for tarball and sdist](https://github.com/yt-dlp/yt-dlp/commit/17b96974a334688f76b57d350e07cae8cda46877) ([#9425](https://github.com/yt-dlp/yt-dlp/issues/9425)) by [bashonly](https://github.com/bashonly)
- **cleanup**
- [Standardize `import datetime as dt`](https://github.com/yt-dlp/yt-dlp/commit/c305a25c1b16bcf7a5ec499c3b786ed1e2c748da) ([#8978](https://github.com/yt-dlp/yt-dlp/issues/8978)) by [pukkandan](https://github.com/pukkandan)
- ie: [No `from` stdlib imports in extractors](https://github.com/yt-dlp/yt-dlp/commit/e3a3ed8a981d9395c4859b6ef56cd02bc3148db2) by [pukkandan](https://github.com/pukkandan)
- Miscellaneous: [216f6a3](https://github.com/yt-dlp/yt-dlp/commit/216f6a3cb57824e6a3c859649ce058c199b1b247) by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
- **docs**
- [Update yt-dlp tagline](https://github.com/yt-dlp/yt-dlp/commit/388c979ac63a8774339fac2516fe1cc852b4276e) ([#9481](https://github.com/yt-dlp/yt-dlp/issues/9481)) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
- [Various manpage fixes](https://github.com/yt-dlp/yt-dlp/commit/df0e138fc02ae2764a44f2f59fc93c756c4d3ee2) by [leoheitmannruiz](https://github.com/leoheitmannruiz)
- **test**
- [Workaround websocket server hanging](https://github.com/yt-dlp/yt-dlp/commit/f849d77ab54788446b995d256e1ee0894c4fb927) ([#9467](https://github.com/yt-dlp/yt-dlp/issues/9467)) by [coletdjnz](https://github.com/coletdjnz)
- `traversal`: [Separate traversal tests](https://github.com/yt-dlp/yt-dlp/commit/979ce2e786f2ee3fc783b6dc1ef4188d8805c923) ([#9574](https://github.com/yt-dlp/yt-dlp/issues/9574)) by [Grub4K](https://github.com/Grub4K)
### 2024.03.10
#### Core changes
- [Add `--compat-options 2023`](https://github.com/yt-dlp/yt-dlp/commit/3725b4f0c93ca3943e6300013a9670e4ab757fda) ([#9084](https://github.com/yt-dlp/yt-dlp/issues/9084)) by [Grub4K](https://github.com/Grub4K) (With fixes in [ffff1bc](https://github.com/yt-dlp/yt-dlp/commit/ffff1bc6598fc7a9258e51bc153cab812467f9f9) by [pukkandan](https://github.com/pukkandan))
- [Create `ydl._request_director` when needed](https://github.com/yt-dlp/yt-dlp/commit/069b2aedae2279668b6051627a81fc4fbd9c146a) by [pukkandan](https://github.com/pukkandan) (With fixes in [dbd8b1b](https://github.com/yt-dlp/yt-dlp/commit/dbd8b1bff9afd8f05f982bcd52c20bc173c266ca) by [Grub4k](https://github.com/Grub4k))
- [Don't select storyboard formats as fallback](https://github.com/yt-dlp/yt-dlp/commit/d63eae7e7ffb1f3e733e552b9e5e82355bfba214) by [bashonly](https://github.com/bashonly)
- [Handle `--load-info-json` format selection errors](https://github.com/yt-dlp/yt-dlp/commit/263a4b55ac17a796e8991ca8d2d86a3c349f8a60) ([#9392](https://github.com/yt-dlp/yt-dlp/issues/9392)) by [bashonly](https://github.com/bashonly)
- [Warn user when not launching through shell on Windows](https://github.com/yt-dlp/yt-dlp/commit/6a6cdcd1824a14e3b336332c8f31f65497b8c4b8) ([#9250](https://github.com/yt-dlp/yt-dlp/issues/9250)) by [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
- **cookies**
- [Fix `--cookies-from-browser` for `snap` Firefox](https://github.com/yt-dlp/yt-dlp/commit/cbed249aaa053a3f425b9bafc97f8dbd71c44487) ([#9016](https://github.com/yt-dlp/yt-dlp/issues/9016)) by [Grub4K](https://github.com/Grub4K) (example after this list)
- [Fix `--cookies-from-browser` with macOS Firefox profiles](https://github.com/yt-dlp/yt-dlp/commit/85b33f5c163f60dbd089a6b9bc2ba1366d3ddf93) ([#8909](https://github.com/yt-dlp/yt-dlp/issues/8909)) by [RalphORama](https://github.com/RalphORama)
- [Improve error message for Windows `--cookies-from-browser chrome` issue](https://github.com/yt-dlp/yt-dlp/commit/2792092afd367e39251ace1fb2819c855ab8919f) ([#9080](https://github.com/yt-dlp/yt-dlp/issues/9080)) by [Grub4K](https://github.com/Grub4K)
- **plugins**: [Handle `PermissionError`](https://github.com/yt-dlp/yt-dlp/commit/9a8afadd172b7cab143f0049959fa64973589d94) ([#9229](https://github.com/yt-dlp/yt-dlp/issues/9229)) by [pukkandan](https://github.com/pukkandan), [syntaxsurge](https://github.com/syntaxsurge)
- **utils**
- [Improve `repr` of `DateRange`, `match_filter_func`](https://github.com/yt-dlp/yt-dlp/commit/45491a2a30da4d1723cfa9288cb664813bb09afb) by [pukkandan](https://github.com/pukkandan)
- `traverse_obj`: [Support `xml.etree.ElementTree.Element`](https://github.com/yt-dlp/yt-dlp/commit/ffbd4f2a02fee387ea5e0a267ce32df5259111ac) ([#8911](https://github.com/yt-dlp/yt-dlp/issues/8911)) by [Grub4K](https://github.com/Grub4K)
- **webvtt**: [Don't parse single fragment files](https://github.com/yt-dlp/yt-dlp/commit/f24e44e8cbd88ce338d52f594a19330f64d38b50) ([#9034](https://github.com/yt-dlp/yt-dlp/issues/9034)) by [seproDev](https://github.com/seproDev)
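
An example of the `--cookies-from-browser` usage the fixes above affect (a minimal sketch; the URL is a placeholder, and a profile or keyring can optionally be appended after the browser name):

```shell
# Load cookies directly from Firefox (now also working for snap
# installs and macOS profile paths)
yt-dlp --cookies-from-browser firefox "https://example.com/video"
```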
#### Extractor changes
- [Migrate commonly plural fields to lists](https://github.com/yt-dlp/yt-dlp/commit/104a7b5a46dc1805157fb4cc11c05876934d37c1) ([#8917](https://github.com/yt-dlp/yt-dlp/issues/8917)) by [llistochek](https://github.com/llistochek), [pukkandan](https://github.com/pukkandan) (With fixes in [b136e2a](https://github.com/yt-dlp/yt-dlp/commit/b136e2af341f7a88028aea4c5cd50efe2fa9b182) by [bashonly](https://github.com/bashonly))
- [Support multi-period MPD streams](https://github.com/yt-dlp/yt-dlp/commit/4ce57d3b873c2887814cbec03d029533e82f7db5) ([#6654](https://github.com/yt-dlp/yt-dlp/issues/6654)) by [alard](https://github.com/alard), [pukkandan](https://github.com/pukkandan)
- **abematv**
- [Fix extraction with cache](https://github.com/yt-dlp/yt-dlp/commit/c51316f8a69fbd0080f2720777d42ab438e254a3) ([#8895](https://github.com/yt-dlp/yt-dlp/issues/8895)) by [sefidel](https://github.com/sefidel)
- [Support login for playlists](https://github.com/yt-dlp/yt-dlp/commit/8226a3818f804478c756cf460baa9bf3a3b062a5) ([#8901](https://github.com/yt-dlp/yt-dlp/issues/8901)) by [sefidel](https://github.com/sefidel)
- **adn**
- [Add support for German site](https://github.com/yt-dlp/yt-dlp/commit/5eb1458be4767385a9bf1d570ff08e46100cbaa2) ([#8708](https://github.com/yt-dlp/yt-dlp/issues/8708)) by [infanf](https://github.com/infanf)
- [Improve auth error handling](https://github.com/yt-dlp/yt-dlp/commit/9526b1f179d19f75284eceaa5e0ee381af18cf19) ([#9068](https://github.com/yt-dlp/yt-dlp/issues/9068)) by [infanf](https://github.com/infanf)
- **aenetworks**: [Rating should be optional for AP extraction](https://github.com/yt-dlp/yt-dlp/commit/014cb5774d7afe624b6eb4e07f7be924b9e5e186) ([#9005](https://github.com/yt-dlp/yt-dlp/issues/9005)) by [agibson-fl](https://github.com/agibson-fl)
- **altcensored**: channel: [Fix playlist extraction](https://github.com/yt-dlp/yt-dlp/commit/e28e135d6fd6a430fed3e20dfe1a8c8bbc5f9185) ([#9297](https://github.com/yt-dlp/yt-dlp/issues/9297)) by [marcdumais](https://github.com/marcdumais)
- **amadeustv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/e641aab7a61df7406df60ebfe0c77bd5186b2b41) ([#8744](https://github.com/yt-dlp/yt-dlp/issues/8744)) by [ArnauvGilotra](https://github.com/ArnauvGilotra)
- **ant1newsgrembed**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/1ed5ee2f045f717e814f84ba461dadc58e712266) ([#9191](https://github.com/yt-dlp/yt-dlp/issues/9191)) by [seproDev](https://github.com/seproDev)
- **archiveorg**: [Fix format URL encoding](https://github.com/yt-dlp/yt-dlp/commit/3894ab9574748188bbacbd925a3971eda6fa2bb0) ([#9279](https://github.com/yt-dlp/yt-dlp/issues/9279)) by [bashonly](https://github.com/bashonly)
- **ard**
- mediathek
- [Revert to using old id](https://github.com/yt-dlp/yt-dlp/commit/b6951271ac014761c9c317b9cecd5e8e139cfa7c) ([#8916](https://github.com/yt-dlp/yt-dlp/issues/8916)) by [Grub4K](https://github.com/Grub4K)
- [Support cookies to verify age](https://github.com/yt-dlp/yt-dlp/commit/c099ec9392b0283dde34b290d1a04158ad8eb882) ([#9037](https://github.com/yt-dlp/yt-dlp/issues/9037)) by [StefanLobbenmeier](https://github.com/StefanLobbenmeier)
- **art19**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/999ea80beb053491089d256104c4188aced3110f) ([#9099](https://github.com/yt-dlp/yt-dlp/issues/9099)) by [seproDev](https://github.com/seproDev)
- **artetv**: [Separate closed captions](https://github.com/yt-dlp/yt-dlp/commit/393b487a4ea391c44e811505ec98531031d7e81e) ([#8231](https://github.com/yt-dlp/yt-dlp/issues/8231)) by [Nicals](https://github.com/Nicals), [seproDev](https://github.com/seproDev)
- **asobichannel**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/12f042740550c06552819374e2251deb7a519bab) ([#8700](https://github.com/yt-dlp/yt-dlp/issues/8700)) by [Snack-X](https://github.com/Snack-X)
- **bigo**: [Fix JSON extraction](https://github.com/yt-dlp/yt-dlp/commit/85a2d07c1f82c2082b568963d1c32ad3fc848f61) ([#8893](https://github.com/yt-dlp/yt-dlp/issues/8893)) by [DmitryScaletta](https://github.com/DmitryScaletta)
- **bilibili**
- [Add referer header and fix metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/1713c882730a928ac344c099874d2093fc2c8b51) ([#8832](https://github.com/yt-dlp/yt-dlp/issues/8832)) by [SirElderling](https://github.com/SirElderling) (With fixes in [f1570ab](https://github.com/yt-dlp/yt-dlp/commit/f1570ab84d5f49564256c620063d2d3e9ed4acf0) by [TobiX](https://github.com/TobiX))
- [Support `--no-playlist`](https://github.com/yt-dlp/yt-dlp/commit/e439693f729daf6fb15457baea1bca10ef5da34d) ([#9139](https://github.com/yt-dlp/yt-dlp/issues/9139)) by [c-basalt](https://github.com/c-basalt)
- **bilibilisearch**: [Set cookie to fix extraction](https://github.com/yt-dlp/yt-dlp/commit/ffa017cfc5973b265c92248546fcf5020dc43eaf) ([#9119](https://github.com/yt-dlp/yt-dlp/issues/9119)) by [c-basalt](https://github.com/c-basalt)
- **biliintl**: [Fix and improve subtitles extraction](https://github.com/yt-dlp/yt-dlp/commit/cf6413e840476c15e5b166dc2f7cc2a90a4a9aad) ([#7077](https://github.com/yt-dlp/yt-dlp/issues/7077)) by [dirkf](https://github.com/dirkf), [HobbyistDev](https://github.com/HobbyistDev), [itachi-19](https://github.com/itachi-19), [seproDev](https://github.com/seproDev)
- **boosty**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/540b68298192874c75ad5ee4589bed64d02a7d55) ([#9144](https://github.com/yt-dlp/yt-dlp/issues/9144)) by [un-def](https://github.com/un-def)
- **ccma**: [Extract 1080p DASH formats](https://github.com/yt-dlp/yt-dlp/commit/4253e3b7f483127bd812bdac02466f4a5b47ff34) ([#9130](https://github.com/yt-dlp/yt-dlp/issues/9130)) by [seproDev](https://github.com/seproDev)
- **cctv**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/6ad11fef65474bcf70f3a8556850d93c141e44a2) ([#9325](https://github.com/yt-dlp/yt-dlp/issues/9325)) by [src-tinkerer](https://github.com/src-tinkerer)
- **chzzk**
- [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/ba6b0c8261e9f0a6373885736ff90a89dd1fb614) ([#8887](https://github.com/yt-dlp/yt-dlp/issues/8887)) by [DmitryScaletta](https://github.com/DmitryScaletta)
- live: [Support `--wait-for-video`](https://github.com/yt-dlp/yt-dlp/commit/804f2366117b7065552a1c3cddb9ec19b688a5c1) ([#9309](https://github.com/yt-dlp/yt-dlp/issues/9309)) by [hui1601](https://github.com/hui1601)
- **cineverse**: [Detect when login required](https://github.com/yt-dlp/yt-dlp/commit/fc2cc626f07328a6c71b5e21853e4cfa7b1e6256) ([#9081](https://github.com/yt-dlp/yt-dlp/issues/9081)) by [garret1317](https://github.com/garret1317)
- **cloudflarestream**
- [Extract subtitles](https://github.com/yt-dlp/yt-dlp/commit/4d9dc0abe24ad5d9d22a16f40fc61137dcd103f7) ([#9007](https://github.com/yt-dlp/yt-dlp/issues/9007)) by [Bibhav48](https://github.com/Bibhav48)
- [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/f3d5face83f948c24bcb91e06d4fa6e8622d7d79) ([#9280](https://github.com/yt-dlp/yt-dlp/issues/9280)) by [bashonly](https://github.com/bashonly)
- [Improve embed detection](https://github.com/yt-dlp/yt-dlp/commit/464c919ea82aefdf35f138a1ab2dd0bb8fb7fd0e) ([#9287](https://github.com/yt-dlp/yt-dlp/issues/9287)) by [bashonly](https://github.com/bashonly)
- **cloudycdn, lsm**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/5dda3b291f59f388f953337e9fb09a94b64aaf34) ([#8643](https://github.com/yt-dlp/yt-dlp/issues/8643)) by [Caesim404](https://github.com/Caesim404)
- **cnbc**: [Overhaul extractors](https://github.com/yt-dlp/yt-dlp/commit/998dffb5a2343ec709b3d6bbf2bf019649080239) ([#8741](https://github.com/yt-dlp/yt-dlp/issues/8741)) by [gonzalezjo](https://github.com/gonzalezjo), [Noor-5](https://github.com/Noor-5), [ruiminggu](https://github.com/ruiminggu), [seproDev](https://github.com/seproDev), [zhijinwuu](https://github.com/zhijinwuu)
- **craftsy**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/96f3924bac174f2fd401f86f78e77d7e0c5ee008) ([#9384](https://github.com/yt-dlp/yt-dlp/issues/9384)) by [bashonly](https://github.com/bashonly)
- **crooksandliars**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/03536126d32bd861e38536371f0cd5f1b71dcb7a) ([#9192](https://github.com/yt-dlp/yt-dlp/issues/9192)) by [seproDev](https://github.com/seproDev)
- **crtvg**: [Fix `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/785ab1af7f131e73444634ad57b39478651a43d3) ([#9404](https://github.com/yt-dlp/yt-dlp/issues/9404)) by [Xpl0itU](https://github.com/Xpl0itU)
- **dailymotion**: [Support search](https://github.com/yt-dlp/yt-dlp/commit/11ffa92a61e5847b3dfa8975f91ecb3ac2178841) ([#8292](https://github.com/yt-dlp/yt-dlp/issues/8292)) by [drzraf](https://github.com/drzraf), [seproDev](https://github.com/seproDev)
- **douyin**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/9ff946645568e71046487571eefa9cb524a5189b) ([#9239](https://github.com/yt-dlp/yt-dlp/issues/9239)) by [114514ns](https://github.com/114514ns), [bashonly](https://github.com/bashonly) (With fixes in [e546e5d](https://github.com/yt-dlp/yt-dlp/commit/e546e5d3b33a50075e574a2e7b8eda7ea874d21e) by [bashonly](https://github.com/bashonly))
- **duboku**: [Fix m3u8 formats extraction](https://github.com/yt-dlp/yt-dlp/commit/d3d4187da90a6b85f4ebae4bb07693cc9b412d75) ([#9161](https://github.com/yt-dlp/yt-dlp/issues/9161)) by [DmitryScaletta](https://github.com/DmitryScaletta)
- **dumpert**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/eedb38ce4093500e19279d50b708fb9c18bf4dbf) ([#9320](https://github.com/yt-dlp/yt-dlp/issues/9320)) by [rvsit](https://github.com/rvsit)
- **elementorembed**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/6171b050d70435008e64fa06aa6f19c4e5bec75f) ([#8948](https://github.com/yt-dlp/yt-dlp/issues/8948)) by [pompos02](https://github.com/pompos02), [seproDev](https://github.com/seproDev)
- **eporner**: [Extract AV1 formats](https://github.com/yt-dlp/yt-dlp/commit/96d0f8c1cb8aec250c5614bfde6b5fb95f10819b) ([#9028](https://github.com/yt-dlp/yt-dlp/issues/9028)) by [michal-repo](https://github.com/michal-repo)
- **errjupiter**
- [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a514cc2feb1c3b265b19acab11487acad8bb3ab0) ([#8549](https://github.com/yt-dlp/yt-dlp/issues/8549)) by [glensc](https://github.com/glensc)
- [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/80ed8bdeba5a945f127ef9ab055a4823329a1210) ([#9218](https://github.com/yt-dlp/yt-dlp/issues/9218)) by [glensc](https://github.com/glensc)
- **facebook**
- [Add new ID format](https://github.com/yt-dlp/yt-dlp/commit/cf9af2c7f1fedd881a157b3fbe725e5494b00924) ([#3824](https://github.com/yt-dlp/yt-dlp/issues/3824)) by [kclauhk](https://github.com/kclauhk), [Wikidepia](https://github.com/Wikidepia)
- [Improve extraction](https://github.com/yt-dlp/yt-dlp/commit/2e30b5567b5c6113d46b39163db5b044aea8667e) by [jingtra](https://github.com/jingtra), [ringus1](https://github.com/ringus1)
- [Improve thumbnail extraction](https://github.com/yt-dlp/yt-dlp/commit/3c4d3ee491b0ec22ed3cade51d943d3d27141ba7) ([#9060](https://github.com/yt-dlp/yt-dlp/issues/9060)) by [kclauhk](https://github.com/kclauhk)
- [Set format HTTP chunk size](https://github.com/yt-dlp/yt-dlp/commit/5b68c478fb0b93ea6b8fac23f50e12217fa063db) ([#9058](https://github.com/yt-dlp/yt-dlp/issues/9058)) by [bashonly](https://github.com/bashonly), [kclauhk](https://github.com/kclauhk)
- [Support events](https://github.com/yt-dlp/yt-dlp/commit/9b5efaf86b99a2664fff9fc725d275f766c3221d) ([#9055](https://github.com/yt-dlp/yt-dlp/issues/9055)) by [kclauhk](https://github.com/kclauhk)
- [Support permalink URLs](https://github.com/yt-dlp/yt-dlp/commit/87286e93af949c4e6a0f8ba34af6a1ab5aa102b6) ([#9061](https://github.com/yt-dlp/yt-dlp/issues/9061)) by [kclauhk](https://github.com/kclauhk)
- ads: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a40b0070c2a00d3ed839897462171a82323aa875) ([#8870](https://github.com/yt-dlp/yt-dlp/issues/8870)) by [kclauhk](https://github.com/kclauhk)
- **flextv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/4f043479090dc8a7e06e0bb53691e5414320dfb2) ([#9178](https://github.com/yt-dlp/yt-dlp/issues/9178)) by [DmitryScaletta](https://github.com/DmitryScaletta)
- **floatplane**: [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/9cd90447907a59c8a2727583f4a755fb23ed8cd3) ([#8934](https://github.com/yt-dlp/yt-dlp/issues/8934)) by [chtk](https://github.com/chtk)
- **francetv**
- [Fix DAI livestreams](https://github.com/yt-dlp/yt-dlp/commit/e4fbe5f886a6693f2466877c12e99c30c5442ace) ([#9380](https://github.com/yt-dlp/yt-dlp/issues/9380)) by [bashonly](https://github.com/bashonly)
- [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/9749ac7fecbfda391afbadf2870797ce0e382622) ([#9333](https://github.com/yt-dlp/yt-dlp/issues/9333)) by [bashonly](https://github.com/bashonly)
- [Fix m3u8 formats extraction](https://github.com/yt-dlp/yt-dlp/commit/ede624d1db649f5a4b61f8abbb746f365322de27) ([#9347](https://github.com/yt-dlp/yt-dlp/issues/9347)) by [bashonly](https://github.com/bashonly)
- **funk**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/cd0443fb14e2ed805abb02792473457553a123d1) ([#9194](https://github.com/yt-dlp/yt-dlp/issues/9194)) by [seproDev](https://github.com/seproDev)
- **generic**: [Follow https redirects properly](https://github.com/yt-dlp/yt-dlp/commit/c8c9039e640495700f76a13496e3418bdd4382ba) ([#9121](https://github.com/yt-dlp/yt-dlp/issues/9121)) by [seproDev](https://github.com/seproDev)
- **getcourseru**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/4310b6650eeb5630295f4591b37720877878c57a) ([#8873](https://github.com/yt-dlp/yt-dlp/issues/8873)) by [divStar](https://github.com/divStar), [seproDev](https://github.com/seproDev)
- **gofile**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/77c2472ca1ef9050a66aa68bc5fa1bee88706c66) ([#9074](https://github.com/yt-dlp/yt-dlp/issues/9074)) by [jazz1611](https://github.com/jazz1611)
- **googledrive**: [Fix source file extraction](https://github.com/yt-dlp/yt-dlp/commit/5498729c59b03a9511c64552da3ba2f802166f8d) ([#8990](https://github.com/yt-dlp/yt-dlp/issues/8990)) by [jazz1611](https://github.com/jazz1611)
- **goplay**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/7e90e34fa4617b53f8c8a9e69f460508cb1f51b0) ([#6654](https://github.com/yt-dlp/yt-dlp/issues/6654)) by [alard](https://github.com/alard)
- **gopro**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/4a07a455bbf7acf87550053bbba949c828e350ba) ([#9019](https://github.com/yt-dlp/yt-dlp/issues/9019)) by [stilor](https://github.com/stilor)
- **ilpost**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/aa5dcc4ee65916a36cbe1b1b5b29b9110c3163ed) ([#9001](https://github.com/yt-dlp/yt-dlp/issues/9001)) by [CapacitorSet](https://github.com/CapacitorSet)
- **jiosaavnsong**: [Support more bitrates](https://github.com/yt-dlp/yt-dlp/commit/5154dc0a687528f995cde22b5ff63f82c740e98a) ([#8834](https://github.com/yt-dlp/yt-dlp/issues/8834)) by [alien-developers](https://github.com/alien-developers), [bashonly](https://github.com/bashonly)
- **kukululive**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/20cdad5a2c0499d5a6746f5466a2ab0c97b75884) ([#8877](https://github.com/yt-dlp/yt-dlp/issues/8877)) by [DmitryScaletta](https://github.com/DmitryScaletta)
- **lefigarovideoembed**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/9401736fd08767c58af45a1e36ff5929c5fa1ac9) ([#9198](https://github.com/yt-dlp/yt-dlp/issues/9198)) by [seproDev](https://github.com/seproDev)
- **linkedin**: [Fix metadata and extract subtitles](https://github.com/yt-dlp/yt-dlp/commit/017adb28e7fe7b8c8fc472332d86740f31141519) ([#9056](https://github.com/yt-dlp/yt-dlp/issues/9056)) by [barsnick](https://github.com/barsnick)
- **magellantv**: [Support episodes](https://github.com/yt-dlp/yt-dlp/commit/3dc9232e1aa58fe3c2d8cafb50e8162d6f0e891e) ([#9199](https://github.com/yt-dlp/yt-dlp/issues/9199)) by [seproDev](https://github.com/seproDev)
- **magentamusik**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/5e2e24b2c5795756d81785b06b10723ddb6db7b2) ([#7790](https://github.com/yt-dlp/yt-dlp/issues/7790)) by [pwaldhauer](https://github.com/pwaldhauer), [seproDev](https://github.com/seproDev)
- **medaltv**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/02e343f6ef6d7b3f9087ff69e4a1db0b4b4a5c5d) ([#9098](https://github.com/yt-dlp/yt-dlp/issues/9098)) by [Danish-H](https://github.com/Danish-H)
- **mlbarticle**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/50e06e21a68e336198198bda332b8e7d2314f201) ([#9021](https://github.com/yt-dlp/yt-dlp/issues/9021)) by [HobbyistDev](https://github.com/HobbyistDev)
- **motherless**: [Support uploader playlists](https://github.com/yt-dlp/yt-dlp/commit/9f1e9dab21bbe651544c8f4663b0e615dc450e4d) ([#8994](https://github.com/yt-dlp/yt-dlp/issues/8994)) by [dasidiot](https://github.com/dasidiot)
- **mujrozhlas**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/4170b3d7120e06db3391eef39c5add18a1ddf2c3) ([#9306](https://github.com/yt-dlp/yt-dlp/issues/9306)) by [bashonly](https://github.com/bashonly)
- **mx3**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/5a63454b3637b3603434026cddfeac509218b90e) ([#8736](https://github.com/yt-dlp/yt-dlp/issues/8736)) by [martinxyz](https://github.com/martinxyz)
- **naver**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/a281beba8d8f007cf220f96dd1d9412bb070c7d8) ([#8883](https://github.com/yt-dlp/yt-dlp/issues/8883)) by [seproDev](https://github.com/seproDev)
- **nebula**: [Support podcasts](https://github.com/yt-dlp/yt-dlp/commit/0de09c5b9ed619d4a93d7c451c6ddff0381de808) ([#9140](https://github.com/yt-dlp/yt-dlp/issues/9140)) by [c-basalt](https://github.com/c-basalt), [seproDev](https://github.com/seproDev)
- **nerdcubedfeed**: [Overhaul extractor](https://github.com/yt-dlp/yt-dlp/commit/29a74a6126101aabaa1726ae41b1ca55cf26e7a7) ([#9269](https://github.com/yt-dlp/yt-dlp/issues/9269)) by [seproDev](https://github.com/seproDev)
- **newgrounds**
- [Fix login and clean up extraction](https://github.com/yt-dlp/yt-dlp/commit/0fcefb92f3ebfc5cada19c1e85a715f020d0f333) ([#9356](https://github.com/yt-dlp/yt-dlp/issues/9356)) by [Grub4K](https://github.com/Grub4K), [mrmedieval](https://github.com/mrmedieval)
- user: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/3e083191cdc34dd8c482da9a9b4bc682f824cb9d) ([#9046](https://github.com/yt-dlp/yt-dlp/issues/9046)) by [u-spec-png](https://github.com/u-spec-png)
- **nfb**: [Add support for onf.ca and series](https://github.com/yt-dlp/yt-dlp/commit/4b8b0dded8c65cd5b2ab2e858058ba98c9bf49ff) ([#8997](https://github.com/yt-dlp/yt-dlp/issues/8997)) by [bashonly](https://github.com/bashonly), [rrgomes](https://github.com/rrgomes)
- **nhkradiru**: [Extract extended description](https://github.com/yt-dlp/yt-dlp/commit/4392447d9404e3c25cfeb8f5bdfff31b0448da39) ([#9162](https://github.com/yt-dlp/yt-dlp/issues/9162)) by [garret1317](https://github.com/garret1317)
- **nhkradirulive**: [Make metadata extraction non-fatal](https://github.com/yt-dlp/yt-dlp/commit/5af1f19787f7d652fce72dd3ab9536cdd980fe85) ([#8956](https://github.com/yt-dlp/yt-dlp/issues/8956)) by [garret1317](https://github.com/garret1317)
- **niconico**
- [Remove legacy danmaku extraction](https://github.com/yt-dlp/yt-dlp/commit/974d444039c8bbffb57265c6792cd52d169fe1b9) ([#9209](https://github.com/yt-dlp/yt-dlp/issues/9209)) by [pzhlkj6612](https://github.com/pzhlkj6612)
- [Support DMS formats](https://github.com/yt-dlp/yt-dlp/commit/aa13a8e3dd3b698cc40ec438988b1ad834e11a41) ([#9282](https://github.com/yt-dlp/yt-dlp/issues/9282)) by [pzhlkj6612](https://github.com/pzhlkj6612), [xpadev-net](https://github.com/xpadev-net) (With fixes in [40966e8](https://github.com/yt-dlp/yt-dlp/commit/40966e8da27bbf770dacf9be9363fcc3ad72cc9f) by [pzhlkj6612](https://github.com/pzhlkj6612))
- **ninaprotocol**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/62c65bfaf81e04e6746f6fdbafe384eb3edddfbc) ([#8946](https://github.com/yt-dlp/yt-dlp/issues/8946)) by [RaduManole](https://github.com/RaduManole), [seproDev](https://github.com/seproDev)
- **ninenews**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/43694ce13c5a9f1afca8b02b8b2b9b1576d6503d) ([#8840](https://github.com/yt-dlp/yt-dlp/issues/8840)) by [SirElderling](https://github.com/SirElderling)
- **nova**: [Fix embed extraction](https://github.com/yt-dlp/yt-dlp/commit/c168d8791d0974a8a8fcb3b4a4bc2d830df51622) ([#9221](https://github.com/yt-dlp/yt-dlp/issues/9221)) by [seproDev](https://github.com/seproDev)
- **ntvru**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/7a29cbbd5fd7363e7e8535ee1506b7052465d13f) ([#9276](https://github.com/yt-dlp/yt-dlp/issues/9276)) by [bashonly](https://github.com/bashonly), [dirkf](https://github.com/dirkf)
- **nuum**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/acaf806c15f0a802ba286c23af02a10cf4bd4731) ([#8868](https://github.com/yt-dlp/yt-dlp/issues/8868)) by [DmitryScaletta](https://github.com/DmitryScaletta), [seproDev](https://github.com/seproDev)
- **nytimes**
- [Extract timestamp](https://github.com/yt-dlp/yt-dlp/commit/05420227aaab60a39c0f9ade069c5862be36b1fa) ([#9142](https://github.com/yt-dlp/yt-dlp/issues/9142)) by [SirElderling](https://github.com/SirElderling)
- [Overhaul extractors](https://github.com/yt-dlp/yt-dlp/commit/07256b9fee23960799024b95d5972abc7174aa81) ([#9075](https://github.com/yt-dlp/yt-dlp/issues/9075)) by [SirElderling](https://github.com/SirElderling)
- **onefootball**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/644738ddaa45428cb0babd41ead22454e5a2545e) ([#9222](https://github.com/yt-dlp/yt-dlp/issues/9222)) by [seproDev](https://github.com/seproDev)
- **openrec**: [Pass referer for m3u8 formats](https://github.com/yt-dlp/yt-dlp/commit/f591e605dfee4085ec007d6d056c943cbcacc429) ([#9253](https://github.com/yt-dlp/yt-dlp/issues/9253)) by [fireattack](https://github.com/fireattack)
- **orf**: on: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a0d50aabc5462aee302bd3f2663d3a3554875789) ([#9113](https://github.com/yt-dlp/yt-dlp/issues/9113)) by [HobbyistDev](https://github.com/HobbyistDev)
- **patreon**: [Fix embedded HLS extraction](https://github.com/yt-dlp/yt-dlp/commit/f0e8bc7c60b61fe18b63116c975609d76b904771) ([#8993](https://github.com/yt-dlp/yt-dlp/issues/8993)) by [johnvictorfs](https://github.com/johnvictorfs)
- **peertube**: [Update instances](https://github.com/yt-dlp/yt-dlp/commit/35d96982f1033e36215d323317981ee17e8ab0d5) ([#9070](https://github.com/yt-dlp/yt-dlp/issues/9070)) by [Chocobozzz](https://github.com/Chocobozzz)
- **piapro**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/8e6e3651727b0b85764857fc6329fe5e0a3f00de) ([#8999](https://github.com/yt-dlp/yt-dlp/issues/8999)) by [FinnRG](https://github.com/FinnRG)
- **playsuisse**: [Add login support](https://github.com/yt-dlp/yt-dlp/commit/cae6e461073fb7c32fd32052a3e6721447c469bc) ([#9077](https://github.com/yt-dlp/yt-dlp/issues/9077)) by [chkuendig](https://github.com/chkuendig)
- **pornhub**: [Fix login support](https://github.com/yt-dlp/yt-dlp/commit/de954c1b4d3a6db8a6525507e65303c7bb03f39f) ([#9227](https://github.com/yt-dlp/yt-dlp/issues/9227)) by [feederbox826](https://github.com/feederbox826)
- **pr0gramm**: [Enable POL filter and provide tags without login](https://github.com/yt-dlp/yt-dlp/commit/5f25f348f9eb5db842b1ec6799f95bebb7ba35a7) ([#9051](https://github.com/yt-dlp/yt-dlp/issues/9051)) by [Grub4K](https://github.com/Grub4K)
- **prankcastpost**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a2bac6b7adb7b0e955125838e20bb39eece630ce) ([#8933](https://github.com/yt-dlp/yt-dlp/issues/8933)) by [columndeeply](https://github.com/columndeeply)
- **radiko**: [Extract more metadata](https://github.com/yt-dlp/yt-dlp/commit/e3ce2b385ec1f03fac9d4210c57fda77134495fc) ([#9115](https://github.com/yt-dlp/yt-dlp/issues/9115)) by [YoshichikaAAA](https://github.com/YoshichikaAAA)
- **rai**
- [Filter unavailable formats](https://github.com/yt-dlp/yt-dlp/commit/f78814923748277e7067b796f25870686fb46205) ([#9189](https://github.com/yt-dlp/yt-dlp/issues/9189)) by [nixxo](https://github.com/nixxo)
- [Fix m3u8 formats extraction](https://github.com/yt-dlp/yt-dlp/commit/8f423cf8051fbfeedd57cca00d106012e6e86a97) ([#9291](https://github.com/yt-dlp/yt-dlp/issues/9291)) by [nixxo](https://github.com/nixxo)
- **redcdnlivx, sejm**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/fcaa2e735b00b15a2b0d9f55f4187c654b4b5b39) ([#8676](https://github.com/yt-dlp/yt-dlp/issues/8676)) by [selfisekai](https://github.com/selfisekai)
- **redtube**
- [Fix formats extraction](https://github.com/yt-dlp/yt-dlp/commit/c91d8b1899403daff6fc15206ad32de8db17fb8f) ([#9076](https://github.com/yt-dlp/yt-dlp/issues/9076)) by [jazz1611](https://github.com/jazz1611)
- [Support redtube.com.br URLs](https://github.com/yt-dlp/yt-dlp/commit/4a6ff0b47a700dee3ee5c54804c31965308479ae) ([#9103](https://github.com/yt-dlp/yt-dlp/issues/9103)) by [jazz1611](https://github.com/jazz1611)
- **ridehome**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/cd7086c0d54ec1d7e02a30bd5bd934bdb2c54642) ([#8875](https://github.com/yt-dlp/yt-dlp/issues/8875)) by [SirElderling](https://github.com/SirElderling)
- **rinsefmartistplaylist**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/1a36dbad712d359ec1c5b73d9bbbe562c03e9660) ([#8794](https://github.com/yt-dlp/yt-dlp/issues/8794)) by [SirElderling](https://github.com/SirElderling)
- **roosterteeth**
- [Add Brightcove fallback](https://github.com/yt-dlp/yt-dlp/commit/b2cc150ad83ba20ceb2d6e73d09854eed3c2d05c) ([#9403](https://github.com/yt-dlp/yt-dlp/issues/9403)) by [bashonly](https://github.com/bashonly)
- [Extract ad-free streams](https://github.com/yt-dlp/yt-dlp/commit/dd29e6e5fdf0f3758cb0829e73749832768f1a4e) ([#9355](https://github.com/yt-dlp/yt-dlp/issues/9355)) by [jkmartindale](https://github.com/jkmartindale)
- [Extract release date and timestamp](https://github.com/yt-dlp/yt-dlp/commit/dfd8c0b69683b1c11beea039a96dd2949026c1d7) ([#9393](https://github.com/yt-dlp/yt-dlp/issues/9393)) by [bashonly](https://github.com/bashonly)
- [Support bonus features](https://github.com/yt-dlp/yt-dlp/commit/8993721ecb34867b52b79f6e92b233008d1cbe78) ([#9406](https://github.com/yt-dlp/yt-dlp/issues/9406)) by [Bl4Cc4t](https://github.com/Bl4Cc4t)
- **rule34video**
- [Extract `creators`](https://github.com/yt-dlp/yt-dlp/commit/3d9dc2f3590e10abf1561ebdaed96734a740587c) ([#9258](https://github.com/yt-dlp/yt-dlp/issues/9258)) by [gmes78](https://github.com/gmes78)
- [Extract more metadata](https://github.com/yt-dlp/yt-dlp/commit/fee2d8d9c38f9b5f0a8df347c1e698983339c34d) ([#7416](https://github.com/yt-dlp/yt-dlp/issues/7416)) by [gmes78](https://github.com/gmes78)
- [Fix `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/c0ecceeefe6ebd27452d9d8f20658f83ae121d04) ([#9044](https://github.com/yt-dlp/yt-dlp/issues/9044)) by [gmes78](https://github.com/gmes78)
- **rumblechannel**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/0023af81fbce01984f35b34ecaf8562739831227) ([#9092](https://github.com/yt-dlp/yt-dlp/issues/9092)) by [Pranaxcau](https://github.com/Pranaxcau), [vista-narvas](https://github.com/vista-narvas)
- **screencastify**: [Update `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/0bee29493ca8f91a0055a3706c7c94f5860188df) ([#9232](https://github.com/yt-dlp/yt-dlp/issues/9232)) by [seproDev](https://github.com/seproDev)
- **svtpage**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/ddd4b5e10a653bee78e656107710021c1b82934c) ([#8938](https://github.com/yt-dlp/yt-dlp/issues/8938)) by [diman8](https://github.com/diman8)
- **swearnet**: [Raise for login required](https://github.com/yt-dlp/yt-dlp/commit/b05640d532c43a52c0a0da096bb2dbd51e105ec0) ([#9281](https://github.com/yt-dlp/yt-dlp/issues/9281)) by [bashonly](https://github.com/bashonly)
- **tiktok**: [Fix webpage extraction](https://github.com/yt-dlp/yt-dlp/commit/d9b4154cbcb979d7e30af3a73b1bee422aae5aa3) ([#9327](https://github.com/yt-dlp/yt-dlp/issues/9327)) by [bashonly](https://github.com/bashonly)
- **trtworld**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/8ab84650837e58046430c9f4b615c56a8886e071) ([#8701](https://github.com/yt-dlp/yt-dlp/issues/8701)) by [ufukk](https://github.com/ufukk)
- **tvp**: [Support livestreams](https://github.com/yt-dlp/yt-dlp/commit/882e3b753c79c7799ce135c3a5edb72494b576af) ([#8860](https://github.com/yt-dlp/yt-dlp/issues/8860)) by [selfisekai](https://github.com/selfisekai)
- **twitch**: [Fix m3u8 extraction](https://github.com/yt-dlp/yt-dlp/commit/5b8c69ae04444a4c80a5a99917e40f75a116c3b8) ([#8960](https://github.com/yt-dlp/yt-dlp/issues/8960)) by [DmitryScaletta](https://github.com/DmitryScaletta)
- **twitter**
- [Extract bitrate for HLS audio formats](https://github.com/yt-dlp/yt-dlp/commit/28e53d60df9b8aadd52a93504e30e885c9c35262) ([#9257](https://github.com/yt-dlp/yt-dlp/issues/9257)) by [bashonly](https://github.com/bashonly)
- [Extract numeric `channel_id`](https://github.com/yt-dlp/yt-dlp/commit/55f1833376505ed1e4be0516b09bb3ea4425e8a4) ([#9263](https://github.com/yt-dlp/yt-dlp/issues/9263)) by [bashonly](https://github.com/bashonly)
- **txxx**: [Extract thumbnails](https://github.com/yt-dlp/yt-dlp/commit/d79c7e9937c388c68b722ab7450960e43ef776d6) ([#9063](https://github.com/yt-dlp/yt-dlp/issues/9063)) by [shmohawk](https://github.com/shmohawk)
- **utreon**: [Support playeur.com](https://github.com/yt-dlp/yt-dlp/commit/41d6b61e9852a5b97f47cc8a7718b31fb23f0aea) ([#9182](https://github.com/yt-dlp/yt-dlp/issues/9182)) by [DmitryScaletta](https://github.com/DmitryScaletta)
- **vbox7**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/67bb70cd700c8d4c3149cd9e0539a5f32c3d1ce6) ([#9100](https://github.com/yt-dlp/yt-dlp/issues/9100)) by [seproDev](https://github.com/seproDev)
- **viewlift**: [Add support for chorki.com](https://github.com/yt-dlp/yt-dlp/commit/41b6cdb4197aaf7ad82bdad6885eb5d5c64acd74) ([#9095](https://github.com/yt-dlp/yt-dlp/issues/9095)) by [NurTasin](https://github.com/NurTasin)
|
||||||
|
- **vimeo**
|
||||||
|
- [Extract `live_status` and `release_timestamp`](https://github.com/yt-dlp/yt-dlp/commit/f0426e9ca57dd14b82e6c13afc17947614f1e8eb) ([#9290](https://github.com/yt-dlp/yt-dlp/issues/9290)) by [pzhlkj6612](https://github.com/pzhlkj6612)
|
||||||
|
- [Fix API headers](https://github.com/yt-dlp/yt-dlp/commit/8e765755f7f4909e1b535e61b7376b2d66e1ba6a) ([#9125](https://github.com/yt-dlp/yt-dlp/issues/9125)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix login](https://github.com/yt-dlp/yt-dlp/commit/2e8de097ad82da378e97005e8f1ff7e5aebca585) ([#9274](https://github.com/yt-dlp/yt-dlp/issues/9274)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **viously**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/95e82347b398d8bb160767cdd975edecd62cbabd) ([#8927](https://github.com/yt-dlp/yt-dlp/issues/8927)) by [nbr23](https://github.com/nbr23), [seproDev](https://github.com/seproDev)
|
||||||
|
- **youtube**
|
||||||
|
- [Better error when all player responses are skipped](https://github.com/yt-dlp/yt-dlp/commit/5eedc208ec89d6284777060c94aadd06502338b9) ([#9083](https://github.com/yt-dlp/yt-dlp/issues/9083)) by [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
|
||||||
|
- [Bump Android and iOS client versions](https://github.com/yt-dlp/yt-dlp/commit/413d3675804599bc8fe419c19e36490fd8f0b30f) ([#9317](https://github.com/yt-dlp/yt-dlp/issues/9317)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Further bump client versions](https://github.com/yt-dlp/yt-dlp/commit/7aad06541e543fa3452d3d2513e6f079aad1f99b) ([#9395](https://github.com/yt-dlp/yt-dlp/issues/9395)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- tab: [Fix `tags` extraction](https://github.com/yt-dlp/yt-dlp/commit/8828f4576bd862438d4fbf634f1d6ab18a217b0e) ([#9413](https://github.com/yt-dlp/yt-dlp/issues/9413)) by [x11x](https://github.com/x11x)
|
||||||
|
- **zenporn**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/f00c0def7434fac3c88503c2a77c4b2419b8e5ca) ([#8509](https://github.com/yt-dlp/yt-dlp/issues/8509)) by [SirElderling](https://github.com/SirElderling)
|
||||||
|
- **zetland**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/2f4b57594673035a59d72f7667588da848820034) ([#9116](https://github.com/yt-dlp/yt-dlp/issues/9116)) by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
|
||||||
|
#### Downloader changes
|
||||||
|
- **http**: [Reset resume length to handle `FileNotFoundError`](https://github.com/yt-dlp/yt-dlp/commit/2d91b9845621639c53dca7ee9d3d954f3624ba18) ([#8399](https://github.com/yt-dlp/yt-dlp/issues/8399)) by [boredzo](https://github.com/boredzo)
|
||||||
|
|
||||||
|
#### Networking changes
|
||||||
|
- [Remove `_CompatHTTPError`](https://github.com/yt-dlp/yt-dlp/commit/811d298b231cfa29e75c321b23a91d1c2b17602c) ([#8871](https://github.com/yt-dlp/yt-dlp/issues/8871)) by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
- **Request Handler**
|
||||||
|
- [Remove additional logging handlers on close](https://github.com/yt-dlp/yt-dlp/commit/0085e2bab8465ee7d46d16fcade3ed5e96cc8a48) ([#9032](https://github.com/yt-dlp/yt-dlp/issues/9032)) by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
- requests: [Apply `remove_dot_segments` to absolute redirect locations](https://github.com/yt-dlp/yt-dlp/commit/35f4f764a786685ea45d84abe1cf1ad3847f4c97) by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
|
||||||
|
#### Misc. changes
|
||||||
|
- **build**
|
||||||
|
- [Add `default` optional dependency group](https://github.com/yt-dlp/yt-dlp/commit/cf91400a1dd6cc99b11a6d163e1af73b64d618c9) ([#9295](https://github.com/yt-dlp/yt-dlp/issues/9295)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
|
||||||
|
- [Add transitional `setup.py` and `pyinst.py`](https://github.com/yt-dlp/yt-dlp/commit/0abf2f1f153ab47990edbeee3477dc55f74c7f89) ([#9296](https://github.com/yt-dlp/yt-dlp/issues/9296)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
|
||||||
|
- [Bump `actions/upload-artifact` to v4 and adjust workflows](https://github.com/yt-dlp/yt-dlp/commit/3876429d72afb35247f4b2531eb9b16cfc7e0968) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Bump `conda-incubator/setup-miniconda` to v3](https://github.com/yt-dlp/yt-dlp/commit/b0059f0413a6ba6ab0a3aec1f00188ce083cd8bf) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix `secretstorage` for ARM builds](https://github.com/yt-dlp/yt-dlp/commit/920397634d1e84e76d2cb897bd6d69ba0c6bd5ca) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Migrate to `pyproject.toml` and `hatchling`](https://github.com/yt-dlp/yt-dlp/commit/775cde82dc5b1dc64ab0539a92dd8c7ba6c0ad33) by [bashonly](https://github.com/bashonly) (With fixes in [43cfd46](https://github.com/yt-dlp/yt-dlp/commit/43cfd462c0d01eff22c1d4290aeb96eb1ea2c0e1))
|
||||||
|
- [Move bundle scripts into `bundle` submodule](https://github.com/yt-dlp/yt-dlp/commit/a1b778428991b1779203bac243ef4e9b6baea90c) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Support failed build job re-runs](https://github.com/yt-dlp/yt-dlp/commit/eabbccc439720fba381919a88be4fe4d96464cbd) ([#9277](https://github.com/yt-dlp/yt-dlp/issues/9277)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- Makefile
|
||||||
|
- [Add automated `CODE_FOLDERS` and `CODE_FILES`](https://github.com/yt-dlp/yt-dlp/commit/868d2f60a7cb59b410c8cbfb452cbdb072687b81) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Ensure compatibility with BSD `make`](https://github.com/yt-dlp/yt-dlp/commit/beaa1a44554d04d9fe63a743a5bb4431ca778f28) ([#9210](https://github.com/yt-dlp/yt-dlp/issues/9210)) by [bashonly](https://github.com/bashonly) (With fixes in [73fcfa3](https://github.com/yt-dlp/yt-dlp/commit/73fcfa39f59113a8728249de2c4cee3025f17dc2))
|
||||||
|
- [Fix man pages generated by `pandoc>=3`](https://github.com/yt-dlp/yt-dlp/commit/fb44020fa98e47620b3aa1dab94b4c5b7bfb40bd) ([#7047](https://github.com/yt-dlp/yt-dlp/issues/7047)) by [t-nil](https://github.com/t-nil)
|
||||||
|
- **ci**: [Bump `actions/setup-python` to v5](https://github.com/yt-dlp/yt-dlp/commit/b14e818b37f62e3224da157b3ad768b3f0815fcd) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **cleanup**
|
||||||
|
- [Build files cleanup](https://github.com/yt-dlp/yt-dlp/commit/867f637b95b342e1cb9f1dc3c6cf0ffe727187ce) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix infodict returned fields](https://github.com/yt-dlp/yt-dlp/commit/f4f9f6d00edcac6d4eb2b3fb78bf81326235d492) ([#8906](https://github.com/yt-dlp/yt-dlp/issues/8906)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Fix typo in README.md](https://github.com/yt-dlp/yt-dlp/commit/292d60b1ed3b9fe5bcb2775a894cca99b0f9473e) ([#8894](https://github.com/yt-dlp/yt-dlp/issues/8894)) by [antonkesy](https://github.com/antonkesy)
|
||||||
|
- [Mark broken and remove dead extractors](https://github.com/yt-dlp/yt-dlp/commit/df773c3d5d1cc1f877cf8582f0072e386fc49318) ([#9238](https://github.com/yt-dlp/yt-dlp/issues/9238)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Match both `http` and `https` in `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/a687226b48f71b874fa18b0165ec528d591f53fb) ([#8968](https://github.com/yt-dlp/yt-dlp/issues/8968)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Remove unused code](https://github.com/yt-dlp/yt-dlp/commit/ed3bb2b0a12c44334e0d09481752dabf2ca1dc13) ([#8968](https://github.com/yt-dlp/yt-dlp/issues/8968)) by [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- Miscellaneous
|
||||||
|
- [93240fc](https://github.com/yt-dlp/yt-dlp/commit/93240fc1848de4a94f25844c96e0dcd282ef1d3b) by [bashonly](https://github.com/bashonly), [Grub4k](https://github.com/Grub4k), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- [615a844](https://github.com/yt-dlp/yt-dlp/commit/615a84447e8322720be77a0e64298d7f42848693) by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- **devscripts**
|
||||||
|
- `install_deps`: [Add script and migrate to it](https://github.com/yt-dlp/yt-dlp/commit/b8a433aaca86b15cb9f1a451b0f69371d2fc22a9) by [bashonly](https://github.com/bashonly)
|
||||||
|
- `tomlparse`: [Add makeshift toml parser](https://github.com/yt-dlp/yt-dlp/commit/fd647775e27e030ab17387c249e2ebeba68f8ff0) by [Grub4K](https://github.com/Grub4K)
|
||||||
|
- **docs**: [Misc Cleanup](https://github.com/yt-dlp/yt-dlp/commit/47ab66db0f083a76c7fba0f6e136b21dd5a93e3b) ([#8977](https://github.com/yt-dlp/yt-dlp/issues/8977)) by [Arthurszzz](https://github.com/Arthurszzz), [bashonly](https://github.com/bashonly), [Grub4k](https://github.com/Grub4k), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- **test**
|
||||||
|
- [Skip source address tests if the address cannot be bound to](https://github.com/yt-dlp/yt-dlp/commit/69d31914952dd33082ac7019c6f76b43c45b9d06) ([#8900](https://github.com/yt-dlp/yt-dlp/issues/8900)) by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
- websockets: [Fix timeout test on Windows](https://github.com/yt-dlp/yt-dlp/commit/ac340d0745a9de5d494033e3507ef624ba25add3) ([#9344](https://github.com/yt-dlp/yt-dlp/issues/9344)) by [seproDev](https://github.com/seproDev)
|
||||||
|
|
||||||

### 2023.12.30

#### Core changes

@@ -1936,7 +2374,7 @@ ### 2022.04.08
* [utils] `format_decimal_suffix`: Fix for very large numbers by [s0u1h](https://github.com/s0u1h)
* [utils] `traverse_obj`: Allow filtering by value
* [utils] Add `filter_dict`, `get_first`, `try_call`
-* [utils] ExtractorError: Fix for older python versions
+* [utils] ExtractorError: Fix for older Python versions
* [utils] WebSocketsWrapper: Allow omitting `__enter__` invocation by [Lesmiscore](https://github.com/Lesmiscore)
* [docs] Add an `.editorconfig` file by [fstirlitz](https://github.com/fstirlitz)
* [docs] Clarify the exact `BSD` license of dependencies by [MrRawes](https://github.com/MrRawes)

@@ -3400,7 +3838,7 @@ ### 2021.05.20
* [cleanup] code formatting, youtube tests and readme

### 2021.05.11
-* **Deprecate support for python versions < 3.6**
+* **Deprecate support for Python versions < 3.6**
* **Subtitle extraction from manifests** by [fstirlitz](https://github.com/fstirlitz). See [be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
* **Improve output template:**
    * Allow slicing lists/strings using `field.start:end:step`

@@ -3690,7 +4128,7 @@ ### 2021.02.19
* Remove unnecessary `field_preference` and misuse of `preference` from extractors
* Build improvements:
    * Fix hash output by [shirt](https://github.com/shirt-dev)
-    * Lock python package versions for x86 and use `wheels` by [shirt](https://github.com/shirt-dev)
+    * Lock Python package versions for x86 and use `wheels` by [shirt](https://github.com/shirt-dev)
    * Exclude `vcruntime140.dll` from UPX by [jbruchon](https://github.com/jbruchon)
    * Set version number based on UTC time, not local time
    * Publish on PyPi only if token is set

@@ -3757,7 +4195,7 @@ ### 2021.02.04
* Fix "Default format spec" appearing in quiet mode
* [FormatSort] Allow user to prefer av01 over vp9 (The default is still vp9)
* [FormatSort] fix bug where `quality` had more priority than `hasvid`
-* [pyinst] Automatically detect python architecture and working directory
+* [pyinst] Automatically detect Python architecture and working directory
* Strip out internal fields such as `_filename` from infojson

Makefile (37 changed lines)

@@ -2,7 +2,7 @@ all: lazy-extractors yt-dlp doc pypi-files
clean: clean-test clean-dist
clean-all: clean clean-cache
completions: completion-bash completion-fish completion-zsh
-doc: README.md CONTRIBUTING.md issuetemplates supportedsites
+doc: README.md CONTRIBUTING.md CONTRIBUTORS issuetemplates supportedsites
ot: offlinetest
tar: yt-dlp.tar.gz

@@ -10,21 +10,24 @@ tar: yt-dlp.tar.gz
# intended use: when building a source distribution,
# make pypi-files && python3 -m build -sn .
pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites \
	completions yt-dlp.1 pyproject.toml setup.cfg devscripts/* test/*

-.PHONY: all clean install test tar pypi-files completions ot offlinetest codetest supportedsites
+.PHONY: all clean clean-all clean-test clean-dist clean-cache \
+	completions completion-bash completion-fish completion-zsh \
+	doc issuetemplates supportedsites ot offlinetest codetest test \
+	tar pypi-files lazy-extractors install uninstall

clean-test:
	rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
	*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.meta *.part* *.tmp *.temp *.unknown_video *.ytdl \
-	*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.gif *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 \
-	*.mp4 *.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
+	*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.gif *.jpeg *.jpg *.lrc *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 *.mp4 \
+	*.mpg *.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.ssa *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
clean-dist:
	rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
	yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS
clean-cache:
	find . \( \
-		-type d -name .pytest_cache -o -type d -name __pycache__ -o -name "*.pyc" -o -name "*.class" \
+		-type d -name ".*_cache" -o -type d -name __pycache__ -o -name "*.pyc" -o -name "*.class" \
	\) -prune -exec rm -rf {} \;

completion-bash: completions/bash/yt-dlp

@@ -67,14 +70,15 @@ uninstall:
	rm -f $(DESTDIR)$(SHAREDIR)/fish/vendor_completions.d/yt-dlp.fish

codetest:
-	flake8 .
+	ruff check .
+	autopep8 --diff .

test:
-	$(PYTHON) -m pytest
+	$(PYTHON) -m pytest -Werror
	$(MAKE) codetest

offlinetest: codetest
-	$(PYTHON) -m pytest -k "not download"
+	$(PYTHON) -m pytest -Werror -m "not download"

CODE_FOLDERS_CMD = find yt_dlp -type f -name '__init__.py' | sed 's,/__init__.py,,' | grep -v '/__' | sort
CODE_FOLDERS != $(CODE_FOLDERS_CMD)
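For reference, a quick sketch of what the updated lint step now runs (assuming `ruff` and `autopep8` are available in your environment):

```
make codetest        # now runs both linters below
ruff check .         # reports lint/style errors
autopep8 --diff .    # shows (but does not apply) formatting fixes
```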

@@ -148,7 +152,7 @@ yt-dlp.tar.gz: all
	--exclude '*.pyo' \
	--exclude '*~' \
	--exclude '__pycache__' \
-	--exclude '.pytest_cache' \
+	--exclude '.*_cache' \
	--exclude '.git' \
	-- \
	README.md supportedsites.md Changelog.md LICENSE \

@@ -156,5 +160,14 @@ yt-dlp.tar.gz: all
	Makefile yt-dlp.1 README.txt completions .gitignore \
	setup.cfg yt-dlp yt_dlp pyproject.toml devscripts test

-AUTHORS:
-	git shortlog -s -n HEAD | cut -f2 | sort > AUTHORS
+AUTHORS: Changelog.md
+	@if [ -d '.git' ] && command -v git > /dev/null ; then \
+		echo 'Generating $@ from git commit history' ; \
+		git shortlog -s -n HEAD | cut -f2 | sort > $@ ; \
+	fi
+
+CONTRIBUTORS: Changelog.md
+	@if [ -d '.git' ] && command -v git > /dev/null ; then \
+		echo 'Updating $@ from git commit history' ; \
+		$(PYTHON) devscripts/make_changelog.py -v -c > /dev/null ; \
+	fi
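As a usage sketch, the new targets can be invoked directly; both recipes deliberately no-op outside a git checkout (no `.git` directory or no `git` on `PATH`):

```
make AUTHORS         # regenerates AUTHORS via git shortlog
make CONTRIBUTORS    # updates CONTRIBUTORS via devscripts/make_changelog.py
```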

README.md (409 changed lines)

@@ -17,17 +17,15 @@
</div>
<!-- MANPAGE: END EXCLUDED SECTION -->

-yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on the now inactive [youtube-dlc](https://github.com/blackjack4494/yt-dlc). The main focus of this project is adding new features and patches while also keeping up to date with the original project
+yt-dlp is a feature-rich command-line audio/video downloader with support for [thousands of sites](supportedsites.md). The project is a fork of [youtube-dl](https://github.com/ytdl-org/youtube-dl) based on the now inactive [youtube-dlc](https://github.com/blackjack4494/yt-dlc).

<!-- MANPAGE: MOVE "USAGE AND OPTIONS" SECTION HERE -->

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
-* [NEW FEATURES](#new-features)
-* [Differences in default behavior](#differences-in-default-behavior)
* [INSTALLATION](#installation)
    * [Detailed instructions](https://github.com/yt-dlp/yt-dlp/wiki/Installation)
-    * [Update](#update)
    * [Release Files](#release-files)
+    * [Update](#update)
    * [Dependencies](#dependencies)
    * [Compile](#compile)
* [USAGE AND OPTIONS](#usage-and-options)

@@ -65,7 +63,10 @@
* [Developing Plugins](#developing-plugins)
* [EMBEDDING YT-DLP](#embedding-yt-dlp)
    * [Embedding examples](#embedding-examples)
-* [DEPRECATED OPTIONS](#deprecated-options)
+* [CHANGES FROM YOUTUBE-DL](#changes-from-youtube-dl)
+    * [New features](#new-features)
+    * [Differences in default behavior](#differences-in-default-behavior)
+    * [Deprecated options](#deprecated-options)
* [CONTRIBUTING](CONTRIBUTING.md#contributing-to-yt-dlp)
    * [Opening an Issue](CONTRIBUTING.md#opening-an-issue)
    * [Developer Instructions](CONTRIBUTING.md#developer-instructions)

@@ -74,103 +75,6 @@
<!-- MANPAGE: END EXCLUDED SECTION -->

-# NEW FEATURES
-
-* Forked from [**yt-dlc@f9401f2**](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee) and merged with [**youtube-dl@be008e6**](https://github.com/ytdl-org/youtube-dl/commit/be008e657d79832642e2158557c899249c9e31cd) ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))
-* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API
-* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will be now preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples))
-* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that NicoNico livestreams are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.
-* **YouTube improvements**:
-    * Supports Clips, Stories (`ytstories:<channel UCID>`), Search (including filters)**\***, YouTube Music Search, Channel-specific search, Search prefixes (`ytsearch:`, `ytsearchdate:`)**\***, Mixes, and Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`, `:ytnotif`)
-    * Fix for [n-sig based throttling](https://github.com/ytdl-org/youtube-dl/issues/29326) **\***
-    * Supports some (but not all) age-gated content without cookies
-    * Download livestreams from the start using `--live-from-start` (*experimental*)
-    * Channel URLs download all uploads of the channel, including shorts and live
-* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]`
-* **Download time range**: Videos can be downloaded partially based on either timestamps or chapters using `--download-sections`
-* **Split video by chapters**: Videos can be split into multiple files based on chapters using `--split-chapters`
-* **Multi-threaded fragment downloads**: Download multiple fragments of m3u8/mpd videos in parallel. Use `--concurrent-fragments` (`-N`) option to set the number of threads used
-* **Aria2c with HLS/DASH**: You can use `aria2c` as the external downloader for DASH(mpd) and HLS(m3u8) formats
-* **New and fixed extractors**: Many new extractors have been added and a lot of existing ones have been fixed. See the [changelog](Changelog.md) or the [list of supported sites](supportedsites.md)
-* **New MSOs**: Philo, Spectrum, SlingTV, Cablevision, RCN etc.
-* **Subtitle extraction from manifests**: Subtitles can be extracted from streaming media manifests. See [commit/be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
-* **Multiple paths and output templates**: You can give different [output templates](#output-template) and download paths for different types of files. You can also set a temporary path where intermediary files are downloaded to using `--paths` (`-P`)
-* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [CONFIGURATION](#configuration) for details
-* **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata`
-* **Other new options**: Many new options have been added such as `--alias`, `--print`, `--concat-playlist`, `--wait-for-video`, `--retry-sleep`, `--sleep-requests`, `--convert-thumbnails`, `--force-download-archive`, `--force-overwrites`, `--break-match-filter` etc
-* **Improvements**: Regex and other operators in `--format`/`--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio, multiple `--config-locations`, `--exec` at different stages, etc
-* **Plugins**: Extractors and PostProcessors can be loaded from an external file. See [plugins](#plugins) for details
-* **Self updater**: The releases can be updated using `yt-dlp -U`, and downgraded using `--update-to` if required
-* **Automated builds**: [Nightly/master builds](#update-channels) can be used with `--update-to nightly` and `--update-to master`
-
-See [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/commits) for the full list of changes
-
-Features marked with a **\*** have been back-ported to youtube-dl

-### Differences in default behavior
-
-Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc:
-
-* yt-dlp supports only [Python 3.8+](## "Windows 7"), and *may* remove support for more versions as they [become EOL](https://devguide.python.org/versions/#python-release-cycle); while [youtube-dl still supports Python 2.6+ and 3.2+](https://github.com/ytdl-org/youtube-dl/issues/30568#issue-1118238743)
-* The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details
-* `avconv` is not supported as an alternative to `ffmpeg`
-* yt-dlp stores config files in slightly different locations to youtube-dl. See [CONFIGURATION](#configuration) for a list of correct locations
-* The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s-%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
-* The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
-* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
-* Unlike youtube-dlc, yt-dlp does not allow merging multiple audio/video streams into one file by default (since this conflicts with the use of `-f bv*+ba`). If needed, this feature must be enabled using `--audio-multistreams` and `--video-multistreams`. You can also use `--compat-options multistreams` to enable both
-* `--no-abort-on-error` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead
-* When writing metadata files such as thumbnails, description or infojson, the same information (if available) is also written for playlists. Use `--no-write-playlist-metafiles` or `--compat-options no-playlist-metafiles` to not write these files
-* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-info-json`. Use `--no-embed-info-json` or `--compat-options no-attach-info-json` to revert this
-* Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this
-* `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
-* The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
-* Live chats (if available) are considered as subtitles. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent any live chat/danmaku from downloading
-* YouTube channel URLs download all uploads of the channel. To download only the videos in a specific tab, pass the tab's URL. If the channel does not show the requested tab, an error will be raised. Also, `/live` URLs raise an error if there are no live videos instead of silently downloading the entire channel. You may use `--compat-options no-youtube-channel-redirect` to revert all these redirections
-* Unavailable videos are also listed for YouTube playlists. Use `--compat-options no-youtube-unavailable-videos` to remove this
-* The upload dates extracted from YouTube are in UTC [when available](https://github.com/yt-dlp/yt-dlp/blob/89e4d86171c7b7c997c77d4714542e0383bf0db0/yt_dlp/extractor/youtube.py#L3898-L3900). Use `--compat-options no-youtube-prefer-utc-upload-date` to prefer the non-UTC upload date.
-* If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
-* Thumbnail embedding in `mp4` is done with mutagen if possible. Use `--compat-options embed-thumbnail-atomicparsley` to force the use of AtomicParsley instead
-* Some internal metadata such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this
-* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this
-* `certifi` will be used for SSL root certificates, if installed. If you want to use system certificates (e.g. self-signed), use `--compat-options no-certifi`
-* yt-dlp's sanitization of invalid characters in filenames is different/smarter than in youtube-dl. You can use `--compat-options filename-sanitization` to revert to youtube-dl's behavior
-* yt-dlp tries to parse the external downloader outputs into the standard progress output if possible (Currently implemented: [~~aria2c~~](https://github.com/yt-dlp/yt-dlp/issues/5931)). You can use `--compat-options no-external-downloader-progress` to get the downloader output as-is
-* yt-dlp versions between 2021.09.01 and 2023.01.02 applies `--match-filter` to nested playlists. This was an unintentional side-effect of [8f18ac](https://github.com/yt-dlp/yt-dlp/commit/8f18aca8717bb0dd49054555af8d386e5eda3a88) and is fixed in [d7b460](https://github.com/yt-dlp/yt-dlp/commit/d7b460d0e5fc710950582baed2e3fc616ed98a80). Use `--compat-options playlist-match-filter` to revert this
-* yt-dlp versions between 2021.11.10 and 2023.06.21 estimated `filesize_approx` values for fragmented/manifest formats. This was added for convenience in [f2fe69](https://github.com/yt-dlp/yt-dlp/commit/f2fe69c7b0d208bdb1f6292b4ae92bc1e1a7444a), but was reverted in [0dff8e](https://github.com/yt-dlp/yt-dlp/commit/0dff8e4d1e6e9fb938f4256ea9af7d81f42fd54f) due to the potentially extreme inaccuracy of the estimated values. Use `--compat-options manifest-filesize-approx` to keep extracting the estimated values
-* yt-dlp uses modern http client backends such as `requests`. Use `--compat-options prefer-legacy-http-handler` to prefer the legacy http handler (`urllib`) to be used for standard http requests.
-* The sub-module `swfinterp` is removed.
-
-For ease of use, a few more compat options are available:
-
-* `--compat-options all`: Use all compat options (Do NOT use)
-* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter,-manifest-filesize-approx`
-* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter,-manifest-filesize-approx`
-* `--compat-options 2021`: Same as `--compat-options 2022,no-certifi,filename-sanitization,no-youtube-prefer-utc-upload-date`
-* `--compat-options 2022`: Same as `--compat-options 2023,playlist-match-filter,no-external-downloader-progress,prefer-legacy-http-handler,manifest-filesize-approx`
-* `--compat-options 2023`: Currently does nothing. Use this to enable all future compat options

# INSTALLATION

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->

@@ -186,41 +90,6 @@ # INSTALLATION
You can install yt-dlp using [the binaries](#release-files), [pip](https://pypi.org/project/yt-dlp) or a third-party package manager. See [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation) for detailed instructions
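For the pip route, a minimal sketch; the `[default]` extra (the recommended dependency group added in this release) is assumed:

```
python3 -m pip install -U "yt-dlp[default]"
```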

-## UPDATE
-You can use `yt-dlp -U` to update if you are using the [release binaries](#release-files)
-
-If you [installed with pip](https://github.com/yt-dlp/yt-dlp/wiki/Installation#with-pip), simply re-run the same command that was used to install the program
-
-For other third-party package managers, see [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation#third-party-package-managers) or refer to their documentation
-
-<a id="update-channels"/>
-
-There are currently three release channels for binaries: `stable`, `nightly` and `master`.
-
-* `stable` is the default channel, and many of its changes have been tested by users of the `nightly` and `master` channels.
-* The `nightly` channel has releases scheduled to build every day around midnight UTC, for a snapshot of the project's new patches and changes. This is the **recommended channel for regular users** of yt-dlp. The `nightly` releases are available from [yt-dlp/yt-dlp-nightly-builds](https://github.com/yt-dlp/yt-dlp-nightly-builds/releases) or as development releases of the `yt-dlp` PyPI package (which can be installed with pip's `--pre` flag).
-* The `master` channel features releases that are built after each push to the master branch, and these will have the very latest fixes and additions, but may also be more prone to regressions. They are available from [yt-dlp/yt-dlp-master-builds](https://github.com/yt-dlp/yt-dlp-master-builds/releases).
-
-When using `--update`/`-U`, a release binary will only update to its current channel.
-`--update-to CHANNEL` can be used to switch to a different channel when a newer version is available. `--update-to [CHANNEL@]TAG` can also be used to upgrade or downgrade to specific tags from a channel.
-
-You may also use `--update-to <repository>` (`<owner>/<repository>`) to update to a channel on a completely different repository. Be careful with what repository you are updating to though, there is no verification done for binaries from different repositories.
-
-Example usage:
-* `yt-dlp --update-to master` switch to the `master` channel and update to its latest release
-* `yt-dlp --update-to stable@2023.07.06` upgrade/downgrade to release to `stable` channel tag `2023.07.06`
-* `yt-dlp --update-to 2023.10.07` upgrade/downgrade to tag `2023.10.07` if it exists on the current channel
-* `yt-dlp --update-to example/yt-dlp@2023.09.24` upgrade/downgrade to the release from the `example/yt-dlp` repository, tag `2023.09.24`
-
-**Important**: Any user experiencing an issue with the `stable` release should install or update to the `nightly` release before submitting a bug report:
-```
-# To update to nightly from stable executable/binary:
-yt-dlp --update-to nightly
-
-# To install nightly with pip:
-python -m pip install -U --pre yt-dlp
-```

<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
## RELEASE FILES

@@ -236,10 +105,9 @@ #### Alternatives
File|Description
:---|:---
-[yt-dlp_x86.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_x86.exe)|Windows (Vista SP2+) standalone x86 (32-bit) binary
+[yt-dlp_x86.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_x86.exe)|Windows (Win7 SP1+) standalone x86 (32-bit) binary
[yt-dlp_min.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_min.exe)|Windows (Win7 SP1+) standalone x64 binary built with `py2exe`<br/> ([Not recommended](#standalone-py2exe-builds-windows))
[yt-dlp_linux](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux)|Linux standalone x64 binary
-[yt-dlp_linux.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux.zip)|Unpackaged Linux executable (no auto-update)
[yt-dlp_linux_armv7l](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux_armv7l)|Linux standalone armv7l (32-bit) binary
[yt-dlp_linux_aarch64](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux_aarch64)|Linux standalone aarch64 (64-bit) binary
[yt-dlp_win.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_win.zip)|Unpackaged Windows executable (no auto-update)

@@ -267,6 +135,43 @@ #### Misc
**Note**: The manpages, shell completion (autocomplete) files etc. are available inside the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)

+## UPDATE
+You can use `yt-dlp -U` to update if you are using the [release binaries](#release-files)
+
+If you [installed with pip](https://github.com/yt-dlp/yt-dlp/wiki/Installation#with-pip), simply re-run the same command that was used to install the program
+
+For other third-party package managers, see [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation#third-party-package-managers) or refer to their documentation
+
+<a id="update-channels"></a>
+
+There are currently three release channels for binaries: `stable`, `nightly` and `master`.
+
+* `stable` is the default channel, and many of its changes have been tested by users of the `nightly` and `master` channels.
+* The `nightly` channel has releases scheduled to build every day around midnight UTC, for a snapshot of the project's new patches and changes. This is the **recommended channel for regular users** of yt-dlp. The `nightly` releases are available from [yt-dlp/yt-dlp-nightly-builds](https://github.com/yt-dlp/yt-dlp-nightly-builds/releases) or as development releases of the `yt-dlp` PyPI package (which can be installed with pip's `--pre` flag).
+* The `master` channel features releases that are built after each push to the master branch, and these will have the very latest fixes and additions, but may also be more prone to regressions. They are available from [yt-dlp/yt-dlp-master-builds](https://github.com/yt-dlp/yt-dlp-master-builds/releases).
+
+When using `--update`/`-U`, a release binary will only update to its current channel.
+`--update-to CHANNEL` can be used to switch to a different channel when a newer version is available. `--update-to [CHANNEL@]TAG` can also be used to upgrade or downgrade to specific tags from a channel.
+
+You may also use `--update-to <repository>` (`<owner>/<repository>`) to update to a channel on a completely different repository. Be careful with what repository you are updating to though, there is no verification done for binaries from different repositories.
+
+Example usage:
+* `yt-dlp --update-to master` switch to the `master` channel and update to its latest release
+* `yt-dlp --update-to stable@2023.07.06` upgrade/downgrade to release to `stable` channel tag `2023.07.06`
+* `yt-dlp --update-to 2023.10.07` upgrade/downgrade to tag `2023.10.07` if it exists on the current channel
+* `yt-dlp --update-to example/yt-dlp@2023.09.24` upgrade/downgrade to the release from the `example/yt-dlp` repository, tag `2023.09.24`
+
+**Important**: Any user experiencing an issue with the `stable` release should install or update to the `nightly` release before submitting a bug report:
+```
+# To update to nightly from stable executable/binary:
+yt-dlp --update-to nightly
+
+# To install nightly with pip:
+python3 -m pip install -U --pre "yt-dlp[default]"
+```

## DEPENDENCIES
Python versions 3.8+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly.

@@ -283,7 +188,7 @@ ### Strongly recommended
There are bugs in ffmpeg that cause various issues when used alongside yt-dlp. Since ffmpeg is such an important dependency, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds#ffmpeg-static-auto-builds) with patches for some of these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specific issues solved by these builds

-**Important**: What you need is ffmpeg *binary*, **NOT** [the python package of the same name](https://pypi.org/project/ffmpeg)
+**Important**: What you need is ffmpeg *binary*, **NOT** [the Python package of the same name](https://pypi.org/project/ffmpeg)

### Networking
* [**certifi**](https://github.com/certifi/python-certifi)\* - Provides Mozilla's root certificate bundle. Licensed under [MPLv2](https://github.com/certifi/python-certifi/blob/master/LICENSE)

@@ -291,6 +196,15 @@ ### Networking
* [**websockets**](https://github.com/aaugustin/websockets)\* - For downloading over websocket. Licensed under [BSD-3-Clause](https://github.com/aaugustin/websockets/blob/main/LICENSE)
* [**requests**](https://github.com/psf/requests)\* - HTTP library. For HTTPS proxy and persistent connections support. Licensed under [Apache-2.0](https://github.com/psf/requests/blob/main/LICENSE)

+#### Impersonation
+
+The following provide support for impersonating browser requests. This may be required for some sites that employ TLS fingerprinting.
+
+* [**curl_cffi**](https://github.com/yifeikong/curl_cffi) (recommended) - Python binding for [curl-impersonate](https://github.com/lwthiker/curl-impersonate). Provides impersonation targets for Chrome, Edge and Safari. Licensed under [MIT](https://github.com/yifeikong/curl_cffi/blob/main/LICENSE)
+    * Can be installed with the `curl-cffi` group, e.g. `pip install "yt-dlp[default,curl-cffi]"`
+    * Currently only included in `yt-dlp.exe` and `yt-dlp_macos` builds
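A short sketch of enabling impersonation from pip; `--list-impersonate-targets` (documented under Network Options below) can be used to verify the install:

```
python3 -m pip install -U "yt-dlp[default,curl-cffi]"
yt-dlp --list-impersonate-targets
```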

### Metadata

* [**mutagen**](https://github.com/quodlibet/mutagen)\* - For `--embed-thumbnail` in certain formats. Licensed under [GPLv2+](https://github.com/quodlibet/mutagen/blob/master/COPYING)

@@ -321,7 +235,9 @@ ### Deprecated
## COMPILE

### Standalone PyInstaller Builds
-To build the standalone executable, you must have Python and `pyinstaller` (plus any of yt-dlp's [optional dependencies](#dependencies) if needed). The executable will be built for the same architecture (x86/ARM, 32/64 bit) as the Python used. You can run the following commands:
+To build the standalone executable, you must have Python and `pyinstaller` (plus any of yt-dlp's [optional dependencies](#dependencies) if needed). The executable will be built for the same CPU architecture as the Python used.
+
+You can run the following commands:

```
python3 devscripts/install_deps.py --include pyinstaller
|
||||||
|
|
||||||
On some systems, you may need to use `py` or `python` instead of `python3`.
|
On some systems, you may need to use `py` or `python` instead of `python3`.
|
||||||
|
|
||||||
`bundle/pyinstaller.py` accepts any arguments that can be passed to `pyinstaller`, such as `--onefile/-F` or `--onedir/-D`, which is further [documented here](https://pyinstaller.org/en/stable/usage.html#what-to-generate).
|
`python -m bundle.pyinstaller` accepts any arguments that can be passed to `pyinstaller`, such as `--onefile/-F` or `--onedir/-D`, which is further [documented here](https://pyinstaller.org/en/stable/usage.html#what-to-generate).
|
||||||
|
|
||||||
**Note**: Pyinstaller versions below 4.4 [do not support](https://github.com/pyinstaller/pyinstaller#requirements-and-tested-platforms) Python installed from the Windows store without using a virtual environment.
|
**Note**: Pyinstaller versions below 4.4 [do not support](https://github.com/pyinstaller/pyinstaller#requirements-and-tested-platforms) Python installed from the Windows store without using a virtual environment.
|
||||||
|
|
||||||
**Important**: Running `pyinstaller` directly **without** using `bundle/pyinstaller.py` is **not** officially supported. This may or may not work correctly.
|
**Important**: Running `pyinstaller` directly **instead of** using `python -m bundle.pyinstaller` is **not** officially supported. This may or may not work correctly.
|
||||||
|

### Platform-independent Binary (UNIX)
You will need the build tools `python` (3.8+), `zip`, `make` (GNU), `pandoc`\* and `pytest`\*.
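A minimal sketch of the usual flow, assuming the listed tools are on `PATH` (the `yt-dlp` target here is an assumption based on the Makefile's `all` target shown earlier):

```
make yt-dlp    # builds the platform-independent `yt-dlp` executable in the repo root
```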

@@ -346,7 +262,7 @@ ### Platform-independent Binary (UNIX)
### Standalone Py2Exe Builds (Windows)

-While we provide the option to build with [py2exe](https://www.py2exe.org), it is recommended to build [using PyInstaller](#standalone-pyinstaller-builds) instead since the py2exe builds **cannot contain `pycryptodomex`/`certifi` and needs VC++14** on the target computer to run.
+While we provide the option to build with [py2exe](https://www.py2exe.org), it is recommended to build [using PyInstaller](#standalone-pyinstaller-builds) instead since the py2exe builds **cannot contain `pycryptodomex`/`certifi` and need VC++14** on the target computer to run.

If you wish to build it anyway, install Python (if it is not already installed) and you can run the following commands:

@@ -418,7 +334,7 @@ ## General Options:
                                     URLs, but emits an error if this is not
                                     possible instead of searching
    --ignore-config                  Don't load any more configuration files
-                                    except those given by --config-locations.
+                                    except those given to --config-locations.
                                     For backward compatibility, if this option
                                     is found inside the system configuration
                                     file, the user configuration is not loaded.

@@ -482,6 +398,13 @@ ## Network Options:
                                     direct connection
    --socket-timeout SECONDS         Time to wait before giving up, in seconds
    --source-address IP              Client-side IP address to bind to
+   --impersonate CLIENT[:OS]        Client to impersonate for requests. E.g.
+                                    chrome, chrome-110, chrome:windows-10. Pass
+                                    --impersonate="" to impersonate any client.
+                                    Note that forcing impersonation for all
+                                    requests may have a detrimental impact on
+                                    download speed and stability
+   --list-impersonate-targets       List available clients to impersonate.
    -4, --force-ipv4                 Make all connections via IPv4
    -6, --force-ipv6                 Make all connections via IPv6
    --enable-file-urls               Enable file:// URLs. This is disabled by
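A brief usage sketch of the new flags (the URL is hypothetical; impersonation requires the `curl_cffi` dependency noted under DEPENDENCIES):

```
yt-dlp --list-impersonate-targets
yt-dlp --impersonate chrome "https://example.com/video"
```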

@@ -561,6 +484,9 @@ ## Video Selection:
    --max-downloads NUMBER           Abort after downloading NUMBER files
    --break-on-existing              Stop the download process when encountering
                                     a file that is in the archive
+   --no-break-on-existing           Do not stop the download process when
+                                    encountering a file that is in the archive
+                                    (default)
    --break-per-input                Alters --max-downloads, --break-on-existing,
                                     --break-match-filter, and autonumber to
                                     reset per input URL

@@ -683,7 +609,7 @@ ## Filesystem Options:
    -o, --output [TYPES:]TEMPLATE    Output filename template; see "OUTPUT
                                     TEMPLATE" for details
    --output-na-placeholder TEXT     Placeholder for unavailable fields in
-                                    "OUTPUT TEMPLATE" (default: "NA")
+                                    --output (default: "NA")
    --restrict-filenames             Restrict filenames to only ASCII characters,
                                     and avoid "&" and spaces in filenames
    --no-restrict-filenames          Allow Unicode characters, "&" and spaces in
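For illustration, a sketch combining these options (the URL is hypothetical; the template shown is yt-dlp's documented default):

```
yt-dlp -o "%(title)s [%(id)s].%(ext)s" --output-na-placeholder "unknown" --restrict-filenames "https://example.com/video"
```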

@@ -742,16 +668,17 @@ ## Filesystem Options:
                                     The name of the browser to load cookies
                                     from. Currently supported browsers are:
                                     brave, chrome, chromium, edge, firefox,
-                                    opera, safari, vivaldi. Optionally, the
-                                    KEYRING used for decrypting Chromium cookies
-                                    on Linux, the name/path of the PROFILE to
-                                    load cookies from, and the CONTAINER name
-                                    (if Firefox) ("none" for no container) can
-                                    be given with their respective separators.
-                                    By default, all containers of the most
-                                    recently accessed profile are used.
-                                    Currently supported keyrings are: basictext,
-                                    gnomekeyring, kwallet, kwallet5, kwallet6
+                                    opera, safari, vivaldi, whale. Optionally,
+                                    the KEYRING used for decrypting Chromium
+                                    cookies on Linux, the name/path of the
+                                    PROFILE to load cookies from, and the
+                                    CONTAINER name (if Firefox) ("none" for no
+                                    container) can be given with their
+                                    respective separators. By default, all
+                                    containers of the most recently accessed
+                                    profile are used. Currently supported
+                                    keyrings are: basictext, gnomekeyring,
+                                    kwallet, kwallet5, kwallet6
    --no-cookies-from-browser        Do not load cookies from browser (default)
    --cache-dir DIR                  Location in the filesystem where yt-dlp can
                                     store some downloaded information (such as
|
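The pieces of the `BROWSER[+KEYRING][:PROFILE][::CONTAINER]` syntax can be combined; both invocations below are illustrative sketches (the profile and container names are made up):

```sh
# Cookies from a specific Firefox profile and container
yt-dlp --cookies-from-browser "firefox:default-release::Personal" "https://example.com/some-video"

# Chromium cookies on Linux, decrypted via the GNOME keyring
yt-dlp --cookies-from-browser "chromium+gnomekeyring" "https://example.com/some-video"
```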
@@ -834,6 +761,7 @@ ## Verbosity and Simulation Options:
                                     accessible under "progress" key. E.g.
                                     --console-title --progress-template
                                     "download-title:%(info.id)s-%(progress.eta)s"
+    --progress-delta SECONDS        Time between progress output (default: 0)
     -v, --verbose                   Print various debugging information
     --dump-pages                    Print downloaded pages encoded using base64
                                     to debug problems (very verbose)
@@ -1172,12 +1100,12 @@ # CONFIGURATION
 You can configure yt-dlp by placing any supported command line option in a configuration file. The configuration is loaded from the following locations:
 
 1. **Main Configuration**:
-    * The file given by `--config-location`
+    * The file given to `--config-location`
 1. **Portable Configuration**: (Recommended for portable installations)
     * If using a binary, `yt-dlp.conf` in the same directory as the binary
     * If running from source-code, `yt-dlp.conf` in the parent directory of `yt_dlp`
 1. **Home Configuration**:
-    * `yt-dlp.conf` in the home path given by `-P`
+    * `yt-dlp.conf` in the home path given to `-P`
     * If `-P` is not given, the current directory is searched
 1. **User Configuration**:
     * `${XDG_CONFIG_HOME}/yt-dlp.conf`
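A minimal sketch of a user-level configuration file in the location listed above (the options chosen are only examples):

```sh
# Create ${XDG_CONFIG_HOME}/yt-dlp.conf with a few defaults;
# lines starting with "#" are comments inside the config file
cat > "${XDG_CONFIG_HOME:-$HOME/.config}/yt-dlp.conf" <<'EOF'
# Always embed metadata and keep filenames ASCII-safe
--embed-metadata
--restrict-filenames
EOF
yt-dlp "https://example.com/some-video"   # picks the file up automatically
```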
@@ -1296,7 +1224,7 @@ # OUTPUT TEMPLATE
 Additionally, you can set different output templates for the various metadata files separately from the general output template by specifying the type of file followed by the template separated by a colon `:`. The different file types supported are `subtitle`, `thumbnail`, `description`, `annotation` (deprecated), `infojson`, `link`, `pl_thumbnail`, `pl_description`, `pl_infojson`, `chapter`, `pl_video`. E.g. `-o "%(title)s.%(ext)s" -o "thumbnail:%(title)s\%(title)s.%(ext)s"` will put the thumbnails in a folder with the same name as the video. If any of the templates is empty, that type of file will not be written. E.g. `--write-thumbnail -o "thumbnail:"` will write thumbnails only for playlists and not for video.
 
-<a id="outtmpl-postprocess-note"/>
+<a id="outtmpl-postprocess-note"></a>
 
 **Note**: Due to post-processing (i.e. merging etc.), the actual output filename might differ. Use `--print after_move:filepath` to get the name after all post-processing is complete.
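A worked example of the per-type templates described above (the paths are illustrative):

```sh
# Videos use one template, infojson files another; an empty
# thumbnail template skips writing per-video thumbnails
yt-dlp --write-info-json --write-thumbnail \
    -o "%(title)s [%(id)s].%(ext)s" \
    -o "infojson:meta/%(id)s.%(ext)s" \
    -o "thumbnail:" \
    "https://example.com/some-video"
```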
@@ -1310,6 +1238,8 @@ # OUTPUT TEMPLATE
 - `description` (string): The description of the video
 - `display_id` (string): An alternative identifier for the video
 - `uploader` (string): Full name of the video uploader
+- `uploader_id` (string): Nickname or id of the video uploader
+- `uploader_url` (string): URL to the video uploader's profile
 - `license` (string): License name the video is licensed under
 - `creators` (list): The creators of the video
 - `creator` (string): The creators of the video; comma-separated
@@ -1320,9 +1250,9 @@ # OUTPUT TEMPLATE
 - `release_year` (numeric): Year (YYYY) when the video or album was released
 - `modified_timestamp` (numeric): UNIX timestamp of the moment the video was last modified
 - `modified_date` (string): The date (YYYYMMDD) when the video was last modified in UTC
-- `uploader_id` (string): Nickname or id of the video uploader
 - `channel` (string): Full name of the channel the video is uploaded on
 - `channel_id` (string): Id of the channel
+- `channel_url` (string): URL of the channel
 - `channel_follower_count` (numeric): Number of followers of the channel
 - `channel_is_verified` (boolean): Whether the channel is verified on the platform
 - `location` (string): Physical location where the video was filmed
@@ -1362,7 +1292,10 @@ # OUTPUT TEMPLATE
 - `webpage_url_basename` (string): The basename of the webpage URL
 - `webpage_url_domain` (string): The domain of the webpage URL
 - `original_url` (string): The URL given by the user (or same as `webpage_url` for playlist entries)
+- `categories` (list): List of categories the video belongs to
+- `tags` (list): List of tags assigned to the video
+- `cast` (list): List of cast members
 
 All the fields in [Filtering Formats](#filtering-formats) can also be used
 
 Available for the video that belongs to some logical chapter or section:
|
||||||
Available for the video that is an episode of some series or programme:
|
Available for the video that is an episode of some series or programme:
|
||||||
|
|
||||||
- `series` (string): Title of the series or programme the video episode belongs to
|
- `series` (string): Title of the series or programme the video episode belongs to
|
||||||
|
- `series_id` (string): Id of the series or programme the video episode belongs to
|
||||||
- `season` (string): Title of the season the video episode belongs to
|
- `season` (string): Title of the season the video episode belongs to
|
||||||
- `season_number` (numeric): Number of the season the video episode belongs to
|
- `season_number` (numeric): Number of the season the video episode belongs to
|
||||||
- `season_id` (string): Id of the season the video episode belongs to
|
- `season_id` (string): Id of the season the video episode belongs to
|
||||||
|
@@ -1546,9 +1480,9 @@ ## Filtering Formats
 - `width`: Width of the video, if known
 - `height`: Height of the video, if known
 - `aspect_ratio`: Aspect ratio of the video, if known
-- `tbr`: Average bitrate of audio and video in KBit/s
-- `abr`: Average audio bitrate in KBit/s
-- `vbr`: Average video bitrate in KBit/s
+- `tbr`: Average bitrate of audio and video in [kbps](## "1000 bits/sec")
+- `abr`: Average audio bitrate in [kbps](## "1000 bits/sec")
+- `vbr`: Average video bitrate in [kbps](## "1000 bits/sec")
 - `asr`: Audio sampling rate in Hertz
 - `fps`: Frame rate
 - `audio_channels`: The number of audio channels
|
||||||
|
|
||||||
**Note**: None of the aforementioned meta fields are guaranteed to be present since this solely depends on the metadata obtained by particular extractor, i.e. the metadata offered by the website. Any other field made available by the extractor can also be used for filtering.
|
**Note**: None of the aforementioned meta fields are guaranteed to be present since this solely depends on the metadata obtained by particular extractor, i.e. the metadata offered by the website. Any other field made available by the extractor can also be used for filtering.
|
||||||
|
|
||||||
Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "bv[height<=?720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. You can also use the filters with `all` to download all formats that satisfy the filter, e.g. `-f "all[vcodec=none]"` selects all audio-only formats.
|
Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "bv[height<=?720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 kbps. You can also use the filters with `all` to download all formats that satisfy the filter, e.g. `-f "all[vcodec=none]"` selects all audio-only formats.
|
||||||
|
|
||||||
Format selectors can also be grouped using parentheses; e.g. `-f "(mp4,webm)[height<480]"` will download the best pre-merged mp4 and webm formats with a height lower than 480.
|
Format selectors can also be grouped using parentheses; e.g. `-f "(mp4,webm)[height<480]"` will download the best pre-merged mp4 and webm formats with a height lower than 480.
|
||||||
|
|
||||||
|
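Putting the filter syntax together, an end-to-end example might look like this (the URL is a placeholder):

```sh
# Best video up to 720p (or of unknown height) above 500 kbps, plus best
# audio; fall back to the best combined format if nothing matches
yt-dlp -f "bv[height<=?720][tbr>500]+ba/b" "https://example.com/some-video"
```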
@@ -1605,10 +1539,10 @@ ## Sorting Formats
 - `fps`: Framerate of video
 - `hdr`: The dynamic range of the video (`DV` > `HDR12` > `HDR10+` > `HDR10` > `HLG` > `SDR`)
 - `channels`: The number of audio channels
-- `tbr`: Total average bitrate in KBit/s
-- `vbr`: Average video bitrate in KBit/s
-- `abr`: Average audio bitrate in KBit/s
-- `br`: Average bitrate in KBit/s, `tbr`/`vbr`/`abr`
+- `tbr`: Total average bitrate in [kbps](## "1000 bits/sec")
+- `vbr`: Average video bitrate in [kbps](## "1000 bits/sec")
+- `abr`: Average audio bitrate in [kbps](## "1000 bits/sec")
+- `br`: Average bitrate in [kbps](## "1000 bits/sec"), `tbr`/`vbr`/`abr`
 - `asr`: Audio sample rate in Hz
 
 **Deprecation warning**: Many of these fields have (currently undocumented) aliases, that may be removed in a future version. It is recommended to use only the documented field names.
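For instance, the sort fields above can be passed to `-S` like this (the field order is only an example):

```sh
# Prefer higher framerate first, then higher total bitrate
yt-dlp -S "fps,tbr" "https://example.com/some-video"
```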
@@ -1750,9 +1684,9 @@ # MODIFYING METADATA
 The metadata obtained by the extractors can be modified by using `--parse-metadata` and `--replace-in-metadata`
 
-`--replace-in-metadata FIELDS REGEX REPLACE` is used to replace text in any metadata field using a [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax). [Backreferences](https://docs.python.org/3/library/re.html?highlight=backreferences#re.sub) can be used in the replace string for advanced use.
+`--replace-in-metadata FIELDS REGEX REPLACE` is used to replace text in any metadata field using a [Python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax). [Backreferences](https://docs.python.org/3/library/re.html?highlight=backreferences#re.sub) can be used in the replace string for advanced use.
 
-The general syntax of `--parse-metadata FROM:TO` is to give the name of a field or an [output template](#output-template) to extract data from, and the format to interpret it as, separated by a colon `:`. Either a [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax) with named capture groups, a single field name, or a similar syntax to the [output template](#output-template) (only `%(field)s` formatting is supported) can be used for `TO`. The option can be used multiple times to parse and modify various fields.
+The general syntax of `--parse-metadata FROM:TO` is to give the name of a field or an [output template](#output-template) to extract data from, and the format to interpret it as, separated by a colon `:`. Either a [Python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax) with named capture groups, a single field name, or a similar syntax to the [output template](#output-template) (only `%(field)s` formatting is supported) can be used for `TO`. The option can be used multiple times to parse and modify various fields.
 
 Note that these options preserve their relative order, allowing replacements to be made in parsed fields and vice versa. Also, any field thus created can be used in the [output template](#output-template) and will also affect the media file's metadata added when using `--embed-metadata`.
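As a sketch of both options working together (the `%(artist)s - %(track)s` pattern assumes titles of that shape):

```sh
# Parse "Artist - Track" titles into fields, then squeeze runs of
# whitespace in the parsed artist field down to single spaces
yt-dlp --parse-metadata "title:%(artist)s - %(track)s" \
       --replace-in-metadata "artist" "\s+" " " \
       --embed-metadata "https://example.com/some-video"
```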
@@ -1820,7 +1754,7 @@ # Replace all spaces and "_" in title and uploader with a `-`
 # EXTRACTOR ARGUMENTS
 
-Some extractors accept additional arguments which can be passed using `--extractor-args KEY:ARGS`. `ARGS` is a `;` (semicolon) separated string of `ARG=VAL1,VAL2`. E.g. `--extractor-args "youtube:player-client=android_embedded,web;include_live_dash" --extractor-args "funimation:version=uncut"`
+Some extractors accept additional arguments which can be passed using `--extractor-args KEY:ARGS`. `ARGS` is a `;` (semicolon) separated string of `ARG=VAL1,VAL2`. E.g. `--extractor-args "youtube:player-client=android_embedded,web;formats=incomplete" --extractor-args "funimation:version=uncut"`
 
 Note: In CLI, `ARG` can use `-` instead of `_`; e.g. `youtube:player-client` becomes `youtube:player_client`
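Following that syntax, several arguments for one extractor are joined with `;` inside a single quoted value, and separate extractors take separate `--extractor-args` options:

```sh
# Two arguments for the youtube extractor, one for funimation
yt-dlp --extractor-args "youtube:player-client=web;formats=incomplete" \
       --extractor-args "funimation:version=uncut" \
       "https://example.com/some-video"
```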
@@ -1829,7 +1763,7 @@ # EXTRACTOR ARGUMENTS
 #### youtube
 * `lang`: Prefer translated metadata (`title`, `description` etc) of this language code (case-sensitive). By default, the video primary language metadata is preferred, with a fallback to `en` translated. See [youtube.py](https://github.com/yt-dlp/yt-dlp/blob/c26f9b991a0681fd3ea548d535919cec1fbbd430/yt_dlp/extractor/youtube.py#L381-L390) for the list of supported content language codes
 * `skip`: One or more of `hls`, `dash` or `translated_subs` to skip extraction of the m3u8 manifests, dash manifests and [auto-translated subtitles](https://github.com/yt-dlp/yt-dlp/issues/4090#issuecomment-1158102032) respectively
-* `player_client`: Clients to extract video data from. The main clients are `web`, `android` and `ios` with variants `_music`, `_embedded`, `_embedscreen`, `_creator` (e.g. `web_embedded`); and `mweb`, `mweb_embedscreen` and `tv_embedded` (agegate bypass) with no variants. By default, `ios,android,web` is used, but `tv_embedded` and `creator` variants are added as required for age-gated videos. Similarly, the music variants are added for `music.youtube.com` urls. You can use `all` to use all the clients, and `default` for the default clients.
+* `player_client`: Clients to extract video data from. The main clients are `web`, `ios` and `android`, with variants `_music`, `_embedded`, `_embedscreen`, `_creator` (e.g. `web_embedded`); and `mweb`, `mweb_embedscreen` and `tv_embedded` (agegate bypass) with no variants. By default, `ios,web` is used, but `tv_embedded` and `creator` variants are added as required for age-gated videos. Similarly, the music variants are added for `music.youtube.com` urls. The `android` clients will always be given lowest priority since their formats are broken. You can use `all` to use all the clients, and `default` for the default clients.
 * `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause some issues. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) for more details
 * `player_params`: YouTube player parameters to use for player requests. Will overwrite any default ones set by yt-dlp.
 * `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side)
@@ -1855,8 +1789,7 @@ #### funimation
 * `version`: The video version to extract - `uncut` or `simulcast`
 
 #### crunchyrollbeta (Crunchyroll)
-* `format`: Which stream type(s) to extract (default: `adaptive_hls`). Potentially useful values include `adaptive_hls`, `adaptive_dash`, `vo_adaptive_hls`, `vo_adaptive_dash`, `download_hls`, `download_dash`, `multitrack_adaptive_hls_v2`
-* `hardsub`: One or more hardsub versions to extract (in order of preference), or `all` (default: `None` = no hardsubs will be extracted), e.g. `crunchyrollbeta:hardsub=en-US,de-DE`
+* `hardsub`: Preference order for which hardsub versions to extract, or `all` (default: `None` = no hardsubs), e.g. `crunchyrollbeta:hardsub=en-US,None`
 
 #### vikichannel
 * `video_types`: Types of videos to download - one or more of `episodes`, `movies`, `clips`, `trailers`
@@ -1879,9 +1812,13 @@ #### niconicochannelplus
 * `max_comments`: Maximum number of comments to extract - default is `120`
 
 #### tiktok
-* `api_hostname`: Hostname to use for mobile API requests, e.g. `api-h2.tiktokv.com`
-* `app_version`: App version to call mobile APIs with - should be set along with `manifest_app_version`, e.g. `20.2.1`
-* `manifest_app_version`: Numeric app version to call mobile APIs with, e.g. `221`
+* `api_hostname`: Hostname to use for mobile API calls, e.g. `api22-normal-c-alisg.tiktokv.com`
+* `app_name`: Default app name to use with mobile API calls, e.g. `trill`
+* `app_version`: Default app version to use with mobile API calls - should be set along with `manifest_app_version`, e.g. `34.1.2`
+* `manifest_app_version`: Default numeric app version to use with mobile API calls, e.g. `2023401020`
+* `aid`: Default app ID to use with mobile API calls, e.g. `1180`
+* `app_info`: Enable mobile API extraction with one or more app info strings in the format of `<iid>/[app_name]/[app_version]/[manifest_app_version]/[aid]`, where `iid` is the unique app install ID. `iid` is the only required value; all other values and their `/` separators can be omitted, e.g. `tiktok:app_info=1234567890123456789` or `tiktok:app_info=123,456/trill///1180,789//34.0.1/340001`
+* `device_id`: Enable mobile API extraction with a genuine device ID to be used with mobile API calls. Default is a random 19-digit string
 
 #### rokfinchannel
 * `tab`: Which tab to download - one of `new`, `top`, `videos`, `podcasts`, `streams`, `stacks`
@@ -1901,9 +1838,18 @@ #### nhkradirulive (NHK らじる★らじる LIVE)
 #### nflplusreplay
 * `type`: Type(s) of game replays to extract. Valid types are: `full_game`, `full_game_spanish`, `condensed_game` and `all_22`. You can use `all` to extract all available replay types, which is the default
 
+#### jiocinema
+* `refresh_token`: The `refreshToken` UUID from browser local storage can be passed to extend the life of your login session when logging in with `token` as username and the `accessToken` from browser local storage as password
+
 #### jiosaavn
 * `bitrate`: Audio bitrates to request. One or more of `16`, `32`, `64`, `128`, `320`. Default is `128,320`
 
+#### afreecatvlive
+* `cdn`: One or more CDN IDs to use with the API call for stream URLs, e.g. `gcp_cdn`, `gs_cdn_pc_app`, `gs_cdn_mobile_web`, `gs_cdn_pc_web`
+
+#### soundcloud
+* `formats`: Formats to request from the API. Requested values should be in the format of `{protocol}_{extension}` (omitting the bitrate), e.g. `hls_opus,http_aac`. The `*` character functions as a wildcard, e.g. `*_mp3`, and can be passed by itself to request all formats. Known protocols include `http`, `hls` and `hls-aes`; known extensions include `aac`, `opus` and `mp3`. Original `download` formats are always extracted. Default is `http_aac,hls_aac,http_opus,hls_opus,http_mp3,hls_mp3`
+
 **Note**: These options may be changed/removed in the future without concern for backward compatibility
 
 <!-- MANPAGE: MOVE "INSTALLATION" SECTION HERE -->
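For example, requesting only Opus formats from SoundCloud via the `formats` argument above (the track URL is a placeholder):

```sh
# Wildcard-select every Opus format the API offers
yt-dlp --extractor-args "soundcloud:formats=*_opus" "https://soundcloud.com/artist/track"
```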
@@ -1961,6 +1907,7 @@ ## Installing Plugins
 
 `.zip`, `.egg` and `.whl` archives containing a `yt_dlp_plugins` namespace folder in their root are also supported as plugin packages.
 
 * e.g. `${XDG_CONFIG_HOME}/yt-dlp/plugins/mypluginpkg.zip` where `mypluginpkg.zip` contains `yt_dlp_plugins/<type>/myplugin.py`
 
 Run yt-dlp with `--verbose` to check if the plugin has been loaded.
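A rough sketch of packaging and installing such a plugin archive (all names are hypothetical, and the plugin body is only a skeleton; the real plugin API is beyond this excerpt):

```sh
# Lay out a plugin package and zip it into the user plugin directory
mkdir -p yt_dlp_plugins/extractor
cat > yt_dlp_plugins/extractor/myplugin.py <<'EOF'
# Hypothetical extractor plugin skeleton
from yt_dlp.extractor.common import InfoExtractor

class MyPluginIE(InfoExtractor):
    _VALID_URL = r'https?://example\.com/watch/(?P<id>\d+)'
EOF
mkdir -p "${XDG_CONFIG_HOME:-$HOME/.config}/yt-dlp/plugins"
zip -r "${XDG_CONFIG_HOME:-$HOME/.config}/yt-dlp/plugins/mypluginpkg.zip" yt_dlp_plugins
yt-dlp --verbose 2>&1 | grep -i plugin   # confirm it was loaded
```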
@@ -2174,9 +2121,106 @@ #### Use a custom format selector
         ydl.download(URLS)
 ```
 
-<!-- MANPAGE: MOVE "NEW FEATURES" SECTION HERE -->
-
-# DEPRECATED OPTIONS
+# CHANGES FROM YOUTUBE-DL
+
+### New features
+
+* Forked from [**yt-dlc@f9401f2**](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee) and merged with [**youtube-dl@a08f2b7**](https://github.com/ytdl-org/youtube-dl/commit/a08f2b7e4567cdc50c0614ee0a4ffdff49b8b6e6) ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))
+
+* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API
+
+* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will now be preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples))
+
+* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, embedding thumbnails in mp4/ogg/opus, playlist infojson etc. Note that NicoNico livestreams are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.
+
+* **YouTube improvements**:
+    * Supports Clips, Stories (`ytstories:<channel UCID>`), Search (including filters)**\***, YouTube Music Search, Channel-specific search, Search prefixes (`ytsearch:`, `ytsearchdate:`)**\***, Mixes, and Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`, `:ytnotif`)
+    * Fix for [n-sig based throttling](https://github.com/ytdl-org/youtube-dl/issues/29326) **\***
+    * Supports some (but not all) age-gated content without cookies
+    * Download livestreams from the start using `--live-from-start` (*experimental*)
+    * Channel URLs download all uploads of the channel, including shorts and live
+
+* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]`
+
+* **Download time range**: Videos can be downloaded partially based on either timestamps or chapters using `--download-sections`
+
+* **Split video by chapters**: Videos can be split into multiple files based on chapters using `--split-chapters`
+
+* **Multi-threaded fragment downloads**: Download multiple fragments of m3u8/mpd videos in parallel. Use the `--concurrent-fragments` (`-N`) option to set the number of threads used
+
+* **Aria2c with HLS/DASH**: You can use `aria2c` as the external downloader for DASH(mpd) and HLS(m3u8) formats
+
+* **New and fixed extractors**: Many new extractors have been added and a lot of existing ones have been fixed. See the [changelog](Changelog.md) or the [list of supported sites](supportedsites.md)
+
+* **New MSOs**: Philo, Spectrum, SlingTV, Cablevision, RCN etc.
+
+* **Subtitle extraction from manifests**: Subtitles can be extracted from streaming media manifests. See [commit/be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
+
+* **Multiple paths and output templates**: You can give different [output templates](#output-template) and download paths for different types of files. You can also set a temporary path where intermediary files are downloaded to using `--paths` (`-P`)
+
+* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [CONFIGURATION](#configuration) for details
+
+* **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata`
+
+* **Other new options**: Many new options have been added such as `--alias`, `--print`, `--concat-playlist`, `--wait-for-video`, `--retry-sleep`, `--sleep-requests`, `--convert-thumbnails`, `--force-download-archive`, `--force-overwrites`, `--break-match-filter` etc
+
+* **Improvements**: Regex and other operators in `--format`/`--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio, multiple `--config-locations`, `--exec` at different stages, etc
+
+* **Plugins**: Extractors and PostProcessors can be loaded from an external file. See [plugins](#plugins) for details
+
+* **Self updater**: The releases can be updated using `yt-dlp -U`, and downgraded using `--update-to` if required
+
+* **Automated builds**: [Nightly/master builds](#update-channels) can be used with `--update-to nightly` and `--update-to master`
+
+See the [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/commits) for the full list of changes
+
+Features marked with a **\*** have been back-ported to youtube-dl
+
+### Differences in default behavior
+
+Some of yt-dlp's default options are different from those of youtube-dl and youtube-dlc:
+
+* yt-dlp supports only [Python 3.8+](## "Windows 7"), and *may* remove support for more versions as they [become EOL](https://devguide.python.org/versions/#python-release-cycle); while [youtube-dl still supports Python 2.6+ and 3.2+](https://github.com/ytdl-org/youtube-dl/issues/30568#issue-1118238743)
+* The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`) no longer work. See [removed options](#Removed) for details
+* `avconv` is not supported as an alternative to `ffmpeg`
+* yt-dlp stores config files in slightly different locations from youtube-dl. See [CONFIGURATION](#configuration) for a list of correct locations
+* The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s-%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
+* The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
+* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
+* Unlike youtube-dlc, yt-dlp does not allow merging multiple audio/video streams into one file by default (since this conflicts with the use of `-f bv*+ba`). If needed, this feature must be enabled using `--audio-multistreams` and `--video-multistreams`. You can also use `--compat-options multistreams` to enable both
+* `--no-abort-on-error` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead
+* When writing metadata files such as thumbnails, description or infojson, the same information (if available) is also written for playlists. Use `--no-write-playlist-metafiles` or `--compat-options no-playlist-metafiles` to not write these files
+* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-info-json`. Use `--no-embed-info-json` or `--compat-options no-attach-info-json` to revert this
+* Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, the `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this
+* `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
+* The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
+* Live chats (if available) are considered as subtitles. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent any live chat/danmaku from downloading
+* YouTube channel URLs download all uploads of the channel. To download only the videos in a specific tab, pass the tab's URL. If the channel does not show the requested tab, an error will be raised. Also, `/live` URLs raise an error if there are no live videos instead of silently downloading the entire channel. You may use `--compat-options no-youtube-channel-redirect` to revert all these redirections
+* Unavailable videos are also listed for YouTube playlists. Use `--compat-options no-youtube-unavailable-videos` to remove this
+* The upload dates extracted from YouTube are in UTC [when available](https://github.com/yt-dlp/yt-dlp/blob/89e4d86171c7b7c997c77d4714542e0383bf0db0/yt_dlp/extractor/youtube.py#L3898-L3900). Use `--compat-options no-youtube-prefer-utc-upload-date` to prefer the non-UTC upload date.
+* If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
+* Thumbnail embedding in `mp4` is done with mutagen if possible. Use `--compat-options embed-thumbnail-atomicparsley` to force the use of AtomicParsley instead
+* Some internal metadata such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this
+* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this
+* `certifi` will be used for SSL root certificates, if installed. If you want to use system certificates (e.g. self-signed), use `--compat-options no-certifi`
+* yt-dlp's sanitization of invalid characters in filenames is different/smarter than in youtube-dl. You can use `--compat-options filename-sanitization` to revert to youtube-dl's behavior
+* ~~yt-dlp tries to parse the external downloader outputs into the standard progress output if possible (Currently implemented: [aria2c](https://github.com/yt-dlp/yt-dlp/issues/5931)). You can use `--compat-options no-external-downloader-progress` to get the downloader output as-is~~
+* yt-dlp versions between 2021.09.01 and 2023.01.02 applied `--match-filter` to nested playlists. This was an unintentional side-effect of [8f18ac](https://github.com/yt-dlp/yt-dlp/commit/8f18aca8717bb0dd49054555af8d386e5eda3a88) and is fixed in [d7b460](https://github.com/yt-dlp/yt-dlp/commit/d7b460d0e5fc710950582baed2e3fc616ed98a80). Use `--compat-options playlist-match-filter` to revert this
+* yt-dlp versions between 2021.11.10 and 2023.06.21 estimated `filesize_approx` values for fragmented/manifest formats. This was added for convenience in [f2fe69](https://github.com/yt-dlp/yt-dlp/commit/f2fe69c7b0d208bdb1f6292b4ae92bc1e1a7444a), but was reverted in [0dff8e](https://github.com/yt-dlp/yt-dlp/commit/0dff8e4d1e6e9fb938f4256ea9af7d81f42fd54f) due to the potentially extreme inaccuracy of the estimated values. Use `--compat-options manifest-filesize-approx` to keep extracting the estimated values
+* yt-dlp uses modern http client backends such as `requests`. Use `--compat-options prefer-legacy-http-handler` to prefer the legacy http handler (`urllib`) for standard http requests.
+* The sub-modules `swfinterp` and `casefold` are removed.
+
+For ease of use, a few more compat options are available:
+
+* `--compat-options all`: Use all compat options (Do NOT use)
+* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter,-manifest-filesize-approx`
+* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter,-manifest-filesize-approx`
+* `--compat-options 2021`: Same as `--compat-options 2022,no-certifi,filename-sanitization,no-youtube-prefer-utc-upload-date`
+* `--compat-options 2022`: Same as `--compat-options 2023,playlist-match-filter,no-external-downloader-progress,prefer-legacy-http-handler,manifest-filesize-approx`
+* `--compat-options 2023`: Currently does nothing. Use this to enable all future compat options
+
+### Deprecated options
 
 These are all the deprecated options and the current alternative to achieve the same effect
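As a concrete illustration of the compat options listed above, reverting to youtube-dl's filename and format behavior could look like:

```sh
# youtube-dl-style output template, format selector and sort order
yt-dlp --compat-options filename,format-spec,format-sort "https://example.com/some-video"
```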
@@ -2212,7 +2256,6 @@ #### Redundant options
     --no-playlist-reverse            Default
     --no-colors                      --color no_color
 
-
 #### Not recommended
 While these options still work, their use is not recommended since there are other alternatives to achieve the same
|
||||||
--geo-bypass-country CODE --xff CODE
|
--geo-bypass-country CODE --xff CODE
|
||||||
--geo-bypass-ip-block IP_BLOCK --xff IP_BLOCK
|
--geo-bypass-ip-block IP_BLOCK --xff IP_BLOCK
|
||||||
|
|
||||||
|
|
||||||
#### Developer options
|
#### Developer options
|
||||||
These options are not intended to be used by the end-user
|
These options are not intended to be used by the end-user
|
||||||
|
|
||||||
|
@ -2249,7 +2291,6 @@ #### Developer options
|
||||||
--allow-unplayable-formats List unplayable formats also
|
--allow-unplayable-formats List unplayable formats also
|
||||||
--no-allow-unplayable-formats Default
|
--no-allow-unplayable-formats Default
|
||||||
|
|
||||||
|
|
||||||
#### Old aliases
|
#### Old aliases
|
||||||
These are aliases that are no longer documented for various reasons
|
These are aliases that are no longer documented for various reasons
|
||||||
|
|
||||||
|
@ -2295,6 +2336,7 @@ #### No longer supported
|
||||||
--write-annotations No supported site has annotations now
|
--write-annotations No supported site has annotations now
|
||||||
--no-write-annotations Default
|
--no-write-annotations Default
|
||||||
--compat-options seperate-video-versions No longer needed
|
--compat-options seperate-video-versions No longer needed
|
||||||
|
--compat-options no-youtube-prefer-utc-upload-date No longer supported
|
||||||
|
|
||||||
#### Removed
|
#### Removed
|
||||||
These options were deprecated since 2014 and have now been entirely removed
|
These options were deprecated since 2014 and have now been entirely removed
|
||||||
|
@ -2302,6 +2344,7 @@ #### Removed
|
||||||
-A, --auto-number -o "%(autonumber)s-%(id)s.%(ext)s"
|
-A, --auto-number -o "%(autonumber)s-%(id)s.%(ext)s"
|
||||||
-t, -l, --title, --literal -o "%(title)s-%(id)s.%(ext)s"
|
-t, -l, --title, --literal -o "%(title)s-%(id)s.%(ext)s"
|
||||||
|
|
||||||
|
|
||||||
# CONTRIBUTING
|
# CONTRIBUTING
|
||||||
See [CONTRIBUTING.md](CONTRIBUTING.md#contributing-to-yt-dlp) for instructions on [Opening an Issue](CONTRIBUTING.md#opening-an-issue) and [Contributing code to the project](CONTRIBUTING.md#developer-instructions)
|
See [CONTRIBUTING.md](CONTRIBUTING.md#contributing-to-yt-dlp) for instructions on [Opening an Issue](CONTRIBUTING.md#opening-an-issue) and [Contributing code to the project](CONTRIBUTING.md#developer-instructions)
|
||||||
|
|
||||||
|
|
|
@@ -1 +0,0 @@
-# Empty file

10 bundle/docker/compose.yml Normal file
@@ -0,0 +1,10 @@
+services:
+    static:
+        build: static
+        environment:
+            channel: ${channel}
+            origin: ${origin}
+            version: ${version}
+        volumes:
+            - ~/build:/build
+            - ../..:/yt-dlp

21 bundle/docker/static/Dockerfile Normal file
@@ -0,0 +1,21 @@
+FROM alpine:3.19 as base
+
+RUN apk --update add --no-cache \
+    build-base \
+    python3 \
+    pipx \
+    ;
+
+RUN pipx install pyinstaller
+# Requires above step to prepare the shared venv
+RUN ~/.local/share/pipx/shared/bin/python -m pip install -U wheel
+RUN apk --update add --no-cache \
+    scons \
+    patchelf \
+    binutils \
+    ;
+RUN pipx install staticx
+
+WORKDIR /yt-dlp
+COPY entrypoint.sh /entrypoint.sh
+ENTRYPOINT /entrypoint.sh

13 bundle/docker/static/entrypoint.sh Executable file
@@ -0,0 +1,13 @@
+#!/bin/ash
+set -e
+
+source ~/.local/share/pipx/venvs/pyinstaller/bin/activate
+python -m devscripts.install_deps --include secretstorage
+python -m devscripts.make_lazy_extractors
+python devscripts/update-version.py -c "${channel}" -r "${origin}" "${version}"
+python -m bundle.pyinstaller
+deactivate
+
+source ~/.local/share/pipx/venvs/staticx/bin/activate
+staticx /yt-dlp/dist/yt-dlp_linux /build/yt-dlp_linux
+deactivate
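A sketch of driving this static build locally, assuming the compose file above is used as-is (the version string is a placeholder):

```sh
# Build the staticx-wrapped Linux binary via the "static" service
cd bundle/docker
channel=stable origin=yt-dlp/yt-dlp version=2024.01.01 \
    docker compose up --build static
# Per the volume mapping, the result lands in ~/build/yt-dlp_linux
```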
@@ -20,7 +20,7 @@ def main():
         'py2exe builds do not support pycryptodomex and needs VC++14 to run. '
         'It is recommended to run "pyinst.py" to build using pyinstaller instead')
 
-    return freeze(
+    freeze(
         console=[{
             'script': './yt_dlp/__main__.py',
             'dest_base': 'yt-dlp',
@@ -28,7 +28,7 @@ def main():
         }],
         version_info={
             'version': VERSION,
-            'description': 'A youtube-dl fork with additional features and patches',
+            'description': 'A feature-rich command-line audio/video downloader',
             'comments': 'Official repository: <https://github.com/yt-dlp/yt-dlp>',
             'product_name': 'yt-dlp',
             'product_version': VERSION,

Binary file not shown.

@@ -1 +0,0 @@
-# Empty file needed to make devscripts.utils properly importable from outside
@@ -120,5 +120,54 @@
         "when": "15f22b4880b6b3f71f350c64d70976ae65b9f1ca",
         "short": "[webvtt] Allow spaces before newlines for CueBlock (#7681)",
         "authors": ["TSRBerry"]
+    },
+    {
+        "action": "change",
+        "when": "4ce57d3b873c2887814cbec03d029533e82f7db5",
+        "short": "[ie] Support multi-period MPD streams (#6654)",
+        "authors": ["alard", "pukkandan"]
+    },
+    {
+        "action": "change",
+        "when": "aa7e9ae4f48276bd5d0173966c77db9484f65a0a",
+        "short": "[ie/xvideos] Support new URL format (#9502)",
+        "authors": ["sta1us"]
+    },
+    {
+        "action": "remove",
+        "when": "22e4dfacb61f62dfbb3eb41b31c7b69ba1059b80"
+    },
+    {
+        "action": "change",
+        "when": "e3a3ed8a981d9395c4859b6ef56cd02bc3148db2",
+        "short": "[cleanup:ie] No `from` stdlib imports in extractors",
+        "authors": ["pukkandan"]
+    },
+    {
+        "action": "add",
+        "when": "9590cc6b4768e190183d7d071a6c78170889116a",
+        "short": "[priority] Security: [[CVE-2024-22423](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-22423)] [Prevent RCE when using `--exec` with `%q` on Windows](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-hjq6-52gw-2g7p)\n    - The shell escape function now properly escapes `%`, `\\` and `\\n`.\n    - `utils.Popen` has been patched accordingly."
+    },
+    {
+        "action": "change",
+        "when": "41ba4a808b597a3afed78c89675a30deb6844450",
+        "short": "[ie/tiktok] Extract via mobile API only if extractor-arg is passed (#9938)",
+        "authors": ["bashonly"]
+    },
+    {
+        "action": "remove",
+        "when": "6e36d17f404556f0e3a43f441c477a71a91877d9"
+    },
+    {
+        "action": "change",
+        "when": "beaf832c7a9d57833f365ce18f6115b88071b296",
+        "short": "[ie/soundcloud] Add `formats` extractor-arg (#10004)",
+        "authors": ["bashonly", "Grub4K"]
+    },
+    {
+        "action": "change",
+        "when": "5c019f6328ad40d66561eac3c4de0b3cd070d0f6",
+        "short": "[cleanup] Misc (#9765)",
+        "authors": ["bashonly", "Grub4K", "seproDev"]
     }
 ]
@@ -10,6 +10,8 @@
 import re
 import subprocess
 
+from pathlib import Path
+
 from devscripts.tomlparse import parse_toml
 from devscripts.utils import read_file
@@ -17,37 +19,50 @@
 def parse_args():
     parser = argparse.ArgumentParser(description='Install dependencies for yt-dlp')
     parser.add_argument(
-        'input', nargs='?', metavar='TOMLFILE', default='pyproject.toml', help='Input file (default: %(default)s)')
+        'input', nargs='?', metavar='TOMLFILE', default=Path(__file__).parent.parent / 'pyproject.toml',
+        help='input file (default: %(default)s)')
     parser.add_argument(
-        '-e', '--exclude', metavar='REQUIREMENT', action='append', help='Exclude a required dependency')
+        '-e', '--exclude', metavar='DEPENDENCY', action='append',
+        help='exclude a dependency')
     parser.add_argument(
-        '-i', '--include', metavar='GROUP', action='append', help='Include an optional dependency group')
+        '-i', '--include', metavar='GROUP', action='append',
+        help='include an optional dependency group')
     parser.add_argument(
-        '-o', '--only-optional', action='store_true', help='Only install optional dependencies')
+        '-o', '--only-optional', action='store_true',
+        help='only install optional dependencies')
     parser.add_argument(
-        '-p', '--print', action='store_true', help='Only print a requirements.txt to stdout')
+        '-p', '--print', action='store_true',
+        help='only print requirements to stdout')
     parser.add_argument(
-        '-u', '--user', action='store_true', help='Install with pip as --user')
+        '-u', '--user', action='store_true',
+        help='install with pip as --user')
     return parser.parse_args()
 
 
 def main():
     args = parse_args()
-    toml_data = parse_toml(read_file(args.input))
-    deps = toml_data['project']['dependencies']
-    targets = deps.copy() if not args.only_optional else []
-
-    for exclude in args.exclude or []:
-        for dep in deps:
-            simplified_dep = re.match(r'[\w-]+', dep)[0]
-            if dep in targets and (exclude.lower() == simplified_dep.lower() or exclude == dep):
-                targets.remove(dep)
-
-    optional_deps = toml_data['project']['optional-dependencies']
-    for include in args.include or []:
-        group = optional_deps.get(include)
-        if group:
-            targets.extend(group)
+    project_table = parse_toml(read_file(args.input))['project']
+    recursive_pattern = re.compile(rf'{project_table["name"]}\[(?P<group_name>[\w-]+)\]')
+    optional_groups = project_table['optional-dependencies']
+    excludes = args.exclude or []
+
+    def yield_deps(group):
+        for dep in group:
+            if mobj := recursive_pattern.fullmatch(dep):
+                yield from optional_groups.get(mobj.group('group_name'), [])
+            else:
+                yield dep
+
+    targets = []
+    if not args.only_optional:  # `-o` should exclude 'dependencies' and the 'default' group
+        targets.extend(project_table['dependencies'])
+        if 'default' not in excludes:  # `--exclude default` should exclude entire 'default' group
+            targets.extend(yield_deps(optional_groups['default']))
+
+    for include in filter(None, map(optional_groups.get, args.include or [])):
+        targets.extend(yield_deps(include))
+
+    targets = [t for t in targets if re.match(r'[\w-]+', t).group(0).lower() not in excludes]
 
     if args.print:
         for target in targets:
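Typical invocations of this script from a repository checkout (the `secretstorage` include mirrors the Docker entrypoint above):

```sh
# Install the default dependencies plus the secretstorage group
python -m devscripts.install_deps --include secretstorage

# Print the resolved requirement list without installing anything
python -m devscripts.install_deps --print
```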
@@ -253,7 +253,7 @@ class CommitRange:
     ''', re.VERBOSE | re.DOTALL)
     EXTRACTOR_INDICATOR_RE = re.compile(r'(?:Fix|Add)\s+Extractors?', re.IGNORECASE)
     REVERT_RE = re.compile(r'(?:\[[^\]]+\]\s+)?(?i:Revert)\s+([\da-f]{40})')
-    FIXES_RE = re.compile(r'(?i:Fix(?:es)?(?:\s+bugs?)?(?:\s+in|\s+for)?|Revert)\s+([\da-f]{40})')
+    FIXES_RE = re.compile(r'(?i:Fix(?:es)?(?:\s+bugs?)?(?:\s+in|\s+for)?|Revert|Improve)\s+([\da-f]{40})')
     UPSTREAM_MERGE_RE = re.compile(r'Update to ytdl-commit-([\da-f]+)')
 
     def __init__(self, start, end, default_author=None):
@@ -445,7 +445,32 @@ def get_new_contributors(contributors_path, commits):
     return sorted(new_contributors, key=str.casefold)
 
 
-if __name__ == '__main__':
+def create_changelog(args):
+    logging.basicConfig(
+        datefmt='%Y-%m-%d %H-%M-%S', format='{asctime} | {levelname:<8} | {message}',
+        level=logging.WARNING - 10 * args.verbosity, style='{', stream=sys.stderr)
+
+    commits = CommitRange(None, args.commitish, args.default_author)
+
+    if not args.no_override:
+        if args.override_path.exists():
+            overrides = json.loads(read_file(args.override_path))
+            commits.apply_overrides(overrides)
+        else:
+            logger.warning(f'File {args.override_path.as_posix()} does not exist')
+
+    logger.info(f'Loaded {len(commits)} commits')
+
+    new_contributors = get_new_contributors(args.contributors_path, commits)
+    if new_contributors:
+        if args.contributors:
+            write_file(args.contributors_path, '\n'.join(new_contributors) + '\n', mode='a')
+        logger.info(f'New contributors: {", ".join(new_contributors)}')
+
+    return Changelog(commits.groups(), args.repo, args.collapsible)
+
+
+def create_parser():
     import argparse
 
     parser = argparse.ArgumentParser(
@ -477,27 +502,9 @@ def get_new_contributors(contributors_path, commits):
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--collapsible', action='store_true',
|
'--collapsible', action='store_true',
|
||||||
help='make changelog collapsible (default: %(default)s)')
|
help='make changelog collapsible (default: %(default)s)')
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
logging.basicConfig(
|
return parser
|
||||||
datefmt='%Y-%m-%d %H-%M-%S', format='{asctime} | {levelname:<8} | {message}',
|
|
||||||
level=logging.WARNING - 10 * args.verbosity, style='{', stream=sys.stderr)
|
|
||||||
|
|
||||||
commits = CommitRange(None, args.commitish, args.default_author)
|
|
||||||
|
|
||||||
if not args.no_override:
|
if __name__ == '__main__':
|
||||||
if args.override_path.exists():
|
print(create_changelog(create_parser().parse_args()))
|
||||||
overrides = json.loads(read_file(args.override_path))
|
|
||||||
commits.apply_overrides(overrides)
|
|
||||||
else:
|
|
||||||
logger.warning(f'File {args.override_path.as_posix()} does not exist')
|
|
||||||
|
|
||||||
logger.info(f'Loaded {len(commits)} commits')
|
|
||||||
|
|
||||||
new_contributors = get_new_contributors(args.contributors_path, commits)
|
|
||||||
if new_contributors:
|
|
||||||
if args.contributors:
|
|
||||||
write_file(args.contributors_path, '\n'.join(new_contributors) + '\n', mode='a')
|
|
||||||
logger.info(f'New contributors: {", ".join(new_contributors)}')
|
|
||||||
|
|
||||||
print(Changelog(commits.groups(), args.repo, args.collapsible))
|
|
||||||
|
|
|
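The net effect of this refactor is that the changelog pipeline becomes importable: `create_changelog()` returns the `Changelog` object instead of printing it, and `create_parser()` hands callers the argument parser to extend. A minimal sketch of programmatic use (run from the repository root; options are left at their defaults):

```python
from devscripts.make_changelog import create_changelog, create_parser

parser = create_parser()
# A caller can register extra options here before parsing,
# which is exactly what devscripts/update_changelog.py does below
args = parser.parse_args()

changelog = create_changelog(args)  # a Changelog object, not printed output
text = str(changelog)
print(text[:200])  # splice into a file, post-process, or just print
```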
devscripts/prepare_manpage.py
@@ -24,7 +24,7 @@

 # NAME

-yt\-dlp \- A youtube-dl fork with additional features and patches
+yt\-dlp \- A feature\-rich command\-line audio/video downloader

 # SYNOPSIS

@@ -43,6 +43,27 @@ def filter_excluded_sections(readme):
         '', readme)


+def _convert_code_blocks(readme):
+    current_code_block = None
+
+    for line in readme.splitlines(True):
+        if current_code_block:
+            if line == current_code_block:
+                current_code_block = None
+                yield '\n'
+            else:
+                yield f'    {line}'
+        elif line.startswith('```'):
+            current_code_block = line.count('`') * '`' + '\n'
+            yield '\n'
+        else:
+            yield line
+
+
+def convert_code_blocks(readme):
+    return ''.join(_convert_code_blocks(readme))
+
+
 def move_sections(readme):
     MOVE_TAG_TEMPLATE = '<!-- MANPAGE: MOVE "%s" SECTION HERE -->'
     sections = re.findall(r'(?m)^%s$' % (
@@ -65,8 +86,10 @@ def move_sections(readme):


 def filter_options(readme):
     section = re.search(r'(?sm)^# USAGE AND OPTIONS\n.+?(?=^# )', readme).group(0)
+    section_new = section.replace('*', R'\*')
+
     options = '# OPTIONS\n'
-    for line in section.split('\n')[1:]:
+    for line in section_new.split('\n')[1:]:
         mobj = re.fullmatch(r'''(?x)
                 \s{4}(?P<opt>-(?:,\s|[^\s])+)
                 (?:\s(?P<meta>(?:[^\s]|\s(?!\s))+))?
@@ -86,7 +109,7 @@ def filter_options(readme):
     return readme.replace(section, options, 1)


-TRANSFORM = compose_functions(filter_excluded_sections, move_sections, filter_options)
+TRANSFORM = compose_functions(filter_excluded_sections, convert_code_blocks, move_sections, filter_options)


 def main():
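Two manpage fixes land here. `filter_options` now escapes literal `*` so option descriptions are not accidentally italicized by pandoc, and the new `convert_code_blocks` pass rewrites fenced Markdown code blocks into four-space-indented ones, recording the fence length so only a matching run of backticks closes the block. The generator from the diff can be exercised directly; the sample input below is invented:

```python
def _convert_code_blocks(readme):
    current_code_block = None
    for line in readme.splitlines(True):
        if current_code_block:
            if line == current_code_block:
                current_code_block = None  # matching fence closes the block
                yield '\n'
            else:
                yield f'    {line}'  # body becomes an indented code block
        elif line.startswith('```'):
            current_code_block = line.count('`') * '`' + '\n'
            yield '\n'
        else:
            yield line


fence = '`' * 3  # avoid writing a literal fence inside this example
sample = f'Usage:\n{fence}\nyt-dlp URL\n{fence}\nDone.\n'
print(''.join(_convert_code_blocks(sample)))
# Usage:
#
#     yt-dlp URL
#
# Done.
```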
devscripts/run_tests.bat (deleted)
@@ -1,4 +0,0 @@
-@echo off
-
->&2 echo run_tests.bat is deprecated. Please use `devscripts/run_tests.py` instead
-python %~dp0run_tests.py %~1
devscripts/run_tests.py
@@ -4,6 +4,7 @@
 import functools
 import os
 import re
+import shlex
 import subprocess
 import sys
 from pathlib import Path
@@ -18,6 +19,8 @@ def parse_args():
         'test', help='a extractor tests, or one of "core" or "download"', nargs='*')
     parser.add_argument(
         '-k', help='run a test matching EXPRESSION. Same as "pytest -k"', metavar='EXPRESSION')
+    parser.add_argument(
+        '--pytest-args', help='arguments to passthrough to pytest')
     return parser.parse_args()


@@ -26,15 +29,16 @@ def run_tests(*tests, pattern=None, ci=False):
     run_download = 'download' in tests
     tests = list(map(fix_test_name, tests))

-    arguments = ['pytest', '-Werror', '--tb=short']
+    pytest_args = args.pytest_args or os.getenv('HATCH_TEST_ARGS', '')
+    arguments = ['pytest', '-Werror', '--tb=short', *shlex.split(pytest_args)]
     if ci:
         arguments.append('--color=yes')
+    if pattern:
+        arguments.extend(['-k', pattern])
     if run_core:
         arguments.extend(['-m', 'not download'])
     elif run_download:
         arguments.extend(['-m', 'download'])
-    elif pattern:
-        arguments.extend(['-k', pattern])
     else:
         arguments.extend(
             f'test/test_download.py::TestDownload::test_{test}' for test in tests)
@@ -46,13 +50,13 @@ def run_tests(*tests, pattern=None, ci=False):
         pass

     arguments = [sys.executable, '-Werror', '-m', 'unittest']
+    if pattern:
+        arguments.extend(['-k', pattern])
     if run_core:
         print('"pytest" needs to be installed to run core tests', file=sys.stderr, flush=True)
         return 1
     elif run_download:
         arguments.append('test.test_download')
-    elif pattern:
-        arguments.extend(['-k', pattern])
     else:
         arguments.extend(
             f'test.test_download.TestDownload.test_{test}' for test in tests)
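Extra pytest options can now reach the runner two ways: explicitly via `--pytest-args`, or implicitly through the `HATCH_TEST_ARGS` environment variable (presumably populated by the hatch test environment configured in pyproject.toml below), and `shlex.split` keeps quoting intact either way. Moving the `-k pattern` handling ahead of the core/download branches also means a pattern now combines with `core`/`download` selection instead of being mutually exclusive with it. A small sketch of the argument assembly with illustrative values:

```python
import os
import shlex

os.environ['HATCH_TEST_ARGS'] = '-n 4 --reruns "2"'  # stand-in for the hatch-provided value

pytest_args = os.getenv('HATCH_TEST_ARGS', '')
arguments = ['pytest', '-Werror', '--tb=short', *shlex.split(pytest_args)]
print(arguments)
# ['pytest', '-Werror', '--tb=short', '-n', '4', '--reruns', '2']
```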
devscripts/run_tests.sh (deleted)
@@ -1,4 +0,0 @@
-#!/usr/bin/env sh
-
->&2 echo 'run_tests.sh is deprecated. Please use `devscripts/run_tests.py` instead'
-python3 devscripts/run_tests.py "$1"
devscripts/tomlparse.py
@@ -11,7 +11,7 @@

 from __future__ import annotations

-import datetime
+import datetime as dt
 import json
 import re

@@ -115,9 +115,9 @@ def parse_value(data: str, index: int):
     for func in [
         int,
         float,
-        datetime.time.fromisoformat,
-        datetime.date.fromisoformat,
-        datetime.datetime.fromisoformat,
+        dt.time.fromisoformat,
+        dt.date.fromisoformat,
+        dt.datetime.fromisoformat,
         {'true': True, 'false': False}.get,
     ]:
         try:
@@ -179,7 +179,7 @@ def main():
         data = file.read()

     def default(obj):
-        if isinstance(obj, (datetime.date, datetime.time, datetime.datetime)):
+        if isinstance(obj, (dt.date, dt.time, dt.datetime)):
             return obj.isoformat()

     print(json.dumps(parse_toml(data), default=default))
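The `datetime as dt` rename is cosmetic, but the loop it touches is the heart of the parser's value typing: each converter is tried in order and the first one that succeeds determines the Python type of a bare TOML value. A reduced sketch of that fallback chain (the surrounding error handling is simplified here):

```python
import datetime as dt


def convert(value: str):
    for func in [
        int,
        float,
        dt.time.fromisoformat,
        dt.date.fromisoformat,
        dt.datetime.fromisoformat,
        {'true': True, 'false': False}.get,
    ]:
        try:
            result = func(value)
        except ValueError:
            continue
        if result is not None:
            return result
    raise ValueError(f'no converter accepted {value!r}')


for value in ('42', '1.5', '07:32:00', '2024-04-01', 'true'):
    print(f'{value!r} -> {convert(value)!r}')
```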
devscripts/update-version.py
@@ -9,15 +9,15 @@

 import argparse
 import contextlib
+import datetime as dt
 import sys
-from datetime import datetime, timezone

 from devscripts.utils import read_version, run_process, write_file


 def get_new_version(version, revision):
     if not version:
-        version = datetime.now(timezone.utc).strftime('%Y.%m.%d')
+        version = dt.datetime.now(dt.timezone.utc).strftime('%Y.%m.%d')

     if revision:
         assert revision.isdecimal(), 'Revision must be a number'
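The same `dt` convention is applied to the release tooling; the default version string is simply the current UTC date in yt-dlp's CalVer scheme. A sketch of the derivation, where the way the revision is appended is an assumption based on published version numbers, since this hunk only shows the validation:

```python
import datetime as dt


def get_new_version(version, revision):
    if not version:
        # Default to today's UTC date, e.g. '2024.03.10'
        version = dt.datetime.now(dt.timezone.utc).strftime('%Y.%m.%d')
    if revision:
        assert revision.isdecimal(), 'Revision must be a number'
        version = f'{version}.{revision}'  # assumed joining scheme
    return version


print(get_new_version(None, None))  # e.g. 2024.03.10
print(get_new_version(None, '1'))   # e.g. 2024.03.10.1
```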
devscripts/update_changelog.py (new executable file, 26 lines)
@@ -0,0 +1,26 @@
+#!/usr/bin/env python3
+
+# Allow direct execution
+import os
+import sys
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from pathlib import Path
+
+from devscripts.make_changelog import create_changelog, create_parser
+from devscripts.utils import read_file, read_version, write_file
+
+# Always run after devscripts/update-version.py, and run before `make doc|pypi-files|tar|all`
+
+if __name__ == '__main__':
+    parser = create_parser()
+    parser.description = 'Update an existing changelog file with an entry for a new release'
+    parser.add_argument(
+        '--changelog-path', type=Path, default=Path(__file__).parent.parent / 'Changelog.md',
+        help='path to the Changelog file')
+    args = parser.parse_args()
+    new_entry = create_changelog(args)
+
+    header, sep, changelog = read_file(args.changelog_path).partition('\n### ')
+    write_file(args.changelog_path, f'{header}{sep}{read_version()}\n{new_entry}\n{sep}{changelog}')
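The final two lines do the actual splicing: `partition('\n### ')` cuts the existing changelog at its first `### <version>` heading, and the freshly rendered entry is inserted between the file header and everything that was already there. The same string surgery on invented contents, with `read_version()` stubbed out:

```python
def read_version():
    return '2024.03.10'  # stub; the real helper reads it from yt_dlp/version.py


existing = '# Changelog\n### 2023.12.30\n- older entry\n'
new_entry = '- new entry\n'

header, sep, changelog = existing.partition('\n### ')
print(f'{header}{sep}{read_version()}\n{new_entry}\n{sep}{changelog}')
# # Changelog
# ### 2024.03.10
# - new entry
#
# ### 2023.12.30
# - older entry
```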
pyproject.toml (172 changed lines)
@@ -8,8 +8,9 @@ maintainers = [
     {name = "pukkandan", email = "pukkandan.ytdlp@gmail.com"},
     {name = "Grub4K", email = "contact@grub4k.xyz"},
     {name = "bashonly", email = "bashonly@protonmail.com"},
+    {name = "coletdjnz", email = "coletdjnz@protonmail.com"},
 ]
-description = "A youtube-dl fork with additional features and patches"
+description = "A feature-rich command-line audio/video downloader"
 readme = "README.md"
 requires-python = ">=3.8"
 keywords = [
@@ -51,6 +52,8 @@ dependencies = [
 ]

 [project.optional-dependencies]
+default = []
+curl-cffi = ["curl-cffi==0.5.10; implementation_name=='cpython'"]
 secretstorage = [
     "cffi",
     "secretstorage",
@@ -59,15 +62,29 @@ build = [
     "build",
     "hatchling",
     "pip",
+    "setuptools>=66.1.0,<70",
     "wheel",
 ]
 dev = [
-    "flake8",
-    "isort",
-    "pytest",
+    "pre-commit",
+    "yt-dlp[static-analysis]",
+    "yt-dlp[test]",
+]
+static-analysis = [
+    "autopep8~=2.0",
+    "ruff~=0.4.4",
+]
+test = [
+    "pytest~=8.1",
 ]
-pyinstaller = ["pyinstaller>=6.3"]
-py2exe = ["py2exe>=0.12"]
+pyinstaller = [
+    "pyinstaller>=6.3; sys_platform!='darwin'",
+    "pyinstaller==5.13.2; sys_platform=='darwin'",  # needed for curl_cffi
+]
+py2exe = [
+    "py2exe>=0.12",
+    "requests==2.31.*",
+]

 [project.urls]
 Documentation = "https://github.com/yt-dlp/yt-dlp#readme"
@@ -116,3 +133,146 @@ artifacts = ["/yt_dlp/extractor/lazy_extractors.py"]
 [tool.hatch.version]
 path = "yt_dlp/version.py"
 pattern = "_pkg_version = '(?P<version>[^']+)'"
+
+[tool.hatch.envs.default]
+features = ["curl-cffi", "default"]
+dependencies = ["pre-commit"]
+path = ".venv"
+installer = "uv"
+
+[tool.hatch.envs.default.scripts]
+setup = "pre-commit install --config .pre-commit-hatch.yaml"
+yt-dlp = "python -Werror -Xdev -m yt_dlp {args}"
+
+[tool.hatch.envs.hatch-static-analysis]
+detached = true
+features = ["static-analysis"]
+dependencies = []  # override hatch ruff version
+config-path = "pyproject.toml"
+
+[tool.hatch.envs.hatch-static-analysis.scripts]
+format-check = "autopep8 --diff {args:.}"
+format-fix = "autopep8 --in-place {args:.}"
+lint-check = "ruff check {args:.}"
+lint-fix = "ruff check --fix {args:.}"
+
+[tool.hatch.envs.hatch-test]
+features = ["test"]
+dependencies = [
+    "pytest-randomly~=3.15",
+    "pytest-rerunfailures~=14.0",
+    "pytest-xdist[psutil]~=3.5",
+]
+
+[tool.hatch.envs.hatch-test.scripts]
+run = "python -m devscripts.run_tests {args}"
+run-cov = "echo Code coverage not implemented && exit 1"
+
+[[tool.hatch.envs.hatch-test.matrix]]
+python = [
+    "3.8",
+    "3.9",
+    "3.10",
+    "3.11",
+    "3.12",
+    "pypy3.8",
+    "pypy3.9",
+    "pypy3.10",
+]
+
+[tool.ruff]
+line-length = 120
+
+[tool.ruff.lint]
+ignore = [
+    "E402",  # module level import not at top of file
+    "E501",  # line too long
+    "E731",  # do not assign a lambda expression, use a def
+    "E741",  # ambiguous variable name
+]
+select = [
+    "E",  # pycodestyle errors
+    "W",  # pycodestyle warnings
+    "F",  # pyflakes
+    "I",  # import order
+]
+
+[tool.ruff.lint.per-file-ignores]
+"devscripts/lazy_load_template.py" = ["F401"]
+"!yt_dlp/extractor/**.py" = ["I"]
+
+[tool.ruff.lint.isort]
+known-first-party = [
+    "bundle",
+    "devscripts",
+    "test",
+]
+relative-imports-order = "closest-to-furthest"
+
+[tool.autopep8]
+max_line_length = 120
+recursive = true
+exit-code = true
+jobs = 0
+select = [
+    "E101",
+    "E112",
+    "E113",
+    "E115",
+    "E116",
+    "E117",
+    "E121",
+    "E122",
+    "E123",
+    "E124",
+    "E125",
+    "E126",
+    "E127",
+    "E128",
+    "E129",
+    "E131",
+    "E201",
+    "E202",
+    "E203",
+    "E211",
+    "E221",
+    "E222",
+    "E223",
+    "E224",
+    "E225",
+    "E226",
+    "E227",
+    "E228",
+    "E231",
+    "E241",
+    "E242",
+    "E251",
+    "E252",
+    "E261",
+    "E262",
+    "E265",
+    "E266",
+    "E271",
+    "E272",
+    "E273",
+    "E274",
+    "E275",
+    "E301",
+    "E302",
+    "E303",
+    "E304",
+    "E305",
+    "E306",
+    "E502",
+    "E701",
+    "E702",
+    "E704",
+    "W391",
+    "W504",
+]
+
+[tool.pytest.ini_options]
+addopts = "-ra -v --strict-markers"
+markers = [
+    "download",
+]
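Because `devscripts/install_deps.py` (first hunk above) resolves `yt-dlp[...]` entries against these groups at install time, a dangling reference would silently expand to nothing, so it is cheap to sanity-check the layout. A quick check with the stdlib TOML parser (Python 3.11+; on the 3.8 floor declared here, yt-dlp's own `devscripts/tomlparse.py` serves the same purpose):

```python
import re
import tomllib  # Python 3.11+

with open('pyproject.toml', 'rb') as f:
    project = tomllib.load(f)['project']

groups = project['optional-dependencies']
ref = re.compile(rf'{project["name"]}\[(?P<group_name>[\w-]+)\]')

for name, deps in groups.items():
    for dep in deps:
        if (mobj := ref.fullmatch(dep)) and mobj.group('group_name') not in groups:
            print(f'{name}: dangling reference {dep!r}')

print('groups:', ', '.join(groups))
# expected to include: default, curl-cffi, secretstorage, build, dev,
# static-analysis, test, pyinstaller, py2exe
```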
setup.cfg
@@ -14,12 +14,6 @@ remove-duplicate-keys = true
 remove-unused-variables = true


-[tool:pytest]
-addopts = -ra -v --strict-markers
-markers =
-    download
-
-
 [tox:tox]
 skipsdist = true
 envlist = py{38,39,310,311,312},pypy{38,39,310}
supportedsites.md
@@ -5,7 +5,7 @@ # Supported sites
 - **1tv**: Первый канал
 - **20min**
 - **23video**
-- **247sports**
+- **247sports**: (**Currently broken**)
 - **24tv.ua**
 - **3qsdn**: 3Q SDN
 - **3sat**
@@ -14,9 +14,9 @@ # Supported sites
 - **6play**
 - **7plus**
 - **8tracks**
-- **91porn**
 - **9c9media**
 - **9gag**: 9GAG
+- **9News**
 - **9now.com.au**
 - **abc.net.au**
 - **abc.net.au:iview**
@@ -26,13 +26,14 @@ # Supported sites
 - **abcotvs**: ABC Owned Television Stations
 - **abcotvs:clips**
 - **AbemaTV**: [*abematv*](## "netrc machine")
-- **AbemaTVTitle**
+- **AbemaTVTitle**: [*abematv*](## "netrc machine")
 - **AcademicEarth:Course**
 - **acast**
 - **acast:channel**
 - **AcFunBangumi**
 - **AcFunVideo**
 - **ADN**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
+- **ADNSeason**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
 - **AdobeConnect**
 - **adobetv**
 - **adobetv:channel**
@@ -45,7 +46,7 @@ # Supported sites
 - **aenetworks:show**
 - **AeonCo**
 - **afreecatv**: [*afreecatv*](## "netrc machine") afreecatv.com
-- **afreecatv:live**: [*afreecatv*](## "netrc machine") afreecatv.com
+- **afreecatv:live**: [*afreecatv*](## "netrc machine") afreecatv.com livestreams
 - **afreecatv:user**
 - **AirTV**
 - **AitubeKZVideo**
@@ -61,6 +62,7 @@ # Supported sites
 - **altcensored:channel**
 - **Alura**: [*alura*](## "netrc machine")
 - **AluraCourse**: [*aluracourse*](## "netrc machine")
+- **AmadeusTV**
 - **Amara**
 - **AmazonMiniTV**
 - **amazonminitv:season**: Amazon MiniTV Season, "minitv:season:" prefix
@@ -93,11 +95,16 @@ # Supported sites
 - **ARDMediathek**
 - **ARDMediathekCollection**
 - **Arkena**
+- **Art19**
+- **Art19Show**
 - **arte.sky.it**
 - **ArteTV**
 - **ArteTVCategory**
 - **ArteTVEmbed**
 - **ArteTVPlaylist**
+- **asobichannel**: ASOBI CHANNEL
+- **asobichannel:tag**: ASOBI CHANNEL
+- **AsobiStage**: ASOBISTAGE (アソビステージ)
 - **AtresPlayer**: [*atresplayer*](## "netrc machine")
 - **AtScaleConfEvent**
 - **ATVAt**
@@ -180,13 +187,14 @@ # Supported sites
 - **BitChute**
 - **BitChuteChannel**
 - **BlackboardCollaborate**
-- **BleacherReport**
-- **BleacherReportCMS**
+- **BleacherReport**: (**Currently broken**)
+- **BleacherReportCMS**: (**Currently broken**)
 - **blerp**
 - **blogger.com**
 - **Bloomberg**
 - **BokeCC**
 - **BongaCams**
+- **Boosty**
 - **BostonGlobe**
 - **Box**
 - **BoxCastVideo**
@@ -211,7 +219,7 @@ # Supported sites
 - **BusinessInsider**
 - **BuzzFeed**
 - **BYUtv**: (**Currently broken**)
-- **CableAV**
+- **CaffeineTV**
 - **Callin**
 - **Caltrans**
 - **CAM4**
@@ -231,8 +239,7 @@ # Supported sites
 - **cbc.ca**
 - **cbc.ca:player**
 - **cbc.ca:player:playlist**
-- **CBS**
-- **CBSInteractive**
+- **CBS**: (**Currently broken**)
 - **CBSLocal**
 - **CBSLocalArticle**
 - **CBSLocalLive**
@@ -240,8 +247,8 @@ # Supported sites
 - **cbsnews:embed**
 - **cbsnews:live**: CBS News Livestream
 - **cbsnews:livevideo**: CBS News Live Videos
-- **cbssports**
-- **cbssports:embed**
+- **cbssports**: (**Currently broken**)
+- **cbssports:embed**: (**Currently broken**)
 - **CCMA**
 - **CCTV**: 央视网
 - **CDA**: [*cdapl*](## "netrc machine")
@@ -251,10 +258,10 @@ # Supported sites
 - **CharlieRose**
 - **Chaturbate**
 - **Chilloutzone**
-- **Chingari**
-- **ChingariUser**
+- **chzzk:live**
+- **chzzk:video**
 - **cielotv.it**
-- **Cinemax**
+- **Cinemax**: (**Currently broken**)
 - **CinetecaMilano**
 - **Cineverse**
 - **CineverseDetails**
@@ -263,16 +270,15 @@ # Supported sites
 - **ciscowebex**: Cisco Webex
 - **CJSW**
 - **Clipchamp**
-- **cliphunter**
 - **Clippit**
-- **ClipRs**
+- **ClipRs**: (**Currently broken**)
 - **ClipYouEmbed**
-- **CloserToTruth**
+- **CloserToTruth**: (**Currently broken**)
 - **CloudflareStream**
+- **CloudyCDN**
 - **Clubic**: (**Currently broken**)
 - **Clyp**
 - **cmt.com**: (**Currently broken**)
-- **CNBC**
 - **CNBCVideo**
 - **CNN**
 - **CNNArticle**
@@ -320,11 +326,14 @@ # Supported sites
 - **DailyMail**
 - **dailymotion**: [*dailymotion*](## "netrc machine")
 - **dailymotion:playlist**: [*dailymotion*](## "netrc machine")
+- **dailymotion:search**: [*dailymotion*](## "netrc machine")
 - **dailymotion:user**: [*dailymotion*](## "netrc machine")
 - **DailyWire**
 - **DailyWirePodcast**
 - **damtomo:record**
 - **damtomo:video**
+- **dangalplay**: [*dangalplay*](## "netrc machine")
+- **dangalplay:season**: [*dangalplay*](## "netrc machine")
 - **daum.net**
 - **daum.net:clip**
 - **daum.net:playlist**
@@ -340,7 +349,6 @@ # Supported sites
 - **DeuxM**
 - **DeuxMNews**
 - **DHM**: Filmarchiv - Deutsches Historisches Museum (**Currently broken**)
-- **Digg**
 - **DigitalConcertHall**: [*digitalconcerthall*](## "netrc machine") DigitalConcertHall extractor
 - **DigitallySpeaking**
 - **Digiteka**
@@ -373,14 +381,14 @@ # Supported sites
 - **drtv:live**
 - **drtv:season**
 - **drtv:series**
-- **DTube**
+- **DTube**: (**Currently broken**)
 - **duboku**: www.duboku.io
 - **duboku:list**: www.duboku.io entire series
 - **Dumpert**
 - **Duoplay**
 - **dvtv**: http://video.aktualne.cz/
-- **dw**
-- **dw:article**
+- **dw**: (**Currently broken**)
+- **dw:article**: (**Currently broken**)
 - **EaglePlatform**
 - **EbaumsWorld**
 - **Ebay**
@@ -389,8 +397,8 @@ # Supported sites
 - **EinsUndEinsTV**: [*1und1tv*](## "netrc machine")
 - **EinsUndEinsTVLive**: [*1und1tv*](## "netrc machine")
 - **EinsUndEinsTVRecordings**: [*1und1tv*](## "netrc machine")
-- **Einthusan**
 - **eitb.tv**
+- **ElementorEmbed**
 - **Elonet**
 - **ElPais**: El País
 - **ElTreceTV**: El Trece TV (Argentina)
@@ -405,6 +413,7 @@ # Supported sites
 - **Erocast**
 - **EroProfile**: [*eroprofile*](## "netrc machine")
 - **EroProfile:album**
+- **ERRJupiter**
 - **ertflix**: ERTFLIX videos
 - **ertflix:codename**: ERTFLIX videos by codename
 - **ertwebtv:embed**: ert.gr webtv embedded videos
@@ -412,7 +421,7 @@ # Supported sites
 - **ESPNArticle**
 - **ESPNCricInfo**
 - **EttuTv**
-- **Europa**
+- **Europa**: (**Currently broken**)
 - **EuroParlWebstream**
 - **EuropeanTour**
 - **Eurosport**
@@ -423,22 +432,24 @@ # Supported sites
 - **Expressen**
 - **EyedoTV**
 - **facebook**: [*facebook*](## "netrc machine")
+- **facebook:ads**
 - **facebook:reel**
 - **FacebookPluginsVideo**
-- **fancode:live**: [*fancode*](## "netrc machine")
-- **fancode:vod**: [*fancode*](## "netrc machine")
+- **fancode:live**: [*fancode*](## "netrc machine") (**Currently broken**)
+- **fancode:vod**: [*fancode*](## "netrc machine") (**Currently broken**)
+- **Fathom**
 - **faz.net**
 - **fc2**: [*fc2*](## "netrc machine")
 - **fc2:embed**
 - **fc2:live**
 - **Fczenit**
 - **Fifa**
-- **Filmmodu**
 - **filmon**
 - **filmon:channel**
 - **Filmweb**
 - **FiveThirtyEight**
 - **FiveTV**
+- **FlexTV**
 - **Flickr**
 - **Floatplane**
 - **FloatplaneChannel**
@@ -477,7 +488,6 @@ # Supported sites
 - **Gab**
 - **GabTV**
 - **Gaia**: [*gaia*](## "netrc machine")
-- **GameInformer**
 - **GameJolt**
 - **GameJoltCommunity**
 - **GameJoltGame**
@@ -487,18 +497,20 @@ # Supported sites
 - **GameSpot**
 - **GameStar**
 - **Gaskrank**
-- **Gazeta**
-- **GDCVault**: [*gdcvault*](## "netrc machine")
+- **Gazeta**: (**Currently broken**)
+- **GBNews**: GB News clips, features and live streams
+- **GDCVault**: [*gdcvault*](## "netrc machine") (**Currently broken**)
 - **GediDigital**
 - **gem.cbc.ca**: [*cbcgem*](## "netrc machine")
 - **gem.cbc.ca:live**
 - **gem.cbc.ca:playlist**
 - **Genius**
 - **GeniusLyrics**
+- **GetCourseRu**: [*getcourseru*](## "netrc machine")
+- **GetCourseRuPlayer**
 - **Gettr**
 - **GettrStreaming**
 - **GiantBomb**
-- **Giga**
 - **GlattvisionTV**: [*glattvisiontv*](## "netrc machine")
 - **GlattvisionTVLive**: [*glattvisiontv*](## "netrc machine")
 - **GlattvisionTVRecordings**: [*glattvisiontv*](## "netrc machine")
@@ -516,7 +528,8 @@ # Supported sites
 - **GMANetworkVideo**
 - **Go**
 - **GoDiscovery**
-- **GodTube**
+- **GodResource**
+- **GodTube**: (**Currently broken**)
 - **Gofile**
 - **Golem**
 - **goodgame:stream**
@@ -551,7 +564,7 @@ # Supported sites
 - **HollywoodReporter**
 - **HollywoodReporterPlaylist**
 - **Holodex**
-- **HotNewHipHop**
+- **HotNewHipHop**: (**Currently broken**)
 - **hotstar**
 - **hotstar:playlist**
 - **hotstar:season**
@@ -579,6 +592,7 @@ # Supported sites
 - **IGNVideo**
 - **iheartradio**
 - **iheartradio:podcast**
+- **IlPost**
 - **Iltalehti**
 - **imdb**: Internet Movie Database trailers
 - **imdb:list**: Internet Movie Database lists
@@ -592,7 +606,7 @@ # Supported sites
 - **Instagram**: [*instagram*](## "netrc machine")
 - **instagram:story**: [*instagram*](## "netrc machine")
 - **instagram:tag**: [*instagram*](## "netrc machine") Instagram hashtag search URLs
-- **instagram:user**: [*instagram*](## "netrc machine") Instagram user profile
+- **instagram:user**: [*instagram*](## "netrc machine") Instagram user profile (**Currently broken**)
 - **InstagramIOS**: IOS instagram:// URL
 - **Internazionale**
 - **InternetVideoArchive**
@@ -618,13 +632,14 @@ # Supported sites
 - **iwara:user**: [*iwara*](## "netrc machine")
 - **Ixigua**
 - **Izlesene**
-- **Jable**
-- **JablePlaylist**
 - **Jamendo**
 - **JamendoAlbum**
-- **JeuxVideo**
-- **JioSaavnAlbum**
-- **JioSaavnSong**
+- **JeuxVideo**: (**Currently broken**)
+- **jiocinema**: [*jiocinema*](## "netrc machine")
+- **jiocinema:series**: [*jiocinema*](## "netrc machine")
+- **jiosaavn:album**
+- **jiosaavn:playlist**
+- **jiosaavn:song**
 - **Joj**
 - **JoqrAg**: 超!A&G+ 文化放送 (f.k.a. AGQR) Nippon Cultural Broadcasting, Inc. (JOQR)
 - **Jove**
@@ -634,12 +649,10 @@ # Supported sites
 - **JWPlatform**
 - **Kakao**
 - **Kaltura**
-- **Kanal2**
-- **KankaNews**
+- **KankaNews**: (**Currently broken**)
 - **Karaoketv**
-- **KarriereVideos**
-- **Katsomo**
-- **KelbyOne**
+- **Katsomo**: (**Currently broken**)
+- **KelbyOne**: (**Currently broken**)
 - **Ketnet**
 - **khanacademy**
 - **khanacademy:unit**
@@ -651,18 +664,17 @@ # Supported sites
 - **KinoPoisk**
 - **Kommunetv**
 - **KompasVideo**
-- **KonserthusetPlay**
-- **Koo**
-- **KrasView**: Красвью
+- **Koo**: (**Currently broken**)
+- **KrasView**: Красвью (**Currently broken**)
 - **KTH**
 - **Ku6**
-- **KUSI**
-- **kuwo:album**: 酷我音乐 - 专辑
-- **kuwo:category**: 酷我音乐 - 分类
-- **kuwo:chart**: 酷我音乐 - 排行榜
-- **kuwo:mv**: 酷我音乐 - MV
-- **kuwo:singer**: 酷我音乐 - 歌手
-- **kuwo:song**: 酷我音乐
+- **KukuluLive**
+- **kuwo:album**: 酷我音乐 - 专辑 (**Currently broken**)
+- **kuwo:category**: 酷我音乐 - 分类 (**Currently broken**)
+- **kuwo:chart**: 酷我音乐 - 排行榜 (**Currently broken**)
+- **kuwo:mv**: 酷我音乐 - MV (**Currently broken**)
+- **kuwo:singer**: 酷我音乐 - 歌手 (**Currently broken**)
+- **kuwo:song**: 酷我音乐 (**Currently broken**)
 - **la7.it**
 - **la7.it:pod:episode**
 - **la7.it:podcast**
@@ -677,7 +689,7 @@ # Supported sites
 - **Lcp**
 - **LcpPlay**
 - **Le**: 乐视网
-- **Lecture2Go**
+- **Lecture2Go**: (**Currently broken**)
 - **Lecturio**: [*lecturio*](## "netrc machine")
 - **LecturioCourse**: [*lecturio*](## "netrc machine")
 - **LecturioDeCourse**: [*lecturio*](## "netrc machine")
@@ -685,7 +697,7 @@ # Supported sites
 - **LeFigaroVideoSection**
 - **LEGO**
 - **Lemonde**
-- **Lenta**
+- **Lenta**: (**Currently broken**)
 - **LePlaylist**
 - **LetvCloud**: 乐视云
 - **Libsyn**
@@ -709,31 +721,34 @@ # Supported sites
 - **Lnk**
 - **LnkGo**
 - **loc**: Library of Congress
-- **LocalNews8**
+- **loom**
+- **loom:folder**
 - **LoveHomePorn**
 - **LRTStream**
 - **LRTVOD**
+- **LSMLREmbed**
+- **LSMLTVEmbed**
+- **LSMReplay**
 - **Lumni**
 - **lynda**: [*lynda*](## "netrc machine") lynda.com videos
 - **lynda:course**: [*lynda*](## "netrc machine") lynda.com online courses
 - **maariv.co.il**
 - **MagellanTV**
-- **MagentaMusik360**
+- **MagentaMusik**
 - **mailru**: Видео@Mail.Ru
 - **mailru:music**: Музыка@Mail.Ru
 - **mailru:music:search**: Музыка@Mail.Ru
 - **MainStreaming**: MainStreaming Player
-- **MallTV**
 - **mangomolo:live**
 - **mangomolo:video**
 - **MangoTV**: 芒果TV
 - **ManotoTV**: Manoto TV (Episode)
 - **ManotoTVLive**: Manoto TV (Live)
 - **ManotoTVShow**: Manoto TV (Show)
-- **ManyVids**
+- **ManyVids**: (**Currently broken**)
 - **MaoriTV**
-- **Markiza**
-- **MarkizaPage**
+- **Markiza**: (**Currently broken**)
+- **MarkizaPage**: (**Currently broken**)
 - **massengeschmack.tv**
 - **Masters**
 - **MatchTV**
@@ -760,7 +775,6 @@ # Supported sites
 - **MelonVOD**
 - **Metacritic**
 - **mewatch**
-- **MiaoPai**
 - **MicrosoftEmbed**
 - **microsoftstream**: Microsoft Stream
 - **mildom**: Record ongoing live by specific user in Mildom
@@ -770,7 +784,6 @@ # Supported sites
 - **minds**
 - **minds:channel**
 - **minds:group**
-- **MinistryGrid**
 - **Minoto**
 - **mirrativ**
 - **mirrativ:user**
@@ -793,11 +806,11 @@ # Supported sites
 - **Mojvideo**
 - **Monstercat**
 - **MonsterSirenHypergryphMusic**
-- **Morningstar**: morningstar.com
 - **Motherless**
 - **MotherlessGallery**
 - **MotherlessGroup**
-- **Motorsport**: motorsport.com
+- **MotherlessUploader**
+- **Motorsport**: motorsport.com (**Currently broken**)
 - **MotorTrend**
 - **MotorTrendOnDemand**
 - **MovieFap**
@@ -808,17 +821,17 @@ # Supported sites
 - **MSN**: (**Currently broken**)
 - **mtg**: MTG services
 - **mtv**
-- **mtv.de**
+- **mtv.de**: (**Currently broken**)
 - **mtv.it**
 - **mtv.it:programma**
 - **mtv:video**
 - **mtvjapan**
 - **mtvservices:embedded**
-- **MTVUutisetArticle**
-- **MuenchenTV**: münchen.tv
+- **MTVUutisetArticle**: (**Currently broken**)
+- **MuenchenTV**: münchen.tv (**Currently broken**)
 - **MujRozhlas**
-- **Murrtube**
-- **MurrtubeUser**: Murrtube user profile
+- **Murrtube**: (**Currently broken**)
+- **MurrtubeUser**: Murrtube user profile (**Currently broken**)
 - **MuseAI**
 - **MuseScore**
 - **MusicdexAlbum**
@@ -827,6 +840,9 @@ # Supported sites
 - **MusicdexSong**
 - **mva**: Microsoft Virtual Academy videos
 - **mva:course**: Microsoft Virtual Academy courses
+- **Mx3**
+- **Mx3Neo**
+- **Mx3Volksmusik**
 - **Mxplayer**
 - **MxplayerShow**
 - **MySpace**
@@ -862,11 +878,11 @@ # Supported sites
 - **ndr**: NDR.de - Norddeutscher Rundfunk
 - **ndr:embed**
 - **ndr:embed:base**
-- **NDTV**
-- **Nebula**: [*watchnebula*](## "netrc machine")
+- **NDTV**: (**Currently broken**)
 - **nebula:channel**: [*watchnebula*](## "netrc machine")
-- **nebula:class**: [*watchnebula*](## "netrc machine")
+- **nebula:media**: [*watchnebula*](## "netrc machine")
 - **nebula:subscriptions**: [*watchnebula*](## "netrc machine")
+- **nebula:video**: [*watchnebula*](## "netrc machine")
 - **NekoHacker**
 - **NerdCubedFeed**
 - **netease:album**: 网易云音乐 - 专辑
@@ -882,18 +898,19 @@ # Supported sites
 - **Netverse**
 - **NetversePlaylist**
 - **NetverseSearch**: "netsearch:" prefix
-- **Netzkino**
-- **Newgrounds**
+- **Netzkino**: (**Currently broken**)
+- **Newgrounds**: [*newgrounds*](## "netrc machine")
 - **Newgrounds:playlist**
 - **Newgrounds:user**
 - **NewsPicks**
 - **Newsy**
 - **NextMedia**: 蘋果日報
 - **NextMediaActionNews**: 蘋果日報 - 動新聞
-- **NextTV**: 壹電視
+- **NextTV**: 壹電視 (**Currently broken**)
 - **Nexx**
 - **NexxEmbed**
-- **NFB**
+- **nfb**: nfb.ca and onf.ca films and episodes
+- **nfb:series**: nfb.ca and onf.ca series
 - **NFHSNetwork**
 - **nfl.com**
 - **nfl.com:article**
@@ -925,11 +942,12 @@ # Supported sites
 - **nicovideo:search**: Nico video search; "nicosearch:" prefix
 - **nicovideo:search:date**: Nico video search, newest first; "nicosearchdate:" prefix
 - **nicovideo:search_url**: Nico video search URLs
+- **NinaProtocol**
 - **Nintendo**
 - **Nitter**
 - **njoy**: N-JOY
 - **njoy:embed**
-- **NobelPrize**
+- **NobelPrize**: (**Currently broken**)
 - **NoicePodcast**
 - **NonkTube**
 - **NoodleMagazine**
@@ -941,7 +959,7 @@ # Supported sites
 - **nowness**
 - **nowness:playlist**
 - **nowness:series**
-- **Noz**
+- **Noz**: (**Currently broken**)
 - **npo**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
 - **npo.nl:live**
 - **npo.nl:radio**
@@ -958,17 +976,21 @@ # Supported sites
 - **NRKTVSeason**
 - **NRKTVSeries**
 - **NRLTV**: (**Currently broken**)
+- **nts.live**
 - **ntv.ru**
 - **NubilesPorn**: [*nubiles-porn*](## "netrc machine")
+- **nuum:live**
+- **nuum:media**
+- **nuum:tab**
 - **Nuvid**
 - **NYTimes**
 - **NYTimesArticle**
-- **NYTimesCooking**
+- **NYTimesCookingGuide**
+- **NYTimesCookingRecipe**
 - **nzherald**
 - **NZOnScreen**
 - **NZZ**
 - **ocw.mit.edu**
-- **OdaTV**
 - **Odnoklassniki**
 - **OfTV**
 - **OfTVPlaylist**
@@ -993,9 +1015,9 @@ # Supported sites
 - **OraTV**
 - **orf:fm4:story**: fm4.orf.at stories
 - **orf:iptv**: iptv.ORF.at
+- **orf:on**
 - **orf:podcast**
 - **orf:radio**
-- **orf:tvthek**: ORF TVthek
 - **OsnatelTV**: [*osnateltv*](## "netrc machine")
 - **OsnatelTVLive**: [*osnateltv*](## "netrc machine")
 - **OsnatelTVRecordings**: [*osnateltv*](## "netrc machine")
@@ -1015,7 +1037,7 @@ # Supported sites
 - **ParamountPressExpress**
 - **Parler**: Posts on parler.com
 - **parliamentlive.tv**: UK parliament videos
-- **Parlview**
+- **Parlview**: (**Currently broken**)
 - **Patreon**
 - **PatreonCampaign**
 - **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
@@ -1049,19 +1071,19 @@ # Supported sites
 - **Platzi**: [*platzi*](## "netrc machine")
 - **PlatziCourse**: [*platzi*](## "netrc machine")
 - **player.sky.it**
+- **playeur**
 - **PlayPlusTV**: [*playplustv*](## "netrc machine")
-- **PlayStuff**
-- **PlaySuisse**
+- **PlaySuisse**: [*playsuisse*](## "netrc machine")
 - **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
 - **PlayVids**
 - **Playwire**
 - **pluralsight**: [*pluralsight*](## "netrc machine")
 - **pluralsight:course**
-- **PlutoTV**
+- **PlutoTV**: (**Currently broken**)
 - **PodbayFM**
 - **PodbayFMChannel**
 - **Podchaser**
-- **podomatic**
+- **podomatic**: (**Currently broken**)
 - **Pokemon**
 - **PokemonWatch**
 - **PokerGo**: [*pokergo*](## "netrc machine")
@@ -1085,15 +1107,16 @@ # Supported sites
 - **PornHubUser**: [*pornhub*](## "netrc machine")
 - **PornHubUserVideosUpload**: [*pornhub*](## "netrc machine")
 - **Pornotube**
-- **PornoVoisines**
-- **PornoXO**
+- **PornoVoisines**: (**Currently broken**)
+- **PornoXO**: (**Currently broken**)
 - **PornTop**
 - **PornTube**
 - **Pr0gramm**
 - **PrankCast**
+- **PrankCastPost**
 - **PremiershipRugby**
 - **PressTV**
-- **ProjectVeritas**
+- **ProjectVeritas**: (**Currently broken**)
 - **prosiebensat1**: ProSiebenSat.1 Digital
 - **PRXAccount**
 - **PRXSeries**
@@ -1115,11 +1138,12 @@ # Supported sites
 - **QuantumTVLive**: [*quantumtv*](## "netrc machine")
 - **QuantumTVRecordings**: [*quantumtv*](## "netrc machine")
 - **Qub**
-- **R7**
-- **R7Article**
+- **R7**: (**Currently broken**)
+- **R7Article**: (**Currently broken**)
 - **Radiko**
 - **RadikoRadio**
-- **radio.de**
+- **radio.de**: (**Currently broken**)
+- **Radio1Be**
 - **radiocanada**
 - **radiocanada:audiovideo**
 - **RadioComercial**
@@ -1129,7 +1153,7 @@ # Supported sites
 - **RadioFrancePodcast**
 - **RadioFranceProfile**
 - **RadioFranceProgramSchedule**
-- **RadioJavan**
+- **RadioJavan**: (**Currently broken**)
 - **radiokapital**
 - **radiokapital:show**
 - **RadioZetPodcast**
@@ -1151,33 +1175,34 @@ # Supported sites
 - **RbgTum**
 - **RbgTumCourse**
 - **RbgTumNewCourse**
|
- **RbgTumNewCourse**
|
||||||
- **RBMARadio**
|
|
||||||
- **RCS**
|
- **RCS**
|
||||||
- **RCSEmbeds**
|
- **RCSEmbeds**
|
||||||
- **RCSVarious**
|
- **RCSVarious**
|
||||||
- **RCTIPlus**
|
- **RCTIPlus**
|
||||||
- **RCTIPlusSeries**
|
- **RCTIPlusSeries**
|
||||||
- **RCTIPlusTV**
|
- **RCTIPlusTV**
|
||||||
- **RDS**: RDS.ca
|
- **RDS**: RDS.ca (**Currently broken**)
|
||||||
- **RedBull**
|
- **RedBull**
|
||||||
- **RedBullEmbed**
|
- **RedBullEmbed**
|
||||||
- **RedBullTV**
|
- **RedBullTV**
|
||||||
- **RedBullTVRrnContent**
|
- **RedBullTVRrnContent**
|
||||||
|
- **redcdnlivx**
|
||||||
- **Reddit**: [*reddit*](## "netrc machine")
|
- **Reddit**: [*reddit*](## "netrc machine")
|
||||||
- **RedGifs**
|
- **RedGifs**
|
||||||
- **RedGifsSearch**: Redgifs search
|
- **RedGifsSearch**: Redgifs search
|
||||||
- **RedGifsUser**: Redgifs user
|
- **RedGifsUser**: Redgifs user
|
||||||
- **RedTube**
|
- **RedTube**
|
||||||
- **RegioTV**
|
- **RENTV**: (**Currently broken**)
|
||||||
- **RENTV**
|
- **RENTVArticle**: (**Currently broken**)
|
||||||
- **RENTVArticle**
|
- **Restudy**: (**Currently broken**)
|
||||||
- **Restudy**
|
- **Reuters**: (**Currently broken**)
|
||||||
- **Reuters**
|
|
||||||
- **ReverbNation**
|
- **ReverbNation**
|
||||||
- **RheinMainTV**
|
- **RheinMainTV**
|
||||||
|
- **RideHome**
|
||||||
- **RinseFM**
|
- **RinseFM**
|
||||||
|
- **RinseFMArtistPlaylist**
|
||||||
- **RMCDecouverte**
|
- **RMCDecouverte**
|
||||||
- **RockstarGames**
|
- **RockstarGames**: (**Currently broken**)
|
||||||
- **Rokfin**: [*rokfin*](## "netrc machine")
|
- **Rokfin**: [*rokfin*](## "netrc machine")
|
||||||
- **rokfin:channel**: Rokfin Channels
|
- **rokfin:channel**: Rokfin Channels
|
||||||
- **rokfin:search**: Rokfin Search; "rkfnsearch:" prefix
|
- **rokfin:search**: Rokfin Search; "rkfnsearch:" prefix
|
||||||
|
@ -1187,7 +1212,7 @@ # Supported sites
|
||||||
- **RottenTomatoes**
|
- **RottenTomatoes**
|
||||||
- **Rozhlas**
|
- **Rozhlas**
|
||||||
- **RozhlasVltava**
|
- **RozhlasVltava**
|
||||||
- **RTBF**: [*rtbf*](## "netrc machine")
|
- **RTBF**: [*rtbf*](## "netrc machine") (**Currently broken**)
|
||||||
- **RTDocumentry**
|
- **RTDocumentry**
|
||||||
- **RTDocumentryPlaylist**
|
- **RTDocumentryPlaylist**
|
||||||
- **rte**: Raidió Teilifís Éireann TV
|
- **rte**: Raidió Teilifís Éireann TV
|
||||||
|
@ -1201,7 +1226,7 @@ # Supported sites
|
||||||
- **RTNews**
|
- **RTNews**
|
||||||
- **RTP**
|
- **RTP**
|
||||||
- **RTRFM**
|
- **RTRFM**
|
||||||
- **RTS**: RTS.ch
|
- **RTS**: RTS.ch (**Currently broken**)
|
||||||
- **RTVCKaltura**
|
- **RTVCKaltura**
|
||||||
- **RTVCPlay**
|
- **RTVCPlay**
|
||||||
- **RTVCPlayEmbed**
|
- **RTVCPlayEmbed**
|
||||||
|
@ -1234,7 +1259,7 @@ # Supported sites
|
||||||
- **safari**: [*safari*](## "netrc machine") safaribooksonline.com online video
|
- **safari**: [*safari*](## "netrc machine") safaribooksonline.com online video
|
||||||
- **safari:api**: [*safari*](## "netrc machine")
|
- **safari:api**: [*safari*](## "netrc machine")
|
||||||
- **safari:course**: [*safari*](## "netrc machine") safaribooksonline.com online courses
|
- **safari:course**: [*safari*](## "netrc machine") safaribooksonline.com online courses
|
||||||
- **Saitosan**
|
- **Saitosan**: (**Currently broken**)
|
||||||
- **SAKTV**: [*saktv*](## "netrc machine")
|
- **SAKTV**: [*saktv*](## "netrc machine")
|
||||||
- **SAKTVLive**: [*saktv*](## "netrc machine")
|
- **SAKTVLive**: [*saktv*](## "netrc machine")
|
||||||
- **SAKTVRecordings**: [*saktv*](## "netrc machine")
|
- **SAKTVRecordings**: [*saktv*](## "netrc machine")
|
||||||
|
@ -1244,7 +1269,6 @@ # Supported sites
|
||||||
- **SampleFocus**
|
- **SampleFocus**
|
||||||
- **Sangiin**: 参議院インターネット審議中継 (archive)
|
- **Sangiin**: 参議院インターネット審議中継 (archive)
|
||||||
- **Sapo**: SAPO Vídeos
|
- **Sapo**: SAPO Vídeos
|
||||||
- **savefrom.net**
|
|
||||||
- **SBS**: sbs.com.au
|
- **SBS**: sbs.com.au
|
||||||
- **sbs.co.kr**
|
- **sbs.co.kr**
|
||||||
- **sbs.co.kr:allvod_program**
|
- **sbs.co.kr:allvod_program**
|
||||||
|
@ -1261,17 +1285,18 @@ # Supported sites
|
||||||
- **Scrolller**
|
- **Scrolller**
|
||||||
- **SCTE**: [*scte*](## "netrc machine") (**Currently broken**)
|
- **SCTE**: [*scte*](## "netrc machine") (**Currently broken**)
|
||||||
- **SCTECourse**: [*scte*](## "netrc machine") (**Currently broken**)
|
- **SCTECourse**: [*scte*](## "netrc machine") (**Currently broken**)
|
||||||
- **Seeker**
|
- **sejm**
|
||||||
- **SenalColombiaLive**
|
- **SenalColombiaLive**: (**Currently broken**)
|
||||||
- **SenateGov**
|
- **SenateGov**
|
||||||
- **SenateISVP**
|
- **SenateISVP**
|
||||||
- **SendtoNews**
|
- **SendtoNews**: (**Currently broken**)
|
||||||
- **Servus**
|
- **Servus**
|
||||||
- **Sexu**
|
- **Sexu**: (**Currently broken**)
|
||||||
- **SeznamZpravy**
|
- **SeznamZpravy**
|
||||||
- **SeznamZpravyArticle**
|
- **SeznamZpravyArticle**
|
||||||
- **Shahid**: [*shahid*](## "netrc machine")
|
- **Shahid**: [*shahid*](## "netrc machine")
|
||||||
- **ShahidShow**
|
- **ShahidShow**
|
||||||
|
- **SharePoint**
|
||||||
- **ShareVideosEmbed**
|
- **ShareVideosEmbed**
|
||||||
- **ShemarooMe**
|
- **ShemarooMe**
|
||||||
- **ShowRoomLive**
|
- **ShowRoomLive**
|
||||||
|
@ -1289,9 +1314,9 @@ # Supported sites
|
||||||
- **sky:news:story**
|
- **sky:news:story**
|
||||||
- **sky:sports**
|
- **sky:sports**
|
||||||
- **sky:sports:news**
|
- **sky:sports:news**
|
||||||
- **SkylineWebcams**
|
- **SkylineWebcams**: (**Currently broken**)
|
||||||
- **skynewsarabia:article**
|
- **skynewsarabia:article**: (**Currently broken**)
|
||||||
- **skynewsarabia:video**
|
- **skynewsarabia:video**: (**Currently broken**)
|
||||||
- **SkyNewsAU**
|
- **SkyNewsAU**
|
||||||
- **Slideshare**
|
- **Slideshare**
|
||||||
- **SlidesLive**
|
- **SlidesLive**
|
||||||
|
@ -1342,7 +1367,7 @@ # Supported sites
|
||||||
- **StacommuVOD**: [*stacommu*](## "netrc machine")
|
- **StacommuVOD**: [*stacommu*](## "netrc machine")
|
||||||
- **StagePlusVODConcert**: [*stageplus*](## "netrc machine")
|
- **StagePlusVODConcert**: [*stageplus*](## "netrc machine")
|
||||||
- **stanfordoc**: Stanford Open ClassRoom
|
- **stanfordoc**: Stanford Open ClassRoom
|
||||||
- **StarTrek**
|
- **StarTrek**: (**Currently broken**)
|
||||||
- **startv**
|
- **startv**
|
||||||
- **Steam**
|
- **Steam**
|
||||||
- **SteamCommunityBroadcast**
|
- **SteamCommunityBroadcast**
|
||||||
|
@ -1353,7 +1378,6 @@ # Supported sites
|
||||||
- **StoryFireUser**
|
- **StoryFireUser**
|
||||||
- **Streamable**
|
- **Streamable**
|
||||||
- **StreamCZ**
|
- **StreamCZ**
|
||||||
- **StreamFF**
|
|
||||||
- **StreetVoice**
|
- **StreetVoice**
|
||||||
- **StretchInternet**
|
- **StretchInternet**
|
||||||
- **Stripchat**
|
- **Stripchat**
|
||||||
|
@ -1367,22 +1391,25 @@ # Supported sites
|
||||||
- **SVTPlay**: SVT Play and Öppet arkiv
|
- **SVTPlay**: SVT Play and Öppet arkiv
|
||||||
- **SVTSeries**
|
- **SVTSeries**
|
||||||
- **SwearnetEpisode**
|
- **SwearnetEpisode**
|
||||||
- **Syfy**
|
- **Syfy**: (**Currently broken**)
|
||||||
- **SYVDK**
|
- **SYVDK**
|
||||||
- **SztvHu**
|
- **SztvHu**
|
||||||
- **t-online.de**
|
- **t-online.de**: (**Currently broken**)
|
||||||
- **Tagesschau**
|
- **Tagesschau**: (**Currently broken**)
|
||||||
- **Tass**
|
- **TapTapApp**
|
||||||
|
- **TapTapAppIntl**
|
||||||
|
- **TapTapMoment**
|
||||||
|
- **TapTapPostIntl**
|
||||||
|
- **Tass**: (**Currently broken**)
|
||||||
- **TBS**
|
- **TBS**
|
||||||
- **TBSJPEpisode**
|
- **TBSJPEpisode**
|
||||||
- **TBSJPPlaylist**
|
- **TBSJPPlaylist**
|
||||||
- **TBSJPProgram**
|
- **TBSJPProgram**
|
||||||
- **TDSLifeway**
|
- **Teachable**: [*teachable*](## "netrc machine") (**Currently broken**)
|
||||||
- **Teachable**: [*teachable*](## "netrc machine")
|
|
||||||
- **TeachableCourse**: [*teachable*](## "netrc machine")
|
- **TeachableCourse**: [*teachable*](## "netrc machine")
|
||||||
- **teachertube**: teachertube.com videos
|
- **teachertube**: teachertube.com videos (**Currently broken**)
|
||||||
- **teachertube:user:collection**: teachertube.com user and collection videos
|
- **teachertube:user:collection**: teachertube.com user and collection videos (**Currently broken**)
|
||||||
- **TeachingChannel**
|
- **TeachingChannel**: (**Currently broken**)
|
||||||
- **Teamcoco**
|
- **Teamcoco**
|
||||||
- **TeamTreeHouse**: [*teamtreehouse*](## "netrc machine")
|
- **TeamTreeHouse**: [*teamtreehouse*](## "netrc machine")
|
||||||
- **techtv.mit.edu**
|
- **techtv.mit.edu**
|
||||||
|
@ -1397,14 +1424,14 @@ # Supported sites
|
||||||
- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
|
- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
|
||||||
- **Telegraaf**
|
- **Telegraaf**
|
||||||
- **telegram:embed**
|
- **telegram:embed**
|
||||||
- **TeleMB**
|
- **TeleMB**: (**Currently broken**)
|
||||||
- **Telemundo**
|
- **Telemundo**: (**Currently broken**)
|
||||||
- **TeleQuebec**
|
- **TeleQuebec**
|
||||||
- **TeleQuebecEmission**
|
- **TeleQuebecEmission**
|
||||||
- **TeleQuebecLive**
|
- **TeleQuebecLive**
|
||||||
- **TeleQuebecSquat**
|
- **TeleQuebecSquat**
|
||||||
- **TeleQuebecVideo**
|
- **TeleQuebecVideo**
|
||||||
- **TeleTask**
|
- **TeleTask**: (**Currently broken**)
|
||||||
- **Telewebion**
|
- **Telewebion**
|
||||||
- **Tempo**
|
- **Tempo**
|
||||||
- **TennisTV**: [*tennistv*](## "netrc machine")
|
- **TennisTV**: [*tennistv*](## "netrc machine")
|
||||||
|
@ -1431,11 +1458,12 @@ # Supported sites
|
||||||
- **ThreeSpeak**
|
- **ThreeSpeak**
|
||||||
- **ThreeSpeakUser**
|
- **ThreeSpeakUser**
|
||||||
- **TikTok**
|
- **TikTok**
|
||||||
|
- **tiktok:collection**
|
||||||
- **tiktok:effect**: (**Currently broken**)
|
- **tiktok:effect**: (**Currently broken**)
|
||||||
- **tiktok:live**
|
- **tiktok:live**
|
||||||
- **tiktok:sound**: (**Currently broken**)
|
- **tiktok:sound**: (**Currently broken**)
|
||||||
- **tiktok:tag**: (**Currently broken**)
|
- **tiktok:tag**: (**Currently broken**)
|
||||||
- **tiktok:user**: (**Currently broken**)
|
- **tiktok:user**
|
||||||
- **TLC**
|
- **TLC**
|
||||||
- **TMZ**
|
- **TMZ**
|
||||||
- **TNAFlix**
|
- **TNAFlix**
|
||||||
|
@ -1458,6 +1486,7 @@ # Supported sites
|
||||||
- **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
|
- **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
|
||||||
- **TrovoVod**
|
- **TrovoVod**
|
||||||
- **TrtCocukVideo**
|
- **TrtCocukVideo**
|
||||||
|
- **TrtWorld**
|
||||||
- **TrueID**
|
- **TrueID**
|
||||||
- **TruNews**
|
- **TruNews**
|
||||||
- **Truth**
|
- **Truth**
|
||||||
|
@ -1471,7 +1500,6 @@ # Supported sites
|
||||||
- **TuneInPodcast**
|
- **TuneInPodcast**
|
||||||
- **TuneInPodcastEpisode**
|
- **TuneInPodcastEpisode**
|
||||||
- **TuneInStation**
|
- **TuneInStation**
|
||||||
- **Turbo**
|
|
||||||
- **tv.dfb.de**
|
- **tv.dfb.de**
|
||||||
- **TV2**
|
- **TV2**
|
||||||
- **TV2Article**
|
- **TV2Article**
|
||||||
|
@ -1480,7 +1508,7 @@ # Supported sites
|
||||||
- **tv2play.hu**
|
- **tv2play.hu**
|
||||||
- **tv2playseries.hu**
|
- **tv2playseries.hu**
|
||||||
- **TV4**: tv4.se and tv4play.se
|
- **TV4**: tv4.se and tv4play.se
|
||||||
- **TV5MondePlus**: TV5MONDE+
|
- **TV5MONDE**
|
||||||
- **tv5unis**
|
- **tv5unis**
|
||||||
- **tv5unis:video**
|
- **tv5unis:video**
|
||||||
- **tv8.it**
|
- **tv8.it**
|
||||||
|
@ -1493,8 +1521,8 @@ # Supported sites
|
||||||
- **tvigle**: Интернет-телевидение Tvigle.ru
|
- **tvigle**: Интернет-телевидение Tvigle.ru
|
||||||
- **TVIPlayer**
|
- **TVIPlayer**
|
||||||
- **tvland.com**
|
- **tvland.com**
|
||||||
- **TVN24**
|
- **TVN24**: (**Currently broken**)
|
||||||
- **TVNoe**
|
- **TVNoe**: (**Currently broken**)
|
||||||
- **tvopengr:embed**: tvopen.gr embedded videos
|
- **tvopengr:embed**: tvopen.gr embedded videos
|
||||||
- **tvopengr:watch**: tvopen.gr (and ethnos.gr) videos
|
- **tvopengr:watch**: tvopen.gr (and ethnos.gr) videos
|
||||||
- **tvp**: Telewizja Polska
|
- **tvp**: Telewizja Polska
|
||||||
|
@ -1527,15 +1555,15 @@ # Supported sites
|
||||||
- **UDNEmbed**: 聯合影音
|
- **UDNEmbed**: 聯合影音
|
||||||
- **UFCArabia**: [*ufcarabia*](## "netrc machine")
|
- **UFCArabia**: [*ufcarabia*](## "netrc machine")
|
||||||
- **UFCTV**: [*ufctv*](## "netrc machine")
|
- **UFCTV**: [*ufctv*](## "netrc machine")
|
||||||
- **ukcolumn**
|
- **ukcolumn**: (**Currently broken**)
|
||||||
- **UKTVPlay**
|
- **UKTVPlay**
|
||||||
- **umg:de**: Universal Music Deutschland
|
- **umg:de**: Universal Music Deutschland (**Currently broken**)
|
||||||
- **Unistra**
|
- **Unistra**
|
||||||
- **Unity**
|
- **Unity**: (**Currently broken**)
|
||||||
- **uol.com.br**
|
- **uol.com.br**
|
||||||
- **uplynk**
|
- **uplynk**
|
||||||
- **uplynk:preplay**
|
- **uplynk:preplay**
|
||||||
- **Urort**: NRK P3 Urørt
|
- **Urort**: NRK P3 Urørt (**Currently broken**)
|
||||||
- **URPlay**
|
- **URPlay**
|
||||||
- **USANetwork**
|
- **USANetwork**
|
||||||
- **USAToday**
|
- **USAToday**
|
||||||
|
@ -1543,13 +1571,12 @@ # Supported sites
|
||||||
- **ustream:channel**
|
- **ustream:channel**
|
||||||
- **ustudio**
|
- **ustudio**
|
||||||
- **ustudio:embed**
|
- **ustudio:embed**
|
||||||
- **Utreon**
|
- **Varzesh3**: (**Currently broken**)
|
||||||
- **Varzesh3**
|
|
||||||
- **Vbox7**
|
- **Vbox7**
|
||||||
- **Veo**
|
- **Veo**
|
||||||
- **Veoh**
|
- **Veoh**
|
||||||
- **veoh:user**
|
- **veoh:user**
|
||||||
- **Vesti**: Вести.Ru
|
- **Vesti**: Вести.Ru (**Currently broken**)
|
||||||
- **Vevo**
|
- **Vevo**
|
||||||
- **VevoPlaylist**
|
- **VevoPlaylist**
|
||||||
- **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
|
- **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
|
||||||
|
@ -1565,7 +1592,7 @@ # Supported sites
|
||||||
- **video.sky.it**
|
- **video.sky.it**
|
||||||
- **video.sky.it:live**
|
- **video.sky.it:live**
|
||||||
- **VideoDetective**
|
- **VideoDetective**
|
||||||
- **videofy.me**
|
- **videofy.me**: (**Currently broken**)
|
||||||
- **VideoKen**
|
- **VideoKen**
|
||||||
- **VideoKenCategory**
|
- **VideoKenCategory**
|
||||||
- **VideoKenPlayer**
|
- **VideoKenPlayer**
|
||||||
|
@ -1601,7 +1628,8 @@ # Supported sites
|
||||||
- **ViMP:Playlist**
|
- **ViMP:Playlist**
|
||||||
- **Vine**
|
- **Vine**
|
||||||
- **vine:user**
|
- **vine:user**
|
||||||
- **Viqeo**
|
- **Viously**
|
||||||
|
- **Viqeo**: (**Currently broken**)
|
||||||
- **Viu**
|
- **Viu**
|
||||||
- **viu:ott**: [*viu*](## "netrc machine")
|
- **viu:ott**: [*viu*](## "netrc machine")
|
||||||
- **viu:playlist**
|
- **viu:playlist**
|
||||||
|
@ -1615,11 +1643,9 @@ # Supported sites
|
||||||
- **Vocaroo**
|
- **Vocaroo**
|
||||||
- **VODPl**
|
- **VODPl**
|
||||||
- **VODPlatform**
|
- **VODPlatform**
|
||||||
- **voicy**
|
- **voicy**: (**Currently broken**)
|
||||||
- **voicy:channel**
|
- **voicy:channel**: (**Currently broken**)
|
||||||
- **VolejTV**
|
- **VolejTV**
|
||||||
- **Voot**: [*voot*](## "netrc machine") (**Currently broken**)
|
|
||||||
- **VootSeries**: [*voot*](## "netrc machine") (**Currently broken**)
|
|
||||||
- **VoxMedia**
|
- **VoxMedia**
|
||||||
- **VoxMediaVolume**
|
- **VoxMediaVolume**
|
||||||
- **vpro**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
- **vpro**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||||
|
@ -1627,7 +1653,7 @@ # Supported sites
|
||||||
- **vqq:video**
|
- **vqq:video**
|
||||||
- **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
|
- **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
|
||||||
- **VrtNU**: [*vrtnu*](## "netrc machine") VRT MAX
|
- **VrtNU**: [*vrtnu*](## "netrc machine") VRT MAX
|
||||||
- **VTM**
|
- **VTM**: (**Currently broken**)
|
||||||
- **VTXTV**: [*vtxtv*](## "netrc machine")
|
- **VTXTV**: [*vtxtv*](## "netrc machine")
|
||||||
- **VTXTVLive**: [*vtxtv*](## "netrc machine")
|
- **VTXTVLive**: [*vtxtv*](## "netrc machine")
|
||||||
- **VTXTVRecordings**: [*vtxtv*](## "netrc machine")
|
- **VTXTVRecordings**: [*vtxtv*](## "netrc machine")
|
||||||
|
@ -1638,9 +1664,6 @@ # Supported sites
|
||||||
- **WalyTV**: [*walytv*](## "netrc machine")
|
- **WalyTV**: [*walytv*](## "netrc machine")
|
||||||
- **WalyTVLive**: [*walytv*](## "netrc machine")
|
- **WalyTVLive**: [*walytv*](## "netrc machine")
|
||||||
- **WalyTVRecordings**: [*walytv*](## "netrc machine")
|
- **WalyTVRecordings**: [*walytv*](## "netrc machine")
|
||||||
- **wasdtv:clip**
|
|
||||||
- **wasdtv:record**
|
|
||||||
- **wasdtv:stream**
|
|
||||||
- **washingtonpost**
|
- **washingtonpost**
|
||||||
- **washingtonpost:article**
|
- **washingtonpost:article**
|
||||||
- **wat.tv**
|
- **wat.tv**
|
||||||
|
@ -1658,7 +1681,7 @@ # Supported sites
|
||||||
- **Weibo**
|
- **Weibo**
|
||||||
- **WeiboUser**
|
- **WeiboUser**
|
||||||
- **WeiboVideo**
|
- **WeiboVideo**
|
||||||
- **WeiqiTV**: WQTV
|
- **WeiqiTV**: WQTV (**Currently broken**)
|
||||||
- **wetv:episode**
|
- **wetv:episode**
|
||||||
- **WeTvSeries**
|
- **WeTvSeries**
|
||||||
- **Weverse**: [*weverse*](## "netrc machine")
|
- **Weverse**: [*weverse*](## "netrc machine")
|
||||||
|
@ -1697,14 +1720,14 @@ # Supported sites
|
||||||
- **wykop:post:comment**
|
- **wykop:post:comment**
|
||||||
- **Xanimu**
|
- **Xanimu**
|
||||||
- **XboxClips**
|
- **XboxClips**
|
||||||
- **XFileShare**: XFileShare based sites: Aparat, ClipWatching, GoUnlimited, GoVid, HolaVid, Streamty, TheVideoBee, Uqload, VidBom, vidlo, VidLocker, VidShare, VUp, WolfStream, XVideoSharing
|
|
||||||
- **XHamster**
|
- **XHamster**
|
||||||
- **XHamsterEmbed**
|
- **XHamsterEmbed**
|
||||||
- **XHamsterUser**
|
- **XHamsterUser**
|
||||||
|
- **XiaoHongShu**: 小红书
|
||||||
- **ximalaya**: 喜马拉雅FM
|
- **ximalaya**: 喜马拉雅FM
|
||||||
- **ximalaya:album**: 喜马拉雅FM 专辑
|
- **ximalaya:album**: 喜马拉雅FM 专辑
|
||||||
- **xinpianchang**: xinpianchang.com
|
- **xinpianchang**: xinpianchang.com (**Currently broken**)
|
||||||
- **XMinus**
|
- **XMinus**: (**Currently broken**)
|
||||||
- **XNXX**
|
- **XNXX**
|
||||||
- **Xstream**
|
- **Xstream**
|
||||||
- **XVideos**
|
- **XVideos**
|
||||||
|
@ -1720,8 +1743,8 @@ # Supported sites
|
||||||
- **yandexmusic:track**: Яндекс.Музыка - Трек
|
- **yandexmusic:track**: Яндекс.Музыка - Трек
|
||||||
- **YandexVideo**
|
- **YandexVideo**
|
||||||
- **YandexVideoPreview**
|
- **YandexVideoPreview**
|
||||||
- **YapFiles**
|
- **YapFiles**: (**Currently broken**)
|
||||||
- **Yappy**
|
- **Yappy**: (**Currently broken**)
|
||||||
- **YappyProfile**
|
- **YappyProfile**
|
||||||
- **YleAreena**
|
- **YleAreena**
|
||||||
- **YouJizz**
|
- **YouJizz**
|
||||||
|
@ -1731,8 +1754,12 @@ # Supported sites
|
||||||
- **YouNowLive**
|
- **YouNowLive**
|
||||||
- **YouNowMoment**
|
- **YouNowMoment**
|
||||||
- **YouPorn**
|
- **YouPorn**
|
||||||
- **YourPorn**
|
- **YouPornCategory**: YouPorn category, with sorting, filtering and pagination
|
||||||
- **YourUpload**
|
- **YouPornChannel**: YouPorn channel, with sorting and pagination
|
||||||
|
- **YouPornCollection**: YouPorn collection (user playlist), with sorting and pagination
|
||||||
|
- **YouPornStar**: YouPorn Pornstar, with description, sorting and pagination
|
||||||
|
- **YouPornTag**: YouPorn tag (porntags), with sorting, filtering and pagination
|
||||||
|
- **YouPornVideos**: YouPorn video (browse) playlists, with sorting, filtering and pagination
|
||||||
- **youtube**: YouTube
|
- **youtube**: YouTube
|
||||||
- **youtube:clip**
|
- **youtube:clip**
|
||||||
- **youtube:favorites**: YouTube liked videos; ":ytfav" keyword (requires cookies)
|
- **youtube:favorites**: YouTube liked videos; ":ytfav" keyword (requires cookies)
|
||||||
|
@ -1762,9 +1789,11 @@ # Supported sites
|
||||||
- **ZDFChannel**
|
- **ZDFChannel**
|
||||||
- **Zee5**: [*zee5*](## "netrc machine")
|
- **Zee5**: [*zee5*](## "netrc machine")
|
||||||
- **zee5:series**
|
- **zee5:series**
|
||||||
- **ZeeNews**
|
- **ZeeNews**: (**Currently broken**)
|
||||||
|
- **ZenPorn**
|
||||||
- **ZenYandex**
|
- **ZenYandex**
|
||||||
- **ZenYandexChannel**
|
- **ZenYandexChannel**
|
||||||
|
- **ZetlandDKArticle**
|
||||||
- **Zhihu**
|
- **Zhihu**
|
||||||
- **zingmp3**: zingmp3.vn
|
- **zingmp3**: zingmp3.vn
|
||||||
- **zingmp3:album**
|
- **zingmp3:album**
|
||||||
|
|
|
@ -1,4 +1,3 @@
|
||||||
import functools
|
|
||||||
import inspect
|
import inspect
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
@ -10,7 +9,9 @@
|
||||||
|
|
||||||
@pytest.fixture
|
@pytest.fixture
|
||||||
def handler(request):
|
def handler(request):
|
||||||
RH_KEY = request.param
|
RH_KEY = getattr(request, 'param', None)
|
||||||
|
if not RH_KEY:
|
||||||
|
return
|
||||||
if inspect.isclass(RH_KEY) and issubclass(RH_KEY, RequestHandler):
|
if inspect.isclass(RH_KEY) and issubclass(RH_KEY, RequestHandler):
|
||||||
handler = RH_KEY
|
handler = RH_KEY
|
||||||
elif RH_KEY in _REQUEST_HANDLERS:
|
elif RH_KEY in _REQUEST_HANDLERS:
|
||||||
|
@ -18,9 +19,46 @@ def handler(request):
|
||||||
else:
|
else:
|
||||||
pytest.skip(f'{RH_KEY} request handler is not available')
|
pytest.skip(f'{RH_KEY} request handler is not available')
|
||||||
|
|
||||||
return functools.partial(handler, logger=FakeLogger)
|
class HandlerWrapper(handler):
|
||||||
|
RH_KEY = handler.RH_KEY
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
super().__init__(logger=FakeLogger, *args, **kwargs)
|
||||||
|
|
||||||
|
return HandlerWrapper
|
||||||
|
|
||||||
|
|
||||||
def validate_and_send(rh, req):
|
@pytest.fixture(autouse=True)
|
||||||
rh.validate(req)
|
def skip_handler(request, handler):
|
||||||
return rh.send(req)
|
"""usage: pytest.mark.skip_handler('my_handler', 'reason')"""
|
||||||
|
for marker in request.node.iter_markers('skip_handler'):
|
||||||
|
if marker.args[0] == handler.RH_KEY:
|
||||||
|
pytest.skip(marker.args[1] if len(marker.args) > 1 else '')
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(autouse=True)
|
||||||
|
def skip_handler_if(request, handler):
|
||||||
|
"""usage: pytest.mark.skip_handler_if('my_handler', lambda request: True, 'reason')"""
|
||||||
|
for marker in request.node.iter_markers('skip_handler_if'):
|
||||||
|
if marker.args[0] == handler.RH_KEY and marker.args[1](request):
|
||||||
|
pytest.skip(marker.args[2] if len(marker.args) > 2 else '')
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(autouse=True)
|
||||||
|
def skip_handlers_if(request, handler):
|
||||||
|
"""usage: pytest.mark.skip_handlers_if(lambda request, handler: True, 'reason')"""
|
||||||
|
for marker in request.node.iter_markers('skip_handlers_if'):
|
||||||
|
if handler and marker.args[0](request, handler):
|
||||||
|
pytest.skip(marker.args[1] if len(marker.args) > 1 else '')
|
||||||
|
|
||||||
|
|
||||||
|
def pytest_configure(config):
|
||||||
|
config.addinivalue_line(
|
||||||
|
"markers", "skip_handler(handler): skip test for the given handler",
|
||||||
|
)
|
||||||
|
config.addinivalue_line(
|
||||||
|
"markers", "skip_handler_if(handler): skip test for the given handler if condition is true"
|
||||||
|
)
|
||||||
|
config.addinivalue_line(
|
||||||
|
"markers", "skip_handlers_if(handler): skip test for handlers when the condition is true"
|
||||||
|
)
|
||||||
|
|
|
@ -338,3 +338,8 @@ def http_server_port(httpd):
|
||||||
def verify_address_availability(address):
|
def verify_address_availability(address):
|
||||||
if find_available_port(address) is None:
|
if find_available_port(address) is None:
|
||||||
pytest.skip(f'Unable to bind to source address {address} (address may not exist)')
|
pytest.skip(f'Unable to bind to source address {address} (address may not exist)')
|
||||||
|
|
||||||
|
|
||||||
|
def validate_and_send(rh, req):
|
||||||
|
rh.validate(req)
|
||||||
|
return rh.send(req)
|
||||||
|
|
|
@ -1906,6 +1906,15 @@ def test_response_with_expected_status_returns_content(self):
|
||||||
expected_status=TEAPOT_RESPONSE_STATUS)
|
expected_status=TEAPOT_RESPONSE_STATUS)
|
||||||
self.assertEqual(content, TEAPOT_RESPONSE_BODY)
|
self.assertEqual(content, TEAPOT_RESPONSE_BODY)
|
||||||
|
|
||||||
|
def test_search_nextjs_data(self):
|
||||||
|
data = '<script id="__NEXT_DATA__" type="application/json">{"props":{}}</script>'
|
||||||
|
self.assertEqual(self.ie._search_nextjs_data(data, None), {'props': {}})
|
||||||
|
self.assertEqual(self.ie._search_nextjs_data('', None, fatal=False), {})
|
||||||
|
self.assertEqual(self.ie._search_nextjs_data('', None, default=None), None)
|
||||||
|
self.assertEqual(self.ie._search_nextjs_data('', None, default={}), {})
|
||||||
|
with self.assertWarns(DeprecationWarning):
|
||||||
|
self.assertEqual(self.ie._search_nextjs_data('', None, default='{}'), {})
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|
|
@ -183,7 +183,7 @@ def test_format_selection_audio_exts(self):
|
||||||
]
|
]
|
||||||
|
|
||||||
info_dict = _make_result(formats)
|
info_dict = _make_result(formats)
|
||||||
ydl = YDL({'format': 'best'})
|
ydl = YDL({'format': 'best', 'format_sort': ['abr', 'ext']})
|
||||||
ydl.sort_formats(info_dict)
|
ydl.sort_formats(info_dict)
|
||||||
ydl.process_ie_result(copy.deepcopy(info_dict))
|
ydl.process_ie_result(copy.deepcopy(info_dict))
|
||||||
downloaded = ydl.downloaded_info_dicts[0]
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
@ -195,7 +195,7 @@ def test_format_selection_audio_exts(self):
|
||||||
downloaded = ydl.downloaded_info_dicts[0]
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
self.assertEqual(downloaded['format_id'], 'mp3-64')
|
self.assertEqual(downloaded['format_id'], 'mp3-64')
|
||||||
|
|
||||||
ydl = YDL({'prefer_free_formats': True})
|
ydl = YDL({'prefer_free_formats': True, 'format_sort': ['abr', 'ext']})
|
||||||
ydl.sort_formats(info_dict)
|
ydl.sort_formats(info_dict)
|
||||||
ydl.process_ie_result(copy.deepcopy(info_dict))
|
ydl.process_ie_result(copy.deepcopy(info_dict))
|
||||||
downloaded = ydl.downloaded_info_dicts[0]
|
downloaded = ydl.downloaded_info_dicts[0]
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
|
import datetime as dt
|
||||||
import unittest
|
import unittest
|
||||||
from datetime import datetime, timezone
|
|
||||||
|
|
||||||
from yt_dlp import cookies
|
from yt_dlp import cookies
|
||||||
from yt_dlp.cookies import (
|
from yt_dlp.cookies import (
|
||||||
|
@ -138,7 +138,7 @@ def test_safari_cookie_parsing(self):
|
||||||
self.assertEqual(cookie.name, 'foo')
|
self.assertEqual(cookie.name, 'foo')
|
||||||
self.assertEqual(cookie.value, 'test%20%3Bcookie')
|
self.assertEqual(cookie.value, 'test%20%3Bcookie')
|
||||||
self.assertFalse(cookie.secure)
|
self.assertFalse(cookie.secure)
|
||||||
expected_expiration = datetime(2021, 6, 18, 21, 39, 19, tzinfo=timezone.utc)
|
expected_expiration = dt.datetime(2021, 6, 18, 21, 39, 19, tzinfo=dt.timezone.utc)
|
||||||
self.assertEqual(cookie.expires, int(expected_expiration.timestamp()))
|
self.assertEqual(cookie.expires, int(expected_expiration.timestamp()))
|
||||||
|
|
||||||
def test_pbkdf2_sha1(self):
|
def test_pbkdf2_sha1(self):
|
||||||
|
|
|
@ -45,7 +45,7 @@ def test_lazy_extractors(self):
|
||||||
self.assertTrue(os.path.exists(LAZY_EXTRACTORS))
|
self.assertTrue(os.path.exists(LAZY_EXTRACTORS))
|
||||||
|
|
||||||
_, stderr = self.run_yt_dlp(opts=('-s', 'test:'))
|
_, stderr = self.run_yt_dlp(opts=('-s', 'test:'))
|
||||||
# `MIN_RECOMMENDED` emits a deprecated feature warning for deprecated python versions
|
# `MIN_RECOMMENDED` emits a deprecated feature warning for deprecated Python versions
|
||||||
if stderr and stderr.startswith('Deprecated Feature: Support for Python'):
|
if stderr and stderr.startswith('Deprecated Feature: Support for Python'):
|
||||||
stderr = ''
|
stderr = ''
|
||||||
self.assertFalse(stderr)
|
self.assertFalse(stderr)
|
||||||
|
|
380
test/test_http_proxy.py
Normal file
380
test/test_http_proxy.py
Normal file
|
@ -0,0 +1,380 @@
|
||||||
|
import abc
|
||||||
|
import base64
|
||||||
|
import contextlib
|
||||||
|
import functools
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import random
|
||||||
|
import ssl
|
||||||
|
import threading
|
||||||
|
from http.server import BaseHTTPRequestHandler
|
||||||
|
from socketserver import ThreadingTCPServer
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from test.helper import http_server_port, verify_address_availability
|
||||||
|
from test.test_networking import TEST_DIR
|
||||||
|
from test.test_socks import IPv6ThreadingTCPServer
|
||||||
|
from yt_dlp.dependencies import urllib3
|
||||||
|
from yt_dlp.networking import Request
|
||||||
|
from yt_dlp.networking.exceptions import HTTPError, ProxyError, SSLError
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPProxyAuthMixin:
|
||||||
|
|
||||||
|
def proxy_auth_error(self):
|
||||||
|
self.send_response(407)
|
||||||
|
self.send_header('Proxy-Authenticate', 'Basic realm="test http proxy"')
|
||||||
|
self.end_headers()
|
||||||
|
return False
|
||||||
|
|
||||||
|
def do_proxy_auth(self, username, password):
|
||||||
|
if username is None and password is None:
|
||||||
|
return True
|
||||||
|
|
||||||
|
proxy_auth_header = self.headers.get('Proxy-Authorization', None)
|
||||||
|
if proxy_auth_header is None:
|
||||||
|
return self.proxy_auth_error()
|
||||||
|
|
||||||
|
if not proxy_auth_header.startswith('Basic '):
|
||||||
|
return self.proxy_auth_error()
|
||||||
|
|
||||||
|
auth = proxy_auth_header[6:]
|
||||||
|
|
||||||
|
try:
|
||||||
|
auth_username, auth_password = base64.b64decode(auth).decode().split(':', 1)
|
||||||
|
except Exception:
|
||||||
|
return self.proxy_auth_error()
|
||||||
|
|
||||||
|
if auth_username != (username or '') or auth_password != (password or ''):
|
||||||
|
return self.proxy_auth_error()
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPProxyHandler(BaseHTTPRequestHandler, HTTPProxyAuthMixin):
|
||||||
|
def __init__(self, *args, proxy_info=None, username=None, password=None, request_handler=None, **kwargs):
|
||||||
|
self.username = username
|
||||||
|
self.password = password
|
||||||
|
self.proxy_info = proxy_info
|
||||||
|
super().__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
def do_GET(self):
|
||||||
|
if not self.do_proxy_auth(self.username, self.password):
|
||||||
|
self.server.close_request(self.request)
|
||||||
|
return
|
||||||
|
if self.path.endswith('/proxy_info'):
|
||||||
|
payload = json.dumps(self.proxy_info or {
|
||||||
|
'client_address': self.client_address,
|
||||||
|
'connect': False,
|
||||||
|
'connect_host': None,
|
||||||
|
'connect_port': None,
|
||||||
|
'headers': dict(self.headers),
|
||||||
|
'path': self.path,
|
||||||
|
'proxy': ':'.join(str(y) for y in self.connection.getsockname()),
|
||||||
|
})
|
||||||
|
self.send_response(200)
|
||||||
|
self.send_header('Content-Type', 'application/json; charset=utf-8')
|
||||||
|
self.send_header('Content-Length', str(len(payload)))
|
||||||
|
self.end_headers()
|
||||||
|
self.wfile.write(payload.encode())
|
||||||
|
else:
|
||||||
|
self.send_response(404)
|
||||||
|
self.end_headers()
|
||||||
|
|
||||||
|
self.server.close_request(self.request)
|
||||||
|
|
||||||
|
|
||||||
|
if urllib3:
|
||||||
|
import urllib3.util.ssltransport
|
||||||
|
|
||||||
|
class SSLTransport(urllib3.util.ssltransport.SSLTransport):
|
||||||
|
"""
|
||||||
|
Modified version of urllib3 SSLTransport to support server side SSL
|
||||||
|
|
||||||
|
This allows us to chain multiple TLS connections.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True, server_side=False):
|
||||||
|
self.incoming = ssl.MemoryBIO()
|
||||||
|
self.outgoing = ssl.MemoryBIO()
|
||||||
|
|
||||||
|
self.suppress_ragged_eofs = suppress_ragged_eofs
|
||||||
|
self.socket = socket
|
||||||
|
|
||||||
|
self.sslobj = ssl_context.wrap_bio(
|
||||||
|
self.incoming,
|
||||||
|
self.outgoing,
|
||||||
|
server_hostname=server_hostname,
|
||||||
|
server_side=server_side
|
||||||
|
)
|
||||||
|
self._ssl_io_loop(self.sslobj.do_handshake)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def _io_refs(self):
|
||||||
|
return self.socket._io_refs
|
||||||
|
|
||||||
|
@_io_refs.setter
|
||||||
|
def _io_refs(self, value):
|
||||||
|
self.socket._io_refs = value
|
||||||
|
|
||||||
|
def shutdown(self, *args, **kwargs):
|
||||||
|
self.socket.shutdown(*args, **kwargs)
|
||||||
|
else:
|
||||||
|
SSLTransport = None
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPSProxyHandler(HTTPProxyHandler):
|
||||||
|
def __init__(self, request, *args, **kwargs):
|
||||||
|
certfn = os.path.join(TEST_DIR, 'testcert.pem')
|
||||||
|
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
|
||||||
|
sslctx.load_cert_chain(certfn, None)
|
||||||
|
if isinstance(request, ssl.SSLSocket):
|
||||||
|
request = SSLTransport(request, ssl_context=sslctx, server_side=True)
|
||||||
|
else:
|
||||||
|
request = sslctx.wrap_socket(request, server_side=True)
|
||||||
|
super().__init__(request, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPConnectProxyHandler(BaseHTTPRequestHandler, HTTPProxyAuthMixin):
|
||||||
|
protocol_version = 'HTTP/1.1'
|
||||||
|
default_request_version = 'HTTP/1.1'
|
||||||
|
|
||||||
|
def __init__(self, *args, username=None, password=None, request_handler=None, **kwargs):
|
||||||
|
self.username = username
|
||||||
|
self.password = password
|
||||||
|
self.request_handler = request_handler
|
||||||
|
super().__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
def do_CONNECT(self):
|
||||||
|
if not self.do_proxy_auth(self.username, self.password):
|
||||||
|
self.server.close_request(self.request)
|
||||||
|
return
|
||||||
|
self.send_response(200)
|
||||||
|
self.end_headers()
|
||||||
|
proxy_info = {
|
||||||
|
'client_address': self.client_address,
|
||||||
|
'connect': True,
|
||||||
|
'connect_host': self.path.split(':')[0],
|
||||||
|
'connect_port': int(self.path.split(':')[1]),
|
||||||
|
'headers': dict(self.headers),
|
||||||
|
'path': self.path,
|
||||||
|
'proxy': ':'.join(str(y) for y in self.connection.getsockname()),
|
||||||
|
}
|
||||||
|
self.request_handler(self.request, self.client_address, self.server, proxy_info=proxy_info)
|
||||||
|
self.server.close_request(self.request)
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPSConnectProxyHandler(HTTPConnectProxyHandler):
|
||||||
|
def __init__(self, request, *args, **kwargs):
|
||||||
|
certfn = os.path.join(TEST_DIR, 'testcert.pem')
|
||||||
|
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
|
||||||
|
sslctx.load_cert_chain(certfn, None)
|
||||||
|
request = sslctx.wrap_socket(request, server_side=True)
|
||||||
|
self._original_request = request
|
||||||
|
super().__init__(request, *args, **kwargs)
|
||||||
|
|
||||||
|
def do_CONNECT(self):
|
||||||
|
super().do_CONNECT()
|
||||||
|
self.server.close_request(self._original_request)
|
||||||
|
|
||||||
|
|
||||||
|
@contextlib.contextmanager
|
||||||
|
def proxy_server(proxy_server_class, request_handler, bind_ip=None, **proxy_server_kwargs):
|
||||||
|
server = server_thread = None
|
||||||
|
try:
|
||||||
|
bind_address = bind_ip or '127.0.0.1'
|
||||||
|
server_type = ThreadingTCPServer if '.' in bind_address else IPv6ThreadingTCPServer
|
||||||
|
server = server_type(
|
||||||
|
(bind_address, 0), functools.partial(proxy_server_class, request_handler=request_handler, **proxy_server_kwargs))
|
||||||
|
server_port = http_server_port(server)
|
||||||
|
server_thread = threading.Thread(target=server.serve_forever)
|
||||||
|
server_thread.daemon = True
|
||||||
|
server_thread.start()
|
||||||
|
if '.' not in bind_address:
|
||||||
|
yield f'[{bind_address}]:{server_port}'
|
||||||
|
else:
|
||||||
|
yield f'{bind_address}:{server_port}'
|
||||||
|
finally:
|
||||||
|
server.shutdown()
|
||||||
|
server.server_close()
|
||||||
|
server_thread.join(2.0)
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPProxyTestContext(abc.ABC):
|
||||||
|
REQUEST_HANDLER_CLASS = None
|
||||||
|
REQUEST_PROTO = None
|
||||||
|
|
||||||
|
def http_server(self, server_class, *args, **kwargs):
|
||||||
|
return proxy_server(server_class, self.REQUEST_HANDLER_CLASS, *args, **kwargs)
|
||||||
|
|
||||||
|
@abc.abstractmethod
|
||||||
|
def proxy_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs) -> dict:
|
||||||
|
"""return a dict of proxy_info"""
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPProxyHTTPTestContext(HTTPProxyTestContext):
|
||||||
|
# Standard HTTP Proxy for http requests
|
||||||
|
REQUEST_HANDLER_CLASS = HTTPProxyHandler
|
||||||
|
REQUEST_PROTO = 'http'
|
||||||
|
|
||||||
|
def proxy_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs):
|
||||||
|
request = Request(f'http://{target_domain or "127.0.0.1"}:{target_port or "40000"}/proxy_info', **req_kwargs)
|
||||||
|
handler.validate(request)
|
||||||
|
return json.loads(handler.send(request).read().decode())
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPProxyHTTPSTestContext(HTTPProxyTestContext):
|
||||||
|
# HTTP Connect proxy, for https requests
|
||||||
|
REQUEST_HANDLER_CLASS = HTTPSProxyHandler
|
||||||
|
REQUEST_PROTO = 'https'
|
||||||
|
|
||||||
|
def proxy_info_request(self, handler, target_domain=None, target_port=None, **req_kwargs):
|
||||||
|
request = Request(f'https://{target_domain or "127.0.0.1"}:{target_port or "40000"}/proxy_info', **req_kwargs)
|
||||||
|
handler.validate(request)
|
||||||
|
return json.loads(handler.send(request).read().decode())
|
||||||
|
|
||||||
|
|
||||||
|
CTX_MAP = {
|
||||||
|
'http': HTTPProxyHTTPTestContext,
|
||||||
|
'https': HTTPProxyHTTPSTestContext,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture(scope='module')
|
||||||
|
def ctx(request):
|
||||||
|
return CTX_MAP[request.param]()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
'handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
|
||||||
|
@pytest.mark.parametrize('ctx', ['http'], indirect=True) # pure http proxy can only support http
|
||||||
|
class TestHTTPProxy:
|
||||||
|
def test_http_no_auth(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPProxyHandler) as server_address:
|
||||||
|
with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'}) as rh:
|
||||||
|
proxy_info = ctx.proxy_info_request(rh)
|
||||||
|
assert proxy_info['proxy'] == server_address
|
||||||
|
assert proxy_info['connect'] is False
|
||||||
|
assert 'Proxy-Authorization' not in proxy_info['headers']
|
||||||
|
|
||||||
|
def test_http_auth(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPProxyHandler, username='test', password='test') as server_address:
|
||||||
|
with handler(proxies={ctx.REQUEST_PROTO: f'http://test:test@{server_address}'}) as rh:
|
||||||
|
proxy_info = ctx.proxy_info_request(rh)
|
||||||
|
assert proxy_info['proxy'] == server_address
|
||||||
|
assert 'Proxy-Authorization' in proxy_info['headers']
|
||||||
|
|
||||||
|
def test_http_bad_auth(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPProxyHandler, username='test', password='test') as server_address:
|
||||||
|
with handler(proxies={ctx.REQUEST_PROTO: f'http://test:bad@{server_address}'}) as rh:
|
||||||
|
with pytest.raises(HTTPError) as exc_info:
|
||||||
|
ctx.proxy_info_request(rh)
|
||||||
|
assert exc_info.value.response.status == 407
|
||||||
|
exc_info.value.response.close()
|
||||||
|
|
||||||
|
def test_http_source_address(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPProxyHandler) as server_address:
|
||||||
|
source_address = f'127.0.0.{random.randint(5, 255)}'
|
||||||
|
verify_address_availability(source_address)
|
||||||
|
with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'},
|
||||||
|
source_address=source_address) as rh:
|
||||||
|
proxy_info = ctx.proxy_info_request(rh)
|
||||||
|
assert proxy_info['proxy'] == server_address
|
||||||
|
assert proxy_info['client_address'][0] == source_address
|
||||||
|
|
||||||
|
@pytest.mark.skip_handler('Urllib', 'urllib does not support https proxies')
|
||||||
|
def test_https(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPSProxyHandler) as server_address:
|
||||||
|
with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
|
||||||
|
proxy_info = ctx.proxy_info_request(rh)
|
||||||
|
assert proxy_info['proxy'] == server_address
|
||||||
|
assert proxy_info['connect'] is False
|
||||||
|
assert 'Proxy-Authorization' not in proxy_info['headers']
|
||||||
|
|
||||||
|
@pytest.mark.skip_handler('Urllib', 'urllib does not support https proxies')
|
||||||
|
def test_https_verify_failed(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPSProxyHandler) as server_address:
|
||||||
|
with handler(verify=True, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
|
||||||
|
# Accept SSLError as may not be feasible to tell if it is proxy or request error.
|
||||||
|
# note: if request proto also does ssl verification, this may also be the error of the request.
|
||||||
|
# Until we can support passing custom cacerts to handlers, we cannot properly test this for all cases.
|
||||||
|
with pytest.raises((ProxyError, SSLError)):
|
||||||
|
ctx.proxy_info_request(rh)
|
||||||
|
|
||||||
|
def test_http_with_idn(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPProxyHandler) as server_address:
|
||||||
|
with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'}) as rh:
|
||||||
|
proxy_info = ctx.proxy_info_request(rh, target_domain='中文.tw')
|
||||||
|
assert proxy_info['proxy'] == server_address
|
||||||
|
assert proxy_info['path'].startswith('http://xn--fiq228c.tw')
|
||||||
|
assert proxy_info['headers']['Host'].split(':', 1)[0] == 'xn--fiq228c.tw'
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
'handler,ctx', [
|
||||||
|
('Requests', 'https'),
|
||||||
|
('CurlCFFI', 'https'),
|
||||||
|
], indirect=True)
|
||||||
|
class TestHTTPConnectProxy:
|
||||||
|
def test_http_connect_no_auth(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPConnectProxyHandler) as server_address:
|
||||||
|
with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'http://{server_address}'}) as rh:
|
||||||
|
proxy_info = ctx.proxy_info_request(rh)
|
||||||
|
assert proxy_info['proxy'] == server_address
|
||||||
|
assert proxy_info['connect'] is True
|
||||||
|
assert 'Proxy-Authorization' not in proxy_info['headers']
|
||||||
|
|
||||||
|
def test_http_connect_auth(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPConnectProxyHandler, username='test', password='test') as server_address:
|
||||||
|
with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'http://test:test@{server_address}'}) as rh:
|
||||||
|
proxy_info = ctx.proxy_info_request(rh)
|
||||||
|
assert proxy_info['proxy'] == server_address
|
||||||
|
assert 'Proxy-Authorization' in proxy_info['headers']
|
||||||
|
|
||||||
|
@pytest.mark.skip_handler(
|
||||||
|
'Requests',
|
||||||
|
'bug in urllib3 causes unclosed socket: https://github.com/urllib3/urllib3/issues/3374'
|
||||||
|
)
|
||||||
|
def test_http_connect_bad_auth(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPConnectProxyHandler, username='test', password='test') as server_address:
|
||||||
|
with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'http://test:bad@{server_address}'}) as rh:
|
||||||
|
with pytest.raises(ProxyError):
|
||||||
|
ctx.proxy_info_request(rh)
|
||||||
|
|
||||||
|
def test_http_connect_source_address(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPConnectProxyHandler) as server_address:
|
||||||
|
source_address = f'127.0.0.{random.randint(5, 255)}'
|
||||||
|
verify_address_availability(source_address)
|
||||||
|
with handler(proxies={ctx.REQUEST_PROTO: f'http://{server_address}'},
|
||||||
|
source_address=source_address,
|
||||||
|
verify=False) as rh:
|
||||||
|
proxy_info = ctx.proxy_info_request(rh)
|
||||||
|
assert proxy_info['proxy'] == server_address
|
||||||
|
assert proxy_info['client_address'][0] == source_address
|
||||||
|
|
||||||
|
@pytest.mark.skipif(urllib3 is None, reason='requires urllib3 to test')
|
||||||
|
def test_https_connect_proxy(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPSConnectProxyHandler) as server_address:
|
||||||
|
with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
|
||||||
|
proxy_info = ctx.proxy_info_request(rh)
|
||||||
|
assert proxy_info['proxy'] == server_address
|
||||||
|
assert proxy_info['connect'] is True
|
||||||
|
assert 'Proxy-Authorization' not in proxy_info['headers']
|
||||||
|
|
||||||
|
@pytest.mark.skipif(urllib3 is None, reason='requires urllib3 to test')
|
||||||
|
def test_https_connect_verify_failed(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPSConnectProxyHandler) as server_address:
|
||||||
|
with handler(verify=True, proxies={ctx.REQUEST_PROTO: f'https://{server_address}'}) as rh:
|
||||||
|
# Accept SSLError as may not be feasible to tell if it is proxy or request error.
|
||||||
|
# note: if request proto also does ssl verification, this may also be the error of the request.
|
||||||
|
# Until we can support passing custom cacerts to handlers, we cannot properly test this for all cases.
|
||||||
|
with pytest.raises((ProxyError, SSLError)):
|
||||||
|
ctx.proxy_info_request(rh)
|
||||||
|
|
||||||
|
@pytest.mark.skipif(urllib3 is None, reason='requires urllib3 to test')
|
||||||
|
def test_https_connect_proxy_auth(self, handler, ctx):
|
||||||
|
with ctx.http_server(HTTPSConnectProxyHandler, username='test', password='test') as server_address:
|
||||||
|
with handler(verify=False, proxies={ctx.REQUEST_PROTO: f'https://test:test@{server_address}'}) as rh:
|
||||||
|
proxy_info = ctx.proxy_info_request(rh)
|
||||||
|
assert proxy_info['proxy'] == server_address
|
||||||
|
assert 'Proxy-Authorization' in proxy_info['headers']
|
File diff suppressed because it is too large
Load diff
|
@ -286,8 +286,14 @@ def ctx(request):
|
||||||
return CTX_MAP[request.param]()
|
return CTX_MAP[request.param]()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
|
||||||
|
'handler,ctx', [
|
||||||
|
('Urllib', 'http'),
|
||||||
|
('Requests', 'http'),
|
||||||
|
('Websockets', 'ws'),
|
||||||
|
('CurlCFFI', 'http')
|
||||||
|
], indirect=True)
|
||||||
class TestSocks4Proxy:
|
class TestSocks4Proxy:
|
||||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
|
||||||
def test_socks4_no_auth(self, handler, ctx):
|
def test_socks4_no_auth(self, handler, ctx):
|
||||||
with handler() as rh:
|
with handler() as rh:
|
||||||
with ctx.socks_server(Socks4ProxyHandler) as server_address:
|
with ctx.socks_server(Socks4ProxyHandler) as server_address:
|
||||||
|
@ -295,7 +301,6 @@ def test_socks4_no_auth(self, handler, ctx):
|
||||||
rh, proxies={'all': f'socks4://{server_address}'})
|
rh, proxies={'all': f'socks4://{server_address}'})
|
||||||
assert response['version'] == 4
|
assert response['version'] == 4
|
||||||
|
|
||||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
|
||||||
def test_socks4_auth(self, handler, ctx):
|
def test_socks4_auth(self, handler, ctx):
|
||||||
with handler() as rh:
|
with handler() as rh:
|
||||||
with ctx.socks_server(Socks4ProxyHandler, user_id='user') as server_address:
|
with ctx.socks_server(Socks4ProxyHandler, user_id='user') as server_address:
|
||||||
|
@ -305,7 +310,6 @@ def test_socks4_auth(self, handler, ctx):
|
||||||
rh, proxies={'all': f'socks4://user:@{server_address}'})
|
rh, proxies={'all': f'socks4://user:@{server_address}'})
|
||||||
assert response['version'] == 4
|
assert response['version'] == 4
|
||||||
|
|
||||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
|
||||||
def test_socks4a_ipv4_target(self, handler, ctx):
|
def test_socks4a_ipv4_target(self, handler, ctx):
|
||||||
with ctx.socks_server(Socks4ProxyHandler) as server_address:
|
with ctx.socks_server(Socks4ProxyHandler) as server_address:
|
||||||
with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:
|
with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:
|
||||||
|
@ -313,7 +317,6 @@ def test_socks4a_ipv4_target(self, handler, ctx):
|
||||||
assert response['version'] == 4
|
assert response['version'] == 4
|
||||||
assert (response['ipv4_address'] == '127.0.0.1') != (response['domain_address'] == '127.0.0.1')
|
assert (response['ipv4_address'] == '127.0.0.1') != (response['domain_address'] == '127.0.0.1')
|
||||||
|
|
||||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
|
||||||
def test_socks4a_domain_target(self, handler, ctx):
|
def test_socks4a_domain_target(self, handler, ctx):
|
||||||
with ctx.socks_server(Socks4ProxyHandler) as server_address:
|
with ctx.socks_server(Socks4ProxyHandler) as server_address:
|
||||||
with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:
|
with handler(proxies={'all': f'socks4a://{server_address}'}) as rh:
|
||||||
|
@ -322,7 +325,6 @@ def test_socks4a_domain_target(self, handler, ctx):
|
||||||
assert response['ipv4_address'] is None
|
assert response['ipv4_address'] is None
|
||||||
assert response['domain_address'] == 'localhost'
|
assert response['domain_address'] == 'localhost'
|
||||||
|
|
||||||
@pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
|
|
||||||
     def test_ipv4_client_source_address(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler) as server_address:
             source_address = f'127.0.0.{random.randint(5, 255)}'
@ -333,7 +335,6 @@ def test_ipv4_client_source_address(self, handler, ctx):
             assert response['client_address'][0] == source_address
             assert response['version'] == 4

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     @pytest.mark.parametrize('reply_code', [
         Socks4CD.REQUEST_REJECTED_OR_FAILED,
         Socks4CD.REQUEST_REJECTED_CANNOT_CONNECT_TO_IDENTD,
@ -345,7 +346,6 @@ def test_socks4_errors(self, handler, ctx, reply_code):
             with pytest.raises(ProxyError):
                 ctx.socks_info_request(rh)

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_ipv6_socks4_proxy(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler, bind_ip='::1') as server_address:
             with handler(proxies={'all': f'socks4://{server_address}'}) as rh:
@ -354,7 +354,6 @@ def test_ipv6_socks4_proxy(self, handler, ctx):
             assert response['ipv4_address'] == '127.0.0.1'
             assert response['version'] == 4

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_timeout(self, handler, ctx):
         with ctx.socks_server(Socks4ProxyHandler, sleep=2) as server_address:
             with handler(proxies={'all': f'socks4://{server_address}'}, timeout=0.5) as rh:
@ -362,9 +361,15 @@ def test_timeout(self, handler, ctx):
                 ctx.socks_info_request(rh)


+@pytest.mark.parametrize(
+    'handler,ctx', [
+        ('Urllib', 'http'),
+        ('Requests', 'http'),
+        ('Websockets', 'ws'),
+        ('CurlCFFI', 'http')
+    ], indirect=True)
 class TestSocks5Proxy:

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5_no_auth(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@ -372,7 +377,6 @@ def test_socks5_no_auth(self, handler, ctx):
             assert response['auth_methods'] == [0x0]
             assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5_user_pass(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler, auth=('test', 'testpass')) as server_address:
             with handler() as rh:
@ -385,7 +389,6 @@ def test_socks5_user_pass(self, handler, ctx):
             assert response['auth_methods'] == [Socks5Auth.AUTH_NONE, Socks5Auth.AUTH_USER_PASS]
             assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5_ipv4_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@ -393,7 +396,6 @@ def test_socks5_ipv4_target(self, handler, ctx):
             assert response['ipv4_address'] == '127.0.0.1'
             assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5_domain_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@ -401,7 +403,6 @@ def test_socks5_domain_target(self, handler, ctx):
             assert (response['ipv4_address'] == '127.0.0.1') != (response['ipv6_address'] == '::1')
             assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5h_domain_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5h://{server_address}'}) as rh:
@ -410,7 +411,6 @@ def test_socks5h_domain_target(self, handler, ctx):
             assert response['domain_address'] == 'localhost'
             assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5h_ip_target(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5h://{server_address}'}) as rh:
@ -419,7 +419,6 @@ def test_socks5h_ip_target(self, handler, ctx):
             assert response['domain_address'] is None
             assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_socks5_ipv6_destination(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@ -427,7 +426,6 @@ def test_socks5_ipv6_destination(self, handler, ctx):
             assert response['ipv6_address'] == '::1'
             assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_ipv6_socks5_proxy(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler, bind_ip='::1') as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}) as rh:
@ -438,7 +436,6 @@ def test_ipv6_socks5_proxy(self, handler, ctx):

     # XXX: is there any feasible way of testing IPv6 source addresses?
     # Same would go for non-proxy source_address test...
-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_ipv4_client_source_address(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler) as server_address:
             source_address = f'127.0.0.{random.randint(5, 255)}'
@ -448,7 +445,6 @@ def test_ipv4_client_source_address(self, handler, ctx):
             assert response['client_address'][0] == source_address
             assert response['version'] == 5

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Requests', 'http'), ('Websockets', 'ws')], indirect=True)
     @pytest.mark.parametrize('reply_code', [
         Socks5Reply.GENERAL_FAILURE,
         Socks5Reply.CONNECTION_NOT_ALLOWED,
@ -465,7 +461,6 @@ def test_socks5_errors(self, handler, ctx, reply_code):
             with pytest.raises(ProxyError):
                 ctx.socks_info_request(rh)

-    @pytest.mark.parametrize('handler,ctx', [('Urllib', 'http'), ('Websockets', 'ws')], indirect=True)
     def test_timeout(self, handler, ctx):
         with ctx.socks_server(Socks5ProxyHandler, sleep=2) as server_address:
             with handler(proxies={'all': f'socks5://{server_address}'}, timeout=1) as rh:
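The recurring removals above strip the per-method `@pytest.mark.parametrize('handler,ctx', ...)` decorators, and the added block applies one class-level decorator instead (now including `CurlCFFI`). As a minimal sketch of the pytest mechanics this relies on, with hypothetical fixture and test names that are not code from this commit: a class-level `parametrize` applies to every test method in the class, and `indirect=True` routes each value through the named fixture rather than passing it to the test directly.

    import pytest

    @pytest.fixture
    def handler(request):
        # With indirect=True, each parametrized value arrives here as
        # request.param and could be resolved into a real handler object
        return request.param

    @pytest.mark.parametrize('handler', ['Urllib', 'Requests', 'CurlCFFI'], indirect=True)
    class TestExample:
        def test_runs_once_per_handler(self, handler):
            assert handler in ('Urllib', 'Requests', 'CurlCFFI')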

444
test/test_traversal.py
Normal file

@ -0,0 +1,444 @@
import http.cookies
import re
import xml.etree.ElementTree

import pytest

from yt_dlp.utils import dict_get, int_or_none, str_or_none
from yt_dlp.utils.traversal import traverse_obj

_TEST_DATA = {
    100: 100,
    1.2: 1.2,
    'str': 'str',
    'None': None,
    '...': ...,
    'urls': [
        {'index': 0, 'url': 'https://www.example.com/0'},
        {'index': 1, 'url': 'https://www.example.com/1'},
    ],
    'data': (
        {'index': 2},
        {'index': 3},
    ),
    'dict': {},
}


class TestTraversal:
    def test_traversal_base(self):
        assert traverse_obj(_TEST_DATA, ('str',)) == 'str', \
            'allow tuple path'
        assert traverse_obj(_TEST_DATA, ['str']) == 'str', \
            'allow list path'
        assert traverse_obj(_TEST_DATA, (value for value in ("str",))) == 'str', \
            'allow iterable path'
        assert traverse_obj(_TEST_DATA, 'str') == 'str', \
            'single items should be treated as a path'
        assert traverse_obj(_TEST_DATA, 100) == 100, \
            'allow int path'
        assert traverse_obj(_TEST_DATA, 1.2) == 1.2, \
            'allow float path'
        assert traverse_obj(_TEST_DATA, None) == _TEST_DATA, \
            '`None` should not perform any modification'

    def test_traversal_ellipsis(self):
        assert traverse_obj(_TEST_DATA, ...) == [x for x in _TEST_DATA.values() if x not in (None, {})], \
            '`...` should give all non discarded values'
        assert traverse_obj(_TEST_DATA, ('urls', 0, ...)) == list(_TEST_DATA['urls'][0].values()), \
            '`...` selection for dicts should select all values'
        assert traverse_obj(_TEST_DATA, (..., ..., 'url')) == ['https://www.example.com/0', 'https://www.example.com/1'], \
            'nested `...` queries should work'
        assert traverse_obj(_TEST_DATA, (..., ..., 'index')) == list(range(4)), \
            '`...` query result should be flattened'
        assert traverse_obj(iter(range(4)), ...) == list(range(4)), \
            '`...` should accept iterables'

    def test_traversal_function(self):
        filter_func = lambda x, y: x == 'urls' and isinstance(y, list)
        assert traverse_obj(_TEST_DATA, filter_func) == [_TEST_DATA['urls']], \
            'function as query key should perform a filter based on (key, value)'
        assert traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)) == ['str'], \
            'exceptions in the query function should be caught'
        assert traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0) == [0, 2], \
            'function key should accept iterables'
        # Wrong function signature should raise (debug mode)
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, lambda a: ...)
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, lambda a, b, c: ...)

    def test_traversal_set(self):
        # transformation/type, like `expected_type`
        assert traverse_obj(_TEST_DATA, (..., {str.upper}, )) == ['STR'], \
            'Function in set should be a transformation'
        assert traverse_obj(_TEST_DATA, (..., {str})) == ['str'], \
            'Type in set should be a type filter'
        assert traverse_obj(_TEST_DATA, (..., {str, int})) == [100, 'str'], \
            'Multiple types in set should be a type filter'
        assert traverse_obj(_TEST_DATA, {dict}) == _TEST_DATA, \
            'A single set should be wrapped into a path'
        assert traverse_obj(_TEST_DATA, (..., {str.upper})) == ['STR'], \
            'Transformation function should not raise'
        expected = [x for x in map(str_or_none, _TEST_DATA.values()) if x is not None]
        assert traverse_obj(_TEST_DATA, (..., {str_or_none})) == expected, \
            'Function in set should be a transformation'
        assert traverse_obj(_TEST_DATA, ('fail', {lambda _: 'const'})) == 'const', \
            'Function in set should always be called'
        # Sets with length < 1 or > 1 not including only types should raise
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, set())
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, {str.upper, str})

    def test_traversal_slice(self):
        _SLICE_DATA = [0, 1, 2, 3, 4]

        assert traverse_obj(_TEST_DATA, ('dict', slice(1))) is None, \
            'slice on a dictionary should not throw'
        assert traverse_obj(_SLICE_DATA, slice(1)) == _SLICE_DATA[:1], \
            'slice key should apply slice to sequence'
        assert traverse_obj(_SLICE_DATA, slice(1, 2)) == _SLICE_DATA[1:2], \
            'slice key should apply slice to sequence'
        assert traverse_obj(_SLICE_DATA, slice(1, 4, 2)) == _SLICE_DATA[1:4:2], \
            'slice key should apply slice to sequence'

    def test_traversal_alternatives(self):
        assert traverse_obj(_TEST_DATA, 'fail', 'str') == 'str', \
            'multiple `paths` should be treated as alternative paths'
        assert traverse_obj(_TEST_DATA, 'str', 100) == 'str', \
            'alternatives should exit early'
        assert traverse_obj(_TEST_DATA, 'fail', 'fail') is None, \
            'alternatives should return `default` if exhausted'
        assert traverse_obj(_TEST_DATA, (..., 'fail'), 100) == 100, \
            'alternatives should track their own branching return'
        assert traverse_obj(_TEST_DATA, ('dict', ...), ('data', ...)) == list(_TEST_DATA['data']), \
            'alternatives on empty objects should search further'

    def test_traversal_branching_nesting(self):
        assert traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')) == ['https://www.example.com/0'], \
            'tuple as key should be treated as branches'
        assert traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')) == ['https://www.example.com/0'], \
            'list as key should be treated as branches'
        assert traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))) == ['https://www.example.com/0'], \
            'double nesting in path should be treated as paths'
        assert traverse_obj(['0', [1, 2]], [(0, 1), 0]) == [1], \
            'do not fail early on branching'
        expected = ['https://www.example.com/0', 'https://www.example.com/1']
        assert traverse_obj(_TEST_DATA, ('urls', ((0, ('fail', 'url')), (1, 'url')))) == expected, \
            'triple nesting in path should be treated as branches'
        assert traverse_obj(_TEST_DATA, ('urls', ('fail', (..., 'url')))) == expected, \
            'ellipsis as branch path start gets flattened'

    def test_traversal_dict(self):
        assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}) == {0: 100, 1: 1.2}, \
            'dict key should result in a dict with the same keys'
        expected = {0: 'https://www.example.com/0'}
        assert traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}) == expected, \
            'dict key should allow paths'
        expected = {0: ['https://www.example.com/0']}
        assert traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}) == expected, \
            'tuple in dict path should be treated as branches'
        assert traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}) == expected, \
            'double nesting in dict path should be treated as paths'
        expected = {0: ['https://www.example.com/1', 'https://www.example.com/0']}
        assert traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}) == expected, \
            'triple nesting in dict path should be treated as branches'
        assert traverse_obj(_TEST_DATA, {0: 'fail'}) == {}, \
            'remove `None` values when top level dict key fails'
        assert traverse_obj(_TEST_DATA, {0: 'fail'}, default=...) == {0: ...}, \
            'use `default` if key fails and `default`'
        assert traverse_obj(_TEST_DATA, {0: 'dict'}) == {}, \
            'remove empty values when dict key'
        assert traverse_obj(_TEST_DATA, {0: 'dict'}, default=...) == {0: ...}, \
            'use `default` when dict key and `default`'
        assert traverse_obj(_TEST_DATA, {0: {0: 'fail'}}) == {}, \
            'remove empty values when nested dict key fails'
        assert traverse_obj(None, {0: 'fail'}) == {}, \
            'default to dict if pruned'
        assert traverse_obj(None, {0: 'fail'}, default=...) == {0: ...}, \
            'default to dict if pruned and default is given'
        assert traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=...) == {0: {0: ...}}, \
            'use nested `default` when nested dict key fails and `default`'
        assert traverse_obj(_TEST_DATA, {0: ('dict', ...)}) == {}, \
            'remove key if branch in dict key not successful'

    def test_traversal_default(self):
        _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}

        assert traverse_obj(_DEFAULT_DATA, 'fail') is None, \
            'default value should be `None`'
        assert traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=...) == ..., \
            'chained fails should result in default'
        assert traverse_obj(_DEFAULT_DATA, 'None', 'int') == 0, \
            'should not short circuit on `None`'
        assert traverse_obj(_DEFAULT_DATA, 'fail', default=1) == 1, \
            'invalid dict key should result in `default`'
        assert traverse_obj(_DEFAULT_DATA, 'None', default=1) == 1, \
            '`None` is a deliberate sentinel and should become `default`'
        assert traverse_obj(_DEFAULT_DATA, ('list', 10)) is None, \
            '`IndexError` should result in `default`'
        assert traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=1) == 1, \
            'if branched but not successful return `default` if defined, not `[]`'
        assert traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=None) is None, \
            'if branched but not successful return `default` even if `default` is `None`'
        assert traverse_obj(_DEFAULT_DATA, (..., 'fail')) == [], \
            'if branched but not successful return `[]`, not `default`'
        assert traverse_obj(_DEFAULT_DATA, ('list', ...)) == [], \
            'if branched but object is empty return `[]`, not `default`'
        assert traverse_obj(None, ...) == [], \
            'if branched but object is `None` return `[]`, not `default`'
        assert traverse_obj({0: None}, (0, ...)) == [], \
            'if branched but state is `None` return `[]`, not `default`'

    @pytest.mark.parametrize('path', [
        ('fail', ...),
        (..., 'fail'),
        100 * ('fail',) + (...,),
        (...,) + 100 * ('fail',),
    ])
    def test_traversal_branching(self, path):
        assert traverse_obj({}, path) == [], \
            'if branched but state is `None`, return `[]` (not `default`)'
        assert traverse_obj({}, 'fail', path) == [], \
            'if branching in last alternative and previous did not match, return `[]` (not `default`)'
        assert traverse_obj({0: 'x'}, 0, path) == 'x', \
            'if branching in last alternative and previous did match, return single value'
        assert traverse_obj({0: 'x'}, path, 0) == 'x', \
            'if branching in first alternative and non-branching path does match, return single value'
        assert traverse_obj({}, path, 'fail') is None, \
            'if branching in first alternative and non-branching path does not match, return `default`'

    def test_traversal_expected_type(self):
        _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}

        assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str) == 'str', \
            'accept matching `expected_type` type'
        assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int) is None, \
            'reject non matching `expected_type` type'
        assert traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)) == '0', \
            'transform type using type function'
        assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0) is None, \
            'wrap expected_type function in try_call'
        assert traverse_obj(_EXPECTED_TYPE_DATA, ..., expected_type=str) == ['str'], \
            'eliminate items that expected_type fails on'
        assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int) == {0: 100}, \
            'type as expected_type should filter dict values'
        assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none) == {0: '100', 1: '1.2'}, \
            'function as expected_type should transform dict values'
        assert traverse_obj(_TEST_DATA, ({0: 1.2}, 0, {int_or_none}), expected_type=int) == 1, \
            'expected_type should not filter non final dict values'
        assert traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int) == {0: {0: 100}}, \
            'expected_type should transform deep dict values'
        assert traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(...)) == [{0: ...}, {0: ...}], \
            'expected_type should transform branched dict values'
        assert traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int) == [4], \
            'expected_type regression for type matching in tuple branching'
        assert traverse_obj(_TEST_DATA, ['data', ...], expected_type=int) == [], \
            'expected_type regression for type matching in dict result'

    def test_traversal_get_all(self):
        _GET_ALL_DATA = {'key': [0, 1, 2]}

        assert traverse_obj(_GET_ALL_DATA, ('key', ...), get_all=False) == 0, \
            'if not `get_all`, return only first matching value'
        assert traverse_obj(_GET_ALL_DATA, ..., get_all=False) == [0, 1, 2], \
            'do not overflatten if not `get_all`'

    def test_traversal_casesense(self):
        _CASESENSE_DATA = {
            'KeY': 'value0',
            0: {
                'KeY': 'value1',
                0: {'KeY': 'value2'},
            },
        }

        assert traverse_obj(_CASESENSE_DATA, 'key') is None, \
            'dict keys should be case sensitive unless `casesense`'
        assert traverse_obj(_CASESENSE_DATA, 'keY', casesense=False) == 'value0', \
            'allow non matching key case if `casesense`'
        assert traverse_obj(_CASESENSE_DATA, [0, ('keY',)], casesense=False) == ['value1'], \
            'allow non matching key case in branch if `casesense`'
        assert traverse_obj(_CASESENSE_DATA, [0, ([0, 'keY'],)], casesense=False) == ['value2'], \
            'allow non matching key case in branch path if `casesense`'

    def test_traversal_traverse_string(self):
        _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2}

        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)) is None, \
            'do not traverse into string if not `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0), traverse_string=True) == 's', \
            'traverse into string if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1), traverse_string=True) == '.', \
            'traverse into converted data if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', ...), traverse_string=True) == 'str', \
            '`...` should result in string (same value) if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)), traverse_string=True) == 'sr', \
            '`slice` should result in string if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == "s"), traverse_string=True) == 'str', \
            'function should result in string if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), traverse_string=True) == ['s', 'r'], \
            'branching should result in list if `traverse_string`'
        assert traverse_obj({}, (0, ...), traverse_string=True) == [], \
            'branching should result in list if `traverse_string`'
        assert traverse_obj({}, (0, lambda x, y: True), traverse_string=True) == [], \
            'branching should result in list if `traverse_string`'
        assert traverse_obj({}, (0, slice(1)), traverse_string=True) == [], \
            'branching should result in list if `traverse_string`'

    def test_traversal_re(self):
        mobj = re.fullmatch(r'0(12)(?P<group>3)(4)?', '0123')
        assert traverse_obj(mobj, ...) == [x for x in mobj.groups() if x is not None], \
            '`...` on a `re.Match` should give its `groups()`'
        assert traverse_obj(mobj, lambda k, _: k in (0, 2)) == ['0123', '3'], \
            'function on a `re.Match` should give groupno, value starting at 0'
        assert traverse_obj(mobj, 'group') == '3', \
            'str key on a `re.Match` should give group with that name'
        assert traverse_obj(mobj, 2) == '3', \
            'int key on a `re.Match` should give group with that name'
        assert traverse_obj(mobj, 'gRoUp', casesense=False) == '3', \
            'str key on a `re.Match` should respect casesense'
        assert traverse_obj(mobj, 'fail') is None, \
            'failing str key on a `re.Match` should return `default`'
        assert traverse_obj(mobj, 'gRoUpS', casesense=False) is None, \
            'failing str key on a `re.Match` should return `default`'
        assert traverse_obj(mobj, 8) is None, \
            'failing int key on a `re.Match` should return `default`'
        assert traverse_obj(mobj, lambda k, _: k in (0, 'group')) == ['0123', '3'], \
            'function on a `re.Match` should give group name as well'

    def test_traversal_xml_etree(self):
        etree = xml.etree.ElementTree.fromstring('''<?xml version="1.0"?>
        <data>
            <country name="Liechtenstein">
                <rank>1</rank>
                <year>2008</year>
                <gdppc>141100</gdppc>
                <neighbor name="Austria" direction="E"/>
                <neighbor name="Switzerland" direction="W"/>
            </country>
            <country name="Singapore">
                <rank>4</rank>
                <year>2011</year>
                <gdppc>59900</gdppc>
                <neighbor name="Malaysia" direction="N"/>
            </country>
            <country name="Panama">
                <rank>68</rank>
                <year>2011</year>
                <gdppc>13600</gdppc>
                <neighbor name="Costa Rica" direction="W"/>
                <neighbor name="Colombia" direction="E"/>
            </country>
        </data>''')
        assert traverse_obj(etree, '') == etree, \
            'empty str key should return the element itself'
        assert traverse_obj(etree, 'country') == list(etree), \
            'str key should lead all children with that tag name'
        assert traverse_obj(etree, ...) == list(etree), \
            '`...` as key should return all children'
        assert traverse_obj(etree, lambda _, x: x[0].text == '4') == [etree[1]], \
            'function as key should get element as value'
        assert traverse_obj(etree, lambda i, _: i == 1) == [etree[1]], \
            'function as key should get index as key'
        assert traverse_obj(etree, 0) == etree[0], \
            'int key should return the nth child'
        expected = ['Austria', 'Switzerland', 'Malaysia', 'Costa Rica', 'Colombia']
        assert traverse_obj(etree, './/neighbor/@name') == expected, \
            '`@<attribute>` at end of path should give that attribute'
        assert traverse_obj(etree, '//neighbor/@fail') == [None, None, None, None, None], \
            '`@<nonexistent>` at end of path should give `None`'
        assert traverse_obj(etree, ('//neighbor/@', 2)) == {'name': 'Malaysia', 'direction': 'N'}, \
            '`@` should give the full attribute dict'
        assert traverse_obj(etree, '//year/text()') == ['2008', '2011', '2011'], \
            '`text()` at end of path should give the inner text'
        assert traverse_obj(etree, '//*[@direction]/@direction') == ['E', 'W', 'N', 'W', 'E'], \
            'full Python xpath features should be supported'
        assert traverse_obj(etree, (0, '@name')) == 'Liechtenstein', \
            'special transformations should act on current element'
        assert traverse_obj(etree, ('country', 0, ..., 'text()', {int_or_none})) == [1, 2008, 141100], \
            'special transformations should act on current element'

    def test_traversal_unbranching(self):
        assert traverse_obj(_TEST_DATA, [(100, 1.2), all]) == [100, 1.2], \
            '`all` should give all results as list'
        assert traverse_obj(_TEST_DATA, [(100, 1.2), any]) == 100, \
            '`any` should give the first result'
        assert traverse_obj(_TEST_DATA, [100, all]) == [100], \
            '`all` should give list if non branching'
        assert traverse_obj(_TEST_DATA, [100, any]) == 100, \
            '`any` should give single item if non branching'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100), all]) == [100], \
            '`all` should filter `None` and empty dict'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100), any]) == 100, \
            '`any` should filter `None` and empty dict'
        assert traverse_obj(_TEST_DATA, [{
            'all': [('dict', 'None', 100, 1.2), all],
            'any': [('dict', 'None', 100, 1.2), any],
        }]) == {'all': [100, 1.2], 'any': 100}, \
            '`all`/`any` should apply to each dict path separately'
        assert traverse_obj(_TEST_DATA, [{
            'all': [('dict', 'None', 100, 1.2), all],
            'any': [('dict', 'None', 100, 1.2), any],
        }], get_all=False) == {'all': [100, 1.2], 'any': 100}, \
            '`all`/`any` should apply to dict regardless of `get_all`'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, {float}]) is None, \
            '`all` should reset branching status'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), any, {float}]) is None, \
            '`any` should reset branching status'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, ..., {float}]) == [1.2], \
            '`all` should allow further branching'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 'urls', 'data'), any, ..., 'index']) == [0, 1], \
            '`any` should allow further branching'

    def test_traversal_morsel(self):
        values = {
            'expires': 'a',
            'path': 'b',
            'comment': 'c',
            'domain': 'd',
            'max-age': 'e',
            'secure': 'f',
            'httponly': 'g',
            'version': 'h',
            'samesite': 'i',
        }
        morsel = http.cookies.Morsel()
        morsel.set('item_key', 'item_value', 'coded_value')
        morsel.update(values)
        values['key'] = 'item_key'
        values['value'] = 'item_value'

        for key, value in values.items():
            assert traverse_obj(morsel, key) == value, \
                'Morsel should provide access to all values'
        assert traverse_obj(morsel, ...) == list(values.values()), \
            '`...` should yield all values'
        assert traverse_obj(morsel, lambda k, v: True) == list(values.values()), \
            'function key should yield all values'
        assert traverse_obj(morsel, [(None,), any]) == morsel, \
            'Morsel should not be implicitly changed to dict on usage'


class TestDictGet:
    def test_dict_get(self):
        FALSE_VALUES = {
            'none': None,
            'false': False,
            'zero': 0,
            'empty_string': '',
            'empty_list': [],
        }
        d = {**FALSE_VALUES, 'a': 42}
        assert dict_get(d, 'a') == 42
        assert dict_get(d, 'b') is None
        assert dict_get(d, 'b', 42) == 42
        assert dict_get(d, ('a',)) == 42
        assert dict_get(d, ('b', 'a')) == 42
        assert dict_get(d, ('b', 'c', 'a', 'd')) == 42
        assert dict_get(d, ('b', 'c')) is None
        assert dict_get(d, ('b', 'c'), 42) == 42
        for key, false_value in FALSE_VALUES.items():
            assert dict_get(d, ('b', 'c', key)) is None
            assert dict_get(d, ('b', 'c', key), skip_false_values=False) == false_value
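The new file groups what used to be one monolithic `test_traverse_obj` method into focused `TestTraversal` methods, and its assertions double as documentation of the traversal API. A hedged sketch of how these primitives combine in extractor-style code (the response data below is invented for illustration):

    from yt_dlp.utils import int_or_none
    from yt_dlp.utils.traversal import traverse_obj

    api_response = {
        'items': [
            {'id': '1', 'snippet': {'title': 'First', 'duration': '123'}},
            {'id': '2', 'snippet': {}},  # missing keys are pruned, not errors
        ],
    }

    # `...` branches over all items; branches that fail are dropped
    assert traverse_obj(api_response, ('items', ..., 'snippet', 'title')) == ['First']

    # a one-element set applies a transformation, as test_traversal_set asserts
    assert traverse_obj(api_response, ('items', 0, 'snippet', 'duration', {int_or_none})) == 123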
test/test_utils.py

@ -2,10 +2,10 @@

 # Allow direct execution
 import os
-import re
 import sys
 import unittest
 import warnings
+import datetime as dt

 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

@ -28,6 +28,7 @@
     ExtractorError,
     InAdvancePagedList,
     LazyList,
+    NO_DEFAULT,
     OnDemandPagedList,
     Popen,
     age_restricted,
@ -45,7 +46,6 @@
     determine_ext,
     determine_file_encoding,
     dfxp2srt,
-    dict_get,
     encode_base_n,
     encode_compat_str,
     encodeFilename,
@ -106,13 +106,11 @@
     sanitize_url,
     shell_quote,
     smuggle_url,
-    str_or_none,
     str_to_int,
     strip_jsonp,
     strip_or_none,
     subtitles_filename,
     timeconvert,
-    traverse_obj,
     try_call,
     unescapeHTML,
     unified_strdate,
@ -755,28 +753,6 @@ def test_multipart_encode(self):
         self.assertRaises(
             ValueError, multipart_encode, {b'field': b'value'}, boundary='value')

-    def test_dict_get(self):
-        FALSE_VALUES = {
-            'none': None,
-            'false': False,
-            'zero': 0,
-            'empty_string': '',
-            'empty_list': [],
-        }
-        d = FALSE_VALUES.copy()
-        d['a'] = 42
-        self.assertEqual(dict_get(d, 'a'), 42)
-        self.assertEqual(dict_get(d, 'b'), None)
-        self.assertEqual(dict_get(d, 'b', 42), 42)
-        self.assertEqual(dict_get(d, ('a', )), 42)
-        self.assertEqual(dict_get(d, ('b', 'a', )), 42)
-        self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
-        self.assertEqual(dict_get(d, ('b', 'c', )), None)
-        self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
-        for key, false_value in FALSE_VALUES.items():
-            self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
-            self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
-
     def test_merge_dicts(self):
         self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2})
         self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1})
@ -794,6 +770,11 @@ def test_encode_compat_str(self):

     def test_parse_iso8601(self):
         self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
+        self.assertEqual(parse_iso8601('2014-03-23T23:04:26-07:00'), 1395641066)
+        self.assertEqual(parse_iso8601('2014-03-23T23:04:26', timezone=dt.timedelta(hours=-7)), 1395641066)
+        self.assertEqual(parse_iso8601('2014-03-23T23:04:26', timezone=NO_DEFAULT), None)
+        # default does not override timezone in date_str
+        self.assertEqual(parse_iso8601('2014-03-23T23:04:26-07:00', timezone=dt.timedelta(hours=-10)), 1395641066)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
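The new `parse_iso8601` assertions pin down the precedence rule spelled out in the comment: an explicit `timezone` argument only fills in when the date string carries no UTC offset of its own. A stdlib-only sketch of the arithmetic behind the expected values (illustrative code, not yt-dlp's implementation):

    import datetime as dt

    # '2014-03-23T23:04:26-07:00' is 06:04:26 UTC on March 24th
    explicit = dt.datetime(2014, 3, 23, 23, 4, 26,
                           tzinfo=dt.timezone(dt.timedelta(hours=-7)))
    assert explicit.timestamp() == 1395641066

    # The same wall-clock time with the offset supplied as a fallback
    # timezone yields the identical epoch value
    fallback = dt.datetime(2014, 3, 23, 23, 4, 26).replace(
        tzinfo=dt.timezone(dt.timedelta(hours=-7)))
    assert fallback.timestamp() == 1395641066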
|
@ -2039,359 +2020,6 @@ def test_variadic(self):
|
||||||
warnings.simplefilter('ignore')
|
warnings.simplefilter('ignore')
|
||||||
self.assertEqual(variadic('spam', allowed_types=[dict]), 'spam')
|
self.assertEqual(variadic('spam', allowed_types=[dict]), 'spam')
|
||||||
|
|
||||||
def test_traverse_obj(self):
|
|
||||||
_TEST_DATA = {
|
|
||||||
100: 100,
|
|
||||||
1.2: 1.2,
|
|
||||||
'str': 'str',
|
|
||||||
'None': None,
|
|
||||||
'...': ...,
|
|
||||||
'urls': [
|
|
||||||
{'index': 0, 'url': 'https://www.example.com/0'},
|
|
||||||
{'index': 1, 'url': 'https://www.example.com/1'},
|
|
||||||
],
|
|
||||||
'data': (
|
|
||||||
{'index': 2},
|
|
||||||
{'index': 3},
|
|
||||||
),
|
|
||||||
'dict': {},
|
|
||||||
}
|
|
||||||
|
|
||||||
# Test base functionality
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ('str',)), 'str',
|
|
||||||
msg='allow tuple path')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ['str']), 'str',
|
|
||||||
msg='allow list path')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, (value for value in ("str",))), 'str',
|
|
||||||
msg='allow iterable path')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, 'str'), 'str',
|
|
||||||
msg='single items should be treated as a path')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, None), _TEST_DATA)
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, 100), 100)
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, 1.2), 1.2)
|
|
||||||
|
|
||||||
# Test Ellipsis behavior
|
|
||||||
self.assertCountEqual(traverse_obj(_TEST_DATA, ...),
|
|
||||||
(item for item in _TEST_DATA.values() if item not in (None, {})),
|
|
||||||
msg='`...` should give all non discarded values')
|
|
||||||
self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', 0, ...)), _TEST_DATA['urls'][0].values(),
|
|
||||||
msg='`...` selection for dicts should select all values')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, (..., ..., 'url')),
|
|
||||||
['https://www.example.com/0', 'https://www.example.com/1'],
|
|
||||||
msg='nested `...` queries should work')
|
|
||||||
self.assertCountEqual(traverse_obj(_TEST_DATA, (..., ..., 'index')), range(4),
|
|
||||||
msg='`...` query result should be flattened')
|
|
||||||
self.assertEqual(traverse_obj(iter(range(4)), ...), list(range(4)),
|
|
||||||
msg='`...` should accept iterables')
|
|
||||||
|
|
||||||
# Test function as key
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, lambda x, y: x == 'urls' and isinstance(y, list)),
|
|
||||||
[_TEST_DATA['urls']],
|
|
||||||
msg='function as query key should perform a filter based on (key, value)')
|
|
||||||
self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)), {'str'},
|
|
||||||
msg='exceptions in the query function should be catched')
|
|
||||||
self.assertEqual(traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0), [0, 2],
|
|
||||||
msg='function key should accept iterables')
|
|
||||||
if __debug__:
|
|
||||||
with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
|
|
||||||
traverse_obj(_TEST_DATA, lambda a: ...)
|
|
||||||
with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
|
|
||||||
traverse_obj(_TEST_DATA, lambda a, b, c: ...)
|
|
||||||
|
|
||||||
# Test set as key (transformation/type, like `expected_type`)
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, (..., {str.upper}, )), ['STR'],
|
|
||||||
msg='Function in set should be a transformation')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, (..., {str})), ['str'],
|
|
||||||
msg='Type in set should be a type filter')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {dict}), _TEST_DATA,
|
|
||||||
msg='A single set should be wrapped into a path')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, (..., {str.upper})), ['STR'],
|
|
||||||
msg='Transformation function should not raise')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, (..., {str_or_none})),
|
|
||||||
[item for item in map(str_or_none, _TEST_DATA.values()) if item is not None],
|
|
||||||
msg='Function in set should be a transformation')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ('fail', {lambda _: 'const'})), 'const',
|
|
||||||
msg='Function in set should always be called')
|
|
||||||
if __debug__:
|
|
||||||
with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
|
|
||||||
traverse_obj(_TEST_DATA, set())
|
|
||||||
with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
|
|
||||||
traverse_obj(_TEST_DATA, {str.upper, str})
|
|
||||||
|
|
||||||
# Test `slice` as a key
|
|
||||||
_SLICE_DATA = [0, 1, 2, 3, 4]
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ('dict', slice(1))), None,
|
|
||||||
msg='slice on a dictionary should not throw')
|
|
||||||
self.assertEqual(traverse_obj(_SLICE_DATA, slice(1)), _SLICE_DATA[:1],
|
|
||||||
msg='slice key should apply slice to sequence')
|
|
||||||
self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 2)), _SLICE_DATA[1:2],
|
|
||||||
msg='slice key should apply slice to sequence')
|
|
||||||
self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 4, 2)), _SLICE_DATA[1:4:2],
|
|
||||||
msg='slice key should apply slice to sequence')
|
|
||||||
|
|
||||||
# Test alternative paths
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str',
|
|
||||||
msg='multiple `paths` should be treated as alternative paths')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, 'str', 100), 'str',
|
|
||||||
msg='alternatives should exit early')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'fail'), None,
|
|
||||||
msg='alternatives should return `default` if exhausted')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, (..., 'fail'), 100), 100,
|
|
||||||
msg='alternatives should track their own branching return')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ('dict', ...), ('data', ...)), list(_TEST_DATA['data']),
|
|
||||||
msg='alternatives on empty objects should search further')
|
|
||||||
|
|
||||||
# Test branch and path nesting
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')), ['https://www.example.com/0'],
|
|
||||||
msg='tuple as key should be treated as branches')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')), ['https://www.example.com/0'],
|
|
||||||
msg='list as key should be treated as branches')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))), ['https://www.example.com/0'],
|
|
||||||
msg='double nesting in path should be treated as paths')
|
|
||||||
self.assertEqual(traverse_obj(['0', [1, 2]], [(0, 1), 0]), [1],
|
|
||||||
msg='do not fail early on branching')
|
|
||||||
self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', ((1, ('fail', 'url')), (0, 'url')))),
|
|
||||||
['https://www.example.com/0', 'https://www.example.com/1'],
|
|
||||||
msg='tripple nesting in path should be treated as branches')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ('urls', ('fail', (..., 'url')))),
|
|
||||||
['https://www.example.com/0', 'https://www.example.com/1'],
|
|
||||||
msg='ellipsis as branch path start gets flattened')
|
|
||||||
|
|
||||||
# Test dictionary as key
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}), {0: 100, 1: 1.2},
|
|
||||||
msg='dict key should result in a dict with the same keys')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}),
|
|
||||||
{0: 'https://www.example.com/0'},
|
|
||||||
msg='dict key should allow paths')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}),
|
|
||||||
{0: ['https://www.example.com/0']},
|
|
||||||
msg='tuple in dict path should be treated as branches')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}),
|
|
||||||
{0: ['https://www.example.com/0']},
|
|
||||||
msg='double nesting in dict path should be treated as paths')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}),
|
|
||||||
{0: ['https://www.example.com/1', 'https://www.example.com/0']},
|
|
||||||
msg='tripple nesting in dict path should be treated as branches')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}), {},
|
|
||||||
msg='remove `None` values when top level dict key fails')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}, default=...), {0: ...},
|
|
||||||
msg='use `default` if key fails and `default`')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {},
|
|
||||||
msg='remove empty values when dict key')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=...), {0: ...},
|
|
||||||
msg='use `default` when dict key and `default`')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}), {},
|
|
||||||
msg='remove empty values when nested dict key fails')
|
|
||||||
self.assertEqual(traverse_obj(None, {0: 'fail'}), {},
|
|
||||||
msg='default to dict if pruned')
|
|
||||||
self.assertEqual(traverse_obj(None, {0: 'fail'}, default=...), {0: ...},
|
|
||||||
msg='default to dict if pruned and default is given')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=...), {0: {0: ...}},
|
|
||||||
msg='use nested `default` when nested dict key fails and `default`')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', ...)}), {},
|
|
||||||
msg='remove key if branch in dict key not successful')
|
|
||||||
|
|
||||||
# Testing default parameter behavior
|
|
||||||
_DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}
|
|
||||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail'), None,
|
|
||||||
msg='default value should be `None`')
|
|
||||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=...), ...,
|
|
||||||
msg='chained fails should result in default')
|
|
||||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', 'int'), 0,
|
|
||||||
msg='should not short cirquit on `None`')
|
|
||||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, 'fail', default=1), 1,
|
|
||||||
msg='invalid dict key should result in `default`')
|
|
||||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, 'None', default=1), 1,
|
|
||||||
msg='`None` is a deliberate sentinel and should become `default`')
|
|
||||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', 10)), None,
|
|
||||||
msg='`IndexError` should result in `default`')
|
|
||||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=1), 1,
|
|
||||||
msg='if branched but not successful return `default` if defined, not `[]`')
|
|
||||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=None), None,
|
|
||||||
msg='if branched but not successful return `default` even if `default` is `None`')
|
|
||||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, (..., 'fail')), [],
|
|
||||||
msg='if branched but not successful return `[]`, not `default`')
|
|
||||||
self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', ...)), [],
|
|
||||||
msg='if branched but object is empty return `[]`, not `default`')
|
|
||||||
self.assertEqual(traverse_obj(None, ...), [],
|
|
||||||
msg='if branched but object is `None` return `[]`, not `default`')
|
|
||||||
self.assertEqual(traverse_obj({0: None}, (0, ...)), [],
|
|
||||||
msg='if branched but state is `None` return `[]`, not `default`')
|
|
||||||
|
|
||||||
branching_paths = [
|
|
||||||
('fail', ...),
|
|
||||||
(..., 'fail'),
|
|
||||||
100 * ('fail',) + (...,),
|
|
||||||
(...,) + 100 * ('fail',),
|
|
||||||
]
|
|
||||||
for branching_path in branching_paths:
|
|
||||||
self.assertEqual(traverse_obj({}, branching_path), [],
|
|
||||||
msg='if branched but state is `None`, return `[]` (not `default`)')
|
|
||||||
self.assertEqual(traverse_obj({}, 'fail', branching_path), [],
|
|
||||||
msg='if branching in last alternative and previous did not match, return `[]` (not `default`)')
|
|
||||||
self.assertEqual(traverse_obj({0: 'x'}, 0, branching_path), 'x',
|
|
||||||
msg='if branching in last alternative and previous did match, return single value')
|
|
||||||
self.assertEqual(traverse_obj({0: 'x'}, branching_path, 0), 'x',
|
|
||||||
msg='if branching in first alternative and non-branching path does match, return single value')
|
|
||||||
self.assertEqual(traverse_obj({}, branching_path, 'fail'), None,
|
|
||||||
msg='if branching in first alternative and non-branching path does not match, return `default`')
|
|
||||||
|
|
||||||
# Testing expected_type behavior
|
|
||||||
_EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}
|
|
||||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str),
|
|
||||||
'str', msg='accept matching `expected_type` type')
|
|
||||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int),
|
|
||||||
None, msg='reject non matching `expected_type` type')
|
|
||||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)),
|
|
||||||
'0', msg='transform type using type function')
|
|
||||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0),
|
|
||||||
None, msg='wrap expected_type fuction in try_call')
|
|
||||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, ..., expected_type=str),
|
|
||||||
['str'], msg='eliminate items that expected_type fails on')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int),
|
|
||||||
{0: 100}, msg='type as expected_type should filter dict values')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none),
|
|
||||||
{0: '100', 1: '1.2'}, msg='function as expected_type should transform dict values')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ({0: 1.2}, 0, {int_or_none}), expected_type=int),
|
|
||||||
1, msg='expected_type should not filter non final dict values')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int),
|
|
||||||
{0: {0: 100}}, msg='expected_type should transform deep dict values')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(...)),
|
|
||||||
[{0: ...}, {0: ...}], msg='expected_type should transform branched dict values')
|
|
||||||
self.assertEqual(traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int),
|
|
||||||
[4], msg='expected_type regression for type matching in tuple branching')
|
|
||||||
self.assertEqual(traverse_obj(_TEST_DATA, ['data', ...], expected_type=int),
|
|
||||||
[], msg='expected_type regression for type matching in dict result')
|
|
||||||
|
|
||||||
# Test get_all behavior
|
|
||||||
_GET_ALL_DATA = {'key': [0, 1, 2]}
|
|
||||||
self.assertEqual(traverse_obj(_GET_ALL_DATA, ('key', ...), get_all=False), 0,
|
|
||||||
msg='if not `get_all`, return only first matching value')
|
|
||||||
self.assertEqual(traverse_obj(_GET_ALL_DATA, ..., get_all=False), [0, 1, 2],
|
|
||||||
msg='do not overflatten if not `get_all`')
|
|
||||||
|
|
||||||
# Test casesense behavior
|
|
||||||
_CASESENSE_DATA = {
|
|
||||||
'KeY': 'value0',
|
|
||||||
0: {
|
|
||||||
'KeY': 'value1',
|
|
||||||
0: {'KeY': 'value2'},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
self.assertEqual(traverse_obj(_CASESENSE_DATA, 'key'), None,
|
|
||||||
msg='dict keys should be case sensitive unless `casesense`')
|
|
||||||
self.assertEqual(traverse_obj(_CASESENSE_DATA, 'keY',
|
|
||||||
casesense=False), 'value0',
|
|
||||||
-                         msg='allow non matching key case if `casesense`')
-        self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ('keY',)),
-                                      casesense=False), ['value1'],
-                         msg='allow non matching key case in branch if `casesense`')
-        self.assertEqual(traverse_obj(_CASESENSE_DATA, (0, ((0, 'keY'),)),
-                                      casesense=False), ['value2'],
-                         msg='allow non matching key case in branch path if `casesense`')
-
-        # Test traverse_string behavior
-        _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2}
-        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)), None,
-                         msg='do not traverse into string if not `traverse_string`')
-        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0),
-                                      traverse_string=True), 's',
-                         msg='traverse into string if `traverse_string`')
-        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1),
-                                      traverse_string=True), '.',
-                         msg='traverse into converted data if `traverse_string`')
-        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', ...),
-                                      traverse_string=True), 'str',
-                         msg='`...` should result in string (same value) if `traverse_string`')
-        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)),
-                                      traverse_string=True), 'sr',
-                         msg='`slice` should result in string if `traverse_string`')
-        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == "s"),
-                                      traverse_string=True), 'str',
-                         msg='function should result in string if `traverse_string`')
-        self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)),
-                                      traverse_string=True), ['s', 'r'],
-                         msg='branching should result in list if `traverse_string`')
-        self.assertEqual(traverse_obj({}, (0, ...), traverse_string=True), [],
-                         msg='branching should result in list if `traverse_string`')
-        self.assertEqual(traverse_obj({}, (0, lambda x, y: True), traverse_string=True), [],
-                         msg='branching should result in list if `traverse_string`')
-        self.assertEqual(traverse_obj({}, (0, slice(1)), traverse_string=True), [],
-                         msg='branching should result in list if `traverse_string`')
-
-        # Test re.Match as input obj
-        mobj = re.fullmatch(r'0(12)(?P<group>3)(4)?', '0123')
-        self.assertEqual(traverse_obj(mobj, ...), [x for x in mobj.groups() if x is not None],
-                         msg='`...` on a `re.Match` should give its `groups()`')
-        self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 2)), ['0123', '3'],
-                         msg='function on a `re.Match` should give groupno, value starting at 0')
-        self.assertEqual(traverse_obj(mobj, 'group'), '3',
-                         msg='str key on a `re.Match` should give group with that name')
-        self.assertEqual(traverse_obj(mobj, 2), '3',
-                         msg='int key on a `re.Match` should give group with that name')
-        self.assertEqual(traverse_obj(mobj, 'gRoUp', casesense=False), '3',
-                         msg='str key on a `re.Match` should respect casesense')
-        self.assertEqual(traverse_obj(mobj, 'fail'), None,
-                         msg='failing str key on a `re.Match` should return `default`')
-        self.assertEqual(traverse_obj(mobj, 'gRoUpS', casesense=False), None,
-                         msg='failing str key on a `re.Match` should return `default`')
-        self.assertEqual(traverse_obj(mobj, 8), None,
-                         msg='failing int key on a `re.Match` should return `default`')
-        self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 'group')), ['0123', '3'],
-                         msg='function on a `re.Match` should give group name as well')
-
-        # Test xml.etree.ElementTree.Element as input obj
-        etree = xml.etree.ElementTree.fromstring('''<?xml version="1.0"?>
-        <data>
-            <country name="Liechtenstein">
-                <rank>1</rank>
-                <year>2008</year>
-                <gdppc>141100</gdppc>
-                <neighbor name="Austria" direction="E"/>
-                <neighbor name="Switzerland" direction="W"/>
-            </country>
-            <country name="Singapore">
-                <rank>4</rank>
-                <year>2011</year>
-                <gdppc>59900</gdppc>
-                <neighbor name="Malaysia" direction="N"/>
-            </country>
-            <country name="Panama">
-                <rank>68</rank>
-                <year>2011</year>
-                <gdppc>13600</gdppc>
-                <neighbor name="Costa Rica" direction="W"/>
-                <neighbor name="Colombia" direction="E"/>
-            </country>
-        </data>''')
-        self.assertEqual(traverse_obj(etree, ''), etree,
-                         msg='empty str key should return the element itself')
-        self.assertEqual(traverse_obj(etree, 'country'), list(etree),
-                         msg='str key should lead all children with that tag name')
-        self.assertEqual(traverse_obj(etree, ...), list(etree),
-                         msg='`...` as key should return all children')
-        self.assertEqual(traverse_obj(etree, lambda _, x: x[0].text == '4'), [etree[1]],
-                         msg='function as key should get element as value')
-        self.assertEqual(traverse_obj(etree, lambda i, _: i == 1), [etree[1]],
-                         msg='function as key should get index as key')
-        self.assertEqual(traverse_obj(etree, 0), etree[0],
-                         msg='int key should return the nth child')
-        self.assertEqual(traverse_obj(etree, './/neighbor/@name'),
-                         ['Austria', 'Switzerland', 'Malaysia', 'Costa Rica', 'Colombia'],
-                         msg='`@<attribute>` at end of path should give that attribute')
-        self.assertEqual(traverse_obj(etree, '//neighbor/@fail'), [None, None, None, None, None],
-                         msg='`@<nonexistant>` at end of path should give `None`')
-        self.assertEqual(traverse_obj(etree, ('//neighbor/@', 2)), {'name': 'Malaysia', 'direction': 'N'},
-                         msg='`@` should give the full attribute dict')
-        self.assertEqual(traverse_obj(etree, '//year/text()'), ['2008', '2011', '2011'],
-                         msg='`text()` at end of path should give the inner text')
-        self.assertEqual(traverse_obj(etree, '//*[@direction]/@direction'), ['E', 'W', 'N', 'W', 'E'],
-                         msg='full python xpath features should be supported')
-        self.assertEqual(traverse_obj(etree, (0, '@name')), 'Liechtenstein',
-                         msg='special transformations should act on current element')
-        self.assertEqual(traverse_obj(etree, ('country', 0, ..., 'text()', {int_or_none})), [1, 2008, 141100],
-                         msg='special transformations should act on current element')
-
     def test_http_header_dict(self):
         headers = HTTPHeaderDict()
         headers['ytdl-test'] = b'0'
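
The assertions removed above double as documentation for `traverse_string` semantics. A minimal runnable sketch of the behaviour they covered, assuming `traverse_obj` is imported from `yt_dlp.utils` as in this test module:

    from yt_dlp.utils import traverse_obj

    data = {'str': 'str'}
    # Strings are opaque by default...
    assert traverse_obj(data, ('str', 0)) is None
    # ...but become indexable when traverse_string=True
    assert traverse_obj(data, ('str', 0), traverse_string=True) == 's'
    # Branching into a string yields a list of characters
    assert traverse_obj(data, ('str', (0, 2)), traverse_string=True) == ['s', 'r']
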
@@ -2438,7 +2066,22 @@ def test_extract_basic_auth(self):
         assert extract_basic_auth('http://user:pass@foo.bar') == ('http://foo.bar', 'Basic dXNlcjpwYXNz')

     @unittest.skipUnless(compat_os_name == 'nt', 'Only relevant on Windows')
-    def test_Popen_windows_escaping(self):
+    def test_windows_escaping(self):
+        tests = [
+            'test"&',
+            '%CMDCMDLINE:~-1%&',
+            'a\nb',
+            '"',
+            '\\',
+            '!',
+            '^!',
+            'a \\ b',
+            'a \\" b',
+            'a \\ b\\',
+            # We replace \r with \n
+            ('a\r\ra', 'a\n\na'),
+        ]
+
         def run_shell(args):
             stdout, stderr, error = Popen.run(
                 args, text=True, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -2446,11 +2089,15 @@ def run_shell(args):
             assert not error
             return stdout

-        # Test escaping
-        assert run_shell(['echo', 'test"&']) == '"test""&"\n'
-        # Test if delayed expansion is disabled
-        assert run_shell(['echo', '^!']) == '"^!"\n'
-        assert run_shell('echo "^!"') == '"^!"\n'
+        for argument in tests:
+            if isinstance(argument, str):
+                expected = argument
+            else:
+                argument, expected = argument
+
+            args = [sys.executable, '-c', 'import sys; print(end=sys.argv[1])', argument, 'end']
+            assert run_shell(args) == expected
+            assert run_shell(shell_quote(args, shell=True)) == expected


 if __name__ == '__main__':

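
The rewritten test round-trips each argument twice: once as an argv list and once after quoting the whole command line with `shell_quote(args, shell=True)`, expecting byte-identical output both ways. A hedged sketch of that round-trip for a single argument (Windows-only, mirroring the test's own setup):

    import subprocess
    import sys

    from yt_dlp.utils import Popen, shell_quote

    args = [sys.executable, '-c', 'import sys; print(end=sys.argv[1])', 'a \\" b', 'end']
    command_line = shell_quote(args, shell=True)  # one cmd.exe-safe string
    stdout, stderr, returncode = Popen.run(
        command_line, text=True, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert returncode == 0 and stdout == 'a \\" b'
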
@@ -3,10 +3,12 @@
 # Allow direct execution
 import os
 import sys
+import time

 import pytest

 from test.helper import verify_address_availability
+from yt_dlp.networking.common import Features, DEFAULT_TIMEOUT

 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

@@ -18,7 +20,7 @@
 import ssl
 import threading

-from yt_dlp import socks
+from yt_dlp import socks, traverse_obj
 from yt_dlp.cookies import YoutubeDLCookieJar
 from yt_dlp.dependencies import websockets
 from yt_dlp.networking import Request
@@ -32,8 +34,6 @@
 )
 from yt_dlp.utils.networking import HTTPHeaderDict

-from test.conftest import validate_and_send
-
 TEST_DIR = os.path.dirname(os.path.abspath(__file__))

@@ -66,7 +66,9 @@ def process_request(self, request):

 def create_websocket_server(**ws_kwargs):
     import websockets.sync.server
-    wsd = websockets.sync.server.serve(websocket_handler, '127.0.0.1', 0, process_request=process_request, **ws_kwargs)
+    wsd = websockets.sync.server.serve(
+        websocket_handler, '127.0.0.1', 0,
+        process_request=process_request, open_timeout=2, **ws_kwargs)
     ws_port = wsd.socket.getsockname()[1]
     ws_server_thread = threading.Thread(target=wsd.serve_forever)
     ws_server_thread.daemon = True
@@ -100,7 +102,21 @@ def create_mtls_wss_websocket_server():
     return create_websocket_server(ssl_context=sslctx)


+def ws_validate_and_send(rh, req):
+    rh.validate(req)
+    max_tries = 3
+    for i in range(max_tries):
+        try:
+            return rh.send(req)
+        except TransportError as e:
+            if i < (max_tries - 1) and 'connection closed during handshake' in str(e):
+                # websockets server sometimes hangs on new connections
+                continue
+            raise
+
+
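
`ws_validate_and_send` replaces the shared `validate_and_send` helper throughout this file: it retries a handshake that the flaky test server dropped, up to three attempts, and re-raises anything else. Usage is unchanged at the call sites, e.g. (from `test_basic_websockets` below):

    with handler() as rh:
        ws = ws_validate_and_send(rh, Request(self.ws_base_url))
        assert ws.status == 101  # handshake upgraded successfully
        ws.close()
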
 @pytest.mark.skipif(not websockets, reason='websockets must be installed to test websocket request handlers')
+@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
 class TestWebsSocketRequestHandlerConformance:
     @classmethod
     def setup_class(cls):
@@ -116,10 +132,9 @@ def setup_class(cls):
         cls.mtls_wss_thread, cls.mtls_wss_port = create_mtls_wss_websocket_server()
         cls.mtls_wss_base_url = f'wss://127.0.0.1:{cls.mtls_wss_port}'

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_basic_websockets(self, handler):
         with handler() as rh:
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             assert 'upgrade' in ws.headers
             assert ws.status == 101
             ws.send('foo')
@@ -128,33 +143,29 @@ def test_basic_websockets(self, handler):

     # https://www.rfc-editor.org/rfc/rfc6455.html#section-5.6
     @pytest.mark.parametrize('msg,opcode', [('str', 1), (b'bytes', 2)])
-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_send_types(self, handler, msg, opcode):
         with handler() as rh:
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             ws.send(msg)
             assert int(ws.recv()) == opcode
             ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_verify_cert(self, handler):
         with handler() as rh:
             with pytest.raises(CertificateVerifyError):
-                validate_and_send(rh, Request(self.wss_base_url))
+                ws_validate_and_send(rh, Request(self.wss_base_url))

         with handler(verify=False) as rh:
-            ws = validate_and_send(rh, Request(self.wss_base_url))
+            ws = ws_validate_and_send(rh, Request(self.wss_base_url))
             assert ws.status == 101
             ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_ssl_error(self, handler):
         with handler(verify=False) as rh:
             with pytest.raises(SSLError, match=r'ssl(?:v3|/tls) alert handshake failure') as exc_info:
-                validate_and_send(rh, Request(self.bad_wss_host))
+                ws_validate_and_send(rh, Request(self.bad_wss_host))
             assert not issubclass(exc_info.type, CertificateVerifyError)

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     @pytest.mark.parametrize('path,expected', [
         # Unicode characters should be encoded with uppercase percent-encoding
         ('/中文', '/%E4%B8%AD%E6%96%87'),
@@ -163,18 +174,17 @@ def test_ssl_error(self, handler):
     ])
     def test_percent_encode(self, handler, path, expected):
         with handler() as rh:
-            ws = validate_and_send(rh, Request(f'{self.ws_base_url}{path}'))
+            ws = ws_validate_and_send(rh, Request(f'{self.ws_base_url}{path}'))
             ws.send('path')
             assert ws.recv() == expected
             assert ws.status == 101
             ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_remove_dot_segments(self, handler):
         with handler() as rh:
             # This isn't a comprehensive test,
             # but it should be enough to check whether the handler is removing dot segments
-            ws = validate_and_send(rh, Request(f'{self.ws_base_url}/a/b/./../../test'))
+            ws = ws_validate_and_send(rh, Request(f'{self.ws_base_url}/a/b/./../../test'))
             assert ws.status == 101
             ws.send('path')
             assert ws.recv() == '/test'
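
The request path above is normalized per RFC 3986 §5.2.4 before it reaches the server, so `/a/b/./../../test` arrives as `/test`. A simplified, hypothetical sketch of that algorithm (the real logic lives in yt-dlp's networking layer and handles more edge cases, e.g. trailing slashes):

    def remove_dot_segments_sketch(path):
        output = []
        for segment in path.split('/'):
            if segment == '.':
                continue  # '.' refers to the current directory; drop it
            if segment == '..':
                if len(output) > 1:
                    output.pop()  # '..' removes the previous segment
                continue
            output.append(segment)
        return '/'.join(output)

    assert remove_dot_segments_sketch('/a/b/./../../test') == '/test'
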
@@ -182,25 +192,37 @@ def test_remove_dot_segments(self, handler):

     # We are restricted to known HTTP status codes in http.HTTPStatus
     # Redirects are not supported for websockets
-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     @pytest.mark.parametrize('status', (200, 204, 301, 302, 303, 400, 500, 511))
     def test_raise_http_error(self, handler, status):
         with handler() as rh:
             with pytest.raises(HTTPError) as exc_info:
-                validate_and_send(rh, Request(f'{self.ws_base_url}/gen_{status}'))
+                ws_validate_and_send(rh, Request(f'{self.ws_base_url}/gen_{status}'))
             assert exc_info.value.status == status

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     @pytest.mark.parametrize('params,extensions', [
-        ({'timeout': 0.00001}, {}),
-        ({}, {'timeout': 0.00001}),
+        ({'timeout': sys.float_info.min}, {}),
+        ({}, {'timeout': sys.float_info.min}),
     ])
-    def test_timeout(self, handler, params, extensions):
+    def test_read_timeout(self, handler, params, extensions):
         with handler(**params) as rh:
             with pytest.raises(TransportError):
-                validate_and_send(rh, Request(self.ws_base_url, extensions=extensions))
+                ws_validate_and_send(rh, Request(self.ws_base_url, extensions=extensions))
+
+    def test_connect_timeout(self, handler):
+        # nothing should be listening on this port
+        connect_timeout_url = 'ws://10.255.255.255'
+        with handler(timeout=0.01) as rh, pytest.raises(TransportError):
+            now = time.time()
+            ws_validate_and_send(rh, Request(connect_timeout_url))
+        assert time.time() - now < DEFAULT_TIMEOUT
+
+        # Per request timeout, should override handler timeout
+        request = Request(connect_timeout_url, extensions={'timeout': 0.01})
+        with handler() as rh, pytest.raises(TransportError):
+            now = time.time()
+            ws_validate_and_send(rh, request)
+        assert time.time() - now < DEFAULT_TIMEOUT

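
`test_connect_timeout` relies on 10.255.255.255 being unroutable, so the connection attempt can only end via the configured timeout; the assertions check that the short explicit timeout fired instead of `DEFAULT_TIMEOUT`. The measurement pattern, assuming `ws_validate_and_send`, `Request` and `TransportError` in scope as in this module:

    import time

    def elapsed_until_failure(rh, request):
        # With an unroutable host, this duration is bounded by the configured
        # timeout (handler- or request-level), not by DEFAULT_TIMEOUT
        start = time.time()
        try:
            ws_validate_and_send(rh, request)
        except TransportError:
            pass
        return time.time() - start
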
-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_cookies(self, handler):
         cookiejar = YoutubeDLCookieJar()
         cookiejar.set_cookie(http.cookiejar.Cookie(
@@ -210,52 +232,49 @@ def test_cookies(self, handler):
             comment_url=None, rest={}))

         with handler(cookiejar=cookiejar) as rh:
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             ws.send('headers')
             assert json.loads(ws.recv())['cookie'] == 'test=ytdlp'
             ws.close()

         with handler() as rh:
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             ws.send('headers')
             assert 'cookie' not in json.loads(ws.recv())
             ws.close()

-            ws = validate_and_send(rh, Request(self.ws_base_url, extensions={'cookiejar': cookiejar}))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url, extensions={'cookiejar': cookiejar}))
             ws.send('headers')
             assert json.loads(ws.recv())['cookie'] == 'test=ytdlp'
             ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_source_address(self, handler):
         source_address = f'127.0.0.{random.randint(5, 255)}'
         verify_address_availability(source_address)
         with handler(source_address=source_address) as rh:
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             ws.send('source_address')
             assert source_address == ws.recv()
             ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_response_url(self, handler):
         with handler() as rh:
             url = f'{self.ws_base_url}/something'
-            ws = validate_and_send(rh, Request(url))
+            ws = ws_validate_and_send(rh, Request(url))
             assert ws.url == url
             ws.close()

-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_request_headers(self, handler):
         with handler(headers=HTTPHeaderDict({'test1': 'test', 'test2': 'test2'})) as rh:
             # Global Headers
-            ws = validate_and_send(rh, Request(self.ws_base_url))
+            ws = ws_validate_and_send(rh, Request(self.ws_base_url))
             ws.send('headers')
             headers = HTTPHeaderDict(json.loads(ws.recv()))
             assert headers['test1'] == 'test'
             ws.close()

             # Per request headers, merged with global
-            ws = validate_and_send(rh, Request(
+            ws = ws_validate_and_send(rh, Request(
                 self.ws_base_url, headers={'test2': 'changed', 'test3': 'test3'}))
             ws.send('headers')
             headers = HTTPHeaderDict(json.loads(ws.recv()))
@@ -280,7 +299,6 @@ def test_request_headers(self, handler):
             'client_certificate_password': 'foobar',
         }
     ))
-    @pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
     def test_mtls(self, handler, client_cert):
         with handler(
             # Disable client-side validation of unacceptable self-signed testcert.pem
@@ -288,7 +306,45 @@ def test_mtls(self, handler, client_cert):
             verify=False,
             client_cert=client_cert
         ) as rh:
-            validate_and_send(rh, Request(self.mtls_wss_base_url)).close()
+            ws_validate_and_send(rh, Request(self.mtls_wss_base_url)).close()
+
+    def test_request_disable_proxy(self, handler):
+        for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['ws']:
+            # Given handler is configured with a proxy
+            with handler(proxies={'ws': f'{proxy_proto}://10.255.255.255'}, timeout=5) as rh:
+                # When a proxy is explicitly set to None for the request
+                ws = ws_validate_and_send(rh, Request(self.ws_base_url, proxies={'http': None}))
+                # Then no proxy should be used
+                assert ws.status == 101
+                ws.close()
+
+    @pytest.mark.skip_handlers_if(
+        lambda _, handler: Features.NO_PROXY not in handler._SUPPORTED_FEATURES, 'handler does not support NO_PROXY')
+    def test_noproxy(self, handler):
+        for proxy_proto in handler._SUPPORTED_PROXY_SCHEMES or ['ws']:
+            # Given the handler is configured with a proxy
+            with handler(proxies={'ws': f'{proxy_proto}://10.255.255.255'}, timeout=5) as rh:
+                for no_proxy in (f'127.0.0.1:{self.ws_port}', '127.0.0.1', 'localhost'):
+                    # When request no proxy includes the request url host
+                    ws = ws_validate_and_send(rh, Request(self.ws_base_url, proxies={'no': no_proxy}))
+                    # Then the proxy should not be used
+                    assert ws.status == 101
+                    ws.close()
+
+    @pytest.mark.skip_handlers_if(
+        lambda _, handler: Features.ALL_PROXY not in handler._SUPPORTED_FEATURES, 'handler does not support ALL_PROXY')
+    def test_allproxy(self, handler):
+        supported_proto = traverse_obj(handler._SUPPORTED_PROXY_SCHEMES, 0, default='ws')
+        # This is a bit of a hacky test, but it should be enough to check whether the handler is using the proxy.
+        # 0.1s might not be enough of a timeout if proxy is not used in all cases, but should still get failures.
+        with handler(proxies={'all': f'{supported_proto}://10.255.255.255'}, timeout=0.1) as rh:
+            with pytest.raises(TransportError):
+                ws_validate_and_send(rh, Request(self.ws_base_url)).close()
+
+        with handler(timeout=0.1) as rh:
+            with pytest.raises(TransportError):
+                ws_validate_and_send(
+                    rh, Request(self.ws_base_url, proxies={'all': f'{supported_proto}://10.255.255.255'})).close()


 def create_fake_ws_connection(raised):

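
Taken together, the three new proxy tests pin down how the `proxies` mapping is interpreted: keys select by URL scheme, `'all'` acts as an ALL_PROXY-style fallback, `'no'` as a NO_PROXY-style bypass, and an explicit `None` at request level disables the handler-level proxy. A condensed, runnable recap using the tests' own placeholder values:

    handler_level = {'ws': 'ws://10.255.255.255'}   # route ws:// requests via a proxy
    fallback_all = {'all': 'ws://10.255.255.255'}   # catch-all for every scheme
    bypass_hosts = {'no': '127.0.0.1'}              # hosts that skip the proxy
    request_level = {'http': None}                  # explicit None: use no proxy
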
@@ -1,7 +1,7 @@
 import collections
 import contextlib
 import copy
-import datetime
+import datetime as dt
 import errno
 import fileinput
 import http.cookiejar
@@ -25,7 +25,7 @@

 from .cache import Cache
 from .compat import functools, urllib  # isort: split
-from .compat import compat_os_name, compat_shlex_quote, urllib_req_to_req
+from .compat import compat_os_name, urllib_req_to_req
 from .cookies import LenientSimpleCookie, load_cookies
 from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
 from .downloader.rtmp import rtmpdump_version
@@ -42,6 +42,7 @@
     SSLError,
     network_exceptions,
 )
+from .networking.impersonate import ImpersonateRequestHandler
 from .plugins import directories as plugin_directories
 from .postprocessor import _PLUGIN_CLASSES as plugin_pps
 from .postprocessor import (
@@ -99,8 +100,8 @@
     SameFileError,
     UnavailableVideoError,
     UserNotLive,
+    YoutubeDLError,
     age_restricted,
-    args_to_str,
     bug_reports_message,
     date_from_str,
     deprecation_warning,
@@ -139,11 +140,13 @@
     sanitize_filename,
     sanitize_path,
     sanitize_url,
+    shell_quote,
     str_or_none,
     strftime_or_none,
     subtitles_filename,
     supports_terminal_sequences,
     system_identifier,
+    filesize_from_tbr,
     timetuple_from_msec,
     to_high_limit_path,
     traverse_obj,
@@ -402,6 +405,8 @@ class YoutubeDL:
                        - "detect_or_warn": check whether we can do anything
                          about it, warn otherwise (default)
     source_address:    Client-side IP address to bind to.
+    impersonate:       Client to impersonate for requests.
+                       An ImpersonateTarget (from yt_dlp.networking.impersonate)
     sleep_interval_requests: Number of seconds to sleep between requests
                        during extraction
     sleep_interval:    Number of seconds to sleep before each download when
@@ -476,7 +481,7 @@ class YoutubeDL:
                        nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
                        max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
                        continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
-                       external_downloader_args, concurrent_fragment_downloads.
+                       external_downloader_args, concurrent_fragment_downloads, progress_delta.

     The following options are used by the post processors:
     ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
@@ -575,7 +580,7 @@ class YoutubeDL:
         'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
         'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
         'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
-        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
+        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data',
         'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
         'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
         'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
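
The new `impersonate` parameter documented above is validated when the `YoutubeDL` instance is constructed (see the next hunk) and raises `YoutubeDLError` if no installed request handler supports the target. A hedged usage sketch, assuming an impersonation-capable handler (e.g. curl_cffi) is installed:

    from yt_dlp import YoutubeDL
    from yt_dlp.networking.impersonate import ImpersonateTarget

    ydl = YoutubeDL({'impersonate': ImpersonateTarget('chrome')})
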
@@ -713,6 +718,13 @@ def check_deprecated(param, option, suggestion):
         for msg in self.params.get('_deprecation_warnings', []):
             self.deprecated_feature(msg)

+        if impersonate_target := self.params.get('impersonate'):
+            if not self._impersonate_target_available(impersonate_target):
+                raise YoutubeDLError(
+                    f'Impersonate target "{impersonate_target}" is not available. '
+                    f'Use --list-impersonate-targets to see available targets. '
+                    f'You may be missing dependencies required to support this target.')
+
         if 'list-formats' in self.params['compat_opts']:
             self.params['listformats_table'] = False

@@ -811,7 +823,7 @@ def warn_if_short_id(self, argv):
         self.report_warning(
             'Long argument string detected. '
             'Use -- to separate parameters and URLs, like this:\n%s' %
-            args_to_str(correct_argv))
+            shell_quote(correct_argv))

     def add_info_extractor(self, ie):
         """Add an InfoExtractor object to the end of the list."""
@@ -962,8 +974,9 @@ def __exit__(self, *args):

     def close(self):
         self.save_cookies()
-        self._request_director.close()
-        del self._request_director
+        if '_request_director' in self.__dict__:
+            self._request_director.close()
+            del self._request_director

     def trouble(self, message=None, tb=None, is_error=True):
         """Determine action to take when a download problem appears.
@@ -1342,7 +1355,7 @@ def create_key(outer_mobj):
                 value, fmt = escapeHTML(str(value)), str_fmt
             elif fmt[-1] == 'q':  # quoted
                 value = map(str, variadic(value) if '#' in flags else [value])
-                value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
+                value, fmt = shell_quote(value, shell=True), str_fmt
             elif fmt[-1] == 'B':  # bytes
                 value = f'%{str_fmt}'.encode() % str(value).encode()
                 value, fmt = value.decode('utf-8', 'ignore'), 's'
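
In the `close()` hunk above, `_request_director` is created lazily (a `functools.cached_property` in yt-dlp), so teardown now checks the instance `__dict__` first: this avoids building a request director just to close it and makes repeated `close()` calls safe. A minimal self-contained sketch of the pattern, with hypothetical names:

    import functools

    class _Director:  # stand-in for the real request director
        def close(self):
            print('director closed')

    class Client:
        @functools.cached_property
        def _director(self):
            print('building director')
            return _Director()

        def close(self):
            # cached_property stores its value in the instance __dict__,
            # so this is True only if the property was ever materialized
            if '_director' in self.__dict__:
                self._director.close()
                del self._director

    Client().close()  # prints nothing: the director was never built
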
@@ -2123,6 +2136,11 @@ def _filter(f):

     def _check_formats(self, formats):
         for f in formats:
+            working = f.get('__working')
+            if working is not None:
+                if working:
+                    yield f
+                continue
             self.to_screen('[info] Testing format %s' % f['format_id'])
             path = self.get_output_path('temp')
             if not self._ensure_dir_exists(f'{path}/'):
@@ -2139,33 +2157,44 @@ def _check_formats(self, formats):
                     os.remove(temp_file.name)
                 except OSError:
                     self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
+            f['__working'] = success
             if success:
                 yield f
             else:
                 self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])

+    def _select_formats(self, formats, selector):
+        return list(selector({
+            'formats': formats,
+            'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
+            'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats)  # No formats with video
+                                   or all(f.get('acodec') == 'none' for f in formats)),  # OR, No formats with audio
+        }))
+
     def _default_format_spec(self, info_dict, download=True):
+        download = download and not self.params.get('simulate')
+        prefer_best = download and (
+            self.params['outtmpl']['default'] == '-'
+            or info_dict.get('is_live') and not self.params.get('live_from_start'))

         def can_merge():
             merger = FFmpegMergerPP(self)
             return merger.available and merger.can_merge()

-        prefer_best = (
-            not self.params.get('simulate')
-            and download
-            and (
-                not can_merge()
-                or info_dict.get('is_live') and not self.params.get('live_from_start')
-                or self.params['outtmpl']['default'] == '-'))
-        compat = (
-            prefer_best
-            or self.params.get('allow_multiple_audio_streams', False)
-            or 'format-spec' in self.params['compat_opts'])
+        if not prefer_best and download and not can_merge():
+            prefer_best = True
+            formats = self._get_formats(info_dict)
+            evaluate_formats = lambda spec: self._select_formats(formats, self.build_format_selector(spec))
+            if evaluate_formats('b/bv+ba') != evaluate_formats('bv*+ba/b'):
+                self.report_warning('ffmpeg not found. The downloaded format may not be the best available. '
+                                    'Installing ffmpeg is strongly recommended: https://github.com/yt-dlp/yt-dlp#dependencies')
+
+        compat = (self.params.get('allow_multiple_audio_streams')
+                  or 'format-spec' in self.params['compat_opts'])

-        return (
-            'best/bestvideo+bestaudio' if prefer_best
-            else 'bestvideo*+bestaudio/best' if not compat
-            else 'bestvideo+bestaudio/best')
+        return ('best/bestvideo+bestaudio' if prefer_best
+                else 'bestvideo+bestaudio/best' if compat
+                else 'bestvideo*+bestaudio/best')

     def build_format_selector(self, format_spec):
         def syntax_error(note, start):
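
`_check_formats` now memoizes each verdict on the format dict itself (the private `__working` key), so re-running a selector does not re-download test fragments, and `_select_formats` factors out the selector invocation so `_default_format_spec` can cheaply compare two specs to decide whether the ffmpeg warning is warranted. The caching shape, as a minimal generic sketch with hypothetical names:

    def check_items(items, expensive_check):
        # Yield items that pass the check, caching each verdict on the item
        for item in items:
            cached = item.get('__working')
            if cached is not None:  # verdict known from an earlier pass
                if cached:
                    yield item
                continue
            item['__working'] = success = expensive_check(item)
            if success:
                yield item

    formats = [{'id': 'a'}, {'id': 'b'}]
    first = list(check_items(formats, lambda f: f['id'] == 'a'))
    # The second pass reuses cached flags; the check is never called again
    assert list(check_items(formats, lambda f: 1 / 0)) == first
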
@@ -2226,7 +2255,7 @@ def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, ins
         selectors = []
         current_selector = None
         for type, string_, start, _, _ in tokens:
-            # ENCODING is only defined in python 3.x
+            # ENCODING is only defined in Python 3.x
             if type == getattr(tokenize, 'ENCODING', None):
                 continue
             elif type in [tokenize.NAME, tokenize.NUMBER]:
@@ -2616,7 +2645,7 @@ def _fill_common_fields(self, info_dict, final=True):
             # Working around out-of-range timestamp values (e.g. negative ones on Windows,
             # see http://bugs.python.org/issue1646728)
             with contextlib.suppress(ValueError, OverflowError, OSError):
-                upload_date = datetime.datetime.fromtimestamp(info_dict[ts_key], datetime.timezone.utc)
+                upload_date = dt.datetime.fromtimestamp(info_dict[ts_key], dt.timezone.utc)
                 info_dict[date_key] = upload_date.strftime('%Y%m%d')

         if not info_dict.get('release_year'):
@@ -2649,7 +2678,8 @@ def _fill_common_fields(self, info_dict, final=True):

         for old_key, new_key in self._deprecated_multivalue_fields.items():
             if new_key in info_dict and old_key in info_dict:
-                self.deprecation_warning(f'Do not return {old_key!r} when {new_key!r} is present')
+                if '_version' not in info_dict:  # HACK: Do not warn when using --load-info-json
+                    self.deprecation_warning(f'Do not return {old_key!r} when {new_key!r} is present')
             elif old_value := info_dict.get(old_key):
                 info_dict[new_key] = old_value.split(', ')
             elif new_value := info_dict.get(new_key):
@@ -2769,7 +2799,7 @@ def sanitize_numeric_fields(info):

         get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
         if not get_from_start:
-            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
+            info_dict['title'] += ' ' + dt.datetime.now().strftime('%Y-%m-%d %H:%M')
         if info_dict.get('is_live') and formats:
             formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
             if get_from_start and not formats:
@@ -2800,6 +2830,9 @@ def is_wellformed(f):
             format['url'] = sanitize_url(format['url'])
             if format.get('ext') is None:
                 format['ext'] = determine_ext(format['url']).lower()
+            if format['ext'] in ('aac', 'opus', 'mp3', 'flac', 'vorbis'):
+                if format.get('acodec') is None:
+                    format['acodec'] = format['ext']
             if format.get('protocol') is None:
                 format['protocol'] = determine_protocol(format)
             if format.get('resolution') is None:
@@ -2810,9 +2843,8 @@ def is_wellformed(f):
                 format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2))
             # For fragmented formats, "tbr" is often max bitrate and not average
             if (('manifest-filesize-approx' in self.params['compat_opts'] or not format.get('manifest_url'))
-                    and info_dict.get('duration') and format.get('tbr')
                     and not format.get('filesize') and not format.get('filesize_approx')):
-                format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
+                format['filesize_approx'] = filesize_from_tbr(format.get('tbr'), info_dict.get('duration'))
             format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict), load_cookies=True)

             # Safeguard against old/insecure infojson when using --load-info-json
@@ -2912,12 +2944,7 @@ def is_wellformed(f):
             self.write_debug(f'Default format spec: {req_format}')
             format_selector = self.build_format_selector(req_format)

-            formats_to_download = list(format_selector({
-                'formats': formats,
-                'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
-                'incomplete_formats': (all(f.get('vcodec') == 'none' for f in formats)  # No formats with video
-                                       or all(f.get('acodec') == 'none' for f in formats)),  # OR, No formats with audio
-            }))
+            formats_to_download = self._select_formats(formats, format_selector)
             if interactive_format_selection and not formats_to_download:
                 self.report_error('Requested format is not available', tb=False, is_error=False)
                 continue
@@ -3044,7 +3071,7 @@ def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
             f = formats[-1]
             self.report_warning(
                 'No subtitle format found matching "%s" for language %s, '
-                'using %s' % (formats_query, lang, f['ext']))
+                'using %s. Use --list-subs for a list of available subtitles' % (formats_query, lang, f['ext']))
             subs[lang] = f
         return subs

@@ -3576,6 +3603,8 @@ def download_with_info_file(self, info_filename):
                     raise
                 self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
                 self.download([webpage_url])
+            except ExtractorError as e:
+                self.report_error(e)
         return self._download_retcode

     @staticmethod
@@ -3860,8 +3889,8 @@ def simplified_codec(f, field):
                 delim, (
                     format_field(f, 'filesize', ' \t%s', func=format_bytes)
                     or format_field(f, 'filesize_approx', '≈\t%s', func=format_bytes)
-                    or format_field(try_call(lambda: format_bytes(int(info_dict['duration'] * f['tbr'] * (1024 / 8)))),
-                                    None, self._format_out('~\t%s', self.Styles.SUPPRESS))),
+                    or format_field(filesize_from_tbr(f.get('tbr'), info_dict.get('duration')), None,
+                                    self._format_out('~\t%s', self.Styles.SUPPRESS), func=format_bytes)),
                 format_field(f, 'tbr', '\t%dk', func=round),
                 shorten_protocol_name(f.get('protocol', '')),
                 delim,
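
Both call sites now share `filesize_from_tbr` instead of duplicating the arithmetic inline; note the removed inline versions multiplied by 1024/8 even though `tbr` is conventionally in kilobits (1000 bits) per second. A sketch of what the helper presumably computes (the actual definition lives in `yt_dlp/utils`; the 1000/8 factor here is an assumption):

    def filesize_from_tbr_sketch(tbr, duration):
        # tbr: total bitrate in KBit/s; duration: seconds; result: bytes
        if tbr is None or duration is None:
            return None
        return int(duration * tbr * (1000 / 8))

    assert filesize_from_tbr_sketch(128, 60) == 960_000  # 60 s at 128 KBit/s
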
@@ -4073,6 +4102,22 @@ def _opener(self):
         handler = self._request_director.handlers['Urllib']
         return handler._get_instance(cookiejar=self.cookiejar, proxies=self.proxies)

+    def _get_available_impersonate_targets(self):
+        # todo(future): make available as public API
+        return [
+            (target, rh.RH_NAME)
+            for rh in self._request_director.handlers.values()
+            if isinstance(rh, ImpersonateRequestHandler)
+            for target in rh.supported_targets
+        ]
+
+    def _impersonate_target_available(self, target):
+        # todo(future): make available as public API
+        return any(
+            rh.is_supported_target(target)
+            for rh in self._request_director.handlers.values()
+            if isinstance(rh, ImpersonateRequestHandler))
+
     def urlopen(self, req):
         """ Start an HTTP download """
         if isinstance(req, str):
@@ -4104,9 +4149,13 @@ def urlopen(self, req):
                 raise RequestError(
                     'file:// URLs are disabled by default in yt-dlp for security reasons. '
                     'Use --enable-file-urls to enable at your own risk.', cause=ue) from ue
-            if 'unsupported proxy type: "https"' in ue.msg.lower():
+            if (
+                'unsupported proxy type: "https"' in ue.msg.lower()
+                and 'requests' not in self._request_director.handlers
+                and 'curl_cffi' not in self._request_director.handlers
+            ):
                 raise RequestError(
-                    'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests')
+                    'To use an HTTPS proxy for this request, one of the following dependencies needs to be installed: requests, curl_cffi')

             elif (
                 re.match(r'unsupported url scheme: "wss?"', ue.msg.lower())
@@ -4116,6 +4165,13 @@ def urlopen(self, req):
                     'This request requires WebSocket support. '
                     'Ensure one of the following dependencies are installed: websockets',
                     cause=ue) from ue
+
+            elif re.match(r'unsupported (?:extensions: impersonate|impersonate target)', ue.msg.lower()):
+                raise RequestError(
+                    f'Impersonate target "{req.extensions["impersonate"]}" is not available.'
+                    f' See --list-impersonate-targets for available targets.'
+                    f' This request requires browser impersonation, however you may be missing dependencies'
+                    f' required to support this target.')
             raise
         except SSLError as e:
             if 'UNSAFE_LEGACY_RENEGOTIATION_DISABLED' in str(e):
@@ -4148,6 +4204,7 @@ def build_request_director(self, handlers, preferences=None):
             'timeout': 'socket_timeout',
             'legacy_ssl_support': 'legacyserverconnect',
             'enable_file_urls': 'enable_file_urls',
+            'impersonate': 'impersonate',
             'client_cert': {
                 'client_certificate': 'client_certificate',
                 'client_certificate_key': 'client_certificate_key',

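
These helpers enumerate impersonation support by walking the request director's handlers; both are still private, per the `todo` comments. A hedged sketch of how a caller might use them, with names exactly as in the hunk above:

    from yt_dlp import YoutubeDL
    from yt_dlp.networking.impersonate import ImpersonateTarget

    with YoutubeDL() as ydl:
        for target, source in ydl._get_available_impersonate_targets():
            print(f'{target} (via {source})')
        if not ydl._impersonate_target_available(ImpersonateTarget('chrome')):
            print('No handler can impersonate chrome; curl_cffi may be missing')
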
@@ -4,7 +4,7 @@
     raise ImportError(
         f'You are using an unsupported version of Python. Only Python versions 3.8 and above are supported by yt-dlp')  # noqa: F541

-__license__ = 'Public Domain'
+__license__ = 'The Unlicense'

 import collections
 import getpass
@@ -19,6 +19,7 @@
 from .downloader.external import get_external_downloader
 from .extractor import list_extractor_classes
 from .extractor.adobepass import MSO_INFO
+from .networking.impersonate import ImpersonateTarget
 from .options import parseOpts
 from .postprocessor import (
     FFmpegExtractAudioPP,
@@ -48,6 +49,7 @@
     float_or_none,
     format_field,
     int_or_none,
+    join_nonempty,
     match_filter_func,
     parse_bytes,
     parse_duration,
@@ -388,6 +390,9 @@ def parse_chapters(name, value, advanced=False):
                 f'Supported keyrings are: {", ".join(sorted(SUPPORTED_KEYRINGS))}')
         opts.cookiesfrombrowser = (browser_name, profile, keyring, container)

+    if opts.impersonate is not None:
+        opts.impersonate = ImpersonateTarget.from_str(opts.impersonate.lower())
+
     # MetadataParser
     def metadataparser_actions(f):
         if isinstance(f, str):
@@ -831,6 +836,7 @@ def parse_options(argv=None):
         'noprogress': opts.quiet if opts.noprogress is None else opts.noprogress,
         'progress_with_newline': opts.progress_with_newline,
         'progress_template': opts.progress_template,
+        'progress_delta': opts.progress_delta,
         'playliststart': opts.playliststart,
         'playlistend': opts.playlistend,
         'playlistreverse': opts.playlist_reverse,
@@ -911,6 +917,7 @@ def parse_options(argv=None):
         'postprocessors': postprocessors,
         'fixup': opts.fixup,
         'source_address': opts.source_address,
+        'impersonate': opts.impersonate,
         'call_home': opts.call_home,
         'sleep_interval_requests': opts.sleep_interval_requests,
         'sleep_interval': opts.sleep_interval,
@@ -980,6 +987,41 @@ def _real_main(argv=None):
             traceback.print_exc()
             ydl._download_retcode = 100

+        if opts.list_impersonate_targets:
+
+            known_targets = [
+                # List of simplified targets we know are supported,
+                # to help users know what dependencies may be required.
+                (ImpersonateTarget('chrome'), 'curl_cffi'),
+                (ImpersonateTarget('edge'), 'curl_cffi'),
+                (ImpersonateTarget('safari'), 'curl_cffi'),
+            ]
+
+            available_targets = ydl._get_available_impersonate_targets()
+
+            def make_row(target, handler):
+                return [
+                    join_nonempty(target.client.title(), target.version, delim='-') or '-',
+                    join_nonempty((target.os or "").title(), target.os_version, delim='-') or '-',
+                    handler,
+                ]
+
+            rows = [make_row(target, handler) for target, handler in available_targets]
+
+            for known_target, known_handler in known_targets:
+                if not any(
+                    known_target in target and handler == known_handler
+                    for target, handler in available_targets
+                ):
+                    rows.append([
+                        ydl._format_out(text, ydl.Styles.SUPPRESS)
+                        for text in make_row(known_target, f'{known_handler} (not available)')
+                    ])
+
+            ydl.to_screen('[info] Available impersonate targets')
+            ydl.to_stdout(render_table(['Client', 'OS', 'Source'], rows, extra_gap=2, delim='-'))
+            return
+
         if not actual_use:
             if pre_process:
                 return ydl._download_retcode

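
`join_nonempty` (newly imported above) joins only the truthy parts with the given delimiter, which is what lets `make_row` render e.g. `Chrome-110` and fall back to `-` when a target carries no version or OS:

    from yt_dlp.utils import join_nonempty

    assert join_nonempty('Chrome', '110', delim='-') == 'Chrome-110'
    assert join_nonempty('Chrome', None, delim='-') == 'Chrome'
    assert (join_nonempty(None, None, delim='-') or '-') == '-'
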
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3

 # Execute with
-# $ python -m yt_dlp
+# $ python3 -m yt_dlp

 import sys

@@ -1,6 +1,6 @@
 import sys

-from PyInstaller.utils.hooks import collect_submodules
+from PyInstaller.utils.hooks import collect_submodules, collect_data_files


 def pycryptodome_module():
@@ -10,7 +10,7 @@ def pycryptodome_module():
     try:
         import Crypto  # noqa: F401
         print('WARNING: Using Crypto since Cryptodome is not available. '
-              'Install with: pip install pycryptodomex', file=sys.stderr)
+              'Install with: python3 -m pip install pycryptodomex', file=sys.stderr)
         return 'Crypto'
     except ImportError:
         pass
@@ -25,10 +25,12 @@ def get_hidden_imports():
     for module in ('websockets', 'requests', 'urllib3'):
         yield from collect_submodules(module)
     # These are auto-detected, but explicitly add them just in case
-    yield from ('mutagen', 'brotli', 'certifi', 'secretstorage')
+    yield from ('mutagen', 'brotli', 'certifi', 'secretstorage', 'curl_cffi')


 hiddenimports = list(get_hidden_imports())
 print(f'Adding imports: {hiddenimports}')

 excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts', 'bundle']
+
+datas = collect_data_files('curl_cffi', includes=['cacert.pem'])

@@ -1,5 +0,0 @@
-import warnings
-
-warnings.warn(DeprecationWarning(f'{__name__} is deprecated'))
-
-casefold = str.casefold
@@ -27,12 +27,9 @@ def compat_etree_fromstring(text):
 compat_os_name = os._name if os.name == 'java' else os.name
 
 
-if compat_os_name == 'nt':
-    def compat_shlex_quote(s):
-        import re
-        return s if re.match(r'^[-_\w./]+$', s) else s.replace('"', '""').join('""')
-else:
-    from shlex import quote as compat_shlex_quote  # noqa: F401
+def compat_shlex_quote(s):
+    from ..utils import shell_quote
+    return shell_quote(s)
 
 
 def compat_ord(c):
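Note: the rewritten `compat_shlex_quote` delegates both platforms to a single `shell_quote` utility rather than picking an implementation at import time. For POSIX-safe inputs the stdlib shows the underlying idea (this is `shlex.quote`, not yt-dlp's `shell_quote`, which additionally handles Windows quoting):

from shlex import quote

assert quote('safe-name.txt') == 'safe-name.txt'   # no quoting needed
assert quote('two words') == "'two words'"         # wrapped in single quotes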
@@ -10,10 +10,10 @@
 from .. import compat_os_name
 
 if compat_os_name == 'nt':
-    # On older python versions, proxies are extracted from Windows registry erroneously. [1]
+    # On older Python versions, proxies are extracted from Windows registry erroneously. [1]
     # If the https proxy in the registry does not have a scheme, urllib will incorrectly add https:// to it. [2]
     # It is unlikely that the user has actually set it to be https, so we should be fine to safely downgrade
-    # it to http on these older python versions to avoid issues
+    # it to http on these older Python versions to avoid issues
     # This also applies for ftp proxy type, as ftp:// proxy scheme is not supported.
     # 1: https://github.com/python/cpython/issues/86793
     # 2: https://github.com/python/cpython/blob/51f1ae5ceb0673316c4e4b0175384e892e33cc6e/Lib/urllib/request.py#L2683-L2698
@@ -1,6 +1,7 @@
 import base64
 import collections
 import contextlib
+import datetime as dt
 import glob
 import http.cookiejar
 import http.cookies
@@ -15,7 +16,6 @@
 import tempfile
 import time
 import urllib.request
-from datetime import datetime, timedelta, timezone
 from enum import Enum, auto
 from hashlib import pbkdf2_hmac
 
@@ -46,7 +46,7 @@
 from .utils._utils import _YDLLogger
 from .utils.networking import normalize_url
 
-CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
+CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi', 'whale'}
 SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}
 
 
@@ -121,7 +121,7 @@ def _extract_firefox_cookies(profile, container, logger):
     logger.info('Extracting cookies from firefox')
     if not sqlite3:
         logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
-                       'Please use a python interpreter compiled with sqlite3 support')
+                       'Please use a Python interpreter compiled with sqlite3 support')
         return YoutubeDLCookieJar()
 
     if profile is None:
@@ -194,7 +194,11 @@ def _firefox_browser_dirs():
         yield os.path.expanduser('~/Library/Application Support/Firefox/Profiles')
 
     else:
-        yield from map(os.path.expanduser, ('~/.mozilla/firefox', '~/snap/firefox/common/.mozilla/firefox'))
+        yield from map(os.path.expanduser, (
+            '~/.mozilla/firefox',
+            '~/snap/firefox/common/.mozilla/firefox',
+            '~/.var/app/org.mozilla.firefox/.mozilla/firefox',
+        ))
 
 
 def _firefox_cookie_dbs(roots):
@@ -215,6 +219,7 @@ def _get_chromium_based_browser_settings(browser_name):
             'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'),
             'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'),
             'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'),
+            'whale': os.path.join(appdata_local, R'Naver\Naver Whale\User Data'),
         }[browser_name]
 
     elif sys.platform == 'darwin':
@@ -226,6 +231,7 @@ def _get_chromium_based_browser_settings(browser_name):
             'edge': os.path.join(appdata, 'Microsoft Edge'),
             'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
             'vivaldi': os.path.join(appdata, 'Vivaldi'),
+            'whale': os.path.join(appdata, 'Naver/Whale'),
         }[browser_name]
 
     else:
@@ -237,6 +243,7 @@ def _get_chromium_based_browser_settings(browser_name):
             'edge': os.path.join(config, 'microsoft-edge'),
             'opera': os.path.join(config, 'opera'),
             'vivaldi': os.path.join(config, 'vivaldi'),
+            'whale': os.path.join(config, 'naver-whale'),
         }[browser_name]
 
     # Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
@@ -248,6 +255,7 @@ def _get_chromium_based_browser_settings(browser_name):
         'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
         'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
        'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
+        'whale': 'Whale',
     }[browser_name]
 
     browsers_without_profiles = {'opera'}
@@ -264,7 +272,7 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):
 
     if not sqlite3:
         logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
-                       'Please use a python interpreter compiled with sqlite3 support')
+                       'Please use a Python interpreter compiled with sqlite3 support')
         return YoutubeDLCookieJar()
 
     config = _get_chromium_based_browser_settings(browser_name)
@@ -343,6 +351,11 @@ def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, pa
     if value is None:
         return is_encrypted, None
 
+    # In chrome, session cookies have expires_utc set to 0
+    # In our cookie-store, cookies that do not expire should have expires set to None
+    if not expires_utc:
+        expires_utc = None
+
     return is_encrypted, http.cookiejar.Cookie(
         version=0, name=name, value=value, port=None, port_specified=False,
         domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
@@ -594,7 +607,7 @@ def skip_to_end(self, description='unknown'):
 
 
 def _mac_absolute_time_to_posix(timestamp):
-    return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())
+    return int((dt.datetime(2001, 1, 1, 0, 0, tzinfo=dt.timezone.utc) + dt.timedelta(seconds=timestamp)).timestamp())
 
 
 def _parse_safari_cookies_header(data, logger):
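Note: the `_mac_absolute_time_to_posix` change above only swaps module aliases (`datetime` becomes `dt.datetime`); the arithmetic is unchanged and simply offsets from the Mac/Cocoa epoch of 2001-01-01 UTC. A standalone check of the same conversion (the function body is copied from the diff; the assert value is our own expectation):

import datetime as dt


def _mac_absolute_time_to_posix(timestamp):
    # Safari stores cookie timestamps as seconds since 2001-01-01 00:00 UTC
    return int((dt.datetime(2001, 1, 1, 0, 0, tzinfo=dt.timezone.utc) + dt.timedelta(seconds=timestamp)).timestamp())


# 2001-01-01 UTC is 978307200 seconds into the POSIX epoch
assert _mac_absolute_time_to_posix(0) == 978307200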
@@ -46,16 +46,14 @@
     # We need to get the underlying `sqlite` version, see https://github.com/yt-dlp/yt-dlp/issues/8152
     sqlite3._yt_dlp__version = sqlite3.sqlite_version
 except ImportError:
-    # although sqlite3 is part of the standard library, it is possible to compile python without
+    # although sqlite3 is part of the standard library, it is possible to compile Python without
     # sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544
     sqlite3 = None
 
 
 try:
     import websockets
-except (ImportError, SyntaxError):
-    # websockets 3.10 on python 3.6 causes SyntaxError
-    # See https://github.com/yt-dlp/yt-dlp/issues/2633
+except ImportError:
     websockets = None
 
 try:
@@ -76,6 +74,10 @@
 if hasattr(xattr, 'set'):  # pyxattr
     xattr._yt_dlp__identifier = 'pyxattr'
 
+try:
+    import curl_cffi
+except ImportError:
+    curl_cffi = None
 
 from . import Cryptodome
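Note: `curl_cffi` joins the other optional dependencies via the module's try/except import pattern, so feature code can test availability with a plain truthiness check. A minimal sketch of the consuming side (illustrative only; the real call sites live in yt-dlp's networking handlers):

try:
    import curl_cffi
except ImportError:
    curl_cffi = None

# degrade gracefully instead of crashing at import time
if curl_cffi is None:
    print('curl_cffi is not installed; impersonation support is unavailable')
else:
    print('curl_cffi is available; impersonate targets can be used')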
@@ -4,6 +4,7 @@
 import os
 import random
 import re
+import threading
 import time
 
 from ..minicurses import (
@@ -63,6 +64,7 @@ class FileDownloader:
     min_filesize:       Skip files smaller than this size
     max_filesize:       Skip files larger than this size
     xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
+    progress_delta:     The minimum time between progress output, in seconds
     external_downloader_args:  A dictionary of downloader keys (in lower case)
                         and a list of additional command-line arguments for the
                         executable. Use 'default' as the name for arguments to be
@@ -88,6 +90,9 @@ def __init__(self, ydl, params):
         self.params = params
         self._prepare_multiline_status()
         self.add_progress_hook(self.report_progress)
+        if self.params.get('progress_delta'):
+            self._progress_delta_lock = threading.Lock()
+            self._progress_delta_time = time.monotonic()
 
     def _set_ydl(self, ydl):
         self.ydl = ydl
@@ -366,6 +371,12 @@ def with_fields(*tups, default=''):
         if s['status'] != 'downloading':
             return
 
+        if update_delta := self.params.get('progress_delta'):
+            with self._progress_delta_lock:
+                if time.monotonic() < self._progress_delta_time:
+                    return
+                self._progress_delta_time += update_delta
+
         s.update({
             '_eta_str': self.format_eta(s.get('eta')).strip(),
             '_speed_str': self.format_speed(s.get('speed')),
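Note: the `progress_delta` hunks above amount to a small thread-safe rate limiter: progress callbacks return early until a monotonic deadline passes, then push the deadline forward by the delta. The same logic in isolation (the class and names below are ours, not yt-dlp API):

import threading
import time


class ProgressThrottle:
    def __init__(self, delta):
        self._delta = delta
        self._lock = threading.Lock()
        self._next = time.monotonic()

    def ready(self):
        # True at most once per `delta` seconds, even with concurrent callers
        with self._lock:
            if time.monotonic() < self._next:
                return False
            self._next += self._delta
            return True


throttle = ProgressThrottle(0.5)
for _ in range(4):
    print('report' if throttle.ready() else 'skip')
    time.sleep(0.2)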
@@ -491,7 +491,7 @@ def _call_downloader(self, tmpfilename, info_dict):
         if not self.params.get('verbose'):
             args += ['-hide_banner']
 
-        args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args'), default=[])
+        args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args', ...))
 
         # These exists only for compatibility. Extractors should use
         # info_dict['downloader_options']['ffmpeg_args'] instead
@@ -615,6 +615,8 @@ def _call_downloader(self, tmpfilename, info_dict):
         else:
             args += ['-f', EXT_TO_OUT_FORMATS.get(ext, ext)]
 
+        args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args_out', ...))
+
         args += self._configuration_args(('_o1', '_o', ''))
 
         args = [encodeArgument(opt) for opt in args]
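Note: replacing `default=[]` with a `...` branch changes how the traversal degrades: a branched `traverse_obj` path always yields a list (empty when nothing matches), so `args +=` stays safe without an explicit default and also flattens the matched elements. A quick behavior check against the real helper (requires yt-dlp installed):

from yt_dlp.utils import traverse_obj

info_dict = {'downloader_options': {'ffmpeg_args': ['-ss', '10']}}

# branching with `...` yields the matched elements as a list...
assert traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args', ...)) == ['-ss', '10']
# ...and an empty list (never None) when the path is absent
assert traverse_obj({}, ('downloader_options', 'ffmpeg_args', ...)) == []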
@@ -237,8 +237,13 @@ def download():
 
         def retry(e):
             close_stream()
-            ctx.resume_len = (byte_counter if ctx.tmpfilename == '-'
-                              else os.path.getsize(encodeFilename(ctx.tmpfilename)))
+            if ctx.tmpfilename == '-':
+                ctx.resume_len = byte_counter
+            else:
+                try:
+                    ctx.resume_len = os.path.getsize(encodeFilename(ctx.tmpfilename))
+                except FileNotFoundError:
+                    ctx.resume_len = 0
             raise RetryDownload(e)
 
         while True:
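Note: the `retry()` rewrite above narrows behavior as well as restructuring it: if the partial file has vanished between attempts, `os.path.getsize` used to raise, while the new branch restarts from offset 0. The decision table, reduced to a standalone function (names are ours):

import os


def resume_length(tmpfilename, byte_counter):
    if tmpfilename == '-':
        # streamed to stdout: the in-memory counter is the only record
        return byte_counter
    try:
        return os.path.getsize(tmpfilename)
    except FileNotFoundError:
        return 0  # nothing on disk yet, restart from the beginning


assert resume_length('-', 1234) == 1234
assert resume_length('/nonexistent/download.part', 1234) == 0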
File diff suppressed because it is too large
@@ -6,10 +6,10 @@
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
-    dict_get,
     ExtractorError,
-    js_to_json,
+    dict_get,
     int_or_none,
+    js_to_json,
     parse_iso8601,
     str_or_none,
     traverse_obj,
@@ -245,7 +245,6 @@ class ABCIViewIE(InfoExtractor):
             'episode_id': 'NC2203H039S00',
             'season_number': 2022,
             'season': 'Season 2022',
-            'episode_number': None,
             'episode': 'Locking Up Kids',
             'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/nc/NC2203H039S00_636d8a0944a22_1920.jpg',
             'timestamp': 1668460497,
@@ -271,8 +270,6 @@ class ABCIViewIE(InfoExtractor):
             'episode_id': 'RF2004Q043S00',
             'season_number': 2021,
             'season': 'Season 2021',
-            'episode_number': None,
-            'episode': None,
             'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/rf/RF2004Q043S00_61a950639dbc0_1920.jpg',
             'timestamp': 1638710705,
@@ -12,20 +12,21 @@
 import urllib.request
 import urllib.response
 import uuid
-from ..utils.networking import clean_proxies
 from .common import InfoExtractor
 from ..aes import aes_ecb_decrypt
 from ..utils import (
     ExtractorError,
+    OnDemandPagedList,
     bytes_to_intlist,
     decode_base_n,
     int_or_none,
     intlist_to_bytes,
-    OnDemandPagedList,
     time_seconds,
     traverse_obj,
     update_url_query,
 )
+from ..utils.networking import clean_proxies
 
 
 def add_opener(ydl, handler):  # FIXME: Create proper API in .networking
@@ -53,7 +54,7 @@ def __init__(self, ie: 'AbemaTVIE'):
         # the protocol that this should really handle is 'abematv-license://'
         # abematv_license_open is just a placeholder for development purposes
         # ref. https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/urllib/request.py#L510
-        setattr(self, 'abematv-license_open', getattr(self, 'abematv_license_open'))
+        setattr(self, 'abematv-license_open', getattr(self, 'abematv_license_open', None))
         self.ie = ie
 
     def _get_videokey_from_ticket(self, ticket):
@@ -259,7 +260,7 @@ class AbemaTVIE(AbemaTVBaseIE):
             'title': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
             'series': 'ゆるキャン△ SEASON2',
             'episode': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
-            'series_number': 2,
+            'season_number': 2,
             'episode_number': 1,
             'description': 'md5:9c5a3172ae763278f9303922f0ea5b17',
         },
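Note: the handler above registers itself under the hyphenated scheme name because `urllib`'s `OpenerDirector` dispatches by looking up a `<scheme>_open` attribute on each handler, which is what the CPython reference in the comment points at. A toy illustration of that dispatch convention (stdlib only; the `echo` scheme is made up):

import io
import urllib.request


class EchoHandler(urllib.request.BaseHandler):
    # OpenerDirector routes scheme '<x>' to a method named '<x>_open'
    def echo_open(self, req):
        return io.BytesIO(req.full_url.encode())


opener = urllib.request.build_opener(EchoHandler())
print(opener.open('echo://hello').read())  # b'echo://hello'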
@@ -3,9 +3,10 @@
     float_or_none,
     format_field,
     int_or_none,
-    traverse_obj,
     parse_codecs,
     parse_qs,
+    str_or_none,
+    traverse_obj,
 )
 
 
@@ -129,7 +130,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
         'title': '红孩儿之趴趴蛙寻石记 第5话 ',
         'duration': 760.0,
         'season': '红孩儿之趴趴蛙寻石记',
-        'season_id': 5023171,
+        'season_id': '5023171',
         'season_number': 1,  # series has only 1 season
         'episode': 'Episode 5',
         'episode_number': 5,
@@ -146,7 +147,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
         'title': '叽歪老表(第二季) 第5话 坚不可摧',
         'season': '叽歪老表(第二季)',
         'season_number': 2,
-        'season_id': 6065485,
+        'season_id': '6065485',
         'episode': '坚不可摧',
         'episode_number': 5,
         'upload_date': '20220324',
@@ -191,7 +192,7 @@ def _real_extract(self, url):
             'title': json_bangumi_data.get('showTitle'),
             'thumbnail': json_bangumi_data.get('image'),
             'season': json_bangumi_data.get('bangumiTitle'),
-            'season_id': season_id,
+            'season_id': str_or_none(season_id),
             'season_number': season_number,
             'episode': json_bangumi_data.get('title'),
             'episode_number': episode_number,
@@ -10,18 +10,18 @@
 from ..compat import compat_b64decode
 from ..networking.exceptions import HTTPError
 from ..utils import (
+    ExtractorError,
     ass_subtitles_timecode,
     bytes_to_intlist,
     bytes_to_long,
-    ExtractorError,
     float_or_none,
     int_or_none,
     intlist_to_bytes,
     long_to_bytes,
     parse_iso8601,
     pkcs1pad,
-    strip_or_none,
     str_or_none,
+    strip_or_none,
     try_get,
     unified_strdate,
     urlencode_postdata,
@@ -4,11 +4,11 @@
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (
+    ISO639Utils,
+    OnDemandPagedList,
     float_or_none,
     int_or_none,
-    ISO639Utils,
     join_nonempty,
-    OnDemandPagedList,
     parse_duration,
     str_or_none,
     str_to_int,
@@ -107,7 +107,6 @@ def _real_extract(self, url):
                 title
                 tvRating
             }''' % episode_path
-            ['getVideoBySlug']
         else:
             query = query % '''metaDescription
                 title
@@ -1,25 +1,65 @@
 import functools
-import re
 
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
-    date_from_str,
+    UserNotLive,
     determine_ext,
+    filter_dict,
     int_or_none,
-    qualities,
-    traverse_obj,
-    unified_strdate,
+    orderedSet,
     unified_timestamp,
-    update_url_query,
     url_or_none,
     urlencode_postdata,
-    xpath_text,
+    urljoin,
 )
+from ..utils.traversal import traverse_obj
 
 
-class AfreecaTVIE(InfoExtractor):
+class AfreecaTVBaseIE(InfoExtractor):
+    _NETRC_MACHINE = 'afreecatv'
+
+    def _perform_login(self, username, password):
+        login_form = {
+            'szWork': 'login',
+            'szType': 'json',
+            'szUid': username,
+            'szPassword': password,
+            'isSaveId': 'false',
+            'szScriptVar': 'oLoginRet',
+            'szAction': '',
+        }
+
+        response = self._download_json(
+            'https://login.afreecatv.com/app/LoginAction.php', None,
+            'Logging in', data=urlencode_postdata(login_form))
+
+        _ERRORS = {
+            -4: 'Your account has been suspended due to a violation of our terms and policies.',
+            -5: 'https://member.afreecatv.com/app/user_delete_progress.php',
+            -6: 'https://login.afreecatv.com/membership/changeMember.php',
+            -8: "Hello! AfreecaTV here.\nThe username you have entered belongs to \n an account that requires a legal guardian's consent. \nIf you wish to use our services without restriction, \nplease make sure to go through the necessary verification process.",
+            -9: 'https://member.afreecatv.com/app/pop_login_block.php',
+            -11: 'https://login.afreecatv.com/afreeca/second_login.php',
+            -12: 'https://member.afreecatv.com/app/user_security.php',
+            0: 'The username does not exist or you have entered the wrong password.',
+            -1: 'The username does not exist or you have entered the wrong password.',
+            -3: 'You have entered your username/password incorrectly.',
+            -7: 'You cannot use your Global AfreecaTV account to access Korean AfreecaTV.',
+            -10: 'Sorry for the inconvenience. \nYour account has been blocked due to an unauthorized access. \nPlease contact our Help Center for assistance.',
+            -32008: 'You have failed to log in. Please contact our Help Center.',
+        }
+
+        result = int_or_none(response.get('RESULT'))
+        if result != 1:
+            error = _ERRORS.get(result, 'You have failed to log in.')
+            raise ExtractorError(
+                'Unable to login: %s said: %s' % (self.IE_NAME, error),
+                expected=True)
+
+
+class AfreecaTVIE(AfreecaTVBaseIE):
     IE_NAME = 'afreecatv'
     IE_DESC = 'afreecatv.com'
     _VALID_URL = r'''(?x)
@@ -34,7 +74,6 @@ class AfreecaTVIE(InfoExtractor):
                     )
                     (?P<id>\d+)
                     '''
-    _NETRC_MACHINE = 'afreecatv'
     _TESTS = [{
         'url': 'http://live.afreecatv.com:8079/app/index.cgi?szType=read_ucc_bbs&szBjId=dailyapril&nStationNo=16711924&nBbsNo=18605867&nTitleNo=36164052&szSkin=',
         'md5': 'f72c89fe7ecc14c1b5ce506c4996046e',
@@ -87,6 +126,7 @@ class AfreecaTVIE(InfoExtractor):
             'uploader': '♥이슬이',
             'uploader_id': 'dasl8121',
             'upload_date': '20170411',
+            'timestamp': 1491929865,
             'duration': 213,
         },
         'params': {
@@ -120,219 +160,102 @@ class AfreecaTVIE(InfoExtractor):
             'uploader_id': 'rlantnghks',
             'uploader': '페이즈으',
             'duration': 10840,
-            'thumbnail': 'http://videoimg.afreecatv.com/php/SnapshotLoad.php?rowKey=20230108_9FF5BEE1_244432674_1_r',
+            'thumbnail': r're:https?://videoimg\.afreecatv\.com/.+',
             'upload_date': '20230108',
+            'timestamp': 1673218805,
             'title': '젠지 페이즈',
         },
         'params': {
             'skip_download': True,
         },
+    }, {
+        # adult content
+        'url': 'https://vod.afreecatv.com/player/70395877',
+        'only_matching': True,
+    }, {
+        # subscribers only
+        'url': 'https://vod.afreecatv.com/player/104647403',
+        'only_matching': True,
+    }, {
+        # private
+        'url': 'https://vod.afreecatv.com/player/81669846',
+        'only_matching': True,
     }]
 
-    @staticmethod
-    def parse_video_key(key):
-        video_key = {}
-        m = re.match(r'^(?P<upload_date>\d{8})_\w+_(?P<part>\d+)$', key)
-        if m:
-            video_key['upload_date'] = m.group('upload_date')
-            video_key['part'] = int(m.group('part'))
-        return video_key
-
-    def _perform_login(self, username, password):
-        login_form = {
-            'szWork': 'login',
-            'szType': 'json',
-            'szUid': username,
-            'szPassword': password,
-            'isSaveId': 'false',
-            'szScriptVar': 'oLoginRet',
-            'szAction': '',
-        }
-
-        response = self._download_json(
-            'https://login.afreecatv.com/app/LoginAction.php', None,
-            'Logging in', data=urlencode_postdata(login_form))
-
-        _ERRORS = {
-            -4: 'Your account has been suspended due to a violation of our terms and policies.',
-            -5: 'https://member.afreecatv.com/app/user_delete_progress.php',
-            -6: 'https://login.afreecatv.com/membership/changeMember.php',
-            -8: "Hello! AfreecaTV here.\nThe username you have entered belongs to \n an account that requires a legal guardian's consent. \nIf you wish to use our services without restriction, \nplease make sure to go through the necessary verification process.",
-            -9: 'https://member.afreecatv.com/app/pop_login_block.php',
-            -11: 'https://login.afreecatv.com/afreeca/second_login.php',
-            -12: 'https://member.afreecatv.com/app/user_security.php',
-            0: 'The username does not exist or you have entered the wrong password.',
-            -1: 'The username does not exist or you have entered the wrong password.',
-            -3: 'You have entered your username/password incorrectly.',
-            -7: 'You cannot use your Global AfreecaTV account to access Korean AfreecaTV.',
-            -10: 'Sorry for the inconvenience. \nYour account has been blocked due to an unauthorized access. \nPlease contact our Help Center for assistance.',
-            -32008: 'You have failed to log in. Please contact our Help Center.',
-        }
-
-        result = int_or_none(response.get('RESULT'))
-        if result != 1:
-            error = _ERRORS.get(result, 'You have failed to log in.')
-            raise ExtractorError(
-                'Unable to login: %s said: %s' % (self.IE_NAME, error),
-                expected=True)
-
     def _real_extract(self, url):
         video_id = self._match_id(url)
-
-        partial_view = False
-        adult_view = False
-        for _ in range(2):
-            data = self._download_json(
-                'https://api.m.afreecatv.com/station/video/a/view',
-                video_id, headers={'Referer': url}, data=urlencode_postdata({
-                    'nTitleNo': video_id,
-                    'nApiLevel': 10,
-                }))['data']
-            if traverse_obj(data, ('code', {int})) == -6221:
-                raise ExtractorError('The VOD does not exist', expected=True)
-            query = {
+        data = self._download_json(
+            'https://api.m.afreecatv.com/station/video/a/view', video_id,
+            headers={'Referer': url}, data=urlencode_postdata({
                 'nTitleNo': video_id,
-                'nStationNo': data['station_no'],
-                'nBbsNo': data['bbs_no'],
-            }
-            if partial_view:
-                query['partialView'] = 'SKIP_ADULT'
-            if adult_view:
-                query['adultView'] = 'ADULT_VIEW'
-            video_xml = self._download_xml(
-                'http://afbbs.afreecatv.com:8080/api/video/get_video_info.php',
-                video_id, 'Downloading video info XML%s'
-                % (' (skipping adult)' if partial_view else ''),
-                video_id, headers={
-                    'Referer': url,
-                }, query=query)
-
-            flag = xpath_text(video_xml, './track/flag', 'flag', default=None)
-            if flag and flag == 'SUCCEED':
-                break
-            if flag == 'PARTIAL_ADULT':
-                self.report_warning(
-                    'In accordance with local laws and regulations, underage users are restricted from watching adult content. '
-                    'Only content suitable for all ages will be downloaded. '
-                    'Provide account credentials if you wish to download restricted content.')
-                partial_view = True
-                continue
-            elif flag == 'ADULT':
-                if not adult_view:
-                    adult_view = True
-                    continue
-                error = 'Only users older than 19 are able to watch this video. Provide account credentials to download this content.'
-            else:
-                error = flag
-            raise ExtractorError(
-                '%s said: %s' % (self.IE_NAME, error), expected=True)
-        else:
-            raise ExtractorError('Unable to download video info')
-
-        video_element = video_xml.findall('./track/video')[-1]
-        if video_element is None or video_element.text is None:
-            raise ExtractorError(
-                'Video %s does not exist' % video_id, expected=True)
-
-        video_url = video_element.text.strip()
-
-        title = xpath_text(video_xml, './track/title', 'title', fatal=True)
-
-        uploader = xpath_text(video_xml, './track/nickname', 'uploader')
-        uploader_id = xpath_text(video_xml, './track/bj_id', 'uploader id')
-        duration = int_or_none(xpath_text(
-            video_xml, './track/duration', 'duration'))
-        thumbnail = xpath_text(video_xml, './track/titleImage', 'thumbnail')
-
-        common_entry = {
-            'uploader': uploader,
-            'uploader_id': uploader_id,
-            'thumbnail': thumbnail,
-        }
-
-        info = common_entry.copy()
-        info.update({
-            'id': video_id,
-            'title': title,
-            'duration': duration,
+                'nApiLevel': 10,
+            }))['data']
+
+        error_code = traverse_obj(data, ('code', {int}))
+        if error_code == -6221:
+            raise ExtractorError('The VOD does not exist', expected=True)
+        elif error_code == -6205:
+            raise ExtractorError('This VOD is private', expected=True)
+
+        common_info = traverse_obj(data, {
+            'title': ('title', {str}),
+            'uploader': ('writer_nick', {str}),
+            'uploader_id': ('bj_id', {str}),
+            'duration': ('total_file_duration', {functools.partial(int_or_none, scale=1000)}),
+            'thumbnail': ('thumb', {url_or_none}),
         })
 
-        if not video_url:
-            entries = []
-            file_elements = video_element.findall('./file')
-            one = len(file_elements) == 1
-            for file_num, file_element in enumerate(file_elements, start=1):
-                file_url = url_or_none(file_element.text)
-                if not file_url:
-                    continue
-                key = file_element.get('key', '')
-                upload_date = unified_strdate(self._search_regex(
-                    r'^(\d{8})_', key, 'upload date', default=None))
-                if upload_date is not None:
-                    # sometimes the upload date isn't included in the file name
-                    # instead, another random ID is, which may parse as a valid
-                    # date but be wildly out of a reasonable range
-                    parsed_date = date_from_str(upload_date)
-                    if parsed_date.year < 2000 or parsed_date.year >= 2100:
-                        upload_date = None
-                file_duration = int_or_none(file_element.get('duration'))
-                format_id = key if key else '%s_%s' % (video_id, file_num)
-                if determine_ext(file_url) == 'm3u8':
-                    formats = self._extract_m3u8_formats(
-                        file_url, video_id, 'mp4', entry_protocol='m3u8_native',
-                        m3u8_id='hls',
-                        note='Downloading part %d m3u8 information' % file_num)
-                else:
-                    formats = [{
-                        'url': file_url,
-                        'format_id': 'http',
-                    }]
-                if not formats and not self.get_param('ignore_no_formats'):
-                    continue
-                file_info = common_entry.copy()
-                file_info.update({
-                    'id': format_id,
-                    'title': title if one else '%s (part %d)' % (title, file_num),
-                    'upload_date': upload_date,
-                    'duration': file_duration,
-                    'formats': formats,
-                })
-                entries.append(file_info)
-
-            entries_info = info.copy()
-            entries_info.update({
-                '_type': 'multi_video',
-                'entries': entries,
-            })
-            return entries_info
-
-        info = {
-            'id': video_id,
-            'title': title,
-            'uploader': uploader,
-            'uploader_id': uploader_id,
-            'duration': duration,
-            'thumbnail': thumbnail,
-        }
-
-        if determine_ext(video_url) == 'm3u8':
-            info['formats'] = self._extract_m3u8_formats(
-                video_url, video_id, 'mp4', entry_protocol='m3u8_native',
-                m3u8_id='hls')
-        else:
-            app, playpath = video_url.split('mp4:')
-            info.update({
-                'url': app,
-                'ext': 'flv',
-                'play_path': 'mp4:' + playpath,
-                'rtmp_live': True,  # downloading won't end without this
-            })
-
-        return info
+        entries = []
+        for file_num, file_element in enumerate(
+                traverse_obj(data, ('files', lambda _, v: url_or_none(v['file']))), start=1):
+            file_url = file_element['file']
+            if determine_ext(file_url) == 'm3u8':
+                formats = self._extract_m3u8_formats(
+                    file_url, video_id, 'mp4', m3u8_id='hls',
+                    note=f'Downloading part {file_num} m3u8 information')
+            else:
+                formats = [{
+                    'url': file_url,
+                    'format_id': 'http',
+                }]
+
+            entries.append({
+                **common_info,
+                'id': file_element.get('file_info_key') or f'{video_id}_{file_num}',
+                'title': f'{common_info.get("title") or "Untitled"} (part {file_num})',
+                'formats': formats,
+                **traverse_obj(file_element, {
+                    'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
+                    'timestamp': ('file_start', {unified_timestamp}),
+                }),
+            })
+
+        if traverse_obj(data, ('adult_status', {str})) == 'notLogin':
+            if not entries:
+                self.raise_login_required(
+                    'Only users older than 19 are able to watch this video', method='password')
+            self.report_warning(
+                'In accordance with local laws and regulations, underage users are '
+                'restricted from watching adult content. Only content suitable for all '
+                f'ages will be downloaded. {self._login_hint("password")}')
+
+        if not entries and traverse_obj(data, ('sub_upload_type', {str})):
+            self.raise_login_required('This VOD is for subscribers only', method='password')
+
+        if len(entries) == 1:
+            return {
+                **entries[0],
+                'title': common_info.get('title'),
+            }
+
+        common_info['timestamp'] = traverse_obj(entries, (..., 'timestamp'), get_all=False)
+
+        return self.playlist_result(entries, video_id, multi_video=True, **common_info)
 
 
-class AfreecaTVLiveIE(AfreecaTVIE):  # XXX: Do not subclass from concrete IE
+class AfreecaTVLiveIE(AfreecaTVBaseIE):
 
     IE_NAME = 'afreecatv:live'
+    IE_DESC = 'afreecatv.com livestreams'
     _VALID_URL = r'https?://play\.afreeca(?:tv)?\.com/(?P<id>[^/]+)(?:/(?P<bno>\d+))?'
     _TESTS = [{
         'url': 'https://play.afreecatv.com/pyh3646/237852185',
@@ -347,77 +270,97 @@ class AfreecaTVLiveIE(AfreecaTVIE):  # XXX: Do not subclass from concrete IE
         },
         'skip': 'Livestream has ended',
     }, {
-        'url': 'http://play.afreeca.com/pyh3646/237852185',
+        'url': 'https://play.afreecatv.com/pyh3646/237852185',
        'only_matching': True,
     }, {
-        'url': 'http://play.afreeca.com/pyh3646',
+        'url': 'https://play.afreecatv.com/pyh3646',
        'only_matching': True,
     }]
 
     _LIVE_API_URL = 'https://live.afreecatv.com/afreeca/player_live_api.php'
+    _WORKING_CDNS = [
+        'gcp_cdn',  # live-global-cdn-v02.afreecatv.com
+        'gs_cdn_pc_app',  # pc-app.stream.afreecatv.com
+        'gs_cdn_mobile_web',  # mobile-web.stream.afreecatv.com
+        'gs_cdn_pc_web',  # pc-web.stream.afreecatv.com
+    ]
+    _BAD_CDNS = [
+        'gs_cdn',  # chromecast.afreeca.gscdn.com (cannot resolve)
+        'gs_cdn_chromecast',  # chromecast.stream.afreecatv.com (HTTP Error 400)
+        'azure_cdn',  # live-global-cdn-v01.afreecatv.com (cannot resolve)
+        'aws_cf',  # live-global-cdn-v03.afreecatv.com (cannot resolve)
+        'kt_cdn',  # kt.stream.afreecatv.com (HTTP Error 400)
+    ]
 
-    _QUALITIES = ('sd', 'hd', 'hd2k', 'original')
+    def _extract_formats(self, channel_info, broadcast_no, aid):
+        stream_base_url = channel_info.get('RMD') or 'https://livestream-manager.afreecatv.com'
+
+        # If user has not passed CDN IDs, try API-provided CDN ID followed by other working CDN IDs
+        default_cdn_ids = orderedSet([
+            *traverse_obj(channel_info, ('CDN', {str}, all, lambda _, v: v not in self._BAD_CDNS)),
+            *self._WORKING_CDNS,
+        ])
+        cdn_ids = self._configuration_arg('cdn', default_cdn_ids)
+
+        for attempt, cdn_id in enumerate(cdn_ids, start=1):
+            m3u8_url = traverse_obj(self._download_json(
+                urljoin(stream_base_url, 'broad_stream_assign.html'), broadcast_no,
+                f'Downloading {cdn_id} stream info', f'Unable to download {cdn_id} stream info',
+                fatal=False, query={
+                    'return_type': cdn_id,
+                    'broad_key': f'{broadcast_no}-common-master-hls',
+                }), ('view_url', {url_or_none}))
+            try:
+                return self._extract_m3u8_formats(
+                    m3u8_url, broadcast_no, 'mp4', m3u8_id='hls', query={'aid': aid},
+                    headers={'Referer': 'https://play.afreecatv.com/'})
+            except ExtractorError as e:
+                if attempt == len(cdn_ids):
+                    raise
+                self.report_warning(
+                    f'{e.cause or e.msg}. Retrying... (attempt {attempt} of {len(cdn_ids)})')
 
     def _real_extract(self, url):
         broadcaster_id, broadcast_no = self._match_valid_url(url).group('id', 'bno')
-        password = self.get_param('videopassword')
+        channel_info = traverse_obj(self._download_json(
+            self._LIVE_API_URL, broadcaster_id, data=urlencode_postdata({'bid': broadcaster_id})),
+            ('CHANNEL', {dict})) or {}
 
-        info = self._download_json(self._LIVE_API_URL, broadcaster_id, fatal=False,
-                                   data=urlencode_postdata({'bid': broadcaster_id})) or {}
-        channel_info = info.get('CHANNEL') or {}
         broadcaster_id = channel_info.get('BJID') or broadcaster_id
         broadcast_no = channel_info.get('BNO') or broadcast_no
-        password_protected = channel_info.get('BPWD')
         if not broadcast_no:
-            raise ExtractorError(f'Unable to extract broadcast number ({broadcaster_id} may not be live)', expected=True)
-        if password_protected == 'Y' and password is None:
+            raise UserNotLive(video_id=broadcaster_id)
+
+        password = self.get_param('videopassword')
+        if channel_info.get('BPWD') == 'Y' and password is None:
             raise ExtractorError(
                 'This livestream is protected by a password, use the --video-password option',
                 expected=True)
 
-        formats = []
-        quality_key = qualities(self._QUALITIES)
-        for quality_str in self._QUALITIES:
-            params = {
+        token_info = traverse_obj(self._download_json(
+            self._LIVE_API_URL, broadcast_no, 'Downloading access token for stream',
+            'Unable to download access token for stream', data=urlencode_postdata(filter_dict({
                 'bno': broadcast_no,
                 'stream_type': 'common',
                 'type': 'aid',
-                'quality': quality_str,
-            }
-            if password is not None:
-                params['pwd'] = password
-            aid_response = self._download_json(
-                self._LIVE_API_URL, broadcast_no, fatal=False,
-                data=urlencode_postdata(params),
-                note=f'Downloading access token for {quality_str} stream',
-                errnote=f'Unable to download access token for {quality_str} stream')
-            aid = traverse_obj(aid_response, ('CHANNEL', 'AID'))
-            if not aid:
-                continue
-
-            stream_base_url = channel_info.get('RMD') or 'https://livestream-manager.afreecatv.com'
-            stream_info = self._download_json(
-                f'{stream_base_url}/broad_stream_assign.html', broadcast_no, fatal=False,
-                query={
-                    'return_type': channel_info.get('CDN', 'gcp_cdn'),
-                    'broad_key': f'{broadcast_no}-common-{quality_str}-hls',
-                },
-                note=f'Downloading metadata for {quality_str} stream',
-                errnote=f'Unable to download metadata for {quality_str} stream') or {}
-
-            if stream_info.get('view_url'):
-                formats.append({
-                    'format_id': quality_str,
-                    'url': update_url_query(stream_info['view_url'], {'aid': aid}),
-                    'ext': 'mp4',
-                    'protocol': 'm3u8',
-                    'quality': quality_key(quality_str),
-                })
-
-        station_info = self._download_json(
+                'quality': 'master',
+                'pwd': password,
+            }))), ('CHANNEL', {dict})) or {}
+        aid = token_info.get('AID')
+        if not aid:
+            result = token_info.get('RESULT')
+            if result == 0:
+                raise ExtractorError('This livestream has ended', expected=True)
+            elif result == -6:
+                self.raise_login_required('This livestream is for subscribers only', method='password')
+            raise ExtractorError('Unable to extract access token')
+
+        formats = self._extract_formats(channel_info, broadcast_no, aid)
+
+        station_info = traverse_obj(self._download_json(
             'https://st.afreecatv.com/api/get_station_status.php', broadcast_no,
-            query={'szBjId': broadcaster_id}, fatal=False,
-            note='Downloading channel metadata', errnote='Unable to download channel metadata') or {}
+            'Downloading channel metadata', 'Unable to download channel metadata',
+            query={'szBjId': broadcaster_id}, fatal=False), {dict}) or {}
 
         return {
             'id': broadcast_no,
@@ -427,6 +370,7 @@ def _real_extract(self, url):
             'timestamp': unified_timestamp(station_info.get('broad_start')),
             'formats': formats,
             'is_live': True,
+            'http_headers': {'Referer': url},
         }
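Note: `_extract_formats` above walks an ordered CDN list, downgrading each failure to a warning until the final candidate, whose failure propagates. The retry skeleton on its own (the `fetch` callback and CDN names below are placeholders, not yt-dlp API):

def first_working(cdn_ids, fetch):
    # `fetch` raises on failure; only the last candidate's failure propagates
    for attempt, cdn_id in enumerate(cdn_ids, start=1):
        try:
            return fetch(cdn_id)
        except OSError as e:
            if attempt == len(cdn_ids):
                raise
            print(f'{e}. Retrying... (attempt {attempt} of {len(cdn_ids)})')


def fetch(cdn_id):
    if cdn_id != 'gs_cdn_pc_web':
        raise OSError(f'cannot resolve {cdn_id}')
    return f'{cdn_id}: master.m3u8'


print(first_working(['gcp_cdn', 'gs_cdn_pc_app', 'gs_cdn_pc_web'], fetch))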
@@ -5,7 +5,7 @@
     int_or_none,
     mimetype2ext,
     parse_iso8601,
-    traverse_obj
+    traverse_obj,
 )
@@ -12,7 +12,6 @@
 )
 from ..utils.traversal import traverse_obj
 
-
 _FIELDS = '''
     _id
     clipImageSource
@@ -1,9 +1,9 @@
 from .common import InfoExtractor
 from ..utils import (
-    parse_iso8601,
+    int_or_none,
     parse_duration,
     parse_filesize,
-    int_or_none,
+    parse_iso8601,
 )
@@ -4,6 +4,7 @@
 from .common import InfoExtractor
 from ..utils import (
     InAdvancePagedList,
+    clean_html,
     int_or_none,
     orderedSet,
     str_to_int,
@@ -32,13 +33,15 @@ class AltCensoredIE(InfoExtractor):
             'duration': 926.09,
             'thumbnail': 'https://archive.org/download/youtube-k0srjLSkga8/youtube-k0srjLSkga8.thumbs/k0srjLSkga8_000925.jpg',
             'view_count': int,
-            'categories': ['News & Politics'],  # FIXME
+            'categories': ['News & Politics'],
         }
     }]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
+        category = clean_html(self._html_search_regex(
+            r'<a href="/category/\d+">([^<]+)</a>', webpage, 'category', default=None))
 
         return {
             '_type': 'url_transparent',
@@ -46,9 +49,7 @@ def _real_extract(self, url):
             'ie_key': ArchiveOrgIE.ie_key(),
             'view_count': str_to_int(self._html_search_regex(
                 r'YouTube Views:(?:\s|&nbsp;)*([\d,]+)', webpage, 'view count', default=None)),
-            'categories': self._html_search_regex(
-                r'<a href="/category/\d+">\s*\n?\s*([^<]+)</a>',
-                webpage, 'category', default='').split() or None,
+            'categories': [category] if category else None,
         }
@@ -1,17 +1,13 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urlparse,
-)
+from ..compat import compat_urlparse
 from ..utils import (
+    ExtractorError,
+    clean_html,
+    int_or_none,
     urlencode_postdata,
     urljoin,
-    int_or_none,
-    clean_html,
-    ExtractorError
 )
 
 
@@ -39,7 +35,7 @@ class AluraIE(InfoExtractor):
 
     def _real_extract(self, url):
 
-        course, video_id = self._match_valid_url(url)
+        course, video_id = self._match_valid_url(url).group('course_name', 'id')
         video_url = self._VIDEO_URL % (course, video_id)
 
         video_dict = self._download_json(video_url, video_id, 'Searching for videos')
@@ -52,7 +48,7 @@ def _real_extract(self, url):
 
         formats = []
         for video_obj in video_dict:
-            video_url_m3u8 = video_obj.get('link')
+            video_url_m3u8 = video_obj.get('mp4')
             video_format = self._extract_m3u8_formats(
                 video_url_m3u8, None, 'mp4', entry_protocol='m3u8_native',
                 m3u8_id='hls', fatal=False)
@@ -1,6 +1,6 @@
 from .common import InfoExtractor
-from .youtube import YoutubeIE
 from .vimeo import VimeoIE
+from .youtube import YoutubeIE
 from ..utils import (
     int_or_none,
     parse_iso8601,
@@ -1,7 +1,7 @@
 from .common import InfoExtractor
 from ..utils import (
-    determine_ext,
     ExtractorError,
+    determine_ext,
     int_or_none,
     mimetype2ext,
     parse_iso8601,
@@ -5,7 +5,7 @@
     int_or_none,
     str_or_none,
     traverse_obj,
-    unified_timestamp
+    unified_timestamp,
 )
@@ -1,7 +1,7 @@
 import re
 
 from .common import InfoExtractor
-from ..utils import url_or_none, merge_dicts
+from ..utils import merge_dicts, url_or_none
 
 
 class AngelIE(InfoExtractor):
@@ -67,7 +67,7 @@ def _real_extract(self, url):
         webpage = self._download_webpage(url, video_id)
         info = self._download_and_extract_api_data(video_id, netloc)
         info['description'] = self._og_search_description(webpage, default=None)
-        info['_old_archive_ids'] = [make_archive_id('Ant1NewsGrWatch', video_id)],
+        info['_old_archive_ids'] = [make_archive_id('Ant1NewsGrWatch', video_id)]
         return info
@@ -1,8 +1,5 @@
 from .common import InfoExtractor
-from ..utils import (
-    str_to_int,
-    ExtractorError
-)
+from ..utils import ExtractorError, str_to_int
 
 
 class AppleConnectIE(InfoExtractor):
@@ -1,5 +1,5 @@
-import re
 import json
+import re
 
 from .common import InfoExtractor
 from ..compat import compat_urlparse
@ -31,6 +31,7 @@
|
||||||
unified_timestamp,
|
unified_timestamp,
|
||||||
url_or_none,
|
url_or_none,
|
||||||
urlhandle_detect_ext,
|
urlhandle_detect_ext,
|
||||||
|
variadic,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@@ -49,7 +50,7 @@ class ArchiveOrgIE(InfoExtractor):
             'release_date': '19681210',
             'timestamp': 1268695290,
             'upload_date': '20100315',
-            'creator': 'SRI International',
+            'creators': ['SRI International'],
             'uploader': 'laura@archive.org',
             'thumbnail': r're:https://archive\.org/download/.*\.jpg',
             'display_id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.cdr',
@@ -109,7 +110,7 @@ class ArchiveOrgIE(InfoExtractor):
             'title': 'Turning',
             'ext': 'flac',
             'track': 'Turning',
-            'creator': 'Grateful Dead',
+            'creators': ['Grateful Dead'],
             'display_id': 'gd1977-05-08d01t01.flac',
             'track_number': 1,
             'album': '1977-05-08 - Barton Hall - Cornell University',
@@ -129,7 +130,7 @@ class ArchiveOrgIE(InfoExtractor):
             'location': 'Barton Hall - Cornell University',
             'duration': 438.68,
             'track': 'Deal',
-            'creator': 'Grateful Dead',
+            'creators': ['Grateful Dead'],
             'album': '1977-05-08 - Barton Hall - Cornell University',
             'release_date': '19770508',
             'display_id': 'gd1977-05-08d01t07.flac',
@@ -167,7 +168,7 @@ class ArchiveOrgIE(InfoExtractor):
             'upload_date': '20160610',
             'description': 'md5:f70956a156645a658a0dc9513d9e78b7',
             'uploader': 'dimitrios@archive.org',
-            'creator': ['British Broadcasting Corporation', 'Time-Life Films'],
+            'creators': ['British Broadcasting Corporation', 'Time-Life Films'],
             'timestamp': 1465594947,
         },
         'playlist': [
@@ -257,7 +258,7 @@ def _real_extract(self, url):
             'title': m['title'],
             'description': clean_html(m.get('description')),
             'uploader': dict_get(m, ['uploader', 'adder']),
-            'creator': m.get('creator'),
+            'creators': traverse_obj(m, ('creator', {variadic}, {lambda x: x[0] and list(x)})),
             'license': m.get('licenseurl'),
             'release_date': unified_strdate(m.get('date')),
             'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
@@ -272,7 +273,7 @@ def _real_extract(self, url):
                 'title': f.get('title') or f['name'],
                 'display_id': f['name'],
                 'description': clean_html(f.get('description')),
-                'creator': f.get('creator'),
+                'creators': traverse_obj(f, ('creator', {variadic}, {lambda x: x[0] and list(x)})),
                 'duration': parse_duration(f.get('length')),
                 'track_number': int_or_none(f.get('track')),
                 'album': f.get('album'),
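For context, the new `creators` mapping accepts either a string or a list from the archive.org metadata and normalizes it into a non-empty list. A rough standalone sketch of the same logic, using simplified stand-ins for the yt-dlp helpers:

    def variadic(x):
        # wrap scalars in a tuple; pass sequences through (simplified)
        return x if isinstance(x, (list, tuple)) else (x,)

    def creators(value):
        items = variadic(value)
        # mirrors {lambda x: x[0] and list(x)}: a falsy first element yields no value
        return list(items) if items and items[0] else None

    assert creators('Grateful Dead') == ['Grateful Dead']
    assert creators(['BBC', 'Time-Life Films']) == ['BBC', 'Time-Life Films']
    assert creators(None) is None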
@@ -1,5 +1,5 @@
+import functools
 import re
-from functools import partial

 from .common import InfoExtractor
 from ..utils import (
@@ -349,7 +349,7 @@ def _extract_episode_info(self, title):
             r'(?P<title>.*)',
         ]

-        return traverse_obj(patterns, (..., {partial(re.match, string=title)}, {
+        return traverse_obj(patterns, (..., {functools.partial(re.match, string=title)}, {
             'season_number': ('season_number', {int_or_none}),
             'episode_number': ('episode_number', {int_or_none}),
             'episode': ((
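The switch to `functools.partial(re.match, string=title)` keeps the same trick as before: binding the subject string turns each pattern into a one-argument callable that can be applied across the pattern list. A minimal sketch with made-up patterns:

    import functools
    import re

    title = 'Folge 3: Der Aufbruch'
    match_title = functools.partial(re.match, string=title)
    patterns = [r'Folge (?P<episode_number>\d+)', r'(?P<episode>.+)']
    m = next(m for m in map(match_title, patterns) if m)
    print(m.groupdict())  # {'episode_number': '3'}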
@@ -4,8 +4,8 @@
     compat_urllib_parse_urlparse,
 )
 from ..utils import (
-    format_field,
     float_or_none,
+    format_field,
     int_or_none,
     parse_iso8601,
     remove_start,
@@ -5,6 +5,7 @@
     ExtractorError,
     GeoRestrictedError,
     int_or_none,
+    join_nonempty,
     parse_iso8601,
     parse_qs,
     strip_or_none,
@@ -31,20 +32,6 @@ class ArteTVIE(ArteTVBaseIE):
     _TESTS = [{
         'url': 'https://www.arte.tv/en/videos/088501-000-A/mexico-stealing-petrol-to-survive/',
         'only_matching': True,
-    }, {
-        'url': 'https://www.arte.tv/pl/videos/100103-000-A/usa-dyskryminacja-na-porodowce/',
-        'info_dict': {
-            'id': '100103-000-A',
-            'title': 'USA: Dyskryminacja na porodówce',
-            'description': 'md5:242017b7cce59ffae340a54baefcafb1',
-            'alt_title': 'ARTE Reportage',
-            'upload_date': '20201103',
-            'duration': 554,
-            'thumbnail': r're:https://api-cdn\.arte\.tv/.+940x530',
-            'timestamp': 1604417980,
-            'ext': 'mp4',
-        },
-        'params': {'skip_download': 'm3u8'}
     }, {
         'note': 'No alt_title',
         'url': 'https://www.arte.tv/fr/videos/110371-000-A/la-chaleur-supplice-des-arbres-de-rue/',
@@ -58,6 +45,23 @@ class ArteTVIE(ArteTVBaseIE):
     }, {
         'url': 'https://www.arte.tv/de/videos/110203-006-A/zaz/',
         'only_matching': True,
+    }, {
+        'url': 'https://www.arte.tv/fr/videos/109067-000-A/la-loi-de-teheran/',
+        'info_dict': {
+            'id': '109067-000-A',
+            'ext': 'mp4',
+            'description': 'md5:d2ca367b8ecee028dddaa8bd1aebc739',
+            'timestamp': 1713927600,
+            'thumbnail': 'https://api-cdn.arte.tv/img/v2/image/3rR6PLzfbigSkkeHtkCZNF/940x530',
+            'duration': 7599,
+            'title': 'La loi de Téhéran',
+            'upload_date': '20240424',
+            'subtitles': {
+                'fr': 'mincount:1',
+                'fr-acc': 'mincount:1',
+                'fr-forced': 'mincount:1',
+            },
+        },
     }, {
         'note': 'age-restricted',
         'url': 'https://www.arte.tv/de/videos/006785-000-A/the-element-of-crime/',
@@ -71,23 +75,7 @@ class ArteTVIE(ArteTVBaseIE):
             'upload_date': '20230930',
             'ext': 'mp4',
         },
-    }, {
-        'url': 'https://www.arte.tv/de/videos/085374-003-A/im-hohen-norden-geboren/',
-        'info_dict': {
-            'id': '085374-003-A',
-            'ext': 'mp4',
-            'description': 'md5:ab79ec7cc472a93164415b4e4916abf9',
-            'timestamp': 1702872000,
-            'thumbnail': 'https://api-cdn.arte.tv/img/v2/image/TnyHBfPxv3v2GEY3suXGZP/940x530',
-            'duration': 2594,
-            'title': 'Die kurze Zeit der Jugend',
-            'alt_title': 'Im hohen Norden geboren',
-            'upload_date': '20231218',
-            'subtitles': {
-                'fr': 'mincount:1',
-                'fr-acc': 'mincount:1',
-            },
-        },
+        'skip': '404 Not Found',
     }]

     _GEO_BYPASS = True
@@ -142,17 +130,19 @@ class ArteTVIE(ArteTVBaseIE):
     def _fix_accessible_subs_locale(subs):
         updated_subs = {}
         for lang, sub_formats in subs.items():
-            for format in sub_formats:
-                if format.get('url', '').endswith('-MAL.m3u8'):
-                    lang += '-acc'
-                updated_subs.setdefault(lang, []).append(format)
+            for fmt in sub_formats:
+                url = fmt.get('url') or ''
+                suffix = ('acc' if url.endswith('-MAL.m3u8')
+                          else 'forced' if '_VO' not in url
+                          else None)
+                updated_subs.setdefault(join_nonempty(lang, suffix), []).append(fmt)
         return updated_subs

     def _real_extract(self, url):
         mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
         lang = mobj.group('lang') or mobj.group('lang_2')
-        langauge_code = self._LANG_MAP.get(lang)
+        language_code = self._LANG_MAP.get(lang)

         config = self._download_json(f'{self._API_BASE}/config/{lang}/{video_id}', video_id, headers={
             'x-validated-age': '18'
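`join_nonempty` builds the subtitle keys here: it joins its arguments with '-' and skips empty parts, so the result is `fr`, `fr-acc` or `fr-forced` depending on the computed suffix. A rough stand-in for the helper:

    def join_nonempty(*parts, delim='-'):
        # simplified version of yt_dlp.utils.join_nonempty
        return delim.join(str(p) for p in parts if p)

    assert join_nonempty('fr', 'acc') == 'fr-acc'
    assert join_nonempty('fr', None) == 'fr'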
@@ -180,10 +170,10 @@ def _real_extract(self, url):
             m = self._VERSION_CODE_RE.match(stream_version_code)
             if m:
                 lang_pref = int(''.join('01'[x] for x in (
-                    m.group('vlang') == langauge_code,  # we prefer voice in the requested language
+                    m.group('vlang') == language_code,  # we prefer voice in the requested language
                     not m.group('audio_desc'),  # and not the audio description version
                     bool(m.group('original_voice')),  # but if voice is not in the requested language, at least choose the original voice
-                    m.group('sub_lang') == langauge_code,  # if subtitles are present, we prefer them in the requested language
+                    m.group('sub_lang') == language_code,  # if subtitles are present, we prefer them in the requested language
                     not m.group('has_sub'),  # but we prefer no subtitles otherwise
                     not m.group('sdh_sub'),  # and we prefer not the hard-of-hearing subtitles if there are subtitles
                 )))
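The `'01'[x]` idiom above maps each boolean criterion to a digit, so the tuple of preferences becomes a binary-looking integer that compares correctly: earlier criteria dominate later ones. Illustration:

    checks = (True, False, True)    # e.g. voice match, audio-desc, original voice
    lang_pref = int(''.join('01'[x] for x in checks))
    print(lang_pref)  # 101 -- a higher number means a more preferred version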
yt_dlp/extractor/asobistage.py | 154 lines (new file)
@@ -0,0 +1,154 @@
+import functools
+
+from .common import InfoExtractor
+from ..utils import str_or_none, url_or_none
+from ..utils.traversal import traverse_obj
+
+
+class AsobiStageIE(InfoExtractor):
+    IE_DESC = 'ASOBISTAGE (アソビステージ)'
+    _VALID_URL = r'https?://asobistage\.asobistore\.jp/event/(?P<id>(?P<event>\w+)/(?P<type>archive|player)/(?P<slug>\w+))(?:[?#]|$)'
+    _TESTS = [{
+        'url': 'https://asobistage.asobistore.jp/event/315passionhour_2022summer/archive/frame',
+        'info_dict': {
+            'id': '315passionhour_2022summer/archive/frame',
+            'title': '315プロダクションプレゼンツ 315パッションアワー!!!',
+            'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
+        },
+        'playlist_count': 1,
+        'playlist': [{
+            'info_dict': {
+                'id': 'edff52f2',
+                'ext': 'mp4',
+                'title': '315passion_FRAME_only',
+                'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
+            },
+        }],
+    }, {
+        'url': 'https://asobistage.asobistore.jp/event/idolmaster_idolworld2023_goods/archive/live',
+        'info_dict': {
+            'id': 'idolmaster_idolworld2023_goods/archive/live',
+            'title': 'md5:378510b6e830129d505885908bd6c576',
+            'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
+        },
+        'playlist_count': 1,
+        'playlist': [{
+            'info_dict': {
+                'id': '3aef7110',
+                'ext': 'mp4',
+                'title': 'asobistore_station_1020_serverREC',
+                'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
+            },
+        }],
+    }, {
+        'url': 'https://asobistage.asobistore.jp/event/sidem_fclive_bpct/archive/premium_hc',
+        'playlist_count': 4,
+        'info_dict': {
+            'id': 'sidem_fclive_bpct/archive/premium_hc',
+            'title': '315 Production presents F@NTASTIC COMBINATION LIVE ~BRAINPOWER!!~/~CONNECTIME!!!!~',
+            'thumbnail': r're:^https?://[\w.-]+/\w+/\w+',
+        },
+    }, {
+        'url': 'https://asobistage.asobistore.jp/event/ijigenfes_utagassen/player/day1',
+        'only_matching': True,
+    }]
+
+    _API_HOST = 'https://asobistage-api.asobistore.jp'
+    _HEADERS = {}
+    _is_logged_in = False
+
+    @functools.cached_property
+    def _owned_tickets(self):
+        owned_tickets = set()
+        if not self._is_logged_in:
+            return owned_tickets
+
+        for path, name in [
+            ('api/v1/purchase_history/list', 'ticket purchase history'),
+            ('api/v1/serialcode/list', 'redemption history'),
+        ]:
+            response = self._download_json(
+                f'{self._API_HOST}/{path}', None, f'Downloading {name}',
+                f'Unable to download {name}', expected_status=400)
+            if traverse_obj(response, ('payload', 'error_message'), 'error') == 'notlogin':
+                self._is_logged_in = False
+                break
+            owned_tickets.update(
+                traverse_obj(response, ('payload', 'value', ..., 'digital_product_id', {str_or_none})))
+
+        return owned_tickets
+
+    def _get_available_channel_id(self, channel):
+        channel_id = traverse_obj(channel, ('chennel_vspf_id', {str}))
+        if not channel_id:
+            return None
+        # if rights_type_id == 6, then 'No conditions (no login required - non-members are OK)'
+        if traverse_obj(channel, ('viewrights', lambda _, v: v['rights_type_id'] == 6)):
+            return channel_id
+        available_tickets = traverse_obj(channel, (
+            'viewrights', ..., ('tickets', 'serialcodes'), ..., 'digital_product_id', {str_or_none}))
+        if not self._owned_tickets.intersection(available_tickets):
+            self.report_warning(
+                f'You are not a ticketholder for "{channel.get("channel_name") or channel_id}"')
+            return None
+        return channel_id
+
+    def _real_initialize(self):
+        if self._get_cookies(self._API_HOST):
+            self._is_logged_in = True
+        token = self._download_json(
+            f'{self._API_HOST}/api/v1/vspf/token', None, 'Getting token', 'Unable to get token')
+        self._HEADERS['Authorization'] = f'Bearer {token}'
+
+    def _real_extract(self, url):
+        video_id, event, type_, slug = self._match_valid_url(url).group('id', 'event', 'type', 'slug')
+        video_type = {'archive': 'archives', 'player': 'broadcasts'}[type_]
+        webpage = self._download_webpage(url, video_id)
+        event_data = traverse_obj(
+            self._search_nextjs_data(webpage, video_id, default={}),
+            ('props', 'pageProps', 'eventCMSData', {
+                'title': ('event_name', {str}),
+                'thumbnail': ('event_thumbnail_image', {url_or_none}),
+            }))
+
+        available_channels = traverse_obj(self._download_json(
+            f'https://asobistage.asobistore.jp/cdn/v101/events/{event}/{video_type}.json',
+            video_id, 'Getting channel list', 'Unable to get channel list'), (
+            video_type, lambda _, v: v['broadcast_slug'] == slug,
+            'channels', lambda _, v: v['chennel_vspf_id'] != '00000'))
+
+        entries = []
+        for channel_id in traverse_obj(available_channels, (..., {self._get_available_channel_id})):
+            if video_type == 'archives':
+                channel_json = self._download_json(
+                    f'https://survapi.channel.or.jp/proxy/v1/contents/{channel_id}/get_by_cuid', channel_id,
+                    'Getting archive channel info', 'Unable to get archive channel info', fatal=False,
+                    headers=self._HEADERS)
+                channel_data = traverse_obj(channel_json, ('ex_content', {
+                    'm3u8_url': 'streaming_url',
+                    'title': 'title',
+                    'thumbnail': ('thumbnail', 'url'),
+                }))
+            else:  # video_type == 'broadcasts'
+                channel_json = self._download_json(
+                    f'https://survapi.channel.or.jp/ex/events/{channel_id}', channel_id,
+                    'Getting live channel info', 'Unable to get live channel info', fatal=False,
+                    headers=self._HEADERS, query={'embed': 'channel'})
+                channel_data = traverse_obj(channel_json, ('data', {
+                    'm3u8_url': ('Channel', 'Custom_live_url'),
+                    'title': 'Name',
+                    'thumbnail': 'Poster_url',
+                }))
+
+            entries.append({
+                'id': channel_id,
+                'title': channel_data.get('title'),
+                'formats': self._extract_m3u8_formats(channel_data.get('m3u8_url'), channel_id, fatal=False),
+                'is_live': video_type == 'broadcasts',
+                'thumbnail': url_or_none(channel_data.get('thumbnail')),
+            })

+        if not self._is_logged_in and not entries:
+            self.raise_login_required()
+
+        return self.playlist_result(entries, video_id, **event_data)
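The new extractor leans on `functools.cached_property` so the owned-ticket lookup runs at most once per extractor instance, and only when a channel actually needs a rights check. A minimal sketch of the pattern:

    import functools

    class Client:
        @functools.cached_property
        def owned_tickets(self):
            print('fetching ticket list...')   # expensive call, runs once
            return {'ticket-1', 'ticket-2'}

    c = Client()
    c.owned_tickets   # prints 'fetching ticket list...'
    c.owned_tickets   # served from the cache, no second fetch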
@@ -1,11 +1,11 @@
-import datetime
+import datetime as dt

 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
     float_or_none,
     jwt_encode_hs256,
     try_get,
-    ExtractorError,
 )


@@ -71,9 +71,9 @@ def _real_extract(self, url):
        content_ids = [{'id': id, 'subclip_start': content['start'], 'subclip_end': content['end']}
                       for id, content in enumerate(contentResource)]

-        time_of_request = datetime.datetime.now()
-        not_before = time_of_request - datetime.timedelta(minutes=5)
-        expire = time_of_request + datetime.timedelta(minutes=5)
+        time_of_request = dt.datetime.now()
+        not_before = time_of_request - dt.timedelta(minutes=5)
+        expire = time_of_request + dt.timedelta(minutes=5)
         payload = {
             'content_ids': {
                 content_id: content_ids,
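The five-minute window around the request time is the usual shape for short-lived signed tokens: valid slightly in the past (to absorb clock skew) and slightly into the future. A sketch of the resulting claims, assuming the token carries epoch-second not-before/expiry fields:

    import datetime as dt

    now = dt.datetime.now()
    claims = {
        'nbf': int((now - dt.timedelta(minutes=5)).timestamp()),  # not valid before
        'exp': int((now + dt.timedelta(minutes=5)).timestamp()),  # expiry
    }
    # the payload is then HS256-signed (jwt_encode_hs256 in yt-dlp) and sent along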
@@ -2,8 +2,8 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse_urlencode,
     compat_str,
+    compat_urllib_parse_urlencode,
 )
 from ..utils import (
     format_field,
@@ -1,4 +1,4 @@
-import datetime
+import datetime as dt
 import hashlib
 import hmac

@@ -12,7 +12,7 @@ class AWSIE(InfoExtractor):  # XXX: Conventionally, base classes should end with

     def _aws_execute_api(self, aws_dict, video_id, query=None):
         query = query or {}
-        amz_date = datetime.datetime.now(datetime.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
+        amz_date = dt.datetime.now(dt.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
         date = amz_date[:8]
         headers = {
             'Accept': 'application/json',
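For reference, the `amz_date`/`date` pair built here is exactly what AWS Signature Version 4 expects for the `X-Amz-Date` header and the credential-scope date:

    import datetime as dt

    amz_date = dt.datetime.now(dt.timezone.utc).strftime('%Y%m%dT%H%M%SZ')
    date = amz_date[:8]
    print(amz_date)  # e.g. 20240424T153000Z  (X-Amz-Date header)
    print(date)      # e.g. 20240424          (credential scope date)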
@@ -24,7 +24,8 @@ class AxsIE(InfoExtractor):
             'timestamp': 1685729564,
             'duration': 1284.216,
             'series': 'Rock & Roll Road Trip with Sammy Hagar',
-            'season': 2,
+            'season': 'Season 2',
+            'season_number': 2,
             'episode': '3',
             'thumbnail': 'https://images.dotstudiopro.com/5f4e9d330a0c3b295a7e8394',
         },

@@ -41,7 +42,8 @@ class AxsIE(InfoExtractor):
             'timestamp': 1676403615,
             'duration': 2570.668,
             'series': 'The Big Interview with Dan Rather',
-            'season': 3,
+            'season': 'Season 3',
+            'season_number': 3,
             'episode': '5',
             'thumbnail': 'https://images.dotstudiopro.com/5f4d1901f340b50d937cec32',
         },

@@ -77,7 +79,7 @@ def _real_extract(self, url):
             'title': ('title', {str}),
             'description': ('description', {str}),
             'series': ('seriestitle', {str}),
-            'season': ('season', {int}),
+            'season_number': ('season', {int}),
             'episode': ('episode', {str}),
             'duration': ('duration', {float_or_none}),
             'timestamp': ('updated_at', {parse_iso8601}),
@@ -2,12 +2,12 @@

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse_urlparse,
     compat_parse_qs,
+    compat_urllib_parse_urlparse,
 )
 from ..utils import (
-    format_field,
     InAdvancePagedList,
+    format_field,
     traverse_obj,
     unified_timestamp,
 )
@@ -2,11 +2,11 @@

 from .common import InfoExtractor
 from ..utils import (
-    try_get,
-    int_or_none,
-    url_or_none,
     float_or_none,
+    int_or_none,
+    try_get,
     unified_timestamp,
+    url_or_none,
 )

@@ -602,7 +602,7 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
         'url': 'http://www.bbc.com/news/world-europe-32668511',
         'info_dict': {
             'id': 'world-europe-32668511',
-            'title': 'Russia stages massive WW2 parade',
+            'title': 'Russia stages massive WW2 parade despite Western boycott',
             'description': 'md5:00ff61976f6081841f759a08bf78cc9c',
         },
         'playlist_count': 2,
@@ -623,6 +623,7 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
         'info_dict': {
             'id': '3662a707-0af9-3149-963f-47bea720b460',
             'title': 'BUGGER',
+            'description': r're:BUGGER The recent revelations by the whistleblower Edward Snowden were fascinating. .{211}\.{3}$',
         },
         'playlist_count': 18,
     }, {
@@ -631,14 +632,14 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
         'info_dict': {
             'id': 'p02mprgb',
             'ext': 'mp4',
-            'title': 'Aerial footage showed the site of the crash in the Alps - courtesy BFM TV',
-            'description': 'md5:2868290467291b37feda7863f7a83f54',
+            'title': 'Germanwings crash site aerial video',
+            'description': r're:(?s)Aerial video showed the site where the Germanwings flight 4U 9525, .{156} BFM TV\.$',
             'duration': 47,
             'timestamp': 1427219242,
             'upload_date': '20150324',
+            'thumbnail': 'https://ichef.bbci.co.uk/news/1024/media/images/81879000/jpg/_81879090_81879089.jpg',
         },
         'params': {
-            # rtmp download
             'skip_download': True,
         }
     }, {
@@ -656,21 +657,24 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
         },
         'params': {
             'skip_download': True,
-        }
+        },
+        'skip': 'now SIMORGH_DATA with no video',
     }, {
         # single video embedded with data-playable containing XML playlists (regional section)
         'url': 'http://www.bbc.com/mundo/video_fotos/2015/06/150619_video_honduras_militares_hospitales_corrupcion_aw',
         'info_dict': {
-            'id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
+            'id': '39275083',
+            'display_id': '150619_video_honduras_militares_hospitales_corrupcion_aw',
             'ext': 'mp4',
             'title': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
-            'description': 'md5:1525f17448c4ee262b64b8f0c9ce66c8',
+            'description': 'Honduras militariza sus hospitales por nuevo escándalo de corrupción',
             'timestamp': 1434713142,
             'upload_date': '20150619',
+            'thumbnail': 'https://a.files.bbci.co.uk/worldservice/live/assets/images/2015/06/19/150619132146_honduras_hsopitales_militares_640x360_aptn_nocredit.jpg',
         },
         'params': {
             'skip_download': True,
-        }
+        },
     }, {
         # single video from video playlist embedded with vxp-playlist-data JSON
         'url': 'http://www.bbc.com/news/video_and_audio/must_see/33376376',
@@ -683,22 +687,21 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
         },
         'params': {
             'skip_download': True,
-        }
+        },
+        'skip': '404 Not Found',
     }, {
-        # single video story with digitalData
+        # single video story with __PWA_PRELOADED_STATE__
         'url': 'http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret',
         'info_dict': {
             'id': 'p02q6gc4',
-            'ext': 'flv',
-            'title': 'Sri Lanka’s spicy secret',
-            'description': 'As a new train line to Jaffna opens up the country’s north, travellers can experience a truly distinct slice of Tamil culture.',
-            'timestamp': 1437674293,
-            'upload_date': '20150723',
+            'ext': 'mp4',
+            'title': 'Tasting the spice of life in Jaffna',
+            'description': r're:(?s)BBC Travel Show’s Henry Golding explores the city of Jaffna .{151} aftertaste\.$',
+            'timestamp': 1646058397,
+            'upload_date': '20220228',
+            'duration': 255,
+            'thumbnail': 'https://ichef.bbci.co.uk/images/ic/1920xn/p02vxvkn.jpg',
         },
-        'params': {
-            # rtmp download
-            'skip_download': True,
-        }
     }, {
         # single video story without digitalData
         'url': 'http://www.bbc.com/autos/story/20130513-hyundais-rock-star',
|
||||||
'timestamp': 1415867444,
|
'timestamp': 1415867444,
|
||||||
'upload_date': '20141113',
|
'upload_date': '20141113',
|
||||||
},
|
},
|
||||||
'params': {
|
'skip': 'redirects to TopGear home page',
|
||||||
# rtmp download
|
|
||||||
'skip_download': True,
|
|
||||||
}
|
|
||||||
}, {
|
}, {
|
||||||
# single video embedded with Morph
|
# single video embedded with Morph
|
||||||
|
# TODO: replacement test page
|
||||||
'url': 'http://www.bbc.co.uk/sport/live/olympics/36895975',
|
'url': 'http://www.bbc.co.uk/sport/live/olympics/36895975',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'p041vhd0',
|
'id': 'p041vhd0',
|
||||||
|
@@ -726,27 +727,22 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
             'uploader': 'BBC Sport',
             'uploader_id': 'bbc_sport',
         },
-        'params': {
-            # m3u8 download
-            'skip_download': True,
-        },
-        'skip': 'Georestricted to UK',
+        'skip': 'Video no longer in page',
     }, {
-        # single video with playlist.sxml URL in playlist param
+        # single video in __INITIAL_DATA__
         'url': 'http://www.bbc.com/sport/0/football/33653409',
         'info_dict': {
             'id': 'p02xycnp',
             'ext': 'mp4',
-            'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?',
-            'description': 'BBC Sport\'s David Ornstein has the latest transfer gossip, including rumours of a Manchester United return for Cristiano Ronaldo.',
+            'title': 'Ronaldo to Man Utd, Arsenal to spend?',
+            'description': r're:(?s)BBC Sport\'s David Ornstein rounds up the latest transfer reports, .{359} here\.$',
+            'timestamp': 1437750175,
+            'upload_date': '20150724',
+            'thumbnail': r're:https?://.+/.+media/images/69320000/png/_69320754_mmgossipcolumnextraaugust18.png',
             'duration': 140,
         },
-        'params': {
-            # rtmp download
-            'skip_download': True,
-        }
     }, {
-        # article with multiple videos embedded with playlist.sxml in playlist param
+        # article with multiple videos embedded with Morph.setPayload
        'url': 'http://www.bbc.com/sport/0/football/34475836',
        'info_dict': {
            'id': '34475836',
@@ -754,6 +750,21 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
             'description': 'Fast-paced football, wit, wisdom and a ready smile - why Liverpool fans should come to love new boss Jurgen Klopp.',
         },
         'playlist_count': 3,
+    }, {
+        # Testing noplaylist
+        'url': 'http://www.bbc.com/sport/0/football/34475836',
+        'info_dict': {
+            'id': 'p034ppnv',
+            'ext': 'mp4',
+            'title': 'All you need to know about Jurgen Klopp',
+            'timestamp': 1444335081,
+            'upload_date': '20151008',
+            'duration': 122.0,
+            'thumbnail': 'https://ichef.bbci.co.uk/onesport/cps/976/cpsprodpb/7542/production/_85981003_klopp.jpg',
+        },
+        'params': {
+            'noplaylist': True,
+        },
     }, {
         # school report article with single video
         'url': 'http://www.bbc.co.uk/schoolreport/35744779',
@@ -762,6 +773,7 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
             'title': 'School which breaks down barriers in Jerusalem',
         },
         'playlist_count': 1,
+        'skip': 'redirects to Young Reporter home page https://www.bbc.co.uk/news/topics/cg41ylwv43pt',
     }, {
         # single video with playlist URL from weather section
         'url': 'http://www.bbc.com/weather/features/33601775',
@@ -778,18 +790,33 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
             'thumbnail': r're:https?://.+/.+\.jpg',
             'timestamp': 1437785037,
             'upload_date': '20150725',
+            'duration': 105,
         },
     }, {
         # video with window.__INITIAL_DATA__ and value as JSON string
         'url': 'https://www.bbc.com/news/av/world-europe-59468682',
         'info_dict': {
-            'id': 'p0b71qth',
+            'id': 'p0b779gc',
             'ext': 'mp4',
             'title': 'Why France is making this woman a national hero',
-            'description': 'md5:7affdfab80e9c3a1f976230a1ff4d5e4',
+            'description': r're:(?s)France is honouring the US-born 20th Century singer and activist Josephine .{208} Second World War.',
             'thumbnail': r're:https?://.+/.+\.jpg',
-            'timestamp': 1638230731,
-            'upload_date': '20211130',
+            'timestamp': 1638215626,
+            'upload_date': '20211129',
+            'duration': 125,
+        },
+    }, {
+        # video with script id __NEXT_DATA__ and value as JSON string
+        'url': 'https://www.bbc.com/news/uk-68546268',
+        'info_dict': {
+            'id': 'p0hj0lq7',
+            'ext': 'mp4',
+            'title': 'Nasser Hospital doctor describes his treatment by IDF',
+            'description': r're:(?s)Doctor Abu Sabha said he was detained by Israeli forces after .{276} hostages\."$',
+            'thumbnail': r're:https?://.+/.+\.jpg',
+            'timestamp': 1710188248,
+            'upload_date': '20240311',
+            'duration': 104,
         },
     }, {
         # single video article embedded with data-media-vpid
@@ -817,6 +844,7 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
             'uploader': 'Radio 3',
             'uploader_id': 'bbc_radio_three',
         },
+        'skip': '404 Not Found',
     }, {
         'url': 'http://www.bbc.co.uk/learningenglish/chinese/features/lingohack/ep-181227',
         'info_dict': {
@@ -824,6 +852,7 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
             'ext': 'mp4',
             'title': 'md5:2fabf12a726603193a2879a055f72514',
             'description': 'Learn English words and phrases from this story',
+            'thumbnail': 'https://ichef.bbci.co.uk/images/ic/1200x675/p06pq9gk.jpg',
         },
         'add_ie': [BBCCoUkIE.ie_key()],
     }, {
@@ -832,28 +861,30 @@ class BBCIE(BBCCoUkIE):  # XXX: Do not subclass from concrete IE
         'info_dict': {
             'id': 'p07c6sb9',
             'ext': 'mp4',
-            'title': 'How positive thinking is harming your happiness',
-            'alt_title': 'The downsides of positive thinking',
-            'description': 'md5:fad74b31da60d83b8265954ee42d85b4',
+            'title': 'The downsides of positive thinking',
+            'description': 'The downsides of positive thinking',
             'duration': 235,
-            'thumbnail': r're:https?://.+/p07c9dsr.jpg',
-            'upload_date': '20190604',
-            'categories': ['Psychology'],
+            'thumbnail': r're:https?://.+/p07c9dsr\.(?:jpg|webp|png)',
+            'upload_date': '20220223',
+            'timestamp': 1645632746,
         },
     }, {
         # BBC Sounds
-        'url': 'https://www.bbc.co.uk/sounds/play/m001q78b',
+        'url': 'https://www.bbc.co.uk/sounds/play/w3ct5rgx',
         'info_dict': {
-            'id': 'm001q789',
+            'id': 'p0hrw4nr',
             'ext': 'mp4',
-            'title': 'The Night Tracks Mix - Music for the darkling hour',
-            'thumbnail': 'https://ichef.bbci.co.uk/images/ic/raw/p0c00hym.jpg',
-            'chapters': 'count:8',
-            'description': 'md5:815fb51cbdaa270040aab8145b3f1d67',
-            'uploader': 'Radio 3',
-            'duration': 1800,
-            'uploader_id': 'bbc_radio_three',
-        },
+            'title': 'Are our coastlines being washed away?',
+            'description': r're:(?s)Around the world, coastlines are constantly changing .{2000,} Images\)$',
+            'timestamp': 1713556800,
+            'upload_date': '20240419',
+            'duration': 1588,
+            'thumbnail': 'https://ichef.bbci.co.uk/images/ic/raw/p0hrnxbl.jpg',
+            'uploader': 'World Service',
+            'uploader_id': 'bbc_world_service',
+            'series': 'CrowdScience',
+            'chapters': [],
+        }
     }, {  # onion routes
         'url': 'https://www.bbcnewsd73hkzno2ini43t4gblxvycyac5aw4gnv7t2rccijh7745uqd.onion/news/av/world-europe-63208576',
         'only_matching': True,
@@ -1008,8 +1039,7 @@ def _real_extract(self, url):
             webpage, 'group id', default=None)
         if group_id:
             return self.url_result(
-                'https://www.bbc.co.uk/programmes/%s' % group_id,
-                ie=BBCCoUkIE.ie_key())
+                f'https://www.bbc.co.uk/programmes/{group_id}', BBCCoUkIE)

         # single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
         programme_id = self._search_regex(
@@ -1069,83 +1099,133 @@ def _real_extract(self, url):
             }

         # Morph based embed (e.g. http://www.bbc.co.uk/sport/live/olympics/36895975)
-        # There are several setPayload calls may be present but the video
-        # seems to be always related to the first one
-        morph_payload = self._parse_json(
-            self._search_regex(
-                r'Morph\.setPayload\([^,]+,\s*({.+?})\);',
-                webpage, 'morph payload', default='{}'),
-            playlist_id, fatal=False)
+        # Several setPayload calls may be present but the video(s)
+        # should be in one that mentions leadMedia or videoData
+        morph_payload = self._search_json(
+            r'\bMorph\s*\.\s*setPayload\s*\([^,]+,', webpage, 'morph payload', playlist_id,
+            contains_pattern=r'{(?s:(?:(?!</script>).)+(?:"leadMedia"|\\"videoData\\")\s*:.+)}',
+            default={})
         if morph_payload:
-            components = try_get(morph_payload, lambda x: x['body']['components'], list) or []
-            for component in components:
-                if not isinstance(component, dict):
-                    continue
-                lead_media = try_get(component, lambda x: x['props']['leadMedia'], dict)
-                if not lead_media:
-                    continue
-                identifiers = lead_media.get('identifiers')
-                if not identifiers or not isinstance(identifiers, dict):
-                    continue
-                programme_id = identifiers.get('vpid') or identifiers.get('playablePid')
+            for lead_media in traverse_obj(morph_payload, (
+                    'body', 'components', ..., 'props', 'leadMedia', {dict})):
+                programme_id = traverse_obj(lead_media, ('identifiers', ('vpid', 'playablePid'), {str}, any))
                 if not programme_id:
                     continue
-                title = lead_media.get('title') or self._og_search_title(webpage)
                 formats, subtitles = self._download_media_selector(programme_id)
-                description = lead_media.get('summary')
-                uploader = lead_media.get('masterBrand')
-                uploader_id = lead_media.get('mid')
-                duration = None
-                duration_d = lead_media.get('duration')
-                if isinstance(duration_d, dict):
-                    duration = parse_duration(dict_get(
-                        duration_d, ('rawDuration', 'formattedDuration', 'spokenDuration')))
                 return {
                     'id': programme_id,
-                    'title': title,
-                    'description': description,
-                    'duration': duration,
-                    'uploader': uploader,
-                    'uploader_id': uploader_id,
+                    'title': lead_media.get('title') or self._og_search_title(webpage),
+                    **traverse_obj(lead_media, {
+                        'description': ('summary', {str}),
+                        'duration': ('duration', ('rawDuration', 'formattedDuration', 'spokenDuration'), {parse_duration}),
+                        'uploader': ('masterBrand', {str}),
+                        'uploader_id': ('mid', {str}),
+                    }),
                     'formats': formats,
                     'subtitles': subtitles,
                 }
+            body = self._parse_json(traverse_obj(morph_payload, (
+                'body', 'content', 'article', 'body')), playlist_id, fatal=False)
+            for video_data in traverse_obj(body, (lambda _, v: v['videoData']['pid'], 'videoData')):
+                if video_data.get('vpid'):
+                    video_id = video_data['vpid']
+                    formats, subtitles = self._download_media_selector(video_id)
+                    entry = {
+                        'id': video_id,
+                        'formats': formats,
+                        'subtitles': subtitles,
+                    }
+                else:
+                    video_id = video_data['pid']
+                    entry = self.url_result(
+                        f'https://www.bbc.co.uk/programmes/{video_id}', BBCCoUkIE,
+                        video_id, url_transparent=True)
+                entry.update({
+                    'timestamp': traverse_obj(morph_payload, (
+                        'body', 'content', 'article', 'dateTimeInfo', 'dateTime', {parse_iso8601})
+                    ),
+                    **traverse_obj(video_data, {
+                        'thumbnail': (('iChefImage', 'image'), {url_or_none}, any),
+                        'title': (('title', 'caption'), {str}, any),
+                        'duration': ('duration', {parse_duration}),
+                    }),
+                })
+                if video_data.get('isLead') and not self._yes_playlist(playlist_id, video_id):
+                    return entry
+                entries.append(entry)
+            if entries:
+                playlist_title = traverse_obj(morph_payload, (
+                    'body', 'content', 'article', 'headline', {str})) or playlist_title
+                return self.playlist_result(
+                    entries, playlist_id, playlist_title, playlist_description)

-        preload_state = self._parse_json(self._search_regex(
-            r'window\.__PRELOADED_STATE__\s*=\s*({.+?});', webpage,
-            'preload state', default='{}'), playlist_id, fatal=False)
-        if preload_state:
-            current_programme = preload_state.get('programmes', {}).get('current') or {}
-            programme_id = current_programme.get('id')
-            if current_programme and programme_id and current_programme.get('type') == 'playable_item':
-                title = current_programme.get('titles', {}).get('tertiary') or playlist_title
-                formats, subtitles = self._download_media_selector(programme_id)
-                synopses = current_programme.get('synopses') or {}
-                network = current_programme.get('network') or {}
-                duration = int_or_none(
-                    current_programme.get('duration', {}).get('value'))
-                thumbnail = None
-                image_url = current_programme.get('image_url')
-                if image_url:
-                    thumbnail = image_url.replace('{recipe}', 'raw')
+        # various PRELOADED_STATE JSON
+        preload_state = self._search_json(
+            r'window\.__(?:PWA_)?PRELOADED_STATE__\s*=', webpage,
+            'preload state', playlist_id, transform_source=js_to_json, default={})
+        # PRELOADED_STATE with current programmme
+        current_programme = traverse_obj(preload_state, ('programmes', 'current', {dict}))
+        programme_id = traverse_obj(current_programme, ('id', {str}))
+        if programme_id and current_programme.get('type') == 'playable_item':
+            title = traverse_obj(current_programme, ('titles', ('tertiary', 'secondary'), {str}, any)) or playlist_title
+            formats, subtitles = self._download_media_selector(programme_id)
+            return {
+                'id': programme_id,
+                'title': title,
+                'formats': formats,
+                **traverse_obj(current_programme, {
+                    'description': ('synopses', ('long', 'medium', 'short'), {str}, any),
+                    'thumbnail': ('image_url', {lambda u: url_or_none(u.replace('{recipe}', 'raw'))}),
+                    'duration': ('duration', 'value', {int_or_none}),
+                    'uploader': ('network', 'short_title', {str}),
+                    'uploader_id': ('network', 'id', {str}),
+                    'timestamp': ((('availability', 'from'), ('release', 'date')), {parse_iso8601}, any),
+                    'series': ('titles', 'primary', {str}),
+                }),
+                'subtitles': subtitles,
+                'chapters': traverse_obj(preload_state, (
+                    'tracklist', 'tracks', lambda _, v: float(v['offset']['start']), {
+                        'title': ('titles', {lambda x: join_nonempty(
+                            'primary', 'secondary', 'tertiary', delim=' - ', from_dict=x)}),
+                        'start_time': ('offset', 'start', {float_or_none}),
+                        'end_time': ('offset', 'end', {float_or_none}),
+                    })
+                ),
+            }

+        # PWA_PRELOADED_STATE with article video asset
+        asset_id = traverse_obj(preload_state, (
+            'entities', 'articles', lambda k, _: k.rsplit('/', 1)[-1] == playlist_id,
+            'assetVideo', 0, {str}, any))
+        if asset_id:
+            video_id = traverse_obj(preload_state, ('entities', 'videos', asset_id, 'vpid', {str}))
+            if video_id:
+                article = traverse_obj(preload_state, (
+                    'entities', 'articles', lambda _, v: v['assetVideo'][0] == asset_id, any))
+
+                def image_url(image_id):
+                    return traverse_obj(preload_state, (
+                        'entities', 'images', image_id, 'url',
+                        {lambda u: url_or_none(u.replace('$recipe', 'raw'))}))
+
+                formats, subtitles = self._download_media_selector(video_id)
                 return {
-                    'id': programme_id,
-                    'title': title,
-                    'description': dict_get(synopses, ('long', 'medium', 'short')),
-                    'thumbnail': thumbnail,
-                    'duration': duration,
-                    'uploader': network.get('short_title'),
-                    'uploader_id': network.get('id'),
+                    'id': video_id,
+                    **traverse_obj(preload_state, ('entities', 'videos', asset_id, {
+                        'title': ('title', {str}),
+                        'description': (('synopsisLong', 'synopsisMedium', 'synopsisShort'), {str}, any),
+                        'thumbnail': (0, {image_url}),
+                        'duration': ('duration', {int_or_none}),
+                    })),
                     'formats': formats,
                     'subtitles': subtitles,
-                    'chapters': traverse_obj(preload_state, (
-                        'tracklist', 'tracks', lambda _, v: float_or_none(v['offset']['start']), {
-                            'title': ('titles', {lambda x: join_nonempty(
-                                'primary', 'secondary', 'tertiary', delim=' - ', from_dict=x)}),
-                            'start_time': ('offset', 'start', {float_or_none}),
-                            'end_time': ('offset', 'end', {float_or_none}),
-                        })) or None,
+                    'timestamp': traverse_obj(article, ('displayDate', {parse_iso8601})),
                 }
+            else:
+                return self.url_result(
+                    f'https://www.bbc.co.uk/programmes/{asset_id}', BBCCoUkIE,
+                    asset_id, playlist_title, display_id=playlist_id,
+                    description=playlist_description)

         bbc3_config = self._parse_json(
             self._search_regex(
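Much of this hunk replaces imperative `try_get` chains with declarative `traverse_obj` paths. The recurring shape `(key, (alt_a, alt_b), {str}, any)` branches over alternative keys, keeps only values of the given type and returns the first hit. A small usage sketch, assuming a recent yt-dlp checkout:

    from yt_dlp.utils.traversal import traverse_obj

    media = {'identifiers': {'vpid': 'p041vhd0', 'playablePid': None}}
    programme_id = traverse_obj(
        media, ('identifiers', ('vpid', 'playablePid'), {str}, any))
    assert programme_id == 'p041vhd0'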
@@ -1191,6 +1271,28 @@ def _real_extract(self, url):
             return self.playlist_result(
                 entries, playlist_id, playlist_title, playlist_description)

+        def parse_model(model):
+            """Extract single video from model structure"""
+            item_id = traverse_obj(model, ('versions', 0, 'versionId', {str}))
+            if not item_id:
+                return
+            formats, subtitles = self._download_media_selector(item_id)
+            return {
+                'id': item_id,
+                'formats': formats,
+                'subtitles': subtitles,
+                **traverse_obj(model, {
+                    'title': ('title', {str}),
+                    'thumbnail': ('imageUrl', {lambda u: urljoin(url, u.replace('$recipe', 'raw'))}),
+                    'description': ('synopses', ('long', 'medium', 'short'), {str}, {lambda x: x or None}, any),
+                    'duration': ('versions', 0, 'duration', {int}),
+                    'timestamp': ('versions', 0, 'availableFrom', {functools.partial(int_or_none, scale=1000)}),
+                })
+            }
+
+        def is_type(*types):
+            return lambda _, v: v['type'] in types
+
         initial_data = self._search_regex(
             r'window\.__INITIAL_DATA__\s*=\s*("{.+?}")\s*;', webpage,
             'quoted preload state', default=None)
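`functools.partial(int_or_none, scale=1000)` shows up throughout the new code to turn millisecond timestamps from the APIs into epoch seconds. A sketch of the equivalent behaviour (simplified stand-in for the yt-dlp helper):

    import functools

    def int_or_none(v, scale=1):
        # simplified version of yt_dlp.utils.int_or_none
        return int(v) // scale if v is not None else None

    ms_to_s = functools.partial(int_or_none, scale=1000)
    assert ms_to_s(1710188248000) == 1710188248
    assert ms_to_s(None) is None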
@@ -1202,6 +1304,19 @@ def _real_extract(self, url):
         initial_data = self._parse_json(initial_data or '"{}"', playlist_id, fatal=False)
         initial_data = self._parse_json(initial_data, playlist_id, fatal=False)
         if initial_data:
+            for video_data in traverse_obj(initial_data, (
+                    'stores', 'article', 'articleBodyContent', is_type('video'))):
+                model = traverse_obj(video_data, (
+                    'model', 'blocks', is_type('aresMedia'),
+                    'model', 'blocks', is_type('aresMediaMetadata'),
+                    'model', {dict}, any))
+                entry = parse_model(model)
+                if entry:
+                    entries.append(entry)
+            if entries:
+                return self.playlist_result(
+                    entries, playlist_id, playlist_title, playlist_description)
+
             def parse_media(media):
                 if not media:
                     return
@ -1234,27 +1349,90 @@ def parse_media(media):
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
'timestamp': item_time,
|
'timestamp': item_time,
|
||||||
'description': strip_or_none(item_desc),
|
'description': strip_or_none(item_desc),
|
||||||
|
'duration': int_or_none(item.get('duration')),
|
||||||
})
|
})
|
||||||
for resp in (initial_data.get('data') or {}).values():
|
|
||||||
name = resp.get('name')
|
for resp in traverse_obj(initial_data, ('data', lambda _, v: v['name'])):
|
||||||
|
name = resp['name']
|
||||||
if name == 'media-experience':
|
if name == 'media-experience':
|
||||||
parse_media(try_get(resp, lambda x: x['data']['initialItem']['mediaItem'], dict))
|
parse_media(try_get(resp, lambda x: x['data']['initialItem']['mediaItem'], dict))
|
||||||
elif name == 'article':
|
elif name == 'article':
|
||||||
for block in (try_get(resp,
|
for block in traverse_obj(resp, (
|
||||||
(lambda x: x['data']['blocks'],
|
'data', (None, ('content', 'model')), 'blocks',
|
||||||
lambda x: x['data']['content']['model']['blocks'],),
|
is_type('media', 'video'), 'model', {dict})):
|
||||||
list) or []):
|
parse_media(block)
|
||||||
if block.get('type') not in ['media', 'video']:
|
|
||||||
continue
|
|
||||||
parse_media(block.get('model'))
|
|
||||||
return self.playlist_result(
|
return self.playlist_result(
|
||||||
entries, playlist_id, playlist_title, playlist_description)
|
entries, playlist_id, playlist_title, playlist_description)
|
||||||
|
|
||||||
|
# extract from SIMORGH_DATA hydration JSON
|
||||||
|
simorgh_data = self._search_json(
|
||||||
|
r'window\s*\.\s*SIMORGH_DATA\s*=', webpage,
|
||||||
|
'simorgh data', playlist_id, default={})
|
||||||
|
if simorgh_data:
|
||||||
|
done = False
|
||||||
|
for video_data in traverse_obj(simorgh_data, (
|
||||||
|
'pageData', 'content', 'model', 'blocks', is_type('video', 'legacyMedia'))):
|
||||||
|
model = traverse_obj(video_data, (
|
||||||
|
'model', 'blocks', is_type('aresMedia'),
|
||||||
|
'model', 'blocks', is_type('aresMediaMetadata'),
|
||||||
|
'model', {dict}, any))
|
||||||
|
if video_data['type'] == 'video':
|
||||||
|
entry = parse_model(model)
|
||||||
|
else: # legacyMedia: no duration, subtitles
|
||||||
|
block_id, entry = traverse_obj(model, ('blockId', {str})), None
|
||||||
|
media_data = traverse_obj(simorgh_data, (
|
||||||
|
'pageData', 'promo', 'media',
|
||||||
|
{lambda x: x if x['id'] == block_id else None}))
|
||||||
|
formats = traverse_obj(media_data, ('playlist', lambda _, v: url_or_none(v['url']), {
|
||||||
|
'url': ('url', {url_or_none}),
|
||||||
|
'ext': ('format', {str}),
|
||||||
|
'tbr': ('bitrate', {functools.partial(int_or_none, scale=1000)}),
|
||||||
|
}))
|
||||||
|
if formats:
|
||||||
|
entry = {
|
||||||
|
'id': block_id,
|
||||||
|
'display_id': playlist_id,
|
||||||
|
'formats': formats,
|
||||||
|
'description': traverse_obj(simorgh_data, ('pageData', 'promo', 'summary', {str})),
|
||||||
|
**traverse_obj(model, {
|
||||||
|
'title': ('title', {str}),
|
||||||
|
'thumbnail': ('imageUrl', {lambda u: urljoin(url, u.replace('$recipe', 'raw'))}),
|
||||||
|
'description': ('synopses', ('long', 'medium', 'short'), {str}, any),
|
||||||
|
'timestamp': ('firstPublished', {functools.partial(int_or_none, scale=1000)}),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
done = True
|
||||||
|
if entry:
|
||||||
|
entries.append(entry)
|
||||||
|
if done:
|
||||||
|
break
|
||||||
|
if entries:
|
||||||
|
return self.playlist_result(
|
||||||
|
entries, playlist_id, playlist_title, playlist_description)
|
||||||
|
|
||||||
        def extract_all(pattern):
            return list(filter(None, map(
                lambda s: self._parse_json(s, playlist_id, fatal=False),
                re.findall(pattern, webpage))))

+        # US accessed article with single embedded video (e.g.
+        # https://www.bbc.com/news/uk-68546268)
+        next_data = traverse_obj(self._search_nextjs_data(webpage, playlist_id, default={}),
+                                 ('props', 'pageProps', 'page'))
+        model = traverse_obj(next_data, (
+            ..., 'contents', is_type('video'),
+            'model', 'blocks', is_type('media'),
+            'model', 'blocks', is_type('mediaMetadata'),
+            'model', {dict}, any))
+        if model and (entry := parse_model(model)):
+            if not entry.get('timestamp'):
+                entry['timestamp'] = traverse_obj(next_data, (
+                    ..., 'contents', is_type('timestamp'), 'model',
+                    'timestamp', {functools.partial(int_or_none, scale=1000)}, any))
+            entries.append(entry)
+            return self.playlist_result(
+                entries, playlist_id, playlist_title, playlist_description)
+
        # Multiple video article (e.g.
        # http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460)
        EMBED_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:[^/]+/)+%s(?:\b[^"]+)?' % self._ID_REGEX
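The next_data walk added above leans on two more traverse_obj features; a small runnable illustration (toy data, assumed to mirror the Next.js page shape only loosely):

    from yt_dlp.utils import traverse_obj

    page = {'main': {'contents': [
        {'type': 'timestamp', 'model': {'timestamp': 1700000000000}},
    ]}}

    # `...` fans out over every value at that level (any key, any index),
    # and the trailing `any` collapses the branched matches to the first hit.
    print(traverse_obj(page, (
        ..., 'contents', ..., 'model', 'timestamp', {int}, any)))
    # -> 1700000000000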
@@ -3,7 +3,7 @@


 class BeatBumpVideoIE(InfoExtractor):
-    _VALID_URL = r'https://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
+    _VALID_URL = r'https?://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://beatbump.ml/listen?id=MgNrAu2pzNs',
         'md5': '5ff3fff41d3935b9810a9731e485fe66',
@@ -48,7 +48,7 @@ def _real_extract(self, url):


 class BeatBumpPlaylistIE(InfoExtractor):
-    _VALID_URL = r'https://beatbump\.(?:ml|io)/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
+    _VALID_URL = r'https?://beatbump\.(?:ml|io)/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://beatbump.ml/release?id=MPREb_gTAcphH99wE',
         'playlist_count': 50,
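The only change in both BeatBump patterns is making the scheme's trailing "s" optional; a quick check of what that buys (the plain-http URL here is hypothetical):

    import re

    _VALID_URL = r'https?://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
    m = re.match(_VALID_URL, 'http://beatbump.io/listen?id=MgNrAu2pzNs')
    print(m.group('id'))  # MgNrAu2pzNs -- would not match without the '?'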
@@ -1,7 +1,7 @@
 from .common import InfoExtractor
-
 from ..utils import (
     int_or_none,
+    str_or_none,
     traverse_obj,
     try_get,
     unified_timestamp,
|
@ -22,7 +22,7 @@ class BeegIE(InfoExtractor):
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
'upload_date': '20220131',
|
'upload_date': '20220131',
|
||||||
'timestamp': 1643656455,
|
'timestamp': 1643656455,
|
||||||
'display_id': 2540839,
|
'display_id': '2540839',
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://beeg.com/-0599050563103750?t=4-861',
|
'url': 'https://beeg.com/-0599050563103750?t=4-861',
|
||||||
|
@@ -36,7 +36,7 @@ class BeegIE(InfoExtractor):
             'age_limit': 18,
             'description': 'md5:b4fc879a58ae6c604f8f259155b7e3b9',
             'timestamp': 1643623200,
-            'display_id': 2569965,
+            'display_id': '2569965',
             'upload_date': '20220131',
         }
     }, {
@@ -78,7 +78,7 @@ def _real_extract(self, url):

         return {
             'id': video_id,
-            'display_id': first_fact.get('id'),
+            'display_id': str_or_none(first_fact.get('id')),
             'title': traverse_obj(video, ('file', 'stuff', 'sf_name')),
             'description': traverse_obj(video, ('file', 'stuff', 'sf_story')),
             'timestamp': unified_timestamp(first_fact.get('fc_created')),
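All three Beeg edits are the same fix: IDs in info dicts and test expectations should be strings. str_or_none does the coercion without turning a missing value into the literal string 'None':

    from yt_dlp.utils import str_or_none

    print(str_or_none(2540839))  # '2540839'
    print(str_or_none(None))     # None, not 'None'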
@@ -32,7 +32,7 @@ class BellMediaIE(InfoExtractor):
             'description': 'md5:810f7f8c6a83ad5b48677c3f8e5bb2c3',
             'upload_date': '20180525',
             'timestamp': 1527288600,
-            'season_id': 73997,
+            'season_id': '73997',
             'season': '2018',
             'thumbnail': 'http://images2.9c9media.com/image_asset/2018_5_25_baf30cbd-b28d-4a18-9903-4bb8713b00f5_PNG_956x536.jpg',
             'tags': [],
@@ -93,7 +93,6 @@ class BFMTVArticleIE(BFMTVBaseIE):
             'id': '6318445464112',
             'ext': 'mp4',
             'title': 'Le plein de bioéthanol fait de plus en plus mal à la pompe',
-            'description': None,
             'uploader_id': '876630703001',
             'upload_date': '20230110',
             'timestamp': 1673341692,
@@ -1,4 +1,4 @@
-from functools import partial
+import functools

 from .common import InfoExtractor
 from ..utils import (
@@ -50,7 +50,7 @@ def _extract_base_info(data):
         **traverse_obj(data, {
             'title': 'title',
             'description': 'description',
-            'duration': ('duration', {partial(int_or_none, scale=1000)}),
+            'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
             'timestamp': ('schedulingStart', {parse_iso8601}),
             'season_number': 'seasonNumber',
             'episode_number': 'episodeNumber',
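Both hunks are a style change (import functools rather than the bare name), but the call itself is worth unpacking: inside a traverse_obj path, {callable} converts the matched value, and binding scale=1000 turns a millisecond count into seconds. A runnable sketch with made-up data:

    import functools

    from yt_dlp.utils import int_or_none, traverse_obj

    data = {'duration': '83000'}  # milliseconds, as a string
    print(traverse_obj(data, (
        'duration', {functools.partial(int_or_none, scale=1000)})))  # 83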
@@ -93,11 +93,11 @@ def extract_formats(self, play_info):

         return formats

-    def _download_playinfo(self, video_id, cid):
+    def _download_playinfo(self, video_id, cid, headers=None):
         return self._download_json(
             'https://api.bilibili.com/x/player/playurl', video_id,
             query={'bvid': video_id, 'cid': cid, 'fnval': 4048},
-            note=f'Downloading video formats for cid {cid}')['data']
+            note=f'Downloading video formats for cid {cid}', headers=headers)['data']

     def json2srt(self, json_data):
         srt_data = ''
@@ -493,7 +493,8 @@ class BiliBiliIE(BilibiliBaseIE):

     def _real_extract(self, url):
         video_id = self._match_id(url)
-        webpage, urlh = self._download_webpage_handle(url, video_id)
+        headers = self.geo_verification_headers()
+        webpage, urlh = self._download_webpage_handle(url, video_id, headers=headers)
         if not self._match_valid_url(urlh.url):
             return self.url_result(urlh.url)

@@ -531,7 +532,7 @@ def _real_extract(self, url):
             self._download_json(
                 'https://api.bilibili.com/x/player/pagelist', video_id,
                 fatal=False, query={'bvid': video_id, 'jsonp': 'jsonp'},
-                note='Extracting videos in anthology'),
+                note='Extracting videos in anthology', headers=headers),
             'data', expected_type=list) or []
         is_anthology = len(page_list_json) > 1

@@ -552,7 +553,7 @@ def _real_extract(self, url):

         festival_info = {}
         if is_festival:
-            play_info = self._download_playinfo(video_id, cid)
+            play_info = self._download_playinfo(video_id, cid, headers=headers)

             festival_info = traverse_obj(initial_state, {
                 'uploader': ('videoInfo', 'upName'),
@@ -666,14 +667,15 @@ class BiliBiliBangumiIE(BilibiliBaseIE):

     def _real_extract(self, url):
         episode_id = self._match_id(url)
-        webpage = self._download_webpage(url, episode_id)
+        headers = self.geo_verification_headers()
+        webpage = self._download_webpage(url, episode_id, headers=headers)

         if '您所在的地区无法观看本片' in webpage:
             raise GeoRestrictedError('This video is restricted')
         elif '正在观看预览,大会员免费看全片' in webpage:
             self.raise_login_required('This video is for premium members only')

-        headers = {'Referer': url, **self.geo_verification_headers()}
+        headers['Referer'] = url
         play_info = self._download_json(
             'https://api.bilibili.com/pgc/player/web/v2/playurl', episode_id,
             'Extracting episode', query={'fnval': '4048', 'ep_id': episode_id},
@@ -724,7 +726,7 @@ def _real_extract(self, url):
             'duration': float_or_none(play_info.get('timelength'), scale=1000),
             'subtitles': self.extract_subtitles(episode_id, episode_info.get('cid'), aid=aid),
             '__post_extractor': self.extract_comments(aid),
-            'http_headers': headers,
+            'http_headers': {'Referer': url},
         }


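The thread running through these Bilibili hunks: compute the geo-verification headers once per extraction, pass them to every HTTP helper, and extend the same dict with Referer instead of rebuilding it. A toy illustration of the flow (stand-in class, not the real InfoExtractor API):

    class ToyExtractor:
        def geo_verification_headers(self):
            # the real method returns proxy-related headers when the user
            # configured geo bypass, and an empty dict otherwise (assumed)
            return {}

        def extract(self, url):
            headers = self.geo_verification_headers()
            # every webpage/API request would reuse `headers` ...
            headers['Referer'] = url  # ... which is then extended in place
            return headers

    print(ToyExtractor().extract('https://www.bilibili.com/bangumi/play/ep1'))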
@@ -1043,15 +1045,17 @@ def fetch_page(page_idx):

             try:
                 response = self._download_json('https://api.bilibili.com/x/space/wbi/arc/search',
-                                               playlist_id, note=f'Downloading page {page_idx}', query=query)
+                                               playlist_id, note=f'Downloading page {page_idx}', query=query,
+                                               headers={'referer': url})
             except ExtractorError as e:
                 if isinstance(e.cause, HTTPError) and e.cause.status == 412:
                     raise ExtractorError(
                         'Request is blocked by server (412), please add cookies, wait and try later.', expected=True)
                 raise
-            if response['code'] == -401:
+            if response['code'] in (-352, -401):
                 raise ExtractorError(
-                    'Request is blocked by server (401), please add cookies, wait and try later.', expected=True)
+                    f'Request is blocked by server ({-response["code"]}), '
+                    'please add cookies, wait and try later.', expected=True)
             return response['data']

         def get_metadata(page_data):
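A quick check of the new message formatting: the API reports negative codes (-352, -401), and the f-string negates them so the user-facing text shows 352/401:

    for code in (-352, -401):
        print(f'Request is blocked by server ({-code}), '
              'please add cookies, wait and try later.')
    # Request is blocked by server (352), please add cookies, wait and try later.
    # Request is blocked by server (401), please add cookies, wait and try later.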
@@ -1965,6 +1969,7 @@ class BiliIntlIE(BiliIntlBaseIE):
         'only_matching': True,
     }]

+    @staticmethod
     def _make_url(video_id, series_id=None):
         if series_id:
             return f'https://www.bilibili.tv/en/play/{series_id}/{video_id}'
@@ -185,7 +185,6 @@ class BitChuteChannelIE(InfoExtractor):
         'info_dict': {
             'id': 'UGlrF9o9b-Q',
             'ext': 'mp4',
-            'filesize': None,
             'title': 'This is the first video on #BitChute !',
             'description': 'md5:a0337e7b1fe39e32336974af8173a034',
             'thumbnail': r're:^https?://.*\.jpg$',
@@ -1,13 +1,15 @@
-from .common import InfoExtractor
 from .amp import AMPIE
+from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
     int_or_none,
     parse_iso8601,
+    str_or_none,
 )


 class BleacherReportIE(InfoExtractor):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/articles/(?P<id>\d+)'
     _TESTS = [{
         'url': 'http://bleacherreport.com/articles/2496438-fsu-stat-projections-is-jalen-ramsey-best-defensive-player-in-college-football',
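_WORKING = False is how yt-dlp marks an extractor as broken: a warning is printed before extraction is attempted and the extractor is skipped in test runs. A sketch of the mechanism with a stand-in base class (the real one lives in common.py):

    class ToyInfoExtractor:
        _WORKING = True

        @classmethod
        def working(cls):
            return cls._WORKING

    class BleacherReportToyIE(ToyInfoExtractor):
        _WORKING = False

    print(BleacherReportToyIE.working())  # False -> warn before using it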
@@ -16,7 +18,7 @@ class BleacherReportIE(InfoExtractor):
             'id': '2496438',
             'ext': 'mp4',
             'title': 'FSU Stat Projections: Is Jalen Ramsey Best Defensive Player in College Football?',
-            'uploader_id': 3992341,
+            'uploader_id': '3992341',
             'description': 'CFB, ACC, Florida State',
             'timestamp': 1434380212,
             'upload_date': '20150615',
@@ -33,7 +35,7 @@ class BleacherReportIE(InfoExtractor):
             'timestamp': 1446839961,
             'uploader': 'Sean Fay',
             'description': 'md5:b1601e2314c4d8eec23b6eafe086a757',
-            'uploader_id': 6466954,
+            'uploader_id': '6466954',
             'upload_date': '20151011',
         },
         'add_ie': ['Youtube'],
@@ -58,7 +60,7 @@ def _real_extract(self, url):
             'id': article_id,
             'title': article_data['title'],
             'uploader': article_data.get('author', {}).get('name'),
-            'uploader_id': article_data.get('authorId'),
+            'uploader_id': str_or_none(article_data.get('authorId')),
             'timestamp': parse_iso8601(article_data.get('createdAt')),
             'thumbnails': thumbnails,
             'comment_count': int_or_none(article_data.get('commentsCount')),
@@ -82,6 +84,7 @@ def _real_extract(self, url):


 class BleacherReportCMSIE(AMPIE):
+    _WORKING = False
     _VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36}|\d{5})'
     _TESTS = [{
         'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1&library=video-cms',
@@ -1,3 +1,4 @@
+from .common import InfoExtractor
 from ..utils import (
     mimetype2ext,
     parse_duration,
@@ -5,7 +6,6 @@
     str_or_none,
     traverse_obj,
 )
-from .common import InfoExtractor


 class BloggerIE(InfoExtractor):
Some files were not shown because too many files have changed in this diff.