mirror of https://github.com/yt-dlp/yt-dlp.git
Merge branch 'master' into feat/ruff
This commit is contained in:
commit
95d8d11698
|
@ -107,6 +107,8 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0 # Needed for changelog
|
||||||
- uses: actions/setup-python@v5
|
- uses: actions/setup-python@v5
|
||||||
with:
|
with:
|
||||||
python-version: "3.10"
|
python-version: "3.10"
|
||||||
|
@ -133,6 +135,7 @@ jobs:
|
||||||
- name: Prepare
|
- name: Prepare
|
||||||
run: |
|
run: |
|
||||||
python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
|
python devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
|
||||||
|
python devscripts/update_changelog.py -vv
|
||||||
python devscripts/make_lazy_extractors.py
|
python devscripts/make_lazy_extractors.py
|
||||||
- name: Build Unix platform-independent binary
|
- name: Build Unix platform-independent binary
|
||||||
run: |
|
run: |
|
||||||
|
@ -164,7 +167,7 @@ jobs:
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: build-${{ github.job }}
|
name: build-bin-${{ github.job }}
|
||||||
path: |
|
path: |
|
||||||
yt-dlp
|
yt-dlp
|
||||||
yt-dlp.tar.gz
|
yt-dlp.tar.gz
|
||||||
|
@ -227,7 +230,7 @@ jobs:
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: build-linux_${{ matrix.architecture }}
|
name: build-bin-linux_${{ matrix.architecture }}
|
||||||
path: | # run-on-arch-action designates armv7l as armv7
|
path: | # run-on-arch-action designates armv7l as armv7
|
||||||
repo/dist/yt-dlp_linux_${{ (matrix.architecture == 'armv7' && 'armv7l') || matrix.architecture }}
|
repo/dist/yt-dlp_linux_${{ (matrix.architecture == 'armv7' && 'armv7l') || matrix.architecture }}
|
||||||
compression-level: 0
|
compression-level: 0
|
||||||
|
@ -271,7 +274,7 @@ jobs:
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: build-${{ github.job }}
|
name: build-bin-${{ github.job }}
|
||||||
path: |
|
path: |
|
||||||
dist/yt-dlp_macos
|
dist/yt-dlp_macos
|
||||||
dist/yt-dlp_macos.zip
|
dist/yt-dlp_macos.zip
|
||||||
|
@ -324,7 +327,7 @@ jobs:
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: build-${{ github.job }}
|
name: build-bin-${{ github.job }}
|
||||||
path: |
|
path: |
|
||||||
dist/yt-dlp_macos_legacy
|
dist/yt-dlp_macos_legacy
|
||||||
compression-level: 0
|
compression-level: 0
|
||||||
|
@ -373,7 +376,7 @@ jobs:
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: build-${{ github.job }}
|
name: build-bin-${{ github.job }}
|
||||||
path: |
|
path: |
|
||||||
dist/yt-dlp.exe
|
dist/yt-dlp.exe
|
||||||
dist/yt-dlp_min.exe
|
dist/yt-dlp_min.exe
|
||||||
|
@ -421,7 +424,7 @@ jobs:
|
||||||
- name: Upload artifacts
|
- name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v4
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: build-${{ github.job }}
|
name: build-bin-${{ github.job }}
|
||||||
path: |
|
path: |
|
||||||
dist/yt-dlp_x86.exe
|
dist/yt-dlp_x86.exe
|
||||||
compression-level: 0
|
compression-level: 0
|
||||||
|
@ -441,7 +444,7 @@ jobs:
|
||||||
- uses: actions/download-artifact@v4
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
path: artifact
|
path: artifact
|
||||||
pattern: build-*
|
pattern: build-bin-*
|
||||||
merge-multiple: true
|
merge-multiple: true
|
||||||
|
|
||||||
- name: Make SHA2-SUMS files
|
- name: Make SHA2-SUMS files
|
||||||
|
@ -484,3 +487,4 @@ jobs:
|
||||||
_update_spec
|
_update_spec
|
||||||
SHA*SUMS*
|
SHA*SUMS*
|
||||||
compression-level: 0
|
compression-level: 0
|
||||||
|
overwrite: true
|
||||||
|
|
|
@ -189,13 +189,8 @@ jobs:
|
||||||
if: |
|
if: |
|
||||||
!inputs.prerelease && env.target_repo == github.repository
|
!inputs.prerelease && env.target_repo == github.repository
|
||||||
run: |
|
run: |
|
||||||
|
python devscripts/update_changelog.py -vv
|
||||||
make doc
|
make doc
|
||||||
sed '/### /Q' Changelog.md >> ./CHANGELOG
|
|
||||||
echo '### ${{ env.version }}' >> ./CHANGELOG
|
|
||||||
python ./devscripts/make_changelog.py -vv -c >> ./CHANGELOG
|
|
||||||
echo >> ./CHANGELOG
|
|
||||||
grep -Poz '(?s)### \d+\.\d+\.\d+.+' 'Changelog.md' | head -n -1 >> ./CHANGELOG
|
|
||||||
cat ./CHANGELOG > Changelog.md
|
|
||||||
|
|
||||||
- name: Push to release
|
- name: Push to release
|
||||||
id: push_release
|
id: push_release
|
||||||
|
@ -266,6 +261,7 @@ jobs:
|
||||||
pypi_project: ${{ needs.prepare.outputs.pypi_project }}
|
pypi_project: ${{ needs.prepare.outputs.pypi_project }}
|
||||||
run: |
|
run: |
|
||||||
python devscripts/update-version.py -c "${{ env.channel }}" -r "${{ env.target_repo }}" -s "${{ env.suffix }}" "${{ env.version }}"
|
python devscripts/update-version.py -c "${{ env.channel }}" -r "${{ env.target_repo }}" -s "${{ env.suffix }}" "${{ env.version }}"
|
||||||
|
python devscripts/update_changelog.py -vv
|
||||||
python devscripts/make_lazy_extractors.py
|
python devscripts/make_lazy_extractors.py
|
||||||
sed -i -E '0,/(name = ")[^"]+(")/s//\1${{ env.pypi_project }}\2/' pyproject.toml
|
sed -i -E '0,/(name = ")[^"]+(")/s//\1${{ env.pypi_project }}\2/' pyproject.toml
|
||||||
|
|
||||||
|
@ -312,19 +308,19 @@ jobs:
|
||||||
target_tag: ${{ needs.prepare.outputs.target_tag }}
|
target_tag: ${{ needs.prepare.outputs.target_tag }}
|
||||||
run: |
|
run: |
|
||||||
printf '%s' \
|
printf '%s' \
|
||||||
'[![Installation](https://img.shields.io/badge/-Which%20file%20should%20I%20download%3F-white.svg?style=for-the-badge)]' \
|
'[![Installation](https://img.shields.io/badge/-Which%20file%20to%20download%3F-white.svg?style=for-the-badge)]' \
|
||||||
'(https://github.com/${{ github.repository }}#installation "Installation instructions") ' \
|
'(https://github.com/${{ github.repository }}#installation "Installation instructions") ' \
|
||||||
|
'[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)]' \
|
||||||
|
'(https://discord.gg/H5MNcFW63r "Discord") ' \
|
||||||
|
'[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)]' \
|
||||||
|
'(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
|
||||||
'[![Documentation](https://img.shields.io/badge/-Docs-brightgreen.svg?style=for-the-badge&logo=GitBook&labelColor=555555)]' \
|
'[![Documentation](https://img.shields.io/badge/-Docs-brightgreen.svg?style=for-the-badge&logo=GitBook&labelColor=555555)]' \
|
||||||
'(https://github.com/${{ github.repository }}' \
|
'(https://github.com/${{ github.repository }}' \
|
||||||
'${{ env.target_repo == github.repository && format('/tree/{0}', env.target_tag) || '' }}#readme "Documentation") ' \
|
'${{ env.target_repo == github.repository && format('/tree/{0}', env.target_tag) || '' }}#readme "Documentation") ' \
|
||||||
'[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)]' \
|
|
||||||
'(https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators "Donate") ' \
|
|
||||||
'[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)]' \
|
|
||||||
'(https://discord.gg/H5MNcFW63r "Discord") ' \
|
|
||||||
${{ env.target_repo == 'yt-dlp/yt-dlp' && '\
|
${{ env.target_repo == 'yt-dlp/yt-dlp' && '\
|
||||||
"[![Nightly](https://img.shields.io/badge/Get%20nightly%20builds-purple.svg?style=for-the-badge)]" \
|
"[![Nightly](https://img.shields.io/badge/Nightly%20builds-purple.svg?style=for-the-badge)]" \
|
||||||
"(https://github.com/yt-dlp/yt-dlp-nightly-builds/releases/latest \"Nightly builds\") " \
|
"(https://github.com/yt-dlp/yt-dlp-nightly-builds/releases/latest \"Nightly builds\") " \
|
||||||
"[![Master](https://img.shields.io/badge/Get%20master%20builds-lightblue.svg?style=for-the-badge)]" \
|
"[![Master](https://img.shields.io/badge/Master%20builds-lightblue.svg?style=for-the-badge)]" \
|
||||||
"(https://github.com/yt-dlp/yt-dlp-master-builds/releases/latest \"Master builds\")"' || '' }} > ./RELEASE_NOTES
|
"(https://github.com/yt-dlp/yt-dlp-master-builds/releases/latest \"Master builds\")"' || '' }} > ./RELEASE_NOTES
|
||||||
printf '\n\n' >> ./RELEASE_NOTES
|
printf '\n\n' >> ./RELEASE_NOTES
|
||||||
cat >> ./RELEASE_NOTES << EOF
|
cat >> ./RELEASE_NOTES << EOF
|
||||||
|
|
|
@ -33,6 +33,7 @@ cookies
|
||||||
*.gif
|
*.gif
|
||||||
*.jpeg
|
*.jpeg
|
||||||
*.jpg
|
*.jpg
|
||||||
|
*.lrc
|
||||||
*.m4a
|
*.m4a
|
||||||
*.m4v
|
*.m4v
|
||||||
*.mhtml
|
*.mhtml
|
||||||
|
@ -40,6 +41,7 @@ cookies
|
||||||
*.mov
|
*.mov
|
||||||
*.mp3
|
*.mp3
|
||||||
*.mp4
|
*.mp4
|
||||||
|
*.mpg
|
||||||
*.mpga
|
*.mpga
|
||||||
*.oga
|
*.oga
|
||||||
*.ogg
|
*.ogg
|
||||||
|
@ -47,6 +49,7 @@ cookies
|
||||||
*.png
|
*.png
|
||||||
*.sbv
|
*.sbv
|
||||||
*.srt
|
*.srt
|
||||||
|
*.ssa
|
||||||
*.swf
|
*.swf
|
||||||
*.swp
|
*.swp
|
||||||
*.tt
|
*.tt
|
||||||
|
|
|
@ -79,7 +79,7 @@ Before reporting any issue, type `yt-dlp -U`. This should report that you're up-
|
||||||
|
|
||||||
### Is the issue already documented?
|
### Is the issue already documented?
|
||||||
|
|
||||||
Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/yt-dlp/yt-dlp/search?type=Issues) of this repository. If there is an issue, subcribe to it to be notified when there is any progress. Unless you have something useful to add to the converation, please refrain from commenting.
|
Make sure that someone has not already opened the issue you're trying to open. Search at the top of the window or browse the [GitHub Issues](https://github.com/yt-dlp/yt-dlp/search?type=Issues) of this repository. If there is an issue, subscribe to it to be notified when there is any progress. Unless you have something useful to add to the conversation, please refrain from commenting.
|
||||||
|
|
||||||
Additionally, it is also helpful to see if the issue has already been documented in the [youtube-dl issue tracker](https://github.com/ytdl-org/youtube-dl/issues). If similar issues have already been reported in youtube-dl (but not in our issue tracker), links to them can be included in your issue report here.
|
Additionally, it is also helpful to see if the issue has already been documented in the [youtube-dl issue tracker](https://github.com/ytdl-org/youtube-dl/issues). If similar issues have already been reported in youtube-dl (but not in our issue tracker), links to them can be included in your issue report here.
|
||||||
|
|
||||||
|
@ -169,7 +169,7 @@ If you want to create a build of yt-dlp yourself, you can follow the instruction
|
||||||
|
|
||||||
## Adding new feature or making overarching changes
|
## Adding new feature or making overarching changes
|
||||||
|
|
||||||
Before you start writing code for implementing a new feature, open an issue explaining your feature request and atleast one use case. This allows the maintainers to decide whether such a feature is desired for the project in the first place, and will provide an avenue to discuss some implementation details. If you open a pull request for a new feature without discussing with us first, do not be surprised when we ask for large changes to the code, or even reject it outright.
|
Before you start writing code for implementing a new feature, open an issue explaining your feature request and at least one use case. This allows the maintainers to decide whether such a feature is desired for the project in the first place, and will provide an avenue to discuss some implementation details. If you open a pull request for a new feature without discussing with us first, do not be surprised when we ask for large changes to the code, or even reject it outright.
|
||||||
|
|
||||||
The same applies for changes to the documentation, code style, or overarching changes to the architecture
|
The same applies for changes to the documentation, code style, or overarching changes to the architecture
|
||||||
|
|
||||||
|
@ -265,7 +265,7 @@ After you have ensured this site is distributing its content legally, you can fo
|
||||||
|
|
||||||
In any case, thank you very much for your contributions!
|
In any case, thank you very much for your contributions!
|
||||||
|
|
||||||
**Tip:** To test extractors that require login information, create a file `test/local_parameters.json` and add `"usenetrc": true` or your username and password in it:
|
**Tip:** To test extractors that require login information, create a file `test/local_parameters.json` and add `"usenetrc": true` or your `username`&`password` or `cookiefile`/`cookiesfrombrowser` in it:
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"username": "your user name",
|
"username": "your user name",
|
||||||
|
@ -292,7 +292,7 @@ The aforementioned metafields are the critical data that the extraction does not
|
||||||
|
|
||||||
For pornographic sites, appropriate `age_limit` must also be returned.
|
For pornographic sites, appropriate `age_limit` must also be returned.
|
||||||
|
|
||||||
The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract usefull information with `--ignore-no-formats-error` - e.g. when the video is a live stream that has not started yet.
|
The extractor is allowed to return the info dict without url or formats in some special cases if it allows the user to extract useful information with `--ignore-no-formats-error` - e.g. when the video is a live stream that has not started yet.
|
||||||
|
|
||||||
[Any field](yt_dlp/extractor/common.py#219-L426) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
|
[Any field](yt_dlp/extractor/common.py#219-L426) apart from the aforementioned ones are considered **optional**. That means that extraction should be **tolerant** to situations when sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields.
|
||||||
|
|
||||||
|
|
58
CONTRIBUTORS
58
CONTRIBUTORS
|
@ -542,3 +542,61 @@ prettykool
|
||||||
S-Aarab
|
S-Aarab
|
||||||
sonmezberkay
|
sonmezberkay
|
||||||
TSRBerry
|
TSRBerry
|
||||||
|
114514ns
|
||||||
|
agibson-fl
|
||||||
|
alard
|
||||||
|
alien-developers
|
||||||
|
antonkesy
|
||||||
|
ArnauvGilotra
|
||||||
|
Arthurszzz
|
||||||
|
Bibhav48
|
||||||
|
Bl4Cc4t
|
||||||
|
boredzo
|
||||||
|
Caesim404
|
||||||
|
chkuendig
|
||||||
|
chtk
|
||||||
|
Danish-H
|
||||||
|
dasidiot
|
||||||
|
diman8
|
||||||
|
divStar
|
||||||
|
DmitryScaletta
|
||||||
|
feederbox826
|
||||||
|
gmes78
|
||||||
|
gonzalezjo
|
||||||
|
hui1601
|
||||||
|
infanf
|
||||||
|
jazz1611
|
||||||
|
jingtra
|
||||||
|
jkmartindale
|
||||||
|
johnvictorfs
|
||||||
|
llistochek
|
||||||
|
marcdumais
|
||||||
|
martinxyz
|
||||||
|
michal-repo
|
||||||
|
mrmedieval
|
||||||
|
nbr23
|
||||||
|
Nicals
|
||||||
|
Noor-5
|
||||||
|
NurTasin
|
||||||
|
pompos02
|
||||||
|
Pranaxcau
|
||||||
|
pwaldhauer
|
||||||
|
RaduManole
|
||||||
|
RalphORama
|
||||||
|
rrgomes
|
||||||
|
ruiminggu
|
||||||
|
rvsit
|
||||||
|
sefidel
|
||||||
|
shmohawk
|
||||||
|
Snack-X
|
||||||
|
src-tinkerer
|
||||||
|
stilor
|
||||||
|
syntaxsurge
|
||||||
|
t-nil
|
||||||
|
ufukk
|
||||||
|
vista-narvas
|
||||||
|
x11x
|
||||||
|
xpadev-net
|
||||||
|
Xpl0itU
|
||||||
|
YoshichikaAAA
|
||||||
|
zhijinwuu
|
||||||
|
|
230
Changelog.md
230
Changelog.md
|
@ -4,6 +4,228 @@
|
||||||
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
|
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
|
||||||
-->
|
-->
|
||||||
|
|
||||||
|
### 2024.03.10
|
||||||
|
|
||||||
|
#### Core changes
|
||||||
|
- [Add `--compat-options 2023`](https://github.com/yt-dlp/yt-dlp/commit/3725b4f0c93ca3943e6300013a9670e4ab757fda) ([#9084](https://github.com/yt-dlp/yt-dlp/issues/9084)) by [Grub4K](https://github.com/Grub4K) (With fixes in [ffff1bc](https://github.com/yt-dlp/yt-dlp/commit/ffff1bc6598fc7a9258e51bc153cab812467f9f9) by [pukkandan](https://github.com/pukkandan))
|
||||||
|
- [Create `ydl._request_director` when needed](https://github.com/yt-dlp/yt-dlp/commit/069b2aedae2279668b6051627a81fc4fbd9c146a) by [pukkandan](https://github.com/pukkandan) (With fixes in [dbd8b1b](https://github.com/yt-dlp/yt-dlp/commit/dbd8b1bff9afd8f05f982bcd52c20bc173c266ca) by [Grub4K](https://github.com/Grub4K))
|
||||||
|
- [Don't select storyboard formats as fallback](https://github.com/yt-dlp/yt-dlp/commit/d63eae7e7ffb1f3e733e552b9e5e82355bfba214) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Handle `--load-info-json` format selection errors](https://github.com/yt-dlp/yt-dlp/commit/263a4b55ac17a796e8991ca8d2d86a3c349f8a60) ([#9392](https://github.com/yt-dlp/yt-dlp/issues/9392)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Warn user when not launching through shell on Windows](https://github.com/yt-dlp/yt-dlp/commit/6a6cdcd1824a14e3b336332c8f31f65497b8c4b8) ([#9250](https://github.com/yt-dlp/yt-dlp/issues/9250)) by [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
|
||||||
|
- **cookies**
|
||||||
|
- [Fix `--cookies-from-browser` for `snap` Firefox](https://github.com/yt-dlp/yt-dlp/commit/cbed249aaa053a3f425b9bafc97f8dbd71c44487) ([#9016](https://github.com/yt-dlp/yt-dlp/issues/9016)) by [Grub4K](https://github.com/Grub4K)
|
||||||
|
- [Fix `--cookies-from-browser` with macOS Firefox profiles](https://github.com/yt-dlp/yt-dlp/commit/85b33f5c163f60dbd089a6b9bc2ba1366d3ddf93) ([#8909](https://github.com/yt-dlp/yt-dlp/issues/8909)) by [RalphORama](https://github.com/RalphORama)
|
||||||
|
- [Improve error message for Windows `--cookies-from-browser chrome` issue](https://github.com/yt-dlp/yt-dlp/commit/2792092afd367e39251ace1fb2819c855ab8919f) ([#9080](https://github.com/yt-dlp/yt-dlp/issues/9080)) by [Grub4K](https://github.com/Grub4K)
|
||||||
|
- **plugins**: [Handle `PermissionError`](https://github.com/yt-dlp/yt-dlp/commit/9a8afadd172b7cab143f0049959fa64973589d94) ([#9229](https://github.com/yt-dlp/yt-dlp/issues/9229)) by [pukkandan](https://github.com/pukkandan), [syntaxsurge](https://github.com/syntaxsurge)
|
||||||
|
- **utils**
|
||||||
|
- [Improve `repr` of `DateRange`, `match_filter_func`](https://github.com/yt-dlp/yt-dlp/commit/45491a2a30da4d1723cfa9288cb664813bb09afb) by [pukkandan](https://github.com/pukkandan)
|
||||||
|
- `traverse_obj`: [Support `xml.etree.ElementTree.Element`](https://github.com/yt-dlp/yt-dlp/commit/ffbd4f2a02fee387ea5e0a267ce32df5259111ac) ([#8911](https://github.com/yt-dlp/yt-dlp/issues/8911)) by [Grub4K](https://github.com/Grub4K)
|
||||||
|
- **webvtt**: [Don't parse single fragment files](https://github.com/yt-dlp/yt-dlp/commit/f24e44e8cbd88ce338d52f594a19330f64d38b50) ([#9034](https://github.com/yt-dlp/yt-dlp/issues/9034)) by [seproDev](https://github.com/seproDev)
|
||||||
|
|
||||||
|
#### Extractor changes
|
||||||
|
- [Migrate commonly plural fields to lists](https://github.com/yt-dlp/yt-dlp/commit/104a7b5a46dc1805157fb4cc11c05876934d37c1) ([#8917](https://github.com/yt-dlp/yt-dlp/issues/8917)) by [llistochek](https://github.com/llistochek), [pukkandan](https://github.com/pukkandan) (With fixes in [b136e2a](https://github.com/yt-dlp/yt-dlp/commit/b136e2af341f7a88028aea4c5cd50efe2fa9b182) by [bashonly](https://github.com/bashonly))
|
||||||
|
- [Support multi-period MPD streams](https://github.com/yt-dlp/yt-dlp/commit/4ce57d3b873c2887814cbec03d029533e82f7db5) ([#6654](https://github.com/yt-dlp/yt-dlp/issues/6654)) by [alard](https://github.com/alard), [pukkandan](https://github.com/pukkandan)
|
||||||
|
- **abematv**
|
||||||
|
- [Fix extraction with cache](https://github.com/yt-dlp/yt-dlp/commit/c51316f8a69fbd0080f2720777d42ab438e254a3) ([#8895](https://github.com/yt-dlp/yt-dlp/issues/8895)) by [sefidel](https://github.com/sefidel)
|
||||||
|
- [Support login for playlists](https://github.com/yt-dlp/yt-dlp/commit/8226a3818f804478c756cf460baa9bf3a3b062a5) ([#8901](https://github.com/yt-dlp/yt-dlp/issues/8901)) by [sefidel](https://github.com/sefidel)
|
||||||
|
- **adn**
|
||||||
|
- [Add support for German site](https://github.com/yt-dlp/yt-dlp/commit/5eb1458be4767385a9bf1d570ff08e46100cbaa2) ([#8708](https://github.com/yt-dlp/yt-dlp/issues/8708)) by [infanf](https://github.com/infanf)
|
||||||
|
- [Improve auth error handling](https://github.com/yt-dlp/yt-dlp/commit/9526b1f179d19f75284eceaa5e0ee381af18cf19) ([#9068](https://github.com/yt-dlp/yt-dlp/issues/9068)) by [infanf](https://github.com/infanf)
|
||||||
|
- **aenetworks**: [Rating should be optional for AP extraction](https://github.com/yt-dlp/yt-dlp/commit/014cb5774d7afe624b6eb4e07f7be924b9e5e186) ([#9005](https://github.com/yt-dlp/yt-dlp/issues/9005)) by [agibson-fl](https://github.com/agibson-fl)
|
||||||
|
- **altcensored**: channel: [Fix playlist extraction](https://github.com/yt-dlp/yt-dlp/commit/e28e135d6fd6a430fed3e20dfe1a8c8bbc5f9185) ([#9297](https://github.com/yt-dlp/yt-dlp/issues/9297)) by [marcdumais](https://github.com/marcdumais)
|
||||||
|
- **amadeustv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/e641aab7a61df7406df60ebfe0c77bd5186b2b41) ([#8744](https://github.com/yt-dlp/yt-dlp/issues/8744)) by [ArnauvGilotra](https://github.com/ArnauvGilotra)
|
||||||
|
- **ant1newsgrembed**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/1ed5ee2f045f717e814f84ba461dadc58e712266) ([#9191](https://github.com/yt-dlp/yt-dlp/issues/9191)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **archiveorg**: [Fix format URL encoding](https://github.com/yt-dlp/yt-dlp/commit/3894ab9574748188bbacbd925a3971eda6fa2bb0) ([#9279](https://github.com/yt-dlp/yt-dlp/issues/9279)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **ard**
|
||||||
|
- mediathek
|
||||||
|
- [Revert to using old id](https://github.com/yt-dlp/yt-dlp/commit/b6951271ac014761c9c317b9cecd5e8e139cfa7c) ([#8916](https://github.com/yt-dlp/yt-dlp/issues/8916)) by [Grub4K](https://github.com/Grub4K)
|
||||||
|
- [Support cookies to verify age](https://github.com/yt-dlp/yt-dlp/commit/c099ec9392b0283dde34b290d1a04158ad8eb882) ([#9037](https://github.com/yt-dlp/yt-dlp/issues/9037)) by [StefanLobbenmeier](https://github.com/StefanLobbenmeier)
|
||||||
|
- **art19**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/999ea80beb053491089d256104c4188aced3110f) ([#9099](https://github.com/yt-dlp/yt-dlp/issues/9099)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **artetv**: [Separate closed captions](https://github.com/yt-dlp/yt-dlp/commit/393b487a4ea391c44e811505ec98531031d7e81e) ([#8231](https://github.com/yt-dlp/yt-dlp/issues/8231)) by [Nicals](https://github.com/Nicals), [seproDev](https://github.com/seproDev)
|
||||||
|
- **asobichannel**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/12f042740550c06552819374e2251deb7a519bab) ([#8700](https://github.com/yt-dlp/yt-dlp/issues/8700)) by [Snack-X](https://github.com/Snack-X)
|
||||||
|
- **bigo**: [Fix JSON extraction](https://github.com/yt-dlp/yt-dlp/commit/85a2d07c1f82c2082b568963d1c32ad3fc848f61) ([#8893](https://github.com/yt-dlp/yt-dlp/issues/8893)) by [DmitryScaletta](https://github.com/DmitryScaletta)
|
||||||
|
- **bilibili**
|
||||||
|
- [Add referer header and fix metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/1713c882730a928ac344c099874d2093fc2c8b51) ([#8832](https://github.com/yt-dlp/yt-dlp/issues/8832)) by [SirElderling](https://github.com/SirElderling) (With fixes in [f1570ab](https://github.com/yt-dlp/yt-dlp/commit/f1570ab84d5f49564256c620063d2d3e9ed4acf0) by [TobiX](https://github.com/TobiX))
|
||||||
|
- [Support `--no-playlist`](https://github.com/yt-dlp/yt-dlp/commit/e439693f729daf6fb15457baea1bca10ef5da34d) ([#9139](https://github.com/yt-dlp/yt-dlp/issues/9139)) by [c-basalt](https://github.com/c-basalt)
|
||||||
|
- **bilibilisearch**: [Set cookie to fix extraction](https://github.com/yt-dlp/yt-dlp/commit/ffa017cfc5973b265c92248546fcf5020dc43eaf) ([#9119](https://github.com/yt-dlp/yt-dlp/issues/9119)) by [c-basalt](https://github.com/c-basalt)
|
||||||
|
- **biliintl**: [Fix and improve subtitles extraction](https://github.com/yt-dlp/yt-dlp/commit/cf6413e840476c15e5b166dc2f7cc2a90a4a9aad) ([#7077](https://github.com/yt-dlp/yt-dlp/issues/7077)) by [dirkf](https://github.com/dirkf), [HobbyistDev](https://github.com/HobbyistDev), [itachi-19](https://github.com/itachi-19), [seproDev](https://github.com/seproDev)
|
||||||
|
- **boosty**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/540b68298192874c75ad5ee4589bed64d02a7d55) ([#9144](https://github.com/yt-dlp/yt-dlp/issues/9144)) by [un-def](https://github.com/un-def)
|
||||||
|
- **ccma**: [Extract 1080p DASH formats](https://github.com/yt-dlp/yt-dlp/commit/4253e3b7f483127bd812bdac02466f4a5b47ff34) ([#9130](https://github.com/yt-dlp/yt-dlp/issues/9130)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **cctv**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/6ad11fef65474bcf70f3a8556850d93c141e44a2) ([#9325](https://github.com/yt-dlp/yt-dlp/issues/9325)) by [src-tinkerer](https://github.com/src-tinkerer)
|
||||||
|
- **chzzk**
|
||||||
|
- [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/ba6b0c8261e9f0a6373885736ff90a89dd1fb614) ([#8887](https://github.com/yt-dlp/yt-dlp/issues/8887)) by [DmitryScaletta](https://github.com/DmitryScaletta)
|
||||||
|
- live: [Support `--wait-for-video`](https://github.com/yt-dlp/yt-dlp/commit/804f2366117b7065552a1c3cddb9ec19b688a5c1) ([#9309](https://github.com/yt-dlp/yt-dlp/issues/9309)) by [hui1601](https://github.com/hui1601)
|
||||||
|
- **cineverse**: [Detect when login required](https://github.com/yt-dlp/yt-dlp/commit/fc2cc626f07328a6c71b5e21853e4cfa7b1e6256) ([#9081](https://github.com/yt-dlp/yt-dlp/issues/9081)) by [garret1317](https://github.com/garret1317)
|
||||||
|
- **cloudflarestream**
|
||||||
|
- [Extract subtitles](https://github.com/yt-dlp/yt-dlp/commit/4d9dc0abe24ad5d9d22a16f40fc61137dcd103f7) ([#9007](https://github.com/yt-dlp/yt-dlp/issues/9007)) by [Bibhav48](https://github.com/Bibhav48)
|
||||||
|
- [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/f3d5face83f948c24bcb91e06d4fa6e8622d7d79) ([#9280](https://github.com/yt-dlp/yt-dlp/issues/9280)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Improve embed detection](https://github.com/yt-dlp/yt-dlp/commit/464c919ea82aefdf35f138a1ab2dd0bb8fb7fd0e) ([#9287](https://github.com/yt-dlp/yt-dlp/issues/9287)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **cloudycdn, lsm**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/5dda3b291f59f388f953337e9fb09a94b64aaf34) ([#8643](https://github.com/yt-dlp/yt-dlp/issues/8643)) by [Caesim404](https://github.com/Caesim404)
|
||||||
|
- **cnbc**: [Overhaul extractors](https://github.com/yt-dlp/yt-dlp/commit/998dffb5a2343ec709b3d6bbf2bf019649080239) ([#8741](https://github.com/yt-dlp/yt-dlp/issues/8741)) by [gonzalezjo](https://github.com/gonzalezjo), [Noor-5](https://github.com/Noor-5), [ruiminggu](https://github.com/ruiminggu), [seproDev](https://github.com/seproDev), [zhijinwuu](https://github.com/zhijinwuu)
|
||||||
|
- **craftsy**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/96f3924bac174f2fd401f86f78e77d7e0c5ee008) ([#9384](https://github.com/yt-dlp/yt-dlp/issues/9384)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **crooksandliars**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/03536126d32bd861e38536371f0cd5f1b71dcb7a) ([#9192](https://github.com/yt-dlp/yt-dlp/issues/9192)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **crtvg**: [Fix `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/785ab1af7f131e73444634ad57b39478651a43d3) ([#9404](https://github.com/yt-dlp/yt-dlp/issues/9404)) by [Xpl0itU](https://github.com/Xpl0itU)
|
||||||
|
- **dailymotion**: [Support search](https://github.com/yt-dlp/yt-dlp/commit/11ffa92a61e5847b3dfa8975f91ecb3ac2178841) ([#8292](https://github.com/yt-dlp/yt-dlp/issues/8292)) by [drzraf](https://github.com/drzraf), [seproDev](https://github.com/seproDev)
|
||||||
|
- **douyin**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/9ff946645568e71046487571eefa9cb524a5189b) ([#9239](https://github.com/yt-dlp/yt-dlp/issues/9239)) by [114514ns](https://github.com/114514ns), [bashonly](https://github.com/bashonly) (With fixes in [e546e5d](https://github.com/yt-dlp/yt-dlp/commit/e546e5d3b33a50075e574a2e7b8eda7ea874d21e) by [bashonly](https://github.com/bashonly))
|
||||||
|
- **duboku**: [Fix m3u8 formats extraction](https://github.com/yt-dlp/yt-dlp/commit/d3d4187da90a6b85f4ebae4bb07693cc9b412d75) ([#9161](https://github.com/yt-dlp/yt-dlp/issues/9161)) by [DmitryScaletta](https://github.com/DmitryScaletta)
|
||||||
|
- **dumpert**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/eedb38ce4093500e19279d50b708fb9c18bf4dbf) ([#9320](https://github.com/yt-dlp/yt-dlp/issues/9320)) by [rvsit](https://github.com/rvsit)
|
||||||
|
- **elementorembed**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/6171b050d70435008e64fa06aa6f19c4e5bec75f) ([#8948](https://github.com/yt-dlp/yt-dlp/issues/8948)) by [pompos02](https://github.com/pompos02), [seproDev](https://github.com/seproDev)
|
||||||
|
- **eporner**: [Extract AV1 formats](https://github.com/yt-dlp/yt-dlp/commit/96d0f8c1cb8aec250c5614bfde6b5fb95f10819b) ([#9028](https://github.com/yt-dlp/yt-dlp/issues/9028)) by [michal-repo](https://github.com/michal-repo)
|
||||||
|
- **errjupiter**
|
||||||
|
- [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a514cc2feb1c3b265b19acab11487acad8bb3ab0) ([#8549](https://github.com/yt-dlp/yt-dlp/issues/8549)) by [glensc](https://github.com/glensc)
|
||||||
|
- [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/80ed8bdeba5a945f127ef9ab055a4823329a1210) ([#9218](https://github.com/yt-dlp/yt-dlp/issues/9218)) by [glensc](https://github.com/glensc)
|
||||||
|
- **facebook**
|
||||||
|
- [Add new ID format](https://github.com/yt-dlp/yt-dlp/commit/cf9af2c7f1fedd881a157b3fbe725e5494b00924) ([#3824](https://github.com/yt-dlp/yt-dlp/issues/3824)) by [kclauhk](https://github.com/kclauhk), [Wikidepia](https://github.com/Wikidepia)
|
||||||
|
- [Improve extraction](https://github.com/yt-dlp/yt-dlp/commit/2e30b5567b5c6113d46b39163db5b044aea8667e) by [jingtra](https://github.com/jingtra), [ringus1](https://github.com/ringus1)
|
||||||
|
- [Improve thumbnail extraction](https://github.com/yt-dlp/yt-dlp/commit/3c4d3ee491b0ec22ed3cade51d943d3d27141ba7) ([#9060](https://github.com/yt-dlp/yt-dlp/issues/9060)) by [kclauhk](https://github.com/kclauhk)
|
||||||
|
- [Set format HTTP chunk size](https://github.com/yt-dlp/yt-dlp/commit/5b68c478fb0b93ea6b8fac23f50e12217fa063db) ([#9058](https://github.com/yt-dlp/yt-dlp/issues/9058)) by [bashonly](https://github.com/bashonly), [kclauhk](https://github.com/kclauhk)
|
||||||
|
- [Support events](https://github.com/yt-dlp/yt-dlp/commit/9b5efaf86b99a2664fff9fc725d275f766c3221d) ([#9055](https://github.com/yt-dlp/yt-dlp/issues/9055)) by [kclauhk](https://github.com/kclauhk)
|
||||||
|
- [Support permalink URLs](https://github.com/yt-dlp/yt-dlp/commit/87286e93af949c4e6a0f8ba34af6a1ab5aa102b6) ([#9061](https://github.com/yt-dlp/yt-dlp/issues/9061)) by [kclauhk](https://github.com/kclauhk)
|
||||||
|
- ads: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a40b0070c2a00d3ed839897462171a82323aa875) ([#8870](https://github.com/yt-dlp/yt-dlp/issues/8870)) by [kclauhk](https://github.com/kclauhk)
|
||||||
|
- **flextv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/4f043479090dc8a7e06e0bb53691e5414320dfb2) ([#9178](https://github.com/yt-dlp/yt-dlp/issues/9178)) by [DmitryScaletta](https://github.com/DmitryScaletta)
|
||||||
|
- **floatplane**: [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/9cd90447907a59c8a2727583f4a755fb23ed8cd3) ([#8934](https://github.com/yt-dlp/yt-dlp/issues/8934)) by [chtk](https://github.com/chtk)
|
||||||
|
- **francetv**
|
||||||
|
- [Fix DAI livestreams](https://github.com/yt-dlp/yt-dlp/commit/e4fbe5f886a6693f2466877c12e99c30c5442ace) ([#9380](https://github.com/yt-dlp/yt-dlp/issues/9380)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/9749ac7fecbfda391afbadf2870797ce0e382622) ([#9333](https://github.com/yt-dlp/yt-dlp/issues/9333)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix m3u8 formats extraction](https://github.com/yt-dlp/yt-dlp/commit/ede624d1db649f5a4b61f8abbb746f365322de27) ([#9347](https://github.com/yt-dlp/yt-dlp/issues/9347)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **funk**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/cd0443fb14e2ed805abb02792473457553a123d1) ([#9194](https://github.com/yt-dlp/yt-dlp/issues/9194)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **generic**: [Follow https redirects properly](https://github.com/yt-dlp/yt-dlp/commit/c8c9039e640495700f76a13496e3418bdd4382ba) ([#9121](https://github.com/yt-dlp/yt-dlp/issues/9121)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **getcourseru**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/4310b6650eeb5630295f4591b37720877878c57a) ([#8873](https://github.com/yt-dlp/yt-dlp/issues/8873)) by [divStar](https://github.com/divStar), [seproDev](https://github.com/seproDev)
|
||||||
|
- **gofile**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/77c2472ca1ef9050a66aa68bc5fa1bee88706c66) ([#9074](https://github.com/yt-dlp/yt-dlp/issues/9074)) by [jazz1611](https://github.com/jazz1611)
|
||||||
|
- **googledrive**: [Fix source file extraction](https://github.com/yt-dlp/yt-dlp/commit/5498729c59b03a9511c64552da3ba2f802166f8d) ([#8990](https://github.com/yt-dlp/yt-dlp/issues/8990)) by [jazz1611](https://github.com/jazz1611)
|
||||||
|
- **goplay**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/7e90e34fa4617b53f8c8a9e69f460508cb1f51b0) ([#6654](https://github.com/yt-dlp/yt-dlp/issues/6654)) by [alard](https://github.com/alard)
|
||||||
|
- **gopro**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/4a07a455bbf7acf87550053bbba949c828e350ba) ([#9019](https://github.com/yt-dlp/yt-dlp/issues/9019)) by [stilor](https://github.com/stilor)
|
||||||
|
- **ilpost**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/aa5dcc4ee65916a36cbe1b1b5b29b9110c3163ed) ([#9001](https://github.com/yt-dlp/yt-dlp/issues/9001)) by [CapacitorSet](https://github.com/CapacitorSet)
|
||||||
|
- **jiosaavnsong**: [Support more bitrates](https://github.com/yt-dlp/yt-dlp/commit/5154dc0a687528f995cde22b5ff63f82c740e98a) ([#8834](https://github.com/yt-dlp/yt-dlp/issues/8834)) by [alien-developers](https://github.com/alien-developers), [bashonly](https://github.com/bashonly)
|
||||||
|
- **kukululive**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/20cdad5a2c0499d5a6746f5466a2ab0c97b75884) ([#8877](https://github.com/yt-dlp/yt-dlp/issues/8877)) by [DmitryScaletta](https://github.com/DmitryScaletta)
|
||||||
|
- **lefigarovideoembed**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/9401736fd08767c58af45a1e36ff5929c5fa1ac9) ([#9198](https://github.com/yt-dlp/yt-dlp/issues/9198)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **linkedin**: [Fix metadata and extract subtitles](https://github.com/yt-dlp/yt-dlp/commit/017adb28e7fe7b8c8fc472332d86740f31141519) ([#9056](https://github.com/yt-dlp/yt-dlp/issues/9056)) by [barsnick](https://github.com/barsnick)
|
||||||
|
- **magellantv**: [Support episodes](https://github.com/yt-dlp/yt-dlp/commit/3dc9232e1aa58fe3c2d8cafb50e8162d6f0e891e) ([#9199](https://github.com/yt-dlp/yt-dlp/issues/9199)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **magentamusik**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/5e2e24b2c5795756d81785b06b10723ddb6db7b2) ([#7790](https://github.com/yt-dlp/yt-dlp/issues/7790)) by [pwaldhauer](https://github.com/pwaldhauer), [seproDev](https://github.com/seproDev)
|
||||||
|
- **medaltv**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/02e343f6ef6d7b3f9087ff69e4a1db0b4b4a5c5d) ([#9098](https://github.com/yt-dlp/yt-dlp/issues/9098)) by [Danish-H](https://github.com/Danish-H)
|
||||||
|
- **mlbarticle**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/50e06e21a68e336198198bda332b8e7d2314f201) ([#9021](https://github.com/yt-dlp/yt-dlp/issues/9021)) by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
- **motherless**: [Support uploader playlists](https://github.com/yt-dlp/yt-dlp/commit/9f1e9dab21bbe651544c8f4663b0e615dc450e4d) ([#8994](https://github.com/yt-dlp/yt-dlp/issues/8994)) by [dasidiot](https://github.com/dasidiot)
|
||||||
|
- **mujrozhlas**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/4170b3d7120e06db3391eef39c5add18a1ddf2c3) ([#9306](https://github.com/yt-dlp/yt-dlp/issues/9306)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **mx3**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/5a63454b3637b3603434026cddfeac509218b90e) ([#8736](https://github.com/yt-dlp/yt-dlp/issues/8736)) by [martinxyz](https://github.com/martinxyz)
|
||||||
|
- **naver**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/a281beba8d8f007cf220f96dd1d9412bb070c7d8) ([#8883](https://github.com/yt-dlp/yt-dlp/issues/8883)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **nebula**: [Support podcasts](https://github.com/yt-dlp/yt-dlp/commit/0de09c5b9ed619d4a93d7c451c6ddff0381de808) ([#9140](https://github.com/yt-dlp/yt-dlp/issues/9140)) by [c-basalt](https://github.com/c-basalt), [seproDev](https://github.com/seproDev)
|
||||||
|
- **nerdcubedfeed**: [Overhaul extractor](https://github.com/yt-dlp/yt-dlp/commit/29a74a6126101aabaa1726ae41b1ca55cf26e7a7) ([#9269](https://github.com/yt-dlp/yt-dlp/issues/9269)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **newgrounds**
|
||||||
|
- [Fix login and clean up extraction](https://github.com/yt-dlp/yt-dlp/commit/0fcefb92f3ebfc5cada19c1e85a715f020d0f333) ([#9356](https://github.com/yt-dlp/yt-dlp/issues/9356)) by [Grub4K](https://github.com/Grub4K), [mrmedieval](https://github.com/mrmedieval)
|
||||||
|
- user: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/3e083191cdc34dd8c482da9a9b4bc682f824cb9d) ([#9046](https://github.com/yt-dlp/yt-dlp/issues/9046)) by [u-spec-png](https://github.com/u-spec-png)
|
||||||
|
- **nfb**: [Add support for onf.ca and series](https://github.com/yt-dlp/yt-dlp/commit/4b8b0dded8c65cd5b2ab2e858058ba98c9bf49ff) ([#8997](https://github.com/yt-dlp/yt-dlp/issues/8997)) by [bashonly](https://github.com/bashonly), [rrgomes](https://github.com/rrgomes)
|
||||||
|
- **nhkradiru**: [Extract extended description](https://github.com/yt-dlp/yt-dlp/commit/4392447d9404e3c25cfeb8f5bdfff31b0448da39) ([#9162](https://github.com/yt-dlp/yt-dlp/issues/9162)) by [garret1317](https://github.com/garret1317)
|
||||||
|
- **nhkradirulive**: [Make metadata extraction non-fatal](https://github.com/yt-dlp/yt-dlp/commit/5af1f19787f7d652fce72dd3ab9536cdd980fe85) ([#8956](https://github.com/yt-dlp/yt-dlp/issues/8956)) by [garret1317](https://github.com/garret1317)
|
||||||
|
- **niconico**
|
||||||
|
- [Remove legacy danmaku extraction](https://github.com/yt-dlp/yt-dlp/commit/974d444039c8bbffb57265c6792cd52d169fe1b9) ([#9209](https://github.com/yt-dlp/yt-dlp/issues/9209)) by [pzhlkj6612](https://github.com/pzhlkj6612)
|
||||||
|
- [Support DMS formats](https://github.com/yt-dlp/yt-dlp/commit/aa13a8e3dd3b698cc40ec438988b1ad834e11a41) ([#9282](https://github.com/yt-dlp/yt-dlp/issues/9282)) by [pzhlkj6612](https://github.com/pzhlkj6612), [xpadev-net](https://github.com/xpadev-net) (With fixes in [40966e8](https://github.com/yt-dlp/yt-dlp/commit/40966e8da27bbf770dacf9be9363fcc3ad72cc9f) by [pzhlkj6612](https://github.com/pzhlkj6612))
|
||||||
|
- **ninaprotocol**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/62c65bfaf81e04e6746f6fdbafe384eb3edddfbc) ([#8946](https://github.com/yt-dlp/yt-dlp/issues/8946)) by [RaduManole](https://github.com/RaduManole), [seproDev](https://github.com/seproDev)
|
||||||
|
- **ninenews**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/43694ce13c5a9f1afca8b02b8b2b9b1576d6503d) ([#8840](https://github.com/yt-dlp/yt-dlp/issues/8840)) by [SirElderling](https://github.com/SirElderling)
|
||||||
|
- **nova**: [Fix embed extraction](https://github.com/yt-dlp/yt-dlp/commit/c168d8791d0974a8a8fcb3b4a4bc2d830df51622) ([#9221](https://github.com/yt-dlp/yt-dlp/issues/9221)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **ntvru**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/7a29cbbd5fd7363e7e8535ee1506b7052465d13f) ([#9276](https://github.com/yt-dlp/yt-dlp/issues/9276)) by [bashonly](https://github.com/bashonly), [dirkf](https://github.com/dirkf)
|
||||||
|
- **nuum**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/acaf806c15f0a802ba286c23af02a10cf4bd4731) ([#8868](https://github.com/yt-dlp/yt-dlp/issues/8868)) by [DmitryScaletta](https://github.com/DmitryScaletta), [seproDev](https://github.com/seproDev)
|
||||||
|
- **nytimes**
|
||||||
|
- [Extract timestamp](https://github.com/yt-dlp/yt-dlp/commit/05420227aaab60a39c0f9ade069c5862be36b1fa) ([#9142](https://github.com/yt-dlp/yt-dlp/issues/9142)) by [SirElderling](https://github.com/SirElderling)
|
||||||
|
- [Overhaul extractors](https://github.com/yt-dlp/yt-dlp/commit/07256b9fee23960799024b95d5972abc7174aa81) ([#9075](https://github.com/yt-dlp/yt-dlp/issues/9075)) by [SirElderling](https://github.com/SirElderling)
|
||||||
|
- **onefootball**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/644738ddaa45428cb0babd41ead22454e5a2545e) ([#9222](https://github.com/yt-dlp/yt-dlp/issues/9222)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **openrec**: [Pass referer for m3u8 formats](https://github.com/yt-dlp/yt-dlp/commit/f591e605dfee4085ec007d6d056c943cbcacc429) ([#9253](https://github.com/yt-dlp/yt-dlp/issues/9253)) by [fireattack](https://github.com/fireattack)
|
||||||
|
- **orf**: on: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a0d50aabc5462aee302bd3f2663d3a3554875789) ([#9113](https://github.com/yt-dlp/yt-dlp/issues/9113)) by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
- **patreon**: [Fix embedded HLS extraction](https://github.com/yt-dlp/yt-dlp/commit/f0e8bc7c60b61fe18b63116c975609d76b904771) ([#8993](https://github.com/yt-dlp/yt-dlp/issues/8993)) by [johnvictorfs](https://github.com/johnvictorfs)
|
||||||
|
- **peertube**: [Update instances](https://github.com/yt-dlp/yt-dlp/commit/35d96982f1033e36215d323317981ee17e8ab0d5) ([#9070](https://github.com/yt-dlp/yt-dlp/issues/9070)) by [Chocobozzz](https://github.com/Chocobozzz)
|
||||||
|
- **piapro**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/8e6e3651727b0b85764857fc6329fe5e0a3f00de) ([#8999](https://github.com/yt-dlp/yt-dlp/issues/8999)) by [FinnRG](https://github.com/FinnRG)
|
||||||
|
- **playsuisse**: [Add login support](https://github.com/yt-dlp/yt-dlp/commit/cae6e461073fb7c32fd32052a3e6721447c469bc) ([#9077](https://github.com/yt-dlp/yt-dlp/issues/9077)) by [chkuendig](https://github.com/chkuendig)
|
||||||
|
- **pornhub**: [Fix login support](https://github.com/yt-dlp/yt-dlp/commit/de954c1b4d3a6db8a6525507e65303c7bb03f39f) ([#9227](https://github.com/yt-dlp/yt-dlp/issues/9227)) by [feederbox826](https://github.com/feederbox826)
|
||||||
|
- **pr0gramm**: [Enable POL filter and provide tags without login](https://github.com/yt-dlp/yt-dlp/commit/5f25f348f9eb5db842b1ec6799f95bebb7ba35a7) ([#9051](https://github.com/yt-dlp/yt-dlp/issues/9051)) by [Grub4K](https://github.com/Grub4K)
|
||||||
|
- **prankcastpost**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/a2bac6b7adb7b0e955125838e20bb39eece630ce) ([#8933](https://github.com/yt-dlp/yt-dlp/issues/8933)) by [columndeeply](https://github.com/columndeeply)
|
||||||
|
- **radiko**: [Extract more metadata](https://github.com/yt-dlp/yt-dlp/commit/e3ce2b385ec1f03fac9d4210c57fda77134495fc) ([#9115](https://github.com/yt-dlp/yt-dlp/issues/9115)) by [YoshichikaAAA](https://github.com/YoshichikaAAA)
|
||||||
|
- **rai**
|
||||||
|
- [Filter unavailable formats](https://github.com/yt-dlp/yt-dlp/commit/f78814923748277e7067b796f25870686fb46205) ([#9189](https://github.com/yt-dlp/yt-dlp/issues/9189)) by [nixxo](https://github.com/nixxo)
|
||||||
|
- [Fix m3u8 formats extraction](https://github.com/yt-dlp/yt-dlp/commit/8f423cf8051fbfeedd57cca00d106012e6e86a97) ([#9291](https://github.com/yt-dlp/yt-dlp/issues/9291)) by [nixxo](https://github.com/nixxo)
|
||||||
|
- **redcdnlivx, sejm**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/fcaa2e735b00b15a2b0d9f55f4187c654b4b5b39) ([#8676](https://github.com/yt-dlp/yt-dlp/issues/8676)) by [selfisekai](https://github.com/selfisekai)
|
||||||
|
- **redtube**
|
||||||
|
- [Fix formats extraction](https://github.com/yt-dlp/yt-dlp/commit/c91d8b1899403daff6fc15206ad32de8db17fb8f) ([#9076](https://github.com/yt-dlp/yt-dlp/issues/9076)) by [jazz1611](https://github.com/jazz1611)
|
||||||
|
- [Support redtube.com.br URLs](https://github.com/yt-dlp/yt-dlp/commit/4a6ff0b47a700dee3ee5c54804c31965308479ae) ([#9103](https://github.com/yt-dlp/yt-dlp/issues/9103)) by [jazz1611](https://github.com/jazz1611)
|
||||||
|
- **ridehome**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/cd7086c0d54ec1d7e02a30bd5bd934bdb2c54642) ([#8875](https://github.com/yt-dlp/yt-dlp/issues/8875)) by [SirElderling](https://github.com/SirElderling)
|
||||||
|
- **rinsefmartistplaylist**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/1a36dbad712d359ec1c5b73d9bbbe562c03e9660) ([#8794](https://github.com/yt-dlp/yt-dlp/issues/8794)) by [SirElderling](https://github.com/SirElderling)
|
||||||
|
- **roosterteeth**
|
||||||
|
- [Add Brightcove fallback](https://github.com/yt-dlp/yt-dlp/commit/b2cc150ad83ba20ceb2d6e73d09854eed3c2d05c) ([#9403](https://github.com/yt-dlp/yt-dlp/issues/9403)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Extract ad-free streams](https://github.com/yt-dlp/yt-dlp/commit/dd29e6e5fdf0f3758cb0829e73749832768f1a4e) ([#9355](https://github.com/yt-dlp/yt-dlp/issues/9355)) by [jkmartindale](https://github.com/jkmartindale)
|
||||||
|
- [Extract release date and timestamp](https://github.com/yt-dlp/yt-dlp/commit/dfd8c0b69683b1c11beea039a96dd2949026c1d7) ([#9393](https://github.com/yt-dlp/yt-dlp/issues/9393)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Support bonus features](https://github.com/yt-dlp/yt-dlp/commit/8993721ecb34867b52b79f6e92b233008d1cbe78) ([#9406](https://github.com/yt-dlp/yt-dlp/issues/9406)) by [Bl4Cc4t](https://github.com/Bl4Cc4t)
|
||||||
|
- **rule34video**
|
||||||
|
- [Extract `creators`](https://github.com/yt-dlp/yt-dlp/commit/3d9dc2f3590e10abf1561ebdaed96734a740587c) ([#9258](https://github.com/yt-dlp/yt-dlp/issues/9258)) by [gmes78](https://github.com/gmes78)
|
||||||
|
- [Extract more metadata](https://github.com/yt-dlp/yt-dlp/commit/fee2d8d9c38f9b5f0a8df347c1e698983339c34d) ([#7416](https://github.com/yt-dlp/yt-dlp/issues/7416)) by [gmes78](https://github.com/gmes78)
|
||||||
|
- [Fix `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/c0ecceeefe6ebd27452d9d8f20658f83ae121d04) ([#9044](https://github.com/yt-dlp/yt-dlp/issues/9044)) by [gmes78](https://github.com/gmes78)
|
||||||
|
- **rumblechannel**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/0023af81fbce01984f35b34ecaf8562739831227) ([#9092](https://github.com/yt-dlp/yt-dlp/issues/9092)) by [Pranaxcau](https://github.com/Pranaxcau), [vista-narvas](https://github.com/vista-narvas)
|
||||||
|
- **screencastify**: [Update `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/0bee29493ca8f91a0055a3706c7c94f5860188df) ([#9232](https://github.com/yt-dlp/yt-dlp/issues/9232)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **svtpage**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/ddd4b5e10a653bee78e656107710021c1b82934c) ([#8938](https://github.com/yt-dlp/yt-dlp/issues/8938)) by [diman8](https://github.com/diman8)
|
||||||
|
- **swearnet**: [Raise for login required](https://github.com/yt-dlp/yt-dlp/commit/b05640d532c43a52c0a0da096bb2dbd51e105ec0) ([#9281](https://github.com/yt-dlp/yt-dlp/issues/9281)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **tiktok**: [Fix webpage extraction](https://github.com/yt-dlp/yt-dlp/commit/d9b4154cbcb979d7e30af3a73b1bee422aae5aa3) ([#9327](https://github.com/yt-dlp/yt-dlp/issues/9327)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **trtworld**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/8ab84650837e58046430c9f4b615c56a8886e071) ([#8701](https://github.com/yt-dlp/yt-dlp/issues/8701)) by [ufukk](https://github.com/ufukk)
|
||||||
|
- **tvp**: [Support livestreams](https://github.com/yt-dlp/yt-dlp/commit/882e3b753c79c7799ce135c3a5edb72494b576af) ([#8860](https://github.com/yt-dlp/yt-dlp/issues/8860)) by [selfisekai](https://github.com/selfisekai)
|
||||||
|
- **twitch**: [Fix m3u8 extraction](https://github.com/yt-dlp/yt-dlp/commit/5b8c69ae04444a4c80a5a99917e40f75a116c3b8) ([#8960](https://github.com/yt-dlp/yt-dlp/issues/8960)) by [DmitryScaletta](https://github.com/DmitryScaletta)
|
||||||
|
- **twitter**
|
||||||
|
- [Extract bitrate for HLS audio formats](https://github.com/yt-dlp/yt-dlp/commit/28e53d60df9b8aadd52a93504e30e885c9c35262) ([#9257](https://github.com/yt-dlp/yt-dlp/issues/9257)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Extract numeric `channel_id`](https://github.com/yt-dlp/yt-dlp/commit/55f1833376505ed1e4be0516b09bb3ea4425e8a4) ([#9263](https://github.com/yt-dlp/yt-dlp/issues/9263)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **txxx**: [Extract thumbnails](https://github.com/yt-dlp/yt-dlp/commit/d79c7e9937c388c68b722ab7450960e43ef776d6) ([#9063](https://github.com/yt-dlp/yt-dlp/issues/9063)) by [shmohawk](https://github.com/shmohawk)
|
||||||
|
- **utreon**: [Support playeur.com](https://github.com/yt-dlp/yt-dlp/commit/41d6b61e9852a5b97f47cc8a7718b31fb23f0aea) ([#9182](https://github.com/yt-dlp/yt-dlp/issues/9182)) by [DmitryScaletta](https://github.com/DmitryScaletta)
|
||||||
|
- **vbox7**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/67bb70cd700c8d4c3149cd9e0539a5f32c3d1ce6) ([#9100](https://github.com/yt-dlp/yt-dlp/issues/9100)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- **viewlift**: [Add support for chorki.com](https://github.com/yt-dlp/yt-dlp/commit/41b6cdb4197aaf7ad82bdad6885eb5d5c64acd74) ([#9095](https://github.com/yt-dlp/yt-dlp/issues/9095)) by [NurTasin](https://github.com/NurTasin)
|
||||||
|
- **vimeo**
|
||||||
|
- [Extract `live_status` and `release_timestamp`](https://github.com/yt-dlp/yt-dlp/commit/f0426e9ca57dd14b82e6c13afc17947614f1e8eb) ([#9290](https://github.com/yt-dlp/yt-dlp/issues/9290)) by [pzhlkj6612](https://github.com/pzhlkj6612)
|
||||||
|
- [Fix API headers](https://github.com/yt-dlp/yt-dlp/commit/8e765755f7f4909e1b535e61b7376b2d66e1ba6a) ([#9125](https://github.com/yt-dlp/yt-dlp/issues/9125)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix login](https://github.com/yt-dlp/yt-dlp/commit/2e8de097ad82da378e97005e8f1ff7e5aebca585) ([#9274](https://github.com/yt-dlp/yt-dlp/issues/9274)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **viously**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/95e82347b398d8bb160767cdd975edecd62cbabd) ([#8927](https://github.com/yt-dlp/yt-dlp/issues/8927)) by [nbr23](https://github.com/nbr23), [seproDev](https://github.com/seproDev)
|
||||||
|
- **youtube**
|
||||||
|
- [Better error when all player responses are skipped](https://github.com/yt-dlp/yt-dlp/commit/5eedc208ec89d6284777060c94aadd06502338b9) ([#9083](https://github.com/yt-dlp/yt-dlp/issues/9083)) by [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
|
||||||
|
- [Bump Android and iOS client versions](https://github.com/yt-dlp/yt-dlp/commit/413d3675804599bc8fe419c19e36490fd8f0b30f) ([#9317](https://github.com/yt-dlp/yt-dlp/issues/9317)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Further bump client versions](https://github.com/yt-dlp/yt-dlp/commit/7aad06541e543fa3452d3d2513e6f079aad1f99b) ([#9395](https://github.com/yt-dlp/yt-dlp/issues/9395)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- tab: [Fix `tags` extraction](https://github.com/yt-dlp/yt-dlp/commit/8828f4576bd862438d4fbf634f1d6ab18a217b0e) ([#9413](https://github.com/yt-dlp/yt-dlp/issues/9413)) by [x11x](https://github.com/x11x)
|
||||||
|
- **zenporn**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/f00c0def7434fac3c88503c2a77c4b2419b8e5ca) ([#8509](https://github.com/yt-dlp/yt-dlp/issues/8509)) by [SirElderling](https://github.com/SirElderling)
|
||||||
|
- **zetland**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/2f4b57594673035a59d72f7667588da848820034) ([#9116](https://github.com/yt-dlp/yt-dlp/issues/9116)) by [HobbyistDev](https://github.com/HobbyistDev)
|
||||||
|
|
||||||
|
#### Downloader changes
|
||||||
|
- **http**: [Reset resume length to handle `FileNotFoundError`](https://github.com/yt-dlp/yt-dlp/commit/2d91b9845621639c53dca7ee9d3d954f3624ba18) ([#8399](https://github.com/yt-dlp/yt-dlp/issues/8399)) by [boredzo](https://github.com/boredzo)
|
||||||
|
|
||||||
|
#### Networking changes
|
||||||
|
- [Remove `_CompatHTTPError`](https://github.com/yt-dlp/yt-dlp/commit/811d298b231cfa29e75c321b23a91d1c2b17602c) ([#8871](https://github.com/yt-dlp/yt-dlp/issues/8871)) by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
- **Request Handler**
|
||||||
|
- [Remove additional logging handlers on close](https://github.com/yt-dlp/yt-dlp/commit/0085e2bab8465ee7d46d16fcade3ed5e96cc8a48) ([#9032](https://github.com/yt-dlp/yt-dlp/issues/9032)) by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
- requests: [Apply `remove_dot_segments` to absolute redirect locations](https://github.com/yt-dlp/yt-dlp/commit/35f4f764a786685ea45d84abe1cf1ad3847f4c97) by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
|
||||||
|
#### Misc. changes
|
||||||
|
- **build**
|
||||||
|
- [Add `default` optional dependency group](https://github.com/yt-dlp/yt-dlp/commit/cf91400a1dd6cc99b11a6d163e1af73b64d618c9) ([#9295](https://github.com/yt-dlp/yt-dlp/issues/9295)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
|
||||||
|
- [Add transitional `setup.py` and `pyinst.py`](https://github.com/yt-dlp/yt-dlp/commit/0abf2f1f153ab47990edbeee3477dc55f74c7f89) ([#9296](https://github.com/yt-dlp/yt-dlp/issues/9296)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan)
|
||||||
|
- [Bump `actions/upload-artifact` to v4 and adjust workflows](https://github.com/yt-dlp/yt-dlp/commit/3876429d72afb35247f4b2531eb9b16cfc7e0968) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Bump `conda-incubator/setup-miniconda` to v3](https://github.com/yt-dlp/yt-dlp/commit/b0059f0413a6ba6ab0a3aec1f00188ce083cd8bf) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix `secretstorage` for ARM builds](https://github.com/yt-dlp/yt-dlp/commit/920397634d1e84e76d2cb897bd6d69ba0c6bd5ca) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Migrate to `pyproject.toml` and `hatchling`](https://github.com/yt-dlp/yt-dlp/commit/775cde82dc5b1dc64ab0539a92dd8c7ba6c0ad33) by [bashonly](https://github.com/bashonly) (With fixes in [43cfd46](https://github.com/yt-dlp/yt-dlp/commit/43cfd462c0d01eff22c1d4290aeb96eb1ea2c0e1))
|
||||||
|
- [Move bundle scripts into `bundle` submodule](https://github.com/yt-dlp/yt-dlp/commit/a1b778428991b1779203bac243ef4e9b6baea90c) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Support failed build job re-runs](https://github.com/yt-dlp/yt-dlp/commit/eabbccc439720fba381919a88be4fe4d96464cbd) ([#9277](https://github.com/yt-dlp/yt-dlp/issues/9277)) by [bashonly](https://github.com/bashonly)
|
||||||
|
- Makefile
|
||||||
|
- [Add automated `CODE_FOLDERS` and `CODE_FILES`](https://github.com/yt-dlp/yt-dlp/commit/868d2f60a7cb59b410c8cbfb452cbdb072687b81) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Ensure compatibility with BSD `make`](https://github.com/yt-dlp/yt-dlp/commit/beaa1a44554d04d9fe63a743a5bb4431ca778f28) ([#9210](https://github.com/yt-dlp/yt-dlp/issues/9210)) by [bashonly](https://github.com/bashonly) (With fixes in [73fcfa3](https://github.com/yt-dlp/yt-dlp/commit/73fcfa39f59113a8728249de2c4cee3025f17dc2))
|
||||||
|
- [Fix man pages generated by `pandoc>=3`](https://github.com/yt-dlp/yt-dlp/commit/fb44020fa98e47620b3aa1dab94b4c5b7bfb40bd) ([#7047](https://github.com/yt-dlp/yt-dlp/issues/7047)) by [t-nil](https://github.com/t-nil)
|
||||||
|
- **ci**: [Bump `actions/setup-python` to v5](https://github.com/yt-dlp/yt-dlp/commit/b14e818b37f62e3224da157b3ad768b3f0815fcd) by [bashonly](https://github.com/bashonly)
|
||||||
|
- **cleanup**
|
||||||
|
- [Build files cleanup](https://github.com/yt-dlp/yt-dlp/commit/867f637b95b342e1cb9f1dc3c6cf0ffe727187ce) by [bashonly](https://github.com/bashonly)
|
||||||
|
- [Fix infodict returned fields](https://github.com/yt-dlp/yt-dlp/commit/f4f9f6d00edcac6d4eb2b3fb78bf81326235d492) ([#8906](https://github.com/yt-dlp/yt-dlp/issues/8906)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Fix typo in README.md](https://github.com/yt-dlp/yt-dlp/commit/292d60b1ed3b9fe5bcb2775a894cca99b0f9473e) ([#8894](https://github.com/yt-dlp/yt-dlp/issues/8894)) by [antonkesy](https://github.com/antonkesy)
|
||||||
|
- [Mark broken and remove dead extractors](https://github.com/yt-dlp/yt-dlp/commit/df773c3d5d1cc1f877cf8582f0072e386fc49318) ([#9238](https://github.com/yt-dlp/yt-dlp/issues/9238)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Match both `http` and `https` in `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/a687226b48f71b874fa18b0165ec528d591f53fb) ([#8968](https://github.com/yt-dlp/yt-dlp/issues/8968)) by [seproDev](https://github.com/seproDev)
|
||||||
|
- [Remove unused code](https://github.com/yt-dlp/yt-dlp/commit/ed3bb2b0a12c44334e0d09481752dabf2ca1dc13) ([#8968](https://github.com/yt-dlp/yt-dlp/issues/8968)) by [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- Miscellaneous
|
||||||
|
- [93240fc](https://github.com/yt-dlp/yt-dlp/commit/93240fc1848de4a94f25844c96e0dcd282ef1d3b) by [bashonly](https://github.com/bashonly), [Grub4k](https://github.com/Grub4k), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- [615a844](https://github.com/yt-dlp/yt-dlp/commit/615a84447e8322720be77a0e64298d7f42848693) by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- **devscripts**
|
||||||
|
- `install_deps`: [Add script and migrate to it](https://github.com/yt-dlp/yt-dlp/commit/b8a433aaca86b15cb9f1a451b0f69371d2fc22a9) by [bashonly](https://github.com/bashonly)
|
||||||
|
- `tomlparse`: [Add makeshift toml parser](https://github.com/yt-dlp/yt-dlp/commit/fd647775e27e030ab17387c249e2ebeba68f8ff0) by [Grub4K](https://github.com/Grub4K)
|
||||||
|
- **docs**: [Misc Cleanup](https://github.com/yt-dlp/yt-dlp/commit/47ab66db0f083a76c7fba0f6e136b21dd5a93e3b) ([#8977](https://github.com/yt-dlp/yt-dlp/issues/8977)) by [Arthurszzz](https://github.com/Arthurszzz), [bashonly](https://github.com/bashonly), [Grub4k](https://github.com/Grub4k), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
||||||
|
- **test**
|
||||||
|
- [Skip source address tests if the address cannot be bound to](https://github.com/yt-dlp/yt-dlp/commit/69d31914952dd33082ac7019c6f76b43c45b9d06) ([#8900](https://github.com/yt-dlp/yt-dlp/issues/8900)) by [coletdjnz](https://github.com/coletdjnz)
|
||||||
|
- websockets: [Fix timeout test on Windows](https://github.com/yt-dlp/yt-dlp/commit/ac340d0745a9de5d494033e3507ef624ba25add3) ([#9344](https://github.com/yt-dlp/yt-dlp/issues/9344)) by [seproDev](https://github.com/seproDev)
|
||||||
|
|
||||||
### 2023.12.30
|
### 2023.12.30
|
||||||
|
|
||||||
#### Core changes
|
#### Core changes
|
||||||
|
@ -1936,7 +2158,7 @@ Since Python 3.7 has reached end-of-life, support for it will be dropped soon. [
|
||||||
* [utils] `format_decimal_suffix`: Fix for very large numbers by [s0u1h](https://github.com/s0u1h)
|
* [utils] `format_decimal_suffix`: Fix for very large numbers by [s0u1h](https://github.com/s0u1h)
|
||||||
* [utils] `traverse_obj`: Allow filtering by value
|
* [utils] `traverse_obj`: Allow filtering by value
|
||||||
* [utils] Add `filter_dict`, `get_first`, `try_call`
|
* [utils] Add `filter_dict`, `get_first`, `try_call`
|
||||||
* [utils] ExtractorError: Fix for older python versions
|
* [utils] ExtractorError: Fix for older Python versions
|
||||||
* [utils] WebSocketsWrapper: Allow omitting `__enter__` invocation by [Lesmiscore](https://github.com/Lesmiscore)
|
* [utils] WebSocketsWrapper: Allow omitting `__enter__` invocation by [Lesmiscore](https://github.com/Lesmiscore)
|
||||||
* [docs] Add an `.editorconfig` file by [fstirlitz](https://github.com/fstirlitz)
|
* [docs] Add an `.editorconfig` file by [fstirlitz](https://github.com/fstirlitz)
|
||||||
* [docs] Clarify the exact `BSD` license of dependencies by [MrRawes](https://github.com/MrRawes)
|
* [docs] Clarify the exact `BSD` license of dependencies by [MrRawes](https://github.com/MrRawes)
|
||||||
|
@ -3400,7 +3622,7 @@ Since Python 3.7 has reached end-of-life, support for it will be dropped soon. [
|
||||||
* [cleanup] code formatting, youtube tests and readme
|
* [cleanup] code formatting, youtube tests and readme
|
||||||
|
|
||||||
### 2021.05.11
|
### 2021.05.11
|
||||||
* **Deprecate support for python versions < 3.6**
|
* **Deprecate support for Python versions < 3.6**
|
||||||
* **Subtitle extraction from manifests** by [fstirlitz](https://github.com/fstirlitz). See [be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
|
* **Subtitle extraction from manifests** by [fstirlitz](https://github.com/fstirlitz). See [be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
|
||||||
* **Improve output template:**
|
* **Improve output template:**
|
||||||
* Allow slicing lists/strings using `field.start:end:step`
|
* Allow slicing lists/strings using `field.start:end:step`
|
||||||
|
@ -3690,7 +3912,7 @@ Since Python 3.7 has reached end-of-life, support for it will be dropped soon. [
|
||||||
* Remove unnecessary `field_preference` and misuse of `preference` from extractors
|
* Remove unnecessary `field_preference` and misuse of `preference` from extractors
|
||||||
* Build improvements:
|
* Build improvements:
|
||||||
* Fix hash output by [shirt](https://github.com/shirt-dev)
|
* Fix hash output by [shirt](https://github.com/shirt-dev)
|
||||||
* Lock python package versions for x86 and use `wheels` by [shirt](https://github.com/shirt-dev)
|
* Lock Python package versions for x86 and use `wheels` by [shirt](https://github.com/shirt-dev)
|
||||||
* Exclude `vcruntime140.dll` from UPX by [jbruchon](https://github.com/jbruchon)
|
* Exclude `vcruntime140.dll` from UPX by [jbruchon](https://github.com/jbruchon)
|
||||||
* Set version number based on UTC time, not local time
|
* Set version number based on UTC time, not local time
|
||||||
* Publish on PyPi only if token is set
|
* Publish on PyPi only if token is set
|
||||||
|
@ -3757,7 +3979,7 @@ Since Python 3.7 has reached end-of-life, support for it will be dropped soon. [
|
||||||
* Fix "Default format spec" appearing in quiet mode
|
* Fix "Default format spec" appearing in quiet mode
|
||||||
* [FormatSort] Allow user to prefer av01 over vp9 (The default is still vp9)
|
* [FormatSort] Allow user to prefer av01 over vp9 (The default is still vp9)
|
||||||
* [FormatSort] fix bug where `quality` had more priority than `hasvid`
|
* [FormatSort] fix bug where `quality` had more priority than `hasvid`
|
||||||
* [pyinst] Automatically detect python architecture and working directory
|
* [pyinst] Automatically detect Python architecture and working directory
|
||||||
* Strip out internal fields such as `_filename` from infojson
|
* Strip out internal fields such as `_filename` from infojson
|
||||||
|
|
||||||
|
|
||||||
|
|
47
Makefile
47
Makefile
|
@ -2,7 +2,7 @@ all: lazy-extractors yt-dlp doc pypi-files
|
||||||
clean: clean-test clean-dist
|
clean: clean-test clean-dist
|
||||||
clean-all: clean clean-cache
|
clean-all: clean clean-cache
|
||||||
completions: completion-bash completion-fish completion-zsh
|
completions: completion-bash completion-fish completion-zsh
|
||||||
doc: README.md CONTRIBUTING.md issuetemplates supportedsites
|
doc: README.md CONTRIBUTING.md CONTRIBUTORS issuetemplates supportedsites
|
||||||
ot: offlinetest
|
ot: offlinetest
|
||||||
tar: yt-dlp.tar.gz
|
tar: yt-dlp.tar.gz
|
||||||
|
|
||||||
|
@ -17,8 +17,8 @@ pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites \
|
||||||
clean-test:
|
clean-test:
|
||||||
rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
|
rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
|
||||||
*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.meta *.part* *.tmp *.temp *.unknown_video *.ytdl \
|
*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.meta *.part* *.tmp *.temp *.unknown_video *.ytdl \
|
||||||
*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.gif *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 \
|
*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.gif *.jpeg *.jpg *.lrc *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 *.mp4 \
|
||||||
*.mp4 *.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
|
*.mpg *.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.ssa *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
|
||||||
clean-dist:
|
clean-dist:
|
||||||
rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
|
rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
|
||||||
yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS
|
yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS
|
||||||
|
@ -37,14 +37,15 @@ BINDIR ?= $(PREFIX)/bin
|
||||||
MANDIR ?= $(PREFIX)/man
|
MANDIR ?= $(PREFIX)/man
|
||||||
SHAREDIR ?= $(PREFIX)/share
|
SHAREDIR ?= $(PREFIX)/share
|
||||||
PYTHON ?= /usr/bin/env python3
|
PYTHON ?= /usr/bin/env python3
|
||||||
|
GNUTAR ?= tar
|
||||||
# $(shell) and $(error) are no-ops in BSD Make and the != variable assignment operator is not supported by GNU Make <4.0
|
|
||||||
VERSION_CHECK != echo supported
|
|
||||||
VERSION_CHECK ?= $(error GNU Make 4+ or BSD Make is required)
|
|
||||||
CHECK_VERSION := $(VERSION_CHECK)
|
|
||||||
|
|
||||||
# set markdown input format to "markdown-smart" for pandoc version 2+ and to "markdown" for pandoc prior to version 2
|
# set markdown input format to "markdown-smart" for pandoc version 2+ and to "markdown" for pandoc prior to version 2
|
||||||
MARKDOWN != if [ "`pandoc -v | head -n1 | cut -d' ' -f2 | head -c1`" -ge "2" ]; then echo markdown-smart; else echo markdown; fi
|
PANDOC_VERSION_CMD = pandoc -v 2>/dev/null | head -n1 | cut -d' ' -f2 | head -c1
|
||||||
|
PANDOC_VERSION != $(PANDOC_VERSION_CMD)
|
||||||
|
PANDOC_VERSION ?= $(shell $(PANDOC_VERSION_CMD))
|
||||||
|
MARKDOWN_CMD = if [ "$(PANDOC_VERSION)" = "1" -o "$(PANDOC_VERSION)" = "0" ]; then echo markdown; else echo markdown-smart; fi
|
||||||
|
MARKDOWN != $(MARKDOWN_CMD)
|
||||||
|
MARKDOWN ?= $(shell $(MARKDOWN_CMD))
|
||||||
|
|
||||||
install: lazy-extractors yt-dlp yt-dlp.1 completions
|
install: lazy-extractors yt-dlp yt-dlp.1 completions
|
||||||
mkdir -p $(DESTDIR)$(BINDIR)
|
mkdir -p $(DESTDIR)$(BINDIR)
|
||||||
|
@ -76,8 +77,12 @@ test:
|
||||||
offlinetest: codetest
|
offlinetest: codetest
|
||||||
$(PYTHON) -m pytest -k "not download"
|
$(PYTHON) -m pytest -k "not download"
|
||||||
|
|
||||||
CODE_FOLDERS != find yt_dlp -type f -name '__init__.py' -exec dirname {} \+ | grep -v '/__' | sort
|
CODE_FOLDERS_CMD = find yt_dlp -type f -name '__init__.py' | sed 's,/__init__.py,,' | grep -v '/__' | sort
|
||||||
CODE_FILES != for f in $(CODE_FOLDERS) ; do echo "$$f" | sed 's,$$,/*.py,' ; done
|
CODE_FOLDERS != $(CODE_FOLDERS_CMD)
|
||||||
|
CODE_FOLDERS ?= $(shell $(CODE_FOLDERS_CMD))
|
||||||
|
CODE_FILES_CMD = for f in $(CODE_FOLDERS) ; do echo "$$f" | sed 's,$$,/*.py,' ; done
|
||||||
|
CODE_FILES != $(CODE_FILES_CMD)
|
||||||
|
CODE_FILES ?= $(shell $(CODE_FILES_CMD))
|
||||||
yt-dlp: $(CODE_FILES)
|
yt-dlp: $(CODE_FILES)
|
||||||
mkdir -p zip
|
mkdir -p zip
|
||||||
for d in $(CODE_FOLDERS) ; do \
|
for d in $(CODE_FOLDERS) ; do \
|
||||||
|
@ -130,12 +135,14 @@ completions/fish/yt-dlp.fish: $(CODE_FILES) devscripts/fish-completion.in
|
||||||
mkdir -p completions/fish
|
mkdir -p completions/fish
|
||||||
$(PYTHON) devscripts/fish-completion.py
|
$(PYTHON) devscripts/fish-completion.py
|
||||||
|
|
||||||
_EXTRACTOR_FILES != find yt_dlp/extractor -name '*.py' -and -not -name 'lazy_extractors.py'
|
_EXTRACTOR_FILES_CMD = find yt_dlp/extractor -name '*.py' -and -not -name 'lazy_extractors.py'
|
||||||
|
_EXTRACTOR_FILES != $(_EXTRACTOR_FILES_CMD)
|
||||||
|
_EXTRACTOR_FILES ?= $(shell $(_EXTRACTOR_FILES_CMD))
|
||||||
yt_dlp/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
|
yt_dlp/extractor/lazy_extractors.py: devscripts/make_lazy_extractors.py devscripts/lazy_load_template.py $(_EXTRACTOR_FILES)
|
||||||
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
$(PYTHON) devscripts/make_lazy_extractors.py $@
|
||||||
|
|
||||||
yt-dlp.tar.gz: all
|
yt-dlp.tar.gz: all
|
||||||
@tar -czf yt-dlp.tar.gz --transform "s|^|yt-dlp/|" --owner 0 --group 0 \
|
@$(GNUTAR) -czf yt-dlp.tar.gz --transform "s|^|yt-dlp/|" --owner 0 --group 0 \
|
||||||
--exclude '*.DS_Store' \
|
--exclude '*.DS_Store' \
|
||||||
--exclude '*.kate-swp' \
|
--exclude '*.kate-swp' \
|
||||||
--exclude '*.pyc' \
|
--exclude '*.pyc' \
|
||||||
|
@ -144,12 +151,20 @@ yt-dlp.tar.gz: all
|
||||||
--exclude '__pycache__' \
|
--exclude '__pycache__' \
|
||||||
--exclude '.*_cache' \
|
--exclude '.*_cache' \
|
||||||
--exclude '.git' \
|
--exclude '.git' \
|
||||||
--exclude '__pyinstaller' \
|
|
||||||
-- \
|
-- \
|
||||||
README.md supportedsites.md Changelog.md LICENSE \
|
README.md supportedsites.md Changelog.md LICENSE \
|
||||||
CONTRIBUTING.md Collaborators.md CONTRIBUTORS AUTHORS \
|
CONTRIBUTING.md Collaborators.md CONTRIBUTORS AUTHORS \
|
||||||
Makefile yt-dlp.1 README.txt completions .gitignore \
|
Makefile yt-dlp.1 README.txt completions .gitignore \
|
||||||
yt-dlp yt_dlp pyproject.toml devscripts test
|
yt-dlp yt_dlp pyproject.toml devscripts test
|
||||||
|
|
||||||
AUTHORS:
|
AUTHORS: Changelog.md
|
||||||
git shortlog -s -n HEAD | cut -f2 | sort > AUTHORS
|
@if [ -d '.git' ] && command -v git > /dev/null ; then \
|
||||||
|
echo 'Generating $@ from git commit history' ; \
|
||||||
|
git shortlog -s -n HEAD | cut -f2 | sort > $@ ; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
CONTRIBUTORS: Changelog.md
|
||||||
|
@if [ -d '.git' ] && command -v git > /dev/null ; then \
|
||||||
|
echo 'Updating $@ from git commit history' ; \
|
||||||
|
$(PYTHON) devscripts/make_changelog.py -v -c > /dev/null ; \
|
||||||
|
fi
|
||||||
|
|
339
README.md
339
README.md
|
@ -22,12 +22,10 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t
|
||||||
<!-- MANPAGE: MOVE "USAGE AND OPTIONS" SECTION HERE -->
|
<!-- MANPAGE: MOVE "USAGE AND OPTIONS" SECTION HERE -->
|
||||||
|
|
||||||
<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
|
<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
|
||||||
* [NEW FEATURES](#new-features)
|
|
||||||
* [Differences in default behavior](#differences-in-default-behavior)
|
|
||||||
* [INSTALLATION](#installation)
|
* [INSTALLATION](#installation)
|
||||||
* [Detailed instructions](https://github.com/yt-dlp/yt-dlp/wiki/Installation)
|
* [Detailed instructions](https://github.com/yt-dlp/yt-dlp/wiki/Installation)
|
||||||
* [Update](#update)
|
|
||||||
* [Release Files](#release-files)
|
* [Release Files](#release-files)
|
||||||
|
* [Update](#update)
|
||||||
* [Dependencies](#dependencies)
|
* [Dependencies](#dependencies)
|
||||||
* [Compile](#compile)
|
* [Compile](#compile)
|
||||||
* [USAGE AND OPTIONS](#usage-and-options)
|
* [USAGE AND OPTIONS](#usage-and-options)
|
||||||
|
@ -65,7 +63,10 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t
|
||||||
* [Developing Plugins](#developing-plugins)
|
* [Developing Plugins](#developing-plugins)
|
||||||
* [EMBEDDING YT-DLP](#embedding-yt-dlp)
|
* [EMBEDDING YT-DLP](#embedding-yt-dlp)
|
||||||
* [Embedding examples](#embedding-examples)
|
* [Embedding examples](#embedding-examples)
|
||||||
* [DEPRECATED OPTIONS](#deprecated-options)
|
* [CHANGES FROM YOUTUBE-DL](#changes-from-youtube-dl)
|
||||||
|
* [New features](#new-features)
|
||||||
|
* [Differences in default behavior](#differences-in-default-behavior)
|
||||||
|
* [Deprecated options](#deprecated-options)
|
||||||
* [CONTRIBUTING](CONTRIBUTING.md#contributing-to-yt-dlp)
|
* [CONTRIBUTING](CONTRIBUTING.md#contributing-to-yt-dlp)
|
||||||
* [Opening an Issue](CONTRIBUTING.md#opening-an-issue)
|
* [Opening an Issue](CONTRIBUTING.md#opening-an-issue)
|
||||||
* [Developer Instructions](CONTRIBUTING.md#developer-instructions)
|
* [Developer Instructions](CONTRIBUTING.md#developer-instructions)
|
||||||
|
@ -74,103 +75,6 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t
|
||||||
<!-- MANPAGE: END EXCLUDED SECTION -->
|
<!-- MANPAGE: END EXCLUDED SECTION -->
|
||||||
|
|
||||||
|
|
||||||
# NEW FEATURES
|
|
||||||
|
|
||||||
* Forked from [**yt-dlc@f9401f2**](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee) and merged with [**youtube-dl@be008e6**](https://github.com/ytdl-org/youtube-dl/commit/be008e657d79832642e2158557c899249c9e31cd) ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))
|
|
||||||
|
|
||||||
* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API
|
|
||||||
|
|
||||||
* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will be now preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples))
|
|
||||||
|
|
||||||
* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that NicoNico livestreams are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.
|
|
||||||
|
|
||||||
* **YouTube improvements**:
|
|
||||||
* Supports Clips, Stories (`ytstories:<channel UCID>`), Search (including filters)**\***, YouTube Music Search, Channel-specific search, Search prefixes (`ytsearch:`, `ytsearchdate:`)**\***, Mixes, and Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`, `:ytnotif`)
|
|
||||||
* Fix for [n-sig based throttling](https://github.com/ytdl-org/youtube-dl/issues/29326) **\***
|
|
||||||
* Supports some (but not all) age-gated content without cookies
|
|
||||||
* Download livestreams from the start using `--live-from-start` (*experimental*)
|
|
||||||
* Channel URLs download all uploads of the channel, including shorts and live
|
|
||||||
|
|
||||||
* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]`
|
|
||||||
|
|
||||||
* **Download time range**: Videos can be downloaded partially based on either timestamps or chapters using `--download-sections`
|
|
||||||
|
|
||||||
* **Split video by chapters**: Videos can be split into multiple files based on chapters using `--split-chapters`
|
|
||||||
|
|
||||||
* **Multi-threaded fragment downloads**: Download multiple fragments of m3u8/mpd videos in parallel. Use `--concurrent-fragments` (`-N`) option to set the number of threads used
|
|
||||||
|
|
||||||
* **Aria2c with HLS/DASH**: You can use `aria2c` as the external downloader for DASH(mpd) and HLS(m3u8) formats
|
|
||||||
|
|
||||||
* **New and fixed extractors**: Many new extractors have been added and a lot of existing ones have been fixed. See the [changelog](Changelog.md) or the [list of supported sites](supportedsites.md)
|
|
||||||
|
|
||||||
* **New MSOs**: Philo, Spectrum, SlingTV, Cablevision, RCN etc.
|
|
||||||
|
|
||||||
* **Subtitle extraction from manifests**: Subtitles can be extracted from streaming media manifests. See [commit/be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
|
|
||||||
|
|
||||||
* **Multiple paths and output templates**: You can give different [output templates](#output-template) and download paths for different types of files. You can also set a temporary path where intermediary files are downloaded to using `--paths` (`-P`)
|
|
||||||
|
|
||||||
* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [CONFIGURATION](#configuration) for details
|
|
||||||
|
|
||||||
* **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata`
|
|
||||||
|
|
||||||
* **Other new options**: Many new options have been added such as `--alias`, `--print`, `--concat-playlist`, `--wait-for-video`, `--retry-sleep`, `--sleep-requests`, `--convert-thumbnails`, `--force-download-archive`, `--force-overwrites`, `--break-match-filter` etc
|
|
||||||
|
|
||||||
* **Improvements**: Regex and other operators in `--format`/`--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio, multiple `--config-locations`, `--exec` at different stages, etc
|
|
||||||
|
|
||||||
* **Plugins**: Extractors and PostProcessors can be loaded from an external file. See [plugins](#plugins) for details
|
|
||||||
|
|
||||||
* **Self updater**: The releases can be updated using `yt-dlp -U`, and downgraded using `--update-to` if required
|
|
||||||
|
|
||||||
* **Automated builds**: [Nightly/master builds](#update-channels) can be used with `--update-to nightly` and `--update-to master`
|
|
||||||
|
|
||||||
See [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/commits) for the full list of changes
|
|
||||||
|
|
||||||
Features marked with a **\*** have been back-ported to youtube-dl
|
|
||||||
|
|
||||||
### Differences in default behavior
|
|
||||||
|
|
||||||
Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc:
|
|
||||||
|
|
||||||
* yt-dlp supports only [Python 3.8+](## "Windows 7"), and *may* remove support for more versions as they [become EOL](https://devguide.python.org/versions/#python-release-cycle); while [youtube-dl still supports Python 2.6+ and 3.2+](https://github.com/ytdl-org/youtube-dl/issues/30568#issue-1118238743)
|
|
||||||
* The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`) no longer work. See [removed options](#Removed) for details
|
|
||||||
* `avconv` is not supported as an alternative to `ffmpeg`
|
|
||||||
* yt-dlp stores config files in slightly different locations to youtube-dl. See [CONFIGURATION](#configuration) for a list of correct locations
|
|
||||||
* The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s-%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
|
|
||||||
* The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
|
|
||||||
* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
|
|
||||||
* Unlike youtube-dlc, yt-dlp does not allow merging multiple audio/video streams into one file by default (since this conflicts with the use of `-f bv*+ba`). If needed, this feature must be enabled using `--audio-multistreams` and `--video-multistreams`. You can also use `--compat-options multistreams` to enable both
|
|
||||||
* `--no-abort-on-error` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead
|
|
||||||
* When writing metadata files such as thumbnails, description or infojson, the same information (if available) is also written for playlists. Use `--no-write-playlist-metafiles` or `--compat-options no-playlist-metafiles` to not write these files
|
|
||||||
* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-info-json`. Use `--no-embed-info-json` or `--compat-options no-attach-info-json` to revert this
|
|
||||||
* Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this
|
|
||||||
* `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
|
|
||||||
* The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
|
|
||||||
* Live chats (if available) are considered as subtitles. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent any live chat/danmaku from downloading
|
|
||||||
* YouTube channel URLs download all uploads of the channel. To download only the videos in a specific tab, pass the tab's URL. If the channel does not show the requested tab, an error will be raised. Also, `/live` URLs raise an error if there are no live videos instead of silently downloading the entire channel. You may use `--compat-options no-youtube-channel-redirect` to revert all these redirections
|
|
||||||
* Unavailable videos are also listed for YouTube playlists. Use `--compat-options no-youtube-unavailable-videos` to remove this
|
|
||||||
* The upload dates extracted from YouTube are in UTC [when available](https://github.com/yt-dlp/yt-dlp/blob/89e4d86171c7b7c997c77d4714542e0383bf0db0/yt_dlp/extractor/youtube.py#L3898-L3900). Use `--compat-options no-youtube-prefer-utc-upload-date` to prefer the non-UTC upload date.
|
|
||||||
* If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
|
|
||||||
* Thumbnail embedding in `mp4` is done with mutagen if possible. Use `--compat-options embed-thumbnail-atomicparsley` to force the use of AtomicParsley instead
|
|
||||||
* Some internal metadata such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this
|
|
||||||
* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this
|
|
||||||
* `certifi` will be used for SSL root certificates, if installed. If you want to use system certificates (e.g. self-signed), use `--compat-options no-certifi`
|
|
||||||
* yt-dlp's sanitization of invalid characters in filenames is different/smarter than in youtube-dl. You can use `--compat-options filename-sanitization` to revert to youtube-dl's behavior
|
|
||||||
* yt-dlp tries to parse the external downloader outputs into the standard progress output if possible (Currently implemented: [~~aria2c~~](https://github.com/yt-dlp/yt-dlp/issues/5931)). You can use `--compat-options no-external-downloader-progress` to get the downloader output as-is
|
|
||||||
* yt-dlp versions between 2021.09.01 and 2023.01.02 applied `--match-filter` to nested playlists. This was an unintentional side-effect of [8f18ac](https://github.com/yt-dlp/yt-dlp/commit/8f18aca8717bb0dd49054555af8d386e5eda3a88) and is fixed in [d7b460](https://github.com/yt-dlp/yt-dlp/commit/d7b460d0e5fc710950582baed2e3fc616ed98a80). Use `--compat-options playlist-match-filter` to revert this
|
|
||||||
* yt-dlp versions between 2021.11.10 and 2023.06.21 estimated `filesize_approx` values for fragmented/manifest formats. This was added for convenience in [f2fe69](https://github.com/yt-dlp/yt-dlp/commit/f2fe69c7b0d208bdb1f6292b4ae92bc1e1a7444a), but was reverted in [0dff8e](https://github.com/yt-dlp/yt-dlp/commit/0dff8e4d1e6e9fb938f4256ea9af7d81f42fd54f) due to the potentially extreme inaccuracy of the estimated values. Use `--compat-options manifest-filesize-approx` to keep extracting the estimated values
|
|
||||||
* yt-dlp uses modern http client backends such as `requests`. Use `--compat-options prefer-legacy-http-handler` to prefer the legacy http handler (`urllib`) to be used for standard http requests.
|
|
||||||
* The sub-module `swfinterp` is removed.
|
|
||||||
|
|
||||||
For ease of use, a few more compat options are available:
|
|
||||||
|
|
||||||
* `--compat-options all`: Use all compat options (Do NOT use)
|
|
||||||
* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter,-manifest-filesize-approx`
|
|
||||||
* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter,-manifest-filesize-approx`
|
|
||||||
* `--compat-options 2021`: Same as `--compat-options 2022,no-certifi,filename-sanitization,no-youtube-prefer-utc-upload-date`
|
|
||||||
* `--compat-options 2022`: Same as `--compat-options 2023,playlist-match-filter,no-external-downloader-progress`
|
|
||||||
* `--compat-options 2023`: Same as `--compat-options prefer-legacy-http-handler,manifest-filesize-approx`. Use this to enable all future compat options
|
|
||||||
|
|
||||||
|
|
||||||
# INSTALLATION
|
# INSTALLATION
|
||||||
|
|
||||||
<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
|
<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
|
||||||
|
@ -186,41 +90,6 @@ For ease of use, a few more compat options are available:
|
||||||
You can install yt-dlp using [the binaries](#release-files), [pip](https://pypi.org/project/yt-dlp) or one using a third-party package manager. See [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation) for detailed instructions
|
You can install yt-dlp using [the binaries](#release-files), [pip](https://pypi.org/project/yt-dlp) or one using a third-party package manager. See [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation) for detailed instructions
|
||||||
|
|
||||||
|
|
||||||
## UPDATE
|
|
||||||
You can use `yt-dlp -U` to update if you are using the [release binaries](#release-files)
|
|
||||||
|
|
||||||
If you [installed with pip](https://github.com/yt-dlp/yt-dlp/wiki/Installation#with-pip), simply re-run the same command that was used to install the program
|
|
||||||
|
|
||||||
For other third-party package managers, see [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation#third-party-package-managers) or refer to their documentation
|
|
||||||
|
|
||||||
<a id="update-channels"></a>
|
|
||||||
|
|
||||||
There are currently three release channels for binaries: `stable`, `nightly` and `master`.
|
|
||||||
|
|
||||||
* `stable` is the default channel, and many of its changes have been tested by users of the `nightly` and `master` channels.
|
|
||||||
* The `nightly` channel has releases scheduled to build every day around midnight UTC, for a snapshot of the project's new patches and changes. This is the **recommended channel for regular users** of yt-dlp. The `nightly` releases are available from [yt-dlp/yt-dlp-nightly-builds](https://github.com/yt-dlp/yt-dlp-nightly-builds/releases) or as development releases of the `yt-dlp` PyPI package (which can be installed with pip's `--pre` flag).
|
|
||||||
* The `master` channel features releases that are built after each push to the master branch, and these will have the very latest fixes and additions, but may also be more prone to regressions. They are available from [yt-dlp/yt-dlp-master-builds](https://github.com/yt-dlp/yt-dlp-master-builds/releases).
|
|
||||||
|
|
||||||
When using `--update`/`-U`, a release binary will only update to its current channel.
|
|
||||||
`--update-to CHANNEL` can be used to switch to a different channel when a newer version is available. `--update-to [CHANNEL@]TAG` can also be used to upgrade or downgrade to specific tags from a channel.
|
|
||||||
|
|
||||||
You may also use `--update-to <repository>` (`<owner>/<repository>`) to update to a channel on a completely different repository. Be careful with what repository you are updating to though, there is no verification done for binaries from different repositories.
|
|
||||||
|
|
||||||
Example usage:
|
|
||||||
* `yt-dlp --update-to master` switch to the `master` channel and update to its latest release
|
|
||||||
* `yt-dlp --update-to stable@2023.07.06` upgrade/downgrade to release to `stable` channel tag `2023.07.06`
|
|
||||||
* `yt-dlp --update-to 2023.10.07` upgrade/downgrade to tag `2023.10.07` if it exists on the current channel
|
|
||||||
* `yt-dlp --update-to example/yt-dlp@2023.09.24` upgrade/downgrade to the release from the `example/yt-dlp` repository, tag `2023.09.24`
|
|
||||||
|
|
||||||
**Important**: Any user experiencing an issue with the `stable` release should install or update to the `nightly` release before submitting a bug report:
|
|
||||||
```
|
|
||||||
# To update to nightly from stable executable/binary:
|
|
||||||
yt-dlp --update-to nightly
|
|
||||||
|
|
||||||
# To install nightly with pip:
|
|
||||||
python -m pip install -U --pre yt-dlp
|
|
||||||
```
|
|
||||||
|
|
||||||
<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
|
<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
|
||||||
## RELEASE FILES
|
## RELEASE FILES
|
||||||
|
|
||||||
|
@ -236,7 +105,7 @@ File|Description
|
||||||
|
|
||||||
File|Description
|
File|Description
|
||||||
:---|:---
|
:---|:---
|
||||||
[yt-dlp_x86.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_x86.exe)|Windows (Vista SP2+) standalone x86 (32-bit) binary
|
[yt-dlp_x86.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_x86.exe)|Windows (Win7 SP1+) standalone x86 (32-bit) binary
|
||||||
[yt-dlp_min.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_min.exe)|Windows (Win7 SP1+) standalone x64 binary built with `py2exe`<br/> ([Not recommended](#standalone-py2exe-builds-windows))
|
[yt-dlp_min.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_min.exe)|Windows (Win7 SP1+) standalone x64 binary built with `py2exe`<br/> ([Not recommended](#standalone-py2exe-builds-windows))
|
||||||
[yt-dlp_linux](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux)|Linux standalone x64 binary
|
[yt-dlp_linux](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux)|Linux standalone x64 binary
|
||||||
[yt-dlp_linux.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux.zip)|Unpackaged Linux executable (no auto-update)
|
[yt-dlp_linux.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_linux.zip)|Unpackaged Linux executable (no auto-update)
|
||||||
|
@ -267,6 +136,42 @@ gpg --verify SHA2-512SUMS.sig SHA2-512SUMS
|
||||||
|
|
||||||
**Note**: The manpages, shell completion (autocomplete) files etc. are available inside the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)
|
**Note**: The manpages, shell completion (autocomplete) files etc. are available inside the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)
|
||||||
|
|
||||||
|
|
||||||
|
## UPDATE
|
||||||
|
You can use `yt-dlp -U` to update if you are using the [release binaries](#release-files)
|
||||||
|
|
||||||
|
If you [installed with pip](https://github.com/yt-dlp/yt-dlp/wiki/Installation#with-pip), simply re-run the same command that was used to install the program
|
||||||
|
|
||||||
|
For other third-party package managers, see [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation#third-party-package-managers) or refer to their documentation
|
||||||
|
|
||||||
|
<a id="update-channels"></a>
|
||||||
|
|
||||||
|
There are currently three release channels for binaries: `stable`, `nightly` and `master`.
|
||||||
|
|
||||||
|
* `stable` is the default channel, and many of its changes have been tested by users of the `nightly` and `master` channels.
|
||||||
|
* The `nightly` channel has releases scheduled to build every day around midnight UTC, for a snapshot of the project's new patches and changes. This is the **recommended channel for regular users** of yt-dlp. The `nightly` releases are available from [yt-dlp/yt-dlp-nightly-builds](https://github.com/yt-dlp/yt-dlp-nightly-builds/releases) or as development releases of the `yt-dlp` PyPI package (which can be installed with pip's `--pre` flag).
|
||||||
|
* The `master` channel features releases that are built after each push to the master branch, and these will have the very latest fixes and additions, but may also be more prone to regressions. They are available from [yt-dlp/yt-dlp-master-builds](https://github.com/yt-dlp/yt-dlp-master-builds/releases).
|
||||||
|
|
||||||
|
When using `--update`/`-U`, a release binary will only update to its current channel.
|
||||||
|
`--update-to CHANNEL` can be used to switch to a different channel when a newer version is available. `--update-to [CHANNEL@]TAG` can also be used to upgrade or downgrade to specific tags from a channel.
|
||||||
|
|
||||||
|
You may also use `--update-to <repository>` (`<owner>/<repository>`) to update to a channel on a completely different repository. Be careful with what repository you are updating to though, there is no verification done for binaries from different repositories.
|
||||||
|
|
||||||
|
Example usage:
|
||||||
|
* `yt-dlp --update-to master` switch to the `master` channel and update to its latest release
|
||||||
|
* `yt-dlp --update-to stable@2023.07.06` upgrade/downgrade to release to `stable` channel tag `2023.07.06`
|
||||||
|
* `yt-dlp --update-to 2023.10.07` upgrade/downgrade to tag `2023.10.07` if it exists on the current channel
|
||||||
|
* `yt-dlp --update-to example/yt-dlp@2023.09.24` upgrade/downgrade to the release from the `example/yt-dlp` repository, tag `2023.09.24`
|
||||||
|
|
||||||
|
**Important**: Any user experiencing an issue with the `stable` release should install or update to the `nightly` release before submitting a bug report:
|
||||||
|
```
|
||||||
|
# To update to nightly from stable executable/binary:
|
||||||
|
yt-dlp --update-to nightly
|
||||||
|
|
||||||
|
# To install nightly with pip:
|
||||||
|
python3 -m pip install -U --pre yt-dlp[default]
|
||||||
|
```
|
||||||
|
|
||||||
## DEPENDENCIES
|
## DEPENDENCIES
|
||||||
Python versions 3.8+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly.
|
Python versions 3.8+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly.
|
||||||
|
|
||||||
|
@ -283,7 +188,7 @@ While all the other dependencies are optional, `ffmpeg` and `ffprobe` are highly
|
||||||
|
|
||||||
There are bugs in ffmpeg that cause various issues when used alongside yt-dlp. Since ffmpeg is such an important dependency, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds#ffmpeg-static-auto-builds) with patches for some of these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specific issues solved by these builds
|
There are bugs in ffmpeg that cause various issues when used alongside yt-dlp. Since ffmpeg is such an important dependency, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds#ffmpeg-static-auto-builds) with patches for some of these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specific issues solved by these builds
|
||||||
|
|
||||||
**Important**: What you need is ffmpeg *binary*, **NOT** [the python package of the same name](https://pypi.org/project/ffmpeg)
|
**Important**: What you need is ffmpeg *binary*, **NOT** [the Python package of the same name](https://pypi.org/project/ffmpeg)
|
||||||
|
|
||||||
### Networking
|
### Networking
|
||||||
* [**certifi**](https://github.com/certifi/python-certifi)\* - Provides Mozilla's root certificate bundle. Licensed under [MPLv2](https://github.com/certifi/python-certifi/blob/master/LICENSE)
|
* [**certifi**](https://github.com/certifi/python-certifi)\* - Provides Mozilla's root certificate bundle. Licensed under [MPLv2](https://github.com/certifi/python-certifi/blob/master/LICENSE)
|
||||||
|
@ -321,7 +226,9 @@ If you do not have the necessary dependencies for a task you are attempting, yt-
|
||||||
## COMPILE
|
## COMPILE
|
||||||
|
|
||||||
### Standalone PyInstaller Builds
|
### Standalone PyInstaller Builds
|
||||||
To build the standalone executable, you must have Python and `pyinstaller` (plus any of yt-dlp's [optional dependencies](#dependencies) if needed). The executable will be built for the same architecture (x86/ARM, 32/64 bit) as the Python used. You can run the following commands:
|
To build the standalone executable, you must have Python and `pyinstaller` (plus any of yt-dlp's [optional dependencies](#dependencies) if needed). The executable will be built for the same CPU architecture as the Python used.
|
||||||
|
|
||||||
|
You can run the following commands:
|
||||||
|
|
||||||
```
|
```
|
||||||
python3 devscripts/install_deps.py --include pyinstaller
|
python3 devscripts/install_deps.py --include pyinstaller
|
||||||
|
@ -331,11 +238,11 @@ python3 -m bundle.pyinstaller
|
||||||
|
|
||||||
On some systems, you may need to use `py` or `python` instead of `python3`.
|
On some systems, you may need to use `py` or `python` instead of `python3`.
|
||||||
|
|
||||||
`bundle/pyinstaller.py` accepts any arguments that can be passed to `pyinstaller`, such as `--onefile/-F` or `--onedir/-D`, which is further [documented here](https://pyinstaller.org/en/stable/usage.html#what-to-generate).
|
`python -m bundle.pyinstaller` accepts any arguments that can be passed to `pyinstaller`, such as `--onefile/-F` or `--onedir/-D`, which is further [documented here](https://pyinstaller.org/en/stable/usage.html#what-to-generate).
|
||||||
|
|
||||||
**Note**: Pyinstaller versions below 4.4 [do not support](https://github.com/pyinstaller/pyinstaller#requirements-and-tested-platforms) Python installed from the Windows store without using a virtual environment.
|
**Note**: Pyinstaller versions below 4.4 [do not support](https://github.com/pyinstaller/pyinstaller#requirements-and-tested-platforms) Python installed from the Windows store without using a virtual environment.
|
||||||
|
|
||||||
**Important**: Running `pyinstaller` directly **without** using `bundle/pyinstaller.py` is **not** officially supported. This may or may not work correctly.
|
**Important**: Running `pyinstaller` directly **instead of** using `python -m bundle.pyinstaller` is **not** officially supported. This may or may not work correctly.
|
||||||
|
|
||||||
### Platform-independent Binary (UNIX)
|
### Platform-independent Binary (UNIX)
|
||||||
You will need the build tools `python` (3.8+), `zip`, `make` (GNU), `pandoc`\* and `pytest`\*.
|
You will need the build tools `python` (3.8+), `zip`, `make` (GNU), `pandoc`\* and `pytest`\*.
|
||||||
|
@ -418,7 +325,7 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
|
||||||
URLs, but emits an error if this is not
|
URLs, but emits an error if this is not
|
||||||
possible instead of searching
|
possible instead of searching
|
||||||
--ignore-config Don't load any more configuration files
|
--ignore-config Don't load any more configuration files
|
||||||
except those given by --config-locations.
|
except those given to --config-locations.
|
||||||
For backward compatibility, if this option
|
For backward compatibility, if this option
|
||||||
is found inside the system configuration
|
is found inside the system configuration
|
||||||
file, the user configuration is not loaded.
|
file, the user configuration is not loaded.
|
||||||
|
@ -683,7 +590,7 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
|
||||||
-o, --output [TYPES:]TEMPLATE Output filename template; see "OUTPUT
|
-o, --output [TYPES:]TEMPLATE Output filename template; see "OUTPUT
|
||||||
TEMPLATE" for details
|
TEMPLATE" for details
|
||||||
--output-na-placeholder TEXT Placeholder for unavailable fields in
|
--output-na-placeholder TEXT Placeholder for unavailable fields in
|
||||||
"OUTPUT TEMPLATE" (default: "NA")
|
--output (default: "NA")
|
||||||
--restrict-filenames Restrict filenames to only ASCII characters,
|
--restrict-filenames Restrict filenames to only ASCII characters,
|
||||||
and avoid "&" and spaces in filenames
|
and avoid "&" and spaces in filenames
|
||||||
--no-restrict-filenames Allow Unicode characters, "&" and spaces in
|
--no-restrict-filenames Allow Unicode characters, "&" and spaces in
|
||||||
|
@ -1172,12 +1079,12 @@ Make chapter entries for, or remove various segments (sponsor,
|
||||||
You can configure yt-dlp by placing any supported command line option to a configuration file. The configuration is loaded from the following locations:
|
You can configure yt-dlp by placing any supported command line option to a configuration file. The configuration is loaded from the following locations:
|
||||||
|
|
||||||
1. **Main Configuration**:
|
1. **Main Configuration**:
|
||||||
* The file given by `--config-location`
|
* The file given to `--config-location`
|
||||||
1. **Portable Configuration**: (Recommended for portable installations)
|
1. **Portable Configuration**: (Recommended for portable installations)
|
||||||
* If using a binary, `yt-dlp.conf` in the same directory as the binary
|
* If using a binary, `yt-dlp.conf` in the same directory as the binary
|
||||||
* If running from source-code, `yt-dlp.conf` in the parent directory of `yt_dlp`
|
* If running from source-code, `yt-dlp.conf` in the parent directory of `yt_dlp`
|
||||||
1. **Home Configuration**:
|
1. **Home Configuration**:
|
||||||
* `yt-dlp.conf` in the home path given by `-P`
|
* `yt-dlp.conf` in the home path given to `-P`
|
||||||
* If `-P` is not given, the current directory is searched
|
* If `-P` is not given, the current directory is searched
|
||||||
1. **User Configuration**:
|
1. **User Configuration**:
|
||||||
* `${XDG_CONFIG_HOME}/yt-dlp.conf`
|
* `${XDG_CONFIG_HOME}/yt-dlp.conf`
|
||||||
|
@ -1296,7 +1203,7 @@ To summarize, the general syntax for a field is:
|
||||||
|
|
||||||
Additionally, you can set different output templates for the various metadata files separately from the general output template by specifying the type of file followed by the template separated by a colon `:`. The different file types supported are `subtitle`, `thumbnail`, `description`, `annotation` (deprecated), `infojson`, `link`, `pl_thumbnail`, `pl_description`, `pl_infojson`, `chapter`, `pl_video`. E.g. `-o "%(title)s.%(ext)s" -o "thumbnail:%(title)s\%(title)s.%(ext)s"` will put the thumbnails in a folder with the same name as the video. If any of the templates is empty, that type of file will not be written. E.g. `--write-thumbnail -o "thumbnail:"` will write thumbnails only for playlists and not for video.
|
Additionally, you can set different output templates for the various metadata files separately from the general output template by specifying the type of file followed by the template separated by a colon `:`. The different file types supported are `subtitle`, `thumbnail`, `description`, `annotation` (deprecated), `infojson`, `link`, `pl_thumbnail`, `pl_description`, `pl_infojson`, `chapter`, `pl_video`. E.g. `-o "%(title)s.%(ext)s" -o "thumbnail:%(title)s\%(title)s.%(ext)s"` will put the thumbnails in a folder with the same name as the video. If any of the templates is empty, that type of file will not be written. E.g. `--write-thumbnail -o "thumbnail:"` will write thumbnails only for playlists and not for video.
|
||||||
|
|
||||||
<a id="outtmpl-postprocess-note"/>
|
<a id="outtmpl-postprocess-note"></a>
|
||||||
|
|
||||||
**Note**: Due to post-processing (i.e. merging etc.), the actual output filename might differ. Use `--print after_move:filepath` to get the name after all post-processing is complete.
|
**Note**: Due to post-processing (i.e. merging etc.), the actual output filename might differ. Use `--print after_move:filepath` to get the name after all post-processing is complete.
|
||||||
|
|
||||||
|
@ -1310,8 +1217,11 @@ The available fields are:
|
||||||
- `description` (string): The description of the video
|
- `description` (string): The description of the video
|
||||||
- `display_id` (string): An alternative identifier for the video
|
- `display_id` (string): An alternative identifier for the video
|
||||||
- `uploader` (string): Full name of the video uploader
|
- `uploader` (string): Full name of the video uploader
|
||||||
|
- `uploader_id` (string): Nickname or id of the video uploader
|
||||||
|
- `uploader_url` (string): URL to the video uploader's profile
|
||||||
- `license` (string): License name the video is licensed under
|
- `license` (string): License name the video is licensed under
|
||||||
- `creator` (string): The creator of the video
|
- `creators` (list): The creators of the video
|
||||||
|
- `creator` (string): The creators of the video; comma-separated
|
||||||
- `timestamp` (numeric): UNIX timestamp of the moment the video became available
|
- `timestamp` (numeric): UNIX timestamp of the moment the video became available
|
||||||
- `upload_date` (string): Video upload date in UTC (YYYYMMDD)
|
- `upload_date` (string): Video upload date in UTC (YYYYMMDD)
|
||||||
- `release_timestamp` (numeric): UNIX timestamp of the moment the video was released
|
- `release_timestamp` (numeric): UNIX timestamp of the moment the video was released
|
||||||
|
@ -1319,9 +1229,9 @@ The available fields are:
|
||||||
- `release_year` (numeric): Year (YYYY) when the video or album was released
|
- `release_year` (numeric): Year (YYYY) when the video or album was released
|
||||||
- `modified_timestamp` (numeric): UNIX timestamp of the moment the video was last modified
|
- `modified_timestamp` (numeric): UNIX timestamp of the moment the video was last modified
|
||||||
- `modified_date` (string): The date (YYYYMMDD) when the video was last modified in UTC
|
- `modified_date` (string): The date (YYYYMMDD) when the video was last modified in UTC
|
||||||
- `uploader_id` (string): Nickname or id of the video uploader
|
|
||||||
- `channel` (string): Full name of the channel the video is uploaded on
|
- `channel` (string): Full name of the channel the video is uploaded on
|
||||||
- `channel_id` (string): Id of the channel
|
- `channel_id` (string): Id of the channel
|
||||||
|
- `channel_url` (string): URL of the channel
|
||||||
- `channel_follower_count` (numeric): Number of followers of the channel
|
- `channel_follower_count` (numeric): Number of followers of the channel
|
||||||
- `channel_is_verified` (boolean): Whether the channel is verified on the platform
|
- `channel_is_verified` (boolean): Whether the channel is verified on the platform
|
||||||
- `location` (string): Physical location where the video was filmed
|
- `location` (string): Physical location where the video was filmed
|
||||||
|
@ -1361,7 +1271,10 @@ The available fields are:
|
||||||
- `webpage_url_basename` (string): The basename of the webpage URL
|
- `webpage_url_basename` (string): The basename of the webpage URL
|
||||||
- `webpage_url_domain` (string): The domain of the webpage URL
|
- `webpage_url_domain` (string): The domain of the webpage URL
|
||||||
- `original_url` (string): The URL given by the user (or same as `webpage_url` for playlist entries)
|
- `original_url` (string): The URL given by the user (or same as `webpage_url` for playlist entries)
|
||||||
|
- `categories` (list): List of categories the video belongs to
|
||||||
|
- `tags` (list): List of tags assigned to the video
|
||||||
|
- `cast` (list): List of cast members
|
||||||
|
|
||||||
All the fields in [Filtering Formats](#filtering-formats) can also be used
|
All the fields in [Filtering Formats](#filtering-formats) can also be used
|
||||||
|
|
||||||
Available for the video that belongs to some logical chapter or section:
|
Available for the video that belongs to some logical chapter or section:
|
||||||
|
@ -1373,6 +1286,7 @@ Available for the video that belongs to some logical chapter or section:
|
||||||
Available for the video that is an episode of some series or programme:
|
Available for the video that is an episode of some series or programme:
|
||||||
|
|
||||||
- `series` (string): Title of the series or programme the video episode belongs to
|
- `series` (string): Title of the series or programme the video episode belongs to
|
||||||
|
- `series_id` (string): Id of the series or programme the video episode belongs to
|
||||||
- `season` (string): Title of the season the video episode belongs to
|
- `season` (string): Title of the season the video episode belongs to
|
||||||
- `season_number` (numeric): Number of the season the video episode belongs to
|
- `season_number` (numeric): Number of the season the video episode belongs to
|
||||||
- `season_id` (string): Id of the season the video episode belongs to
|
- `season_id` (string): Id of the season the video episode belongs to
|
||||||
|
@ -1385,11 +1299,16 @@ Available for the media that is a track or a part of a music album:
|
||||||
- `track` (string): Title of the track
|
- `track` (string): Title of the track
|
||||||
- `track_number` (numeric): Number of the track within an album or a disc
|
- `track_number` (numeric): Number of the track within an album or a disc
|
||||||
- `track_id` (string): Id of the track
|
- `track_id` (string): Id of the track
|
||||||
- `artist` (string): Artist(s) of the track
|
- `artists` (list): Artist(s) of the track
|
||||||
- `genre` (string): Genre(s) of the track
|
- `artist` (string): Artist(s) of the track; comma-separated
|
||||||
|
- `genres` (list): Genre(s) of the track
|
||||||
|
- `genre` (string): Genre(s) of the track; comma-separated
|
||||||
|
- `composers` (list): Composer(s) of the piece
|
||||||
|
- `composer` (string): Composer(s) of the piece; comma-separated
|
||||||
- `album` (string): Title of the album the track belongs to
|
- `album` (string): Title of the album the track belongs to
|
||||||
- `album_type` (string): Type of the album
|
- `album_type` (string): Type of the album
|
||||||
- `album_artist` (string): List of all artists appeared on the album
|
- `album_artists` (list): All artists appeared on the album
|
||||||
|
- `album_artist` (string): All artists appeared on the album; comma-separated
|
||||||
- `disc_number` (numeric): Number of the disc or other physical medium the track belongs to
|
- `disc_number` (numeric): Number of the disc or other physical medium the track belongs to
|
||||||
|
|
||||||
Available only when using `--download-sections` and for `chapter:` prefix when using `--split-chapters` for videos with internal chapters:
|
Available only when using `--download-sections` and for `chapter:` prefix when using `--split-chapters` for videos with internal chapters:
|
||||||
|
@ -1744,9 +1663,9 @@ $ yt-dlp -S "+res:480,codec,br"
|
||||||
|
|
||||||
The metadata obtained by the extractors can be modified by using `--parse-metadata` and `--replace-in-metadata`
|
The metadata obtained by the extractors can be modified by using `--parse-metadata` and `--replace-in-metadata`
|
||||||
|
|
||||||
`--replace-in-metadata FIELDS REGEX REPLACE` is used to replace text in any metadata field using [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax). [Backreferences](https://docs.python.org/3/library/re.html?highlight=backreferences#re.sub) can be used in the replace string for advanced use.
|
`--replace-in-metadata FIELDS REGEX REPLACE` is used to replace text in any metadata field using [Python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax). [Backreferences](https://docs.python.org/3/library/re.html?highlight=backreferences#re.sub) can be used in the replace string for advanced use.
|
||||||
|
|
||||||
The general syntax of `--parse-metadata FROM:TO` is to give the name of a field or an [output template](#output-template) to extract data from, and the format to interpret it as, separated by a colon `:`. Either a [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax) with named capture groups, a single field name, or a similar syntax to the [output template](#output-template) (only `%(field)s` formatting is supported) can be used for `TO`. The option can be used multiple times to parse and modify various fields.
|
The general syntax of `--parse-metadata FROM:TO` is to give the name of a field or an [output template](#output-template) to extract data from, and the format to interpret it as, separated by a colon `:`. Either a [Python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax) with named capture groups, a single field name, or a similar syntax to the [output template](#output-template) (only `%(field)s` formatting is supported) can be used for `TO`. The option can be used multiple times to parse and modify various fields.
|
||||||
|
|
||||||
Note that these options preserve their relative order, allowing replacements to be made in parsed fields and viceversa. Also, any field thus created can be used in the [output template](#output-template) and will also affect the media file's metadata added when using `--embed-metadata`.
|
Note that these options preserve their relative order, allowing replacements to be made in parsed fields and viceversa. Also, any field thus created can be used in the [output template](#output-template) and will also affect the media file's metadata added when using `--embed-metadata`.
|
||||||
|
|
||||||
|
@ -1767,10 +1686,11 @@ Metadata fields | From
|
||||||
`description`, `synopsis` | `description`
|
`description`, `synopsis` | `description`
|
||||||
`purl`, `comment` | `webpage_url`
|
`purl`, `comment` | `webpage_url`
|
||||||
`track` | `track_number`
|
`track` | `track_number`
|
||||||
`artist` | `artist`, `creator`, `uploader` or `uploader_id`
|
`artist` | `artist`, `artists`, `creator`, `creators`, `uploader` or `uploader_id`
|
||||||
`genre` | `genre`
|
`composer` | `composer` or `composers`
|
||||||
|
`genre` | `genre` or `genres`
|
||||||
`album` | `album`
|
`album` | `album`
|
||||||
`album_artist` | `album_artist`
|
`album_artist` | `album_artist` or `album_artists`
|
||||||
`disc` | `disc_number`
|
`disc` | `disc_number`
|
||||||
`show` | `series`
|
`show` | `series`
|
||||||
`season_number` | `season_number`
|
`season_number` | `season_number`
|
||||||
|
@ -2167,9 +2087,106 @@ with yt_dlp.YoutubeDL(ydl_opts) as ydl:
|
||||||
ydl.download(URLS)
|
ydl.download(URLS)
|
||||||
```
|
```
|
||||||
|
|
||||||
<!-- MANPAGE: MOVE "NEW FEATURES" SECTION HERE -->
|
|
||||||
|
|
||||||
# DEPRECATED OPTIONS
|
# CHANGES FROM YOUTUBE-DL
|
||||||
|
|
||||||
|
### New features
|
||||||
|
|
||||||
|
* Forked from [**yt-dlc@f9401f2**](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee) and merged with [**youtube-dl@be008e6**](https://github.com/ytdl-org/youtube-dl/commit/be008e657d79832642e2158557c899249c9e31cd) ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))
|
||||||
|
|
||||||
|
* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API
|
||||||
|
|
||||||
|
* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will be now preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples))
|
||||||
|
|
||||||
|
* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that NicoNico livestreams are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.
|
||||||
|
|
||||||
|
* **YouTube improvements**:
|
||||||
|
* Supports Clips, Stories (`ytstories:<channel UCID>`), Search (including filters)**\***, YouTube Music Search, Channel-specific search, Search prefixes (`ytsearch:`, `ytsearchdate:`)**\***, Mixes, and Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`, `:ytnotif`)
|
||||||
|
* Fix for [n-sig based throttling](https://github.com/ytdl-org/youtube-dl/issues/29326) **\***
|
||||||
|
* Supports some (but not all) age-gated content without cookies
|
||||||
|
* Download livestreams from the start using `--live-from-start` (*experimental*)
|
||||||
|
* Channel URLs download all uploads of the channel, including shorts and live
|
||||||
|
|
||||||
|
* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[+KEYRING][:PROFILE][::CONTAINER]`
|
||||||
|
|
||||||
|
* **Download time range**: Videos can be downloaded partially based on either timestamps or chapters using `--download-sections`
|
||||||
|
|
||||||
|
* **Split video by chapters**: Videos can be split into multiple files based on chapters using `--split-chapters`
|
||||||
|
|
||||||
|
* **Multi-threaded fragment downloads**: Download multiple fragments of m3u8/mpd videos in parallel. Use `--concurrent-fragments` (`-N`) option to set the number of threads used
|
||||||
|
|
||||||
|
* **Aria2c with HLS/DASH**: You can use `aria2c` as the external downloader for DASH(mpd) and HLS(m3u8) formats
|
||||||
|
|
||||||
|
* **New and fixed extractors**: Many new extractors have been added and a lot of existing ones have been fixed. See the [changelog](Changelog.md) or the [list of supported sites](supportedsites.md)
|
||||||
|
|
||||||
|
* **New MSOs**: Philo, Spectrum, SlingTV, Cablevision, RCN etc.
|
||||||
|
|
||||||
|
* **Subtitle extraction from manifests**: Subtitles can be extracted from streaming media manifests. See [commit/be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details
|
||||||
|
|
||||||
|
* **Multiple paths and output templates**: You can give different [output templates](#output-template) and download paths for different types of files. You can also set a temporary path where intermediary files are downloaded to using `--paths` (`-P`)
|
||||||
|
|
||||||
|
* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [CONFIGURATION](#configuration) for details
|
||||||
|
|
||||||
|
* **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata`
|
||||||
|
|
||||||
|
* **Other new options**: Many new options have been added such as `--alias`, `--print`, `--concat-playlist`, `--wait-for-video`, `--retry-sleep`, `--sleep-requests`, `--convert-thumbnails`, `--force-download-archive`, `--force-overwrites`, `--break-match-filter` etc
|
||||||
|
|
||||||
|
* **Improvements**: Regex and other operators in `--format`/`--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio, multiple `--config-locations`, `--exec` at different stages, etc
|
||||||
|
|
||||||
|
* **Plugins**: Extractors and PostProcessors can be loaded from an external file. See [plugins](#plugins) for details
|
||||||
|
|
||||||
|
* **Self updater**: The releases can be updated using `yt-dlp -U`, and downgraded using `--update-to` if required
|
||||||
|
|
||||||
|
* **Automated builds**: [Nightly/master builds](#update-channels) can be used with `--update-to nightly` and `--update-to master`
|
||||||
|
|
||||||
|
See [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/commits) for the full list of changes
|
||||||
|
|
||||||
|
Features marked with a **\*** have been back-ported to youtube-dl
|
||||||
|
|
||||||
|
### Differences in default behavior
|
||||||
|
|
||||||
|
Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc:
|
||||||
|
|
||||||
|
* yt-dlp supports only [Python 3.8+](## "Windows 7"), and *may* remove support for more versions as they [become EOL](https://devguide.python.org/versions/#python-release-cycle); while [youtube-dl still supports Python 2.6+ and 3.2+](https://github.com/ytdl-org/youtube-dl/issues/30568#issue-1118238743)
|
||||||
|
* The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details
|
||||||
|
* `avconv` is not supported as an alternative to `ffmpeg`
|
||||||
|
* yt-dlp stores config files in slightly different locations to youtube-dl. See [CONFIGURATION](#configuration) for a list of correct locations
|
||||||
|
* The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s-%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
|
||||||
|
* The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
|
||||||
|
* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
|
||||||
|
* Unlike youtube-dlc, yt-dlp does not allow merging multiple audio/video streams into one file by default (since this conflicts with the use of `-f bv*+ba`). If needed, this feature must be enabled using `--audio-multistreams` and `--video-multistreams`. You can also use `--compat-options multistreams` to enable both
|
||||||
|
* `--no-abort-on-error` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead
|
||||||
|
* When writing metadata files such as thumbnails, description or infojson, the same information (if available) is also written for playlists. Use `--no-write-playlist-metafiles` or `--compat-options no-playlist-metafiles` to not write these files
|
||||||
|
* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-info-json`. Use `--no-embed-info-json` or `--compat-options no-attach-info-json` to revert this
|
||||||
|
* Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this
|
||||||
|
* `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
|
||||||
|
* The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
|
||||||
|
* Live chats (if available) are considered as subtitles. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent any live chat/danmaku from downloading
|
||||||
|
* YouTube channel URLs download all uploads of the channel. To download only the videos in a specific tab, pass the tab's URL. If the channel does not show the requested tab, an error will be raised. Also, `/live` URLs raise an error if there are no live videos instead of silently downloading the entire channel. You may use `--compat-options no-youtube-channel-redirect` to revert all these redirections
|
||||||
|
* Unavailable videos are also listed for YouTube playlists. Use `--compat-options no-youtube-unavailable-videos` to remove this
|
||||||
|
* The upload dates extracted from YouTube are in UTC [when available](https://github.com/yt-dlp/yt-dlp/blob/89e4d86171c7b7c997c77d4714542e0383bf0db0/yt_dlp/extractor/youtube.py#L3898-L3900). Use `--compat-options no-youtube-prefer-utc-upload-date` to prefer the non-UTC upload date.
|
||||||
|
* If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
|
||||||
|
* Thumbnail embedding in `mp4` is done with mutagen if possible. Use `--compat-options embed-thumbnail-atomicparsley` to force the use of AtomicParsley instead
|
||||||
|
* Some internal metadata such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this
|
||||||
|
* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this
|
||||||
|
* `certifi` will be used for SSL root certificates, if installed. If you want to use system certificates (e.g. self-signed), use `--compat-options no-certifi`
|
||||||
|
* yt-dlp's sanitization of invalid characters in filenames is different/smarter than in youtube-dl. You can use `--compat-options filename-sanitization` to revert to youtube-dl's behavior
|
||||||
|
* ~~yt-dlp tries to parse the external downloader outputs into the standard progress output if possible (Currently implemented: [aria2c](https://github.com/yt-dlp/yt-dlp/issues/5931)). You can use `--compat-options no-external-downloader-progress` to get the downloader output as-is~~
|
||||||
|
* yt-dlp versions between 2021.09.01 and 2023.01.02 applies `--match-filter` to nested playlists. This was an unintentional side-effect of [8f18ac](https://github.com/yt-dlp/yt-dlp/commit/8f18aca8717bb0dd49054555af8d386e5eda3a88) and is fixed in [d7b460](https://github.com/yt-dlp/yt-dlp/commit/d7b460d0e5fc710950582baed2e3fc616ed98a80). Use `--compat-options playlist-match-filter` to revert this
|
||||||
|
* yt-dlp versions between 2021.11.10 and 2023.06.21 estimated `filesize_approx` values for fragmented/manifest formats. This was added for convenience in [f2fe69](https://github.com/yt-dlp/yt-dlp/commit/f2fe69c7b0d208bdb1f6292b4ae92bc1e1a7444a), but was reverted in [0dff8e](https://github.com/yt-dlp/yt-dlp/commit/0dff8e4d1e6e9fb938f4256ea9af7d81f42fd54f) due to the potentially extreme inaccuracy of the estimated values. Use `--compat-options manifest-filesize-approx` to keep extracting the estimated values
|
||||||
|
* yt-dlp uses modern http client backends such as `requests`. Use `--compat-options prefer-legacy-http-handler` to prefer the legacy http handler (`urllib`) to be used for standard http requests.
|
||||||
|
* The sub-modules `swfinterp`, `casefold` are removed.
|
||||||
|
|
||||||
|
For ease of use, a few more compat options are available:
|
||||||
|
|
||||||
|
* `--compat-options all`: Use all compat options (Do NOT use)
|
||||||
|
* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter,-manifest-filesize-approx`
|
||||||
|
* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter,-manifest-filesize-approx`
|
||||||
|
* `--compat-options 2021`: Same as `--compat-options 2022,no-certifi,filename-sanitization,no-youtube-prefer-utc-upload-date`
|
||||||
|
* `--compat-options 2022`: Same as `--compat-options 2023,playlist-match-filter,no-external-downloader-progress,prefer-legacy-http-handler,manifest-filesize-approx`
|
||||||
|
* `--compat-options 2023`: Currently does nothing. Use this to enable all future compat options
|
||||||
|
|
||||||
|
### Deprecated options
|
||||||
|
|
||||||
These are all the deprecated options and the current alternative to achieve the same effect
|
These are all the deprecated options and the current alternative to achieve the same effect
|
||||||
|
|
||||||
|
@ -2205,7 +2222,6 @@ While these options are redundant, they are still expected to be used due to the
|
||||||
--no-playlist-reverse Default
|
--no-playlist-reverse Default
|
||||||
--no-colors --color no_color
|
--no-colors --color no_color
|
||||||
|
|
||||||
|
|
||||||
#### Not recommended
|
#### Not recommended
|
||||||
While these options still work, their use is not recommended since there are other alternatives to achieve the same
|
While these options still work, their use is not recommended since there are other alternatives to achieve the same
|
||||||
|
|
||||||
|
@ -2232,7 +2248,6 @@ While these options still work, their use is not recommended since there are oth
|
||||||
--geo-bypass-country CODE --xff CODE
|
--geo-bypass-country CODE --xff CODE
|
||||||
--geo-bypass-ip-block IP_BLOCK --xff IP_BLOCK
|
--geo-bypass-ip-block IP_BLOCK --xff IP_BLOCK
|
||||||
|
|
||||||
|
|
||||||
#### Developer options
|
#### Developer options
|
||||||
These options are not intended to be used by the end-user
|
These options are not intended to be used by the end-user
|
||||||
|
|
||||||
|
@ -2242,7 +2257,6 @@ These options are not intended to be used by the end-user
|
||||||
--allow-unplayable-formats List unplayable formats also
|
--allow-unplayable-formats List unplayable formats also
|
||||||
--no-allow-unplayable-formats Default
|
--no-allow-unplayable-formats Default
|
||||||
|
|
||||||
|
|
||||||
#### Old aliases
|
#### Old aliases
|
||||||
These are aliases that are no longer documented for various reasons
|
These are aliases that are no longer documented for various reasons
|
||||||
|
|
||||||
|
@ -2295,6 +2309,7 @@ These options were deprecated since 2014 and have now been entirely removed
|
||||||
-A, --auto-number -o "%(autonumber)s-%(id)s.%(ext)s"
|
-A, --auto-number -o "%(autonumber)s-%(id)s.%(ext)s"
|
||||||
-t, -l, --title, --literal -o "%(title)s-%(id)s.%(ext)s"
|
-t, -l, --title, --literal -o "%(title)s-%(id)s.%(ext)s"
|
||||||
|
|
||||||
|
|
||||||
# CONTRIBUTING
|
# CONTRIBUTING
|
||||||
See [CONTRIBUTING.md](CONTRIBUTING.md#contributing-to-yt-dlp) for instructions on [Opening an Issue](CONTRIBUTING.md#opening-an-issue) and [Contributing code to the project](CONTRIBUTING.md#developer-instructions)
|
See [CONTRIBUTING.md](CONTRIBUTING.md#contributing-to-yt-dlp) for instructions on [Opening an Issue](CONTRIBUTING.md#opening-an-issue) and [Contributing code to the project](CONTRIBUTING.md#developer-instructions)
|
||||||
|
|
||||||
|
|
|
@ -1 +0,0 @@
|
||||||
# Empty file
|
|
|
@ -20,7 +20,7 @@ def main():
|
||||||
'py2exe builds do not support pycryptodomex and needs VC++14 to run. '
|
'py2exe builds do not support pycryptodomex and needs VC++14 to run. '
|
||||||
'It is recommended to run "pyinst.py" to build using pyinstaller instead')
|
'It is recommended to run "pyinst.py" to build using pyinstaller instead')
|
||||||
|
|
||||||
return freeze(
|
freeze(
|
||||||
console=[{
|
console=[{
|
||||||
'script': './yt_dlp/__main__.py',
|
'script': './yt_dlp/__main__.py',
|
||||||
'dest_base': 'yt-dlp',
|
'dest_base': 'yt-dlp',
|
||||||
|
|
Binary file not shown.
Binary file not shown.
|
@ -1 +0,0 @@
|
||||||
# Empty file needed to make devscripts.utils properly importable from outside
|
|
|
@ -120,5 +120,11 @@
|
||||||
"when": "15f22b4880b6b3f71f350c64d70976ae65b9f1ca",
|
"when": "15f22b4880b6b3f71f350c64d70976ae65b9f1ca",
|
||||||
"short": "[webvtt] Allow spaces before newlines for CueBlock (#7681)",
|
"short": "[webvtt] Allow spaces before newlines for CueBlock (#7681)",
|
||||||
"authors": ["TSRBerry"]
|
"authors": ["TSRBerry"]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"action": "change",
|
||||||
|
"when": "4ce57d3b873c2887814cbec03d029533e82f7db5",
|
||||||
|
"short": "[ie] Support multi-period MPD streams (#6654)",
|
||||||
|
"authors": ["alard", "pukkandan"]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
|
@ -19,7 +19,7 @@ def parse_args():
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'input', nargs='?', metavar='TOMLFILE', default='pyproject.toml', help='Input file (default: %(default)s)')
|
'input', nargs='?', metavar='TOMLFILE', default='pyproject.toml', help='Input file (default: %(default)s)')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'-e', '--exclude', metavar='REQUIREMENT', action='append', help='Exclude a required dependency')
|
'-e', '--exclude', metavar='DEPENDENCY', action='append', help='Exclude a dependency')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'-i', '--include', metavar='GROUP', action='append', help='Include an optional dependency group')
|
'-i', '--include', metavar='GROUP', action='append', help='Include an optional dependency group')
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
|
@ -33,21 +33,28 @@ def parse_args():
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
args = parse_args()
|
args = parse_args()
|
||||||
toml_data = parse_toml(read_file(args.input))
|
project_table = parse_toml(read_file(args.input))['project']
|
||||||
deps = toml_data['project']['dependencies']
|
optional_groups = project_table['optional-dependencies']
|
||||||
targets = deps.copy() if not args.only_optional else []
|
excludes = args.exclude or []
|
||||||
|
|
||||||
for exclude in args.exclude or []:
|
deps = []
|
||||||
for dep in deps:
|
if not args.only_optional: # `-o` should exclude 'dependencies' and the 'default' group
|
||||||
simplified_dep = re.match(r'[\w-]+', dep)[0]
|
deps.extend(project_table['dependencies'])
|
||||||
if dep in targets and (exclude.lower() == simplified_dep.lower() or exclude == dep):
|
if 'default' not in excludes: # `--exclude default` should exclude entire 'default' group
|
||||||
targets.remove(dep)
|
deps.extend(optional_groups['default'])
|
||||||
|
|
||||||
optional_deps = toml_data['project']['optional-dependencies']
|
def name(dependency):
|
||||||
for include in args.include or []:
|
return re.match(r'[\w-]+', dependency)[0].lower()
|
||||||
group = optional_deps.get(include)
|
|
||||||
if group:
|
target_map = {name(dep): dep for dep in deps}
|
||||||
targets.extend(group)
|
|
||||||
|
for include in filter(None, map(optional_groups.get, args.include or [])):
|
||||||
|
target_map.update(zip(map(name, include), include))
|
||||||
|
|
||||||
|
for exclude in map(name, excludes):
|
||||||
|
target_map.pop(exclude, None)
|
||||||
|
|
||||||
|
targets = list(target_map.values())
|
||||||
|
|
||||||
if args.print:
|
if args.print:
|
||||||
for target in targets:
|
for target in targets:
|
||||||
|
|
|
@ -253,7 +253,7 @@ class CommitRange:
|
||||||
''', re.VERBOSE | re.DOTALL)
|
''', re.VERBOSE | re.DOTALL)
|
||||||
EXTRACTOR_INDICATOR_RE = re.compile(r'(?:Fix|Add)\s+Extractors?', re.IGNORECASE)
|
EXTRACTOR_INDICATOR_RE = re.compile(r'(?:Fix|Add)\s+Extractors?', re.IGNORECASE)
|
||||||
REVERT_RE = re.compile(r'(?:\[[^\]]+\]\s+)?(?i:Revert)\s+([\da-f]{40})')
|
REVERT_RE = re.compile(r'(?:\[[^\]]+\]\s+)?(?i:Revert)\s+([\da-f]{40})')
|
||||||
FIXES_RE = re.compile(r'(?i:Fix(?:es)?(?:\s+bugs?)?(?:\s+in|\s+for)?|Revert)\s+([\da-f]{40})')
|
FIXES_RE = re.compile(r'(?i:Fix(?:es)?(?:\s+bugs?)?(?:\s+in|\s+for)?|Revert|Improve)\s+([\da-f]{40})')
|
||||||
UPSTREAM_MERGE_RE = re.compile(r'Update to ytdl-commit-([\da-f]+)')
|
UPSTREAM_MERGE_RE = re.compile(r'Update to ytdl-commit-([\da-f]+)')
|
||||||
|
|
||||||
def __init__(self, start, end, default_author=None):
|
def __init__(self, start, end, default_author=None):
|
||||||
|
@ -445,7 +445,32 @@ def get_new_contributors(contributors_path, commits):
|
||||||
return sorted(new_contributors, key=str.casefold)
|
return sorted(new_contributors, key=str.casefold)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
def create_changelog(args):
|
||||||
|
logging.basicConfig(
|
||||||
|
datefmt='%Y-%m-%d %H-%M-%S', format='{asctime} | {levelname:<8} | {message}',
|
||||||
|
level=logging.WARNING - 10 * args.verbosity, style='{', stream=sys.stderr)
|
||||||
|
|
||||||
|
commits = CommitRange(None, args.commitish, args.default_author)
|
||||||
|
|
||||||
|
if not args.no_override:
|
||||||
|
if args.override_path.exists():
|
||||||
|
overrides = json.loads(read_file(args.override_path))
|
||||||
|
commits.apply_overrides(overrides)
|
||||||
|
else:
|
||||||
|
logger.warning(f'File {args.override_path.as_posix()} does not exist')
|
||||||
|
|
||||||
|
logger.info(f'Loaded {len(commits)} commits')
|
||||||
|
|
||||||
|
new_contributors = get_new_contributors(args.contributors_path, commits)
|
||||||
|
if new_contributors:
|
||||||
|
if args.contributors:
|
||||||
|
write_file(args.contributors_path, '\n'.join(new_contributors) + '\n', mode='a')
|
||||||
|
logger.info(f'New contributors: {", ".join(new_contributors)}')
|
||||||
|
|
||||||
|
return Changelog(commits.groups(), args.repo, args.collapsible)
|
||||||
|
|
||||||
|
|
||||||
|
def create_parser():
|
||||||
import argparse
|
import argparse
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(
|
parser = argparse.ArgumentParser(
|
||||||
|
@ -477,27 +502,9 @@ if __name__ == '__main__':
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'--collapsible', action='store_true',
|
'--collapsible', action='store_true',
|
||||||
help='make changelog collapsible (default: %(default)s)')
|
help='make changelog collapsible (default: %(default)s)')
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
logging.basicConfig(
|
return parser
|
||||||
datefmt='%Y-%m-%d %H-%M-%S', format='{asctime} | {levelname:<8} | {message}',
|
|
||||||
level=logging.WARNING - 10 * args.verbosity, style='{', stream=sys.stderr)
|
|
||||||
|
|
||||||
commits = CommitRange(None, args.commitish, args.default_author)
|
|
||||||
|
|
||||||
if not args.no_override:
|
if __name__ == '__main__':
|
||||||
if args.override_path.exists():
|
print(create_changelog(create_parser().parse_args()))
|
||||||
overrides = json.loads(read_file(args.override_path))
|
|
||||||
commits.apply_overrides(overrides)
|
|
||||||
else:
|
|
||||||
logger.warning(f'File {args.override_path.as_posix()} does not exist')
|
|
||||||
|
|
||||||
logger.info(f'Loaded {len(commits)} commits')
|
|
||||||
|
|
||||||
new_contributors = get_new_contributors(args.contributors_path, commits)
|
|
||||||
if new_contributors:
|
|
||||||
if args.contributors:
|
|
||||||
write_file(args.contributors_path, '\n'.join(new_contributors) + '\n', mode='a')
|
|
||||||
logger.info(f'New contributors: {", ".join(new_contributors)}')
|
|
||||||
|
|
||||||
print(Changelog(commits.groups(), args.repo, args.collapsible))
|
|
||||||
|
|
|
@ -0,0 +1,26 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
# Allow direct execution
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
from devscripts.make_changelog import create_changelog, create_parser
|
||||||
|
from devscripts.utils import read_file, read_version, write_file
|
||||||
|
|
||||||
|
# Always run after devscripts/update-version.py, and run before `make doc|pypi-files|tar|all`
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
parser = create_parser()
|
||||||
|
parser.description = 'Update an existing changelog file with an entry for a new release'
|
||||||
|
parser.add_argument(
|
||||||
|
'--changelog-path', type=Path, default=Path(__file__).parent.parent / 'Changelog.md',
|
||||||
|
help='path to the Changelog file')
|
||||||
|
args = parser.parse_args()
|
||||||
|
new_entry = create_changelog(args)
|
||||||
|
|
||||||
|
header, sep, changelog = read_file(args.changelog_path).partition('\n### ')
|
||||||
|
write_file(args.changelog_path, f'{header}{sep}{read_version()}\n{new_entry}\n{sep}{changelog}')
|
|
@ -0,0 +1,17 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
# Allow execution from anywhere
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
from bundle.pyinstaller import main
|
||||||
|
|
||||||
|
warnings.warn(DeprecationWarning('`pyinst.py` is deprecated and will be removed in a future version. '
|
||||||
|
'Use `bundle.pyinstaller` instead'))
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
|
@ -8,6 +8,7 @@ maintainers = [
|
||||||
{name = "pukkandan", email = "pukkandan.ytdlp@gmail.com"},
|
{name = "pukkandan", email = "pukkandan.ytdlp@gmail.com"},
|
||||||
{name = "Grub4K", email = "contact@grub4k.xyz"},
|
{name = "Grub4K", email = "contact@grub4k.xyz"},
|
||||||
{name = "bashonly", email = "bashonly@protonmail.com"},
|
{name = "bashonly", email = "bashonly@protonmail.com"},
|
||||||
|
{name = "coletdjnz", email = "coletdjnz@protonmail.com"},
|
||||||
]
|
]
|
||||||
description = "A youtube-dl fork with additional features and patches"
|
description = "A youtube-dl fork with additional features and patches"
|
||||||
readme = "README.md"
|
readme = "README.md"
|
||||||
|
@ -51,6 +52,7 @@ dependencies = [
|
||||||
]
|
]
|
||||||
|
|
||||||
[project.optional-dependencies]
|
[project.optional-dependencies]
|
||||||
|
default = []
|
||||||
secretstorage = [
|
secretstorage = [
|
||||||
"cffi",
|
"cffi",
|
||||||
"secretstorage",
|
"secretstorage",
|
||||||
|
@ -94,7 +96,6 @@ include = [
|
||||||
"/README.md", # included as readme
|
"/README.md", # included as readme
|
||||||
"/supportedsites.md",
|
"/supportedsites.md",
|
||||||
]
|
]
|
||||||
exclude = ["/yt_dlp/__pyinstaller"]
|
|
||||||
artifacts = [
|
artifacts = [
|
||||||
"/yt_dlp/extractor/lazy_extractors.py",
|
"/yt_dlp/extractor/lazy_extractors.py",
|
||||||
"/completions",
|
"/completions",
|
||||||
|
@ -105,7 +106,6 @@ artifacts = [
|
||||||
|
|
||||||
[tool.hatch.build.targets.wheel]
|
[tool.hatch.build.targets.wheel]
|
||||||
packages = ["yt_dlp"]
|
packages = ["yt_dlp"]
|
||||||
exclude = ["/yt_dlp/__pyinstaller"]
|
|
||||||
artifacts = ["/yt_dlp/extractor/lazy_extractors.py"]
|
artifacts = ["/yt_dlp/extractor/lazy_extractors.py"]
|
||||||
|
|
||||||
[tool.hatch.build.targets.wheel.shared-data]
|
[tool.hatch.build.targets.wheel.shared-data]
|
||||||
|
|
|
@ -0,0 +1,36 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
# Allow execution from anywhere
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
|
||||||
|
if sys.argv[1:2] == ['py2exe']:
|
||||||
|
warnings.warn(DeprecationWarning('`setup.py py2exe` is deprecated and will be removed in a future version. '
|
||||||
|
'Use `bundle.py2exe` instead'))
|
||||||
|
|
||||||
|
import bundle.py2exe
|
||||||
|
|
||||||
|
bundle.py2exe.main()
|
||||||
|
|
||||||
|
elif 'build_lazy_extractors' in sys.argv:
|
||||||
|
warnings.warn(DeprecationWarning('`setup.py build_lazy_extractors` is deprecated and will be removed in a future version. '
|
||||||
|
'Use `devscripts.make_lazy_extractors` instead'))
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
os.chdir(sys.path[0])
|
||||||
|
print('running build_lazy_extractors')
|
||||||
|
subprocess.run([sys.executable, 'devscripts/make_lazy_extractors.py'])
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
print(
|
||||||
|
'ERROR: Building by calling `setup.py` is deprecated. '
|
||||||
|
'Use a build frontend like `build` instead. ',
|
||||||
|
'Refer to https://build.pypa.io for more info', file=sys.stderr)
|
||||||
|
sys.exit(1)
|
|
@ -5,7 +5,7 @@
|
||||||
- **1tv**: Первый канал
|
- **1tv**: Первый канал
|
||||||
- **20min**
|
- **20min**
|
||||||
- **23video**
|
- **23video**
|
||||||
- **247sports**
|
- **247sports**: (**Currently broken**)
|
||||||
- **24tv.ua**
|
- **24tv.ua**
|
||||||
- **3qsdn**: 3Q SDN
|
- **3qsdn**: 3Q SDN
|
||||||
- **3sat**
|
- **3sat**
|
||||||
|
@ -17,6 +17,7 @@
|
||||||
- **91porn**
|
- **91porn**
|
||||||
- **9c9media**
|
- **9c9media**
|
||||||
- **9gag**: 9GAG
|
- **9gag**: 9GAG
|
||||||
|
- **9News**
|
||||||
- **9now.com.au**
|
- **9now.com.au**
|
||||||
- **abc.net.au**
|
- **abc.net.au**
|
||||||
- **abc.net.au:iview**
|
- **abc.net.au:iview**
|
||||||
|
@ -26,13 +27,14 @@
|
||||||
- **abcotvs**: ABC Owned Television Stations
|
- **abcotvs**: ABC Owned Television Stations
|
||||||
- **abcotvs:clips**
|
- **abcotvs:clips**
|
||||||
- **AbemaTV**: [*abematv*](## "netrc machine")
|
- **AbemaTV**: [*abematv*](## "netrc machine")
|
||||||
- **AbemaTVTitle**
|
- **AbemaTVTitle**: [*abematv*](## "netrc machine")
|
||||||
- **AcademicEarth:Course**
|
- **AcademicEarth:Course**
|
||||||
- **acast**
|
- **acast**
|
||||||
- **acast:channel**
|
- **acast:channel**
|
||||||
- **AcFunBangumi**
|
- **AcFunBangumi**
|
||||||
- **AcFunVideo**
|
- **AcFunVideo**
|
||||||
- **ADN**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
|
- **ADN**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
|
||||||
|
- **ADNSeason**: [*animationdigitalnetwork*](## "netrc machine") Animation Digital Network
|
||||||
- **AdobeConnect**
|
- **AdobeConnect**
|
||||||
- **adobetv**
|
- **adobetv**
|
||||||
- **adobetv:channel**
|
- **adobetv:channel**
|
||||||
|
@ -61,6 +63,7 @@
|
||||||
- **altcensored:channel**
|
- **altcensored:channel**
|
||||||
- **Alura**: [*alura*](## "netrc machine")
|
- **Alura**: [*alura*](## "netrc machine")
|
||||||
- **AluraCourse**: [*aluracourse*](## "netrc machine")
|
- **AluraCourse**: [*aluracourse*](## "netrc machine")
|
||||||
|
- **AmadeusTV**
|
||||||
- **Amara**
|
- **Amara**
|
||||||
- **AmazonMiniTV**
|
- **AmazonMiniTV**
|
||||||
- **amazonminitv:season**: Amazon MiniTV Season, "minitv:season:" prefix
|
- **amazonminitv:season**: Amazon MiniTV Season, "minitv:season:" prefix
|
||||||
|
@ -93,11 +96,15 @@
|
||||||
- **ARDMediathek**
|
- **ARDMediathek**
|
||||||
- **ARDMediathekCollection**
|
- **ARDMediathekCollection**
|
||||||
- **Arkena**
|
- **Arkena**
|
||||||
|
- **Art19**
|
||||||
|
- **Art19Show**
|
||||||
- **arte.sky.it**
|
- **arte.sky.it**
|
||||||
- **ArteTV**
|
- **ArteTV**
|
||||||
- **ArteTVCategory**
|
- **ArteTVCategory**
|
||||||
- **ArteTVEmbed**
|
- **ArteTVEmbed**
|
||||||
- **ArteTVPlaylist**
|
- **ArteTVPlaylist**
|
||||||
|
- **asobichannel**: ASOBI CHANNEL
|
||||||
|
- **asobichannel:tag**: ASOBI CHANNEL
|
||||||
- **AtresPlayer**: [*atresplayer*](## "netrc machine")
|
- **AtresPlayer**: [*atresplayer*](## "netrc machine")
|
||||||
- **AtScaleConfEvent**
|
- **AtScaleConfEvent**
|
||||||
- **ATVAt**
|
- **ATVAt**
|
||||||
|
@ -180,13 +187,14 @@
|
||||||
- **BitChute**
|
- **BitChute**
|
||||||
- **BitChuteChannel**
|
- **BitChuteChannel**
|
||||||
- **BlackboardCollaborate**
|
- **BlackboardCollaborate**
|
||||||
- **BleacherReport**
|
- **BleacherReport**: (**Currently broken**)
|
||||||
- **BleacherReportCMS**
|
- **BleacherReportCMS**: (**Currently broken**)
|
||||||
- **blerp**
|
- **blerp**
|
||||||
- **blogger.com**
|
- **blogger.com**
|
||||||
- **Bloomberg**
|
- **Bloomberg**
|
||||||
- **BokeCC**
|
- **BokeCC**
|
||||||
- **BongaCams**
|
- **BongaCams**
|
||||||
|
- **Boosty**
|
||||||
- **BostonGlobe**
|
- **BostonGlobe**
|
||||||
- **Box**
|
- **Box**
|
||||||
- **BoxCastVideo**
|
- **BoxCastVideo**
|
||||||
|
@ -231,8 +239,7 @@
|
||||||
- **cbc.ca**
|
- **cbc.ca**
|
||||||
- **cbc.ca:player**
|
- **cbc.ca:player**
|
||||||
- **cbc.ca:player:playlist**
|
- **cbc.ca:player:playlist**
|
||||||
- **CBS**
|
- **CBS**: (**Currently broken**)
|
||||||
- **CBSInteractive**
|
|
||||||
- **CBSLocal**
|
- **CBSLocal**
|
||||||
- **CBSLocalArticle**
|
- **CBSLocalArticle**
|
||||||
- **CBSLocalLive**
|
- **CBSLocalLive**
|
||||||
|
@ -240,8 +247,8 @@
|
||||||
- **cbsnews:embed**
|
- **cbsnews:embed**
|
||||||
- **cbsnews:live**: CBS News Livestream
|
- **cbsnews:live**: CBS News Livestream
|
||||||
- **cbsnews:livevideo**: CBS News Live Videos
|
- **cbsnews:livevideo**: CBS News Live Videos
|
||||||
- **cbssports**
|
- **cbssports**: (**Currently broken**)
|
||||||
- **cbssports:embed**
|
- **cbssports:embed**: (**Currently broken**)
|
||||||
- **CCMA**
|
- **CCMA**
|
||||||
- **CCTV**: 央视网
|
- **CCTV**: 央视网
|
||||||
- **CDA**: [*cdapl*](## "netrc machine")
|
- **CDA**: [*cdapl*](## "netrc machine")
|
||||||
|
@ -251,10 +258,10 @@
|
||||||
- **CharlieRose**
|
- **CharlieRose**
|
||||||
- **Chaturbate**
|
- **Chaturbate**
|
||||||
- **Chilloutzone**
|
- **Chilloutzone**
|
||||||
- **Chingari**
|
- **chzzk:live**
|
||||||
- **ChingariUser**
|
- **chzzk:video**
|
||||||
- **cielotv.it**
|
- **cielotv.it**
|
||||||
- **Cinemax**
|
- **Cinemax**: (**Currently broken**)
|
||||||
- **CinetecaMilano**
|
- **CinetecaMilano**
|
||||||
- **Cineverse**
|
- **Cineverse**
|
||||||
- **CineverseDetails**
|
- **CineverseDetails**
|
||||||
|
@ -263,16 +270,15 @@
|
||||||
- **ciscowebex**: Cisco Webex
|
- **ciscowebex**: Cisco Webex
|
||||||
- **CJSW**
|
- **CJSW**
|
||||||
- **Clipchamp**
|
- **Clipchamp**
|
||||||
- **cliphunter**
|
|
||||||
- **Clippit**
|
- **Clippit**
|
||||||
- **ClipRs**
|
- **ClipRs**: (**Currently broken**)
|
||||||
- **ClipYouEmbed**
|
- **ClipYouEmbed**
|
||||||
- **CloserToTruth**
|
- **CloserToTruth**: (**Currently broken**)
|
||||||
- **CloudflareStream**
|
- **CloudflareStream**
|
||||||
|
- **CloudyCDN**
|
||||||
- **Clubic**: (**Currently broken**)
|
- **Clubic**: (**Currently broken**)
|
||||||
- **Clyp**
|
- **Clyp**
|
||||||
- **cmt.com**: (**Currently broken**)
|
- **cmt.com**: (**Currently broken**)
|
||||||
- **CNBC**
|
|
||||||
- **CNBCVideo**
|
- **CNBCVideo**
|
||||||
- **CNN**
|
- **CNN**
|
||||||
- **CNNArticle**
|
- **CNNArticle**
|
||||||
|
@ -320,6 +326,7 @@
|
||||||
- **DailyMail**
|
- **DailyMail**
|
||||||
- **dailymotion**: [*dailymotion*](## "netrc machine")
|
- **dailymotion**: [*dailymotion*](## "netrc machine")
|
||||||
- **dailymotion:playlist**: [*dailymotion*](## "netrc machine")
|
- **dailymotion:playlist**: [*dailymotion*](## "netrc machine")
|
||||||
|
- **dailymotion:search**: [*dailymotion*](## "netrc machine")
|
||||||
- **dailymotion:user**: [*dailymotion*](## "netrc machine")
|
- **dailymotion:user**: [*dailymotion*](## "netrc machine")
|
||||||
- **DailyWire**
|
- **DailyWire**
|
||||||
- **DailyWirePodcast**
|
- **DailyWirePodcast**
|
||||||
|
@ -340,7 +347,6 @@
|
||||||
- **DeuxM**
|
- **DeuxM**
|
||||||
- **DeuxMNews**
|
- **DeuxMNews**
|
||||||
- **DHM**: Filmarchiv - Deutsches Historisches Museum (**Currently broken**)
|
- **DHM**: Filmarchiv - Deutsches Historisches Museum (**Currently broken**)
|
||||||
- **Digg**
|
|
||||||
- **DigitalConcertHall**: [*digitalconcerthall*](## "netrc machine") DigitalConcertHall extractor
|
- **DigitalConcertHall**: [*digitalconcerthall*](## "netrc machine") DigitalConcertHall extractor
|
||||||
- **DigitallySpeaking**
|
- **DigitallySpeaking**
|
||||||
- **Digiteka**
|
- **Digiteka**
|
||||||
|
@ -373,14 +379,14 @@
|
||||||
- **drtv:live**
|
- **drtv:live**
|
||||||
- **drtv:season**
|
- **drtv:season**
|
||||||
- **drtv:series**
|
- **drtv:series**
|
||||||
- **DTube**
|
- **DTube**: (**Currently broken**)
|
||||||
- **duboku**: www.duboku.io
|
- **duboku**: www.duboku.io
|
||||||
- **duboku:list**: www.duboku.io entire series
|
- **duboku:list**: www.duboku.io entire series
|
||||||
- **Dumpert**
|
- **Dumpert**
|
||||||
- **Duoplay**
|
- **Duoplay**
|
||||||
- **dvtv**: http://video.aktualne.cz/
|
- **dvtv**: http://video.aktualne.cz/
|
||||||
- **dw**
|
- **dw**: (**Currently broken**)
|
||||||
- **dw:article**
|
- **dw:article**: (**Currently broken**)
|
||||||
- **EaglePlatform**
|
- **EaglePlatform**
|
||||||
- **EbaumsWorld**
|
- **EbaumsWorld**
|
||||||
- **Ebay**
|
- **Ebay**
|
||||||
|
@ -391,6 +397,7 @@
|
||||||
- **EinsUndEinsTVRecordings**: [*1und1tv*](## "netrc machine")
|
- **EinsUndEinsTVRecordings**: [*1und1tv*](## "netrc machine")
|
||||||
- **Einthusan**
|
- **Einthusan**
|
||||||
- **eitb.tv**
|
- **eitb.tv**
|
||||||
|
- **ElementorEmbed**
|
||||||
- **Elonet**
|
- **Elonet**
|
||||||
- **ElPais**: El País
|
- **ElPais**: El País
|
||||||
- **ElTreceTV**: El Trece TV (Argentina)
|
- **ElTreceTV**: El Trece TV (Argentina)
|
||||||
|
@ -405,6 +412,7 @@
|
||||||
- **Erocast**
|
- **Erocast**
|
||||||
- **EroProfile**: [*eroprofile*](## "netrc machine")
|
- **EroProfile**: [*eroprofile*](## "netrc machine")
|
||||||
- **EroProfile:album**
|
- **EroProfile:album**
|
||||||
|
- **ERRJupiter**
|
||||||
- **ertflix**: ERTFLIX videos
|
- **ertflix**: ERTFLIX videos
|
||||||
- **ertflix:codename**: ERTFLIX videos by codename
|
- **ertflix:codename**: ERTFLIX videos by codename
|
||||||
- **ertwebtv:embed**: ert.gr webtv embedded videos
|
- **ertwebtv:embed**: ert.gr webtv embedded videos
|
||||||
|
@ -412,7 +420,7 @@
|
||||||
- **ESPNArticle**
|
- **ESPNArticle**
|
||||||
- **ESPNCricInfo**
|
- **ESPNCricInfo**
|
||||||
- **EttuTv**
|
- **EttuTv**
|
||||||
- **Europa**
|
- **Europa**: (**Currently broken**)
|
||||||
- **EuroParlWebstream**
|
- **EuroParlWebstream**
|
||||||
- **EuropeanTour**
|
- **EuropeanTour**
|
||||||
- **Eurosport**
|
- **Eurosport**
|
||||||
|
@ -423,22 +431,23 @@
|
||||||
- **Expressen**
|
- **Expressen**
|
||||||
- **EyedoTV**
|
- **EyedoTV**
|
||||||
- **facebook**: [*facebook*](## "netrc machine")
|
- **facebook**: [*facebook*](## "netrc machine")
|
||||||
|
- **facebook:ads**
|
||||||
- **facebook:reel**
|
- **facebook:reel**
|
||||||
- **FacebookPluginsVideo**
|
- **FacebookPluginsVideo**
|
||||||
- **fancode:live**: [*fancode*](## "netrc machine")
|
- **fancode:live**: [*fancode*](## "netrc machine") (**Currently broken**)
|
||||||
- **fancode:vod**: [*fancode*](## "netrc machine")
|
- **fancode:vod**: [*fancode*](## "netrc machine") (**Currently broken**)
|
||||||
- **faz.net**
|
- **faz.net**
|
||||||
- **fc2**: [*fc2*](## "netrc machine")
|
- **fc2**: [*fc2*](## "netrc machine")
|
||||||
- **fc2:embed**
|
- **fc2:embed**
|
||||||
- **fc2:live**
|
- **fc2:live**
|
||||||
- **Fczenit**
|
- **Fczenit**
|
||||||
- **Fifa**
|
- **Fifa**
|
||||||
- **Filmmodu**
|
|
||||||
- **filmon**
|
- **filmon**
|
||||||
- **filmon:channel**
|
- **filmon:channel**
|
||||||
- **Filmweb**
|
- **Filmweb**
|
||||||
- **FiveThirtyEight**
|
- **FiveThirtyEight**
|
||||||
- **FiveTV**
|
- **FiveTV**
|
||||||
|
- **FlexTV**
|
||||||
- **Flickr**
|
- **Flickr**
|
||||||
- **Floatplane**
|
- **Floatplane**
|
||||||
- **FloatplaneChannel**
|
- **FloatplaneChannel**
|
||||||
|
@ -477,7 +486,6 @@
|
||||||
- **Gab**
|
- **Gab**
|
||||||
- **GabTV**
|
- **GabTV**
|
||||||
- **Gaia**: [*gaia*](## "netrc machine")
|
- **Gaia**: [*gaia*](## "netrc machine")
|
||||||
- **GameInformer**
|
|
||||||
- **GameJolt**
|
- **GameJolt**
|
||||||
- **GameJoltCommunity**
|
- **GameJoltCommunity**
|
||||||
- **GameJoltGame**
|
- **GameJoltGame**
|
||||||
|
@ -487,18 +495,19 @@
|
||||||
- **GameSpot**
|
- **GameSpot**
|
||||||
- **GameStar**
|
- **GameStar**
|
||||||
- **Gaskrank**
|
- **Gaskrank**
|
||||||
- **Gazeta**
|
- **Gazeta**: (**Currently broken**)
|
||||||
- **GDCVault**: [*gdcvault*](## "netrc machine")
|
- **GDCVault**: [*gdcvault*](## "netrc machine") (**Currently broken**)
|
||||||
- **GediDigital**
|
- **GediDigital**
|
||||||
- **gem.cbc.ca**: [*cbcgem*](## "netrc machine")
|
- **gem.cbc.ca**: [*cbcgem*](## "netrc machine")
|
||||||
- **gem.cbc.ca:live**
|
- **gem.cbc.ca:live**
|
||||||
- **gem.cbc.ca:playlist**
|
- **gem.cbc.ca:playlist**
|
||||||
- **Genius**
|
- **Genius**
|
||||||
- **GeniusLyrics**
|
- **GeniusLyrics**
|
||||||
|
- **GetCourseRu**: [*getcourseru*](## "netrc machine")
|
||||||
|
- **GetCourseRuPlayer**
|
||||||
- **Gettr**
|
- **Gettr**
|
||||||
- **GettrStreaming**
|
- **GettrStreaming**
|
||||||
- **GiantBomb**
|
- **GiantBomb**
|
||||||
- **Giga**
|
|
||||||
- **GlattvisionTV**: [*glattvisiontv*](## "netrc machine")
|
- **GlattvisionTV**: [*glattvisiontv*](## "netrc machine")
|
||||||
- **GlattvisionTVLive**: [*glattvisiontv*](## "netrc machine")
|
- **GlattvisionTVLive**: [*glattvisiontv*](## "netrc machine")
|
||||||
- **GlattvisionTVRecordings**: [*glattvisiontv*](## "netrc machine")
|
- **GlattvisionTVRecordings**: [*glattvisiontv*](## "netrc machine")
|
||||||
|
@ -516,7 +525,7 @@
|
||||||
- **GMANetworkVideo**
|
- **GMANetworkVideo**
|
||||||
- **Go**
|
- **Go**
|
||||||
- **GoDiscovery**
|
- **GoDiscovery**
|
||||||
- **GodTube**
|
- **GodTube**: (**Currently broken**)
|
||||||
- **Gofile**
|
- **Gofile**
|
||||||
- **Golem**
|
- **Golem**
|
||||||
- **goodgame:stream**
|
- **goodgame:stream**
|
||||||
|
@ -551,7 +560,7 @@
|
||||||
- **HollywoodReporter**
|
- **HollywoodReporter**
|
||||||
- **HollywoodReporterPlaylist**
|
- **HollywoodReporterPlaylist**
|
||||||
- **Holodex**
|
- **Holodex**
|
||||||
- **HotNewHipHop**
|
- **HotNewHipHop**: (**Currently broken**)
|
||||||
- **hotstar**
|
- **hotstar**
|
||||||
- **hotstar:playlist**
|
- **hotstar:playlist**
|
||||||
- **hotstar:season**
|
- **hotstar:season**
|
||||||
|
@ -579,6 +588,7 @@
|
||||||
- **IGNVideo**
|
- **IGNVideo**
|
||||||
- **iheartradio**
|
- **iheartradio**
|
||||||
- **iheartradio:podcast**
|
- **iheartradio:podcast**
|
||||||
|
- **IlPost**
|
||||||
- **Iltalehti**
|
- **Iltalehti**
|
||||||
- **imdb**: Internet Movie Database trailers
|
- **imdb**: Internet Movie Database trailers
|
||||||
- **imdb:list**: Internet Movie Database lists
|
- **imdb:list**: Internet Movie Database lists
|
||||||
|
@ -592,7 +602,7 @@
|
||||||
- **Instagram**: [*instagram*](## "netrc machine")
|
- **Instagram**: [*instagram*](## "netrc machine")
|
||||||
- **instagram:story**: [*instagram*](## "netrc machine")
|
- **instagram:story**: [*instagram*](## "netrc machine")
|
||||||
- **instagram:tag**: [*instagram*](## "netrc machine") Instagram hashtag search URLs
|
- **instagram:tag**: [*instagram*](## "netrc machine") Instagram hashtag search URLs
|
||||||
- **instagram:user**: [*instagram*](## "netrc machine") Instagram user profile
|
- **instagram:user**: [*instagram*](## "netrc machine") Instagram user profile (**Currently broken**)
|
||||||
- **InstagramIOS**: IOS instagram:// URL
|
- **InstagramIOS**: IOS instagram:// URL
|
||||||
- **Internazionale**
|
- **Internazionale**
|
||||||
- **InternetVideoArchive**
|
- **InternetVideoArchive**
|
||||||
|
@ -622,7 +632,7 @@
|
||||||
- **JablePlaylist**
|
- **JablePlaylist**
|
||||||
- **Jamendo**
|
- **Jamendo**
|
||||||
- **JamendoAlbum**
|
- **JamendoAlbum**
|
||||||
- **JeuxVideo**
|
- **JeuxVideo**: (**Currently broken**)
|
||||||
- **JioSaavnAlbum**
|
- **JioSaavnAlbum**
|
||||||
- **JioSaavnSong**
|
- **JioSaavnSong**
|
||||||
- **Joj**
|
- **Joj**
|
||||||
|
@ -634,12 +644,10 @@
|
||||||
- **JWPlatform**
|
- **JWPlatform**
|
||||||
- **Kakao**
|
- **Kakao**
|
||||||
- **Kaltura**
|
- **Kaltura**
|
||||||
- **Kanal2**
|
- **KankaNews**: (**Currently broken**)
|
||||||
- **KankaNews**
|
|
||||||
- **Karaoketv**
|
- **Karaoketv**
|
||||||
- **KarriereVideos**
|
- **Katsomo**: (**Currently broken**)
|
||||||
- **Katsomo**
|
- **KelbyOne**: (**Currently broken**)
|
||||||
- **KelbyOne**
|
|
||||||
- **Ketnet**
|
- **Ketnet**
|
||||||
- **khanacademy**
|
- **khanacademy**
|
||||||
- **khanacademy:unit**
|
- **khanacademy:unit**
|
||||||
|
@ -651,18 +659,17 @@
|
||||||
- **KinoPoisk**
|
- **KinoPoisk**
|
||||||
- **Kommunetv**
|
- **Kommunetv**
|
||||||
- **KompasVideo**
|
- **KompasVideo**
|
||||||
- **KonserthusetPlay**
|
- **Koo**: (**Currently broken**)
|
||||||
- **Koo**
|
- **KrasView**: Красвью (**Currently broken**)
|
||||||
- **KrasView**: Красвью
|
|
||||||
- **KTH**
|
- **KTH**
|
||||||
- **Ku6**
|
- **Ku6**
|
||||||
- **KUSI**
|
- **KukuluLive**
|
||||||
- **kuwo:album**: 酷我音乐 - 专辑
|
- **kuwo:album**: 酷我音乐 - 专辑 (**Currently broken**)
|
||||||
- **kuwo:category**: 酷我音乐 - 分类
|
- **kuwo:category**: 酷我音乐 - 分类 (**Currently broken**)
|
||||||
- **kuwo:chart**: 酷我音乐 - 排行榜
|
- **kuwo:chart**: 酷我音乐 - 排行榜 (**Currently broken**)
|
||||||
- **kuwo:mv**: 酷我音乐 - MV
|
- **kuwo:mv**: 酷我音乐 - MV (**Currently broken**)
|
||||||
- **kuwo:singer**: 酷我音乐 - 歌手
|
- **kuwo:singer**: 酷我音乐 - 歌手 (**Currently broken**)
|
||||||
- **kuwo:song**: 酷我音乐
|
- **kuwo:song**: 酷我音乐 (**Currently broken**)
|
||||||
- **la7.it**
|
- **la7.it**
|
||||||
- **la7.it:pod:episode**
|
- **la7.it:pod:episode**
|
||||||
- **la7.it:podcast**
|
- **la7.it:podcast**
|
||||||
|
@ -677,7 +684,7 @@
|
||||||
- **Lcp**
|
- **Lcp**
|
||||||
- **LcpPlay**
|
- **LcpPlay**
|
||||||
- **Le**: 乐视网
|
- **Le**: 乐视网
|
||||||
- **Lecture2Go**
|
- **Lecture2Go**: (**Currently broken**)
|
||||||
- **Lecturio**: [*lecturio*](## "netrc machine")
|
- **Lecturio**: [*lecturio*](## "netrc machine")
|
||||||
- **LecturioCourse**: [*lecturio*](## "netrc machine")
|
- **LecturioCourse**: [*lecturio*](## "netrc machine")
|
||||||
- **LecturioDeCourse**: [*lecturio*](## "netrc machine")
|
- **LecturioDeCourse**: [*lecturio*](## "netrc machine")
|
||||||
|
@ -685,7 +692,7 @@
|
||||||
- **LeFigaroVideoSection**
|
- **LeFigaroVideoSection**
|
||||||
- **LEGO**
|
- **LEGO**
|
||||||
- **Lemonde**
|
- **Lemonde**
|
||||||
- **Lenta**
|
- **Lenta**: (**Currently broken**)
|
||||||
- **LePlaylist**
|
- **LePlaylist**
|
||||||
- **LetvCloud**: 乐视云
|
- **LetvCloud**: 乐视云
|
||||||
- **Libsyn**
|
- **Libsyn**
|
||||||
|
@ -709,31 +716,32 @@
|
||||||
- **Lnk**
|
- **Lnk**
|
||||||
- **LnkGo**
|
- **LnkGo**
|
||||||
- **loc**: Library of Congress
|
- **loc**: Library of Congress
|
||||||
- **LocalNews8**
|
|
||||||
- **LoveHomePorn**
|
- **LoveHomePorn**
|
||||||
- **LRTStream**
|
- **LRTStream**
|
||||||
- **LRTVOD**
|
- **LRTVOD**
|
||||||
|
- **LSMLREmbed**
|
||||||
|
- **LSMLTVEmbed**
|
||||||
|
- **LSMReplay**
|
||||||
- **Lumni**
|
- **Lumni**
|
||||||
- **lynda**: [*lynda*](## "netrc machine") lynda.com videos
|
- **lynda**: [*lynda*](## "netrc machine") lynda.com videos
|
||||||
- **lynda:course**: [*lynda*](## "netrc machine") lynda.com online courses
|
- **lynda:course**: [*lynda*](## "netrc machine") lynda.com online courses
|
||||||
- **maariv.co.il**
|
- **maariv.co.il**
|
||||||
- **MagellanTV**
|
- **MagellanTV**
|
||||||
- **MagentaMusik360**
|
- **MagentaMusik**
|
||||||
- **mailru**: Видео@Mail.Ru
|
- **mailru**: Видео@Mail.Ru
|
||||||
- **mailru:music**: Музыка@Mail.Ru
|
- **mailru:music**: Музыка@Mail.Ru
|
||||||
- **mailru:music:search**: Музыка@Mail.Ru
|
- **mailru:music:search**: Музыка@Mail.Ru
|
||||||
- **MainStreaming**: MainStreaming Player
|
- **MainStreaming**: MainStreaming Player
|
||||||
- **MallTV**
|
|
||||||
- **mangomolo:live**
|
- **mangomolo:live**
|
||||||
- **mangomolo:video**
|
- **mangomolo:video**
|
||||||
- **MangoTV**: 芒果TV
|
- **MangoTV**: 芒果TV
|
||||||
- **ManotoTV**: Manoto TV (Episode)
|
- **ManotoTV**: Manoto TV (Episode)
|
||||||
- **ManotoTVLive**: Manoto TV (Live)
|
- **ManotoTVLive**: Manoto TV (Live)
|
||||||
- **ManotoTVShow**: Manoto TV (Show)
|
- **ManotoTVShow**: Manoto TV (Show)
|
||||||
- **ManyVids**
|
- **ManyVids**: (**Currently broken**)
|
||||||
- **MaoriTV**
|
- **MaoriTV**
|
||||||
- **Markiza**
|
- **Markiza**: (**Currently broken**)
|
||||||
- **MarkizaPage**
|
- **MarkizaPage**: (**Currently broken**)
|
||||||
- **massengeschmack.tv**
|
- **massengeschmack.tv**
|
||||||
- **Masters**
|
- **Masters**
|
||||||
- **MatchTV**
|
- **MatchTV**
|
||||||
|
@ -760,7 +768,6 @@
|
||||||
- **MelonVOD**
|
- **MelonVOD**
|
||||||
- **Metacritic**
|
- **Metacritic**
|
||||||
- **mewatch**
|
- **mewatch**
|
||||||
- **MiaoPai**
|
|
||||||
- **MicrosoftEmbed**
|
- **MicrosoftEmbed**
|
||||||
- **microsoftstream**: Microsoft Stream
|
- **microsoftstream**: Microsoft Stream
|
||||||
- **mildom**: Record ongoing live by specific user in Mildom
|
- **mildom**: Record ongoing live by specific user in Mildom
|
||||||
|
@ -770,7 +777,6 @@
|
||||||
- **minds**
|
- **minds**
|
||||||
- **minds:channel**
|
- **minds:channel**
|
||||||
- **minds:group**
|
- **minds:group**
|
||||||
- **MinistryGrid**
|
|
||||||
- **Minoto**
|
- **Minoto**
|
||||||
- **mirrativ**
|
- **mirrativ**
|
||||||
- **mirrativ:user**
|
- **mirrativ:user**
|
||||||
|
@ -793,11 +799,11 @@
|
||||||
- **Mojvideo**
|
- **Mojvideo**
|
||||||
- **Monstercat**
|
- **Monstercat**
|
||||||
- **MonsterSirenHypergryphMusic**
|
- **MonsterSirenHypergryphMusic**
|
||||||
- **Morningstar**: morningstar.com
|
|
||||||
- **Motherless**
|
- **Motherless**
|
||||||
- **MotherlessGallery**
|
- **MotherlessGallery**
|
||||||
- **MotherlessGroup**
|
- **MotherlessGroup**
|
||||||
- **Motorsport**: motorsport.com
|
- **MotherlessUploader**
|
||||||
|
- **Motorsport**: motorsport.com (**Currently broken**)
|
||||||
- **MotorTrend**
|
- **MotorTrend**
|
||||||
- **MotorTrendOnDemand**
|
- **MotorTrendOnDemand**
|
||||||
- **MovieFap**
|
- **MovieFap**
|
||||||
|
@ -808,17 +814,17 @@
|
||||||
- **MSN**: (**Currently broken**)
|
- **MSN**: (**Currently broken**)
|
||||||
- **mtg**: MTG services
|
- **mtg**: MTG services
|
||||||
- **mtv**
|
- **mtv**
|
||||||
- **mtv.de**
|
- **mtv.de**: (**Currently broken**)
|
||||||
- **mtv.it**
|
- **mtv.it**
|
||||||
- **mtv.it:programma**
|
- **mtv.it:programma**
|
||||||
- **mtv:video**
|
- **mtv:video**
|
||||||
- **mtvjapan**
|
- **mtvjapan**
|
||||||
- **mtvservices:embedded**
|
- **mtvservices:embedded**
|
||||||
- **MTVUutisetArticle**
|
- **MTVUutisetArticle**: (**Currently broken**)
|
||||||
- **MuenchenTV**: münchen.tv
|
- **MuenchenTV**: münchen.tv (**Currently broken**)
|
||||||
- **MujRozhlas**
|
- **MujRozhlas**
|
||||||
- **Murrtube**
|
- **Murrtube**: (**Currently broken**)
|
||||||
- **MurrtubeUser**: Murrtube user profile
|
- **MurrtubeUser**: Murrtube user profile (**Currently broken**)
|
||||||
- **MuseAI**
|
- **MuseAI**
|
||||||
- **MuseScore**
|
- **MuseScore**
|
||||||
- **MusicdexAlbum**
|
- **MusicdexAlbum**
|
||||||
|
@ -827,6 +833,9 @@
|
||||||
- **MusicdexSong**
|
- **MusicdexSong**
|
||||||
- **mva**: Microsoft Virtual Academy videos
|
- **mva**: Microsoft Virtual Academy videos
|
||||||
- **mva:course**: Microsoft Virtual Academy courses
|
- **mva:course**: Microsoft Virtual Academy courses
|
||||||
|
- **Mx3**
|
||||||
|
- **Mx3Neo**
|
||||||
|
- **Mx3Volksmusik**
|
||||||
- **Mxplayer**
|
- **Mxplayer**
|
||||||
- **MxplayerShow**
|
- **MxplayerShow**
|
||||||
- **MySpace**
|
- **MySpace**
|
||||||
|
@ -862,11 +871,11 @@
|
||||||
- **ndr**: NDR.de - Norddeutscher Rundfunk
|
- **ndr**: NDR.de - Norddeutscher Rundfunk
|
||||||
- **ndr:embed**
|
- **ndr:embed**
|
||||||
- **ndr:embed:base**
|
- **ndr:embed:base**
|
||||||
- **NDTV**
|
- **NDTV**: (**Currently broken**)
|
||||||
- **Nebula**: [*watchnebula*](## "netrc machine")
|
|
||||||
- **nebula:channel**: [*watchnebula*](## "netrc machine")
|
- **nebula:channel**: [*watchnebula*](## "netrc machine")
|
||||||
- **nebula:class**: [*watchnebula*](## "netrc machine")
|
- **nebula:media**: [*watchnebula*](## "netrc machine")
|
||||||
- **nebula:subscriptions**: [*watchnebula*](## "netrc machine")
|
- **nebula:subscriptions**: [*watchnebula*](## "netrc machine")
|
||||||
|
- **nebula:video**: [*watchnebula*](## "netrc machine")
|
||||||
- **NekoHacker**
|
- **NekoHacker**
|
||||||
- **NerdCubedFeed**
|
- **NerdCubedFeed**
|
||||||
- **netease:album**: 网易云音乐 - 专辑
|
- **netease:album**: 网易云音乐 - 专辑
|
||||||
|
@ -882,18 +891,19 @@
|
||||||
- **Netverse**
|
- **Netverse**
|
||||||
- **NetversePlaylist**
|
- **NetversePlaylist**
|
||||||
- **NetverseSearch**: "netsearch:" prefix
|
- **NetverseSearch**: "netsearch:" prefix
|
||||||
- **Netzkino**
|
- **Netzkino**: (**Currently broken**)
|
||||||
- **Newgrounds**
|
- **Newgrounds**: [*newgrounds*](## "netrc machine")
|
||||||
- **Newgrounds:playlist**
|
- **Newgrounds:playlist**
|
||||||
- **Newgrounds:user**
|
- **Newgrounds:user**
|
||||||
- **NewsPicks**
|
- **NewsPicks**
|
||||||
- **Newsy**
|
- **Newsy**
|
||||||
- **NextMedia**: 蘋果日報
|
- **NextMedia**: 蘋果日報
|
||||||
- **NextMediaActionNews**: 蘋果日報 - 動新聞
|
- **NextMediaActionNews**: 蘋果日報 - 動新聞
|
||||||
- **NextTV**: 壹電視
|
- **NextTV**: 壹電視 (**Currently broken**)
|
||||||
- **Nexx**
|
- **Nexx**
|
||||||
- **NexxEmbed**
|
- **NexxEmbed**
|
||||||
- **NFB**
|
- **nfb**: nfb.ca and onf.ca films and episodes
|
||||||
|
- **nfb:series**: nfb.ca and onf.ca series
|
||||||
- **NFHSNetwork**
|
- **NFHSNetwork**
|
||||||
- **nfl.com**
|
- **nfl.com**
|
||||||
- **nfl.com:article**
|
- **nfl.com:article**
|
||||||
|
@ -925,11 +935,12 @@
|
||||||
- **nicovideo:search**: Nico video search; "nicosearch:" prefix
|
- **nicovideo:search**: Nico video search; "nicosearch:" prefix
|
||||||
- **nicovideo:search:date**: Nico video search, newest first; "nicosearchdate:" prefix
|
- **nicovideo:search:date**: Nico video search, newest first; "nicosearchdate:" prefix
|
||||||
- **nicovideo:search_url**: Nico video search URLs
|
- **nicovideo:search_url**: Nico video search URLs
|
||||||
|
- **NinaProtocol**
|
||||||
- **Nintendo**
|
- **Nintendo**
|
||||||
- **Nitter**
|
- **Nitter**
|
||||||
- **njoy**: N-JOY
|
- **njoy**: N-JOY
|
||||||
- **njoy:embed**
|
- **njoy:embed**
|
||||||
- **NobelPrize**
|
- **NobelPrize**: (**Currently broken**)
|
||||||
- **NoicePodcast**
|
- **NoicePodcast**
|
||||||
- **NonkTube**
|
- **NonkTube**
|
||||||
- **NoodleMagazine**
|
- **NoodleMagazine**
|
||||||
|
@ -941,7 +952,7 @@
|
||||||
- **nowness**
|
- **nowness**
|
||||||
- **nowness:playlist**
|
- **nowness:playlist**
|
||||||
- **nowness:series**
|
- **nowness:series**
|
||||||
- **Noz**
|
- **Noz**: (**Currently broken**)
|
||||||
- **npo**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
- **npo**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
|
||||||
- **npo.nl:live**
|
- **npo.nl:live**
|
||||||
- **npo.nl:radio**
|
- **npo.nl:radio**
|
||||||
|
@ -960,15 +971,18 @@
|
||||||
- **NRLTV**: (**Currently broken**)
|
- **NRLTV**: (**Currently broken**)
|
||||||
- **ntv.ru**
|
- **ntv.ru**
|
||||||
- **NubilesPorn**: [*nubiles-porn*](## "netrc machine")
|
- **NubilesPorn**: [*nubiles-porn*](## "netrc machine")
|
||||||
|
- **nuum:live**
|
||||||
|
- **nuum:media**
|
||||||
|
- **nuum:tab**
|
||||||
- **Nuvid**
|
- **Nuvid**
|
||||||
- **NYTimes**
|
- **NYTimes**
|
||||||
- **NYTimesArticle**
|
- **NYTimesArticle**
|
||||||
- **NYTimesCooking**
|
- **NYTimesCookingGuide**
|
||||||
|
- **NYTimesCookingRecipe**
|
||||||
- **nzherald**
|
- **nzherald**
|
||||||
- **NZOnScreen**
|
- **NZOnScreen**
|
||||||
- **NZZ**
|
- **NZZ**
|
||||||
- **ocw.mit.edu**
|
- **ocw.mit.edu**
|
||||||
- **OdaTV**
|
|
||||||
- **Odnoklassniki**
|
- **Odnoklassniki**
|
||||||
- **OfTV**
|
- **OfTV**
|
||||||
- **OfTVPlaylist**
|
- **OfTVPlaylist**
|
||||||
|
@ -993,6 +1007,7 @@
|
||||||
- **OraTV**
|
- **OraTV**
|
||||||
- **orf:fm4:story**: fm4.orf.at stories
|
- **orf:fm4:story**: fm4.orf.at stories
|
||||||
- **orf:iptv**: iptv.ORF.at
|
- **orf:iptv**: iptv.ORF.at
|
||||||
|
- **orf:on**
|
||||||
- **orf:podcast**
|
- **orf:podcast**
|
||||||
- **orf:radio**
|
- **orf:radio**
|
||||||
- **orf:tvthek**: ORF TVthek
|
- **orf:tvthek**: ORF TVthek
|
||||||
|
@ -1015,7 +1030,7 @@
|
||||||
- **ParamountPressExpress**
|
- **ParamountPressExpress**
|
||||||
- **Parler**: Posts on parler.com
|
- **Parler**: Posts on parler.com
|
||||||
- **parliamentlive.tv**: UK parliament videos
|
- **parliamentlive.tv**: UK parliament videos
|
||||||
- **Parlview**
|
- **Parlview**: (**Currently broken**)
|
||||||
- **Patreon**
|
- **Patreon**
|
||||||
- **PatreonCampaign**
|
- **PatreonCampaign**
|
||||||
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! 
(WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
|
- **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! 
(WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
|
||||||
|
@ -1049,19 +1064,19 @@
|
||||||
- **Platzi**: [*platzi*](## "netrc machine")
|
- **Platzi**: [*platzi*](## "netrc machine")
|
||||||
- **PlatziCourse**: [*platzi*](## "netrc machine")
|
- **PlatziCourse**: [*platzi*](## "netrc machine")
|
||||||
- **player.sky.it**
|
- **player.sky.it**
|
||||||
|
- **playeur**
|
||||||
- **PlayPlusTV**: [*playplustv*](## "netrc machine")
|
- **PlayPlusTV**: [*playplustv*](## "netrc machine")
|
||||||
- **PlayStuff**
|
- **PlaySuisse**: [*playsuisse*](## "netrc machine")
|
||||||
- **PlaySuisse**
|
|
||||||
- **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
|
- **Playtvak**: Playtvak.cz, iDNES.cz and Lidovky.cz
|
||||||
- **PlayVids**
|
- **PlayVids**
|
||||||
- **Playwire**
|
- **Playwire**
|
||||||
- **pluralsight**: [*pluralsight*](## "netrc machine")
|
- **pluralsight**: [*pluralsight*](## "netrc machine")
|
||||||
- **pluralsight:course**
|
- **pluralsight:course**
|
||||||
- **PlutoTV**
|
- **PlutoTV**: (**Currently broken**)
|
||||||
- **PodbayFM**
|
- **PodbayFM**
|
||||||
- **PodbayFMChannel**
|
- **PodbayFMChannel**
|
||||||
- **Podchaser**
|
- **Podchaser**
|
||||||
- **podomatic**
|
- **podomatic**: (**Currently broken**)
|
||||||
- **Pokemon**
|
- **Pokemon**
|
||||||
- **PokemonWatch**
|
- **PokemonWatch**
|
||||||
- **PokerGo**: [*pokergo*](## "netrc machine")
|
- **PokerGo**: [*pokergo*](## "netrc machine")
|
||||||
|
@ -1085,15 +1100,16 @@
|
||||||
- **PornHubUser**: [*pornhub*](## "netrc machine")
|
- **PornHubUser**: [*pornhub*](## "netrc machine")
|
||||||
- **PornHubUserVideosUpload**: [*pornhub*](## "netrc machine")
|
- **PornHubUserVideosUpload**: [*pornhub*](## "netrc machine")
|
||||||
- **Pornotube**
|
- **Pornotube**
|
||||||
- **PornoVoisines**
|
- **PornoVoisines**: (**Currently broken**)
|
||||||
- **PornoXO**
|
- **PornoXO**: (**Currently broken**)
|
||||||
- **PornTop**
|
- **PornTop**
|
||||||
- **PornTube**
|
- **PornTube**
|
||||||
- **Pr0gramm**
|
- **Pr0gramm**
|
||||||
- **PrankCast**
|
- **PrankCast**
|
||||||
|
- **PrankCastPost**
|
||||||
- **PremiershipRugby**
|
- **PremiershipRugby**
|
||||||
- **PressTV**
|
- **PressTV**
|
||||||
- **ProjectVeritas**
|
- **ProjectVeritas**: (**Currently broken**)
|
||||||
- **prosiebensat1**: ProSiebenSat.1 Digital
|
- **prosiebensat1**: ProSiebenSat.1 Digital
|
||||||
- **PRXAccount**
|
- **PRXAccount**
|
||||||
- **PRXSeries**
|
- **PRXSeries**
|
||||||
|
@ -1115,11 +1131,11 @@
|
||||||
- **QuantumTVLive**: [*quantumtv*](## "netrc machine")
|
- **QuantumTVLive**: [*quantumtv*](## "netrc machine")
|
||||||
- **QuantumTVRecordings**: [*quantumtv*](## "netrc machine")
|
- **QuantumTVRecordings**: [*quantumtv*](## "netrc machine")
|
||||||
- **Qub**
|
- **Qub**
|
||||||
- **R7**
|
- **R7**: (**Currently broken**)
|
||||||
- **R7Article**
|
- **R7Article**: (**Currently broken**)
|
||||||
- **Radiko**
|
- **Radiko**
|
||||||
- **RadikoRadio**
|
- **RadikoRadio**
|
||||||
- **radio.de**
|
- **radio.de**: (**Currently broken**)
|
||||||
- **radiocanada**
|
- **radiocanada**
|
||||||
- **radiocanada:audiovideo**
|
- **radiocanada:audiovideo**
|
||||||
- **RadioComercial**
|
- **RadioComercial**
|
||||||
|
@ -1129,7 +1145,7 @@
|
||||||
- **RadioFrancePodcast**
|
- **RadioFrancePodcast**
|
||||||
- **RadioFranceProfile**
|
- **RadioFranceProfile**
|
||||||
- **RadioFranceProgramSchedule**
|
- **RadioFranceProgramSchedule**
|
||||||
- **RadioJavan**
|
- **RadioJavan**: (**Currently broken**)
|
||||||
- **radiokapital**
|
- **radiokapital**
|
||||||
- **radiokapital:show**
|
- **radiokapital:show**
|
||||||
- **RadioZetPodcast**
|
- **RadioZetPodcast**
|
||||||
|
@ -1151,33 +1167,34 @@
|
||||||
- **RbgTum**
|
- **RbgTum**
|
||||||
- **RbgTumCourse**
|
- **RbgTumCourse**
|
||||||
- **RbgTumNewCourse**
|
- **RbgTumNewCourse**
|
||||||
- **RBMARadio**
|
|
||||||
- **RCS**
|
- **RCS**
|
||||||
- **RCSEmbeds**
|
- **RCSEmbeds**
|
||||||
- **RCSVarious**
|
- **RCSVarious**
|
||||||
- **RCTIPlus**
|
- **RCTIPlus**
|
||||||
- **RCTIPlusSeries**
|
- **RCTIPlusSeries**
|
||||||
- **RCTIPlusTV**
|
- **RCTIPlusTV**
|
||||||
- **RDS**: RDS.ca
|
- **RDS**: RDS.ca (**Currently broken**)
|
||||||
- **RedBull**
|
- **RedBull**
|
||||||
- **RedBullEmbed**
|
- **RedBullEmbed**
|
||||||
- **RedBullTV**
|
- **RedBullTV**
|
||||||
- **RedBullTVRrnContent**
|
- **RedBullTVRrnContent**
|
||||||
|
- **redcdnlivx**
|
||||||
- **Reddit**: [*reddit*](## "netrc machine")
|
- **Reddit**: [*reddit*](## "netrc machine")
|
||||||
- **RedGifs**
|
- **RedGifs**
|
||||||
- **RedGifsSearch**: Redgifs search
|
- **RedGifsSearch**: Redgifs search
|
||||||
- **RedGifsUser**: Redgifs user
|
- **RedGifsUser**: Redgifs user
|
||||||
- **RedTube**
|
- **RedTube**
|
||||||
- **RegioTV**
|
- **RENTV**: (**Currently broken**)
|
||||||
- **RENTV**
|
- **RENTVArticle**: (**Currently broken**)
|
||||||
- **RENTVArticle**
|
- **Restudy**: (**Currently broken**)
|
||||||
- **Restudy**
|
- **Reuters**: (**Currently broken**)
|
||||||
- **Reuters**
|
|
||||||
- **ReverbNation**
|
- **ReverbNation**
|
||||||
- **RheinMainTV**
|
- **RheinMainTV**
|
||||||
|
- **RideHome**
|
||||||
- **RinseFM**
|
- **RinseFM**
|
||||||
|
- **RinseFMArtistPlaylist**
|
||||||
- **RMCDecouverte**
|
- **RMCDecouverte**
|
||||||
- **RockstarGames**
|
- **RockstarGames**: (**Currently broken**)
|
||||||
- **Rokfin**: [*rokfin*](## "netrc machine")
|
- **Rokfin**: [*rokfin*](## "netrc machine")
|
||||||
- **rokfin:channel**: Rokfin Channels
|
- **rokfin:channel**: Rokfin Channels
|
||||||
- **rokfin:search**: Rokfin Search; "rkfnsearch:" prefix
|
- **rokfin:search**: Rokfin Search; "rkfnsearch:" prefix
|
||||||
|
@ -1187,7 +1204,7 @@
|
||||||
- **RottenTomatoes**
|
- **RottenTomatoes**
|
||||||
- **Rozhlas**
|
- **Rozhlas**
|
||||||
- **RozhlasVltava**
|
- **RozhlasVltava**
|
||||||
- **RTBF**: [*rtbf*](## "netrc machine")
|
- **RTBF**: [*rtbf*](## "netrc machine") (**Currently broken**)
|
||||||
- **RTDocumentry**
|
- **RTDocumentry**
|
||||||
- **RTDocumentryPlaylist**
|
- **RTDocumentryPlaylist**
|
||||||
- **rte**: Raidió Teilifís Éireann TV
|
- **rte**: Raidió Teilifís Éireann TV
|
||||||
|
@ -1201,7 +1218,7 @@
|
||||||
- **RTNews**
|
- **RTNews**
|
||||||
- **RTP**
|
- **RTP**
|
||||||
- **RTRFM**
|
- **RTRFM**
|
||||||
- **RTS**: RTS.ch
|
- **RTS**: RTS.ch (**Currently broken**)
|
||||||
- **RTVCKaltura**
|
- **RTVCKaltura**
|
||||||
- **RTVCPlay**
|
- **RTVCPlay**
|
||||||
- **RTVCPlayEmbed**
|
- **RTVCPlayEmbed**
|
||||||
|
@ -1234,7 +1251,7 @@
|
||||||
- **safari**: [*safari*](## "netrc machine") safaribooksonline.com online video
|
- **safari**: [*safari*](## "netrc machine") safaribooksonline.com online video
|
||||||
- **safari:api**: [*safari*](## "netrc machine")
|
- **safari:api**: [*safari*](## "netrc machine")
|
||||||
- **safari:course**: [*safari*](## "netrc machine") safaribooksonline.com online courses
|
- **safari:course**: [*safari*](## "netrc machine") safaribooksonline.com online courses
|
||||||
- **Saitosan**
|
- **Saitosan**: (**Currently broken**)
|
||||||
- **SAKTV**: [*saktv*](## "netrc machine")
|
- **SAKTV**: [*saktv*](## "netrc machine")
|
||||||
- **SAKTVLive**: [*saktv*](## "netrc machine")
|
- **SAKTVLive**: [*saktv*](## "netrc machine")
|
||||||
- **SAKTVRecordings**: [*saktv*](## "netrc machine")
|
- **SAKTVRecordings**: [*saktv*](## "netrc machine")
|
||||||
|
@ -1244,7 +1261,6 @@
|
||||||
- **SampleFocus**
|
- **SampleFocus**
|
||||||
- **Sangiin**: 参議院インターネット審議中継 (archive)
|
- **Sangiin**: 参議院インターネット審議中継 (archive)
|
||||||
- **Sapo**: SAPO Vídeos
|
- **Sapo**: SAPO Vídeos
|
||||||
- **savefrom.net**
|
|
||||||
- **SBS**: sbs.com.au
|
- **SBS**: sbs.com.au
|
||||||
- **sbs.co.kr**
|
- **sbs.co.kr**
|
||||||
- **sbs.co.kr:allvod_program**
|
- **sbs.co.kr:allvod_program**
|
||||||
|
@ -1261,13 +1277,13 @@
|
||||||
- **Scrolller**
|
- **Scrolller**
|
||||||
- **SCTE**: [*scte*](## "netrc machine") (**Currently broken**)
|
- **SCTE**: [*scte*](## "netrc machine") (**Currently broken**)
|
||||||
- **SCTECourse**: [*scte*](## "netrc machine") (**Currently broken**)
|
- **SCTECourse**: [*scte*](## "netrc machine") (**Currently broken**)
|
||||||
- **Seeker**
|
- **sejm**
|
||||||
- **SenalColombiaLive**
|
- **SenalColombiaLive**: (**Currently broken**)
|
||||||
- **SenateGov**
|
- **SenateGov**
|
||||||
- **SenateISVP**
|
- **SenateISVP**
|
||||||
- **SendtoNews**
|
- **SendtoNews**: (**Currently broken**)
|
||||||
- **Servus**
|
- **Servus**
|
||||||
- **Sexu**
|
- **Sexu**: (**Currently broken**)
|
||||||
- **SeznamZpravy**
|
- **SeznamZpravy**
|
||||||
- **SeznamZpravyArticle**
|
- **SeznamZpravyArticle**
|
||||||
- **Shahid**: [*shahid*](## "netrc machine")
|
- **Shahid**: [*shahid*](## "netrc machine")
|
||||||
|
@ -1289,9 +1305,9 @@
|
||||||
- **sky:news:story**
|
- **sky:news:story**
|
||||||
- **sky:sports**
|
- **sky:sports**
|
||||||
- **sky:sports:news**
|
- **sky:sports:news**
|
||||||
- **SkylineWebcams**
|
- **SkylineWebcams**: (**Currently broken**)
|
||||||
- **skynewsarabia:article**
|
- **skynewsarabia:article**: (**Currently broken**)
|
||||||
- **skynewsarabia:video**
|
- **skynewsarabia:video**: (**Currently broken**)
|
||||||
- **SkyNewsAU**
|
- **SkyNewsAU**
|
||||||
- **Slideshare**
|
- **Slideshare**
|
||||||
- **SlidesLive**
|
- **SlidesLive**
|
||||||
|
@ -1342,7 +1358,7 @@
|
||||||
- **StacommuVOD**: [*stacommu*](## "netrc machine")
|
- **StacommuVOD**: [*stacommu*](## "netrc machine")
|
||||||
- **StagePlusVODConcert**: [*stageplus*](## "netrc machine")
|
- **StagePlusVODConcert**: [*stageplus*](## "netrc machine")
|
||||||
- **stanfordoc**: Stanford Open ClassRoom
|
- **stanfordoc**: Stanford Open ClassRoom
|
||||||
- **StarTrek**
|
- **StarTrek**: (**Currently broken**)
|
||||||
- **startv**
|
- **startv**
|
||||||
- **Steam**
|
- **Steam**
|
||||||
- **SteamCommunityBroadcast**
|
- **SteamCommunityBroadcast**
|
||||||
|
@ -1353,7 +1369,6 @@
|
||||||
- **StoryFireUser**
|
- **StoryFireUser**
|
||||||
- **Streamable**
|
- **Streamable**
|
||||||
- **StreamCZ**
|
- **StreamCZ**
|
||||||
- **StreamFF**
|
|
||||||
- **StreetVoice**
|
- **StreetVoice**
|
||||||
- **StretchInternet**
|
- **StretchInternet**
|
||||||
- **Stripchat**
|
- **Stripchat**
|
||||||
|
@ -1367,22 +1382,21 @@
|
||||||
- **SVTPlay**: SVT Play and Öppet arkiv
|
- **SVTPlay**: SVT Play and Öppet arkiv
|
||||||
- **SVTSeries**
|
- **SVTSeries**
|
||||||
- **SwearnetEpisode**
|
- **SwearnetEpisode**
|
||||||
- **Syfy**
|
- **Syfy**: (**Currently broken**)
|
||||||
- **SYVDK**
|
- **SYVDK**
|
||||||
- **SztvHu**
|
- **SztvHu**
|
||||||
- **t-online.de**
|
- **t-online.de**: (**Currently broken**)
|
||||||
- **Tagesschau**
|
- **Tagesschau**: (**Currently broken**)
|
||||||
- **Tass**
|
- **Tass**: (**Currently broken**)
|
||||||
- **TBS**
|
- **TBS**
|
||||||
- **TBSJPEpisode**
|
- **TBSJPEpisode**
|
||||||
- **TBSJPPlaylist**
|
- **TBSJPPlaylist**
|
||||||
- **TBSJPProgram**
|
- **TBSJPProgram**
|
||||||
- **TDSLifeway**
|
- **Teachable**: [*teachable*](## "netrc machine") (**Currently broken**)
|
||||||
- **Teachable**: [*teachable*](## "netrc machine")
|
|
||||||
- **TeachableCourse**: [*teachable*](## "netrc machine")
|
- **TeachableCourse**: [*teachable*](## "netrc machine")
|
||||||
- **teachertube**: teachertube.com videos
|
- **teachertube**: teachertube.com videos (**Currently broken**)
|
||||||
- **teachertube:user:collection**: teachertube.com user and collection videos
|
- **teachertube:user:collection**: teachertube.com user and collection videos (**Currently broken**)
|
||||||
- **TeachingChannel**
|
- **TeachingChannel**: (**Currently broken**)
|
||||||
- **Teamcoco**
|
- **Teamcoco**
|
||||||
- **TeamTreeHouse**: [*teamtreehouse*](## "netrc machine")
|
- **TeamTreeHouse**: [*teamtreehouse*](## "netrc machine")
|
||||||
- **techtv.mit.edu**
|
- **techtv.mit.edu**
|
||||||
|
@ -1391,20 +1405,20 @@
|
||||||
- **TedSeries**
|
- **TedSeries**
|
||||||
- **TedTalk**
|
- **TedTalk**
|
||||||
- **Tele13**
|
- **Tele13**
|
||||||
- **Tele5**
|
- **Tele5**: (**Currently broken**)
|
||||||
- **TeleBruxelles**
|
- **TeleBruxelles**
|
||||||
- **TelecaribePlay**
|
- **TelecaribePlay**
|
||||||
- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
|
- **Telecinco**: telecinco.es, cuatro.com and mediaset.es
|
||||||
- **Telegraaf**
|
- **Telegraaf**
|
||||||
- **telegram:embed**
|
- **telegram:embed**
|
||||||
- **TeleMB**
|
- **TeleMB**: (**Currently broken**)
|
||||||
- **Telemundo**
|
- **Telemundo**: (**Currently broken**)
|
||||||
- **TeleQuebec**
|
- **TeleQuebec**
|
||||||
- **TeleQuebecEmission**
|
- **TeleQuebecEmission**
|
||||||
- **TeleQuebecLive**
|
- **TeleQuebecLive**
|
||||||
- **TeleQuebecSquat**
|
- **TeleQuebecSquat**
|
||||||
- **TeleQuebecVideo**
|
- **TeleQuebecVideo**
|
||||||
- **TeleTask**
|
- **TeleTask**: (**Currently broken**)
|
||||||
- **Telewebion**
|
- **Telewebion**
|
||||||
- **Tempo**
|
- **Tempo**
|
||||||
- **TennisTV**: [*tennistv*](## "netrc machine")
|
- **TennisTV**: [*tennistv*](## "netrc machine")
|
||||||
|
@ -1458,6 +1472,7 @@
|
||||||
- **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
|
- **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
|
||||||
- **TrovoVod**
|
- **TrovoVod**
|
||||||
- **TrtCocukVideo**
|
- **TrtCocukVideo**
|
||||||
|
- **TrtWorld**
|
||||||
- **TrueID**
|
- **TrueID**
|
||||||
- **TruNews**
|
- **TruNews**
|
||||||
- **Truth**
|
- **Truth**
|
||||||
|
@ -1471,7 +1486,6 @@
|
||||||
- **TuneInPodcast**
|
- **TuneInPodcast**
|
||||||
- **TuneInPodcastEpisode**
|
- **TuneInPodcastEpisode**
|
||||||
- **TuneInStation**
|
- **TuneInStation**
|
||||||
- **Turbo**
|
|
||||||
- **tv.dfb.de**
|
- **tv.dfb.de**
|
||||||
- **TV2**
|
- **TV2**
|
||||||
- **TV2Article**
|
- **TV2Article**
|
||||||
|
@ -1493,8 +1507,8 @@
|
||||||
- **tvigle**: Интернет-телевидение Tvigle.ru
|
- **tvigle**: Интернет-телевидение Tvigle.ru
|
||||||
- **TVIPlayer**
|
- **TVIPlayer**
|
||||||
- **tvland.com**
|
- **tvland.com**
|
||||||
- **TVN24**
|
- **TVN24**: (**Currently broken**)
|
||||||
- **TVNoe**
|
- **TVNoe**: (**Currently broken**)
|
||||||
- **tvopengr:embed**: tvopen.gr embedded videos
|
- **tvopengr:embed**: tvopen.gr embedded videos
|
||||||
- **tvopengr:watch**: tvopen.gr (and ethnos.gr) videos
|
- **tvopengr:watch**: tvopen.gr (and ethnos.gr) videos
|
||||||
- **tvp**: Telewizja Polska
|
- **tvp**: Telewizja Polska
|
||||||
|
@ -1527,15 +1541,15 @@
|
||||||
- **UDNEmbed**: 聯合影音
|
- **UDNEmbed**: 聯合影音
|
||||||
- **UFCArabia**: [*ufcarabia*](## "netrc machine")
|
- **UFCArabia**: [*ufcarabia*](## "netrc machine")
|
||||||
- **UFCTV**: [*ufctv*](## "netrc machine")
|
- **UFCTV**: [*ufctv*](## "netrc machine")
|
||||||
- **ukcolumn**
|
- **ukcolumn**: (**Currently broken**)
|
||||||
- **UKTVPlay**
|
- **UKTVPlay**
|
||||||
- **umg:de**: Universal Music Deutschland
|
- **umg:de**: Universal Music Deutschland (**Currently broken**)
|
||||||
- **Unistra**
|
- **Unistra**
|
||||||
- **Unity**
|
- **Unity**: (**Currently broken**)
|
||||||
- **uol.com.br**
|
- **uol.com.br**
|
||||||
- **uplynk**
|
- **uplynk**
|
||||||
- **uplynk:preplay**
|
- **uplynk:preplay**
|
||||||
- **Urort**: NRK P3 Urørt
|
- **Urort**: NRK P3 Urørt (**Currently broken**)
|
||||||
- **URPlay**
|
- **URPlay**
|
||||||
- **USANetwork**
|
- **USANetwork**
|
||||||
- **USAToday**
|
- **USAToday**
|
||||||
|
@ -1543,13 +1557,12 @@
|
||||||
- **ustream:channel**
|
- **ustream:channel**
|
||||||
- **ustudio**
|
- **ustudio**
|
||||||
- **ustudio:embed**
|
- **ustudio:embed**
|
||||||
- **Utreon**
|
- **Varzesh3**: (**Currently broken**)
|
||||||
- **Varzesh3**
|
|
||||||
- **Vbox7**
|
- **Vbox7**
|
||||||
- **Veo**
|
- **Veo**
|
||||||
- **Veoh**
|
- **Veoh**
|
||||||
- **veoh:user**
|
- **veoh:user**
|
||||||
- **Vesti**: Вести.Ru
|
- **Vesti**: Вести.Ru (**Currently broken**)
|
||||||
- **Vevo**
|
- **Vevo**
|
||||||
- **VevoPlaylist**
|
- **VevoPlaylist**
|
||||||
- **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
|
- **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
|
||||||
|
@ -1565,7 +1578,7 @@
|
||||||
- **video.sky.it**
|
- **video.sky.it**
|
||||||
- **video.sky.it:live**
|
- **video.sky.it:live**
|
||||||
- **VideoDetective**
|
- **VideoDetective**
|
||||||
- **videofy.me**
|
- **videofy.me**: (**Currently broken**)
|
||||||
- **VideoKen**
|
- **VideoKen**
|
||||||
- **VideoKenCategory**
|
- **VideoKenCategory**
|
||||||
- **VideoKenPlayer**
|
- **VideoKenPlayer**
|
||||||
|
@ -1601,7 +1614,8 @@
|
||||||
- **ViMP:Playlist**
|
- **ViMP:Playlist**
|
||||||
- **Vine**
|
- **Vine**
|
||||||
- **vine:user**
|
- **vine:user**
|
||||||
- **Viqeo**
|
- **Viously**
|
||||||
|
- **Viqeo**: (**Currently broken**)
|
||||||
- **Viu**
|
- **Viu**
|
||||||
- **viu:ott**: [*viu*](## "netrc machine")
|
- **viu:ott**: [*viu*](## "netrc machine")
|
||||||
- **viu:playlist**
|
- **viu:playlist**
|
||||||
|
@ -1615,8 +1629,8 @@
|
||||||
- **Vocaroo**
|
- **Vocaroo**
|
||||||
- **VODPl**
|
- **VODPl**
|
||||||
- **VODPlatform**
|
- **VODPlatform**
|
||||||
- **voicy**
|
- **voicy**: (**Currently broken**)
|
||||||
- **voicy:channel**
|
- **voicy:channel**: (**Currently broken**)
|
||||||
- **VolejTV**
|
- **VolejTV**
|
||||||
- **Voot**: [*voot*](## "netrc machine") (**Currently broken**)
|
- **Voot**: [*voot*](## "netrc machine") (**Currently broken**)
|
||||||
- **VootSeries**: [*voot*](## "netrc machine") (**Currently broken**)
|
- **VootSeries**: [*voot*](## "netrc machine") (**Currently broken**)
|
||||||
|
@ -1627,7 +1641,7 @@
|
||||||
- **vqq:video**
|
- **vqq:video**
|
||||||
- **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
|
- **VRT**: VRT NWS, Flanders News, Flandern Info and Sporza
|
||||||
- **VrtNU**: [*vrtnu*](## "netrc machine") VRT MAX
|
- **VrtNU**: [*vrtnu*](## "netrc machine") VRT MAX
|
||||||
- **VTM**
|
- **VTM**: (**Currently broken**)
|
||||||
- **VTXTV**: [*vtxtv*](## "netrc machine")
|
- **VTXTV**: [*vtxtv*](## "netrc machine")
|
||||||
- **VTXTVLive**: [*vtxtv*](## "netrc machine")
|
- **VTXTVLive**: [*vtxtv*](## "netrc machine")
|
||||||
- **VTXTVRecordings**: [*vtxtv*](## "netrc machine")
|
- **VTXTVRecordings**: [*vtxtv*](## "netrc machine")
|
||||||
|
@ -1638,9 +1652,6 @@
|
||||||
- **WalyTV**: [*walytv*](## "netrc machine")
|
- **WalyTV**: [*walytv*](## "netrc machine")
|
||||||
- **WalyTVLive**: [*walytv*](## "netrc machine")
|
- **WalyTVLive**: [*walytv*](## "netrc machine")
|
||||||
- **WalyTVRecordings**: [*walytv*](## "netrc machine")
|
- **WalyTVRecordings**: [*walytv*](## "netrc machine")
|
||||||
- **wasdtv:clip**
|
|
||||||
- **wasdtv:record**
|
|
||||||
- **wasdtv:stream**
|
|
||||||
- **washingtonpost**
|
- **washingtonpost**
|
||||||
- **washingtonpost:article**
|
- **washingtonpost:article**
|
||||||
- **wat.tv**
|
- **wat.tv**
|
||||||
|
@ -1658,7 +1669,7 @@
|
||||||
- **Weibo**
|
- **Weibo**
|
||||||
- **WeiboUser**
|
- **WeiboUser**
|
||||||
- **WeiboVideo**
|
- **WeiboVideo**
|
||||||
- **WeiqiTV**: WQTV
|
- **WeiqiTV**: WQTV (**Currently broken**)
|
||||||
- **wetv:episode**
|
- **wetv:episode**
|
||||||
- **WeTvSeries**
|
- **WeTvSeries**
|
||||||
- **Weverse**: [*weverse*](## "netrc machine")
|
- **Weverse**: [*weverse*](## "netrc machine")
|
||||||
|
@ -1703,8 +1714,8 @@
|
||||||
- **XHamsterUser**
|
- **XHamsterUser**
|
||||||
- **ximalaya**: 喜马拉雅FM
|
- **ximalaya**: 喜马拉雅FM
|
||||||
- **ximalaya:album**: 喜马拉雅FM 专辑
|
- **ximalaya:album**: 喜马拉雅FM 专辑
|
||||||
- **xinpianchang**: xinpianchang.com
|
- **xinpianchang**: xinpianchang.com (**Currently broken**)
|
||||||
- **XMinus**
|
- **XMinus**: (**Currently broken**)
|
||||||
- **XNXX**
|
- **XNXX**
|
||||||
- **Xstream**
|
- **Xstream**
|
||||||
- **XVideos**
|
- **XVideos**
|
||||||
|
@ -1720,8 +1731,8 @@
|
||||||
- **yandexmusic:track**: Яндекс.Музыка - Трек
|
- **yandexmusic:track**: Яндекс.Музыка - Трек
|
||||||
- **YandexVideo**
|
- **YandexVideo**
|
||||||
- **YandexVideoPreview**
|
- **YandexVideoPreview**
|
||||||
- **YapFiles**
|
- **YapFiles**: (**Currently broken**)
|
||||||
- **Yappy**
|
- **Yappy**: (**Currently broken**)
|
||||||
- **YappyProfile**
|
- **YappyProfile**
|
||||||
- **YleAreena**
|
- **YleAreena**
|
||||||
- **YouJizz**
|
- **YouJizz**
|
||||||
|
@ -1762,9 +1773,11 @@
|
||||||
- **ZDFChannel**
|
- **ZDFChannel**
|
||||||
- **Zee5**: [*zee5*](## "netrc machine")
|
- **Zee5**: [*zee5*](## "netrc machine")
|
||||||
- **zee5:series**
|
- **zee5:series**
|
||||||
- **ZeeNews**
|
- **ZeeNews**: (**Currently broken**)
|
||||||
|
- **ZenPorn**
|
||||||
- **ZenYandex**
|
- **ZenYandex**
|
||||||
- **ZenYandexChannel**
|
- **ZenYandexChannel**
|
||||||
|
- **ZetlandDKArticle**
|
||||||
- **Zhihu**
|
- **Zhihu**
|
||||||
- **zingmp3**: zingmp3.vn
|
- **zingmp3**: zingmp3.vn
|
||||||
- **zingmp3:album**
|
- **zingmp3:album**
|
||||||
|
|
|
@ -223,6 +223,10 @@ def sanitize_got_info_dict(got_dict):
|
||||||
if test_info_dict.get('display_id') == test_info_dict.get('id'):
|
if test_info_dict.get('display_id') == test_info_dict.get('id'):
|
||||||
test_info_dict.pop('display_id')
|
test_info_dict.pop('display_id')
|
||||||
|
|
||||||
|
# Remove deprecated fields
|
||||||
|
for old in YoutubeDL._deprecated_multivalue_fields.keys():
|
||||||
|
test_info_dict.pop(old, None)
|
||||||
|
|
||||||
# release_year may be generated from release_date
|
# release_year may be generated from release_date
|
||||||
if try_call(lambda: test_info_dict['release_year'] == int(test_info_dict['release_date'][:4])):
|
if try_call(lambda: test_info_dict['release_year'] == int(test_info_dict['release_date'][:4])):
|
||||||
test_info_dict.pop('release_year')
|
test_info_dict.pop('release_year')
|
||||||
|
|
|
@ -941,7 +941,7 @@ class TestYoutubeDL(unittest.TestCase):
|
||||||
def get_videos(filter_=None):
|
def get_videos(filter_=None):
|
||||||
ydl = YDL({'match_filter': filter_, 'simulate': True})
|
ydl = YDL({'match_filter': filter_, 'simulate': True})
|
||||||
for v in videos:
|
for v in videos:
|
||||||
ydl.process_ie_result(v, download=True)
|
ydl.process_ie_result(v.copy(), download=True)
|
||||||
return [v['id'] for v in ydl.downloaded_info_dicts]
|
return [v['id'] for v in ydl.downloaded_info_dicts]
|
||||||
|
|
||||||
res = get_videos()
|
res = get_videos()
|
||||||
|
|
|
@ -45,7 +45,7 @@ class TestExecution(unittest.TestCase):
|
||||||
self.assertTrue(os.path.exists(LAZY_EXTRACTORS))
|
self.assertTrue(os.path.exists(LAZY_EXTRACTORS))
|
||||||
|
|
||||||
_, stderr = self.run_yt_dlp(opts=('-s', 'test:'))
|
_, stderr = self.run_yt_dlp(opts=('-s', 'test:'))
|
||||||
# `MIN_RECOMMENDED` emits a deprecated feature warning for deprecated python versions
|
# `MIN_RECOMMENDED` emits a deprecated feature warning for deprecated Python versions
|
||||||
if stderr and stderr.startswith('Deprecated Feature: Support for Python'):
|
if stderr and stderr.startswith('Deprecated Feature: Support for Python'):
|
||||||
stderr = ''
|
stderr = ''
|
||||||
self.assertFalse(stderr)
|
self.assertFalse(stderr)
|
||||||
|
|
|
@ -13,6 +13,7 @@ import http.client
|
||||||
import http.cookiejar
|
import http.cookiejar
|
||||||
import http.server
|
import http.server
|
||||||
import io
|
import io
|
||||||
|
import logging
|
||||||
import pathlib
|
import pathlib
|
||||||
import random
|
import random
|
||||||
import ssl
|
import ssl
|
||||||
|
@ -68,7 +69,7 @@ def _build_proxy_handler(name):
|
||||||
self.send_response(200)
|
self.send_response(200)
|
||||||
self.send_header('Content-Type', 'text/plain; charset=utf-8')
|
self.send_header('Content-Type', 'text/plain; charset=utf-8')
|
||||||
self.end_headers()
|
self.end_headers()
|
||||||
self.wfile.write('{self.proxy_name}: {self.path}'.format(self=self).encode())
|
self.wfile.write(f'{self.proxy_name}: {self.path}'.encode())
|
||||||
return HTTPTestRequestHandler
|
return HTTPTestRequestHandler
|
||||||
|
|
||||||
|
|
||||||
|
@ -752,6 +753,25 @@ class TestClientCertificate:
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|
||||||
|
class TestRequestHandlerMisc:
|
||||||
|
"""Misc generic tests for request handlers, not related to request or validation testing"""
|
||||||
|
@pytest.mark.parametrize('handler,logger_name', [
|
||||||
|
('Requests', 'urllib3'),
|
||||||
|
('Websockets', 'websockets.client'),
|
||||||
|
('Websockets', 'websockets.server')
|
||||||
|
], indirect=['handler'])
|
||||||
|
def test_remove_logging_handler(self, handler, logger_name):
|
||||||
|
# Ensure any logging handlers, which may contain a YoutubeDL instance,
|
||||||
|
# are removed when we close the request handler
|
||||||
|
# See: https://github.com/yt-dlp/yt-dlp/issues/8922
|
||||||
|
logging_handlers = logging.getLogger(logger_name).handlers
|
||||||
|
before_count = len(logging_handlers)
|
||||||
|
rh = handler()
|
||||||
|
assert len(logging_handlers) == before_count + 1
|
||||||
|
rh.close()
|
||||||
|
assert len(logging_handlers) == before_count
|
||||||
|
|
||||||
|
|
||||||
class TestUrllibRequestHandler(TestRequestHandlerBase):
|
class TestUrllibRequestHandler(TestRequestHandlerBase):
|
||||||
@pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
|
@pytest.mark.parametrize('handler', ['Urllib'], indirect=True)
|
||||||
def test_file_urls(self, handler):
|
def test_file_urls(self, handler):
|
||||||
|
@ -827,6 +847,7 @@ class TestUrllibRequestHandler(TestRequestHandlerBase):
|
||||||
assert not isinstance(exc_info.value, TransportError)
|
assert not isinstance(exc_info.value, TransportError)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize('handler', ['Requests'], indirect=True)
|
||||||
class TestRequestsRequestHandler(TestRequestHandlerBase):
|
class TestRequestsRequestHandler(TestRequestHandlerBase):
|
||||||
@pytest.mark.parametrize('raised,expected', [
|
@pytest.mark.parametrize('raised,expected', [
|
||||||
(lambda: requests.exceptions.ConnectTimeout(), TransportError),
|
(lambda: requests.exceptions.ConnectTimeout(), TransportError),
|
||||||
|
@ -843,7 +864,6 @@ class TestRequestsRequestHandler(TestRequestHandlerBase):
|
||||||
(lambda: requests.exceptions.RequestException(), RequestError)
|
(lambda: requests.exceptions.RequestException(), RequestError)
|
||||||
# (lambda: requests.exceptions.TooManyRedirects(), HTTPError) - Needs a response object
|
# (lambda: requests.exceptions.TooManyRedirects(), HTTPError) - Needs a response object
|
||||||
])
|
])
|
||||||
@pytest.mark.parametrize('handler', ['Requests'], indirect=True)
|
|
||||||
def test_request_error_mapping(self, handler, monkeypatch, raised, expected):
|
def test_request_error_mapping(self, handler, monkeypatch, raised, expected):
|
||||||
with handler() as rh:
|
with handler() as rh:
|
||||||
def mock_get_instance(*args, **kwargs):
|
def mock_get_instance(*args, **kwargs):
|
||||||
|
@ -877,7 +897,6 @@ class TestRequestsRequestHandler(TestRequestHandlerBase):
|
||||||
'3 bytes read, 5 more expected'
|
'3 bytes read, 5 more expected'
|
||||||
),
|
),
|
||||||
])
|
])
|
||||||
@pytest.mark.parametrize('handler', ['Requests'], indirect=True)
|
|
||||||
def test_response_error_mapping(self, handler, monkeypatch, raised, expected, match):
|
def test_response_error_mapping(self, handler, monkeypatch, raised, expected, match):
|
||||||
from requests.models import Response as RequestsResponse
|
from requests.models import Response as RequestsResponse
|
||||||
from urllib3.response import HTTPResponse as Urllib3Response
|
from urllib3.response import HTTPResponse as Urllib3Response
|
||||||
|
@ -896,6 +915,21 @@ class TestRequestsRequestHandler(TestRequestHandlerBase):
|
||||||
|
|
||||||
assert exc_info.type is expected
|
assert exc_info.type is expected
|
||||||
|
|
||||||
|
def test_close(self, handler, monkeypatch):
|
||||||
|
rh = handler()
|
||||||
|
session = rh._get_instance(cookiejar=rh.cookiejar)
|
||||||
|
called = False
|
||||||
|
original_close = session.close
|
||||||
|
|
||||||
|
def mock_close(*args, **kwargs):
|
||||||
|
nonlocal called
|
||||||
|
called = True
|
||||||
|
return original_close(*args, **kwargs)
|
||||||
|
|
||||||
|
monkeypatch.setattr(session, 'close', mock_close)
|
||||||
|
rh.close()
|
||||||
|
assert called
|
||||||
|
|
||||||
|
|
||||||
def run_validation(handler, error, req, **handler_kwargs):
|
def run_validation(handler, error, req, **handler_kwargs):
|
||||||
with handler(**handler_kwargs) as rh:
|
with handler(**handler_kwargs) as rh:
|
||||||
|
@ -1205,6 +1239,19 @@ class TestRequestDirector:
|
||||||
assert director.send(Request('http://')).read() == b''
|
assert director.send(Request('http://')).read() == b''
|
||||||
assert director.send(Request('http://', headers={'prefer': '1'})).read() == b'supported'
|
assert director.send(Request('http://', headers={'prefer': '1'})).read() == b'supported'
|
||||||
|
|
||||||
|
def test_close(self, monkeypatch):
|
||||||
|
director = RequestDirector(logger=FakeLogger())
|
||||||
|
director.add_handler(FakeRH(logger=FakeLogger()))
|
||||||
|
called = False
|
||||||
|
|
||||||
|
def mock_close(*args, **kwargs):
|
||||||
|
nonlocal called
|
||||||
|
called = True
|
||||||
|
|
||||||
|
monkeypatch.setattr(director.handlers[FakeRH.RH_KEY], 'close', mock_close)
|
||||||
|
director.close()
|
||||||
|
assert called
|
||||||
|
|
||||||
|
|
||||||
# XXX: do we want to move this to test_YoutubeDL.py?
|
# XXX: do we want to move this to test_YoutubeDL.py?
|
||||||
class TestYoutubeDLNetworking:
|
class TestYoutubeDLNetworking:
|
||||||
|
|
|
@ -2386,7 +2386,7 @@ Line 1
|
||||||
self.assertEqual(traverse_obj(etree, '//year/text()'), ['2008', '2011', '2011'],
|
self.assertEqual(traverse_obj(etree, '//year/text()'), ['2008', '2011', '2011'],
|
||||||
msg='`text()` at end of path should give the inner text')
|
msg='`text()` at end of path should give the inner text')
|
||||||
self.assertEqual(traverse_obj(etree, '//*[@direction]/@direction'), ['E', 'W', 'N', 'W', 'E'],
|
self.assertEqual(traverse_obj(etree, '//*[@direction]/@direction'), ['E', 'W', 'N', 'W', 'E'],
|
||||||
msg='full python xpath features should be supported')
|
msg='full Python xpath features should be supported')
|
||||||
self.assertEqual(traverse_obj(etree, (0, '@name')), 'Liechtenstein',
|
self.assertEqual(traverse_obj(etree, (0, '@name')), 'Liechtenstein',
|
||||||
msg='special transformations should act on current element')
|
msg='special transformations should act on current element')
|
||||||
self.assertEqual(traverse_obj(etree, ('country', 0, ..., 'text()', {int_or_none})), [1, 2008, 141100],
|
self.assertEqual(traverse_obj(etree, ('country', 0, ..., 'text()', {int_or_none})), [1, 2008, 141100],
|
||||||
|
|
|
@ -192,8 +192,8 @@ class TestWebsSocketRequestHandlerConformance:
|
||||||
|
|
||||||
@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
|
@pytest.mark.parametrize('handler', ['Websockets'], indirect=True)
|
||||||
@pytest.mark.parametrize('params,extensions', [
|
@pytest.mark.parametrize('params,extensions', [
|
||||||
({'timeout': 0.00001}, {}),
|
({'timeout': sys.float_info.min}, {}),
|
||||||
({}, {'timeout': 0.00001}),
|
({}, {'timeout': sys.float_info.min}),
|
||||||
])
|
])
|
||||||
def test_timeout(self, handler, params, extensions):
|
def test_timeout(self, handler, params, extensions):
|
||||||
with handler(**params) as rh:
|
with handler(**params) as rh:
|
||||||
|
|
|
@ -575,11 +575,18 @@ class YoutubeDL:
|
||||||
'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
|
'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
|
||||||
'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
|
'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
|
||||||
'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
|
'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
|
||||||
'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
|
'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start', 'is_dash_periods', 'request_data',
|
||||||
'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
|
'preference', 'language', 'language_preference', 'quality', 'source_preference', 'cookies',
|
||||||
'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
|
'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
|
||||||
'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
|
'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
|
||||||
}
|
}
|
||||||
|
_deprecated_multivalue_fields = {
|
||||||
|
'album_artist': 'album_artists',
|
||||||
|
'artist': 'artists',
|
||||||
|
'composer': 'composers',
|
||||||
|
'creator': 'creators',
|
||||||
|
'genre': 'genres',
|
||||||
|
}
|
||||||
_format_selection_exts = {
|
_format_selection_exts = {
|
||||||
'audio': set(MEDIA_EXTENSIONS.common_audio),
|
'audio': set(MEDIA_EXTENSIONS.common_audio),
|
||||||
'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
|
'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
|
||||||
|
@ -683,7 +690,6 @@ class YoutubeDL:
|
||||||
self.params['http_headers'] = HTTPHeaderDict(std_headers, self.params.get('http_headers'))
|
self.params['http_headers'] = HTTPHeaderDict(std_headers, self.params.get('http_headers'))
|
||||||
self._load_cookies(self.params['http_headers'].get('Cookie')) # compat
|
self._load_cookies(self.params['http_headers'].get('Cookie')) # compat
|
||||||
self.params['http_headers'].pop('Cookie', None)
|
self.params['http_headers'].pop('Cookie', None)
|
||||||
self._request_director = self.build_request_director(_REQUEST_HANDLERS.values(), _RH_PREFERENCES)
|
|
||||||
|
|
||||||
if auto_init and auto_init != 'no_verbose_header':
|
if auto_init and auto_init != 'no_verbose_header':
|
||||||
self.print_debug_header()
|
self.print_debug_header()
|
||||||
|
@ -956,7 +962,9 @@ class YoutubeDL:
|
||||||
|
|
||||||
def close(self):
|
def close(self):
|
||||||
self.save_cookies()
|
self.save_cookies()
|
||||||
self._request_director.close()
|
if '_request_director' in self.__dict__:
|
||||||
|
self._request_director.close()
|
||||||
|
del self._request_director
|
||||||
|
|
||||||
def trouble(self, message=None, tb=None, is_error=True):
|
def trouble(self, message=None, tb=None, is_error=True):
|
||||||
"""Determine action to take when a download problem appears.
|
"""Determine action to take when a download problem appears.
|
||||||
|
@ -2219,7 +2227,7 @@ class YoutubeDL:
|
||||||
selectors = []
|
selectors = []
|
||||||
current_selector = None
|
current_selector = None
|
||||||
for type, string_, start, _, _ in tokens:
|
for type, string_, start, _, _ in tokens:
|
||||||
# ENCODING is only defined in python 3.x
|
# ENCODING is only defined in Python 3.x
|
||||||
if type == getattr(tokenize, 'ENCODING', None):
|
if type == getattr(tokenize, 'ENCODING', None):
|
||||||
continue
|
continue
|
||||||
elif type in [tokenize.NAME, tokenize.NUMBER]:
|
elif type in [tokenize.NAME, tokenize.NUMBER]:
|
||||||
|
@ -2640,6 +2648,15 @@ class YoutubeDL:
|
||||||
if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
|
if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
|
||||||
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
|
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
|
||||||
|
|
||||||
|
for old_key, new_key in self._deprecated_multivalue_fields.items():
|
||||||
|
if new_key in info_dict and old_key in info_dict:
|
||||||
|
if '_version' not in info_dict: # HACK: Do not warn when using --load-info-json
|
||||||
|
self.deprecation_warning(f'Do not return {old_key!r} when {new_key!r} is present')
|
||||||
|
elif old_value := info_dict.get(old_key):
|
||||||
|
info_dict[new_key] = old_value.split(', ')
|
||||||
|
elif new_value := info_dict.get(new_key):
|
||||||
|
info_dict[old_key] = ', '.join(v.replace(',', '\N{FULLWIDTH COMMA}') for v in new_value)
|
||||||
|
|
||||||
def _raise_pending_errors(self, info):
|
def _raise_pending_errors(self, info):
|
||||||
err = info.pop('__pending_error', None)
|
err = info.pop('__pending_error', None)
|
||||||
if err:
|
if err:
|
||||||
|
@ -3483,7 +3500,8 @@ class YoutubeDL:
|
||||||
or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
|
or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
|
||||||
'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
|
'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
|
||||||
FFmpegFixupM3u8PP)
|
FFmpegFixupM3u8PP)
|
||||||
ffmpeg_fixup(info_dict.get('is_live') and downloader == 'dashsegments',
|
ffmpeg_fixup(downloader == 'dashsegments'
|
||||||
|
and (info_dict.get('is_live') or info_dict.get('is_dash_periods')),
|
||||||
'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
|
'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
|
||||||
|
|
||||||
ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
|
ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
|
||||||
|
@ -3560,6 +3578,8 @@ class YoutubeDL:
|
||||||
raise
|
raise
|
||||||
self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
|
self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
|
||||||
self.download([webpage_url])
|
self.download([webpage_url])
|
||||||
|
except ExtractorError as e:
|
||||||
|
self.report_error(e)
|
||||||
return self._download_retcode
|
return self._download_retcode
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
|
@ -4144,6 +4164,10 @@ class YoutubeDL:
|
||||||
director.preferences.add(lambda rh, _: 500 if rh.RH_KEY == 'Urllib' else 0)
|
director.preferences.add(lambda rh, _: 500 if rh.RH_KEY == 'Urllib' else 0)
|
||||||
return director
|
return director
|
||||||
|
|
||||||
|
@functools.cached_property
|
||||||
|
def _request_director(self):
|
||||||
|
return self.build_request_director(_REQUEST_HANDLERS.values(), _RH_PREFERENCES)
|
||||||
|
|
||||||
def encode(self, s):
|
def encode(self, s):
|
||||||
if isinstance(s, bytes):
|
if isinstance(s, bytes):
|
||||||
return s # Already encoded
|
return s # Already encoded
|
||||||
|
|
|
@ -4,7 +4,7 @@ if sys.version_info < (3, 8):
|
||||||
raise ImportError(
|
raise ImportError(
|
||||||
f'You are using an unsupported version of Python. Only Python versions 3.8 and above are supported by yt-dlp') # noqa: F541
|
f'You are using an unsupported version of Python. Only Python versions 3.8 and above are supported by yt-dlp') # noqa: F541
|
||||||
|
|
||||||
__license__ = 'Public Domain'
|
__license__ = 'The Unlicense'
|
||||||
|
|
||||||
import collections
|
import collections
|
||||||
import getpass
|
import getpass
|
||||||
|
@ -14,7 +14,7 @@ import os
|
||||||
import re
|
import re
|
||||||
import traceback
|
import traceback
|
||||||
|
|
||||||
from .compat import compat_shlex_quote
|
from .compat import compat_os_name, compat_shlex_quote
|
||||||
from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
|
from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
|
||||||
from .downloader.external import get_external_downloader
|
from .downloader.external import get_external_downloader
|
||||||
from .extractor import list_extractor_classes
|
from .extractor import list_extractor_classes
|
||||||
|
@ -984,7 +984,28 @@ def _real_main(argv=None):
|
||||||
if pre_process:
|
if pre_process:
|
||||||
return ydl._download_retcode
|
return ydl._download_retcode
|
||||||
|
|
||||||
ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
|
args = sys.argv[1:] if argv is None else argv
|
||||||
|
ydl.warn_if_short_id(args)
|
||||||
|
|
||||||
|
# Show a useful error message and wait for keypress if not launched from shell on Windows
|
||||||
|
if not args and compat_os_name == 'nt' and getattr(sys, 'frozen', False):
|
||||||
|
import ctypes.wintypes
|
||||||
|
import msvcrt
|
||||||
|
|
||||||
|
kernel32 = ctypes.WinDLL('Kernel32')
|
||||||
|
|
||||||
|
buffer = (1 * ctypes.wintypes.DWORD)()
|
||||||
|
attached_processes = kernel32.GetConsoleProcessList(buffer, 1)
|
||||||
|
# If we only have a single process attached, then the executable was double clicked
|
||||||
|
# When using `pyinstaller` with `--onefile`, two processes get attached
|
||||||
|
is_onefile = hasattr(sys, '_MEIPASS') and os.path.basename(sys._MEIPASS).startswith('_MEI')
|
||||||
|
if attached_processes == 1 or is_onefile and attached_processes == 2:
|
||||||
|
print(parser._generate_error_message(
|
||||||
|
'Do not double-click the executable, instead call it from a command line.\n'
|
||||||
|
'Please read the README for further information on how to use yt-dlp: '
|
||||||
|
'https://github.com/yt-dlp/yt-dlp#readme'))
|
||||||
|
msvcrt.getch()
|
||||||
|
_exit(2)
|
||||||
parser.error(
|
parser.error(
|
||||||
'You must provide at least one URL.\n'
|
'You must provide at least one URL.\n'
|
||||||
'Type yt-dlp --help to see a list of all options.')
|
'Type yt-dlp --help to see a list of all options.')
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
# Execute with
|
# Execute with
|
||||||
# $ python -m yt_dlp
|
# $ python3 -m yt_dlp
|
||||||
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
|
|
|
@ -10,7 +10,7 @@ def pycryptodome_module():
|
||||||
try:
|
try:
|
||||||
import Crypto # noqa: F401
|
import Crypto # noqa: F401
|
||||||
print('WARNING: Using Crypto since Cryptodome is not available. '
|
print('WARNING: Using Crypto since Cryptodome is not available. '
|
||||||
'Install with: pip install pycryptodomex', file=sys.stderr)
|
'Install with: python3 -m pip install pycryptodomex', file=sys.stderr)
|
||||||
return 'Crypto'
|
return 'Crypto'
|
||||||
except ImportError:
|
except ImportError:
|
||||||
pass
|
pass
|
||||||
|
@ -31,4 +31,4 @@ def get_hidden_imports():
|
||||||
hiddenimports = list(get_hidden_imports())
|
hiddenimports = list(get_hidden_imports())
|
||||||
print(f'Adding imports: {hiddenimports}')
|
print(f'Adding imports: {hiddenimports}')
|
||||||
|
|
||||||
excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts']
|
excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts', 'bundle']
|
||||||
|
|
|
@ -1,5 +0,0 @@
|
||||||
import warnings
|
|
||||||
|
|
||||||
warnings.warn(DeprecationWarning(f'{__name__} is deprecated'))
|
|
||||||
|
|
||||||
casefold = str.casefold
|
|
|
@ -10,10 +10,10 @@ del passthrough_module
|
||||||
from .. import compat_os_name
|
from .. import compat_os_name
|
||||||
|
|
||||||
if compat_os_name == 'nt':
|
if compat_os_name == 'nt':
|
||||||
# On older python versions, proxies are extracted from Windows registry erroneously. [1]
|
# On older Python versions, proxies are extracted from Windows registry erroneously. [1]
|
||||||
# If the https proxy in the registry does not have a scheme, urllib will incorrectly add https:// to it. [2]
|
# If the https proxy in the registry does not have a scheme, urllib will incorrectly add https:// to it. [2]
|
||||||
# It is unlikely that the user has actually set it to be https, so we should be fine to safely downgrade
|
# It is unlikely that the user has actually set it to be https, so we should be fine to safely downgrade
|
||||||
# it to http on these older python versions to avoid issues
|
# it to http on these older Python versions to avoid issues
|
||||||
# This also applies for ftp proxy type, as ftp:// proxy scheme is not supported.
|
# This also applies for ftp proxy type, as ftp:// proxy scheme is not supported.
|
||||||
# 1: https://github.com/python/cpython/issues/86793
|
# 1: https://github.com/python/cpython/issues/86793
|
||||||
# 2: https://github.com/python/cpython/blob/51f1ae5ceb0673316c4e4b0175384e892e33cc6e/Lib/urllib/request.py#L2683-L2698
|
# 2: https://github.com/python/cpython/blob/51f1ae5ceb0673316c4e4b0175384e892e33cc6e/Lib/urllib/request.py#L2683-L2698
|
||||||
|
|
|
@ -121,7 +121,7 @@ def _extract_firefox_cookies(profile, container, logger):
|
||||||
logger.info('Extracting cookies from firefox')
|
logger.info('Extracting cookies from firefox')
|
||||||
if not sqlite3:
|
if not sqlite3:
|
||||||
logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
|
logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
|
||||||
'Please use a python interpreter compiled with sqlite3 support')
|
'Please use a Python interpreter compiled with sqlite3 support')
|
||||||
return YoutubeDLCookieJar()
|
return YoutubeDLCookieJar()
|
||||||
|
|
||||||
if profile is None:
|
if profile is None:
|
||||||
|
@ -264,7 +264,7 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):
|
||||||
|
|
||||||
if not sqlite3:
|
if not sqlite3:
|
||||||
logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
|
logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
|
||||||
'Please use a python interpreter compiled with sqlite3 support')
|
'Please use a Python interpreter compiled with sqlite3 support')
|
||||||
return YoutubeDLCookieJar()
|
return YoutubeDLCookieJar()
|
||||||
|
|
||||||
config = _get_chromium_based_browser_settings(browser_name)
|
config = _get_chromium_based_browser_settings(browser_name)
|
||||||
|
|
|
@ -46,16 +46,14 @@ try:
|
||||||
# We need to get the underlying `sqlite` version, see https://github.com/yt-dlp/yt-dlp/issues/8152
|
# We need to get the underlying `sqlite` version, see https://github.com/yt-dlp/yt-dlp/issues/8152
|
||||||
sqlite3._yt_dlp__version = sqlite3.sqlite_version
|
sqlite3._yt_dlp__version = sqlite3.sqlite_version
|
||||||
except ImportError:
|
except ImportError:
|
||||||
# although sqlite3 is part of the standard library, it is possible to compile python without
|
# although sqlite3 is part of the standard library, it is possible to compile Python without
|
||||||
# sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544
|
# sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544
|
||||||
sqlite3 = None
|
sqlite3 = None
|
||||||
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import websockets
|
import websockets
|
||||||
except (ImportError, SyntaxError):
|
except ImportError:
|
||||||
# websockets 3.10 on python 3.6 causes SyntaxError
|
|
||||||
# See https://github.com/yt-dlp/yt-dlp/issues/2633
|
|
||||||
websockets = None
|
websockets = None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
|
|
@ -237,8 +237,13 @@ class HttpFD(FileDownloader):
|
||||||
|
|
||||||
def retry(e):
|
def retry(e):
|
||||||
close_stream()
|
close_stream()
|
||||||
ctx.resume_len = (byte_counter if ctx.tmpfilename == '-'
|
if ctx.tmpfilename == '-':
|
||||||
else os.path.getsize(encodeFilename(ctx.tmpfilename)))
|
ctx.resume_len = byte_counter
|
||||||
|
else:
|
||||||
|
try:
|
||||||
|
ctx.resume_len = os.path.getsize(encodeFilename(ctx.tmpfilename))
|
||||||
|
except FileNotFoundError:
|
||||||
|
ctx.resume_len = 0
|
||||||
raise RetryDownload(e)
|
raise RetryDownload(e)
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
|
|
|
@ -320,7 +320,6 @@ from .cbs import (
|
||||||
CBSIE,
|
CBSIE,
|
||||||
ParamountPressExpressIE,
|
ParamountPressExpressIE,
|
||||||
)
|
)
|
||||||
from .cbsinteractive import CBSInteractiveIE
|
|
||||||
from .cbsnews import (
|
from .cbsnews import (
|
||||||
CBSNewsEmbedIE,
|
CBSNewsEmbedIE,
|
||||||
CBSNewsIE,
|
CBSNewsIE,
|
||||||
|
@ -348,10 +347,6 @@ from .cgtn import CGTNIE
|
||||||
from .charlierose import CharlieRoseIE
|
from .charlierose import CharlieRoseIE
|
||||||
from .chaturbate import ChaturbateIE
|
from .chaturbate import ChaturbateIE
|
||||||
from .chilloutzone import ChilloutzoneIE
|
from .chilloutzone import ChilloutzoneIE
|
||||||
from .chingari import (
|
|
||||||
ChingariIE,
|
|
||||||
ChingariUserIE,
|
|
||||||
)
|
|
||||||
from .chzzk import (
|
from .chzzk import (
|
||||||
CHZZKLiveIE,
|
CHZZKLiveIE,
|
||||||
CHZZKVideoIE,
|
CHZZKVideoIE,
|
||||||
|
@ -369,7 +364,6 @@ from .ciscolive import (
|
||||||
from .ciscowebex import CiscoWebexIE
|
from .ciscowebex import CiscoWebexIE
|
||||||
from .cjsw import CJSWIE
|
from .cjsw import CJSWIE
|
||||||
from .clipchamp import ClipchampIE
|
from .clipchamp import ClipchampIE
|
||||||
from .cliphunter import CliphunterIE
|
|
||||||
from .clippit import ClippitIE
|
from .clippit import ClippitIE
|
||||||
from .cliprs import ClipRsIE
|
from .cliprs import ClipRsIE
|
||||||
from .closertotruth import CloserToTruthIE
|
from .closertotruth import CloserToTruthIE
|
||||||
|
@ -379,7 +373,6 @@ from .clubic import ClubicIE
|
||||||
from .clyp import ClypIE
|
from .clyp import ClypIE
|
||||||
from .cmt import CMTIE
|
from .cmt import CMTIE
|
||||||
from .cnbc import (
|
from .cnbc import (
|
||||||
CNBCIE,
|
|
||||||
CNBCVideoIE,
|
CNBCVideoIE,
|
||||||
)
|
)
|
||||||
from .cnn import (
|
from .cnn import (
|
||||||
|
@ -445,6 +438,7 @@ from .dailymail import DailyMailIE
|
||||||
from .dailymotion import (
|
from .dailymotion import (
|
||||||
DailymotionIE,
|
DailymotionIE,
|
||||||
DailymotionPlaylistIE,
|
DailymotionPlaylistIE,
|
||||||
|
DailymotionSearchIE,
|
||||||
DailymotionUserIE,
|
DailymotionUserIE,
|
||||||
)
|
)
|
||||||
from .dailywire import (
|
from .dailywire import (
|
||||||
|
@ -476,7 +470,6 @@ from .dlf import (
|
||||||
)
|
)
|
||||||
from .dfb import DFBIE
|
from .dfb import DFBIE
|
||||||
from .dhm import DHMIE
|
from .dhm import DHMIE
|
||||||
from .digg import DiggIE
|
|
||||||
from .douyutv import (
|
from .douyutv import (
|
||||||
DouyuShowIE,
|
DouyuShowIE,
|
||||||
DouyuTVIE,
|
DouyuTVIE,
|
||||||
|
@ -610,7 +603,6 @@ from .fc2 import (
|
||||||
)
|
)
|
||||||
from .fczenit import FczenitIE
|
from .fczenit import FczenitIE
|
||||||
from .fifa import FifaIE
|
from .fifa import FifaIE
|
||||||
from .filmmodu import FilmmoduIE
|
|
||||||
from .filmon import (
|
from .filmon import (
|
||||||
FilmOnIE,
|
FilmOnIE,
|
||||||
FilmOnChannelIE,
|
FilmOnChannelIE,
|
||||||
|
@ -618,6 +610,7 @@ from .filmon import (
|
||||||
from .filmweb import FilmwebIE
|
from .filmweb import FilmwebIE
|
||||||
from .firsttv import FirstTVIE
|
from .firsttv import FirstTVIE
|
||||||
from .fivetv import FiveTVIE
|
from .fivetv import FiveTVIE
|
||||||
|
from .flextv import FlexTVIE
|
||||||
from .flickr import FlickrIE
|
from .flickr import FlickrIE
|
||||||
from .floatplane import (
|
from .floatplane import (
|
||||||
FloatplaneIE,
|
FloatplaneIE,
|
||||||
|
@ -675,7 +668,6 @@ from .gab import (
|
||||||
GabIE,
|
GabIE,
|
||||||
)
|
)
|
||||||
from .gaia import GaiaIE
|
from .gaia import GaiaIE
|
||||||
from .gameinformer import GameInformerIE
|
|
||||||
from .gamejolt import (
|
from .gamejolt import (
|
||||||
GameJoltIE,
|
GameJoltIE,
|
||||||
GameJoltUserIE,
|
GameJoltUserIE,
|
||||||
|
@ -704,7 +696,6 @@ from .gettr import (
|
||||||
GettrStreamingIE,
|
GettrStreamingIE,
|
||||||
)
|
)
|
||||||
from .giantbomb import GiantBombIE
|
from .giantbomb import GiantBombIE
|
||||||
from .giga import GigaIE
|
|
||||||
from .glide import GlideIE
|
from .glide import GlideIE
|
||||||
from .globalplayer import (
|
from .globalplayer import (
|
||||||
GlobalPlayerLiveIE,
|
GlobalPlayerLiveIE,
|
||||||
|
@ -895,10 +886,8 @@ from .jtbc import (
|
||||||
from .jwplatform import JWPlatformIE
|
from .jwplatform import JWPlatformIE
|
||||||
from .kakao import KakaoIE
|
from .kakao import KakaoIE
|
||||||
from .kaltura import KalturaIE
|
from .kaltura import KalturaIE
|
||||||
from .kanal2 import Kanal2IE
|
|
||||||
from .kankanews import KankaNewsIE
|
from .kankanews import KankaNewsIE
|
||||||
from .karaoketv import KaraoketvIE
|
from .karaoketv import KaraoketvIE
|
||||||
from .karrierevideos import KarriereVideosIE
|
|
||||||
from .kelbyone import KelbyOneIE
|
from .kelbyone import KelbyOneIE
|
||||||
from .khanacademy import (
|
from .khanacademy import (
|
||||||
KhanAcademyIE,
|
KhanAcademyIE,
|
||||||
|
@ -914,13 +903,11 @@ from .kinja import KinjaEmbedIE
|
||||||
from .kinopoisk import KinoPoiskIE
|
from .kinopoisk import KinoPoiskIE
|
||||||
from .kommunetv import KommunetvIE
|
from .kommunetv import KommunetvIE
|
||||||
from .kompas import KompasVideoIE
|
from .kompas import KompasVideoIE
|
||||||
from .konserthusetplay import KonserthusetPlayIE
|
|
||||||
from .koo import KooIE
|
from .koo import KooIE
|
||||||
from .kth import KTHIE
|
from .kth import KTHIE
|
||||||
from .krasview import KrasViewIE
|
from .krasview import KrasViewIE
|
||||||
from .ku6 import Ku6IE
|
from .ku6 import Ku6IE
|
||||||
from .kukululive import KukuluLiveIE
|
from .kukululive import KukuluLiveIE
|
||||||
from .kusi import KUSIIE
|
|
||||||
from .kuwo import (
|
from .kuwo import (
|
||||||
KuwoIE,
|
KuwoIE,
|
||||||
KuwoAlbumIE,
|
KuwoAlbumIE,
|
||||||
|
@ -1002,7 +989,6 @@ from .lnkgo import (
|
||||||
LnkGoIE,
|
LnkGoIE,
|
||||||
LnkIE,
|
LnkIE,
|
||||||
)
|
)
|
||||||
from .localnews8 import LocalNews8IE
|
|
||||||
from .lovehomeporn import LoveHomePornIE
|
from .lovehomeporn import LoveHomePornIE
|
||||||
from .lrt import (
|
from .lrt import (
|
||||||
LRTVODIE,
|
LRTVODIE,
|
||||||
|
@ -1029,7 +1015,6 @@ from .mailru import (
|
||||||
MailRuMusicSearchIE,
|
MailRuMusicSearchIE,
|
||||||
)
|
)
|
||||||
from .mainstreaming import MainStreamingIE
|
from .mainstreaming import MainStreamingIE
|
||||||
from .malltv import MallTVIE
|
|
||||||
from .mangomolo import (
|
from .mangomolo import (
|
||||||
MangomoloVideoIE,
|
MangomoloVideoIE,
|
||||||
MangomoloLiveIE,
|
MangomoloLiveIE,
|
||||||
|
@ -1073,7 +1058,6 @@ from .meipai import MeipaiIE
|
||||||
from .melonvod import MelonVODIE
|
from .melonvod import MelonVODIE
|
||||||
from .metacritic import MetacriticIE
|
from .metacritic import MetacriticIE
|
||||||
from .mgtv import MGTVIE
|
from .mgtv import MGTVIE
|
||||||
from .miaopai import MiaoPaiIE
|
|
||||||
from .microsoftstream import MicrosoftStreamIE
|
from .microsoftstream import MicrosoftStreamIE
|
||||||
from .microsoftvirtualacademy import (
|
from .microsoftvirtualacademy import (
|
||||||
MicrosoftVirtualAcademyIE,
|
MicrosoftVirtualAcademyIE,
|
||||||
|
@ -1091,7 +1075,6 @@ from .minds import (
|
||||||
MindsChannelIE,
|
MindsChannelIE,
|
||||||
MindsGroupIE,
|
MindsGroupIE,
|
||||||
)
|
)
|
||||||
from .ministrygrid import MinistryGridIE
|
|
||||||
from .minoto import MinotoIE
|
from .minoto import MinotoIE
|
||||||
from .mirrativ import (
|
from .mirrativ import (
|
||||||
MirrativIE,
|
MirrativIE,
|
||||||
|
@ -1119,7 +1102,6 @@ from .mlssoccer import MLSSoccerIE
|
||||||
from .mocha import MochaVideoIE
|
from .mocha import MochaVideoIE
|
||||||
from .mojvideo import MojvideoIE
|
from .mojvideo import MojvideoIE
|
||||||
from .monstercat import MonstercatIE
|
from .monstercat import MonstercatIE
|
||||||
from .morningstar import MorningstarIE
|
|
||||||
from .motherless import (
|
from .motherless import (
|
||||||
MotherlessIE,
|
MotherlessIE,
|
||||||
MotherlessGroupIE,
|
MotherlessGroupIE,
|
||||||
|
@ -1364,7 +1346,6 @@ from .nuvid import NuvidIE
|
||||||
from .nzherald import NZHeraldIE
|
from .nzherald import NZHeraldIE
|
||||||
from .nzonscreen import NZOnScreenIE
|
from .nzonscreen import NZOnScreenIE
|
||||||
from .nzz import NZZIE
|
from .nzz import NZZIE
|
||||||
from .odatv import OdaTVIE
|
|
||||||
from .odkmedia import OnDemandChinaEpisodeIE
|
from .odkmedia import OnDemandChinaEpisodeIE
|
||||||
from .odnoklassniki import OdnoklassnikiIE
|
from .odnoklassniki import OdnoklassnikiIE
|
||||||
from .oftv import (
|
from .oftv import (
|
||||||
|
@ -1476,7 +1457,6 @@ from .platzi import (
|
||||||
PlatziCourseIE,
|
PlatziCourseIE,
|
||||||
)
|
)
|
||||||
from .playplustv import PlayPlusTVIE
|
from .playplustv import PlayPlusTVIE
|
||||||
from .playstuff import PlayStuffIE
|
|
||||||
from .playsuisse import PlaySuisseIE
|
from .playsuisse import PlaySuisseIE
|
||||||
from .playtvak import PlaytvakIE
|
from .playtvak import PlaytvakIE
|
||||||
from .playwire import PlaywireIE
|
from .playwire import PlaywireIE
|
||||||
|
@ -1598,7 +1578,6 @@ from .raywenderlich import (
|
||||||
RayWenderlichIE,
|
RayWenderlichIE,
|
||||||
RayWenderlichCourseIE,
|
RayWenderlichCourseIE,
|
||||||
)
|
)
|
||||||
from .rbmaradio import RBMARadioIE
|
|
||||||
from .rbgtum import (
|
from .rbgtum import (
|
||||||
RbgTumIE,
|
RbgTumIE,
|
||||||
RbgTumCourseIE,
|
RbgTumCourseIE,
|
||||||
|
@ -1630,7 +1609,6 @@ from .redgifs import (
|
||||||
RedGifsUserIE,
|
RedGifsUserIE,
|
||||||
)
|
)
|
||||||
from .redtube import RedTubeIE
|
from .redtube import RedTubeIE
|
||||||
from .regiotv import RegioTVIE
|
|
||||||
from .rentv import (
|
from .rentv import (
|
||||||
RENTVIE,
|
RENTVIE,
|
||||||
RENTVArticleIE,
|
RENTVArticleIE,
|
||||||
|
@ -1639,6 +1617,7 @@ from .restudy import RestudyIE
|
||||||
from .reuters import ReutersIE
|
from .reuters import ReutersIE
|
||||||
from .reverbnation import ReverbNationIE
|
from .reverbnation import ReverbNationIE
|
||||||
from .rheinmaintv import RheinMainTVIE
|
from .rheinmaintv import RheinMainTVIE
|
||||||
|
from .ridehome import RideHomeIE
|
||||||
from .rinsefm import (
|
from .rinsefm import (
|
||||||
RinseFMIE,
|
RinseFMIE,
|
||||||
RinseFMArtistPlaylistIE,
|
RinseFMArtistPlaylistIE,
|
||||||
|
@ -1737,7 +1716,6 @@ from .safari import (
|
||||||
from .saitosan import SaitosanIE
|
from .saitosan import SaitosanIE
|
||||||
from .samplefocus import SampleFocusIE
|
from .samplefocus import SampleFocusIE
|
||||||
from .sapo import SapoIE
|
from .sapo import SapoIE
|
||||||
from .savefrom import SaveFromIE
|
|
||||||
from .sbs import SBSIE
|
from .sbs import SBSIE
|
||||||
from .sbscokr import (
|
from .sbscokr import (
|
||||||
SBSCoKrIE,
|
SBSCoKrIE,
|
||||||
|
@ -1757,7 +1735,6 @@ from .scte import (
|
||||||
SCTECourseIE,
|
SCTECourseIE,
|
||||||
)
|
)
|
||||||
from .scrolller import ScrolllerIE
|
from .scrolller import ScrolllerIE
|
||||||
from .seeker import SeekerIE
|
|
||||||
from .sejmpl import SejmIE
|
from .sejmpl import SejmIE
|
||||||
from .senalcolombia import SenalColombiaLiveIE
|
from .senalcolombia import SenalColombiaLiveIE
|
||||||
from .senategov import SenateISVPIE, SenateGovIE
|
from .senategov import SenateISVPIE, SenateGovIE
|
||||||
|
@ -1900,7 +1877,6 @@ from .storyfire import (
|
||||||
)
|
)
|
||||||
from .streamable import StreamableIE
|
from .streamable import StreamableIE
|
||||||
from .streamcz import StreamCZIE
|
from .streamcz import StreamCZIE
|
||||||
from .streamff import StreamFFIE
|
|
||||||
from .streetvoice import StreetVoiceIE
|
from .streetvoice import StreetVoiceIE
|
||||||
from .stretchinternet import StretchInternetIE
|
from .stretchinternet import StretchInternetIE
|
||||||
from .stripchat import StripchatIE
|
from .stripchat import StripchatIE
|
||||||
|
@ -1929,7 +1905,6 @@ from .tbsjp import (
|
||||||
TBSJPProgramIE,
|
TBSJPProgramIE,
|
||||||
TBSJPPlaylistIE,
|
TBSJPPlaylistIE,
|
||||||
)
|
)
|
||||||
from .tdslifeway import TDSLifewayIE
|
|
||||||
from .teachable import (
|
from .teachable import (
|
||||||
TeachableIE,
|
TeachableIE,
|
||||||
TeachableCourseIE,
|
TeachableCourseIE,
|
||||||
|
@ -2499,6 +2474,7 @@ from .zee5 import (
|
||||||
Zee5SeriesIE,
|
Zee5SeriesIE,
|
||||||
)
|
)
|
||||||
from .zeenews import ZeeNewsIE
|
from .zeenews import ZeeNewsIE
|
||||||
|
from .zenporn import ZenPornIE
|
||||||
from .zetland import ZetlandDKArticleIE
|
from .zetland import ZetlandDKArticleIE
|
||||||
from .zhihu import ZhihuIE
|
from .zhihu import ZhihuIE
|
||||||
from .zingmp3 import (
|
from .zingmp3 import (
|
||||||
|
|
|
@ -245,7 +245,6 @@ class ABCIViewIE(InfoExtractor):
|
||||||
'episode_id': 'NC2203H039S00',
|
'episode_id': 'NC2203H039S00',
|
||||||
'season_number': 2022,
|
'season_number': 2022,
|
||||||
'season': 'Season 2022',
|
'season': 'Season 2022',
|
||||||
'episode_number': None,
|
|
||||||
'episode': 'Locking Up Kids',
|
'episode': 'Locking Up Kids',
|
||||||
'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/nc/NC2203H039S00_636d8a0944a22_1920.jpg',
|
'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/nc/NC2203H039S00_636d8a0944a22_1920.jpg',
|
||||||
'timestamp': 1668460497,
|
'timestamp': 1668460497,
|
||||||
|
@ -271,8 +270,6 @@ class ABCIViewIE(InfoExtractor):
|
||||||
'episode_id': 'RF2004Q043S00',
|
'episode_id': 'RF2004Q043S00',
|
||||||
'season_number': 2021,
|
'season_number': 2021,
|
||||||
'season': 'Season 2021',
|
'season': 'Season 2021',
|
||||||
'episode_number': None,
|
|
||||||
'episode': None,
|
|
||||||
'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/rf/RF2004Q043S00_61a950639dbc0_1920.jpg',
|
'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/rf/RF2004Q043S00_61a950639dbc0_1920.jpg',
|
||||||
'timestamp': 1638710705,
|
'timestamp': 1638710705,
|
||||||
|
|
||||||
|
|
|
@ -53,7 +53,7 @@ class AbemaLicenseHandler(urllib.request.BaseHandler):
|
||||||
# the protocol that this should really handle is 'abematv-license://'
|
# the protocol that this should really handle is 'abematv-license://'
|
||||||
# abematv_license_open is just a placeholder for development purposes
|
# abematv_license_open is just a placeholder for development purposes
|
||||||
# ref. https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/urllib/request.py#L510
|
# ref. https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/urllib/request.py#L510
|
||||||
setattr(self, 'abematv-license_open', getattr(self, 'abematv_license_open'))
|
setattr(self, 'abematv-license_open', getattr(self, 'abematv_license_open', None))
|
||||||
self.ie = ie
|
self.ie = ie
|
||||||
|
|
||||||
def _get_videokey_from_ticket(self, ticket):
|
def _get_videokey_from_ticket(self, ticket):
|
||||||
|
@ -259,7 +259,7 @@ class AbemaTVIE(AbemaTVBaseIE):
|
||||||
'title': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
|
'title': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
|
||||||
'series': 'ゆるキャン△ SEASON2',
|
'series': 'ゆるキャン△ SEASON2',
|
||||||
'episode': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
|
'episode': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
|
||||||
'series_number': 2,
|
'season_number': 2,
|
||||||
'episode_number': 1,
|
'episode_number': 1,
|
||||||
'description': 'md5:9c5a3172ae763278f9303922f0ea5b17',
|
'description': 'md5:9c5a3172ae763278f9303922f0ea5b17',
|
||||||
},
|
},
|
||||||
|
|
|
@ -3,6 +3,7 @@ from ..utils import (
|
||||||
float_or_none,
|
float_or_none,
|
||||||
format_field,
|
format_field,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
str_or_none,
|
||||||
traverse_obj,
|
traverse_obj,
|
||||||
parse_codecs,
|
parse_codecs,
|
||||||
parse_qs,
|
parse_qs,
|
||||||
|
@ -129,7 +130,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
|
||||||
'title': '红孩儿之趴趴蛙寻石记 第5话 ',
|
'title': '红孩儿之趴趴蛙寻石记 第5话 ',
|
||||||
'duration': 760.0,
|
'duration': 760.0,
|
||||||
'season': '红孩儿之趴趴蛙寻石记',
|
'season': '红孩儿之趴趴蛙寻石记',
|
||||||
'season_id': 5023171,
|
'season_id': '5023171',
|
||||||
'season_number': 1, # series has only 1 season
|
'season_number': 1, # series has only 1 season
|
||||||
'episode': 'Episode 5',
|
'episode': 'Episode 5',
|
||||||
'episode_number': 5,
|
'episode_number': 5,
|
||||||
|
@ -146,7 +147,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
|
||||||
'title': '叽歪老表(第二季) 第5话 坚不可摧',
|
'title': '叽歪老表(第二季) 第5话 坚不可摧',
|
||||||
'season': '叽歪老表(第二季)',
|
'season': '叽歪老表(第二季)',
|
||||||
'season_number': 2,
|
'season_number': 2,
|
||||||
'season_id': 6065485,
|
'season_id': '6065485',
|
||||||
'episode': '坚不可摧',
|
'episode': '坚不可摧',
|
||||||
'episode_number': 5,
|
'episode_number': 5,
|
||||||
'upload_date': '20220324',
|
'upload_date': '20220324',
|
||||||
|
@ -191,7 +192,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
|
||||||
'title': json_bangumi_data.get('showTitle'),
|
'title': json_bangumi_data.get('showTitle'),
|
||||||
'thumbnail': json_bangumi_data.get('image'),
|
'thumbnail': json_bangumi_data.get('image'),
|
||||||
'season': json_bangumi_data.get('bangumiTitle'),
|
'season': json_bangumi_data.get('bangumiTitle'),
|
||||||
'season_id': season_id,
|
'season_id': str_or_none(season_id),
|
||||||
'season_number': season_number,
|
'season_number': season_number,
|
||||||
'episode': json_bangumi_data.get('title'),
|
'episode': json_bangumi_data.get('title'),
|
||||||
'episode_number': episode_number,
|
'episode_number': episode_number,
|
||||||
|
|
|
@ -107,7 +107,6 @@ class AdultSwimIE(TurnerBaseIE):
|
||||||
title
|
title
|
||||||
tvRating
|
tvRating
|
||||||
}''' % episode_path
|
}''' % episode_path
|
||||||
['getVideoBySlug']
|
|
||||||
else:
|
else:
|
||||||
query = query % '''metaDescription
|
query = query % '''metaDescription
|
||||||
title
|
title
|
||||||
|
|
|
@ -4,6 +4,7 @@ from .archiveorg import ArchiveOrgIE
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
InAdvancePagedList,
|
InAdvancePagedList,
|
||||||
|
clean_html,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
orderedSet,
|
orderedSet,
|
||||||
str_to_int,
|
str_to_int,
|
||||||
|
@ -22,7 +23,7 @@ class AltCensoredIE(InfoExtractor):
|
||||||
'title': "QUELLES SONT LES CONSÉQUENCES DE L'HYPERSEXUALISATION DE LA SOCIÉTÉ ?",
|
'title': "QUELLES SONT LES CONSÉQUENCES DE L'HYPERSEXUALISATION DE LA SOCIÉTÉ ?",
|
||||||
'display_id': 'k0srjLSkga8.webm',
|
'display_id': 'k0srjLSkga8.webm',
|
||||||
'release_date': '20180403',
|
'release_date': '20180403',
|
||||||
'creator': 'Virginie Vota',
|
'creators': ['Virginie Vota'],
|
||||||
'release_year': 2018,
|
'release_year': 2018,
|
||||||
'upload_date': '20230318',
|
'upload_date': '20230318',
|
||||||
'uploader': 'admin@altcensored.com',
|
'uploader': 'admin@altcensored.com',
|
||||||
|
@ -39,6 +40,8 @@ class AltCensoredIE(InfoExtractor):
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
category = clean_html(self._html_search_regex(
|
||||||
|
r'<a href="/category/\d+">([^<]+)</a>', webpage, 'category', default=None))
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'_type': 'url_transparent',
|
'_type': 'url_transparent',
|
||||||
|
@ -46,9 +49,7 @@ class AltCensoredIE(InfoExtractor):
|
||||||
'ie_key': ArchiveOrgIE.ie_key(),
|
'ie_key': ArchiveOrgIE.ie_key(),
|
||||||
'view_count': str_to_int(self._html_search_regex(
|
'view_count': str_to_int(self._html_search_regex(
|
||||||
r'YouTube Views:(?:\s| )*([\d,]+)', webpage, 'view count', default=None)),
|
r'YouTube Views:(?:\s| )*([\d,]+)', webpage, 'view count', default=None)),
|
||||||
'categories': self._html_search_regex(
|
'categories': [category] if category else None,
|
||||||
r'<a href="/category/\d+">\s*\n?\s*([^<]+)</a>',
|
|
||||||
webpage, 'category', default='').split() or None,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -62,14 +63,21 @@ class AltCensoredChannelIE(InfoExtractor):
|
||||||
'title': 'Virginie Vota',
|
'title': 'Virginie Vota',
|
||||||
'id': 'UCFPTO55xxHqFqkzRZHu4kcw',
|
'id': 'UCFPTO55xxHqFqkzRZHu4kcw',
|
||||||
},
|
},
|
||||||
'playlist_count': 91
|
'playlist_count': 85,
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://altcensored.com/channel/UC9CcJ96HKMWn0LZlcxlpFTw',
|
'url': 'https://altcensored.com/channel/UC9CcJ96HKMWn0LZlcxlpFTw',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'title': 'yukikaze775',
|
'title': 'yukikaze775',
|
||||||
'id': 'UC9CcJ96HKMWn0LZlcxlpFTw',
|
'id': 'UC9CcJ96HKMWn0LZlcxlpFTw',
|
||||||
},
|
},
|
||||||
'playlist_count': 4
|
'playlist_count': 4,
|
||||||
|
}, {
|
||||||
|
'url': 'https://altcensored.com/channel/UCfYbb7nga6-icsFWWgS-kWw',
|
||||||
|
'info_dict': {
|
||||||
|
'title': 'Mister Metokur',
|
||||||
|
'id': 'UCfYbb7nga6-icsFWWgS-kWw',
|
||||||
|
},
|
||||||
|
'playlist_count': 121,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
@ -78,7 +86,7 @@ class AltCensoredChannelIE(InfoExtractor):
|
||||||
url, channel_id, 'Download channel webpage', 'Unable to get channel webpage')
|
url, channel_id, 'Download channel webpage', 'Unable to get channel webpage')
|
||||||
title = self._html_search_meta('altcen_title', webpage, 'title', fatal=False)
|
title = self._html_search_meta('altcen_title', webpage, 'title', fatal=False)
|
||||||
page_count = int_or_none(self._html_search_regex(
|
page_count = int_or_none(self._html_search_regex(
|
||||||
r'<a[^>]+href="/channel/\w+/page/(\d+)">(?:\1)</a>',
|
r'<a[^>]+href="/channel/[\w-]+/page/(\d+)">(?:\1)</a>',
|
||||||
webpage, 'page count', default='1'))
|
webpage, 'page count', default='1'))
|
||||||
|
|
||||||
def page_func(page_num):
|
def page_func(page_num):
|
||||||
|
|
|
@ -67,7 +67,7 @@ class AntennaGrWatchIE(AntennaBaseIE):
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
info = self._download_and_extract_api_data(video_id, netloc)
|
info = self._download_and_extract_api_data(video_id, netloc)
|
||||||
info['description'] = self._og_search_description(webpage, default=None)
|
info['description'] = self._og_search_description(webpage, default=None)
|
||||||
info['_old_archive_ids'] = [make_archive_id('Ant1NewsGrWatch', video_id)],
|
info['_old_archive_ids'] = [make_archive_id('Ant1NewsGrWatch', video_id)]
|
||||||
return info
|
return info
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -31,6 +31,7 @@ from ..utils import (
|
||||||
unified_timestamp,
|
unified_timestamp,
|
||||||
url_or_none,
|
url_or_none,
|
||||||
urlhandle_detect_ext,
|
urlhandle_detect_ext,
|
||||||
|
variadic,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -49,7 +50,7 @@ class ArchiveOrgIE(InfoExtractor):
|
||||||
'release_date': '19681210',
|
'release_date': '19681210',
|
||||||
'timestamp': 1268695290,
|
'timestamp': 1268695290,
|
||||||
'upload_date': '20100315',
|
'upload_date': '20100315',
|
||||||
'creator': 'SRI International',
|
'creators': ['SRI International'],
|
||||||
'uploader': 'laura@archive.org',
|
'uploader': 'laura@archive.org',
|
||||||
'thumbnail': r're:https://archive\.org/download/.*\.jpg',
|
'thumbnail': r're:https://archive\.org/download/.*\.jpg',
|
||||||
'display_id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.cdr',
|
'display_id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.cdr',
|
||||||
|
@ -109,7 +110,7 @@ class ArchiveOrgIE(InfoExtractor):
|
||||||
'title': 'Turning',
|
'title': 'Turning',
|
||||||
'ext': 'flac',
|
'ext': 'flac',
|
||||||
'track': 'Turning',
|
'track': 'Turning',
|
||||||
'creator': 'Grateful Dead',
|
'creators': ['Grateful Dead'],
|
||||||
'display_id': 'gd1977-05-08d01t01.flac',
|
'display_id': 'gd1977-05-08d01t01.flac',
|
||||||
'track_number': 1,
|
'track_number': 1,
|
||||||
'album': '1977-05-08 - Barton Hall - Cornell University',
|
'album': '1977-05-08 - Barton Hall - Cornell University',
|
||||||
|
@ -129,7 +130,7 @@ class ArchiveOrgIE(InfoExtractor):
|
||||||
'location': 'Barton Hall - Cornell University',
|
'location': 'Barton Hall - Cornell University',
|
||||||
'duration': 438.68,
|
'duration': 438.68,
|
||||||
'track': 'Deal',
|
'track': 'Deal',
|
||||||
'creator': 'Grateful Dead',
|
'creators': ['Grateful Dead'],
|
||||||
'album': '1977-05-08 - Barton Hall - Cornell University',
|
'album': '1977-05-08 - Barton Hall - Cornell University',
|
||||||
'release_date': '19770508',
|
'release_date': '19770508',
|
||||||
'display_id': 'gd1977-05-08d01t07.flac',
|
'display_id': 'gd1977-05-08d01t07.flac',
|
||||||
|
@ -167,7 +168,7 @@ class ArchiveOrgIE(InfoExtractor):
|
||||||
'upload_date': '20160610',
|
'upload_date': '20160610',
|
||||||
'description': 'md5:f70956a156645a658a0dc9513d9e78b7',
|
'description': 'md5:f70956a156645a658a0dc9513d9e78b7',
|
||||||
'uploader': 'dimitrios@archive.org',
|
'uploader': 'dimitrios@archive.org',
|
||||||
'creator': ['British Broadcasting Corporation', 'Time-Life Films'],
|
'creators': ['British Broadcasting Corporation', 'Time-Life Films'],
|
||||||
'timestamp': 1465594947,
|
'timestamp': 1465594947,
|
||||||
},
|
},
|
||||||
'playlist': [
|
'playlist': [
|
||||||
|
@ -257,7 +258,7 @@ class ArchiveOrgIE(InfoExtractor):
|
||||||
'title': m['title'],
|
'title': m['title'],
|
||||||
'description': clean_html(m.get('description')),
|
'description': clean_html(m.get('description')),
|
||||||
'uploader': dict_get(m, ['uploader', 'adder']),
|
'uploader': dict_get(m, ['uploader', 'adder']),
|
||||||
'creator': m.get('creator'),
|
'creators': traverse_obj(m, ('creator', {variadic}, {lambda x: x[0] and list(x)})),
|
||||||
'license': m.get('licenseurl'),
|
'license': m.get('licenseurl'),
|
||||||
'release_date': unified_strdate(m.get('date')),
|
'release_date': unified_strdate(m.get('date')),
|
||||||
'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
|
'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
|
||||||
|
@ -272,7 +273,7 @@ class ArchiveOrgIE(InfoExtractor):
|
||||||
'title': f.get('title') or f['name'],
|
'title': f.get('title') or f['name'],
|
||||||
'display_id': f['name'],
|
'display_id': f['name'],
|
||||||
'description': clean_html(f.get('description')),
|
'description': clean_html(f.get('description')),
|
||||||
'creator': f.get('creator'),
|
'creators': traverse_obj(f, ('creator', {variadic}, {lambda x: x[0] and list(x)})),
|
||||||
'duration': parse_duration(f.get('length')),
|
'duration': parse_duration(f.get('length')),
|
||||||
'track_number': int_or_none(f.get('track')),
|
'track_number': int_or_none(f.get('track')),
|
||||||
'album': f.get('album'),
|
'album': f.get('album'),
|
||||||
|
@ -300,7 +301,7 @@ class ArchiveOrgIE(InfoExtractor):
|
||||||
is_logged_in = bool(self._get_cookies('https://archive.org').get('logged-in-sig'))
|
is_logged_in = bool(self._get_cookies('https://archive.org').get('logged-in-sig'))
|
||||||
if extension in KNOWN_EXTENSIONS and (not f.get('private') or is_logged_in):
|
if extension in KNOWN_EXTENSIONS and (not f.get('private') or is_logged_in):
|
||||||
entry['formats'].append({
|
entry['formats'].append({
|
||||||
'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
|
'url': 'https://archive.org/download/' + identifier + '/' + urllib.parse.quote(f['name']),
|
||||||
'format': f.get('format'),
|
'format': f.get('format'),
|
||||||
'width': int_or_none(f.get('width')),
|
'width': int_or_none(f.get('width')),
|
||||||
'height': int_or_none(f.get('height')),
|
'height': int_or_none(f.get('height')),
|
||||||
|
|
|
@ -142,10 +142,10 @@ class ArteTVIE(ArteTVBaseIE):
|
||||||
def _fix_accessible_subs_locale(subs):
|
def _fix_accessible_subs_locale(subs):
|
||||||
updated_subs = {}
|
updated_subs = {}
|
||||||
for lang, sub_formats in subs.items():
|
for lang, sub_formats in subs.items():
|
||||||
for format in sub_formats:
|
for fmt in sub_formats:
|
||||||
if format.get('url', '').endswith('-MAL.m3u8'):
|
if fmt.get('url', '').endswith('-MAL.m3u8'):
|
||||||
lang += '-acc'
|
lang += '-acc'
|
||||||
updated_subs.setdefault(lang, []).append(format)
|
updated_subs.setdefault(lang, []).append(fmt)
|
||||||
return updated_subs
|
return updated_subs
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|
|
@ -24,7 +24,8 @@ class AxsIE(InfoExtractor):
|
||||||
'timestamp': 1685729564,
|
'timestamp': 1685729564,
|
||||||
'duration': 1284.216,
|
'duration': 1284.216,
|
||||||
'series': 'Rock & Roll Road Trip with Sammy Hagar',
|
'series': 'Rock & Roll Road Trip with Sammy Hagar',
|
||||||
'season': 2,
|
'season': 'Season 2',
|
||||||
|
'season_number': 2,
|
||||||
'episode': '3',
|
'episode': '3',
|
||||||
'thumbnail': 'https://images.dotstudiopro.com/5f4e9d330a0c3b295a7e8394',
|
'thumbnail': 'https://images.dotstudiopro.com/5f4e9d330a0c3b295a7e8394',
|
||||||
},
|
},
|
||||||
|
@ -41,7 +42,8 @@ class AxsIE(InfoExtractor):
|
||||||
'timestamp': 1676403615,
|
'timestamp': 1676403615,
|
||||||
'duration': 2570.668,
|
'duration': 2570.668,
|
||||||
'series': 'The Big Interview with Dan Rather',
|
'series': 'The Big Interview with Dan Rather',
|
||||||
'season': 3,
|
'season': 'Season 3',
|
||||||
|
'season_number': 3,
|
||||||
'episode': '5',
|
'episode': '5',
|
||||||
'thumbnail': 'https://images.dotstudiopro.com/5f4d1901f340b50d937cec32',
|
'thumbnail': 'https://images.dotstudiopro.com/5f4d1901f340b50d937cec32',
|
||||||
},
|
},
|
||||||
|
@ -77,7 +79,7 @@ class AxsIE(InfoExtractor):
|
||||||
'title': ('title', {str}),
|
'title': ('title', {str}),
|
||||||
'description': ('description', {str}),
|
'description': ('description', {str}),
|
||||||
'series': ('seriestitle', {str}),
|
'series': ('seriestitle', {str}),
|
||||||
'season': ('season', {int}),
|
'season_number': ('season', {int}),
|
||||||
'episode': ('episode', {str}),
|
'episode': ('episode', {str}),
|
||||||
'duration': ('duration', {float_or_none}),
|
'duration': ('duration', {float_or_none}),
|
||||||
'timestamp': ('updated_at', {parse_iso8601}),
|
'timestamp': ('updated_at', {parse_iso8601}),
|
||||||
|
|
|
@ -3,7 +3,7 @@ from .youtube import YoutubeIE, YoutubeTabIE
|
||||||
|
|
||||||
|
|
||||||
class BeatBumpVideoIE(InfoExtractor):
|
class BeatBumpVideoIE(InfoExtractor):
|
||||||
_VALID_URL = r'https://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
|
_VALID_URL = r'https?://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://beatbump.ml/listen?id=MgNrAu2pzNs',
|
'url': 'https://beatbump.ml/listen?id=MgNrAu2pzNs',
|
||||||
'md5': '5ff3fff41d3935b9810a9731e485fe66',
|
'md5': '5ff3fff41d3935b9810a9731e485fe66',
|
||||||
|
@ -48,7 +48,7 @@ class BeatBumpVideoIE(InfoExtractor):
|
||||||
|
|
||||||
|
|
||||||
class BeatBumpPlaylistIE(InfoExtractor):
|
class BeatBumpPlaylistIE(InfoExtractor):
|
||||||
_VALID_URL = r'https://beatbump\.(?:ml|io)/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
|
_VALID_URL = r'https?://beatbump\.(?:ml|io)/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://beatbump.ml/release?id=MPREb_gTAcphH99wE',
|
'url': 'https://beatbump.ml/release?id=MPREb_gTAcphH99wE',
|
||||||
'playlist_count': 50,
|
'playlist_count': 50,
|
||||||
|
|
|
@ -2,6 +2,7 @@ from .common import InfoExtractor
|
||||||
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
str_or_none,
|
||||||
traverse_obj,
|
traverse_obj,
|
||||||
try_get,
|
try_get,
|
||||||
unified_timestamp,
|
unified_timestamp,
|
||||||
|
@ -22,7 +23,7 @@ class BeegIE(InfoExtractor):
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
'upload_date': '20220131',
|
'upload_date': '20220131',
|
||||||
'timestamp': 1643656455,
|
'timestamp': 1643656455,
|
||||||
'display_id': 2540839,
|
'display_id': '2540839',
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://beeg.com/-0599050563103750?t=4-861',
|
'url': 'https://beeg.com/-0599050563103750?t=4-861',
|
||||||
|
@ -36,7 +37,7 @@ class BeegIE(InfoExtractor):
|
||||||
'age_limit': 18,
|
'age_limit': 18,
|
||||||
'description': 'md5:b4fc879a58ae6c604f8f259155b7e3b9',
|
'description': 'md5:b4fc879a58ae6c604f8f259155b7e3b9',
|
||||||
'timestamp': 1643623200,
|
'timestamp': 1643623200,
|
||||||
'display_id': 2569965,
|
'display_id': '2569965',
|
||||||
'upload_date': '20220131',
|
'upload_date': '20220131',
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
|
@ -78,7 +79,7 @@ class BeegIE(InfoExtractor):
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'display_id': first_fact.get('id'),
|
'display_id': str_or_none(first_fact.get('id')),
|
||||||
'title': traverse_obj(video, ('file', 'stuff', 'sf_name')),
|
'title': traverse_obj(video, ('file', 'stuff', 'sf_name')),
|
||||||
'description': traverse_obj(video, ('file', 'stuff', 'sf_story')),
|
'description': traverse_obj(video, ('file', 'stuff', 'sf_story')),
|
||||||
'timestamp': unified_timestamp(first_fact.get('fc_created')),
|
'timestamp': unified_timestamp(first_fact.get('fc_created')),
|
||||||
|
|
|
@ -32,7 +32,7 @@ class BellMediaIE(InfoExtractor):
|
||||||
'description': 'md5:810f7f8c6a83ad5b48677c3f8e5bb2c3',
|
'description': 'md5:810f7f8c6a83ad5b48677c3f8e5bb2c3',
|
||||||
'upload_date': '20180525',
|
'upload_date': '20180525',
|
||||||
'timestamp': 1527288600,
|
'timestamp': 1527288600,
|
||||||
'season_id': 73997,
|
'season_id': '73997',
|
||||||
'season': '2018',
|
'season': '2018',
|
||||||
'thumbnail': 'http://images2.9c9media.com/image_asset/2018_5_25_baf30cbd-b28d-4a18-9903-4bb8713b00f5_PNG_956x536.jpg',
|
'thumbnail': 'http://images2.9c9media.com/image_asset/2018_5_25_baf30cbd-b28d-4a18-9903-4bb8713b00f5_PNG_956x536.jpg',
|
||||||
'tags': [],
|
'tags': [],
|
||||||
|
|
|
@ -93,7 +93,6 @@ class BFMTVArticleIE(BFMTVBaseIE):
|
||||||
'id': '6318445464112',
|
'id': '6318445464112',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Le plein de bioéthanol fait de plus en plus mal à la pompe',
|
'title': 'Le plein de bioéthanol fait de plus en plus mal à la pompe',
|
||||||
'description': None,
|
|
||||||
'uploader_id': '876630703001',
|
'uploader_id': '876630703001',
|
||||||
'upload_date': '20230110',
|
'upload_date': '20230110',
|
||||||
'timestamp': 1673341692,
|
'timestamp': 1673341692,
|
||||||
|
|
|
@ -1965,6 +1965,7 @@ class BiliIntlIE(BiliIntlBaseIE):
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
def _make_url(video_id, series_id=None):
|
def _make_url(video_id, series_id=None):
|
||||||
if series_id:
|
if series_id:
|
||||||
return f'https://www.bilibili.tv/en/play/{series_id}/{video_id}'
|
return f'https://www.bilibili.tv/en/play/{series_id}/{video_id}'
|
||||||
|
@ -1996,7 +1997,7 @@ class BiliIntlIE(BiliIntlBaseIE):
|
||||||
'title': get_element_by_class(
|
'title': get_element_by_class(
|
||||||
'bstar-meta__title', webpage) or self._html_search_meta('og:title', webpage),
|
'bstar-meta__title', webpage) or self._html_search_meta('og:title', webpage),
|
||||||
'description': get_element_by_class(
|
'description': get_element_by_class(
|
||||||
'bstar-meta__desc', webpage) or self._html_search_meta('og:description'),
|
'bstar-meta__desc', webpage) or self._html_search_meta('og:description', webpage),
|
||||||
}, self._search_json_ld(webpage, video_id, default={}))
|
}, self._search_json_ld(webpage, video_id, default={}))
|
||||||
|
|
||||||
def _get_comments_reply(self, root_id, next_id=0, display_id=None):
|
def _get_comments_reply(self, root_id, next_id=0, display_id=None):
|
||||||
|
|
|
@ -185,7 +185,6 @@ class BitChuteChannelIE(InfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'UGlrF9o9b-Q',
|
'id': 'UGlrF9o9b-Q',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'filesize': None,
|
|
||||||
'title': 'This is the first video on #BitChute !',
|
'title': 'This is the first video on #BitChute !',
|
||||||
'description': 'md5:a0337e7b1fe39e32336974af8173a034',
|
'description': 'md5:a0337e7b1fe39e32336974af8173a034',
|
||||||
'thumbnail': r're:^https?://.*\.jpg$',
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
|
|
|
@ -4,10 +4,12 @@ from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
|
str_or_none,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class BleacherReportIE(InfoExtractor):
|
class BleacherReportIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/articles/(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/articles/(?P<id>\d+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://bleacherreport.com/articles/2496438-fsu-stat-projections-is-jalen-ramsey-best-defensive-player-in-college-football',
|
'url': 'http://bleacherreport.com/articles/2496438-fsu-stat-projections-is-jalen-ramsey-best-defensive-player-in-college-football',
|
||||||
|
@ -16,7 +18,7 @@ class BleacherReportIE(InfoExtractor):
|
||||||
'id': '2496438',
|
'id': '2496438',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'FSU Stat Projections: Is Jalen Ramsey Best Defensive Player in College Football?',
|
'title': 'FSU Stat Projections: Is Jalen Ramsey Best Defensive Player in College Football?',
|
||||||
'uploader_id': 3992341,
|
'uploader_id': '3992341',
|
||||||
'description': 'CFB, ACC, Florida State',
|
'description': 'CFB, ACC, Florida State',
|
||||||
'timestamp': 1434380212,
|
'timestamp': 1434380212,
|
||||||
'upload_date': '20150615',
|
'upload_date': '20150615',
|
||||||
|
@ -33,7 +35,7 @@ class BleacherReportIE(InfoExtractor):
|
||||||
'timestamp': 1446839961,
|
'timestamp': 1446839961,
|
||||||
'uploader': 'Sean Fay',
|
'uploader': 'Sean Fay',
|
||||||
'description': 'md5:b1601e2314c4d8eec23b6eafe086a757',
|
'description': 'md5:b1601e2314c4d8eec23b6eafe086a757',
|
||||||
'uploader_id': 6466954,
|
'uploader_id': '6466954',
|
||||||
'upload_date': '20151011',
|
'upload_date': '20151011',
|
||||||
},
|
},
|
||||||
'add_ie': ['Youtube'],
|
'add_ie': ['Youtube'],
|
||||||
|
@ -58,7 +60,7 @@ class BleacherReportIE(InfoExtractor):
|
||||||
'id': article_id,
|
'id': article_id,
|
||||||
'title': article_data['title'],
|
'title': article_data['title'],
|
||||||
'uploader': article_data.get('author', {}).get('name'),
|
'uploader': article_data.get('author', {}).get('name'),
|
||||||
'uploader_id': article_data.get('authorId'),
|
'uploader_id': str_or_none(article_data.get('authorId')),
|
||||||
'timestamp': parse_iso8601(article_data.get('createdAt')),
|
'timestamp': parse_iso8601(article_data.get('createdAt')),
|
||||||
'thumbnails': thumbnails,
|
'thumbnails': thumbnails,
|
||||||
'comment_count': int_or_none(article_data.get('commentsCount')),
|
'comment_count': int_or_none(article_data.get('commentsCount')),
|
||||||
|
@ -82,6 +84,7 @@ class BleacherReportIE(InfoExtractor):
|
||||||
|
|
||||||
|
|
||||||
class BleacherReportCMSIE(AMPIE):
|
class BleacherReportCMSIE(AMPIE):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36}|\d{5})'
|
_VALID_URL = r'https?://(?:www\.)?bleacherreport\.com/video_embed\?id=(?P<id>[0-9a-f-]{36}|\d{5})'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1&library=video-cms',
|
'url': 'http://bleacherreport.com/video_embed?id=8fd44c2f-3dc5-4821-9118-2c825a98c0e1&library=video-cms',
|
||||||
|
|
|
@ -2,7 +2,7 @@ from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
class CableAVIE(InfoExtractor):
|
class CableAVIE(InfoExtractor):
|
||||||
_VALID_URL = r'https://cableav\.tv/(?P<id>[a-zA-Z0-9]+)'
|
_VALID_URL = r'https?://cableav\.tv/(?P<id>[a-zA-Z0-9]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://cableav.tv/lS4iR9lWjN8/',
|
'url': 'https://cableav.tv/lS4iR9lWjN8/',
|
||||||
'md5': '7e3fe5e49d61c4233b7f5b0f69b15e18',
|
'md5': '7e3fe5e49d61c4233b7f5b0f69b15e18',
|
||||||
|
|
|
@ -13,7 +13,7 @@ from ..utils import (
|
||||||
|
|
||||||
|
|
||||||
class CamFMShowIE(InfoExtractor):
|
class CamFMShowIE(InfoExtractor):
|
||||||
_VALID_URL = r'https://(?:www\.)?camfm\.co\.uk/shows/(?P<id>[^/]+)'
|
_VALID_URL = r'https?://(?:www\.)?camfm\.co\.uk/shows/(?P<id>[^/]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'playlist_mincount': 5,
|
'playlist_mincount': 5,
|
||||||
'url': 'https://camfm.co.uk/shows/soul-mining/',
|
'url': 'https://camfm.co.uk/shows/soul-mining/',
|
||||||
|
@ -42,7 +42,7 @@ class CamFMShowIE(InfoExtractor):
|
||||||
|
|
||||||
|
|
||||||
class CamFMEpisodeIE(InfoExtractor):
|
class CamFMEpisodeIE(InfoExtractor):
|
||||||
_VALID_URL = r'https://(?:www\.)?camfm\.co\.uk/player/(?P<id>[^/]+)'
|
_VALID_URL = r'https?://(?:www\.)?camfm\.co\.uk/player/(?P<id>[^/]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://camfm.co.uk/player/43336',
|
'url': 'https://camfm.co.uk/player/43336',
|
||||||
'skip': 'Episode will expire - don\'t actually know when, but it will go eventually',
|
'skip': 'Episode will expire - don\'t actually know when, but it will go eventually',
|
||||||
|
|
|
@ -76,6 +76,7 @@ class CBSBaseIE(ThePlatformFeedIE): # XXX: Do not subclass from concrete IE
|
||||||
|
|
||||||
|
|
||||||
class CBSIE(CBSBaseIE):
|
class CBSIE(CBSBaseIE):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = r'''(?x)
|
||||||
(?:
|
(?:
|
||||||
cbs:|
|
cbs:|
|
||||||
|
|
|
@ -1,98 +0,0 @@
|
||||||
from .cbs import CBSIE
|
|
||||||
from ..utils import int_or_none
|
|
||||||
|
|
||||||
|
|
||||||
class CBSInteractiveIE(CBSIE): # XXX: Do not subclass from concrete IE
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?(?P<site>cnet|zdnet)\.com/(?:videos|video(?:/share)?)/(?P<id>[^/?]+)'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'R49SYt__yAfmlXR85z4f7gNmCBDcN_00',
|
|
||||||
'display_id': 'hands-on-with-microsofts-windows-8-1-update',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Hands-on with Microsoft Windows 8.1 Update',
|
|
||||||
'description': 'The new update to the Windows 8 OS brings improved performance for mouse and keyboard users.',
|
|
||||||
'uploader_id': '6085384d-619e-11e3-b231-14feb5ca9861',
|
|
||||||
'uploader': 'Sarah Mitroff',
|
|
||||||
'duration': 70,
|
|
||||||
'timestamp': 1396479627,
|
|
||||||
'upload_date': '20140402',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.cnet.com/videos/whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187/',
|
|
||||||
'md5': 'f11d27b2fa18597fbf92444d2a9ed386',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'kjOJd_OoVJqbg_ZD8MZCOk8Wekb9QccK',
|
|
||||||
'display_id': 'whiny-pothole-tweets-at-local-government-when-hit-by-cars-tomorrow-daily-187',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Whiny potholes tweet at local government when hit by cars (Tomorrow Daily 187)',
|
|
||||||
'description': 'md5:d2b9a95a5ffe978ae6fbd4cf944d618f',
|
|
||||||
'uploader_id': 'b163284d-6b73-44fc-b3e6-3da66c392d40',
|
|
||||||
'uploader': 'Ashley Esqueda',
|
|
||||||
'duration': 1482,
|
|
||||||
'timestamp': 1433289889,
|
|
||||||
'upload_date': '20150603',
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.zdnet.com/video/share/video-keeping-android-smartphones-and-tablets-secure/',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'k0r4T_ehht4xW_hAOqiVQPuBDPZ8SRjt',
|
|
||||||
'display_id': 'video-keeping-android-smartphones-and-tablets-secure',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Video: Keeping Android smartphones and tablets secure',
|
|
||||||
'description': 'Here\'s the best way to keep Android devices secure, and what you do when they\'ve come to the end of their lives.',
|
|
||||||
'uploader_id': 'f2d97ea2-8175-11e2-9d12-0018fe8a00b0',
|
|
||||||
'uploader': 'Adrian Kingsley-Hughes',
|
|
||||||
'duration': 731,
|
|
||||||
'timestamp': 1449129925,
|
|
||||||
'upload_date': '20151203',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.zdnet.com/video/huawei-matebook-x-video/',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
|
||||||
|
|
||||||
MPX_ACCOUNTS = {
|
|
||||||
'cnet': 2198311517,
|
|
||||||
'zdnet': 2387448114,
|
|
||||||
}
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
site, display_id = self._match_valid_url(url).groups()
|
|
||||||
webpage = self._download_webpage(url, display_id)
|
|
||||||
|
|
||||||
data_json = self._html_search_regex(
|
|
||||||
r"data(?:-(?:cnet|zdnet))?-video(?:-(?:uvp(?:js)?|player))?-options='([^']+)'",
|
|
||||||
webpage, 'data json')
|
|
||||||
data = self._parse_json(data_json, display_id)
|
|
||||||
vdata = data.get('video') or (data.get('videos') or data.get('playlist'))[0]
|
|
||||||
|
|
||||||
video_id = vdata['mpxRefId']
|
|
||||||
|
|
||||||
title = vdata['title']
|
|
||||||
author = vdata.get('author')
|
|
||||||
if author:
|
|
||||||
uploader = '%s %s' % (author['firstName'], author['lastName'])
|
|
||||||
uploader_id = author.get('id')
|
|
||||||
else:
|
|
||||||
uploader = None
|
|
||||||
uploader_id = None
|
|
||||||
|
|
||||||
info = self._extract_video_info(video_id, site, self.MPX_ACCOUNTS[site])
|
|
||||||
info.update({
|
|
||||||
'id': video_id,
|
|
||||||
'display_id': display_id,
|
|
||||||
'title': title,
|
|
||||||
'duration': int_or_none(vdata.get('duration')),
|
|
||||||
'uploader': uploader,
|
|
||||||
'uploader_id': uploader_id,
|
|
||||||
})
|
|
||||||
return info
|
|
|
@ -8,6 +8,7 @@ from ..utils import (
|
||||||
|
|
||||||
# class CBSSportsEmbedIE(CBSBaseIE):
|
# class CBSSportsEmbedIE(CBSBaseIE):
|
||||||
class CBSSportsEmbedIE(InfoExtractor):
|
class CBSSportsEmbedIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
IE_NAME = 'cbssports:embed'
|
IE_NAME = 'cbssports:embed'
|
||||||
_VALID_URL = r'''(?ix)https?://(?:(?:www\.)?cbs|embed\.247)sports\.com/player/embed.+?
|
_VALID_URL = r'''(?ix)https?://(?:(?:www\.)?cbs|embed\.247)sports\.com/player/embed.+?
|
||||||
(?:
|
(?:
|
||||||
|
@ -75,6 +76,7 @@ class CBSSportsBaseIE(InfoExtractor):
|
||||||
|
|
||||||
|
|
||||||
class CBSSportsIE(CBSSportsBaseIE):
|
class CBSSportsIE(CBSSportsBaseIE):
|
||||||
|
_WORKING = False
|
||||||
IE_NAME = 'cbssports'
|
IE_NAME = 'cbssports'
|
||||||
_VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/video/(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https?://(?:www\.)?cbssports\.com/[^/]+/video/(?P<id>[^/?#&]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
|
@ -92,6 +94,7 @@ class CBSSportsIE(CBSSportsBaseIE):
|
||||||
|
|
||||||
|
|
||||||
class TwentyFourSevenSportsIE(CBSSportsBaseIE):
|
class TwentyFourSevenSportsIE(CBSSportsBaseIE):
|
||||||
|
_WORKING = False
|
||||||
IE_NAME = '247sports'
|
IE_NAME = '247sports'
|
||||||
_VALID_URL = r'https?://(?:www\.)?247sports\.com/Video/(?:[^/?#&]+-)?(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?247sports\.com/Video/(?:[^/?#&]+-)?(?P<id>\d+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
|
|
|
@ -88,6 +88,20 @@ class CCTVIE(InfoExtractor):
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
# videoCenterId: "id"
|
||||||
|
'url': 'http://news.cctv.com/2024/02/21/ARTIcU5tKIOIF2myEGCATkLo240221.shtml',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '5c846c0518444308ba32c4159df3b3e0',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': '《平“语”近人——习近平喜欢的典故》第三季 第5集:风物长宜放眼量',
|
||||||
|
'uploader': 'yangjuan',
|
||||||
|
'timestamp': 1708554940,
|
||||||
|
'upload_date': '20240221',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
}, {
|
}, {
|
||||||
# var ids = ["id"]
|
# var ids = ["id"]
|
||||||
'url': 'http://www.ncpa-classic.com/clt/more/416/index.shtml',
|
'url': 'http://www.ncpa-classic.com/clt/more/416/index.shtml',
|
||||||
|
@ -128,7 +142,7 @@ class CCTVIE(InfoExtractor):
|
||||||
|
|
||||||
video_id = self._search_regex(
|
video_id = self._search_regex(
|
||||||
[r'var\s+guid\s*=\s*["\']([\da-fA-F]+)',
|
[r'var\s+guid\s*=\s*["\']([\da-fA-F]+)',
|
||||||
r'videoCenterId["\']\s*,\s*["\']([\da-fA-F]+)',
|
r'videoCenterId(?:["\']\s*,|:)\s*["\']([\da-fA-F]+)',
|
||||||
r'changePlayer\s*\(\s*["\']([\da-fA-F]+)',
|
r'changePlayer\s*\(\s*["\']([\da-fA-F]+)',
|
||||||
r'load[Vv]ideo\s*\(\s*["\']([\da-fA-F]+)',
|
r'load[Vv]ideo\s*\(\s*["\']([\da-fA-F]+)',
|
||||||
r'var\s+initMyAray\s*=\s*["\']([\da-fA-F]+)',
|
r'var\s+initMyAray\s*=\s*["\']([\da-fA-F]+)',
|
||||||
|
|
|
@ -51,7 +51,7 @@ class CeskaTelevizeIE(InfoExtractor):
|
||||||
'url': 'http://www.ceskatelevize.cz/ivysilani/zive/ct4/',
|
'url': 'http://www.ceskatelevize.cz/ivysilani/zive/ct4/',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 402,
|
'id': '402',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': r're:^ČT Sport \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
|
'title': r're:^ČT Sport \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
|
||||||
'is_live': True,
|
'is_live': True,
|
||||||
|
|
|
@ -17,6 +17,7 @@ class CGTNIE(InfoExtractor):
|
||||||
'thumbnail': r're:^https?://.*\.jpg$',
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
'timestamp': 1615295940,
|
'timestamp': 1615295940,
|
||||||
'upload_date': '20210309',
|
'upload_date': '20210309',
|
||||||
|
'categories': ['Video'],
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True
|
'skip_download': True
|
||||||
|
@ -29,8 +30,8 @@ class CGTNIE(InfoExtractor):
|
||||||
'title': 'China, Indonesia vow to further deepen maritime cooperation',
|
'title': 'China, Indonesia vow to further deepen maritime cooperation',
|
||||||
'thumbnail': r're:^https?://.*\.png$',
|
'thumbnail': r're:^https?://.*\.png$',
|
||||||
'description': 'China and Indonesia vowed to upgrade their cooperation into the maritime sector and also for political security, economy, and cultural and people-to-people exchanges.',
|
'description': 'China and Indonesia vowed to upgrade their cooperation into the maritime sector and also for political security, economy, and cultural and people-to-people exchanges.',
|
||||||
'author': 'CGTN',
|
'creators': ['CGTN'],
|
||||||
'category': 'China',
|
'categories': ['China'],
|
||||||
'timestamp': 1622950200,
|
'timestamp': 1622950200,
|
||||||
'upload_date': '20210606',
|
'upload_date': '20210606',
|
||||||
},
|
},
|
||||||
|
@ -45,7 +46,12 @@ class CGTNIE(InfoExtractor):
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
download_url = self._html_search_regex(r'data-video ="(?P<url>.+m3u8)"', webpage, 'download_url')
|
download_url = self._html_search_regex(r'data-video ="(?P<url>.+m3u8)"', webpage, 'download_url')
|
||||||
datetime_str = self._html_search_regex(r'<span class="date">\s*(.+?)\s*</span>', webpage, 'datetime_str', fatal=False)
|
datetime_str = self._html_search_regex(
|
||||||
|
r'<span class="date">\s*(.+?)\s*</span>', webpage, 'datetime_str', fatal=False)
|
||||||
|
category = self._html_search_regex(
|
||||||
|
r'<span class="section">\s*(.+?)\s*</span>', webpage, 'category', fatal=False)
|
||||||
|
author = self._search_regex(
|
||||||
|
r'<div class="news-author-name">\s*(.+?)\s*</div>', webpage, 'author', default=None)
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
@ -53,9 +59,7 @@ class CGTNIE(InfoExtractor):
|
||||||
'description': self._og_search_description(webpage, default=None),
|
'description': self._og_search_description(webpage, default=None),
|
||||||
'thumbnail': self._og_search_thumbnail(webpage),
|
'thumbnail': self._og_search_thumbnail(webpage),
|
||||||
'formats': self._extract_m3u8_formats(download_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls'),
|
'formats': self._extract_m3u8_formats(download_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls'),
|
||||||
'category': self._html_search_regex(r'<span class="section">\s*(.+?)\s*</span>',
|
'categories': [category] if category else None,
|
||||||
webpage, 'category', fatal=False),
|
'creators': [author] if author else None,
|
||||||
'author': self._html_search_regex(r'<div class="news-author-name">\s*(.+?)\s*</div>',
|
|
||||||
webpage, 'author', default=None, fatal=False),
|
|
||||||
'timestamp': try_get(unified_timestamp(datetime_str), lambda x: x - 8 * 3600),
|
'timestamp': try_get(unified_timestamp(datetime_str), lambda x: x - 8 * 3600),
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,207 +0,0 @@
|
||||||
import itertools
|
|
||||||
import json
|
|
||||||
import urllib.parse
|
|
||||||
|
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import (
|
|
||||||
ExtractorError,
|
|
||||||
clean_html,
|
|
||||||
int_or_none,
|
|
||||||
str_to_int,
|
|
||||||
url_or_none,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class ChingariBaseIE(InfoExtractor):
|
|
||||||
def _get_post(self, id, post_data):
|
|
||||||
media_data = post_data['mediaLocation']
|
|
||||||
base_url = media_data['base']
|
|
||||||
author_data = post_data.get('authorData', {})
|
|
||||||
song_data = post_data.get('song', {}) # revist this in future for differentiating b/w 'art' and 'author'
|
|
||||||
|
|
||||||
formats = [{
|
|
||||||
'format_id': frmt,
|
|
||||||
'width': str_to_int(frmt[1:]),
|
|
||||||
'url': base_url + frmt_path,
|
|
||||||
} for frmt, frmt_path in media_data.get('transcoded', {}).items()]
|
|
||||||
|
|
||||||
if media_data.get('path'):
|
|
||||||
formats.append({
|
|
||||||
'format_id': 'original',
|
|
||||||
'format_note': 'Direct video.',
|
|
||||||
'url': base_url + '/apipublic' + media_data['path'],
|
|
||||||
'quality': 10,
|
|
||||||
})
|
|
||||||
timestamp = str_to_int(post_data.get('created_at'))
|
|
||||||
if timestamp:
|
|
||||||
timestamp = int_or_none(timestamp, 1000)
|
|
||||||
|
|
||||||
thumbnail, uploader_url = None, None
|
|
||||||
if media_data.get('thumbnail'):
|
|
||||||
thumbnail = base_url + media_data.get('thumbnail')
|
|
||||||
if author_data.get('username'):
|
|
||||||
uploader_url = 'https://chingari.io/' + author_data.get('username')
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': id,
|
|
||||||
'extractor_key': ChingariIE.ie_key(),
|
|
||||||
'extractor': 'Chingari',
|
|
||||||
'title': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
|
|
||||||
'description': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
|
|
||||||
'duration': media_data.get('duration'),
|
|
||||||
'thumbnail': url_or_none(thumbnail),
|
|
||||||
'like_count': post_data.get('likeCount'),
|
|
||||||
'view_count': post_data.get('viewsCount'),
|
|
||||||
'comment_count': post_data.get('commentCount'),
|
|
||||||
'repost_count': post_data.get('shareCount'),
|
|
||||||
'timestamp': timestamp,
|
|
||||||
'uploader_id': post_data.get('userId') or author_data.get('_id'),
|
|
||||||
'uploader': author_data.get('name'),
|
|
||||||
'uploader_url': url_or_none(uploader_url),
|
|
||||||
'track': song_data.get('title'),
|
|
||||||
'artist': song_data.get('author'),
|
|
||||||
'formats': formats,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class ChingariIE(ChingariBaseIE):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?chingari\.io/share/post\?id=(?P<id>[^&/#?]+)'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'https://chingari.io/share/post?id=612f8f4ce1dc57090e8a7beb',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '612f8f4ce1dc57090e8a7beb',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Happy birthday Srila Prabhupada',
|
|
||||||
'description': 'md5:c7080ebfdfeb06016e638c286d6bc3fa',
|
|
||||||
'duration': 0,
|
|
||||||
'thumbnail': 'https://media.chingari.io/uploads/c41d30e2-06b6-4e3b-9b4b-edbb929cec06-1630506826911/thumbnail/198f993f-ce87-4623-82c6-cd071bd6d4f4-1630506828016.jpg',
|
|
||||||
'like_count': int,
|
|
||||||
'view_count': int,
|
|
||||||
'comment_count': int,
|
|
||||||
'repost_count': int,
|
|
||||||
'timestamp': 1630506828,
|
|
||||||
'upload_date': '20210901',
|
|
||||||
'uploader_id': '5f0403982c8bd344f4813f8c',
|
|
||||||
'uploader': 'ISKCON,Inc.',
|
|
||||||
'uploader_url': 'https://chingari.io/iskcon,inc',
|
|
||||||
'track': None,
|
|
||||||
'artist': None,
|
|
||||||
},
|
|
||||||
'params': {'skip_download': True}
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
id = self._match_id(url)
|
|
||||||
post_json = self._download_json(f'https://api.chingari.io/post/post_details/{id}', id)
|
|
||||||
if post_json['code'] != 200:
|
|
||||||
raise ExtractorError(post_json['message'], expected=True)
|
|
||||||
post_data = post_json['data']
|
|
||||||
return self._get_post(id, post_data)
|
|
||||||
|
|
||||||
|
|
||||||
class ChingariUserIE(ChingariBaseIE):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?chingari\.io/(?!share/post)(?P<id>[^/?]+)'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'https://chingari.io/dada1023',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'dada1023',
|
|
||||||
},
|
|
||||||
'params': {'playlistend': 3},
|
|
||||||
'playlist': [{
|
|
||||||
'url': 'https://chingari.io/share/post?id=614781f3ade60b3a0bfff42a',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '614781f3ade60b3a0bfff42a',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': '#chingaribappa ',
|
|
||||||
'description': 'md5:d1df21d84088770468fa63afe3b17857',
|
|
||||||
'duration': 7,
|
|
||||||
'thumbnail': 'https://media.chingari.io/uploads/346d86d4-abb2-474e-a164-ffccf2bbcb72-1632076273717/thumbnail/b0b3aac2-2b86-4dd1-909d-9ed6e57cf77c-1632076275552.jpg',
|
|
||||||
'like_count': int,
|
|
||||||
'view_count': int,
|
|
||||||
'comment_count': int,
|
|
||||||
'repost_count': int,
|
|
||||||
'timestamp': 1632076275,
|
|
||||||
'upload_date': '20210919',
|
|
||||||
'uploader_id': '5efc4b12cca35c3d1794c2d3',
|
|
||||||
'uploader': 'dada (girish) dhawale',
|
|
||||||
'uploader_url': 'https://chingari.io/dada1023',
|
|
||||||
'track': None,
|
|
||||||
'artist': None
|
|
||||||
},
|
|
||||||
'params': {'skip_download': True}
|
|
||||||
}, {
|
|
||||||
'url': 'https://chingari.io/share/post?id=6146b132bcbf860959e12cba',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '6146b132bcbf860959e12cba',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Tactor harvesting',
|
|
||||||
'description': 'md5:8403f12dce68828b77ecee7eb7e887b7',
|
|
||||||
'duration': 59.3,
|
|
||||||
'thumbnail': 'https://media.chingari.io/uploads/b353ca70-7a87-400d-93a6-fa561afaec86-1632022814584/thumbnail/c09302e3-2043-41b1-a2fe-77d97e5bd676-1632022834260.jpg',
|
|
||||||
'like_count': int,
|
|
||||||
'view_count': int,
|
|
||||||
'comment_count': int,
|
|
||||||
'repost_count': int,
|
|
||||||
'timestamp': 1632022834,
|
|
||||||
'upload_date': '20210919',
|
|
||||||
'uploader_id': '5efc4b12cca35c3d1794c2d3',
|
|
||||||
'uploader': 'dada (girish) dhawale',
|
|
||||||
'uploader_url': 'https://chingari.io/dada1023',
|
|
||||||
'track': None,
|
|
||||||
'artist': None
|
|
||||||
},
|
|
||||||
'params': {'skip_download': True}
|
|
||||||
}, {
|
|
||||||
'url': 'https://chingari.io/share/post?id=6145651b74cb030a64c40b82',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '6145651b74cb030a64c40b82',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': '#odiabhajan ',
|
|
||||||
'description': 'md5:687ea36835b9276cf2af90f25e7654cb',
|
|
||||||
'duration': 56.67,
|
|
||||||
'thumbnail': 'https://media.chingari.io/uploads/6cbf216b-babc-4cce-87fe-ceaac8d706ac-1631937782708/thumbnail/8855754f-6669-48ce-b269-8cc0699ed6da-1631937819522.jpg',
|
|
||||||
'like_count': int,
|
|
||||||
'view_count': int,
|
|
||||||
'comment_count': int,
|
|
||||||
'repost_count': int,
|
|
||||||
'timestamp': 1631937819,
|
|
||||||
'upload_date': '20210918',
|
|
||||||
'uploader_id': '5efc4b12cca35c3d1794c2d3',
|
|
||||||
'uploader': 'dada (girish) dhawale',
|
|
||||||
'uploader_url': 'https://chingari.io/dada1023',
|
|
||||||
'track': None,
|
|
||||||
'artist': None
|
|
||||||
},
|
|
||||||
'params': {'skip_download': True}
|
|
||||||
}],
|
|
||||||
}, {
|
|
||||||
'url': 'https://chingari.io/iskcon%2Cinc',
|
|
||||||
'playlist_mincount': 1025,
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'iskcon%2Cinc',
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _entries(self, id):
|
|
||||||
skip = 0
|
|
||||||
has_more = True
|
|
||||||
for page in itertools.count():
|
|
||||||
posts = self._download_json('https://api.chingari.io/users/getPosts', id,
|
|
||||||
data=json.dumps({'userId': id, 'ownerId': id, 'skip': skip, 'limit': 20}).encode(),
|
|
||||||
headers={'content-type': 'application/json;charset=UTF-8'},
|
|
||||||
note='Downloading page %s' % page)
|
|
||||||
for post in posts.get('data', []):
|
|
||||||
post_data = post['post']
|
|
||||||
yield self._get_post(post_data['_id'], post_data)
|
|
||||||
skip += 20
|
|
||||||
has_more = posts['hasMoreData']
|
|
||||||
if not has_more:
|
|
||||||
break
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
alt_id = self._match_id(url)
|
|
||||||
post_json = self._download_json(f'https://api.chingari.io/user/{alt_id}', alt_id)
|
|
||||||
if post_json['code'] != 200:
|
|
||||||
raise ExtractorError(post_json['message'], expected=True)
|
|
||||||
id = post_json['data']['_id']
|
|
||||||
return self.playlist_result(self._entries(id), playlist_id=alt_id)
|
|
|
@ -2,7 +2,7 @@ import functools
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
UserNotLive,
|
||||||
float_or_none,
|
float_or_none,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
|
@ -40,7 +40,7 @@ class CHZZKLiveIE(InfoExtractor):
|
||||||
note='Downloading channel info', errnote='Unable to download channel info')['content']
|
note='Downloading channel info', errnote='Unable to download channel info')['content']
|
||||||
|
|
||||||
if live_detail.get('status') == 'CLOSE':
|
if live_detail.get('status') == 'CLOSE':
|
||||||
raise ExtractorError('The channel is not currently live', expected=True)
|
raise UserNotLive(video_id=channel_id)
|
||||||
|
|
||||||
live_playback = self._parse_json(live_detail['livePlaybackJson'], channel_id)
|
live_playback = self._parse_json(live_detail['livePlaybackJson'], channel_id)
|
||||||
|
|
||||||
|
|
|
@ -2,6 +2,7 @@ from .hbo import HBOBaseIE
|
||||||
|
|
||||||
|
|
||||||
class CinemaxIE(HBOBaseIE):
|
class CinemaxIE(HBOBaseIE):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'https?://(?:www\.)?cinemax\.com/(?P<path>[^/]+/video/[0-9a-z-]+-(?P<id>\d+))'
|
_VALID_URL = r'https?://(?:www\.)?cinemax\.com/(?P<path>[^/]+/video/[0-9a-z-]+-(?P<id>\d+))'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.cinemax.com/warrior/video/s1-ep-1-recap-20126903',
|
'url': 'https://www.cinemax.com/warrior/video/s1-ep-1-recap-20126903',
|
||||||
|
|
|
@ -13,7 +13,7 @@ from ..utils import (
|
||||||
|
|
||||||
|
|
||||||
class CineverseBaseIE(InfoExtractor):
|
class CineverseBaseIE(InfoExtractor):
|
||||||
_VALID_URL_BASE = r'https://www\.(?P<host>%s)' % '|'.join(map(re.escape, (
|
_VALID_URL_BASE = r'https?://www\.(?P<host>%s)' % '|'.join(map(re.escape, (
|
||||||
'cineverse.com',
|
'cineverse.com',
|
||||||
'asiancrush.com',
|
'asiancrush.com',
|
||||||
'dovechannel.com',
|
'dovechannel.com',
|
||||||
|
|
|
@ -1,76 +0,0 @@
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import (
|
|
||||||
int_or_none,
|
|
||||||
url_or_none,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class CliphunterIE(InfoExtractor):
|
|
||||||
IE_NAME = 'cliphunter'
|
|
||||||
|
|
||||||
_VALID_URL = r'''(?x)https?://(?:www\.)?cliphunter\.com/w/
|
|
||||||
(?P<id>[0-9]+)/
|
|
||||||
(?P<seo>.+?)(?:$|[#\?])
|
|
||||||
'''
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'http://www.cliphunter.com/w/1012420/Fun_Jynx_Maze_solo',
|
|
||||||
'md5': 'b7c9bbd4eb3a226ab91093714dcaa480',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '1012420',
|
|
||||||
'ext': 'flv',
|
|
||||||
'title': 'Fun Jynx Maze solo',
|
|
||||||
'thumbnail': r're:^https?://.*\.jpg$',
|
|
||||||
'age_limit': 18,
|
|
||||||
},
|
|
||||||
'skip': 'Video gone',
|
|
||||||
}, {
|
|
||||||
'url': 'http://www.cliphunter.com/w/2019449/ShesNew__My_booty_girlfriend_Victoria_Paradices_pussy_filled_with_jizz',
|
|
||||||
'md5': '55a723c67bfc6da6b0cfa00d55da8a27',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '2019449',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'ShesNew - My booty girlfriend, Victoria Paradice\'s pussy filled with jizz',
|
|
||||||
'thumbnail': r're:^https?://.*\.jpg$',
|
|
||||||
'age_limit': 18,
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
|
||||||
|
|
||||||
video_title = self._search_regex(
|
|
||||||
r'mediaTitle = "([^"]+)"', webpage, 'title')
|
|
||||||
|
|
||||||
gexo_files = self._parse_json(
|
|
||||||
self._search_regex(
|
|
||||||
r'var\s+gexoFiles\s*=\s*({.+?});', webpage, 'gexo files'),
|
|
||||||
video_id)
|
|
||||||
|
|
||||||
formats = []
|
|
||||||
for format_id, f in gexo_files.items():
|
|
||||||
video_url = url_or_none(f.get('url'))
|
|
||||||
if not video_url:
|
|
||||||
continue
|
|
||||||
fmt = f.get('fmt')
|
|
||||||
height = f.get('h')
|
|
||||||
format_id = '%s_%sp' % (fmt, height) if fmt and height else format_id
|
|
||||||
formats.append({
|
|
||||||
'url': video_url,
|
|
||||||
'format_id': format_id,
|
|
||||||
'width': int_or_none(f.get('w')),
|
|
||||||
'height': int_or_none(height),
|
|
||||||
'tbr': int_or_none(f.get('br')),
|
|
||||||
})
|
|
||||||
|
|
||||||
thumbnail = self._search_regex(
|
|
||||||
r"var\s+mov_thumb\s*=\s*'([^']+)';",
|
|
||||||
webpage, 'thumbnail', fatal=False)
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': video_id,
|
|
||||||
'title': video_title,
|
|
||||||
'formats': formats,
|
|
||||||
'age_limit': self._rta_search(webpage),
|
|
||||||
'thumbnail': thumbnail,
|
|
||||||
}
|
|
|
@ -2,6 +2,7 @@ from .onet import OnetBaseIE
|
||||||
|
|
||||||
|
|
||||||
class ClipRsIE(OnetBaseIE):
|
class ClipRsIE(OnetBaseIE):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'https?://(?:www\.)?clip\.rs/(?P<id>[^/]+)/\d+'
|
_VALID_URL = r'https?://(?:www\.)?clip\.rs/(?P<id>[^/]+)/\d+'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'http://www.clip.rs/premijera-frajle-predstavljaju-novi-spot-za-pesmu-moli-me-moli/3732',
|
'url': 'http://www.clip.rs/premijera-frajle-predstavljaju-novi-spot-za-pesmu-moli-me-moli/3732',
|
||||||
|
|
|
@ -4,6 +4,7 @@ from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
class CloserToTruthIE(InfoExtractor):
|
class CloserToTruthIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'https?://(?:www\.)?closertotruth\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https?://(?:www\.)?closertotruth\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://closertotruth.com/series/solutions-the-mind-body-problem#video-3688',
|
'url': 'http://closertotruth.com/series/solutions-the-mind-body-problem#video-3688',
|
||||||
|
|
|
@ -4,27 +4,25 @@ from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
class CloudflareStreamIE(InfoExtractor):
|
class CloudflareStreamIE(InfoExtractor):
|
||||||
|
_SUBDOMAIN_RE = r'(?:(?:watch|iframe|customer-\w+)\.)?'
|
||||||
_DOMAIN_RE = r'(?:cloudflarestream\.com|(?:videodelivery|bytehighway)\.net)'
|
_DOMAIN_RE = r'(?:cloudflarestream\.com|(?:videodelivery|bytehighway)\.net)'
|
||||||
_EMBED_RE = r'embed\.%s/embed/[^/]+\.js\?.*?\bvideo=' % _DOMAIN_RE
|
_EMBED_RE = rf'embed\.{_DOMAIN_RE}/embed/[^/]+\.js\?.*?\bvideo='
|
||||||
_ID_RE = r'[\da-f]{32}|[\w-]+\.[\w-]+\.[\w-]+'
|
_ID_RE = r'[\da-f]{32}|[\w-]+\.[\w-]+\.[\w-]+'
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = rf'https?://(?:{_SUBDOMAIN_RE}{_DOMAIN_RE}/|{_EMBED_RE})(?P<id>{_ID_RE})'
|
||||||
https?://
|
_EMBED_REGEX = [
|
||||||
(?:
|
rf'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//{_EMBED_RE}(?:{_ID_RE}).*?)\1',
|
||||||
(?:watch\.)?%s/|
|
rf'<iframe[^>]+\bsrc=["\'](?P<url>https?://{_SUBDOMAIN_RE}{_DOMAIN_RE}/[\da-f]{{32}})',
|
||||||
%s
|
]
|
||||||
)
|
|
||||||
(?P<id>%s)
|
|
||||||
''' % (_DOMAIN_RE, _EMBED_RE, _ID_RE)
|
|
||||||
_EMBED_REGEX = [fr'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//{_EMBED_RE}(?:{_ID_RE}).*?)\1']
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://embed.cloudflarestream.com/embed/we4g.fla9.latest.js?video=31c9291ab41fac05471db4e73aa11717',
|
'url': 'https://embed.cloudflarestream.com/embed/we4g.fla9.latest.js?video=31c9291ab41fac05471db4e73aa11717',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '31c9291ab41fac05471db4e73aa11717',
|
'id': '31c9291ab41fac05471db4e73aa11717',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': '31c9291ab41fac05471db4e73aa11717',
|
'title': '31c9291ab41fac05471db4e73aa11717',
|
||||||
|
'thumbnail': 'https://videodelivery.net/31c9291ab41fac05471db4e73aa11717/thumbnails/thumbnail.jpg',
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True,
|
'skip_download': 'm3u8',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://watch.cloudflarestream.com/9df17203414fd1db3e3ed74abbe936c1',
|
'url': 'https://watch.cloudflarestream.com/9df17203414fd1db3e3ed74abbe936c1',
|
||||||
|
@ -35,6 +33,21 @@ class CloudflareStreamIE(InfoExtractor):
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://embed.videodelivery.net/embed/r4xu.fla9.latest.js?video=81d80727f3022488598f68d323c1ad5e',
|
'url': 'https://embed.videodelivery.net/embed/r4xu.fla9.latest.js?video=81d80727f3022488598f68d323c1ad5e',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://customer-aw5py76sw8wyqzmh.cloudflarestream.com/2463f6d3e06fa29710a337f5f5389fd8/iframe',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
_WEBPAGE_TESTS = [{
|
||||||
|
'url': 'https://upride.cc/incident/shoulder-pass-at-light/',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'eaef9dea5159cf968be84241b5cedfe7',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'eaef9dea5159cf968be84241b5cedfe7',
|
||||||
|
'thumbnail': 'https://videodelivery.net/eaef9dea5159cf968be84241b5cedfe7/thumbnails/thumbnail.jpg',
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': 'm3u8',
|
||||||
|
},
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|
|
@ -1,68 +1,97 @@
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import smuggle_url
|
from ..utils import int_or_none, parse_iso8601, str_or_none, url_or_none
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
class CNBCIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'https?://video\.cnbc\.com/gallery/\?video=(?P<id>[0-9]+)'
|
|
||||||
_TEST = {
|
|
||||||
'url': 'http://video.cnbc.com/gallery/?video=3000503714',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '3000503714',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Fighting zombies is big business',
|
|
||||||
'description': 'md5:0c100d8e1a7947bd2feec9a5550e519e',
|
|
||||||
'timestamp': 1459332000,
|
|
||||||
'upload_date': '20160330',
|
|
||||||
'uploader': 'NBCU-CNBC',
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
# m3u8 download
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
'skip': 'Dead link',
|
|
||||||
}
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
return {
|
|
||||||
'_type': 'url_transparent',
|
|
||||||
'ie_key': 'ThePlatform',
|
|
||||||
'url': smuggle_url(
|
|
||||||
'http://link.theplatform.com/s/gZWlPC/media/guid/2408950221/%s?mbr=true&manifest=m3u' % video_id,
|
|
||||||
{'force_smil_url': True}),
|
|
||||||
'id': video_id,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
class CNBCVideoIE(InfoExtractor):
|
class CNBCVideoIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?cnbc\.com(?P<path>/video/(?:[^/]+/)+(?P<id>[^./?#&]+)\.html)'
|
_VALID_URL = r'https?://(?:www\.)?cnbc\.com/video/(?:[^/?#]+/)+(?P<id>[^./?#&]+)\.html'
|
||||||
_TEST = {
|
|
||||||
'url': 'https://www.cnbc.com/video/2018/07/19/trump-i-dont-necessarily-agree-with-raising-rates.html',
|
_TESTS = [{
|
||||||
|
'url': 'https://www.cnbc.com/video/2023/12/07/mcdonalds-just-unveiled-cosmcsits-new-spinoff-brand.html',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '7000031301',
|
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': "Trump: I don't necessarily agree with raising rates",
|
'id': '107344774',
|
||||||
'description': 'md5:878d8f0b4ebb5bb1dda3514b91b49de3',
|
'display_id': 'mcdonalds-just-unveiled-cosmcsits-new-spinoff-brand',
|
||||||
'timestamp': 1531958400,
|
'modified_timestamp': 1702053483,
|
||||||
'upload_date': '20180719',
|
'timestamp': 1701977810,
|
||||||
'uploader': 'NBCU-CNBC',
|
'channel': 'News Videos',
|
||||||
|
'upload_date': '20231207',
|
||||||
|
'description': 'md5:882c001d85cb43d7579b514307b3e78b',
|
||||||
|
'release_timestamp': 1701977375,
|
||||||
|
'modified_date': '20231208',
|
||||||
|
'release_date': '20231207',
|
||||||
|
'duration': 65,
|
||||||
|
'creators': ['Sean Conlon'],
|
||||||
|
'title': 'Here\'s a first look at McDonald\'s new spinoff brand, CosMc\'s',
|
||||||
|
'thumbnail': 'https://image.cnbcfm.com/api/v1/image/107344192-1701894812493-CosMcsskyHero_2336x1040_hero-desktop.jpg?v=1701894855',
|
||||||
},
|
},
|
||||||
'params': {
|
'expected_warnings': ['Unable to download f4m manifest'],
|
||||||
'skip_download': True,
|
}, {
|
||||||
|
'url': 'https://www.cnbc.com/video/2023/12/08/jim-cramer-shares-his-take-on-seattles-tech-scene.html',
|
||||||
|
'info_dict': {
|
||||||
|
'creators': ['Jim Cramer'],
|
||||||
|
'channel': 'Mad Money with Jim Cramer',
|
||||||
|
'description': 'md5:72925be21b952e95eba51178dddf4e3e',
|
||||||
|
'duration': 299.0,
|
||||||
|
'ext': 'mp4',
|
||||||
|
'id': '107345451',
|
||||||
|
'display_id': 'jim-cramer-shares-his-take-on-seattles-tech-scene',
|
||||||
|
'thumbnail': 'https://image.cnbcfm.com/api/v1/image/107345481-1702079431MM-B-120823.jpg?v=1702079430',
|
||||||
|
'timestamp': 1702080139,
|
||||||
|
'title': 'Jim Cramer shares his take on Seattle\'s tech scene',
|
||||||
|
'release_date': '20231208',
|
||||||
|
'upload_date': '20231209',
|
||||||
|
'modified_timestamp': 1702080139,
|
||||||
|
'modified_date': '20231209',
|
||||||
|
'release_timestamp': 1702073551,
|
||||||
},
|
},
|
||||||
'skip': 'Dead link',
|
'expected_warnings': ['Unable to download f4m manifest'],
|
||||||
}
|
}, {
|
||||||
|
'url': 'https://www.cnbc.com/video/2023/12/08/the-epicenter-of-ai-is-in-seattle-says-jim-cramer.html',
|
||||||
|
'info_dict': {
|
||||||
|
'creators': ['Jim Cramer'],
|
||||||
|
'channel': 'Mad Money with Jim Cramer',
|
||||||
|
'description': 'md5:72925be21b952e95eba51178dddf4e3e',
|
||||||
|
'duration': 113.0,
|
||||||
|
'ext': 'mp4',
|
||||||
|
'id': '107345474',
|
||||||
|
'display_id': 'the-epicenter-of-ai-is-in-seattle-says-jim-cramer',
|
||||||
|
'thumbnail': 'https://image.cnbcfm.com/api/v1/image/107345486-Screenshot_2023-12-08_at_70339_PM.png?v=1702080248',
|
||||||
|
'timestamp': 1702080535,
|
||||||
|
'title': 'The epicenter of AI is in Seattle, says Jim Cramer',
|
||||||
|
'release_timestamp': 1702077347,
|
||||||
|
'modified_timestamp': 1702080535,
|
||||||
|
'release_date': '20231208',
|
||||||
|
'upload_date': '20231209',
|
||||||
|
'modified_date': '20231209',
|
||||||
|
},
|
||||||
|
'expected_warnings': ['Unable to download f4m manifest'],
|
||||||
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
path, display_id = self._match_valid_url(url).groups()
|
display_id = self._match_id(url)
|
||||||
video_id = self._download_json(
|
webpage = self._download_webpage(url, display_id)
|
||||||
'https://webql-redesign.cnbcfm.com/graphql', display_id, query={
|
data = self._search_json(r'window\.__s_data=', webpage, 'video data', display_id)
|
||||||
'query': '''{
|
|
||||||
page(path: "%s") {
|
player_data = traverse_obj(data, (
|
||||||
vcpsId
|
'page', 'page', 'layout', ..., 'columns', ..., 'modules',
|
||||||
}
|
lambda _, v: v['name'] == 'clipPlayer', 'data', {dict}), get_all=False)
|
||||||
}''' % path,
|
|
||||||
})['data']['page']['vcpsId']
|
return {
|
||||||
return self.url_result(
|
'id': display_id,
|
||||||
'http://video.cnbc.com/gallery/?video=%d' % video_id,
|
'display_id': display_id,
|
||||||
CNBCIE.ie_key())
|
'formats': self._extract_akamai_formats(player_data['playbackURL'], display_id),
|
||||||
|
**self._search_json_ld(webpage, display_id, fatal=False),
|
||||||
|
**traverse_obj(player_data, {
|
||||||
|
'id': ('id', {str_or_none}),
|
||||||
|
'title': ('title', {str}),
|
||||||
|
'description': ('description', {str}),
|
||||||
|
'creators': ('author', ..., 'name', {str}),
|
||||||
|
'timestamp': ('datePublished', {parse_iso8601}),
|
||||||
|
'release_timestamp': ('uploadDate', {parse_iso8601}),
|
||||||
|
'modified_timestamp': ('dateLastPublished', {parse_iso8601}),
|
||||||
|
'thumbnail': ('thumbnail', {url_or_none}),
|
||||||
|
'duration': ('duration', {int_or_none}),
|
||||||
|
'channel': ('section', 'title', {str}),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
|
|
@ -247,6 +247,8 @@ class InfoExtractor:
|
||||||
(For internal use only)
|
(For internal use only)
|
||||||
* http_chunk_size Chunk size for HTTP downloads
|
* http_chunk_size Chunk size for HTTP downloads
|
||||||
* ffmpeg_args Extra arguments for ffmpeg downloader
|
* ffmpeg_args Extra arguments for ffmpeg downloader
|
||||||
|
* is_dash_periods Whether the format is a result of merging
|
||||||
|
multiple DASH periods.
|
||||||
RTMP formats can also have the additional fields: page_url,
|
RTMP formats can also have the additional fields: page_url,
|
||||||
app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
|
app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
|
||||||
rtmp_protocol, rtmp_real_time
|
rtmp_protocol, rtmp_real_time
|
||||||
|
@ -260,7 +262,7 @@ class InfoExtractor:
|
||||||
|
|
||||||
direct: True if a direct video file was given (must only be set by GenericIE)
|
direct: True if a direct video file was given (must only be set by GenericIE)
|
||||||
alt_title: A secondary title of the video.
|
alt_title: A secondary title of the video.
|
||||||
display_id An alternative identifier for the video, not necessarily
|
display_id: An alternative identifier for the video, not necessarily
|
||||||
unique, but available before title. Typically, id is
|
unique, but available before title. Typically, id is
|
||||||
something like "4234987", title "Dancing naked mole rats",
|
something like "4234987", title "Dancing naked mole rats",
|
||||||
and display_id "dancing-naked-mole-rats"
|
and display_id "dancing-naked-mole-rats"
|
||||||
|
@ -278,7 +280,7 @@ class InfoExtractor:
|
||||||
description: Full video description.
|
description: Full video description.
|
||||||
uploader: Full name of the video uploader.
|
uploader: Full name of the video uploader.
|
||||||
license: License name the video is licensed under.
|
license: License name the video is licensed under.
|
||||||
creator: The creator of the video.
|
creators: List of creators of the video.
|
||||||
timestamp: UNIX timestamp of the moment the video was uploaded
|
timestamp: UNIX timestamp of the moment the video was uploaded
|
||||||
upload_date: Video upload date in UTC (YYYYMMDD).
|
upload_date: Video upload date in UTC (YYYYMMDD).
|
||||||
If not explicitly set, calculated from timestamp
|
If not explicitly set, calculated from timestamp
|
||||||
|
@ -422,16 +424,16 @@ class InfoExtractor:
|
||||||
track_number: Number of the track within an album or a disc, as an integer.
|
track_number: Number of the track within an album or a disc, as an integer.
|
||||||
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
|
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
|
||||||
as a unicode string.
|
as a unicode string.
|
||||||
artist: Artist(s) of the track.
|
artists: List of artists of the track.
|
||||||
genre: Genre(s) of the track.
|
composers: List of composers of the piece.
|
||||||
|
genres: List of genres of the track.
|
||||||
album: Title of the album the track belongs to.
|
album: Title of the album the track belongs to.
|
||||||
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
|
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
|
||||||
album_artist: List of all artists appeared on the album (e.g.
|
album_artists: List of all artists appeared on the album.
|
||||||
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
|
E.g. ["Ash Borer", "Fell Voices"] or ["Various Artists"].
|
||||||
and compilations).
|
Useful for splits and compilations.
|
||||||
disc_number: Number of the disc or other physical medium the track belongs to,
|
disc_number: Number of the disc or other physical medium the track belongs to,
|
||||||
as an integer.
|
as an integer.
|
||||||
composer: Composer of the piece
|
|
||||||
|
|
||||||
The following fields should only be set for clips that should be cut from the original video:
|
The following fields should only be set for clips that should be cut from the original video:
|
||||||
|
|
||||||
|
@ -442,6 +444,18 @@ class InfoExtractor:
|
||||||
rows: Number of rows in each storyboard fragment, as an integer
|
rows: Number of rows in each storyboard fragment, as an integer
|
||||||
columns: Number of columns in each storyboard fragment, as an integer
|
columns: Number of columns in each storyboard fragment, as an integer
|
||||||
|
|
||||||
|
The following fields are deprecated and should not be set by new code:
|
||||||
|
composer: Use "composers" instead.
|
||||||
|
Composer(s) of the piece, comma-separated.
|
||||||
|
artist: Use "artists" instead.
|
||||||
|
Artist(s) of the track, comma-separated.
|
||||||
|
genre: Use "genres" instead.
|
||||||
|
Genre(s) of the track, comma-separated.
|
||||||
|
album_artist: Use "album_artists" instead.
|
||||||
|
All artists appeared on the album, comma-separated.
|
||||||
|
creator: Use "creators" instead.
|
||||||
|
The creator of the video.
|
||||||
|
|
||||||
Unless mentioned otherwise, the fields should be Unicode strings.
|
Unless mentioned otherwise, the fields should be Unicode strings.
|
||||||
|
|
||||||
Unless mentioned otherwise, None is equivalent to absence of information.
|
Unless mentioned otherwise, None is equivalent to absence of information.
|
||||||
|
@ -733,7 +747,7 @@ class InfoExtractor:
|
||||||
raise
|
raise
|
||||||
except ExtractorError as e:
|
except ExtractorError as e:
|
||||||
e.video_id = e.video_id or self.get_temp_id(url)
|
e.video_id = e.video_id or self.get_temp_id(url)
|
||||||
e.ie = e.ie or self.IE_NAME,
|
e.ie = e.ie or self.IE_NAME
|
||||||
e.traceback = e.traceback or sys.exc_info()[2]
|
e.traceback = e.traceback or sys.exc_info()[2]
|
||||||
raise
|
raise
|
||||||
except IncompleteRead as e:
|
except IncompleteRead as e:
|
||||||
|
@ -1325,7 +1339,10 @@ class InfoExtractor:
|
||||||
else:
|
else:
|
||||||
return None, None
|
return None, None
|
||||||
if not info:
|
if not info:
|
||||||
raise netrc.NetrcParseError(f'No authenticators for {netrc_machine}')
|
self.to_screen(f'No authenticators for {netrc_machine}')
|
||||||
|
return None, None
|
||||||
|
|
||||||
|
self.write_debug(f'Using netrc for {netrc_machine} authentication')
|
||||||
return info[0], info[2]
|
return info[0], info[2]
|
||||||
|
|
||||||
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
|
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
|
||||||
|
@ -2530,7 +2547,11 @@ class InfoExtractor:
|
||||||
self._report_ignoring_subs('DASH')
|
self._report_ignoring_subs('DASH')
|
||||||
return fmts
|
return fmts
|
||||||
|
|
||||||
def _extract_mpd_formats_and_subtitles(
|
def _extract_mpd_formats_and_subtitles(self, *args, **kwargs):
|
||||||
|
periods = self._extract_mpd_periods(*args, **kwargs)
|
||||||
|
return self._merge_mpd_periods(periods)
|
||||||
|
|
||||||
|
def _extract_mpd_periods(
|
||||||
self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
|
self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
|
||||||
fatal=True, data=None, headers={}, query={}):
|
fatal=True, data=None, headers={}, query={}):
|
||||||
|
|
||||||
|
@ -2543,17 +2564,16 @@ class InfoExtractor:
|
||||||
errnote='Failed to download MPD manifest' if errnote is None else errnote,
|
errnote='Failed to download MPD manifest' if errnote is None else errnote,
|
||||||
fatal=fatal, data=data, headers=headers, query=query)
|
fatal=fatal, data=data, headers=headers, query=query)
|
||||||
if res is False:
|
if res is False:
|
||||||
return [], {}
|
return []
|
||||||
mpd_doc, urlh = res
|
mpd_doc, urlh = res
|
||||||
if mpd_doc is None:
|
if mpd_doc is None:
|
||||||
return [], {}
|
return []
|
||||||
|
|
||||||
# We could have been redirected to a new url when we retrieved our mpd file.
|
# We could have been redirected to a new url when we retrieved our mpd file.
|
||||||
mpd_url = urlh.url
|
mpd_url = urlh.url
|
||||||
mpd_base_url = base_url(mpd_url)
|
mpd_base_url = base_url(mpd_url)
|
||||||
|
|
||||||
return self._parse_mpd_formats_and_subtitles(
|
return self._parse_mpd_periods(mpd_doc, mpd_id, mpd_base_url, mpd_url)
|
||||||
mpd_doc, mpd_id, mpd_base_url, mpd_url)
|
|
||||||
|
|
||||||
def _parse_mpd_formats(self, *args, **kwargs):
|
def _parse_mpd_formats(self, *args, **kwargs):
|
||||||
fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
|
fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
|
||||||
|
@ -2561,8 +2581,39 @@ class InfoExtractor:
|
||||||
self._report_ignoring_subs('DASH')
|
self._report_ignoring_subs('DASH')
|
||||||
return fmts
|
return fmts
|
||||||
|
|
||||||
def _parse_mpd_formats_and_subtitles(
|
def _parse_mpd_formats_and_subtitles(self, *args, **kwargs):
|
||||||
self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
|
periods = self._parse_mpd_periods(*args, **kwargs)
|
||||||
|
return self._merge_mpd_periods(periods)
|
||||||
|
|
||||||
|
def _merge_mpd_periods(self, periods):
|
||||||
|
"""
|
||||||
|
Combine all formats and subtitles from an MPD manifest into a single list,
|
||||||
|
by concatenate streams with similar formats.
|
||||||
|
"""
|
||||||
|
formats, subtitles = {}, {}
|
||||||
|
for period in periods:
|
||||||
|
for f in period['formats']:
|
||||||
|
assert 'is_dash_periods' not in f, 'format already processed'
|
||||||
|
f['is_dash_periods'] = True
|
||||||
|
format_key = tuple(v for k, v in f.items() if k not in (
|
||||||
|
('format_id', 'fragments', 'manifest_stream_number')))
|
||||||
|
if format_key not in formats:
|
||||||
|
formats[format_key] = f
|
||||||
|
elif 'fragments' in f:
|
||||||
|
formats[format_key].setdefault('fragments', []).extend(f['fragments'])
|
||||||
|
|
||||||
|
if subtitles and period['subtitles']:
|
||||||
|
self.report_warning(bug_reports_message(
|
||||||
|
'Found subtitles in multiple periods in the DASH manifest; '
|
||||||
|
'if part of the subtitles are missing,'
|
||||||
|
), only_once=True)
|
||||||
|
|
||||||
|
for sub_lang, sub_info in period['subtitles'].items():
|
||||||
|
subtitles.setdefault(sub_lang, []).extend(sub_info)
|
||||||
|
|
||||||
|
return list(formats.values()), subtitles
|
||||||
|
|
||||||
|
def _parse_mpd_periods(self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
|
||||||
"""
|
"""
|
||||||
Parse formats from MPD manifest.
|
Parse formats from MPD manifest.
|
||||||
References:
|
References:
|
||||||
|
@ -2641,9 +2692,13 @@ class InfoExtractor:
|
||||||
return ms_info
|
return ms_info
|
||||||
|
|
||||||
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
|
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
|
||||||
formats, subtitles = [], {}
|
|
||||||
stream_numbers = collections.defaultdict(int)
|
stream_numbers = collections.defaultdict(int)
|
||||||
for period in mpd_doc.findall(_add_ns('Period')):
|
for period_idx, period in enumerate(mpd_doc.findall(_add_ns('Period'))):
|
||||||
|
period_entry = {
|
||||||
|
'id': period.get('id', f'period-{period_idx}'),
|
||||||
|
'formats': [],
|
||||||
|
'subtitles': collections.defaultdict(list),
|
||||||
|
}
|
||||||
period_duration = parse_duration(period.get('duration')) or mpd_duration
|
period_duration = parse_duration(period.get('duration')) or mpd_duration
|
||||||
period_ms_info = extract_multisegment_info(period, {
|
period_ms_info = extract_multisegment_info(period, {
|
||||||
'start_number': 1,
|
'start_number': 1,
|
||||||
|
@ -2893,11 +2948,10 @@ class InfoExtractor:
|
||||||
if content_type in ('video', 'audio', 'image/jpeg'):
|
if content_type in ('video', 'audio', 'image/jpeg'):
|
||||||
f['manifest_stream_number'] = stream_numbers[f['url']]
|
f['manifest_stream_number'] = stream_numbers[f['url']]
|
||||||
stream_numbers[f['url']] += 1
|
stream_numbers[f['url']] += 1
|
||||||
formats.append(f)
|
period_entry['formats'].append(f)
|
||||||
elif content_type == 'text':
|
elif content_type == 'text':
|
||||||
subtitles.setdefault(lang or 'und', []).append(f)
|
period_entry['subtitles'][lang or 'und'].append(f)
|
||||||
|
yield period_entry
|
||||||
return formats, subtitles
|
|
||||||
|
|
||||||
def _extract_ism_formats(self, *args, **kwargs):
|
def _extract_ism_formats(self, *args, **kwargs):
|
||||||
fmts, subs = self._extract_ism_formats_and_subtitles(*args, **kwargs)
|
fmts, subs = self._extract_ism_formats_and_subtitles(*args, **kwargs)
|
||||||
|
|
|
@ -65,7 +65,7 @@ class CPACIE(InfoExtractor):
|
||||||
'title': title,
|
'title': title,
|
||||||
'description': str_or_none(content['details'].get('description_%s_t' % (url_lang, ))),
|
'description': str_or_none(content['details'].get('description_%s_t' % (url_lang, ))),
|
||||||
'timestamp': unified_timestamp(content['details'].get('liveDateTime')),
|
'timestamp': unified_timestamp(content['details'].get('liveDateTime')),
|
||||||
'category': [category] if category else None,
|
'categories': [category] if category else None,
|
||||||
'thumbnail': urljoin(url, str_or_none(content['details'].get('image_%s_s' % (url_lang, )))),
|
'thumbnail': urljoin(url, str_or_none(content['details'].get('image_%s_s' % (url_lang, )))),
|
||||||
'is_live': is_live(content['details'].get('type')),
|
'is_live': is_live(content['details'].get('type')),
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,12 +1,13 @@
|
||||||
|
import json
|
||||||
|
|
||||||
from .brightcove import BrightcoveNewIE
|
from .brightcove import BrightcoveNewIE
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
dict_get,
|
extract_attributes,
|
||||||
get_element_by_id,
|
get_element_html_by_class,
|
||||||
js_to_json,
|
get_element_text_and_html_by_tag,
|
||||||
traverse_obj,
|
|
||||||
)
|
)
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
class CraftsyIE(InfoExtractor):
|
class CraftsyIE(InfoExtractor):
|
||||||
|
@ -41,28 +42,34 @@ class CraftsyIE(InfoExtractor):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, video_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
video_data = self._parse_json(self._search_regex(
|
video_player = get_element_html_by_class('class-video-player', webpage)
|
||||||
r'class_video_player_vars\s*=\s*({.*})\s*;',
|
video_data = traverse_obj(video_player, (
|
||||||
get_element_by_id('vidstore-classes_class-video-player-js-extra', webpage),
|
{extract_attributes}, 'wire:snapshot', {json.loads}, 'data', {dict})) or {}
|
||||||
'video data'), video_id, transform_source=js_to_json)
|
video_js = traverse_obj(video_player, (
|
||||||
|
{lambda x: get_element_text_and_html_by_tag('video-js', x)}, 1, {extract_attributes})) or {}
|
||||||
|
|
||||||
account_id = traverse_obj(video_data, ('video_player', 'bc_account_id'))
|
has_access = video_data.get('userHasAccess')
|
||||||
|
lessons = traverse_obj(video_data, ('lessons', ..., ..., lambda _, v: v['video_id']))
|
||||||
|
|
||||||
entries = []
|
preview_id = video_js.get('data-video-id')
|
||||||
class_preview = traverse_obj(video_data, ('video_player', 'class_preview'))
|
if preview_id and preview_id not in traverse_obj(lessons, (..., 'video_id')):
|
||||||
if class_preview:
|
if not lessons and not has_access:
|
||||||
v_id = class_preview.get('video_id')
|
self.report_warning(
|
||||||
entries.append(self.url_result(
|
'Only extracting preview. For the full class, pass cookies '
|
||||||
f'http://players.brightcove.net/{account_id}/default_default/index.html?videoId={v_id}',
|
+ f'from an account that has access. {self._login_hint()}')
|
||||||
BrightcoveNewIE, v_id, class_preview.get('title')))
|
lessons.append({'video_id': preview_id})
|
||||||
|
|
||||||
if dict_get(video_data, ('is_free', 'user_has_access')):
|
if not lessons and not has_access:
|
||||||
entries += [
|
self.raise_login_required('You do not have access to this class')
|
||||||
self.url_result(
|
|
||||||
|
account_id = video_data.get('accountId') or video_js['data-account']
|
||||||
|
|
||||||
|
def entries(lessons):
|
||||||
|
for lesson in lessons:
|
||||||
|
yield self.url_result(
|
||||||
f'http://players.brightcove.net/{account_id}/default_default/index.html?videoId={lesson["video_id"]}',
|
f'http://players.brightcove.net/{account_id}/default_default/index.html?videoId={lesson["video_id"]}',
|
||||||
BrightcoveNewIE, lesson['video_id'], lesson.get('title'))
|
BrightcoveNewIE, lesson['video_id'], lesson.get('title'))
|
||||||
for lesson in video_data['lessons']]
|
|
||||||
|
|
||||||
return self.playlist_result(
|
return self.playlist_result(
|
||||||
entries, video_id, video_data.get('class_title'),
|
entries(lessons), video_id, self._html_search_meta(('og:title', 'twitter:title'), webpage),
|
||||||
self._html_search_meta(('og:description', 'description'), webpage, default=None))
|
self._html_search_meta(('og:description', 'description'), webpage, default=None))
|
||||||
|
|
|
@ -1,18 +1,32 @@
|
||||||
|
import re
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import remove_end
|
from ..utils import make_archive_id, remove_end
|
||||||
|
|
||||||
|
|
||||||
class CrtvgIE(InfoExtractor):
|
class CrtvgIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:www\.)?crtvg\.es/tvg/a-carta/[^/#?]+-(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?crtvg\.es/tvg/a-carta/(?P<id>[^/#?]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.crtvg.es/tvg/a-carta/os-caimans-do-tea-5839623',
|
'url': 'https://www.crtvg.es/tvg/a-carta/os-caimans-do-tea-5839623',
|
||||||
'md5': 'c0958d9ff90e4503a75544358758921d',
|
'md5': 'c0958d9ff90e4503a75544358758921d',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '5839623',
|
'id': 'os-caimans-do-tea-5839623',
|
||||||
'title': 'Os caimáns do Tea',
|
'title': 'Os caimáns do Tea',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'description': 'md5:f71cfba21ae564f0a6f415b31de1f842',
|
'description': 'md5:f71cfba21ae564f0a6f415b31de1f842',
|
||||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||||
|
'_old_archive_ids': ['crtvg 5839623'],
|
||||||
|
},
|
||||||
|
'params': {'skip_download': 'm3u8'}
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.crtvg.es/tvg/a-carta/a-parabolica-love-story',
|
||||||
|
'md5': '9a47b95a1749db7b7eb3214904624584',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'a-parabolica-love-story',
|
||||||
|
'title': 'A parabólica / Trabuco, o can mordedor / Love Story',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'description': 'md5:f71cfba21ae564f0a6f415b31de1f842',
|
||||||
|
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||||
},
|
},
|
||||||
'params': {'skip_download': 'm3u8'}
|
'params': {'skip_download': 'm3u8'}
|
||||||
}]
|
}]
|
||||||
|
@ -24,8 +38,13 @@ class CrtvgIE(InfoExtractor):
|
||||||
formats = self._extract_m3u8_formats(video_url + '/playlist.m3u8', video_id, fatal=False)
|
formats = self._extract_m3u8_formats(video_url + '/playlist.m3u8', video_id, fatal=False)
|
||||||
formats.extend(self._extract_mpd_formats(video_url + '/manifest.mpd', video_id, fatal=False))
|
formats.extend(self._extract_mpd_formats(video_url + '/manifest.mpd', video_id, fatal=False))
|
||||||
|
|
||||||
|
old_video_id = None
|
||||||
|
if mobj := re.fullmatch(r'[^/#?]+-(?P<old_id>\d{7})', video_id):
|
||||||
|
old_video_id = [make_archive_id(self, mobj.group('old_id'))]
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
|
'_old_archive_ids': old_video_id,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'title': remove_end(self._html_search_meta(
|
'title': remove_end(self._html_search_meta(
|
||||||
['og:title', 'twitter:title'], webpage, 'title', default=None), ' | CRTVG'),
|
['og:title', 'twitter:title'], webpage, 'title', default=None), ' | CRTVG'),
|
||||||
|
|
|
@ -136,7 +136,7 @@ class CrunchyrollBaseIE(InfoExtractor):
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def _extract_formats(self, stream_response, display_id=None):
|
def _extract_formats(self, stream_response, display_id=None):
|
||||||
requested_formats = self._configuration_arg('format') or ['adaptive_hls']
|
requested_formats = self._configuration_arg('format') or ['vo_adaptive_hls']
|
||||||
available_formats = {}
|
available_formats = {}
|
||||||
for stream_type, streams in traverse_obj(
|
for stream_type, streams in traverse_obj(
|
||||||
stream_response, (('streams', ('data', 0)), {dict.items}, ...)):
|
stream_response, (('streams', ('data', 0)), {dict.items}, ...)):
|
||||||
|
@ -514,7 +514,7 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
|
||||||
'track': 'Egaono Hana',
|
'track': 'Egaono Hana',
|
||||||
'artist': 'Goose house',
|
'artist': 'Goose house',
|
||||||
'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
|
'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
|
||||||
'genre': ['J-Pop'],
|
'genres': ['J-Pop'],
|
||||||
},
|
},
|
||||||
'params': {'skip_download': 'm3u8'},
|
'params': {'skip_download': 'm3u8'},
|
||||||
}, {
|
}, {
|
||||||
|
@ -527,7 +527,7 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
|
||||||
'track': 'Crossing Field',
|
'track': 'Crossing Field',
|
||||||
'artist': 'LiSA',
|
'artist': 'LiSA',
|
||||||
'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
|
'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
|
||||||
'genre': ['Anime'],
|
'genres': ['Anime'],
|
||||||
},
|
},
|
||||||
'params': {'skip_download': 'm3u8'},
|
'params': {'skip_download': 'm3u8'},
|
||||||
}, {
|
}, {
|
||||||
|
@ -541,7 +541,7 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
|
||||||
'artist': 'LiSA',
|
'artist': 'LiSA',
|
||||||
'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
|
'thumbnail': r're:(?i)^https://www.crunchyroll.com/imgsrv/.*\.jpeg?$',
|
||||||
'description': 'md5:747444e7e6300907b7a43f0a0503072e',
|
'description': 'md5:747444e7e6300907b7a43f0a0503072e',
|
||||||
'genre': ['J-Pop'],
|
'genres': ['J-Pop'],
|
||||||
},
|
},
|
||||||
'params': {'skip_download': 'm3u8'},
|
'params': {'skip_download': 'm3u8'},
|
||||||
}, {
|
}, {
|
||||||
|
@ -594,7 +594,7 @@ class CrunchyrollMusicIE(CrunchyrollBaseIE):
|
||||||
'width': ('width', {int_or_none}),
|
'width': ('width', {int_or_none}),
|
||||||
'height': ('height', {int_or_none}),
|
'height': ('height', {int_or_none}),
|
||||||
}),
|
}),
|
||||||
'genre': ('genres', ..., 'displayValue'),
|
'genres': ('genres', ..., 'displayValue'),
|
||||||
'age_limit': ('maturity_ratings', -1, {parse_age_limit}),
|
'age_limit': ('maturity_ratings', -1, {parse_age_limit}),
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
|
@ -611,7 +611,7 @@ class CrunchyrollArtistIE(CrunchyrollBaseIE):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'MA179CB50D',
|
'id': 'MA179CB50D',
|
||||||
'title': 'LiSA',
|
'title': 'LiSA',
|
||||||
'genre': ['J-Pop', 'Anime', 'Rock'],
|
'genres': ['J-Pop', 'Anime', 'Rock'],
|
||||||
'description': 'md5:16d87de61a55c3f7d6c454b73285938e',
|
'description': 'md5:16d87de61a55c3f7d6c454b73285938e',
|
||||||
},
|
},
|
||||||
'playlist_mincount': 83,
|
'playlist_mincount': 83,
|
||||||
|
@ -645,6 +645,6 @@ class CrunchyrollArtistIE(CrunchyrollBaseIE):
|
||||||
'width': ('width', {int_or_none}),
|
'width': ('width', {int_or_none}),
|
||||||
'height': ('height', {int_or_none}),
|
'height': ('height', {int_or_none}),
|
||||||
}),
|
}),
|
||||||
'genre': ('genres', ..., 'displayValue'),
|
'genres': ('genres', ..., 'displayValue'),
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
|
|
|
@ -110,11 +110,11 @@ class CybraryIE(CybraryBaseIE):
|
||||||
|
|
||||||
|
|
||||||
class CybraryCourseIE(CybraryBaseIE):
|
class CybraryCourseIE(CybraryBaseIE):
|
||||||
_VALID_URL = r'https://app\.cybrary\.it/browse/course/(?P<id>[\w-]+)/?(?:$|[#?])'
|
_VALID_URL = r'https?://app\.cybrary\.it/browse/course/(?P<id>[\w-]+)/?(?:$|[#?])'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://app.cybrary.it/browse/course/az-500-microsoft-azure-security-technologies',
|
'url': 'https://app.cybrary.it/browse/course/az-500-microsoft-azure-security-technologies',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 898,
|
'id': '898',
|
||||||
'title': 'AZ-500: Microsoft Azure Security Technologies',
|
'title': 'AZ-500: Microsoft Azure Security Technologies',
|
||||||
'description': 'md5:69549d379c0fc1dec92926d4e8b6fbd4'
|
'description': 'md5:69549d379c0fc1dec92926d4e8b6fbd4'
|
||||||
},
|
},
|
||||||
|
@ -122,7 +122,7 @@ class CybraryCourseIE(CybraryBaseIE):
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://app.cybrary.it/browse/course/cybrary-orientation',
|
'url': 'https://app.cybrary.it/browse/course/cybrary-orientation',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 1245,
|
'id': '1245',
|
||||||
'title': 'Cybrary Orientation',
|
'title': 'Cybrary Orientation',
|
||||||
'description': 'md5:9e69ff66b32fe78744e0ad4babe2e88e'
|
'description': 'md5:9e69ff66b32fe78744e0ad4babe2e88e'
|
||||||
},
|
},
|
||||||
|
|
|
@ -1,6 +1,7 @@
|
||||||
import functools
|
import functools
|
||||||
import json
|
import json
|
||||||
import re
|
import re
|
||||||
|
import urllib.parse
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..networking.exceptions import HTTPError
|
from ..networking.exceptions import HTTPError
|
||||||
|
@ -44,36 +45,41 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
|
||||||
self._FAMILY_FILTER = ff == 'on' if ff else age_restricted(18, self.get_param('age_limit'))
|
self._FAMILY_FILTER = ff == 'on' if ff else age_restricted(18, self.get_param('age_limit'))
|
||||||
self._set_dailymotion_cookie('ff', 'on' if self._FAMILY_FILTER else 'off')
|
self._set_dailymotion_cookie('ff', 'on' if self._FAMILY_FILTER else 'off')
|
||||||
|
|
||||||
|
def _get_token(self, xid):
|
||||||
|
cookies = self._get_dailymotion_cookies()
|
||||||
|
token = self._get_cookie_value(cookies, 'access_token') or self._get_cookie_value(cookies, 'client_token')
|
||||||
|
if token:
|
||||||
|
return token
|
||||||
|
|
||||||
|
data = {
|
||||||
|
'client_id': 'f1a362d288c1b98099c7',
|
||||||
|
'client_secret': 'eea605b96e01c796ff369935357eca920c5da4c5',
|
||||||
|
}
|
||||||
|
username, password = self._get_login_info()
|
||||||
|
if username:
|
||||||
|
data.update({
|
||||||
|
'grant_type': 'password',
|
||||||
|
'password': password,
|
||||||
|
'username': username,
|
||||||
|
})
|
||||||
|
else:
|
||||||
|
data['grant_type'] = 'client_credentials'
|
||||||
|
try:
|
||||||
|
token = self._download_json(
|
||||||
|
'https://graphql.api.dailymotion.com/oauth/token',
|
||||||
|
None, 'Downloading Access Token',
|
||||||
|
data=urlencode_postdata(data))['access_token']
|
||||||
|
except ExtractorError as e:
|
||||||
|
if isinstance(e.cause, HTTPError) and e.cause.status == 400:
|
||||||
|
raise ExtractorError(self._parse_json(
|
||||||
|
e.cause.response.read().decode(), xid)['error_description'], expected=True)
|
||||||
|
raise
|
||||||
|
self._set_dailymotion_cookie('access_token' if username else 'client_token', token)
|
||||||
|
return token
|
||||||
|
|
||||||
def _call_api(self, object_type, xid, object_fields, note, filter_extra=None):
|
def _call_api(self, object_type, xid, object_fields, note, filter_extra=None):
|
||||||
if not self._HEADERS.get('Authorization'):
|
if not self._HEADERS.get('Authorization'):
|
||||||
cookies = self._get_dailymotion_cookies()
|
self._HEADERS['Authorization'] = f'Bearer {self._get_token(xid)}'
|
||||||
token = self._get_cookie_value(cookies, 'access_token') or self._get_cookie_value(cookies, 'client_token')
|
|
||||||
if not token:
|
|
||||||
data = {
|
|
||||||
'client_id': 'f1a362d288c1b98099c7',
|
|
||||||
'client_secret': 'eea605b96e01c796ff369935357eca920c5da4c5',
|
|
||||||
}
|
|
||||||
username, password = self._get_login_info()
|
|
||||||
if username:
|
|
||||||
data.update({
|
|
||||||
'grant_type': 'password',
|
|
||||||
'password': password,
|
|
||||||
'username': username,
|
|
||||||
})
|
|
||||||
else:
|
|
||||||
data['grant_type'] = 'client_credentials'
|
|
||||||
try:
|
|
||||||
token = self._download_json(
|
|
||||||
'https://graphql.api.dailymotion.com/oauth/token',
|
|
||||||
None, 'Downloading Access Token',
|
|
||||||
data=urlencode_postdata(data))['access_token']
|
|
||||||
except ExtractorError as e:
|
|
||||||
if isinstance(e.cause, HTTPError) and e.cause.status == 400:
|
|
||||||
raise ExtractorError(self._parse_json(
|
|
||||||
e.cause.response.read().decode(), xid)['error_description'], expected=True)
|
|
||||||
raise
|
|
||||||
self._set_dailymotion_cookie('access_token' if username else 'client_token', token)
|
|
||||||
self._HEADERS['Authorization'] = 'Bearer ' + token
|
|
||||||
|
|
||||||
resp = self._download_json(
|
resp = self._download_json(
|
||||||
'https://graphql.api.dailymotion.com/', xid, note, data=json.dumps({
|
'https://graphql.api.dailymotion.com/', xid, note, data=json.dumps({
|
||||||
|
@ -393,9 +399,55 @@ class DailymotionPlaylistIE(DailymotionPlaylistBaseIE):
|
||||||
yield '//dailymotion.com/playlist/%s' % p
|
yield '//dailymotion.com/playlist/%s' % p
|
||||||
|
|
||||||
|
|
||||||
|
class DailymotionSearchIE(DailymotionPlaylistBaseIE):
|
||||||
|
IE_NAME = 'dailymotion:search'
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/search/(?P<id>[^/?#]+)/videos'
|
||||||
|
_PAGE_SIZE = 20
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'http://www.dailymotion.com/search/king of turtles/videos',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'king of turtles',
|
||||||
|
'title': 'king of turtles',
|
||||||
|
},
|
||||||
|
'playlist_mincount': 90,
|
||||||
|
}]
|
||||||
|
_SEARCH_QUERY = 'query SEARCH_QUERY( $query: String! $page: Int $limit: Int ) { search { videos( query: $query first: $limit page: $page ) { edges { node { xid } } } } } '
|
||||||
|
|
||||||
|
def _call_search_api(self, term, page, note):
|
||||||
|
if not self._HEADERS.get('Authorization'):
|
||||||
|
self._HEADERS['Authorization'] = f'Bearer {self._get_token(term)}'
|
||||||
|
resp = self._download_json(
|
||||||
|
'https://graphql.api.dailymotion.com/', None, note, data=json.dumps({
|
||||||
|
'operationName': 'SEARCH_QUERY',
|
||||||
|
'query': self._SEARCH_QUERY,
|
||||||
|
'variables': {
|
||||||
|
'limit': 20,
|
||||||
|
'page': page,
|
||||||
|
'query': term,
|
||||||
|
}
|
||||||
|
}).encode(), headers=self._HEADERS)
|
||||||
|
obj = traverse_obj(resp, ('data', 'search', {dict}))
|
||||||
|
if not obj:
|
||||||
|
raise ExtractorError(
|
||||||
|
traverse_obj(resp, ('errors', 0, 'message', {str})) or 'Could not fetch search data')
|
||||||
|
|
||||||
|
return obj
|
||||||
|
|
||||||
|
def _fetch_page(self, term, page):
|
||||||
|
page += 1
|
||||||
|
response = self._call_search_api(term, page, f'Searching "{term}" page {page}')
|
||||||
|
for xid in traverse_obj(response, ('videos', 'edges', ..., 'node', 'xid')):
|
||||||
|
yield self.url_result(f'https://www.dailymotion.com/video/{xid}', DailymotionIE, xid)
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
term = urllib.parse.unquote_plus(self._match_id(url))
|
||||||
|
return self.playlist_result(
|
||||||
|
OnDemandPagedList(functools.partial(self._fetch_page, term), self._PAGE_SIZE), term, term)
|
||||||
|
|
||||||
|
|
||||||
class DailymotionUserIE(DailymotionPlaylistBaseIE):
|
class DailymotionUserIE(DailymotionPlaylistBaseIE):
|
||||||
IE_NAME = 'dailymotion:user'
|
IE_NAME = 'dailymotion:user'
|
||||||
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist)/)(?:(?:old/)?user/)?(?P<id>[^/]+)'
|
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?!(?:embed|swf|#|video|playlist|search)/)(?:(?:old/)?user/)?(?P<id>[^/?#]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.dailymotion.com/user/nqtv',
|
'url': 'https://www.dailymotion.com/user/nqtv',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
|
|
|
@ -83,7 +83,6 @@ class DamtomoRecordIE(DamtomoBaseIE):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '27376862',
|
'id': '27376862',
|
||||||
'title': 'イカSUMMER [良音]',
|
'title': 'イカSUMMER [良音]',
|
||||||
'description': None,
|
|
||||||
'uploader': 'NANA',
|
'uploader': 'NANA',
|
||||||
'uploader_id': 'MzAyMDExNTY',
|
'uploader_id': 'MzAyMDExNTY',
|
||||||
'upload_date': '20210721',
|
'upload_date': '20210721',
|
||||||
|
|
|
@ -27,7 +27,7 @@ class DaumIE(DaumBaseIE):
|
||||||
'duration': 2117,
|
'duration': 2117,
|
||||||
'view_count': int,
|
'view_count': int,
|
||||||
'comment_count': int,
|
'comment_count': int,
|
||||||
'uploader_id': 186139,
|
'uploader_id': '186139',
|
||||||
'uploader': '콘간지',
|
'uploader': '콘간지',
|
||||||
'timestamp': 1387310323,
|
'timestamp': 1387310323,
|
||||||
},
|
},
|
||||||
|
@ -44,7 +44,7 @@ class DaumIE(DaumBaseIE):
|
||||||
'view_count': int,
|
'view_count': int,
|
||||||
'comment_count': int,
|
'comment_count': int,
|
||||||
'uploader': 'MBC 예능',
|
'uploader': 'MBC 예능',
|
||||||
'uploader_id': 132251,
|
'uploader_id': '132251',
|
||||||
'timestamp': 1421604228,
|
'timestamp': 1421604228,
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
|
@ -63,7 +63,7 @@ class DaumIE(DaumBaseIE):
|
||||||
'view_count': int,
|
'view_count': int,
|
||||||
'comment_count': int,
|
'comment_count': int,
|
||||||
'uploader': '까칠한 墮落始祖 황비홍님의',
|
'uploader': '까칠한 墮落始祖 황비홍님의',
|
||||||
'uploader_id': 560824,
|
'uploader_id': '560824',
|
||||||
'timestamp': 1203770745,
|
'timestamp': 1203770745,
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
|
@ -77,7 +77,7 @@ class DaumIE(DaumBaseIE):
|
||||||
'description': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)\r\n\r\n[쇼! 음악중심] 20160611, 507회',
|
'description': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)\r\n\r\n[쇼! 음악중심] 20160611, 507회',
|
||||||
'upload_date': '20170129',
|
'upload_date': '20170129',
|
||||||
'uploader': '쇼! 음악중심',
|
'uploader': '쇼! 음악중심',
|
||||||
'uploader_id': 2653210,
|
'uploader_id': '2653210',
|
||||||
'timestamp': 1485684628,
|
'timestamp': 1485684628,
|
||||||
},
|
},
|
||||||
}]
|
}]
|
||||||
|
@ -107,7 +107,7 @@ class DaumClipIE(DaumBaseIE):
|
||||||
'duration': 3868,
|
'duration': 3868,
|
||||||
'view_count': int,
|
'view_count': int,
|
||||||
'uploader': 'GOMeXP',
|
'uploader': 'GOMeXP',
|
||||||
'uploader_id': 6667,
|
'uploader_id': '6667',
|
||||||
'timestamp': 1377911092,
|
'timestamp': 1377911092,
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
|
|
|
@ -1,54 +0,0 @@
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import js_to_json
|
|
||||||
|
|
||||||
|
|
||||||
class DiggIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?digg\.com/video/(?P<id>[^/?#&]+)'
|
|
||||||
_TESTS = [{
|
|
||||||
# JWPlatform via provider
|
|
||||||
'url': 'http://digg.com/video/sci-fi-short-jonah-daniel-kaluuya-get-out',
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'LcqvmS0b',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': "'Get Out' Star Daniel Kaluuya Goes On 'Moby Dick'-Like Journey In Sci-Fi Short 'Jonah'",
|
|
||||||
'description': 'md5:541bb847648b6ee3d6514bc84b82efda',
|
|
||||||
'upload_date': '20180109',
|
|
||||||
'timestamp': 1515530551,
|
|
||||||
},
|
|
||||||
'params': {
|
|
||||||
'skip_download': True,
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
# Youtube via provider
|
|
||||||
'url': 'http://digg.com/video/dog-boat-seal-play',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
# vimeo as regular embed
|
|
||||||
'url': 'http://digg.com/video/dream-girl-short-film',
|
|
||||||
'only_matching': True,
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
display_id = self._match_id(url)
|
|
||||||
|
|
||||||
webpage = self._download_webpage(url, display_id)
|
|
||||||
|
|
||||||
info = self._parse_json(
|
|
||||||
self._search_regex(
|
|
||||||
r'(?s)video_info\s*=\s*({.+?});\n', webpage, 'video info',
|
|
||||||
default='{}'), display_id, transform_source=js_to_json,
|
|
||||||
fatal=False)
|
|
||||||
|
|
||||||
video_id = info.get('video_id')
|
|
||||||
|
|
||||||
if video_id:
|
|
||||||
provider = info.get('provider_name')
|
|
||||||
if provider == 'youtube':
|
|
||||||
return self.url_result(
|
|
||||||
video_id, ie='Youtube', video_id=video_id)
|
|
||||||
elif provider == 'jwplayer':
|
|
||||||
return self.url_result(
|
|
||||||
'jwplatform:%s' % video_id, ie='JWPlatform',
|
|
||||||
video_id=video_id)
|
|
||||||
|
|
||||||
return self.url_result(url, 'Generic')
|
|
|
@ -9,6 +9,7 @@ from ..utils import (
|
||||||
|
|
||||||
|
|
||||||
class DTubeIE(InfoExtractor):
|
class DTubeIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'https?://(?:www\.)?d\.tube/(?:#!/)?v/(?P<uploader_id>[0-9a-z.-]+)/(?P<id>[0-9a-z]{8})'
|
_VALID_URL = r'https?://(?:www\.)?d\.tube/(?:#!/)?v/(?P<uploader_id>[0-9a-z.-]+)/(?P<id>[0-9a-z]{8})'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
'url': 'https://d.tube/#!/v/broncnutz/x380jtr1',
|
'url': 'https://d.tube/#!/v/broncnutz/x380jtr1',
|
||||||
|
|
|
@ -1,4 +1,6 @@
|
||||||
|
import base64
|
||||||
import re
|
import re
|
||||||
|
import urllib.parse
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_urlparse
|
from ..compat import compat_urlparse
|
||||||
|
@ -129,11 +131,15 @@ class DubokuIE(InfoExtractor):
|
||||||
data_url = player_data.get('url')
|
data_url = player_data.get('url')
|
||||||
if not data_url:
|
if not data_url:
|
||||||
raise ExtractorError('Cannot find url in player_data')
|
raise ExtractorError('Cannot find url in player_data')
|
||||||
data_from = player_data.get('from')
|
player_encrypt = player_data.get('encrypt')
|
||||||
|
if player_encrypt == 1:
|
||||||
|
data_url = urllib.parse.unquote(data_url)
|
||||||
|
elif player_encrypt == 2:
|
||||||
|
data_url = urllib.parse.unquote(base64.b64decode(data_url).decode('ascii'))
|
||||||
|
|
||||||
# if it is an embedded iframe, maybe it's an external source
|
# if it is an embedded iframe, maybe it's an external source
|
||||||
headers = {'Referer': webpage_url}
|
headers = {'Referer': webpage_url}
|
||||||
if data_from == 'iframe':
|
if player_data.get('from') == 'iframe':
|
||||||
# use _type url_transparent to retain the meaningful details
|
# use _type url_transparent to retain the meaningful details
|
||||||
# of the video.
|
# of the video.
|
||||||
return {
|
return {
|
||||||
|
|
|
@ -8,9 +8,9 @@ from ..utils import (
|
||||||
|
|
||||||
class DumpertIE(InfoExtractor):
|
class DumpertIE(InfoExtractor):
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = r'''(?x)
|
||||||
(?P<protocol>https?)://(?:(?:www|legacy)\.)?dumpert\.nl(?:
|
(?P<protocol>https?)://(?:(?:www|legacy)\.)?dumpert\.nl/(?:
|
||||||
/(?:mediabase|embed|item)/|
|
(?:mediabase|embed|item)/|
|
||||||
(?:/toppers|/latest|/?)\?selectedId=
|
[^#]*[?&]selectedId=
|
||||||
)(?P<id>[0-9]+[/_][0-9a-zA-Z]+)'''
|
)(?P<id>[0-9]+[/_][0-9a-zA-Z]+)'''
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.dumpert.nl/item/6646981_951bc60f',
|
'url': 'https://www.dumpert.nl/item/6646981_951bc60f',
|
||||||
|
@ -56,6 +56,9 @@ class DumpertIE(InfoExtractor):
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.dumpert.nl/?selectedId=100031688_b317a185',
|
'url': 'https://www.dumpert.nl/?selectedId=100031688_b317a185',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.dumpert.nl/toppers/dag?selectedId=100086074_f5cef3ac',
|
||||||
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|
|
@ -13,7 +13,7 @@ from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
class DuoplayIE(InfoExtractor):
|
class DuoplayIE(InfoExtractor):
|
||||||
_VALID_URL = r'https://duoplay\.ee/(?P<id>\d+)/[\w-]+/?(?:\?(?:[^#]+&)?ep=(?P<ep>\d+))?'
|
_VALID_URL = r'https?://duoplay\.ee/(?P<id>\d+)/[\w-]+/?(?:\?(?:[^#]+&)?ep=(?P<ep>\d+))?'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'note': 'Siberi võmm S02E12',
|
'note': 'Siberi võmm S02E12',
|
||||||
'url': 'https://duoplay.ee/4312/siberi-vomm?ep=24',
|
'url': 'https://duoplay.ee/4312/siberi-vomm?ep=24',
|
||||||
|
@ -32,7 +32,7 @@ class DuoplayIE(InfoExtractor):
|
||||||
'season_number': 2,
|
'season_number': 2,
|
||||||
'episode': 'Operatsioon "Öö"',
|
'episode': 'Operatsioon "Öö"',
|
||||||
'episode_number': 12,
|
'episode_number': 12,
|
||||||
'episode_id': 24,
|
'episode_id': '24',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'note': 'Empty title',
|
'note': 'Empty title',
|
||||||
|
@ -50,7 +50,7 @@ class DuoplayIE(InfoExtractor):
|
||||||
'series_id': '17',
|
'series_id': '17',
|
||||||
'season': 'Season 2',
|
'season': 'Season 2',
|
||||||
'season_number': 2,
|
'season_number': 2,
|
||||||
'episode_id': 14,
|
'episode_id': '14',
|
||||||
'release_year': 2010,
|
'release_year': 2010,
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
|
@ -99,6 +99,6 @@ class DuoplayIE(InfoExtractor):
|
||||||
'season_number': ('season_id', {int_or_none}),
|
'season_number': ('season_id', {int_or_none}),
|
||||||
'episode': 'subtitle',
|
'episode': 'subtitle',
|
||||||
'episode_number': ('episode_nr', {int_or_none}),
|
'episode_number': ('episode_nr', {int_or_none}),
|
||||||
'episode_id': ('episode_id', {int_or_none}),
|
'episode_id': ('episode_id', {str_or_none}),
|
||||||
}, get_all=False) if episode_attr.get('category') != 'movies' else {}),
|
}, get_all=False) if episode_attr.get('category') != 'movies' else {}),
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,6 +8,8 @@ from ..compat import compat_urlparse
|
||||||
|
|
||||||
|
|
||||||
class DWIE(InfoExtractor):
|
class DWIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
|
_ENABLED = None # XXX: pass through to GenericIE
|
||||||
IE_NAME = 'dw'
|
IE_NAME = 'dw'
|
||||||
_VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+(?:av|e)-(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+(?:av|e)-(?P<id>\d+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
|
@ -82,6 +84,8 @@ class DWIE(InfoExtractor):
|
||||||
|
|
||||||
|
|
||||||
class DWArticleIE(InfoExtractor):
|
class DWArticleIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
|
_ENABLED = None # XXX: pass through to GenericIE
|
||||||
IE_NAME = 'dw:article'
|
IE_NAME = 'dw:article'
|
||||||
_VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+a-(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+a-(?P<id>\d+)'
|
||||||
_TEST = {
|
_TEST = {
|
||||||
|
|
|
@ -19,7 +19,7 @@ class EggheadBaseIE(InfoExtractor):
|
||||||
class EggheadCourseIE(EggheadBaseIE):
|
class EggheadCourseIE(EggheadBaseIE):
|
||||||
IE_DESC = 'egghead.io course'
|
IE_DESC = 'egghead.io course'
|
||||||
IE_NAME = 'egghead:course'
|
IE_NAME = 'egghead:course'
|
||||||
_VALID_URL = r'https://(?:app\.)?egghead\.io/(?:course|playlist)s/(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https?://(?:app\.)?egghead\.io/(?:course|playlist)s/(?P<id>[^/?#&]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://egghead.io/courses/professor-frisby-introduces-composable-functional-javascript',
|
'url': 'https://egghead.io/courses/professor-frisby-introduces-composable-functional-javascript',
|
||||||
'playlist_count': 29,
|
'playlist_count': 29,
|
||||||
|
@ -65,7 +65,7 @@ class EggheadCourseIE(EggheadBaseIE):
|
||||||
class EggheadLessonIE(EggheadBaseIE):
|
class EggheadLessonIE(EggheadBaseIE):
|
||||||
IE_DESC = 'egghead.io lesson'
|
IE_DESC = 'egghead.io lesson'
|
||||||
IE_NAME = 'egghead:lesson'
|
IE_NAME = 'egghead:lesson'
|
||||||
_VALID_URL = r'https://(?:app\.)?egghead\.io/(?:api/v1/)?lessons/(?P<id>[^/?#&]+)'
|
_VALID_URL = r'https?://(?:app\.)?egghead\.io/(?:api/v1/)?lessons/(?P<id>[^/?#&]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box',
|
'url': 'https://egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
|
|
|
@ -42,7 +42,6 @@ class EplusIbIE(InfoExtractor):
|
||||||
'live_status': 'was_live',
|
'live_status': 'was_live',
|
||||||
'release_date': '20210719',
|
'release_date': '20210719',
|
||||||
'release_timestamp': 1626703200,
|
'release_timestamp': 1626703200,
|
||||||
'description': None,
|
|
||||||
},
|
},
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
|
|
|
@ -9,7 +9,7 @@ from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
class ERRJupiterIE(InfoExtractor):
|
class ERRJupiterIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://jupiter(?:pluss)?\.err\.ee/(?P<id>\d+)'
|
_VALID_URL = r'https?://(?:jupiter(?:pluss)?|lasteekraan)\.err\.ee/(?P<id>\d+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'note': 'Jupiter: Movie: siin-me-oleme',
|
'note': 'Jupiter: Movie: siin-me-oleme',
|
||||||
'url': 'https://jupiter.err.ee/1211107/siin-me-oleme',
|
'url': 'https://jupiter.err.ee/1211107/siin-me-oleme',
|
||||||
|
@ -145,6 +145,31 @@ class ERRJupiterIE(InfoExtractor):
|
||||||
'season_number': 0,
|
'season_number': 0,
|
||||||
'series': 'Лесные истории | Аисты',
|
'series': 'Лесные истории | Аисты',
|
||||||
'series_id': '1037497',
|
'series_id': '1037497',
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'note': 'Lasteekraan: Pätu',
|
||||||
|
'url': 'https://lasteekraan.err.ee/1092243/patu',
|
||||||
|
'md5': 'a67eb9b9bcb3d201718c15d1638edf77',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '1092243',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Pätu',
|
||||||
|
'alt_title': '',
|
||||||
|
'description': 'md5:64a7b5a80afd7042d3f8ec48c77befd9',
|
||||||
|
'release_date': '20230614',
|
||||||
|
'upload_date': '20200520',
|
||||||
|
'modified_date': '20200520',
|
||||||
|
'release_timestamp': 1686745800,
|
||||||
|
'timestamp': 1589975640,
|
||||||
|
'modified_timestamp': 1589975640,
|
||||||
|
'release_year': 1990,
|
||||||
|
'episode': 'Episode 1',
|
||||||
|
'episode_id': '1092243',
|
||||||
|
'episode_number': 1,
|
||||||
|
'season': 'Season 1',
|
||||||
|
'season_number': 1,
|
||||||
|
'series': 'Pätu',
|
||||||
|
'series_id': '1092236',
|
||||||
},
|
},
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
|
|
@ -13,6 +13,7 @@ from ..utils import (
|
||||||
|
|
||||||
|
|
||||||
class EuropaIE(InfoExtractor):
|
class EuropaIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
|
_VALID_URL = r'https?://ec\.europa\.eu/avservices/(?:video/player|audio/audioDetails)\.cfm\?.*?\bref=(?P<id>[A-Za-z0-9-]+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
|
'url': 'http://ec.europa.eu/avservices/video/player.cfm?ref=I107758',
|
||||||
|
|
|
@ -500,6 +500,7 @@ class FacebookIE(InfoExtractor):
|
||||||
webpage, 'description', default=None)
|
webpage, 'description', default=None)
|
||||||
uploader_data = (
|
uploader_data = (
|
||||||
get_first(media, ('owner', {dict}))
|
get_first(media, ('owner', {dict}))
|
||||||
|
or get_first(post, ('video', 'creation_story', 'attachments', ..., 'media', lambda k, v: k == 'owner' and v['name']))
|
||||||
or get_first(post, (..., 'video', lambda k, v: k == 'owner' and v['name']))
|
or get_first(post, (..., 'video', lambda k, v: k == 'owner' and v['name']))
|
||||||
or get_first(post, ('node', 'actors', ..., {dict}))
|
or get_first(post, ('node', 'actors', ..., {dict}))
|
||||||
or get_first(post, ('event', 'event_creator', {dict})) or {})
|
or get_first(post, ('event', 'event_creator', {dict})) or {})
|
||||||
|
@ -583,8 +584,8 @@ class FacebookIE(InfoExtractor):
|
||||||
def extract_relay_prefetched_data(_filter):
|
def extract_relay_prefetched_data(_filter):
|
||||||
return traverse_obj(extract_relay_data(_filter), (
|
return traverse_obj(extract_relay_data(_filter), (
|
||||||
'require', (None, (..., ..., ..., '__bbox', 'require')),
|
'require', (None, (..., ..., ..., '__bbox', 'require')),
|
||||||
lambda _, v: 'RelayPrefetchedStreamCache' in v, ..., ...,
|
lambda _, v: any(key.startswith('RelayPrefetchedStreamCache') for key in v),
|
||||||
'__bbox', 'result', 'data', {dict}), get_all=False) or {}
|
..., ..., '__bbox', 'result', 'data', {dict}), get_all=False) or {}
|
||||||
|
|
||||||
if not video_data:
|
if not video_data:
|
||||||
server_js_data = self._parse_json(self._search_regex([
|
server_js_data = self._parse_json(self._search_regex([
|
||||||
|
|
|
@ -10,6 +10,7 @@ from ..utils import (
|
||||||
|
|
||||||
|
|
||||||
class FancodeVodIE(InfoExtractor):
|
class FancodeVodIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
IE_NAME = 'fancode:vod'
|
IE_NAME = 'fancode:vod'
|
||||||
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?fancode\.com/video/(?P<id>[0-9]+)\b'
|
_VALID_URL = r'https?://(?:www\.)?fancode\.com/video/(?P<id>[0-9]+)\b'
|
||||||
|
@ -126,6 +127,7 @@ class FancodeVodIE(InfoExtractor):
|
||||||
|
|
||||||
|
|
||||||
class FancodeLiveIE(FancodeVodIE): # XXX: Do not subclass from concrete IE
|
class FancodeLiveIE(FancodeVodIE): # XXX: Do not subclass from concrete IE
|
||||||
|
_WORKING = False
|
||||||
IE_NAME = 'fancode:live'
|
IE_NAME = 'fancode:live'
|
||||||
|
|
||||||
_VALID_URL = r'https?://(www\.)?fancode\.com/match/(?P<id>[0-9]+).+'
|
_VALID_URL = r'https?://(www\.)?fancode\.com/match/(?P<id>[0-9]+).+'
|
||||||
|
|
|
@ -1,69 +0,0 @@
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import int_or_none
|
|
||||||
|
|
||||||
|
|
||||||
class FilmmoduIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?filmmodu\.org/(?P<id>[^/]+-(?:turkce-dublaj-izle|altyazili-izle))'
|
|
||||||
_TESTS = [{
|
|
||||||
'url': 'https://www.filmmodu.org/f9-altyazili-izle',
|
|
||||||
'md5': 'aeefd955c2a508a5bdaa3bcec8eeb0d4',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '10804',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'F9',
|
|
||||||
'description': 'md5:2713f584a4d65afa2611e2948d0b953c',
|
|
||||||
'subtitles': {
|
|
||||||
'tr': [{
|
|
||||||
'ext': 'vtt',
|
|
||||||
}],
|
|
||||||
},
|
|
||||||
'thumbnail': r're:https://s[0-9]+.filmmodu.org/uploads/movie/cover/10804/xXHZeb1yhJvnSHPzZDqee0zfMb6.jpg',
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'https://www.filmmodu.org/the-godfather-turkce-dublaj-izle',
|
|
||||||
'md5': '109f2fcb9c941330eed133971c035c00',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '3646',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Baba',
|
|
||||||
'description': 'md5:d43fd651937cd75cc650883ebd8d8461',
|
|
||||||
'thumbnail': r're:https://s[0-9]+.filmmodu.org/uploads/movie/cover/3646/6xKCYgH16UuwEGAyroLU6p8HLIn.jpg',
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
video_id = self._match_id(url)
|
|
||||||
webpage = self._download_webpage(url, video_id)
|
|
||||||
title = self._og_search_title(webpage, fatal=True)
|
|
||||||
description = self._og_search_description(webpage)
|
|
||||||
thumbnail = self._og_search_thumbnail(webpage)
|
|
||||||
real_video_id = self._search_regex(r'var\s*videoId\s*=\s*\'([0-9]+)\'', webpage, 'video_id')
|
|
||||||
video_type = self._search_regex(r'var\s*videoType\s*=\s*\'([a-z]+)\'', webpage, 'video_type')
|
|
||||||
data = self._download_json('https://www.filmmodu.org/get-source', real_video_id, query={
|
|
||||||
'movie_id': real_video_id,
|
|
||||||
'type': video_type,
|
|
||||||
})
|
|
||||||
formats = [{
|
|
||||||
'url': source['src'],
|
|
||||||
'ext': 'mp4',
|
|
||||||
'format_id': source['label'],
|
|
||||||
'height': int_or_none(source.get('res')),
|
|
||||||
'protocol': 'm3u8_native',
|
|
||||||
} for source in data['sources']]
|
|
||||||
|
|
||||||
subtitles = {}
|
|
||||||
|
|
||||||
if data.get('subtitle'):
|
|
||||||
subtitles['tr'] = [{
|
|
||||||
'url': data['subtitle'],
|
|
||||||
}]
|
|
||||||
|
|
||||||
return {
|
|
||||||
'id': real_video_id,
|
|
||||||
'display_id': video_id,
|
|
||||||
'title': title,
|
|
||||||
'description': description,
|
|
||||||
'formats': formats,
|
|
||||||
'subtitles': subtitles,
|
|
||||||
'thumbnail': thumbnail,
|
|
||||||
}
|
|
|
@ -0,0 +1,62 @@
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..networking.exceptions import HTTPError
|
||||||
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
UserNotLive,
|
||||||
|
parse_iso8601,
|
||||||
|
str_or_none,
|
||||||
|
traverse_obj,
|
||||||
|
url_or_none,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
class FlexTVIE(InfoExtractor):
|
||||||
|
_VALID_URL = r'https?://(?:www\.)?flextv\.co\.kr/channels/(?P<id>\d+)/live'
|
||||||
|
_TESTS = [{
|
||||||
|
'url': 'https://www.flextv.co.kr/channels/231638/live',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '231638',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': r're:^214하나만\.\.\. ',
|
||||||
|
'thumbnail': r're:^https?://.+\.jpg',
|
||||||
|
'upload_date': r're:\d{8}',
|
||||||
|
'timestamp': int,
|
||||||
|
'live_status': 'is_live',
|
||||||
|
'channel': 'Hi별',
|
||||||
|
'channel_id': '244396',
|
||||||
|
},
|
||||||
|
'skip': 'The channel is offline',
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.flextv.co.kr/channels/746/live',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
channel_id = self._match_id(url)
|
||||||
|
|
||||||
|
try:
|
||||||
|
stream_data = self._download_json(
|
||||||
|
f'https://api.flextv.co.kr/api/channels/{channel_id}/stream',
|
||||||
|
channel_id, query={'option': 'all'})
|
||||||
|
except ExtractorError as e:
|
||||||
|
if isinstance(e.cause, HTTPError) and e.cause.status == 400:
|
||||||
|
raise UserNotLive(video_id=channel_id)
|
||||||
|
raise
|
||||||
|
|
||||||
|
playlist_url = stream_data['sources'][0]['url']
|
||||||
|
formats, subtitles = self._extract_m3u8_formats_and_subtitles(
|
||||||
|
playlist_url, channel_id, 'mp4')
|
||||||
|
|
||||||
|
return {
|
||||||
|
'id': channel_id,
|
||||||
|
'formats': formats,
|
||||||
|
'subtitles': subtitles,
|
||||||
|
'is_live': True,
|
||||||
|
**traverse_obj(stream_data, {
|
||||||
|
'title': ('stream', 'title', {str}),
|
||||||
|
'timestamp': ('stream', 'createdAt', {parse_iso8601}),
|
||||||
|
'thumbnail': ('thumbUrl', {url_or_none}),
|
||||||
|
'channel': ('owner', 'name', {str}),
|
||||||
|
'channel_id': ('owner', 'id', {str_or_none}),
|
||||||
|
}),
|
||||||
|
}
|
|
@ -1,60 +1,49 @@
|
||||||
|
import re
|
||||||
|
import urllib.parse
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from .dailymotion import DailymotionIE
|
from .dailymotion import DailymotionIE
|
||||||
|
from ..networking import HEADRequest
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
|
||||||
determine_ext,
|
determine_ext,
|
||||||
|
filter_dict,
|
||||||
format_field,
|
format_field,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
join_nonempty,
|
join_nonempty,
|
||||||
parse_iso8601,
|
parse_iso8601,
|
||||||
parse_qs,
|
smuggle_url,
|
||||||
|
unsmuggle_url,
|
||||||
|
url_or_none,
|
||||||
)
|
)
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
class FranceTVBaseInfoExtractor(InfoExtractor):
|
class FranceTVBaseInfoExtractor(InfoExtractor):
|
||||||
def _make_url_result(self, video_or_full_id, catalog=None):
|
def _make_url_result(self, video_id, url=None):
|
||||||
full_id = 'francetv:%s' % video_or_full_id
|
video_id = video_id.split('@')[0] # for compat with old @catalog IDs
|
||||||
if '@' not in video_or_full_id and catalog:
|
full_id = f'francetv:{video_id}'
|
||||||
full_id += '@%s' % catalog
|
if url:
|
||||||
return self.url_result(
|
full_id = smuggle_url(full_id, {'hostname': urllib.parse.urlparse(url).hostname})
|
||||||
full_id, ie=FranceTVIE.ie_key(),
|
return self.url_result(full_id, FranceTVIE, video_id)
|
||||||
video_id=video_or_full_id.split('@')[0])
|
|
||||||
|
|
||||||
|
|
||||||
class FranceTVIE(InfoExtractor):
|
class FranceTVIE(InfoExtractor):
|
||||||
_VALID_URL = r'''(?x)
|
_VALID_URL = r'francetv:(?P<id>[^@#]+)'
|
||||||
(?:
|
_GEO_COUNTRIES = ['FR']
|
||||||
https?://
|
_GEO_BYPASS = False
|
||||||
sivideo\.webservices\.francetelevisions\.fr/tools/getInfosOeuvre/v2/\?
|
|
||||||
.*?\bidDiffusion=[^&]+|
|
|
||||||
(?:
|
|
||||||
https?://videos\.francetv\.fr/video/|
|
|
||||||
francetv:
|
|
||||||
)
|
|
||||||
(?P<id>[^@]+)(?:@(?P<catalog>.+))?
|
|
||||||
)
|
|
||||||
'''
|
|
||||||
_EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1']
|
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# without catalog
|
'url': 'francetv:ec217ecc-0733-48cf-ac06-af1347b849d1',
|
||||||
'url': 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=162311093&callback=_jsonp_loader_callback_request_0',
|
|
||||||
'md5': 'c2248a8de38c4e65ea8fae7b5df2d84f',
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '162311093',
|
'id': 'ec217ecc-0733-48cf-ac06-af1347b849d1',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': '13h15, le dimanche... - Les mystères de Jésus',
|
'title': '13h15, le dimanche... - Les mystères de Jésus',
|
||||||
'description': 'md5:75efe8d4c0a8205e5904498ffe1e1a42',
|
|
||||||
'timestamp': 1502623500,
|
'timestamp': 1502623500,
|
||||||
|
'duration': 2580,
|
||||||
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
'upload_date': '20170813',
|
'upload_date': '20170813',
|
||||||
},
|
},
|
||||||
}, {
|
'params': {'skip_download': 'm3u8'},
|
||||||
# with catalog
|
|
||||||
'url': 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=NI_1004933&catalogue=Zouzous&callback=_jsonp_loader_callback_request_4',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
|
||||||
'url': 'http://videos.francetv.fr/video/NI_657393@Regions',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
}, {
|
||||||
'url': 'francetv:162311093',
|
'url': 'francetv:162311093',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
@ -76,10 +65,7 @@ class FranceTVIE(InfoExtractor):
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _extract_video(self, video_id, catalogue=None):
|
def _extract_video(self, video_id, hostname=None):
|
||||||
# Videos are identified by idDiffusion so catalogue part is optional.
|
|
||||||
# However when provided, some extra formats may be returned so we pass
|
|
||||||
# it if available.
|
|
||||||
is_live = None
|
is_live = None
|
||||||
videos = []
|
videos = []
|
||||||
title = None
|
title = None
|
||||||
|
@ -91,18 +77,20 @@ class FranceTVIE(InfoExtractor):
|
||||||
timestamp = None
|
timestamp = None
|
||||||
spritesheets = None
|
spritesheets = None
|
||||||
|
|
||||||
for device_type in ('desktop', 'mobile'):
|
# desktop+chrome returns dash; mobile+safari returns hls
|
||||||
|
for device_type, browser in [('desktop', 'chrome'), ('mobile', 'safari')]:
|
||||||
dinfo = self._download_json(
|
dinfo = self._download_json(
|
||||||
'https://player.webservices.francetelevisions.fr/v1/videos/%s' % video_id,
|
f'https://k7.ftven.fr/videos/{video_id}', video_id,
|
||||||
video_id, 'Downloading %s video JSON' % device_type, query={
|
f'Downloading {device_type} {browser} video JSON', query=filter_dict({
|
||||||
'device_type': device_type,
|
'device_type': device_type,
|
||||||
'browser': 'chrome',
|
'browser': browser,
|
||||||
}, fatal=False)
|
'domain': hostname,
|
||||||
|
}), fatal=False)
|
||||||
|
|
||||||
if not dinfo:
|
if not dinfo:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
video = dinfo.get('video')
|
video = traverse_obj(dinfo, ('video', {dict}))
|
||||||
if video:
|
if video:
|
||||||
videos.append(video)
|
videos.append(video)
|
||||||
if duration is None:
|
if duration is None:
|
||||||
|
@ -112,7 +100,7 @@ class FranceTVIE(InfoExtractor):
|
||||||
if spritesheets is None:
|
if spritesheets is None:
|
||||||
spritesheets = video.get('spritesheets')
|
spritesheets = video.get('spritesheets')
|
||||||
|
|
||||||
meta = dinfo.get('meta')
|
meta = traverse_obj(dinfo, ('meta', {dict}))
|
||||||
if meta:
|
if meta:
|
||||||
if title is None:
|
if title is None:
|
||||||
title = meta.get('title')
|
title = meta.get('title')
|
||||||
|
@ -126,43 +114,46 @@ class FranceTVIE(InfoExtractor):
|
||||||
if timestamp is None:
|
if timestamp is None:
|
||||||
timestamp = parse_iso8601(meta.get('broadcasted_at'))
|
timestamp = parse_iso8601(meta.get('broadcasted_at'))
|
||||||
|
|
||||||
formats = []
|
formats, subtitles, video_url = [], {}, None
|
||||||
subtitles = {}
|
for video in traverse_obj(videos, lambda _, v: url_or_none(v['url'])):
|
||||||
for video in videos:
|
video_url = video['url']
|
||||||
format_id = video.get('format')
|
format_id = video.get('format')
|
||||||
|
|
||||||
video_url = None
|
if token_url := url_or_none(video.get('token')):
|
||||||
if video.get('workflow') == 'token-akamai':
|
tokenized_url = traverse_obj(self._download_json(
|
||||||
token_url = video.get('token')
|
token_url, video_id, f'Downloading signed {format_id} manifest URL',
|
||||||
if token_url:
|
fatal=False, query={
|
||||||
token_json = self._download_json(
|
'format': 'json',
|
||||||
token_url, video_id,
|
'url': video_url,
|
||||||
'Downloading signed %s manifest URL' % format_id)
|
}), ('url', {url_or_none}))
|
||||||
if token_json:
|
if tokenized_url:
|
||||||
video_url = token_json.get('url')
|
video_url = tokenized_url
|
||||||
if not video_url:
|
|
||||||
video_url = video.get('url')
|
|
||||||
|
|
||||||
ext = determine_ext(video_url)
|
ext = determine_ext(video_url)
|
||||||
if ext == 'f4m':
|
if ext == 'f4m':
|
||||||
formats.extend(self._extract_f4m_formats(
|
formats.extend(self._extract_f4m_formats(
|
||||||
video_url, video_id, f4m_id=format_id, fatal=False))
|
video_url, video_id, f4m_id=format_id or ext, fatal=False))
|
||||||
elif ext == 'm3u8':
|
elif ext == 'm3u8':
|
||||||
|
format_id = format_id or 'hls'
|
||||||
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
||||||
video_url, video_id, 'mp4',
|
video_url, video_id, 'mp4', m3u8_id=format_id, fatal=False)
|
||||||
entry_protocol='m3u8_native', m3u8_id=format_id,
|
for f in traverse_obj(fmts, lambda _, v: v['vcodec'] == 'none' and v.get('tbr') is None):
|
||||||
fatal=False)
|
if mobj := re.match(rf'{format_id}-[Aa]udio-\w+-(?P<bitrate>\d+)', f['format_id']):
|
||||||
|
f.update({
|
||||||
|
'tbr': int_or_none(mobj.group('bitrate')),
|
||||||
|
'acodec': 'mp4a',
|
||||||
|
})
|
||||||
formats.extend(fmts)
|
formats.extend(fmts)
|
||||||
self._merge_subtitles(subs, target=subtitles)
|
self._merge_subtitles(subs, target=subtitles)
|
||||||
elif ext == 'mpd':
|
elif ext == 'mpd':
|
||||||
fmts, subs = self._extract_mpd_formats_and_subtitles(
|
fmts, subs = self._extract_mpd_formats_and_subtitles(
|
||||||
video_url, video_id, mpd_id=format_id, fatal=False)
|
video_url, video_id, mpd_id=format_id or 'dash', fatal=False)
|
||||||
formats.extend(fmts)
|
formats.extend(fmts)
|
||||||
self._merge_subtitles(subs, target=subtitles)
|
self._merge_subtitles(subs, target=subtitles)
|
||||||
elif video_url.startswith('rtmp'):
|
elif video_url.startswith('rtmp'):
|
||||||
formats.append({
|
formats.append({
|
||||||
'url': video_url,
|
'url': video_url,
|
||||||
'format_id': 'rtmp-%s' % format_id,
|
'format_id': join_nonempty('rtmp', format_id),
|
||||||
'ext': 'flv',
|
'ext': 'flv',
|
||||||
})
|
})
|
||||||
else:
|
else:
|
||||||
|
@ -174,6 +165,13 @@ class FranceTVIE(InfoExtractor):
|
||||||
|
|
||||||
# XXX: what is video['captions']?
|
# XXX: what is video['captions']?
|
||||||
|
|
||||||
|
if not formats and video_url:
|
||||||
|
urlh = self._request_webpage(
|
||||||
|
HEADRequest(video_url), video_id, 'Checking for geo-restriction',
|
||||||
|
fatal=False, expected_status=403)
|
||||||
|
if urlh and urlh.headers.get('x-errortype') == 'geo':
|
||||||
|
self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True)
|
||||||
|
|
||||||
for f in formats:
|
for f in formats:
|
||||||
if f.get('acodec') != 'none' and f.get('language') in ('qtz', 'qad'):
|
if f.get('acodec') != 'none' and f.get('language') in ('qtz', 'qad'):
|
||||||
f['language_preference'] = -10
|
f['language_preference'] = -10
|
||||||
|
@ -194,7 +192,7 @@ class FranceTVIE(InfoExtractor):
|
||||||
# a 10×10 grid of thumbnails corresponding to approximately
|
# a 10×10 grid of thumbnails corresponding to approximately
|
||||||
# 2 seconds of the video; the last spritesheet may be shorter
|
# 2 seconds of the video; the last spritesheet may be shorter
|
||||||
'duration': 200,
|
'duration': 200,
|
||||||
} for sheet in spritesheets]
|
} for sheet in traverse_obj(spritesheets, (..., {url_or_none}))]
|
||||||
})
|
})
|
||||||
|
|
||||||
return {
|
return {
|
||||||
|
@ -210,21 +208,15 @@ class FranceTVIE(InfoExtractor):
|
||||||
'series': title if episode_number else None,
|
'series': title if episode_number else None,
|
||||||
'episode_number': int_or_none(episode_number),
|
'episode_number': int_or_none(episode_number),
|
||||||
'season_number': int_or_none(season_number),
|
'season_number': int_or_none(season_number),
|
||||||
|
'_format_sort_fields': ('res', 'tbr', 'proto'), # prioritize m3u8 over dash
|
||||||
}
|
}
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = self._match_valid_url(url)
|
url, smuggled_data = unsmuggle_url(url, {})
|
||||||
video_id = mobj.group('id')
|
video_id = self._match_id(url)
|
||||||
catalog = mobj.group('catalog')
|
hostname = smuggled_data.get('hostname') or 'www.france.tv'
|
||||||
|
|
||||||
if not video_id:
|
return self._extract_video(video_id, hostname=hostname)
|
||||||
qs = parse_qs(url)
|
|
||||||
video_id = qs.get('idDiffusion', [None])[0]
|
|
||||||
catalog = qs.get('catalogue', [None])[0]
|
|
||||||
if not video_id:
|
|
||||||
raise ExtractorError('Invalid URL', expected=True)
|
|
||||||
|
|
||||||
return self._extract_video(video_id, catalog)
|
|
||||||
|
|
||||||
|
|
||||||
class FranceTVSiteIE(FranceTVBaseInfoExtractor):
|
class FranceTVSiteIE(FranceTVBaseInfoExtractor):
|
||||||
|
@ -246,6 +238,7 @@ class FranceTVSiteIE(FranceTVBaseInfoExtractor):
|
||||||
},
|
},
|
||||||
'add_ie': [FranceTVIE.ie_key()],
|
'add_ie': [FranceTVIE.ie_key()],
|
||||||
}, {
|
}, {
|
||||||
|
# geo-restricted
|
||||||
'url': 'https://www.france.tv/enfants/six-huit-ans/foot2rue/saison-1/3066387-duel-au-vieux-port.html',
|
'url': 'https://www.france.tv/enfants/six-huit-ans/foot2rue/saison-1/3066387-duel-au-vieux-port.html',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'a9050959-eedd-4b4a-9b0d-de6eeaa73e44',
|
'id': 'a9050959-eedd-4b4a-9b0d-de6eeaa73e44',
|
||||||
|
@ -261,6 +254,26 @@ class FranceTVSiteIE(FranceTVBaseInfoExtractor):
|
||||||
'thumbnail': r're:^https?://.*\.jpg$',
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
'duration': 1441,
|
'duration': 1441,
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
# geo-restricted livestream (workflow == 'token-akamai')
|
||||||
|
'url': 'https://www.france.tv/france-4/direct.html',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '9a6a7670-dde9-4264-adbc-55b89558594b',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': r're:France 4 en direct .+',
|
||||||
|
'live_status': 'is_live',
|
||||||
|
},
|
||||||
|
'skip': 'geo-restricted livestream',
|
||||||
|
}, {
|
||||||
|
# livestream (workflow == 'dai')
|
||||||
|
'url': 'https://www.france.tv/france-2/direct.html',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '006194ea-117d-4bcf-94a9-153d999c59ae',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': r're:France 2 en direct .+',
|
||||||
|
'live_status': 'is_live',
|
||||||
|
},
|
||||||
|
'params': {'skip_download': 'livestream'},
|
||||||
}, {
|
}, {
|
||||||
# france3
|
# france3
|
||||||
'url': 'https://www.france.tv/france-3/des-chiffres-et-des-lettres/139063-emission-du-mardi-9-mai-2017.html',
|
'url': 'https://www.france.tv/france-3/des-chiffres-et-des-lettres/139063-emission-du-mardi-9-mai-2017.html',
|
||||||
|
@ -277,10 +290,6 @@ class FranceTVSiteIE(FranceTVBaseInfoExtractor):
|
||||||
# franceo
|
# franceo
|
||||||
'url': 'https://www.france.tv/france-o/archipels/132249-mon-ancetre-l-esclave.html',
|
'url': 'https://www.france.tv/france-o/archipels/132249-mon-ancetre-l-esclave.html',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
|
||||||
# france2 live
|
|
||||||
'url': 'https://www.france.tv/france-2/direct.html',
|
|
||||||
'only_matching': True,
|
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.france.tv/documentaires/histoire/136517-argentine-les-500-bebes-voles-de-la-dictature.html',
|
'url': 'https://www.france.tv/documentaires/histoire/136517-argentine-les-500-bebes-voles-de-la-dictature.html',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
|
@ -304,17 +313,16 @@ class FranceTVSiteIE(FranceTVBaseInfoExtractor):
|
||||||
|
|
||||||
webpage = self._download_webpage(url, display_id)
|
webpage = self._download_webpage(url, display_id)
|
||||||
|
|
||||||
catalogue = None
|
|
||||||
video_id = self._search_regex(
|
video_id = self._search_regex(
|
||||||
r'(?:data-main-video\s*=|videoId["\']?\s*[:=])\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
|
r'(?:data-main-video\s*=|videoId["\']?\s*[:=])\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
|
||||||
webpage, 'video id', default=None, group='id')
|
webpage, 'video id', default=None, group='id')
|
||||||
|
|
||||||
if not video_id:
|
if not video_id:
|
||||||
video_id, catalogue = self._html_search_regex(
|
video_id = self._html_search_regex(
|
||||||
r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
|
r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@"]+@[^"]+)"',
|
||||||
webpage, 'video ID').split('@')
|
webpage, 'video ID')
|
||||||
|
|
||||||
return self._make_url_result(video_id, catalogue)
|
return self._make_url_result(video_id, url=url)
|
||||||
|
|
||||||
|
|
||||||
class FranceTVInfoIE(FranceTVBaseInfoExtractor):
|
class FranceTVInfoIE(FranceTVBaseInfoExtractor):
|
||||||
|
@ -328,8 +336,9 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Soir 3',
|
'title': 'Soir 3',
|
||||||
'upload_date': '20190822',
|
'upload_date': '20190822',
|
||||||
'timestamp': 1566510900,
|
'timestamp': 1566510730,
|
||||||
'description': 'md5:72d167097237701d6e8452ff03b83c00',
|
'thumbnail': r're:^https?://.*\.jpe?g$',
|
||||||
|
'duration': 1637,
|
||||||
'subtitles': {
|
'subtitles': {
|
||||||
'fr': 'mincount:2',
|
'fr': 'mincount:2',
|
||||||
},
|
},
|
||||||
|
@ -344,8 +353,8 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '7d204c9e-a2d3-11eb-9e4c-000d3a23d482',
|
'id': '7d204c9e-a2d3-11eb-9e4c-000d3a23d482',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Covid-19 : une situation catastrophique à New Dehli',
|
'title': 'Covid-19 : une situation catastrophique à New Dehli - Édition du mercredi 21 avril 2021',
|
||||||
'thumbnail': str,
|
'thumbnail': r're:^https?://.*\.jpe?g$',
|
||||||
'duration': 76,
|
'duration': 76,
|
||||||
'timestamp': 1619028518,
|
'timestamp': 1619028518,
|
||||||
'upload_date': '20210421',
|
'upload_date': '20210421',
|
||||||
|
@ -371,11 +380,17 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
|
||||||
'id': 'x4iiko0',
|
'id': 'x4iiko0',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'NDDL, référendum, Brexit : Cécile Duflot répond à Patrick Cohen',
|
'title': 'NDDL, référendum, Brexit : Cécile Duflot répond à Patrick Cohen',
|
||||||
'description': 'Au lendemain de la victoire du "oui" au référendum sur l\'aéroport de Notre-Dame-des-Landes, l\'ancienne ministre écologiste est l\'invitée de Patrick Cohen. Plus d\'info : https://www.franceinter.fr/emissions/le-7-9/le-7-9-27-juin-2016',
|
'description': 'md5:fdcb582c370756293a65cdfbc6ecd90e',
|
||||||
'timestamp': 1467011958,
|
'timestamp': 1467011958,
|
||||||
'upload_date': '20160627',
|
|
||||||
'uploader': 'France Inter',
|
'uploader': 'France Inter',
|
||||||
'uploader_id': 'x2q2ez',
|
'uploader_id': 'x2q2ez',
|
||||||
|
'upload_date': '20160627',
|
||||||
|
'view_count': int,
|
||||||
|
'tags': ['Politique', 'France Inter', '27 juin 2016', 'Linvité de 8h20', 'Cécile Duflot', 'Patrick Cohen'],
|
||||||
|
'age_limit': 0,
|
||||||
|
'duration': 640,
|
||||||
|
'like_count': int,
|
||||||
|
'thumbnail': r're:https://[^/?#]+/v/[^/?#]+/x1080',
|
||||||
},
|
},
|
||||||
'add_ie': ['Dailymotion'],
|
'add_ie': ['Dailymotion'],
|
||||||
}, {
|
}, {
|
||||||
|
@ -405,4 +420,4 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
|
||||||
r'(?:data-id|<figure[^<]+\bid)=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'),
|
r'(?:data-id|<figure[^<]+\bid)=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'),
|
||||||
webpage, 'video id')
|
webpage, 'video id')
|
||||||
|
|
||||||
return self._make_url_result(video_id)
|
return self._make_url_result(video_id, url=url)
|
||||||
|
|
|
@ -301,7 +301,7 @@ class FunimationShowIE(FunimationBaseIE):
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.funimation.com/en/shows/sk8-the-infinity',
|
'url': 'https://www.funimation.com/en/shows/sk8-the-infinity',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 1315000,
|
'id': '1315000',
|
||||||
'title': 'SK8 the Infinity'
|
'title': 'SK8 the Infinity'
|
||||||
},
|
},
|
||||||
'playlist_count': 13,
|
'playlist_count': 13,
|
||||||
|
@ -312,7 +312,7 @@ class FunimationShowIE(FunimationBaseIE):
|
||||||
# without lang code
|
# without lang code
|
||||||
'url': 'https://www.funimation.com/shows/ouran-high-school-host-club/',
|
'url': 'https://www.funimation.com/shows/ouran-high-school-host-club/',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 39643,
|
'id': '39643',
|
||||||
'title': 'Ouran High School Host Club'
|
'title': 'Ouran High School Host Club'
|
||||||
},
|
},
|
||||||
'playlist_count': 26,
|
'playlist_count': 26,
|
||||||
|
@ -339,7 +339,7 @@ class FunimationShowIE(FunimationBaseIE):
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'_type': 'playlist',
|
'_type': 'playlist',
|
||||||
'id': show_info['id'],
|
'id': str_or_none(show_info['id']),
|
||||||
'title': show_info['name'],
|
'title': show_info['name'],
|
||||||
'entries': orderedSet(
|
'entries': orderedSet(
|
||||||
self.url_result(
|
self.url_result(
|
||||||
|
|
|
@ -19,7 +19,6 @@ class GabTVIE(InfoExtractor):
|
||||||
'id': '61217eacea5665de450d0488',
|
'id': '61217eacea5665de450d0488',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'WHY WAS AMERICA IN AFGHANISTAN - AMERICA FIRST AGAINST AMERICAN OLIGARCHY',
|
'title': 'WHY WAS AMERICA IN AFGHANISTAN - AMERICA FIRST AGAINST AMERICAN OLIGARCHY',
|
||||||
'description': None,
|
|
||||||
'uploader': 'Wurzelroot',
|
'uploader': 'Wurzelroot',
|
||||||
'uploader_id': '608fb0a85738fd1974984f7d',
|
'uploader_id': '608fb0a85738fd1974984f7d',
|
||||||
'thumbnail': 'https://tv.gab.com/image/61217eacea5665de450d0488',
|
'thumbnail': 'https://tv.gab.com/image/61217eacea5665de450d0488',
|
||||||
|
|
|
@ -1,46 +0,0 @@
|
||||||
from .brightcove import BrightcoveNewIE
|
|
||||||
from .common import InfoExtractor
|
|
||||||
from ..utils import (
|
|
||||||
clean_html,
|
|
||||||
get_element_by_class,
|
|
||||||
get_element_by_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class GameInformerIE(InfoExtractor):
|
|
||||||
_VALID_URL = r'https?://(?:www\.)?gameinformer\.com/(?:[^/]+/)*(?P<id>[^.?&#]+)'
|
|
||||||
_TESTS = [{
|
|
||||||
# normal Brightcove embed code extracted with BrightcoveNewIE._extract_url
|
|
||||||
'url': 'http://www.gameinformer.com/b/features/archive/2015/09/26/replay-animal-crossing.aspx',
|
|
||||||
'md5': '292f26da1ab4beb4c9099f1304d2b071',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '4515472681001',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Replay - Animal Crossing',
|
|
||||||
'description': 'md5:2e211891b215c85d061adc7a4dd2d930',
|
|
||||||
'timestamp': 1443457610,
|
|
||||||
'upload_date': '20150928',
|
|
||||||
'uploader_id': '694940074001',
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
# Brightcove id inside unique element with field--name-field-brightcove-video-id class
|
|
||||||
'url': 'https://www.gameinformer.com/video-feature/new-gameplay-today/2019/07/09/new-gameplay-today-streets-of-rogue',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '6057111913001',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'New Gameplay Today – Streets Of Rogue',
|
|
||||||
'timestamp': 1562699001,
|
|
||||||
'upload_date': '20190709',
|
|
||||||
'uploader_id': '694940074001',
|
|
||||||
|
|
||||||
},
|
|
||||||
}]
|
|
||||||
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/694940074001/default_default/index.html?videoId=%s'
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
display_id = self._match_id(url)
|
|
||||||
webpage = self._download_webpage(
|
|
||||||
url, display_id, headers=self.geo_verification_headers())
|
|
||||||
brightcove_id = clean_html(get_element_by_class('field--name-field-brightcove-video-id', webpage) or get_element_by_id('video-source-content', webpage))
|
|
||||||
brightcove_url = self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id if brightcove_id else BrightcoveNewIE._extract_url(self, webpage)
|
|
||||||
return self.url_result(brightcove_url, 'BrightcoveNew', brightcove_id)
|
|
|
@ -88,7 +88,7 @@ class GameJoltBaseIE(InfoExtractor):
|
||||||
'uploader_id': user_data.get('username'),
|
'uploader_id': user_data.get('username'),
|
||||||
'uploader_url': format_field(user_data, 'url', 'https://gamejolt.com%s'),
|
'uploader_url': format_field(user_data, 'url', 'https://gamejolt.com%s'),
|
||||||
'categories': [try_get(category, lambda x: '%s - %s' % (x['community']['name'], x['channel'].get('display_title') or x['channel']['title']))
|
'categories': [try_get(category, lambda x: '%s - %s' % (x['community']['name'], x['channel'].get('display_title') or x['channel']['title']))
|
||||||
for category in post_data.get('communities' or [])],
|
for category in post_data.get('communities') or []],
|
||||||
'tags': traverse_obj(
|
'tags': traverse_obj(
|
||||||
lead_content, ('content', ..., 'content', ..., 'marks', ..., 'attrs', 'tag'), expected_type=str_or_none),
|
lead_content, ('content', ..., 'content', ..., 'marks', ..., 'attrs', 'tag'), expected_type=str_or_none),
|
||||||
'like_count': int_or_none(post_data.get('like_count')),
|
'like_count': int_or_none(post_data.get('like_count')),
|
||||||
|
@ -267,9 +267,9 @@ class GameJoltIE(GameJoltBaseIE):
|
||||||
'id': 'dszyjnwi',
|
'id': 'dszyjnwi',
|
||||||
'ext': 'webm',
|
'ext': 'webm',
|
||||||
'title': 'gif-presentacion-mejorado-dszyjnwi',
|
'title': 'gif-presentacion-mejorado-dszyjnwi',
|
||||||
'n_entries': 1,
|
|
||||||
}
|
}
|
||||||
}]
|
}],
|
||||||
|
'playlist_count': 1,
|
||||||
}, {
|
}, {
|
||||||
# Multiple GIFs
|
# Multiple GIFs
|
||||||
'url': 'https://gamejolt.com/p/gif-yhsqkumq',
|
'url': 'https://gamejolt.com/p/gif-yhsqkumq',
|
||||||
|
@ -374,7 +374,6 @@ class GameJoltGameSoundtrackIE(GameJoltBaseIE):
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '657899',
|
'id': '657899',
|
||||||
'title': 'Friday Night Funkin\': Vs Oswald',
|
'title': 'Friday Night Funkin\': Vs Oswald',
|
||||||
'n_entries': None,
|
|
||||||
},
|
},
|
||||||
'playlist': [{
|
'playlist': [{
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
|
@ -384,7 +383,6 @@ class GameJoltGameSoundtrackIE(GameJoltBaseIE):
|
||||||
'url': r're:^https://.+vs-oswald-menu-music\.mp3$',
|
'url': r're:^https://.+vs-oswald-menu-music\.mp3$',
|
||||||
'release_timestamp': 1635190816,
|
'release_timestamp': 1635190816,
|
||||||
'release_date': '20211025',
|
'release_date': '20211025',
|
||||||
'n_entries': 3,
|
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
|
@ -394,7 +392,6 @@ class GameJoltGameSoundtrackIE(GameJoltBaseIE):
|
||||||
'url': r're:^https://.+rabbit-s-luck--full-version-\.mp3$',
|
'url': r're:^https://.+rabbit-s-luck--full-version-\.mp3$',
|
||||||
'release_timestamp': 1635190841,
|
'release_timestamp': 1635190841,
|
||||||
'release_date': '20211025',
|
'release_date': '20211025',
|
||||||
'n_entries': 3,
|
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
|
@ -404,9 +401,9 @@ class GameJoltGameSoundtrackIE(GameJoltBaseIE):
|
||||||
'url': r're:^https://.+last-straw\.mp3$',
|
'url': r're:^https://.+last-straw\.mp3$',
|
||||||
'release_timestamp': 1635881104,
|
'release_timestamp': 1635881104,
|
||||||
'release_date': '20211102',
|
'release_date': '20211102',
|
||||||
'n_entries': 3,
|
|
||||||
}
|
}
|
||||||
}]
|
}],
|
||||||
|
'playlist_count': 3,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
|
|
|
@ -21,7 +21,6 @@ class GaskrankIE(InfoExtractor):
|
||||||
'display_id': 'strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden',
|
'display_id': 'strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden',
|
||||||
'uploader_id': 'Bikefun',
|
'uploader_id': 'Bikefun',
|
||||||
'upload_date': '20170110',
|
'upload_date': '20170110',
|
||||||
'uploader_url': None,
|
|
||||||
}
|
}
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.gaskrank.tv/tv/racing/isle-of-man-tt-2011-michael-du-15920.htm',
|
'url': 'http://www.gaskrank.tv/tv/racing/isle-of-man-tt-2011-michael-du-15920.htm',
|
||||||
|
|
|
@ -2,6 +2,7 @@ from .common import InfoExtractor
|
||||||
|
|
||||||
|
|
||||||
class GazetaIE(InfoExtractor):
|
class GazetaIE(InfoExtractor):
|
||||||
|
_WORKING = False
|
||||||
_VALID_URL = r'(?P<url>https?://(?:www\.)?gazeta\.ru/(?:[^/]+/)?video/(?:main/)*(?:\d{4}/\d{2}/\d{2}/)?(?P<id>[A-Za-z0-9-_.]+)\.s?html)'
|
_VALID_URL = r'(?P<url>https?://(?:www\.)?gazeta\.ru/(?:[^/]+/)?video/(?:main/)*(?:\d{4}/\d{2}/\d{2}/)?(?P<id>[A-Za-z0-9-_.]+)\.s?html)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.gazeta.ru/video/main/zadaite_vopros_vladislavu_yurevichu.shtml',
|
'url': 'http://www.gazeta.ru/video/main/zadaite_vopros_vladislavu_yurevichu.shtml',
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue