Compare commits
138 Commits
| SHA1 |
| --- |
| ed4b05d9b6 |
| 6f88b1cec6 |
| 03451fb8ae |
| e45c3fd48b |
| 1153ac8f24 |
| c256a045f9 |
| 98603439cb |
| a6ca011202 |
| 114c2572a4 |
| f64b362603 |
| 2fd7910194 |
| c2e53072f7 |
| c2986f3b14 |
| 57854169f4 |
| 3217305f9f |
| 639aadd2c1 |
| 7157df13cd |
| 630e0137e0 |
| a0c51731af |
| d361996fc0 |
| 4ef7dda14a |
| ee31cedae0 |
| d3b0cb5e13 |
| 0a79974d11 |
| 4e327944a0 |
| 09a437f7fb |
| 3cbe18aac0 |
| 62418f8e95 |
| bfd3760969 |
| efd89b2e64 |
| 0dc1747178 |
| 8577164785 |
| 8af98968dd |
| 8f00cbcdd6 |
| af75551bc2 |
| 3a6cc1e44f |
| 7664b5f0ff |
| ec5d236cad |
| d6b7a255d0 |
| 22bc7324db |
| 48e8f271e7 |
| 9a0ad6070b |
| 6039589f24 |
| d4cba7eb6c |
| 70cb453280 |
| 7a106331e7 |
| 8775e131af |
| 1f16f7cb62 |
| 80b7f3cd00 |
| 8b79e067bc |
| cda0627d5a |
| ad40dd6d6b |
| b91d53dc6f |
| cda4fd1f26 |
| ff2a2edaa5 |
| 38d8d5d4c5 |
| f010452abf |
| ab93f8242b |
| 1505414a1a |
| c04d7c9a24 |
| 3ee2df7faa |
| d2c883c211 |
| 59c988f819 |
| 629c811e84 |
| 284024433b |
| 55a8e50d6a |
| 810dff999e |
| 4da91fb972 |
| 874ac0a0ac |
| 89ae1e265b |
| 00bd9fee6f |
| b215e2a3b2 |
| 97972d6fa3 |
| 6ae20bb1f5 |
| 5f3b90ad45 |
| 2463af7685 |
| 86bb312d6d |
| 964b99ea40 |
| 51a1693789 |
| ca4a735692 |
| 2140f48919 |
| 4be01d3964 |
| b45e3476c8 |
| d591956baa |
| 6011a08cdf |
| 83af4ab0d7 |
| 5594d017e2 |
| 8f9c5eeb48 |
| 89e21302e3 |
| cb4ceefada |
| c4cc5cecbf |
| cc8f30eba2 |
| 6740afd6a0 |
| 63c0f4aa8f |
| 8908dc138f |
| cd7624f2cb |
| 5d53225874 |
| 6af17450c6 |
| d85c27a728 |
| 344341b87f |
| 21224c8dae |
| 93b58efa0e |
| db08283368 |
| 0f4bf45cde |
| d7f934b7b2 |
| a4299dc917 |
| e6fd9b40f4 |
| f322035d4a |
| 74907a8183 |
| ec8f652bc8 |
| aa57ace742 |
| 512798366c |
| 9859c5485e |
| e54596f3e9 |
| c6e1b366b5 |
| 43e7f7ce93 |
| 97032b31ee |
| ba3714c860 |
| 14c8cf3f5b |
| 3025158d14 |
| fb13fd21ef |
| 68752000f0 |
| 7b60751e99 |
| 9890617098 |
| beca545951 |
| a9a68e7df3 |
| 0f78f07875 |
| 08545a29df |
| 9564ee30fe |
| 6806146450 |
| 5764586646 |
| aae1aec6ad |
| 91bdaa716c |
| 9a3a3c9c59 |
| a736412fbd |
| 85860087b6 |
| a19da4050c |
| c524eb16e5 |
.gitea/workflows/ci.yaml (new file, 23 lines)

```yaml
name: CI

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.11

      - name: Install dependencies
        run: |
          pip install --upgrade pip
          pip install -r requirements-dev.txt

      - name: Run tests
        run: pytest
```
.gitea/workflows/git-sync.yaml (new file, 40 lines)

```yaml
name: git-sync-with-mirror

on:
  push:
    branches: [ master ]
  workflow_dispatch:

jobs:
  git-sync:
    runs-on: ubuntu-latest

    steps:
      - name: git-sync
        env:
          git_sync_source_repo: git@git.fridu.us:heckyel/yt-local.git
          git_sync_destination_repo: ssh://git@c.fridu.us/software/yt-local.git
        if: env.git_sync_source_repo && env.git_sync_destination_repo
        uses: astounds/git-sync@v1
        with:
          source_repo: git@git.fridu.us:heckyel/yt-local.git
          source_branch: "master"
          destination_repo: ssh://git@c.fridu.us/software/yt-local.git
          destination_branch: "master"
          source_ssh_private_key: ${{ secrets.GIT_SYNC_SOURCE_SSH_PRIVATE_KEY }}
          destination_ssh_private_key: ${{ secrets.GIT_SYNC_DESTINATION_SSH_PRIVATE_KEY }}

      - name: git-sync-sourcehut
        env:
          git_sync_source_repo: git@git.fridu.us:heckyel/yt-local.git
          git_sync_destination_repo: git@git.sr.ht:~heckyel/yt-local
        if: env.git_sync_source_repo && env.git_sync_destination_repo
        uses: astounds/git-sync@v1
        with:
          source_repo: git@git.fridu.us:heckyel/yt-local.git
          source_branch: "master"
          destination_repo: git@git.sr.ht:~heckyel/yt-local
          destination_branch: "master"
          source_ssh_private_key: ${{ secrets.GIT_SYNC_SOURCE_SSH_PRIVATE_KEY }}
          destination_ssh_private_key: ${{ secrets.GIT_SYNC_DESTINATION_SSH_PRIVATE_KEY }}
        continue-on-error: true
```
README.md (23 changed lines)

````diff
@@ -1,5 +1,3 @@
-[](https://drone.hgit.ga/heckyel/yt-local)
-
 # yt-local
 
 Fork of [youtube-local](https://github.com/user234683/youtube-local)
@@ -24,7 +22,7 @@ The YouTube API is not used, so no keys or anything are needed. It uses the same
 * Local playlists: These solve the two problems with creating playlists on YouTube: (1) they're datamined and (2) videos frequently get deleted by YouTube and lost from the playlist, making it very difficult to find a reupload as the title of the deleted video is not displayed.
 * Themes: Light, Gray, and Dark
 * Subtitles
-* Easily download videos or their audio
+* Easily download videos or their audio. (Disabled by default)
 * No ads
 * View comments
 * JavaScript not required
@@ -56,7 +54,6 @@ The YouTube API is not used, so no keys or anything are needed. It uses the same
 - [ ] Import youtube playlist into a local playlist
 - [ ] Rearrange items of local playlist
 - [x] Video qualities other than 360p and 720p by muxing video and audio
-- [ ] Corrected .m4a downloads
 - [x] Indicate if comments are disabled
 - [x] Indicate how many comments a video has
 - [ ] Featured channels page
@@ -94,11 +91,11 @@ Firstly, if you wish to run this in portable mode, create the empty file "settin
 
 To run the program on windows, open `run.bat`. On GNU+Linux/MacOS, run `python3 server.py`.
 
-Access youtube URLs by prefixing them with `http://localhost:9010/`.
-For instance, `http://localhost:9010/https://www.youtube.com/watch?v=vBgulDeV2RU`
-You can use an addon such as Redirector ([Firefox](https://addons.mozilla.org/en-US/firefox/addon/redirector/)|[Chrome](https://chrome.google.com/webstore/detail/redirector/ocgpenflpmgnfapjedencafcfakcekcd)) to automatically redirect YouTube URLs to yt-local. I use the include pattern `^(https?://(?:[a-zA-Z0-9_-]*\.)?(?:youtube\.com|youtu\.be|youtube-nocookie\.com)/.*)` and redirect pattern `http://localhost:9010/$1` (Make sure you're using regular expression mode).
+Access youtube URLs by prefixing them with `http://localhost:9010/`, For instance, `http://localhost:9010/https://www.youtube.com/watch?v=vBgulDeV2RU`
+You can use an addon such as Redirector ([Firefox](https://addons.mozilla.org/en-US/firefox/addon/redirector/)|[Chrome](https://chrome.google.com/webstore/detail/redirector/ocgpenflpmgnfapjedencafcfakcekcd)) to automatically redirect YouTube URLs to yt-local. I use the include pattern `^(https?://(?:[a-zA-Z0-9_-]*\.)?(?:youtube\.com|youtu\.be|youtube-nocookie\.com)/.*)` and the redirect pattern `http://localhost:9010/$1` (Make sure you're using regular expression mode).
 
-If you want embeds on the web to also redirect to yt-local, make sure "Iframes" is checked under advanced options in your redirector rule. Check test `http://localhost:9010/youtube.com/embed/vBgulDeV2RU`
+If you want embeds on web to also redirect to yt-local, make sure "Iframes" is checked under advanced options in your redirector rule. Check test `http://localhost:9010/youtube.com/embed/vBgulDeV2RU`
 
 yt-local can be added as a search engine in firefox to make searching more convenient. See [here](https://support.mozilla.org/en-US/kb/add-or-remove-search-engine-firefox) for information on firefox search plugins.
 
@@ -114,7 +111,7 @@ If you don't want to waste system resources leaving the Tor Browser open in addi
 
 For Windows, to make standalone Tor run at startup, press Windows Key + R and type `shell:startup` to open the Startup folder. Create a new shortcut there. For the command of the shortcut, enter `"C:\[path-to-Tor-Browser-directory]\Tor\tor.exe" SOCKSPort 9150 ControlPort 9151`. You can then launch this shortcut to start it. Alternatively, if something isn't working, to see what's wrong, open `cmd.exe` and go to the directory `C:\[path-to-Tor-Browser-directory]\Tor`. Then run `tor SOCKSPort 9150 ControlPort 9151 | more`. The `more` part at the end is just to make sure any errors are displayed, to fix a bug in Windows cmd where tor doesn't display any output. You can stop tor in the task manager.
 
-For Debian/Ubuntu, you can `sudo apt install tor` to install the command line version of Tor, and then run `sudo systemctl start tor` to run it as a background service that will get started during boot as well. However, Tor on the command line uses the port 9050 by default (rather than the 9150 used by the Tor Browser). So you will need to change `Tor port` to 9050 and `Tor control port` to 9051 in the yt-local settings page. Additionally, you will need to enable the Tor control port by uncommenting the line `ControlPort 9051`, and setting `CookieAuthentication` to 0 in `/etc/tor/torrc`. If no Tor package is available for your distro, you can configure the `tor` binary located at `./Browser/TorBrowser/Tor/tor` inside the Tor Browser installation location to run at start time, or create a service to do it.
+For Debian/Ubuntu, you can `sudo apt install tor` to install the command line version of Tor, and then run `sudo systemctl start tor` to run it as a background service that will get started during boot as well. However, Tor on the command line uses the port `9050` by default (rather than the 9150 used by the Tor Browser). So you will need to change `Tor port` to 9050 and `Tor control port` to `9051` in yt-local settings page. Additionally, you will need to enable the Tor control port by uncommenting the line `ControlPort 9051`, and setting `CookieAuthentication` to 0 in `/etc/tor/torrc`. If no Tor package is available for your distro, you can configure the `tor` binary located at `./Browser/TorBrowser/Tor/tor` inside the Tor Browser installation location to run at start time, or create a service to do it.
 
 ### Tor video routing
 
@@ -144,11 +141,17 @@ Pull requests and issues are welcome
 
 For coding guidelines and an overview of the software architecture, see the [HACKING.md](docs/HACKING.md) file.
 
+## GPG public KEY
+
+```bash
+72CFB264DFC43F63E098F926E607CE7149F4D71C
+```
+
 ## Public instances
 
 yt-local is not made to work in public mode, however there is an instance of yt-local in public mode but with less features
 
 - <https://fast-gorge-89206.herokuapp.com>
 - <https://m.fridu.us/https://youtube.com>
 
 ## License
````
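The Redirector rule quoted in the README diff above is just a regular-expression rewrite. As a minimal illustration of what the browser addon does with those two patterns (Python used purely for demonstration; the port 9010 comes from the README):

```python
import re

# The include pattern from the README, verbatim.
INCLUDE = re.compile(
    r'^(https?://(?:[a-zA-Z0-9_-]*\.)?'
    r'(?:youtube\.com|youtu\.be|youtube-nocookie\.com)/.*)')

def to_yt_local(url):
    """Rewrite a YouTube URL to its yt-local equivalent, else return it unchanged."""
    match = INCLUDE.match(url)
    if match:
        # The redirect pattern http://localhost:9010/$1 prefixes the whole URL.
        return 'http://localhost:9010/' + match.group(1)
    return url

assert (to_yt_local('https://www.youtube.com/watch?v=vBgulDeV2RU')
        == 'http://localhost:9010/https://www.youtube.com/watch?v=vBgulDeV2RU')
```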
generate_release.py

```diff
@@ -1,7 +1,8 @@
 # Generate a windows release and a generated embedded distribution of python
-# Latest python version is the argument of the script
+# Latest python version is the argument of the script (or oldwin for
+# vista, 7 and 32-bit versions)
 # Requirements: 7z, git
-# wine 32-bit is required in order to build on Linux
+# wine is required in order to build on Linux
 
 import sys
 import urllib
@@ -12,22 +13,28 @@ import os
 import hashlib
 
 latest_version = sys.argv[1]
+if len(sys.argv) > 2:
+    bitness = sys.argv[2]
+else:
+    bitness = '64'
+
+if latest_version == 'oldwin':
+    bitness = '32'
+    latest_version = '3.7.9'
+    suffix = 'windows-vista-7-only'
+else:
+    suffix = 'windows'
 
 def check(code):
     if code != 0:
         raise Exception('Got nonzero exit code from command')
 
 
 def check_subp(x):
     if x.returncode != 0:
         raise Exception('Got nonzero exit code from command')
 
 
 def log(line):
     print('[generate_release.py] ' + line)
 
 
 # https://stackoverflow.com/questions/7833715/python-deleting-certain-file-extensions
 def remove_files_with_extensions(path, extensions):
     for root, dirs, files in os.walk(path):
@@ -35,7 +42,6 @@ def remove_files_with_extensions(path, extensions):
         if os.path.splitext(file)[1] in extensions:
             os.remove(os.path.join(root, file))
 
-
 def download_if_not_exists(file_name, url, sha256=None):
     if not os.path.exists('./' + file_name):
         log('Downloading ' + file_name + '..')
@@ -51,7 +57,6 @@ def download_if_not_exists(file_name, url, sha256=None):
     else:
         log('Using existing ' + file_name)
 
-
 def wine_run_shell(command):
     if os.name == 'posix':
         check(os.system('wine ' + command.replace('\\', '/')))
@@ -60,14 +65,12 @@ def wine_run_shell(command):
     else:
         raise Exception('Unsupported OS')
 
-
 def wine_run(command_parts):
     if os.name == 'posix':
-        command_parts = ['wine', ] + command_parts
+        command_parts = ['wine',] + command_parts
     if subprocess.run(command_parts).returncode != 0:
         raise Exception('Got nonzero exit code from command')
 
-
 # ---------- Get current release version, for later ----------
 log('Getting current release version')
 describe_result = subprocess.run(['git', 'describe', '--tags'], stdout=subprocess.PIPE)
@@ -98,19 +101,33 @@ if len(os.listdir('./yt-local')) == 0:
 # ----------- Generate embedded python distribution -----------
 os.environ['PYTHONDONTWRITEBYTECODE'] = '1'  # *.pyc files double the size of the distribution
 get_pip_url = 'https://bootstrap.pypa.io/get-pip.py'
-latest_dist_url = 'https://www.python.org/ftp/python/' + latest_version + '/python-' + latest_version + '-embed-win32.zip'
+latest_dist_url = 'https://www.python.org/ftp/python/' + latest_version + '/python-' + latest_version
+if bitness == '32':
+    latest_dist_url += '-embed-win32.zip'
+else:
+    latest_dist_url += '-embed-amd64.zip'
 
 # I've verified that all the dlls in the following are signed by Microsoft.
 # Using this because Microsoft only provides installers whose files can't be
 # extracted without a special tool.
-visual_c_runtime_url = 'https://github.com/yuempek/vc-archive/raw/master/archives/vc15_(14.10.25017.0)_2017_x86.7z'
-visual_c_runtime_sha256 = '2549eb4d2ce4cf3a87425ea01940f74368bf1cda378ef8a8a1f1a12ed59f1547'
+if bitness == '32':
+    visual_c_runtime_url = 'https://github.com/yuempek/vc-archive/raw/master/archives/vc15_(14.10.25017.0)_2017_x86.7z'
+    visual_c_runtime_sha256 = '2549eb4d2ce4cf3a87425ea01940f74368bf1cda378ef8a8a1f1a12ed59f1547'
+    visual_c_name = 'vc15_(14.10.25017.0)_2017_x86.7z'
+    visual_c_path_to_dlls = 'runtime_minimum/System'
+else:
+    visual_c_runtime_url = 'https://github.com/yuempek/vc-archive/raw/master/archives/vc15_(14.10.25017.0)_2017_x64.7z'
+    visual_c_runtime_sha256 = '4f00b824c37e1017a93fccbd5775e6ee54f824b6786f5730d257a87a3d9ce921'
+    visual_c_name = 'vc15_(14.10.25017.0)_2017_x64.7z'
+    visual_c_path_to_dlls = 'runtime_minimum/System64'
 
 download_if_not_exists('get-pip.py', get_pip_url)
-download_if_not_exists('python-dist-' + latest_version + '.zip', latest_dist_url)
-download_if_not_exists('vc15_(14.10.25017.0)_2017_x86.7z',
-                       visual_c_runtime_url,
-                       sha256=visual_c_runtime_sha256)
+
+python_dist_name = 'python-dist-' + latest_version + '-' + bitness + '.zip'
+
+download_if_not_exists(python_dist_name, latest_dist_url)
+download_if_not_exists(visual_c_name,
+                       visual_c_runtime_url, sha256=visual_c_runtime_sha256)
 
 if os.path.exists('./python'):
     log('Removing old python distribution')
@@ -119,7 +136,7 @@ if os.path.exists('./python'):
 
 log('Extracting python distribution')
 
-check(os.system(r'7z -y x -opython python-dist-' + latest_version + '.zip'))
+check(os.system(r'7z -y x -opython ' + python_dist_name))
 
 log('Executing get-pip.py')
 wine_run(['./python/python.exe', '-I', 'get-pip.py'])
@@ -183,7 +200,7 @@ with open('./python/python3' + major_release + '._pth', 'a', encoding='utf-8') a
     f.write('..\n')'''
 
 log('Inserting Microsoft C Runtime')
-check_subp(subprocess.run([r'7z', '-y', 'e', '-opython', 'vc15_(14.10.25017.0)_2017_x86.7z', 'runtime_minimum/System']))
+check_subp(subprocess.run([r'7z', '-y', 'e', '-opython', visual_c_name, visual_c_path_to_dlls]))
 
 log('Installing dependencies')
 wine_run(['./python/python.exe', '-I', '-m', 'pip', 'install', '--no-compile', '-r', './requirements.txt'])
@@ -219,7 +236,7 @@ log('Copying python distribution into release folder')
 shutil.copytree(r'./python', r'./yt-local/python')
 
 # ----------- Create release zip -----------
-output_filename = 'yt-local-' + release_tag + '-windows.zip'
+output_filename = 'yt-local-' + release_tag + '-' + suffix + '.zip'
 if os.path.exists('./' + output_filename):
     log('Removing previous zipped release')
     os.remove('./' + output_filename)
```
requirements-dev.txt

```diff
@@ -1,28 +1,5 @@
-attrs==22.1.0
-Brotli==1.0.9
-cachetools==4.2.4
-click==8.0.4
-dataclasses==0.6
-defusedxml==0.7.1
-Flask==2.0.1
-gevent==21.12.0
-greenlet==1.1.2
-importlib-metadata==4.6.4
-iniconfig==1.1.1
-itsdangerous==2.0.1
-Jinja2==3.0.3
-MarkupSafe==2.0.1
-packaging==20.9
-pluggy>=0.13.1
-py==1.10.0
-pyparsing==2.4.7
-PySocks==1.7.1
-pytest==6.2.5
-stem==1.8.0
-toml==0.10.2
-typing-extensions==3.10.0.2
-urllib3==1.26.11
-Werkzeug==2.0.3
-zipp==3.5.1
-zope.event==4.5.0
-zope.interface==5.4.0
+# Include all production requirements
+-r requirements.txt
+
+# Development requirements
+pytest>=6.2.1
```
requirements.txt

```diff
@@ -1,20 +1,8 @@
-Brotli==1.0.9
-cachetools==4.2.4
-click==8.0.4
-dataclasses==0.6
-defusedxml==0.7.1
-Flask==2.0.1
-gevent==21.12.0
-greenlet==1.1.2
-importlib-metadata==4.6.4
-itsdangerous==2.0.1
-Jinja2==3.0.3
-MarkupSafe==2.0.1
-PySocks==1.7.1
-stem==1.8.0
-typing-extensions==3.10.0.2
-urllib3==1.26.11
-Werkzeug==2.0.3
-zipp==3.5.1
-zope.event==4.5.0
-zope.interface==5.4.0
+Flask>=1.0.3
+gevent>=1.2.2
+Brotli>=1.0.7
+PySocks>=1.6.8
+urllib3>=1.24.1
+defusedxml>=0.5.0
+cachetools>=4.0.0
+stem>=1.8.0
```
```diff
@@ -84,7 +84,7 @@ def proxy_site(env, start_response, video=False):
     else:
         response, cleanup_func = util.fetch_url_response(url, send_headers)
 
-    response_headers = response.getheaders()
+    response_headers = response.headers
     if isinstance(response_headers, urllib3._collections.HTTPHeaderDict):
         response_headers = response_headers.items()
     if video:
@@ -169,8 +169,8 @@ site_handlers = {
     'youtube-nocookie.com': yt_app,
     'youtu.be': youtu_be,
     'ytimg.com': proxy_site,
-    'yt3.ggpht.com': proxy_site,
-    'lh3.googleusercontent.com': proxy_site,
+    'ggpht.com': proxy_site,
+    'googleusercontent.com': proxy_site,
+    'sponsor.ajay.app': proxy_site,
     'googlevideo.com': proxy_video,
 }
```
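The `proxy_site` hunk swaps the deprecated `response.getheaders()` for `response.headers`, which, depending on the underlying connection, can be a urllib3 `HTTPHeaderDict` rather than the list of tuples downstream WSGI code expects; the existing `isinstance` check flattens it. A minimal standalone sketch of that normalization (illustrative only, not the project's code):

```python
import urllib3

def normalize_headers(response_headers):
    # HTTPHeaderDict is a mapping; flatten it into (name, value) tuples
    # so it can be passed straight to start_response().
    if isinstance(response_headers, urllib3._collections.HTTPHeaderDict):
        response_headers = list(response_headers.items())
    return response_headers
```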
settings.py (67 changed lines)

```diff
@@ -151,6 +151,13 @@ For security reasons, enabling this is not recommended.''',
         'category': 'interface',
     }),
 
+    ('autoplay_videos', {
+        'type': bool,
+        'default': False,
+        'comment': '',
+        'category': 'playback',
+    }),
+
     ('default_resolution', {
         'type': int,
         'default': 720,
@@ -200,12 +207,17 @@ For security reasons, enabling this is not recommended.''',
     }),
 
     ('prefer_uni_sources', {
-        'label': 'Prefer integrated sources',
-        'type': bool,
-        'default': False,
+        'label': 'Use integrated sources',
+        'type': int,
+        'default': 1,
         'comment': '',
+        'options': [
+            (0, 'Prefer not'),
+            (1, 'Prefer'),
+            (2, 'Always'),
+        ],
         'category': 'playback',
-        'description': 'If enabled and the default resolution is set to 360p or 720p, uses the unified (integrated) video files which contain audio and video, with buffering managed by the browser. If disabled, always uses the separate audio and video files through custom buffer management in av-merge via MediaSource.',
+        'description': 'If set to Prefer or Always and the default resolution is set to 360p or 720p, uses the unified (integrated) video files which contain audio and video, with buffering managed by the browser. If set to prefer not, uses the separate audio and video files through custom buffer management in av-merge via MediaSource unless they are unavailable.',
     }),
 
     ('use_video_player', {
@@ -220,6 +232,20 @@ For security reasons, enabling this is not recommended.''',
         'category': 'interface',
     }),
 
+    ('use_video_download', {
+        'type': int,
+        'default': 0,
+        'comment': '',
+        'options': [
+            (0, 'Disabled'),
+            (1, 'Enabled'),
+        ],
+        'category': 'interface',
+        'comment': '''If enabled, you may incur legal issues with RIAA. Disabled by default.
+More info: https://torrentfreak.com/riaa-thwarts-youts-attempt-to-declare-youtube-ripping-legal-221002/
+Archive: https://archive.ph/OZQbN''',
+    }),
+
     ('proxy_images', {
         'label': 'Route images',
         'type': bool,
@@ -284,11 +310,16 @@ For security reasons, enabling this is not recommended.''',
         'comment': '',
     }),
 
-    ('gather_googlevideo_domains', {
+    ('include_shorts_in_subscriptions', {
         'type': bool,
-        'default': False,
-        'comment': '''Developer use to debug 403s''',
-        'hidden': True,
+        'default': 0,
+        'comment': '',
+    }),
+
+    ('include_shorts_in_channel', {
+        'type': bool,
+        'default': 1,
+        'comment': '',
     }),
 
     ('debugging_save_responses', {
@@ -300,7 +331,7 @@ For security reasons, enabling this is not recommended.''',
 
     ('settings_version', {
         'type': int,
-        'default': 4,
+        'default': 6,
         'comment': '''Do not change, remove, or comment out this value, or else your settings may be lost or corrupted''',
         'hidden': True,
     }),
@@ -373,10 +404,28 @@ def upgrade_to_4(settings_dict):
     return new_settings
 
 
+def upgrade_to_5(settings_dict):
+    new_settings = settings_dict.copy()
+    if 'prefer_uni_sources' in settings_dict:
+        new_settings['prefer_uni_sources'] = int(settings_dict['prefer_uni_sources'])
+    new_settings['settings_version'] = 5
+    return new_settings
+
+
+def upgrade_to_6(settings_dict):
+    new_settings = settings_dict.copy()
+    if 'gather_googlevideo_domains' in new_settings:
+        del new_settings['gather_googlevideo_domains']
+    new_settings['settings_version'] = 6
+    return new_settings
+
+
 upgrade_functions = {
     1: upgrade_to_2,
     2: upgrade_to_3,
     3: upgrade_to_4,
+    4: upgrade_to_5,
+    5: upgrade_to_6,
 }
```
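The new `upgrade_to_5`/`upgrade_to_6` entries slot into the existing `upgrade_functions` table: each step rewrites one schema version and bumps `settings_version`, so old settings files are migrated one version at a time until they reach the new default of 6. A minimal sketch of how such a chain is typically driven (this driver loop is illustrative, not copied from settings.py):

```python
def upgrade_settings(settings_dict, upgrade_functions, target_version=6):
    # Walk the chain one step at a time, e.g. 4 -> 5 -> 6. Each upgrade_to_N
    # sets settings_version itself, so the loop always makes progress.
    while settings_dict.get('settings_version', 1) < target_version:
        version = settings_dict.get('settings_version', 1)
        settings_dict = upgrade_functions[version](settings_dict)
    return settings_dict
```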
```diff
@@ -54,7 +54,10 @@ def commatize(num):
     if num is None:
         return ''
-    if isinstance(num, str):
-        num = int(num)
+    try:
+        num = int(num)
+    except ValueError:
+        return num
     return '{:,}'.format(num)
 
 
@@ -115,7 +118,18 @@ def error_page(e):
             error_message=exc_info()[1].error_message,
             slim=slim
         ), 502)
-        return flask.render_template('error.html', traceback=traceback.format_exc(), slim=slim), 500
+    elif (exc_info()[0] == util.FetchError
+          and exc_info()[1].code == '404'
+    ):
+        error_message = ('Error: The page you are looking for isn\'t here.')
+        return flask.render_template('error.html',
+                                     error_code=exc_info()[1].code,
+                                     error_message=error_message,
+                                     slim=slim), 404
+    return flask.render_template('error.html', traceback=traceback.format_exc(),
+                                 error_code=exc_info()[1].code,
+                                 slim=slim), 500
+    # return flask.render_template('error.html', traceback=traceback.format_exc(), slim=slim), 500
 
 
 font_choices = {
```
```diff
@@ -1,6 +1,8 @@
 import base64
-from youtube import util, yt_data_extract, local_playlist, subscriptions
+from youtube import (util, yt_data_extract, local_playlist, subscriptions,
+                     playlist)
 from youtube import yt_app
 import settings
 
 import urllib
 import json
@@ -31,6 +33,132 @@ headers_mobile = (
 real_cookie = (('Cookie', 'VISITOR_INFO1_LIVE=8XihrAcN1l4'),)
 generic_cookie = (('Cookie', 'VISITOR_INFO1_LIVE=ST1Ti53r4fU'),)
 
+
+# added an extra nesting under the 2nd base64 compared to v4
+# added tab support
+# changed offset field to uint id 1
+def channel_ctoken_v5(channel_id, page, sort, tab, view=1):
+    new_sort = (2 if int(sort) == 1 else 1)
+    offset = 30*(int(page) - 1)
+    if tab == 'videos':
+        tab = 15
+    elif tab == 'shorts':
+        tab = 10
+    elif tab == 'streams':
+        tab = 14
+    pointless_nest = proto.string(80226972,
+        proto.string(2, channel_id)
+        + proto.string(3,
+            proto.percent_b64encode(
+                proto.string(110,
+                    proto.string(3,
+                        proto.string(tab,
+                            proto.string(1,
+                                proto.string(1,
+                                    proto.unpadded_b64encode(
+                                        proto.string(1,
+                                            proto.string(1,
+                                                proto.unpadded_b64encode(
+                                                    proto.string(2,
+                                                        b"ST:"
+                                                        + proto.unpadded_b64encode(
+                                                            proto.uint(1, offset)
+                                                        )
+                                                    )
+                                                )
+                                            )
+                                        )
+                                    )
+                                )
+                                # targetId, just needs to be present but
+                                # doesn't need to be correct
+                                + proto.string(2, "63faaff0-0000-23fe-80f0-582429d11c38")
+                            )
+                            # 1 - newest, 2 - popular
+                            + proto.uint(3, new_sort)
+                        )
+                    )
+                )
+            )
+        )
+    )
+
+    return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
+
+
+def channel_about_ctoken(channel_id):
+    return proto.make_protobuf(
+        ('base64p',
+         [
+             [2, 80226972,
+              [
+                  [2, 2, channel_id],
+                  [2, 3,
+                   ('base64p',
+                    [
+                        [2, 110,
+                         [
+                             [2, 3,
+                              [
+                                  [2, 19,
+                                   [
+                                       [2, 1, b'66b0e9e9-0000-2820-9589-582429a83980'],
+                                   ]
+                                  ],
+                              ]
+                             ],
+                         ]
+                        ],
+                    ]
+                   )
+                  ],
+              ]
+             ],
+         ]
+        )
+    )
+
+
+# https://github.com/user234683/youtube-local/issues/151
+def channel_ctoken_v4(channel_id, page, sort, tab, view=1):
+    new_sort = (2 if int(sort) == 1 else 1)
+    offset = str(30*(int(page) - 1))
+    pointless_nest = proto.string(80226972,
+        proto.string(2, channel_id)
+        + proto.string(3,
+            proto.percent_b64encode(
+                proto.string(110,
+                    proto.string(3,
+                        proto.string(15,
+                            proto.string(1,
+                                proto.string(1,
+                                    proto.unpadded_b64encode(
+                                        proto.string(1,
+                                            proto.unpadded_b64encode(
+                                                proto.string(2,
+                                                    b"ST:"
+                                                    + proto.unpadded_b64encode(
+                                                        proto.string(2, offset)
+                                                    )
+                                                )
+                                            )
+                                        )
+                                    )
+                                )
+                                # targetId, just needs to be present but
+                                # doesn't need to be correct
+                                + proto.string(2, "63faaff0-0000-23fe-80f0-582429d11c38")
+                            )
+                            # 1 - newest, 2 - popular
+                            + proto.uint(3, new_sort)
+                        )
+                    )
+                )
+            )
+        )
+    )
+
+    return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
+
 # SORT:
 # videos:
 #     Popular - 1
@@ -75,15 +203,15 @@ def channel_ctoken_v2(channel_id, page, sort, tab, view=1):
         2: 17254859483345278706,
         1: 16570086088270825023,
     }[int(sort)]
-    page_token = proto.string(61, proto.unpadded_b64encode(
-        proto.string(1, proto.uint(1, schema_number) + proto.string(
-            2,
-            proto.string(1, proto.unpadded_b64encode(proto.uint(1, offset)))
-        ))))
+    page_token = proto.string(61, proto.unpadded_b64encode(proto.string(1,
+        proto.uint(1, schema_number) + proto.string(2,
+            proto.string(1, proto.unpadded_b64encode(proto.uint(1,offset)))
+        )
+    )))
 
     tab = proto.string(2, tab)
     sort = proto.uint(3, int(sort))
-    # page = proto.string(15, str(page) )
+    #page = proto.string(15, str(page))
 
     shelf_view = proto.uint(4, 0)
     view = proto.uint(6, int(view))
@@ -118,8 +246,12 @@ def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
     message = 'Got channel tab' if print_status else None
 
     if not ctoken:
-        ctoken = channel_ctoken_v3(channel_id, page, sort, tab, view)
+        if tab in ('videos', 'shorts', 'streams'):
+            ctoken = channel_ctoken_v5(channel_id, page, sort, tab, view)
+        else:
+            ctoken = channel_ctoken_v3(channel_id, page, sort, tab, view)
         ctoken = ctoken.replace('=', '%3D')
 
     # Not sure what the purpose of the key is or whether it will change
     # For now it seems to be constant for the API endpoint, not dependent
     # on the browsing session or channel
@@ -132,7 +264,7 @@ def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
             'hl': 'en',
             'gl': 'US',
             'clientName': 'WEB',
-            'clientVersion': '2.20180830',
+            'clientVersion': '2.20240327.00.00',
         },
     },
     'continuation': ctoken,
@@ -147,7 +279,8 @@ def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
 
 
 # cache entries expire after 30 minutes
-@cachetools.func.ttl_cache(maxsize=128, ttl=30*60)
+number_of_videos_cache = cachetools.TTLCache(128, 30*60)
+@cachetools.cached(number_of_videos_cache)
 def get_number_of_videos_channel(channel_id):
     if channel_id is None:
         return 1000
@@ -159,7 +292,7 @@ def get_number_of_videos_channel(channel_id):
     try:
         response = util.fetch_url(url, headers_mobile,
             debug_name='number_of_videos', report_text='Got number of videos')
-    except urllib.error.HTTPError as e:
+    except (urllib.error.HTTPError, util.FetchError) as e:
         traceback.print_exc()
         print("Couldn't retrieve number of videos")
         return 1000
@@ -172,18 +305,20 @@ def get_number_of_videos_channel(channel_id):
         return int(match.group(1).replace(',',''))
     else:
         return 0
+
+
+def set_cached_number_of_videos(channel_id, num_videos):
+    @cachetools.cached(number_of_videos_cache)
+    def dummy_func_using_same_cache(channel_id):
+        return num_videos
+    dummy_func_using_same_cache(channel_id)
 
 
 channel_id_re = re.compile(r'videos\.xml\?channel_id=([a-zA-Z0-9_-]{24})"')
 
 
 @cachetools.func.lru_cache(maxsize=128)
 def get_channel_id(base_url):
     # method that gives the smallest possible response at ~4 kb
     # needs to be as fast as possible
     base_url = base_url.replace('https://www', 'https://m')  # avoid redirect
-    response = util.fetch_url(
-        base_url + '/about?pbj=1', headers_mobile,
+    response = util.fetch_url(base_url + '/about?pbj=1', headers_mobile,
         debug_name='get_channel_id', report_text='Got channel id').decode('utf-8')
     match = channel_id_re.search(response)
     if match:
@@ -191,6 +326,31 @@ def get_channel_id(base_url):
     return None
 
 
+metadata_cache = cachetools.LRUCache(128)
+@cachetools.cached(metadata_cache)
+def get_metadata(channel_id):
+    base_url = 'https://www.youtube.com/channel/' + channel_id
+    polymer_json = util.fetch_url(base_url + '/about?pbj=1',
+                                  headers_desktop,
+                                  debug_name='gen_channel_about',
+                                  report_text='Retrieved channel metadata')
+    info = yt_data_extract.extract_channel_info(json.loads(polymer_json),
+                                                'about',
+                                                continuation=False)
+    return extract_metadata_for_caching(info)
+def set_cached_metadata(channel_id, metadata):
+    @cachetools.cached(metadata_cache)
+    def dummy_func_using_same_cache(channel_id):
+        return metadata
+    dummy_func_using_same_cache(channel_id)
+def extract_metadata_for_caching(channel_info):
+    metadata = {}
+    for key in ('approx_subscriber_count', 'short_description', 'channel_name',
+                'avatar'):
+        metadata[key] = channel_info[key]
+    return metadata
+
+
 def get_number_of_videos_general(base_url):
     return get_number_of_videos_channel(get_channel_id(base_url))
 
@@ -211,7 +371,7 @@ def get_channel_search_json(channel_id, query, page):
             'hl': 'en',
             'gl': 'US',
             'clientName': 'WEB',
-            'clientVersion': '2.20180830',
+            'clientVersion': '2.20240327.00.00',
         },
     },
     'continuation': ctoken,
@@ -229,19 +389,20 @@ def post_process_channel_info(info):
     info['avatar'] = util.prefix_url(info['avatar'])
     info['channel_url'] = util.prefix_url(info['channel_url'])
     for item in info['items']:
+        item['thumbnail'] = "https://i.ytimg.com/vi/{}/hqdefault.jpg".format(item['id'])
         util.prefix_urls(item)
         util.add_extra_html_info(item)
     if info['current_tab'] == 'about':
         for i, (text, url) in enumerate(info['links']):
-            if util.YOUTUBE_URL_RE.fullmatch(url):
+            if isinstance(url, str) and util.YOUTUBE_URL_RE.fullmatch(url):
                 info['links'][i] = (text, util.prefix_url(url))
 
 
-def get_channel_first_page(base_url=None, channel_id=None):
+def get_channel_first_page(base_url=None, tab='videos', channel_id=None):
     if channel_id:
         base_url = 'https://www.youtube.com/channel/' + channel_id
-    return util.fetch_url(base_url + '/videos?pbj=1&view=0', headers_desktop,
-        debug_name='gen_channel_videos')
+    return util.fetch_url(base_url + '/' + tab + '?pbj=1&view=0',
+        headers_desktop, debug_name='gen_channel_' + tab)
 
 
 playlist_sort_codes = {'2': "da", '3': "dd", '4': "lad"}
@@ -250,63 +411,159 @@ playlist_sort_codes = {'2': "da", '3': "dd", '4': "lad"}
 # youtube.com/user/[username]/[tab]
 # youtube.com/c/[custom]/[tab]
 # youtube.com/[custom]/[tab]
 def get_channel_page_general_url(base_url, tab, request, channel_id=None):
     page_number = int(request.args.get('page', 1))
-    sort = request.args.get('sort', '3')
+    # sort 1: views
+    # sort 2: oldest
+    # sort 3: newest
+    # sort 4: newest - no shorts (Just a kludge on our end, not internal to yt)
+    default_sort = '3' if settings.include_shorts_in_channel else '4'
+    sort = request.args.get('sort', default_sort)
     view = request.args.get('view', '1')
     query = request.args.get('query', '')
     ctoken = request.args.get('ctoken', '')
-    default_params = (page_number == 1 and sort == '3' and view == '1')
+    include_shorts = (sort != '4')
+    default_params = (page_number == 1 and sort in ('3', '4') and view == '1')
+    continuation = bool(ctoken)  # whether or not we're using a continuation
+    page_size = 30
+    try_channel_api = True
+    polymer_json = None
 
-    if tab == 'videos' and channel_id and not default_params:
-        tasks = (
-            gevent.spawn(get_number_of_videos_channel, channel_id),
-            gevent.spawn(get_channel_tab, channel_id, page_number, sort,
-                         'videos', view, ctoken)
-        )
-        gevent.joinall(tasks)
-        util.check_gevent_exceptions(*tasks)
-        number_of_videos, polymer_json = tasks[0].value, tasks[1].value
-    elif tab == 'videos':
+    # Use the special UU playlist which contains all the channel's uploads
+    if tab == 'videos' and sort in ('3', '4'):
+        if not channel_id:
+            channel_id = get_channel_id(base_url)
+        if page_number == 1 and include_shorts:
+            tasks = (
+                gevent.spawn(playlist.playlist_first_page,
+                             'UU' + channel_id[2:],
+                             report_text='Retrieved channel videos'),
+                gevent.spawn(get_metadata, channel_id),
+            )
+            gevent.joinall(tasks)
+            util.check_gevent_exceptions(*tasks)
+
+            # Ignore the metadata for now, it is cached and will be
+            # recalled later
+            pl_json = tasks[0].value
+            pl_info = yt_data_extract.extract_playlist_info(pl_json)
+            number_of_videos = pl_info['metadata']['video_count']
+            if number_of_videos is None:
+                number_of_videos = 1000
+            else:
+                set_cached_number_of_videos(channel_id, number_of_videos)
+        else:
+            tasks = (
+                gevent.spawn(playlist.get_videos, 'UU' + channel_id[2:],
+                             page_number, include_shorts=include_shorts),
+                gevent.spawn(get_metadata, channel_id),
+                gevent.spawn(get_number_of_videos_channel, channel_id),
+            )
+            gevent.joinall(tasks)
+            util.check_gevent_exceptions(*tasks)
+
+            pl_json = tasks[0].value
+            pl_info = yt_data_extract.extract_playlist_info(pl_json)
+            number_of_videos = tasks[2].value
+
+        info = pl_info
+        info['channel_id'] = channel_id
+        info['current_tab'] = 'videos'
+        if info['items']:  # Success
+            page_size = 100
+            try_channel_api = False
+        else:  # Try the first-page method next
+            try_channel_api = True
+
+    # Use the regular channel API
+    if tab in ('shorts', 'streams') or (tab=='videos' and try_channel_api):
         if channel_id:
             num_videos_call = (get_number_of_videos_channel, channel_id)
         else:
             num_videos_call = (get_number_of_videos_general, base_url)
+
+        # Use ctoken method, which YouTube changes all the time
+        if channel_id and not default_params:
+            if sort == 4:
+                _sort = 3
+            else:
+                _sort = sort
+            page_call = (get_channel_tab, channel_id, page_number, _sort,
+                         tab, view, ctoken)
+        # Use the first-page method, which won't break
+        else:
+            page_call = (get_channel_first_page, base_url, tab)
+
         tasks = (
             gevent.spawn(*num_videos_call),
-            gevent.spawn(get_channel_first_page, base_url=base_url),
+            gevent.spawn(*page_call),
         )
         gevent.joinall(tasks)
         util.check_gevent_exceptions(*tasks)
         number_of_videos, polymer_json = tasks[0].value, tasks[1].value
+
     elif tab == 'about':
-        polymer_json = util.fetch_url(base_url + '/about?pbj=1', headers_desktop, debug_name='gen_channel_about')
+        # polymer_json = util.fetch_url(base_url + '/about?pbj=1', headers_desktop, debug_name='gen_channel_about')
+        channel_id = get_channel_id(base_url)
+        ctoken = channel_about_ctoken(channel_id)
+        polymer_json = util.call_youtube_api('web', 'browse', {
+            'continuation': ctoken,
+        })
+        continuation=True
-    elif tab == 'playlists':
+    elif tab == 'playlists' and page_number == 1:
         polymer_json = util.fetch_url(base_url+ '/playlists?pbj=1&view=1&sort=' + playlist_sort_codes[sort], headers_desktop, debug_name='gen_channel_playlists')
+    elif tab == 'playlists':
+        polymer_json = get_channel_tab(channel_id, page_number, sort,
+                                       'playlists', view)
+        continuation = True
     elif tab == 'search' and channel_id:
         polymer_json = get_channel_search_json(channel_id, query, page_number)
     elif tab == 'search':
         url = base_url + '/search?pbj=1&query=' + urllib.parse.quote(query, safe='')
         polymer_json = util.fetch_url(url, headers_desktop, debug_name='gen_channel_search')
+    elif tab == 'videos':
+        pass
     else:
         flask.abort(404, 'Unknown channel tab: ' + tab)
 
-    info = yt_data_extract.extract_channel_info(json.loads(polymer_json), tab)
+    if polymer_json is not None:
+        info = yt_data_extract.extract_channel_info(
+            json.loads(polymer_json), tab, continuation=continuation
+        )
 
     if info['error'] is not None:
         return flask.render_template('error.html', error_message=info['error'])
 
-    post_process_channel_info(info)
-    if tab == 'videos':
+    if channel_id:
+        info['channel_url'] = 'https://www.youtube.com/channel/' + channel_id
+        info['channel_id'] = channel_id
+    else:
+        channel_id = info['channel_id']
+
+    # Will have microformat present, cache metadata while we have it
+    if channel_id and default_params and tab not in ('videos', 'about'):
+        metadata = extract_metadata_for_caching(info)
+        set_cached_metadata(channel_id, metadata)
+    # Otherwise, populate with our (hopefully cached) metadata
+    elif channel_id and info.get('channel_name') is None:
+        metadata = get_metadata(channel_id)
+        for key, value in metadata.items():
+            yt_data_extract.conservative_update(info, key, value)
+        # need to add this metadata to the videos/playlists
+        additional_info = {
+            'author': info['channel_name'],
+            'author_id': info['channel_id'],
+            'author_url': info['channel_url'],
+        }
+        for item in info['items']:
+            item.update(additional_info)
+
+    if tab in ('videos', 'shorts', 'streams'):
         info['number_of_videos'] = number_of_videos
-        info['number_of_pages'] = math.ceil(number_of_videos/30)
+        info['number_of_pages'] = math.ceil(number_of_videos/page_size)
         info['header_playlist_names'] = local_playlist.get_playlist_names()
-    if tab in ('videos', 'playlists'):
+    if tab in ('videos', 'shorts', 'streams', 'playlists'):
         info['current_sort'] = sort
     elif tab == 'search':
         info['search_box_value'] = query
@@ -315,9 +572,10 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
     info['page_number'] = page_number
     info['subscribed'] = subscriptions.is_subscribed(info['channel_id'])
 
-    return flask.render_template(
-        'channel.html',
-        parameters_dictionary=request.args,
+    post_process_channel_info(info)
+
+    return flask.render_template('channel.html',
+        parameters_dictionary = request.args,
         **info
     )
```
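The new `set_cached_number_of_videos` and `set_cached_metadata` helpers rely on a quirk of cachetools worth spelling out: `cachetools.cached` keys entries on the call arguments alone (via `hashkey`), not on the decorated function, so decorating a throwaway closure that returns a precomputed value and calling it once writes that value into the shared cache under the same key the real getter would use. A minimal self-contained sketch of the pattern (names here are illustrative, not from channel code):

```python
import cachetools

cache = cachetools.LRUCache(maxsize=128)

@cachetools.cached(cache)
def get_value(key):
    # Stand-in for a slow fetch (hypothetical).
    return 'fetched:' + key

def set_cached_value(key, value):
    # Shares `cache` and produces the same hashkey(key), so this call
    # seeds the entry get_value would otherwise have to compute.
    @cachetools.cached(cache)
    def dummy_func_using_same_cache(key):
        return value
    dummy_func_using_same_cache(key)

set_cached_value('abc', 'seeded')
assert get_value('abc') == 'seeded'  # cache hit; the fetch body never runs
```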
```diff
@@ -78,7 +78,7 @@ def single_comment_ctoken(video_id, comment_id):
 
 def post_process_comments_info(comments_info):
     for comment in comments_info['comments']:
-        comment['author'] = strip_non_ascii(comment['author'])
+        comment['author'] = strip_non_ascii(comment['author']) if comment.get('author') else ""
         comment['author_url'] = concat_or_none(
             '/', comment['author_url'])
         comment['author_avatar'] = concat_or_none(
@@ -97,7 +97,7 @@ def post_process_comments_info(comments_info):
         ctoken = comment['reply_ctoken']
         ctoken, err = proto.set_protobuf_value(
             ctoken,
-            'base64p', 6, 3, 9, value=250)
+            'base64p', 6, 3, 9, value=200)
         if err:
             print('Error setting ctoken value:')
             print(err)
@@ -127,7 +127,7 @@ def post_process_comments_info(comments_info):
         # change max_replies field to 250 in ctoken
         new_ctoken, err = proto.set_protobuf_value(
             ctoken,
-            'base64p', 6, 3, 9, value=250)
+            'base64p', 6, 3, 9, value=200)
         if err:
             print('Error setting ctoken value:')
             print(err)
@@ -150,7 +150,7 @@ def post_process_comments_info(comments_info):
         util.URL_ORIGIN, '/watch?v=', comments_info['video_id'])
     comments_info['video_thumbnail'] = concat_or_none(
         settings.img_prefix, 'https://i.ytimg.com/vi/',
-        comments_info['video_id'], '/mqdefault.jpg'
+        comments_info['video_id'], '/hqdefault.jpg'
     )
 
 
@@ -189,10 +189,10 @@ def video_comments(video_id, sort=0, offset=0, lc='', secret_key=''):
             comments_info['error'] += '\n\n' + e.error_message
             comments_info['error'] += '\n\nExit node IP address: %s' % e.ip
         else:
-            comments_info['error'] = 'YouTube blocked the request. IP address: %s' % e.ip
+            comments_info['error'] = 'YouTube blocked the request. Error: %s' % str(e)
 
     except Exception as e:
-        comments_info['error'] = 'YouTube blocked the request. IP address: %s' % e.ip
+        comments_info['error'] = 'YouTube blocked the request. Error: %s' % str(e)
 
     if comments_info.get('error'):
         print('Error retrieving comments for ' + str(video_id) + ':\n' +
```
```diff
@@ -11,17 +11,10 @@ import subprocess
 def app_version():
     def minimal_env_cmd(cmd):
         # make minimal environment
-        env = {}
-        for k in ['SYSTEMROOT', 'PATH']:
-            v = os.environ.get(k)
-            if v is not None:
-                env[k] = v
+        env = {k: os.environ[k] for k in ['SYSTEMROOT', 'PATH'] if k in os.environ}
+        env.update({'LANGUAGE': 'C', 'LANG': 'C', 'LC_ALL': 'C'})
 
-        env['LANGUAGE'] = 'C'
-        env['LANG'] = 'C'
-        env['LC_ALL'] = 'C'
-        out = subprocess.Popen(
-            cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
+        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
         return out
 
     subst_list = {
@@ -31,24 +24,21 @@ def app_version():
     }
 
-    if os.system("command -v git > /dev/null 2>&1") != 0:
-        subst_list
-    else:
-        if call(["git", "branch"], stderr=STDOUT,
-                stdout=open(os.devnull, 'w')) != 0:
-            subst_list
-        else:
-            # version
-            describe = minimal_env_cmd(["git", "describe", "--always"])
-            git_revision = describe.strip().decode('ascii')
-            # branch
-            branch = minimal_env_cmd(["git", "branch"])
-            git_branch = branch.strip().decode('ascii').replace('* ', '')
-            subst_list = {
-                "version": __version__,
-                "branch": git_branch,
-                "commit": git_revision
-            }
-    return subst_list
+    if call(["git", "branch"], stderr=STDOUT, stdout=open(os.devnull, 'w')) != 0:
+        return subst_list
+
+    describe = minimal_env_cmd(["git", "describe", "--tags", "--always"])
+    git_revision = describe.strip().decode('ascii')
+
+    branch = minimal_env_cmd(["git", "branch"])
+    git_branch = branch.strip().decode('ascii').replace('* ', '')
+
+    subst_list.update({
+        "branch": git_branch,
+        "commit": git_revision
+    })
+
+    return subst_list
```
```diff
@@ -12,12 +12,13 @@ from flask import request
 import flask
 
 
-def playlist_ctoken(playlist_id, offset):
+def playlist_ctoken(playlist_id, offset, include_shorts=True):
 
     offset = proto.uint(1, offset)
     # this is just obfuscation as far as I can tell. It doesn't even follow protobuf
     offset = b'PT:' + proto.unpadded_b64encode(offset)
     offset = proto.string(15, offset)
+    if not include_shorts:
+        offset += proto.string(104, proto.uint(2, 1))
 
     continuation_info = proto.string(3, proto.percent_b64encode(offset))
 
@@ -26,47 +27,46 @@ def playlist_ctoken(playlist_id, offset):
 
     return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
 
 # initial request types:
 #   polymer_json: https://m.youtube.com/playlist?list=PLv3TTBr1W_9tppikBxAE_G6qjWdBljBHJ&pbj=1&lact=0
 #   ajax json: https://m.youtube.com/playlist?list=PLv3TTBr1W_9tppikBxAE_G6qjWdBljBHJ&pbj=1&lact=0 with header X-YouTube-Client-Version: 1.20180418
 
 # continuation request types:
 #   polymer_json: https://m.youtube.com/playlist?&ctoken=[...]&pbj=1
 #   ajax json: https://m.youtube.com/playlist?action_continuation=1&ajax=1&ctoken=[...]
 
-headers_1 = (
-    ('Accept', '*/*'),
-    ('Accept-Language', 'en-US,en;q=0.5'),
-    ('X-YouTube-Client-Name', '2'),
-    ('X-YouTube-Client-Version', '2.20180614'),
-)
-
-def playlist_first_page(playlist_id, report_text="Retrieved playlist"):
-    url = 'https://m.youtube.com/playlist?list=' + playlist_id + '&pbj=1'
-    content = util.fetch_url(url, util.mobile_ua + headers_1, report_text=report_text, debug_name='playlist_first_page')
-    content = json.loads(content.decode('utf-8'))
+
+def playlist_first_page(playlist_id, report_text="Retrieved playlist",
+                        use_mobile=False):
+    if use_mobile:
+        url = 'https://m.youtube.com/playlist?list=' + playlist_id + '&pbj=1'
+        content = util.fetch_url(
+            url, util.mobile_xhr_headers,
+            report_text=report_text, debug_name='playlist_first_page'
+        )
+        content = json.loads(content.decode('utf-8'))
+    else:
+        url = 'https://www.youtube.com/playlist?list=' + playlist_id + '&pbj=1'
+        content = util.fetch_url(
+            url, util.desktop_xhr_headers,
+            report_text=report_text, debug_name='playlist_first_page'
+        )
+        content = json.loads(content.decode('utf-8'))
 
     return content
 
 
 #https://m.youtube.com/playlist?itct=CBMQybcCIhMIptj9xJaJ2wIV2JKcCh3Idwu-&ctoken=4qmFsgI2EiRWTFBMT3kwajlBdmxWWlB0bzZJa2pLZnB1MFNjeC0tN1BHVEMaDmVnWlFWRHBEUWxFJTNE&pbj=1
-def get_videos(playlist_id, page):
-
-    url = "https://m.youtube.com/playlist?ctoken=" + playlist_ctoken(playlist_id, (int(page)-1)*20) + "&pbj=1"
-    headers = {
-        'User-Agent': ' Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1',
-        'Accept': '*/*',
-        'Accept-Language': 'en-US,en;q=0.5',
-        'X-YouTube-Client-Name': '2',
-        'X-YouTube-Client-Version': '2.20180508',
-    }
+def get_videos(playlist_id, page, include_shorts=True, use_mobile=False,
+               report_text='Retrieved playlist'):
+    # mobile requests return 20 videos per page
+    if use_mobile:
+        page_size = 20
+        headers = util.mobile_xhr_headers
+    # desktop requests return 100 videos per page
+    else:
+        page_size = 100
+        headers = util.desktop_xhr_headers
+
+    url = "https://m.youtube.com/playlist?ctoken="
+    url += playlist_ctoken(playlist_id, (int(page)-1)*page_size,
+                           include_shorts=include_shorts)
+    url += "&pbj=1"
     content = util.fetch_url(
-        url, headers,
-        report_text="Retrieved playlist", debug_name='playlist_videos')
+        url, headers, report_text=report_text,
+        debug_name='playlist_videos'
+    )
 
     info = json.loads(content.decode('utf-8'))
     return info
@@ -85,7 +85,10 @@ def get_playlist_page():
         this_page_json = first_page_json
     else:
         tasks = (
-            gevent.spawn(playlist_first_page, playlist_id, report_text="Retrieved playlist info" ),
+            gevent.spawn(
+                playlist_first_page, playlist_id,
+                report_text="Retrieved playlist info", use_mobile=True
+            ),
             gevent.spawn(get_videos, playlist_id, page)
         )
         gevent.joinall(tasks)
@@ -104,7 +107,7 @@ def get_playlist_page():
         util.prefix_urls(item)
         util.add_extra_html_info(item)
         if 'id' in item:
-            item['thumbnail'] = settings.img_prefix + 'https://i.ytimg.com/vi/' + item['id'] + '/default.jpg'
+            item['thumbnail'] = f"{settings.img_prefix}https://i.ytimg.com/vi/{item['id']}/hqdefault.jpg"
 
         item['url'] += '&list=' + playlist_id
         if item['index']:
@@ -112,13 +115,13 @@ def get_playlist_page():
 
     video_count = yt_data_extract.deep_get(info, 'metadata', 'video_count')
     if video_count is None:
-        video_count = 40
+        video_count = 1000
 
     return flask.render_template(
         'playlist.html',
         header_playlist_names=local_playlist.get_playlist_names(),
         video_list=info.get('items', []),
-        num_pages=math.ceil(video_count/20),
+        num_pages=math.ceil(video_count/100),
        parameters_dictionary=request.args,
 
         **info['metadata']
```
```diff
@@ -141,6 +141,17 @@ base64_enc_funcs = {
 
 
 def _make_protobuf(data):
+    '''
+    Input: Recursive list of protobuf objects or base-64 encodings
+    Output: Protobuf bytestring
+    Each protobuf object takes the form [wire_type, field_number, field_data]
+    If a string protobuf has a list/tuple of length 2, this has the form
+        (base64 type, data)
+    The base64 types are
+    - base64 means a base64 encode with equals sign paddings
+    - base64s means a base64 encode without padding
+    - base64p means a url base64 encode with equals signs replaced with %3D
+    '''
     # must be dict mapping field_number to [wire_type, value]
    if isinstance(data, dict):
         new_data = []
```
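The docstring added above describes the nested-list format accepted through `make_protobuf`, the same format the new `channel_about_ctoken` feeds it. A small illustrative call, under the assumption that `make_protobuf` is the public wrapper around `_make_protobuf` (the field numbers mirror the channel code above; the channel ID is a placeholder, not a real one):

```python
from youtube import proto

# ('base64p', ...) wraps the nested message in url-safe base64 with '='
# padding rendered as %3D; [2, N, data] is wire type 2 (length-delimited)
# under field number N, per the docstring.
token = proto.make_protobuf(
    ('base64p',
     [
         [2, 80226972,
          [
              [2, 2, 'UCxxxxxxxxxxxxxxxxxxxxxx'],  # field 2: channel ID (placeholder)
          ]
         ],
     ]
    )
)
```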
```diff
@@ -64,6 +64,8 @@ def get_search_page():
     query = request.args.get('search_query') or request.args.get('query')
     if query is None:
         return flask.render_template('home.html', title='Search')
+    elif query.startswith('https://www.youtube.com') or query.startswith('https://www.youtu.be'):
+        return flask.redirect(f'/{query}')
 
     page = request.args.get("page", "1")
     autocorrect = int(request.args.get("autocorrect", "1"))
```
```diff
@@ -256,7 +256,8 @@ hr {
     padding-top: 6px;
     text-align: center;
     white-space: nowrap;
-    border: none;
+    border: 1px solid;
+    border-color: var(--button-border);
     border-radius: 0.2rem;
 }
```
```diff
@@ -1,20 +1,22 @@
 :root {
-    --background: #212121;
+    --background: #121113;
     --text: #FFFFFF;
-    --secondary-hover: #73828c;
-    --secondary-focus: #303030;
-    --secondary-inverse: #FFF;
+    --secondary-hover: #222222;
+    --secondary-focus: #121113;
+    --secondary-inverse: #FFFFFF;
     --primary-background: #242424;
-    --secondary-background: #424242;
-    --thumb-background: #757575;
+    --secondary-background: #222222;
+    --thumb-background: #222222;
     --link: #00B0FF;
     --link-visited: #40C4FF;
-    --border-bg: #FFFFFF;
-    --buttom: #dcdcdb;
-    --buttom-text: #415462;
-    --button-border: #91918c;
-    --buttom-hover: #BBB;
-    --search-text: #FFF;
-    --time-background: #212121;
-    --time-text: #FFF;
+    --border-bg: #222222;
+    --border-bg-settings: #000000;
+    --border-bg-license: #000000;
+    --buttom: #121113;
+    --buttom-text: #FFFFFF;
+    --button-border: #222222;
+    --buttom-hover: #222222;
+    --search-text: #FFFFFF;
+    --time-background: #121113;
+    --time-text: #FFFFFF;
 }
```
```diff
@@ -1,19 +1,21 @@
 :root {
-    --background: #2d3743;
+    --background: #2D3743;
     --text: #FFFFFF;
-    --secondary-hover: #73828c;
+    --secondary-hover: #73828C;
     --secondary-focus: rgba(115, 130, 140, 0.125);
     --secondary-inverse: #FFFFFF;
-    --primary-background: #2d3743;
+    --primary-background: #2D3743;
     --secondary-background: #102027;
     --thumb-background: #35404D;
-    --link: #22aaff;
-    --link-visited: #7755ff;
+    --link: #22AAFF;
+    --link-visited: #7755FF;
     --border-bg: #FFFFFF;
-    --buttom: #DCDCDC;
-    --buttom-text: #415462;
-    --button-border: #91918c;
-    --buttom-hover: #BBBBBB;
+    --border-bg-settings: #FFFFFF;
+    --border-bg-license: #FFFFFF;
+    --buttom: #2D3743;
+    --buttom-text: #FFFFFF;
+    --button-border: #102027;
+    --buttom-hover: #102027;
     --search-text: #FFFFFF;
     --time-background: #212121;
     --time-text: #FFFFFF;
```
@ -20,6 +20,29 @@
// TODO: Call abort to cancel in-progress appends?

// Buffer sizes for different systems
const BUFFER_CONFIG = {
    default: 50 * 10**6,      // 50 megabytes
    webOS: 20 * 10**6,        // 20 megabytes WebOS (LG)
    samsungTizen: 20 * 10**6, // 20 megabytes Samsung Tizen OS
    androidTV: 30 * 10**6,    // 30 megabytes Android TV
    desktop: 50 * 10**6,      // 50 megabytes PC/Mac
};

function detectSystem() {
    const userAgent = navigator.userAgent.toLowerCase();
    if (/webos|lg browser/i.test(userAgent)) {
        return "webOS";
    } else if (/tizen/i.test(userAgent)) {
        return "samsungTizen";
    } else if (/android tv|smart-tv/i.test(userAgent)) {
        return "androidTV";
    } else if (/firefox|chrome|safari|edge/i.test(userAgent)) {
        return "desktop";
    } else {
        return "default";
    }
}

function AVMerge(video, srcInfo, startTime){
    this.audioSource = null;
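A minimal Python rendition of the same user-agent sniffing, handy for sanity-checking the regexes outside a browser (the names and sample UA below are illustrative):

    import re

    BUFFER_BYTES = {
        'default': 50 * 10**6,
        'webOS': 20 * 10**6,
        'samsungTizen': 20 * 10**6,
        'androidTV': 30 * 10**6,
        'desktop': 50 * 10**6,
    }

    def detect_system(user_agent):
        ua = user_agent.lower()
        if re.search(r'webos|lg browser', ua):
            return 'webOS'
        if re.search(r'tizen', ua):
            return 'samsungTizen'
        if re.search(r'android tv|smart-tv', ua):
            return 'androidTV'
        if re.search(r'firefox|chrome|safari|edge', ua):
            return 'desktop'
        return 'default'

    # a Tizen TV user agent picks the 20 MB target
    assert BUFFER_BYTES[detect_system('Mozilla/5.0 (SMART-TV; Linux; Tizen 5.5)')] == 20 * 10**6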
@ -164,6 +187,8 @@ AVMerge.prototype.printDebuggingInfo = function() {
}

function Stream(avMerge, source, startTime, avRatio) {
    const selectedSystem = detectSystem();
    let baseBufferTarget = BUFFER_CONFIG[selectedSystem] || BUFFER_CONFIG.default;
    this.avMerge = avMerge;
    this.video = avMerge.video;
    this.url = source['url'];
@ -173,10 +198,11 @@ function Stream(avMerge, source, startTime, avRatio) {
    this.mimeCodec = source['mime_codec']
    this.streamType = source['acodec'] ? 'audio' : 'video';
    if (this.streamType == 'audio') {
        this.bufferTarget = avRatio*50*10**6;
        this.bufferTarget = avRatio * baseBufferTarget;
    } else {
        this.bufferTarget = 50*10**6; // 50 megabytes
        this.bufferTarget = baseBufferTarget;
    }
    console.info(`Detected system: ${selectedSystem}. Applying bufferTarget of ${this.bufferTarget} bytes to ${this.streamType}.`);

    this.initRange = source['init_range'];
    this.indexRange = source['index_range'];
@ -204,6 +230,8 @@ Stream.prototype.setup = async function(){
        this.url,
        this.initRange.start,
        this.indexRange.end,
        'Initialization+index segments',
    ).then(
        (buffer) => {
            let init_end = this.initRange.end - this.initRange.start + 1;
            let index_start = this.indexRange.start - this.initRange.start;
@ -211,22 +239,23 @@ Stream.prototype.setup = async function(){
            this.setupInitSegment(buffer.slice(0, init_end));
            this.setupSegmentIndex(buffer.slice(index_start, index_end));
        }
    )
    );
} else {
    // initialization data
    await fetchRange(
        this.url,
        this.initRange.start,
        this.initRange.end,
        this.setupInitSegment.bind(this),
    );
        'Initialization segment',
    ).then(this.setupInitSegment.bind(this));

    // sidx (segment index) table
    fetchRange(
        this.url,
        this.indexRange.start,
        this.indexRange.end,
        this.setupSegmentIndex.bind(this)
    );
        'Index segment',
    ).then(this.setupSegmentIndex.bind(this));
}
}
Stream.prototype.setupInitSegment = function(initSegment) {
@ -388,7 +417,7 @@ Stream.prototype.getSegmentIdx = function(videoTime) {
        }
        index = index + increment;
    }
    this.reportInfo('Could not find segment index for time', videoTime);
    this.reportError('Could not find segment index for time', videoTime);
    return 0;
}
Stream.prototype.checkBuffer = async function() {
@ -485,8 +514,8 @@ Stream.prototype.fetchSegment = function(segmentIdx) {
        this.url,
        entry.start,
        entry.end,
        this.appendSegment.bind(this, segmentIdx),
    );
        String(this.streamType) + ' segment ' + String(segmentIdx),
    ).then(this.appendSegment.bind(this, segmentIdx));
}
Stream.prototype.fetchSegmentIfNeeded = function(segmentIdx) {
    if (segmentIdx < 0 || segmentIdx >= this.sidx.entries.length){
@ -518,22 +547,56 @@ Stream.prototype.reportWarning = function(...args) {
Stream.prototype.reportError = function(...args) {
    reportError(String(this.streamType) + ':', ...args);
}
Stream.prototype.reportInfo = function(...args) {
    reportInfo(String(this.streamType) + ':', ...args);
}

// Utility functions

function fetchRange(url, start, end, cb) {
// https://gomakethings.com/promise-based-xhr/
// https://stackoverflow.com/a/30008115
// http://lofi.limo/blog/retry-xmlhttprequest-carefully
function fetchRange(url, start, end, debugInfo) {
    return new Promise((resolve, reject) => {
        let retryCount = 0;
        let xhr = new XMLHttpRequest();
        function onFailure(err, message, maxRetries=5){
            message = debugInfo + ': ' + message + ' - Err: ' + String(err);
            retryCount++;
            if (retryCount > maxRetries || xhr.status == 403){
                reportError('fetchRange error while fetching ' + message);
                reject(message);
                return;
            } else {
                reportWarning('Failed to fetch ' + message
                    + '. Attempting retry '
                    + String(retryCount) + '/' + String(maxRetries));
            }

            // Retry in 1 second, doubled for each next retry
            setTimeout(function(){
                xhr.open('get', url);
                xhr.send();
            }, 1000*Math.pow(2, (retryCount-1)));
        }
        xhr.open('get', url);
        xhr.timeout = 15000;
        xhr.responseType = 'arraybuffer';
        xhr.setRequestHeader('Range', 'bytes=' + start + '-' + end);
        xhr.onload = function() {
            //bytesFetched += end - start + 1;
            resolve(cb(xhr.response));
        xhr.onload = function (e) {
            if (xhr.status >= 200 && xhr.status < 300) {
                resolve(xhr.response);
            } else {
                onFailure(e,
                    'Status '
                    + String(xhr.status) + ' ' + String(xhr.statusText)
                );
            }
        };
        xhr.onerror = function (event) {
            onFailure(event, 'Network error');
        };
        xhr.ontimeout = function (event){
            xhr.timeout += 5000;
            onFailure(null, 'Timeout (15s)', 5);
        };
        xhr.send();
    });
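The retry policy above (up to 5 attempts, a 1 s base delay doubling each retry, and an immediate give-up on 403) can be sketched language-agnostically in Python; TransientError below stands in for whatever failure your fetcher raises:

    import time

    class TransientError(Exception):
        pass

    def fetch_with_retry(fetch, max_retries=5, base_delay=1.0):
        """Call fetch(); on transient failure wait 1s, 2s, 4s, ... then retry."""
        for attempt in range(max_retries + 1):
            try:
                return fetch()
            except TransientError:
                if attempt == max_retries:
                    raise
                time.sleep(base_delay * 2 ** attempt)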
@ -573,9 +636,6 @@ function addEvent(obj, eventName, func) {
    return new RegisteredEvent(obj, eventName, func);
}

function reportInfo(...args){
    console.info(...args);
}
function reportWarning(...args){
    console.warn(...args);
}
@ -1,77 +1,66 @@
(function main() {
    'use strict';

    let captionsActive;

    switch(true) {
        case data.settings.subtitles_mode == 2:
            captionsActive = true;
            break;
        case data.settings.subtitles_mode == 1 && data.has_manual_captions:
            captionsActive = true;
            break;
        default:
            captionsActive = false;
    // Captions
    let captionsActive = false;
    if (data.settings.subtitles_mode === 2 || (data.settings.subtitles_mode === 1 && data.has_manual_captions)) {
        captionsActive = true;
    }

    // AutoPlay
    let autoplayActive = data.settings.autoplay_videos || false;

    let qualityOptions = [];
    let qualityDefault;
    for (let src of data['uni_sources']) {
        qualityOptions.push(src.quality_string)

    for (let src of data.uni_sources) {
        qualityOptions.push(src.quality_string);
    }
    for (let src of data['pair_sources']) {
        qualityOptions.push(src.quality_string)

    for (let src of data.pair_sources) {
        qualityOptions.push(src.quality_string);
    }
    if (data['using_pair_sources'])
        qualityDefault = data['pair_sources'][data['pair_idx']].quality_string;
    else if (data['uni_sources'].length != 0)
        qualityDefault = data['uni_sources'][data['uni_idx']].quality_string;
    else

    if (data.using_pair_sources) {
        qualityDefault = data.pair_sources[data.pair_idx].quality_string;
    } else if (data.uni_sources.length !== 0) {
        qualityDefault = data.uni_sources[data.uni_idx].quality_string;
    } else {
        qualityDefault = 'None';
    }

    // Fix plyr refusing to work with qualities that are strings
    Object.defineProperty(Plyr.prototype, 'quality', {
        set: function(input) {
        set: function (input) {
            const config = this.config.quality;
            const options = this.options.quality;
            let quality;
            let quality = input;
            let updateStorage = true;

            if (!options.length) {
                return;
            }

            // removing this line:
            //let quality = [!is.empty(input) && Number(input), this.storage.get('quality'), config.selected, config.default].find(is.number);
            // replacing with:
            quality = input;
            let updateStorage = true;

            if (!options.includes(quality)) {
                // Plyr sets quality to null at startup, resulting in the erroneous
                // calling of this setter function with input = null, and the
                // commented out code below would set the quality to something
                // unrelated at startup. Comment out and just return.
                return;
                /*const value = closest(options, quality);
                this.debug.warn(`Unsupported quality option: ${quality}, using ${value} instead`);
                quality = value; // Don't update storage if quality is not supported
                updateStorage = false;*/
            } // Update config

            config.selected = quality; // Set quality

            this.media.quality = quality; // Save to storage

            if (updateStorage) {
                this.storage.set({
                    quality
                });
            }
        }

            // Update config
            config.selected = quality;

            // Set quality
            this.media.quality = quality;

            // Save to storage
            if (updateStorage) {
                this.storage.set({ quality });
            }
        },
    });
    const player = new Plyr(document.getElementById('js-video-player'), {
    const playerOptions = {
        // Learning about autoplay permission https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Permissions-Policy/autoplay#syntax
        autoplay: autoplayActive,
        disableContextMenu: false,
        captions: {
            active: captionsActive,
@ -89,29 +78,31 @@
            'settings',
            'pip',
            'airplay',
            'fullscreen'
            'fullscreen',
        ],
        iconUrl: "/youtube.com/static/modules/plyr/plyr.svg",
        blankVideo: "/youtube.com/static/modules/plyr/blank.webm",
        iconUrl: '/youtube.com/static/modules/plyr/plyr.svg',
        blankVideo: '/youtube.com/static/modules/plyr/blank.webm',
        debug: false,
        storage: {enabled: false},
        storage: { enabled: false },
        quality: {
            default: qualityDefault,
            options: qualityOptions,
            forced: true,
            onChange: function(quality) {
                if (quality == 'None') {return;}
            onChange: function (quality) {
                if (quality == 'None') {
                    return;
                }
                if (quality.includes('(integrated)')) {
                    for (let i=0; i < data['uni_sources'].length; i++) {
                        if (data['uni_sources'][i].quality_string == quality) {
                            changeQuality({'type': 'uni', 'index': i});
                    for (let i = 0; i < data.uni_sources.length; i++) {
                        if (data.uni_sources[i].quality_string == quality) {
                            changeQuality({ type: 'uni', index: i });
                            return;
                        }
                    }
                } else {
                    for (let i=0; i < data['pair_sources'].length; i++) {
                        if (data['pair_sources'][i].quality_string == quality) {
                            changeQuality({'type': 'pair', 'index': i});
                    for (let i = 0; i < data.pair_sources.length; i++) {
                        if (data.pair_sources[i].quality_string == quality) {
                            changeQuality({ type: 'pair', index: i });
                            return;
                        }
                    }
@ -119,12 +110,27 @@
            },
        },
        previewThumbnails: {
            enabled: storyboard_url != null,
            enabled: storyboard_url !== null,
            src: [storyboard_url],
        },
        settings: ['captions', 'quality', 'speed', 'loop'],
        tooltips: {
            controls: true,
        },
    }

    const player = new Plyr(document.getElementById('js-video-player'), playerOptions);

    // disable double click to fullscreen
    // https://github.com/sampotts/plyr/issues/1370#issuecomment-528966795
    player.eventListeners.forEach(function(eventListener) {
        if(eventListener.type === 'dblclick') {
            eventListener.element.removeEventListener(eventListener.type, eventListener.callback, eventListener.options);
        }
    });
}());

    // Add .started property, true after the playback has been started
    // Needed so controls won't be hidden before playback has started
    player.started = false;
    player.once('playing', function(){this.started = true});
})();
@ -5,8 +5,9 @@ function changeQuality(selection) {
    let videoPaused = video.paused;
    let videoSpeed = video.playbackRate;
    let srcInfo;
    if (avMerge)
    if (avMerge && typeof avMerge.close === 'function') {
        avMerge.close();
    }
    if (selection.type == 'uni'){
        srcInfo = data['uni_sources'][selection.index];
        video.src = srcInfo.url;
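The tightened guard avoids calling close() on a missing or incompatible object; the equivalent defensive idiom in Python would look like this (get_av_merge is hypothetical, for illustration only):

    av_merge = get_av_merge()  # may be None, or an object without close()
    close = getattr(av_merge, 'close', None)
    if callable(close):
        close()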
@ -181,7 +181,7 @@ label[for=options-toggle-cbox] {

.table td,.table th {
    padding: 10px 10px;
    border: 1px solid var(--secondary-background);
    border: 1px solid var(--border-bg-license);
    text-align: center;
}
@ -10,9 +10,11 @@
    --link: #212121;
    --link-visited: #808080;
    --border-bg: #212121;
    --buttom: #DCDCDC;
    --border-bg-settings: #91918C;
    --border-bg-license: #91918C;
    --buttom: #FFFFFF;
    --buttom-text: #212121;
    --button-border: #91918c;
    --button-border: #91918C;
    --buttom-hover: #BBBBBB;
    --search-text: #212121;
    --time-background: #212121;
77
youtube/static/modules/plyr/custom_plyr.css
Normal file
@ -0,0 +1,77 @@
/* Prevent this div from blocking right-click menu for video
   e.g. Firefox playback speed options */
.plyr__poster {
    display: none;
}

/* plyr fix */
.plyr:-moz-full-screen video {
    max-height: initial;
}

.plyr:-webkit-full-screen video {
    max-height: initial;
}

.plyr:-ms-fullscreen video {
    max-height: initial;
}

.plyr:fullscreen video {
    max-height: initial;
}

.plyr__preview-thumb__image-container {
    width: 158px;
    height: 90px;
}

.plyr__preview-thumb {
    bottom: 100%;
}

.plyr__menu__container [role="menu"],
.plyr__menu__container [role="menucaptions"] {
    /* Set vertical scroll */
    /* issue https://github.com/sampotts/plyr/issues/1420 */
    max-height: 320px;
    overflow-y: auto;
}

/*
 * Custom styles similar to youtube
 */
.plyr__controls {
    display: flex;
    justify-content: center;
}

.plyr__progress__container {
    position: absolute;
    bottom: 0;
    width: 100%;
    margin-bottom: -10px;
}

.plyr__controls .plyr__controls__item:first-child {
    margin-left: 0;
    margin-right: 0;
    z-index: 5;
}

.plyr__controls .plyr__controls__item.plyr__volume {
    margin-left: auto;
}

.plyr__controls .plyr__controls__item.plyr__progress__container {
    padding-left: 10px;
    padding-right: 10px;
}

.plyr__progress input[type="range"] {
    margin-bottom: 50px;
}

/*
 * End custom styles
 */
1
youtube/static/modules/plyr/plyr.min.js.map
Normal file
File diff suppressed because one or more lines are too long
@ -155,7 +155,7 @@ label[for=options-toggle-cbox] {
}

.settings-form > h2 {
    border-bottom: 2px solid var(--border-bg);
    border-bottom: 2px solid var(--border-bg-settings);
    padding-bottom: 0.5rem;
}
@ -21,21 +21,7 @@ img {
video {
    width: 100%;
    height: auto;
    max-height: 480px;
}

/* plyr fix */
.plyr:-moz-full-screen video {
    max-height: initial;
}
.plyr:-webkit-full-screen video {
    max-height: initial;
}
.plyr:-ms-fullscreen video {
    max-height: initial;
}
.plyr:fullscreen video {
    max-height: initial;
    max-height: calc(100vh/1.5);
}

a:link {
@ -142,6 +128,29 @@ header {
    background-color: var(--buttom-hover);
}

.live-url-choices {
    background-color: var(--thumb-background);
    margin: 1rem 0;
    padding: 1rem;
}

.playability-error {
    position: relative;
    box-sizing: border-box;
    height: 30vh;
    margin: 1rem 0;
}

.playability-error > span {
    display: flex;
    background-color: var(--thumb-background);
    height: 100%;
    object-fit: cover;
    justify-content: center;
    align-items: center;
    text-align: center;
}

.playlist {
    display: grid;
    grid-gap: 4px;
@ -636,6 +645,9 @@ figure.sc-video {
    max-height: 80vh;
    overflow-y: scroll;
}
.playability-error {
    height: 60vh;
}
.playlist {
    display: grid;
    grid-gap: 1px;
@ -1,4 +1,4 @@
from youtube import util, yt_data_extract, channel, local_playlist
from youtube import util, yt_data_extract, channel, local_playlist, playlist
from youtube import yt_app
import settings
@ -108,8 +108,7 @@ def _subscribe(channels):
    with connection as cursor:
        channel_ids_to_check = [channel[0] for channel in channels if not _is_subscribed(cursor, channel[0])]

        rows = ((channel_id, channel_name, 0, 0) for channel_id,
                channel_name in channels)
        rows = ((channel_id, channel_name, 0, 0) for channel_id, channel_name in channels)
        cursor.executemany('''INSERT OR IGNORE INTO subscribed_channels (yt_channel_id, channel_name, time_last_checked, next_check_time)
                              VALUES (?, ?, ?, ?)''', rows)
@ -236,8 +235,7 @@ def _get_channel_names(cursor, channel_ids):
    return result


def _channels_with_tag(cursor, tag, order=False, exclude_muted=False,
                       include_muted_status=False):
def _channels_with_tag(cursor, tag, order=False, exclude_muted=False, include_muted_status=False):
    ''' returns list of (channel_id, channel_name) '''

    statement = '''SELECT yt_channel_id, channel_name'''
@ -434,8 +432,10 @@ def autocheck_setting_changed(old_value, new_value):
        stop_autocheck_system()


settings.add_setting_changed_hook('autocheck_subscriptions',
                                  autocheck_setting_changed)
settings.add_setting_changed_hook(
    'autocheck_subscriptions',
    autocheck_setting_changed
)
if settings.autocheck_subscriptions:
    start_autocheck_system()
# ----------------------------
@ -463,7 +463,24 @@ def _get_atoma_feed(channel_id):

def _get_channel_videos_first_page(channel_id, channel_status_name):
    try:
        return channel.get_channel_first_page(channel_id=channel_id)
        # First try the playlist method
        pl_json = playlist.get_videos(
            'UU' + channel_id[2:],
            1,
            include_shorts=settings.include_shorts_in_subscriptions,
            report_text=None
        )
        pl_info = yt_data_extract.extract_playlist_info(pl_json)
        if pl_info.get('items'):
            pl_info['items'] = pl_info['items'][0:30]
            return pl_info

        # Try the channel api method
        channel_json = channel.get_channel_first_page(channel_id=channel_id)
        channel_info = yt_data_extract.extract_channel_info(
            json.loads(channel_json), 'videos'
        )
        return channel_info
    except util.FetchError as e:
        if e.code == '429' and settings.route_tor:
            error_message = ('Error checking channel ' + channel_status_name
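The 'UU' + channel_id[2:] expression relies on a YouTube ID convention: a channel's auto-generated uploads playlist reuses the channel ID's tail with the UC prefix swapped for UU. With a made-up ID:

    channel_id = 'UC0123456789abcdefghij'  # hypothetical channel ID
    uploads_playlist_id = 'UU' + channel_id[2:]
    assert uploads_playlist_id == 'UU0123456789abcdefghij'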
@ -497,7 +514,7 @@ def _get_upstream_videos(channel_id):
    )
    gevent.joinall(tasks)

    channel_tab, feed = tasks[0].value, tasks[1].value
    channel_info, feed = tasks[0].value, tasks[1].value

    # extract published times from atoma feed
    times_published = {}
@ -535,9 +552,8 @@ def _get_upstream_videos(channel_id):
    except defusedxml.ElementTree.ParseError:
        print('Failed to read atoma feed for ' + channel_status_name)

    if channel_tab is None:  # there was an error
    if channel_info is None:  # there was an error
        return
    channel_info = yt_data_extract.extract_channel_info(json.loads(channel_tab), 'videos')
    if channel_info['error']:
        print('Error checking channel ' + channel_status_name + ': ' + channel_info['error'])
        return
@ -552,14 +568,38 @@ def _get_upstream_videos(channel_id):
        if video_item['id'] in times_published:
            video_item['time_published'] = times_published[video_item['id']]
            video_item['is_time_published_exact'] = True
        else:
        elif video_item.get('time_published'):
            video_item['is_time_published_exact'] = False
            try:
                video_item['time_published'] = youtube_timestamp_to_posix(video_item['time_published']) - i  # subtract a few seconds off the videos so they will be in the right order
            except KeyError:
            except Exception:
                print(video_item)
        else:
            video_item['is_time_published_exact'] = False
            video_item['time_published'] = None
        video_item['channel_id'] = channel_id
    if len(videos) > 1:
        # Go back and fill in any videos that don't have a time published
        # using the time published of the surrounding ones
        for i in range(len(videos)-1):
            if (videos[i+1]['time_published'] is None
                    and videos[i]['time_published'] is not None
            ):
                videos[i+1]['time_published'] = videos[i]['time_published'] - 1
        for i in reversed(range(1, len(videos))):
            if (videos[i-1]['time_published'] is None
                    and videos[i]['time_published'] is not None
            ):
                videos[i-1]['time_published'] = videos[i]['time_published'] + 1
    # Special case: none of the videos have a time published.
    # In this case, make something up
    if videos and videos[0]['time_published'] is None:
        assert all(v['time_published'] is None for v in videos)
        now = time.time()
        for i in range(len(videos)):
            # 1 month between videos
            videos[i]['time_published'] = now - i*3600*24*30

    if len(videos) == 0:
        average_upload_period = 4*7*24*3600  # assume 1 month for channel with no videos
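The two-pass fill is easier to see in isolation: a forward pass propagates known times toward older entries (subtracting a second per step), then a backward pass fills anything still missing. A standalone sketch:

    def backfill_times(times):
        """times: list ordered newest-first; None entries are filled in place."""
        # forward pass: push a known time to the next (older) entry
        for i in range(len(times) - 1):
            if times[i+1] is None and times[i] is not None:
                times[i+1] = times[i] - 1
        # backward pass: push a known time to the previous (newer) entry
        for i in reversed(range(1, len(times))):
            if times[i-1] is None and times[i] is not None:
                times[i-1] = times[i] + 1
        return times

    assert backfill_times([None, 100, None]) == [101, 100, 99]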
@ -578,26 +618,31 @@ def _get_upstream_videos(channel_id):
    with open_database() as connection:
        with connection as cursor:

            # calculate how many new videos there are
            existing_vids = set(row[0] for row in cursor.execute(
                '''SELECT video_id
            # Get video ids and duration of existing vids so we
            # can see how many new ones there are and update
            # livestreams/premiers
            existing_vids = list(cursor.execute(
                '''SELECT video_id, duration
                   FROM videos
                   INNER JOIN subscribed_channels
                       ON videos.sql_channel_id = subscribed_channels.id
                   WHERE yt_channel_id=?
                   ORDER BY time_published DESC
                   LIMIT 30''', [channel_id]).fetchall())
            existing_vid_ids = set(row[0] for row in existing_vids)
            existing_durs = dict(existing_vids)

            # new videos the channel has uploaded since last time we checked
            number_of_new_videos = 0
            for video in videos:
                if video['id'] in existing_vids:
                if video['id'] in existing_vid_ids:
                    break
                number_of_new_videos += 1

            is_first_check = cursor.execute('''SELECT time_last_checked FROM subscribed_channels WHERE yt_channel_id=?''', [channel_id]).fetchone()[0] in (None, 0)
            time_videos_retrieved = int(time.time())
            rows = []
            update_rows = []
            for i, video_item in enumerate(videos):
                if (is_first_check
                        or number_of_new_videos > 6
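dict(existing_vids) works because each fetched row is a (video_id, duration) pair, so one query doubles as an ID set and an ID-to-duration map; with made-up rows:

    rows = [('abc123', '12:01'), ('def456', 'LIVE')]  # (video_id, duration)
    existing_vid_ids = set(row[0] for row in rows)
    existing_durs = dict(rows)
    assert existing_vid_ids == {'abc123', 'def456'}
    assert existing_durs['def456'] == 'LIVE'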
@ -613,16 +658,34 @@ def _get_upstream_videos(channel_id):
                    time_noticed = video_item['time_published']
                else:
                    time_noticed = time_videos_retrieved
                rows.append((
                    video_item['channel_id'],
                    video_item['id'],
                    video_item['title'],
                    video_item['duration'],
                    video_item['time_published'],
                    video_item['is_time_published_exact'],
                    time_noticed,
                    video_item['description'],
                ))

                # videos which need durations updated
                non_durations = ('upcoming', 'none', 'live', '')
                v_id = video_item['id']
                if (existing_durs.get(v_id) is not None
                        and existing_durs[v_id].lower() in non_durations
                        and video_item['duration'] not in non_durations
                ):
                    update_rows.append((
                        video_item['title'],
                        video_item['duration'],
                        video_item['time_published'],
                        video_item['is_time_published_exact'],
                        video_item['description'],
                        video_item['id'],
                    ))
                # all other videos
                else:
                    rows.append((
                        video_item['channel_id'],
                        video_item['id'],
                        video_item['title'],
                        video_item['duration'],
                        video_item['time_published'],
                        video_item['is_time_published_exact'],
                        time_noticed,
                        video_item['description'],
                    ))

            cursor.executemany('''INSERT OR IGNORE INTO videos (
                                      sql_channel_id,
@ -635,6 +698,13 @@ def _get_upstream_videos(channel_id):
                                      description
                                  )
                                  VALUES ((SELECT id FROM subscribed_channels WHERE yt_channel_id=?), ?, ?, ?, ?, ?, ?, ?)''', rows)
            cursor.executemany('''UPDATE videos SET
                                      title=?,
                                      duration=?,
                                      time_published=?,
                                      is_time_published_exact=?,
                                      description=?
                                  WHERE video_id=?''', update_rows)
            cursor.execute('''UPDATE subscribed_channels
                              SET time_last_checked = ?, next_check_time = ?
                              WHERE yt_channel_id=?''', [int(time.time()), next_check_time, channel_id])
@ -767,7 +837,7 @@ def import_subscriptions():
        error = 'Unsupported file format: ' + mime_type
        error += (' . Only subscription.json, subscriptions.csv files'
                  ' (from Google Takeouts)'
                  ' and XML OPML files exported from Youtube\'s'
                  ' and XML OPML files exported from YouTube\'s'
                  ' subscription manager page are supported')
        return (flask.render_template('error.html', error_message=error),
                400)
@ -962,7 +1032,8 @@ def get_subscriptions_page():
            'muted': muted,
        })

    return flask.render_template('subscriptions.html',
    return flask.render_template(
        'subscriptions.html',
        header_playlist_names=local_playlist.get_playlist_names(),
        videos=videos,
        num_pages=math.ceil(number_of_videos_in_db/60),
@ -1018,7 +1089,7 @@ def serve_subscription_thumbnail(thumbnail):
        f.close()
        return flask.Response(image, mimetype='image/jpeg')

    url = "https://i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
    url = f"https://i.ytimg.com/vi/{video_id}/hqdefault.jpg"
    try:
        image = util.fetch_url(url, report_text="Saved thumbnail: " + video_id)
    except urllib.error.HTTPError as e:
@ -6,14 +6,14 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8"/>
    <meta name="viewport" content="width=device-width, initial-scale=1"/>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' blob: {{ app_url }}/* data: https://*.googlevideo.com; {{ "img-src 'self' https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com;" if not settings.proxy_images else "" }}">
    <title>{{ page_title }}</title>
    <link title="YT Local" href="/youtube.com/opensearch.xml" rel="search" type="application/opensearchdescription+xml"/>
    <link href="/youtube.com/static/favicon.ico" type="image/x-icon" rel="icon"/>
    <link href="/youtube.com/static/normalize.css" rel="stylesheet"/>
    <link href="{{ theme_path }}" rel="stylesheet"/>
    <link title="YT Local" href="/youtube.com/opensearch.xml" rel="search" type="application/opensearchdescription+xml">
    <link href="/youtube.com/static/favicon.ico" type="image/x-icon" rel="icon">
    <link href="/youtube.com/static/normalize.css" rel="stylesheet">
    <link href="{{ theme_path }}" rel="stylesheet">
    <link href="/youtube.com/shared.css" rel="stylesheet">
    {% block style %}
    {{ style }}
@ -1,21 +1,21 @@
{% if current_tab == 'search' %}
    {% set page_title = search_box_value + ' - Page ' + page_number|string %}
{% else %}
    {% set page_title = channel_name + ' - Channel' %}
    {% set page_title = channel_name|string + ' - Channel' %}
{% endif %}

{% extends "base.html" %}
{% import "common_elements.html" as common_elements %}
{% block style %}
    <link href="/youtube.com/static/message_box.css" rel="stylesheet"/>
    <link href="/youtube.com/static/channel.css" rel="stylesheet"/>
    <link href="/youtube.com/static/message_box.css" rel="stylesheet">
    <link href="/youtube.com/static/channel.css" rel="stylesheet">
{% endblock style %}

{% block main %}

<div class="author-container">
    <div class="author">
        <img alt="{{ channel_name }}" src="{{ avatar }}"/>
        <img alt="{{ channel_name }}" src="{{ avatar }}">
        <h2>{{ channel_name }}</h2>
    </div>
    <div class="summary">
@ -33,7 +33,7 @@
<hr/>

<nav class="channel-tabs">
    {% for tab_name in ('Videos', 'Playlists', 'About') %}
    {% for tab_name in ('Videos', 'Shorts', 'Streams', 'Playlists', 'About') %}
        {% if tab_name.lower() == current_tab %}
            <a class="tab page-button">{{ tab_name }}</a>
        {% else %}
@ -51,8 +51,11 @@
<ul>
    {% for (before_text, stat, after_text) in [
        ('Joined ', date_joined, ''),
        ('', view_count|commatize, ' views'),
        ('', approx_view_count, ' views'),
        ('', approx_subscriber_count, ' subscribers'),
        ('', approx_video_count, ' videos'),
        ('Country: ', country, ''),
        ('Canonical Url: ', canonical_url, ''),
    ] %}
        {% if stat %}
            <li>{{ before_text + stat|string + after_text }}</li>
@ -65,7 +68,11 @@
<hr>
<ul>
    {% for text, url in links %}
        <li><a href="{{ url }}">{{ text }}</a></li>
        {% if url %}
            <li><a href="{{ url }}">{{ text }}</a></li>
        {% else %}
            <li>{{ text }}</li>
        {% endif %}
    {% endfor %}
</ul>
</div>
@ -73,8 +80,8 @@

<!-- new-->
<div id="links-metadata">
    {% if current_tab == 'videos' %}
        {% set sorts = [('1', 'views'), ('2', 'oldest'), ('3', 'newest')] %}
    {% if current_tab in ('videos', 'shorts', 'streams') %}
        {% set sorts = [('1', 'views'), ('2', 'oldest'), ('3', 'newest'), ('4', 'newest - no shorts'),] %}
        <div id="number-of-results">{{ number_of_videos }} videos</div>
    {% elif current_tab == 'playlists' %}
        {% set sorts = [('2', 'oldest'), ('3', 'newest'), ('4', 'last video added')] %}
@ -110,13 +117,9 @@
<hr/>

<footer class="pagination-container">
    {% if current_tab == 'videos' and current_sort.__str__() == '2' %}
        <nav class="next-previous-button-row">
            {{ common_elements.next_previous_ctoken_buttons(None, ctoken, channel_url + '/' + current_tab, parameters_dictionary) }}
        </nav>
    {% elif current_tab == 'videos' %}
    {% if current_tab in ('videos', 'shorts', 'streams') %}
        <nav class="pagination-list">
            {{ common_elements.page_buttons(number_of_pages, channel_url + '/' + current_tab, parameters_dictionary, include_ends=(current_sort.__str__() == '3')) }}
            {{ common_elements.page_buttons(number_of_pages, channel_url + '/' + current_tab, parameters_dictionary, include_ends=(current_sort.__str__() in '34')) }}
        </nav>
    {% elif current_tab == 'playlists' or current_tab == 'search' %}
        <nav class="next-previous-button-row">
@ -1,14 +1,14 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8"/>
    <meta name="viewport" content="width=device-width, initial-scale=1"/>
    <meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline'; media-src 'self' https://*.googlevideo.com; {{ "img-src 'self' https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com;" if not settings.proxy_images else "" }}"/>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline'; media-src 'self' https://*.googlevideo.com; {{ "img-src 'self' https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com;" if not settings.proxy_images else "" }}">
    <title>{{ title }}</title>
    <link href="/youtube.com/static/favicon.ico" type="image/x-icon" rel="icon"/>
    <link href="/youtube.com/static/favicon.ico" type="image/x-icon" rel="icon">
    {% if settings.use_video_player == 2 %}
    <!-- plyr -->
    <link href="/youtube.com/static/modules/plyr/plyr.css" rel="stylesheet"/>
    <link href="/youtube.com/static/modules/plyr/plyr.css" rel="stylesheet">
    <!--/ plyr -->
    {% endif %}
    <style>
@ -1,4 +1,8 @@
{% set page_title = 'Error' %}
{% if error_code %}
    {% set page_title = 'Error: ' ~ error_code %}
{% else %}
    {% set page_title = 'Error' %}
{% endif %}

{% if not slim %}
{% extends "base.html" %}
@ -1,7 +1,7 @@
{% set page_title = title %}
{% extends "base.html" %}
{% block style %}
    <link href="/youtube.com/static/home.css" rel="stylesheet"/>
    <link href="/youtube.com/static/home.css" rel="stylesheet">
{% endblock style %}
{% block main %}
<ul>
@ -1,7 +1,7 @@
{% set page_title = title %}
{% extends "base.html" %}
{% block style %}
    <link href="/youtube.com/static/license.css" rel="stylesheet"/>
    <link href="/youtube.com/static/license.css" rel="stylesheet">
{% endblock style %}
{% block main %}
<table id="jslicense-labels1" class="table">
@ -2,8 +2,8 @@
{% extends "base.html" %}
{% import "common_elements.html" as common_elements %}
{% block style %}
    <link href="/youtube.com/static/message_box.css" rel="stylesheet"/>
    <link href="/youtube.com/static/local_playlist.css" rel="stylesheet"/>
    <link href="/youtube.com/static/message_box.css" rel="stylesheet">
    <link href="/youtube.com/static/local_playlist.css" rel="stylesheet">
{% endblock style %}

{% block main %}
@ -2,15 +2,15 @@
{% extends "base.html" %}
{% import "common_elements.html" as common_elements %}
{% block style %}
    <link href="/youtube.com/static/message_box.css" rel="stylesheet"/>
    <link href="/youtube.com/static/playlist.css" rel="stylesheet"/>
    <link href="/youtube.com/static/message_box.css" rel="stylesheet">
    <link href="/youtube.com/static/playlist.css" rel="stylesheet">
{% endblock style %}

{% block main %}

<div class="playlist-metadata">
    <div class="author">
        <img alt="{{ title }}" src="{{ thumbnail }}"/>
        <img alt="{{ title }}" src="{{ thumbnail }}">
        <h2>{{ title }}</h2>
    </div>
    <div class="summary">
@ -3,8 +3,8 @@
{% extends "base.html" %}
{% import "common_elements.html" as common_elements %}
{% block style %}
    <link href="/youtube.com/static/message_box.css" rel="stylesheet"/>
    <link href="/youtube.com/static/search.css" rel="stylesheet"/>
    <link href="/youtube.com/static/message_box.css" rel="stylesheet">
    <link href="/youtube.com/static/search.css" rel="stylesheet">
{% endblock style %}

{% block main %}
@ -1,7 +1,7 @@
{% set page_title = 'Settings' %}
{% extends "base.html" %}
{% block style %}
    <link href="/youtube.com/static/settings.css" rel="stylesheet"/>
    <link href="/youtube.com/static/settings.css" rel="stylesheet">
{% endblock style %}

{% block main %}
@ -13,9 +13,9 @@
    {% if not setting_info.get('hidden', false) %}
        <li class="setting-item">
            {% if 'label' is in(setting_info) %}
                <label for="{{ 'setting_' + setting_name }}">{{ setting_info['label'] }}</label>
                <label for="{{ 'setting_' + setting_name }}" {% if 'comment' is in(setting_info) %}title="{{ setting_info['comment'] }}" {% endif %}>{{ setting_info['label'] }}</label>
            {% else %}
                <label for="{{ 'setting_' + setting_name }}">{{ setting_name.replace('_', ' ')|capitalize }}</label>
                <label for="{{ 'setting_' + setting_name }}" {% if 'comment' is in(setting_info) %}title="{{ setting_info['comment'] }}" {% endif %}>{{ setting_name.replace('_', ' ')|capitalize }}</label>
            {% endif %}

            {% if setting_info['type'].__name__ == 'bool' %}
@ -1,7 +1,7 @@
{% set page_title = 'Subscription Manager' %}
{% extends "base.html" %}
{% block style %}
    <link href="/youtube.com/static/subscription_manager.css" rel="stylesheet"/>
    <link href="/youtube.com/static/subscription_manager.css" rel="stylesheet">
{% endblock style %}
@ -7,8 +7,8 @@
{% import "common_elements.html" as common_elements %}

{% block style %}
    <link href="/youtube.com/static/message_box.css" rel="stylesheet"/>
    <link href="/youtube.com/static/subscription.css" rel="stylesheet"/>
    <link href="/youtube.com/static/message_box.css" rel="stylesheet">
    <link href="/youtube.com/static/subscription.css" rel="stylesheet">
{% endblock style %}

{% block main %}
@ -3,19 +3,13 @@
{% import "common_elements.html" as common_elements %}
{% import "comments.html" as comments with context %}
{% block style %}
    <link href="/youtube.com/static/message_box.css" rel="stylesheet"/>
    <link href="/youtube.com/static/watch.css" rel="stylesheet"/>
    <link href="/youtube.com/static/message_box.css" rel="stylesheet">
    <link href="/youtube.com/static/watch.css" rel="stylesheet">
    {% if settings.use_video_player == 2 %}
    <!-- plyr -->
    <link href="/youtube.com/static/modules/plyr/plyr.css" rel="stylesheet"/>
    <link href="/youtube.com/static/modules/plyr/plyr.css" rel="stylesheet">
    <link href="/youtube.com/static/modules/plyr/custom_plyr.css" rel="stylesheet">
    <!--/ plyr -->
    <style>
        /* Prevent this div from blocking right-click menu for video
           e.g. Firefox playback speed options */
        .plyr__poster {
            display: none !important;
        }
    </style>
    {% endif %}
{% endblock style %}
@ -40,7 +34,7 @@
</div>
{% else %}
<figure class="sc-video">
    <video id="js-video-player" playsinline controls>
    <video id="js-video-player" playsinline controls {{ 'autoplay' if settings.autoplay_videos }}>
        {% if uni_sources %}
        <source src="{{ uni_sources[uni_idx]['url'] }}" type="{{ uni_sources[uni_idx]['type'] }}" data-res="{{ uni_sources[uni_idx]['quality'] }}">
        {% endif %}
@ -97,6 +91,7 @@

<span class="v-direct-link"><a href="https://youtu.be/{{ video_id }}" rel="noopener noreferrer" target="_blank">Direct Link</a></span>

{% if settings.use_video_download != 0 %}
<details class="v-download">
    <summary class="download-dropdown-label">Download</summary>
    <ul class="download-dropdown-content">
@ -116,6 +111,9 @@
        {% endfor %}
    </ul>
</details>
{% else %}
<span class="v-download"></span>
{% endif %}
<span class="v-description">{{ common_elements.text_runs(description)|escape|urlize|timestamps|safe }}</span>

<div class="v-music-list">
@ -131,7 +129,11 @@
{% for track in music_list %}
    <tr>
        {% for attribute in music_attributes %}
            <td>{{ track.get(attribute.lower(), '') }}</td>
            {% if attribute.lower() == 'title' and track['url'] is not none %}
                <td><a href="{{ track['url'] }}">{{ track.get(attribute.lower(), '') }}</a></td>
            {% else %}
                <td>{{ track.get(attribute.lower(), '') }}</td>
            {% endif %}
        {% endfor %}
    </tr>
{% endfor %}
@ -163,7 +165,7 @@
<div class="playlist-header">
    <a href="{{ playlist['url'] }}" title="{{ playlist['title'] }}"><h3>{{ playlist['title'] }}</h3></a>
    <ul class="playlist-metadata">
        <li><label for="playlist-autoplay-toggle">Autoplay: </label><input type="checkbox" class="autoplay-toggle"></li>
        <li><label for="playlist-autoplay-toggle">Autoplay: </label><input id="playlist-autoplay-toggle" type="checkbox" class="autoplay-toggle"></li>
        {% if playlist['current_index'] is none %}
            <li>[Error!]/{{ playlist['video_count'] }}</li>
        {% else %}
@ -186,7 +188,7 @@
    </nav>
</div>
{% elif settings.related_videos_mode != 0 %}
<div class="related-autoplay"><label for="related-autoplay-toggle">Autoplay: </label><input type="checkbox" class="autoplay-toggle"></div>
<div class="related-autoplay"><label for="related-autoplay-toggle">Autoplay: </label><input id="related-autoplay-toggle" type="checkbox" class="autoplay-toggle"></div>
{% endif %}

{% if subtitle_sources %}
@ -225,7 +227,7 @@
<div class="comments-area-outer comments-disabled">Comments disabled</div>
{% else %}
<details class="comments-area-outer" {{'open' if settings.comments_mode == 1 else ''}}>
    <summary>{{ comment_count|commatize }} comment{{'s' if comment_count != 1 else ''}}</summary>
    <summary>{{ comment_count|commatize }} comment{{'s' if comment_count != '1' else ''}}</summary>
    <div class="comments-area-inner comments-area">
        {% if comments_info %}
        {{ comments.video_comments(comments_info) }}
191
youtube/util.py
@ -318,10 +318,11 @@ def fetch_url(url, headers=(), timeout=15, report_text=None, data=None,
        cleanup_func(response)  # release_connection for urllib3
        content = decode_content(
            content,
            response.getheader('Content-Encoding', default='identity'))
            response.headers.get('Content-Encoding', default='identity'))

    if (settings.debugging_save_responses
            and debug_name is not None and content):
            and debug_name is not None
            and content):
        save_dir = os.path.join(settings.data_dir, 'debug')
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
@ -336,7 +337,7 @@ def fetch_url(url, headers=(), timeout=15, report_text=None, data=None,
            )
        )
    ):
        print(response.status, response.reason, response.getheaders())
        print(response.status, response.reason, response.headers)
        ip = re.search(
            br'IP address: ((?:[\da-f]*:)+[\da-f]+|(?:\d+\.)+\d+)',
            content)
@ -394,7 +395,6 @@ def head(url, use_tor=False, report_text=None, max_redirects=10):
                   round(time.monotonic() - start_time, 3))
    return response


mobile_user_agent = 'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4 Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36'
mobile_ua = (('User-Agent', mobile_user_agent),)
desktop_user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'
@ -404,13 +404,13 @@ desktop_xhr_headers = (
    ('Accept', '*/*'),
    ('Accept-Language', 'en-US,en;q=0.5'),
    ('X-YouTube-Client-Name', '1'),
    ('X-YouTube-Client-Version', '2.20180830'),
    ('X-YouTube-Client-Version', '2.20240304.00.00'),
) + desktop_ua
mobile_xhr_headers = (
    ('Accept', '*/*'),
    ('Accept-Language', 'en-US,en;q=0.5'),
    ('X-YouTube-Client-Name', '2'),
    ('X-YouTube-Client-Version', '2.20180830'),
    ('X-YouTube-Client-Version', '2.20240304.08.00'),
) + mobile_ua
@ -462,7 +462,7 @@ class RateLimitedQueue(gevent.queue.Queue):


def download_thumbnail(save_directory, video_id):
    url = "https://i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
    url = f"https://i.ytimg.com/vi/{video_id}/hqdefault.jpg"
    save_location = os.path.join(save_directory, video_id + ".jpg")
    try:
        thumbnail = fetch_url(url, report_text="Saved thumbnail: " + video_id)
@ -504,7 +504,7 @@ def video_id(url):

# default, sddefault, mqdefault, hqdefault, hq720
def get_thumbnail_url(video_id):
    return settings.img_prefix + "https://i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
    return f"{settings.img_prefix}https://i.ytimg.com/vi/{video_id}/hqdefault.jpg"


def seconds_to_timestamp(seconds):
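Both call sites move from mqdefault to hqdefault; the quality names listed in the comment above slot into the same URL pattern. A small illustrative helper (not part of the codebase):

    def thumbnail_url(video_id, quality='hqdefault'):
        # quality is one of: default, sddefault, mqdefault, hqdefault, hq720
        return f'https://i.ytimg.com/vi/{video_id}/{quality}.jpg'

    assert thumbnail_url('abc123') == 'https://i.ytimg.com/vi/abc123/hqdefault.jpg'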
@ -665,8 +665,183 @@ def to_valid_filename(name):
    return name


# https://github.com/yt-dlp/yt-dlp/blob/master/yt_dlp/extractor/youtube.py#L72
INNERTUBE_CLIENTS = {
    'android': {
        'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
        'INNERTUBE_CONTEXT': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'ANDROID',
                'clientVersion': '19.09.36',
                'osName': 'Android',
                'osVersion': '12',
                'androidSdkVersion': 31,
                'platform': 'MOBILE',
                'userAgent': 'com.google.android.youtube/19.09.36 (Linux; U; Android 12; US) gzip'
            },
            # https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
            #'thirdParty': {
            #    'embedUrl': 'https://google.com',  # Can be any valid URL
            #}
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
        'REQUIRE_JS_PLAYER': False,
    },

    'android-test-suite': {
        'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
        'INNERTUBE_CONTEXT': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'ANDROID_TESTSUITE',
                'clientVersion': '1.9',
                'osName': 'Android',
                'osVersion': '12',
                'androidSdkVersion': 31,
                'platform': 'MOBILE',
                'userAgent': 'com.google.android.youtube/1.9 (Linux; U; Android 12; US) gzip'
            },
            # https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
            #'thirdParty': {
            #    'embedUrl': 'https://google.com',  # Can be any valid URL
            #}
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
        'REQUIRE_JS_PLAYER': False,
    },

    'ios': {
        'INNERTUBE_API_KEY': 'AIzaSyB-63vPrdThhKuerbB2N_l7Kwwcxj6yUAc',
        'INNERTUBE_CONTEXT': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'IOS',
                'clientVersion': '19.09.3',
                'deviceModel': 'iPhone14,3',
                'userAgent': 'com.google.ios.youtube/19.09.3 (iPhone14,3; U; CPU iOS 15_6 like Mac OS X)'
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
        'REQUIRE_JS_PLAYER': False
    },

    # This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
    # See: https://github.com/zerodytrash/YouTube-Internal-Clients
    'tv_embedded': {
        'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
        'INNERTUBE_CONTEXT': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
                'clientVersion': '2.0',
                'clientScreen': 'EMBED',
            },
            # https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
            'thirdParty': {
                'embedUrl': 'https://google.com',  # Can be any valid URL
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 85,
        'REQUIRE_JS_PLAYER': True,
    },

    'web': {
        'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'WEB',
                'clientVersion': '2.20220801.00.00',
                'userAgent': desktop_user_agent,
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 1
    },
    'android_vr': {
        'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'ANDROID_VR',
                'clientVersion': '1.60.19',
                'deviceMake': 'Oculus',
                'deviceModel': 'Quest 3',
                'androidSdkVersion': 32,
                'userAgent': 'com.google.android.apps.youtube.vr.oculus/1.60.19 (Linux; U; Android 12L; eureka-user Build/SQ3A.220605.009.A1) gzip',
                'osName': 'Android',
                'osVersion': '12L',
            },
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 28,
        'REQUIRE_JS_PLAYER': False,
    },
}

def get_visitor_data():
    visitor_data = None
    visitor_data_cache = os.path.join(settings.data_dir, 'visitorData.txt')
    if not os.path.exists(settings.data_dir):
        os.makedirs(settings.data_dir)
    if os.path.isfile(visitor_data_cache):
        with open(visitor_data_cache, 'r') as file:
            print('Getting visitor_data from cache')
            visitor_data = file.read()
        max_age = 12*3600
        file_age = time.time() - os.path.getmtime(visitor_data_cache)
        if file_age > max_age:
            print('visitor_data cache is too old. Removing file...')
            os.remove(visitor_data_cache)
        return visitor_data

    print('Fetching youtube homepage to get visitor_data')
    yt_homepage = 'https://www.youtube.com'
    yt_resp = fetch_url(yt_homepage, headers={'User-Agent': mobile_user_agent}, report_text='Getting youtube homepage')
    visitor_data_re = r'''"visitorData":\s*?"(.+?)"'''
    visitor_data_match = re.search(visitor_data_re, yt_resp.decode())
    if visitor_data_match:
        visitor_data = visitor_data_match.group(1)
        print(f'Got visitor_data: {len(visitor_data)}')
        with open(visitor_data_cache, 'w') as file:
            print('Saving visitor_data cache...')
            file.write(visitor_data)
        return visitor_data
    else:
        print('Unable to get visitor_data value')
        return visitor_data

def call_youtube_api(client, api, data):
    client_params = INNERTUBE_CLIENTS[client]
    context = client_params['INNERTUBE_CONTEXT']
    key = client_params['INNERTUBE_API_KEY']
    host = client_params.get('INNERTUBE_HOST') or 'www.youtube.com'
    user_agent = context['client'].get('userAgent') or mobile_user_agent
    visitor_data = get_visitor_data()

    url = 'https://' + host + '/youtubei/v1/' + api + '?key=' + key
    if visitor_data:
        context['client'].update({'visitorData': visitor_data})
    data['context'] = context

    data = json.dumps(data)
    headers = (('Content-Type', 'application/json'), ('User-Agent', user_agent))
    if visitor_data:
        headers = (*headers, ('X-Goog-Visitor-Id', visitor_data))
    response = fetch_url(
        url, data=data, headers=headers,
        debug_name='youtubei_' + api + '_' + client,
        report_text='Fetched ' + client + ' youtubei ' + api
    ).decode('utf-8')
    return response

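Put together, a player request through this helper looks roughly like the following within this module (placeholder video ID; the playabilityStatus field is YouTube's response shape and not guaranteed stable):

    import json

    raw = call_youtube_api('android_vr', 'player', {'videoId': 'dQw4w9WgXcQ'})
    player_response = json.loads(raw)
    status = player_response.get('playabilityStatus', {}).get('status')
    print(status)  # e.g. 'OK' or 'LOGIN_REQUIRED'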
def strip_non_ascii(string):
    ''' Returns the string without non ASCII characters'''
    if string is None:
        return ""
    stripped = (c for c in string if 0 < ord(c) < 127)
    return ''.join(stripped)

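Note the generator keeps only code points 1 through 126, so accented characters are dropped rather than transliterated:

    assert strip_non_ascii('café') == 'caf'
    assert strip_non_ascii(None) == ''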
@ -1,3 +1,3 @@
from __future__ import unicode_literals

__version__ = '0.2.2'
__version__ = 'v0.3.2'
193
youtube/watch.py
@ -19,6 +19,7 @@ from urllib.parse import parse_qs, urlencode
from types import SimpleNamespace
from math import ceil


try:
    with open(os.path.join(settings.data_dir, 'decrypt_function_cache.json'), 'r') as f:
        decrypt_cache = json.loads(f.read())['decrypt_cache']
@ -49,6 +50,8 @@ def get_video_sources(info, target_resolution):
    video_only_sources = {}
    uni_sources = []
    pair_sources = []


    for fmt in info['formats']:
        if not all(fmt[attr] for attr in ('ext', 'url', 'itag')):
            continue
@ -74,7 +77,6 @@ def get_video_sources(info, target_resolution):
        fmt['audio_bitrate'] = int(fmt['bitrate']/1000)
        source = {
            'type': 'audio/' + fmt['ext'],
            'bitrate': fmt['audio_bitrate'],
            'quality_string': audio_quality_string(fmt),
        }
        source.update(fmt)
@ -308,14 +310,6 @@ def save_decrypt_cache():
    f.close()


watch_headers = (
    ('Accept', '*/*'),
    ('Accept-Language', 'en-US,en;q=0.5'),
    ('X-YouTube-Client-Name', '2'),
    ('X-YouTube-Client-Version', '2.20180830'),
) + util.mobile_ua


def decrypt_signatures(info, video_id):
    '''return error string, or False if no errors'''
    if not yt_data_extract.requires_decryption(info):
@ -346,7 +340,13 @@ def _add_to_error(info, key, additional_message):
    info[key] = additional_message


def extract_info(video_id, use_invidious, playlist_id=None, index=None):
def fetch_player_response(client, video_id):
    return util.call_youtube_api(client, 'player', {
        'videoId': video_id,
    })


def fetch_watch_page_info(video_id, playlist_id, index):
    # bpctr=9999999999 will bypass are-you-sure dialogs for controversial
    # videos
    url = 'https://m.youtube.com/embed/' + video_id + '?bpctr=9999999999'
@@ -354,57 +354,55 @@ def extract_info(video_id, use_invidious, playlist_id=None, index=None):
         url += '&list=' + playlist_id
     if index:
         url += '&index=' + index
-    watch_page = util.fetch_url(url, headers=watch_headers,
+
+    headers = (
+        ('Accept', '*/*'),
+        ('Accept-Language', 'en-US,en;q=0.5'),
+        ('X-YouTube-Client-Name', '2'),
+        ('X-YouTube-Client-Version', '2.20180830'),
+    ) + util.mobile_ua
+
+    watch_page = util.fetch_url(url, headers=headers,
                                 debug_name='watch')
     watch_page = watch_page.decode('utf-8')
-    info = yt_data_extract.extract_watch_info_from_html(watch_page)
+    return yt_data_extract.extract_watch_info_from_html(watch_page)
 
-    context = {
-        'client': {
-            'clientName': 'ANDROID',
-            'clientVersion': '16.20',
-            'gl': 'US',
-            'hl': 'en',
-        },
-        # https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
-        'thirdParty': {
-            'embedUrl': 'https://google.com',  # Can be any valid URL
-        }
-    }
-    if info['age_restricted'] or info['player_urls_missing']:
-        if info['age_restricted']:
-            print('Age restricted video. Fetching /youtubei/v1/player page')
-        else:
-            print('Missing player. Fetching /youtubei/v1/player page')
-        context['client']['clientScreen'] = 'EMBED'
-    else:
-        print('Fetching /youtubei/v1/player page')
-
-    # https://github.com/yt-dlp/yt-dlp/issues/574#issuecomment-887171136
-    # ANDROID is used instead because its urls don't require decryption
-    # The URLs returned with WEB for videos requiring decryption
-    # couldn't be decrypted with the base.js from the web page for some
-    # reason
-    url = 'https://youtubei.googleapis.com/youtubei/v1/player'
-    url += '?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
-    data = {
-        'videoId': video_id,
-        'context': context,
-    }
-    data = json.dumps(data)
-    content_header = (('Content-Type', 'application/json'),)
-    player_response = util.fetch_url(
-        url, data=data, headers=util.mobile_ua + content_header,
-        debug_name='youtubei_player',
-        report_text='Fetched youtubei player page').decode('utf-8')
+
+def extract_info(video_id, use_invidious, playlist_id=None, index=None):
+    primary_client = 'android_vr'
+    fallback_client = 'ios'
+    last_resort_client = 'tv_embedded'
 
-    yt_data_extract.update_with_age_restricted_info(info, player_response)
+    tasks = (
+        # Get video metadata from here
+        gevent.spawn(fetch_watch_page_info, video_id, playlist_id, index),
+        gevent.spawn(fetch_player_response, primary_client, video_id)
+    )
+    gevent.joinall(tasks)
+    util.check_gevent_exceptions(*tasks)
+
+    info = tasks[0].value or {}
+    player_response = tasks[1].value or {}
+
+    yt_data_extract.update_with_new_urls(info, player_response)
+
+    # Fallback to 'ios' if no valid URLs are found
+    if not info.get('formats') or info.get('player_urls_missing'):
+        print(f"No URLs found in '{primary_client}', attempting with '{fallback_client}'.")
+        player_response = fetch_player_response(fallback_client, video_id) or {}
+        yt_data_extract.update_with_new_urls(info, player_response)
+
+    # Final attempt with 'tv_embedded' if there are still no URLs
+    if not info.get('formats') or info.get('player_urls_missing'):
+        print(f"No URLs found in '{fallback_client}', attempting with '{last_resort_client}'")
+        player_response = fetch_player_response(last_resort_client, video_id) or {}
+        yt_data_extract.update_with_new_urls(info, player_response)
 
-    # signature decryption
-    decryption_error = decrypt_signatures(info, video_id)
-    if decryption_error:
-        decryption_error = 'Error decrypting url signatures: ' + decryption_error
-        info['playability_error'] = decryption_error
+    if info.get('formats'):
+        decryption_error = decrypt_signatures(info, video_id)
+        if decryption_error:
+            info['playability_error'] = 'Error decrypting url signatures: ' + decryption_error
 
     # check if urls ready (non-live format) in former livestream
     # urls not ready if all of them have no filesize
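Note: the hunk above replaces the single hard-coded ANDROID /youtubei/v1/player request with a chain of innertube clients tried in order. A condensed, self-contained sketch of that fallback pattern (fetch is passed in as a stand-in for util.call_youtube_api, and the early-exit condition is simplified):

def get_player_response(video_id, fetch,
                        clients=('android_vr', 'ios', 'tv_embedded')):
    # Try each client in order; stop once one of them yields usable URLs.
    info = {}
    for client in clients:
        response = fetch(client, video_id) or {}
        info.update(response)  # stand-in for yt_data_extract.update_with_new_urls
        if info.get('formats') and not info.get('player_urls_missing'):
            break
    return info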
@@ -418,26 +416,26 @@ def extract_info(video_id, use_invidious, playlist_id=None, index=None):
 
     # livestream urls
     # sometimes only the livestream urls work soon after the livestream is over
-    if (info['hls_manifest_url']
-            and (info['live'] or not info['formats'] or not info['urls_ready'])
-    ):
-        manifest = util.fetch_url(
-            info['hls_manifest_url'],
-            debug_name='hls_manifest.m3u8',
-            report_text='Fetched hls manifest'
-        ).decode('utf-8')
-
-        info['hls_formats'], err = yt_data_extract.extract_hls_formats(manifest)
-        if not err:
-            info['playability_error'] = None
-        for fmt in info['hls_formats']:
-            fmt['video_quality'] = video_quality_string(fmt)
-    else:
-        info['hls_formats'] = []
+    info['hls_formats'] = []
+    if info.get('hls_manifest_url') and (info.get('live') or not info.get('formats') or not info['urls_ready']):
+        try:
+            manifest = util.fetch_url(info['hls_manifest_url'],
+                                      debug_name='hls_manifest.m3u8',
+                                      report_text='Fetched hls manifest'
+                                      ).decode('utf-8')
+            info['hls_formats'], err = yt_data_extract.extract_hls_formats(manifest)
+            if not err:
+                info['playability_error'] = None
+            for fmt in info['hls_formats']:
+                fmt['video_quality'] = video_quality_string(fmt)
+        except Exception as e:
+            print(f"Error obtaining HLS manifest: {e}")
+            info['hls_formats'] = []
 
     # check for 403. Unnecessary for tor video routing b/c ip address is same
     info['invidious_used'] = False
     info['invidious_reload_button'] = False
     info['tor_bypass_used'] = False
     if (settings.route_tor == 1
             and info['formats'] and info['formats'][0]['url']):
         try:
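Note: the rewritten HLS block pre-assigns info['hls_formats'] = [] and wraps the manifest fetch in try/except, so a network failure can no longer abort the whole watch-page extraction. The same defensive shape in isolation (load_hls_formats is a hypothetical name; the two callables stand in for util.fetch_url and yt_data_extract.extract_hls_formats):

def load_hls_formats(fetch_manifest, parse_manifest):
    # Start from a safe default, then try to improve on it; any failure
    # leaves the empty list instead of a missing key or a crash.
    formats = []
    try:
        manifest = fetch_manifest()               # may raise on network errors
        formats, err = parse_manifest(manifest)   # returns (formats, error)
        if err:
            formats = []
    except Exception as e:
        print(f'Error obtaining HLS manifest: {e}')
        formats = []
    return formats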
@@ -451,6 +449,7 @@ def extract_info(video_id, use_invidious, playlist_id=None, index=None):
             if response.status == 403:
                 print('Access denied (403) for video urls.')
+                print('Routing video through Tor')
                 info['tor_bypass_used'] = True
                 for fmt in info['formats']:
                     fmt['url'] += '&use_tor=1'
             elif 300 <= response.status < 400:
@@ -572,8 +571,6 @@ def get_storyboard_vtt():
 
 
 time_table = {'h': 3600, 'm': 60, 's': 1}
 
 @yt_app.route('/watch')
 @yt_app.route('/embed')
 @yt_app.route('/embed/<video_id>')
@@ -628,8 +625,11 @@ def get_watch_page(video_id=None):
 
     # prefix urls, and other post-processing not handled by yt_data_extract
     for item in info['related_videos']:
+        item['thumbnail'] = "https://i.ytimg.com/vi/{}/hqdefault.jpg".format(item['id'])  # use the high-quality thumbnail for related videos
         util.prefix_urls(item)
         util.add_extra_html_info(item)
+    for song in info['music_list']:
+        song['url'] = util.prefix_url(song['url'])
     if info['playlist']:
         playlist_id = info['playlist']['id']
         for item in info['playlist']['items']:
@@ -659,12 +659,6 @@ def get_watch_page(video_id=None):
             '/videoplayback',
             '/videoplayback/name/' + filename)
 
-    if settings.gather_googlevideo_domains:
-        with open(os.path.join(settings.data_dir, 'googlevideo-domains.txt'), 'a+', encoding='utf-8') as f:
-            url = info['formats'][0]['url']
-            subdomain = url[0:url.find(".googlevideo.com")]
-            f.write(subdomain + "\n")
-
     download_formats = []
 
     for format in (info['formats'] + info['hls_formats']):
@@ -681,20 +675,19 @@ def get_watch_page(video_id=None):
             'codecs': codecs_string,
         })
 
-    target_resolution = settings.default_resolution
+    if (settings.route_tor == 2) or info['tor_bypass_used']:
+        target_resolution = 240
+    else:
+        target_resolution = settings.default_resolution
+
     source_info = get_video_sources(info, target_resolution)
     uni_sources = source_info['uni_sources']
     pair_sources = source_info['pair_sources']
     uni_idx, pair_idx = source_info['uni_idx'], source_info['pair_idx']
-    video_height = yt_data_extract.deep_get(source_info, 'uni_sources',
-                                            uni_idx, 'height',
-                                            default=360)
-    video_width = yt_data_extract.deep_get(source_info, 'uni_sources',
-                                           uni_idx, 'width',
-                                           default=640)
 
     pair_quality = yt_data_extract.deep_get(pair_sources, pair_idx, 'quality')
     uni_quality = yt_data_extract.deep_get(uni_sources, uni_idx, 'quality')
+
     pair_error = abs((pair_quality or 360) - target_resolution)
     uni_error = abs((uni_quality or 360) - target_resolution)
     if uni_error == pair_error:
@@ -704,9 +697,18 @@ def get_watch_page(video_id=None):
         closer_to_target = 'uni'
     else:
         closer_to_target = 'pair'
-    using_pair_sources = (
-        bool(pair_sources) and (not uni_sources or closer_to_target == 'pair')
-    )
+
+    if settings.prefer_uni_sources == 2:
+        # Use uni sources unless there's no choice.
+        using_pair_sources = (
+            bool(pair_sources) and (not uni_sources)
+        )
+    else:
+        # Use the pair sources if they're closer to the desired resolution
+        using_pair_sources = (
+            bool(pair_sources)
+            and (not uni_sources or closer_to_target == 'pair')
+        )
     if using_pair_sources:
        video_height = pair_sources[pair_idx]['height']
        video_width = pair_sources[pair_idx]['width']
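Note: the selection logic above boils down to "use whichever source list sits closer to the target height, unless prefer_uni_sources == 2 forces uni sources whenever any exist". A condensed, self-contained version (choose_pair_sources is a hypothetical name; quality values stand in for the source dicts):

def choose_pair_sources(uni_quality, pair_quality, target,
                        prefer_uni_sources=0, has_uni=True, has_pair=True):
    if prefer_uni_sources == 2:
        # Use uni sources unless there's no choice at all.
        return has_pair and not has_uni
    uni_error = abs((uni_quality or 360) - target)
    pair_error = abs((pair_quality or 360) - target)
    closer_to_target = 'pair' if pair_error < uni_error else 'uni'  # ties favor uni
    return has_pair and (not has_uni or closer_to_target == 'pair')

assert choose_pair_sources(1080, 480, target=360) is True   # pair is closer
assert choose_pair_sources(360, 1080, target=360) is False  # uni is closer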
@@ -718,6 +720,8 @@ def get_watch_page(video_id=None):
             uni_sources, uni_idx, 'width', default=640
         )
 
+
+
     # 1 second per pixel, or the actual video width
     theater_video_target_width = max(640, info['duration'] or 0, video_width)
@@ -750,14 +754,13 @@ def get_watch_page(video_id=None):
         template_name = 'embed.html'
     else:
         template_name = 'watch.html'
-    return flask.render_template(
-        template_name,
+    return flask.render_template(template_name,
         header_playlist_names = local_playlist.get_playlist_names(),
         uploader_channel_url = ('/' + info['author_url']) if info['author_url'] else '',
         time_published = info['time_published'],
-        time_published_utc=time_utc_isoformat(info['time_published']),
         view_count = (lambda x: '{:,}'.format(x) if x is not None else "")(info.get("view_count", None)),
+        like_count = (lambda x: '{:,}'.format(x) if x is not None else "")(info.get("like_count", None)),
+        dislike_count = (lambda x: '{:,}'.format(x) if x is not None else "")(info.get("dislike_count", None)),
         download_formats = download_formats,
         other_downloads = other_downloads,
         video_info = json.dumps(video_info),
@@ -806,7 +809,7 @@ def get_watch_page(video_id=None):
             'related': info['related_videos'],
             'playability_error': info['playability_error'],
         },
-        font_family=youtube.font_choices[settings.font],
+        font_family = youtube.font_choices[settings.font],  # for embed page
         **source_info,
         using_pair_sources = using_pair_sources,
     )
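Note: the template call above formats nullable counts with an immediately-invoked lambda; an equivalent named helper (format_count is a hypothetical name, not part of the codebase) shows the intent:

def format_count(x):
    # Thousands separators for real counts, empty string for missing ones.
    return '{:,}'.format(x) if x is not None else ''

assert format_count(1234567) == '1,234,567'
assert format_count(None) == ''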
youtube/yt_data_extract/__init__.py
@@ -7,7 +7,7 @@ from .everything_else import (extract_channel_info, extract_search_info,
     extract_playlist_metadata, extract_playlist_info, extract_comments_info)
 
 from .watch_extraction import (extract_watch_info, get_caption_url,
-    update_with_age_restricted_info, requires_decryption,
+    update_with_new_urls, requires_decryption,
     extract_decryption_function, decrypt_signatures, _formats,
     update_format_with_type_info, extract_hls_formats,
     extract_watch_info_from_html, captions_available)
youtube/yt_data_extract/common.py
@@ -109,7 +109,7 @@ def concat_or_none(*strings):
 def remove_redirect(url):
     if url is None:
         return None
-    if re.fullmatch(r'(((https?:)?//)?(www.)?youtube.com)?/redirect\?.*', url) is not None:  # youtube puts these on external links to do tracking
+    if re.fullmatch(r'(((https?:)?//)?(www.)?youtube.com)?/redirect\?.*', url) is not None:  # YouTube puts these on external links to do tracking
         query_string = url[url.find('?')+1: ]
         return urllib.parse.parse_qs(query_string)['q'][0]
     return url
@@ -133,11 +133,11 @@ def _recover_urls(runs):
     for run in runs:
         url = deep_get(run, 'navigationEndpoint', 'urlEndpoint', 'url')
         text = run.get('text', '')
-        # second condition is necessary because youtube makes other things into urls, such as hashtags, which we want to keep as text
+        # second condition is necessary because YouTube makes other things into urls, such as hashtags, which we want to keep as text
         if url is not None and (text.startswith('http://') or text.startswith('https://')):
             url = remove_redirect(url)
             run['url'] = url
-            run['text'] = url  # youtube truncates the url text, use actual url instead
+            run['text'] = url  # YouTube truncates the url text, use actual url instead
 
 def extract_str(node, default=None, recover_urls=False):
     '''default is the value returned if the extraction fails. If recover_urls is true, will attempt to fix YouTube's truncation of url text (most prominently seen in descriptions)'''
@@ -185,7 +185,7 @@ def extract_int(string, default=None, whole_word=True):
     return default
 
 def extract_approx_int(string):
-    '''e.g. "15.1M" from "15.1M subscribers"'''
+    '''e.g. "15.1M" from "15.1M subscribers" or '4,353' from 4353'''
     if not isinstance(string, str):
         string = extract_str(string)
     if not string:
@@ -193,7 +193,10 @@ def extract_approx_int(string):
     match = re.search(r'\b(\d+(?:\.\d+)?[KMBTkmbt]?)\b', string.replace(',', ''))
     if match is None:
         return None
-    return match.group(1)
+    result = match.group(1)
+    if re.fullmatch(r'\d+', result):
+        result = '{:,}'.format(int(result))
+    return result
 
 MONTH_ABBREVIATIONS = {'jan':'1', 'feb':'2', 'mar':'3', 'apr':'4', 'may':'5', 'jun':'6', 'jul':'7', 'aug':'8', 'sep':'9', 'oct':'10', 'nov':'11', 'dec':'12'}
 def extract_date(date_text):
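Note: with the change above, extract_approx_int now also comma-formats plain integers, so '4353' becomes '4,353' while abbreviated counts like '15.1M' pass through untouched. A standalone re-implementation of just the new behavior (the extract_str fallback for non-string input is omitted):

import re

def extract_approx_int(string):
    match = re.search(r'\b(\d+(?:\.\d+)?[KMBTkmbt]?)\b', string.replace(',', ''))
    if match is None:
        return None
    result = match.group(1)
    if re.fullmatch(r'\d+', result):  # plain integer: add separators
        result = '{:,}'.format(int(result))
    return result

assert extract_approx_int('15.1M subscribers') == '15.1M'
assert extract_approx_int('4,353 views') == '4,353'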
@@ -249,6 +252,9 @@ def extract_item_info(item, additional_info={}):
     primary_type = type_parts[-2]
     if primary_type == 'video':
         info['type'] = 'video'
+    elif type_parts[0] == 'reel':  # shorts
+        info['type'] = 'video'
+        primary_type = 'video'
     elif primary_type in ('playlist', 'radio', 'show'):
         info['type'] = 'playlist'
         info['playlist_type'] = primary_type
@@ -295,7 +301,11 @@ def extract_item_info(item, additional_info={}):
         info['time_published'] = timestamp.group(1)
 
     if primary_type == 'video':
-        info['id'] = item.get('videoId')
+        info['id'] = multi_deep_get(item,
+            ['videoId'],
+            ['navigationEndpoint', 'watchEndpoint', 'videoId'],
+            ['navigationEndpoint', 'reelWatchEndpoint', 'videoId']  # shorts
+        )
         info['view_count'] = extract_int(item.get('viewCountText'))
 
         # dig into accessibility data to get view_count for videos marked as recommended, and to get time_published
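Note: multi_deep_get tries several key paths in order and returns the first one that resolves, which is what lets the id extraction above cover regular videos, watch endpoints, and shorts in one call. A minimal sketch of the helper (the real one lives in yt_data_extract/common.py and may support more options):

def deep_get(obj, *keys, default=None):
    # Walk nested dicts/lists, returning default on any missing step.
    for key in keys:
        try:
            obj = obj[key]
        except (KeyError, IndexError, TypeError):
            return default
    return obj

def multi_deep_get(obj, *key_paths, default=None):
    # Return the value of the first key path that resolves.
    sentinel = object()
    for path in key_paths:
        value = deep_get(obj, *path, default=sentinel)
        if value is not sentinel:
            return value
    return default

item = {'navigationEndpoint': {'reelWatchEndpoint': {'videoId': 'abc123xyz_0'}}}
assert multi_deep_get(item,
    ['videoId'],
    ['navigationEndpoint', 'watchEndpoint', 'videoId'],
    ['navigationEndpoint', 'reelWatchEndpoint', 'videoId'],  # shorts
) == 'abc123xyz_0'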
@@ -313,17 +323,35 @@ def extract_item_info(item, additional_info={}):
         if info['view_count']:
             info['approx_view_count'] = '{:,}'.format(info['view_count'])
         else:
-            info['approx_view_count'] = extract_approx_int(item.get('shortViewCountText'))
+            info['approx_view_count'] = extract_approx_int(multi_get(item,
+                'shortViewCountText',
+                'viewCountText'  # shorts
+            ))
 
         # handle case where it is "No views"
         if not info['approx_view_count']:
             if ('No views' in item.get('shortViewCountText', '')
-                    or 'no views' in accessibility_label.lower()):
+                    or 'no views' in accessibility_label.lower()
+                    or 'No views' in extract_str(item.get('viewCountText', ''))  # shorts
+            ):
                 info['view_count'] = 0
                 info['approx_view_count'] = '0'
 
         info['duration'] = extract_str(item.get('lengthText'))
 
+        # dig into accessibility data to get duration for shorts
+        accessibility_label = deep_get(item,
+                                       'accessibility', 'accessibilityData', 'label',
+                                       default='')
+        duration = re.search(r'(\d+) (second|seconds|minute) - play video$',
+                             accessibility_label)
+        if duration:
+            if duration.group(2) == 'minute':
+                conservative_update(info, 'duration', '1:00')
+            else:
+                conservative_update(info,
+                    'duration', '0:' + duration.group(1).zfill(2))
+
         # if it's an item in a playlist, get its index
         if 'index' in item:  # url has wrong index on playlist page
             info['index'] = extract_int(item.get('index'))
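Note: for shorts the duration only appears in the accessibility label, which the regex above picks apart. A quick demonstration on a sample label (the label text here is illustrative, not captured from YouTube):

import re

label = 'Example short - 37 seconds - play video'
duration = re.search(r'(\d+) (second|seconds|minute) - play video$', label)
assert duration is not None and duration.group(1) == '37'
assert '0:' + duration.group(1).zfill(2) == '0:37'  # the value the code stores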
@@ -395,6 +423,8 @@ _item_types = {
     'gridVideoRenderer',
     'playlistVideoRenderer',
 
+    'reelItemRenderer',
+
     'playlistRenderer',
     'compactPlaylistRenderer',
     'gridPlaylistRenderer',
@@ -542,9 +572,13 @@ def extract_items(response, item_types=_item_types,
                 item_types=item_types)
             if items:
                 break
-    elif 'onResponseReceivedEndpoints' in response:
-        for endpoint in response.get('onResponseReceivedEndpoints', []):
-            items, ctoken = extract_items_from_renderer_list(
+    if ('onResponseReceivedEndpoints' in response
+            or 'onResponseReceivedActions' in response):
+        for endpoint in multi_get(response,
+                'onResponseReceivedEndpoints',
+                'onResponseReceivedActions',
+                []):
+            new_items, new_ctoken = extract_items_from_renderer_list(
                 multi_deep_get(
                     endpoint,
                     ['reloadContinuationItemsCommand', 'continuationItems'],
@@ -553,13 +587,17 @@ def extract_items(response, item_types=_item_types,
                 ),
                 item_types=item_types,
             )
-            if items:
-                break
-    elif 'contents' in response:
+            items += new_items
+            if (not ctoken) or (new_ctoken and new_items):
+                ctoken = new_ctoken
+    if 'contents' in response:
         renderer = get(response, 'contents', {})
-        items, ctoken = extract_items_from_renderer(
+        new_items, new_ctoken = extract_items_from_renderer(
             renderer,
             item_types=item_types)
+        items += new_items
+        if (not ctoken) or (new_ctoken and new_items):
+            ctoken = new_ctoken
 
     if search_engagement_panels and 'engagementPanels' in response:
         new_items, new_ctoken = extract_items_from_renderer_list(
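Note: extract_items previously stopped at the first response section that produced anything (elif/break); the rewrite accumulates items across all sections and only lets a later section's continuation token win when the current token is empty or that section actually produced items. The merge rule in isolation (merge_sections is a hypothetical name):

def merge_sections(sections):
    # sections is an iterable of (new_items, new_ctoken) pairs.
    items, ctoken = [], None
    for new_items, new_ctoken in sections:
        items += new_items
        if (not ctoken) or (new_ctoken and new_items):
            ctoken = new_ctoken
    return items, ctoken

assert merge_sections([(['a'], 'tok1'), ([], None)]) == (['a'], 'tok1')
assert merge_sections([([], 'stale'), (['b'], 'tok2')]) == (['b'], 'tok2')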
youtube/yt_data_extract/everything_else.py
@@ -9,7 +7,7 @@ import re
 import urllib
 from math import ceil
 
-def extract_channel_info(polymer_json, tab):
+def extract_channel_info(polymer_json, tab, continuation=False):
     response, err = extract_response(polymer_json)
     if err:
         return {'error': err}
@@ -23,7 +23,8 @@ def extract_channel_info(polymer_json, tab):
 
     # channel doesn't exist or was terminated
     # example terminated channel: https://www.youtube.com/channel/UCnKJeK_r90jDdIuzHXC0Org
-    if not metadata:
+    # metadata and microformat are not present for continuation requests
+    if not metadata and not continuation:
         if response.get('alerts'):
             error_string = ' '.join(
                 extract_str(deep_get(alert, 'alertRenderer', 'text'), default='')
@@ -44,7 +45,7 @@ def extract_channel_info(polymer_json, tab):
     info['approx_subscriber_count'] = extract_approx_int(deep_get(response,
         'header', 'c4TabbedHeaderRenderer', 'subscriberCountText'))
 
-    # stuff from microformat (info given by youtube for every page on channel)
+    # stuff from microformat (info given by youtube for first page on channel)
     info['short_description'] = metadata.get('description')
     if info['short_description'] and len(info['short_description']) > 730:
         info['short_description'] = info['short_description'][0:730] + '...'
@@ -69,10 +70,10 @@ def extract_channel_info(polymer_json, tab):
     info['ctoken'] = None
 
     # empty channel
-    if 'contents' not in response and 'continuationContents' not in response:
-        return info
+    #if 'contents' not in response and 'continuationContents' not in response:
+    #    return info
 
-    if tab in ('videos', 'playlists', 'search'):
+    if tab in ('videos', 'shorts', 'streams', 'playlists', 'search'):
         items, ctoken = extract_items(response)
         additional_info = {
             'author': info['channel_name'],
@@ -84,23 +85,84 @@ def extract_channel_info(polymer_json, tab):
         if tab in ('search', 'playlists'):
             info['is_last_page'] = (ctoken is None)
     elif tab == 'about':
-        items, _ = extract_items(response, item_types={'channelAboutFullMetadataRenderer'})
-        if not items:
-            info['error'] = 'Could not find channelAboutFullMetadataRenderer'
-            return info
-        channel_metadata = items[0]['channelAboutFullMetadataRenderer']
-
-        info['links'] = []
-        for link_json in channel_metadata.get('primaryLinks', ()):
-            url = remove_redirect(deep_get(link_json, 'navigationEndpoint', 'urlEndpoint', 'url'))
-            if not (url.startswith('http://') or url.startswith('https://')):
-                url = 'http://' + url
-            text = extract_str(link_json.get('title'))
-            info['links'].append( (text, url) )
-
-        info['date_joined'] = extract_date(channel_metadata.get('joinedDateText'))
-        info['view_count'] = extract_int(channel_metadata.get('viewCountText'))
-        info['description'] = extract_str(channel_metadata.get('description'), default='')
+        # Latest type
+        items, _ = extract_items(response, item_types={'aboutChannelRenderer'})
+        if items:
+            a_metadata = deep_get(items, 0, 'aboutChannelRenderer',
+                                  'metadata', 'aboutChannelViewModel')
+            if not a_metadata:
+                info['error'] = 'Could not find aboutChannelViewModel'
+                return info
+
+            info['links'] = []
+            for link_outer in a_metadata.get('links', ()):
+                link = link_outer.get('channelExternalLinkViewModel') or {}
+                link_content = extract_str(deep_get(link, 'link', 'content'))
+                for run in deep_get(link, 'link', 'commandRuns') or ():
+                    url = remove_redirect(deep_get(run, 'onTap',
+                        'innertubeCommand', 'urlEndpoint', 'url'))
+                    if url and not (url.startswith('http://')
+                            or url.startswith('https://')):
+                        url = 'https://' + url
+                    if link_content is None or (link_content in url):
+                        break
+                else:  # didn't break
+                    url = link_content
+                    if url and not (url.startswith('http://')
+                            or url.startswith('https://')):
+                        url = 'https://' + url
+                text = extract_str(deep_get(link, 'title', 'content'))
+                info['links'].append( (text, url) )
+
+            info['date_joined'] = extract_date(
+                a_metadata.get('joinedDateText')
+            )
+            info['view_count'] = extract_int(a_metadata.get('viewCountText'))
+            info['approx_view_count'] = extract_approx_int(
+                a_metadata.get('viewCountText')
+            )
+            info['description'] = extract_str(
+                a_metadata.get('description'), default=''
+            )
+            info['approx_video_count'] = extract_approx_int(
+                a_metadata.get('videoCountText')
+            )
+            info['approx_subscriber_count'] = extract_approx_int(
+                a_metadata.get('subscriberCountText')
+            )
+            info['country'] = extract_str(a_metadata.get('country'))
+            info['canonical_url'] = extract_str(
+                a_metadata.get('canonicalChannelUrl')
+            )
+
+        # Old type
+        else:
+            items, _ = extract_items(response,
+                item_types={'channelAboutFullMetadataRenderer'})
+            if not items:
+                info['error'] = 'Could not find aboutChannelRenderer or channelAboutFullMetadataRenderer'
+                return info
+            a_metadata = items[0]['channelAboutFullMetadataRenderer']
+
+            info['links'] = []
+            for link_json in a_metadata.get('primaryLinks', ()):
+                url = remove_redirect(deep_get(link_json, 'navigationEndpoint',
+                    'urlEndpoint', 'url'))
+                if url and not (url.startswith('http://')
+                        or url.startswith('https://')):
+                    url = 'https://' + url
+                text = extract_str(link_json.get('title'))
+                info['links'].append( (text, url) )
+
+            info['date_joined'] = extract_date(a_metadata.get('joinedDateText'))
+            info['view_count'] = extract_int(a_metadata.get('viewCountText'))
+            info['description'] = extract_str(a_metadata.get(
+                'description'), default='')
+
+            info['approx_video_count'] = None
+            info['approx_subscriber_count'] = None
+            info['country'] = None
+            info['canonical_url'] = None
     else:
         raise NotImplementedError('Unknown or unsupported channel tab: ' + tab)
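Note: the commandRuns loop above leans on Python's for/else, where the else branch runs only when the loop finished without break, i.e. when no run produced a usable URL and the displayed link text has to serve as the URL. The construct in isolation (pick_url is a hypothetical name):

def pick_url(candidate_urls, displayed_text):
    for url in candidate_urls:
        if url and displayed_text in url:
            break  # found a real URL matching the displayed text
    else:  # didn't break
        url = displayed_text
    return url

assert pick_url(['https://example.com/page'], 'example.com') == 'https://example.com/page'
assert pick_url([], 'example.com') == 'example.com'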
@@ -167,7 +229,7 @@ def extract_playlist_metadata(polymer_json):
     if metadata['first_video_id'] is None:
         metadata['thumbnail'] = None
     else:
-        metadata['thumbnail'] = 'https://i.ytimg.com/vi/' + metadata['first_video_id'] + '/mqdefault.jpg'
+        metadata['thumbnail'] = f"https://i.ytimg.com/vi/{metadata['first_video_id']}/hqdefault.jpg"
 
     metadata['video_count'] = extract_int(header.get('numVideosText'))
     metadata['description'] = extract_str(header.get('descriptionText'), default='')
@@ -190,6 +252,19 @@ def extract_playlist_metadata(polymer_json):
         elif 'updated' in text:
             metadata['time_published'] = extract_date(text)
 
+    microformat = deep_get(response, 'microformat', 'microformatDataRenderer',
+                           default={})
+    conservative_update(
+        metadata, 'title', extract_str(microformat.get('title'))
+    )
+    conservative_update(
+        metadata, 'description', extract_str(microformat.get('description'))
+    )
+    conservative_update(
+        metadata, 'thumbnail', deep_get(microformat, 'thumbnail',
+                                        'thumbnails', -1, 'url')
+    )
+
     return metadata
 
 def extract_playlist_info(polymer_json):
@@ -197,13 +272,11 @@ def extract_playlist_info(polymer_json):
     if err:
         return {'error': err}
     info = {'error': None}
-    first_page = 'continuationContents' not in response
     video_list, _ = extract_items(response)
 
     info['items'] = [extract_item_info(renderer) for renderer in video_list]
 
-    if first_page:
-        info['metadata'] = extract_playlist_metadata(polymer_json)
+    info['metadata'] = extract_playlist_metadata(polymer_json)
 
     return info
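Note: the microformat hunk above relies on conservative_update so data pulled from the microformat fills gaps without clobbering values already extracted from the header renderer. A minimal version of the helper as its use here implies (the real one is defined in yt_data_extract/common.py):

def conservative_update(obj, key, value):
    # Only fill a missing field; never overwrite an existing value.
    if obj.get(key) is None:
        obj[key] = value

metadata = {'title': 'From header', 'description': None}
conservative_update(metadata, 'title', 'From microformat')        # ignored
conservative_update(metadata, 'description', 'From microformat')  # fills the gap
assert metadata == {'title': 'From header', 'description': 'From microformat'}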
youtube/yt_data_extract/watch_extraction.py
@@ -111,14 +111,10 @@ _formats = {
     '_rtmp': {'protocol': 'rtmp'},
 
     # av01 video only formats sometimes served with "unknown" codecs
-    '394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
-    '395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
-    '396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'},
-    '397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'},
-    '398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'},
-    '399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'},
-    '400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
-    '401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
+    '394': {'vcodec': 'av01.0.05M.08'},
+    '395': {'vcodec': 'av01.0.05M.08'},
+    '396': {'vcodec': 'av01.0.05M.08'},
+    '397': {'vcodec': 'av01.0.05M.08'},
 }
@@ -137,29 +133,59 @@ def _extract_from_video_information_renderer(renderer_content):
     return info
 
 def _extract_likes_dislikes(renderer_content):
-    info = {
-        'like_count': None,
-    }
-    for button in renderer_content.get('buttons', ()):
-        button_renderer = button.get('slimMetadataToggleButtonRenderer', {})
-
+    def extract_button_count(toggle_button_renderer):
         # all the digits can be found in the accessibility data
-        count = extract_int(deep_get(
-            button_renderer,
-            'button', 'toggleButtonRenderer', 'defaultText',
-            'accessibility', 'accessibilityData', 'label'))
+        count = extract_int(multi_deep_get(
+            toggle_button_renderer,
+            ['defaultText', 'accessibility', 'accessibilityData', 'label'],
+            ['accessibility', 'label'],
+            ['accessibilityData', 'accessibilityData', 'label'],
+            ['accessibilityText'],
+        ))
 
         # this count doesn't have all the digits, it's like 53K for instance
-        dumb_count = extract_int(extract_str(deep_get(
-            button_renderer, 'button', 'toggleButtonRenderer', 'defaultText')))
+        dumb_count = extract_int(extract_str(multi_get(
+            toggle_button_renderer, ['defaultText', 'title'])))
 
         # The accessibility text will be "No likes" or "No dislikes" or
         # something like that, but dumb count will be 0
         if dumb_count == 0:
             count = 0
+        return count
 
-        if 'isLike' in button_renderer:
-            info['like_count'] = count
+    info = {
+        'like_count': None,
+        'dislike_count': None,
+    }
+    for button in renderer_content.get('buttons', ()):
+        if 'slimMetadataToggleButtonRenderer' in button:
+            button_renderer = button['slimMetadataToggleButtonRenderer']
+            count = extract_button_count(deep_get(button_renderer,
+                                                  'button',
+                                                  'toggleButtonRenderer'))
+            if 'isLike' in button_renderer:
+                info['like_count'] = count
+            elif 'isDislike' in button_renderer:
+                info['dislike_count'] = count
+        elif 'slimMetadataButtonRenderer' in button:
+            button_renderer = button['slimMetadataButtonRenderer']
+            liberal_update(info, 'like_count', extract_button_count(
+                multi_deep_get(button_renderer,
+                    ['button', 'segmentedLikeDislikeButtonRenderer',
+                     'likeButton', 'toggleButtonRenderer'],
+                    ['button', 'segmentedLikeDislikeButtonViewModel',
+                     'likeButtonViewModel', 'likeButtonViewModel',
+                     'toggleButtonViewModel', 'toggleButtonViewModel',
+                     'defaultButtonViewModel', 'buttonViewModel']
+                )
+            ))
+            '''liberal_update(info, 'dislike_count', extract_button_count(
+                deep_get(
+                    button_renderer, 'button',
+                    'segmentedLikeDislikeButtonRenderer',
+                    'dislikeButton', 'toggleButtonRenderer'
+                )
+            ))'''
     return info
 
 def _extract_from_owner_renderer(renderer_content):
@@ -213,6 +239,36 @@ def _extract_metadata_row_info(renderer_content):
 
     return info
 
+def _extract_from_music_renderer(renderer_content):
+    # latest format for the music list
+    info = {
+        'music_list': [],
+    }
+
+    for carousel in renderer_content.get('carouselLockups', []):
+        song = {}
+        carousel = carousel.get('carouselLockupRenderer', {})
+        video_renderer = carousel.get('videoLockup', {})
+        video_renderer_info = extract_item_info(video_renderer)
+        video_id = video_renderer_info.get('id')
+        song['url'] = concat_or_none('https://www.youtube.com/watch?v=',
+                                     video_id)
+        song['title'] = video_renderer_info.get('title')
+        for row in carousel.get('infoRows', []):
+            row = row.get('infoRowRenderer', {})
+            title = extract_str(row.get('title'))
+            data = extract_str(row.get('defaultMetadata'))
+            if title == 'SONG':
+                song['title'] = data
+            elif title == 'ARTIST':
+                song['artist'] = data
+            elif title == 'ALBUM':
+                song['album'] = data
+            elif title == 'WRITERS':
+                song['writers'] = data
+        info['music_list'].append(song)
+    return info
+
 def _extract_from_video_metadata(renderer_content):
     info = _extract_from_video_information_renderer(renderer_content)
     liberal_dict_update(info, _extract_likes_dislikes(renderer_content))
@@ -236,6 +292,7 @@ visible_extraction_dispatch = {
     'slimVideoActionBarRenderer': _extract_likes_dislikes,
     'slimOwnerRenderer': _extract_from_owner_renderer,
     'videoDescriptionHeaderRenderer': _extract_from_video_header_renderer,
+    'videoDescriptionMusicSectionRenderer': _extract_from_music_renderer,
     'expandableVideoDescriptionRenderer': _extract_from_description_renderer,
     'metadataRowContainerRenderer': _extract_metadata_row_info,
     # OR just this one, which contains SOME of the above inside it
@@ -308,17 +365,18 @@ def _extract_watch_info_mobile(top_level):
         # https://www.androidpolice.com/2019/10/31/google-youtube-app-comment-section-below-videos/
         # https://www.youtube.com/watch?v=bR5Q-wD-6qo
         if header_type == 'commentsEntryPointHeaderRenderer':
-            comment_count_text = extract_str(comment_info.get('headerText'))
+            comment_count_text = extract_str(multi_get(
+                comment_info, 'commentCount', 'headerText'))
         else:
             comment_count_text = extract_str(deep_get(comment_info,
                 'header', 'commentSectionHeaderRenderer', 'countText'))
         if comment_count_text == 'Comments':  # just this with no number, means 0 comments
-            info['comment_count'] = 0
+            info['comment_count'] = '0'
         else:
-            info['comment_count'] = extract_int(comment_count_text)
+            info['comment_count'] = extract_approx_int(comment_count_text)
         info['comments_disabled'] = False
     else:  # no comment section present means comments are disabled
-        info['comment_count'] = 0
+        info['comment_count'] = '0'
         info['comments_disabled'] = True
 
     # check for limited state
@@ -354,8 +412,10 @@ def _extract_watch_info_desktop(top_level):
     likes_dislikes = deep_get(video_info, 'sentimentBar', 'sentimentBarRenderer', 'tooltip', default='').split('/')
     if len(likes_dislikes) == 2:
         info['like_count'] = extract_int(likes_dislikes[0])
         info['dislike_count'] = extract_int(likes_dislikes[1])
+    else:
+        info['like_count'] = None
+        info['dislike_count'] = None
 
     info['title'] = extract_str(video_info.get('title', None))
     info['author'] = extract_str(deep_get(video_info, 'owner', 'videoOwnerRenderer', 'title'))
@@ -368,26 +428,28 @@ def _extract_watch_info_desktop(top_level):
     return info
 
 def update_format_with_codec_info(fmt, codec):
-    if (codec.startswith('av')
-            or codec in ('vp9', 'vp8', 'vp8.0', 'h263', 'h264', 'mp4v')):
+    if any(codec.startswith(c) for c in ('av', 'vp', 'h263', 'h264', 'mp4v')):
         if codec == 'vp8.0':
             codec = 'vp8'
         conservative_update(fmt, 'vcodec', codec)
     elif (codec.startswith('mp4a')
-            or codec in ('opus', 'mp3', 'aac', 'dtse', 'ec-3', 'vorbis')):
+            or codec in ('opus', 'mp3', 'aac', 'dtse', 'ec-3', 'vorbis',
+                         'ac-3')):
         conservative_update(fmt, 'acodec', codec)
     else:
         print('Warning: unrecognized codec: ' + codec)
 
 fmt_type_re = re.compile(
-    r'(text|audio|video)/([\w0-9]+); codecs="([\w0-9\.]+(?:, [\w0-9\.]+)*)"')
+    r'(text|audio|video)/([\w0-9]+); codecs="([^"]+)"')
 def update_format_with_type_info(fmt, yt_fmt):
     # 'type' for invidious api format
     mime_type = multi_get(yt_fmt, 'mimeType', 'type')
     if mime_type is None:
         return
     match = re.fullmatch(fmt_type_re, mime_type)
+
     if match is None:
         print('Warning: Could not read mimetype', mime_type)
         return
     type, fmt['ext'], codecs = match.groups()
     codecs = codecs.split(', ')
     for codec in codecs:
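Note: the loosened codecs group ([^"]+) accepts any quoted codec list, where the old character class rejected ids containing '-', such as the newly handled ec-3/ac-3. Demonstration:

import re

fmt_type_re = re.compile(
    r'(text|audio|video)/([\w0-9]+); codecs="([^"]+)"')

match = fmt_type_re.fullmatch('audio/mp4; codecs="ec-3"')
assert match is not None
assert match.groups() == ('audio', 'mp4', 'ec-3')
# The old pattern's codecs group, ([\w0-9\.]+(?:, [\w0-9\.]+)*), fails on
# this input because '-' was not in its character class.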
@@ -410,6 +472,13 @@ def _extract_formats(info, player_response):
     for yt_fmt in yt_formats:
         itag = yt_fmt.get('itag')
 
+        # Translated audio track
+        # Example: https://www.youtube.com/watch?v=gF9kkB0UWYQ
+        # Only get the original language for now so a foreign
+        # translation will not be picked just because it comes first
+        if deep_get(yt_fmt, 'audioTrack', 'audioIsDefault') is False:
+            continue
+
         fmt = {}
         fmt['itag'] = itag
         fmt['ext'] = None
@@ -770,7 +839,7 @@ def get_caption_url(info, language, format, automatic=False, translation_language
         url += '&tlang=' + translation_language
     return url
 
-def update_with_age_restricted_info(info, player_response):
+def update_with_new_urls(info, player_response):
     '''Inserts urls from player_response json'''
     ERROR_PREFIX = 'Error getting missing player or bypassing age-restriction: '