Compare commits

98 Commits (only the SHA1 column of the commit table is recoverable):

ed4b05d9b6, 6f88b1cec6, 03451fb8ae, e45c3fd48b, 1153ac8f24, c256a045f9,
98603439cb, a6ca011202, 114c2572a4, f64b362603, 2fd7910194, c2e53072f7,
c2986f3b14, 57854169f4, 3217305f9f, 639aadd2c1, 7157df13cd, 630e0137e0,
a0c51731af, d361996fc0, 4ef7dda14a, ee31cedae0, d3b0cb5e13, 0a79974d11,
4e327944a0, 09a437f7fb, 3cbe18aac0, 62418f8e95, bfd3760969, efd89b2e64,
0dc1747178, 8577164785, 8af98968dd, 8f00cbcdd6, af75551bc2, 3a6cc1e44f,
7664b5f0ff, ec5d236cad, d6b7a255d0, 22bc7324db, 48e8f271e7, 9a0ad6070b,
6039589f24, d4cba7eb6c, 70cb453280, 7a106331e7, 8775e131af, 1f16f7cb62,
80b7f3cd00, 8b79e067bc, cda0627d5a, ad40dd6d6b, b91d53dc6f, cda4fd1f26,
ff2a2edaa5, 38d8d5d4c5, f010452abf, ab93f8242b, 1505414a1a, c04d7c9a24,
3ee2df7faa, d2c883c211, 59c988f819, 629c811e84, 284024433b, 55a8e50d6a,
810dff999e, 4da91fb972, 874ac0a0ac, 89ae1e265b, 00bd9fee6f, b215e2a3b2,
97972d6fa3, 6ae20bb1f5, 5f3b90ad45, 2463af7685, 86bb312d6d, 964b99ea40,
51a1693789, ca4a735692, 2140f48919, 4be01d3964, b45e3476c8, d591956baa,
6011a08cdf, 83af4ab0d7, 5594d017e2, 8f9c5eeb48, 89e21302e3, cb4ceefada,
c4cc5cecbf, cc8f30eba2, 6740afd6a0, 63c0f4aa8f, 8908dc138f, cd7624f2cb,
5d53225874, 6af17450c6
.gitea/workflows/ci.yaml (new file, 23 lines)

```diff
@@ -0,0 +1,23 @@
+name: CI
+
+on: [push, pull_request]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: 3.11
+
+      - name: Install dependencies
+        run: |
+          pip install --upgrade pip
+          pip install -r requirements-dev.txt
+
+      - name: Run tests
+        run: pytest
```
.gitea/workflows/git-sync.yaml (new file, 40 lines)

```diff
@@ -0,0 +1,40 @@
+name: git-sync-with-mirror
+
+on:
+  push:
+    branches: [ master ]
+  workflow_dispatch:
+
+jobs:
+  git-sync:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: git-sync
+        env:
+          git_sync_source_repo: git@git.fridu.us:heckyel/yt-local.git
+          git_sync_destination_repo: ssh://git@c.fridu.us/software/yt-local.git
+        if: env.git_sync_source_repo && env.git_sync_destination_repo
+        uses: astounds/git-sync@v1
+        with:
+          source_repo: git@git.fridu.us:heckyel/yt-local.git
+          source_branch: "master"
+          destination_repo: ssh://git@c.fridu.us/software/yt-local.git
+          destination_branch: "master"
+          source_ssh_private_key: ${{ secrets.GIT_SYNC_SOURCE_SSH_PRIVATE_KEY }}
+          destination_ssh_private_key: ${{ secrets.GIT_SYNC_DESTINATION_SSH_PRIVATE_KEY }}
+
+      - name: git-sync-sourcehut
+        env:
+          git_sync_source_repo: git@git.fridu.us:heckyel/yt-local.git
+          git_sync_destination_repo: git@git.sr.ht:~heckyel/yt-local
+        if: env.git_sync_source_repo && env.git_sync_destination_repo
+        uses: astounds/git-sync@v1
+        with:
+          source_repo: git@git.fridu.us:heckyel/yt-local.git
+          source_branch: "master"
+          destination_repo: git@git.sr.ht:~heckyel/yt-local
+          destination_branch: "master"
+          source_ssh_private_key: ${{ secrets.GIT_SYNC_SOURCE_SSH_PRIVATE_KEY }}
+          destination_ssh_private_key: ${{ secrets.GIT_SYNC_DESTINATION_SSH_PRIVATE_KEY }}
+        continue-on-error: true
```
README.md

```diff
@@ -1,5 +1,3 @@
-[](https://drone.hgit.ga/heckyel/yt-local)
-
 # yt-local
 
 Fork of [youtube-local](https://github.com/user234683/youtube-local)
@@ -153,7 +151,7 @@ For coding guidelines and an overview of the software architecture, see the [HAC
 
 yt-local is not made to work in public mode, however there is an instance of yt-local in public mode but with less features
 
-- <https://fast-gorge-89206.herokuapp.com>
+- <https://m.fridu.us/https://youtube.com>
 
 ## License
 
```
Windows release build script:

```diff
@@ -18,7 +18,7 @@ if len(sys.argv) > 2:
 else:
     bitness = '64'
 
-if latest_version = 'oldwin':
+if latest_version == 'oldwin':
     bitness = '32'
     latest_version = '3.7.9'
     suffix = 'windows-vista-7-only'
@@ -114,10 +114,12 @@ if bitness == '32':
     visual_c_runtime_url = 'https://github.com/yuempek/vc-archive/raw/master/archives/vc15_(14.10.25017.0)_2017_x86.7z'
     visual_c_runtime_sha256 = '2549eb4d2ce4cf3a87425ea01940f74368bf1cda378ef8a8a1f1a12ed59f1547'
     visual_c_name = 'vc15_(14.10.25017.0)_2017_x86.7z'
+    visual_c_path_to_dlls = 'runtime_minimum/System'
 else:
     visual_c_runtime_url = 'https://github.com/yuempek/vc-archive/raw/master/archives/vc15_(14.10.25017.0)_2017_x64.7z'
     visual_c_runtime_sha256 = '4f00b824c37e1017a93fccbd5775e6ee54f824b6786f5730d257a87a3d9ce921'
     visual_c_name = 'vc15_(14.10.25017.0)_2017_x64.7z'
+    visual_c_path_to_dlls = 'runtime_minimum/System64'
 
 download_if_not_exists('get-pip.py', get_pip_url)
 
@@ -198,7 +200,7 @@ with open('./python/python3' + major_release + '._pth', 'a', encoding='utf-8') a
     f.write('..\n')'''
 
 log('Inserting Microsoft C Runtime')
-check_subp(subprocess.run([r'7z', '-y', 'e', '-opython', 'vc15_(14.10.25017.0)_2017_x86.7z', 'runtime_minimum/System']))
+check_subp(subprocess.run([r'7z', '-y', 'e', '-opython', visual_c_name, visual_c_path_to_dlls]))
 
 log('Installing dependencies')
 wine_run(['./python/python.exe', '-I', '-m', 'pip', 'install', '--no-compile', '-r', './requirements.txt'])
```
requirements-dev.txt:

```diff
@@ -1,28 +1,5 @@
-attrs==22.1.0
-Brotli==1.0.9
-cachetools==4.2.4
-click==8.0.4
-dataclasses==0.6
-defusedxml==0.7.1
-Flask==2.0.1
-gevent==22.10.2
-greenlet==2.0.1
-importlib-metadata==4.6.4
-iniconfig==1.1.1
-itsdangerous==2.0.1
-Jinja2==3.0.3
-MarkupSafe==2.0.1
-packaging==20.9
-pluggy>=0.13.1
-py==1.10.0
-pyparsing==2.4.7
-PySocks==1.7.1
-pytest==6.2.5
-stem==1.8.0
-toml==0.10.2
-typing-extensions==3.10.0.2
-urllib3==1.26.11
-Werkzeug==2.1.1
-zipp==3.5.1
-zope.event==4.5.0
-zope.interface==5.4.0
+# Include all production requirements
+-r requirements.txt
+
+# Development requirements
+pytest>=6.2.1
```
requirements.txt:

```diff
@@ -1,20 +1,8 @@
-Brotli==1.0.9
-cachetools==4.2.4
-click==8.0.4
-dataclasses==0.6
-defusedxml==0.7.1
-Flask==2.0.1
-gevent==22.10.2
-greenlet==2.0.1
-importlib-metadata==4.6.4
-itsdangerous==2.0.1
-Jinja2==3.0.3
-MarkupSafe==2.0.1
-PySocks==1.7.1
-stem==1.8.0
-typing-extensions==3.10.0.2
-urllib3==1.26.11
-Werkzeug==2.1.1
-zipp==3.5.1
-zope.event==4.5.0
-zope.interface==5.4.0
+Flask>=1.0.3
+gevent>=1.2.2
+Brotli>=1.0.7
+PySocks>=1.6.8
+urllib3>=1.24.1
+defusedxml>=0.5.0
+cachetools>=4.0.0
+stem>=1.8.0
```
Site proxy (proxy_site):

```diff
@@ -84,7 +84,7 @@ def proxy_site(env, start_response, video=False):
     else:
         response, cleanup_func = util.fetch_url_response(url, send_headers)
 
-    response_headers = response.getheaders()
+    response_headers = response.headers
     if isinstance(response_headers, urllib3._collections.HTTPHeaderDict):
         response_headers = response_headers.items()
     if video:
```
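The `getheaders()` to `headers` switch tracks urllib3, which deprecated `HTTPResponse.getheaders()` in the 1.26 line and dropped it in 2.0, while `.headers` works across versions. A minimal sketch of the access pattern the patched code relies on (the example request is illustrative, not from the commit):

```python
import urllib3

http = urllib3.PoolManager()
resp = http.request('GET', 'https://example.com')

headers = resp.headers  # HTTPHeaderDict on urllib3 responses
if isinstance(headers, urllib3._collections.HTTPHeaderDict):
    headers = headers.items()  # normalize to (name, value) pairs
```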
settings.py:

```diff
@@ -151,6 +151,13 @@ For security reasons, enabling this is not recommended.''',
         'category': 'interface',
     }),
 
+    ('autoplay_videos', {
+        'type': bool,
+        'default': False,
+        'comment': '',
+        'category': 'playback',
+    }),
+
     ('default_resolution', {
         'type': int,
         'default': 720,
@@ -200,12 +207,17 @@ For security reasons, enabling this is not recommended.''',
     }),
 
     ('prefer_uni_sources', {
-        'label': 'Prefer integrated sources',
-        'type': bool,
-        'default': False,
+        'label': 'Use integrated sources',
+        'type': int,
+        'default': 1,
         'comment': '',
+        'options': [
+            (0, 'Prefer not'),
+            (1, 'Prefer'),
+            (2, 'Always'),
+        ],
         'category': 'playback',
-        'description': 'If enabled and the default resolution is set to 360p or 720p, uses the unified (integrated) video files which contain audio and video, with buffering managed by the browser. If disabled, always uses the separate audio and video files through custom buffer management in av-merge via MediaSource.',
+        'description': 'If set to Prefer or Always and the default resolution is set to 360p or 720p, uses the unified (integrated) video files which contain audio and video, with buffering managed by the browser. If set to prefer not, uses the separate audio and video files through custom buffer management in av-merge via MediaSource unless they are unavailable.',
     }),
 
     ('use_video_player', {
@@ -298,11 +310,16 @@ Archive: https://archive.ph/OZQbN''',
         'comment': '',
     }),
 
-    ('gather_googlevideo_domains', {
+    ('include_shorts_in_subscriptions', {
         'type': bool,
-        'default': False,
-        'comment': '''Developer use to debug 403s''',
-        'hidden': True,
+        'default': 0,
+        'comment': '',
+    }),
+
+    ('include_shorts_in_channel', {
+        'type': bool,
+        'default': 1,
+        'comment': '',
     }),
 
     ('debugging_save_responses', {
@@ -314,7 +331,7 @@ Archive: https://archive.ph/OZQbN''',
 
     ('settings_version', {
         'type': int,
-        'default': 4,
+        'default': 6,
         'comment': '''Do not change, remove, or comment out this value, or else your settings may be lost or corrupted''',
         'hidden': True,
     }),
@@ -387,10 +404,28 @@ def upgrade_to_4(settings_dict):
     return new_settings
 
 
+def upgrade_to_5(settings_dict):
+    new_settings = settings_dict.copy()
+    if 'prefer_uni_sources' in settings_dict:
+        new_settings['prefer_uni_sources'] = int(settings_dict['prefer_uni_sources'])
+    new_settings['settings_version'] = 5
+    return new_settings
+
+
+def upgrade_to_6(settings_dict):
+    new_settings = settings_dict.copy()
+    if 'gather_googlevideo_domains' in new_settings:
+        del new_settings['gather_googlevideo_domains']
+    new_settings['settings_version'] = 6
+    return new_settings
+
+
 upgrade_functions = {
     1: upgrade_to_2,
     2: upgrade_to_3,
     3: upgrade_to_4,
+    4: upgrade_to_5,
+    5: upgrade_to_6,
 }
 
 
```
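The new `upgrade_to_5`/`upgrade_to_6` functions slot into `upgrade_functions`, which maps each old `settings_version` to a function that lifts the settings by exactly one version. The driver loop is not shown in this compare; a hedged sketch of how such a chain is typically applied (`CURRENT_VERSION` is an assumed name):

```python
CURRENT_VERSION = 6  # assumed constant; the diff only shows the new default

def upgrade_settings(settings_dict, upgrade_functions):
    # Apply one-step upgrades until the dict reaches the current version.
    version = settings_dict.get('settings_version', 1)
    while version < CURRENT_VERSION:
        settings_dict = upgrade_functions[version](settings_dict)
        version = settings_dict['settings_version']
    return settings_dict
```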
youtube/__init__.py:

```diff
@@ -54,7 +54,10 @@ def commatize(num):
     if num is None:
         return ''
     if isinstance(num, str):
-        num = int(num)
+        try:
+            num = int(num)
+        except ValueError:
+            return num
     return '{:,}'.format(num)
 
 
@@ -115,7 +118,18 @@ def error_page(e):
             error_message=exc_info()[1].error_message,
             slim=slim
         ), 502)
-    return flask.render_template('error.html', traceback=traceback.format_exc(), slim=slim), 500
+    elif (exc_info()[0] == util.FetchError
+          and exc_info()[1].code == '404'
+    ):
+        error_message = ('Error: The page you are looking for isn\'t here.')
+        return flask.render_template('error.html',
+                                     error_code=exc_info()[1].code,
+                                     error_message=error_message,
+                                     slim=slim), 404
+    return flask.render_template('error.html', traceback=traceback.format_exc(),
+                                 error_code=exc_info()[1].code,
+                                 slim=slim), 500
+    # return flask.render_template('error.html', traceback=traceback.format_exc(), slim=slim), 500
 
 
 font_choices = {
```
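After this patch, `commatize` degrades gracefully on strings that are not plain integers instead of raising `ValueError`. Expected behavior of the function as shown above (doctest-style, illustrative):

```python
>>> commatize(1234567)
'1,234,567'
>>> commatize('1234567')
'1,234,567'
>>> commatize('1.2M')   # previously raised ValueError; now passed through
'1.2M'
>>> commatize(None)
''
```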
youtube/channel.py:

```diff
@@ -1,6 +1,8 @@
 import base64
-from youtube import util, yt_data_extract, local_playlist, subscriptions
+from youtube import (util, yt_data_extract, local_playlist, subscriptions,
+                     playlist)
 from youtube import yt_app
+import settings
 
 import urllib
 import json
@@ -36,7 +38,7 @@ generic_cookie = (('Cookie', 'VISITOR_INFO1_LIVE=ST1Ti53r4fU'),)
 # changed offset field to uint id 1
 def channel_ctoken_v5(channel_id, page, sort, tab, view=1):
     new_sort = (2 if int(sort) == 1 else 1)
-    offset = str(30*(int(page) - 1))
+    offset = 30*(int(page) - 1)
     if tab == 'videos':
         tab = 15
     elif tab == 'shorts':
@@ -82,10 +84,44 @@ def channel_ctoken_v5(channel_id, page, sort, tab, view=1):
 
     return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
 
 
+def channel_about_ctoken(channel_id):
+    return proto.make_protobuf(
+        ('base64p',
+         [
+             [2, 80226972,
+              [
+                  [2, 2, channel_id],
+                  [2, 3,
+                   ('base64p',
+                    [
+                        [2, 110,
+                         [
+                             [2, 3,
+                              [
+                                  [2, 19,
+                                   [
+                                       [2, 1, b'66b0e9e9-0000-2820-9589-582429a83980'],
+                                   ]
+                                  ],
+                              ]
+                             ],
+                         ]
+                        ],
+                    ]
+                   )
+                  ],
+              ]
+             ],
+         ]
+        )
+    )
+
+
 # https://github.com/user234683/youtube-local/issues/151
 def channel_ctoken_v4(channel_id, page, sort, tab, view=1):
     new_sort = (2 if int(sort) == 1 else 1)
-    offset = 30*(int(page) - 1)
+    offset = str(30*(int(page) - 1))
     pointless_nest = proto.string(80226972,
         proto.string(2, channel_id)
         + proto.string(3,
@@ -228,7 +264,7 @@ def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
             'hl': 'en',
             'gl': 'US',
             'clientName': 'WEB',
-            'clientVersion': '2.20180830',
+            'clientVersion': '2.20240327.00.00',
         },
     },
     'continuation': ctoken,
@@ -243,7 +279,8 @@ def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
 
 
 # cache entries expire after 30 minutes
-@cachetools.func.ttl_cache(maxsize=128, ttl=30*60)
+number_of_videos_cache = cachetools.TTLCache(128, 30*60)
+@cachetools.cached(number_of_videos_cache)
 def get_number_of_videos_channel(channel_id):
     if channel_id is None:
         return 1000
@@ -255,7 +292,7 @@ def get_number_of_videos_channel(channel_id):
     try:
         response = util.fetch_url(url, headers_mobile,
             debug_name='number_of_videos', report_text='Got number of videos')
-    except urllib.error.HTTPError as e:
+    except (urllib.error.HTTPError, util.FetchError) as e:
         traceback.print_exc()
         print("Couldn't retrieve number of videos")
         return 1000
@@ -268,11 +305,14 @@ def get_number_of_videos_channel(channel_id):
         return int(match.group(1).replace(',',''))
     else:
         return 0
 
+
+def set_cached_number_of_videos(channel_id, num_videos):
+    @cachetools.cached(number_of_videos_cache)
+    def dummy_func_using_same_cache(channel_id):
+        return num_videos
+    dummy_func_using_same_cache(channel_id)
+
 
 channel_id_re = re.compile(r'videos\.xml\?channel_id=([a-zA-Z0-9_-]{24})"')
 
 
 @cachetools.func.lru_cache(maxsize=128)
 def get_channel_id(base_url):
     # method that gives the smallest possible response at ~4 kb
@@ -331,7 +371,7 @@ def get_channel_search_json(channel_id, query, page):
             'hl': 'en',
             'gl': 'US',
             'clientName': 'WEB',
-            'clientVersion': '2.20180830',
+            'clientVersion': '2.20240327.00.00',
         },
     },
     'continuation': ctoken,
@@ -349,15 +389,16 @@ def post_process_channel_info(info):
     info['avatar'] = util.prefix_url(info['avatar'])
     info['channel_url'] = util.prefix_url(info['channel_url'])
     for item in info['items']:
+        item['thumbnail'] = "https://i.ytimg.com/vi/{}/hqdefault.jpg".format(item['id'])
         util.prefix_urls(item)
         util.add_extra_html_info(item)
     if info['current_tab'] == 'about':
         for i, (text, url) in enumerate(info['links']):
-            if util.YOUTUBE_URL_RE.fullmatch(url):
+            if isinstance(url, str) and util.YOUTUBE_URL_RE.fullmatch(url):
                 info['links'][i] = (text, util.prefix_url(url))
 
 
-def get_channel_first_page(base_url=None, channel_id=None, tab='videos'):
+def get_channel_first_page(base_url=None, tab='videos', channel_id=None):
     if channel_id:
         base_url = 'https://www.youtube.com/channel/' + channel_id
     return util.fetch_url(base_url + '/' + tab + '?pbj=1&view=0',
@@ -370,43 +411,106 @@ playlist_sort_codes = {'2': "da", '3': "dd", '4': "lad"}
 # youtube.com/user/[username]/[tab]
 # youtube.com/c/[custom]/[tab]
 # youtube.com/[custom]/[tab]
 
 
 def get_channel_page_general_url(base_url, tab, request, channel_id=None):
 
     page_number = int(request.args.get('page', 1))
-    sort = request.args.get('sort', '3')
+    # sort 1: views
+    # sort 2: oldest
+    # sort 3: newest
+    # sort 4: newest - no shorts (Just a kludge on our end, not internal to yt)
+    default_sort = '3' if settings.include_shorts_in_channel else '4'
+    sort = request.args.get('sort', default_sort)
     view = request.args.get('view', '1')
     query = request.args.get('query', '')
     ctoken = request.args.get('ctoken', '')
-    default_params = (page_number == 1 and sort == '3' and view == '1')
+    include_shorts = (sort != '4')
+    default_params = (page_number == 1 and sort in ('3', '4') and view == '1')
     continuation = bool(ctoken) # whether or not we're using a continuation
+    page_size = 30
+    try_channel_api = True
+    polymer_json = None
 
-    if (tab in ('videos', 'shorts', 'streams') and channel_id and
-            not default_params):
-        tasks = (
-            gevent.spawn(get_number_of_videos_channel, channel_id),
-            gevent.spawn(get_channel_tab, channel_id, page_number, sort,
-                         tab, view, ctoken)
-        )
-        gevent.joinall(tasks)
-        util.check_gevent_exceptions(*tasks)
-        number_of_videos, polymer_json = tasks[0].value, tasks[1].value
-        continuation = True
-    elif tab in ('videos', 'shorts', 'streams'):
+    # Use the special UU playlist which contains all the channel's uploads
+    if tab == 'videos' and sort in ('3', '4'):
+        if not channel_id:
+            channel_id = get_channel_id(base_url)
+        if page_number == 1 and include_shorts:
+            tasks = (
+                gevent.spawn(playlist.playlist_first_page,
+                             'UU' + channel_id[2:],
+                             report_text='Retrieved channel videos'),
+                gevent.spawn(get_metadata, channel_id),
+            )
+            gevent.joinall(tasks)
+            util.check_gevent_exceptions(*tasks)
+
+            # Ignore the metadata for now, it is cached and will be
+            # recalled later
+            pl_json = tasks[0].value
+            pl_info = yt_data_extract.extract_playlist_info(pl_json)
+            number_of_videos = pl_info['metadata']['video_count']
+            if number_of_videos is None:
+                number_of_videos = 1000
+            else:
+                set_cached_number_of_videos(channel_id, number_of_videos)
+        else:
+            tasks = (
+                gevent.spawn(playlist.get_videos, 'UU' + channel_id[2:],
+                             page_number, include_shorts=include_shorts),
+                gevent.spawn(get_metadata, channel_id),
+                gevent.spawn(get_number_of_videos_channel, channel_id),
+            )
+            gevent.joinall(tasks)
+            util.check_gevent_exceptions(*tasks)
+
+            pl_json = tasks[0].value
+            pl_info = yt_data_extract.extract_playlist_info(pl_json)
+            number_of_videos = tasks[2].value
+
+        info = pl_info
+        info['channel_id'] = channel_id
+        info['current_tab'] = 'videos'
+        if info['items']: # Success
+            page_size = 100
+            try_channel_api = False
+        else: # Try the first-page method next
+            try_channel_api = True
+
+    # Use the regular channel API
+    if tab in ('shorts', 'streams') or (tab=='videos' and try_channel_api):
         if channel_id:
             num_videos_call = (get_number_of_videos_channel, channel_id)
         else:
             num_videos_call = (get_number_of_videos_general, base_url)
 
+        # Use ctoken method, which YouTube changes all the time
+        if channel_id and not default_params:
+            if sort == 4:
+                _sort = 3
+            else:
+                _sort = sort
+            page_call = (get_channel_tab, channel_id, page_number, _sort,
+                         tab, view, ctoken)
+        # Use the first-page method, which won't break
+        else:
+            page_call = (get_channel_first_page, base_url, tab)
+
         tasks = (
             gevent.spawn(*num_videos_call),
-            gevent.spawn(get_channel_first_page, base_url=base_url, tab=tab),
+            gevent.spawn(*page_call),
         )
         gevent.joinall(tasks)
         util.check_gevent_exceptions(*tasks)
         number_of_videos, polymer_json = tasks[0].value, tasks[1].value
 
     elif tab == 'about':
-        polymer_json = util.fetch_url(base_url + '/about?pbj=1', headers_desktop, debug_name='gen_channel_about')
+        # polymer_json = util.fetch_url(base_url + '/about?pbj=1', headers_desktop, debug_name='gen_channel_about')
+        channel_id = get_channel_id(base_url)
+        ctoken = channel_about_ctoken(channel_id)
+        polymer_json = util.call_youtube_api('web', 'browse', {
+            'continuation': ctoken,
+        })
+        continuation=True
     elif tab == 'playlists' and page_number == 1:
         polymer_json = util.fetch_url(base_url+ '/playlists?pbj=1&view=1&sort=' + playlist_sort_codes[sort], headers_desktop, debug_name='gen_channel_playlists')
     elif tab == 'playlists':
@@ -418,12 +522,19 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
     elif tab == 'search':
         url = base_url + '/search?pbj=1&query=' + urllib.parse.quote(query, safe='')
         polymer_json = util.fetch_url(url, headers_desktop, debug_name='gen_channel_search')
+    elif tab == 'videos':
+        pass
     else:
         flask.abort(404, 'Unknown channel tab: ' + tab)
 
-    info = yt_data_extract.extract_channel_info(json.loads(polymer_json), tab,
-                                                continuation=continuation)
+    if polymer_json is not None:
+        info = yt_data_extract.extract_channel_info(
+            json.loads(polymer_json), tab, continuation=continuation
+        )
+
+        if info['error'] is not None:
+            return flask.render_template('error.html', error_message=info['error'])
+
     if channel_id:
         info['channel_url'] = 'https://www.youtube.com/channel/' + channel_id
         info['channel_id'] = channel_id
@@ -431,11 +542,11 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
         channel_id = info['channel_id']
 
     # Will have microformat present, cache metadata while we have it
-    if channel_id and default_params:
+    if channel_id and default_params and tab not in ('videos', 'about'):
         metadata = extract_metadata_for_caching(info)
         set_cached_metadata(channel_id, metadata)
     # Otherwise, populate with our (hopefully cached) metadata
-    elif channel_id and info['channel_name'] is None:
+    elif channel_id and info.get('channel_name') is None:
         metadata = get_metadata(channel_id)
         for key, value in metadata.items():
             yt_data_extract.conservative_update(info, key, value)
@@ -448,12 +559,9 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
     for item in info['items']:
         item.update(additional_info)
 
-    if info['error'] is not None:
-        return flask.render_template('error.html', error_message = info['error'])
-
     if tab in ('videos', 'shorts', 'streams'):
         info['number_of_videos'] = number_of_videos
-        info['number_of_pages'] = math.ceil(number_of_videos/30)
+        info['number_of_pages'] = math.ceil(number_of_videos/page_size)
     info['header_playlist_names'] = local_playlist.get_playlist_names()
     if tab in ('videos', 'shorts', 'streams', 'playlists'):
         info['current_sort'] = sort
```
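`set_cached_number_of_videos` relies on `cachetools.cached` keying entries by call arguments: decorating a throwaway inner function with the same `TTLCache` and calling it once plants a value under the key that `get_number_of_videos_channel(channel_id)` will later hit. A self-contained sketch of the pattern:

```python
import cachetools

cache = cachetools.TTLCache(maxsize=128, ttl=30*60)

@cachetools.cached(cache)
def get_value(key):
    return 1000  # stands in for an expensive network lookup

def set_cached_value(key, value):
    # Decorated with the *same* cache object, so dummy(key) stores `value`
    # under the exact key that get_value(key) would use.
    @cachetools.cached(cache)
    def dummy(key):
        return value
    dummy(key)

set_cached_value('UCxxxx', 42)    # hypothetical channel id
assert get_value('UCxxxx') == 42  # served from the shared cache, no lookup
```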
youtube/comments.py:

```diff
@@ -78,7 +78,7 @@ def single_comment_ctoken(video_id, comment_id):
 
 def post_process_comments_info(comments_info):
     for comment in comments_info['comments']:
-        comment['author'] = strip_non_ascii(comment['author'])
+        comment['author'] = strip_non_ascii(comment['author']) if comment.get('author') else ""
         comment['author_url'] = concat_or_none(
             '/', comment['author_url'])
         comment['author_avatar'] = concat_or_none(
@@ -97,7 +97,7 @@ def post_process_comments_info(comments_info):
         ctoken = comment['reply_ctoken']
         ctoken, err = proto.set_protobuf_value(
             ctoken,
-            'base64p', 6, 3, 9, value=250)
+            'base64p', 6, 3, 9, value=200)
         if err:
             print('Error setting ctoken value:')
             print(err)
@@ -127,7 +127,7 @@ def post_process_comments_info(comments_info):
     # change max_replies field to 250 in ctoken
     new_ctoken, err = proto.set_protobuf_value(
         ctoken,
-        'base64p', 6, 3, 9, value=250)
+        'base64p', 6, 3, 9, value=200)
     if err:
         print('Error setting ctoken value:')
         print(err)
@@ -150,7 +150,7 @@ def post_process_comments_info(comments_info):
         util.URL_ORIGIN, '/watch?v=', comments_info['video_id'])
     comments_info['video_thumbnail'] = concat_or_none(
         settings.img_prefix, 'https://i.ytimg.com/vi/',
-        comments_info['video_id'], '/mqdefault.jpg'
+        comments_info['video_id'], '/hqdefault.jpg'
     )
 
 
@@ -189,10 +189,10 @@ def video_comments(video_id, sort=0, offset=0, lc='', secret_key=''):
             comments_info['error'] += '\n\n' + e.error_message
             comments_info['error'] += '\n\nExit node IP address: %s' % e.ip
         else:
-            comments_info['error'] = 'YouTube blocked the request. IP address: %s' % e.ip
+            comments_info['error'] = 'YouTube blocked the request. Error: %s' % str(e)
 
     except Exception as e:
-        comments_info['error'] = 'YouTube blocked the request. IP address: %s' % e.ip
+        comments_info['error'] = 'YouTube blocked the request. Error: %s' % str(e)
 
     if comments_info.get('error'):
         print('Error retrieving comments for ' + str(video_id) + ':\n' +
```
App version helper (app_version):

```diff
@@ -11,17 +11,10 @@ import subprocess
 def app_version():
     def minimal_env_cmd(cmd):
         # make minimal environment
-        env = {}
-        for k in ['SYSTEMROOT', 'PATH']:
-            v = os.environ.get(k)
-            if v is not None:
-                env[k] = v
+        env = {k: os.environ[k] for k in ['SYSTEMROOT', 'PATH'] if k in os.environ}
+        env.update({'LANGUAGE': 'C', 'LANG': 'C', 'LC_ALL': 'C'})
 
-        env['LANGUAGE'] = 'C'
-        env['LANG'] = 'C'
-        env['LC_ALL'] = 'C'
-        out = subprocess.Popen(
-            cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
+        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
         return out
 
     subst_list = {
@@ -31,24 +24,21 @@ def app_version():
     }
 
     if os.system("command -v git > /dev/null 2>&1") != 0:
-        subst_list
-    else:
-        if call(["git", "branch"], stderr=STDOUT,
-                stdout=open(os.devnull, 'w')) != 0:
-            subst_list
-        else:
-            # version
-            describe = minimal_env_cmd(["git", "describe", "--always"])
-            git_revision = describe.strip().decode('ascii')
-            # branch
-            branch = minimal_env_cmd(["git", "branch"])
-            git_branch = branch.strip().decode('ascii').replace('* ', '')
-
-            subst_list = {
-                "version": __version__,
-                "branch": git_branch,
-                "commit": git_revision
-            }
+        return subst_list
+
+    if call(["git", "branch"], stderr=STDOUT, stdout=open(os.devnull, 'w')) != 0:
+        return subst_list
+
+    describe = minimal_env_cmd(["git", "describe", "--tags", "--always"])
+    git_revision = describe.strip().decode('ascii')
+
+    branch = minimal_env_cmd(["git", "branch"])
+    git_branch = branch.strip().decode('ascii').replace('* ', '')
+
+    subst_list.update({
+        "branch": git_branch,
+        "commit": git_revision
+    })
 
     return subst_list
```
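The rewrite replaces nested `else` branches with early returns and pins git subprocesses to the C locale, so that parsing `git branch` output does not depend on the user's language. The same idea as a standalone sketch (the printed output is illustrative):

```python
import os
import subprocess

def minimal_env_cmd(cmd):
    # Only PATH (and SYSTEMROOT on Windows) survive; the C locale makes
    # git's output stable regardless of the user's language settings.
    env = {k: os.environ[k] for k in ['SYSTEMROOT', 'PATH'] if k in os.environ}
    env.update({'LANGUAGE': 'C', 'LANG': 'C', 'LC_ALL': 'C'})
    return subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]

# e.g. b'v3.1.0-5-gdeadbee' on a tagged repository (illustrative output)
print(minimal_env_cmd(['git', 'describe', '--tags', '--always']))
```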
youtube/playlist.py:

```diff
@@ -12,12 +12,13 @@ from flask import request
 import flask
 
 
-def playlist_ctoken(playlist_id, offset):
+def playlist_ctoken(playlist_id, offset, include_shorts=True):
 
     offset = proto.uint(1, offset)
-    # this is just obfuscation as far as I can tell. It doesn't even follow protobuf
     offset = b'PT:' + proto.unpadded_b64encode(offset)
     offset = proto.string(15, offset)
+    if not include_shorts:
+        offset += proto.string(104, proto.uint(2, 1))
 
     continuation_info = proto.string(3, proto.percent_b64encode(offset))
 
@@ -26,47 +27,46 @@ def playlist_ctoken(playlist_id, offset):
 
     return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
 
-# initial request types:
-# polymer_json: https://m.youtube.com/playlist?list=PLv3TTBr1W_9tppikBxAE_G6qjWdBljBHJ&pbj=1&lact=0
-# ajax json: https://m.youtube.com/playlist?list=PLv3TTBr1W_9tppikBxAE_G6qjWdBljBHJ&pbj=1&lact=0 with header X-YouTube-Client-Version: 1.20180418
-
-# continuation request types:
-# polymer_json: https://m.youtube.com/playlist?&ctoken=[...]&pbj=1
-# ajax json: https://m.youtube.com/playlist?action_continuation=1&ajax=1&ctoken=[...]
-
-headers_1 = (
-    ('Accept', '*/*'),
-    ('Accept-Language', 'en-US,en;q=0.5'),
-    ('X-YouTube-Client-Name', '2'),
-    ('X-YouTube-Client-Version', '2.20180614'),
-)
-
-def playlist_first_page(playlist_id, report_text="Retrieved playlist"):
-    url = 'https://m.youtube.com/playlist?list=' + playlist_id + '&pbj=1'
-    content = util.fetch_url(url, util.mobile_ua + headers_1, report_text=report_text, debug_name='playlist_first_page')
-    content = json.loads(content.decode('utf-8'))
+
+def playlist_first_page(playlist_id, report_text="Retrieved playlist",
+                        use_mobile=False):
+    if use_mobile:
+        url = 'https://m.youtube.com/playlist?list=' + playlist_id + '&pbj=1'
+        content = util.fetch_url(
+            url, util.mobile_xhr_headers,
+            report_text=report_text, debug_name='playlist_first_page'
+        )
+        content = json.loads(content.decode('utf-8'))
+    else:
+        url = 'https://www.youtube.com/playlist?list=' + playlist_id + '&pbj=1'
+        content = util.fetch_url(
+            url, util.desktop_xhr_headers,
+            report_text=report_text, debug_name='playlist_first_page'
+        )
+        content = json.loads(content.decode('utf-8'))
 
     return content
 
 
-#https://m.youtube.com/playlist?itct=CBMQybcCIhMIptj9xJaJ2wIV2JKcCh3Idwu-&ctoken=4qmFsgI2EiRWTFBMT3kwajlBdmxWWlB0bzZJa2pLZnB1MFNjeC0tN1BHVEMaDmVnWlFWRHBEUWxFJTNE&pbj=1
-def get_videos(playlist_id, page):
-    url = "https://m.youtube.com/playlist?ctoken=" + playlist_ctoken(playlist_id, (int(page)-1)*20) + "&pbj=1"
-    headers = {
-        'User-Agent': ' Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1',
-        'Accept': '*/*',
-        'Accept-Language': 'en-US,en;q=0.5',
-        'X-YouTube-Client-Name': '2',
-        'X-YouTube-Client-Version': '2.20180508',
-    }
+def get_videos(playlist_id, page, include_shorts=True, use_mobile=False,
+               report_text='Retrieved playlist'):
+    # mobile requests return 20 videos per page
+    if use_mobile:
+        page_size = 20
+        headers = util.mobile_xhr_headers
+    # desktop requests return 100 videos per page
+    else:
+        page_size = 100
+        headers = util.desktop_xhr_headers
+
+    url = "https://m.youtube.com/playlist?ctoken="
+    url += playlist_ctoken(playlist_id, (int(page)-1)*page_size,
+                           include_shorts=include_shorts)
+    url += "&pbj=1"
     content = util.fetch_url(
-        url, headers,
-        report_text="Retrieved playlist", debug_name='playlist_videos')
+        url, headers, report_text=report_text,
+        debug_name='playlist_videos'
+    )
 
     info = json.loads(content.decode('utf-8'))
     return info
@@ -85,7 +85,10 @@ def get_playlist_page():
         this_page_json = first_page_json
     else:
         tasks = (
-            gevent.spawn(playlist_first_page, playlist_id, report_text="Retrieved playlist info" ),
+            gevent.spawn(
+                playlist_first_page, playlist_id,
+                report_text="Retrieved playlist info", use_mobile=True
+            ),
             gevent.spawn(get_videos, playlist_id, page)
         )
         gevent.joinall(tasks)
@@ -104,7 +107,7 @@ def get_playlist_page():
         util.prefix_urls(item)
         util.add_extra_html_info(item)
         if 'id' in item:
-            item['thumbnail'] = settings.img_prefix + 'https://i.ytimg.com/vi/' + item['id'] + '/default.jpg'
+            item['thumbnail'] = f"{settings.img_prefix}https://i.ytimg.com/vi/{item['id']}/hqdefault.jpg"
 
         item['url'] += '&list=' + playlist_id
         if item['index']:
@@ -112,13 +115,13 @@ def get_playlist_page():
 
     video_count = yt_data_extract.deep_get(info, 'metadata', 'video_count')
     if video_count is None:
-        video_count = 40
+        video_count = 1000
 
     return flask.render_template(
         'playlist.html',
         header_playlist_names=local_playlist.get_playlist_names(),
         video_list=info.get('items', []),
-        num_pages=math.ceil(video_count/20),
+        num_pages=math.ceil(video_count/100),
         parameters_dictionary=request.args,
 
         **info['metadata']
```
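Because the desktop XHR endpoint returns 100 videos per page versus 20 on mobile, the ctoken offset is now scaled by `page_size` and `num_pages` is computed with `/100`. The paging arithmetic as a small sketch:

```python
def playlist_offset(page, use_mobile=False):
    # Offset encoded into the ctoken: pages hold 20 items on the mobile
    # endpoint and 100 on the desktop endpoint.
    page_size = 20 if use_mobile else 100
    return (int(page) - 1) * page_size

assert playlist_offset(1) == 0
assert playlist_offset(3) == 200
assert playlist_offset(3, use_mobile=True) == 40
```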
youtube/proto.py:

```diff
@@ -141,6 +141,17 @@ base64_enc_funcs = {
 
 
 def _make_protobuf(data):
+    '''
+    Input: Recursive list of protobuf objects or base-64 encodings
+    Output: Protobuf bytestring
+    Each protobuf object takes the form [wire_type, field_number, field_data]
+    If a string protobuf has a list/tuple of length 2, this has the form
+        (base64 type, data)
+    The base64 types are
+    - base64 means a base64 encode with equals sign paddings
+    - base64s means a base64 encode without padding
+    - base64p means a url base64 encode with equals signs replaced with %3D
+    '''
     # must be dict mapping field_number to [wire_type, value]
     if isinstance(data, dict):
         new_data = []
```
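For illustration, a call in the format the new docstring describes, modeled on the `channel_about_ctoken` helper added in this same compare; the import path and channel id are assumptions, not shown in the diff:

```python
from youtube import proto  # assumed import path

token = proto.make_protobuf(
    ('base64p',  # url-safe base64 with '=' escaped as %3D
     [
         [2, 80226972,  # [wire_type, field_number, field_data]
          [
              [2, 2, 'UCxxxxxxxxxxxxxxxxxxxxxx'],  # hypothetical channel id
          ]
         ],
     ]
    )
)
print(token)
```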
Shared button styles:

```diff
@@ -256,7 +256,8 @@ hr {
     padding-top: 6px;
     text-align: center;
     white-space: nowrap;
-    border: none;
+    border: 1px solid;
+    border-color: var(--button-border);
     border-radius: 0.2rem;
 }
 
```
|
|||||||
:root {
|
:root {
|
||||||
--background: #212121;
|
--background: #121113;
|
||||||
--text: #FFFFFF;
|
--text: #FFFFFF;
|
||||||
--secondary-hover: #73828c;
|
--secondary-hover: #222222;
|
||||||
--secondary-focus: #303030;
|
--secondary-focus: #121113;
|
||||||
--secondary-inverse: #FFF;
|
--secondary-inverse: #FFFFFF;
|
||||||
--primary-background: #242424;
|
--primary-background: #242424;
|
||||||
--secondary-background: #424242;
|
--secondary-background: #222222;
|
||||||
--thumb-background: #757575;
|
--thumb-background: #222222;
|
||||||
--link: #00B0FF;
|
--link: #00B0FF;
|
||||||
--link-visited: #40C4FF;
|
--link-visited: #40C4FF;
|
||||||
--border-bg: #FFFFFF;
|
--border-bg: #222222;
|
||||||
--buttom: #dcdcdb;
|
--border-bg-settings: #000000;
|
||||||
--buttom-text: #415462;
|
--border-bg-license: #000000;
|
||||||
--button-border: #91918c;
|
--buttom: #121113;
|
||||||
--buttom-hover: #BBB;
|
--buttom-text: #FFFFFF;
|
||||||
--search-text: #FFF;
|
--button-border: #222222;
|
||||||
--time-background: #212121;
|
--buttom-hover: #222222;
|
||||||
--time-text: #FFF;
|
--search-text: #FFFFFF;
|
||||||
|
--time-background: #121113;
|
||||||
|
--time-text: #FFFFFF;
|
||||||
}
|
}
|
||||||
|
|||||||
Gray-blue theme variables:

```diff
@@ -1,19 +1,21 @@
 :root {
-    --background: #2d3743;
+    --background: #2D3743;
     --text: #FFFFFF;
-    --secondary-hover: #73828c;
+    --secondary-hover: #73828C;
     --secondary-focus: rgba(115, 130, 140, 0.125);
     --secondary-inverse: #FFFFFF;
-    --primary-background: #2d3743;
+    --primary-background: #2D3743;
     --secondary-background: #102027;
     --thumb-background: #35404D;
-    --link: #22aaff;
-    --link-visited: #7755ff;
+    --link: #22AAFF;
+    --link-visited: #7755FF;
     --border-bg: #FFFFFF;
-    --buttom: #DCDCDC;
-    --buttom-text: #415462;
-    --button-border: #91918c;
-    --buttom-hover: #BBBBBB;
+    --border-bg-settings: #FFFFFF;
+    --border-bg-license: #FFFFFF;
+    --buttom: #2D3743;
+    --buttom-text: #FFFFFF;
+    --button-border: #102027;
+    --buttom-hover: #102027;
     --search-text: #FFFFFF;
     --time-background: #212121;
     --time-text: #FFFFFF;
```
@@ -20,6 +20,29 @@
|
|||||||
// TODO: Call abort to cancel in-progress appends?
|
// TODO: Call abort to cancel in-progress appends?
|
||||||
|
|
||||||
|
|
||||||
|
// Buffer sizes for different systems
|
||||||
|
const BUFFER_CONFIG = {
|
||||||
|
default: 50 * 10**6, // 50 megabytes
|
||||||
|
webOS: 20 * 10**6, // 20 megabytes WebOS (LG)
|
||||||
|
samsungTizen: 20 * 10**6, // 20 megabytes Samsung Tizen OS
|
||||||
|
androidTV: 30 * 10**6, // 30 megabytes Android TV
|
||||||
|
desktop: 50 * 10**6, // 50 megabytes PC/Mac
|
||||||
|
};
|
||||||
|
|
||||||
|
function detectSystem() {
|
||||||
|
const userAgent = navigator.userAgent.toLowerCase();
|
||||||
|
if (/webos|lg browser/i.test(userAgent)) {
|
||||||
|
return "webOS";
|
||||||
|
} else if (/tizen/i.test(userAgent)) {
|
||||||
|
return "samsungTizen";
|
||||||
|
} else if (/android tv|smart-tv/i.test(userAgent)) {
|
||||||
|
return "androidTV";
|
||||||
|
} else if (/firefox|chrome|safari|edge/i.test(userAgent)) {
|
||||||
|
return "desktop";
|
||||||
|
} else {
|
||||||
|
return "default";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
function AVMerge(video, srcInfo, startTime){
|
function AVMerge(video, srcInfo, startTime){
|
||||||
this.audioSource = null;
|
this.audioSource = null;
|
||||||
@@ -164,6 +187,8 @@ AVMerge.prototype.printDebuggingInfo = function() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
function Stream(avMerge, source, startTime, avRatio) {
|
function Stream(avMerge, source, startTime, avRatio) {
|
||||||
|
const selectedSystem = detectSystem();
|
||||||
|
let baseBufferTarget = BUFFER_CONFIG[selectedSystem] || BUFFER_CONFIG.default;
|
||||||
this.avMerge = avMerge;
|
this.avMerge = avMerge;
|
||||||
this.video = avMerge.video;
|
this.video = avMerge.video;
|
||||||
this.url = source['url'];
|
this.url = source['url'];
|
||||||
@@ -173,10 +198,11 @@ function Stream(avMerge, source, startTime, avRatio) {
|
|||||||
this.mimeCodec = source['mime_codec']
|
this.mimeCodec = source['mime_codec']
|
||||||
this.streamType = source['acodec'] ? 'audio' : 'video';
|
this.streamType = source['acodec'] ? 'audio' : 'video';
|
||||||
if (this.streamType == 'audio') {
|
if (this.streamType == 'audio') {
|
||||||
this.bufferTarget = avRatio*50*10**6;
|
this.bufferTarget = avRatio * baseBufferTarget;
|
||||||
} else {
|
} else {
|
||||||
this.bufferTarget = 50*10**6; // 50 megabytes
|
this.bufferTarget = baseBufferTarget;
|
||||||
}
|
}
|
||||||
|
console.info(`Detected system: ${selectedSystem}. Applying bufferTarget of ${this.bufferTarget} bytes to ${this.streamType}.`);
|
||||||
|
|
||||||
this.initRange = source['init_range'];
|
this.initRange = source['init_range'];
|
||||||
this.indexRange = source['index_range'];
|
this.indexRange = source['index_range'];
|
||||||
@@ -204,6 +230,8 @@ Stream.prototype.setup = async function(){
|
|||||||
this.url,
|
this.url,
|
||||||
this.initRange.start,
|
this.initRange.start,
|
||||||
this.indexRange.end,
|
this.indexRange.end,
|
||||||
|
'Initialization+index segments',
|
||||||
|
).then(
|
||||||
(buffer) => {
|
(buffer) => {
|
||||||
let init_end = this.initRange.end - this.initRange.start + 1;
|
let init_end = this.initRange.end - this.initRange.start + 1;
|
||||||
let index_start = this.indexRange.start - this.initRange.start;
|
let index_start = this.indexRange.start - this.initRange.start;
|
||||||
@@ -211,22 +239,23 @@ Stream.prototype.setup = async function(){
|
|||||||
this.setupInitSegment(buffer.slice(0, init_end));
|
this.setupInitSegment(buffer.slice(0, init_end));
|
||||||
this.setupSegmentIndex(buffer.slice(index_start, index_end));
|
this.setupSegmentIndex(buffer.slice(index_start, index_end));
|
||||||
}
|
}
|
||||||
)
|
);
|
||||||
} else {
|
} else {
|
||||||
// initialization data
|
// initialization data
|
||||||
await fetchRange(
|
await fetchRange(
|
||||||
this.url,
|
this.url,
|
||||||
this.initRange.start,
|
this.initRange.start,
|
||||||
this.initRange.end,
|
this.initRange.end,
|
||||||
this.setupInitSegment.bind(this),
|
'Initialization segment',
|
||||||
);
|
).then(this.setupInitSegment.bind(this));
|
||||||
|
|
||||||
// sidx (segment index) table
|
// sidx (segment index) table
|
||||||
fetchRange(
|
fetchRange(
|
||||||
this.url,
|
this.url,
|
||||||
this.indexRange.start,
|
this.indexRange.start,
|
||||||
this.indexRange.end,
|
this.indexRange.end,
|
||||||
this.setupSegmentIndex.bind(this)
|
'Index segment',
|
||||||
);
|
).then(this.setupSegmentIndex.bind(this));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Stream.prototype.setupInitSegment = function(initSegment) {
|
Stream.prototype.setupInitSegment = function(initSegment) {
|
||||||
@@ -388,7 +417,7 @@ Stream.prototype.getSegmentIdx = function(videoTime) {
|
|||||||
}
|
}
|
||||||
index = index + increment;
|
index = index + increment;
|
||||||
}
|
}
|
||||||
this.reportInfo('Could not find segment index for time', videoTime);
|
this.reportError('Could not find segment index for time', videoTime);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
Stream.prototype.checkBuffer = async function() {
|
Stream.prototype.checkBuffer = async function() {
|
||||||
@@ -485,8 +514,8 @@ Stream.prototype.fetchSegment = function(segmentIdx) {
|
|||||||
this.url,
|
this.url,
|
||||||
entry.start,
|
entry.start,
|
||||||
entry.end,
|
entry.end,
|
||||||
this.appendSegment.bind(this, segmentIdx),
|
String(this.streamType) + ' segment ' + String(segmentIdx),
|
||||||
);
|
).then(this.appendSegment.bind(this, segmentIdx));
|
||||||
}
|
}
|
||||||
Stream.prototype.fetchSegmentIfNeeded = function(segmentIdx) {
|
Stream.prototype.fetchSegmentIfNeeded = function(segmentIdx) {
|
||||||
if (segmentIdx < 0 || segmentIdx >= this.sidx.entries.length){
|
if (segmentIdx < 0 || segmentIdx >= this.sidx.entries.length){
|
||||||
@@ -518,22 +547,56 @@ Stream.prototype.reportWarning = function(...args) {
|
|||||||
Stream.prototype.reportError = function(...args) {
|
Stream.prototype.reportError = function(...args) {
|
||||||
reportError(String(this.streamType) + ':', ...args);
|
reportError(String(this.streamType) + ':', ...args);
|
||||||
}
|
}
|
||||||
Stream.prototype.reportInfo = function(...args) {
|
|
||||||
reportInfo(String(this.streamType) + ':', ...args);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
// Utility functions
|
// Utility functions
|
||||||
|
|
||||||
function fetchRange(url, start, end, cb) {
|
// https://gomakethings.com/promise-based-xhr/
|
||||||
|
// https://stackoverflow.com/a/30008115
|
||||||
|
// http://lofi.limo/blog/retry-xmlhttprequest-carefully
|
||||||
|
function fetchRange(url, start, end, debugInfo) {
|
||||||
return new Promise((resolve, reject) => {
|
return new Promise((resolve, reject) => {
|
||||||
|
let retryCount = 0;
|
||||||
let xhr = new XMLHttpRequest();
|
let xhr = new XMLHttpRequest();
|
||||||
|
function onFailure(err, message, maxRetries=5){
|
||||||
|
message = debugInfo + ': ' + message + ' - Err: ' + String(err);
|
||||||
|
retryCount++;
|
||||||
|
if (retryCount > maxRetries || xhr.status == 403){
|
||||||
|
reportError('fetchRange error while fetching ' + message);
|
||||||
|
reject(message);
|
||||||
|
return;
|
||||||
|
} else {
|
||||||
|
reportWarning('Failed to fetch ' + message
|
||||||
|
+ '. Attempting retry '
|
||||||
|
+ String(retryCount) +'/' + String(maxRetries));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retry in 1 second, doubled for each next retry
|
||||||
|
setTimeout(function(){
|
||||||
|
xhr.open('get',url);
|
||||||
|
xhr.send();
|
||||||
|
}, 1000*Math.pow(2,(retryCount-1)));
|
||||||
|
}
|
||||||
xhr.open('get', url);
|
xhr.open('get', url);
|
||||||
|
xhr.timeout = 15000;
|
||||||
xhr.responseType = 'arraybuffer';
|
xhr.responseType = 'arraybuffer';
|
||||||
xhr.setRequestHeader('Range', 'bytes=' + start + '-' + end);
|
xhr.setRequestHeader('Range', 'bytes=' + start + '-' + end);
|
||||||
xhr.onload = function() {
|
xhr.onload = function (e) {
|
||||||
//bytesFetched += end - start + 1;
|
if (xhr.status >= 200 && xhr.status < 300) {
|
||||||
resolve(cb(xhr.response));
|
resolve(xhr.response);
|
||||||
|
} else {
|
||||||
|
onFailure(e,
|
||||||
|
'Status '
|
||||||
|
+ String(xhr.status) + ' ' + String(xhr.statusText)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
xhr.onerror = function (event) {
|
||||||
|
onFailure(event, 'Network error');
|
||||||
|
};
|
||||||
|
xhr.ontimeout = function (event){
|
||||||
|
xhr.timeout += 5000;
|
||||||
|
onFailure(null, 'Timeout (15s)', 5);
|
||||||
};
|
};
|
||||||
xhr.send();
|
xhr.send();
|
||||||
});
|
});
|
||||||
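The retry logic above doubles the wait between attempts: 1000*Math.pow(2, retryCount-1) ms gives 1 s, 2 s, 4 s, 8 s, 16 s across the default five retries, after which the promise is rejected (or immediately on a 403). A minimal sketch of the schedule, in Python for illustration only:

    # Delay (ms) before retry n, mirroring 1000*Math.pow(2, retryCount-1)
    delays_ms = [1000 * 2 ** (n - 1) for n in range(1, 6)]
    assert delays_ms == [1000, 2000, 4000, 8000, 16000]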
@@ -573,9 +636,6 @@ function addEvent(obj, eventName, func) {
|
|||||||
return new RegisteredEvent(obj, eventName, func);
|
return new RegisteredEvent(obj, eventName, func);
|
||||||
}
|
}
|
||||||
|
|
||||||
function reportInfo(...args){
|
|
||||||
console.info(...args);
|
|
||||||
}
|
|
||||||
function reportWarning(...args){
|
function reportWarning(...args){
|
||||||
console.warn(...args);
|
console.warn(...args);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,77 +1,66 @@
|
|||||||
(function main() {
|
(function main() {
|
||||||
'use strict';
|
'use strict';
|
||||||
|
|
||||||
let captionsActive;
|
// Captions
|
||||||
|
let captionsActive = false;
|
||||||
switch(true) {
|
if (data.settings.subtitles_mode === 2 || (data.settings.subtitles_mode === 1 && data.has_manual_captions)) {
|
||||||
case data.settings.subtitles_mode == 2:
|
captionsActive = true;
|
||||||
captionsActive = true;
|
|
||||||
break;
|
|
||||||
case data.settings.subtitles_mode == 1 && data.has_manual_captions:
|
|
||||||
captionsActive = true;
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
captionsActive = false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AutoPlay
|
||||||
|
let autoplayActive = data.settings.autoplay_videos || false;
|
||||||
|
|
||||||
let qualityOptions = [];
|
let qualityOptions = [];
|
||||||
let qualityDefault;
|
let qualityDefault;
|
||||||
for (let src of data['uni_sources']) {
|
|
||||||
qualityOptions.push(src.quality_string)
|
for (let src of data.uni_sources) {
|
||||||
|
qualityOptions.push(src.quality_string);
|
||||||
}
|
}
|
||||||
for (let src of data['pair_sources']) {
|
|
||||||
qualityOptions.push(src.quality_string)
|
for (let src of data.pair_sources) {
|
||||||
|
qualityOptions.push(src.quality_string);
|
||||||
}
|
}
|
||||||
if (data['using_pair_sources'])
|
|
||||||
qualityDefault = data['pair_sources'][data['pair_idx']].quality_string;
|
if (data.using_pair_sources) {
|
||||||
else if (data['uni_sources'].length != 0)
|
qualityDefault = data.pair_sources[data.pair_idx].quality_string;
|
||||||
qualityDefault = data['uni_sources'][data['uni_idx']].quality_string;
|
} else if (data.uni_sources.length !== 0) {
|
||||||
else
|
qualityDefault = data.uni_sources[data.uni_idx].quality_string;
|
||||||
|
} else {
|
||||||
qualityDefault = 'None';
|
qualityDefault = 'None';
|
||||||
|
}
|
||||||
|
|
||||||
// Fix plyr refusing to work with qualities that are strings
|
// Fix plyr refusing to work with qualities that are strings
|
||||||
Object.defineProperty(Plyr.prototype, 'quality', {
|
Object.defineProperty(Plyr.prototype, 'quality', {
|
||||||
set: function(input) {
|
set: function (input) {
|
||||||
const config = this.config.quality;
|
const config = this.config.quality;
|
||||||
const options = this.options.quality;
|
const options = this.options.quality;
|
||||||
let quality;
|
let quality = input;
|
||||||
|
let updateStorage = true;
|
||||||
|
|
||||||
if (!options.length) {
|
if (!options.length) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// removing this line:
|
|
||||||
//let quality = [!is.empty(input) && Number(input), this.storage.get('quality'), config.selected, config.default].find(is.number);
|
|
||||||
// replacing with:
|
|
||||||
quality = input;
|
|
||||||
let updateStorage = true;
|
|
||||||
|
|
||||||
if (!options.includes(quality)) {
|
if (!options.includes(quality)) {
|
||||||
// Plyr sets quality to null at startup, resulting in the erroneous
|
|
||||||
// calling of this setter function with input = null, and the
|
|
||||||
// commented out code below would set the quality to something
|
|
||||||
// unrelated at startup. Comment out and just return.
|
|
||||||
return;
|
return;
|
||||||
/*const value = closest(options, quality);
|
|
||||||
this.debug.warn(`Unsupported quality option: ${quality}, using ${value} instead`);
|
|
||||||
quality = value; // Don't update storage if quality is not supported
|
|
||||||
updateStorage = false;*/
|
|
||||||
} // Update config
|
|
||||||
|
|
||||||
|
|
||||||
config.selected = quality; // Set quality
|
|
||||||
|
|
||||||
this.media.quality = quality; // Save to storage
|
|
||||||
|
|
||||||
if (updateStorage) {
|
|
||||||
this.storage.set({
|
|
||||||
quality
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
// Update config
|
||||||
|
config.selected = quality;
|
||||||
|
|
||||||
|
// Set quality
|
||||||
|
this.media.quality = quality;
|
||||||
|
|
||||||
|
// Save to storage
|
||||||
|
if (updateStorage) {
|
||||||
|
this.storage.set({ quality });
|
||||||
|
}
|
||||||
|
},
|
||||||
});
|
});
|
||||||
|
|
||||||
const player = new Plyr(document.getElementById('js-video-player'), {
|
const playerOptions = {
|
||||||
|
// Learning about autoplay permission https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Permissions-Policy/autoplay#syntax
|
||||||
|
autoplay: autoplayActive,
|
||||||
disableContextMenu: false,
|
disableContextMenu: false,
|
||||||
captions: {
|
captions: {
|
||||||
active: captionsActive,
|
active: captionsActive,
|
||||||
@@ -89,29 +78,31 @@
|
|||||||
'settings',
|
'settings',
|
||||||
'pip',
|
'pip',
|
||||||
'airplay',
|
'airplay',
|
||||||
'fullscreen'
|
'fullscreen',
|
||||||
],
|
],
|
||||||
iconUrl: "/youtube.com/static/modules/plyr/plyr.svg",
|
iconUrl: '/youtube.com/static/modules/plyr/plyr.svg',
|
||||||
blankVideo: "/youtube.com/static/modules/plyr/blank.webm",
|
blankVideo: '/youtube.com/static/modules/plyr/blank.webm',
|
||||||
debug: false,
|
debug: false,
|
||||||
storage: {enabled: false},
|
storage: { enabled: false },
|
||||||
quality: {
|
quality: {
|
||||||
default: qualityDefault,
|
default: qualityDefault,
|
||||||
options: qualityOptions,
|
options: qualityOptions,
|
||||||
forced: true,
|
forced: true,
|
||||||
onChange: function(quality) {
|
onChange: function (quality) {
|
||||||
if (quality == 'None') {return;}
|
if (quality == 'None') {
|
||||||
|
return;
|
||||||
|
}
|
||||||
if (quality.includes('(integrated)')) {
|
if (quality.includes('(integrated)')) {
|
||||||
for (let i=0; i < data['uni_sources'].length; i++) {
|
for (let i = 0; i < data.uni_sources.length; i++) {
|
||||||
if (data['uni_sources'][i].quality_string == quality) {
|
if (data.uni_sources[i].quality_string == quality) {
|
||||||
changeQuality({'type': 'uni', 'index': i});
|
changeQuality({ type: 'uni', index: i });
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
for (let i=0; i < data['pair_sources'].length; i++) {
|
for (let i = 0; i < data.pair_sources.length; i++) {
|
||||||
if (data['pair_sources'][i].quality_string == quality) {
|
if (data.pair_sources[i].quality_string == quality) {
|
||||||
changeQuality({'type': 'pair', 'index': i});
|
changeQuality({ type: 'pair', index: i });
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -119,12 +110,27 @@
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
previewThumbnails: {
|
previewThumbnails: {
|
||||||
enabled: storyboard_url != null,
|
enabled: storyboard_url !== null,
|
||||||
src: [storyboard_url],
|
src: [storyboard_url],
|
||||||
},
|
},
|
||||||
settings: ['captions', 'quality', 'speed', 'loop'],
|
settings: ['captions', 'quality', 'speed', 'loop'],
|
||||||
tooltips: {
|
tooltips: {
|
||||||
controls: true,
|
controls: true,
|
||||||
},
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
const player = new Plyr(document.getElementById('js-video-player'), playerOptions);
|
||||||
|
|
||||||
|
// disable double click to fullscreen
|
||||||
|
// https://github.com/sampotts/plyr/issues/1370#issuecomment-528966795
|
||||||
|
player.eventListeners.forEach(function(eventListener) {
|
||||||
|
if (eventListener.type === 'dblclick') {
|
||||||
|
eventListener.element.removeEventListener(eventListener.type, eventListener.callback, eventListener.options);
|
||||||
|
}
|
||||||
});
|
});
|
||||||
}());
|
|
||||||
|
// Add .started property, true after the playback has been started
|
||||||
|
// Needed so controls won't be hidden before playback has started
|
||||||
|
player.started = false;
|
||||||
|
player.once('playing', function() { this.started = true; });
|
||||||
|
})();
|
||||||
|
|||||||
@@ -5,8 +5,9 @@ function changeQuality(selection) {
|
|||||||
let videoPaused = video.paused;
|
let videoPaused = video.paused;
|
||||||
let videoSpeed = video.playbackRate;
|
let videoSpeed = video.playbackRate;
|
||||||
let srcInfo;
|
let srcInfo;
|
||||||
if (avMerge)
|
if (avMerge && typeof avMerge.close === 'function') {
|
||||||
avMerge.close();
|
avMerge.close();
|
||||||
|
}
|
||||||
if (selection.type == 'uni'){
|
if (selection.type == 'uni'){
|
||||||
srcInfo = data['uni_sources'][selection.index];
|
srcInfo = data['uni_sources'][selection.index];
|
||||||
video.src = srcInfo.url;
|
video.src = srcInfo.url;
|
||||||
|
|||||||
@@ -181,7 +181,7 @@ label[for=options-toggle-cbox] {
|
|||||||
|
|
||||||
.table td,.table th {
|
.table td,.table th {
|
||||||
padding: 10px 10px;
|
padding: 10px 10px;
|
||||||
border: 1px solid var(--secondary-background);
|
border: 1px solid var(--border-bg-license);
|
||||||
text-align: center;
|
text-align: center;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -10,9 +10,11 @@
|
|||||||
--link: #212121;
|
--link: #212121;
|
||||||
--link-visited: #808080;
|
--link-visited: #808080;
|
||||||
--border-bg: #212121;
|
--border-bg: #212121;
|
||||||
--buttom: #DCDCDC;
|
--border-bg-settings: #91918C;
|
||||||
|
--border-bg-license: #91918C;
|
||||||
|
--buttom: #FFFFFF;
|
||||||
--buttom-text: #212121;
|
--buttom-text: #212121;
|
||||||
--button-border: #91918c;
|
--button-border: #91918C;
|
||||||
--buttom-hover: #BBBBBB;
|
--buttom-hover: #BBBBBB;
|
||||||
--search-text: #212121;
|
--search-text: #212121;
|
||||||
--time-background: #212121;
|
--time-background: #212121;
|
||||||
|
|||||||
77
youtube/static/modules/plyr/custom_plyr.css
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
/* Prevent this div from blocking right-click menu for video
|
||||||
|
e.g. Firefox playback speed options */
|
||||||
|
.plyr__poster {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* plyr fix */
|
||||||
|
.plyr:-moz-full-screen video {
|
||||||
|
max-height: initial;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr:-webkit-full-screen video {
|
||||||
|
max-height: initial;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr:-ms-fullscreen video {
|
||||||
|
max-height: initial;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr:fullscreen video {
|
||||||
|
max-height: initial;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr__preview-thumb__image-container {
|
||||||
|
width: 158px;
|
||||||
|
height: 90px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr__preview-thumb {
|
||||||
|
bottom: 100%;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr__menu__container [role="menu"],
|
||||||
|
.plyr__menu__container [role="menucaptions"] {
|
||||||
|
/* Set vertical scroll */
|
||||||
|
/* issue https://github.com/sampotts/plyr/issues/1420 */
|
||||||
|
max-height: 320px;
|
||||||
|
overflow-y: auto;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Custom styles similar to youtube
|
||||||
|
*/
|
||||||
|
.plyr__controls {
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr__progress__container {
|
||||||
|
position: absolute;
|
||||||
|
bottom: 0;
|
||||||
|
width: 100%;
|
||||||
|
margin-bottom: -10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr__controls .plyr__controls__item:first-child {
|
||||||
|
margin-left: 0;
|
||||||
|
margin-right: 0;
|
||||||
|
z-index: 5;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr__controls .plyr__controls__item.plyr__volume {
|
||||||
|
margin-left: auto;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr__controls .plyr__controls__item.plyr__progress__container {
|
||||||
|
padding-left: 10px;
|
||||||
|
padding-right: 10px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.plyr__progress input[type="range"] {
|
||||||
|
margin-bottom: 50px;
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* End custom styles
|
||||||
|
*/
|
||||||
1
youtube/static/modules/plyr/plyr.min.js.map
Normal file
File diff suppressed because one or more lines are too long
@@ -155,7 +155,7 @@ label[for=options-toggle-cbox] {
|
|||||||
}
|
}
|
||||||
|
|
||||||
.settings-form > h2 {
|
.settings-form > h2 {
|
||||||
border-bottom: 2px solid var(--border-bg);
|
border-bottom: 2px solid var(--border-bg-settings);
|
||||||
padding-bottom: 0.5rem;
|
padding-bottom: 0.5rem;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -21,21 +21,7 @@ img {
|
|||||||
video {
|
video {
|
||||||
width: 100%;
|
width: 100%;
|
||||||
height: auto;
|
height: auto;
|
||||||
max-height: 480px;
|
max-height: calc(100vh/1.5);
|
||||||
}
|
|
||||||
|
|
||||||
/* plyr fix */
|
|
||||||
.plyr:-moz-full-screen video {
|
|
||||||
max-height: initial;
|
|
||||||
}
|
|
||||||
.plyr:-webkit-full-screen video {
|
|
||||||
max-height: initial;
|
|
||||||
}
|
|
||||||
.plyr:-ms-fullscreen video {
|
|
||||||
max-height: initial;
|
|
||||||
}
|
|
||||||
.plyr:fullscreen video {
|
|
||||||
max-height: initial;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
a:link {
|
a:link {
|
||||||
@@ -142,6 +128,29 @@ header {
|
|||||||
background-color: var(--buttom-hover);
|
background-color: var(--buttom-hover);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.live-url-choices {
|
||||||
|
background-color: var(--thumb-background);
|
||||||
|
margin: 1rem 0;
|
||||||
|
padding: 1rem;
|
||||||
|
}
|
||||||
|
|
||||||
|
.playability-error {
|
||||||
|
position: relative;
|
||||||
|
box-sizing: border-box;
|
||||||
|
height: 30vh;
|
||||||
|
margin: 1rem 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.playability-error > span {
|
||||||
|
display: flex;
|
||||||
|
background-color: var(--thumb-background);
|
||||||
|
height: 100%;
|
||||||
|
object-fit: cover;
|
||||||
|
justify-content: center;
|
||||||
|
align-items: center;
|
||||||
|
text-align: center;
|
||||||
|
}
|
||||||
|
|
||||||
.playlist {
|
.playlist {
|
||||||
display: grid;
|
display: grid;
|
||||||
grid-gap: 4px;
|
grid-gap: 4px;
|
||||||
@@ -636,6 +645,9 @@ figure.sc-video {
|
|||||||
max-height: 80vh;
|
max-height: 80vh;
|
||||||
overflow-y: scroll;
|
overflow-y: scroll;
|
||||||
}
|
}
|
||||||
|
.playability-error {
|
||||||
|
height: 60vh;
|
||||||
|
}
|
||||||
.playlist {
|
.playlist {
|
||||||
display: grid;
|
display: grid;
|
||||||
grid-gap: 1px;
|
grid-gap: 1px;
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
from youtube import util, yt_data_extract, channel, local_playlist
|
from youtube import util, yt_data_extract, channel, local_playlist, playlist
|
||||||
from youtube import yt_app
|
from youtube import yt_app
|
||||||
import settings
|
import settings
|
||||||
|
|
||||||
@@ -108,8 +108,7 @@ def _subscribe(channels):
|
|||||||
with connection as cursor:
|
with connection as cursor:
|
||||||
channel_ids_to_check = [channel[0] for channel in channels if not _is_subscribed(cursor, channel[0])]
|
channel_ids_to_check = [channel[0] for channel in channels if not _is_subscribed(cursor, channel[0])]
|
||||||
|
|
||||||
rows = ((channel_id, channel_name, 0, 0) for channel_id,
|
rows = ((channel_id, channel_name, 0, 0) for channel_id, channel_name in channels)
|
||||||
channel_name in channels)
|
|
||||||
cursor.executemany('''INSERT OR IGNORE INTO subscribed_channels (yt_channel_id, channel_name, time_last_checked, next_check_time)
|
cursor.executemany('''INSERT OR IGNORE INTO subscribed_channels (yt_channel_id, channel_name, time_last_checked, next_check_time)
|
||||||
VALUES (?, ?, ?, ?)''', rows)
|
VALUES (?, ?, ?, ?)''', rows)
|
||||||
|
|
||||||
@@ -236,8 +235,7 @@ def _get_channel_names(cursor, channel_ids):
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def _channels_with_tag(cursor, tag, order=False, exclude_muted=False,
|
def _channels_with_tag(cursor, tag, order=False, exclude_muted=False, include_muted_status=False):
|
||||||
include_muted_status=False):
|
|
||||||
''' returns list of (channel_id, channel_name) '''
|
''' returns list of (channel_id, channel_name) '''
|
||||||
|
|
||||||
statement = '''SELECT yt_channel_id, channel_name'''
|
statement = '''SELECT yt_channel_id, channel_name'''
|
||||||
@@ -434,8 +432,10 @@ def autocheck_setting_changed(old_value, new_value):
|
|||||||
stop_autocheck_system()
|
stop_autocheck_system()
|
||||||
|
|
||||||
|
|
||||||
settings.add_setting_changed_hook('autocheck_subscriptions',
|
settings.add_setting_changed_hook(
|
||||||
autocheck_setting_changed)
|
'autocheck_subscriptions',
|
||||||
|
autocheck_setting_changed
|
||||||
|
)
|
||||||
if settings.autocheck_subscriptions:
|
if settings.autocheck_subscriptions:
|
||||||
start_autocheck_system()
|
start_autocheck_system()
|
||||||
# ----------------------------
|
# ----------------------------
|
||||||
@@ -463,7 +463,24 @@ def _get_atoma_feed(channel_id):
|
|||||||
|
|
||||||
def _get_channel_videos_first_page(channel_id, channel_status_name):
|
def _get_channel_videos_first_page(channel_id, channel_status_name):
|
||||||
try:
|
try:
|
||||||
return channel.get_channel_first_page(channel_id=channel_id)
|
# First try the playlist method
|
||||||
|
pl_json = playlist.get_videos(
|
||||||
|
'UU' + channel_id[2:],
|
||||||
|
1,
|
||||||
|
include_shorts=settings.include_shorts_in_subscriptions,
|
||||||
|
report_text=None
|
||||||
|
)
|
||||||
|
pl_info = yt_data_extract.extract_playlist_info(pl_json)
|
||||||
|
if pl_info.get('items'):
|
||||||
|
pl_info['items'] = pl_info['items'][0:30]
|
||||||
|
return pl_info
|
||||||
|
|
||||||
|
# Try the channel api method
|
||||||
|
channel_json = channel.get_channel_first_page(channel_id=channel_id)
|
||||||
|
channel_info = yt_data_extract.extract_channel_info(
|
||||||
|
json.loads(channel_json), 'videos'
|
||||||
|
)
|
||||||
|
return channel_info
|
||||||
except util.FetchError as e:
|
except util.FetchError as e:
|
||||||
if e.code == '429' and settings.route_tor:
|
if e.code == '429' and settings.route_tor:
|
||||||
error_message = ('Error checking channel ' + channel_status_name
|
error_message = ('Error checking channel ' + channel_status_name
|
||||||
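The playlist method above relies on the fact that a channel's uploads playlist ID is the channel ID with its 'UC' prefix replaced by 'UU', hence 'UU' + channel_id[2:]. A quick sketch with a hypothetical channel ID:

    channel_id = 'UCabcdefghijklmnopqrstuv'   # hypothetical channel ID
    uploads_playlist_id = 'UU' + channel_id[2:]
    # -> 'UUabcdefghijklmnopqrstuv'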
@@ -497,7 +514,7 @@ def _get_upstream_videos(channel_id):
|
|||||||
)
|
)
|
||||||
gevent.joinall(tasks)
|
gevent.joinall(tasks)
|
||||||
|
|
||||||
channel_tab, feed = tasks[0].value, tasks[1].value
|
channel_info, feed = tasks[0].value, tasks[1].value
|
||||||
|
|
||||||
# extract published times from atoma feed
|
# extract published times from atoma feed
|
||||||
times_published = {}
|
times_published = {}
|
||||||
@@ -535,9 +552,8 @@ def _get_upstream_videos(channel_id):
|
|||||||
except defusedxml.ElementTree.ParseError:
|
except defusedxml.ElementTree.ParseError:
|
||||||
print('Failed to read atoma feed for ' + channel_status_name)
|
print('Failed to read atoma feed for ' + channel_status_name)
|
||||||
|
|
||||||
if channel_tab is None: # there was an error
|
if channel_info is None: # there was an error
|
||||||
return
|
return
|
||||||
channel_info = yt_data_extract.extract_channel_info(json.loads(channel_tab), 'videos')
|
|
||||||
if channel_info['error']:
|
if channel_info['error']:
|
||||||
print('Error checking channel ' + channel_status_name + ': ' + channel_info['error'])
|
print('Error checking channel ' + channel_status_name + ': ' + channel_info['error'])
|
||||||
return
|
return
|
||||||
@@ -552,14 +568,38 @@ def _get_upstream_videos(channel_id):
|
|||||||
if video_item['id'] in times_published:
|
if video_item['id'] in times_published:
|
||||||
video_item['time_published'] = times_published[video_item['id']]
|
video_item['time_published'] = times_published[video_item['id']]
|
||||||
video_item['is_time_published_exact'] = True
|
video_item['is_time_published_exact'] = True
|
||||||
else:
|
elif video_item.get('time_published'):
|
||||||
video_item['is_time_published_exact'] = False
|
video_item['is_time_published_exact'] = False
|
||||||
try:
|
try:
|
||||||
video_item['time_published'] = youtube_timestamp_to_posix(video_item['time_published']) - i # subtract a few seconds off the videos so they will be in the right order
|
video_item['time_published'] = youtube_timestamp_to_posix(video_item['time_published']) - i # subtract a few seconds off the videos so they will be in the right order
|
||||||
except KeyError:
|
except Exception:
|
||||||
print(video_item)
|
print(video_item)
|
||||||
|
else:
|
||||||
|
video_item['is_time_published_exact'] = False
|
||||||
|
video_item['time_published'] = None
|
||||||
video_item['channel_id'] = channel_id
|
video_item['channel_id'] = channel_id
|
||||||
|
if len(videos) > 1:
|
||||||
|
# Go back and fill in any videos that don't have a time published
|
||||||
|
# using the time published of the surrounding ones
|
||||||
|
for i in range(len(videos)-1):
|
||||||
|
if (videos[i+1]['time_published'] is None
|
||||||
|
and videos[i]['time_published'] is not None
|
||||||
|
):
|
||||||
|
videos[i+1]['time_published'] = videos[i]['time_published'] - 1
|
||||||
|
for i in reversed(range(1, len(videos))):
|
||||||
|
if (videos[i-1]['time_published'] is None
|
||||||
|
and videos[i]['time_published'] is not None
|
||||||
|
):
|
||||||
|
videos[i-1]['time_published'] = videos[i]['time_published'] + 1
|
||||||
|
# Special case: none of the videos have a time published.
|
||||||
|
# In this case, make something up
|
||||||
|
if videos and videos[0]['time_published'] is None:
|
||||||
|
assert all(v['time_published'] is None for v in videos)
|
||||||
|
now = time.time()
|
||||||
|
for i in range(len(videos)):
|
||||||
|
# 1 month between videos
|
||||||
|
videos[i]['time_published'] = now - i*3600*24*30
|
||||||
|
|
||||||
|
|
||||||
if len(videos) == 0:
|
if len(videos) == 0:
|
||||||
average_upload_period = 4*7*24*3600 # assume 1 month for channel with no videos
|
average_upload_period = 4*7*24*3600 # assume 1 month for channel with no videos
|
||||||
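The two passes above fill missing publish times from their neighbours: the forward pass copies times downward (minus one second per step), the backward pass copies them upward. A small illustration of the same logic on plain integers:

    times = [None, 1000, None]
    for i in range(len(times) - 1):
        if times[i + 1] is None and times[i] is not None:
            times[i + 1] = times[i] - 1
    for i in reversed(range(1, len(times))):
        if times[i - 1] is None and times[i] is not None:
            times[i - 1] = times[i] + 1
    assert times == [1001, 1000, 999]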
@@ -578,26 +618,31 @@ def _get_upstream_videos(channel_id):
|
|||||||
with open_database() as connection:
|
with open_database() as connection:
|
||||||
with connection as cursor:
|
with connection as cursor:
|
||||||
|
|
||||||
# calculate how many new videos there are
|
# Get video ids and duration of existing vids so we
|
||||||
existing_vids = set(row[0] for row in cursor.execute(
|
# can see how many new ones there are and update
|
||||||
'''SELECT video_id
|
# livestreams/premiers
|
||||||
|
existing_vids = list(cursor.execute(
|
||||||
|
'''SELECT video_id, duration
|
||||||
FROM videos
|
FROM videos
|
||||||
INNER JOIN subscribed_channels
|
INNER JOIN subscribed_channels
|
||||||
ON videos.sql_channel_id = subscribed_channels.id
|
ON videos.sql_channel_id = subscribed_channels.id
|
||||||
WHERE yt_channel_id=?
|
WHERE yt_channel_id=?
|
||||||
ORDER BY time_published DESC
|
ORDER BY time_published DESC
|
||||||
LIMIT 30''', [channel_id]).fetchall())
|
LIMIT 30''', [channel_id]).fetchall())
|
||||||
|
existing_vid_ids = set(row[0] for row in existing_vids)
|
||||||
|
existing_durs = dict(existing_vids)
|
||||||
|
|
||||||
# new videos the channel has uploaded since last time we checked
|
# new videos the channel has uploaded since last time we checked
|
||||||
number_of_new_videos = 0
|
number_of_new_videos = 0
|
||||||
for video in videos:
|
for video in videos:
|
||||||
if video['id'] in existing_vids:
|
if video['id'] in existing_vid_ids:
|
||||||
break
|
break
|
||||||
number_of_new_videos += 1
|
number_of_new_videos += 1
|
||||||
|
|
||||||
is_first_check = cursor.execute('''SELECT time_last_checked FROM subscribed_channels WHERE yt_channel_id=?''', [channel_id]).fetchone()[0] in (None, 0)
|
is_first_check = cursor.execute('''SELECT time_last_checked FROM subscribed_channels WHERE yt_channel_id=?''', [channel_id]).fetchone()[0] in (None, 0)
|
||||||
time_videos_retrieved = int(time.time())
|
time_videos_retrieved = int(time.time())
|
||||||
rows = []
|
rows = []
|
||||||
|
update_rows = []
|
||||||
for i, video_item in enumerate(videos):
|
for i, video_item in enumerate(videos):
|
||||||
if (is_first_check
|
if (is_first_check
|
||||||
or number_of_new_videos > 6
|
or number_of_new_videos > 6
|
||||||
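Since existing_vids now holds (video_id, duration) rows, the two derived structures come cheaply: a set for the fast membership test and a dict keyed by id for the duration lookup. For example, with hypothetical rows:

    existing_vids = [('abc123', '10:34'), ('def456', 'upcoming')]
    existing_vid_ids = set(row[0] for row in existing_vids)  # {'abc123', 'def456'}
    existing_durs = dict(existing_vids)  # {'abc123': '10:34', 'def456': 'upcoming'}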
@@ -613,16 +658,34 @@ def _get_upstream_videos(channel_id):
|
|||||||
time_noticed = video_item['time_published']
|
time_noticed = video_item['time_published']
|
||||||
else:
|
else:
|
||||||
time_noticed = time_videos_retrieved
|
time_noticed = time_videos_retrieved
|
||||||
rows.append((
|
|
||||||
video_item['channel_id'],
|
# videos which need durations updated
|
||||||
video_item['id'],
|
non_durations = ('upcoming', 'none', 'live', '')
|
||||||
video_item['title'],
|
v_id = video_item['id']
|
||||||
video_item['duration'],
|
if (existing_durs.get(v_id) is not None
|
||||||
video_item['time_published'],
|
and existing_durs[v_id].lower() in non_durations
|
||||||
video_item['is_time_published_exact'],
|
and video_item['duration'] not in non_durations
|
||||||
time_noticed,
|
):
|
||||||
video_item['description'],
|
update_rows.append((
|
||||||
))
|
video_item['title'],
|
||||||
|
video_item['duration'],
|
||||||
|
video_item['time_published'],
|
||||||
|
video_item['is_time_published_exact'],
|
||||||
|
video_item['description'],
|
||||||
|
video_item['id'],
|
||||||
|
))
|
||||||
|
# all other videos
|
||||||
|
else:
|
||||||
|
rows.append((
|
||||||
|
video_item['channel_id'],
|
||||||
|
video_item['id'],
|
||||||
|
video_item['title'],
|
||||||
|
video_item['duration'],
|
||||||
|
video_item['time_published'],
|
||||||
|
video_item['is_time_published_exact'],
|
||||||
|
time_noticed,
|
||||||
|
video_item['description'],
|
||||||
|
))
|
||||||
|
|
||||||
cursor.executemany('''INSERT OR IGNORE INTO videos (
|
cursor.executemany('''INSERT OR IGNORE INTO videos (
|
||||||
sql_channel_id,
|
sql_channel_id,
|
||||||
@@ -635,6 +698,13 @@ def _get_upstream_videos(channel_id):
|
|||||||
description
|
description
|
||||||
)
|
)
|
||||||
VALUES ((SELECT id FROM subscribed_channels WHERE yt_channel_id=?), ?, ?, ?, ?, ?, ?, ?)''', rows)
|
VALUES ((SELECT id FROM subscribed_channels WHERE yt_channel_id=?), ?, ?, ?, ?, ?, ?, ?)''', rows)
|
||||||
|
cursor.executemany('''UPDATE videos SET
|
||||||
|
title=?,
|
||||||
|
duration=?,
|
||||||
|
time_published=?,
|
||||||
|
is_time_published_exact=?,
|
||||||
|
description=?
|
||||||
|
WHERE video_id=?''', update_rows)
|
||||||
cursor.execute('''UPDATE subscribed_channels
|
cursor.execute('''UPDATE subscribed_channels
|
||||||
SET time_last_checked = ?, next_check_time = ?
|
SET time_last_checked = ?, next_check_time = ?
|
||||||
WHERE yt_channel_id=?''', [int(time.time()), next_check_time, channel_id])
|
WHERE yt_channel_id=?''', [int(time.time()), next_check_time, channel_id])
|
||||||
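Note the tuple layout in update_rows must match the UPDATE statement: the five SET placeholders come first and the video_id for the WHERE clause comes last. A hypothetical row:

    update_rows = [
        # title, duration, time_published, is_time_published_exact, description, video_id
        ('Example title', '10:34', 1700000000, True, 'Example description', 'abc123'),
    ]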
@@ -767,7 +837,7 @@ def import_subscriptions():
|
|||||||
error = 'Unsupported file format: ' + mime_type
|
error = 'Unsupported file format: ' + mime_type
|
||||||
error += (' . Only subscription.json, subscriptions.csv files'
|
error += (' . Only subscription.json, subscriptions.csv files'
|
||||||
' (from Google Takeouts)'
|
' (from Google Takeouts)'
|
||||||
' and XML OPML files exported from Youtube\'s'
|
' and XML OPML files exported from YouTube\'s'
|
||||||
' subscription manager page are supported')
|
' subscription manager page are supported')
|
||||||
return (flask.render_template('error.html', error_message=error),
|
return (flask.render_template('error.html', error_message=error),
|
||||||
400)
|
400)
|
||||||
@@ -962,7 +1032,8 @@ def get_subscriptions_page():
|
|||||||
'muted': muted,
|
'muted': muted,
|
||||||
})
|
})
|
||||||
|
|
||||||
return flask.render_template('subscriptions.html',
|
return flask.render_template(
|
||||||
|
'subscriptions.html',
|
||||||
header_playlist_names=local_playlist.get_playlist_names(),
|
header_playlist_names=local_playlist.get_playlist_names(),
|
||||||
videos=videos,
|
videos=videos,
|
||||||
num_pages=math.ceil(number_of_videos_in_db/60),
|
num_pages=math.ceil(number_of_videos_in_db/60),
|
||||||
@@ -1018,7 +1089,7 @@ def serve_subscription_thumbnail(thumbnail):
|
|||||||
f.close()
|
f.close()
|
||||||
return flask.Response(image, mimetype='image/jpeg')
|
return flask.Response(image, mimetype='image/jpeg')
|
||||||
|
|
||||||
url = "https://i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
|
url = f"https://i.ytimg.com/vi/{video_id}/hqdefault.jpg"
|
||||||
try:
|
try:
|
||||||
image = util.fetch_url(url, report_text="Saved thumbnail: " + video_id)
|
image = util.fetch_url(url, report_text="Saved thumbnail: " + video_id)
|
||||||
except urllib.error.HTTPError as e:
|
except urllib.error.HTTPError as e:
|
||||||
|
|||||||
@@ -51,8 +51,11 @@
|
|||||||
<ul>
|
<ul>
|
||||||
{% for (before_text, stat, after_text) in [
|
{% for (before_text, stat, after_text) in [
|
||||||
('Joined ', date_joined, ''),
|
('Joined ', date_joined, ''),
|
||||||
('', view_count|commatize, ' views'),
|
('', approx_view_count, ' views'),
|
||||||
('', approx_subscriber_count, ' subscribers'),
|
('', approx_subscriber_count, ' subscribers'),
|
||||||
|
('', approx_video_count, ' videos'),
|
||||||
|
('Country: ', country, ''),
|
||||||
|
('Canonical Url: ', canonical_url, ''),
|
||||||
] %}
|
] %}
|
||||||
{% if stat %}
|
{% if stat %}
|
||||||
<li>{{ before_text + stat|string + after_text }}</li>
|
<li>{{ before_text + stat|string + after_text }}</li>
|
||||||
@@ -65,7 +68,11 @@
|
|||||||
<hr>
|
<hr>
|
||||||
<ul>
|
<ul>
|
||||||
{% for text, url in links %}
|
{% for text, url in links %}
|
||||||
<li><a href="{{ url }}">{{ text }}</a></li>
|
{% if url %}
|
||||||
|
<li><a href="{{ url }}">{{ text }}</a></li>
|
||||||
|
{% else %}
|
||||||
|
<li>{{ text }}</li>
|
||||||
|
{% endif %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
</ul>
|
</ul>
|
||||||
</div>
|
</div>
|
||||||
@@ -74,7 +81,7 @@
|
|||||||
<!-- new-->
|
<!-- new-->
|
||||||
<div id="links-metadata">
|
<div id="links-metadata">
|
||||||
{% if current_tab in ('videos', 'shorts', 'streams') %}
|
{% if current_tab in ('videos', 'shorts', 'streams') %}
|
||||||
{% set sorts = [('1', 'views'), ('2', 'oldest'), ('3', 'newest')] %}
|
{% set sorts = [('1', 'views'), ('2', 'oldest'), ('3', 'newest'), ('4', 'newest - no shorts'),] %}
|
||||||
<div id="number-of-results">{{ number_of_videos }} videos</div>
|
<div id="number-of-results">{{ number_of_videos }} videos</div>
|
||||||
{% elif current_tab == 'playlists' %}
|
{% elif current_tab == 'playlists' %}
|
||||||
{% set sorts = [('2', 'oldest'), ('3', 'newest'), ('4', 'last video added')] %}
|
{% set sorts = [('2', 'oldest'), ('3', 'newest'), ('4', 'last video added')] %}
|
||||||
@@ -110,13 +117,9 @@
|
|||||||
<hr/>
|
<hr/>
|
||||||
|
|
||||||
<footer class="pagination-container">
|
<footer class="pagination-container">
|
||||||
{% if (current_tab in ('videos', 'shorts', 'streams')) and current_sort.__str__() == '2' %}
|
{% if current_tab in ('videos', 'shorts', 'streams') %}
|
||||||
<nav class="next-previous-button-row">
|
|
||||||
{{ common_elements.next_previous_ctoken_buttons(None, ctoken, channel_url + '/' + current_tab, parameters_dictionary) }}
|
|
||||||
</nav>
|
|
||||||
{% elif current_tab in ('videos', 'shorts', 'streams') %}
|
|
||||||
<nav class="pagination-list">
|
<nav class="pagination-list">
|
||||||
{{ common_elements.page_buttons(number_of_pages, channel_url + '/' + current_tab, parameters_dictionary, include_ends=(current_sort.__str__() == '3')) }}
|
{{ common_elements.page_buttons(number_of_pages, channel_url + '/' + current_tab, parameters_dictionary, include_ends=(current_sort.__str__() in '34')) }}
|
||||||
</nav>
|
</nav>
|
||||||
{% elif current_tab == 'playlists' or current_tab == 'search' %}
|
{% elif current_tab == 'playlists' or current_tab == 'search' %}
|
||||||
<nav class="next-previous-button-row">
|
<nav class="next-previous-button-row">
|
||||||
|
|||||||
@@ -1,4 +1,8 @@
|
|||||||
{% set page_title = 'Error' %}
|
{% if error_code %}
|
||||||
|
{% set page_title = 'Error: ' ~ error_code %}
|
||||||
|
{% else %}
|
||||||
|
{% set page_title = 'Error' %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
{% if not slim %}
|
{% if not slim %}
|
||||||
{% extends "base.html" %}
|
{% extends "base.html" %}
|
||||||
|
|||||||
@@ -8,14 +8,8 @@
|
|||||||
{% if settings.use_video_player == 2 %}
|
{% if settings.use_video_player == 2 %}
|
||||||
<!-- plyr -->
|
<!-- plyr -->
|
||||||
<link href="/youtube.com/static/modules/plyr/plyr.css" rel="stylesheet">
|
<link href="/youtube.com/static/modules/plyr/plyr.css" rel="stylesheet">
|
||||||
|
<link href="/youtube.com/static/modules/plyr/custom_plyr.css" rel="stylesheet">
|
||||||
<!--/ plyr -->
|
<!--/ plyr -->
|
||||||
<style>
|
|
||||||
/* Prevent this div from blocking right-click menu for video
|
|
||||||
e.g. Firefox playback speed options */
|
|
||||||
.plyr__poster {
|
|
||||||
display: none !important;
|
|
||||||
}
|
|
||||||
</style>
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% endblock style %}
|
{% endblock style %}
|
||||||
|
|
||||||
@@ -40,7 +34,7 @@
|
|||||||
</div>
|
</div>
|
||||||
{% else %}
|
{% else %}
|
||||||
<figure class="sc-video">
|
<figure class="sc-video">
|
||||||
<video id="js-video-player" playsinline controls>
|
<video id="js-video-player" playsinline controls {{ 'autoplay' if settings.autoplay_videos }}>
|
||||||
{% if uni_sources %}
|
{% if uni_sources %}
|
||||||
<source src="{{ uni_sources[uni_idx]['url'] }}" type="{{ uni_sources[uni_idx]['type'] }}" data-res="{{ uni_sources[uni_idx]['quality'] }}">
|
<source src="{{ uni_sources[uni_idx]['url'] }}" type="{{ uni_sources[uni_idx]['type'] }}" data-res="{{ uni_sources[uni_idx]['quality'] }}">
|
||||||
{% endif %}
|
{% endif %}
|
||||||
@@ -233,7 +227,7 @@
|
|||||||
<div class="comments-area-outer comments-disabled">Comments disabled</div>
|
<div class="comments-area-outer comments-disabled">Comments disabled</div>
|
||||||
{% else %}
|
{% else %}
|
||||||
<details class="comments-area-outer" {{'open' if settings.comments_mode == 1 else ''}}>
|
<details class="comments-area-outer" {{'open' if settings.comments_mode == 1 else ''}}>
|
||||||
<summary>{{ comment_count|commatize }} comment{{'s' if comment_count != 1 else ''}}</summary>
|
<summary>{{ comment_count|commatize }} comment{{'s' if comment_count != '1' else ''}}</summary>
|
||||||
<div class="comments-area-inner comments-area">
|
<div class="comments-area-inner comments-area">
|
||||||
{% if comments_info %}
|
{% if comments_info %}
|
||||||
{{ comments.video_comments(comments_info) }}
|
{{ comments.video_comments(comments_info) }}
|
||||||
|
|||||||
191
youtube/util.py
@@ -318,10 +318,11 @@ def fetch_url(url, headers=(), timeout=15, report_text=None, data=None,
|
|||||||
cleanup_func(response) # release_connection for urllib3
|
cleanup_func(response) # release_connection for urllib3
|
||||||
content = decode_content(
|
content = decode_content(
|
||||||
content,
|
content,
|
||||||
response.getheader('Content-Encoding', default='identity'))
|
response.headers.get('Content-Encoding', default='identity'))
|
||||||
|
|
||||||
if (settings.debugging_save_responses
|
if (settings.debugging_save_responses
|
||||||
and debug_name is not None and content):
|
and debug_name is not None
|
||||||
|
and content):
|
||||||
save_dir = os.path.join(settings.data_dir, 'debug')
|
save_dir = os.path.join(settings.data_dir, 'debug')
|
||||||
if not os.path.exists(save_dir):
|
if not os.path.exists(save_dir):
|
||||||
os.makedirs(save_dir)
|
os.makedirs(save_dir)
|
||||||
@@ -336,7 +337,7 @@ def fetch_url(url, headers=(), timeout=15, report_text=None, data=None,
|
|||||||
)
|
)
|
||||||
)
|
)
|
||||||
):
|
):
|
||||||
print(response.status, response.reason, response.getheaders())
|
print(response.status, response.reason, response.headers)
|
||||||
ip = re.search(
|
ip = re.search(
|
||||||
br'IP address: ((?:[\da-f]*:)+[\da-f]+|(?:\d+\.)+\d+)',
|
br'IP address: ((?:[\da-f]*:)+[\da-f]+|(?:\d+\.)+\d+)',
|
||||||
content)
|
content)
|
||||||
@@ -394,7 +395,6 @@ def head(url, use_tor=False, report_text=None, max_redirects=10):
|
|||||||
round(time.monotonic() - start_time, 3))
|
round(time.monotonic() - start_time, 3))
|
||||||
return response
|
return response
|
||||||
|
|
||||||
|
|
||||||
mobile_user_agent = 'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4 Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36'
|
mobile_user_agent = 'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4 Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36'
|
||||||
mobile_ua = (('User-Agent', mobile_user_agent),)
|
mobile_ua = (('User-Agent', mobile_user_agent),)
|
||||||
desktop_user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'
|
desktop_user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'
|
||||||
@@ -404,13 +404,13 @@ desktop_xhr_headers = (
|
|||||||
('Accept', '*/*'),
|
('Accept', '*/*'),
|
||||||
('Accept-Language', 'en-US,en;q=0.5'),
|
('Accept-Language', 'en-US,en;q=0.5'),
|
||||||
('X-YouTube-Client-Name', '1'),
|
('X-YouTube-Client-Name', '1'),
|
||||||
('X-YouTube-Client-Version', '2.20180830'),
|
('X-YouTube-Client-Version', '2.20240304.00.00'),
|
||||||
) + desktop_ua
|
) + desktop_ua
|
||||||
mobile_xhr_headers = (
|
mobile_xhr_headers = (
|
||||||
('Accept', '*/*'),
|
('Accept', '*/*'),
|
||||||
('Accept-Language', 'en-US,en;q=0.5'),
|
('Accept-Language', 'en-US,en;q=0.5'),
|
||||||
('X-YouTube-Client-Name', '2'),
|
('X-YouTube-Client-Name', '2'),
|
||||||
('X-YouTube-Client-Version', '2.20180830'),
|
('X-YouTube-Client-Version', '2.20240304.08.00'),
|
||||||
) + mobile_ua
|
) + mobile_ua
|
||||||
|
|
||||||
|
|
||||||
@@ -462,7 +462,7 @@ class RateLimitedQueue(gevent.queue.Queue):
|
|||||||
|
|
||||||
|
|
||||||
def download_thumbnail(save_directory, video_id):
|
def download_thumbnail(save_directory, video_id):
|
||||||
url = "https://i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
|
url = f"https://i.ytimg.com/vi/{video_id}/hqdefault.jpg"
|
||||||
save_location = os.path.join(save_directory, video_id + ".jpg")
|
save_location = os.path.join(save_directory, video_id + ".jpg")
|
||||||
try:
|
try:
|
||||||
thumbnail = fetch_url(url, report_text="Saved thumbnail: " + video_id)
|
thumbnail = fetch_url(url, report_text="Saved thumbnail: " + video_id)
|
||||||
@@ -504,7 +504,7 @@ def video_id(url):
|
|||||||
|
|
||||||
# default, sddefault, mqdefault, hqdefault, hq720
|
# default, sddefault, mqdefault, hqdefault, hq720
|
||||||
def get_thumbnail_url(video_id):
|
def get_thumbnail_url(video_id):
|
||||||
return settings.img_prefix + "https://i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
|
return f"{settings.img_prefix}https://i.ytimg.com/vi/{video_id}/hqdefault.jpg"
|
||||||
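The switch from mqdefault to hqdefault trades the 320x180 thumbnail for the 480x360 one; the comment above get_thumbnail_url lists the other sizes YouTube serves under the same path scheme. A generic sketch (the helper name is illustrative, not part of the codebase):

    def thumbnail_url(video_id, quality='hqdefault'):
        # quality is one of: default, mqdefault, hqdefault, sddefault, hq720
        return f"https://i.ytimg.com/vi/{video_id}/{quality}.jpg"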
|
|
||||||
|
|
||||||
def seconds_to_timestamp(seconds):
|
def seconds_to_timestamp(seconds):
|
||||||
@@ -665,8 +665,183 @@ def to_valid_filename(name):
|
|||||||
return name
|
return name
|
||||||
|
|
||||||
|
|
||||||
|
# https://github.com/yt-dlp/yt-dlp/blob/master/yt_dlp/extractor/youtube.py#L72
|
||||||
|
INNERTUBE_CLIENTS = {
|
||||||
|
'android': {
|
||||||
|
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
|
||||||
|
'INNERTUBE_CONTEXT': {
|
||||||
|
'client': {
|
||||||
|
'hl': 'en',
|
||||||
|
'gl': 'US',
|
||||||
|
'clientName': 'ANDROID',
|
||||||
|
'clientVersion': '19.09.36',
|
||||||
|
'osName': 'Android',
|
||||||
|
'osVersion': '12',
|
||||||
|
'androidSdkVersion': 31,
|
||||||
|
'platform': 'MOBILE',
|
||||||
|
'userAgent': 'com.google.android.youtube/19.09.36 (Linux; U; Android 12; US) gzip'
|
||||||
|
},
|
||||||
|
# https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
|
||||||
|
#'thirdParty': {
|
||||||
|
# 'embedUrl': 'https://google.com', # Can be any valid URL
|
||||||
|
#}
|
||||||
|
},
|
||||||
|
'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
|
||||||
|
'REQUIRE_JS_PLAYER': False,
|
||||||
|
},
|
||||||
|
|
||||||
|
'android-test-suite': {
|
||||||
|
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
|
||||||
|
'INNERTUBE_CONTEXT': {
|
||||||
|
'client': {
|
||||||
|
'hl': 'en',
|
||||||
|
'gl': 'US',
|
||||||
|
'clientName': 'ANDROID_TESTSUITE',
|
||||||
|
'clientVersion': '1.9',
|
||||||
|
'osName': 'Android',
|
||||||
|
'osVersion': '12',
|
||||||
|
'androidSdkVersion': 31,
|
||||||
|
'platform': 'MOBILE',
|
||||||
|
'userAgent': 'com.google.android.youtube/1.9 (Linux; U; Android 12; US) gzip'
|
||||||
|
},
|
||||||
|
# https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
|
||||||
|
#'thirdParty': {
|
||||||
|
# 'embedUrl': 'https://google.com', # Can be any valid URL
|
||||||
|
#}
|
||||||
|
},
|
||||||
|
'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
|
||||||
|
'REQUIRE_JS_PLAYER': False,
|
||||||
|
},
|
||||||
|
|
||||||
|
'ios': {
|
||||||
|
'INNERTUBE_API_KEY': 'AIzaSyB-63vPrdThhKuerbB2N_l7Kwwcxj6yUAc',
|
||||||
|
'INNERTUBE_CONTEXT': {
|
||||||
|
'client': {
|
||||||
|
'hl': 'en',
|
||||||
|
'gl': 'US',
|
||||||
|
'clientName': 'IOS',
|
||||||
|
'clientVersion': '19.09.3',
|
||||||
|
'deviceModel': 'iPhone14,3',
|
||||||
|
'userAgent': 'com.google.ios.youtube/19.09.3 (iPhone14,3; U; CPU iOS 15_6 like Mac OS X)'
|
||||||
|
}
|
||||||
|
},
|
||||||
|
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
|
||||||
|
'REQUIRE_JS_PLAYER': False
|
||||||
|
},
|
||||||
|
|
||||||
|
# This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
|
||||||
|
# See: https://github.com/zerodytrash/YouTube-Internal-Clients
|
||||||
|
'tv_embedded': {
|
||||||
|
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
|
||||||
|
'INNERTUBE_CONTEXT': {
|
||||||
|
'client': {
|
||||||
|
'hl': 'en',
|
||||||
|
'gl': 'US',
|
||||||
|
'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
|
||||||
|
'clientVersion': '2.0',
|
||||||
|
'clientScreen': 'EMBED',
|
||||||
|
},
|
||||||
|
# https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
|
||||||
|
'thirdParty': {
|
||||||
|
'embedUrl': 'https://google.com', # Can be any valid URL
|
||||||
|
}
|
||||||
|
|
||||||
|
},
|
||||||
|
'INNERTUBE_CONTEXT_CLIENT_NAME': 85,
|
||||||
|
'REQUIRE_JS_PLAYER': True,
|
||||||
|
},
|
||||||
|
|
||||||
|
'web': {
|
||||||
|
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
|
||||||
|
'INNERTUBE_CONTEXT': {
|
||||||
|
'client': {
|
||||||
|
'clientName': 'WEB',
|
||||||
|
'clientVersion': '2.20220801.00.00',
|
||||||
|
'userAgent': desktop_user_agent,
|
||||||
|
}
|
||||||
|
},
|
||||||
|
'INNERTUBE_CONTEXT_CLIENT_NAME': 1
|
||||||
|
},
|
||||||
|
'android_vr': {
|
||||||
|
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
|
||||||
|
'INNERTUBE_CONTEXT': {
|
||||||
|
'client': {
|
||||||
|
'clientName': 'ANDROID_VR',
|
||||||
|
'clientVersion': '1.60.19',
|
||||||
|
'deviceMake': 'Oculus',
|
||||||
|
'deviceModel': 'Quest 3',
|
||||||
|
'androidSdkVersion': 32,
|
||||||
|
'userAgent': 'com.google.android.apps.youtube.vr.oculus/1.60.19 (Linux; U; Android 12L; eureka-user Build/SQ3A.220605.009.A1) gzip',
|
||||||
|
'osName': 'Android',
|
||||||
|
'osVersion': '12L',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
'INNERTUBE_CONTEXT_CLIENT_NAME': 28,
|
||||||
|
'REQUIRE_JS_PLAYER': False,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
def get_visitor_data():
|
||||||
|
visitor_data = None
|
||||||
|
visitor_data_cache = os.path.join(settings.data_dir, 'visitorData.txt')
|
||||||
|
if not os.path.exists(settings.data_dir):
|
||||||
|
os.makedirs(settings.data_dir)
|
||||||
|
if os.path.isfile(visitor_data_cache):
|
||||||
|
with open(visitor_data_cache, 'r') as file:
|
||||||
|
print('Getting visitor_data from cache')
|
||||||
|
visitor_data = file.read()
|
||||||
|
max_age = 12*3600
|
||||||
|
file_age = time.time() - os.path.getmtime(visitor_data_cache)
|
||||||
|
if file_age > max_age:
|
||||||
|
print('visitor_data cache is too old. Removing file...')
|
||||||
|
os.remove(visitor_data_cache)
|
||||||
|
return visitor_data
|
||||||
|
|
||||||
|
print('Fetching youtube homepage to get visitor_data')
|
||||||
|
yt_homepage = 'https://www.youtube.com'
|
||||||
|
yt_resp = fetch_url(yt_homepage, headers={'User-Agent': mobile_user_agent}, report_text='Getting youtube homepage')
|
||||||
|
visitor_data_re = r'''"visitorData":\s*?"(.+?)"'''
|
||||||
|
visitor_data_match = re.search(visitor_data_re, yt_resp.decode())
|
||||||
|
if visitor_data_match:
|
||||||
|
visitor_data = visitor_data_match.group(1)
|
||||||
|
print(f'Got visitor_data ({len(visitor_data)} characters)')
|
||||||
|
with open(visitor_data_cache, 'w') as file:
|
||||||
|
print('Saving visitor_data cache...')
|
||||||
|
file.write(visitor_data)
|
||||||
|
return visitor_data
|
||||||
|
else:
|
||||||
|
print('Unable to get visitor_data value')
|
||||||
|
return visitor_data
|
||||||
|
|
||||||
|
def call_youtube_api(client, api, data):
|
||||||
|
client_params = INNERTUBE_CLIENTS[client]
|
||||||
|
context = client_params['INNERTUBE_CONTEXT']
|
||||||
|
key = client_params['INNERTUBE_API_KEY']
|
||||||
|
host = client_params.get('INNERTUBE_HOST') or 'www.youtube.com'
|
||||||
|
user_agent = context['client'].get('userAgent') or mobile_user_agent
|
||||||
|
visitor_data = get_visitor_data()
|
||||||
|
|
||||||
|
url = 'https://' + host + '/youtubei/v1/' + api + '?key=' + key
|
||||||
|
if visitor_data:
|
||||||
|
context['client'].update({'visitorData': visitor_data})
|
||||||
|
data['context'] = context
|
||||||
|
|
||||||
|
data = json.dumps(data)
|
||||||
|
headers = (('Content-Type', 'application/json'),('User-Agent', user_agent))
|
||||||
|
if visitor_data:
|
||||||
|
headers = ( *headers, ('X-Goog-Visitor-Id', visitor_data ))
|
||||||
|
response = fetch_url(
|
||||||
|
url, data=data, headers=headers,
|
||||||
|
debug_name='youtubei_' + api + '_' + client,
|
||||||
|
report_text='Fetched ' + client + ' youtubei ' + api
|
||||||
|
).decode('utf-8')
|
||||||
|
return response
|
||||||
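A caller then only supplies the client name, the innertube endpoint, and the request-specific payload; the context, API key, and visitorData are filled in here. Hypothetical usage (the video ID is a placeholder):

    response_json = call_youtube_api('android_vr', 'player', {'videoId': 'dQw4w9WgXcQ'})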
|
|
||||||
|
|
||||||
def strip_non_ascii(string):
|
def strip_non_ascii(string):
|
||||||
''' Returns the string without non ASCII characters'''
|
''' Returns the string without non ASCII characters'''
|
||||||
|
if string is None:
|
||||||
|
return ""
|
||||||
stripped = (c for c in string if 0 < ord(c) < 127)
|
stripped = (c for c in string if 0 < ord(c) < 127)
|
||||||
return ''.join(stripped)
|
return ''.join(stripped)
|
||||||
|
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
from __future__ import unicode_literals
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
__version__ = '0.2.4'
|
__version__ = 'v0.3.2'
|
||||||
|
|||||||
164
youtube/watch.py
@@ -19,51 +19,6 @@ from urllib.parse import parse_qs, urlencode
|
|||||||
from types import SimpleNamespace
|
from types import SimpleNamespace
|
||||||
from math import ceil
|
from math import ceil
|
||||||
|
|
||||||
# https://github.com/yt-dlp/yt-dlp/blob/master/yt_dlp/extractor/youtube.py#L72
|
|
||||||
INNERTUBE_CLIENTS = {
|
|
||||||
'android': {
|
|
||||||
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
|
|
||||||
'INNERTUBE_CONTEXT': {
|
|
||||||
'client': {
|
|
||||||
'hl': 'en',
|
|
||||||
'gl': 'US',
|
|
||||||
'clientName': 'ANDROID',
|
|
||||||
'clientVersion': '17.31.35',
|
|
||||||
'osName': 'Android',
|
|
||||||
'osVersion': '12',
|
|
||||||
'androidSdkVersion': 31,
|
|
||||||
'userAgent': 'com.google.android.youtube/17.31.35 (Linux; U; Android 12) gzip'
|
|
||||||
},
|
|
||||||
# https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
|
|
||||||
#'thirdParty': {
|
|
||||||
# 'embedUrl': 'https://google.com', # Can be any valid URL
|
|
||||||
#}
|
|
||||||
},
|
|
||||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
|
|
||||||
'REQUIRE_JS_PLAYER': False,
|
|
||||||
},
|
|
||||||
|
|
||||||
# This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
|
|
||||||
# See: https://github.com/zerodytrash/YouTube-Internal-Clients
|
|
||||||
'tv_embedded': {
|
|
||||||
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
|
|
||||||
'INNERTUBE_CONTEXT': {
|
|
||||||
'client': {
|
|
||||||
'hl': 'en',
|
|
||||||
'gl': 'US',
|
|
||||||
'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
|
|
||||||
'clientVersion': '2.0',
|
|
||||||
},
|
|
||||||
# https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
|
|
||||||
'thirdParty': {
|
|
||||||
'embedUrl': 'https://google.com', # Can be any valid URL
|
|
||||||
}
|
|
||||||
|
|
||||||
},
|
|
||||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 85,
|
|
||||||
'REQUIRE_JS_PLAYER': True,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
with open(os.path.join(settings.data_dir, 'decrypt_function_cache.json'), 'r') as f:
|
with open(os.path.join(settings.data_dir, 'decrypt_function_cache.json'), 'r') as f:
|
||||||
@@ -386,26 +341,10 @@ def _add_to_error(info, key, additional_message):
|
|||||||
|
|
||||||
|
|
||||||
def fetch_player_response(client, video_id):
|
def fetch_player_response(client, video_id):
|
||||||
client_params = INNERTUBE_CLIENTS[client]
|
return util.call_youtube_api(client, 'player', {
|
||||||
context = client_params['INNERTUBE_CONTEXT']
|
|
||||||
key = client_params['INNERTUBE_API_KEY']
|
|
||||||
host = client_params.get('INNERTUBE_HOST') or 'www.youtube.com'
|
|
||||||
user_agent = context['client'].get('userAgent') or util.mobile_user_agent
|
|
||||||
|
|
||||||
url = 'https://' + host + '/youtubei/v1/player?key=' + key
|
|
||||||
data = {
|
|
||||||
'videoId': video_id,
|
'videoId': video_id,
|
||||||
'context': context,
|
})
|
||||||
'params': '8AEB',
|
|
||||||
}
|
|
||||||
data = json.dumps(data)
|
|
||||||
headers = (('Content-Type', 'application/json'),('User-Agent', user_agent))
|
|
||||||
player_response = util.fetch_url(
|
|
||||||
url, data=data, headers=headers,
|
|
||||||
debug_name='youtubei_player_' + client,
|
|
||||||
report_text='Fetched ' + client + ' youtubei player'
|
|
||||||
).decode('utf-8')
|
|
||||||
return player_response
|
|
||||||
|
|
||||||
def fetch_watch_page_info(video_id, playlist_id, index):
|
def fetch_watch_page_info(video_id, playlist_id, index):
|
||||||
# bpctr=9999999999 will bypass are-you-sure dialogs for controversial
|
# bpctr=9999999999 will bypass are-you-sure dialogs for controversial
|
||||||
@@ -428,42 +367,42 @@ def fetch_watch_page_info(video_id, playlist_id, index):
|
|||||||
watch_page = watch_page.decode('utf-8')
|
watch_page = watch_page.decode('utf-8')
|
||||||
return yt_data_extract.extract_watch_info_from_html(watch_page)
|
return yt_data_extract.extract_watch_info_from_html(watch_page)
|
||||||
|
|
||||||
|
|
||||||
def extract_info(video_id, use_invidious, playlist_id=None, index=None):
|
def extract_info(video_id, use_invidious, playlist_id=None, index=None):
|
||||||
|
primary_client = 'android_vr'
|
||||||
|
fallback_client = 'ios'
|
||||||
|
last_resort_client = 'tv_embedded'
|
||||||
|
|
||||||
tasks = (
|
tasks = (
|
||||||
# Get video metadata from here
|
# Get video metadata from here
|
||||||
gevent.spawn(fetch_watch_page_info, video_id, playlist_id, index),
|
gevent.spawn(fetch_watch_page_info, video_id, playlist_id, index),
|
||||||
|
gevent.spawn(fetch_player_response, primary_client, video_id)
|
||||||
# Get video URLs by spoofing as android client because its urls don't
|
|
||||||
# require decryption
|
|
||||||
# The URLs returned with WEB for videos requiring decryption
|
|
||||||
# couldn't be decrypted with the base.js from the web page for some
|
|
||||||
# reason
|
|
||||||
# https://github.com/yt-dlp/yt-dlp/issues/574#issuecomment-887171136
|
|
||||||
|
|
||||||
# Update 4/26/23, these URLs will randomly start returning 403
|
|
||||||
# mid-playback and I'm not sure why
|
|
||||||
gevent.spawn(fetch_player_response, 'android', video_id)
|
|
||||||
)
|
)
|
||||||
gevent.joinall(tasks)
|
gevent.joinall(tasks)
|
||||||
util.check_gevent_exceptions(*tasks)
|
util.check_gevent_exceptions(*tasks)
|
||||||
info, player_response = tasks[0].value, tasks[1].value
|
|
||||||
|
info = tasks[0].value or {}
|
||||||
|
player_response = tasks[1].value or {}
|
||||||
|
|
||||||
yt_data_extract.update_with_new_urls(info, player_response)
|
yt_data_extract.update_with_new_urls(info, player_response)
|
||||||
|
|
||||||
# Age restricted video, retry
|
# Fallback to 'ios' if no valid URLs are found
|
||||||
if info['age_restricted'] or info['player_urls_missing']:
|
if not info.get('formats') or info.get('player_urls_missing'):
|
||||||
if info['age_restricted']:
|
print(f"No URLs found in '{primary_client}', attempting with '{fallback_client}'.")
|
||||||
print('Age restricted video, retrying')
|
player_response = fetch_player_response(fallback_client, video_id) or {}
|
||||||
else:
|
yt_data_extract.update_with_new_urls(info, player_response)
|
||||||
print('Player urls missing, retrying')
|
|
||||||
player_response = fetch_player_response('tv_embedded', video_id)
|
# Final attempt with 'tv_embedded' if there are still no URLs
|
||||||
|
if not info.get('formats') or info.get('player_urls_missing'):
|
||||||
|
print(f"No URLs found in '{fallback_client}', attempting with '{last_resort_client}'")
|
||||||
|
player_response = fetch_player_response(last_resort_client, video_id) or {}
|
||||||
yt_data_extract.update_with_new_urls(info, player_response)
|
yt_data_extract.update_with_new_urls(info, player_response)
|
||||||
|
|
||||||
# signature decryption
|
# signature decryption
|
||||||
decryption_error = decrypt_signatures(info, video_id)
|
if info.get('formats'):
|
||||||
if decryption_error:
|
decryption_error = decrypt_signatures(info, video_id)
|
||||||
decryption_error = 'Error decrypting url signatures: ' + decryption_error
|
if decryption_error:
|
||||||
info['playability_error'] = decryption_error
|
info['playability_error'] = 'Error decrypting url signatures: ' + decryption_error
|
||||||
|
|
||||||
# check if urls ready (non-live format) in former livestream
|
# check if urls ready (non-live format) in former livestream
|
||||||
# urls not ready if all of them have no filesize
|
# urls not ready if all of them have no filesize
|
||||||
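The client cascade above (android_vr, then ios, then tv_embedded) is equivalent to looping over clients in priority order until usable format URLs appear; the only difference is that the first fetch runs concurrently with the watch-page fetch. A compact sketch, illustrative only:

    for client in ('android_vr', 'ios', 'tv_embedded'):
        player_response = fetch_player_response(client, video_id) or {}
        yt_data_extract.update_with_new_urls(info, player_response)
        if info.get('formats') and not info.get('player_urls_missing'):
            break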
@@ -477,21 +416,21 @@ def extract_info(video_id, use_invidious, playlist_id=None, index=None):
|
|||||||
|
|
||||||
# livestream urls
|
# livestream urls
|
||||||
# sometimes only the livestream urls work soon after the livestream is over
|
# sometimes only the livestream urls work soon after the livestream is over
|
||||||
if (info['hls_manifest_url']
|
info['hls_formats'] = []
|
||||||
and (info['live'] or not info['formats'] or not info['urls_ready'])
|
if info.get('hls_manifest_url') and (info.get('live') or not info.get('formats') or not info['urls_ready']):
|
||||||
):
|
try:
|
||||||
manifest = util.fetch_url(info['hls_manifest_url'],
|
manifest = util.fetch_url(info['hls_manifest_url'],
|
||||||
debug_name='hls_manifest.m3u8',
|
debug_name='hls_manifest.m3u8',
|
||||||
report_text='Fetched hls manifest'
|
report_text='Fetched hls manifest'
|
||||||
).decode('utf-8')
|
).decode('utf-8')
|
||||||
|
info['hls_formats'], err = yt_data_extract.extract_hls_formats(manifest)
|
||||||
info['hls_formats'], err = yt_data_extract.extract_hls_formats(manifest)
|
if not err:
|
||||||
if not err:
|
info['playability_error'] = None
|
||||||
info['playability_error'] = None
|
for fmt in info['hls_formats']:
|
||||||
for fmt in info['hls_formats']:
|
fmt['video_quality'] = video_quality_string(fmt)
|
||||||
fmt['video_quality'] = video_quality_string(fmt)
|
except Exception as e:
|
||||||
else:
|
print(f"Error obteniendo HLS manifest: {e}")
|
||||||
info['hls_formats'] = []
|
info['hls_formats'] = []
|
||||||
|
|
||||||
# check for 403. Unnecessary for tor video routing b/c ip address is same
|
# check for 403. Unnecessary for tor video routing b/c ip address is same
|
||||||
info['invidious_used'] = False
|
info['invidious_used'] = False
|
||||||
@@ -686,6 +625,7 @@ def get_watch_page(video_id=None):
|
|||||||
|
|
||||||
# prefix urls, and other post-processing not handled by yt_data_extract
|
# prefix urls, and other post-processing not handled by yt_data_extract
|
||||||
for item in info['related_videos']:
|
for item in info['related_videos']:
|
||||||
|
item['thumbnail'] = "https://i.ytimg.com/vi/{}/hqdefault.jpg".format(item['id'])  # use the HQ thumbnail for related videos
|
||||||
util.prefix_urls(item)
|
util.prefix_urls(item)
|
||||||
util.add_extra_html_info(item)
|
util.add_extra_html_info(item)
|
||||||
for song in info['music_list']:
|
for song in info['music_list']:
|
||||||
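Note: the added line pins related-video thumbnails to the hqdefault variant on i.ytimg.com. For reference, the commonly served fixed-size variants (sizes as generally observed, not from this repository; maxresdefault is not generated for every video):

    THUMB_VARIANTS = {            # name -> (width, height)
        'default': (120, 90),
        'mqdefault': (320, 180),
        'hqdefault': (480, 360),
        'sddefault': (640, 480),
        'maxresdefault': (1280, 720),
    }

    def thumbnail_url(video_id, variant='hqdefault'):
        return f"https://i.ytimg.com/vi/{video_id}/{variant}.jpg"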
@@ -719,12 +659,6 @@ def get_watch_page(video_id=None):
             '/videoplayback',
             '/videoplayback/name/' + filename)
 
-    if settings.gather_googlevideo_domains:
-        with open(os.path.join(settings.data_dir, 'googlevideo-domains.txt'), 'a+', encoding='utf-8') as f:
-            url = info['formats'][0]['url']
-            subdomain = url[0:url.find(".googlevideo.com")]
-            f.write(subdomain + "\n")
-
     download_formats = []
 
     for format in (info['formats'] + info['hls_formats']):
@@ -764,9 +698,17 @@ def get_watch_page(video_id=None):
     else:
         closer_to_target = 'pair'
 
-    using_pair_sources = (
-        bool(pair_sources) and (not uni_sources or closer_to_target == 'pair')
-    )
+    if settings.prefer_uni_sources == 2:
+        # Use uni sources unless there's no choice.
+        using_pair_sources = (
+            bool(pair_sources) and (not uni_sources)
+        )
+    else:
+        # Use the pair sources if they're closer to the desired resolution
+        using_pair_sources = (
+            bool(pair_sources)
+            and (not uni_sources or closer_to_target == 'pair')
+        )
     if using_pair_sources:
         video_height = pair_sources[pair_idx]['height']
         video_width = pair_sources[pair_idx]['width']
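Note: the pair-versus-uni decision is now gated on settings.prefer_uni_sources: the value 2 means "use uni (integrated) sources unless there are none at all", while any other value keeps the old resolution-based choice. The branch reduces to this pure function, a direct mirror of the hunk above:

    def use_pair_sources(prefer_uni_sources, uni_sources, pair_sources,
                         closer_to_target):
        """closer_to_target is 'uni' or 'pair', whichever is nearer
        the desired resolution."""
        if prefer_uni_sources == 2:
            # Uni sources win whenever any exist.
            return bool(pair_sources) and not uni_sources
        # Otherwise pair sources win when closer to the target resolution.
        return bool(pair_sources) and (
            not uni_sources or closer_to_target == 'pair')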
@@ -109,7 +109,7 @@ def concat_or_none(*strings):
 def remove_redirect(url):
     if url is None:
         return None
-    if re.fullmatch(r'(((https?:)?//)?(www.)?youtube.com)?/redirect\?.*', url) is not None: # youtube puts these on external links to do tracking
+    if re.fullmatch(r'(((https?:)?//)?(www.)?youtube.com)?/redirect\?.*', url) is not None: # YouTube puts these on external links to do tracking
         query_string = url[url.find('?')+1: ]
         return urllib.parse.parse_qs(query_string)['q'][0]
     return url
@@ -133,11 +133,11 @@ def _recover_urls(runs):
     for run in runs:
         url = deep_get(run, 'navigationEndpoint', 'urlEndpoint', 'url')
         text = run.get('text', '')
-        # second condition is necessary because youtube makes other things into urls, such as hashtags, which we want to keep as text
+        # second condition is necessary because YouTube makes other things into urls, such as hashtags, which we want to keep as text
         if url is not None and (text.startswith('http://') or text.startswith('https://')):
             url = remove_redirect(url)
             run['url'] = url
-            run['text'] = url # youtube truncates the url text, use actual url instead
+            run['text'] = url # YouTube truncates the url text, use actual url instead
 
 def extract_str(node, default=None, recover_urls=False):
     '''default is the value returned if the extraction fails. If recover_urls is true, will attempt to fix YouTube's truncation of url text (most prominently seen in descriptions)'''
@@ -185,7 +185,7 @@ def extract_int(string, default=None, whole_word=True):
     return default
 
 def extract_approx_int(string):
-    '''e.g. "15.1M" from "15.1M subscribers"'''
+    '''e.g. "15.1M" from "15.1M subscribers" or '4,353' from 4353'''
     if not isinstance(string, str):
         string = extract_str(string)
     if not string:
@@ -193,7 +193,10 @@ def extract_approx_int(string):
     match = re.search(r'\b(\d+(?:\.\d+)?[KMBTkmbt]?)\b', string.replace(',', ''))
     if match is None:
         return None
-    return match.group(1)
+    result = match.group(1)
+    if re.fullmatch(r'\d+', result):
+        result = '{:,}'.format(int(result))
+    return result
 
 MONTH_ABBREVIATIONS = {'jan':'1', 'feb':'2', 'mar':'3', 'apr':'4', 'may':'5', 'jun':'6', 'jul':'7', 'aug':'8', 'sep':'9', 'oct':'10', 'nov':'11', 'dec':'12'}
 def extract_date(date_text):
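Note: extract_approx_int now also normalises plain digit runs with thousands separators, matching the updated docstring. Expected behaviour under the regex above (doctest-style):

    >>> extract_approx_int('15.1M subscribers')
    '15.1M'
    >>> extract_approx_int('4353')
    '4,353'
    >>> extract_approx_int('4,353 views')  # commas are stripped before matching
    '4,353'
    >>> extract_approx_int('no digits') is None
    True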
@@ -569,13 +572,13 @@ def extract_items(response, item_types=_item_types,
             item_types=item_types)
         if items:
             break
-    elif ('onResponseReceivedEndpoints' in response
+    if ('onResponseReceivedEndpoints' in response
             or 'onResponseReceivedActions' in response):
         for endpoint in multi_get(response,
                 'onResponseReceivedEndpoints',
                 'onResponseReceivedActions',
                 []):
-            items, ctoken = extract_items_from_renderer_list(
+            new_items, new_ctoken = extract_items_from_renderer_list(
                 multi_deep_get(
                     endpoint,
                     ['reloadContinuationItemsCommand', 'continuationItems'],
@@ -584,13 +587,17 @@ def extract_items(response, item_types=_item_types,
                 ),
                 item_types=item_types,
             )
-            if items:
-                break
-    elif 'contents' in response:
+            items += new_items
+            if (not ctoken) or (new_ctoken and new_items):
+                ctoken = new_ctoken
+
+    if 'contents' in response:
         renderer = get(response, 'contents', {})
-        items, ctoken = extract_items_from_renderer(
+        new_items, new_ctoken = extract_items_from_renderer(
             renderer,
             item_types=item_types)
+        items += new_items
+        if (not ctoken) or (new_ctoken and new_items):
+            ctoken = new_ctoken
 
     if search_engagement_panels and 'engagementPanels' in response:
         new_items, new_ctoken = extract_items_from_renderer_list(
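Note: extract_items previously stopped at the first container that yielded items (the elif/break structure); now every container is scanned and the results accumulate. The continuation-token rule (adopt the new token when none is set yet, or when the new token arrived together with actual items) prevents an empty trailing container from clobbering a useful token. Isolated, the rule is:

    def merge_batch(items, ctoken, new_items, new_ctoken):
        """Accumulate extracted items and keep the best continuation token."""
        items += new_items
        if (not ctoken) or (new_ctoken and new_items):
            ctoken = new_ctoken
        return items, ctoken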
@@ -85,23 +85,84 @@ def extract_channel_info(polymer_json, tab, continuation=False):
     if tab in ('search', 'playlists'):
         info['is_last_page'] = (ctoken is None)
     elif tab == 'about':
-        items, _ = extract_items(response, item_types={'channelAboutFullMetadataRenderer'})
-        if not items:
-            info['error'] = 'Could not find channelAboutFullMetadataRenderer'
-            return info
-        channel_metadata = items[0]['channelAboutFullMetadataRenderer']
-        info['links'] = []
-        for link_json in channel_metadata.get('primaryLinks', ()):
-            url = remove_redirect(deep_get(link_json, 'navigationEndpoint', 'urlEndpoint', 'url'))
-            if not (url.startswith('http://') or url.startswith('https://')):
-                url = 'http://' + url
-            text = extract_str(link_json.get('title'))
-            info['links'].append( (text, url) )
-        info['date_joined'] = extract_date(channel_metadata.get('joinedDateText'))
-        info['view_count'] = extract_int(channel_metadata.get('viewCountText'))
-        info['description'] = extract_str(channel_metadata.get('description'), default='')
+        # Latest type
+        items, _ = extract_items(response, item_types={'aboutChannelRenderer'})
+        if items:
+            a_metadata = deep_get(items, 0, 'aboutChannelRenderer',
+                'metadata', 'aboutChannelViewModel')
+            if not a_metadata:
+                info['error'] = 'Could not find aboutChannelViewModel'
+                return info
+
+            info['links'] = []
+            for link_outer in a_metadata.get('links', ()):
+                link = link_outer.get('channelExternalLinkViewModel') or {}
+                link_content = extract_str(deep_get(link, 'link', 'content'))
+                for run in deep_get(link, 'link', 'commandRuns') or ():
+                    url = remove_redirect(deep_get(run, 'onTap',
+                        'innertubeCommand', 'urlEndpoint', 'url'))
+                    if url and not (url.startswith('http://')
+                            or url.startswith('https://')):
+                        url = 'https://' + url
+                    if link_content is None or (link_content in url):
+                        break
+                else: # didn't break
+                    url = link_content
+                    if url and not (url.startswith('http://')
+                            or url.startswith('https://')):
+                        url = 'https://' + url
+                text = extract_str(deep_get(link, 'title', 'content'))
+                info['links'].append( (text, url) )
+
+            info['date_joined'] = extract_date(
+                a_metadata.get('joinedDateText')
+            )
+            info['view_count'] = extract_int(a_metadata.get('viewCountText'))
+            info['approx_view_count'] = extract_approx_int(
+                a_metadata.get('viewCountText')
+            )
+            info['description'] = extract_str(
+                a_metadata.get('description'), default=''
+            )
+            info['approx_video_count'] = extract_approx_int(
+                a_metadata.get('videoCountText')
+            )
+            info['approx_subscriber_count'] = extract_approx_int(
+                a_metadata.get('subscriberCountText')
+            )
+            info['country'] = extract_str(a_metadata.get('country'))
+            info['canonical_url'] = extract_str(
+                a_metadata.get('canonicalChannelUrl')
+            )
+
+        # Old type
+        else:
+            items, _ = extract_items(response,
+                item_types={'channelAboutFullMetadataRenderer'})
+            if not items:
+                info['error'] = 'Could not find aboutChannelRenderer or channelAboutFullMetadataRenderer'
+                return info
+            a_metadata = items[0]['channelAboutFullMetadataRenderer']
+
+            info['links'] = []
+            for link_json in a_metadata.get('primaryLinks', ()):
+                url = remove_redirect(deep_get(link_json, 'navigationEndpoint',
+                    'urlEndpoint', 'url'))
+                if url and not (url.startswith('http://')
+                        or url.startswith('https://')):
+                    url = 'https://' + url
+                text = extract_str(link_json.get('title'))
+                info['links'].append( (text, url) )
+
+            info['date_joined'] = extract_date(a_metadata.get('joinedDateText'))
+            info['view_count'] = extract_int(a_metadata.get('viewCountText'))
+            info['description'] = extract_str(a_metadata.get(
+                'description'), default='')
+
+            info['approx_video_count'] = None
+            info['approx_subscriber_count'] = None
+            info['country'] = None
+            info['canonical_url'] = None
     else:
         raise NotImplementedError('Unknown or unsupported channel tab: ' + tab)
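Note: the new-format link extraction leans on Python's for/else. The else clause runs only when the loop over commandRuns finishes without break, i.e. no run produced a URL consistent with the visible link text, in which case the visible text itself is used as the URL. The idiom in miniature:

    def first_even(numbers):
        for n in numbers:
            if n % 2 == 0:
                break
        else:  # no break: nothing matched
            return None
        return n

    assert first_even([3, 5, 8]) == 8
    assert first_even([3, 5, 7]) is None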
@@ -168,7 +229,7 @@ def extract_playlist_metadata(polymer_json):
     if metadata['first_video_id'] is None:
         metadata['thumbnail'] = None
     else:
-        metadata['thumbnail'] = 'https://i.ytimg.com/vi/' + metadata['first_video_id'] + '/mqdefault.jpg'
+        metadata['thumbnail'] = f"https://i.ytimg.com/vi/{metadata['first_video_id']}/hqdefault.jpg"
 
     metadata['video_count'] = extract_int(header.get('numVideosText'))
     metadata['description'] = extract_str(header.get('descriptionText'), default='')
@@ -191,6 +252,19 @@ def extract_playlist_metadata(polymer_json):
         elif 'updated' in text:
             metadata['time_published'] = extract_date(text)
 
+    microformat = deep_get(response, 'microformat', 'microformatDataRenderer',
+                           default={})
+    conservative_update(
+        metadata, 'title', extract_str(microformat.get('title'))
+    )
+    conservative_update(
+        metadata, 'description', extract_str(microformat.get('description'))
+    )
+    conservative_update(
+        metadata, 'thumbnail', deep_get(microformat, 'thumbnail',
+                                        'thumbnails', -1, 'url')
+    )
+
     return metadata
 
 def extract_playlist_info(polymer_json):
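Note: the microformat block only fills playlist fields the header did not already provide. conservative_update is one of the project's small dict helpers; assuming the usual definition (write only when the current value is None), it behaves like this sketch:

    def conservative_update(obj, key, value):
        # Assumed behaviour: fill gaps, never overwrite existing values.
        if obj.get(key) is None:
            obj[key] = value

    meta = {'title': 'From header', 'description': None}
    conservative_update(meta, 'title', 'From microformat')        # ignored
    conservative_update(meta, 'description', 'From microformat')  # applied
    assert meta == {'title': 'From header',
                    'description': 'From microformat'}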
@@ -198,13 +272,11 @@ def extract_playlist_info(polymer_json):
     if err:
         return {'error': err}
     info = {'error': None}
-    first_page = 'continuationContents' not in response
     video_list, _ = extract_items(response)
 
     info['items'] = [extract_item_info(renderer) for renderer in video_list]
 
-    if first_page:
-        info['metadata'] = extract_playlist_metadata(polymer_json)
+    info['metadata'] = extract_playlist_metadata(polymer_json)
 
     return info
@@ -140,11 +140,12 @@ def _extract_likes_dislikes(renderer_content):
         ['defaultText', 'accessibility', 'accessibilityData', 'label'],
         ['accessibility', 'label'],
         ['accessibilityData', 'accessibilityData', 'label'],
+        ['accessibilityText'],
     ))
 
     # this count doesn't have all the digits, it's like 53K for instance
-    dumb_count = extract_int(extract_str(deep_get(
-        toggle_button_renderer, 'defaultText')))
+    dumb_count = extract_int(extract_str(multi_get(
+        toggle_button_renderer, ['defaultText', 'title'])))
 
     # The accessibility text will be "No likes" or "No dislikes" or
     # something like that, but dumb count will be 0
@@ -168,16 +169,23 @@ def _extract_likes_dislikes(renderer_content):
             info['dislike_count'] = count
         elif 'slimMetadataButtonRenderer' in button:
             button_renderer = button['slimMetadataButtonRenderer']
-            liberal_update(info, 'like_count', extract_button_count(deep_get(
-                button_renderer, 'button',
-                'segmentedLikeDislikeButtonRenderer',
-                'likeButton', 'toggleButtonRenderer'
-            )))
-            liberal_update(info, 'dislike_count',extract_button_count(deep_get(
-                button_renderer, 'button',
-                'segmentedLikeDislikeButtonRenderer',
-                'dislikeButton', 'toggleButtonRenderer'
-            )))
+            liberal_update(info, 'like_count', extract_button_count(
+                multi_deep_get(button_renderer,
+                    ['button', 'segmentedLikeDislikeButtonRenderer',
+                        'likeButton', 'toggleButtonRenderer'],
+                    ['button', 'segmentedLikeDislikeButtonViewModel',
+                        'likeButtonViewModel', 'likeButtonViewModel',
+                        'toggleButtonViewModel', 'toggleButtonViewModel',
+                        'defaultButtonViewModel', 'buttonViewModel']
+                )
+            ))
+            '''liberal_update(info, 'dislike_count', extract_button_count(
+                deep_get(
+                    button_renderer, 'button',
+                    'segmentedLikeDislikeButtonRenderer',
+                    'dislikeButton', 'toggleButtonRenderer'
+                )
+            ))'''
         return info
 
 def _extract_from_owner_renderer(renderer_content):
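Note: the like count can now arrive in either the old segmentedLikeDislikeButtonRenderer shape or the newer ViewModel chain, so the single deep_get becomes a multi_deep_get over both key paths (the dislike lookup is kept only as a commented-out reference, since YouTube stopped exposing dislike counts). Assuming the usual semantics of these helpers, multi_deep_get is simply "first path that resolves wins"; a self-contained sketch of that assumed behaviour:

    def deep_get(obj, *keys, default=None):
        """Follow a chain of dict keys / list indices; default on any miss."""
        for key in keys:
            try:
                obj = obj[key]
            except (KeyError, IndexError, TypeError):
                return default
        return obj

    def multi_deep_get(obj, *key_paths, default=None):
        """Try each key path in order; return the first value that resolves."""
        for path in key_paths:
            value = deep_get(obj, *path)
            if value is not None:
                return value
        return default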
@@ -357,17 +365,18 @@ def _extract_watch_info_mobile(top_level):
         # https://www.androidpolice.com/2019/10/31/google-youtube-app-comment-section-below-videos/
         # https://www.youtube.com/watch?v=bR5Q-wD-6qo
         if header_type == 'commentsEntryPointHeaderRenderer':
-            comment_count_text = extract_str(comment_info.get('headerText'))
+            comment_count_text = extract_str(multi_get(
+                comment_info, 'commentCount', 'headerText'))
         else:
             comment_count_text = extract_str(deep_get(comment_info,
                 'header', 'commentSectionHeaderRenderer', 'countText'))
         if comment_count_text == 'Comments': # just this with no number, means 0 comments
-            info['comment_count'] = 0
+            info['comment_count'] = '0'
         else:
-            info['comment_count'] = extract_int(comment_count_text)
+            info['comment_count'] = extract_approx_int(comment_count_text)
         info['comments_disabled'] = False
     else: # no comment section present means comments are disabled
-        info['comment_count'] = 0
+        info['comment_count'] = '0'
         info['comments_disabled'] = True
 
     # check for limited state
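Note: comment_count changes type here, from an int to a display string ('0', or whatever extract_approx_int yields, e.g. '1.5K' or '4,353'), so templates can render it directly but arithmetic on it no longer works. If a numeric value were ever needed again, a hedged sketch of the reverse conversion (this helper is not part of the codebase):

    _SUFFIX = {'K': 10**3, 'M': 10**6, 'B': 10**9, 'T': 10**12}

    def approx_to_number(text):
        """'1.5K' -> 1500, '4,353' -> 4353, '0' -> 0 (approximate)."""
        if not text:
            return None
        text = text.replace(',', '')
        if text[-1].upper() in _SUFFIX:
            return int(float(text[:-1]) * _SUFFIX[text[-1].upper()])
        return int(text)

    assert approx_to_number('1.5K') == 1500
    assert approx_to_number('4,353') == 4353
    assert approx_to_number('0') == 0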
@@ -463,6 +472,13 @@ def _extract_formats(info, player_response):
     for yt_fmt in yt_formats:
         itag = yt_fmt.get('itag')
 
+        # Translated audio track
+        # Example: https://www.youtube.com/watch?v=gF9kkB0UWYQ
+        # Only get the original language for now so a foreign
+        # translation will not be picked just because it comes first
+        if deep_get(yt_fmt, 'audioTrack', 'audioIsDefault') is False:
+            continue
+
         fmt = {}
         fmt['itag'] = itag
         fmt['ext'] = None
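Note: formats carrying a dubbed audio track report audioTrack.audioIsDefault == False, and skipping them keeps the original language from being displaced by whichever translation is listed first. The explicit `is False` comparison matters: most formats have no audioTrack at all, in which case deep_get returns None, and None must not count as "not default". A shaped example (field names as in the player response; itag values illustrative):

    yt_formats = [
        {'itag': 18},                                            # no audioTrack
        {'itag': 140, 'audioTrack': {'audioIsDefault': True}},   # original
        {'itag': 140, 'audioTrack': {'audioIsDefault': False}},  # dubbed: skip
    ]

    kept = [f for f in yt_formats
            if (f.get('audioTrack') or {}).get('audioIsDefault') is not False]
    assert [f['itag'] for f in kept] == [18, 140]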