Compare commits

41 Commits (SHA1):

- d4cba7eb6c
- 70cb453280
- 7a106331e7
- 8775e131af
- 1f16f7cb62
- 80b7f3cd00
- 8b79e067bc
- cda0627d5a
- ad40dd6d6b
- b91d53dc6f
- cda4fd1f26
- ff2a2edaa5
- 38d8d5d4c5
- f010452abf
- ab93f8242b
- 1505414a1a
- c04d7c9a24
- 3ee2df7faa
- d2c883c211
- 59c988f819
- 629c811e84
- 284024433b
- 55a8e50d6a
- 810dff999e
- 4da91fb972
- 874ac0a0ac
- 89ae1e265b
- 00bd9fee6f
- b215e2a3b2
- 97972d6fa3
- 6ae20bb1f5
- 5f3b90ad45
- 2463af7685
- 86bb312d6d
- 964b99ea40
- 51a1693789
- ca4a735692
- 2140f48919
- 4be01d3964
- b45e3476c8
- d591956baa
@@ -1,5 +1,3 @@
-[](https://drone.hgit.ga/heckyel/yt-local)
-
 # yt-local
 
 Fork of [youtube-local](https://github.com/user234683/youtube-local)
@@ -153,7 +151,7 @@ For coding guidelines and an overview of the software architecture, see the [HAC
 yt-local is not made to work in public mode, however there is an instance of yt-local in public mode but with less features
 
-- <https://1cd1-93-95-230-133.ngrok-free.app/https://youtube.com>
+- <https://m.fridu.us/https://youtube.com>
 
 ## License
@@ -114,10 +114,12 @@ if bitness == '32':
     visual_c_runtime_url = 'https://github.com/yuempek/vc-archive/raw/master/archives/vc15_(14.10.25017.0)_2017_x86.7z'
     visual_c_runtime_sha256 = '2549eb4d2ce4cf3a87425ea01940f74368bf1cda378ef8a8a1f1a12ed59f1547'
+    visual_c_name = 'vc15_(14.10.25017.0)_2017_x86.7z'
+    visual_c_path_to_dlls = 'runtime_minimum/System'
 else:
     visual_c_runtime_url = 'https://github.com/yuempek/vc-archive/raw/master/archives/vc15_(14.10.25017.0)_2017_x64.7z'
     visual_c_runtime_sha256 = '4f00b824c37e1017a93fccbd5775e6ee54f824b6786f5730d257a87a3d9ce921'
+    visual_c_name = 'vc15_(14.10.25017.0)_2017_x64.7z'
+    visual_c_path_to_dlls = 'runtime_minimum/System64'
 
 download_if_not_exists('get-pip.py', get_pip_url)
@@ -198,7 +200,7 @@ with open('./python/python3' + major_release + '._pth', 'a', encoding='utf-8') a
     f.write('..\n')'''
 
 log('Inserting Microsoft C Runtime')
-check_subp(subprocess.run([r'7z', '-y', 'e', '-opython', 'vc15_(14.10.25017.0)_2017_x86.7z', 'runtime_minimum/System']))
+check_subp(subprocess.run([r'7z', '-y', 'e', '-opython', visual_c_name, visual_c_path_to_dlls]))
 
 log('Installing dependencies')
 wine_run(['./python/python.exe', '-I', '-m', 'pip', 'install', '--no-compile', '-r', './requirements.txt'])
@@ -1,28 +1,21 @@
-attrs==22.1.0
-Brotli==1.0.9
-cachetools==4.2.4
-click==8.0.4
-dataclasses==0.6
+blinker==1.7.0
+Brotli==1.1.0
+cachetools==5.3.3
+click==8.1.7
 defusedxml==0.7.1
-Flask==2.0.1
-gevent==22.10.2
-greenlet==2.0.1
-importlib-metadata==4.6.4
-iniconfig==1.1.1
-itsdangerous==2.0.1
-Jinja2==3.0.3
-MarkupSafe==2.0.1
-packaging==20.9
-pluggy>=0.13.1
-py==1.10.0
-pyparsing==2.4.7
+Flask==3.0.2
+gevent==24.2.1
+greenlet==3.0.3
+iniconfig==2.0.0
+itsdangerous==2.1.2
+Jinja2==3.1.3
+MarkupSafe==2.1.5
+packaging==24.0
+pluggy==1.4.0
 PySocks==1.7.1
-pytest==6.2.5
-stem==1.8.0
-toml==0.10.2
-typing-extensions==3.10.0.2
-urllib3==1.26.11
-Werkzeug==2.1.1
-zipp==3.5.1
-zope.event==4.5.0
-zope.interface==5.4.0
+pytest==8.1.1
+stem==1.8.2
+urllib3==2.2.1
+Werkzeug==3.0.1
+zope.event==5.0
+zope.interface==6.2
@@ -1,20 +1,17 @@
-Brotli==1.0.9
-cachetools==4.2.4
-click==8.0.4
-dataclasses==0.6
+blinker==1.7.0
+Brotli==1.1.0
+cachetools==5.3.3
+click==8.1.7
 defusedxml==0.7.1
-Flask==2.0.1
-gevent==22.10.2
-greenlet==2.0.1
-importlib-metadata==4.6.4
-itsdangerous==2.0.1
-Jinja2==3.0.3
-MarkupSafe==2.0.1
+Flask==3.0.2
+gevent==24.2.1
+greenlet==3.0.3
+itsdangerous==2.1.2
+Jinja2==3.1.3
+MarkupSafe==2.1.5
 PySocks==1.7.1
-stem==1.8.0
-typing-extensions==3.10.0.2
-urllib3==1.26.11
-Werkzeug==2.1.1
-zipp==3.5.1
-zope.event==4.5.0
-zope.interface==5.4.0
+stem==1.8.2
+urllib3==2.2.1
+Werkzeug==3.0.1
+zope.event==5.0
+zope.interface==6.2
@@ -84,7 +84,7 @@ def proxy_site(env, start_response, video=False):
     else:
         response, cleanup_func = util.fetch_url_response(url, send_headers)
 
-    response_headers = response.getheaders()
+    response_headers = response.headers
     if isinstance(response_headers, urllib3._collections.HTTPHeaderDict):
         response_headers = response_headers.items()
     if video:
settings.py (43 lines changed)
@@ -151,6 +151,13 @@ For security reasons, enabling this is not recommended.''',
         'category': 'interface',
     }),
 
+    ('autoplay_videos', {
+        'type': bool,
+        'default': False,
+        'comment': '',
+        'category': 'playback',
+    }),
+
     ('default_resolution', {
         'type': int,
         'default': 720,
@@ -200,12 +207,17 @@ For security reasons, enabling this is not recommended.''',
     }),
 
     ('prefer_uni_sources', {
-        'label': 'Prefer integrated sources',
-        'type': bool,
-        'default': False,
+        'label': 'Use integrated sources',
+        'type': int,
+        'default': 1,
         'comment': '',
+        'options': [
+            (0, 'Prefer not'),
+            (1, 'Prefer'),
+            (2, 'Always'),
+        ],
         'category': 'playback',
-        'description': 'If enabled and the default resolution is set to 360p or 720p, uses the unified (integrated) video files which contain audio and video, with buffering managed by the browser. If disabled, always uses the separate audio and video files through custom buffer management in av-merge via MediaSource.',
+        'description': 'If set to Prefer or Always and the default resolution is set to 360p or 720p, uses the unified (integrated) video files which contain audio and video, with buffering managed by the browser. If set to prefer not, uses the separate audio and video files through custom buffer management in av-merge via MediaSource unless they are unavailable.',
     }),
 
     ('use_video_player', {
@@ -298,6 +310,18 @@ Archive: https://archive.ph/OZQbN''',
         'comment': '',
     }),
 
+    ('include_shorts_in_subscriptions', {
+        'type': bool,
+        'default': 0,
+        'comment': '',
+    }),
+
+    ('include_shorts_in_channel', {
+        'type': bool,
+        'default': 1,
+        'comment': '',
+    }),
+
     ('gather_googlevideo_domains', {
         'type': bool,
         'default': False,
@@ -314,7 +338,7 @@ Archive: https://archive.ph/OZQbN''',
 
     ('settings_version', {
         'type': int,
-        'default': 4,
+        'default': 5,
         'comment': '''Do not change, remove, or comment out this value, or else your settings may be lost or corrupted''',
         'hidden': True,
     }),
@@ -387,10 +411,19 @@ def upgrade_to_4(settings_dict):
     return new_settings
 
 
+def upgrade_to_5(settings_dict):
+    new_settings = settings_dict.copy()
+    if 'prefer_uni_sources' in settings_dict:
+        new_settings['prefer_uni_sources'] = int(settings_dict['prefer_uni_sources'])
+    new_settings['settings_version'] = 5
+    return new_settings
+
+
 upgrade_functions = {
     1: upgrade_to_2,
     2: upgrade_to_3,
     3: upgrade_to_4,
+    4: upgrade_to_5,
 }
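Each `upgrade_to_N` converts a settings dict from version N-1 to version N, and `upgrade_functions` maps a version to the step that leaves it, so old settings files can be migrated one version at a time. A minimal sketch (not code from this diff) of how such a chain is typically driven:

```python
def upgrade_settings(settings_dict, upgrade_functions, latest_version=5):
    # Repeatedly apply the one-step migration registered for the
    # dict's current version until it reaches latest_version.
    version = settings_dict.get('settings_version', 1)
    while version < latest_version:
        settings_dict = upgrade_functions[version](settings_dict)
        version = settings_dict['settings_version']
    return settings_dict
```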
@@ -54,7 +54,10 @@ def commatize(num):
     if num is None:
         return ''
     if isinstance(num, str):
-        num = int(num)
+        try:
+            num = int(num)
+        except ValueError:
+            return num
     return '{:,}'.format(num)
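With the `try`/`except ValueError` guard, `commatize` now passes non-numeric strings through unchanged instead of raising. A small self-contained illustration of the patched behavior:

```python
def commatize(num):
    # Same logic as the patched helper above.
    if num is None:
        return ''
    if isinstance(num, str):
        try:
            num = int(num)
        except ValueError:
            return num  # e.g. an approximate count like '1.2K'
    return '{:,}'.format(num)

assert commatize(None) == ''
assert commatize('1234567') == '1,234,567'
assert commatize('1.2K') == '1.2K'  # previously raised ValueError
```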
@@ -1,6 +1,8 @@
 import base64
-from youtube import util, yt_data_extract, local_playlist, subscriptions
+from youtube import (util, yt_data_extract, local_playlist, subscriptions,
+                     playlist)
 from youtube import yt_app
 import settings
 
 import urllib
 import json
@@ -82,6 +84,40 @@ def channel_ctoken_v5(channel_id, page, sort, tab, view=1):
 
     return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
 
 
+def channel_about_ctoken(channel_id):
+    return proto.make_protobuf(
+        ('base64p',
+         [
+            [2, 80226972,
+             [
+                [2, 2, channel_id],
+                [2, 3,
+                 ('base64p',
+                  [
+                     [2, 110,
+                      [
+                         [2, 3,
+                          [
+                             [2, 19,
+                              [
+                                 [2, 1, b'66b0e9e9-0000-2820-9589-582429a83980'],
+                              ]
+                             ],
+                          ]
+                         ],
+                      ]
+                     ],
+                  ]
+                 )
+                ],
+             ]
+            ],
+         ]
+        )
+    )
+
+
 # https://github.com/user234683/youtube-local/issues/151
 def channel_ctoken_v4(channel_id, page, sort, tab, view=1):
     new_sort = (2 if int(sort) == 1 else 1)
@@ -228,7 +264,7 @@ def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
                 'hl': 'en',
                 'gl': 'US',
                 'clientName': 'WEB',
-                'clientVersion': '2.20180830',
+                'clientVersion': '2.20240327.00.00',
             },
         },
         'continuation': ctoken,
@@ -243,7 +279,8 @@
 
 
 # cache entries expire after 30 minutes
-@cachetools.func.ttl_cache(maxsize=128, ttl=30*60)
+number_of_videos_cache = cachetools.TTLCache(128, 30*60)
+@cachetools.cached(number_of_videos_cache)
 def get_number_of_videos_channel(channel_id):
     if channel_id is None:
         return 1000
@@ -268,11 +305,14 @@ def get_number_of_videos_channel(channel_id):
         return int(match.group(1).replace(',',''))
     else:
         return 0
+
+def set_cached_number_of_videos(channel_id, num_videos):
+    @cachetools.cached(number_of_videos_cache)
+    def dummy_func_using_same_cache(channel_id):
+        return num_videos
+    dummy_func_using_same_cache(channel_id)
 
 
 channel_id_re = re.compile(r'videos\.xml\?channel_id=([a-zA-Z0-9_-]{24})"')
 
 
 @cachetools.func.lru_cache(maxsize=128)
 def get_channel_id(base_url):
     # method that gives the smallest possible response at ~4 kb
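The `dummy_func_using_same_cache` trick works because `cachetools.cached` keys entries only by the call arguments, so any function decorated with the same cache object reads and writes the same slots. A standalone sketch of the pattern:

```python
import cachetools

cache = cachetools.TTLCache(maxsize=128, ttl=30*60)

@cachetools.cached(cache)
def lookup(key):
    print('computing', key)  # runs only on a cache miss
    return len(key)

def seed(key, value):
    # A throwaway function decorated with the same cache object stores
    # `value` under the key (key,), pre-filling the cache for lookup().
    @cachetools.cached(cache)
    def dummy(key):
        return value
    dummy(key)

seed('abc', 999)
assert lookup('abc') == 999  # served from the shared cache, no recompute
```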
@@ -331,7 +371,7 @@ def get_channel_search_json(channel_id, query, page):
                 'hl': 'en',
                 'gl': 'US',
                 'clientName': 'WEB',
-                'clientVersion': '2.20180830',
+                'clientVersion': '2.20240327.00.00',
             },
         },
         'continuation': ctoken,
@@ -349,15 +389,16 @@ def post_process_channel_info(info):
     info['avatar'] = util.prefix_url(info['avatar'])
     info['channel_url'] = util.prefix_url(info['channel_url'])
     for item in info['items']:
         item['thumbnail'] = "https://i.ytimg.com/vi/{}/hqdefault.jpg".format(item['id'])
         util.prefix_urls(item)
         util.add_extra_html_info(item)
     if info['current_tab'] == 'about':
         for i, (text, url) in enumerate(info['links']):
-            if util.YOUTUBE_URL_RE.fullmatch(url):
+            if isinstance(url, str) and util.YOUTUBE_URL_RE.fullmatch(url):
                 info['links'][i] = (text, util.prefix_url(url))
 
 
-def get_channel_first_page(base_url=None, channel_id=None, tab='videos'):
+def get_channel_first_page(base_url=None, tab='videos', channel_id=None):
     if channel_id:
         base_url = 'https://www.youtube.com/channel/' + channel_id
     return util.fetch_url(base_url + '/' + tab + '?pbj=1&view=0',
@@ -370,43 +411,106 @@ playlist_sort_codes = {'2': "da", '3': "dd", '4': "lad"}
 # youtube.com/user/[username]/[tab]
 # youtube.com/c/[custom]/[tab]
 # youtube.com/[custom]/[tab]
 
 
 def get_channel_page_general_url(base_url, tab, request, channel_id=None):
 
     page_number = int(request.args.get('page', 1))
-    sort = request.args.get('sort', '3')
+    # sort 1: views
+    # sort 2: oldest
+    # sort 3: newest
+    # sort 4: newest - no shorts (Just a kludge on our end, not internal to yt)
+    default_sort = '3' if settings.include_shorts_in_channel else '4'
+    sort = request.args.get('sort', default_sort)
     view = request.args.get('view', '1')
     query = request.args.get('query', '')
     ctoken = request.args.get('ctoken', '')
-    default_params = (page_number == 1 and sort == '3' and view == '1')
+    include_shorts = (sort != '4')
+    default_params = (page_number == 1 and sort in ('3', '4') and view == '1')
     continuation = bool(ctoken) # whether or not we're using a continuation
+    page_size = 30
+    try_channel_api = True
+    polymer_json = None
 
     if (tab in ('videos', 'shorts', 'streams') and channel_id and
             not default_params):
         tasks = (
             gevent.spawn(get_number_of_videos_channel, channel_id),
             gevent.spawn(get_channel_tab, channel_id, page_number, sort,
                          tab, view, ctoken)
         )
         gevent.joinall(tasks)
         util.check_gevent_exceptions(*tasks)
         number_of_videos, polymer_json = tasks[0].value, tasks[1].value
+        continuation = True
     elif tab in ('videos', 'shorts', 'streams'):
+        # Use the special UU playlist which contains all the channel's uploads
+        if tab == 'videos' and sort in ('3', '4'):
+            if not channel_id:
+                channel_id = get_channel_id(base_url)
+            if page_number == 1 and include_shorts:
+                tasks = (
+                    gevent.spawn(playlist.playlist_first_page,
+                                 'UU' + channel_id[2:],
+                                 report_text='Retrieved channel videos'),
+                    gevent.spawn(get_metadata, channel_id),
+                )
+                gevent.joinall(tasks)
+                util.check_gevent_exceptions(*tasks)
+
+                # Ignore the metadata for now, it is cached and will be
+                # recalled later
+                pl_json = tasks[0].value
+                pl_info = yt_data_extract.extract_playlist_info(pl_json)
+                number_of_videos = pl_info['metadata']['video_count']
+                if number_of_videos is None:
+                    number_of_videos = 1000
+                else:
+                    set_cached_number_of_videos(channel_id, number_of_videos)
+            else:
+                tasks = (
+                    gevent.spawn(playlist.get_videos, 'UU' + channel_id[2:],
+                                 page_number, include_shorts=include_shorts),
+                    gevent.spawn(get_metadata, channel_id),
+                    gevent.spawn(get_number_of_videos_channel, channel_id),
+                )
+                gevent.joinall(tasks)
+                util.check_gevent_exceptions(*tasks)
+
+                pl_json = tasks[0].value
+                pl_info = yt_data_extract.extract_playlist_info(pl_json)
+                number_of_videos = tasks[2].value
+
+            info = pl_info
+            info['channel_id'] = channel_id
+            info['current_tab'] = 'videos'
+            if info['items']: # Success
+                page_size = 100
+                try_channel_api = False
+            else: # Try the first-page method next
+                try_channel_api = True
+
+        # Use the regular channel API
+        if tab in ('shorts', 'streams') or (tab=='videos' and try_channel_api):
+            if channel_id:
+                num_videos_call = (get_number_of_videos_channel, channel_id)
+            else:
+                num_videos_call = (get_number_of_videos_general, base_url)
+
+            # Use ctoken method, which YouTube changes all the time
+            if channel_id and not default_params:
+                if sort == 4:
+                    _sort = 3
+                else:
+                    _sort = sort
+                page_call = (get_channel_tab, channel_id, page_number, _sort,
+                             tab, view, ctoken)
+            # Use the first-page method, which won't break
+            else:
+                page_call = (get_channel_first_page, base_url, tab)
+
+            tasks = (
+                gevent.spawn(*num_videos_call),
-                gevent.spawn(get_channel_first_page, base_url=base_url, tab=tab),
+                gevent.spawn(*page_call),
+            )
+            gevent.joinall(tasks)
+            util.check_gevent_exceptions(*tasks)
+            number_of_videos, polymer_json = tasks[0].value, tasks[1].value
 
     elif tab == 'about':
-        polymer_json = util.fetch_url(base_url + '/about?pbj=1', headers_desktop, debug_name='gen_channel_about')
+        # polymer_json = util.fetch_url(base_url + '/about?pbj=1', headers_desktop, debug_name='gen_channel_about')
+        channel_id = get_channel_id(base_url)
+        ctoken = channel_about_ctoken(channel_id)
+        polymer_json = util.call_youtube_api('web', 'browse', {
+            'continuation': ctoken,
+        })
+        continuation=True
     elif tab == 'playlists' and page_number == 1:
         polymer_json = util.fetch_url(base_url+ '/playlists?pbj=1&view=1&sort=' + playlist_sort_codes[sort], headers_desktop, debug_name='gen_channel_playlists')
     elif tab == 'playlists':
@@ -418,12 +522,19 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
     elif tab == 'search':
         url = base_url + '/search?pbj=1&query=' + urllib.parse.quote(query, safe='')
         polymer_json = util.fetch_url(url, headers_desktop, debug_name='gen_channel_search')
+    elif tab == 'videos':
+        pass
     else:
         flask.abort(404, 'Unknown channel tab: ' + tab)
 
+    if polymer_json is not None:
+        info = yt_data_extract.extract_channel_info(
+            json.loads(polymer_json), tab, continuation=continuation
+        )
+
+        if info['error'] is not None:
+            return flask.render_template('error.html', error_message=info['error'])
+
-    info = yt_data_extract.extract_channel_info(json.loads(polymer_json), tab,
-                                                continuation=continuation)
     if channel_id:
         info['channel_url'] = 'https://www.youtube.com/channel/' + channel_id
         info['channel_id'] = channel_id
@@ -431,11 +542,11 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
         channel_id = info['channel_id']
 
     # Will have microformat present, cache metadata while we have it
-    if channel_id and default_params:
+    if channel_id and default_params and tab not in ('videos', 'about'):
         metadata = extract_metadata_for_caching(info)
         set_cached_metadata(channel_id, metadata)
     # Otherwise, populate with our (hopefully cached) metadata
-    elif channel_id and info['channel_name'] is None:
+    elif channel_id and info.get('channel_name') is None:
         metadata = get_metadata(channel_id)
         for key, value in metadata.items():
             yt_data_extract.conservative_update(info, key, value)
@@ -448,12 +559,9 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
     for item in info['items']:
         item.update(additional_info)
 
-    if info['error'] is not None:
-        return flask.render_template('error.html', error_message = info['error'])
-
     if tab in ('videos', 'shorts', 'streams'):
         info['number_of_videos'] = number_of_videos
-        info['number_of_pages'] = math.ceil(number_of_videos/30)
+        info['number_of_pages'] = math.ceil(number_of_videos/page_size)
     info['header_playlist_names'] = local_playlist.get_playlist_names()
     if tab in ('videos', 'shorts', 'streams', 'playlists'):
         info['current_sort'] = sort
@@ -53,7 +53,7 @@ def request_comments(ctoken, replies=False):
             'hl': 'en',
             'gl': 'US',
             'clientName': 'MWEB',
-            'clientVersion': '2.20210804.02.00',
+            'clientVersion': '2.20240328.08.00',
         },
     },
     'continuation': ctoken.replace('=', '%3D'),
@@ -97,7 +97,7 @@ def post_process_comments_info(comments_info):
             ctoken = comment['reply_ctoken']
             ctoken, err = proto.set_protobuf_value(
                 ctoken,
-                'base64p', 6, 3, 9, value=250)
+                'base64p', 6, 3, 9, value=200)
             if err:
                 print('Error setting ctoken value:')
                 print(err)
@@ -127,7 +127,7 @@ def post_process_comments_info(comments_info):
         # change max_replies field to 250 in ctoken
         new_ctoken, err = proto.set_protobuf_value(
             ctoken,
-            'base64p', 6, 3, 9, value=250)
+            'base64p', 6, 3, 9, value=200)
         if err:
             print('Error setting ctoken value:')
             print(err)
@@ -12,12 +12,13 @@ from flask import request
 import flask
 
 
-def playlist_ctoken(playlist_id, offset):
+def playlist_ctoken(playlist_id, offset, include_shorts=True):
 
     offset = proto.uint(1, offset)
     # this is just obfuscation as far as I can tell. It doesn't even follow protobuf
     offset = b'PT:' + proto.unpadded_b64encode(offset)
     offset = proto.string(15, offset)
+    if not include_shorts:
+        offset += proto.string(104, proto.uint(2, 1))
 
     continuation_info = proto.string(3, proto.percent_b64encode(offset))
@@ -26,47 +27,46 @@ def playlist_ctoken(playlist_id, offset, include_shorts=True):
 
     return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
 
 # initial request types:
 #   polymer_json: https://m.youtube.com/playlist?list=PLv3TTBr1W_9tppikBxAE_G6qjWdBljBHJ&pbj=1&lact=0
 #   ajax json: https://m.youtube.com/playlist?list=PLv3TTBr1W_9tppikBxAE_G6qjWdBljBHJ&pbj=1&lact=0 with header X-YouTube-Client-Version: 1.20180418
 
 # continuation request types:
 #   polymer_json: https://m.youtube.com/playlist?&ctoken=[...]&pbj=1
 #   ajax json: https://m.youtube.com/playlist?action_continuation=1&ajax=1&ctoken=[...]
 
-headers_1 = (
-    ('Accept', '*/*'),
-    ('Accept-Language', 'en-US,en;q=0.5'),
-    ('X-YouTube-Client-Name', '2'),
-    ('X-YouTube-Client-Version', '2.20180614'),
-)
-
-def playlist_first_page(playlist_id, report_text="Retrieved playlist"):
-    url = 'https://m.youtube.com/playlist?list=' + playlist_id + '&pbj=1'
-    content = util.fetch_url(url, util.mobile_ua + headers_1, report_text=report_text, debug_name='playlist_first_page')
-    content = json.loads(content.decode('utf-8'))
+def playlist_first_page(playlist_id, report_text="Retrieved playlist",
+                        use_mobile=False):
+    if use_mobile:
+        url = 'https://m.youtube.com/playlist?list=' + playlist_id + '&pbj=1'
+        content = util.fetch_url(
+            url, util.mobile_xhr_headers,
+            report_text=report_text, debug_name='playlist_first_page'
+        )
+        content = json.loads(content.decode('utf-8'))
+    else:
+        url = 'https://www.youtube.com/playlist?list=' + playlist_id + '&pbj=1'
+        content = util.fetch_url(
+            url, util.desktop_xhr_headers,
+            report_text=report_text, debug_name='playlist_first_page'
+        )
+        content = json.loads(content.decode('utf-8'))
 
     return content
 
 
-#https://m.youtube.com/playlist?itct=CBMQybcCIhMIptj9xJaJ2wIV2JKcCh3Idwu-&ctoken=4qmFsgI2EiRWTFBMT3kwajlBdmxWWlB0bzZJa2pLZnB1MFNjeC0tN1BHVEMaDmVnWlFWRHBEUWxFJTNE&pbj=1
-def get_videos(playlist_id, page):
-
-    url = "https://m.youtube.com/playlist?ctoken=" + playlist_ctoken(playlist_id, (int(page)-1)*20) + "&pbj=1"
-    headers = {
-        'User-Agent': ' Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1',
-        'Accept': '*/*',
-        'Accept-Language': 'en-US,en;q=0.5',
-        'X-YouTube-Client-Name': '2',
-        'X-YouTube-Client-Version': '2.20180508',
-    }
+def get_videos(playlist_id, page, include_shorts=True, use_mobile=False,
+               report_text='Retrieved playlist'):
+    # mobile requests return 20 videos per page
+    if use_mobile:
+        page_size = 20
+        headers = util.mobile_xhr_headers
+    # desktop requests return 100 videos per page
+    else:
+        page_size = 100
+        headers = util.desktop_xhr_headers
 
+    url = "https://m.youtube.com/playlist?ctoken="
+    url += playlist_ctoken(playlist_id, (int(page)-1)*page_size,
+                           include_shorts=include_shorts)
+    url += "&pbj=1"
     content = util.fetch_url(
-        url, headers,
-        report_text="Retrieved playlist", debug_name='playlist_videos')
+        url, headers, report_text=report_text,
+        debug_name='playlist_videos'
+    )
 
     info = json.loads(content.decode('utf-8'))
     return info
@@ -85,7 +85,10 @@ def get_playlist_page():
         this_page_json = first_page_json
     else:
         tasks = (
-            gevent.spawn(playlist_first_page, playlist_id, report_text="Retrieved playlist info" ),
+            gevent.spawn(
+                playlist_first_page, playlist_id,
+                report_text="Retrieved playlist info", use_mobile=True
+            ),
             gevent.spawn(get_videos, playlist_id, page)
         )
         gevent.joinall(tasks)
@@ -118,7 +121,7 @@ def get_playlist_page():
         'playlist.html',
         header_playlist_names=local_playlist.get_playlist_names(),
         video_list=info.get('items', []),
-        num_pages=math.ceil(video_count/20),
+        num_pages=math.ceil(video_count/100),
         parameters_dictionary=request.args,
 
         **info['metadata']
@@ -141,6 +141,17 @@ base64_enc_funcs = {
 
 
 def _make_protobuf(data):
+    '''
+    Input: Recursive list of protobuf objects or base-64 encodings
+    Output: Protobuf bytestring
+    Each protobuf object takes the form [wire_type, field_number, field_data]
+    If a string protobuf has a list/tuple of length 2, this has the form
+        (base64 type, data)
+    The base64 types are
+    - base64 means a base64 encode with equals sign paddings
+    - base64s means a base64 encode without padding
+    - base64p means a url base64 encode with equals signs replaced with %3D
+    '''
     # must be dict mapping field_number to [wire_type, value]
     if isinstance(data, dict):
        new_data = []
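The new docstring describes the same nested-list format that `channel_about_ctoken` in channel.py builds on: messages can nest, and a `('base64p', ...)` wrapper base64-encodes the enclosed message with `=` padding replaced by `%3D`. A hedged usage sketch (the field numbers and values below are illustrative, not YouTube's real ones):

```python
from youtube import proto  # yt-local's protobuf helper

# Each entry is [wire_type, field_number, field_data];
# wire type 2 is a length-delimited field.
token = proto.make_protobuf(
    ('base64p', [
        [2, 2, 'UCexample0000000000000000'],      # illustrative string field
        [2, 3, ('base64p', [
            [2, 1, b'illustrative-inner-bytes'],  # illustrative nested field
        ])],
    ])
)
```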
@@ -204,6 +204,8 @@ Stream.prototype.setup = async function(){
                 this.url,
                 this.initRange.start,
                 this.indexRange.end,
+                'Initialization+index segments',
             ).then(
                 (buffer) => {
                     let init_end = this.initRange.end - this.initRange.start + 1;
                     let index_start = this.indexRange.start - this.initRange.start;
@@ -211,22 +213,23 @@ Stream.prototype.setup = async function(){
                     this.setupInitSegment(buffer.slice(0, init_end));
                     this.setupSegmentIndex(buffer.slice(index_start, index_end));
                 }
             )
         );
     } else {
         // initialization data
         await fetchRange(
             this.url,
             this.initRange.start,
             this.initRange.end,
-            this.setupInitSegment.bind(this),
-        );
+            'Initialization segment',
+        ).then(this.setupInitSegment.bind(this));
 
         // sidx (segment index) table
         fetchRange(
             this.url,
             this.indexRange.start,
             this.indexRange.end,
-            this.setupSegmentIndex.bind(this)
-        );
+            'Index segment',
+        ).then(this.setupSegmentIndex.bind(this));
     }
 }
 Stream.prototype.setupInitSegment = function(initSegment) {
@@ -388,7 +391,7 @@ Stream.prototype.getSegmentIdx = function(videoTime) {
         }
         index = index + increment;
     }
-    this.reportInfo('Could not find segment index for time', videoTime);
+    this.reportError('Could not find segment index for time', videoTime);
     return 0;
 }
 Stream.prototype.checkBuffer = async function() {
@@ -485,8 +488,8 @@ Stream.prototype.fetchSegment = function(segmentIdx) {
         this.url,
         entry.start,
         entry.end,
-        this.appendSegment.bind(this, segmentIdx),
-    );
+        String(this.streamType) + ' segment ' + String(segmentIdx),
+    ).then(this.appendSegment.bind(this, segmentIdx));
 }
 Stream.prototype.fetchSegmentIfNeeded = function(segmentIdx) {
     if (segmentIdx < 0 || segmentIdx >= this.sidx.entries.length){
@@ -518,22 +521,56 @@ Stream.prototype.reportWarning = function(...args) {
 Stream.prototype.reportError = function(...args) {
     reportError(String(this.streamType) + ':', ...args);
 }
-Stream.prototype.reportInfo = function(...args) {
-    reportInfo(String(this.streamType) + ':', ...args);
-}
 
 
 // Utility functions
 
-function fetchRange(url, start, end, cb) {
 // https://gomakethings.com/promise-based-xhr/
 // https://stackoverflow.com/a/30008115
+// http://lofi.limo/blog/retry-xmlhttprequest-carefully
+function fetchRange(url, start, end, debugInfo) {
     return new Promise((resolve, reject) => {
+        let retryCount = 0;
         let xhr = new XMLHttpRequest();
+        function onFailure(err, message, maxRetries=5){
+            message = debugInfo + ': ' + message + ' - Err: ' + String(err);
+            retryCount++;
+            if (retryCount > maxRetries || xhr.status == 403){
+                reportError('fetchRange error while fetching ' + message);
+                reject(message);
+                return;
+            } else {
+                reportWarning('Failed to fetch ' + message
+                    + '. Attempting retry '
+                    + String(retryCount) +'/' + String(maxRetries));
+            }
+
+            // Retry in 1 second, doubled for each next retry
+            setTimeout(function(){
+                xhr.open('get',url);
+                xhr.send();
+            }, 1000*Math.pow(2,(retryCount-1)));
+        }
         xhr.open('get', url);
+        xhr.timeout = 15000;
         xhr.responseType = 'arraybuffer';
         xhr.setRequestHeader('Range', 'bytes=' + start + '-' + end);
-        xhr.onload = function() {
-            //bytesFetched += end - start + 1;
-            resolve(cb(xhr.response));
+        xhr.onload = function (e) {
+            if (xhr.status >= 200 && xhr.status < 300) {
+                resolve(xhr.response);
+            } else {
+                onFailure(e,
+                    'Status '
+                    + String(xhr.status) + ' ' + String(xhr.statusText)
+                );
+            }
         };
+        xhr.onerror = function (event) {
+            onFailure(e, 'Network error');
+        };
+        xhr.ontimeout = function (event){
+            xhr.timeout += 5000;
+            onFailure(null, 'Timeout (15s)', maxRetries=5);
+        };
         xhr.send();
     });
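The rewritten `fetchRange` retries on failure with exponential backoff: delays of 1, 2, 4, 8, and 16 seconds for retries 1 through 5, giving up after that (and immediately on HTTP 403). A sketch of the same schedule, in Python for illustration:

```python
import time

def fetch_with_backoff(fetch, max_retries=5):
    # Sketch of fetchRange's retry schedule: 1s, 2s, 4s, 8s, 16s.
    for attempt in range(max_retries + 1):
        try:
            return fetch()
        except IOError:
            if attempt == max_retries:
                raise
            time.sleep(2 ** attempt)  # 1 second, doubled for each retry
```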
@@ -573,9 +610,6 @@ function addEvent(obj, eventName, func) {
     return new RegisteredEvent(obj, eventName, func);
 }
 
-function reportInfo(...args){
-    console.info(...args);
-}
 function reportWarning(...args){
     console.warn(...args);
 }
@@ -1,77 +1,66 @@
 (function main() {
     'use strict';
 
-    let captionsActive;
-
-    switch(true) {
-        case data.settings.subtitles_mode == 2:
-            captionsActive = true;
-            break;
-        case data.settings.subtitles_mode == 1 && data.has_manual_captions:
-            captionsActive = true;
-            break;
-        default:
-            captionsActive = false;
+    // Captions
+    let captionsActive = false;
+    if (data.settings.subtitles_mode === 2 || (data.settings.subtitles_mode === 1 && data.has_manual_captions)) {
+        captionsActive = true;
     }
 
+    // AutoPlay
+    let autoplayActive = data.settings.autoplay_videos || false;
+
     let qualityOptions = [];
     let qualityDefault;
-    for (let src of data['uni_sources']) {
-        qualityOptions.push(src.quality_string)
+
+    for (let src of data.uni_sources) {
+        qualityOptions.push(src.quality_string);
     }
-    for (let src of data['pair_sources']) {
-        qualityOptions.push(src.quality_string)
+
+    for (let src of data.pair_sources) {
+        qualityOptions.push(src.quality_string);
     }
-    if (data['using_pair_sources'])
-        qualityDefault = data['pair_sources'][data['pair_idx']].quality_string;
-    else if (data['uni_sources'].length != 0)
-        qualityDefault = data['uni_sources'][data['uni_idx']].quality_string;
-    else
+
+    if (data.using_pair_sources) {
+        qualityDefault = data.pair_sources[data.pair_idx].quality_string;
+    } else if (data.uni_sources.length !== 0) {
+        qualityDefault = data.uni_sources[data.uni_idx].quality_string;
+    } else {
         qualityDefault = 'None';
+    }
 
     // Fix plyr refusing to work with qualities that are strings
     Object.defineProperty(Plyr.prototype, 'quality', {
-        set: function(input) {
+        set: function (input) {
             const config = this.config.quality;
             const options = this.options.quality;
-            let quality;
+            let quality = input;
+            let updateStorage = true;
 
             if (!options.length) {
                 return;
             }
 
-            // removing this line:
-            //let quality = [!is.empty(input) && Number(input), this.storage.get('quality'), config.selected, config.default].find(is.number);
-            // replacing with:
-            quality = input;
-            let updateStorage = true;
-
             if (!options.includes(quality)) {
                 // Plyr sets quality to null at startup, resulting in the erroneous
                 // calling of this setter function with input = null, and the
                 // commented out code below would set the quality to something
                 // unrelated at startup. Comment out and just return.
                 return;
                 /*const value = closest(options, quality);
                 this.debug.warn(`Unsupported quality option: ${quality}, using ${value} instead`);
                 quality = value; // Don't update storage if quality is not supported
                 updateStorage = false;*/
-            } // Update config
-
-
-            config.selected = quality; // Set quality
-
-            this.media.quality = quality; // Save to storage
-
-            if (updateStorage) {
-                this.storage.set({
-                    quality
-                });
-            }
+            }
+
+            // Update config
+            config.selected = quality;
+
+            // Set quality
+            this.media.quality = quality;
+
+            // Save to storage
+            if (updateStorage) {
+                this.storage.set({ quality });
+            }
         },
     });
 
     const player = new Plyr(document.getElementById('js-video-player'), {
+        // Learning about autoplay permission https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Permissions-Policy/autoplay#syntax
+        autoplay: autoplayActive,
         disableContextMenu: false,
         captions: {
             active: captionsActive,
@@ -89,29 +78,31 @@
         'settings',
         'pip',
         'airplay',
-        'fullscreen'
+        'fullscreen',
         ],
-        iconUrl: "/youtube.com/static/modules/plyr/plyr.svg",
-        blankVideo: "/youtube.com/static/modules/plyr/blank.webm",
+        iconUrl: '/youtube.com/static/modules/plyr/plyr.svg',
+        blankVideo: '/youtube.com/static/modules/plyr/blank.webm',
         debug: false,
-        storage: {enabled: false},
+        storage: { enabled: false },
         quality: {
             default: qualityDefault,
             options: qualityOptions,
             forced: true,
-            onChange: function(quality) {
-                if (quality == 'None') {return;}
+            onChange: function (quality) {
+                if (quality == 'None') {
+                    return;
+                }
                 if (quality.includes('(integrated)')) {
-                    for (let i=0; i < data['uni_sources'].length; i++) {
-                        if (data['uni_sources'][i].quality_string == quality) {
-                            changeQuality({'type': 'uni', 'index': i});
+                    for (let i = 0; i < data.uni_sources.length; i++) {
+                        if (data.uni_sources[i].quality_string == quality) {
+                            changeQuality({ type: 'uni', index: i });
                             return;
                         }
                     }
                 } else {
-                    for (let i=0; i < data['pair_sources'].length; i++) {
-                        if (data['pair_sources'][i].quality_string == quality) {
-                            changeQuality({'type': 'pair', 'index': i});
+                    for (let i = 0; i < data.pair_sources.length; i++) {
+                        if (data.pair_sources[i].quality_string == quality) {
+                            changeQuality({ type: 'pair', index: i });
                             return;
                         }
                     }
@@ -119,7 +110,7 @@
             },
         },
         previewThumbnails: {
-            enabled: storyboard_url != null,
+            enabled: storyboard_url !== null,
             src: [storyboard_url],
         },
         settings: ['captions', 'quality', 'speed', 'loop'],
@@ -127,4 +118,4 @@
             controls: true,
         },
     });
-}());
+})();
youtube/static/modules/plyr/custom_plyr.css (new file, 39 lines)
@@ -0,0 +1,39 @@
+/* Prevent this div from blocking right-click menu for video
+   e.g. Firefox playback speed options */
+.plyr__poster {
+    display: none;
+}
+
+/* plyr fix */
+.plyr:-moz-full-screen video {
+    max-height: initial;
+}
+
+.plyr:-webkit-full-screen video {
+    max-height: initial;
+}
+
+.plyr:-ms-fullscreen video {
+    max-height: initial;
+}
+
+.plyr:fullscreen video {
+    max-height: initial;
+}
+
+.plyr__preview-thumb__image-container {
+    width: 158px;
+    height: 90px;
+}
+
+.plyr__preview-thumb {
+    bottom: 100%;
+}
+
+.plyr__menu__container [role="menu"],
+.plyr__menu__container [role="menucaptions"] {
+    /* Set vertical scroll */
+    /* issue https://github.com/sampotts/plyr/issues/1420 */
+    max-height: 320px;
+    overflow-y: auto;
+}
youtube/static/modules/plyr/plyr.min.js.map (new file)
File diff suppressed because one or more lines are too long
@@ -21,21 +21,7 @@ img {
 video {
     width: 100%;
     height: auto;
-    max-height: 480px;
-}
-
-/* plyr fix */
-.plyr:-moz-full-screen video {
-    max-height: initial;
-}
-.plyr:-webkit-full-screen video {
-    max-height: initial;
-}
-.plyr:-ms-fullscreen video {
-    max-height: initial;
-}
-.plyr:fullscreen video {
-    max-height: initial;
+    max-height: calc(100vh/1.5);
 }
 
 a:link {
@@ -1,4 +1,4 @@
-from youtube import util, yt_data_extract, channel, local_playlist
+from youtube import util, yt_data_extract, channel, local_playlist, playlist
 from youtube import yt_app
 import settings
@@ -108,8 +108,7 @@ def _subscribe(channels):
     with connection as cursor:
         channel_ids_to_check = [channel[0] for channel in channels if not _is_subscribed(cursor, channel[0])]
 
-        rows = ((channel_id, channel_name, 0, 0) for channel_id,
-                channel_name in channels)
+        rows = ((channel_id, channel_name, 0, 0) for channel_id, channel_name in channels)
         cursor.executemany('''INSERT OR IGNORE INTO subscribed_channels (yt_channel_id, channel_name, time_last_checked, next_check_time)
                               VALUES (?, ?, ?, ?)''', rows)
@@ -236,8 +235,7 @@ def _get_channel_names(cursor, channel_ids):
     return result
 
 
-def _channels_with_tag(cursor, tag, order=False, exclude_muted=False,
-                       include_muted_status=False):
+def _channels_with_tag(cursor, tag, order=False, exclude_muted=False, include_muted_status=False):
     ''' returns list of (channel_id, channel_name) '''
 
     statement = '''SELECT yt_channel_id, channel_name'''
@@ -434,8 +432,10 @@ def autocheck_setting_changed(old_value, new_value):
         stop_autocheck_system()
 
 
-settings.add_setting_changed_hook('autocheck_subscriptions',
-                                  autocheck_setting_changed)
+settings.add_setting_changed_hook(
+    'autocheck_subscriptions',
+    autocheck_setting_changed
+)
 if settings.autocheck_subscriptions:
     start_autocheck_system()
 # ----------------------------
@@ -463,7 +463,24 @@ def _get_atoma_feed(channel_id):
 
 def _get_channel_videos_first_page(channel_id, channel_status_name):
     try:
-        return channel.get_channel_first_page(channel_id=channel_id)
+        # First try the playlist method
+        pl_json = playlist.get_videos(
+            'UU' + channel_id[2:],
+            1,
+            include_shorts=settings.include_shorts_in_subscriptions,
+            report_text=None
+        )
+        pl_info = yt_data_extract.extract_playlist_info(pl_json)
+        if pl_info.get('items'):
+            pl_info['items'] = pl_info['items'][0:30]
+            return pl_info
+
+        # Try the channel api method
+        channel_json = channel.get_channel_first_page(channel_id=channel_id)
+        channel_info = yt_data_extract.extract_channel_info(
+            json.loads(channel_json), 'videos'
+        )
+        return channel_info
     except util.FetchError as e:
         if e.code == '429' and settings.route_tor:
             error_message = ('Error checking channel ' + channel_status_name
@@ -497,7 +514,7 @@ def _get_upstream_videos(channel_id):
     )
     gevent.joinall(tasks)
 
-    channel_tab, feed = tasks[0].value, tasks[1].value
+    channel_info, feed = tasks[0].value, tasks[1].value
 
     # extract published times from atoma feed
     times_published = {}
@@ -535,9 +552,8 @@ def _get_upstream_videos(channel_id):
     except defusedxml.ElementTree.ParseError:
         print('Failed to read atoma feed for ' + channel_status_name)
 
-    if channel_tab is None: # there was an error
+    if channel_info is None: # there was an error
         return
-    channel_info = yt_data_extract.extract_channel_info(json.loads(channel_tab), 'videos')
     if channel_info['error']:
         print('Error checking channel ' + channel_status_name + ': ' + channel_info['error'])
         return
@@ -552,14 +568,38 @@ def _get_upstream_videos(channel_id):
         if video_item['id'] in times_published:
             video_item['time_published'] = times_published[video_item['id']]
             video_item['is_time_published_exact'] = True
-        else:
+        elif video_item.get('time_published'):
            video_item['is_time_published_exact'] = False
            try:
                video_item['time_published'] = youtube_timestamp_to_posix(video_item['time_published']) - i # subtract a few seconds off the videos so they will be in the right order
-            except KeyError:
+            except Exception:
                print(video_item)
 
+        else:
+            video_item['is_time_published_exact'] = False
+            video_item['time_published'] = None
         video_item['channel_id'] = channel_id
+
+    if len(videos) > 1:
+        # Go back and fill in any videos that don't have a time published
+        # using the time published of the surrounding ones
+        for i in range(len(videos)-1):
+            if (videos[i+1]['time_published'] is None
+                    and videos[i]['time_published'] is not None
+            ):
+                videos[i+1]['time_published'] = videos[i]['time_published'] - 1
+        for i in reversed(range(1,len(videos))):
+            if (videos[i-1]['time_published'] is None
+                    and videos[i]['time_published'] is not None
+            ):
+                videos[i-1]['time_published'] = videos[i]['time_published'] + 1
+    # Special case: none of the videos have a time published.
+    # In this case, make something up
+    if videos and videos[0]['time_published'] is None:
+        assert all(v['time_published'] is None for v in videos)
+        now = time.time()
+        for i in range(len(videos)):
+            # 1 month between videos
+            videos[i]['time_published'] = now - i*3600*24*30
 
 
     if len(videos) == 0:
         average_upload_period = 4*7*24*3600 # assume 1 month for channel with no videos
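The two passes above propagate known timestamps into neighboring `None` entries of the newest-first list: the forward pass fills each missing time from its newer neighbor (minus one second), the backward pass from its older neighbor (plus one second), so relative ordering is preserved. A standalone sketch on a plain list:

```python
def fill_times(times):
    # times is newest-first, e.g. [None, 100, None]
    times = list(times)
    for i in range(len(times) - 1):
        if times[i+1] is None and times[i] is not None:
            times[i+1] = times[i] - 1
    for i in reversed(range(1, len(times))):
        if times[i-1] is None and times[i] is not None:
            times[i-1] = times[i] + 1
    return times

assert fill_times([None, 100, None]) == [101, 100, 99]
```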
@@ -578,26 +618,31 @@ def _get_upstream_videos(channel_id):
     with open_database() as connection:
         with connection as cursor:
 
-            # calculate how many new videos there are
-            existing_vids = set(row[0] for row in cursor.execute(
-                '''SELECT video_id
+            # Get video ids and duration of existing vids so we
+            # can see how many new ones there are and update
+            # livestreams/premiers
+            existing_vids = list(cursor.execute(
+                '''SELECT video_id, duration
                    FROM videos
                    INNER JOIN subscribed_channels
                        ON videos.sql_channel_id = subscribed_channels.id
                    WHERE yt_channel_id=?
                    ORDER BY time_published DESC
                    LIMIT 30''', [channel_id]).fetchall())
+            existing_vid_ids = set(row[0] for row in existing_vids)
+            existing_durs = dict(existing_vids)
 
             # new videos the channel has uploaded since last time we checked
             number_of_new_videos = 0
             for video in videos:
-                if video['id'] in existing_vids:
+                if video['id'] in existing_vid_ids:
                     break
                 number_of_new_videos += 1
 
             is_first_check = cursor.execute('''SELECT time_last_checked FROM subscribed_channels WHERE yt_channel_id=?''', [channel_id]).fetchone()[0] in (None, 0)
             time_videos_retrieved = int(time.time())
             rows = []
+            update_rows = []
             for i, video_item in enumerate(videos):
                 if (is_first_check
                         or number_of_new_videos > 6
@@ -613,16 +658,34 @@ def _get_upstream_videos(channel_id):
                     time_noticed = video_item['time_published']
                 else:
                     time_noticed = time_videos_retrieved
-                rows.append((
-                    video_item['channel_id'],
-                    video_item['id'],
-                    video_item['title'],
-                    video_item['duration'],
-                    video_item['time_published'],
-                    video_item['is_time_published_exact'],
-                    time_noticed,
-                    video_item['description'],
-                ))
+
+                # videos which need durations updated
+                non_durations = ('upcoming', 'none', 'live', '')
+                v_id = video_item['id']
+                if (existing_durs.get(v_id) is not None
+                        and existing_durs[v_id].lower() in non_durations
+                        and video_item['duration'] not in non_durations
+                ):
+                    update_rows.append((
+                        video_item['title'],
+                        video_item['duration'],
+                        video_item['time_published'],
+                        video_item['is_time_published_exact'],
+                        video_item['description'],
+                        video_item['id'],
+                    ))
+                # all other videos
+                else:
+                    rows.append((
+                        video_item['channel_id'],
+                        video_item['id'],
+                        video_item['title'],
+                        video_item['duration'],
+                        video_item['time_published'],
+                        video_item['is_time_published_exact'],
+                        time_noticed,
+                        video_item['description'],
+                    ))
 
             cursor.executemany('''INSERT OR IGNORE INTO videos (
                 sql_channel_id,
@@ -635,6 +698,13 @@ def _get_upstream_videos(channel_id):
                 description
             )
             VALUES ((SELECT id FROM subscribed_channels WHERE yt_channel_id=?), ?, ?, ?, ?, ?, ?, ?)''', rows)
+            cursor.executemany('''UPDATE videos SET
+                title=?,
+                duration=?,
+                time_published=?,
+                is_time_published_exact=?,
+                description=?
+                WHERE video_id=?''', update_rows)
             cursor.execute('''UPDATE subscribed_channels
                 SET time_last_checked = ?, next_check_time = ?
                 WHERE yt_channel_id=?''', [int(time.time()), next_check_time, channel_id])
@@ -767,7 +837,7 @@ def import_subscriptions():
         error = 'Unsupported file format: ' + mime_type
         error += (' . Only subscription.json, subscriptions.csv files'
                   ' (from Google Takeouts)'
-                  ' and XML OPML files exported from Youtube\'s'
+                  ' and XML OPML files exported from YouTube\'s'
                   ' subscription manager page are supported')
         return (flask.render_template('error.html', error_message=error),
                 400)
@@ -962,7 +1032,8 @@ def get_subscriptions_page():
             'muted': muted,
         })
 
-    return flask.render_template('subscriptions.html',
+    return flask.render_template(
+        'subscriptions.html',
         header_playlist_names=local_playlist.get_playlist_names(),
         videos=videos,
         num_pages=math.ceil(number_of_videos_in_db/60),
@@ -51,8 +51,11 @@
     <ul>
         {% for (before_text, stat, after_text) in [
             ('Joined ', date_joined, ''),
-            ('', view_count|commatize, ' views'),
+            ('', approx_view_count, ' views'),
+            ('', approx_subscriber_count, ' subscribers'),
+            ('', approx_video_count, ' videos'),
             ('Country: ', country, ''),
             ('Canonical Url: ', canonical_url, ''),
         ] %}
             {% if stat %}
                 <li>{{ before_text + stat|string + after_text }}</li>
@@ -65,7 +68,11 @@
     <hr>
     <ul>
         {% for text, url in links %}
-            <li><a href="{{ url }}">{{ text }}</a></li>
+            {% if url %}
+                <li><a href="{{ url }}">{{ text }}</a></li>
+            {% else %}
+                <li>{{ text }}</li>
+            {% endif %}
         {% endfor %}
     </ul>
 </div>
@@ -74,7 +81,7 @@
     <!-- new-->
     <div id="links-metadata">
         {% if current_tab in ('videos', 'shorts', 'streams') %}
-            {% set sorts = [('1', 'views'), ('2', 'oldest'), ('3', 'newest')] %}
+            {% set sorts = [('1', 'views'), ('2', 'oldest'), ('3', 'newest'), ('4', 'newest - no shorts'),] %}
             <div id="number-of-results">{{ number_of_videos }} videos</div>
         {% elif current_tab == 'playlists' %}
             {% set sorts = [('2', 'oldest'), ('3', 'newest'), ('4', 'last video added')] %}
@@ -110,13 +117,9 @@
     <hr/>
 
     <footer class="pagination-container">
-        {% if (current_tab in ('videos', 'shorts', 'streams')) and current_sort.__str__() == '2' %}
-            <nav class="next-previous-button-row">
-                {{ common_elements.next_previous_ctoken_buttons(None, ctoken, channel_url + '/' + current_tab, parameters_dictionary) }}
-            </nav>
-        {% elif current_tab in ('videos', 'shorts', 'streams') %}
+        {% if current_tab in ('videos', 'shorts', 'streams') %}
             <nav class="pagination-list">
-                {{ common_elements.page_buttons(number_of_pages, channel_url + '/' + current_tab, parameters_dictionary, include_ends=(current_sort.__str__() == '3')) }}
+                {{ common_elements.page_buttons(number_of_pages, channel_url + '/' + current_tab, parameters_dictionary, include_ends=(current_sort.__str__() in '34')) }}
             </nav>
         {% elif current_tab == 'playlists' or current_tab == 'search' %}
             <nav class="next-previous-button-row">
@@ -8,14 +8,8 @@
     {% if settings.use_video_player == 2 %}
         <!-- plyr -->
         <link href="/youtube.com/static/modules/plyr/plyr.css" rel="stylesheet">
+        <link href="/youtube.com/static/modules/plyr/custom_plyr.css" rel="stylesheet">
         <!--/ plyr -->
-        <style>
-            /* Prevent this div from blocking right-click menu for video
-               e.g. Firefox playback speed options */
-            .plyr__poster {
-                display: none !important;
-            }
-        </style>
     {% endif %}
 {% endblock style %}
@@ -40,7 +34,7 @@
     </div>
     {% else %}
     <figure class="sc-video">
-        <video id="js-video-player" playsinline controls>
+        <video id="js-video-player" playsinline controls {{ 'autoplay' if settings.autoplay_videos }}>
             {% if uni_sources %}
                 <source src="{{ uni_sources[uni_idx]['url'] }}" type="{{ uni_sources[uni_idx]['type'] }}" data-res="{{ uni_sources[uni_idx]['quality'] }}">
             {% endif %}
@@ -233,7 +227,7 @@
     <div class="comments-area-outer comments-disabled">Comments disabled</div>
     {% else %}
     <details class="comments-area-outer" {{'open' if settings.comments_mode == 1 else ''}}>
-        <summary>{{ comment_count|commatize }} comment{{'s' if comment_count != 1 else ''}}</summary>
+        <summary>{{ comment_count|commatize }} comment{{'s' if comment_count != '1' else ''}}</summary>
         <div class="comments-area-inner comments-area">
             {% if comments_info %}
                 {{ comments.video_comments(comments_info) }}
youtube/util.py (128 lines changed)
@@ -336,7 +336,7 @@ def fetch_url(url, headers=(), timeout=15, report_text=None, data=None,
             )
         )
     ):
-        print(response.status, response.reason, response.getheaders())
+        print(response.status, response.reason, response.headers)
         ip = re.search(
             br'IP address: ((?:[\da-f]*:)+[\da-f]+|(?:\d+\.)+\d+)',
             content)
@@ -395,22 +395,22 @@ def head(url, use_tor=False, report_text=None, max_redirects=10):
     return response
 
 
-mobile_user_agent = 'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4 Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36'
+mobile_user_agent = 'Mozilla/5.0 (Linux; Android 14) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.6312.80 Mobile Safari/537.36'
 mobile_ua = (('User-Agent', mobile_user_agent),)
-desktop_user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'
+desktop_user_agent = 'Mozilla/5.0 (Windows NT 10.0; rv:124.0) Gecko/20100101 Firefox/124.0'
 desktop_ua = (('User-Agent', desktop_user_agent),)
 json_header = (('Content-Type', 'application/json'),)
 desktop_xhr_headers = (
     ('Accept', '*/*'),
     ('Accept-Language', 'en-US,en;q=0.5'),
     ('X-YouTube-Client-Name', '1'),
-    ('X-YouTube-Client-Version', '2.20180830'),
+    ('X-YouTube-Client-Version', '2.20240327.00.00'),
 ) + desktop_ua
 mobile_xhr_headers = (
     ('Accept', '*/*'),
     ('Accept-Language', 'en-US,en;q=0.5'),
-    ('X-YouTube-Client-Name', '2'),
-    ('X-YouTube-Client-Version', '2.20180830'),
+    ('X-YouTube-Client-Name', '1'),
+    ('X-YouTube-Client-Version', '2.20240328.08.00'),
 ) + mobile_ua
@@ -665,6 +665,122 @@ def to_valid_filename(name):
    return name


# https://github.com/yt-dlp/yt-dlp/blob/master/yt_dlp/extractor/youtube.py#L72
INNERTUBE_CLIENTS = {
    'android_music': {
        'INNERTUBE_API_KEY': 'AIzaSyAOghZGza2MQSZkY_zfZ370N-PUdXEo8AI',
        'INNERTUBE_CONTEXT': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'ANDROID_MUSIC',
                'clientVersion': '6.44.54',
                'osName': 'Android',
                'osVersion': '14',
                'androidSdkVersion': 34,
                'platform': 'MOBILE',
                'userAgent': 'com.google.android.apps.youtube.music/6.44.54 (Linux; U; Android 14; US) gzip'
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 21,
        'REQUIRE_JS_PLAYER': False
    },

    'android': {
        'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
        'INNERTUBE_CONTEXT': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'ANDROID',
                'clientVersion': '19.12.36',
                'osName': 'Android',
                'osVersion': '14',
                'androidSdkVersion': 34,
                'platform': 'MOBILE',
                'userAgent': 'com.google.android.youtube/19.12.36 (Linux; U; Android 14; US) gzip'
            },
            # https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
            #'thirdParty': {
            #    'embedUrl': 'https://google.com',  # Can be any valid URL
            #}
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
        'REQUIRE_JS_PLAYER': False,
    },

    'ios': {
        'INNERTUBE_API_KEY': 'AIzaSyB-63vPrdThhKuerbB2N_l7Kwwcxj6yUAc',
        'INNERTUBE_CONTEXT': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'IOS',
                'clientVersion': '19.12.3',
                'deviceModel': 'iPhone14,3',
                'userAgent': 'com.google.ios.youtube/19.12.3 (iPhone14,3; U; CPU iOS 15_6 like Mac OS X)'
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
        'REQUIRE_JS_PLAYER': False
    },

    # This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
    # See: https://github.com/zerodytrash/YouTube-Internal-Clients
    'tv_embedded': {
        'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
        'INNERTUBE_CONTEXT': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
                'clientVersion': '2.0',
                'clientScreen': 'EMBED',
            },
            # https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
            'thirdParty': {
                'embedUrl': 'https://google.com',  # Can be any valid URL
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 85,
        'REQUIRE_JS_PLAYER': True,
    },

    'web': {
        'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
        'INNERTUBE_CONTEXT': {
            'client': {
                'clientName': 'WEB',
                'clientVersion': '2.20240327.00.00',
                'userAgent': desktop_user_agent,
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 1
    },
}


def call_youtube_api(client, api, data):
    client_params = INNERTUBE_CLIENTS[client]
    context = client_params['INNERTUBE_CONTEXT']
    key = client_params['INNERTUBE_API_KEY']
    host = client_params.get('INNERTUBE_HOST') or 'www.youtube.com'
    user_agent = context['client'].get('userAgent') or mobile_user_agent

    url = 'https://' + host + '/youtubei/v1/' + api + '?key=' + key
    data['context'] = context

    data = json.dumps(data)
    headers = (('Content-Type', 'application/json'), ('User-Agent', user_agent))
    response = fetch_url(
        url, data=data, headers=headers,
        debug_name='youtubei_' + api + '_' + client,
        report_text='Fetched ' + client + ' youtubei ' + api
    ).decode('utf-8')
    return response


def strip_non_ascii(string):
    '''Returns the string without non-ASCII characters'''
    stripped = (c for c in string if 0 < ord(c) < 127)
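For context, a sketch of how the new helper might be called; the video ID is arbitrary and playabilityStatus is just a typical field to inspect, neither is taken from this diff:

import json

# Sketch: request a player response through the new helper. 'ios' is
# one of the INNERTUBE_CLIENTS keys defined above.
raw = call_youtube_api('ios', 'player', {'videoId': 'dQw4w9WgXcQ'})
player_response = json.loads(raw)
print(player_response.get('playabilityStatus', {}).get('status'))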
@@ -1,3 +1,3 @@
from __future__ import unicode_literals

__version__ = '0.2.6'
__version__ = '0.2.12'
@@ -19,51 +19,6 @@ from urllib.parse import parse_qs, urlencode
from types import SimpleNamespace
from math import ceil

# https://github.com/yt-dlp/yt-dlp/blob/master/yt_dlp/extractor/youtube.py#L72
INNERTUBE_CLIENTS = {
    'android': {
        'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
        'INNERTUBE_CONTEXT': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'ANDROID',
                'clientVersion': '17.31.35',
                'osName': 'Android',
                'osVersion': '12',
                'androidSdkVersion': 31,
                'userAgent': 'com.google.android.youtube/17.31.35 (Linux; U; Android 12) gzip'
            },
            # https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
            #'thirdParty': {
            #    'embedUrl': 'https://google.com',  # Can be any valid URL
            #}
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
        'REQUIRE_JS_PLAYER': False,
    },

    # This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
    # See: https://github.com/zerodytrash/YouTube-Internal-Clients
    'tv_embedded': {
        'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
        'INNERTUBE_CONTEXT': {
            'client': {
                'hl': 'en',
                'gl': 'US',
                'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
                'clientVersion': '2.0',
            },
            # https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
            'thirdParty': {
                'embedUrl': 'https://google.com',  # Can be any valid URL
            }
        },
        'INNERTUBE_CONTEXT_CLIENT_NAME': 85,
        'REQUIRE_JS_PLAYER': True,
    },
}

try:
    with open(os.path.join(settings.data_dir, 'decrypt_function_cache.json'), 'r') as f:
@@ -386,26 +341,11 @@ def _add_to_error(info, key, additional_message):


def fetch_player_response(client, video_id):
    client_params = INNERTUBE_CLIENTS[client]
    context = client_params['INNERTUBE_CONTEXT']
    key = client_params['INNERTUBE_API_KEY']
    host = client_params.get('INNERTUBE_HOST') or 'www.youtube.com'
    user_agent = context['client'].get('userAgent') or util.mobile_user_agent

    url = 'https://' + host + '/youtubei/v1/player?key=' + key
    data = {
    return util.call_youtube_api(client, 'player', {
        'videoId': video_id,
        'context': context,
        'params': 'CgIQBg',
    }
    data = json.dumps(data)
    headers = (('Content-Type', 'application/json'), ('User-Agent', user_agent))
    player_response = util.fetch_url(
        url, data=data, headers=headers,
        debug_name='youtubei_player_' + client,
        report_text='Fetched ' + client + ' youtubei player'
    ).decode('utf-8')
    return player_response
    })


def fetch_watch_page_info(video_id, playlist_id, index):
    # bpctr=9999999999 will bypass are-you-sure dialogs for controversial
@@ -432,17 +372,7 @@ def extract_info(video_id, use_invidious, playlist_id=None, index=None):
    tasks = (
        # Get video metadata from here
        gevent.spawn(fetch_watch_page_info, video_id, playlist_id, index),

        # Get video URLs by spoofing as android client because its urls don't
        # require decryption
        # The URLs returned with WEB for videos requiring decryption
        # couldn't be decrypted with the base.js from the web page for some
        # reason
        # https://github.com/yt-dlp/yt-dlp/issues/574#issuecomment-887171136

        # Update 4/26/23, these URLs will randomly start returning 403
        # mid-playback and I'm not sure why
        gevent.spawn(fetch_player_response, 'android', video_id)
        gevent.spawn(fetch_player_response, 'ios', video_id)
    )
    gevent.joinall(tasks)
    util.check_gevent_exceptions(*tasks)
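The pattern above is plain gevent: spawn the fetches, join them, then surface any exceptions. A self-contained sketch of the same idea with stand-in workers:

import gevent

# Stand-in workers; in the real code these are fetch_watch_page_info
# and fetch_player_response.
def fetch_a():
    return 'watch page info'

def fetch_b():
    return 'player response'

tasks = (gevent.spawn(fetch_a), gevent.spawn(fetch_b))
gevent.joinall(tasks)               # run both greenlets to completion
results = [t.value for t in tasks]  # .value is None if a task raised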
@@ -765,9 +695,17 @@ def get_watch_page(video_id=None):
    else:
        closer_to_target = 'pair'

    using_pair_sources = (
        bool(pair_sources) and (not uni_sources or closer_to_target == 'pair')
    )
    if settings.prefer_uni_sources == 2:
        # Use uni sources unless there's no choice.
        using_pair_sources = (
            bool(pair_sources) and (not uni_sources)
        )
    else:
        # Use the pair sources if they're closer to the desired resolution
        using_pair_sources = (
            bool(pair_sources)
            and (not uni_sources or closer_to_target == 'pair')
        )
    if using_pair_sources:
        video_height = pair_sources[pair_idx]['height']
        video_width = pair_sources[pair_idx]['width']
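To make the new branch concrete, here is the same selection rule restated as a small pure function (the function name and sample values are illustrative):

def choose_pair_sources(prefer_uni_sources, pair_sources,
                        uni_sources, closer_to_target):
    """Illustrative restatement of the selection rule above."""
    if prefer_uni_sources == 2:
        # Strictly prefer uni sources: fall back to pair only if empty.
        return bool(pair_sources) and not uni_sources
    # Otherwise pick whichever kind is closer to the target resolution.
    return bool(pair_sources) and (
        not uni_sources or closer_to_target == 'pair')

# e.g. with both kinds available and pair closer to the target:
assert choose_pair_sources(0, ['p'], ['u'], 'pair') is True
assert choose_pair_sources(2, ['p'], ['u'], 'pair') is False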
@@ -185,7 +185,7 @@ def extract_int(string, default=None, whole_word=True):
    return default

def extract_approx_int(string):
    '''e.g. "15.1M" from "15.1M subscribers"'''
    '''e.g. "15.1M" from "15.1M subscribers" or '4,353' from 4353'''
    if not isinstance(string, str):
        string = extract_str(string)
    if not string:
@@ -193,7 +193,10 @@ def extract_approx_int(string):
    match = re.search(r'\b(\d+(?:\.\d+)?[KMBTkmbt]?)\b', string.replace(',', ''))
    if match is None:
        return None
    return match.group(1)
    result = match.group(1)
    if re.fullmatch(r'\d+', result):
        result = '{:,}'.format(int(result))
    return result

MONTH_ABBREVIATIONS = {'jan':'1', 'feb':'2', 'mar':'3', 'apr':'4', 'may':'5', 'jun':'6', 'jul':'7', 'aug':'8', 'sep':'9', 'oct':'10', 'nov':'11', 'dec':'12'}
def extract_date(date_text):
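The added branch comma-groups bare digit runs while letting suffixed counts like "15.1M" pass through. A sketch of the changed behaviour, minus the extract_str() coercion of non-string input:

import re

def extract_approx_int_demo(string):
    # Same logic as the changed function above.
    match = re.search(r'\b(\d+(?:\.\d+)?[KMBTkmbt]?)\b',
                      string.replace(',', ''))
    if match is None:
        return None
    result = match.group(1)
    if re.fullmatch(r'\d+', result):         # plain digits only
        result = '{:,}'.format(int(result))  # -> comma-grouped
    return result

assert extract_approx_int_demo('15.1M subscribers') == '15.1M'
assert extract_approx_int_demo('4353') == '4,353'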
@@ -85,23 +85,84 @@ def extract_channel_info(polymer_json, tab, continuation=False):
    if tab in ('search', 'playlists'):
        info['is_last_page'] = (ctoken is None)
    elif tab == 'about':
        items, _ = extract_items(response, item_types={'channelAboutFullMetadataRenderer'})
        if not items:
            info['error'] = 'Could not find channelAboutFullMetadataRenderer'
            return info
        channel_metadata = items[0]['channelAboutFullMetadataRenderer']
        # Latest type
        items, _ = extract_items(response, item_types={'aboutChannelRenderer'})
        if items:
            a_metadata = deep_get(items, 0, 'aboutChannelRenderer',
                                  'metadata', 'aboutChannelViewModel')
            if not a_metadata:
                info['error'] = 'Could not find aboutChannelViewModel'
                return info

        info['links'] = []
        for link_json in channel_metadata.get('primaryLinks', ()):
            url = remove_redirect(deep_get(link_json, 'navigationEndpoint', 'urlEndpoint', 'url'))
            if not (url.startswith('http://') or url.startswith('https://')):
                url = 'http://' + url
            text = extract_str(link_json.get('title'))
            info['links'].append( (text, url) )
            info['links'] = []
            for link_outer in a_metadata.get('links', ()):
                link = link_outer.get('channelExternalLinkViewModel') or {}
                link_content = extract_str(deep_get(link, 'link', 'content'))
                for run in deep_get(link, 'link', 'commandRuns') or ():
                    url = remove_redirect(deep_get(run, 'onTap',
                        'innertubeCommand', 'urlEndpoint', 'url'))
                    if url and not (url.startswith('http://')
                            or url.startswith('https://')):
                        url = 'https://' + url
                    if link_content is None or (link_content in url):
                        break
                else:  # didn't break
                    url = link_content
                    if url and not (url.startswith('http://')
                            or url.startswith('https://')):
                        url = 'https://' + url
                text = extract_str(deep_get(link, 'title', 'content'))
                info['links'].append( (text, url) )

        info['date_joined'] = extract_date(channel_metadata.get('joinedDateText'))
        info['view_count'] = extract_int(channel_metadata.get('viewCountText'))
        info['description'] = extract_str(channel_metadata.get('description'), default='')
            info['date_joined'] = extract_date(
                a_metadata.get('joinedDateText')
            )
            info['view_count'] = extract_int(a_metadata.get('viewCountText'))
            info['approx_view_count'] = extract_approx_int(
                a_metadata.get('viewCountText')
            )
            info['description'] = extract_str(
                a_metadata.get('description'), default=''
            )
            info['approx_video_count'] = extract_approx_int(
                a_metadata.get('videoCountText')
            )
            info['approx_subscriber_count'] = extract_approx_int(
                a_metadata.get('subscriberCountText')
            )
            info['country'] = extract_str(a_metadata.get('country'))
            info['canonical_url'] = extract_str(
                a_metadata.get('canonicalChannelUrl')
            )

        # Old type
        else:
            items, _ = extract_items(response,
                item_types={'channelAboutFullMetadataRenderer'})
            if not items:
                info['error'] = 'Could not find aboutChannelRenderer or channelAboutFullMetadataRenderer'
                return info
            a_metadata = items[0]['channelAboutFullMetadataRenderer']

            info['links'] = []
            for link_json in a_metadata.get('primaryLinks', ()):
                url = remove_redirect(deep_get(link_json, 'navigationEndpoint',
                    'urlEndpoint', 'url'))
                if url and not (url.startswith('http://')
                        or url.startswith('https://')):
                    url = 'https://' + url
                text = extract_str(link_json.get('title'))
                info['links'].append( (text, url) )

            info['date_joined'] = extract_date(a_metadata.get('joinedDateText'))
            info['view_count'] = extract_int(a_metadata.get('viewCountText'))
            info['description'] = extract_str(a_metadata.get(
                'description'), default='')

            info['approx_video_count'] = None
            info['approx_subscriber_count'] = None
            info['country'] = None
            info['canonical_url'] = None
    else:
        raise NotImplementedError('Unknown or unsupported channel tab: ' + tab)
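The new link extraction relies on Python's for/else: the else branch runs only if the loop never hit break, i.e. no commandRuns entry yielded a usable URL. A minimal illustration of that control flow:

def first_usable(candidates, fallback):
    # Mirrors the for/else pattern above: take the first candidate
    # that passes the check, otherwise fall back.
    for url in candidates:
        if url.startswith('https://'):
            break
    else:  # loop finished without break: no candidate passed
        url = fallback
    return url

assert first_usable(['ftp://x', 'https://a.example'], 'https://fb') == 'https://a.example'
assert first_usable(['ftp://x'], 'https://fb') == 'https://fb'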
@@ -191,6 +252,19 @@ def extract_playlist_metadata(polymer_json):
        elif 'updated' in text:
            metadata['time_published'] = extract_date(text)

    microformat = deep_get(response, 'microformat', 'microformatDataRenderer',
                           default={})
    conservative_update(
        metadata, 'title', extract_str(microformat.get('title'))
    )
    conservative_update(
        metadata, 'description', extract_str(microformat.get('description'))
    )
    conservative_update(
        metadata, 'thumbnail', deep_get(microformat, 'thumbnail',
                                        'thumbnails', -1, 'url')
    )

    return metadata

def extract_playlist_info(polymer_json):
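conservative_update itself is not shown in this diff; judging from its use here, it presumably fills a key only when nothing useful is there yet, so microformat data never overwrites values already extracted. A sketch of that assumed behaviour:

def conservative_update_demo(d, key, value):
    # Assumed semantics: only fill the key if it is missing or None.
    if d.get(key) is None:
        d[key] = value

metadata = {'title': 'From header renderer'}
conservative_update_demo(metadata, 'title', 'From microformat')
conservative_update_demo(metadata, 'description', 'From microformat')
assert metadata == {'title': 'From header renderer',
                    'description': 'From microformat'}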
@@ -198,13 +272,11 @@ def extract_playlist_info(polymer_json):
    if err:
        return {'error': err}
    info = {'error': None}
    first_page = 'continuationContents' not in response
    video_list, _ = extract_items(response)

    info['items'] = [extract_item_info(renderer) for renderer in video_list]

    if first_page:
        info['metadata'] = extract_playlist_metadata(polymer_json)
    info['metadata'] = extract_playlist_metadata(polymer_json)

    return info
@@ -140,11 +140,12 @@ def _extract_likes_dislikes(renderer_content):
        ['defaultText', 'accessibility', 'accessibilityData', 'label'],
        ['accessibility', 'label'],
        ['accessibilityData', 'accessibilityData', 'label'],
        ['accessibilityText'],
    ))

    # this count doesn't have all the digits, it's like 53K for instance
    dumb_count = extract_int(extract_str(deep_get(
        toggle_button_renderer, 'defaultText')))
    dumb_count = extract_int(extract_str(multi_get(
        toggle_button_renderer, ['defaultText', 'title'])))

    # The accessibility text will be "No likes" or "No dislikes" or
    # something like that, but dumb count will be 0
@@ -168,16 +169,23 @@ def _extract_likes_dislikes(renderer_content):
            info['dislike_count'] = count
        elif 'slimMetadataButtonRenderer' in button:
            button_renderer = button['slimMetadataButtonRenderer']
            liberal_update(info, 'like_count', extract_button_count(deep_get(
                button_renderer, 'button',
                'segmentedLikeDislikeButtonRenderer',
                'likeButton', 'toggleButtonRenderer'
            )))
            liberal_update(info, 'dislike_count', extract_button_count(deep_get(
                button_renderer, 'button',
                'segmentedLikeDislikeButtonRenderer',
                'dislikeButton', 'toggleButtonRenderer'
            )))
            liberal_update(info, 'like_count', extract_button_count(
                multi_deep_get(button_renderer,
                    ['button', 'segmentedLikeDislikeButtonRenderer',
                     'likeButton', 'toggleButtonRenderer'],
                    ['button', 'segmentedLikeDislikeButtonViewModel',
                     'likeButtonViewModel', 'likeButtonViewModel',
                     'toggleButtonViewModel', 'toggleButtonViewModel',
                     'defaultButtonViewModel', 'buttonViewModel']
                )
            ))
            '''liberal_update(info, 'dislike_count', extract_button_count(
                deep_get(
                    button_renderer, 'button',
                    'segmentedLikeDislikeButtonRenderer',
                    'dislikeButton', 'toggleButtonRenderer'
                )
            ))'''
    return info

def _extract_from_owner_renderer(renderer_content):
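multi_deep_get is likewise not shown here; from its call site it appears to try each key path in order and return the first one that fully resolves, which is what lets the old toggleButtonRenderer layout and the new buttonViewModel layout share one call. A sketch of that assumed behaviour:

def multi_deep_get_demo(obj, *paths):
    # Assumed semantics: walk each path in turn; return the first
    # value that fully resolves, else None.
    for path in paths:
        current = obj
        for key in path:
            if isinstance(current, dict) and key in current:
                current = current[key]
            else:
                break
        else:
            return current
    return None

renderer = {'button': {'segmentedLikeDislikeButtonViewModel': {'n': 1}}}
assert multi_deep_get_demo(
    renderer,
    ['button', 'segmentedLikeDislikeButtonRenderer'],   # old layout: absent
    ['button', 'segmentedLikeDislikeButtonViewModel'],  # new layout: hit
) == {'n': 1}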
@@ -363,12 +371,12 @@ def _extract_watch_info_mobile(top_level):
        comment_count_text = extract_str(deep_get(comment_info,
            'header', 'commentSectionHeaderRenderer', 'countText'))
        if comment_count_text == 'Comments':  # just this with no number, means 0 comments
            info['comment_count'] = 0
            info['comment_count'] = '0'
        else:
            info['comment_count'] = extract_int(comment_count_text)
            info['comment_count'] = extract_approx_int(comment_count_text)
        info['comments_disabled'] = False
    else:  # no comment section present means comments are disabled
        info['comment_count'] = 0
        info['comment_count'] = '0'
        info['comments_disabled'] = True

    # check for limited state