Merge branch 'master' into add_sponsorblock

commit aa52c7a42e
James Taylor, 2020-10-21 18:53:12 -07:00 (committed by GitHub)
20 changed files with 163 additions and 106 deletions

View File

@@ -160,6 +160,19 @@ For security reasons, enabling this is not recommended.''',
],
}),
('font', {
'type': int,
'default': 1,
'comment': '',
'options': [
(0, 'Browser default'),
(1, 'Arial'),
(2, 'Liberation Serif'),
(3, 'Verdana'),
(4, 'Tahoma'),
],
}),
('autocheck_subscriptions', {
'type': bool,
'default': 0,
@@ -319,12 +332,6 @@ else:
globals().update(current_settings_dict)
if proxy_images:
img_prefix = "/"
else:
img_prefix = ""
if route_tor:
print("Tor routing is ON")
@@ -343,6 +350,19 @@ def add_setting_changed_hook(setting, func):
hooks[setting] = [func]
def set_img_prefix(old_value=None, value=None):
global img_prefix
if value is None:
value = proxy_images
if value:
img_prefix = '/'
else:
img_prefix = ''
set_img_prefix()
add_setting_changed_hook('proxy_images', set_img_prefix)
def settings_page():
if request.method == 'GET':
return flask.render_template('settings.html',
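
The new set_img_prefix hook replaces the module-level if/else on proxy_images, so the prefix is recomputed whenever the setting changes rather than only at startup. A minimal standalone sketch of the pattern, assuming the hooks dict referenced above plus a dispatch step the diff does not show (registration is simplified here):

hooks = {}
proxy_images = True
img_prefix = ''

def add_setting_changed_hook(setting, func):
    # run func whenever `setting` is saved with a new value
    hooks.setdefault(setting, []).append(func)

def set_img_prefix(old_value=None, value=None):
    global img_prefix
    if value is None:
        value = proxy_images
    img_prefix = '/' if value else ''

set_img_prefix()                                    # initialize once at startup
add_setting_changed_hook('proxy_images', set_img_prefix)

def notify_setting_changed(setting, old_value, new_value):
    # hypothetical dispatch run when the settings page saves a change
    for func in hooks.get(setting, []):
        func(old_value, new_value)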

View File

@@ -1,5 +1,6 @@
from youtube import util
import flask
from flask import request
import settings
import traceback
import re
@@ -59,6 +60,7 @@ def timestamps(text):
@yt_app.errorhandler(500)
def error_page(e):
slim = request.args.get('slim', False) # whether it was an ajax request
if (exc_info()[0] == util.FetchError
and exc_info()[1].code == '429'
and settings.route_tor
@@ -68,5 +70,22 @@ def error_page(e):
' using the New Identity button in the Tor Browser.')
if exc_info()[1].ip:
error_message += ' Exit node IP address: ' + exc_info()[1].ip
return flask.render_template('error.html', error_message=error_message), 502
return flask.render_template('error.html', traceback=traceback.format_exc()), 500
return flask.render_template('error.html', error_message=error_message, slim=slim), 502
return flask.render_template('error.html', traceback=traceback.format_exc(), slim=slim), 500
font_choices = {
0: 'initial',
1: 'arial, "liberation sans", sans-serif',
2: '"liberation serif", "times new roman", calibri, carlito, serif',
3: 'verdana, sans-serif',
4: 'tahoma, sans-serif',
}
@yt_app.route('/shared.css')
def get_css():
return flask.Response(
flask.render_template('shared.css',
font_family = font_choices[settings.font]
),
mimetype='text/css',
)
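
Since shared.css now takes its font-family from the new font setting, it is served by a Flask route that renders the stylesheet as a Jinja template instead of shipping it as a static file. A self-contained sketch of that pattern; the app object, the inline template, and FONT_SETTING are illustrative stand-ins, not code from the repo:

import flask

app = flask.Flask(__name__)

FONT_CHOICES = {
    0: 'initial',
    1: 'arial, "liberation sans", sans-serif',
}
FONT_SETTING = 1  # stands in for settings.font

@app.route('/shared.css')
def get_css():
    # the real code renders templates/shared.css; an inline template keeps
    # the sketch self-contained
    css = flask.render_template_string(
        'html { font-family: {{ font_family }}; }',
        font_family=FONT_CHOICES[FONT_SETTING])
    return flask.Response(css, mimetype='text/css')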

View File

@@ -90,7 +90,7 @@ def single_comment_ctoken(video_id, comment_id):
def post_process_comments_info(comments_info):
for comment in comments_info['comments']:
comment['author_url'] = concat_or_none(
util.URL_ORIGIN, comment['author_url'])
'/', comment['author_url'])
comment['author_avatar'] = concat_or_none(
settings.img_prefix, comment['author_avatar'])
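
For context, a hedged sketch of util.concat_or_none as its use here implies (the real helper lives in util and may differ in detail): join the pieces, but propagate None when any piece is missing. With author_url now normalized to a full URL by the yt_data_extract changes further down, prefixing '/' instead of util.URL_ORIGIN presumably yields the app's root-prefixed proxy path:

def concat_or_none(*strings):
    # assumption: concatenate all pieces, return None if any piece is None
    result = ''
    for s in strings:
        if s is None:
            return None
        result += s
    return result

concat_or_none('/', 'https://www.youtube.com/channel/UC123')
# -> '/https://www.youtube.com/channel/UC123'
concat_or_none('/', None)  # -> None, missing data stays missing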

View File

@@ -124,6 +124,23 @@
grid-column-gap: 10px;
}
details.replies > summary{
background-color: var(--interface-color);
border-style: outset;
border-width: 1px;
font-weight: bold;
padding-bottom: 0px;
}
.replies-open-new-tab{
display: inline-block;
margin-top: 5px;
}
details.replies .comment{
width: 600px;
}
.more-comments{
justify-self:center;
margin-top:10px;

View File

@@ -41,9 +41,7 @@ function doXhr(url, callback=null) {
var xhr = new XMLHttpRequest();
xhr.open("GET", url);
xhr.onload = (e) => {
let ok = xhr.status >= 200 && xhr.status < 300;
if (ok) callback(e.currentTarget.response);
else alert(`${xhr.responseURL} status code: ${xhr.status}`);
callback(e.currentTarget.response);
}
xhr.send();
return xhr;

View File

@@ -4,12 +4,9 @@
<meta charset="utf-8">
<title>{{ page_title }}</title>
<meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline'; media-src 'self' https://*.googlevideo.com;
{% if not settings.proxy_images %}
img-src https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com;
{% endif %}">
{{ "img-src 'self' https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com;" if not settings.proxy_images else "" }}">
<link href="{{ theme_path }}" type="text/css" rel="stylesheet">
<link href="/youtube.com/static/shared.css" type="text/css" rel="stylesheet">
<link href="/youtube.com/shared.css" type="text/css" rel="stylesheet">
<link href="/youtube.com/static/comments.css" type="text/css" rel="stylesheet">
<link href="/youtube.com/static/favicon.ico" type="image/x-icon" rel="icon">
<link title="Youtube local" href="/youtube.com/opensearch.xml" rel="search" type="application/opensearchdescription+xml">

View File

@@ -25,6 +25,7 @@
{% if settings.use_comments_js and comment['reply_count'] %}
<details class="replies" src="{{ comment['replies_url'] }}">
<summary>{{ comment['view_replies_text'] }}</summary>
<a href="{{ comment['replies_url'] }}" class="replies-open-new-tab" target="_blank">Open in new tab</a>
<div class="comment_page">loading..</div>
</details>
{% else %}

View File

@@ -1,29 +1,8 @@
{% set page_title = 'Error' %}
{% extends "base.html" %}
{% block style %}
h1{
font-size: 2rem;
font-weight: normal;
}
#error-box, #error-message{
background-color: var(--interface-color);
width: 80%;
margin: auto;
margin-top: 20px;
padding: 5px;
}
#error-box > div, #error-box > p, #error-box > h1{
white-space: pre-wrap;
margin-bottom: 10px;
}
.code-box{
padding: 5px;
border-style:solid;
border-width:1px;
border-radius:5px;
}
{% endblock style %}
{% if not slim %}
{% extends "base.html" %}
{% endif %}
{% block main %}
{% if traceback %}

View File

@@ -12,7 +12,7 @@ address{
}
html{
font-family: "liberation serif", "times new roman", calibri, carlito, serif;
font-family: {{ font_family }};
}
body{
@@ -334,3 +334,26 @@ body{
padding: 2px;
justify-self: start;
}
/* error page stuff */
h1{
font-size: 2rem;
font-weight: normal;
}
#error-box, #error-message{
background-color: var(--interface-color);
width: 80%;
margin: auto;
margin-top: 20px;
padding: 5px;
}
#error-box > div, #error-box > p, #error-box > h1{
white-space: pre-wrap;
margin-bottom: 10px;
}
.code-box{
padding: 5px;
border-style:solid;
border-width:1px;
border-radius:5px;
}

View File

@@ -14,18 +14,6 @@
text-decoration: underline;
}
details.replies > summary{
background-color: var(--interface-color);
border-style: outset;
border-width: 1px;
font-weight: bold;
padding-bottom: 0px;
}
details.replies .comment{
width: 600px;
}
.playability-error{
height: 360px;
width: 640px;

View File

@@ -226,15 +226,19 @@ def extract_info(video_id, use_invidious, playlist_id=None, index=None):
return {'error': 'Failed to parse json response'}
info = yt_data_extract.extract_watch_info(polymer_json)
# age restriction bypass
# request player if it's missing
# see https://github.com/user234683/youtube-local/issues/22#issuecomment-706395160
if info['age_restricted'] or info['player_response_missing']:
if info['age_restricted']:
print('Fetching age restriction bypass page')
print('Age restricted video. Fetching get_video_info page')
else:
print('Missing player. Fetching get_video_info page')
data = {
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
}
url = 'https://www.youtube.com/get_video_info?' + urllib.parse.urlencode(data)
video_info_page = util.fetch_url(url, debug_name='get_video_info', report_text='Fetched age restriction bypass page').decode('utf-8')
video_info_page = util.fetch_url(url, debug_name='get_video_info', report_text='Fetched get_video_info page').decode('utf-8')
yt_data_extract.update_with_age_restricted_info(info, video_info_page)
# signature decryption
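
The get_video_info fallback now also runs when the watch page omits the player response entirely, not just for age-restricted videos. For reference, the request it builds, spelled out with a placeholder id (endpoint and parameters are exactly those in the hunk):

import urllib.parse

video_id = 'dQw4w9WgXcQ'  # placeholder
data = {
    'video_id': video_id,
    'eurl': 'https://youtube.googleapis.com/v/' + video_id,
}
url = 'https://www.youtube.com/get_video_info?' + urllib.parse.urlencode(data)
# url == 'https://www.youtube.com/get_video_info?video_id=dQw4w9WgXcQ'
#        '&eurl=https%3A%2F%2Fyoutube.googleapis.com%2Fv%2FdQw4w9WgXcQ'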

View File

@@ -90,15 +90,20 @@ def remove_redirect(url):
return urllib.parse.parse_qs(query_string)['q'][0]
return url
youtube_url_re = re.compile(r'^(?:(?:(?:https?:)?//)?(?:www\.)?youtube\.com)?(/.*)$')
norm_url_re = re.compile(r'^(?:(?:https?:)?//)?((?:[\w-]+\.)+[\w-]+)?(/.*)$')
def normalize_url(url):
'''Insert https, resolve relative paths for youtube.com, and put www. infront of youtube.com'''
if url is None:
return None
match = youtube_url_re.fullmatch(url)
match = norm_url_re.fullmatch(url)
if match is None:
raise Exception()
raise Exception(url)
return 'https://www.youtube.com' + match.group(1)
domain = match.group(1) or 'www.youtube.com'
if domain == 'youtube.com':
domain = 'www.youtube.com'
return 'https://' + domain + match.group(2)
def _recover_urls(runs):
for run in runs:
@@ -240,11 +245,11 @@ def extract_item_info(item, additional_info={}):
))
info['author_url'] = ('https://www.youtube.com/channel/' + info['author_id']) if info['author_id'] else None
info['description'] = extract_formatted_text(multi_get(item, 'descriptionSnippet', 'descriptionText'))
info['thumbnail'] = multi_deep_get(item,
info['thumbnail'] = normalize_url(multi_deep_get(item,
['thumbnail', 'thumbnails', 0, 'url'], # videos
['thumbnails', 0, 'thumbnails', 0, 'url'], # playlists
['thumbnailRenderer', 'showCustomThumbnailRenderer', 'thumbnail', 'thumbnails', 0, 'url'], # shows
)
))
info['badges'] = []
for badge_node in multi_get(item, 'badges', 'ownerBadges', default=()):
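
The widened regex lets normalize_url absolutize thumbnail and avatar URLs on hosts such as ggpht.com and ytimg.com, not just youtube.com paths. Assembled from the hunk above (docstring omitted), with a few worked inputs; the example URLs are illustrative:

import re

norm_url_re = re.compile(r'^(?:(?:https?:)?//)?((?:[\w-]+\.)+[\w-]+)?(/.*)$')

def normalize_url(url):
    if url is None:
        return None
    match = norm_url_re.fullmatch(url)
    if match is None:
        raise Exception(url)
    domain = match.group(1) or 'www.youtube.com'   # no host -> relative youtube.com path
    if domain == 'youtube.com':
        domain = 'www.youtube.com'
    return 'https://' + domain + match.group(2)

normalize_url('/watch?v=abc')                    # 'https://www.youtube.com/watch?v=abc'
normalize_url('youtube.com/channel/UC123')       # 'https://www.youtube.com/channel/UC123'
normalize_url('//yt3.ggpht.com/ytc/avatar.jpg')  # 'https://yt3.ggpht.com/ytc/avatar.jpg'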

View File

@@ -49,10 +49,10 @@ def extract_channel_info(polymer_json, tab):
if info['short_description'] and len(info['short_description']) > 730:
info['short_description'] = info['short_description'][0:730] + '...'
info['channel_name'] = metadata.get('title')
info['avatar'] = multi_deep_get(metadata,
info['avatar'] = normalize_url(multi_deep_get(metadata,
['avatar', 'thumbnails', 0, 'url'],
['thumbnail', 'thumbnails', 0, 'url'],
)
))
channel_url = multi_get(metadata, 'urlCanonical', 'channelUrl')
if channel_url:
channel_id = get(channel_url.rstrip('/').split('/'), -1)
@@ -263,13 +263,13 @@ def extract_comments_info(polymer_json):
# These 3 are sometimes absent, likely because the channel was deleted
comment_info['author'] = extract_str(comment_renderer.get('authorText'))
comment_info['author_url'] = deep_get(comment_renderer,
'authorEndpoint', 'commandMetadata', 'webCommandMetadata', 'url')
comment_info['author_url'] = normalize_url(deep_get(comment_renderer,
'authorEndpoint', 'commandMetadata', 'webCommandMetadata', 'url'))
comment_info['author_id'] = deep_get(comment_renderer,
'authorEndpoint', 'browseEndpoint', 'browseId')
comment_info['author_avatar'] = deep_get(comment_renderer,
'authorThumbnail', 'thumbnails', 0, 'url')
comment_info['author_avatar'] = normalize_url(deep_get(
comment_renderer, 'authorThumbnail', 'thumbnails', 0, 'url'))
comment_info['id'] = comment_renderer.get('commentId')
comment_info['text'] = extract_formatted_text(comment_renderer.get('contentText'))
comment_info['time_published'] = extract_str(comment_renderer.get('publishedTimeText'))

View File

@@ -447,7 +447,8 @@ def _extract_playability_error(info, player_response, error_prefix=''):
SUBTITLE_FORMATS = ('srv1', 'srv2', 'srv3', 'ttml', 'vtt')
def extract_watch_info(polymer_json):
info = {'playability_error': None, 'error': None}
info = {'playability_error': None, 'error': None,
'player_response_missing': None}
if isinstance(polymer_json, dict):
top_level = polymer_json
@@ -477,6 +478,10 @@ def extract_watch_info(polymer_json):
else:
embedded_player_response = {}
# see https://github.com/user234683/youtube-local/issues/22#issuecomment-706395160
info['player_response_missing'] = not (
player_response or embedded_player_response)
# captions
info['automatic_caption_languages'] = []
info['manual_caption_languages'] = []
@@ -580,7 +585,8 @@ def get_caption_url(info, language, format, automatic=False, translation_languag
return url
def update_with_age_restricted_info(info, video_info_page):
ERROR_PREFIX = 'Error bypassing age-restriction: '
'''Inserts urls from 'player_response' in get_video_info page'''
ERROR_PREFIX = 'Error getting missing player or bypassing age-restriction: '
video_info = urllib.parse.parse_qs(video_info_page)
player_response = deep_get(video_info, 'player_response', 0)
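
update_with_age_restricted_info now covers both cases flagged above: age-restricted videos and watch pages that simply omit the player response (the new player_response_missing flag). A small sketch of the parsing step it performs on the get_video_info page; json.loads is an assumption here, and dict.get stands in for the diff's deep_get:

import json
import urllib.parse

def player_response_from_video_info(video_info_page):
    video_info = urllib.parse.parse_qs(video_info_page)   # urlencoded form -> dict of lists
    raw = video_info.get('player_response', [None])[0]    # JSON string, if present
    return json.loads(raw) if raw else None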