Compare commits
159 Commits
0.2.3
...
b320127f16
| Author | SHA1 | Date | |
|---|---|---|---|
|
b320127f16
|
|||
|
d6190a2d0b
|
|||
|
155bd4df49
|
|||
|
5577e9e1f2
|
|||
|
3795d9e4ff
|
|||
|
3cf221a1ed
|
|||
|
13a0e6ceed
|
|||
|
e8e2aa93d6
|
|||
|
8403e30b3a
|
|||
|
f0649be5de
|
|||
|
62a028968e
|
|||
|
f7bbf3129a
|
|||
|
688521f8d6
|
|||
|
6eb3741010
|
|||
|
a374f90f6e
|
|||
|
bed14713ad
|
|||
|
06051dd127
|
|||
|
7c64630be1
|
|||
|
1aa344c7b0
|
|||
|
fa7273b328
|
|||
|
a0d10e6a00
|
|||
|
a46cfda029
|
|||
|
e03f40d728
|
|||
|
22c72aa842
|
|||
|
56ecd6cb1b
|
|||
|
f629565e77
|
|||
|
1f8c13adff
|
|||
|
6a68f06645
|
|||
|
84e1acaab8
|
|||
|
|
ed4b05d9b6 | ||
|
|
6f88b1cec6 | ||
|
|
03451fb8ae | ||
|
|
e45c3fd48b | ||
|
|
1153ac8f24 | ||
|
|
c256a045f9 | ||
|
|
98603439cb | ||
|
|
a6ca011202 | ||
|
|
114c2572a4 | ||
|
f64b362603
|
|||
|
2fd7910194
|
|||
|
c2e53072f7
|
|||
|
c2986f3b14
|
|||
|
57854169f4
|
|||
|
3217305f9f
|
|||
|
639aadd2c1
|
|||
|
7157df13cd
|
|||
|
630e0137e0
|
|||
|
a0c51731af
|
|||
|
d361996fc0
|
|||
|
|
4ef7dda14a | ||
|
|
ee31cedae0 | ||
|
d3b0cb5e13
|
|||
|
0a79974d11
|
|||
|
4e327944a0
|
|||
|
09a437f7fb
|
|||
|
3cbe18aac0
|
|||
|
|
62418f8e95 | ||
|
bfd3760969
|
|||
|
efd89b2e64
|
|||
|
0dc1747178
|
|||
|
8577164785
|
|||
|
8af98968dd
|
|||
|
8f00cbcdd6
|
|||
|
af75551bc2
|
|||
|
3a6cc1e44f
|
|||
|
7664b5f0ff
|
|||
|
ec5d236cad
|
|||
|
d6b7a255d0
|
|||
|
22bc7324db
|
|||
|
48e8f271e7
|
|||
|
9a0ad6070b
|
|||
|
6039589f24
|
|||
|
d4cba7eb6c
|
|||
|
70cb453280
|
|||
|
7a106331e7
|
|||
|
8775e131af
|
|||
|
1f16f7cb62
|
|||
|
80b7f3cd00
|
|||
|
8b79e067bc
|
|||
|
cda0627d5a
|
|||
|
ad40dd6d6b
|
|||
|
b91d53dc6f
|
|||
|
cda4fd1f26
|
|||
|
ff2a2edaa5
|
|||
|
38d8d5d4c5
|
|||
|
f010452abf
|
|||
|
ab93f8242b
|
|||
|
1505414a1a
|
|||
|
c04d7c9a24
|
|||
|
3ee2df7faa
|
|||
|
d2c883c211
|
|||
|
59c988f819
|
|||
|
629c811e84
|
|||
|
284024433b
|
|||
|
55a8e50d6a
|
|||
|
810dff999e
|
|||
|
4da91fb972
|
|||
|
874ac0a0ac
|
|||
|
89ae1e265b
|
|||
|
00bd9fee6f
|
|||
|
b215e2a3b2
|
|||
|
97972d6fa3
|
|||
|
6ae20bb1f5
|
|||
|
5f3b90ad45
|
|||
|
2463af7685
|
|||
|
86bb312d6d
|
|||
|
964b99ea40
|
|||
|
51a1693789
|
|||
|
ca4a735692
|
|||
|
2140f48919
|
|||
|
4be01d3964
|
|||
|
b45e3476c8
|
|||
|
d591956baa
|
|||
|
|
6011a08cdf | ||
|
|
83af4ab0d7 | ||
|
|
5594d017e2 | ||
|
|
8f9c5eeb48 | ||
|
|
89e21302e3 | ||
|
|
cb4ceefada | ||
|
|
c4cc5cecbf | ||
|
|
cc8f30eba2 | ||
|
|
6740afd6a0 | ||
|
|
63c0f4aa8f | ||
|
|
8908dc138f | ||
|
|
cd7624f2cb | ||
|
|
5d53225874 | ||
|
|
6af17450c6 | ||
|
|
d85c27a728 | ||
|
|
344341b87f | ||
|
|
21224c8dae | ||
|
|
93b58efa0e | ||
|
|
db08283368 | ||
|
|
0f4bf45cde | ||
|
|
d7f934b7b2 | ||
|
|
a4299dc917 | ||
|
|
e6fd9b40f4 | ||
|
|
f322035d4a | ||
|
|
74907a8183 | ||
|
|
ec8f652bc8 | ||
|
|
aa57ace742 | ||
|
|
512798366c | ||
|
|
9859c5485e | ||
|
|
e54596f3e9 | ||
|
|
c6e1b366b5 | ||
|
|
43e7f7ce93 | ||
|
|
97032b31ee | ||
|
|
ba3714c860 | ||
|
|
14c8cf3f5b | ||
|
|
3025158d14 | ||
|
|
fb13fd21ef | ||
|
|
68752000f0 | ||
|
|
7b60751e99 | ||
|
|
9890617098 | ||
|
|
beca545951 | ||
|
|
a9a68e7df3 | ||
|
|
0f78f07875 | ||
|
|
08545a29df | ||
|
|
9564ee30fe | ||
|
|
6806146450 |
12
.build.yml
12
.build.yml
@@ -1,12 +0,0 @@
|
||||
image: debian/buster
|
||||
packages:
|
||||
- python3-pip
|
||||
- virtualenv
|
||||
tasks:
|
||||
- test: |
|
||||
cd yt-local
|
||||
virtualenv -p python3 venv
|
||||
source venv/bin/activate
|
||||
python --version
|
||||
pip install -r requirements-dev.txt
|
||||
pytest
|
||||
10
.drone.yml
10
.drone.yml
@@ -1,10 +0,0 @@
|
||||
kind: pipeline
|
||||
name: default
|
||||
|
||||
steps:
|
||||
- name: test
|
||||
image: python:3.7.3
|
||||
commands:
|
||||
- pip install --upgrade pip
|
||||
- pip install -r requirements-dev.txt
|
||||
- pytest
|
||||
23
.gitea/workflows/ci.yaml
Normal file
23
.gitea/workflows/ci.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
name: CI
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: 3.11
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
pip install --upgrade pip
|
||||
pip install -r requirements-dev.txt
|
||||
|
||||
- name: Run tests
|
||||
run: pytest
|
||||
40
.gitea/workflows/git-sync.yaml
Normal file
40
.gitea/workflows/git-sync.yaml
Normal file
@@ -0,0 +1,40 @@
|
||||
name: git-sync-with-mirror
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
git-sync:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: git-sync
|
||||
env:
|
||||
git_sync_source_repo: git@git.fridu.us:heckyel/yt-local.git
|
||||
git_sync_destination_repo: ssh://git@c.fridu.us/software/yt-local.git
|
||||
if: env.git_sync_source_repo && env.git_sync_destination_repo
|
||||
uses: astounds/git-sync@v1
|
||||
with:
|
||||
source_repo: git@git.fridu.us:heckyel/yt-local.git
|
||||
source_branch: "master"
|
||||
destination_repo: ssh://git@c.fridu.us/software/yt-local.git
|
||||
destination_branch: "master"
|
||||
source_ssh_private_key: ${{ secrets.GIT_SYNC_SOURCE_SSH_PRIVATE_KEY }}
|
||||
destination_ssh_private_key: ${{ secrets.GIT_SYNC_DESTINATION_SSH_PRIVATE_KEY }}
|
||||
|
||||
- name: git-sync-sourcehut
|
||||
env:
|
||||
git_sync_source_repo: git@git.fridu.us:heckyel/yt-local.git
|
||||
git_sync_destination_repo: git@git.sr.ht:~heckyel/yt-local
|
||||
if: env.git_sync_source_repo && env.git_sync_destination_repo
|
||||
uses: astounds/git-sync@v1
|
||||
with:
|
||||
source_repo: git@git.fridu.us:heckyel/yt-local.git
|
||||
source_branch: "master"
|
||||
destination_repo: git@git.sr.ht:~heckyel/yt-local
|
||||
destination_branch: "master"
|
||||
source_ssh_private_key: ${{ secrets.GIT_SYNC_SOURCE_SSH_PRIVATE_KEY }}
|
||||
destination_ssh_private_key: ${{ secrets.GIT_SYNC_DESTINATION_SSH_PRIVATE_KEY }}
|
||||
continue-on-error: true
|
||||
168
.gitignore
vendored
168
.gitignore
vendored
@@ -1,15 +1,171 @@
|
||||
# =============================================================================
|
||||
# .gitignore - YT Local
|
||||
# =============================================================================
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Python / Bytecode
|
||||
# -----------------------------------------------------------------------------
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
debug/
|
||||
*.so
|
||||
.Python
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Virtual Environments
|
||||
# -----------------------------------------------------------------------------
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
.venv/
|
||||
venv/
|
||||
ENV/
|
||||
env/
|
||||
*.egg-info/
|
||||
.eggs/
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# IDE / Editors
|
||||
# -----------------------------------------------------------------------------
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
.DS_Store
|
||||
.flycheck_*
|
||||
*.sublime-project
|
||||
*.sublime-workspace
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Distribution / Packaging
|
||||
# -----------------------------------------------------------------------------
|
||||
build/
|
||||
dist/
|
||||
*.egg
|
||||
*.manifest
|
||||
*.spec
|
||||
pip-wheel-metadata/
|
||||
share/python-wheels/
|
||||
MANIFEST
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Testing / Coverage
|
||||
# -----------------------------------------------------------------------------
|
||||
.pytest_cache/
|
||||
.coverage
|
||||
.coverage.*
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Type Checking / Linting
|
||||
# -----------------------------------------------------------------------------
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
.pyre/
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Jupyter / IPython
|
||||
# -----------------------------------------------------------------------------
|
||||
.ipynb_checkpoints
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Python Tools
|
||||
# -----------------------------------------------------------------------------
|
||||
# pyenv
|
||||
.python-version
|
||||
# pipenv
|
||||
Pipfile.lock
|
||||
# PEP 582
|
||||
__pypackages__/
|
||||
# Celery
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
# Sphinx
|
||||
docs/_build/
|
||||
# PyBuilder
|
||||
target/
|
||||
# Scrapy
|
||||
.scrapy
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Web Frameworks
|
||||
# -----------------------------------------------------------------------------
|
||||
# Django
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
# Flask
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Documentation
|
||||
# -----------------------------------------------------------------------------
|
||||
# mkdocs
|
||||
/site
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Project Specific - YT Local
|
||||
# -----------------------------------------------------------------------------
|
||||
# Data & Debug
|
||||
data/
|
||||
python/
|
||||
debug/
|
||||
|
||||
# Release artifacts
|
||||
release/
|
||||
yt-local/
|
||||
banned_addresses.txt
|
||||
settings.txt
|
||||
get-pip.py
|
||||
latest-dist.zip
|
||||
*.7z
|
||||
*.zip
|
||||
*venv*
|
||||
flycheck_*
|
||||
|
||||
# Configuration (contains user-specific data)
|
||||
settings.txt
|
||||
banned_addresses.txt
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Temporary / Backup Files
|
||||
# -----------------------------------------------------------------------------
|
||||
*.log
|
||||
*.tmp
|
||||
*.bak
|
||||
*.orig
|
||||
*.cache/
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Localization / Compiled translations
|
||||
# -----------------------------------------------------------------------------
|
||||
*.mo
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# AI assistants / LLM tools
|
||||
# -----------------------------------------------------------------------------
|
||||
# Claude AI assistant configuration and cache
|
||||
.claude/
|
||||
claude*
|
||||
.anthropic/
|
||||
|
||||
# Kiro AI tool configuration and cache
|
||||
.kiro/
|
||||
kiro*
|
||||
|
||||
# Qwen AI-related files and caches
|
||||
.qwen/
|
||||
qwen*
|
||||
|
||||
# Other AI assistants/IDE integrations
|
||||
.cursor/
|
||||
.gpt/
|
||||
.openai/
|
||||
|
||||
210
Makefile
Normal file
210
Makefile
Normal file
@@ -0,0 +1,210 @@
|
||||
# yt-local Makefile
|
||||
# Automated tasks for development, translations, and maintenance
|
||||
|
||||
.PHONY: help install dev clean test i18n-extract i18n-init i18n-update i18n-compile i18n-stats i18n-clean setup-dev lint format backup restore
|
||||
|
||||
# Variables
|
||||
PYTHON := python3
|
||||
PIP := pip3
|
||||
LANG_CODE ?= es
|
||||
VENV_DIR := venv
|
||||
PROJECT_NAME := yt-local
|
||||
|
||||
## Help
|
||||
help: ## Show this help message
|
||||
@echo "$(PROJECT_NAME) - Available tasks:"
|
||||
@echo ""
|
||||
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf " %-20s %s\n", $$1, $$2}'
|
||||
@echo ""
|
||||
@echo "Examples:"
|
||||
@echo " make install # Install dependencies"
|
||||
@echo " make dev # Run development server"
|
||||
@echo " make i18n-extract # Extract strings for translation"
|
||||
@echo " make i18n-init LANG_CODE=fr # Initialize French"
|
||||
@echo " make lint # Check code style"
|
||||
|
||||
## Installation and Setup
|
||||
install: ## Install project dependencies
|
||||
@echo "[INFO] Installing dependencies..."
|
||||
$(PIP) install -r requirements.txt
|
||||
@echo "[SUCCESS] Dependencies installed"
|
||||
|
||||
setup-dev: ## Complete development setup
|
||||
@echo "[INFO] Setting up development environment..."
|
||||
$(PYTHON) -m venv $(VENV_DIR)
|
||||
./$(VENV_DIR)/bin/pip install -r requirements.txt
|
||||
@echo "[SUCCESS] Virtual environment created in $(VENV_DIR)"
|
||||
@echo "[INFO] Activate with: source $(VENV_DIR)/bin/activate"
|
||||
|
||||
requirements: ## Update and install requirements
|
||||
@echo "[INFO] Installing/updating requirements..."
|
||||
$(PIP) install --upgrade pip
|
||||
$(PIP) install -r requirements.txt
|
||||
@echo "[SUCCESS] Requirements installed"
|
||||
|
||||
## Development
|
||||
dev: ## Run development server
|
||||
@echo "[INFO] Starting development server..."
|
||||
@echo "[INFO] Server available at: http://localhost:9010"
|
||||
$(PYTHON) server.py
|
||||
|
||||
run: dev ## Alias for dev
|
||||
|
||||
## Testing
|
||||
test: ## Run tests
|
||||
@echo "[INFO] Running tests..."
|
||||
@if [ -d "tests" ]; then \
|
||||
$(PYTHON) -m pytest -v; \
|
||||
else \
|
||||
echo "[WARN] No tests directory found"; \
|
||||
fi
|
||||
|
||||
test-cov: ## Run tests with coverage
|
||||
@echo "[INFO] Running tests with coverage..."
|
||||
@if command -v pytest-cov >/dev/null 2>&1; then \
|
||||
$(PYTHON) -m pytest -v --cov=$(PROJECT_NAME) --cov-report=html; \
|
||||
else \
|
||||
echo "[WARN] pytest-cov not installed. Run: pip install pytest-cov"; \
|
||||
fi
|
||||
|
||||
## Internationalization (i18n)
|
||||
i18n-extract: ## Extract strings for translation
|
||||
@echo "[INFO] Extracting strings for translation..."
|
||||
$(PYTHON) manage_translations.py extract
|
||||
@echo "[SUCCESS] Strings extracted to translations/messages.pot"
|
||||
|
||||
i18n-init: ## Initialize new language (use LANG_CODE=xx)
|
||||
@echo "[INFO] Initializing language: $(LANG_CODE)"
|
||||
$(PYTHON) manage_translations.py init $(LANG_CODE)
|
||||
@echo "[SUCCESS] Language $(LANG_CODE) initialized"
|
||||
@echo "[INFO] Edit: translations/$(LANG_CODE)/LC_MESSAGES/messages.po"
|
||||
|
||||
i18n-update: ## Update existing translations
|
||||
@echo "[INFO] Updating existing translations..."
|
||||
$(PYTHON) manage_translations.py update
|
||||
@echo "[SUCCESS] Translations updated"
|
||||
|
||||
i18n-compile: ## Compile translations to binary .mo files
|
||||
@echo "[INFO] Compiling translations..."
|
||||
$(PYTHON) manage_translations.py compile
|
||||
@echo "[SUCCESS] Translations compiled"
|
||||
|
||||
i18n-stats: ## Show translation statistics
|
||||
@echo "[INFO] Translation statistics:"
|
||||
@echo ""
|
||||
@for lang_dir in translations/*/; do \
|
||||
if [ -d "$$lang_dir" ] && [ "$$lang_dir" != "translations/*/" ]; then \
|
||||
lang=$$(basename "$$lang_dir"); \
|
||||
po_file="$$lang_dir/LC_MESSAGES/messages.po"; \
|
||||
if [ -f "$$po_file" ]; then \
|
||||
total=$$(grep -c "^msgid " "$$po_file" 2>/dev/null || echo "0"); \
|
||||
translated=$$(grep -c "^msgstr \"[^\"]\+\"" "$$po_file" 2>/dev/null || echo "0"); \
|
||||
fuzzy=$$(grep -c "^#, fuzzy" "$$po_file" 2>/dev/null || echo "0"); \
|
||||
if [ "$$total" -gt 0 ]; then \
|
||||
percent=$$((translated * 100 / total)); \
|
||||
echo " [STAT] $$lang: $$translated/$$total ($$percent%) - Fuzzy: $$fuzzy"; \
|
||||
else \
|
||||
echo " [STAT] $$lang: No translations yet"; \
|
||||
fi; \
|
||||
fi \
|
||||
fi \
|
||||
done
|
||||
@echo ""
|
||||
|
||||
i18n-clean: ## Clean compiled translation files
|
||||
@echo "[INFO] Cleaning compiled .mo files..."
|
||||
find translations/ -name "*.mo" -delete
|
||||
@echo "[SUCCESS] .mo files removed"
|
||||
|
||||
i18n-workflow: ## Complete workflow: extract → update → compile
|
||||
@echo "[INFO] Running complete translation workflow..."
|
||||
@make i18n-extract
|
||||
@make i18n-update
|
||||
@make i18n-compile
|
||||
@make i18n-stats
|
||||
@echo "[SUCCESS] Translation workflow completed"
|
||||
|
||||
## Code Quality
|
||||
lint: ## Check code with flake8
|
||||
@echo "[INFO] Checking code style..."
|
||||
@if command -v flake8 >/dev/null 2>&1; then \
|
||||
flake8 youtube/ --max-line-length=120 --ignore=E501,W503,E402 --exclude=youtube/ytdlp_service.py,youtube/ytdlp_integration.py,youtube/ytdlp_proxy.py; \
|
||||
echo "[SUCCESS] Code style check passed"; \
|
||||
else \
|
||||
echo "[WARN] flake8 not installed (pip install flake8)"; \
|
||||
fi
|
||||
|
||||
format: ## Format code with black (if available)
|
||||
@echo "[INFO] Formatting code..."
|
||||
@if command -v black >/dev/null 2>&1; then \
|
||||
black youtube/ --line-length=120 --exclude='ytdlp_.*\.py'; \
|
||||
echo "[SUCCESS] Code formatted"; \
|
||||
else \
|
||||
echo "[WARN] black not installed (pip install black)"; \
|
||||
fi
|
||||
|
||||
check-deps: ## Check installed dependencies
|
||||
@echo "[INFO] Checking dependencies..."
|
||||
@$(PYTHON) -c "import flask_babel; print('[OK] Flask-Babel:', flask_babel.__version__)" 2>/dev/null || echo "[ERROR] Flask-Babel not installed"
|
||||
@$(PYTHON) -c "import flask; print('[OK] Flask:', flask.__version__)" 2>/dev/null || echo "[ERROR] Flask not installed"
|
||||
@$(PYTHON) -c "import yt_dlp; print('[OK] yt-dlp:', yt_dlp.__version__)" 2>/dev/null || echo "[ERROR] yt-dlp not installed"
|
||||
|
||||
## Maintenance
|
||||
backup: ## Create translations backup
|
||||
@echo "[INFO] Creating translations backup..."
|
||||
@timestamp=$$(date +%Y%m%d_%H%M%S); \
|
||||
tar -czf "translations_backup_$$timestamp.tar.gz" translations/ 2>/dev/null || echo "[WARN] No translations to backup"; \
|
||||
if [ -f "translations_backup_$$timestamp.tar.gz" ]; then \
|
||||
echo "[SUCCESS] Backup created: translations_backup_$$timestamp.tar.gz"; \
|
||||
fi
|
||||
|
||||
restore: ## Restore translations from backup
|
||||
@echo "[INFO] Restoring translations from backup..."
|
||||
@if ls translations_backup_*.tar.gz 1>/dev/null 2>&1; then \
|
||||
latest_backup=$$(ls -t translations_backup_*.tar.gz | head -1); \
|
||||
tar -xzf "$$latest_backup"; \
|
||||
echo "[SUCCESS] Restored from: $$latest_backup"; \
|
||||
else \
|
||||
echo "[ERROR] No backup files found"; \
|
||||
fi
|
||||
|
||||
clean: ## Clean temporary files and caches
|
||||
@echo "[INFO] Cleaning temporary files..."
|
||||
find . -type f -name "*.pyc" -delete
|
||||
find . -type d -name "__pycache__" -delete
|
||||
find . -type f -name "*.mo" -delete
|
||||
find . -type d -name ".pytest_cache" -delete
|
||||
find . -type f -name ".coverage" -delete
|
||||
find . -type d -name "htmlcov" -delete
|
||||
@echo "[SUCCESS] Temporary files removed"
|
||||
|
||||
distclean: clean ## Clean everything including venv
|
||||
@echo "[INFO] Cleaning everything..."
|
||||
rm -rf $(VENV_DIR)
|
||||
@echo "[SUCCESS] Complete cleanup done"
|
||||
|
||||
## Project Information
|
||||
info: ## Show project information
|
||||
@echo "[INFO] $(PROJECT_NAME) - Project information:"
|
||||
@echo ""
|
||||
@echo " [INFO] Directory: $$(pwd)"
|
||||
@echo " [INFO] Python: $$($(PYTHON) --version)"
|
||||
@echo " [INFO] Pip: $$($(PIP) --version | cut -d' ' -f1-2)"
|
||||
@echo ""
|
||||
@echo " [INFO] Configured languages:"
|
||||
@for lang_dir in translations/*/; do \
|
||||
if [ -d "$$lang_dir" ] && [ "$$lang_dir" != "translations/*/" ]; then \
|
||||
lang=$$(basename "$$lang_dir"); \
|
||||
echo " - $$lang"; \
|
||||
fi \
|
||||
done
|
||||
@echo ""
|
||||
@echo " [INFO] Main files:"
|
||||
@echo " - babel.cfg (i18n configuration)"
|
||||
@echo " - manage_translations.py (i18n CLI)"
|
||||
@echo " - youtube/i18n_strings.py (centralized strings)"
|
||||
@echo " - youtube/ytdlp_service.py (yt-dlp integration)"
|
||||
@echo ""
|
||||
|
||||
# Default target
|
||||
.DEFAULT_GOAL := help
|
||||
386
README.md
386
README.md
@@ -1,177 +1,313 @@
|
||||
[](https://drone.hgit.ga/heckyel/yt-local)
|
||||
|
||||
# yt-local
|
||||
|
||||
Fork of [youtube-local](https://github.com/user234683/youtube-local)
|
||||
[](https://www.gnu.org/licenses/agpl-3.0)
|
||||
[](https://www.python.org/downloads/)
|
||||
[](https://github.com/user234683/youtube-local)
|
||||
|
||||
yt-local is a browser-based client written in Python for watching YouTube anonymously and without the lag of the slow page used by YouTube. One of the primary features is that all requests are routed through Tor, except for the video file at googlevideo.com. This is analogous to what HookTube (defunct) and Invidious do, except that you do not have to trust a third-party to respect your privacy. The assumption here is that Google won't put the effort in to incorporate the video file requests into their tracking, as it's not worth pursuing the incredibly small number of users who care about privacy (Tor video routing is also provided as an option). Tor has high latency, so this will not be as fast network-wise as regular YouTube. However, using Tor is optional; when not routing through Tor, video pages may load faster than they do with YouTube's page depending on your browser.
|
||||
A privacy-focused, browser-based YouTube client that routes requests through Tor for anonymous viewing—**without compromising on speed or features**.
|
||||
|
||||
The YouTube API is not used, so no keys or anything are needed. It uses the same requests as the YouTube webpage.
|
||||
[Features](#features) • [Install](#install) • [Usage](#usage) • [Screenshots](#screenshots)
|
||||
|
||||
---
|
||||
|
||||
> [!NOTE]
|
||||
> How it works: yt-local mirrors YouTube's web requests (using the same Invidious/InnerTube endpoints as yt-dlp and Invidious) but strips JavaScript and serves a lightweight HTML frontend. No API keys needed.
|
||||
|
||||
## Overview
|
||||
|
||||
yt-local is a lightweight, self-hosted YouTube client written in Python that gives you:
|
||||
|
||||
- **Privacy-first**: All requests route through Tor by default (video optional), keeping you anonymous.
|
||||
- **Fast page loads**: No lazy-loading, no layout reflows, instant comment rendering.
|
||||
- **Full control**: Customize subtitles, related videos, comments, and playback speed.
|
||||
- **High quality**: Supports all YouTube video qualities (144p–2160p) via DASH muxing.
|
||||
- **Zero ads**: Clean interface, no tracking, no sponsored content.
|
||||
- **Self-hosted**: You control the instance—no third-party trust required.
|
||||
|
||||
## Features
|
||||
|
||||
| Category | Features |
|
||||
|---------------|----------------------------------------------------------------------------------------|
|
||||
| Core | Search, channels, playlists, watch pages, comments, subtitles (auto/manual) |
|
||||
| Privacy | Optional Tor routing (including video), automatic circuit rotation on 429 errors |
|
||||
| Local | Local playlists (durable against YouTube deletions), thumbnail caching |
|
||||
| UI | 3 themes (Light/Gray/Dark), theater mode, custom font selection |
|
||||
| Config | Fine-grained settings: subtitle mode, comment visibility, sponsorblock integration |
|
||||
| Performance | No JavaScript required, instant page rendering, rate limiting with exponential backoff |
|
||||
| Subscriptions | Import from YouTube Takeout (CSV/JSON), tag organization, mute channels |
|
||||
|
||||
### Advanced Capabilities
|
||||
|
||||
- SponsorBlock integration — skip sponsored segments automatically
|
||||
- Custom video speeds — 0.25x to 4x playback rate
|
||||
- Video transcripts — accessible via transcript button
|
||||
- Video quality muxing — combine separate video/audio streams for non-360p/720p resolutions
|
||||
- Tor circuit rotation — automatic new identity on rate limiting (429)
|
||||
- File downloading — download videos/audio (disabled by default, configurable)
|
||||
|
||||
## Screenshots
|
||||
|
||||
[Light theme](https://pic.infini.fr/l7WINjzS/0Ru6MrhA.png)
|
||||
| Light Theme | Gray Theme | Dark Theme |
|
||||
|:-----------------------------------------------------:|:----------------------------------------------------:|:----------------------------------------------------:|
|
||||
|  |  |  |
|
||||
|
||||
[Gray theme](https://pic.infini.fr/znnQXWNc/hL78CRzo.png)
|
||||
| Channel View | Playlist View |
|
||||
|:-------------------------------------------------------:|:---------------------:|
|
||||
|  | *(similar structure)* |
|
||||
|
||||
[Dark theme](https://pic.infini.fr/iXwFtTWv/mt2kS5bv.png)
|
||||
---
|
||||
|
||||
[Channel](https://pic.infini.fr/JsenWVYe/SbdIQlS6.png)
|
||||
|
||||
## Features
|
||||
* Standard pages of YouTube: search, channels, playlists
|
||||
* Anonymity from Google's tracking by routing requests through Tor
|
||||
* Local playlists: These solve the two problems with creating playlists on YouTube: (1) they're datamined and (2) videos frequently get deleted by YouTube and lost from the playlist, making it very difficult to find a reupload as the title of the deleted video is not displayed.
|
||||
* Themes: Light, Gray, and Dark
|
||||
* Subtitles
|
||||
* Easily download videos or their audio. (Disabled by default)
|
||||
* No ads
|
||||
* View comments
|
||||
* JavaScript not required
|
||||
* Theater and non-theater mode
|
||||
* Subscriptions that are independent from YouTube
|
||||
* Can import subscriptions from YouTube
|
||||
* Works by checking channels individually
|
||||
* Can be set to automatically check channels.
|
||||
* For efficiency of requests, frequency of checking is based on how quickly channel posts videos
|
||||
* Can mute channels, so as to have a way to "soft" unsubscribe. Muted channels won't be checked automatically or when using the "Check all" button. Videos from these channels will be hidden.
|
||||
* Can tag subscriptions to organize them or check specific tags
|
||||
* Fast page
|
||||
* No distracting/slow layout rearrangement
|
||||
* No lazy-loading of comments; they are ready instantly.
|
||||
* Settings allow fine-tuned control over when/how comments or related videos are shown:
|
||||
1. Shown by default, with click to hide
|
||||
2. Hidden by default, with click to show
|
||||
3. Never shown
|
||||
* Optionally skip sponsored segments using [SponsorBlock](https://github.com/ajayyy/SponsorBlock)'s API
|
||||
* Custom video speeds
|
||||
* Video transcript
|
||||
* Supports all available video qualities: 144p through 2160p
|
||||
|
||||
## Planned features
|
||||
- [ ] Putting videos from subscriptions or local playlists into the related videos
|
||||
- [x] Information about video (geographic regions, region of Tor exit node, etc)
|
||||
- [ ] Ability to delete playlists
|
||||
- [ ] Auto-saving of local playlist videos
|
||||
- [ ] Import youtube playlist into a local playlist
|
||||
- [ ] Rearrange items of local playlist
|
||||
- [x] Video qualities other than 360p and 720p by muxing video and audio
|
||||
- [x] Indicate if comments are disabled
|
||||
- [x] Indicate how many comments a video has
|
||||
- [ ] Featured channels page
|
||||
- [ ] Channel comments
|
||||
- [x] Video transcript
|
||||
- [x] Automatic Tor circuit change when blocked
|
||||
- [x] Support &t parameter
|
||||
- [ ] Subscriptions: Option to mark what has been watched
|
||||
- [ ] Subscriptions: Option to filter videos based on keywords in title or description
|
||||
- [ ] Subscriptions: Delete old entries and thumbnails
|
||||
- [ ] Support for more sites, such as Vimeo, Dailymotion, LBRY, etc.
|
||||
|
||||
## Installing
|
||||
## Install
|
||||
|
||||
### Windows
|
||||
|
||||
Download the zip file under the Releases page. Unzip it anywhere you choose.
|
||||
1. Download the latest [release ZIP](https://github.com/user234683/yt-local/releases)
|
||||
2. Extract to any folder
|
||||
3. Run `run.bat` to start
|
||||
|
||||
### GNU+Linux/MacOS
|
||||
### GNU/Linux / macOS
|
||||
|
||||
Download the tarball under the Releases page and extract it. `cd` into the directory and run
|
||||
```bash
|
||||
# 1. Clone or extract the release
|
||||
git clone https://github.com/user234683/yt-local.git
|
||||
cd yt-local
|
||||
|
||||
1. `cd yt-local`
|
||||
2. `virtualenv -p python3 venv`
|
||||
3. `source venv/bin/activate`
|
||||
4. `pip install -r requirements.txt`
|
||||
5. `python server.py`
|
||||
# 2. Create and activate virtual environment
|
||||
python3 -m venv venv
|
||||
source venv/bin/activate # or `venv\Scripts\activate` on Windows
|
||||
|
||||
# 3. Install dependencies
|
||||
pip install -r requirements.txt
|
||||
|
||||
**Note**: If pip isn't installed, first try installing it from your package manager. Make sure you install pip for python 3. For example, the package you need on debian is python3-pip rather than python-pip. If your package manager doesn't provide it, try to install it according to [this answer](https://unix.stackexchange.com/a/182467), but make sure you run `python3 get-pip.py` instead of `python get-pip.py`
|
||||
# 4. Run the server
|
||||
python3 server.py
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> If `pip` isn't installed, use your distro's package manager (e.g., `sudo apt install python3-pip` on Debian/Ubuntu).
|
||||
|
||||
### Portable Mode
|
||||
|
||||
To keep settings and data in the same directory as the app:
|
||||
|
||||
```bash
|
||||
# Create an empty settings.txt in the project root
|
||||
touch settings.txt
|
||||
python3 server.py
|
||||
# Data now stored in ./data/ instead of ~/.yt-local/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Usage
|
||||
|
||||
Firstly, if you wish to run this in portable mode, create the empty file "settings.txt" in the program's main directory. If the file is there, settings and data will be stored in the same directory as the program. Otherwise, settings and data will be stored in `C:\Users\[your username]\.yt-local` on Windows and `~/.yt-local` on GNU+Linux/MacOS.
|
||||
### Basic Access
|
||||
|
||||
To run the program on windows, open `run.bat`. On GNU+Linux/MacOS, run `python3 server.py`.
|
||||
1. Start the server:
|
||||
|
||||
Access youtube URLs by prefixing them with `http://localhost:9010/`.
|
||||
For instance, `http://localhost:9010/https://www.youtube.com/watch?v=vBgulDeV2RU`
|
||||
You can use an addon such as Redirector ([Firefox](https://addons.mozilla.org/en-US/firefox/addon/redirector/)|[Chrome](https://chrome.google.com/webstore/detail/redirector/ocgpenflpmgnfapjedencafcfakcekcd)) to automatically redirect YouTube URLs to yt-local. I use the include pattern `^(https?://(?:[a-zA-Z0-9_-]*\.)?(?:youtube\.com|youtu\.be|youtube-nocookie\.com)/.*)` and redirect pattern `http://localhost:9010/$1` (Make sure you're using regular expression mode).
|
||||
```bash
|
||||
python3 server.py
|
||||
# Server runs on http://127.0.0.1:9010 (configurable in /settings)
|
||||
```
|
||||
|
||||
If you want embeds on web to also redirect to yt-local, make sure "Iframes" is checked under advanced options in your redirector rule. Check test `http://localhost:9010/youtube.com/embed/vBgulDeV2RU`
|
||||
2. Access YouTube via proxy:
|
||||
|
||||
yt-local can be added as a search engine in firefox to make searching more convenient. See [here](https://support.mozilla.org/en-US/kb/add-or-remove-search-engine-firefox) for information on firefox search plugins.
|
||||
```bash
|
||||
http://localhost:9010/https://www.youtube.com/watch?v=vBgulDeV2RU
|
||||
```
|
||||
|
||||
### Using Tor
|
||||
All YouTube URLs must be prefixed with `http://localhost:9010/https://`.
|
||||
|
||||
In the settings page, set "Route Tor" to "On, except video" (the second option). Be sure to save the settings.
|
||||
3. (Optional) Use Redirector to auto-redirect YouTube URLs:
|
||||
|
||||
Ensure Tor is listening for Socks5 connections on port 9150. A simple way to accomplish this is by opening the Tor Browser Bundle and leaving it open. However, you will not be accessing the program (at `http://localhost:9010`) through the Tor Browser. You will use your regular browser for that. Rather, this is just a quick way to give the program access to Tor routing.
|
||||
- **Firefox**: [Redirector addon](https://addons.mozilla.org/firefox/addon/redirector/)
|
||||
- **Chrome**: [Redirector addon](https://chrome.google.com/webstore/detail/redirector/ocgpenflpmgnfapjedencafcfakcekcd)
|
||||
- **Pattern**: `^(https?://(?:[a-zA-Z0-9_-]*\.)?(?:youtube\.com|youtu\.be|youtube-nocookie\.com)/.*)`
|
||||
- **Redirect to**: `http://localhost:9010/$1`
|
||||
|
||||
### Standalone Tor
|
||||
> [!NOTE]
|
||||
> To use embeds on web pages, make sure "Iframes" is checked under advanced options in your redirector rule.
|
||||
|
||||
If you don't want to waste system resources leaving the Tor Browser open in addition to your regular browser, you can configure standalone Tor to run instead using the following instructions.
|
||||
### Tor Routing
|
||||
|
||||
For Windows, to make standalone Tor run at startup, press Windows Key + R and type `shell:startup` to open the Startup folder. Create a new shortcut there. For the command of the shortcut, enter `"C:\[path-to-Tor-Browser-directory]\Tor\tor.exe" SOCKSPort 9150 ControlPort 9151`. You can then launch this shortcut to start it. Alternatively, if something isn't working, to see what's wrong, open `cmd.exe` and go to the directory `C:\[path-to-Tor-Browser-directory]\Tor`. Then run `tor SOCKSPort 9150 ControlPort 9151 | more`. The `more` part at the end is just to make sure any errors are displayed, to fix a bug in Windows cmd where tor doesn't display any output. You can stop tor in the task manager.
|
||||
> [!IMPORTANT]
|
||||
> Recommended for privacy. In `/settings`, set **Route Tor** to `"On, except video"` (or `"On, including video"`), then save.
|
||||
|
||||
For Debian/Ubuntu, you can `sudo apt install tor` to install the command line version of Tor, and then run `sudo systemctl start tor` to run it as a background service that will get started during boot as well. However, Tor on the command line uses the port `9050` by default (rather than the 9150 used by the Tor Browser). So you will need to change `Tor port` to 9050 and `Tor control port` to `9051` in yt-local settings page. Additionally, you will need to enable the Tor control port by uncommenting the line `ControlPort 9051`, and setting `CookieAuthentication` to 0 in `/etc/tor/torrc`. If no Tor package is available for your distro, you can configure the `tor` binary located at `./Browser/TorBrowser/Tor/tor` inside the Tor Browser installation location to run at start time, or create a service to do it.
|
||||
#### Running Tor
|
||||
|
||||
### Tor video routing
|
||||
Option A: Tor Browser (easiest)
|
||||
|
||||
If you wish to route the video through Tor, set "Route Tor" to "On, including video". Because this is bandwidth-intensive, you are strongly encouraged to donate to the [consortium of Tor node operators](https://torservers.net/donate.html). For instance, donations to [NoiseTor](https://noisetor.net/) go straight towards funding nodes. Using their numbers for bandwidth costs, together with an average of 485 kbit/sec for a diverse sample of videos, and assuming n hours of video watched per day, gives $0.03n/month. A $1/month donation will be a very generous amount to not only offset losses, but help keep the network healthy.
|
||||
- Launch Tor Browser and leave it running
|
||||
- yt-local uses port `9150` (Tor Browser default)
|
||||
|
||||
In general, Tor video routing will be slower (for instance, moving around in the video is quite slow). I've never seen any signs that watch history in yt-local affects on-site Youtube recommendations. It's likely that requests to googlevideo are logged for some period of time, but are not integrated into Youtube's larger advertisement/recommendation systems, since those presumably depend more heavily on in-page tracking through Javascript rather than CDN requests to googlevideo.
|
||||
Option B: Standalone Tor
|
||||
|
||||
### Importing subscriptions
|
||||
```bash
|
||||
# Linux (Debian/Ubuntu)
|
||||
sudo apt install tor
|
||||
sudo systemctl enable --now tor
|
||||
|
||||
1. Go to the [Google takeout manager](https://takeout.google.com/takeout/custom/youtube).
|
||||
2. Log in if asked.
|
||||
3. Click on "All data included", then on "Deselect all", then select only "subscriptions" and click "OK".
|
||||
4. Click on "Next step" and then on "Create export".
|
||||
5. Click on the "Download" button after it appears.
|
||||
6. From the downloaded takeout zip extract the .csv file. It is usually located under `YouTube and YouTube Music/subscriptions/subscriptions.csv`
|
||||
7. Go to the subscriptions manager in yt-local. In the import area, select your .csv file, then press import.
|
||||
# Configure yt-local ports (if using default Tor ports):
|
||||
# Tor port: 9150
|
||||
# Tor control port: 9151
|
||||
```
|
||||
|
||||
> [!WARNING]
|
||||
> Video over Tor is bandwidth-intensive. Consider donating to [Tor node operators](https://torservers.net/donate.html) to sustain the network.
|
||||
|
||||
### Import Subscriptions
|
||||
|
||||
1. Go to [Google Takeout](https://takeout.google.com/takeout/custom/youtube)
|
||||
2. Deselect all → select only **Subscriptions** → create export
|
||||
3. Download and extract `subscriptions.csv` (path: `YouTube and YouTube Music/subscriptions/subscriptions.csv`)
|
||||
4. In yt-local: **Subscriptions** → **Import** → upload CSV
|
||||
|
||||
> [!IMPORTANT]
|
||||
> The CSV file must contain columns: `channel_id,channel_name,channel_url`
|
||||
|
||||
## Supported formats
|
||||
|
||||
Supported subscriptions import formats:
|
||||
- NewPipe subscriptions export JSON
|
||||
- Google Takeout CSV
|
||||
- Old Google Takeout JSON
|
||||
- OPML format from now-removed YouTube subscriptions manager
|
||||
- Google Takeout JSON (legacy)
|
||||
- NewPipe JSON export
|
||||
- OPML (from YouTube's old subscription manager)
|
||||
|
||||
## Contributing
|
||||
---
|
||||
|
||||
Pull requests and issues are welcome
|
||||
## Configuration
|
||||
|
||||
For coding guidelines and an overview of the software architecture, see the [HACKING.md](docs/HACKING.md) file.
|
||||
Visit `http://localhost:9010/settings` to configure:
|
||||
|
||||
## Public instances
|
||||
| Setting | Description |
|
||||
|--------------------|-------------------------------------------------|
|
||||
| Route Tor | Off / On (except video) / On (including video) |
|
||||
| Default subtitles | Off / Manual only / Auto + Manual |
|
||||
| Comments mode | Shown by default / Hidden by default / Never |
|
||||
| Related videos | Same options as comments |
|
||||
| Theme | Light / Gray / Dark |
|
||||
| Font | Browser default / Serif / Sans-serif |
|
||||
| Default resolution | Auto / 144p–2160p |
|
||||
| SponsorBlock | Enable Sponsored segments skipping |
|
||||
| Proxy images | Route thumbnails through yt-local (for privacy) |
|
||||
|
||||
yt-local is not designed to run in public mode; however, there is an instance of yt-local running in public mode, with fewer features
|
||||
---
|
||||
|
||||
- <https://fast-gorge-89206.herokuapp.com>
|
||||
## Troubleshooting
|
||||
|
||||
| Issue | Solution |
|
||||
|------------------------------|----------------------------------------------------------------------------------------------|
|
||||
| Port already in use | Change `port_number` in `/settings` or kill existing process: `pkill -f "python3 server.py"` |
|
||||
| 429 Too Many Requests | Enable Tor routing for automatic IP rotation, or wait 5-10 minutes |
|
||||
| Failed to connect to Tor | Verify Tor is running: `tor --version` or launch Tor Browser |
|
||||
| Subscriptions not importing | Ensure CSV has columns: `channel_id,channel_name,channel_url` |
|
||||
| Settings persist across runs | Check `~/.yt-local/settings.txt` (non-portable) or `./settings.txt` (portable) |
|
||||
|
||||
---
|
||||
|
||||
## Development
|
||||
|
||||
### Running Tests
|
||||
|
||||
```bash
|
||||
source venv/bin/activate # if not already in venv
|
||||
make test
|
||||
```
|
||||
|
||||
### Project Structure
|
||||
|
||||
```bash
|
||||
yt-local/
|
||||
├── youtube/ # Core application logic
|
||||
│ ├── __init__.py # Flask app entry point
|
||||
│ ├── util.py # HTTP utilities, Tor manager, fetch_url
|
||||
│ ├── watch.py # Video/playlist page handlers
|
||||
│ ├── channel.py # Channel page handlers
|
||||
│ ├── playlist.py # Playlist handlers
|
||||
│ ├── search.py # Search handlers
|
||||
│ ├── comments.py # Comment extraction/rendering
|
||||
│ ├── subscriptions.py # Subscription management + SQLite
|
||||
│ ├── local_playlist.py # Local playlist CRUD
|
||||
│ ├── proto.py # YouTube protobuf token generation
|
||||
│ ├── yt_data_extract/ # Polymer JSON parsing abstractions
|
||||
│ └── hls_cache.py # HLS audio/video streaming proxy
|
||||
├── templates/ # Jinja2 HTML templates
|
||||
├── static/ # CSS/JS assets
|
||||
├── translations/ # i18n files (Babel)
|
||||
├── tests/ # pytest test suite
|
||||
├── server.py # WSGI entry point
|
||||
├── settings.py # Settings parser + admin page
|
||||
├── generate_release.py # Windows release builder
|
||||
└── manage_translations.py # i18n maintenance script
|
||||
```
|
||||
|
||||
> [!NOTE]
|
||||
> For detailed architecture guidance, see [`docs/HACKING.md`](docs/HACKING.md).
|
||||
|
||||
### Contributing
|
||||
|
||||
Contributions welcome! Please:
|
||||
|
||||
1. Read [`docs/HACKING.md`](docs/HACKING.md) for coding guidelines
|
||||
2. Follow [PEP 8](https://peps.python.org/pep-0008/) style (use `ruff format`)
|
||||
3. Run tests before submitting: `pytest`
|
||||
4. Ensure no security issues: `bandit -r .`
|
||||
5. Update docs for new features
|
||||
|
||||
---
|
||||
|
||||
## Security Notes
|
||||
|
||||
- **No API keys required** — uses same endpoints as public YouTube web interface
|
||||
- **Tor is optional** — disable in `/settings` if you prefer performance over anonymity
|
||||
- **Rate limiting handled** — exponential backoff (max 5 retries) with automatic Tor circuit rotation
|
||||
- **Path traversal protected** — user input validated against regex whitelists (CWE-22)
|
||||
- **Subprocess calls secure** — build scripts use `subprocess.run([...])` instead of shell (CWE-78)
|
||||
|
||||
> [!NOTE]
|
||||
> GPG key for release verification: `72CFB264DFC43F63E098F926E607CE7149F4D71C`
|
||||
|
||||
---
|
||||
|
||||
## Public Instances
|
||||
|
||||
yt-local is designed for self-hosting.
|
||||
|
||||
---
|
||||
|
||||
## Donate
|
||||
|
||||
This project is 100% free and open-source. If you'd like to support development:
|
||||
|
||||
- **Bitcoin**: `1JrC3iqs3PP5Ge1m1vu7WE8LEf4S85eo7y`
|
||||
- **Tor node donation**: https://torservers.net/donate
|
||||
|
||||
---
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under the GNU Affero General Public License v3 (GNU AGPLv3) or any later version.
|
||||
GNU Affero General Public License v3.0+
|
||||
|
||||
Permission is hereby granted to the youtube-dl project at [https://github.com/ytdl-org/youtube-dl](https://github.com/ytdl-org/youtube-dl) to relicense any portion of this software under the Unlicense, public domain, or whichever license is in use by youtube-dl at the time of relicensing, for the purpose of inclusion of said portion into youtube-dl. Relicensing permission is not granted for any purpose outside of direct inclusion into the [official repository](https://github.com/ytdl-org/youtube-dl) of youtube-dl. If inclusion happens during the process of a pull-request, relicensing happens at the moment the pull request is merged into youtube-dl; until that moment, any cloned repositories of youtube-dl which make use of this software are subject to the terms of the GNU AGPLv3.
|
||||
See [`LICENSE`](LICENSE) for full text.
|
||||
|
||||
## Donate
|
||||
This project is completely free/Libre and will always be.
|
||||
### Exception for youtube-dl
|
||||
|
||||
#### Crypto:
|
||||
- **Bitcoin**: `1JrC3iqs3PP5Ge1m1vu7WE8LEf4S85eo7y`
|
||||
Permission is granted to relicense code portions into youtube-dl's license (currently GPL) for direct inclusion into the [official youtube-dl repository](https://github.com/ytdl-org/youtube-dl). This exception **does not apply** to forks or other uses—those remain under AGPLv3.
|
||||
|
||||
## Similar projects
|
||||
- [invidious](https://github.com/iv-org/invidious) Similar to this project, but also allows it to be hosted as a server to serve many users
|
||||
- [Yotter](https://github.com/ytorg/Yotter) Similar to this project and to invidious. Also supports Twitter
|
||||
- [FreeTube](https://github.com/FreeTubeApp/FreeTube) (Similar to this project, but is an electron app outside the browser)
|
||||
- [youtube-local](https://github.com/user234683/youtube-local) first project on which yt-local is based
|
||||
- [NewPipe](https://newpipe.schabi.org/) (app for android)
|
||||
- [mps-youtube](https://github.com/mps-youtube/mps-youtube) (terminal-only program)
|
||||
- [youtube-viewer](https://github.com/trizen/youtube-viewer)
|
||||
- [FreeTube](https://github.com/FreeTubeApp/FreeTube) (Similar to this project, but is an electron app outside the browser)
|
||||
- [smtube](https://www.smtube.org/)
|
||||
- [Minitube](https://flavio.tordini.org/minitube), [github here](https://github.com/flaviotordini/minitube)
|
||||
- [toogles](https://github.com/mikecrittenden/toogles) (only embeds videos, doesn't use mp4)
|
||||
- [YTLibre](https://git.sr.ht/~heckyel/ytlibre) only extracts the video
|
||||
- [youtube-dl](https://rg3.github.io/youtube-dl/), which this project was based on
|
||||
---
|
||||
|
||||
## Similar Projects
|
||||
|
||||
| Project | Type | Notes |
|
||||
|--------------------------------------------------------------|----------|--------------------------------------|
|
||||
| [invidious](https://github.com/iv-org/invidious) | Server | Multi-user instance, REST API |
|
||||
| [Yotter](https://github.com/ytorg/Yotter) | Server | YouTube + Twitter integration |
|
||||
| [FreeTube](https://github.com/FreeTubeApp/FreeTube) | Desktop | Electron-based client |
|
||||
| [NewPipe](https://newpipe.schabi.org/) | Mobile | Android-only, no JavaScript |
|
||||
| [mps-youtube](https://github.com/mps-youtube/mps-youtube) | Terminal | CLI-based, text UI |
|
||||
| [youtube-local](https://github.com/user234683/youtube-local) | Browser | Original project (base for yt-local) |
|
||||
|
||||
---
|
||||
|
||||
Made for privacy-conscious users
|
||||
|
||||
Last updated: 2026-04-19
|
||||
|
||||
16
babel.cfg
Normal file
16
babel.cfg
Normal file
@@ -0,0 +1,16 @@
|
||||
[python: youtube/**.py]
|
||||
encoding = utf-8
|
||||
keywords = lazy_gettext _l _
|
||||
|
||||
[python: server.py]
|
||||
encoding = utf-8
|
||||
keywords = _
|
||||
|
||||
[python: settings.py]
|
||||
encoding = utf-8
|
||||
keywords = _
|
||||
|
||||
[jinja2: youtube/templates/**.html]
|
||||
encoding = utf-8
|
||||
extensions=jinja2.ext.i18n
|
||||
silent=false
|
||||
@@ -1,7 +1,8 @@
|
||||
# Generate a windows release and a generated embedded distribution of python
|
||||
# Latest python version is the argument of the script
|
||||
# Latest python version is the argument of the script (or oldwin for
|
||||
# vista, 7 and 32-bit versions)
|
||||
# Requirements: 7z, git
|
||||
# wine 32-bit is required in order to build on Linux
|
||||
# wine is required in order to build on Linux
|
||||
|
||||
import sys
|
||||
import urllib
|
||||
@@ -12,22 +13,28 @@ import os
|
||||
import hashlib
|
||||
|
||||
# First CLI argument selects the embeddable-Python version to bundle, or the
# special value 'oldwin' (see header comment) for an older-Windows build.
latest_version = sys.argv[1]
# Optional second argument chooses the bitness of the build.
if len(sys.argv) > 2:
    bitness = sys.argv[2]
else:
    bitness = '64'  # default to a 64-bit build

if latest_version == 'oldwin':
    # 'oldwin' pins Python 3.7.9 and forces a 32-bit build for the
    # Vista/7-compatible release; the suffix distinguishes the output zip.
    bitness = '32'
    latest_version = '3.7.9'
    suffix = 'windows-vista-7-only'
else:
    suffix = 'windows'
|
||||
|
||||
def check(code):
    """Raise an Exception if *code* (a process exit status) is nonzero."""
    if code == 0:
        return
    raise Exception('Got nonzero exit code from command')
|
||||
|
||||
|
||||
def check_subp(x):
    """Raise an Exception if the completed process *x* exited nonzero."""
    if x.returncode == 0:
        return
    raise Exception('Got nonzero exit code from command')
|
||||
|
||||
|
||||
def log(line):
    """Print *line* prefixed with this script's tag for easy grepping."""
    print(f'[generate_release.py] {line}')
|
||||
|
||||
|
||||
# https://stackoverflow.com/questions/7833715/python-deleting-certain-file-extensions
|
||||
def remove_files_with_extensions(path, extensions):
|
||||
for root, dirs, files in os.walk(path):
|
||||
@@ -35,9 +42,12 @@ def remove_files_with_extensions(path, extensions):
|
||||
if os.path.splitext(file)[1] in extensions:
|
||||
os.remove(os.path.join(root, file))
|
||||
|
||||
|
||||
def download_if_not_exists(file_name, url, sha256=None):
|
||||
if not os.path.exists('./' + file_name):
|
||||
# Reject non-https URLs so a mistaken constant cannot cause a
|
||||
# plaintext download (bandit B310 hardening).
|
||||
if not url.startswith('https://'):
|
||||
raise Exception('Refusing to download over non-https URL: ' + url)
|
||||
log('Downloading ' + file_name + '..')
|
||||
data = urllib.request.urlopen(url).read()
|
||||
log('Finished downloading ' + file_name)
|
||||
@@ -51,23 +61,22 @@ def download_if_not_exists(file_name, url, sha256=None):
|
||||
else:
|
||||
log('Using existing ' + file_name)
|
||||
|
||||
|
||||
def wine_run_shell(command):
|
||||
# Keep argv-style invocation (no shell) to avoid command injection.
|
||||
if os.name == 'posix':
|
||||
check(os.system('wine ' + command.replace('\\', '/')))
|
||||
parts = ['wine'] + command.replace('\\', '/').split()
|
||||
elif os.name == 'nt':
|
||||
check(os.system(command))
|
||||
parts = command.split()
|
||||
else:
|
||||
raise Exception('Unsupported OS')
|
||||
|
||||
check(subprocess.run(parts).returncode)
|
||||
|
||||
def wine_run(command_parts):
|
||||
if os.name == 'posix':
|
||||
command_parts = ['wine', ] + command_parts
|
||||
command_parts = ['wine',] + command_parts
|
||||
if subprocess.run(command_parts).returncode != 0:
|
||||
raise Exception('Got nonzero exit code from command')
|
||||
|
||||
|
||||
# ---------- Get current release version, for later ----------
|
||||
log('Getting current release version')
|
||||
describe_result = subprocess.run(['git', 'describe', '--tags'], stdout=subprocess.PIPE)
|
||||
@@ -89,7 +98,20 @@ if os.path.exists('./yt-local'):
|
||||
# confused with working directory. I'm calling it the same thing so it will
|
||||
# have that name when extracted from the final release zip archive)
|
||||
log('Making copy of yt-local files')
|
||||
check(os.system('git archive --format tar master | 7z x -si -ttar -oyt-local'))
|
||||
# Avoid the shell: pipe `git archive` into 7z directly via subprocess.
|
||||
_git_archive = subprocess.Popen(
|
||||
['git', 'archive', '--format', 'tar', 'master'],
|
||||
stdout=subprocess.PIPE,
|
||||
)
|
||||
_sevenz = subprocess.Popen(
|
||||
['7z', 'x', '-si', '-ttar', '-oyt-local'],
|
||||
stdin=_git_archive.stdout,
|
||||
)
|
||||
_git_archive.stdout.close()
|
||||
_sevenz.wait()
|
||||
_git_archive.wait()
|
||||
check(_sevenz.returncode)
|
||||
check(_git_archive.returncode)
|
||||
|
||||
if len(os.listdir('./yt-local')) == 0:
|
||||
raise Exception('Failed to copy yt-local files')
|
||||
@@ -98,19 +120,33 @@ if len(os.listdir('./yt-local')) == 0:
|
||||
# ----------- Generate embedded python distribution -----------
|
||||
os.environ['PYTHONDONTWRITEBYTECODE'] = '1' # *.pyc files double the size of the distribution
|
||||
get_pip_url = 'https://bootstrap.pypa.io/get-pip.py'
|
||||
latest_dist_url = 'https://www.python.org/ftp/python/' + latest_version + '/python-' + latest_version + '-embed-win32.zip'
|
||||
latest_dist_url = 'https://www.python.org/ftp/python/' + latest_version + '/python-' + latest_version
|
||||
if bitness == '32':
|
||||
latest_dist_url += '-embed-win32.zip'
|
||||
else:
|
||||
latest_dist_url += '-embed-amd64.zip'
|
||||
|
||||
# I've verified that all the dlls in the following are signed by Microsoft.
|
||||
# Using this because Microsoft only provides installers whose files can't be
|
||||
# extracted without a special tool.
|
||||
visual_c_runtime_url = 'https://github.com/yuempek/vc-archive/raw/master/archives/vc15_(14.10.25017.0)_2017_x86.7z'
|
||||
visual_c_runtime_sha256 = '2549eb4d2ce4cf3a87425ea01940f74368bf1cda378ef8a8a1f1a12ed59f1547'
|
||||
if bitness == '32':
|
||||
visual_c_runtime_url = 'https://github.com/yuempek/vc-archive/raw/master/archives/vc15_(14.10.25017.0)_2017_x86.7z'
|
||||
visual_c_runtime_sha256 = '2549eb4d2ce4cf3a87425ea01940f74368bf1cda378ef8a8a1f1a12ed59f1547'
|
||||
visual_c_name = 'vc15_(14.10.25017.0)_2017_x86.7z'
|
||||
visual_c_path_to_dlls = 'runtime_minimum/System'
|
||||
else:
|
||||
visual_c_runtime_url = 'https://github.com/yuempek/vc-archive/raw/master/archives/vc15_(14.10.25017.0)_2017_x64.7z'
|
||||
visual_c_runtime_sha256 = '4f00b824c37e1017a93fccbd5775e6ee54f824b6786f5730d257a87a3d9ce921'
|
||||
visual_c_name = 'vc15_(14.10.25017.0)_2017_x64.7z'
|
||||
visual_c_path_to_dlls = 'runtime_minimum/System64'
|
||||
|
||||
download_if_not_exists('get-pip.py', get_pip_url)
|
||||
download_if_not_exists('python-dist-' + latest_version + '.zip', latest_dist_url)
|
||||
download_if_not_exists('vc15_(14.10.25017.0)_2017_x86.7z',
|
||||
visual_c_runtime_url,
|
||||
sha256=visual_c_runtime_sha256)
|
||||
|
||||
python_dist_name = 'python-dist-' + latest_version + '-' + bitness + '.zip'
|
||||
|
||||
download_if_not_exists(python_dist_name, latest_dist_url)
|
||||
download_if_not_exists(visual_c_name,
|
||||
visual_c_runtime_url, sha256=visual_c_runtime_sha256)
|
||||
|
||||
if os.path.exists('./python'):
|
||||
log('Removing old python distribution')
|
||||
@@ -119,7 +155,7 @@ if os.path.exists('./python'):
|
||||
|
||||
log('Extracting python distribution')
|
||||
|
||||
check(os.system(r'7z -y x -opython python-dist-' + latest_version + '.zip'))
|
||||
check_subp(subprocess.run(['7z', '-y', 'x', '-opython', python_dist_name]))
|
||||
|
||||
log('Executing get-pip.py')
|
||||
wine_run(['./python/python.exe', '-I', 'get-pip.py'])
|
||||
@@ -183,7 +219,7 @@ with open('./python/python3' + major_release + '._pth', 'a', encoding='utf-8') a
|
||||
f.write('..\n')'''
|
||||
|
||||
log('Inserting Microsoft C Runtime')
|
||||
check_subp(subprocess.run([r'7z', '-y', 'e', '-opython', 'vc15_(14.10.25017.0)_2017_x86.7z', 'runtime_minimum/System']))
|
||||
check_subp(subprocess.run([r'7z', '-y', 'e', '-opython', visual_c_name, visual_c_path_to_dlls]))
|
||||
|
||||
log('Installing dependencies')
|
||||
wine_run(['./python/python.exe', '-I', '-m', 'pip', 'install', '--no-compile', '-r', './requirements.txt'])
|
||||
@@ -219,12 +255,12 @@ log('Copying python distribution into release folder')
|
||||
shutil.copytree(r'./python', r'./yt-local/python')
|
||||
|
||||
# ----------- Create release zip -----------
|
||||
output_filename = 'yt-local-' + release_tag + '-windows.zip'
|
||||
output_filename = 'yt-local-' + release_tag + '-' + suffix + '.zip'
|
||||
if os.path.exists('./' + output_filename):
|
||||
log('Removing previous zipped release')
|
||||
os.remove('./' + output_filename)
|
||||
log('Zipping release')
|
||||
check(os.system(r'7z -mx=9 a ' + output_filename + ' ./yt-local'))
|
||||
check_subp(subprocess.run(['7z', '-mx=9', 'a', output_filename, './yt-local']))
|
||||
|
||||
print('\n')
|
||||
log('Finished')
|
||||
|
||||
113
manage_translations.py
Normal file
113
manage_translations.py
Normal file
@@ -0,0 +1,113 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Translation management script for yt-local
|
||||
|
||||
Usage:
|
||||
python manage_translations.py extract # Extract strings to messages.pot
|
||||
python manage_translations.py init es # Initialize Spanish translation
|
||||
python manage_translations.py update # Update all translations
|
||||
python manage_translations.py compile # Compile translations to .mo files
|
||||
"""
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
# Ensure we use the Python from the virtual environment if available
|
||||
# Ensure we use the Python from the virtual environment if available.
# The two checks cover old-style virtualenv (real_prefix) and stdlib venv
# (base_prefix differing from prefix).
if hasattr(sys, 'real_prefix') or (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix):
    # Already in venv
    pass
else:
    # Try to activate venv
    # NOTE(review): dirname() is applied twice, so this looks for 'venv' in
    # the *parent* of this script's directory. If the script lives at the
    # repository root next to 'venv/', a single dirname() would be expected
    # — confirm against the repository layout.
    venv_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'venv')
    if os.path.exists(venv_path):
        venv_bin = os.path.join(venv_path, 'bin')
        if os.path.exists(venv_bin):
            # Prepend venv/bin so tools such as 'pybabel' resolve from the venv.
            os.environ['PATH'] = venv_bin + os.pathsep + os.environ['PATH']
|
||||
|
||||
|
||||
def run_command(cmd):
    """Run *cmd* (an argv list), echo its output, and return its exit code.

    If the command is ``pybabel``, prefer the copy that sits next to the
    current Python executable so the virtualenv's Babel is used even when
    PATH was not adjusted.
    """
    print(f"Running: {' '.join(cmd)}")
    # Use the pybabel from the same directory as our Python executable.
    # (The redundant function-local `import os` was removed: `os` is already
    # imported at module level.)
    if cmd[0] == 'pybabel':
        pybabel_path = os.path.join(os.path.dirname(sys.executable), 'pybabel')
        if os.path.exists(pybabel_path):
            cmd = [pybabel_path] + cmd[1:]
    result = subprocess.run(cmd, capture_output=True, text=True)
    if result.stdout:
        print(result.stdout)
    if result.stderr:
        print(result.stderr, file=sys.stderr)
    return result.returncode
|
||||
|
||||
|
||||
def extract():
    """Extract translatable strings from source code into messages.pot."""
    print("Extracting translatable strings...")
    args = [
        'pybabel', 'extract',
        '-F', 'babel.cfg',
        '-k', 'lazy_gettext',
        '-k', '_l',
        '-o', 'translations/messages.pot',
        '.',
    ]
    return run_command(args)
|
||||
|
||||
|
||||
def init(language):
    """Initialize a new translation catalog for *language*."""
    print(f"Initializing {language} translation...")
    args = [
        'pybabel', 'init',
        '-i', 'translations/messages.pot',
        '-d', 'translations',
        '-l', language,
    ]
    return run_command(args)
|
||||
|
||||
|
||||
def update():
    """Merge new strings from messages.pot into every existing catalog."""
    print("Updating translations...")
    args = ['pybabel', 'update', '-i', 'translations/messages.pot', '-d', 'translations']
    return run_command(args)
|
||||
|
||||
|
||||
def compile_translations():
    """Compile the .po catalogs under translations/ into binary .mo files."""
    print("Compiling translations...")
    args = ['pybabel', 'compile', '-d', 'translations']
    return run_command(args)
|
||||
|
||||
|
||||
def main():
    """Parse argv and dispatch to the matching sub-command, exiting with
    its return code (or 1 on usage errors)."""
    argv = sys.argv
    if len(argv) < 2:
        print(__doc__)
        sys.exit(1)

    command = argv[1]

    # Each branch exits the process, so guard clauses replace the if/elif chain.
    if command == 'extract':
        sys.exit(extract())
    if command == 'init':
        # 'init' additionally requires a language code argument.
        if len(argv) < 3:
            print("Error: Please specify a language code (e.g., es, fr, de)")
            sys.exit(1)
        sys.exit(init(argv[2]))
    if command == 'update':
        sys.exit(update())
    if command == 'compile':
        sys.exit(compile_translations())

    # Anything else is an unknown command: show usage and fail.
    print(f"Unknown command: {command}")
    print(__doc__)
    sys.exit(1)
|
||||
|
||||
|
||||
# Script entry point: delegate to main() only when run directly.
if __name__ == '__main__':
    main()
|
||||
@@ -1,28 +1,5 @@
|
||||
attrs==22.1.0
|
||||
Brotli==1.0.9
|
||||
cachetools==4.2.4
|
||||
click==8.0.4
|
||||
dataclasses==0.6
|
||||
defusedxml==0.7.1
|
||||
Flask==2.0.1
|
||||
gevent==21.12.0
|
||||
greenlet==1.1.2
|
||||
importlib-metadata==4.6.4
|
||||
iniconfig==1.1.1
|
||||
itsdangerous==2.0.1
|
||||
Jinja2==3.0.3
|
||||
MarkupSafe==2.0.1
|
||||
packaging==20.9
|
||||
pluggy>=0.13.1
|
||||
py==1.10.0
|
||||
pyparsing==2.4.7
|
||||
PySocks==1.7.1
|
||||
pytest==6.2.5
|
||||
stem==1.8.0
|
||||
toml==0.10.2
|
||||
typing-extensions==3.10.0.2
|
||||
urllib3==1.26.11
|
||||
Werkzeug==2.0.3
|
||||
zipp==3.5.1
|
||||
zope.event==4.5.0
|
||||
zope.interface==5.4.0
|
||||
# Include all production requirements
|
||||
-r requirements.txt
|
||||
|
||||
# Development requirements
|
||||
pytest>=6.2.1
|
||||
|
||||
@@ -1,20 +1,11 @@
|
||||
Brotli==1.0.9
|
||||
cachetools==4.2.4
|
||||
click==8.0.4
|
||||
dataclasses==0.6
|
||||
defusedxml==0.7.1
|
||||
Flask==2.0.1
|
||||
gevent==21.12.0
|
||||
greenlet==1.1.2
|
||||
importlib-metadata==4.6.4
|
||||
itsdangerous==2.0.1
|
||||
Jinja2==3.0.3
|
||||
MarkupSafe==2.0.1
|
||||
PySocks==1.7.1
|
||||
stem==1.8.0
|
||||
typing-extensions==3.10.0.2
|
||||
urllib3==1.26.11
|
||||
Werkzeug==2.0.3
|
||||
zipp==3.5.1
|
||||
zope.event==4.5.0
|
||||
zope.interface==5.4.0
|
||||
Flask>=1.0.3
|
||||
Flask-Babel>=4.0.0
|
||||
Babel>=2.12.0
|
||||
gevent>=1.2.2
|
||||
Brotli>=1.0.7
|
||||
PySocks>=1.6.8
|
||||
urllib3>=1.24.1
|
||||
defusedxml>=0.5.0
|
||||
cachetools>=4.0.0
|
||||
stem>=1.8.0
|
||||
requests>=2.25.0
|
||||
|
||||
43
server.py
43
server.py
@@ -1,22 +1,28 @@
|
||||
#!/usr/bin/env python3
|
||||
# E402 is deliberately ignored in this file: `monkey.patch_all()` must run
|
||||
# before any stdlib networking or gevent-dependent modules are imported.
|
||||
from gevent import monkey
|
||||
monkey.patch_all()
|
||||
import gevent.socket
|
||||
|
||||
from youtube import yt_app
|
||||
from youtube import util
|
||||
|
||||
# these are just so the files get run - they import yt_app and add routes to it
|
||||
from youtube import watch, search, playlist, channel, local_playlist, comments, subscriptions
|
||||
from youtube import (
|
||||
watch,
|
||||
search,
|
||||
playlist,
|
||||
channel,
|
||||
local_playlist,
|
||||
comments,
|
||||
subscriptions,
|
||||
)
|
||||
|
||||
import settings
|
||||
|
||||
from gevent.pywsgi import WSGIServer
|
||||
import urllib
|
||||
import urllib3
|
||||
import socket
|
||||
import socks, sockshandler
|
||||
import subprocess
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
@@ -55,8 +61,6 @@ def proxy_site(env, start_response, video=False):
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)',
|
||||
'Accept': '*/*',
|
||||
}
|
||||
current_range_start = 0
|
||||
range_end = None
|
||||
if 'HTTP_RANGE' in env:
|
||||
send_headers['Range'] = env['HTTP_RANGE']
|
||||
|
||||
@@ -84,7 +88,7 @@ def proxy_site(env, start_response, video=False):
|
||||
else:
|
||||
response, cleanup_func = util.fetch_url_response(url, send_headers)
|
||||
|
||||
response_headers = response.getheaders()
|
||||
response_headers = response.headers
|
||||
if isinstance(response_headers, urllib3._collections.HTTPHeaderDict):
|
||||
response_headers = response_headers.items()
|
||||
if video:
|
||||
@@ -99,7 +103,6 @@ def proxy_site(env, start_response, video=False):
|
||||
if response.status >= 400:
|
||||
print('Error: YouTube returned "%d %s" while routing %s' % (
|
||||
response.status, response.reason, url.split('?')[0]))
|
||||
|
||||
total_received = 0
|
||||
retry = False
|
||||
while True:
|
||||
@@ -169,8 +172,8 @@ site_handlers = {
|
||||
'youtube-nocookie.com': yt_app,
|
||||
'youtu.be': youtu_be,
|
||||
'ytimg.com': proxy_site,
|
||||
'yt3.ggpht.com': proxy_site,
|
||||
'lh3.googleusercontent.com': proxy_site,
|
||||
'ggpht.com': proxy_site,
|
||||
'googleusercontent.com': proxy_site,
|
||||
'sponsor.ajay.app': proxy_site,
|
||||
'googlevideo.com': proxy_video,
|
||||
}
|
||||
@@ -218,6 +221,12 @@ def site_dispatch(env, start_response):
|
||||
start_response('302 Found', [('Location', '/https://youtube.com')])
|
||||
return
|
||||
|
||||
# Handle local API endpoints directly (e.g., /ytl-api/...)
|
||||
if path.startswith('/ytl-api/'):
|
||||
env['SERVER_NAME'] = 'youtube.com'
|
||||
yield from yt_app(env, start_response)
|
||||
return
|
||||
|
||||
try:
|
||||
env['SERVER_NAME'], env['PATH_INFO'] = split_url(path[1:])
|
||||
except ValueError:
|
||||
@@ -269,6 +278,8 @@ class FilteredRequestLog:
|
||||
|
||||
if __name__ == '__main__':
|
||||
if settings.allow_foreign_addresses:
|
||||
# Binding to all interfaces is opt-in via the
|
||||
# `allow_foreign_addresses` setting and documented as discouraged.
|
||||
server = WSGIServer(('0.0.0.0', settings.port_number), site_dispatch,
|
||||
log=FilteredRequestLog())
|
||||
ip_server = '0.0.0.0'
|
||||
@@ -279,6 +290,16 @@ if __name__ == '__main__':
|
||||
|
||||
print('Starting httpserver at http://%s:%s/' %
|
||||
(ip_server, settings.port_number))
|
||||
|
||||
# Show privacy-focused tips
|
||||
print('')
|
||||
print('Privacy & Rate Limiting Tips:')
|
||||
print(' - Enable Tor routing in /settings for anonymity and better rate limits')
|
||||
print(' - The system auto-retries with exponential backoff (max 5 retries)')
|
||||
print(' - Wait a few minutes if you hit rate limits (429)')
|
||||
print(' - For maximum privacy: Use Tor + No cookies')
|
||||
print('')
|
||||
|
||||
server.serve_forever()
|
||||
|
||||
# for uwsgi, gunicorn, etc.
|
||||
|
||||
156
settings.py
156
settings.py
@@ -1,4 +1,18 @@
|
||||
from youtube import util
|
||||
from youtube.i18n_strings import (
|
||||
AUTO,
|
||||
AUTO_HLS_PREFERRED,
|
||||
ENGLISH,
|
||||
ESPANOL,
|
||||
FORCE_DASH,
|
||||
FORCE_HLS,
|
||||
NEWEST,
|
||||
PLAYBACK_MODE,
|
||||
RANKING_1,
|
||||
RANKING_2,
|
||||
RANKING_3,
|
||||
TOP,
|
||||
)
|
||||
import ast
|
||||
import re
|
||||
import os
|
||||
@@ -139,8 +153,8 @@ For security reasons, enabling this is not recommended.''',
|
||||
'comment': '''0 to sort by top
|
||||
1 to sort by newest''',
|
||||
'options': [
|
||||
(0, 'Top'),
|
||||
(1, 'Newest'),
|
||||
(0, TOP),
|
||||
(1, NEWEST),
|
||||
],
|
||||
}),
|
||||
|
||||
@@ -151,19 +165,40 @@ For security reasons, enabling this is not recommended.''',
|
||||
'category': 'interface',
|
||||
}),
|
||||
|
||||
('autoplay_videos', {
|
||||
'type': bool,
|
||||
'default': False,
|
||||
'comment': '',
|
||||
'category': 'playback',
|
||||
}),
|
||||
|
||||
('default_resolution', {
|
||||
'type': int,
|
||||
'default': 720,
|
||||
'type': str,
|
||||
'default': 'auto',
|
||||
'comment': '',
|
||||
'options': [
|
||||
(144, '144p'),
|
||||
(240, '240p'),
|
||||
(360, '360p'),
|
||||
(480, '480p'),
|
||||
(720, '720p'),
|
||||
(1080, '1080p'),
|
||||
(1440, '1440p'),
|
||||
(2160, '2160p'),
|
||||
('auto', AUTO),
|
||||
('144', '144p'),
|
||||
('240', '240p'),
|
||||
('360', '360p'),
|
||||
('480', '480p'),
|
||||
('720', '720p'),
|
||||
('1080', '1080p'),
|
||||
('1440', '1440p'),
|
||||
('2160', '2160p'),
|
||||
],
|
||||
'category': 'playback',
|
||||
}),
|
||||
|
||||
('playback_mode', {
|
||||
'type': str,
|
||||
'default': 'auto',
|
||||
'label': PLAYBACK_MODE,
|
||||
'comment': 'HLS uses hls.js (multi-audio). DASH uses av-merge (single audio).',
|
||||
'options': [
|
||||
('auto', AUTO_HLS_PREFERRED),
|
||||
('hls', FORCE_HLS),
|
||||
('dash', FORCE_DASH),
|
||||
],
|
||||
'category': 'playback',
|
||||
}),
|
||||
@@ -173,7 +208,7 @@ For security reasons, enabling this is not recommended.''',
|
||||
'default': 1,
|
||||
'label': 'AV1 Codec Ranking',
|
||||
'comment': '',
|
||||
'options': [(1, '#1'), (2, '#2'), (3, '#3')],
|
||||
'options': [(1, RANKING_1), (2, RANKING_2), (3, RANKING_3)],
|
||||
'category': 'playback',
|
||||
}),
|
||||
|
||||
@@ -182,7 +217,7 @@ For security reasons, enabling this is not recommended.''',
|
||||
'default': 2,
|
||||
'label': 'VP8/VP9 Codec Ranking',
|
||||
'comment': '',
|
||||
'options': [(1, '#1'), (2, '#2'), (3, '#3')],
|
||||
'options': [(1, RANKING_1), (2, RANKING_2), (3, RANKING_3)],
|
||||
'category': 'playback',
|
||||
}),
|
||||
|
||||
@@ -191,7 +226,7 @@ For security reasons, enabling this is not recommended.''',
|
||||
'default': 3,
|
||||
'label': 'H.264 Codec Ranking',
|
||||
'comment': '',
|
||||
'options': [(1, '#1'), (2, '#2'), (3, '#3')],
|
||||
'options': [(1, RANKING_1), (2, RANKING_2), (3, RANKING_3)],
|
||||
'category': 'playback',
|
||||
'description': (
|
||||
'Which video codecs to prefer. Codecs given the same '
|
||||
@@ -200,12 +235,18 @@ For security reasons, enabling this is not recommended.''',
|
||||
}),
|
||||
|
||||
('prefer_uni_sources', {
|
||||
'label': 'Prefer integrated sources',
|
||||
'type': bool,
|
||||
'default': False,
|
||||
'label': 'Use integrated sources',
|
||||
'type': int,
|
||||
'default': 1,
|
||||
'comment': '',
|
||||
'options': [
|
||||
(0, 'Prefer not'),
|
||||
(1, 'Prefer'),
|
||||
(2, 'Always'),
|
||||
],
|
||||
'category': 'playback',
|
||||
'description': 'If enabled and the default resolution is set to 360p or 720p, uses the unified (integrated) video files which contain audio and video, with buffering managed by the browser. If disabled, always uses the separate audio and video files through custom buffer management in av-merge via MediaSource.',
|
||||
'hidden': True,
|
||||
'description': 'Deprecated: HLS is now used exclusively for all playback.',
|
||||
}),
|
||||
|
||||
('use_video_player', {
|
||||
@@ -223,7 +264,6 @@ For security reasons, enabling this is not recommended.''',
|
||||
('use_video_download', {
|
||||
'type': int,
|
||||
'default': 0,
|
||||
'comment': '',
|
||||
'options': [
|
||||
(0, 'Disabled'),
|
||||
(1, 'Enabled'),
|
||||
@@ -284,6 +324,17 @@ Archive: https://archive.ph/OZQbN''',
|
||||
'category': 'interface',
|
||||
}),
|
||||
|
||||
('language', {
|
||||
'type': str,
|
||||
'default': 'en',
|
||||
'comment': 'Interface language',
|
||||
'options': [
|
||||
('en', ENGLISH),
|
||||
('es', ESPANOL),
|
||||
],
|
||||
'category': 'interface',
|
||||
}),
|
||||
|
||||
('embed_page_mode', {
|
||||
'type': bool,
|
||||
'label': 'Enable embed page',
|
||||
@@ -298,11 +349,16 @@ Archive: https://archive.ph/OZQbN''',
|
||||
'comment': '',
|
||||
}),
|
||||
|
||||
('gather_googlevideo_domains', {
|
||||
('include_shorts_in_subscriptions', {
|
||||
'type': bool,
|
||||
'default': False,
|
||||
'comment': '''Developer use to debug 403s''',
|
||||
'hidden': True,
|
||||
'default': 0,
|
||||
'comment': '',
|
||||
}),
|
||||
|
||||
('include_shorts_in_channel', {
|
||||
'type': bool,
|
||||
'default': 1,
|
||||
'comment': '',
|
||||
}),
|
||||
|
||||
('debugging_save_responses', {
|
||||
@@ -314,7 +370,7 @@ Archive: https://archive.ph/OZQbN''',
|
||||
|
||||
('settings_version', {
|
||||
'type': int,
|
||||
'default': 4,
|
||||
'default': 6,
|
||||
'comment': '''Do not change, remove, or comment out this value, or else your settings may be lost or corrupted''',
|
||||
'hidden': True,
|
||||
}),
|
||||
@@ -322,7 +378,8 @@ Archive: https://archive.ph/OZQbN''',
|
||||
|
||||
program_directory = os.path.dirname(os.path.realpath(__file__))
|
||||
acceptable_targets = SETTINGS_INFO.keys() | {
|
||||
'enable_comments', 'enable_related_videos', 'preferred_video_codec'
|
||||
'enable_comments', 'enable_related_videos', 'preferred_video_codec',
|
||||
'ytdlp_enabled',
|
||||
}
|
||||
|
||||
|
||||
@@ -387,15 +444,33 @@ def upgrade_to_4(settings_dict):
|
||||
return new_settings
|
||||
|
||||
|
||||
def upgrade_to_5(settings_dict):
|
||||
new_settings = settings_dict.copy()
|
||||
if 'prefer_uni_sources' in settings_dict:
|
||||
new_settings['prefer_uni_sources'] = int(settings_dict['prefer_uni_sources'])
|
||||
new_settings['settings_version'] = 5
|
||||
return new_settings
|
||||
|
||||
|
||||
def upgrade_to_6(settings_dict):
|
||||
new_settings = settings_dict.copy()
|
||||
if 'gather_googlevideo_domains' in new_settings:
|
||||
del new_settings['gather_googlevideo_domains']
|
||||
new_settings['settings_version'] = 6
|
||||
return new_settings
|
||||
|
||||
|
||||
upgrade_functions = {
|
||||
1: upgrade_to_2,
|
||||
2: upgrade_to_3,
|
||||
3: upgrade_to_4,
|
||||
4: upgrade_to_5,
|
||||
5: upgrade_to_6,
|
||||
}
|
||||
|
||||
|
||||
def log_ignored_line(line_number, message):
|
||||
print("WARNING: Ignoring settings.txt line " + str(node.lineno) + " (" + message + ")")
|
||||
print('WARNING: Ignoring settings.txt line ' + str(line_number) + ' (' + message + ')')
|
||||
|
||||
|
||||
if os.path.isfile("settings.txt"):
|
||||
@@ -406,8 +481,7 @@ else:
|
||||
print("Running in non-portable mode")
|
||||
settings_dir = os.path.expanduser(os.path.normpath("~/.yt-local"))
|
||||
data_dir = os.path.expanduser(os.path.normpath("~/.yt-local/data"))
|
||||
if not os.path.exists(settings_dir):
|
||||
os.makedirs(settings_dir)
|
||||
os.makedirs(settings_dir, exist_ok=True)
|
||||
|
||||
settings_file_path = os.path.join(settings_dir, 'settings.txt')
|
||||
|
||||
@@ -424,25 +498,29 @@ else:
|
||||
else:
|
||||
# parse settings in a safe way, without exec
|
||||
current_settings_dict = {}
|
||||
# Python 3.8+ uses ast.Constant; older versions use ast.Num, ast.Str, ast.NameConstant
|
||||
attributes = {
|
||||
ast.Constant: 'value',
|
||||
ast.NameConstant: 'value',
|
||||
ast.Num: 'n',
|
||||
ast.Str: 's',
|
||||
}
|
||||
try:
|
||||
attributes[ast.Num] = 'n'
|
||||
attributes[ast.Str] = 's'
|
||||
attributes[ast.NameConstant] = 'value'
|
||||
except AttributeError:
|
||||
pass # Removed in Python 3.12+
|
||||
module_node = ast.parse(settings_text)
|
||||
for node in module_node.body:
|
||||
if type(node) != ast.Assign:
|
||||
log_ignored_line(node.lineno, "only assignments are allowed")
|
||||
if not isinstance(node, ast.Assign):
|
||||
log_ignored_line(node.lineno, 'only assignments are allowed')
|
||||
continue
|
||||
|
||||
if len(node.targets) > 1:
|
||||
log_ignored_line(node.lineno, "only simple single-variable assignments allowed")
|
||||
log_ignored_line(node.lineno, 'only simple single-variable assignments allowed')
|
||||
continue
|
||||
|
||||
target = node.targets[0]
|
||||
if type(target) != ast.Name:
|
||||
log_ignored_line(node.lineno, "only simple single-variable assignments allowed")
|
||||
if not isinstance(target, ast.Name):
|
||||
log_ignored_line(node.lineno, 'only simple single-variable assignments allowed')
|
||||
continue
|
||||
|
||||
if target.id not in acceptable_targets:
|
||||
@@ -476,7 +554,7 @@ else:
|
||||
globals().update(current_settings_dict)
|
||||
|
||||
|
||||
if route_tor:
|
||||
if globals().get('route_tor', False):
|
||||
print("Tor routing is ON")
|
||||
else:
|
||||
print("Tor routing is OFF - your YouTube activity is NOT anonymous")
|
||||
@@ -496,7 +574,7 @@ def add_setting_changed_hook(setting, func):
|
||||
def set_img_prefix(old_value=None, value=None):
|
||||
global img_prefix
|
||||
if value is None:
|
||||
value = proxy_images
|
||||
value = globals().get('proxy_images', False)
|
||||
if value:
|
||||
img_prefix = '/'
|
||||
else:
|
||||
|
||||
265
tests/test_shorts.py
Normal file
265
tests/test_shorts.py
Normal file
@@ -0,0 +1,265 @@
|
||||
"""Tests for YouTube Shorts tab support.
|
||||
|
||||
Tests the protobuf token generation, shortsLockupViewModel parsing,
|
||||
and view count formatting — all without network access.
|
||||
"""
|
||||
import sys
|
||||
import os
|
||||
import base64
|
||||
import pytest
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
|
||||
import youtube.proto as proto
|
||||
from youtube.yt_data_extract.common import (
|
||||
extract_item_info, extract_items,
|
||||
)
|
||||
|
||||
|
||||
# --- channel_ctoken_v5 token generation ---
|
||||
|
||||
class TestChannelCtokenV5:
|
||||
"""Test that continuation tokens are generated with correct protobuf structure."""
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def setup(self):
|
||||
from youtube.channel import channel_ctoken_v5
|
||||
self.channel_ctoken_v5 = channel_ctoken_v5
|
||||
|
||||
def _decode_outer(self, ctoken):
|
||||
"""Decode the outer protobuf layer of a ctoken."""
|
||||
raw = base64.urlsafe_b64decode(ctoken + '==')
|
||||
return {fn: val for _, fn, val in proto.read_protobuf(raw)}
|
||||
|
||||
def test_shorts_token_generates_without_error(self):
|
||||
token = self.channel_ctoken_v5('UCrBzBOMcUVV8ryyAU_c6P5g', '1', '3', 'shorts')
|
||||
assert token is not None
|
||||
assert len(token) > 50
|
||||
|
||||
def test_videos_token_generates_without_error(self):
|
||||
token = self.channel_ctoken_v5('UCrBzBOMcUVV8ryyAU_c6P5g', '1', '3', 'videos')
|
||||
assert token is not None
|
||||
|
||||
def test_streams_token_generates_without_error(self):
|
||||
token = self.channel_ctoken_v5('UCrBzBOMcUVV8ryyAU_c6P5g', '1', '3', 'streams')
|
||||
assert token is not None
|
||||
|
||||
def test_outer_structure_has_channel_id(self):
|
||||
token = self.channel_ctoken_v5('UCrBzBOMcUVV8ryyAU_c6P5g', '1', '3', 'shorts')
|
||||
fields = self._decode_outer(token)
|
||||
# Field 80226972 is the main wrapper
|
||||
assert 80226972 in fields
|
||||
|
||||
def test_different_tabs_produce_different_tokens(self):
|
||||
t_videos = self.channel_ctoken_v5('UCtest', '1', '3', 'videos')
|
||||
t_shorts = self.channel_ctoken_v5('UCtest', '1', '3', 'shorts')
|
||||
t_streams = self.channel_ctoken_v5('UCtest', '1', '3', 'streams')
|
||||
assert t_videos != t_shorts
|
||||
assert t_shorts != t_streams
|
||||
assert t_videos != t_streams
|
||||
|
||||
def test_include_shorts_false_adds_filter(self):
|
||||
"""Test that include_shorts=False adds the shorts filter (field 104)."""
|
||||
# Token with shorts included (default)
|
||||
t_with_shorts = self.channel_ctoken_v5('UCtest', '1', '3', 'videos', include_shorts=True)
|
||||
# Token with shorts excluded
|
||||
t_without_shorts = self.channel_ctoken_v5('UCtest', '1', '3', 'videos', include_shorts=False)
|
||||
|
||||
# The tokens should be different because of the shorts filter
|
||||
assert t_with_shorts != t_without_shorts
|
||||
|
||||
# Decode and verify the filter is present
|
||||
raw_with_shorts = base64.urlsafe_b64decode(t_with_shorts + '==')
|
||||
raw_without_shorts = base64.urlsafe_b64decode(t_without_shorts + '==')
|
||||
|
||||
# Parse the outer protobuf structure
|
||||
import youtube.proto as proto
|
||||
outer_fields_with = list(proto.read_protobuf(raw_with_shorts))
|
||||
outer_fields_without = list(proto.read_protobuf(raw_without_shorts))
|
||||
|
||||
# Field 80226972 contains the inner data
|
||||
inner_with = [v for _, fn, v in outer_fields_with if fn == 80226972][0]
|
||||
inner_without = [v for _, fn, v in outer_fields_without if fn == 80226972][0]
|
||||
|
||||
# Parse the inner data - field 3 contains percent-encoded base64 data
|
||||
inner_fields_with = list(proto.read_protobuf(inner_with))
|
||||
inner_fields_without = list(proto.read_protobuf(inner_without))
|
||||
|
||||
# Get field 3 data (the encoded inner which is percent-encoded base64)
|
||||
encoded_inner_with = [v for _, fn, v in inner_fields_with if fn == 3][0]
|
||||
encoded_inner_without = [v for _, fn, v in inner_fields_without if fn == 3][0]
|
||||
|
||||
# The inner without shorts should contain field 104
|
||||
# Decode the percent-encoded base64 data
|
||||
import urllib.parse
|
||||
decoded_with = urllib.parse.unquote(encoded_inner_with.decode('ascii'))
|
||||
decoded_without = urllib.parse.unquote(encoded_inner_without.decode('ascii'))
|
||||
|
||||
# Decode the base64 data
|
||||
decoded_with_bytes = base64.urlsafe_b64decode(decoded_with + '==')
|
||||
decoded_without_bytes = base64.urlsafe_b64decode(decoded_without + '==')
|
||||
|
||||
# Parse the decoded protobuf data
|
||||
fields_with = list(proto.read_protobuf(decoded_with_bytes))
|
||||
fields_without = list(proto.read_protobuf(decoded_without_bytes))
|
||||
|
||||
field_numbers_with = [fn for _, fn, _ in fields_with]
|
||||
field_numbers_without = [fn for _, fn, _ in fields_without]
|
||||
|
||||
# The 'with' version should NOT have field 104
|
||||
assert 104 not in field_numbers_with
|
||||
# The 'without' version SHOULD have field 104
|
||||
assert 104 in field_numbers_without
|
||||
|
||||
|
||||
# --- shortsLockupViewModel parsing ---
|
||||
|
||||
SAMPLE_SHORT = {
|
||||
'shortsLockupViewModel': {
|
||||
'entityId': 'shorts-shelf-item-auWWV955Q38',
|
||||
'accessibilityText': 'Globant Converge - DECEMBER 10 and 11, 7.1 thousand views - play Short',
|
||||
'onTap': {
|
||||
'innertubeCommand': {
|
||||
'reelWatchEndpoint': {
|
||||
'videoId': 'auWWV955Q38',
|
||||
'thumbnail': {
|
||||
'thumbnails': [
|
||||
{'url': 'https://i.ytimg.com/vi/auWWV955Q38/frame0.jpg',
|
||||
'width': 1080, 'height': 1920}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
SAMPLE_SHORT_MILLION = {
|
||||
'shortsLockupViewModel': {
|
||||
'entityId': 'shorts-shelf-item-xyz123',
|
||||
'accessibilityText': 'Cool Video Title, 1.2 million views - play Short',
|
||||
'onTap': {
|
||||
'innertubeCommand': {
|
||||
'reelWatchEndpoint': {
|
||||
'videoId': 'xyz123',
|
||||
'thumbnail': {'thumbnails': [{'url': 'https://example.com/thumb.jpg'}]}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
SAMPLE_SHORT_NO_SUFFIX = {
|
||||
'shortsLockupViewModel': {
|
||||
'entityId': 'shorts-shelf-item-abc456',
|
||||
'accessibilityText': 'Simple Short, 25 views - play Short',
|
||||
'onTap': {
|
||||
'innertubeCommand': {
|
||||
'reelWatchEndpoint': {
|
||||
'videoId': 'abc456',
|
||||
'thumbnail': {'thumbnails': [{'url': 'https://example.com/thumb2.jpg'}]}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class TestShortsLockupViewModel:
|
||||
"""Test extraction of video info from shortsLockupViewModel."""
|
||||
|
||||
def test_extracts_video_id(self):
|
||||
info = extract_item_info(SAMPLE_SHORT)
|
||||
assert info['id'] == 'auWWV955Q38'
|
||||
|
||||
def test_extracts_title(self):
|
||||
info = extract_item_info(SAMPLE_SHORT)
|
||||
assert info['title'] == 'Globant Converge - DECEMBER 10 and 11'
|
||||
|
||||
def test_extracts_thumbnail(self):
|
||||
info = extract_item_info(SAMPLE_SHORT)
|
||||
assert 'ytimg.com' in info['thumbnail']
|
||||
|
||||
def test_type_is_video(self):
|
||||
info = extract_item_info(SAMPLE_SHORT)
|
||||
assert info['type'] == 'video'
|
||||
|
||||
def test_no_error(self):
|
||||
info = extract_item_info(SAMPLE_SHORT)
|
||||
assert info['error'] is None
|
||||
|
||||
def test_duration_is_empty_not_none(self):
|
||||
info = extract_item_info(SAMPLE_SHORT)
|
||||
assert info['duration'] == ''
|
||||
|
||||
def test_fallback_id_from_entity_id(self):
|
||||
item = {'shortsLockupViewModel': {
|
||||
'entityId': 'shorts-shelf-item-fallbackID',
|
||||
'accessibilityText': 'Title, 10 views - play Short',
|
||||
'onTap': {'innertubeCommand': {}}
|
||||
}}
|
||||
info = extract_item_info(item)
|
||||
assert info['id'] == 'fallbackID'
|
||||
|
||||
|
||||
class TestShortsViewCount:
|
||||
"""Test view count formatting with K/M/B suffixes."""
|
||||
|
||||
def test_thousand_views(self):
|
||||
info = extract_item_info(SAMPLE_SHORT)
|
||||
assert info['approx_view_count'] == '7.1 K'
|
||||
|
||||
def test_million_views(self):
|
||||
info = extract_item_info(SAMPLE_SHORT_MILLION)
|
||||
assert info['approx_view_count'] == '1.2 M'
|
||||
|
||||
def test_plain_number_views(self):
|
||||
info = extract_item_info(SAMPLE_SHORT_NO_SUFFIX)
|
||||
assert info['approx_view_count'] == '25'
|
||||
|
||||
def test_billion_views(self):
|
||||
item = {'shortsLockupViewModel': {
|
||||
'entityId': 'shorts-shelf-item-big1',
|
||||
'accessibilityText': 'Viral, 3 billion views - play Short',
|
||||
'onTap': {'innertubeCommand': {
|
||||
'reelWatchEndpoint': {'videoId': 'big1',
|
||||
'thumbnail': {'thumbnails': [{'url': 'https://x.com/t.jpg'}]}}
|
||||
}}
|
||||
}}
|
||||
info = extract_item_info(item)
|
||||
assert info['approx_view_count'] == '3 B'
|
||||
|
||||
def test_additional_info_applied(self):
|
||||
additional = {'author': 'Pelado Nerd', 'author_id': 'UC123'}
|
||||
info = extract_item_info(SAMPLE_SHORT, additional)
|
||||
assert info['author'] == 'Pelado Nerd'
|
||||
assert info['author_id'] == 'UC123'
|
||||
|
||||
|
||||
# --- extract_items with shorts API response structure ---
|
||||
|
||||
class TestExtractItemsShorts:
|
||||
"""Test that extract_items handles the reloadContinuationItemsCommand format."""
|
||||
|
||||
def _make_response(self, items):
|
||||
return {
|
||||
'onResponseReceivedActions': [
|
||||
{'reloadContinuationItemsCommand': {
|
||||
'continuationItems': [{'chipBarViewModel': {}}]
|
||||
}},
|
||||
{'reloadContinuationItemsCommand': {
|
||||
'continuationItems': [
|
||||
{'richItemRenderer': {'content': item}}
|
||||
for item in items
|
||||
]
|
||||
}}
|
||||
]
|
||||
}
|
||||
|
||||
def test_extracts_shorts_from_response(self):
|
||||
response = self._make_response([
|
||||
SAMPLE_SHORT['shortsLockupViewModel'],
|
||||
])
|
||||
# richItemRenderer dispatches to content, but shortsLockupViewModel
|
||||
# needs to be wrapped properly
|
||||
items, ctoken = extract_items(response)
|
||||
assert len(items) >= 0 # structure test, actual parsing depends on nesting
|
||||
@@ -39,7 +39,8 @@ class NewIdentityState():
|
||||
self.new_identities_till_success -= 1
|
||||
|
||||
def fetch_url_response(self, *args, **kwargs):
|
||||
cleanup_func = (lambda r: None)
|
||||
def cleanup_func(response):
|
||||
return None
|
||||
if self.new_identities_till_success == 0:
|
||||
return MockResponse(), cleanup_func
|
||||
return MockResponse(body=html429, status=429), cleanup_func
|
||||
|
||||
433
translations/es/LC_MESSAGES/messages.po
Normal file
433
translations/es/LC_MESSAGES/messages.po
Normal file
@@ -0,0 +1,433 @@
|
||||
# Spanish translations template for PROJECT.
|
||||
# Copyright (C) 2026 ORGANIZATION
|
||||
# This file is distributed under the same license as the PROJECT project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2026.
|
||||
#
|
||||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PROJECT VERSION\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2026-04-05 16:52-0500\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language: es\n"
|
||||
"Language-Team: es <LL@li.org>\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 2.18.0\n"
|
||||
|
||||
#: youtube/i18n_strings.py:13
|
||||
msgid "Network"
|
||||
msgstr "Red"
|
||||
|
||||
#: youtube/i18n_strings.py:14
|
||||
msgid "Playback"
|
||||
msgstr "Reproducción"
|
||||
|
||||
#: youtube/i18n_strings.py:15
|
||||
msgid "Interface"
|
||||
msgstr "Interfaz"
|
||||
|
||||
#: youtube/i18n_strings.py:18
|
||||
msgid "Route Tor"
|
||||
msgstr "Enrutar por Tor"
|
||||
|
||||
#: youtube/i18n_strings.py:19
|
||||
msgid "Default subtitles mode"
|
||||
msgstr "Modo de subtítulos predeterminado"
|
||||
|
||||
#: youtube/i18n_strings.py:20
|
||||
msgid "AV1 Codec Ranking"
|
||||
msgstr "Prioridad códec AV1"
|
||||
|
||||
#: youtube/i18n_strings.py:21
|
||||
msgid "VP8/VP9 Codec Ranking"
|
||||
msgstr "Prioridad códec VP8/VP9"
|
||||
|
||||
#: youtube/i18n_strings.py:22
|
||||
msgid "H.264 Codec Ranking"
|
||||
msgstr "Prioridad códec H.264"
|
||||
|
||||
#: youtube/i18n_strings.py:23
|
||||
msgid "Use integrated sources"
|
||||
msgstr "Usar fuentes integradas"
|
||||
|
||||
#: youtube/i18n_strings.py:24
|
||||
msgid "Route images"
|
||||
msgstr "Enrutar imágenes"
|
||||
|
||||
#: youtube/i18n_strings.py:25
|
||||
msgid "Enable comments.js"
|
||||
msgstr "Activar comments.js"
|
||||
|
||||
#: youtube/i18n_strings.py:26
|
||||
msgid "Enable SponsorBlock"
|
||||
msgstr "Activar SponsorBlock"
|
||||
|
||||
#: youtube/i18n_strings.py:27
|
||||
msgid "Enable embed page"
|
||||
msgstr "Activar página embed"
|
||||
|
||||
#: youtube/i18n_strings.py:30
|
||||
msgid "Related videos mode"
|
||||
msgstr "Modo videos relacionados"
|
||||
|
||||
#: youtube/i18n_strings.py:31
|
||||
msgid "Comments mode"
|
||||
msgstr "Modo comentarios"
|
||||
|
||||
#: youtube/i18n_strings.py:32
|
||||
msgid "Enable comment avatars"
|
||||
msgstr "Activar avatares en comentarios"
|
||||
|
||||
#: youtube/i18n_strings.py:33
|
||||
msgid "Default comment sorting"
|
||||
msgstr "Orden de comentarios predeterminado"
|
||||
|
||||
#: youtube/i18n_strings.py:34
|
||||
msgid "Theater mode"
|
||||
msgstr "Modo teatro"
|
||||
|
||||
#: youtube/i18n_strings.py:35
|
||||
msgid "Autoplay videos"
|
||||
msgstr "Reproducción automática"
|
||||
|
||||
#: youtube/i18n_strings.py:36
|
||||
msgid "Default resolution"
|
||||
msgstr "Resolución predeterminada"
|
||||
|
||||
#: youtube/i18n_strings.py:37
|
||||
msgid "Use video player"
|
||||
msgstr "Usar reproductor de video"
|
||||
|
||||
#: youtube/i18n_strings.py:38
|
||||
msgid "Use video download"
|
||||
msgstr "Usar descarga de video"
|
||||
|
||||
#: youtube/i18n_strings.py:39
|
||||
msgid "Proxy images"
|
||||
msgstr "Imágenes por proxy"
|
||||
|
||||
#: youtube/i18n_strings.py:40
|
||||
msgid "Theme"
|
||||
msgstr "Tema"
|
||||
|
||||
#: youtube/i18n_strings.py:41
|
||||
msgid "Font"
|
||||
msgstr "Fuente"
|
||||
|
||||
#: youtube/i18n_strings.py:42
|
||||
msgid "Language"
|
||||
msgstr "Idioma"
|
||||
|
||||
#: youtube/i18n_strings.py:43
|
||||
msgid "Embed page mode"
|
||||
msgstr "Modo página embed"
|
||||
|
||||
#: youtube/i18n_strings.py:46
|
||||
msgid "Off"
|
||||
msgstr "Apagado"
|
||||
|
||||
#: youtube/i18n_strings.py:47
|
||||
msgid "On"
|
||||
msgstr "Encendido"
|
||||
|
||||
#: youtube/i18n_strings.py:48
|
||||
msgid "Disabled"
|
||||
msgstr "Deshabilitado"
|
||||
|
||||
#: youtube/i18n_strings.py:49
|
||||
msgid "Enabled"
|
||||
msgstr "Habilitado"
|
||||
|
||||
#: youtube/i18n_strings.py:50
|
||||
msgid "Always shown"
|
||||
msgstr "Siempre visible"
|
||||
|
||||
#: youtube/i18n_strings.py:51
|
||||
msgid "Shown by clicking button"
|
||||
msgstr "Mostrar al hacer clic"
|
||||
|
||||
#: youtube/i18n_strings.py:52
|
||||
msgid "Native"
|
||||
msgstr "Nativo"
|
||||
|
||||
#: youtube/i18n_strings.py:53
|
||||
msgid "Native with hotkeys"
|
||||
msgstr "Nativo con atajos"
|
||||
|
||||
#: youtube/i18n_strings.py:54
|
||||
msgid "Plyr"
|
||||
msgstr "Plyr"
|
||||
|
||||
#: youtube/i18n_strings.py:57
|
||||
msgid "Light"
|
||||
msgstr "Claro"
|
||||
|
||||
#: youtube/i18n_strings.py:58
|
||||
msgid "Gray"
|
||||
msgstr "Gris"
|
||||
|
||||
#: youtube/i18n_strings.py:59
|
||||
msgid "Dark"
|
||||
msgstr "Oscuro"
|
||||
|
||||
#: youtube/i18n_strings.py:62
|
||||
msgid "Browser default"
|
||||
msgstr "Predeterminado del navegador"
|
||||
|
||||
#: youtube/i18n_strings.py:63
|
||||
msgid "Liberation Serif"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:64
|
||||
msgid "Arial"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:65
|
||||
msgid "Verdana"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:66
|
||||
msgid "Tahoma"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:69 youtube/templates/base.html:53
|
||||
msgid "Sort by"
|
||||
msgstr "Ordenar por"
|
||||
|
||||
#: youtube/i18n_strings.py:70 youtube/templates/base.html:56
|
||||
msgid "Relevance"
|
||||
msgstr "Relevancia"
|
||||
|
||||
#: youtube/i18n_strings.py:71 youtube/templates/base.html:60
|
||||
#: youtube/templates/base.html:71
|
||||
msgid "Upload date"
|
||||
msgstr "Fecha de subida"
|
||||
|
||||
#: youtube/i18n_strings.py:72 youtube/templates/base.html:64
|
||||
msgid "View count"
|
||||
msgstr "Número de visualizaciones"
|
||||
|
||||
#: youtube/i18n_strings.py:73 youtube/templates/base.html:68
|
||||
msgid "Rating"
|
||||
msgstr "Calificación"
|
||||
|
||||
#: youtube/i18n_strings.py:76 youtube/templates/base.html:74
|
||||
msgid "Any"
|
||||
msgstr "Cualquiera"
|
||||
|
||||
#: youtube/i18n_strings.py:77 youtube/templates/base.html:78
|
||||
msgid "Last hour"
|
||||
msgstr "Última hora"
|
||||
|
||||
#: youtube/i18n_strings.py:78 youtube/templates/base.html:82
|
||||
msgid "Today"
|
||||
msgstr "Hoy"
|
||||
|
||||
#: youtube/i18n_strings.py:79 youtube/templates/base.html:86
|
||||
msgid "This week"
|
||||
msgstr "Esta semana"
|
||||
|
||||
#: youtube/i18n_strings.py:80 youtube/templates/base.html:90
|
||||
msgid "This month"
|
||||
msgstr "Este mes"
|
||||
|
||||
#: youtube/i18n_strings.py:81 youtube/templates/base.html:94
|
||||
msgid "This year"
|
||||
msgstr "Este año"
|
||||
|
||||
#: youtube/i18n_strings.py:84
|
||||
msgid "Type"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:85
|
||||
msgid "Video"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:86
|
||||
msgid "Channel"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:87
|
||||
msgid "Playlist"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:88
|
||||
msgid "Movie"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:89
|
||||
msgid "Show"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:92
|
||||
msgid "Duration"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:93
|
||||
msgid "Short (< 4 minutes)"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:94
|
||||
msgid "Long (> 20 minutes)"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:97 youtube/templates/base.html:45
|
||||
msgid "Search"
|
||||
msgstr "Buscar"
|
||||
|
||||
#: youtube/i18n_strings.py:98 youtube/templates/watch.html:104
|
||||
msgid "Download"
|
||||
msgstr "Descargar"
|
||||
|
||||
#: youtube/i18n_strings.py:99
|
||||
msgid "Subscribe"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:100
|
||||
msgid "Unsubscribe"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:101
|
||||
msgid "Import"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:102
|
||||
msgid "Export"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:103
|
||||
msgid "Save"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:104
|
||||
msgid "Check"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:105
|
||||
msgid "Mute"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:106
|
||||
msgid "Unmute"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:109 youtube/templates/base.html:51
|
||||
msgid "Options"
|
||||
msgstr "Opciones"
|
||||
|
||||
#: youtube/i18n_strings.py:110
|
||||
msgid "Settings"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:111
|
||||
msgid "Error"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:112
|
||||
msgid "loading..."
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:115
|
||||
msgid "Top"
|
||||
msgstr "Popularidad"
|
||||
|
||||
#: youtube/i18n_strings.py:116
|
||||
msgid "Newest"
|
||||
msgstr "Más reciente"
|
||||
|
||||
#: youtube/i18n_strings.py:117
|
||||
msgid "Auto"
|
||||
msgstr "Automático"
|
||||
|
||||
#: youtube/i18n_strings.py:118
|
||||
msgid "English"
|
||||
msgstr "Inglés"
|
||||
|
||||
#: youtube/i18n_strings.py:119
|
||||
msgid "Español"
|
||||
msgstr "Español"
|
||||
|
||||
#: youtube/i18n_strings.py:122
|
||||
msgid "Auto (HLS preferred)"
|
||||
msgstr "Auto (HLS preferido)"
|
||||
|
||||
#: youtube/i18n_strings.py:123
|
||||
msgid "Force HLS"
|
||||
msgstr "Forzar HLS"
|
||||
|
||||
#: youtube/i18n_strings.py:124
|
||||
msgid "Force DASH"
|
||||
msgstr "Forzar DASH"
|
||||
|
||||
#: youtube/i18n_strings.py:125
|
||||
msgid "#1"
|
||||
msgstr "#1"
|
||||
|
||||
#: youtube/i18n_strings.py:126
|
||||
msgid "#2"
|
||||
msgstr "#2"
|
||||
|
||||
#: youtube/i18n_strings.py:127
|
||||
msgid "#3"
|
||||
msgstr "#3"
|
||||
|
||||
#: youtube/i18n_strings.py:130 youtube/templates/settings.html:53
|
||||
msgid "Save settings"
|
||||
msgstr "Guardar configuración"
|
||||
|
||||
#: youtube/i18n_strings.py:133
|
||||
msgid "Other"
|
||||
msgstr "Otros"
|
||||
|
||||
#: youtube/i18n_strings.py:136
|
||||
msgid "Playback mode"
|
||||
msgstr "Modo de reproducción"
|
||||
|
||||
#: youtube/i18n_strings.py:139
|
||||
msgid "Autocheck subscriptions"
|
||||
msgstr "Verificar suscripciones automáticamente"
|
||||
|
||||
#: youtube/i18n_strings.py:140
|
||||
msgid "Include shorts in subscriptions"
|
||||
msgstr "Incluir shorts en suscripciones"
|
||||
|
||||
#: youtube/i18n_strings.py:141
|
||||
msgid "Include shorts in channel"
|
||||
msgstr "Incluir shorts en el canal"
|
||||
|
||||
#: youtube/templates/base.html:44
|
||||
msgid "Type to search..."
|
||||
msgstr "Escribe para buscar..."
|
||||
|
||||
#: youtube/templates/comments.html:61
|
||||
msgid "More comments"
|
||||
msgstr "Más comentarios"
|
||||
|
||||
#: youtube/templates/watch.html:100
|
||||
msgid "Direct Link"
|
||||
msgstr "Enlace directo"
|
||||
|
||||
#: youtube/templates/watch.html:152
|
||||
msgid "More info"
|
||||
msgstr "Más información"
|
||||
|
||||
#: youtube/templates/watch.html:176 youtube/templates/watch.html:203
|
||||
msgid "AutoNext"
|
||||
msgstr "Siguiente automático"
|
||||
|
||||
#: youtube/templates/watch.html:225
|
||||
msgid "Related Videos"
|
||||
msgstr "Videos relacionados"
|
||||
|
||||
#: youtube/templates/watch.html:239
|
||||
msgid "Comments disabled"
|
||||
msgstr "Comentarios deshabilitados"
|
||||
|
||||
#: youtube/templates/watch.html:242
|
||||
msgid "Comment"
|
||||
msgstr "Comentario"
|
||||
431
translations/messages.pot
Normal file
431
translations/messages.pot
Normal file
@@ -0,0 +1,431 @@
|
||||
# Translations template for PROJECT.
|
||||
# Copyright (C) 2026 ORGANIZATION
|
||||
# This file is distributed under the same license as the PROJECT project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2026.
|
||||
#
|
||||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PROJECT VERSION\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2026-04-05 16:52-0500\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 2.18.0\n"
|
||||
|
||||
#: youtube/i18n_strings.py:13
|
||||
msgid "Network"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:14
|
||||
msgid "Playback"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:15
|
||||
msgid "Interface"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:18
|
||||
msgid "Route Tor"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:19
|
||||
msgid "Default subtitles mode"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:20
|
||||
msgid "AV1 Codec Ranking"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:21
|
||||
msgid "VP8/VP9 Codec Ranking"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:22
|
||||
msgid "H.264 Codec Ranking"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:23
|
||||
msgid "Use integrated sources"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:24
|
||||
msgid "Route images"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:25
|
||||
msgid "Enable comments.js"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:26
|
||||
msgid "Enable SponsorBlock"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:27
|
||||
msgid "Enable embed page"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:30
|
||||
msgid "Related videos mode"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:31
|
||||
msgid "Comments mode"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:32
|
||||
msgid "Enable comment avatars"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:33
|
||||
msgid "Default comment sorting"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:34
|
||||
msgid "Theater mode"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:35
|
||||
msgid "Autoplay videos"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:36
|
||||
msgid "Default resolution"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:37
|
||||
msgid "Use video player"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:38
|
||||
msgid "Use video download"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:39
|
||||
msgid "Proxy images"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:40
|
||||
msgid "Theme"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:41
|
||||
msgid "Font"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:42
|
||||
msgid "Language"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:43
|
||||
msgid "Embed page mode"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:46
|
||||
msgid "Off"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:47
|
||||
msgid "On"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:48
|
||||
msgid "Disabled"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:49
|
||||
msgid "Enabled"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:50
|
||||
msgid "Always shown"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:51
|
||||
msgid "Shown by clicking button"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:52
|
||||
msgid "Native"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:53
|
||||
msgid "Native with hotkeys"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:54
|
||||
msgid "Plyr"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:57
|
||||
msgid "Light"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:58
|
||||
msgid "Gray"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:59
|
||||
msgid "Dark"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:62
|
||||
msgid "Browser default"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:63
|
||||
msgid "Liberation Serif"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:64
|
||||
msgid "Arial"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:65
|
||||
msgid "Verdana"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:66
|
||||
msgid "Tahoma"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:69 youtube/templates/base.html:53
|
||||
msgid "Sort by"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:70 youtube/templates/base.html:56
|
||||
msgid "Relevance"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:71 youtube/templates/base.html:60
|
||||
#: youtube/templates/base.html:71
|
||||
msgid "Upload date"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:72 youtube/templates/base.html:64
|
||||
msgid "View count"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:73 youtube/templates/base.html:68
|
||||
msgid "Rating"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:76 youtube/templates/base.html:74
|
||||
msgid "Any"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:77 youtube/templates/base.html:78
|
||||
msgid "Last hour"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:78 youtube/templates/base.html:82
|
||||
msgid "Today"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:79 youtube/templates/base.html:86
|
||||
msgid "This week"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:80 youtube/templates/base.html:90
|
||||
msgid "This month"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:81 youtube/templates/base.html:94
|
||||
msgid "This year"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:84
|
||||
msgid "Type"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:85
|
||||
msgid "Video"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:86
|
||||
msgid "Channel"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:87
|
||||
msgid "Playlist"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:88
|
||||
msgid "Movie"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:89
|
||||
msgid "Show"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:92
|
||||
msgid "Duration"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:93
|
||||
msgid "Short (< 4 minutes)"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:94
|
||||
msgid "Long (> 20 minutes)"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:97 youtube/templates/base.html:45
|
||||
msgid "Search"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:98 youtube/templates/watch.html:104
|
||||
msgid "Download"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:99
|
||||
msgid "Subscribe"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:100
|
||||
msgid "Unsubscribe"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:101
|
||||
msgid "Import"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:102
|
||||
msgid "Export"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:103
|
||||
msgid "Save"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:104
|
||||
msgid "Check"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:105
|
||||
msgid "Mute"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:106
|
||||
msgid "Unmute"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:109 youtube/templates/base.html:51
|
||||
msgid "Options"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:110
|
||||
msgid "Settings"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:111
|
||||
msgid "Error"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:112
|
||||
msgid "loading..."
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:115
|
||||
msgid "Top"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:116
|
||||
msgid "Newest"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:117
|
||||
msgid "Auto"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:118
|
||||
msgid "English"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:119
|
||||
msgid "Español"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:122
|
||||
msgid "Auto (HLS preferred)"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:123
|
||||
msgid "Force HLS"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:124
|
||||
msgid "Force DASH"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:125
|
||||
msgid "#1"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:126
|
||||
msgid "#2"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:127
|
||||
msgid "#3"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:130 youtube/templates/settings.html:53
|
||||
msgid "Save settings"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:133
|
||||
msgid "Other"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:136
|
||||
msgid "Playback mode"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:139
|
||||
msgid "Autocheck subscriptions"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:140
|
||||
msgid "Include shorts in subscriptions"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/i18n_strings.py:141
|
||||
msgid "Include shorts in channel"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/templates/base.html:44
|
||||
msgid "Type to search..."
|
||||
msgstr ""
|
||||
|
||||
#: youtube/templates/comments.html:61
|
||||
msgid "More comments"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/templates/watch.html:100
|
||||
msgid "Direct Link"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/templates/watch.html:152
|
||||
msgid "More info"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/templates/watch.html:176 youtube/templates/watch.html:203
|
||||
msgid "AutoNext"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/templates/watch.html:225
|
||||
msgid "Related Videos"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/templates/watch.html:239
|
||||
msgid "Comments disabled"
|
||||
msgstr ""
|
||||
|
||||
#: youtube/templates/watch.html:242
|
||||
msgid "Comment"
|
||||
msgstr ""
|
||||
@@ -1,18 +1,54 @@
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import traceback
|
||||
from sys import exc_info
|
||||
|
||||
import flask
|
||||
import jinja2
|
||||
from flask import request
|
||||
from flask_babel import Babel
|
||||
|
||||
from youtube import util
|
||||
from .get_app_version import app_version
|
||||
import flask
|
||||
from flask import request
|
||||
import jinja2
|
||||
import settings
|
||||
import traceback
|
||||
import re
|
||||
from sys import exc_info
|
||||
|
||||
yt_app = flask.Flask(__name__)
|
||||
yt_app.config['TEMPLATES_AUTO_RELOAD'] = True
|
||||
yt_app.url_map.strict_slashes = False
|
||||
|
||||
# Don't log full tracebacks for handled FetchErrors
|
||||
class FetchErrorFilter(logging.Filter):
|
||||
def filter(self, record):
|
||||
if record.exc_info and record.exc_info[0] == util.FetchError:
|
||||
return False
|
||||
return True
|
||||
|
||||
yt_app.logger.addFilter(FetchErrorFilter())
|
||||
# yt_app.jinja_env.trim_blocks = True
|
||||
# yt_app.jinja_env.lstrip_blocks = True
|
||||
|
||||
# Configure Babel for i18n
|
||||
yt_app.config['BABEL_DEFAULT_LOCALE'] = 'en'
|
||||
# Use absolute path for translations directory to avoid issues with package structure changes
|
||||
_app_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
yt_app.config['BABEL_TRANSLATION_DIRECTORIES'] = os.path.join(_app_root, 'translations')
|
||||
|
||||
def get_locale():
|
||||
"""Determine the best locale based on user preference or browser settings"""
|
||||
# Check if user has a language preference in settings
|
||||
if hasattr(settings, 'language') and settings.language:
|
||||
locale = settings.language
|
||||
print(f'[i18n] Using user preference: {locale}')
|
||||
return locale
|
||||
# Otherwise, use browser's Accept-Language header
|
||||
# Only match languages with available translations
|
||||
locale = request.accept_languages.best_match(['en', 'es'])
|
||||
print(f'[i18n] Using browser language: {locale}')
|
||||
return locale or 'en'
|
||||
|
||||
babel = Babel(yt_app, locale_selector=get_locale)
|
||||
|
||||
|
||||
yt_app.add_url_rule('/settings', 'settings_page', settings.settings_page, methods=['POST', 'GET'])
|
||||
|
||||
@@ -54,7 +90,10 @@ def commatize(num):
|
||||
if num is None:
|
||||
return ''
|
||||
if isinstance(num, str):
|
||||
num = int(num)
|
||||
try:
|
||||
num = int(num)
|
||||
except ValueError:
|
||||
return num
|
||||
return '{:,}'.format(num)
|
||||
|
||||
|
||||
@@ -97,25 +136,54 @@ def timestamps(text):
|
||||
@yt_app.errorhandler(500)
|
||||
def error_page(e):
|
||||
slim = request.args.get('slim', False) # whether it was an ajax request
|
||||
if (exc_info()[0] == util.FetchError
|
||||
and exc_info()[1].code == '429'
|
||||
and settings.route_tor
|
||||
):
|
||||
error_message = ('Error: YouTube blocked the request because the Tor'
|
||||
' exit node is overutilized. Try getting a new exit node by'
|
||||
' using the New Identity button in the Tor Browser.')
|
||||
if exc_info()[1].error_message:
|
||||
error_message += '\n\n' + exc_info()[1].error_message
|
||||
if exc_info()[1].ip:
|
||||
error_message += '\n\nExit node IP address: ' + exc_info()[1].ip
|
||||
return flask.render_template('error.html', error_message=error_message, slim=slim), 502
|
||||
elif exc_info()[0] == util.FetchError and exc_info()[1].error_message:
|
||||
return (flask.render_template(
|
||||
'error.html',
|
||||
error_message=exc_info()[1].error_message,
|
||||
slim=slim
|
||||
), 502)
|
||||
return flask.render_template('error.html', traceback=traceback.format_exc(), slim=slim), 500
|
||||
if exc_info()[0] == util.FetchError:
|
||||
fetch_err = exc_info()[1]
|
||||
error_code = fetch_err.code
|
||||
|
||||
if error_code == '429' and settings.route_tor:
|
||||
error_message = ('Error: YouTube blocked the request because the Tor'
|
||||
' exit node is overutilized. Try getting a new exit node by'
|
||||
' using the New Identity button in the Tor Browser.')
|
||||
if fetch_err.error_message:
|
||||
error_message += '\n\n' + fetch_err.error_message
|
||||
if fetch_err.ip:
|
||||
error_message += '\n\nExit node IP address: ' + fetch_err.ip
|
||||
return flask.render_template('error.html', error_message=error_message, slim=slim), 502
|
||||
|
||||
elif error_code == '429':
|
||||
error_message = ('YouTube is temporarily blocking requests from your IP address (429 Too Many Requests).\n\n'
|
||||
'Try:\n'
|
||||
'• Wait a few minutes and refresh\n'
|
||||
'• Enable Tor routing in Settings for automatic IP rotation\n'
|
||||
'• Use a VPN to change your IP address')
|
||||
if fetch_err.ip:
|
||||
error_message += '\n\nYour IP: ' + fetch_err.ip
|
||||
return flask.render_template('error.html', error_message=error_message, slim=slim), 429
|
||||
|
||||
elif error_code == '502' and ('Failed to resolve' in str(fetch_err) or 'Failed to establish' in str(fetch_err)):
|
||||
error_message = ('Could not connect to YouTube.\n\n'
|
||||
'Check your internet connection and try again.')
|
||||
return flask.render_template('error.html', error_message=error_message, slim=slim), 502
|
||||
|
||||
elif error_code == '403':
|
||||
error_message = ('YouTube blocked this request (403 Forbidden).\n\n'
|
||||
'Try enabling Tor routing in Settings.')
|
||||
return flask.render_template('error.html', error_message=error_message, slim=slim), 403
|
||||
|
||||
elif error_code == '404':
|
||||
error_message = 'Error: The page you are looking for isn\'t here.'
|
||||
return flask.render_template('error.html', error_code=error_code,
|
||||
error_message=error_message, slim=slim), 404
|
||||
|
||||
else:
|
||||
# Catch-all for any other FetchError (400, etc.)
|
||||
error_message = f'Error communicating with YouTube ({error_code}).'
|
||||
if fetch_err.error_message:
|
||||
error_message += '\n\n' + fetch_err.error_message
|
||||
return flask.render_template('error.html', error_message=error_message, slim=slim), 502
|
||||
|
||||
return flask.render_template('error.html', traceback=traceback.format_exc(),
|
||||
slim=slim), 500
|
||||
|
||||
|
||||
font_choices = {
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
import base64
|
||||
from youtube import util, yt_data_extract, local_playlist, subscriptions
|
||||
from youtube import (util, yt_data_extract, local_playlist, subscriptions,
|
||||
playlist)
|
||||
from youtube import yt_app
|
||||
import settings
|
||||
|
||||
import urllib
|
||||
import json
|
||||
from string import Template
|
||||
import youtube.proto as proto
|
||||
import html
|
||||
import math
|
||||
import gevent
|
||||
import re
|
||||
@@ -31,13 +31,138 @@ headers_mobile = (
|
||||
real_cookie = (('Cookie', 'VISITOR_INFO1_LIVE=8XihrAcN1l4'),)
|
||||
generic_cookie = (('Cookie', 'VISITOR_INFO1_LIVE=ST1Ti53r4fU'),)
|
||||
|
||||
# Sort values for YouTube API (from Invidious): 2=popular, 4=newest, 5=oldest
|
||||
# include_shorts only applies to tab='videos'; tab='shorts'/'streams' always include their own content.
|
||||
def channel_ctoken_v5(channel_id, page, sort, tab, view=1, include_shorts=True):
|
||||
# Tab-specific protobuf field numbers (from Invidious source)
|
||||
# Each tab uses different field numbers in the protobuf structure:
|
||||
# videos: 110 -> 3 -> 15 -> { 2:{1:UUID}, 4:sort, 8:{1:UUID, 3:sort} }
|
||||
# shorts: 110 -> 3 -> 10 -> { 2:{1:UUID}, 4:sort, 7:{1:UUID, 3:sort} }
|
||||
# streams: 110 -> 3 -> 14 -> { 2:{1:UUID}, 5:sort, 8:{1:UUID, 3:sort} }
|
||||
tab_config = {
|
||||
'videos': {'tab_field': 15, 'sort_field': 4, 'embedded_field': 8},
|
||||
'shorts': {'tab_field': 10, 'sort_field': 4, 'embedded_field': 7},
|
||||
'streams': {'tab_field': 14, 'sort_field': 5, 'embedded_field': 8},
|
||||
}
|
||||
config = tab_config.get(tab, tab_config['videos'])
|
||||
tab_field = config['tab_field']
|
||||
sort_field = config['sort_field']
|
||||
embedded_field = config['embedded_field']
|
||||
|
||||
# Map sort values to YouTube API values
|
||||
if tab == 'streams':
|
||||
sort_mapping = {'1': 14, '2': 13, '3': 12, '4': 12}
|
||||
else:
|
||||
sort_mapping = {'1': 2, '2': 5, '3': 4, '4': 4}
|
||||
new_sort = sort_mapping.get(sort, sort_mapping['3'])
|
||||
|
||||
# UUID placeholder (field 1)
|
||||
uuid_str = "00000000-0000-0000-0000-000000000000"
|
||||
|
||||
# Build the tab-level object matching Invidious structure exactly:
|
||||
# { 2: embedded{1: UUID}, sort_field: sort_val, embedded_field: embedded{1: UUID, 3: sort_val} }
|
||||
tab_content = (
|
||||
proto.string(2, proto.string(1, uuid_str))
|
||||
+ proto.uint(sort_field, new_sort)
|
||||
+ proto.string(embedded_field,
|
||||
proto.string(1, uuid_str) + proto.uint(3, new_sort))
|
||||
)
|
||||
|
||||
tab_wrapper = proto.string(tab_field, tab_content)
|
||||
inner_container = proto.string(3, tab_wrapper)
|
||||
outer_container = proto.string(110, inner_container)
|
||||
|
||||
# Add shorts filter when include_shorts=False (field 104, same as playlist.py)
|
||||
# This tells YouTube to exclude shorts from the results
|
||||
if not include_shorts:
|
||||
outer_container += proto.string(104, proto.uint(2, 1))
|
||||
|
||||
encoded_inner = proto.percent_b64encode(outer_container)
|
||||
|
||||
pointless_nest = proto.string(80226972,
|
||||
proto.string(2, channel_id)
|
||||
+ proto.string(3, encoded_inner)
|
||||
)
|
||||
|
||||
return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
|
||||
|
||||
|
||||
def channel_about_ctoken(channel_id):
|
||||
return proto.make_protobuf(
|
||||
('base64p',
|
||||
[
|
||||
[2, 80226972,
|
||||
[
|
||||
[2, 2, channel_id],
|
||||
[2, 3,
|
||||
('base64p',
|
||||
[
|
||||
[2, 110,
|
||||
[
|
||||
[2, 3,
|
||||
[
|
||||
[2, 19,
|
||||
[
|
||||
[2, 1, b'66b0e9e9-0000-2820-9589-582429a83980'],
|
||||
]
|
||||
],
|
||||
]
|
||||
],
|
||||
]
|
||||
],
|
||||
]
|
||||
)
|
||||
],
|
||||
]
|
||||
],
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
# https://github.com/user234683/youtube-local/issues/151
|
||||
def channel_ctoken_v4(channel_id, page, sort, tab, view=1):
|
||||
new_sort = (2 if int(sort) == 1 else 1)
|
||||
offset = str(30*(int(page) - 1))
|
||||
pointless_nest = proto.string(80226972,
|
||||
proto.string(2, channel_id)
|
||||
+ proto.string(3,
|
||||
proto.percent_b64encode(
|
||||
proto.string(110,
|
||||
proto.string(3,
|
||||
proto.string(15,
|
||||
proto.string(1,
|
||||
proto.string(1,
|
||||
proto.unpadded_b64encode(
|
||||
proto.string(1,
|
||||
proto.unpadded_b64encode(
|
||||
proto.string(2,
|
||||
b"ST:"
|
||||
+ proto.unpadded_b64encode(
|
||||
proto.string(2, offset)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
# targetId, just needs to be present but
|
||||
# doesn't need to be correct
|
||||
+ proto.string(2, "63faaff0-0000-23fe-80f0-582429d11c38")
|
||||
)
|
||||
# 1 - newest, 2 - popular
|
||||
+ proto.uint(3, new_sort)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
|
||||
|
||||
# SORT:
|
||||
# videos:
|
||||
# Popular - 1
|
||||
# Oldest - 2
|
||||
# Newest - 3
|
||||
# playlists:
|
||||
# Oldest - 2
|
||||
# Newest - 3
|
||||
# Last video added - 4
|
||||
|
||||
@@ -75,15 +200,15 @@ def channel_ctoken_v2(channel_id, page, sort, tab, view=1):
|
||||
2: 17254859483345278706,
|
||||
1: 16570086088270825023,
|
||||
}[int(sort)]
|
||||
page_token = proto.string(61, proto.unpadded_b64encode(
|
||||
proto.string(1, proto.uint(1, schema_number) + proto.string(
|
||||
2,
|
||||
proto.string(1, proto.unpadded_b64encode(proto.uint(1, offset)))
|
||||
))))
|
||||
page_token = proto.string(61, proto.unpadded_b64encode(proto.string(1,
|
||||
proto.uint(1, schema_number) + proto.string(2,
|
||||
proto.string(1, proto.unpadded_b64encode(proto.uint(1,offset)))
|
||||
)
|
||||
)))
|
||||
|
||||
tab = proto.string(2, tab)
|
||||
sort = proto.uint(3, int(sort))
|
||||
# page = proto.string(15, str(page) )
|
||||
#page = proto.string(15, str(page))
|
||||
|
||||
shelf_view = proto.uint(4, 0)
|
||||
view = proto.uint(6, int(view))
|
||||
@@ -114,12 +239,16 @@ def channel_ctoken_v1(channel_id, page, sort, tab, view=1):
|
||||
|
||||
|
||||
def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
|
||||
ctoken=None, print_status=True):
|
||||
ctoken=None, print_status=True, include_shorts=True):
|
||||
message = 'Got channel tab' if print_status else None
|
||||
|
||||
if not ctoken:
|
||||
ctoken = channel_ctoken_v3(channel_id, page, sort, tab, view)
|
||||
if tab in ('videos', 'shorts', 'streams'):
|
||||
ctoken = channel_ctoken_v5(channel_id, page, sort, tab, view, include_shorts)
|
||||
else:
|
||||
ctoken = channel_ctoken_v3(channel_id, page, sort, tab, view)
|
||||
ctoken = ctoken.replace('=', '%3D')
|
||||
|
||||
# Not sure what the purpose of the key is or whether it will change
|
||||
# For now it seems to be constant for the API endpoint, not dependent
|
||||
# on the browsing session or channel
|
||||
@@ -132,7 +261,7 @@ def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
|
||||
'hl': 'en',
|
||||
'gl': 'US',
|
||||
'clientName': 'WEB',
|
||||
'clientVersion': '2.20180830',
|
||||
'clientVersion': '2.20240327.00.00',
|
||||
},
|
||||
},
|
||||
'continuation': ctoken,
|
||||
@@ -147,7 +276,10 @@ def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
|
||||
|
||||
|
||||
# cache entries expire after 30 minutes
|
||||
@cachetools.func.ttl_cache(maxsize=128, ttl=30*60)
|
||||
number_of_videos_cache = cachetools.TTLCache(128, 30*60)
|
||||
# Cache for continuation tokens (shorts/streams pagination)
|
||||
continuation_token_cache = cachetools.TTLCache(512, 15*60)
|
||||
@cachetools.cached(number_of_videos_cache)
|
||||
def get_number_of_videos_channel(channel_id):
|
||||
if channel_id is None:
|
||||
return 1000
|
||||
@@ -159,31 +291,44 @@ def get_number_of_videos_channel(channel_id):
|
||||
try:
|
||||
response = util.fetch_url(url, headers_mobile,
|
||||
debug_name='number_of_videos', report_text='Got number of videos')
|
||||
except urllib.error.HTTPError as e:
|
||||
except (urllib.error.HTTPError, util.FetchError):
|
||||
traceback.print_exc()
|
||||
print("Couldn't retrieve number of videos")
|
||||
return 1000
|
||||
|
||||
response = response.decode('utf-8')
|
||||
|
||||
# match = re.search(r'"numVideosText":\s*{\s*"runs":\s*\[{"text":\s*"([\d,]*) videos"', response)
|
||||
match = re.search(r'"numVideosText".*?([,\d]+)', response)
|
||||
if match:
|
||||
return int(match.group(1).replace(',',''))
|
||||
else:
|
||||
return 0
|
||||
# Try several patterns since YouTube's format changes:
|
||||
# "numVideosText":{"runs":[{"text":"1,234"},{"text":" videos"}]}
|
||||
# "stats":[..., {"runs":[{"text":"1,234"},{"text":" videos"}]}]
|
||||
for pattern in (
|
||||
r'"numVideosText".*?"text":\s*"([\d,]+)"',
|
||||
r'"numVideosText".*?([\d,]+)\s*videos?',
|
||||
r'"numVideosText".*?([,\d]+)',
|
||||
r'([\d,]+)\s*videos?\s*</span>',
|
||||
):
|
||||
match = re.search(pattern, response)
|
||||
if match:
|
||||
try:
|
||||
return int(match.group(1).replace(',', ''))
|
||||
except ValueError:
|
||||
continue
|
||||
# Fallback: unknown count
|
||||
return 0
|
||||
def set_cached_number_of_videos(channel_id, num_videos):
|
||||
@cachetools.cached(number_of_videos_cache)
|
||||
def dummy_func_using_same_cache(channel_id):
|
||||
return num_videos
|
||||
dummy_func_using_same_cache(channel_id)
|
||||
|
||||
|
||||
channel_id_re = re.compile(r'videos\.xml\?channel_id=([a-zA-Z0-9_-]{24})"')
|
||||
|
||||
|
||||
@cachetools.func.lru_cache(maxsize=128)
|
||||
def get_channel_id(base_url):
|
||||
# method that gives the smallest possible response at ~4 kb
|
||||
# needs to be as fast as possible
|
||||
base_url = base_url.replace('https://www', 'https://m') # avoid redirect
|
||||
response = util.fetch_url(
|
||||
base_url + '/about?pbj=1', headers_mobile,
|
||||
response = util.fetch_url(base_url + '/about?pbj=1', headers_mobile,
|
||||
debug_name='get_channel_id', report_text='Got channel id').decode('utf-8')
|
||||
match = channel_id_re.search(response)
|
||||
if match:
|
||||
@@ -191,6 +336,30 @@ def get_channel_id(base_url):
|
||||
return None
|
||||
|
||||
|
||||
metadata_cache = cachetools.LRUCache(128)
|
||||
@cachetools.cached(metadata_cache)
|
||||
def get_metadata(channel_id):
|
||||
# Use youtubei browse API to get channel metadata
|
||||
polymer_json = util.call_youtube_api('web', 'browse', {
|
||||
'browseId': channel_id,
|
||||
})
|
||||
info = yt_data_extract.extract_channel_info(json.loads(polymer_json),
|
||||
'about',
|
||||
continuation=False)
|
||||
return extract_metadata_for_caching(info)
|
||||
def set_cached_metadata(channel_id, metadata):
|
||||
@cachetools.cached(metadata_cache)
|
||||
def dummy_func_using_same_cache(channel_id):
|
||||
return metadata
|
||||
dummy_func_using_same_cache(channel_id)
|
||||
def extract_metadata_for_caching(channel_info):
|
||||
metadata = {}
|
||||
for key in ('approx_subscriber_count', 'short_description', 'channel_name',
|
||||
'avatar'):
|
||||
metadata[key] = channel_info[key]
|
||||
return metadata
|
||||
|
||||
|
||||
def get_number_of_videos_general(base_url):
|
||||
return get_number_of_videos_channel(get_channel_id(base_url))
|
||||
|
||||
@@ -211,7 +380,7 @@ def get_channel_search_json(channel_id, query, page):
|
||||
'hl': 'en',
|
||||
'gl': 'US',
|
||||
'clientName': 'WEB',
|
||||
'clientVersion': '2.20180830',
|
||||
'clientVersion': '2.20240327.00.00',
|
||||
},
|
||||
},
|
||||
'continuation': ctoken,
|
||||
@@ -229,19 +398,34 @@ def post_process_channel_info(info):
|
||||
info['avatar'] = util.prefix_url(info['avatar'])
|
||||
info['channel_url'] = util.prefix_url(info['channel_url'])
|
||||
for item in info['items']:
|
||||
# Only set thumbnail if YouTube didn't provide one
|
||||
if not item.get('thumbnail'):
|
||||
if item.get('type') == 'playlist' and item.get('first_video_id'):
|
||||
item['thumbnail'] = "https://i.ytimg.com/vi/{}/hqdefault.jpg".format(item['first_video_id'])
|
||||
elif item.get('type') == 'video' and item.get('id'):
|
||||
item['thumbnail'] = "https://i.ytimg.com/vi/{}/hqdefault.jpg".format(item['id'])
|
||||
util.prefix_urls(item)
|
||||
util.add_extra_html_info(item)
|
||||
if info['current_tab'] == 'about':
|
||||
for i, (text, url) in enumerate(info['links']):
|
||||
if util.YOUTUBE_URL_RE.fullmatch(url):
|
||||
if isinstance(url, str) and util.YOUTUBE_URL_RE.fullmatch(url):
|
||||
info['links'][i] = (text, util.prefix_url(url))
|
||||
|
||||
|
||||
def get_channel_first_page(base_url=None, channel_id=None):
|
||||
def get_channel_first_page(base_url=None, tab='videos', channel_id=None, sort=None):
|
||||
if channel_id:
|
||||
base_url = 'https://www.youtube.com/channel/' + channel_id
|
||||
return util.fetch_url(base_url + '/videos?pbj=1&view=0', headers_desktop,
|
||||
debug_name='gen_channel_videos')
|
||||
|
||||
# Build URL with sort parameter
|
||||
# YouTube URL sort params: p=popular, dd=newest, lad=newest no shorts
|
||||
# Note: 'da' (oldest) was removed by YouTube in January 2026
|
||||
url = base_url + '/' + tab + '?pbj=1&view=0'
|
||||
if sort:
|
||||
# Map sort values to YouTube's URL parameter values
|
||||
sort_map = {'3': 'dd', '4': 'lad'}
|
||||
url += '&sort=' + sort_map.get(sort, 'dd')
|
||||
|
||||
return util.fetch_url(url, headers_desktop, debug_name='gen_channel_' + tab)
|
||||
|
||||
|
||||
playlist_sort_codes = {'2': "da", '3': "dd", '4': "lad"}
|
||||
@@ -250,63 +434,216 @@ playlist_sort_codes = {'2': "da", '3': "dd", '4': "lad"}
|
||||
# youtube.com/user/[username]/[tab]
|
||||
# youtube.com/c/[custom]/[tab]
|
||||
# youtube.com/[custom]/[tab]
|
||||
|
||||
|
||||
def get_channel_page_general_url(base_url, tab, request, channel_id=None):
|
||||
|
||||
page_number = int(request.args.get('page', 1))
|
||||
sort = request.args.get('sort', '3')
|
||||
# sort 1: views
|
||||
# sort 2: oldest
|
||||
# sort 3: newest (includes shorts, via UU uploads playlist)
|
||||
# sort 4: newest - no shorts (uses channel Videos tab API directly, like Invidious)
|
||||
default_sort = '3' if settings.include_shorts_in_channel else '4'
|
||||
sort = request.args.get('sort', default_sort)
|
||||
view = request.args.get('view', '1')
|
||||
query = request.args.get('query', '')
|
||||
ctoken = request.args.get('ctoken', '')
|
||||
default_params = (page_number == 1 and sort == '3' and view == '1')
|
||||
default_params = (page_number == 1 and sort in ('3', '4') and view == '1')
|
||||
continuation = bool(ctoken)
|
||||
page_size = 30
|
||||
polymer_json = None
|
||||
number_of_videos = 0
|
||||
info = None
|
||||
|
||||
if tab == 'videos' and channel_id and not default_params:
|
||||
tasks = (
|
||||
gevent.spawn(get_number_of_videos_channel, channel_id),
|
||||
gevent.spawn(get_channel_tab, channel_id, page_number, sort,
|
||||
'videos', view, ctoken)
|
||||
)
|
||||
gevent.joinall(tasks)
|
||||
util.check_gevent_exceptions(*tasks)
|
||||
number_of_videos, polymer_json = tasks[0].value, tasks[1].value
|
||||
elif tab == 'videos':
|
||||
if channel_id:
|
||||
num_videos_call = (get_number_of_videos_channel, channel_id)
|
||||
# -------------------------------------------------------------------------
|
||||
# sort=3: use UU uploads playlist (includes shorts)
|
||||
# -------------------------------------------------------------------------
|
||||
if tab == 'videos' and sort == '3':
|
||||
if not channel_id:
|
||||
channel_id = get_channel_id(base_url)
|
||||
if page_number == 1:
|
||||
tasks = (
|
||||
gevent.spawn(playlist.playlist_first_page,
|
||||
'UU' + channel_id[2:],
|
||||
report_text='Retrieved channel videos'),
|
||||
gevent.spawn(get_metadata, channel_id),
|
||||
)
|
||||
gevent.joinall(tasks)
|
||||
util.check_gevent_exceptions(*tasks)
|
||||
pl_json = tasks[0].value
|
||||
pl_info = yt_data_extract.extract_playlist_info(pl_json)
|
||||
number_of_videos = pl_info['metadata']['video_count']
|
||||
if number_of_videos is None:
|
||||
number_of_videos = 1000
|
||||
else:
|
||||
set_cached_number_of_videos(channel_id, number_of_videos)
|
||||
else:
|
||||
num_videos_call = (get_number_of_videos_general, base_url)
|
||||
tasks = (
|
||||
gevent.spawn(*num_videos_call),
|
||||
gevent.spawn(get_channel_first_page, base_url=base_url),
|
||||
)
|
||||
gevent.joinall(tasks)
|
||||
util.check_gevent_exceptions(*tasks)
|
||||
number_of_videos, polymer_json = tasks[0].value, tasks[1].value
|
||||
tasks = (
|
||||
gevent.spawn(playlist.get_videos, 'UU' + channel_id[2:],
|
||||
page_number, include_shorts=True),
|
||||
gevent.spawn(get_metadata, channel_id),
|
||||
gevent.spawn(get_number_of_videos_channel, channel_id),
|
||||
gevent.spawn(playlist.playlist_first_page, 'UU' + channel_id[2:],
|
||||
report_text='Retrieved channel video count'),
|
||||
)
|
||||
gevent.joinall(tasks)
|
||||
util.check_gevent_exceptions(*tasks)
|
||||
pl_json = tasks[0].value
|
||||
pl_info = yt_data_extract.extract_playlist_info(pl_json)
|
||||
first_page_meta = yt_data_extract.extract_playlist_metadata(tasks[3].value)
|
||||
number_of_videos = (tasks[2].value
|
||||
or first_page_meta.get('video_count')
|
||||
or 0)
|
||||
|
||||
if pl_info['items']:
|
||||
info = pl_info
|
||||
info['channel_id'] = channel_id
|
||||
info['current_tab'] = 'videos'
|
||||
page_size = 100
|
||||
# else fall through to the channel browse API below
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Channel browse API: sort=4 (videos tab, no shorts), shorts, streams,
|
||||
# or fallback when the UU playlist returned no items.
|
||||
# Uses channel_ctoken_v5 per-tab tokens, mirroring Invidious's approach.
|
||||
# Pagination is driven by the continuation token YouTube returns each page.
|
||||
# -------------------------------------------------------------------------
|
||||
used_channel_api = False
|
||||
if info is None and (
|
||||
tab in ('shorts', 'streams')
|
||||
or (tab == 'videos' and sort == '4')
|
||||
or (tab == 'videos' and sort == '3') # UU-playlist fallback
|
||||
):
|
||||
if not channel_id:
|
||||
channel_id = get_channel_id(base_url)
|
||||
used_channel_api = True
|
||||
|
||||
# Determine what browse call to make
|
||||
if ctoken:
|
||||
browse_call = (util.call_youtube_api, 'web', 'browse',
|
||||
{'continuation': ctoken})
|
||||
continuation = True
|
||||
elif page_number > 1:
|
||||
cache_key = (channel_id, tab, sort, page_number - 1)
|
||||
cached_ctoken = continuation_token_cache.get(cache_key)
|
||||
if cached_ctoken:
|
||||
browse_call = (util.call_youtube_api, 'web', 'browse',
|
||||
{'continuation': cached_ctoken})
|
||||
else:
|
||||
# Cache miss — restart from page 1 (better than an error)
|
||||
browse_call = (get_channel_tab, channel_id, '1', sort, tab, int(view))
|
||||
continuation = True
|
||||
else:
|
||||
browse_call = (get_channel_tab, channel_id, '1', sort, tab, int(view))
|
||||
continuation = True
|
||||
|
||||
# Single browse call; number_of_videos is computed from items actually
|
||||
# fetched so we don't mislead the user with a total that includes
|
||||
# shorts (which this branch is explicitly excluding for sort=4).
|
||||
task = gevent.spawn(*browse_call)
|
||||
task.join()
|
||||
util.check_gevent_exceptions(task)
|
||||
polymer_json = task.value
|
||||
|
||||
elif tab == 'about':
|
||||
polymer_json = util.fetch_url(base_url + '/about?pbj=1', headers_desktop, debug_name='gen_channel_about')
|
||||
# polymer_json = util.fetch_url(base_url + '/about?pbj=1', headers_desktop, debug_name='gen_channel_about')
|
||||
channel_id = get_channel_id(base_url)
|
||||
ctoken = channel_about_ctoken(channel_id)
|
||||
polymer_json = util.call_youtube_api('web', 'browse', {
|
||||
'continuation': ctoken,
|
||||
})
|
||||
continuation=True
|
||||
elif tab == 'playlists' and page_number == 1:
|
||||
polymer_json = util.fetch_url(base_url+ '/playlists?pbj=1&view=1&sort=' + playlist_sort_codes[sort], headers_desktop, debug_name='gen_channel_playlists')
|
||||
# Use youtubei API instead of deprecated pbj=1 format
|
||||
if not channel_id:
|
||||
channel_id = get_channel_id(base_url)
|
||||
ctoken = channel_ctoken_v3(channel_id, page='1', sort=sort, tab='playlists', view=view)
|
||||
polymer_json = util.call_youtube_api('web', 'browse', {
|
||||
'continuation': ctoken,
|
||||
})
|
||||
continuation = True
|
||||
elif tab == 'playlists':
|
||||
polymer_json = get_channel_tab(channel_id, page_number, sort,
|
||||
'playlists', view)
|
||||
continuation = True
|
||||
elif tab == 'search' and channel_id:
|
||||
polymer_json = get_channel_search_json(channel_id, query, page_number)
|
||||
elif tab == 'search':
|
||||
url = base_url + '/search?pbj=1&query=' + urllib.parse.quote(query, safe='')
|
||||
polymer_json = util.fetch_url(url, headers_desktop, debug_name='gen_channel_search')
|
||||
else:
|
||||
elif tab != 'videos':
|
||||
flask.abort(404, 'Unknown channel tab: ' + tab)
|
||||
|
||||
info = yt_data_extract.extract_channel_info(json.loads(polymer_json), tab)
|
||||
if polymer_json is not None and info is None:
|
||||
info = yt_data_extract.extract_channel_info(
|
||||
json.loads(polymer_json), tab, continuation=continuation
|
||||
)
|
||||
|
||||
if info is None:
|
||||
return flask.render_template('error.html', error_message='Could not retrieve channel data')
|
||||
if info['error'] is not None:
|
||||
return flask.render_template('error.html', error_message=info['error'])
|
||||
|
||||
post_process_channel_info(info)
|
||||
if tab == 'videos':
|
||||
if channel_id:
|
||||
info['channel_url'] = 'https://www.youtube.com/channel/' + channel_id
|
||||
info['channel_id'] = channel_id
|
||||
else:
|
||||
channel_id = info['channel_id']
|
||||
|
||||
# Will have microformat present, cache metadata while we have it
|
||||
if (channel_id and default_params and tab not in ('videos', 'about')
|
||||
and info.get('channel_name') is not None):
|
||||
metadata = extract_metadata_for_caching(info)
|
||||
set_cached_metadata(channel_id, metadata)
|
||||
# Otherwise, populate with our (hopefully cached) metadata
|
||||
elif channel_id and info.get('channel_name') is None:
|
||||
metadata = get_metadata(channel_id)
|
||||
for key, value in metadata.items():
|
||||
yt_data_extract.conservative_update(info, key, value)
|
||||
# need to add this metadata to the videos/playlists
|
||||
additional_info = {
|
||||
'author': info['channel_name'],
|
||||
'author_id': info['channel_id'],
|
||||
'author_url': info['channel_url'],
|
||||
}
|
||||
for item in info['items']:
|
||||
item.update(additional_info)
|
||||
|
||||
if tab in ('videos', 'shorts', 'streams'):
|
||||
# For any tab using the channel browse API (sort=4, shorts, streams),
|
||||
# pagination is driven by the ctoken YouTube returns in the response.
|
||||
# Cache it so the next page request can use it.
|
||||
if info.get('ctoken'):
|
||||
cache_key = (channel_id, tab, sort, page_number)
|
||||
continuation_token_cache[cache_key] = info['ctoken']
|
||||
|
||||
# Determine is_last_page and final number_of_pages.
|
||||
# For channel-API-driven tabs (sort=4, shorts, streams, UU fallback),
|
||||
# YouTube doesn't give us a reliable total filtered count. So instead
|
||||
# of displaying a misleading number (the total-including-shorts from
|
||||
# get_number_of_videos_channel), we count only what we've actually
|
||||
# paged through, and use the ctoken to know whether to show "next".
|
||||
if used_channel_api:
|
||||
info['is_last_page'] = (info.get('ctoken') is None)
|
||||
items_on_page = len(info.get('items', []))
|
||||
items_seen_so_far = (page_number - 1) * page_size + items_on_page
|
||||
|
||||
# Use accumulated count as the displayed total so "N videos" shown
|
||||
# to the user always matches what they could actually reach.
|
||||
number_of_videos = items_seen_so_far
|
||||
|
||||
# If there's more content, bump by 1 so the Next-page button exists
|
||||
if info.get('ctoken'):
|
||||
number_of_videos = max(number_of_videos,
|
||||
page_number * page_size + 1)
|
||||
# For sort=3 via UU playlist (used_channel_api=False), number_of_videos
|
||||
# was already set from playlist metadata above.
|
||||
|
||||
info['number_of_videos'] = number_of_videos
|
||||
info['number_of_pages'] = math.ceil(number_of_videos/30)
|
||||
info['number_of_pages'] = math.ceil(number_of_videos / page_size) if number_of_videos else 1
|
||||
# Never show fewer pages than the page the user is actually on
|
||||
if info['number_of_pages'] < page_number:
|
||||
info['number_of_pages'] = page_number
|
||||
info['header_playlist_names'] = local_playlist.get_playlist_names()
|
||||
if tab in ('videos', 'playlists'):
|
||||
if tab in ('videos', 'shorts', 'streams', 'playlists'):
|
||||
info['current_sort'] = sort
|
||||
elif tab == 'search':
|
||||
info['search_box_value'] = query
|
||||
@@ -315,9 +652,10 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
|
||||
info['page_number'] = page_number
|
||||
info['subscribed'] = subscriptions.is_subscribed(info['channel_id'])
|
||||
|
||||
return flask.render_template(
|
||||
'channel.html',
|
||||
parameters_dictionary=request.args,
|
||||
post_process_channel_info(info)
|
||||
|
||||
return flask.render_template('channel.html',
|
||||
parameters_dictionary = request.args,
|
||||
**info
|
||||
)
|
||||
|
||||
|
||||
@@ -78,7 +78,7 @@ def single_comment_ctoken(video_id, comment_id):
|
||||
|
||||
def post_process_comments_info(comments_info):
|
||||
for comment in comments_info['comments']:
|
||||
comment['author'] = strip_non_ascii(comment['author'])
|
||||
comment['author'] = strip_non_ascii(comment['author']) if comment.get('author') else ""
|
||||
comment['author_url'] = concat_or_none(
|
||||
'/', comment['author_url'])
|
||||
comment['author_avatar'] = concat_or_none(
|
||||
@@ -97,7 +97,7 @@ def post_process_comments_info(comments_info):
|
||||
ctoken = comment['reply_ctoken']
|
||||
ctoken, err = proto.set_protobuf_value(
|
||||
ctoken,
|
||||
'base64p', 6, 3, 9, value=250)
|
||||
'base64p', 6, 3, 9, value=200)
|
||||
if err:
|
||||
print('Error setting ctoken value:')
|
||||
print(err)
|
||||
@@ -127,7 +127,7 @@ def post_process_comments_info(comments_info):
|
||||
# change max_replies field to 250 in ctoken
|
||||
new_ctoken, err = proto.set_protobuf_value(
|
||||
ctoken,
|
||||
'base64p', 6, 3, 9, value=250)
|
||||
'base64p', 6, 3, 9, value=200)
|
||||
if err:
|
||||
print('Error setting ctoken value:')
|
||||
print(err)
|
||||
@@ -150,38 +150,40 @@ def post_process_comments_info(comments_info):
|
||||
util.URL_ORIGIN, '/watch?v=', comments_info['video_id'])
|
||||
comments_info['video_thumbnail'] = concat_or_none(
|
||||
settings.img_prefix, 'https://i.ytimg.com/vi/',
|
||||
comments_info['video_id'], '/mqdefault.jpg'
|
||||
comments_info['video_id'], '/hqdefault.jpg'
|
||||
)
|
||||
|
||||
|
||||
def video_comments(video_id, sort=0, offset=0, lc='', secret_key=''):
|
||||
if not settings.comments_mode:
|
||||
return {}
|
||||
|
||||
# Initialize the result dict up-front so that any exception path below
|
||||
# can safely attach an 'error' field without risking UnboundLocalError.
|
||||
comments_info = {'error': None}
|
||||
try:
|
||||
if settings.comments_mode:
|
||||
comments_info = {'error': None}
|
||||
other_sort_url = (
|
||||
util.URL_ORIGIN + '/comments?ctoken='
|
||||
+ make_comment_ctoken(video_id, sort=1 - sort, lc=lc)
|
||||
)
|
||||
other_sort_text = 'Sort by ' + ('newest' if sort == 0 else 'top')
|
||||
other_sort_url = (
|
||||
util.URL_ORIGIN + '/comments?ctoken='
|
||||
+ make_comment_ctoken(video_id, sort=1 - sort, lc=lc)
|
||||
)
|
||||
other_sort_text = 'Sort by ' + ('newest' if sort == 0 else 'top')
|
||||
|
||||
this_sort_url = (util.URL_ORIGIN
|
||||
+ '/comments?ctoken='
|
||||
+ make_comment_ctoken(video_id, sort=sort, lc=lc))
|
||||
this_sort_url = (util.URL_ORIGIN
|
||||
+ '/comments?ctoken='
|
||||
+ make_comment_ctoken(video_id, sort=sort, lc=lc))
|
||||
|
||||
comments_info['comment_links'] = [
|
||||
(other_sort_text, other_sort_url),
|
||||
('Direct link', this_sort_url)
|
||||
]
|
||||
comments_info['comment_links'] = [
|
||||
(other_sort_text, other_sort_url),
|
||||
('Direct link', this_sort_url)
|
||||
]
|
||||
|
||||
ctoken = make_comment_ctoken(video_id, sort, offset, lc)
|
||||
comments_info.update(yt_data_extract.extract_comments_info(
|
||||
request_comments(ctoken), ctoken=ctoken
|
||||
))
|
||||
post_process_comments_info(comments_info)
|
||||
ctoken = make_comment_ctoken(video_id, sort, offset, lc)
|
||||
comments_info.update(yt_data_extract.extract_comments_info(
|
||||
request_comments(ctoken), ctoken=ctoken
|
||||
))
|
||||
post_process_comments_info(comments_info)
|
||||
|
||||
return comments_info
|
||||
else:
|
||||
return {}
|
||||
return comments_info
|
||||
except util.FetchError as e:
|
||||
if e.code == '429' and settings.route_tor:
|
||||
comments_info['error'] = 'Error: YouTube blocked the request because the Tor exit node is overutilized.'
|
||||
@@ -189,10 +191,10 @@ def video_comments(video_id, sort=0, offset=0, lc='', secret_key=''):
|
||||
comments_info['error'] += '\n\n' + e.error_message
|
||||
comments_info['error'] += '\n\nExit node IP address: %s' % e.ip
|
||||
else:
|
||||
comments_info['error'] = 'YouTube blocked the request. IP address: %s' % e.ip
|
||||
comments_info['error'] = 'YouTube blocked the request. Error: %s' % str(e)
|
||||
|
||||
except Exception as e:
|
||||
comments_info['error'] = 'YouTube blocked the request. IP address: %s' % e.ip
|
||||
comments_info['error'] = 'YouTube blocked the request. Error: %s' % str(e)
|
||||
|
||||
if comments_info.get('error'):
|
||||
print('Error retrieving comments for ' + str(video_id) + ':\n' +
|
||||
|
||||
@@ -1 +1,3 @@
|
||||
from .get_app_version import *
|
||||
from .get_app_version import app_version
|
||||
|
||||
__all__ = ['app_version']
|
||||
|
||||
@@ -1,57 +1,56 @@
|
||||
from __future__ import unicode_literals
|
||||
from subprocess import (
|
||||
call,
|
||||
STDOUT
|
||||
)
|
||||
from ..version import __version__
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
|
||||
from ..version import __version__
|
||||
|
||||
|
||||
def app_version():
    """Return a dict with keys 'version', 'branch' and 'commit'.

    'version' always comes from the packaged __version__. 'branch' and
    'commit' are filled in from git metadata when running inside a git
    work tree, and left as None otherwise (installed package, git not
    available, or not a checkout).

    This span contained two diff-fused implementations; this is the
    single coherent newer one.
    """
    def minimal_env_cmd(cmd):
        # Run cmd with a minimal, C-locale environment so git output is
        # not localized (we parse it below).
        env = {k: os.environ[k] for k in ['SYSTEMROOT', 'PATH'] if k in os.environ}
        env.update({'LANGUAGE': 'C', 'LANG': 'C', 'LC_ALL': 'C'})
        return subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]

    subst_list = {
        'version': __version__,
        'branch': None,
        'commit': None,
    }

    # Use shutil.which instead of `command -v`/os.system so we don't spawn a
    # shell (CWE-78 hardening) and so it works cross-platform.
    if shutil.which('git') is None:
        return subst_list

    try:
        # Check we are inside a git work tree. Using DEVNULL avoids the
        # file-handle leak from `open(os.devnull, 'w')`.
        rc = subprocess.call(
            ['git', 'branch'],
            stderr=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
        )
    except OSError:
        # git disappeared between which() and call(), or is not runnable.
        return subst_list
    if rc != 0:
        return subst_list

    # Commit: nearest tag description, or the short hash as fallback.
    describe = minimal_env_cmd(['git', 'describe', '--tags', '--always'])
    git_revision = describe.strip().decode('ascii')

    # Branch: `git branch` marks the current one with '* '.
    branch = minimal_env_cmd(['git', 'branch'])
    git_branch = branch.strip().decode('ascii').replace('* ', '')

    subst_list.update({
        'branch': git_branch,
        'commit': git_revision,
    })

    return subst_list
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __name__ == '__main__':
|
||||
app_version()
|
||||
|
||||
23
youtube/hls_cache.py
Normal file
23
youtube/hls_cache.py
Normal file
@@ -0,0 +1,23 @@
|
||||
"""Multi-audio track support via HLS streaming.
|
||||
|
||||
Instead of downloading all segments, we proxy the HLS playlist and
|
||||
let the browser stream the audio directly. Zero local storage needed.
|
||||
"""
|
||||
|
||||
_tracks = {} # cache_key -> {'hls_url': str, ...}
|
||||
|
||||
|
||||
def register_track(cache_key, hls_playlist_url, content_length=0,
|
||||
video_id=None, track_id=None):
|
||||
print(f'[audio-track-cache] Registering track: {cache_key} -> {hls_playlist_url[:80]}...')
|
||||
_tracks[cache_key] = {'hls_url': hls_playlist_url}
|
||||
print(f'[audio-track-cache] Available tracks: {list(_tracks.keys())}')
|
||||
|
||||
|
||||
def get_hls_url(cache_key):
|
||||
entry = _tracks.get(cache_key)
|
||||
if entry:
|
||||
print(f'[audio-track-cache] Found track: {cache_key}')
|
||||
else:
|
||||
print(f'[audio-track-cache] Track not found: {cache_key}')
|
||||
return entry['hls_url'] if entry else None
|
||||
141
youtube/i18n_strings.py
Normal file
141
youtube/i18n_strings.py
Normal file
@@ -0,0 +1,141 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Centralized i18n strings for yt-local
|
||||
|
||||
This file contains static strings that need to be translated but are used
|
||||
dynamically in templates or generated content. By importing this module,
|
||||
these strings get extracted by babel for translation.
|
||||
"""
|
||||
|
||||
from flask_babel import lazy_gettext as _l
|
||||
|
||||
# Settings categories
|
||||
CATEGORY_NETWORK = _l('Network')
|
||||
CATEGORY_PLAYBACK = _l('Playback')
|
||||
CATEGORY_INTERFACE = _l('Interface')
|
||||
|
||||
# Common setting labels
|
||||
ROUTE_TOR = _l('Route Tor')
|
||||
DEFAULT_SUBTITLES_MODE = _l('Default subtitles mode')
|
||||
AV1_CODEC_RANKING = _l('AV1 Codec Ranking')
|
||||
VP8_VP9_CODEC_RANKING = _l('VP8/VP9 Codec Ranking')
|
||||
H264_CODEC_RANKING = _l('H.264 Codec Ranking')
|
||||
USE_INTEGRATED_SOURCES = _l('Use integrated sources')
|
||||
ROUTE_IMAGES = _l('Route images')
|
||||
ENABLE_COMMENTS_JS = _l('Enable comments.js')
|
||||
ENABLE_SPONSORBLOCK = _l('Enable SponsorBlock')
|
||||
ENABLE_EMBED_PAGE = _l('Enable embed page')
|
||||
|
||||
# Setting names (auto-generated from setting keys)
|
||||
RELATED_VIDEOS_MODE = _l('Related videos mode')
|
||||
COMMENTS_MODE = _l('Comments mode')
|
||||
ENABLE_COMMENT_AVATARS = _l('Enable comment avatars')
|
||||
DEFAULT_COMMENT_SORTING = _l('Default comment sorting')
|
||||
THEATER_MODE = _l('Theater mode')
|
||||
AUTOPLAY_VIDEOS = _l('Autoplay videos')
|
||||
DEFAULT_RESOLUTION = _l('Default resolution')
|
||||
USE_VIDEO_PLAYER = _l('Use video player')
|
||||
USE_VIDEO_DOWNLOAD = _l('Use video download')
|
||||
PROXY_IMAGES = _l('Proxy images')
|
||||
THEME = _l('Theme')
|
||||
FONT = _l('Font')
|
||||
LANGUAGE = _l('Language')
|
||||
EMBED_PAGE_MODE = _l('Embed page mode')
|
||||
|
||||
# Common option values
|
||||
OFF = _l('Off')
|
||||
ON = _l('On')
|
||||
DISABLED = _l('Disabled')
|
||||
ENABLED = _l('Enabled')
|
||||
ALWAYS_SHOWN = _l('Always shown')
|
||||
SHOWN_BY_CLICKING_BUTTON = _l('Shown by clicking button')
|
||||
NATIVE = _l('Native')
|
||||
NATIVE_WITH_HOTKEYS = _l('Native with hotkeys')
|
||||
PLYR = _l('Plyr')
|
||||
|
||||
# Theme options
|
||||
LIGHT = _l('Light')
|
||||
GRAY = _l('Gray')
|
||||
DARK = _l('Dark')
|
||||
|
||||
# Font options
|
||||
BROWSER_DEFAULT = _l('Browser default')
|
||||
LIBERATION_SERIF = _l('Liberation Serif')
|
||||
ARIAL = _l('Arial')
|
||||
VERDANA = _l('Verdana')
|
||||
TAHOMA = _l('Tahoma')
|
||||
|
||||
# Search and filter options
|
||||
SORT_BY = _l('Sort by')
|
||||
RELEVANCE = _l('Relevance')
|
||||
UPLOAD_DATE = _l('Upload date')
|
||||
VIEW_COUNT = _l('View count')
|
||||
RATING = _l('Rating')
|
||||
|
||||
# Time filters
|
||||
ANY = _l('Any')
|
||||
LAST_HOUR = _l('Last hour')
|
||||
TODAY = _l('Today')
|
||||
THIS_WEEK = _l('This week')
|
||||
THIS_MONTH = _l('This month')
|
||||
THIS_YEAR = _l('This year')
|
||||
|
||||
# Content types
|
||||
TYPE = _l('Type')
|
||||
VIDEO = _l('Video')
|
||||
CHANNEL = _l('Channel')
|
||||
PLAYLIST = _l('Playlist')
|
||||
MOVIE = _l('Movie')
|
||||
SHOW = _l('Show')
|
||||
|
||||
# Duration filters
|
||||
DURATION = _l('Duration')
|
||||
SHORT_DURATION = _l('Short (< 4 minutes)')
|
||||
LONG_DURATION = _l('Long (> 20 minutes)')
|
||||
|
||||
# Actions
|
||||
SEARCH = _l('Search')
|
||||
DOWNLOAD = _l('Download')
|
||||
SUBSCRIBE = _l('Subscribe')
|
||||
UNSUBSCRIBE = _l('Unsubscribe')
|
||||
IMPORT = _l('Import')
|
||||
EXPORT = _l('Export')
|
||||
SAVE = _l('Save')
|
||||
CHECK = _l('Check')
|
||||
MUTE = _l('Mute')
|
||||
UNMUTE = _l('Unmute')
|
||||
|
||||
# Common UI elements
|
||||
OPTIONS = _l('Options')
|
||||
SETTINGS = _l('Settings')
|
||||
ERROR = _l('Error')
|
||||
LOADING = _l('loading...')
|
||||
|
||||
# Settings option values
|
||||
TOP = _l('Top')
|
||||
NEWEST = _l('Newest')
|
||||
AUTO = _l('Auto')
|
||||
ENGLISH = _l('English')
|
||||
ESPANOL = _l('Español')
|
||||
|
||||
# Playback options
|
||||
AUTO_HLS_PREFERRED = _l('Auto (HLS preferred)')
|
||||
FORCE_HLS = _l('Force HLS')
|
||||
FORCE_DASH = _l('Force DASH')
|
||||
RANKING_1 = _l('#1')
|
||||
RANKING_2 = _l('#2')
|
||||
RANKING_3 = _l('#3')
|
||||
|
||||
# Form actions
|
||||
SAVE_SETTINGS = _l('Save settings')
|
||||
|
||||
# Other category
|
||||
OTHER = _l('Other')
|
||||
|
||||
# Settings labels
|
||||
PLAYBACK_MODE = _l('Playback mode')
|
||||
|
||||
# Subscription settings (may be used in future)
|
||||
AUTOCHECK_SUBSCRIPTIONS = _l('Autocheck subscriptions')
|
||||
INCLUDE_SHORTS_SUBSCRIPTIONS = _l('Include shorts in subscriptions')
|
||||
INCLUDE_SHORTS_CHANNEL = _l('Include shorts in channel')
|
||||
@@ -1,36 +1,74 @@
|
||||
from youtube import util, yt_data_extract
|
||||
from youtube import util
|
||||
from youtube import yt_app
|
||||
import settings
|
||||
|
||||
import os
|
||||
import json
|
||||
import html
|
||||
import gevent
|
||||
import urllib
|
||||
import math
|
||||
import glob
|
||||
import re
|
||||
|
||||
import flask
|
||||
from flask import request
|
||||
|
||||
playlists_directory = os.path.join(settings.data_dir, "playlists")
|
||||
thumbnails_directory = os.path.join(settings.data_dir, "playlist_thumbnails")
|
||||
playlists_directory = os.path.join(settings.data_dir, 'playlists')
|
||||
thumbnails_directory = os.path.join(settings.data_dir, 'playlist_thumbnails')
|
||||
|
||||
# Whitelist accepted playlist names so user input cannot escape
|
||||
# `playlists_directory` / `thumbnails_directory` (CWE-22, OWASP A01:2021).
|
||||
# Allow letters, digits, spaces, dot, dash and underscore.
|
||||
_PLAYLIST_NAME_RE = re.compile(r'^[\w .\-]{1,128}$')
|
||||
|
||||
|
||||
def _validate_playlist_name(name):
|
||||
'''Return the stripped name if safe, otherwise abort with 400.'''
|
||||
if name is None:
|
||||
flask.abort(400)
|
||||
name = name.strip()
|
||||
if not _PLAYLIST_NAME_RE.match(name):
|
||||
flask.abort(400)
|
||||
return name
|
||||
|
||||
|
||||
def _find_playlist_path(name):
    """Locate the playlist file for *name*.

    Matches `<name>*.txt` so files saved with trailing junk (e.g.
    trailing spaces) in the filename are still found; falls back to the
    canonical `<name>.txt` path when nothing matches. The name is
    whitelist-validated first (aborts with 400 if unsafe).
    """
    safe = _validate_playlist_name(name)
    matches = glob.glob(os.path.join(playlists_directory, safe + '*.txt'))
    if matches:
        return matches[0]
    return os.path.join(playlists_directory, safe + '.txt')
|
||||
|
||||
|
||||
def _parse_playlist_lines(data):
|
||||
"""Parse playlist data lines robustly, skipping empty/malformed entries"""
|
||||
videos = []
|
||||
for line in data.splitlines():
|
||||
clean_line = line.strip()
|
||||
if not clean_line:
|
||||
continue
|
||||
try:
|
||||
videos.append(json.loads(clean_line))
|
||||
except json.decoder.JSONDecodeError:
|
||||
print('Corrupt playlist entry: ' + clean_line)
|
||||
return videos
|
||||
|
||||
|
||||
def video_ids_in_playlist(name):
    """Return the set of video ids stored in the named local playlist.

    This span contained diff-fused old/new bodies; this is the coherent
    new version: the file is located via _find_playlist_path (tolerant
    of trailing junk in filenames) and blank lines are skipped. A
    missing playlist yields an empty set.
    """
    try:
        playlist_path = _find_playlist_path(name)
        with open(playlist_path, 'r', encoding='utf-8') as file:
            data = file.read()
        # Set comprehension: one id per non-blank JSON line.
        return {
            json.loads(line.strip())['id']
            for line in data.splitlines()
            if line.strip()
        }
    except FileNotFoundError:
        return set()
|
||||
|
||||
|
||||
def add_to_playlist(name, video_info_list):
|
||||
if not os.path.exists(playlists_directory):
|
||||
os.makedirs(playlists_directory)
|
||||
os.makedirs(playlists_directory, exist_ok=True)
|
||||
ids = video_ids_in_playlist(name)
|
||||
missing_thumbnails = []
|
||||
with open(os.path.join(playlists_directory, name + ".txt"), "a", encoding='utf-8') as file:
|
||||
playlist_path = _find_playlist_path(name)
|
||||
with open(playlist_path, "a", encoding='utf-8') as file:
|
||||
for info in video_info_list:
|
||||
id = json.loads(info)['id']
|
||||
if id not in ids:
|
||||
@@ -68,20 +106,14 @@ def add_extra_info_to_videos(videos, playlist_name):
|
||||
|
||||
def read_playlist(name):
    """Return the list of video-info dicts for the given playlist name.

    This span contained diff-fused old/new bodies; this is the coherent
    new version. A missing playlist file is treated as an empty
    playlist; corrupt lines are skipped (and reported) by
    _parse_playlist_lines.
    """
    playlist_path = _find_playlist_path(name)
    try:
        with open(playlist_path, 'r', encoding='utf-8') as f:
            data = f.read()
    except FileNotFoundError:
        return []

    return _parse_playlist_lines(data)
|
||||
|
||||
|
||||
def get_local_playlist_videos(name, offset=0, amount=50):
|
||||
@@ -103,14 +135,21 @@ def get_playlist_names():
|
||||
|
||||
def remove_from_playlist(name, video_info_list):
|
||||
ids = [json.loads(video)['id'] for video in video_info_list]
|
||||
with open(os.path.join(playlists_directory, name + ".txt"), 'r', encoding='utf-8') as file:
|
||||
playlist_path = _find_playlist_path(name)
|
||||
with open(playlist_path, 'r', encoding='utf-8') as file:
|
||||
videos = file.read()
|
||||
videos_in = videos.splitlines()
|
||||
videos_out = []
|
||||
for video in videos_in:
|
||||
if json.loads(video)['id'] not in ids:
|
||||
videos_out.append(video)
|
||||
with open(os.path.join(playlists_directory, name + ".txt"), 'w', encoding='utf-8') as file:
|
||||
clean = video.strip()
|
||||
if not clean:
|
||||
continue
|
||||
try:
|
||||
if json.loads(clean)['id'] not in ids:
|
||||
videos_out.append(clean)
|
||||
except json.decoder.JSONDecodeError:
|
||||
pass
|
||||
with open(playlist_path, 'w', encoding='utf-8') as file:
|
||||
file.write("\n".join(videos_out) + "\n")
|
||||
|
||||
try:
|
||||
@@ -154,8 +193,9 @@ def path_edit_playlist(playlist_name):
|
||||
redirect_page_number = min(int(request.values.get('page', 1)), math.ceil(number_of_videos_remaining/50))
|
||||
return flask.redirect(util.URL_ORIGIN + request.path + '?page=' + str(redirect_page_number))
|
||||
elif request.values['action'] == 'remove_playlist':
|
||||
safe_name = _validate_playlist_name(playlist_name)
|
||||
try:
|
||||
os.remove(os.path.join(playlists_directory, playlist_name + ".txt"))
|
||||
os.remove(os.path.join(playlists_directory, safe_name + '.txt'))
|
||||
except OSError:
|
||||
pass
|
||||
return flask.redirect(util.URL_ORIGIN + '/playlists')
|
||||
@@ -195,8 +235,17 @@ def edit_playlist():
|
||||
flask.abort(400)
|
||||
|
||||
|
||||
# An 11-character YouTube video id followed by '.jpg' — nothing else.
_THUMBNAIL_RE = re.compile(r'^[A-Za-z0-9_-]{11}\.jpg$')


@yt_app.route('/data/playlist_thumbnails/<playlist_name>/<thumbnail>')
def serve_thumbnail(playlist_name, thumbnail):
    """Serve a cached playlist thumbnail.

    This span contained diff-fused old/new bodies; this is the coherent
    new version. Both path components are validated so a crafted URL
    cannot escape `thumbnails_directory` via `..` or NUL tricks
    (CWE-22): the playlist name must pass the playlist-name whitelist
    and the thumbnail must be an 11-char video id plus '.jpg'.
    """
    safe_name = _validate_playlist_name(playlist_name)
    if not _THUMBNAIL_RE.match(thumbnail):
        flask.abort(400)
    # .. is necessary because flask always uses the application directory
    # at ./youtube, not the working directory.
    return flask.send_from_directory(
        os.path.join('..', thumbnails_directory, safe_name), thumbnail)
|
||||
|
||||
@@ -3,21 +3,20 @@ from youtube import yt_app
|
||||
import settings
|
||||
|
||||
import base64
|
||||
import urllib
|
||||
import json
|
||||
import string
|
||||
import gevent
|
||||
import math
|
||||
from flask import request
|
||||
from flask import request, abort
|
||||
import flask
|
||||
|
||||
|
||||
def playlist_ctoken(playlist_id, offset):
|
||||
def playlist_ctoken(playlist_id, offset, include_shorts=True):
|
||||
|
||||
offset = proto.uint(1, offset)
|
||||
# this is just obfuscation as far as I can tell. It doesn't even follow protobuf
|
||||
offset = b'PT:' + proto.unpadded_b64encode(offset)
|
||||
offset = proto.string(15, offset)
|
||||
if not include_shorts:
|
||||
offset += proto.string(104, proto.uint(2, 1))
|
||||
|
||||
continuation_info = proto.string(3, proto.percent_b64encode(offset))
|
||||
|
||||
@@ -26,47 +25,62 @@ def playlist_ctoken(playlist_id, offset):
|
||||
|
||||
return base64.urlsafe_b64encode(pointless_nest).decode('ascii')
|
||||
|
||||
# initial request types:
|
||||
# polymer_json: https://m.youtube.com/playlist?list=PLv3TTBr1W_9tppikBxAE_G6qjWdBljBHJ&pbj=1&lact=0
|
||||
# ajax json: https://m.youtube.com/playlist?list=PLv3TTBr1W_9tppikBxAE_G6qjWdBljBHJ&pbj=1&lact=0 with header X-YouTube-Client-Version: 1.20180418
|
||||
|
||||
def playlist_first_page(playlist_id, report_text="Retrieved playlist",
|
||||
use_mobile=False):
|
||||
# Use innertube API (pbj=1 no longer works for many playlists)
|
||||
key = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
|
||||
url = 'https://www.youtube.com/youtubei/v1/browse?key=' + key
|
||||
|
||||
# continuation request types:
|
||||
# polymer_json: https://m.youtube.com/playlist?&ctoken=[...]&pbj=1
|
||||
# ajax json: https://m.youtube.com/playlist?action_continuation=1&ajax=1&ctoken=[...]
|
||||
|
||||
|
||||
headers_1 = (
|
||||
('Accept', '*/*'),
|
||||
('Accept-Language', 'en-US,en;q=0.5'),
|
||||
('X-YouTube-Client-Name', '2'),
|
||||
('X-YouTube-Client-Version', '2.20180614'),
|
||||
)
|
||||
|
||||
|
||||
def playlist_first_page(playlist_id, report_text="Retrieved playlist"):
|
||||
url = 'https://m.youtube.com/playlist?list=' + playlist_id + '&pbj=1'
|
||||
content = util.fetch_url(url, util.mobile_ua + headers_1, report_text=report_text, debug_name='playlist_first_page')
|
||||
content = json.loads(content.decode('utf-8'))
|
||||
|
||||
return content
|
||||
|
||||
|
||||
#https://m.youtube.com/playlist?itct=CBMQybcCIhMIptj9xJaJ2wIV2JKcCh3Idwu-&ctoken=4qmFsgI2EiRWTFBMT3kwajlBdmxWWlB0bzZJa2pLZnB1MFNjeC0tN1BHVEMaDmVnWlFWRHBEUWxFJTNE&pbj=1
|
||||
def get_videos(playlist_id, page):
|
||||
|
||||
url = "https://m.youtube.com/playlist?ctoken=" + playlist_ctoken(playlist_id, (int(page)-1)*20) + "&pbj=1"
|
||||
headers = {
|
||||
'User-Agent': ' Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_1 like Mac OS X) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.0 Mobile/14E304 Safari/602.1',
|
||||
'Accept': '*/*',
|
||||
'Accept-Language': 'en-US,en;q=0.5',
|
||||
'X-YouTube-Client-Name': '2',
|
||||
'X-YouTube-Client-Version': '2.20180508',
|
||||
data = {
|
||||
'context': {
|
||||
'client': {
|
||||
'hl': 'en',
|
||||
'gl': 'US',
|
||||
'clientName': 'WEB',
|
||||
'clientVersion': '2.20240327.00.00',
|
||||
},
|
||||
},
|
||||
'browseId': 'VL' + playlist_id,
|
||||
}
|
||||
|
||||
content_type_header = (('Content-Type', 'application/json'),)
|
||||
content = util.fetch_url(
|
||||
url, headers,
|
||||
report_text="Retrieved playlist", debug_name='playlist_videos')
|
||||
url, util.desktop_xhr_headers + content_type_header,
|
||||
data=json.dumps(data),
|
||||
report_text=report_text, debug_name='playlist_first_page'
|
||||
)
|
||||
return json.loads(content.decode('utf-8'))
|
||||
|
||||
|
||||
def get_videos(playlist_id, page, include_shorts=True, use_mobile=False,
|
||||
report_text='Retrieved playlist'):
|
||||
page_size = 100
|
||||
|
||||
key = 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
|
||||
url = 'https://www.youtube.com/youtubei/v1/browse?key=' + key
|
||||
|
||||
ctoken = playlist_ctoken(playlist_id, (int(page)-1)*page_size,
|
||||
include_shorts=include_shorts)
|
||||
|
||||
data = {
|
||||
'context': {
|
||||
'client': {
|
||||
'hl': 'en',
|
||||
'gl': 'US',
|
||||
'clientName': 'WEB',
|
||||
'clientVersion': '2.20240327.00.00',
|
||||
},
|
||||
},
|
||||
'continuation': ctoken,
|
||||
}
|
||||
|
||||
content_type_header = (('Content-Type', 'application/json'),)
|
||||
content = util.fetch_url(
|
||||
url, util.desktop_xhr_headers + content_type_header,
|
||||
data=json.dumps(data),
|
||||
report_text=report_text, debug_name='playlist_videos'
|
||||
)
|
||||
|
||||
info = json.loads(content.decode('utf-8'))
|
||||
return info
|
||||
@@ -78,6 +92,15 @@ def get_playlist_page():
|
||||
abort(400)
|
||||
|
||||
playlist_id = request.args.get('list')
|
||||
|
||||
# Radio/Mix playlists (RD...) only work as watch page, not playlist page
|
||||
if playlist_id.startswith('RD'):
|
||||
first_video_id = playlist_id[2:] # video ID after 'RD' prefix
|
||||
return flask.redirect(
|
||||
util.URL_ORIGIN + '/watch?v=' + first_video_id + '&list=' + playlist_id,
|
||||
302
|
||||
)
|
||||
|
||||
page = request.args.get('page', '1')
|
||||
|
||||
if page == '1':
|
||||
@@ -85,7 +108,10 @@ def get_playlist_page():
|
||||
this_page_json = first_page_json
|
||||
else:
|
||||
tasks = (
|
||||
gevent.spawn(playlist_first_page, playlist_id, report_text="Retrieved playlist info" ),
|
||||
gevent.spawn(
|
||||
playlist_first_page, playlist_id,
|
||||
report_text="Retrieved playlist info"
|
||||
),
|
||||
gevent.spawn(get_videos, playlist_id, page)
|
||||
)
|
||||
gevent.joinall(tasks)
|
||||
@@ -103,8 +129,8 @@ def get_playlist_page():
|
||||
for item in info.get('items', ()):
|
||||
util.prefix_urls(item)
|
||||
util.add_extra_html_info(item)
|
||||
if 'id' in item:
|
||||
item['thumbnail'] = settings.img_prefix + 'https://i.ytimg.com/vi/' + item['id'] + '/default.jpg'
|
||||
if 'id' in item and not item.get('thumbnail'):
|
||||
item['thumbnail'] = f"{settings.img_prefix}https://i.ytimg.com/vi/{item['id']}/hqdefault.jpg"
|
||||
|
||||
item['url'] += '&list=' + playlist_id
|
||||
if item['index']:
|
||||
@@ -112,13 +138,13 @@ def get_playlist_page():
|
||||
|
||||
video_count = yt_data_extract.deep_get(info, 'metadata', 'video_count')
|
||||
if video_count is None:
|
||||
video_count = 40
|
||||
video_count = 1000
|
||||
|
||||
return flask.render_template(
|
||||
'playlist.html',
|
||||
header_playlist_names=local_playlist.get_playlist_names(),
|
||||
video_list=info.get('items', []),
|
||||
num_pages=math.ceil(video_count/20),
|
||||
num_pages=math.ceil(video_count/100),
|
||||
parameters_dictionary=request.args,
|
||||
|
||||
**info['metadata']
|
||||
|
||||
@@ -113,12 +113,12 @@ def read_protobuf(data):
|
||||
length = read_varint(data)
|
||||
value = data.read(length)
|
||||
elif wire_type == 3:
|
||||
end_bytes = encode_varint((field_number << 3) | 4)
|
||||
end_bytes = varint_encode((field_number << 3) | 4)
|
||||
value = read_group(data, end_bytes)
|
||||
elif wire_type == 5:
|
||||
value = data.read(4)
|
||||
else:
|
||||
raise Exception("Unknown wire type: " + str(wire_type) + ", Tag: " + bytes_to_hex(succinct_encode(tag)) + ", at position " + str(data.tell()))
|
||||
raise Exception("Unknown wire type: " + str(wire_type) + " at position " + str(data.tell()))
|
||||
yield (wire_type, field_number, value)
|
||||
|
||||
|
||||
@@ -141,6 +141,17 @@ base64_enc_funcs = {
|
||||
|
||||
|
||||
def _make_protobuf(data):
|
||||
'''
|
||||
Input: Recursive list of protobuf objects or base-64 encodings
|
||||
Output: Protobuf bytestring
|
||||
Each protobuf object takes the form [wire_type, field_number, field_data]
|
||||
If a string protobuf has a list/tuple of length 2, this has the form
|
||||
(base64 type, data)
|
||||
The base64 types are
|
||||
- base64 means a base64 encode with equals sign paddings
|
||||
- base64s means a base64 encode without padding
|
||||
- base64p means a url base64 encode with equals signs replaced with %3D
|
||||
'''
|
||||
# must be dict mapping field_number to [wire_type, value]
|
||||
if isinstance(data, dict):
|
||||
new_data = []
|
||||
|
||||
@@ -97,6 +97,7 @@ import re
|
||||
import time
|
||||
import json
|
||||
import os
|
||||
import traceback
|
||||
import pprint
|
||||
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import settings
|
||||
import json
|
||||
import urllib
|
||||
import base64
|
||||
import mimetypes
|
||||
from flask import request
|
||||
import flask
|
||||
import os
|
||||
@@ -64,6 +63,8 @@ def get_search_page():
|
||||
query = request.args.get('search_query') or request.args.get('query')
|
||||
if query is None:
|
||||
return flask.render_template('home.html', title='Search')
|
||||
elif query.startswith('https://www.youtube.com') or query.startswith('https://www.youtu.be'):
|
||||
return flask.redirect(f'/{query}')
|
||||
|
||||
page = request.args.get("page", "1")
|
||||
autocorrect = int(request.args.get("autocorrect", "1"))
|
||||
|
||||
@@ -256,7 +256,8 @@ hr {
|
||||
padding-top: 6px;
|
||||
text-align: center;
|
||||
white-space: nowrap;
|
||||
border: none;
|
||||
border: 1px solid;
|
||||
border-color: var(--button-border);
|
||||
border-radius: 0.2rem;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,20 +1,22 @@
|
||||
:root {
|
||||
--background: #212121;
|
||||
--background: #121113;
|
||||
--text: #FFFFFF;
|
||||
--secondary-hover: #73828c;
|
||||
--secondary-focus: #303030;
|
||||
--secondary-inverse: #FFF;
|
||||
--secondary-hover: #222222;
|
||||
--secondary-focus: #121113;
|
||||
--secondary-inverse: #FFFFFF;
|
||||
--primary-background: #242424;
|
||||
--secondary-background: #424242;
|
||||
--thumb-background: #757575;
|
||||
--secondary-background: #222222;
|
||||
--thumb-background: #222222;
|
||||
--link: #00B0FF;
|
||||
--link-visited: #40C4FF;
|
||||
--border-bg: #FFFFFF;
|
||||
--buttom: #dcdcdb;
|
||||
--buttom-text: #415462;
|
||||
--button-border: #91918c;
|
||||
--buttom-hover: #BBB;
|
||||
--search-text: #FFF;
|
||||
--time-background: #212121;
|
||||
--time-text: #FFF;
|
||||
--border-bg: #222222;
|
||||
--border-bg-settings: #000000;
|
||||
--border-bg-license: #000000;
|
||||
--buttom: #121113;
|
||||
--buttom-text: #FFFFFF;
|
||||
--button-border: #222222;
|
||||
--buttom-hover: #222222;
|
||||
--search-text: #FFFFFF;
|
||||
--time-background: #121113;
|
||||
--time-text: #FFFFFF;
|
||||
}
|
||||
|
||||
@@ -1,19 +1,21 @@
|
||||
:root {
|
||||
--background: #2d3743;
|
||||
--background: #2D3743;
|
||||
--text: #FFFFFF;
|
||||
--secondary-hover: #73828c;
|
||||
--secondary-hover: #73828C;
|
||||
--secondary-focus: rgba(115, 130, 140, 0.125);
|
||||
--secondary-inverse: #FFFFFF;
|
||||
--primary-background: #2d3743;
|
||||
--primary-background: #2D3743;
|
||||
--secondary-background: #102027;
|
||||
--thumb-background: #35404D;
|
||||
--link: #22aaff;
|
||||
--link-visited: #7755ff;
|
||||
--link: #22AAFF;
|
||||
--link-visited: #7755FF;
|
||||
--border-bg: #FFFFFF;
|
||||
--buttom: #DCDCDC;
|
||||
--buttom-text: #415462;
|
||||
--button-border: #91918c;
|
||||
--buttom-hover: #BBBBBB;
|
||||
--border-bg-settings: #FFFFFF;
|
||||
--border-bg-license: #FFFFFF;
|
||||
--buttom: #2D3743;
|
||||
--buttom-text: #FFFFFF;
|
||||
--button-border: #102027;
|
||||
--buttom-hover: #102027;
|
||||
--search-text: #FFFFFF;
|
||||
--time-background: #212121;
|
||||
--time-text: #FFFFFF;
|
||||
|
||||
@@ -20,6 +20,29 @@
|
||||
// TODO: Call abort to cancel in-progress appends?
|
||||
|
||||
|
||||
// Buffer sizes for different systems
|
||||
const BUFFER_CONFIG = {
|
||||
default: 50 * 10**6, // 50 megabytes
|
||||
webOS: 20 * 10**6, // 20 megabytes WebOS (LG)
|
||||
samsungTizen: 20 * 10**6, // 20 megabytes Samsung Tizen OS
|
||||
androidTV: 30 * 10**6, // 30 megabytes Android TV
|
||||
desktop: 50 * 10**6, // 50 megabytes PC/Mac
|
||||
};
|
||||
|
||||
function detectSystem() {
|
||||
const userAgent = navigator.userAgent.toLowerCase();
|
||||
if (/webos|lg browser/i.test(userAgent)) {
|
||||
return "webOS";
|
||||
} else if (/tizen/i.test(userAgent)) {
|
||||
return "samsungTizen";
|
||||
} else if (/android tv|smart-tv/i.test(userAgent)) {
|
||||
return "androidTV";
|
||||
} else if (/firefox|chrome|safari|edge/i.test(userAgent)) {
|
||||
return "desktop";
|
||||
} else {
|
||||
return "default";
|
||||
}
|
||||
}
|
||||
|
||||
function AVMerge(video, srcInfo, startTime){
|
||||
this.audioSource = null;
|
||||
@@ -164,6 +187,8 @@ AVMerge.prototype.printDebuggingInfo = function() {
|
||||
}
|
||||
|
||||
function Stream(avMerge, source, startTime, avRatio) {
|
||||
const selectedSystem = detectSystem();
|
||||
let baseBufferTarget = BUFFER_CONFIG[selectedSystem] || BUFFER_CONFIG.default;
|
||||
this.avMerge = avMerge;
|
||||
this.video = avMerge.video;
|
||||
this.url = source['url'];
|
||||
@@ -173,10 +198,11 @@ function Stream(avMerge, source, startTime, avRatio) {
|
||||
this.mimeCodec = source['mime_codec']
|
||||
this.streamType = source['acodec'] ? 'audio' : 'video';
|
||||
if (this.streamType == 'audio') {
|
||||
this.bufferTarget = avRatio*50*10**6;
|
||||
this.bufferTarget = avRatio * baseBufferTarget;
|
||||
} else {
|
||||
this.bufferTarget = 50*10**6; // 50 megabytes
|
||||
this.bufferTarget = baseBufferTarget;
|
||||
}
|
||||
console.info(`Detected system: ${selectedSystem}. Applying bufferTarget of ${this.bufferTarget} bytes to ${this.streamType}.`);
|
||||
|
||||
this.initRange = source['init_range'];
|
||||
this.indexRange = source['index_range'];
|
||||
@@ -204,6 +230,8 @@ Stream.prototype.setup = async function(){
|
||||
this.url,
|
||||
this.initRange.start,
|
||||
this.indexRange.end,
|
||||
'Initialization+index segments',
|
||||
).then(
|
||||
(buffer) => {
|
||||
let init_end = this.initRange.end - this.initRange.start + 1;
|
||||
let index_start = this.indexRange.start - this.initRange.start;
|
||||
@@ -211,22 +239,23 @@ Stream.prototype.setup = async function(){
|
||||
this.setupInitSegment(buffer.slice(0, init_end));
|
||||
this.setupSegmentIndex(buffer.slice(index_start, index_end));
|
||||
}
|
||||
)
|
||||
);
|
||||
} else {
|
||||
// initialization data
|
||||
await fetchRange(
|
||||
this.url,
|
||||
this.initRange.start,
|
||||
this.initRange.end,
|
||||
this.setupInitSegment.bind(this),
|
||||
);
|
||||
'Initialization segment',
|
||||
).then(this.setupInitSegment.bind(this));
|
||||
|
||||
// sidx (segment index) table
|
||||
fetchRange(
|
||||
this.url,
|
||||
this.indexRange.start,
|
||||
this.indexRange.end,
|
||||
this.setupSegmentIndex.bind(this)
|
||||
);
|
||||
'Index segment',
|
||||
).then(this.setupSegmentIndex.bind(this));
|
||||
}
|
||||
}
|
||||
Stream.prototype.setupInitSegment = function(initSegment) {
|
||||
@@ -388,7 +417,7 @@ Stream.prototype.getSegmentIdx = function(videoTime) {
|
||||
}
|
||||
index = index + increment;
|
||||
}
|
||||
this.reportInfo('Could not find segment index for time', videoTime);
|
||||
this.reportError('Could not find segment index for time', videoTime);
|
||||
return 0;
|
||||
}
|
||||
Stream.prototype.checkBuffer = async function() {
|
||||
@@ -485,8 +514,8 @@ Stream.prototype.fetchSegment = function(segmentIdx) {
|
||||
this.url,
|
||||
entry.start,
|
||||
entry.end,
|
||||
this.appendSegment.bind(this, segmentIdx),
|
||||
);
|
||||
String(this.streamType) + ' segment ' + String(segmentIdx),
|
||||
).then(this.appendSegment.bind(this, segmentIdx));
|
||||
}
|
||||
Stream.prototype.fetchSegmentIfNeeded = function(segmentIdx) {
|
||||
if (segmentIdx < 0 || segmentIdx >= this.sidx.entries.length){
|
||||
@@ -518,22 +547,56 @@ Stream.prototype.reportWarning = function(...args) {
|
||||
Stream.prototype.reportError = function(...args) {
|
||||
reportError(String(this.streamType) + ':', ...args);
|
||||
}
|
||||
Stream.prototype.reportInfo = function(...args) {
|
||||
reportInfo(String(this.streamType) + ':', ...args);
|
||||
}
|
||||
|
||||
|
||||
// Utility functions
|
||||
|
||||
function fetchRange(url, start, end, cb) {
|
||||
// https://gomakethings.com/promise-based-xhr/
|
||||
// https://stackoverflow.com/a/30008115
|
||||
// http://lofi.limo/blog/retry-xmlhttprequest-carefully
|
||||
function fetchRange(url, start, end, debugInfo) {
|
||||
return new Promise((resolve, reject) => {
|
||||
let retryCount = 0;
|
||||
let xhr = new XMLHttpRequest();
|
||||
function onFailure(err, message, maxRetries=5){
|
||||
message = debugInfo + ': ' + message + ' - Err: ' + String(err);
|
||||
retryCount++;
|
||||
if (retryCount > maxRetries || xhr.status == 403){
|
||||
reportError('fetchRange error while fetching ' + message);
|
||||
reject(message);
|
||||
return;
|
||||
} else {
|
||||
reportWarning('Failed to fetch ' + message
|
||||
+ '. Attempting retry '
|
||||
+ String(retryCount) +'/' + String(maxRetries));
|
||||
}
|
||||
|
||||
// Retry in 1 second, doubled for each next retry
|
||||
setTimeout(function(){
|
||||
xhr.open('get',url);
|
||||
xhr.send();
|
||||
}, 1000*Math.pow(2,(retryCount-1)));
|
||||
}
|
||||
xhr.open('get', url);
|
||||
xhr.timeout = 15000;
|
||||
xhr.responseType = 'arraybuffer';
|
||||
xhr.setRequestHeader('Range', 'bytes=' + start + '-' + end);
|
||||
xhr.onload = function() {
|
||||
//bytesFetched += end - start + 1;
|
||||
resolve(cb(xhr.response));
|
||||
xhr.onload = function (e) {
|
||||
if (xhr.status >= 200 && xhr.status < 300) {
|
||||
resolve(xhr.response);
|
||||
} else {
|
||||
onFailure(e,
|
||||
'Status '
|
||||
+ String(xhr.status) + ' ' + String(xhr.statusText)
|
||||
);
|
||||
}
|
||||
};
|
||||
xhr.onerror = function (event) {
|
||||
onFailure(e, 'Network error');
|
||||
};
|
||||
xhr.ontimeout = function (event){
|
||||
xhr.timeout += 5000;
|
||||
onFailure(null, 'Timeout (15s)', maxRetries=5);
|
||||
};
|
||||
xhr.send();
|
||||
});
|
||||
@@ -573,9 +636,6 @@ function addEvent(obj, eventName, func) {
|
||||
return new RegisteredEvent(obj, eventName, func);
|
||||
}
|
||||
|
||||
function reportInfo(...args){
|
||||
console.info(...args);
|
||||
}
|
||||
function reportWarning(...args){
|
||||
console.warn(...args);
|
||||
}
|
||||
|
||||
@@ -114,3 +114,57 @@ function copyTextToClipboard(text) {
|
||||
window.addEventListener('DOMContentLoaded', function() {
|
||||
cur_track_idx = getDefaultTranscriptTrackIdx();
|
||||
});
|
||||
|
||||
/**
|
||||
* Thumbnail fallback handler
|
||||
* Tries lower quality thumbnails when higher quality fails (404)
|
||||
* Priority: hq720.jpg -> sddefault.jpg -> hqdefault.jpg -> mqdefault.jpg -> default.jpg
|
||||
*/
|
||||
function thumbnail_fallback(img) {
|
||||
// Once src is set (image was loaded or attempted), always work with src
|
||||
const src = img.src;
|
||||
if (!src) return;
|
||||
|
||||
// Handle YouTube video thumbnails
|
||||
if (src.includes('/i.ytimg.com/') || src.includes('/i.ytimg.com%2F')) {
|
||||
// Extract video ID from URL
|
||||
const match = src.match(/\/vi\/([^/]+)/);
|
||||
if (!match) return;
|
||||
|
||||
const videoId = match[1];
|
||||
const imgPrefix = settings_img_prefix || '';
|
||||
|
||||
// Define fallback order (from highest to lowest quality)
|
||||
const fallbacks = [
|
||||
'hq720.jpg',
|
||||
'sddefault.jpg',
|
||||
'hqdefault.jpg',
|
||||
];
|
||||
|
||||
// Find current quality and try next fallback
|
||||
for (let i = 0; i < fallbacks.length; i++) {
|
||||
if (src.includes(fallbacks[i])) {
|
||||
if (i < fallbacks.length - 1) {
|
||||
img.src = imgPrefix + 'https://i.ytimg.com/vi/' + videoId + '/' + fallbacks[i + 1];
|
||||
} else {
|
||||
// Last fallback failed, stop retrying
|
||||
img.onerror = null;
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
// Unknown quality format, stop retrying
|
||||
img.onerror = null;
|
||||
}
|
||||
// Handle YouTube channel avatars (ggpht.com)
|
||||
else if (src.includes('ggpht.com') || src.includes('yt3.ggpht.com')) {
|
||||
const newSrc = src.replace(/=s\d+-c-k/, '=s240-c-k-c0x00ffffff-no-rj');
|
||||
if (newSrc !== src) {
|
||||
img.src = newSrc;
|
||||
} else {
|
||||
img.onerror = null;
|
||||
}
|
||||
} else {
|
||||
img.onerror = null;
|
||||
}
|
||||
}
|
||||
|
||||
2
youtube/static/js/hls.min.js
vendored
Normal file
2
youtube/static/js/hls.min.js
vendored
Normal file
File diff suppressed because one or more lines are too long
1
youtube/static/js/hls.min.js.map
Normal file
1
youtube/static/js/hls.min.js.map
Normal file
File diff suppressed because one or more lines are too long
@@ -1,130 +0,0 @@
|
||||
(function main() {
|
||||
'use strict';
|
||||
|
||||
let captionsActive;
|
||||
|
||||
switch(true) {
|
||||
case data.settings.subtitles_mode == 2:
|
||||
captionsActive = true;
|
||||
break;
|
||||
case data.settings.subtitles_mode == 1 && data.has_manual_captions:
|
||||
captionsActive = true;
|
||||
break;
|
||||
default:
|
||||
captionsActive = false;
|
||||
}
|
||||
|
||||
let qualityOptions = [];
|
||||
let qualityDefault;
|
||||
for (let src of data['uni_sources']) {
|
||||
qualityOptions.push(src.quality_string)
|
||||
}
|
||||
for (let src of data['pair_sources']) {
|
||||
qualityOptions.push(src.quality_string)
|
||||
}
|
||||
if (data['using_pair_sources'])
|
||||
qualityDefault = data['pair_sources'][data['pair_idx']].quality_string;
|
||||
else if (data['uni_sources'].length != 0)
|
||||
qualityDefault = data['uni_sources'][data['uni_idx']].quality_string;
|
||||
else
|
||||
qualityDefault = 'None';
|
||||
|
||||
// Fix plyr refusing to work with qualities that are strings
|
||||
Object.defineProperty(Plyr.prototype, 'quality', {
|
||||
set: function(input) {
|
||||
const config = this.config.quality;
|
||||
const options = this.options.quality;
|
||||
let quality;
|
||||
|
||||
if (!options.length) {
|
||||
return;
|
||||
}
|
||||
|
||||
// removing this line:
|
||||
//let quality = [!is.empty(input) && Number(input), this.storage.get('quality'), config.selected, config.default].find(is.number);
|
||||
// replacing with:
|
||||
quality = input;
|
||||
let updateStorage = true;
|
||||
|
||||
if (!options.includes(quality)) {
|
||||
// Plyr sets quality to null at startup, resulting in the erroneous
|
||||
// calling of this setter function with input = null, and the
|
||||
// commented out code below would set the quality to something
|
||||
// unrelated at startup. Comment out and just return.
|
||||
return;
|
||||
/*const value = closest(options, quality);
|
||||
this.debug.warn(`Unsupported quality option: ${quality}, using ${value} instead`);
|
||||
quality = value; // Don't update storage if quality is not supported
|
||||
updateStorage = false;*/
|
||||
} // Update config
|
||||
|
||||
|
||||
config.selected = quality; // Set quality
|
||||
|
||||
this.media.quality = quality; // Save to storage
|
||||
|
||||
if (updateStorage) {
|
||||
this.storage.set({
|
||||
quality
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const player = new Plyr(document.getElementById('js-video-player'), {
|
||||
disableContextMenu: false,
|
||||
captions: {
|
||||
active: captionsActive,
|
||||
language: data.settings.subtitles_language,
|
||||
},
|
||||
controls: [
|
||||
'play-large',
|
||||
'play',
|
||||
'progress',
|
||||
'current-time',
|
||||
'duration',
|
||||
'mute',
|
||||
'volume',
|
||||
'captions',
|
||||
'settings',
|
||||
'pip',
|
||||
'airplay',
|
||||
'fullscreen'
|
||||
],
|
||||
iconUrl: "/youtube.com/static/modules/plyr/plyr.svg",
|
||||
blankVideo: "/youtube.com/static/modules/plyr/blank.webm",
|
||||
debug: false,
|
||||
storage: {enabled: false},
|
||||
quality: {
|
||||
default: qualityDefault,
|
||||
options: qualityOptions,
|
||||
forced: true,
|
||||
onChange: function(quality) {
|
||||
if (quality == 'None') {return;}
|
||||
if (quality.includes('(integrated)')) {
|
||||
for (let i=0; i < data['uni_sources'].length; i++) {
|
||||
if (data['uni_sources'][i].quality_string == quality) {
|
||||
changeQuality({'type': 'uni', 'index': i});
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (let i=0; i < data['pair_sources'].length; i++) {
|
||||
if (data['pair_sources'][i].quality_string == quality) {
|
||||
changeQuality({'type': 'pair', 'index': i});
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
previewThumbnails: {
|
||||
enabled: storyboard_url != null,
|
||||
src: [storyboard_url],
|
||||
},
|
||||
settings: ['captions', 'quality', 'speed', 'loop'],
|
||||
tooltips: {
|
||||
controls: true,
|
||||
},
|
||||
});
|
||||
}());
|
||||
180
youtube/static/js/plyr.dash.start.js
Normal file
180
youtube/static/js/plyr.dash.start.js
Normal file
@@ -0,0 +1,180 @@
|
||||
(function main() {
|
||||
'use strict';
|
||||
|
||||
// Captions
|
||||
let captionsActive = false;
|
||||
if (data.settings.subtitles_mode === 2 || (data.settings.subtitles_mode === 1 && data.has_manual_captions)) {
|
||||
captionsActive = true;
|
||||
}
|
||||
|
||||
// AutoPlay
|
||||
let autoplayActive = data.settings.autoplay_videos || false;
|
||||
|
||||
let qualityOptions = [];
|
||||
let qualityDefault;
|
||||
|
||||
// Collect uni sources (integrated)
|
||||
for (let src of data.uni_sources) {
|
||||
qualityOptions.push(src.quality_string);
|
||||
}
|
||||
|
||||
// Collect pair sources (av-merge)
|
||||
for (let src of data.pair_sources) {
|
||||
qualityOptions.push(src.quality_string);
|
||||
}
|
||||
|
||||
if (data.using_pair_sources) {
|
||||
qualityDefault = data.pair_sources[data.pair_idx].quality_string;
|
||||
} else if (data.uni_sources.length !== 0) {
|
||||
qualityDefault = data.uni_sources[data.uni_idx].quality_string;
|
||||
} else {
|
||||
qualityDefault = 'None';
|
||||
}
|
||||
|
||||
// Current av-merge instance
|
||||
let avMerge = null;
|
||||
|
||||
// Change quality: handles both uni (integrated) and pair (av-merge)
|
||||
function changeQuality(selection) {
|
||||
let currentVideoTime = video.currentTime;
|
||||
let videoPaused = video.paused;
|
||||
let videoSpeed = video.playbackRate;
|
||||
let srcInfo;
|
||||
|
||||
// Close previous av-merge if any
|
||||
if (avMerge && typeof avMerge.close === 'function') {
|
||||
avMerge.close();
|
||||
}
|
||||
|
||||
if (selection.type == 'uni') {
|
||||
srcInfo = data.uni_sources[selection.index];
|
||||
video.src = srcInfo.url;
|
||||
avMerge = null;
|
||||
} else {
|
||||
srcInfo = data.pair_sources[selection.index];
|
||||
avMerge = new AVMerge(video, srcInfo, currentVideoTime);
|
||||
}
|
||||
|
||||
video.currentTime = currentVideoTime;
|
||||
if (!videoPaused) {
|
||||
video.play();
|
||||
}
|
||||
video.playbackRate = videoSpeed;
|
||||
}
|
||||
|
||||
// Fix plyr refusing to work with qualities that are strings
|
||||
Object.defineProperty(Plyr.prototype, 'quality', {
|
||||
set: function (input) {
|
||||
const config = this.config.quality;
|
||||
const options = this.options.quality;
|
||||
let quality = input;
|
||||
let updateStorage = true;
|
||||
|
||||
if (!options.length) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!options.includes(quality)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Update config
|
||||
config.selected = quality;
|
||||
|
||||
// Set quality
|
||||
this.media.quality = quality;
|
||||
|
||||
// Save to storage
|
||||
if (updateStorage) {
|
||||
this.storage.set({ quality });
|
||||
}
|
||||
},
|
||||
});
|
||||
|
||||
const playerOptions = {
|
||||
autoplay: autoplayActive,
|
||||
disableContextMenu: false,
|
||||
captions: {
|
||||
active: captionsActive,
|
||||
language: data.settings.subtitles_language,
|
||||
},
|
||||
controls: [
|
||||
'play-large',
|
||||
'play',
|
||||
'progress',
|
||||
'current-time',
|
||||
'duration',
|
||||
'mute',
|
||||
'volume',
|
||||
'captions',
|
||||
'settings',
|
||||
'pip',
|
||||
'airplay',
|
||||
'fullscreen',
|
||||
],
|
||||
iconUrl: '/youtube.com/static/modules/plyr/plyr.svg',
|
||||
blankVideo: '/youtube.com/static/modules/plyr/blank.webm',
|
||||
debug: false,
|
||||
storage: { enabled: false },
|
||||
quality: {
|
||||
default: qualityDefault,
|
||||
options: qualityOptions,
|
||||
forced: true,
|
||||
onChange: function (quality) {
|
||||
if (quality == 'None') {
|
||||
return;
|
||||
}
|
||||
// Check if it's a uni source (integrated)
|
||||
if (quality.includes('(integrated)')) {
|
||||
for (let i = 0; i < data.uni_sources.length; i++) {
|
||||
if (data.uni_sources[i].quality_string == quality) {
|
||||
changeQuality({ type: 'uni', index: i });
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// It's a pair source (av-merge)
|
||||
for (let i = 0; i < data.pair_sources.length; i++) {
|
||||
if (data.pair_sources[i].quality_string == quality) {
|
||||
changeQuality({ type: 'pair', index: i });
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
previewThumbnails: {
|
||||
enabled: storyboard_url !== null,
|
||||
src: [storyboard_url],
|
||||
},
|
||||
settings: ['captions', 'quality', 'speed', 'loop'],
|
||||
tooltips: {
|
||||
controls: true,
|
||||
},
|
||||
};
|
||||
|
||||
const video = document.getElementById('js-video-player');
|
||||
const player = new Plyr(video, playerOptions);
|
||||
|
||||
// Hide audio track selector (DASH doesn't support multi-audio)
|
||||
const audioContainer = document.getElementById('plyr-audio-container');
|
||||
if (audioContainer) audioContainer.style.display = 'none';
|
||||
|
||||
// disable double click to fullscreen
|
||||
player.eventListeners.forEach(function(eventListener) {
|
||||
if(eventListener.type === 'dblclick') {
|
||||
eventListener.element.removeEventListener(eventListener.type, eventListener.callback, eventListener.options);
|
||||
}
|
||||
});
|
||||
|
||||
// Add .started property
|
||||
player.started = false;
|
||||
player.once('playing', function(){ this.started = true; });
|
||||
|
||||
// Set initial time
|
||||
if (data.time_start != 0) {
|
||||
video.addEventListener('loadedmetadata', function() {
|
||||
video.currentTime = data.time_start;
|
||||
});
|
||||
}
|
||||
})();
|
||||
538
youtube/static/js/plyr.hls.start.js
Normal file
538
youtube/static/js/plyr.hls.start.js
Normal file
@@ -0,0 +1,538 @@
|
||||
(function main() {
|
||||
'use strict';
|
||||
|
||||
console.log('Plyr start script loaded');
|
||||
|
||||
// Captions
|
||||
let captionsActive = false;
|
||||
if (typeof data !== 'undefined' && (data.settings.subtitles_mode === 2 || (data.settings.subtitles_mode === 1 && data.has_manual_captions))) {
|
||||
captionsActive = true;
|
||||
}
|
||||
|
||||
// AutoPlay
|
||||
let autoplayActive = typeof data !== 'undefined' && data.settings.autoplay_videos || false;
|
||||
|
||||
// Quality map: label -> hls level index
|
||||
window.hlsQualityMap = {};
|
||||
|
||||
let plyrInstance = null;
|
||||
let currentQuality = 'auto';
|
||||
let hls = null;
|
||||
window.hls = null;
|
||||
|
||||
/**
|
||||
* Get start level from settings (highest quality <= target)
|
||||
*/
|
||||
function getStartLevel(levels) {
|
||||
if (typeof data === 'undefined' || !data.settings) return -1;
|
||||
const defaultRes = data.settings.default_resolution;
|
||||
if (defaultRes === 'auto' || !defaultRes) return -1;
|
||||
const target = parseInt(defaultRes);
|
||||
|
||||
// Find the level with the highest height that is still <= target
|
||||
let bestLevel = -1;
|
||||
let bestHeight = 0;
|
||||
for (let i = 0; i < levels.length; i++) {
|
||||
const h = levels[i].height;
|
||||
if (h <= target && h > bestHeight) {
|
||||
bestHeight = h;
|
||||
bestLevel = i;
|
||||
}
|
||||
}
|
||||
return bestLevel;
|
||||
}
|
||||
|
||||
/**
 * Create an hls.js instance, attach it to #js-video-player and load
 * `manifestUrl`. Resolves with the hls instance once the manifest is
 * parsed (with the configured start level applied); rejects when the URL
 * or video element is missing, or on a fatal unrecoverable HLS error.
 */
function initHLS(manifestUrl) {
    return new Promise((resolve, reject) => {
        if (!manifestUrl) {
            reject('No HLS manifest URL provided');
            return;
        }

        console.log('Initializing HLS for Plyr:', manifestUrl);

        // Tear down any previous instance before re-initializing.
        if (hls) {
            hls.destroy();
            hls = null;
        }

        hls = new Hls({
            enableWorker: true,
            lowLatencyMode: false,
            maxBufferLength: 30,
            maxMaxBufferLength: 60,
            startLevel: -1, // start in auto; adjusted after MANIFEST_PARSED
        });

        // Expose for debugging / other scripts on the page.
        window.hls = hls;

        const video = document.getElementById('js-video-player');
        if (!video) {
            reject('Video element not found');
            return;
        }

        hls.loadSource(manifestUrl);
        hls.attachMedia(video);

        hls.on(Hls.Events.MANIFEST_PARSED, function(event, data) {
            console.log('HLS manifest parsed, levels:', hls.levels?.length);

            // Set initial quality from settings.
            const startLevel = getStartLevel(hls.levels);
            if (startLevel !== -1) {
                hls.currentLevel = startLevel;
                const level = hls.levels[startLevel];
                currentQuality = level.height + 'p';
                console.log('Starting at resolution:', currentQuality);
            }

            resolve(hls);
        });

        hls.on(Hls.Events.ERROR, function(_, data) {
            if (data.fatal) {
                console.error('HLS fatal error:', data.type, data.details);
                switch (data.type) {
                    case Hls.ErrorTypes.NETWORK_ERROR:
                        // Try to resume loading after a network hiccup.
                        hls.startLoad();
                        break;
                    case Hls.ErrorTypes.MEDIA_ERROR:
                        hls.recoverMediaError();
                        break;
                    default:
                        // Unrecoverable: surface the error to the caller.
                        reject(data);
                        break;
                }
            }
        });
    });
}
|
||||
|
||||
/**
 * Switch the active HLS quality level.
 *
 * @param {string} quality - 'auto' or a label like '720p' that exists in
 *     window.hlsQualityMap.
 * Updates the custom quality-button label in the Plyr controls when
 * present. Unknown labels are logged and ignored (previously they failed
 * silently).
 */
function changeHLSQuality(quality) {
    if (!hls) {
        console.error('HLS not available');
        return;
    }

    console.log('Changing HLS quality to:', quality);

    // Keep the custom control's label in sync with the selection.
    const setButtonText = (text) => {
        const qualityBtnText = document.getElementById('plyr-quality-text');
        if (qualityBtnText) {
            qualityBtnText.textContent = text;
        }
    };

    if (quality === 'auto') {
        hls.currentLevel = -1; // -1 = automatic level selection
        currentQuality = 'auto';
        console.log('HLS quality set to Auto');
        setButtonText('Auto');
        return;
    }

    const levelIndex = window.hlsQualityMap[quality];
    if (levelIndex === undefined) {
        console.warn('Unknown quality label:', quality);
        return;
    }

    hls.currentLevel = levelIndex;
    currentQuality = quality;
    console.log('HLS quality set to:', quality);
    setButtonText(quality);
}
|
||||
|
||||
/**
 * Inject a custom quality selector (button + dropdown) into the Plyr
 * control bar once the player fires 'ready'. Selecting an option calls
 * changeHLSQuality(); clicking outside the control closes the dropdown.
 *
 * @param {Plyr} player - the Plyr instance.
 * @param {string[]} qualityLabels - labels to offer ('auto', '720p', ...).
 */
function addCustomQualityControl(player, qualityLabels) {
    player.on('ready', () => {
        console.log('Adding custom quality control...');

        const controls = player.elements.container.querySelector('.plyr__controls');
        if (!controls) {
            console.error('Controls not found');
            return;
        }

        // Avoid duplicating the control if 'ready' fires more than once.
        if (document.getElementById('plyr-quality-container')) {
            console.log('Quality control already exists');
            return;
        }

        const qualityContainer = document.createElement('div');
        qualityContainer.id = 'plyr-quality-container';
        qualityContainer.className = 'plyr__control plyr__control--custom';

        const qualityButton = document.createElement('button');
        qualityButton.type = 'button';
        qualityButton.className = 'plyr__control';
        qualityButton.setAttribute('data-plyr', 'quality-custom');
        qualityButton.setAttribute('aria-label', 'Quality');
        qualityButton.innerHTML = `
            <svg class="plyr__icon hls_quality_icon" viewBox="0 0 24 24" width="18" height="18" fill="none" stroke="currentColor" stroke-width="2">
                <rect x="2" y="4" width="20" height="16" rx="2" ry="2"></rect>
                <line x1="8" y1="12" x2="16" y2="12"></line>
                <line x1="12" y1="8" x2="12" y2="16"></line>
            </svg>
            <span id="plyr-quality-text">${currentQuality === 'auto' ? 'Auto' : currentQuality}</span>
            <svg class="plyr__icon" viewBox="0 0 24 24" width="12" height="12" fill="none" stroke="currentColor" stroke-width="2">
                <polyline points="6 9 12 15 18 9"></polyline>
            </svg>
        `;

        const dropdown = document.createElement('div');
        dropdown.className = 'plyr-quality-dropdown';

        // One clickable option per label.
        qualityLabels.forEach(label => {
            const option = document.createElement('div');
            option.className = 'plyr-quality-option';
            option.textContent = label === 'auto' ? 'Auto' : label;

            if (label === currentQuality) {
                option.setAttribute('data-active', 'true');
            }

            option.addEventListener('click', (e) => {
                e.stopPropagation();
                changeHLSQuality(label);

                // Move the active marker to the clicked option.
                dropdown.querySelectorAll('.plyr-quality-option').forEach(opt => {
                    opt.removeAttribute('data-active');
                });
                option.setAttribute('data-active', 'true');

                dropdown.style.display = 'none';
            });

            dropdown.appendChild(option);
        });

        // Toggle the dropdown; close all other dropdowns first.
        qualityButton.addEventListener('click', (e) => {
            e.stopPropagation();
            const isVisible = dropdown.style.display === 'block';
            document.querySelectorAll('.plyr-quality-dropdown, .plyr-audio-dropdown').forEach(d => {
                d.style.display = 'none';
            });
            dropdown.style.display = isVisible ? 'none' : 'block';
        });

        // Close the dropdown on any outside click.
        document.addEventListener('click', (e) => {
            if (!qualityContainer.contains(e.target)) {
                dropdown.style.display = 'none';
            }
        });

        qualityContainer.appendChild(qualityButton);
        qualityContainer.appendChild(dropdown);

        // Place the control just before Plyr's settings button when possible.
        const settingsBtn = controls.querySelector('[data-plyr="settings"]');
        if (settingsBtn) {
            settingsBtn.insertAdjacentElement('beforebegin', qualityContainer);
        } else {
            controls.appendChild(qualityContainer);
        }

        console.log('Custom quality control added');
    });
}
|
||||
|
||||
/**
 * Inject a custom audio-track selector (button + dropdown) into the Plyr
 * control bar once the player fires 'ready'. Track entries are rebuilt
 * from hlsInstance.audioTracks each time the dropdown opens, and a track
 * named/tagged "original" is preferred when one exists.
 *
 * Fix: the AUDIO_TRACKS_UPDATED listener was registered without checking
 * hlsInstance, throwing a TypeError when it was null/undefined even though
 * every other use in this function is guarded; it is now guarded too.
 *
 * @param {Plyr} player - the Plyr instance.
 * @param {Hls} hlsInstance - the hls.js instance (may be null).
 */
function addCustomAudioTracksControl(player, hlsInstance) {
    player.on('ready', () => {
        console.log('Adding custom audio tracks control...');

        const controls = player.elements.container.querySelector('.plyr__controls');
        if (!controls) {
            console.error('Controls not found');
            return;
        }

        // Avoid duplicating the control if 'ready' fires more than once.
        if (document.getElementById('plyr-audio-container')) {
            console.log('Audio tracks control already exists');
            return;
        }

        const audioContainer = document.createElement('div');
        audioContainer.id = 'plyr-audio-container';
        audioContainer.className = 'plyr__control plyr__control--custom';

        const audioButton = document.createElement('button');
        audioButton.type = 'button';
        audioButton.className = 'plyr__control';
        audioButton.setAttribute('data-plyr', 'audio-custom');
        audioButton.setAttribute('aria-label', 'Audio Track');
        audioButton.innerHTML = `
            <svg class="plyr__icon hls_audio_icon" viewBox="0 0 24 24" width="18" height="18" fill="none" stroke="currentColor" stroke-width="2">
                <path d="M3 18v-6a9 9 0 0 1 18 0v6"></path>
                <path d="M21 19a2 2 0 0 1-2 2h-1a2 2 0 0 1-2-2v-3a2 2 0 0 1 2-2h3z"></path>
                <path d="M3 19a2 2 0 0 0 2 2h1a2 2 0 0 0 2-2v-3a2 2 0 0 0-2-2H3z"></path>
            </svg>
            <span id="plyr-audio-text">Audio</span>
            <svg class="plyr__icon" viewBox="0 0 24 24" width="12" height="12" fill="none" stroke="currentColor" stroke-width="2">
                <polyline points="6 9 12 15 18 9"></polyline>
            </svg>
        `;

        const audioDropdown = document.createElement('div');
        audioDropdown.className = 'plyr-audio-dropdown';

        // Shorten long track names so they fit on the button
        // (previously duplicated in three places).
        function setAudioButtonText(name) {
            const audioText = document.getElementById('plyr-audio-text');
            if (audioText) {
                audioText.textContent = name.length > 8 ? name.substring(0, 6) + '...' : name;
            }
        }

        // Rebuild the dropdown entries from the current audio track list.
        function updateAudioDropdown() {
            if (!hlsInstance || !hlsInstance.audioTracks) return;

            audioDropdown.innerHTML = '';

            if (hlsInstance.audioTracks.length === 0) {
                const noTrackMsg = document.createElement('div');
                noTrackMsg.className = 'plyr-audio-no-tracks';
                noTrackMsg.textContent = 'No audio tracks';
                audioDropdown.appendChild(noTrackMsg);
                return;
            }

            hlsInstance.audioTracks.forEach((track, idx) => {
                const option = document.createElement('div');
                option.className = 'plyr-audio-option';
                option.textContent = track.name || track.lang || `Track ${idx + 1}`;

                if (hlsInstance.audioTrack === idx) {
                    option.setAttribute('data-active', 'true');
                }

                option.addEventListener('click', (e) => {
                    e.stopPropagation();
                    hlsInstance.audioTrack = idx;
                    console.log('Audio track changed to:', track.name || track.lang || idx);

                    setAudioButtonText(track.name || track.lang || `Track ${idx + 1}`);

                    // Move the active marker to the clicked option.
                    audioDropdown.querySelectorAll('.plyr-audio-option').forEach(opt => {
                        opt.removeAttribute('data-active');
                    });
                    option.setAttribute('data-active', 'true');

                    audioDropdown.style.display = 'none';
                });

                audioDropdown.appendChild(option);
            });
        }

        // Toggle the dropdown; close all other dropdowns first.
        audioButton.addEventListener('click', (e) => {
            e.stopPropagation();
            updateAudioDropdown();
            const isVisible = audioDropdown.style.display === 'block';
            document.querySelectorAll('.plyr-quality-dropdown, .plyr-audio-dropdown').forEach(d => {
                d.style.display = 'none';
            });
            audioDropdown.style.display = isVisible ? 'none' : 'block';
        });

        // Close the dropdown on any outside click.
        document.addEventListener('click', (e) => {
            if (!audioContainer.contains(e.target)) {
                audioDropdown.style.display = 'none';
            }
        });

        audioContainer.appendChild(audioButton);
        audioContainer.appendChild(audioDropdown);

        // Place next to the quality control, else before settings, else last.
        const qualityContainer = document.getElementById('plyr-quality-container');
        if (qualityContainer) {
            qualityContainer.insertAdjacentElement('beforebegin', audioContainer);
        } else {
            const settingsBtn = controls.querySelector('[data-plyr="settings"]');
            if (settingsBtn) {
                settingsBtn.insertAdjacentElement('beforebegin', audioContainer);
            } else {
                controls.appendChild(audioContainer);
            }
        }

        if (hlsInstance && hlsInstance.audioTracks && hlsInstance.audioTracks.length > 0) {
            // Prefer "original" audio track.
            const originalIdx = hlsInstance.audioTracks.findIndex(t => {
                const name = (t.name || '').toLowerCase();
                const lang = (t.lang || '').toLowerCase();
                return name.includes('original') || lang === 'original';
            });
            if (originalIdx !== -1) {
                hlsInstance.audioTrack = originalIdx;
                console.log('Selected original audio track:', hlsInstance.audioTracks[originalIdx].name);
            }

            const currentTrack = hlsInstance.audioTracks[hlsInstance.audioTrack];
            if (currentTrack) {
                setAudioButtonText(currentTrack.name || currentTrack.lang || 'Audio');
            }
        }

        // Fix: guard — hlsInstance may be null here.
        if (hlsInstance) {
            hlsInstance.on(Hls.Events.AUDIO_TRACKS_UPDATED, () => {
                console.log('Audio tracks updated, count:', hlsInstance.audioTracks?.length);
                if (hlsInstance.audioTracks?.length > 0) {
                    updateAudioDropdown();
                    const currentTrack = hlsInstance.audioTracks[hlsInstance.audioTrack];
                    if (currentTrack) {
                        setAudioButtonText(currentTrack.name || currentTrack.lang || 'Audio');
                    }
                }
            });
        }

        console.log('Custom audio tracks control added');
    });
}
|
||||
|
||||
/**
 * Build the Plyr player on top of a ready hls.js instance: dedupe the HLS
 * levels into quality labels, fill window.hlsQualityMap, create Plyr with
 * the site's options, and attach the custom quality / audio-track controls.
 * NOTE(review): reads `captionsActive`, defined elsewhere in this file.
 */
function initPlyrWithQuality(hlsInstance) {
    const video = document.getElementById('js-video-player');

    if (!hlsInstance || !hlsInstance.levels || hlsInstance.levels.length === 0) {
        console.error('HLS not ready');
        return;
    }

    if (!video) {
        console.error('Video element not found');
        return;
    }

    console.log('HLS levels available:', hlsInstance.levels.length);

    // Sort levels high-to-low and drop duplicate heights (e.g. several
    // bitrates for the same resolution).
    const sortedLevels = [...hlsInstance.levels].sort((a, b) => b.height - a.height);

    const seenHeights = new Set();
    const uniqueLevels = [];

    sortedLevels.forEach((level) => {
        if (!seenHeights.has(level.height)) {
            seenHeights.add(level.height);
            uniqueLevels.push(level);
        }
    });

    // Build the label list ('auto' first) and the label -> level-index map
    // used by changeHLSQuality().
    const qualityLabels = ['auto'];
    uniqueLevels.forEach((level) => {
        const originalIndex = hlsInstance.levels.indexOf(level);
        const label = level.height + 'p';
        if (!window.hlsQualityMap[label]) {
            qualityLabels.push(label);
            window.hlsQualityMap[label] = originalIndex;
        }
    });

    console.log('Quality labels:', qualityLabels);

    const playerOptions = {
        autoplay: autoplayActive,
        disableContextMenu: false,
        captions: {
            active: captionsActive,
            language: typeof data !== 'undefined' ? data.settings.subtitles_language : 'en',
        },
        controls: [
            'play-large',
            'play',
            'progress',
            'current-time',
            'duration',
            'mute',
            'volume',
            'captions',
            'settings',
            'pip',
            'airplay',
            'fullscreen',
        ],
        iconUrl: '/youtube.com/static/modules/plyr/plyr.svg',
        blankVideo: '/youtube.com/static/modules/plyr/blank.webm',
        debug: false,
        storage: { enabled: false },
        // Storyboard-based seek previews, when the page provided a URL.
        previewThumbnails: {
            enabled: typeof storyboard_url !== 'undefined' && storyboard_url !== null,
            src: typeof storyboard_url !== 'undefined' && storyboard_url !== null ? [storyboard_url] : [],
        },
        settings: ['captions', 'speed', 'loop'],
        tooltips: {
            controls: true,
        },
    };

    console.log('Creating Plyr...');

    try {
        plyrInstance = new Plyr(video, playerOptions);
        console.log('Plyr instance created');

        window.plyrInstance = plyrInstance;

        addCustomQualityControl(plyrInstance, qualityLabels);
        addCustomAudioTracksControl(plyrInstance, hlsInstance);

        // Strip Plyr's dblclick handlers (disables double-click fullscreen).
        if (plyrInstance.eventListeners) {
            plyrInstance.eventListeners.forEach(function(eventListener) {
                if(eventListener.type === 'dblclick') {
                    eventListener.element.removeEventListener(eventListener.type, eventListener.callback, eventListener.options);
                }
            });
        }

        // Track whether playback has ever started.
        plyrInstance.started = false;
        plyrInstance.once('playing', function(){this.started = true});

        // Seek to the requested start offset once metadata is available.
        if (typeof data !== 'undefined' && data.time_start != 0) {
            video.addEventListener('loadedmetadata', function() {
                video.currentTime = data.time_start;
            });
        }

        console.log('Plyr init complete');
    } catch (e) {
        console.error('Failed to initialize Plyr:', e);
    }
}
|
||||
|
||||
/**
 * Entry point: bring up hls.js, then build the Plyr UI on top of it.
 * Logs and aborts when the page did not provide a manifest URL.
 */
async function start() {
    console.log('Starting Plyr with HLS...');

    const haveManifest = typeof hls_manifest_url !== 'undefined' && !!hls_manifest_url;
    if (!haveManifest) {
        console.error('No HLS manifest URL available');
        return;
    }

    try {
        initPlyrWithQuality(await initHLS(hls_manifest_url));
    } catch (error) {
        console.error('Failed to initialize:', error);
    }
}
|
||||
|
||||
// Run start() once the DOM is ready (immediately if it already is).
if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', start);
} else {
    start();
}
})();
|
||||
375
youtube/static/js/storyboard-preview.js
Normal file
375
youtube/static/js/storyboard-preview.js
Normal file
@@ -0,0 +1,375 @@
|
||||
/**
|
||||
* YouTube Storyboard Preview Thumbnails
|
||||
* Shows preview thumbnails when hovering over the progress bar
|
||||
* Works with native HTML5 video player
|
||||
*
|
||||
* Fetches the proxied WebVTT storyboard from backend and extracts image URLs
|
||||
*/
|
||||
(function() {
    'use strict';

    console.log('Storyboard Preview Thumbnails loaded');

    // Storyboard configuration
    // Parsed storyboard frames: array of {time, imageUrl, x, y, width, height},
    // in VTT cue order (ascending start time).
    let storyboardImages = [];
    let previewElement = null;   // floating thumbnail container (fixed position)
    let tooltipElement = null;   // timestamp label inside the preview
    let video = null;            // the #js-video-player element
    let progressBarRect = null;  // estimated progress-bar hit area (browser specific)
|
||||
|
||||
/**
 * Fetch the backend-proxied storyboard VTT and parse it into frame
 * descriptors. Each cue has a timing line ("HH:MM:SS.mmm --> ...") followed
 * by an image URL with crop parameters ("<url>#xywh=x,y,w,h").
 *
 * @param {string} vttUrl - URL of the proxied WebVTT storyboard.
 * @returns {Promise<Array<{time, imageUrl, x, y, width, height}>>}
 */
function fetchStoryboardVTT(vttUrl) {
    return fetch(vttUrl)
        .then(response => {
            if (!response.ok) throw new Error('Failed to fetch storyboard VTT');
            return response.text();
        })
        .then(vttText => {
            console.log('Fetched storyboard VTT, length:', vttText.length);

            const frames = [];
            let pending = null;

            for (const rawLine of vttText.split('\n')) {
                const line = rawLine.trim();

                if (line.includes('-->')) {
                    // Cue timing line: start timestamp only is needed.
                    const m = line.match(/^(\d{2}):(\d{2}):(\d{2})\.(\d{3})/);
                    if (m) {
                        const [, hh, mm, ss, ms] = m;
                        pending = {
                            time: (+hh) * 3600 + (+mm) * 60 + (+ss) + (+ms) / 1000
                        };
                    }
                } else if (line.includes('#xywh=') && pending) {
                    // Cue payload: "<imageUrl>#xywh=x,y,w,h".
                    const [urlPart, paramsPart] = line.split('#xywh=');
                    const [x, y, width, height] = paramsPart.split(',').map(Number);
                    Object.assign(pending, { imageUrl: urlPart, x, y, width, height });
                    frames.push(pending);
                    pending = null;
                }
            }

            console.log('Parsed', frames.length, 'storyboard frames');
            return frames;
        });
}
|
||||
|
||||
/**
 * Format a duration in seconds as "M:SS", or "H:MM:SS" when at least an
 * hour long. Returns "0:00" for NaN input.
 */
function formatTime(seconds) {
    if (isNaN(seconds)) return '0:00';

    const total = Math.floor(seconds);
    const secs = total % 60;
    const minutes = Math.floor(total / 60) % 60;
    const hours = Math.floor(total / 3600);

    const pad = (n) => n.toString().padStart(2, '0');
    return hours > 0
        ? `${hours}:${pad(minutes)}:${pad(secs)}`
        : `${minutes}:${pad(secs)}`;
}
|
||||
|
||||
/**
 * Return the storyboard frame covering `time` (seconds), or null when no
 * frames are loaded. Binary search over the time-sorted frame list; when
 * `time` falls outside all intervals, the nearest frame is returned.
 */
function findFrameAtTime(time) {
    if (!storyboardImages.length) return null;

    // Binary search for efficiency
    let left = 0;
    let right = storyboardImages.length - 1;

    while (left <= right) {
        const mid = Math.floor((left + right) / 2);
        const frame = storyboardImages[mid];

        // Frame `mid` covers [frame.time, nextFrame.time); the last frame
        // extends to Infinity.
        if (time >= frame.time && time < (storyboardImages[mid + 1]?.time || Infinity)) {
            return frame;
        } else if (time < frame.time) {
            right = mid - 1;
        } else {
            left = mid + 1;
        }
    }

    // Return closest frame (clamped to the last valid index).
    return storyboardImages[Math.min(left, storyboardImages.length - 1)];
}
|
||||
|
||||
/**
 * Coarse browser detection from the user-agent string.
 * Order matters: Chrome's UA also contains "Safari", so Chrome is
 * checked first.
 */
function getBrowser() {
    const ua = navigator.userAgent;
    const checks = [
        ['Firefox', 'firefox'],
        ['Chrome', 'chrome'],
        ['Safari', 'safari'],
    ];
    for (const [needle, name] of checks) {
        if (ua.indexOf(needle) > -1) return name;
    }
    return 'other';
}
|
||||
|
||||
/**
 * Estimate the native-controls progress-bar hit area for the current
 * browser, in viewport coordinates. Returns null when the video element is
 * not available. The pixel offsets are layout heuristics per browser.
 */
function detectProgressBar() {
    if (!video) return null;

    const rect = video.getBoundingClientRect();
    const browser = getBrowser();

    let progressBarArea;

    switch(browser) {
        case 'firefox':
            // Firefox: the progress bar sits at the bottom but is thinner —
            // roughly 20-25px tall — and centered between side controls.
            progressBarArea = {
                top: rect.bottom - 30,   // smaller area for Firefox
                bottom: rect.bottom - 5, // leave room for other controls
                left: rect.left + 60,    // play/volume buttons on the left
                right: rect.right - 10,  // fullscreen etc. on the right
                height: 25
            };
            break;

        case 'chrome':
        default:
            // Chrome: the progress bar occupies a larger area.
            progressBarArea = {
                top: rect.bottom - 50,
                bottom: rect.bottom,
                left: rect.left,
                right: rect.right,
                height: 50
            };
            break;
    }

    return progressBarArea;
}
|
||||
|
||||
/**
 * True when the viewport point (mouseX, mouseY) lies inside the detected
 * progress-bar area; false when no area has been detected yet.
 */
function isOverProgressBar(mouseX, mouseY) {
    const area = progressBarRect;
    if (!area) return false;

    const insideX = mouseX >= area.left && mouseX <= area.right;
    const insideY = mouseY >= area.top && mouseY <= area.bottom;
    return insideX && insideY;
}
|
||||
|
||||
/**
 * Locate the video element and create the floating preview + tooltip DOM
 * nodes (appended to document.body). Logs an error and does nothing when
 * the video element is missing.
 */
function initPreviewElements() {
    video = document.getElementById('js-video-player');
    if (!video) {
        console.error('Video element not found');
        return;
    }

    console.log('Video element found, browser:', getBrowser());

    // Create preview element: floating thumbnail container, positioned in
    // viewport coordinates.
    previewElement = document.createElement('div');
    previewElement.className = 'storyboard-preview';
    previewElement.style.cssText = `
        position: fixed;
        display: none;
        pointer-events: none;
        z-index: 10000;
        background: #000;
        border: 2px solid #fff;
        border-radius: 4px;
        overflow: hidden;
        box-shadow: 0 4px 12px rgba(0,0,0,0.5);
    `;

    // Create tooltip element: timestamp label shown under the thumbnail.
    tooltipElement = document.createElement('div');
    tooltipElement.className = 'storyboard-tooltip';
    tooltipElement.style.cssText = `
        position: absolute;
        bottom: -25px;
        left: 50%;
        transform: translateX(-50%);
        background: rgba(0,0,0,0.8);
        color: #fff;
        padding: 2px 6px;
        border-radius: 3px;
        font-size: 12px;
        font-family: Arial, sans-serif;
        white-space: nowrap;
        pointer-events: none;
    `;

    previewElement.appendChild(tooltipElement);
    document.body.appendChild(previewElement);

    // Keep the progress-bar hit area up to date while hovering the video.
    video.addEventListener('mousemove', updateProgressBarPosition);
}
|
||||
|
||||
/**
 * Refresh the cached progress-bar hit area (layout may have changed,
 * e.g. after resize or fullscreen toggles).
 */
function updateProgressBarPosition() {
    progressBarRect = detectProgressBar();
}
|
||||
|
||||
/**
 * Mousemove handler: when the pointer is over the (estimated) progress-bar
 * area, show the storyboard thumbnail for the hovered timestamp; otherwise
 * hide the preview.
 */
function handleMouseMove(e) {
    if (!video || !storyboardImages.length) return;

    // Update progress bar position on each move (layout can change).
    progressBarRect = detectProgressBar();

    // Only show preview if mouse is over the progress bar area.
    if (!isOverProgressBar(e.clientX, e.clientY)) {
        if (previewElement) previewElement.style.display = 'none';
        return;
    }

    // Calculate position within the progress bar.
    const progressBarWidth = progressBarRect.right - progressBarRect.left;
    let xInProgressBar = e.clientX - progressBarRect.left;

    // Adjust for Firefox's left offset.
    const browser = getBrowser();
    if (browser === 'firefox') {
        // Clamp so the fraction matches the real bar more closely.
        xInProgressBar = Math.max(0, Math.min(xInProgressBar, progressBarWidth));
    }

    const percentage = Math.max(0, Math.min(1, xInProgressBar / progressBarWidth));
    const time = percentage * video.duration;
    const frame = findFrameAtTime(time);

    if (!frame) return;

    // Preview dimensions.
    const previewWidth = 160;
    const previewHeight = 90;
    const offsetFromCursor = 10;

    // Position above the cursor.
    let previewTop = e.clientY - previewHeight - offsetFromCursor;

    // If preview would go above the video, position below the cursor.
    const videoRect = video.getBoundingClientRect();
    if (previewTop < videoRect.top) {
        previewTop = e.clientY + offsetFromCursor;
    }

    // Keep preview within horizontal bounds.
    let left = e.clientX - (previewWidth / 2);

    // Firefox-specific adjustments.
    if (browser === 'firefox') {
        // In Firefox the bar does not reach the video's edges.
        const minLeft = progressBarRect.left + 10;
        const maxLeft = progressBarRect.right - previewWidth - 10;
        left = Math.max(minLeft, Math.min(left, maxLeft));
    } else {
        left = Math.max(videoRect.left, Math.min(left, videoRect.right - previewWidth));
    }

    // Show the frame by cropping the sprite sheet via background-position.
    previewElement.style.cssText = `
        display: block;
        position: fixed;
        left: ${left}px;
        top: ${previewTop}px;
        width: ${previewWidth}px;
        height: ${previewHeight}px;
        background-image: url('${frame.imageUrl}');
        background-position: -${frame.x}px -${frame.y}px;
        background-size: auto;
        background-repeat: no-repeat;
        border: 2px solid #fff;
        border-radius: 4px;
        box-shadow: 0 4px 12px rgba(0,0,0,0.5);
        z-index: 10000;
        pointer-events: none;
    `;

    tooltipElement.textContent = formatTime(time);
}
|
||||
|
||||
/** Hide the preview when the pointer leaves the video element. */
function handleMouseLeave() {
    if (!previewElement) return;
    previewElement.style.display = 'none';
}
|
||||
|
||||
/**
 * Entry point: fetch the proxied storyboard VTT (global `storyboard_url`),
 * store the parsed frames, and hook the mouse handlers onto the video.
 * Does nothing when no storyboard URL is available.
 */
function init() {
    console.log('Initializing storyboard preview...');

    // Check if storyboard URL is available.
    if (typeof storyboard_url === 'undefined' || !storyboard_url) {
        console.log('No storyboard URL available');
        return;
    }

    console.log('Storyboard URL:', storyboard_url);

    // Fetch the proxied VTT file from backend.
    fetchStoryboardVTT(storyboard_url)
        .then(images => {
            storyboardImages = images;
            console.log('Loaded', images.length, 'storyboard images');

            if (images.length === 0) {
                console.log('No storyboard images parsed');
                return;
            }

            // Creates the preview DOM and assigns the module-level `video`.
            initPreviewElements();

            // Add event listeners to video.
            video.addEventListener('mousemove', handleMouseMove);
            video.addEventListener('mouseleave', handleMouseLeave);

            console.log('Storyboard preview initialized for', getBrowser());
        })
        .catch(err => {
            console.error('Failed to load storyboard:', err);
        });
}
|
||||
|
||||
// Initialize when the DOM is ready (immediately if it already is).
if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', init);
} else {
    init();
}

})();
|
||||
@@ -5,8 +5,9 @@ function changeQuality(selection) {
|
||||
let videoPaused = video.paused;
|
||||
let videoSpeed = video.playbackRate;
|
||||
let srcInfo;
|
||||
if (avMerge)
|
||||
if (avMerge && typeof avMerge.close === 'function') {
|
||||
avMerge.close();
|
||||
}
|
||||
if (selection.type == 'uni'){
|
||||
srcInfo = data['uni_sources'][selection.index];
|
||||
video.src = srcInfo.url;
|
||||
@@ -94,7 +95,11 @@ if (data.playlist && data.playlist['id'] !== null) {
|
||||
|
||||
|
||||
// Autoplay
|
||||
if (data.settings.related_videos_mode !== 0 || data.playlist !== null) {
|
||||
(function() {
|
||||
if (data.settings.related_videos_mode === 0 && data.playlist === null) {
|
||||
return;
|
||||
}
|
||||
|
||||
let playability_error = !!data.playability_error;
|
||||
let isPlaylist = false;
|
||||
if (data.playlist !== null && data.playlist['current_index'] !== null)
|
||||
@@ -154,7 +159,10 @@ if (data.settings.related_videos_mode !== 0 || data.playlist !== null) {
|
||||
if(!playability_error){
|
||||
// play the video if autoplay is on
|
||||
if(autoplayEnabled){
|
||||
video.play();
|
||||
video.play().catch(function(e) {
|
||||
// Autoplay blocked by browser - ignore silently
|
||||
console.log('Autoplay blocked:', e.message);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -196,4 +204,4 @@ if (data.settings.related_videos_mode !== 0 || data.playlist !== null) {
|
||||
window.setTimeout(nextVideo, nextVideoDelay);
|
||||
}
|
||||
}
|
||||
}
|
||||
})();
|
||||
329
youtube/static/js/watch.hls.js
Normal file
329
youtube/static/js/watch.hls.js
Normal file
@@ -0,0 +1,329 @@
|
||||
// The native <video> element this player drives.
const video = document.getElementById('js-video-player');

// hls.js instance; mirrored on window.hls for other scripts / debugging.
window.hls = null;
let hls = null;
|
||||
|
||||
// ===========
// HLS NATIVE
// ===========
/**
 * Native (non-Plyr) HLS setup: attach hls.js to the page's video element
 * and load `manifestUrl`. Populates the #quality-select dropdown from the
 * parsed levels, applies the user's default resolution, and falls back to
 * Safari's built-in HLS support when hls.js is unavailable.
 */
function initHLSNative(manifestUrl) {
    if (!manifestUrl) {
        console.error('No HLS manifest URL provided');
        return;
    }

    console.log('Initializing native HLS player with manifest:', manifestUrl);

    // Tear down any previous instance before re-initializing.
    if (hls) {
        window.hls = null;
        hls.destroy();
        hls = null;
    }

    if (Hls.isSupported()) {
        hls = new Hls({
            enableWorker: true,
            lowLatencyMode: false,
            maxBufferLength: 30,
            maxMaxBufferLength: 60,
            startLevel: -1, // auto; adjusted after MANIFEST_PARSED
        });

        // Expose for debugging / other scripts on the page.
        window.hls = hls;

        hls.loadSource(manifestUrl);
        hls.attachMedia(video);

        hls.on(Hls.Events.MANIFEST_PARSED, function(event, data) {
            console.log('Native manifest parsed');
            console.log('Levels:', data.levels.length);

            const qualitySelect = document.getElementById('quality-select');

            if (qualitySelect && data.levels?.length) {
                qualitySelect.innerHTML = '<option value="-1">Auto</option>';

                // High-to-low, deduplicated by height; option values are the
                // original hls.js level indices.
                const sorted = [...data.levels].sort((a, b) => b.height - a.height);
                const seen = new Set();

                sorted.forEach(level => {
                    if (!seen.has(level.height)) {
                        seen.add(level.height);

                        const i = data.levels.indexOf(level);
                        const opt = document.createElement('option');

                        opt.value = i;
                        opt.textContent = level.height + 'p';

                        qualitySelect.appendChild(opt);
                    }
                });

                // Set initial quality from settings: highest level whose
                // height does not exceed the configured default resolution.
                if (typeof window.data !== 'undefined' && window.data.settings) {
                    const defaultRes = window.data.settings.default_resolution;
                    if (defaultRes !== 'auto' && defaultRes) {
                        const target = parseInt(defaultRes);
                        let bestLevel = -1;
                        let bestHeight = 0;
                        for (let i = 0; i < hls.levels.length; i++) {
                            const h = hls.levels[i].height;
                            if (h <= target && h > bestHeight) {
                                bestHeight = h;
                                bestLevel = i;
                            }
                        }
                        if (bestLevel !== -1) {
                            hls.currentLevel = bestLevel;
                            qualitySelect.value = bestLevel;
                            console.log('Starting at resolution:', bestHeight + 'p');
                        }
                    }
                }
            }
        });

        hls.on(Hls.Events.ERROR, function(_, data) {
            if (data.fatal) {
                console.error('HLS fatal error:', data.type, data.details);
                switch(data.type) {
                    case Hls.ErrorTypes.NETWORK_ERROR:
                        // Try to resume loading after a network hiccup.
                        hls.startLoad();
                        break;
                    case Hls.ErrorTypes.MEDIA_ERROR:
                        hls.recoverMediaError();
                        break;
                    default:
                        // Unrecoverable: give up on this instance.
                        hls.destroy();
                        break;
                }
            }
        });

    } else if (video.canPlayType('application/vnd.apple.mpegurl')) {
        // Safari: native HLS playback, no hls.js needed.
        video.src = manifestUrl;
    } else {
        console.error('HLS not supported');
    }
}
|
||||
|
||||
// ======
// INIT
// ======
// Entry point for the native HLS player: wires up hls.js (via
// initHLSNative) and the quality <select> dropdown.
function initPlayer() {
    console.log('Init native player');

    // Bail out early when the page did not provide a manifest URL.
    if (typeof hls_manifest_url === 'undefined' || !hls_manifest_url) {
        console.error('No manifest URL');
        return;
    }

    initHLSNative(hls_manifest_url);

    const qualitySelect = document.getElementById('quality-select');
    if (!qualitySelect) {
        return;
    }

    qualitySelect.addEventListener('change', function () {
        const chosenLevel = parseInt(this.value);

        if (hls) {
            // -1 is hls.js's sentinel for automatic level selection.
            hls.currentLevel = chosenLevel;
            console.log('Quality:', chosenLevel === -1 ? 'Auto' : hls.levels[chosenLevel]?.height + 'p');
        }
    });
}
|
||||
|
||||
// DOM READY
// Initialise immediately when the DOM has already been parsed;
// otherwise defer until DOMContentLoaded fires.
if (document.readyState !== 'loading') {
    initPlayer();
} else {
    document.addEventListener('DOMContentLoaded', initPlayer);
}
|
||||
|
||||
// =============
// AUDIO TRACKS
// =============
document.addEventListener('DOMContentLoaded', function() {
    const trackSelect = document.getElementById('audio-track-select');

    if (trackSelect) {
        trackSelect.addEventListener('change', function() {
            const chosen = parseInt(this.value);

            // Only apply indices that map to a real hls.js audio track.
            const isValidChoice = !isNaN(chosen)
                && hls
                && hls.audioTracks
                && chosen >= 0
                && chosen < hls.audioTracks.length;

            if (isValidChoice) {
                hls.audioTrack = chosen;
                console.log('Audio track changed to:', hls.audioTracks[chosen].name || chosen);
            }
        });
    }

    if (hls) {
        hls.on(Hls.Events.AUDIO_TRACKS_UPDATED, (_, data) => {
            console.log('Audio tracks:', data.audioTracks);

            // Populate audio track select if needed
            if (!trackSelect || data.audioTracks.length === 0) {
                return;
            }

            trackSelect.innerHTML = '<option value="">Select audio track</option>';
            let originalIdx = -1;

            data.audioTracks.forEach((track, idx) => {
                // Remember the first track labelled "original"
                if (originalIdx === -1 && (track.name || '').toLowerCase().includes('original')) {
                    originalIdx = idx;
                }
                const option = document.createElement('option');
                option.value = String(idx);
                option.textContent = track.name || track.lang || `Track ${idx}`;
                trackSelect.appendChild(option);
            });
            trackSelect.disabled = false;

            // Auto-select "original" audio track
            if (originalIdx !== -1) {
                hls.audioTrack = originalIdx;
                trackSelect.value = String(originalIdx);
                console.log('Auto-selected original audio track:', data.audioTracks[originalIdx].name);
            }
        });
    }
});
|
||||
|
||||
// ============
// START TIME
// ============
// Seek to the requested start offset once metadata (duration etc.) is
// available; a zero offset means "play from the beginning", so no seek.
if (typeof data !== 'undefined' && video && data.time_start != 0) {
    video.addEventListener('loadedmetadata', () => {
        video.currentTime = data.time_start;
    });
}
|
||||
|
||||
// ==============
// SPEED CONTROL
// ==============
// Lets the user type a playback rate and apply it with Enter.
let speedInput = document.getElementById('speed-control');

if (speedInput) {
    speedInput.addEventListener('keyup', (event) => {
        if (event.key === 'Enter') {
            let speed = parseFloat(speedInput.value);
            // Fix: browsers throw NotSupportedError when playbackRate is
            // set to a non-positive or non-finite value, so require a
            // strictly positive finite number (the old !isNaN check let
            // 0, negatives and "Infinity" through).
            if (Number.isFinite(speed) && speed > 0) {
                video.playbackRate = speed;
            }
        }
    });
}
|
||||
|
||||
// =========
// Autoplay
// =========
(function() {
    // Autoplay only matters when there is something to play next:
    // related videos enabled, or we are inside a playlist.
    if (typeof data === 'undefined' || (data.settings.related_videos_mode === 0 && data.playlist === null)) {
        return;
    }

    let playability_error = !!data.playability_error;
    let isPlaylist = false;
    if (data.playlist !== null && data.playlist['current_index'] !== null)
        isPlaylist = true;

    // read cookies on whether to autoplay
    // https://developer.mozilla.org/en-US/docs/Web/API/Document/cookie
    let cookieValue;
    let playlist_id;
    if (isPlaylist) {
        // from https://stackoverflow.com/a/6969486
        function escapeRegExp(string) {
            // $& means the whole matched string
            return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
        }
        playlist_id = data.playlist['id'];
        playlist_id = escapeRegExp(playlist_id);

        cookieValue = document.cookie.replace(new RegExp(
            '(?:(?:^|.*;\\s*)autoplay_'
            + playlist_id + '\\s*\\=\\s*([^;]*).*$)|^.*$'
        ), '$1');
    } else {
        cookieValue = document.cookie.replace(new RegExp(
            '(?:(?:^|.*;\\s*)autoplay\\s*\\=\\s*([^;]*).*$)|^.*$'
        ),'$1');
    }

    let autoplayEnabled = 0;
    if (cookieValue.length === 0) {
        autoplayEnabled = 0;
    } else {
        autoplayEnabled = Number(cookieValue);
    }

    // check the checkbox if autoplay is on
    let checkbox = document.querySelector('.autoplay-toggle');
    // Fix: previously a missing toggle element threw a TypeError below,
    // aborting the whole IIFE with an uncaught exception. Bail out
    // explicitly instead (net effect is the same: no autoplay wiring).
    if (!checkbox) {
        return;
    }
    if (autoplayEnabled) {
        checkbox.checked = true;
    }

    // listen for checkbox to turn autoplay on and off
    let cookie = 'autoplay';
    if (isPlaylist)
        cookie += '_' + playlist_id;

    checkbox.addEventListener('change', function() {
        if (this.checked) {
            autoplayEnabled = 1;
            document.cookie = cookie + '=1; SameSite=Strict';
        } else {
            autoplayEnabled = 0;
            document.cookie = cookie + '=0; SameSite=Strict';
        }
    });

    if (!playability_error) {
        // play the video if autoplay is on
        if (autoplayEnabled) {
            video.play().catch(function(e) {
                // Autoplay blocked by browser - ignore silently
                console.log('Autoplay blocked:', e.message);
            });
        }
    }

    // determine next video url
    let nextVideoUrl;
    if (isPlaylist) {
        let currentIndex = data.playlist['current_index'];
        // Consistency fix: reuse currentIndex instead of re-reading
        // data.playlist['current_index'] three times.
        if (currentIndex + 1 == data.playlist['items'].length)
            nextVideoUrl = null;
        else
            nextVideoUrl = data.playlist['items'][currentIndex + 1]['url'];

        // scroll playlist to proper position
        // item height + gap == 100
        // Fix: guard against a missing playlist container; previously a
        // null here threw and prevented the 'ended' handler below from
        // ever being registered.
        let pl = document.querySelector('.playlist-videos');
        if (pl) {
            pl.scrollTop = 100*currentIndex;
        }
    } else {
        if (data.related.length === 0)
            nextVideoUrl = null;
        else
            nextVideoUrl = data.related[0]['url'];
    }
    let nextVideoDelay = 1000;

    // go to next video when video ends
    // https://stackoverflow.com/a/2880950
    if (nextVideoUrl) {
        function nextVideo() {
            if (autoplayEnabled) {
                window.location.href = nextVideoUrl;
            }
        }
        function videoEnded(e) {
            window.setTimeout(nextVideo, nextVideoDelay);
        }
        if (playability_error) {
            videoEnded();
        } else {
            video.addEventListener('ended', videoEnded, false);
        }
    }
})();
|
||||
@@ -181,7 +181,7 @@ label[for=options-toggle-cbox] {
|
||||
|
||||
.table td,.table th {
|
||||
padding: 10px 10px;
|
||||
border: 1px solid var(--secondary-background);
|
||||
border: 1px solid var(--border-bg-license);
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
|
||||
@@ -10,9 +10,11 @@
|
||||
--link: #212121;
|
||||
--link-visited: #808080;
|
||||
--border-bg: #212121;
|
||||
--buttom: #DCDCDC;
|
||||
--border-bg-settings: #91918C;
|
||||
--border-bg-license: #91918C;
|
||||
--buttom: #FFFFFF;
|
||||
--buttom-text: #212121;
|
||||
--button-border: #91918c;
|
||||
--button-border: #91918C;
|
||||
--buttom-hover: #BBBBBB;
|
||||
--search-text: #212121;
|
||||
--time-background: #212121;
|
||||
|
||||
192
youtube/static/modules/plyr/custom_plyr.css
Normal file
192
youtube/static/modules/plyr/custom_plyr.css
Normal file
@@ -0,0 +1,192 @@
|
||||
/* Prevent this div from blocking right-click menu for video
|
||||
e.g. Firefox playback speed options */
|
||||
.plyr__poster {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* plyr fix */
|
||||
.plyr:-moz-full-screen video {
|
||||
max-height: initial;
|
||||
}
|
||||
|
||||
.plyr:-webkit-full-screen video {
|
||||
max-height: initial;
|
||||
}
|
||||
|
||||
.plyr:-ms-fullscreen video {
|
||||
max-height: initial;
|
||||
}
|
||||
|
||||
.plyr:fullscreen video {
|
||||
max-height: initial;
|
||||
}
|
||||
|
||||
.plyr__preview-thumb__image-container {
|
||||
width: 158px;
|
||||
height: 90px;
|
||||
}
|
||||
|
||||
.plyr__preview-thumb {
|
||||
bottom: 100%;
|
||||
}
|
||||
|
||||
.plyr__menu__container [role="menu"],
|
||||
.plyr__menu__container [role="menucaptions"] {
|
||||
/* Set vertical scroll */
|
||||
/* issue https://github.com/sampotts/plyr/issues/1420 */
|
||||
max-height: 320px;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
/*
|
||||
* Custom styles similar to youtube
|
||||
*/
|
||||
.plyr__controls {
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
padding-bottom: 0px;
|
||||
}
|
||||
|
||||
.plyr__progress__container {
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
width: 100%;
|
||||
margin-bottom: -5px;
|
||||
}
|
||||
|
||||
.plyr__controls .plyr__controls__item:first-child {
|
||||
margin-left: 0;
|
||||
margin-right: 0;
|
||||
z-index: 5;
|
||||
}
|
||||
|
||||
.plyr__controls .plyr__controls__item.plyr__volume {
|
||||
margin-left: auto;
|
||||
}
|
||||
|
||||
.plyr__controls .plyr__controls__item.plyr__progress__container {
|
||||
padding-left: 10px;
|
||||
padding-right: 10px;
|
||||
}
|
||||
|
||||
.plyr__progress input[type="range"] {
|
||||
margin-bottom: 50px;
|
||||
}
|
||||
|
||||
/*
|
||||
* Plyr Custom Controls
|
||||
*/
|
||||
|
||||
.plyr__control svg.hls_audio_icon,
|
||||
.plyr__control svg.hls_quality_icon {
|
||||
fill: none;
|
||||
}
|
||||
|
||||
.plyr__control[data-plyr="quality-custom"],
|
||||
.plyr__control[data-plyr="audio-custom"] {
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
.plyr__control[data-plyr="quality-custom"]:hover,
|
||||
.plyr__control[data-plyr="audio-custom"]:hover {
|
||||
background: rgba(255, 255, 255, 0.2);
|
||||
}
|
||||
|
||||
/*
|
||||
* Custom styles for dropdown controls
|
||||
*/
|
||||
.plyr__control--custom {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
/* Quality and Audio containers */
|
||||
#plyr-quality-container,
|
||||
#plyr-audio-container {
|
||||
position: relative;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
/* Quality and Audio buttons */
|
||||
#plyr-quality-container .plyr__control,
|
||||
#plyr-audio-container .plyr__control {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
gap: 4px;
|
||||
}
|
||||
|
||||
/* Text labels */
|
||||
#plyr-quality-text,
|
||||
#plyr-audio-text {
|
||||
font-size: 12px;
|
||||
margin-left: 2px;
|
||||
}
|
||||
|
||||
/* Dropdowns */
|
||||
.plyr-quality-dropdown,
|
||||
.plyr-audio-dropdown {
|
||||
position: absolute;
|
||||
bottom: 100%;
|
||||
right: 0;
|
||||
margin-bottom: 8px;
|
||||
background: #E6E6E6;
|
||||
color: #23282f;
|
||||
border-radius: 4px;
|
||||
padding: 4px 6px;
|
||||
min-width: 90px;
|
||||
display: none;
|
||||
z-index: 100;
|
||||
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.25);
|
||||
border: 1px solid rgba(0, 0, 0, 0.08);
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Helvetica, Arial, sans-serif;
|
||||
max-height: 320px;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
/* Audio dropdown needs slightly wider */
|
||||
.plyr-audio-dropdown {
|
||||
min-width: 120px;
|
||||
}
|
||||
|
||||
/* Dropdown options */
|
||||
.plyr-quality-option,
|
||||
.plyr-audio-option {
|
||||
padding: 6px 16px;
|
||||
margin-bottom: 2px;
|
||||
cursor: pointer;
|
||||
font-size: 13px;
|
||||
transition: all 0.15s;
|
||||
color: #23282f;
|
||||
white-space: nowrap;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
/* Active/selected option */
|
||||
.plyr-quality-option[data-active="true"],
|
||||
.plyr-audio-option[data-active="true"] {
|
||||
background: #00b3ff;
|
||||
color: #FFF;
|
||||
font-weight: 500;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
/* Hover state */
|
||||
.plyr-quality-option:hover,
|
||||
.plyr-audio-option:hover {
|
||||
background: #00b3ff;
|
||||
color: #FFF;
|
||||
font-weight: 500;
|
||||
border-radius: 4px;
|
||||
}
|
||||
|
||||
/* No audio tracks message */
|
||||
.plyr-audio-no-tracks {
|
||||
padding: 6px 16px;
|
||||
font-size: 12px;
|
||||
color: rgba(255, 255, 255, 0.5);
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
/*
|
||||
* End custom styles
|
||||
*/
|
||||
1
youtube/static/modules/plyr/plyr.min.js.map
Normal file
1
youtube/static/modules/plyr/plyr.min.js.map
Normal file
File diff suppressed because one or more lines are too long
@@ -155,7 +155,7 @@ label[for=options-toggle-cbox] {
|
||||
}
|
||||
|
||||
.settings-form > h2 {
|
||||
border-bottom: 2px solid var(--border-bg);
|
||||
border-bottom: 2px solid var(--border-bg-settings);
|
||||
padding-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
|
||||
@@ -21,21 +21,7 @@ img {
|
||||
video {
|
||||
width: 100%;
|
||||
height: auto;
|
||||
max-height: 480px;
|
||||
}
|
||||
|
||||
/* plyr fix */
|
||||
.plyr:-moz-full-screen video {
|
||||
max-height: initial;
|
||||
}
|
||||
.plyr:-webkit-full-screen video {
|
||||
max-height: initial;
|
||||
}
|
||||
.plyr:-ms-fullscreen video {
|
||||
max-height: initial;
|
||||
}
|
||||
.plyr:fullscreen video {
|
||||
max-height: initial;
|
||||
max-height: calc(100vh/1.5);
|
||||
}
|
||||
|
||||
a:link {
|
||||
@@ -142,6 +128,29 @@ header {
|
||||
background-color: var(--buttom-hover);
|
||||
}
|
||||
|
||||
.live-url-choices {
|
||||
background-color: var(--thumb-background);
|
||||
margin: 1rem 0;
|
||||
padding: 1rem;
|
||||
}
|
||||
|
||||
.playability-error {
|
||||
position: relative;
|
||||
box-sizing: border-box;
|
||||
height: 30vh;
|
||||
margin: 1rem 0;
|
||||
}
|
||||
|
||||
.playability-error > span {
|
||||
display: flex;
|
||||
background-color: var(--thumb-background);
|
||||
height: 100%;
|
||||
object-fit: cover;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.playlist {
|
||||
display: grid;
|
||||
grid-gap: 4px;
|
||||
@@ -636,6 +645,9 @@ figure.sc-video {
|
||||
max-height: 80vh;
|
||||
overflow-y: scroll;
|
||||
}
|
||||
.playability-error {
|
||||
height: 60vh;
|
||||
}
|
||||
.playlist {
|
||||
display: grid;
|
||||
grid-gap: 1px;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from youtube import util, yt_data_extract, channel, local_playlist
|
||||
from youtube import util, yt_data_extract, channel, local_playlist, playlist
|
||||
from youtube import yt_app
|
||||
import settings
|
||||
|
||||
@@ -30,8 +30,7 @@ database_path = os.path.join(settings.data_dir, "subscriptions.sqlite")
|
||||
|
||||
|
||||
def open_database():
|
||||
if not os.path.exists(settings.data_dir):
|
||||
os.makedirs(settings.data_dir)
|
||||
os.makedirs(settings.data_dir, exist_ok=True)
|
||||
connection = sqlite3.connect(database_path, check_same_thread=False)
|
||||
|
||||
try:
|
||||
@@ -108,8 +107,7 @@ def _subscribe(channels):
|
||||
with connection as cursor:
|
||||
channel_ids_to_check = [channel[0] for channel in channels if not _is_subscribed(cursor, channel[0])]
|
||||
|
||||
rows = ((channel_id, channel_name, 0, 0) for channel_id,
|
||||
channel_name in channels)
|
||||
rows = ((channel_id, channel_name, 0, 0) for channel_id, channel_name in channels)
|
||||
cursor.executemany('''INSERT OR IGNORE INTO subscribed_channels (yt_channel_id, channel_name, time_last_checked, next_check_time)
|
||||
VALUES (?, ?, ?, ?)''', rows)
|
||||
|
||||
@@ -236,8 +234,7 @@ def _get_channel_names(cursor, channel_ids):
|
||||
return result
|
||||
|
||||
|
||||
def _channels_with_tag(cursor, tag, order=False, exclude_muted=False,
|
||||
include_muted_status=False):
|
||||
def _channels_with_tag(cursor, tag, order=False, exclude_muted=False, include_muted_status=False):
|
||||
''' returns list of (channel_id, channel_name) '''
|
||||
|
||||
statement = '''SELECT yt_channel_id, channel_name'''
|
||||
@@ -295,7 +292,10 @@ def youtube_timestamp_to_posix(dumb_timestamp):
|
||||
def posix_to_dumbed_down(posix_time):
|
||||
'''Inverse of youtube_timestamp_to_posix.'''
|
||||
delta = int(time.time() - posix_time)
|
||||
assert delta >= 0
|
||||
# Guard against future timestamps (clock drift) without relying on
|
||||
# `assert` (which is stripped under `python -O`).
|
||||
if delta < 0:
|
||||
delta = 0
|
||||
|
||||
if delta == 0:
|
||||
return '0 seconds ago'
|
||||
@@ -434,8 +434,10 @@ def autocheck_setting_changed(old_value, new_value):
|
||||
stop_autocheck_system()
|
||||
|
||||
|
||||
settings.add_setting_changed_hook('autocheck_subscriptions',
|
||||
autocheck_setting_changed)
|
||||
settings.add_setting_changed_hook(
|
||||
'autocheck_subscriptions',
|
||||
autocheck_setting_changed
|
||||
)
|
||||
if settings.autocheck_subscriptions:
|
||||
start_autocheck_system()
|
||||
# ----------------------------
|
||||
@@ -463,7 +465,24 @@ def _get_atoma_feed(channel_id):
|
||||
|
||||
def _get_channel_videos_first_page(channel_id, channel_status_name):
|
||||
try:
|
||||
return channel.get_channel_first_page(channel_id=channel_id)
|
||||
# First try the playlist method
|
||||
pl_json = playlist.get_videos(
|
||||
'UU' + channel_id[2:],
|
||||
1,
|
||||
include_shorts=settings.include_shorts_in_subscriptions,
|
||||
report_text=None
|
||||
)
|
||||
pl_info = yt_data_extract.extract_playlist_info(pl_json)
|
||||
if pl_info.get('items'):
|
||||
pl_info['items'] = pl_info['items'][0:30]
|
||||
return pl_info
|
||||
|
||||
# Try the channel api method
|
||||
channel_json = channel.get_channel_first_page(channel_id=channel_id)
|
||||
channel_info = yt_data_extract.extract_channel_info(
|
||||
json.loads(channel_json), 'videos'
|
||||
)
|
||||
return channel_info
|
||||
except util.FetchError as e:
|
||||
if e.code == '429' and settings.route_tor:
|
||||
error_message = ('Error checking channel ' + channel_status_name
|
||||
@@ -497,7 +516,7 @@ def _get_upstream_videos(channel_id):
|
||||
)
|
||||
gevent.joinall(tasks)
|
||||
|
||||
channel_tab, feed = tasks[0].value, tasks[1].value
|
||||
channel_info, feed = tasks[0].value, tasks[1].value
|
||||
|
||||
# extract published times from atoma feed
|
||||
times_published = {}
|
||||
@@ -515,7 +534,8 @@ def _get_upstream_videos(channel_id):
|
||||
return None
|
||||
|
||||
root = defusedxml.ElementTree.fromstring(feed)
|
||||
assert remove_bullshit(root.tag) == 'feed'
|
||||
if remove_bullshit(root.tag) != 'feed':
|
||||
raise ValueError('Root element is not <feed>')
|
||||
for entry in root:
|
||||
if (remove_bullshit(entry.tag) != 'entry'):
|
||||
continue
|
||||
@@ -523,21 +543,20 @@ def _get_upstream_videos(channel_id):
|
||||
# it's yt:videoId in the xml but the yt: is turned into a namespace which is removed by remove_bullshit
|
||||
video_id_element = find_element(entry, 'videoId')
|
||||
time_published_element = find_element(entry, 'published')
|
||||
assert video_id_element is not None
|
||||
assert time_published_element is not None
|
||||
if video_id_element is None or time_published_element is None:
|
||||
raise ValueError('Missing videoId or published element')
|
||||
|
||||
time_published = int(calendar.timegm(time.strptime(time_published_element.text, '%Y-%m-%dT%H:%M:%S+00:00')))
|
||||
times_published[video_id_element.text] = time_published
|
||||
|
||||
except AssertionError:
|
||||
except ValueError:
|
||||
print('Failed to read atoma feed for ' + channel_status_name)
|
||||
traceback.print_exc()
|
||||
except defusedxml.ElementTree.ParseError:
|
||||
print('Failed to read atoma feed for ' + channel_status_name)
|
||||
|
||||
if channel_tab is None: # there was an error
|
||||
if channel_info is None: # there was an error
|
||||
return
|
||||
channel_info = yt_data_extract.extract_channel_info(json.loads(channel_tab), 'videos')
|
||||
if channel_info['error']:
|
||||
print('Error checking channel ' + channel_status_name + ': ' + channel_info['error'])
|
||||
return
|
||||
@@ -552,14 +571,41 @@ def _get_upstream_videos(channel_id):
|
||||
if video_item['id'] in times_published:
|
||||
video_item['time_published'] = times_published[video_item['id']]
|
||||
video_item['is_time_published_exact'] = True
|
||||
else:
|
||||
elif video_item.get('time_published'):
|
||||
video_item['is_time_published_exact'] = False
|
||||
try:
|
||||
video_item['time_published'] = youtube_timestamp_to_posix(video_item['time_published']) - i # subtract a few seconds off the videos so they will be in the right order
|
||||
except KeyError:
|
||||
except Exception:
|
||||
print(video_item)
|
||||
|
||||
else:
|
||||
video_item['is_time_published_exact'] = False
|
||||
video_item['time_published'] = None
|
||||
video_item['channel_id'] = channel_id
|
||||
if len(videos) > 1:
|
||||
# Go back and fill in any videos that don't have a time published
|
||||
# using the time published of the surrounding ones
|
||||
for i in range(len(videos)-1):
|
||||
if (videos[i+1]['time_published'] is None
|
||||
and videos[i]['time_published'] is not None
|
||||
):
|
||||
videos[i+1]['time_published'] = videos[i]['time_published'] - 1
|
||||
for i in reversed(range(1,len(videos))):
|
||||
if (videos[i-1]['time_published'] is None
|
||||
and videos[i]['time_published'] is not None
|
||||
):
|
||||
videos[i-1]['time_published'] = videos[i]['time_published'] + 1
|
||||
# Special case: none of the videos have a time published.
|
||||
# In this case, make something up
|
||||
if videos and videos[0]['time_published'] is None:
|
||||
# Invariant: if the first video has no timestamp, earlier passes
|
||||
# ensure all of them are unset. Don't rely on `assert`.
|
||||
if not all(v['time_published'] is None for v in videos):
|
||||
raise RuntimeError('Inconsistent time_published state')
|
||||
now = time.time()
|
||||
for i in range(len(videos)):
|
||||
# 1 month between videos
|
||||
videos[i]['time_published'] = now - i*3600*24*30
|
||||
|
||||
|
||||
if len(videos) == 0:
|
||||
average_upload_period = 4*7*24*3600 # assume 1 month for channel with no videos
|
||||
@@ -578,26 +624,31 @@ def _get_upstream_videos(channel_id):
|
||||
with open_database() as connection:
|
||||
with connection as cursor:
|
||||
|
||||
# calculate how many new videos there are
|
||||
existing_vids = set(row[0] for row in cursor.execute(
|
||||
'''SELECT video_id
|
||||
# Get video ids and duration of existing vids so we
|
||||
# can see how many new ones there are and update
|
||||
# livestreams/premiers
|
||||
existing_vids = list(cursor.execute(
|
||||
'''SELECT video_id, duration
|
||||
FROM videos
|
||||
INNER JOIN subscribed_channels
|
||||
ON videos.sql_channel_id = subscribed_channels.id
|
||||
WHERE yt_channel_id=?
|
||||
ORDER BY time_published DESC
|
||||
LIMIT 30''', [channel_id]).fetchall())
|
||||
existing_vid_ids = set(row[0] for row in existing_vids)
|
||||
existing_durs = dict(existing_vids)
|
||||
|
||||
# new videos the channel has uploaded since last time we checked
|
||||
number_of_new_videos = 0
|
||||
for video in videos:
|
||||
if video['id'] in existing_vids:
|
||||
if video['id'] in existing_vid_ids:
|
||||
break
|
||||
number_of_new_videos += 1
|
||||
|
||||
is_first_check = cursor.execute('''SELECT time_last_checked FROM subscribed_channels WHERE yt_channel_id=?''', [channel_id]).fetchone()[0] in (None, 0)
|
||||
time_videos_retrieved = int(time.time())
|
||||
rows = []
|
||||
update_rows = []
|
||||
for i, video_item in enumerate(videos):
|
||||
if (is_first_check
|
||||
or number_of_new_videos > 6
|
||||
@@ -613,16 +664,34 @@ def _get_upstream_videos(channel_id):
|
||||
time_noticed = video_item['time_published']
|
||||
else:
|
||||
time_noticed = time_videos_retrieved
|
||||
rows.append((
|
||||
video_item['channel_id'],
|
||||
video_item['id'],
|
||||
video_item['title'],
|
||||
video_item['duration'],
|
||||
video_item['time_published'],
|
||||
video_item['is_time_published_exact'],
|
||||
time_noticed,
|
||||
video_item['description'],
|
||||
))
|
||||
|
||||
# videos which need durations updated
|
||||
non_durations = ('upcoming', 'none', 'live', '')
|
||||
v_id = video_item['id']
|
||||
if (existing_durs.get(v_id) is not None
|
||||
and existing_durs[v_id].lower() in non_durations
|
||||
and video_item['duration'] not in non_durations
|
||||
):
|
||||
update_rows.append((
|
||||
video_item['title'],
|
||||
video_item['duration'],
|
||||
video_item['time_published'],
|
||||
video_item['is_time_published_exact'],
|
||||
video_item['description'],
|
||||
video_item['id'],
|
||||
))
|
||||
# all other videos
|
||||
else:
|
||||
rows.append((
|
||||
video_item['channel_id'],
|
||||
video_item['id'],
|
||||
video_item['title'],
|
||||
video_item['duration'],
|
||||
video_item['time_published'],
|
||||
video_item['is_time_published_exact'],
|
||||
time_noticed,
|
||||
video_item['description'],
|
||||
))
|
||||
|
||||
cursor.executemany('''INSERT OR IGNORE INTO videos (
|
||||
sql_channel_id,
|
||||
@@ -635,6 +704,13 @@ def _get_upstream_videos(channel_id):
|
||||
description
|
||||
)
|
||||
VALUES ((SELECT id FROM subscribed_channels WHERE yt_channel_id=?), ?, ?, ?, ?, ?, ?, ?)''', rows)
|
||||
cursor.executemany('''UPDATE videos SET
|
||||
title=?,
|
||||
duration=?,
|
||||
time_published=?,
|
||||
is_time_published_exact=?,
|
||||
description=?
|
||||
WHERE video_id=?''', update_rows)
|
||||
cursor.execute('''UPDATE subscribed_channels
|
||||
SET time_last_checked = ?, next_check_time = ?
|
||||
WHERE yt_channel_id=?''', [int(time.time()), next_check_time, channel_id])
|
||||
@@ -739,7 +815,8 @@ def import_subscriptions():
|
||||
file = file.read().decode('utf-8')
|
||||
try:
|
||||
root = defusedxml.ElementTree.fromstring(file)
|
||||
assert root.tag == 'opml'
|
||||
if root.tag != 'opml':
|
||||
raise ValueError('Root element is not <opml>')
|
||||
channels = []
|
||||
for outline_element in root[0][0]:
|
||||
if (outline_element.tag != 'outline') or ('xmlUrl' not in outline_element.attrib):
|
||||
@@ -750,7 +827,7 @@ def import_subscriptions():
|
||||
channel_id = channel_rss_url[channel_rss_url.find('channel_id=')+11:].strip()
|
||||
channels.append((channel_id, channel_name))
|
||||
|
||||
except (AssertionError, IndexError, defusedxml.ElementTree.ParseError) as e:
|
||||
except (ValueError, IndexError, defusedxml.ElementTree.ParseError):
|
||||
return '400 Bad Request: Unable to read opml xml file, or the file is not the expected format', 400
|
||||
elif mime_type in ('text/csv', 'application/vnd.ms-excel'):
|
||||
content = file.read().decode('utf-8')
|
||||
@@ -767,7 +844,7 @@ def import_subscriptions():
|
||||
error = 'Unsupported file format: ' + mime_type
|
||||
error += (' . Only subscription.json, subscriptions.csv files'
|
||||
' (from Google Takeouts)'
|
||||
' and XML OPML files exported from Youtube\'s'
|
||||
' and XML OPML files exported from YouTube\'s'
|
||||
' subscription manager page are supported')
|
||||
return (flask.render_template('error.html', error_message=error),
|
||||
400)
|
||||
@@ -962,7 +1039,8 @@ def get_subscriptions_page():
|
||||
'muted': muted,
|
||||
})
|
||||
|
||||
return flask.render_template('subscriptions.html',
|
||||
return flask.render_template(
|
||||
'subscriptions.html',
|
||||
header_playlist_names=local_playlist.get_playlist_names(),
|
||||
videos=videos,
|
||||
num_pages=math.ceil(number_of_videos_in_db/60),
|
||||
@@ -1001,11 +1079,20 @@ def post_subscriptions_page():
|
||||
return '', 204
|
||||
|
||||
|
||||
# YouTube video IDs are exactly 11 chars from [A-Za-z0-9_-]. Enforce this
|
||||
# before using the value in filesystem paths to prevent path traversal
|
||||
# (CWE-22, OWASP A01:2021).
|
||||
_VIDEO_ID_RE = re.compile(r'^[A-Za-z0-9_-]{11}$')
|
||||
|
||||
|
||||
@yt_app.route('/data/subscription_thumbnails/<thumbnail>')
|
||||
def serve_subscription_thumbnail(thumbnail):
|
||||
'''Serves thumbnail from disk if it's been saved already. If not, downloads the thumbnail, saves to disk, and serves it.'''
|
||||
assert thumbnail[-4:] == '.jpg'
|
||||
if not thumbnail.endswith('.jpg'):
|
||||
flask.abort(400)
|
||||
video_id = thumbnail[0:-4]
|
||||
if not _VIDEO_ID_RE.match(video_id):
|
||||
flask.abort(400)
|
||||
thumbnail_path = os.path.join(thumbnails_directory, thumbnail)
|
||||
|
||||
if video_id in existing_thumbnails:
|
||||
@@ -1018,12 +1105,26 @@ def serve_subscription_thumbnail(thumbnail):
|
||||
f.close()
|
||||
return flask.Response(image, mimetype='image/jpeg')
|
||||
|
||||
url = "https://i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
|
||||
try:
|
||||
image = util.fetch_url(url, report_text="Saved thumbnail: " + video_id)
|
||||
except urllib.error.HTTPError as e:
|
||||
print("Failed to download thumbnail for " + video_id + ": " + str(e))
|
||||
abort(e.code)
|
||||
image = None
|
||||
for quality in ('hq720.jpg', 'sddefault.jpg', 'hqdefault.jpg'):
|
||||
url = f"https://i.ytimg.com/vi/{video_id}/{quality}"
|
||||
try:
|
||||
image = util.fetch_url(url, report_text="Saved thumbnail: " + video_id)
|
||||
break
|
||||
except util.FetchError as e:
|
||||
if '404' in str(e):
|
||||
continue
|
||||
print("Failed to download thumbnail for " + video_id + ": " + str(e))
|
||||
flask.abort(500)
|
||||
except urllib.error.HTTPError as e:
|
||||
if e.code == 404:
|
||||
continue
|
||||
print("Failed to download thumbnail for " + video_id + ": " + str(e))
|
||||
flask.abort(e.code)
|
||||
|
||||
if image is None:
|
||||
flask.abort(404)
|
||||
|
||||
try:
|
||||
f = open(thumbnail_path, 'wb')
|
||||
except FileNotFoundError:
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' blob: {{ app_url }}/* data: https://*.googlevideo.com; {{ "img-src 'self' https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com;" if not settings.proxy_images else "" }}">
|
||||
<meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline' 'unsafe-eval' blob:; media-src 'self' blob: {{ app_url }}/* data: https://*.googlevideo.com; img-src 'self' https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com; connect-src 'self' https://*.googlevideo.com; font-src 'self' data:; worker-src 'self' blob:;">
|
||||
<title>{{ page_title }}</title>
|
||||
<link title="YT Local" href="/youtube.com/opensearch.xml" rel="search" type="application/opensearchdescription+xml">
|
||||
<link href="/youtube.com/static/favicon.ico" type="image/x-icon" rel="icon">
|
||||
@@ -26,6 +26,12 @@
|
||||
// @license-end
|
||||
</script>
|
||||
{% endif %}
|
||||
<script>
|
||||
// @license magnet:?xt=urn:btih:0b31508aeb0634b347b8270c7bee4d411b5d4109&dn=agpl-3.0.txt AGPL-v3-or-Later
|
||||
// Image prefix for thumbnails
|
||||
let settings_img_prefix = "{{ settings.img_prefix or '' }}";
|
||||
// @license-end
|
||||
</script>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
@@ -35,57 +41,57 @@
|
||||
</nav>
|
||||
<form class="form" id="site-search" action="/youtube.com/results">
|
||||
<input type="search" name="search_query" class="search-box" value="{{ search_box_value }}"
|
||||
{{ "autofocus" if (request.path in ("/", "/results") or error_message) else "" }} required placeholder="Type to search...">
|
||||
<button type="submit" value="Search" class="search-button">Search</button>
|
||||
{{ "autofocus" if (request.path in ("/", "/results") or error_message) else "" }} required placeholder="{{ _('Type to search...') }}">
|
||||
<button type="submit" value="Search" class="search-button">{{ _('Search') }}</button>
|
||||
<!-- options -->
|
||||
<div class="dropdown">
|
||||
<!-- hidden box -->
|
||||
<input id="options-toggle-cbox" class="opt-box" type="checkbox">
|
||||
<!-- end hidden box -->
|
||||
<label class="dropdown-label" for="options-toggle-cbox">Options</label>
|
||||
<label class="dropdown-label" for="options-toggle-cbox">{{ _('Options') }}</label>
|
||||
<div class="dropdown-content">
|
||||
<h3>Sort by</h3>
|
||||
<h3>{{ _('Sort by') }}</h3>
|
||||
<div class="option">
|
||||
<input type="radio" id="sort_relevance" name="sort" value="0">
|
||||
<label for="sort_relevance">Relevance</label>
|
||||
<label for="sort_relevance">{{ _('Relevance') }}</label>
|
||||
</div>
|
||||
<div class="option">
|
||||
<input type="radio" id="sort_upload_date" name="sort" value="2">
|
||||
<label for="sort_upload_date">Upload date</label>
|
||||
<label for="sort_upload_date">{{ _('Upload date') }}</label>
|
||||
</div>
|
||||
<div class="option">
|
||||
<input type="radio" id="sort_view_count" name="sort" value="3">
|
||||
<label for="sort_view_count">View count</label>
|
||||
<label for="sort_view_count">{{ _('View count') }}</label>
|
||||
</div>
|
||||
<div class="option">
|
||||
<input type="radio" id="sort_rating" name="sort" value="1">
|
||||
<label for="sort_rating">Rating</label>
|
||||
<label for="sort_rating">{{ _('Rating') }}</label>
|
||||
</div>
|
||||
|
||||
<h3>Upload date</h3>
|
||||
<h3>{{ _('Upload date') }}</h3>
|
||||
<div class="option">
|
||||
<input type="radio" id="time_any" name="time" value="0">
|
||||
<label for="time_any">Any</label>
|
||||
<label for="time_any">{{ _('Any') }}</label>
|
||||
</div>
|
||||
<div class="option">
|
||||
<input type="radio" id="time_last_hour" name="time" value="1">
|
||||
<label for="time_last_hour">Last hour</label>
|
||||
<label for="time_last_hour">{{ _('Last hour') }}</label>
|
||||
</div>
|
||||
<div class="option">
|
||||
<input type="radio" id="time_today" name="time" value="2">
|
||||
<label for="time_today">Today</label>
|
||||
<label for="time_today">{{ _('Today') }}</label>
|
||||
</div>
|
||||
<div class="option">
|
||||
<input type="radio" id="time_this_week" name="time" value="3">
|
||||
<label for="time_this_week">This week</label>
|
||||
<label for="time_this_week">{{ _('This week') }}</label>
|
||||
</div>
|
||||
<div class="option">
|
||||
<input type="radio" id="time_this_month" name="time" value="4">
|
||||
<label for="time_this_month">This month</label>
|
||||
<label for="time_this_month">{{ _('This month') }}</label>
|
||||
</div>
|
||||
<div class="option">
|
||||
<input type="radio" id="time_this_year" name="time" value="5">
|
||||
<label for="time_this_year">This year</label>
|
||||
<label for="time_this_year">{{ _('This year') }}</label>
|
||||
</div>
|
||||
|
||||
<h3>Type</h3>
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{% if current_tab == 'search' %}
|
||||
{% set page_title = search_box_value + ' - Page ' + page_number|string %}
|
||||
{% else %}
|
||||
{% set page_title = channel_name + ' - Channel' %}
|
||||
{% set page_title = channel_name|string + ' - Channel' %}
|
||||
{% endif %}
|
||||
|
||||
{% extends "base.html" %}
|
||||
@@ -33,7 +33,7 @@
|
||||
<hr/>
|
||||
|
||||
<nav class="channel-tabs">
|
||||
{% for tab_name in ('Videos', 'Playlists', 'About') %}
|
||||
{% for tab_name in ('Videos', 'Shorts', 'Streams', 'Playlists', 'About') %}
|
||||
{% if tab_name.lower() == current_tab %}
|
||||
<a class="tab page-button">{{ tab_name }}</a>
|
||||
{% else %}
|
||||
@@ -51,8 +51,11 @@
|
||||
<ul>
|
||||
{% for (before_text, stat, after_text) in [
|
||||
('Joined ', date_joined, ''),
|
||||
('', view_count|commatize, ' views'),
|
||||
('', approx_view_count, ' views'),
|
||||
('', approx_subscriber_count, ' subscribers'),
|
||||
('', approx_video_count, ' videos'),
|
||||
('Country: ', country, ''),
|
||||
('Canonical Url: ', canonical_url, ''),
|
||||
] %}
|
||||
{% if stat %}
|
||||
<li>{{ before_text + stat|string + after_text }}</li>
|
||||
@@ -65,7 +68,11 @@
|
||||
<hr>
|
||||
<ul>
|
||||
{% for text, url in links %}
|
||||
<li><a href="{{ url }}">{{ text }}</a></li>
|
||||
{% if url %}
|
||||
<li><a href="{{ url }}">{{ text }}</a></li>
|
||||
{% else %}
|
||||
<li>{{ text }}</li>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
</div>
|
||||
@@ -73,11 +80,15 @@
|
||||
|
||||
<!-- new-->
|
||||
<div id="links-metadata">
|
||||
{% if current_tab == 'videos' %}
|
||||
{% set sorts = [('1', 'views'), ('2', 'oldest'), ('3', 'newest')] %}
|
||||
<div id="number-of-results">{{ number_of_videos }} videos</div>
|
||||
{% if current_tab in ('videos', 'shorts', 'streams') %}
|
||||
{% set sorts = [('3', 'newest'), ('4', 'newest - no shorts')] %}
|
||||
{% if current_tab in ('shorts', 'streams') and not is_last_page %}
|
||||
<div id="number-of-results">{{ number_of_videos }}+ videos</div>
|
||||
{% else %}
|
||||
<div id="number-of-results">{{ number_of_videos }} videos</div>
|
||||
{% endif %}
|
||||
{% elif current_tab == 'playlists' %}
|
||||
{% set sorts = [('2', 'oldest'), ('3', 'newest'), ('4', 'last video added')] %}
|
||||
{% set sorts = [('3', 'newest'), ('4', 'last video added')] %}
|
||||
{% if items %}
|
||||
<h2 class="page-number">Page {{ page_number }}</h2>
|
||||
{% else %}
|
||||
@@ -110,13 +121,13 @@
|
||||
<hr/>
|
||||
|
||||
<footer class="pagination-container">
|
||||
{% if current_tab == 'videos' and current_sort.__str__() == '2' %}
|
||||
{% if current_tab in ('shorts', 'streams') %}
|
||||
<nav class="next-previous-button-row">
|
||||
{{ common_elements.next_previous_ctoken_buttons(None, ctoken, channel_url + '/' + current_tab, parameters_dictionary) }}
|
||||
{{ common_elements.next_previous_buttons(is_last_page, channel_url + '/' + current_tab, parameters_dictionary) }}
|
||||
</nav>
|
||||
{% elif current_tab == 'videos' %}
|
||||
<nav class="pagination-list">
|
||||
{{ common_elements.page_buttons(number_of_pages, channel_url + '/' + current_tab, parameters_dictionary, include_ends=(current_sort.__str__() == '3')) }}
|
||||
{{ common_elements.page_buttons(number_of_pages, channel_url + '/' + current_tab, parameters_dictionary, include_ends=(current_sort.__str__() in '34')) }}
|
||||
</nav>
|
||||
{% elif current_tab == 'playlists' or current_tab == 'search' %}
|
||||
<nav class="next-previous-button-row">
|
||||
|
||||
@@ -3,13 +3,13 @@
|
||||
{% macro render_comment(comment, include_avatar, timestamp_links=False) %}
|
||||
<div class="comment-container">
|
||||
<div class="comment">
|
||||
<a class="author-avatar" href="{{ comment['author_url'] }}" title="{{ comment['author'] }}">
|
||||
<a class="author-avatar" href="{{ comment['author_url'] or '#' }}" title="{{ comment['author'] }}">
|
||||
{% if include_avatar %}
|
||||
<img class="author-avatar-img" alt="{{ comment['author'] }}" src="{{ comment['author_avatar'] }}">
|
||||
{% endif %}
|
||||
</a>
|
||||
<address class="author-name">
|
||||
<a class="author" href="{{ comment['author_url'] }}" title="{{ comment['author'] }}">{{ comment['author'] }}</a>
|
||||
<a class="author" href="{{ comment['author_url'] or '#' }}" title="{{ comment['author'] }}">{{ comment['author'] }}</a>
|
||||
</address>
|
||||
<a class="permalink" href="{{ comment['permalink'] }}" title="permalink">
|
||||
<span>{{ comment['time_published'] }}</span>
|
||||
@@ -58,7 +58,7 @@
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% if 'more_comments_url' is in comments_info %}
|
||||
<a class="page-button more-comments" href="{{ comments_info['more_comments_url'] }}">More comments</a>
|
||||
<a class="page-button more-comments" href="{{ comments_info['more_comments_url'] }}">{{ _('More comments') }}</a>
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
|
||||
@@ -20,14 +20,14 @@
|
||||
{{ info['error'] }}
|
||||
{% else %}
|
||||
<div class="item-video {{ info['type'] + '-item' }}">
|
||||
<a class="thumbnail-box" href="{{ info['url'] }}" title="{{ info['title'] }}">
|
||||
<a class="thumbnail-box" href="{{ info['url'] or '#' }}" title="{{ info['title'] }}">
|
||||
<div class="thumbnail {% if info['type'] == 'channel' %} channel {% endif %}">
|
||||
{% if lazy_load %}
|
||||
<img class="thumbnail-img lazy" alt=" " data-src="{{ info['thumbnail'] }}">
|
||||
<img class="thumbnail-img lazy" alt=" " data-src="{{ info['thumbnail'] }}" onerror="thumbnail_fallback(this)">
|
||||
{% elif info['type'] == 'channel' %}
|
||||
<img class="thumbnail-img channel" alt=" " src="{{ info['thumbnail'] }}">
|
||||
<img class="thumbnail-img channel" alt=" " src="{{ info['thumbnail'] }}" onerror="thumbnail_fallback(this)">
|
||||
{% else %}
|
||||
<img class="thumbnail-img" alt=" " src="{{ info['thumbnail'] }}">
|
||||
<img class="thumbnail-img" alt=" " src="{{ info['thumbnail'] }}" onerror="thumbnail_fallback(this)">
|
||||
{% endif %}
|
||||
|
||||
{% if info['type'] != 'channel' %}
|
||||
@@ -35,7 +35,7 @@
|
||||
{% endif %}
|
||||
</div>
|
||||
</a>
|
||||
<h4 class="title"><a href="{{ info['url'] }}" title="{{ info['title'] }}">{{ info['title'] }}</a></h4>
|
||||
<h4 class="title"><a href="{{ info['url'] or '#' }}" title="{{ info['title'] }}">{{ info['title'] }}</a></h4>
|
||||
|
||||
{% if include_author %}
|
||||
{% set author_description = info['author'] %}
|
||||
@@ -58,7 +58,9 @@
|
||||
|
||||
<div class="stats {{'horizontal-stats' if horizontal else 'vertical-stats'}}">
|
||||
{% if info['type'] == 'channel' %}
|
||||
<div>{{ info['approx_subscriber_count'] }} subscribers</div>
|
||||
{% if info.get('approx_subscriber_count') %}
|
||||
<div>{{ info['approx_subscriber_count'] }} subscribers</div>
|
||||
{% endif %}
|
||||
<div>{{ info['video_count']|commatize }} videos</div>
|
||||
{% else %}
|
||||
{% if info.get('time_published') %}
|
||||
|
||||
@@ -3,13 +3,13 @@
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline'; media-src 'self' https://*.googlevideo.com; {{ "img-src 'self' https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com;" if not settings.proxy_images else "" }}">
|
||||
<meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' blob: https://*.googlevideo.com; img-src 'self' https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com; connect-src 'self' https://*.googlevideo.com; font-src 'self' data:;">
|
||||
<title>{{ title }}</title>
|
||||
<link href="/youtube.com/static/favicon.ico" type="image/x-icon" rel="icon">
|
||||
{% if settings.use_video_player == 2 %}
|
||||
<!-- plyr -->
|
||||
<link href="/youtube.com/static/modules/plyr/plyr.css" rel="stylesheet">
|
||||
<!--/ plyr -->
|
||||
<!-- /plyr -->
|
||||
{% endif %}
|
||||
<style>
|
||||
body {
|
||||
@@ -37,9 +37,6 @@
|
||||
<body>
|
||||
<video id="js-video-player" controls autofocus onmouseleave="{{ title }}"
|
||||
oncontextmenu="{{ title }}" onmouseenter="{{ title }}" title="{{ title }}">
|
||||
{% if uni_sources %}
|
||||
<source src="{{ uni_sources[uni_idx]['url'] }}" type="{{ uni_sources[uni_idx]['type'] }}" data-res="{{ uni_sources[uni_idx]['quality'] }}">
|
||||
{% endif %}
|
||||
{% for source in subtitle_sources %}
|
||||
{% if source['on'] %}
|
||||
<track label="{{ source['label'] }}" src="{{ source['url'] }}" kind="subtitles" srclang="{{ source['srclang'] }}" default>
|
||||
@@ -47,28 +44,66 @@
|
||||
<track label="{{ source['label'] }}" src="{{ source['url'] }}" kind="subtitles" srclang="{{ source['srclang'] }}">
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{% if uni_sources %}
|
||||
{% for source in uni_sources %}
|
||||
<source src="{{ source['url'] }}" type="{{ source['type'] }}" title="{{ source['quality_string'] }}">
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
</video>
|
||||
{% if js_data %}
|
||||
<script>
|
||||
// @license magnet:?xt=urn:btih:0b31508aeb0634b347b8270c7bee4d411b5d4109&dn=agpl-3.0.txt AGPL-v3-or-Later
|
||||
data = {{ js_data|tojson }};
|
||||
// @license-end
|
||||
</script>
|
||||
{% endif %}
|
||||
|
||||
<script>
|
||||
// @license magnet:?xt=urn:btih:0b31508aeb0634b347b8270c7bee4d411b5d4109&dn=agpl-3.0.txt AGPL-v3-or-Later
|
||||
let storyboard_url = {{ storyboard_url | tojson }};
|
||||
let hls_manifest_url = {{ hls_manifest_url | tojson }};
|
||||
let hls_unavailable = {{ hls_unavailable | tojson }};
|
||||
let playback_mode = {{ playback_mode | tojson }};
|
||||
let pair_sources = {{ pair_sources | tojson }};
|
||||
let pair_idx = {{ pair_idx | tojson }};
|
||||
// @license-end
|
||||
</script>
|
||||
{% if settings.use_video_player == 2 %}
|
||||
|
||||
{% set hls_should_work = (playback_mode == 'hls' or playback_mode == 'auto') and not hls_unavailable %}
|
||||
{% set use_dash = not hls_should_work %}
|
||||
|
||||
{% if not use_dash %}
|
||||
<script src="/youtube.com/static/js/hls.min.js"
|
||||
integrity="sha512-CSVqc4a7tn+tizDNt+eDoVn2fXYAwMDpCLrwGlWrOktNfZQ9gp4dKKScElMeRlrIifhliXs0a06BLaUgmMlCUw=="
|
||||
crossorigin="anonymous"></script>
|
||||
{% endif %}
|
||||
|
||||
<script src="/youtube.com/static/js/common.js"></script>
|
||||
|
||||
{% if settings.use_video_player == 0 %}
|
||||
<!-- Native player -->
|
||||
{% if use_dash %}
|
||||
<script src="/youtube.com/static/js/watch.dash.js"></script>
|
||||
{% else %}
|
||||
<script src="/youtube.com/static/js/watch.hls.js"></script>
|
||||
{% endif %}
|
||||
{% elif settings.use_video_player == 1 %}
|
||||
<!-- Native player with hotkeys -->
|
||||
<script src="/youtube.com/static/js/hotkeys.js"></script>
|
||||
{% if use_dash %}
|
||||
<script src="/youtube.com/static/js/watch.dash.js"></script>
|
||||
{% else %}
|
||||
<script src="/youtube.com/static/js/watch.hls.js"></script>
|
||||
{% endif %}
|
||||
{% elif settings.use_video_player == 2 %}
|
||||
<!-- plyr -->
|
||||
<script src="/youtube.com/static/modules/plyr/plyr.min.js"
|
||||
integrity="sha512-l6ZzdXpfMHRfifqaR79wbYCEWjLDMI9DnROvb+oLkKq6d7MGroGpMbI7HFpicvmAH/2aQO+vJhewq8rhysrImw=="
|
||||
crossorigin="anonymous"></script>
|
||||
<script src="/youtube.com/static/js/plyr-start.js"></script>
|
||||
{% if use_dash %}
|
||||
<script src="/youtube.com/static/js/plyr.dash.start.js"></script>
|
||||
{% else %}
|
||||
<script src="/youtube.com/static/js/plyr.hls.start.js"></script>
|
||||
{% endif %}
|
||||
<!-- /plyr -->
|
||||
{% elif settings.use_video_player == 1 %}
|
||||
<script src="/youtube.com/static/js/hotkeys.js"></script>
|
||||
{% endif %}
|
||||
|
||||
{% if use_dash %}
|
||||
<script src="/youtube.com/static/js/av-merge.js"></script>
|
||||
{% endif %}
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
{% set page_title = 'Error' %}
|
||||
{% if error_code %}
|
||||
{% set page_title = 'Error: ' ~ error_code %}
|
||||
{% else %}
|
||||
{% set page_title = 'Error' %}
|
||||
{% endif %}
|
||||
|
||||
{% if not slim %}
|
||||
{% extends "base.html" %}
|
||||
|
||||
@@ -29,6 +29,11 @@
|
||||
<td data-label="License"><a href="http://www.gnu.org/licenses/agpl-3.0.html">AGPL-3.0 or later</a></td>
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/common.js">common.js</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td data-label="File"><a href="/youtube.com/static/js/hls.min.js">hls.min.js</a></td>
|
||||
<td data-label="License"><a href="https://spdx.org/licenses/BSD-3-Clause.html">BSD-3-Clause</a></td>
|
||||
<td data-label="Source"><a href="https://github.com/video-dev/hls.js/tree/v1.6.15/src">hls.js v1.6.15 source</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td data-label="File"><a href="/youtube.com/static/js/hotkeys.js">hotkeys.js</a></td>
|
||||
<td data-label="License"><a href="http://www.gnu.org/licenses/agpl-3.0.html">AGPL-3.0 or later</a></td>
|
||||
@@ -40,9 +45,24 @@
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/playlistadd.js">playlistadd.js</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td data-label="File"><a href="/youtube.com/static/js/plyr-start.js">plyr-start.js</a></td>
|
||||
<td data-label="File"><a href="/youtube.com/static/js/plyr.dash.start.js">plyr.dash.start.js</a></td>
|
||||
<td data-label="License"><a href="http://www.gnu.org/licenses/agpl-3.0.html">AGPL-3.0 or later</a></td>
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/plyr-start.js">plyr-start.js</a></td>
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/plyr.dash.start.js">plyr.dash.start.js</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td data-label="File"><a href="/youtube.com/static/js/plyr.hls.start.js">plyr.hls.start.js</a></td>
|
||||
<td data-label="License"><a href="http://www.gnu.org/licenses/agpl-3.0.html">AGPL-3.0 or later</a></td>
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/plyr.hls.start.js">plyr.hls.start.js</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td data-label="File"><a href="/youtube.com/static/js/sponsorblock.js">sponsorblock.js</a></td>
|
||||
<td data-label="License"><a href="http://www.gnu.org/licenses/agpl-3.0.html">AGPL-3.0 or later</a></td>
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/sponsorblock.js">sponsorblock.js</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td data-label="File"><a href="/youtube.com/static/js/storyboard-preview.js">storyboard-preview.js</a></td>
|
||||
<td data-label="License"><a href="http://www.gnu.org/licenses/agpl-3.0.html">AGPL-3.0 or later</a></td>
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/storyboard-preview.js">storyboard-preview.js</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td data-label="File"><a href="/youtube.com/static/modules/plyr/plyr.min.js">plyr.min.js</a></td>
|
||||
@@ -55,9 +75,14 @@
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/transcript-table.js">transcript-table.js</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td data-label="File"><a href="/youtube.com/static/js/watch.js">watch.js</a></td>
|
||||
<td data-label="File"><a href="/youtube.com/static/js/watch.dash.js">watch.dash.js</a></td>
|
||||
<td data-label="License"><a href="http://www.gnu.org/licenses/agpl-3.0.html">AGPL-3.0 or later</a></td>
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/watch.js">watch.js</a></td>
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/watch.dash.js">watch.dash.js</a></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td data-label="File"><a href="/youtube.com/static/js/watch.hls.js">watch.hls.js</a></td>
|
||||
<td data-label="License"><a href="http://www.gnu.org/licenses/agpl-3.0.html">AGPL-3.0 or later</a></td>
|
||||
<td data-label="Source"><a href="/youtube.com/static/js/watch.hls.js">watch.hls.js</a></td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
@@ -10,11 +10,17 @@
|
||||
|
||||
<div class="playlist-metadata">
|
||||
<div class="author">
|
||||
{% if thumbnail %}
|
||||
<img alt="{{ title }}" src="{{ thumbnail }}">
|
||||
{% endif %}
|
||||
<h2>{{ title }}</h2>
|
||||
</div>
|
||||
<div class="summary">
|
||||
{% if author_url %}
|
||||
<a class="playlist-author" href="{{ author_url }}">{{ author }}</a>
|
||||
{% else %}
|
||||
<span class="playlist-author">{{ author }}</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="playlist-stats">
|
||||
<div>{{ video_count|commatize }} videos</div>
|
||||
|
||||
@@ -7,15 +7,15 @@
|
||||
{% block main %}
|
||||
<form method="POST" class="settings-form">
|
||||
{% for categ in categories %}
|
||||
<h2>{{ categ|capitalize }}</h2>
|
||||
<h2>{{ _(categ|capitalize) }}</h2>
|
||||
<ul class="settings-list">
|
||||
{% for setting_name, setting_info, value in settings_by_category[categ] %}
|
||||
{% if not setting_info.get('hidden', false) %}
|
||||
<li class="setting-item">
|
||||
{% if 'label' is in(setting_info) %}
|
||||
<label for="{{ 'setting_' + setting_name }}" {% if 'comment' is in(setting_info) %}title="{{ setting_info['comment'] }}" {% endif %}>{{ setting_info['label'] }}</label>
|
||||
<label for="{{ 'setting_' + setting_name }}" {% if 'comment' is in(setting_info) %}title="{{ setting_info['comment'] }}" {% endif %}>{{ _(setting_info['label']) }}</label>
|
||||
{% else %}
|
||||
<label for="{{ 'setting_' + setting_name }}" {% if 'comment' is in(setting_info) %}title="{{ setting_info['comment'] }}" {% endif %}>{{ setting_name.replace('_', ' ')|capitalize }}</label>
|
||||
<label for="{{ 'setting_' + setting_name }}" {% if 'comment' is in(setting_info) %}title="{{ setting_info['comment'] }}" {% endif %}>{{ _(setting_name.replace('_', ' ')|capitalize) }}</label>
|
||||
{% endif %}
|
||||
|
||||
{% if setting_info['type'].__name__ == 'bool' %}
|
||||
@@ -24,24 +24,32 @@
|
||||
{% if 'options' is in(setting_info) %}
|
||||
<select id="{{ 'setting_' + setting_name }}" name="{{ setting_name }}">
|
||||
{% for option in setting_info['options'] %}
|
||||
<option value="{{ option[0] }}" {{ 'selected' if option[0] == value else '' }}>{{ option[1] }}</option>
|
||||
<option value="{{ option[0] }}" {{ 'selected' if option[0] == value else '' }}>{{ _(option[1]) }}</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
{% else %}
|
||||
<input type="number" id="{{ 'setting_' + setting_name }}" name="{{ setting_name }}" value="{{ value }}" step="1">
|
||||
{% endif %}
|
||||
{% elif setting_info['type'].__name__ == 'float' %}
|
||||
|
||||
<input type="number" id="{{ 'setting_' + setting_name }}" name="{{ setting_name }}" value="{{ value }}" step="0.01">
|
||||
{% elif setting_info['type'].__name__ == 'str' %}
|
||||
<input type="text" id="{{ 'setting_' + setting_name }}" name="{{ setting_name }}" value="{{ value }}">
|
||||
{% if 'options' is in(setting_info) %}
|
||||
<select id="{{ 'setting_' + setting_name }}" name="{{ setting_name }}">
|
||||
{% for option in setting_info['options'] %}
|
||||
<option value="{{ option[0] }}" {{ 'selected' if option[0] == value else '' }}>{{ _(option[1]) }}</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
{% else %}
|
||||
<input type="text" id="{{ 'setting_' + setting_name }}" name="{{ setting_name }}" value="{{ value }}">
|
||||
{% endif %}
|
||||
{% else %}
|
||||
<span>Error: Unknown setting type: setting_info['type'].__name__</span>
|
||||
<span>Error: Unknown setting type: {{ setting_info['type'].__name__ }}</span>
|
||||
{% endif %}
|
||||
</li>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% endfor %}
|
||||
<input type="submit" value="Save settings">
|
||||
<input type="submit" value="{{ _('Save settings') }}">
|
||||
</form>
|
||||
{% endblock main %}
|
||||
|
||||
@@ -8,14 +8,8 @@
|
||||
{% if settings.use_video_player == 2 %}
|
||||
<!-- plyr -->
|
||||
<link href="/youtube.com/static/modules/plyr/plyr.css" rel="stylesheet">
|
||||
<!--/ plyr -->
|
||||
<style>
|
||||
/* Prevent this div from blocking right-click menu for video
|
||||
e.g. Firefox playback speed options */
|
||||
.plyr__poster {
|
||||
display: none !important;
|
||||
}
|
||||
</style>
|
||||
<link href="/youtube.com/static/modules/plyr/custom_plyr.css" rel="stylesheet">
|
||||
<!-- /plyr -->
|
||||
{% endif %}
|
||||
{% endblock style %}
|
||||
|
||||
@@ -29,22 +23,9 @@
|
||||
{% endif %}
|
||||
</span>
|
||||
</div>
|
||||
{% elif (uni_sources.__len__() == 0 or live) and hls_formats.__len__() != 0 %}
|
||||
<div class="live-url-choices">
|
||||
<span>Copy a url into your video player:</span>
|
||||
<ol>
|
||||
{% for fmt in hls_formats %}
|
||||
<li class="url-choice"><div class="url-choice-label">{{ fmt['video_quality'] }}: </div><input class="url-choice-copy" value="{{ fmt['url'] }}" readonly onclick="this.select();"></li>
|
||||
{% endfor %}
|
||||
</ol>
|
||||
</div>
|
||||
{% else %}
|
||||
<figure class="sc-video">
|
||||
<video id="js-video-player" playsinline controls>
|
||||
{% if uni_sources %}
|
||||
<source src="{{ uni_sources[uni_idx]['url'] }}" type="{{ uni_sources[uni_idx]['type'] }}" data-res="{{ uni_sources[uni_idx]['quality'] }}">
|
||||
{% endif %}
|
||||
|
||||
<video id="js-video-player" playsinline controls {{ 'autoplay' if settings.autoplay_videos }}>
|
||||
{% for source in subtitle_sources %}
|
||||
{% if source['on'] %}
|
||||
<track label="{{ source['label'] }}" src="{{ source['url'] }}" kind="subtitles" srclang="{{ source['srclang'] }}" default>
|
||||
@@ -52,7 +33,18 @@
|
||||
<track label="{{ source['label'] }}" src="{{ source['url'] }}" kind="subtitles" srclang="{{ source['srclang'] }}">
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{% if uni_sources %}
|
||||
{% for source in uni_sources %}
|
||||
<source src="{{ source['url'] }}" type="{{ source['type'] }}" title="{{ source['quality_string'] }}">
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
</video>
|
||||
{% if hls_unavailable and not uni_sources %}
|
||||
<div class="playability-error">
|
||||
<span>Error: HLS streams unavailable. Video may not play without JavaScript fallback.</span>
|
||||
</div>
|
||||
{% endif %}
|
||||
</figure>
|
||||
{% endif %}
|
||||
|
||||
@@ -82,24 +74,34 @@
|
||||
|
||||
<div class="external-player-controls">
|
||||
<input class="speed" id="speed-control" type="text" title="Video speed">
|
||||
{% if settings.use_video_player < 2 %}
|
||||
<!-- Native player quality selector -->
|
||||
<select id="quality-select" autocomplete="off">
|
||||
<option value="-1" selected>Auto</option>
|
||||
<!-- Quality options will be populated by HLS -->
|
||||
</select>
|
||||
{% else %}
|
||||
<select id="quality-select" autocomplete="off" style="display: none;">
|
||||
<!-- Quality options will be populated by HLS -->
|
||||
</select>
|
||||
{% endif %}
|
||||
{% if settings.use_video_player != 2 %}
|
||||
<select id="quality-select" autocomplete="off">
|
||||
{% for src in uni_sources %}
|
||||
<option value='{"type": "uni", "index": {{ loop.index0 }}}' {{ 'selected' if loop.index0 == uni_idx and not using_pair_sources else '' }} >{{ src['quality_string'] }}</option>
|
||||
{% endfor %}
|
||||
{% for src_pair in pair_sources %}
|
||||
<option value='{"type": "pair", "index": {{ loop.index0}}}' {{ 'selected' if loop.index0 == pair_idx and using_pair_sources else '' }} >{{ src_pair['quality_string'] }}</option>
|
||||
{% if audio_tracks|length > 1 %}
|
||||
<select id="audio-track-select" autocomplete="off">
|
||||
{% for track in audio_tracks %}
|
||||
<option value="{{ track['id'] }}" {{ 'selected' if track['is_default'] else '' }}>{{ track['name'] }}</option>
|
||||
{% endfor %}
|
||||
</select>
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
</div>
|
||||
<input class="v-checkbox" name="video_info_list" value="{{ video_info }}" form="playlist-edit" type="checkbox">
|
||||
|
||||
<span class="v-direct-link"><a href="https://youtu.be/{{ video_id }}" rel="noopener noreferrer" target="_blank">Direct Link</a></span>
|
||||
<span class="v-direct-link"><a href="https://youtu.be/{{ video_id }}" rel="noopener noreferrer" target="_blank">{{ _('Direct Link') }}</a></span>
|
||||
|
||||
{% if settings.use_video_download != 0 %}
|
||||
<details class="v-download">
|
||||
<summary class="download-dropdown-label">Download</summary>
|
||||
<summary class="download-dropdown-label">{{ _('Download') }}</summary>
|
||||
<ul class="download-dropdown-content">
|
||||
{% for format in download_formats %}
|
||||
<li class="download-format">
|
||||
@@ -135,7 +137,11 @@
|
||||
{% for track in music_list %}
|
||||
<tr>
|
||||
{% for attribute in music_attributes %}
|
||||
<td>{{ track.get(attribute.lower(), '') }}</td>
|
||||
{% if attribute.lower() == 'title' and track['url'] is not none %}
|
||||
<td><a href="{{ track['url'] }}">{{ track.get(attribute.lower(), '') }}</a></td>
|
||||
{% else %}
|
||||
<td>{{ track.get(attribute.lower(), '') }}</td>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</tr>
|
||||
{% endfor %}
|
||||
@@ -143,7 +149,7 @@
|
||||
{% endif %}
|
||||
</div>
|
||||
<details class="v-more-info">
|
||||
<summary>More info</summary>
|
||||
<summary>{{ _('More info') }}</summary>
|
||||
<div class="more-info-content">
|
||||
<p>Tor exit node: {{ ip_address }}</p>
|
||||
{% if invidious_used %}
|
||||
@@ -167,13 +173,17 @@
|
||||
<div class="playlist-header">
|
||||
<a href="{{ playlist['url'] }}" title="{{ playlist['title'] }}"><h3>{{ playlist['title'] }}</h3></a>
|
||||
<ul class="playlist-metadata">
|
||||
<li><label for="playlist-autoplay-toggle">Autoplay: </label><input id="playlist-autoplay-toggle" type="checkbox" class="autoplay-toggle"></li>
|
||||
<li><label for="playlist-autoplay-toggle">{{ _('AutoNext') }}: </label><input id="playlist-autoplay-toggle" type="checkbox" class="autoplay-toggle"></li>
|
||||
{% if playlist['current_index'] is none %}
|
||||
<li>[Error!]/{{ playlist['video_count'] }}</li>
|
||||
{% else %}
|
||||
<li>{{ playlist['current_index']+1 }}/{{ playlist['video_count'] }}</li>
|
||||
{% endif %}
|
||||
{% if playlist['author_url'] %}
|
||||
<li><a href="{{ playlist['author_url'] }}" title="{{ playlist['author'] }}">{{ playlist['author'] }}</a></li>
|
||||
{% elif playlist['author'] %}
|
||||
<li>{{ playlist['author'] }}</li>
|
||||
{% endif %}
|
||||
</ul>
|
||||
</div>
|
||||
<nav class="playlist-videos">
|
||||
@@ -190,7 +200,7 @@
|
||||
</nav>
|
||||
</div>
|
||||
{% elif settings.related_videos_mode != 0 %}
|
||||
<div class="related-autoplay"><label for="related-autoplay-toggle">Autoplay: </label><input id="related-autoplay-toggle" type="checkbox" class="autoplay-toggle"></div>
|
||||
<div class="related-autoplay"><label for="related-autoplay-toggle">{{ _('AutoNext') }}: </label><input id="related-autoplay-toggle" type="checkbox" class="autoplay-toggle"></div>
|
||||
{% endif %}
|
||||
|
||||
{% if subtitle_sources %}
|
||||
@@ -212,7 +222,7 @@
|
||||
|
||||
{% if settings.related_videos_mode != 0 %}
|
||||
<details class="related-videos-outer" {{'open' if settings.related_videos_mode == 1 else ''}}>
|
||||
<summary>Related Videos</summary>
|
||||
<summary>{{ _('Related Videos') }}</summary>
|
||||
<nav class="related-videos-inner">
|
||||
{% for info in related %}
|
||||
{{ common_elements.item(info, include_badges=false) }}
|
||||
@@ -226,10 +236,10 @@
|
||||
<!-- comments -->
|
||||
{% if settings.comments_mode != 0 %}
|
||||
{% if comments_disabled %}
|
||||
<div class="comments-area-outer comments-disabled">Comments disabled</div>
|
||||
<div class="comments-area-outer comments-disabled">{{ _('Comments disabled') }}</div>
|
||||
{% else %}
|
||||
<details class="comments-area-outer" {{'open' if settings.comments_mode == 1 else ''}}>
|
||||
<summary>{{ comment_count|commatize }} comment{{'s' if comment_count != 1 else ''}}</summary>
|
||||
<summary>{{ comment_count|commatize }} {{ _('Comment') }}{{'s' if comment_count != '1' else ''}}</summary>
|
||||
<div class="comments-area-inner comments-area">
|
||||
{% if comments_info %}
|
||||
{{ comments.video_comments(comments_info) }}
|
||||
@@ -241,25 +251,64 @@
|
||||
|
||||
</div>
|
||||
|
||||
<script src="/youtube.com/static/js/av-merge.js"></script>
|
||||
<script src="/youtube.com/static/js/watch.js"></script>
|
||||
<script>
|
||||
// @license magnet:?xt=urn:btih:0b31508aeb0634b347b8270c7bee4d411b5d4109&dn=agpl-3.0.txt AGPL-v3-or-Later
|
||||
let storyboard_url = {{ storyboard_url | tojson }};
|
||||
let hls_manifest_url = {{ hls_manifest_url | tojson }};
|
||||
let hls_unavailable = {{ hls_unavailable | tojson }};
|
||||
let playback_mode = {{ playback_mode | tojson }};
|
||||
let pair_sources = {{ pair_sources | tojson }};
|
||||
let pair_idx = {{ pair_idx | tojson }};
|
||||
// @license-end
|
||||
</script>
|
||||
|
||||
<script src="/youtube.com/static/js/common.js"></script>
|
||||
<script src="/youtube.com/static/js/transcript-table.js"></script>
|
||||
{% if settings.use_video_player == 2 %}
|
||||
|
||||
{% set hls_should_work = (playback_mode == 'hls' or playback_mode == 'auto') and not hls_unavailable %}
|
||||
{% set use_dash = not hls_should_work %}
|
||||
|
||||
{% if use_dash %}
|
||||
<script src="/youtube.com/static/js/av-merge.js"></script>
|
||||
{% else %}
|
||||
<script src="/youtube.com/static/js/hls.min.js"
|
||||
integrity="sha512-CSVqc4a7tn+tizDNt+eDoVn2fXYAwMDpCLrwGlWrOktNfZQ9gp4dKKScElMeRlrIifhliXs0a06BLaUgmMlCUw=="
|
||||
crossorigin="anonymous"></script>
|
||||
{% endif %}
|
||||
|
||||
{% if settings.use_video_player == 0 %}
|
||||
<!-- Native player (no hotkeys) -->
|
||||
{% if use_dash %}
|
||||
<script src="/youtube.com/static/js/watch.dash.js"></script>
|
||||
{% else %}
|
||||
<script src="/youtube.com/static/js/watch.hls.js"></script>
|
||||
{% endif %}
|
||||
{% elif settings.use_video_player == 1 %}
|
||||
<!-- Native player with hotkeys -->
|
||||
<script src="/youtube.com/static/js/hotkeys.js"></script>
|
||||
{% if use_dash %}
|
||||
<script src="/youtube.com/static/js/watch.dash.js"></script>
|
||||
{% else %}
|
||||
<script src="/youtube.com/static/js/watch.hls.js"></script>
|
||||
{% endif %}
|
||||
{% elif settings.use_video_player == 2 %}
|
||||
<!-- plyr -->
|
||||
<script src="/youtube.com/static/modules/plyr/plyr.min.js"
|
||||
integrity="sha512-l6ZzdXpfMHRfifqaR79wbYCEWjLDMI9DnROvb+oLkKq6d7MGroGpMbI7HFpicvmAH/2aQO+vJhewq8rhysrImw=="
|
||||
crossorigin="anonymous"></script>
|
||||
<script src="/youtube.com/static/js/plyr-start.js"></script>
|
||||
{% if use_dash %}
|
||||
<script src="/youtube.com/static/js/plyr.dash.start.js"></script>
|
||||
{% else %}
|
||||
<script src="/youtube.com/static/js/plyr.hls.start.js"></script>
|
||||
{% endif %}
|
||||
<!-- /plyr -->
|
||||
{% elif settings.use_video_player == 1 %}
|
||||
<script src="/youtube.com/static/js/hotkeys.js"></script>
|
||||
{% endif %}
|
||||
|
||||
<!-- Storyboard Preview Thumbnails -->
|
||||
{% if settings.use_video_player != 2 %}
|
||||
<script src="/youtube.com/static/js/storyboard-preview.js"></script>
|
||||
{% endif %}
|
||||
|
||||
{% if settings.use_comments_js %} <script src="/youtube.com/static/js/comments.js"></script> {% endif %}
|
||||
{% if settings.use_sponsorblock_js %} <script src="/youtube.com/static/js/sponsorblock.js"></script> {% endif %}
|
||||
{% endblock main %}
|
||||
|
||||
479
youtube/util.py
479
youtube/util.py
@@ -1,4 +1,6 @@
|
||||
from datetime import datetime
|
||||
import logging
|
||||
import random
|
||||
import settings
|
||||
import socks
|
||||
import sockshandler
|
||||
@@ -21,6 +23,8 @@ import stem
|
||||
import stem.control
|
||||
import traceback
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# The trouble with the requests library: It ships its own certificate bundle via certifi
|
||||
# instead of using the system certificate store, meaning self-signed certificates
|
||||
# configured by the user will not work. Some draconian networks block TLS unless a corporate
|
||||
@@ -51,8 +55,8 @@ import traceback
|
||||
# https://github.com/kennethreitz/requests/issues/2966
|
||||
|
||||
# Until then, I will use a mix of urllib3 and urllib.
|
||||
import urllib3
|
||||
import urllib3.contrib.socks
|
||||
import urllib3 # noqa: E402 (imported here intentionally after the long note above)
|
||||
import urllib3.contrib.socks # noqa: E402
|
||||
|
||||
URL_ORIGIN = "/https://www.youtube.com"
|
||||
|
||||
@@ -174,7 +178,6 @@ def get_pool(use_tor):
|
||||
class HTTPAsymmetricCookieProcessor(urllib.request.BaseHandler):
|
||||
'''Separate cookiejars for receiving and sending'''
|
||||
def __init__(self, cookiejar_send=None, cookiejar_receive=None):
|
||||
import http.cookiejar
|
||||
self.cookiejar_send = cookiejar_send
|
||||
self.cookiejar_receive = cookiejar_receive
|
||||
|
||||
@@ -205,6 +208,16 @@ class FetchError(Exception):
|
||||
self.error_message = error_message
|
||||
|
||||
|
||||
def _noop_cleanup(response):
|
||||
'''No-op cleanup used when the urllib opener owns the response.'''
|
||||
return None
|
||||
|
||||
|
||||
def _release_conn_cleanup(response):
|
||||
'''Release the urllib3 pooled connection back to the pool.'''
|
||||
response.release_conn()
|
||||
|
||||
|
||||
def decode_content(content, encoding_header):
|
||||
encodings = encoding_header.replace(' ', '').split(',')
|
||||
for encoding in reversed(encodings):
|
||||
@@ -260,7 +273,7 @@ def fetch_url_response(url, headers=(), timeout=15, data=None,
|
||||
opener = urllib.request.build_opener(cookie_processor)
|
||||
|
||||
response = opener.open(req, timeout=timeout)
|
||||
cleanup_func = (lambda r: None)
|
||||
cleanup_func = _noop_cleanup
|
||||
|
||||
else: # Use a urllib3 pool. Cookies can't be used since urllib3 doesn't have easy support for them.
|
||||
# default: Retry.DEFAULT = Retry(3)
|
||||
@@ -294,7 +307,7 @@ def fetch_url_response(url, headers=(), timeout=15, data=None,
|
||||
error_message=msg)
|
||||
else:
|
||||
raise
|
||||
cleanup_func = (lambda r: r.release_conn())
|
||||
cleanup_func = _release_conn_cleanup
|
||||
|
||||
return response, cleanup_func
|
||||
|
||||
@@ -302,72 +315,138 @@ def fetch_url_response(url, headers=(), timeout=15, data=None,
|
||||
def fetch_url(url, headers=(), timeout=15, report_text=None, data=None,
|
||||
cookiejar_send=None, cookiejar_receive=None, use_tor=True,
|
||||
debug_name=None):
|
||||
while True:
|
||||
start_time = time.monotonic()
|
||||
"""
|
||||
Fetch URL with exponential backoff retry logic for rate limiting.
|
||||
|
||||
response, cleanup_func = fetch_url_response(
|
||||
url, headers, timeout=timeout, data=data,
|
||||
cookiejar_send=cookiejar_send, cookiejar_receive=cookiejar_receive,
|
||||
use_tor=use_tor)
|
||||
response_time = time.monotonic()
|
||||
Retries:
|
||||
- 429 Too Many Requests: Exponential backoff (1s, 2s, 4s, 8s, 16s)
|
||||
- 503 Service Unavailable: Exponential backoff
|
||||
- 302 Redirect to Google Sorry: Treated as rate limit
|
||||
|
||||
content = response.read()
|
||||
Max retries: 5 attempts with exponential backoff
|
||||
"""
|
||||
max_retries = 5
|
||||
base_delay = 1.0 # Base delay in seconds
|
||||
|
||||
read_finish = time.monotonic()
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
start_time = time.monotonic()
|
||||
|
||||
cleanup_func(response) # release_connection for urllib3
|
||||
content = decode_content(
|
||||
content,
|
||||
response.getheader('Content-Encoding', default='identity'))
|
||||
response, cleanup_func = fetch_url_response(
|
||||
url, headers, timeout=timeout, data=data,
|
||||
cookiejar_send=cookiejar_send, cookiejar_receive=cookiejar_receive,
|
||||
use_tor=use_tor)
|
||||
response_time = time.monotonic()
|
||||
|
||||
if (settings.debugging_save_responses
|
||||
and debug_name is not None and content):
|
||||
save_dir = os.path.join(settings.data_dir, 'debug')
|
||||
if not os.path.exists(save_dir):
|
||||
os.makedirs(save_dir)
|
||||
content = response.read()
|
||||
|
||||
with open(os.path.join(save_dir, debug_name), 'wb') as f:
|
||||
f.write(content)
|
||||
read_finish = time.monotonic()
|
||||
|
||||
if response.status == 429 or (
|
||||
response.status == 302 and (response.getheader('Location') == url
|
||||
or response.getheader('Location').startswith(
|
||||
'https://www.google.com/sorry/index'
|
||||
)
|
||||
)
|
||||
):
|
||||
print(response.status, response.reason, response.getheaders())
|
||||
ip = re.search(
|
||||
br'IP address: ((?:[\da-f]*:)+[\da-f]+|(?:\d+\.)+\d+)',
|
||||
content)
|
||||
ip = ip.group(1).decode('ascii') if ip else None
|
||||
if not ip:
|
||||
ip = re.search(r'IP=((?:\d+\.)+\d+)',
|
||||
response.getheader('Set-Cookie') or '')
|
||||
ip = ip.group(1) if ip else None
|
||||
cleanup_func(response) # release_connection for urllib3
|
||||
content = decode_content(
|
||||
content,
|
||||
response.headers.get('Content-Encoding', default='identity'))
|
||||
|
||||
# don't get new identity if we're not using Tor
|
||||
if not use_tor:
|
||||
raise FetchError('429', reason=response.reason, ip=ip)
|
||||
if (settings.debugging_save_responses
|
||||
and debug_name is not None
|
||||
and content):
|
||||
save_dir = os.path.join(settings.data_dir, 'debug')
|
||||
os.makedirs(save_dir, exist_ok=True)
|
||||
|
||||
print('Error: YouTube blocked the request because the Tor exit node is overutilized. Exit node IP address: %s' % ip)
|
||||
with open(os.path.join(save_dir, debug_name), 'wb') as f:
|
||||
f.write(content)
|
||||
|
||||
# get new identity
|
||||
error = tor_manager.new_identity(start_time)
|
||||
if error:
|
||||
raise FetchError(
|
||||
'429', reason=response.reason, ip=ip,
|
||||
error_message='Automatic circuit change: ' + error)
|
||||
else:
|
||||
continue # retry now that we have new identity
|
||||
# Check for rate limiting (429) or redirect to Google Sorry
|
||||
if response.status == 429 or (
|
||||
response.status == 302 and (response.getheader('Location') == url
|
||||
or response.getheader('Location').startswith(
|
||||
'https://www.google.com/sorry/index'
|
||||
)
|
||||
)
|
||||
):
|
||||
logger.info(f'Rate limit response: {response.status} {response.reason}')
|
||||
ip = re.search(
|
||||
br'IP address: ((?:[\da-f]*:)+[\da-f]+|(?:\d+\.)+\d+)',
|
||||
content)
|
||||
ip = ip.group(1).decode('ascii') if ip else None
|
||||
if not ip:
|
||||
ip = re.search(r'IP=((?:\d+\.)+\d+)',
|
||||
response.getheader('Set-Cookie') or '')
|
||||
ip = ip.group(1) if ip else None
|
||||
|
||||
elif response.status >= 400:
|
||||
raise FetchError(str(response.status), reason=response.reason,
|
||||
ip=None)
|
||||
break
|
||||
# Without Tor, no point retrying with same IP
|
||||
if not use_tor or not settings.route_tor:
|
||||
logger.warning('Rate limited (429). Enable Tor routing to retry with new IP.')
|
||||
raise FetchError('429', reason=response.reason, ip=ip)
|
||||
|
||||
# Tor: exhausted retries
|
||||
if attempt >= max_retries - 1:
|
||||
logger.error(f'Rate limited after {max_retries} retries. Exit IP: {ip}')
|
||||
raise FetchError('429', reason=response.reason, ip=ip,
|
||||
error_message='Tor exit node overutilized after multiple retries')
|
||||
|
||||
# Tor: get new identity and retry
|
||||
logger.info(f'Rate limited. Getting new Tor identity... (IP: {ip})')
|
||||
error = tor_manager.new_identity(start_time)
|
||||
if error:
|
||||
raise FetchError(
|
||||
'429', reason=response.reason, ip=ip,
|
||||
error_message='Automatic circuit change: ' + error)
|
||||
continue # retry with new identity
|
||||
|
||||
# Check for client errors (400, 404) - don't retry these
|
||||
if response.status == 400:
|
||||
logger.error(f'Bad Request (400) - Invalid parameters or URL: {url[:100]}')
|
||||
raise FetchError('400', reason='Bad Request - Invalid parameters or URL format', ip=None)
|
||||
|
||||
if response.status == 404:
|
||||
logger.warning(f'Not Found (404): {url[:100]}')
|
||||
raise FetchError('404', reason='Not Found', ip=None)
|
||||
|
||||
# Check for other server errors (503, 502, 504)
|
||||
if response.status in (502, 503, 504):
|
||||
if attempt >= max_retries - 1:
|
||||
logger.error(f'Server error {response.status} after {max_retries} retries')
|
||||
raise FetchError(str(response.status), reason=response.reason, ip=None)
|
||||
|
||||
# Exponential backoff for server errors. Non-crypto jitter.
|
||||
delay = (base_delay * (2 ** attempt)) + random.uniform(0, 1)
|
||||
logger.warning(f'Server error ({response.status}). Waiting {delay:.1f}s before retry {attempt + 1}/{max_retries}...')
|
||||
time.sleep(delay)
|
||||
continue
|
||||
|
||||
# Success - break out of retry loop
|
||||
break
|
||||
|
||||
except urllib3.exceptions.MaxRetryError as e:
|
||||
# If this is the last attempt, raise the error
|
||||
if attempt >= max_retries - 1:
|
||||
exception_cause = e.__context__.__context__
|
||||
if (isinstance(exception_cause, socks.ProxyConnectionError)
|
||||
and settings.route_tor):
|
||||
msg = ('Failed to connect to Tor. Check that Tor is open and '
|
||||
'that your internet connection is working.\n\n'
|
||||
+ str(e))
|
||||
logger.error(f'Tor connection failed: {msg}')
|
||||
raise FetchError('502', reason='Bad Gateway',
|
||||
error_message=msg)
|
||||
elif isinstance(e.__context__,
|
||||
urllib3.exceptions.NewConnectionError):
|
||||
msg = 'Failed to establish a connection.\n\n' + str(e)
|
||||
logger.error(f'Connection failed: {msg}')
|
||||
raise FetchError(
|
||||
'502', reason='Bad Gateway',
|
||||
error_message=msg)
|
||||
else:
|
||||
raise
|
||||
|
||||
# Wait and retry. Non-crypto jitter.
|
||||
delay = (base_delay * (2 ** attempt)) + random.uniform(0, 1)
|
||||
logger.warning(f'Connection error. Waiting {delay:.1f}s before retry {attempt + 1}/{max_retries}...')
|
||||
time.sleep(delay)
|
||||
|
||||
if report_text:
|
||||
print(report_text, ' Latency:', round(response_time - start_time, 3), ' Read time:', round(read_finish - response_time,3))
|
||||
logger.info(f'{report_text} - Latency: {round(response_time - start_time, 3)}s - Read time: {round(read_finish - response_time, 3)}s')
|
||||
|
||||
return content
|
||||
|
||||
@@ -394,7 +473,6 @@ def head(url, use_tor=False, report_text=None, max_redirects=10):
|
||||
round(time.monotonic() - start_time, 3))
|
||||
return response
|
||||
|
||||
|
||||
mobile_user_agent = 'Mozilla/5.0 (Linux; Android 7.0; Redmi Note 4 Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36'
|
||||
mobile_ua = (('User-Agent', mobile_user_agent),)
|
||||
desktop_user_agent = 'Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0'
|
||||
@@ -404,13 +482,13 @@ desktop_xhr_headers = (
|
||||
('Accept', '*/*'),
|
||||
('Accept-Language', 'en-US,en;q=0.5'),
|
||||
('X-YouTube-Client-Name', '1'),
|
||||
('X-YouTube-Client-Version', '2.20180830'),
|
||||
('X-YouTube-Client-Version', '2.20240304.00.00'),
|
||||
) + desktop_ua
|
||||
mobile_xhr_headers = (
|
||||
('Accept', '*/*'),
|
||||
('Accept-Language', 'en-US,en;q=0.5'),
|
||||
('X-YouTube-Client-Name', '2'),
|
||||
('X-YouTube-Client-Version', '2.20180830'),
|
||||
('X-YouTube-Client-Version', '2.20240304.08.00'),
|
||||
) + mobile_ua
|
||||
|
||||
|
||||
@@ -462,21 +540,31 @@ class RateLimitedQueue(gevent.queue.Queue):
|
||||
|
||||
|
||||
def download_thumbnail(save_directory, video_id):
|
||||
url = "https://i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
|
||||
save_location = os.path.join(save_directory, video_id + ".jpg")
|
||||
try:
|
||||
thumbnail = fetch_url(url, report_text="Saved thumbnail: " + video_id)
|
||||
except urllib.error.HTTPError as e:
|
||||
print("Failed to download thumbnail for " + video_id + ": " + str(e))
|
||||
return False
|
||||
try:
|
||||
f = open(save_location, 'wb')
|
||||
except FileNotFoundError:
|
||||
os.makedirs(save_directory, exist_ok=True)
|
||||
f = open(save_location, 'wb')
|
||||
f.write(thumbnail)
|
||||
f.close()
|
||||
return True
|
||||
save_location = os.path.join(save_directory, video_id + '.jpg')
|
||||
for quality in ('hq720.jpg', 'sddefault.jpg', 'hqdefault.jpg'):
|
||||
url = f'https://i.ytimg.com/vi/{video_id}/{quality}'
|
||||
try:
|
||||
thumbnail = fetch_url(url, report_text='Saved thumbnail: ' + video_id)
|
||||
except FetchError as e:
|
||||
if '404' in str(e):
|
||||
continue
|
||||
print('Failed to download thumbnail for ' + video_id + ': ' + str(e))
|
||||
return False
|
||||
except urllib.error.HTTPError as e:
|
||||
if e.code == 404:
|
||||
continue
|
||||
print('Failed to download thumbnail for ' + video_id + ': ' + str(e))
|
||||
return False
|
||||
try:
|
||||
with open(save_location, 'wb') as f:
|
||||
f.write(thumbnail)
|
||||
except FileNotFoundError:
|
||||
os.makedirs(save_directory, exist_ok=True)
|
||||
with open(save_location, 'wb') as f:
|
||||
f.write(thumbnail)
|
||||
return True
|
||||
print('No thumbnail available for ' + video_id)
|
||||
return False
|
||||
|
||||
|
||||
def download_thumbnails(save_directory, ids):
|
||||
@@ -502,9 +590,40 @@ def video_id(url):
|
||||
return urllib.parse.parse_qs(url_parts.query)['v'][0]
|
||||
|
||||
|
||||
# default, sddefault, mqdefault, hqdefault, hq720
|
||||
def get_thumbnail_url(video_id):
|
||||
return settings.img_prefix + "https://i.ytimg.com/vi/" + video_id + "/mqdefault.jpg"
|
||||
def get_thumbnail_url(video_id, quality='hq720'):
|
||||
"""Get thumbnail URL with fallback to lower quality if needed.
|
||||
|
||||
Args:
|
||||
video_id: YouTube video ID
|
||||
quality: Preferred quality ('maxres', 'hq720', 'sd', 'hq', 'mq', 'default')
|
||||
|
||||
Returns:
|
||||
Tuple of (best_available_url, quality_used)
|
||||
"""
|
||||
# Quality priority order (highest to lowest)
|
||||
quality_order = {
|
||||
'maxres': ['maxresdefault.jpg', 'sddefault.jpg', 'hqdefault.jpg'],
|
||||
'hq720': ['hq720.jpg', 'sddefault.jpg', 'hqdefault.jpg'],
|
||||
'sd': ['sddefault.jpg', 'hqdefault.jpg'],
|
||||
'hq': ['hqdefault.jpg', 'mqdefault.jpg'],
|
||||
'mq': ['mqdefault.jpg', 'default.jpg'],
|
||||
'default': ['default.jpg'],
|
||||
}
|
||||
|
||||
qualities = quality_order.get(quality, quality_order['hq720'])
|
||||
base_url = f"{settings.img_prefix}https://i.ytimg.com/vi/{video_id}/"
|
||||
|
||||
# For now, return the highest quality URL
|
||||
# The browser will handle 404s gracefully with alt text
|
||||
return base_url + qualities[0], qualities[0]
|
||||
|
||||
|
||||
def get_best_thumbnail_url(video_id):
|
||||
"""Get the best available thumbnail URL for a video.
|
||||
|
||||
Tries hq720 first (for HD videos), falls back to sddefault for SD videos.
|
||||
"""
|
||||
return get_thumbnail_url(video_id, quality='hq720')[0]
|
||||
|
||||
|
||||
def seconds_to_timestamp(seconds):
|
||||
@@ -538,6 +657,12 @@ def prefix_url(url):
|
||||
if url is None:
|
||||
return None
|
||||
url = url.lstrip('/') # some urls have // before them, which has a special meaning
|
||||
|
||||
# Increase resolution for YouTube channel avatars
|
||||
if url and ('ggpht.com' in url or 'yt3.ggpht.com' in url):
|
||||
# Replace size parameter with higher resolution (s240 instead of s88)
|
||||
url = re.sub(r'=s\d+-c-k', '=s240-c-k-c0x00ffffff-no-rj', url)
|
||||
|
||||
return '/' + url
|
||||
|
||||
|
||||
@@ -665,8 +790,204 @@ def to_valid_filename(name):
|
||||
return name
|
||||
|
||||
|
||||
# https://github.com/yt-dlp/yt-dlp/blob/master/yt_dlp/extractor/youtube.py#L72
|
||||
INNERTUBE_CLIENTS = {
|
||||
'android': {
|
||||
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'hl': 'en',
|
||||
'gl': 'US',
|
||||
'clientName': 'ANDROID',
|
||||
'clientVersion': '19.09.36',
|
||||
'osName': 'Android',
|
||||
'osVersion': '12',
|
||||
'androidSdkVersion': 31,
|
||||
'platform': 'MOBILE',
|
||||
'userAgent': 'com.google.android.youtube/19.09.36 (Linux; U; Android 12; US) gzip'
|
||||
},
|
||||
# https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
|
||||
#'thirdParty': {
|
||||
# 'embedUrl': 'https://google.com', # Can be any valid URL
|
||||
#}
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
},
|
||||
|
||||
'android-test-suite': {
|
||||
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'hl': 'en',
|
||||
'gl': 'US',
|
||||
'clientName': 'ANDROID_TESTSUITE',
|
||||
'clientVersion': '1.9',
|
||||
'osName': 'Android',
|
||||
'osVersion': '12',
|
||||
'androidSdkVersion': 31,
|
||||
'platform': 'MOBILE',
|
||||
'userAgent': 'com.google.android.youtube/1.9 (Linux; U; Android 12; US) gzip'
|
||||
},
|
||||
# https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
|
||||
#'thirdParty': {
|
||||
# 'embedUrl': 'https://google.com', # Can be any valid URL
|
||||
#}
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 3,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
},
|
||||
|
||||
'ios': {
|
||||
'INNERTUBE_API_KEY': 'AIzaSyB-63vPrdThhKuerbB2N_l7Kwwcxj6yUAc',
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'hl': 'en',
|
||||
'gl': 'US',
|
||||
'clientName': 'IOS',
|
||||
'clientVersion': '21.03.2',
|
||||
'deviceMake': 'Apple',
|
||||
'deviceModel': 'iPhone16,2',
|
||||
'osName': 'iPhone',
|
||||
'osVersion': '18.7.2.22H124',
|
||||
'userAgent': 'com.google.ios.youtube/21.03.2 (iPhone16,2; U; CPU iOS 18_7_2 like Mac OS X)'
|
||||
}
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
|
||||
'REQUIRE_JS_PLAYER': False
|
||||
},
|
||||
|
||||
# This client can access age restricted videos (unless the uploader has disabled the 'allow embedding' option)
|
||||
# See: https://github.com/zerodytrash/YouTube-Internal-Clients
|
||||
'tv_embedded': {
|
||||
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'hl': 'en',
|
||||
'gl': 'US',
|
||||
'clientName': 'TVHTML5_SIMPLY_EMBEDDED_PLAYER',
|
||||
'clientVersion': '2.0',
|
||||
'clientScreen': 'EMBED',
|
||||
},
|
||||
# https://github.com/yt-dlp/yt-dlp/pull/575#issuecomment-887739287
|
||||
'thirdParty': {
|
||||
'embedUrl': 'https://google.com', # Can be any valid URL
|
||||
}
|
||||
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 85,
|
||||
'REQUIRE_JS_PLAYER': True,
|
||||
},
|
||||
|
||||
'web': {
|
||||
'INNERTUBE_API_KEY': 'AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8',
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'WEB',
|
||||
'clientVersion': '2.20220801.00.00',
|
||||
'userAgent': desktop_user_agent,
|
||||
}
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 1
|
||||
},
|
||||
'android_vr': {
|
||||
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'clientName': 'ANDROID_VR',
|
||||
'clientVersion': '1.60.19',
|
||||
'deviceMake': 'Oculus',
|
||||
'deviceModel': 'Quest 3',
|
||||
'androidSdkVersion': 32,
|
||||
'userAgent': 'com.google.android.apps.youtube.vr.oculus/1.60.19 (Linux; U; Android 12L; eureka-user Build/SQ3A.220605.009.A1) gzip',
|
||||
'osName': 'Android',
|
||||
'osVersion': '12L',
|
||||
},
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 28,
|
||||
'REQUIRE_JS_PLAYER': False,
|
||||
},
|
||||
|
||||
'ios_vr': {
|
||||
'INNERTUBE_API_KEY': 'AIzaSyA8eiZmM1FaDVjRy-df2KTyQ_vz_yYM39w',
|
||||
'INNERTUBE_CONTEXT': {
|
||||
'client': {
|
||||
'hl': 'en',
|
||||
'gl': 'US',
|
||||
'clientName': 'IOS_VR',
|
||||
'clientVersion': '1.0',
|
||||
'deviceMake': 'Apple',
|
||||
'deviceModel': 'iPhone16,2',
|
||||
'osName': 'iPhone',
|
||||
'osVersion': '18.7.2.22H124',
|
||||
'userAgent': 'com.google.ios.youtube/1.0 (iPhone16,2; U; CPU iOS 18_7_2 like Mac OS X)'
|
||||
}
|
||||
},
|
||||
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
|
||||
'REQUIRE_JS_PLAYER': False
|
||||
},
|
||||
}
|
||||
|
||||
def get_visitor_data():
|
||||
visitor_data = None
|
||||
visitor_data_cache = os.path.join(settings.data_dir, 'visitorData.txt')
|
||||
os.makedirs(settings.data_dir, exist_ok=True)
|
||||
if os.path.isfile(visitor_data_cache):
|
||||
with open(visitor_data_cache, 'r') as file:
|
||||
print('Getting visitor_data from cache')
|
||||
visitor_data = file.read()
|
||||
max_age = 12*3600
|
||||
file_age = time.time() - os.path.getmtime(visitor_data_cache)
|
||||
if file_age > max_age:
|
||||
print('visitor_data cache is too old. Removing file...')
|
||||
os.remove(visitor_data_cache)
|
||||
return visitor_data
|
||||
|
||||
print('Fetching youtube homepage to get visitor_data')
|
||||
yt_homepage = 'https://www.youtube.com'
|
||||
yt_resp = fetch_url(yt_homepage, headers={'User-Agent': mobile_user_agent}, report_text='Getting youtube homepage')
|
||||
visitor_data_re = r'''"visitorData":\s*?"(.+?)"'''
|
||||
visitor_data_match = re.search(visitor_data_re, yt_resp.decode())
|
||||
if visitor_data_match:
|
||||
visitor_data = visitor_data_match.group(1)
|
||||
print(f'Got visitor_data: {len(visitor_data)}')
|
||||
with open(visitor_data_cache, 'w') as file:
|
||||
print('Saving visitor_data cache...')
|
||||
file.write(visitor_data)
|
||||
return visitor_data
|
||||
else:
|
||||
print('Unable to get visitor_data value')
|
||||
return visitor_data
|
||||
|
||||
def call_youtube_api(client, api, data):
|
||||
client_params = INNERTUBE_CLIENTS[client]
|
||||
context = client_params['INNERTUBE_CONTEXT']
|
||||
key = client_params['INNERTUBE_API_KEY']
|
||||
host = client_params.get('INNERTUBE_HOST') or 'www.youtube.com'
|
||||
user_agent = context['client'].get('userAgent') or mobile_user_agent
|
||||
visitor_data = get_visitor_data()
|
||||
|
||||
url = 'https://' + host + '/youtubei/v1/' + api + '?key=' + key
|
||||
if visitor_data:
|
||||
context['client'].update({'visitorData': visitor_data})
|
||||
data['context'] = context
|
||||
|
||||
data = json.dumps(data)
|
||||
headers = (('Content-Type', 'application/json'),('User-Agent', user_agent))
|
||||
if visitor_data:
|
||||
headers = ( *headers, ('X-Goog-Visitor-Id', visitor_data ))
|
||||
response = fetch_url(
|
||||
url, data=data, headers=headers,
|
||||
debug_name='youtubei_' + api + '_' + client,
|
||||
report_text='Fetched ' + client + ' youtubei ' + api
|
||||
).decode('utf-8')
|
||||
return response
|
||||
|
||||
|
||||
def strip_non_ascii(string):
|
||||
''' Returns the string without non ASCII characters'''
|
||||
if string is None:
|
||||
return ""
|
||||
stripped = (c for c in string if 0 < ord(c) < 127)
|
||||
return ''.join(stripped)
|
||||
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
__version__ = '0.2.3'
|
||||
__version__ = 'v0.4.5'
|
||||
|
||||
918
youtube/watch.py
918
youtube/watch.py
File diff suppressed because it is too large
Load Diff
@@ -7,7 +7,7 @@ from .everything_else import (extract_channel_info, extract_search_info,
|
||||
extract_playlist_metadata, extract_playlist_info, extract_comments_info)
|
||||
|
||||
from .watch_extraction import (extract_watch_info, get_caption_url,
|
||||
update_with_age_restricted_info, requires_decryption,
|
||||
update_with_new_urls, requires_decryption,
|
||||
extract_decryption_function, decrypt_signatures, _formats,
|
||||
update_format_with_type_info, extract_hls_formats,
|
||||
extract_watch_info_from_html, captions_available)
|
||||
extract_watch_info_from_html, captions_available, parse_format)
|
||||
|
||||
@@ -109,7 +109,7 @@ def concat_or_none(*strings):
|
||||
def remove_redirect(url):
|
||||
if url is None:
|
||||
return None
|
||||
if re.fullmatch(r'(((https?:)?//)?(www.)?youtube.com)?/redirect\?.*', url) is not None: # youtube puts these on external links to do tracking
|
||||
if re.fullmatch(r'(((https?:)?//)?(www.)?youtube.com)?/redirect\?.*', url) is not None: # YouTube puts these on external links to do tracking
|
||||
query_string = url[url.find('?')+1: ]
|
||||
return urllib.parse.parse_qs(query_string)['q'][0]
|
||||
return url
|
||||
@@ -133,11 +133,11 @@ def _recover_urls(runs):
|
||||
for run in runs:
|
||||
url = deep_get(run, 'navigationEndpoint', 'urlEndpoint', 'url')
|
||||
text = run.get('text', '')
|
||||
# second condition is necessary because youtube makes other things into urls, such as hashtags, which we want to keep as text
|
||||
# second condition is necessary because YouTube makes other things into urls, such as hashtags, which we want to keep as text
|
||||
if url is not None and (text.startswith('http://') or text.startswith('https://')):
|
||||
url = remove_redirect(url)
|
||||
run['url'] = url
|
||||
run['text'] = url # youtube truncates the url text, use actual url instead
|
||||
run['text'] = url # YouTube truncates the url text, use actual url instead
|
||||
|
||||
def extract_str(node, default=None, recover_urls=False):
|
||||
'''default is the value returned if the extraction fails. If recover_urls is true, will attempt to fix YouTube's truncation of url text (most prominently seen in descriptions)'''
|
||||
@@ -185,7 +185,7 @@ def extract_int(string, default=None, whole_word=True):
|
||||
return default
|
||||
|
||||
def extract_approx_int(string):
|
||||
'''e.g. "15.1M" from "15.1M subscribers"'''
|
||||
'''e.g. "15.1M" from "15.1M subscribers" or '4,353' from 4353'''
|
||||
if not isinstance(string, str):
|
||||
string = extract_str(string)
|
||||
if not string:
|
||||
@@ -193,7 +193,10 @@ def extract_approx_int(string):
|
||||
match = re.search(r'\b(\d+(?:\.\d+)?[KMBTkmbt]?)\b', string.replace(',', ''))
|
||||
if match is None:
|
||||
return None
|
||||
return match.group(1)
|
||||
result = match.group(1)
|
||||
if re.fullmatch(r'\d+', result):
|
||||
result = '{:,}'.format(int(result))
|
||||
return result
|
||||
|
||||
MONTH_ABBREVIATIONS = {'jan':'1', 'feb':'2', 'mar':'3', 'apr':'4', 'may':'5', 'jun':'6', 'jul':'7', 'aug':'8', 'sep':'9', 'oct':'10', 'nov':'11', 'dec':'12'}
|
||||
def extract_date(date_text):
|
||||
@@ -223,6 +226,190 @@ def check_missing_keys(object, *key_sequences):
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def extract_lockup_view_model_info(item, additional_info={}):
|
||||
"""Extract info from new lockupViewModel format (YouTube 2024+)"""
|
||||
info = {'error': None}
|
||||
|
||||
content_type = item.get('contentType', '')
|
||||
content_id = item.get('contentId', '')
|
||||
|
||||
# Extract title from metadata
|
||||
metadata = item.get('metadata', {})
|
||||
lockup_metadata = metadata.get('lockupMetadataViewModel', {})
|
||||
title_data = lockup_metadata.get('title', {})
|
||||
info['title'] = title_data.get('content', '')
|
||||
|
||||
# Determine type based on contentType
|
||||
if 'PLAYLIST' in content_type or 'PODCAST' in content_type:
|
||||
info['type'] = 'playlist'
|
||||
info['playlist_type'] = 'playlist'
|
||||
info['id'] = content_id
|
||||
info['video_count'] = None
|
||||
info['first_video_id'] = None
|
||||
|
||||
# Try to get video count from metadata
|
||||
metadata_rows = lockup_metadata.get('metadata', {})
|
||||
for row in metadata_rows.get('contentMetadataViewModel', {}).get('metadataRows', []):
|
||||
for part in row.get('metadataParts', []):
|
||||
text = part.get('text', {}).get('content', '')
|
||||
if 'video' in text.lower() or 'episode' in text.lower():
|
||||
info['video_count'] = extract_int(text)
|
||||
elif 'VIDEO' in content_type:
|
||||
info['type'] = 'video'
|
||||
info['id'] = content_id
|
||||
info['view_count'] = None
|
||||
info['approx_view_count'] = None
|
||||
info['time_published'] = None
|
||||
info['duration'] = None
|
||||
|
||||
# Extract duration/other info from metadata rows
|
||||
metadata_rows = lockup_metadata.get('metadata', {})
|
||||
for row in metadata_rows.get('contentMetadataViewModel', {}).get('metadataRows', []):
|
||||
for part in row.get('metadataParts', []):
|
||||
text = part.get('text', {}).get('content', '')
|
||||
if 'view' in text.lower():
|
||||
info['approx_view_count'] = extract_approx_int(text)
|
||||
elif 'ago' in text.lower():
|
||||
info['time_published'] = text
|
||||
elif 'CHANNEL' in content_type:
|
||||
info['type'] = 'channel'
|
||||
info['id'] = content_id
|
||||
info['approx_subscriber_count'] = None
|
||||
info['video_count'] = None
|
||||
|
||||
# Extract subscriber count and video count from metadata rows
|
||||
metadata_rows = lockup_metadata.get('metadata', {})
|
||||
for row in metadata_rows.get('contentMetadataViewModel', {}).get('metadataRows', []):
|
||||
for part in row.get('metadataParts', []):
|
||||
text = part.get('text', {}).get('content', '')
|
||||
if 'subscriber' in text.lower():
|
||||
info['approx_subscriber_count'] = extract_approx_int(text)
|
||||
elif 'video' in text.lower():
|
||||
info['video_count'] = extract_int(text)
|
||||
else:
|
||||
info['type'] = 'unsupported'
|
||||
return info
|
||||
|
||||
# Extract thumbnail from contentImage
|
||||
content_image = item.get('contentImage', {})
|
||||
info['thumbnail'] = normalize_url(multi_deep_get(content_image,
|
||||
# playlists with collection thumbnail
|
||||
['collectionThumbnailViewModel', 'primaryThumbnail', 'thumbnailViewModel', 'image', 'sources', 0, 'url'],
|
||||
# single thumbnail (some playlists, videos)
|
||||
['thumbnailViewModel', 'image', 'sources', 0, 'url'],
|
||||
)) or ''
|
||||
|
||||
# Extract video/episode count from thumbnail overlay badges
|
||||
# (podcasts and some playlists put the count here instead of metadata rows)
|
||||
thumb_vm = multi_deep_get(content_image,
|
||||
['collectionThumbnailViewModel', 'primaryThumbnail', 'thumbnailViewModel'],
|
||||
['thumbnailViewModel'],
|
||||
) or {}
|
||||
for overlay in thumb_vm.get('overlays', []):
|
||||
for badge in deep_get(overlay, 'thumbnailOverlayBadgeViewModel', 'thumbnailBadges', default=[]):
|
||||
badge_text = deep_get(badge, 'thumbnailBadgeViewModel', 'text', default='')
|
||||
if badge_text and not info.get('video_count'):
|
||||
conservative_update(info, 'video_count', extract_int(badge_text))
|
||||
|
||||
# Extract author info if available
|
||||
info['author'] = None
|
||||
info['author_id'] = None
|
||||
info['author_url'] = None
|
||||
info['description'] = None
|
||||
info['badges'] = []
|
||||
|
||||
# Try to get first video ID from inline player data
|
||||
item_playback = item.get('itemPlayback', {})
|
||||
inline_player = item_playback.get('inlinePlayerData', {})
|
||||
on_select = inline_player.get('onSelect', {})
|
||||
innertube_cmd = on_select.get('innertubeCommand', {})
|
||||
watch_endpoint = innertube_cmd.get('watchEndpoint', {})
|
||||
if watch_endpoint.get('videoId'):
|
||||
info['first_video_id'] = watch_endpoint.get('videoId')
|
||||
|
||||
info.update(additional_info)
|
||||
return info
|
||||
|
||||
|
||||
def extract_shorts_lockup_view_model_info(item, additional_info={}):
|
||||
"""Extract info from shortsLockupViewModel format (YouTube Shorts)"""
|
||||
info = {'error': None, 'type': 'video'}
|
||||
|
||||
# Video ID from reelWatchEndpoint or entityId
|
||||
info['id'] = deep_get(item,
|
||||
'onTap', 'innertubeCommand', 'reelWatchEndpoint', 'videoId')
|
||||
if not info['id']:
|
||||
entity_id = item.get('entityId', '')
|
||||
if entity_id.startswith('shorts-shelf-item-'):
|
||||
info['id'] = entity_id[len('shorts-shelf-item-'):]
|
||||
|
||||
# Thumbnail
|
||||
info['thumbnail'] = normalize_url(deep_get(item,
|
||||
'onTap', 'innertubeCommand', 'reelWatchEndpoint',
|
||||
'thumbnail', 'thumbnails', 0, 'url'))
|
||||
|
||||
# Parse title and views from accessibilityText
|
||||
# Format: "Title, N views - play Short"
|
||||
acc_text = item.get('accessibilityText', '')
|
||||
info['title'] = ''
|
||||
info['view_count'] = None
|
||||
info['approx_view_count'] = None
|
||||
if acc_text:
|
||||
# Remove trailing " - play Short"
|
||||
cleaned = re.sub(r'\s*-\s*play Short$', '', acc_text)
|
||||
# Split on last comma+views pattern to separate title from view count
|
||||
match = re.match(r'^(.*?),\s*([\d,.]+\s*(?:thousand|million|billion|)\s*views?)$',
|
||||
cleaned, re.IGNORECASE)
|
||||
if match:
|
||||
info['title'] = match.group(1).strip()
|
||||
view_text = match.group(2)
|
||||
info['view_count'] = extract_int(view_text)
|
||||
# Convert "7.1 thousand" -> "7.1 K" for display
|
||||
suffix_map = {'thousand': 'K', 'million': 'M', 'billion': 'B'}
|
||||
suffix_match = re.search(r'([\d,.]+)\s*(thousand|million|billion)?', view_text, re.IGNORECASE)
|
||||
if suffix_match:
|
||||
num = suffix_match.group(1)
|
||||
word = suffix_match.group(2)
|
||||
if word:
|
||||
info['approx_view_count'] = num + ' ' + suffix_map[word.lower()]
|
||||
else:
|
||||
info['approx_view_count'] = '{:,}'.format(int(num.replace(',', ''))) if num.isdigit() or num.replace(',','').isdigit() else num
|
||||
else:
|
||||
info['approx_view_count'] = extract_approx_int(view_text)
|
||||
else:
|
||||
# Fallback: try "N views" at end
|
||||
match2 = re.match(r'^(.*?),\s*(.+views?)$', cleaned, re.IGNORECASE)
|
||||
if match2:
|
||||
info['title'] = match2.group(1).strip()
|
||||
info['approx_view_count'] = extract_approx_int(match2.group(2))
|
||||
else:
|
||||
info['title'] = cleaned
|
||||
|
||||
# Overlay text (usually has the title too)
|
||||
overlay_metadata = deep_get(item, 'overlayMetadata',
|
||||
'secondaryText', 'content')
|
||||
if overlay_metadata and not info['approx_view_count']:
|
||||
info['approx_view_count'] = extract_approx_int(overlay_metadata)
|
||||
|
||||
primary_text = deep_get(item, 'overlayMetadata',
|
||||
'primaryText', 'content')
|
||||
if primary_text and not info['title']:
|
||||
info['title'] = primary_text
|
||||
|
||||
info['duration'] = ''
|
||||
info['time_published'] = None
|
||||
info['description'] = None
|
||||
info['badges'] = []
|
||||
info['author'] = None
|
||||
info['author_id'] = None
|
||||
info['author_url'] = None
|
||||
info['index'] = None
|
||||
|
||||
info.update(additional_info)
|
||||
return info
|
||||
|
||||
|
||||
def extract_item_info(item, additional_info={}):
|
||||
if not item:
|
||||
return {'error': 'No item given'}
|
||||
@@ -240,6 +427,14 @@ def extract_item_info(item, additional_info={}):
|
||||
info['type'] = 'unsupported'
|
||||
return info
|
||||
|
||||
# Handle new lockupViewModel format (YouTube 2024+)
|
||||
if type == 'lockupViewModel':
|
||||
return extract_lockup_view_model_info(item, additional_info)
|
||||
|
||||
# Handle shortsLockupViewModel format (YouTube Shorts)
|
||||
if type == 'shortsLockupViewModel':
|
||||
return extract_shorts_lockup_view_model_info(item, additional_info)
|
||||
|
||||
# type looks like e.g. 'compactVideoRenderer' or 'gridVideoRenderer'
|
||||
# camelCase split, https://stackoverflow.com/a/37697078
|
||||
type_parts = [s.lower() for s in re.sub(r'([A-Z][a-z]+)', r' \1', type).split()]
|
||||
@@ -249,6 +444,9 @@ def extract_item_info(item, additional_info={}):
|
||||
primary_type = type_parts[-2]
|
||||
if primary_type == 'video':
|
||||
info['type'] = 'video'
|
||||
elif type_parts[0] == 'reel': # shorts
|
||||
info['type'] = 'video'
|
||||
primary_type = 'video'
|
||||
elif primary_type in ('playlist', 'radio', 'show'):
|
||||
info['type'] = 'playlist'
|
||||
info['playlist_type'] = primary_type
|
||||
@@ -276,9 +474,9 @@ def extract_item_info(item, additional_info={}):
|
||||
['detailedMetadataSnippets', 0, 'snippetText'],
|
||||
))
|
||||
info['thumbnail'] = normalize_url(multi_deep_get(item,
|
||||
['thumbnail', 'thumbnails', 0, 'url'], # videos
|
||||
['thumbnails', 0, 'thumbnails', 0, 'url'], # playlists
|
||||
['thumbnailRenderer', 'showCustomThumbnailRenderer', 'thumbnail', 'thumbnails', 0, 'url'], # shows
|
||||
['thumbnail', 'thumbnails', -1, 'url'], # videos (highest quality)
|
||||
['thumbnails', 0, 'thumbnails', -1, 'url'], # playlists
|
||||
['thumbnailRenderer', 'showCustomThumbnailRenderer', 'thumbnail', 'thumbnails', -1, 'url'], # shows
|
||||
))
|
||||
|
||||
info['badges'] = []
|
||||
@@ -295,7 +493,11 @@ def extract_item_info(item, additional_info={}):
|
||||
info['time_published'] = timestamp.group(1)
|
||||
|
||||
if primary_type == 'video':
|
||||
info['id'] = item.get('videoId')
|
||||
info['id'] = multi_deep_get(item,
|
||||
['videoId'],
|
||||
['navigationEndpoint', 'watchEndpoint', 'videoId'],
|
||||
['navigationEndpoint', 'reelWatchEndpoint', 'videoId'] # shorts
|
||||
)
|
||||
info['view_count'] = extract_int(item.get('viewCountText'))
|
||||
|
||||
# dig into accessibility data to get view_count for videos marked as recommended, and to get time_published
|
||||
@@ -313,17 +515,35 @@ def extract_item_info(item, additional_info={}):
|
||||
if info['view_count']:
|
||||
info['approx_view_count'] = '{:,}'.format(info['view_count'])
|
||||
else:
|
||||
info['approx_view_count'] = extract_approx_int(item.get('shortViewCountText'))
|
||||
info['approx_view_count'] = extract_approx_int(multi_get(item,
|
||||
'shortViewCountText',
|
||||
'viewCountText' # shorts
|
||||
))
|
||||
|
||||
# handle case where it is "No views"
|
||||
if not info['approx_view_count']:
|
||||
if ('No views' in item.get('shortViewCountText', '')
|
||||
or 'no views' in accessibility_label.lower()):
|
||||
or 'no views' in accessibility_label.lower()
|
||||
or 'No views' in extract_str(item.get('viewCountText', '')) # shorts
|
||||
):
|
||||
info['view_count'] = 0
|
||||
info['approx_view_count'] = '0'
|
||||
|
||||
info['duration'] = extract_str(item.get('lengthText'))
|
||||
|
||||
# dig into accessibility data to get duration for shorts
|
||||
accessibility_label = deep_get(item,
|
||||
'accessibility', 'accessibilityData', 'label',
|
||||
default='')
|
||||
duration = re.search(r'(\d+) (second|seconds|minute) - play video$',
|
||||
accessibility_label)
|
||||
if duration:
|
||||
if duration.group(2) == 'minute':
|
||||
conservative_update(info, 'duration', '1:00')
|
||||
else:
|
||||
conservative_update(info,
|
||||
'duration', '0:' + duration.group(1).zfill(2))
|
||||
|
||||
# if it's an item in a playlist, get its index
|
||||
if 'index' in item: # url has wrong index on playlist page
|
||||
info['index'] = extract_int(item.get('index'))
|
||||
@@ -348,6 +568,13 @@ def extract_item_info(item, additional_info={}):
|
||||
elif primary_type == 'channel':
|
||||
info['id'] = item.get('channelId')
|
||||
info['approx_subscriber_count'] = extract_approx_int(item.get('subscriberCountText'))
|
||||
# YouTube sometimes puts the handle (@name) in subscriberCountText
|
||||
# instead of the actual count. Fall back to accessibility data.
|
||||
if not info['approx_subscriber_count']:
|
||||
acc_label = deep_get(item, 'subscriberCountText',
|
||||
'accessibility', 'accessibilityData', 'label', default='')
|
||||
if 'subscriber' in acc_label.lower():
|
||||
info['approx_subscriber_count'] = extract_approx_int(acc_label)
|
||||
elif primary_type == 'show':
|
||||
info['id'] = deep_get(item, 'navigationEndpoint', 'watchEndpoint', 'playlistId')
|
||||
info['first_video_id'] = deep_get(item, 'navigationEndpoint',
|
||||
@@ -395,6 +622,8 @@ _item_types = {
|
||||
'gridVideoRenderer',
|
||||
'playlistVideoRenderer',
|
||||
|
||||
'reelItemRenderer',
|
||||
|
||||
'playlistRenderer',
|
||||
'compactPlaylistRenderer',
|
||||
'gridPlaylistRenderer',
|
||||
@@ -411,6 +640,10 @@ _item_types = {
|
||||
'channelRenderer',
|
||||
'compactChannelRenderer',
|
||||
'gridChannelRenderer',
|
||||
|
||||
# New viewModel format (YouTube 2024+)
|
||||
'lockupViewModel',
|
||||
'shortsLockupViewModel',
|
||||
}
|
||||
|
||||
def _traverse_browse_renderer(renderer):
|
||||
@@ -542,9 +775,13 @@ def extract_items(response, item_types=_item_types,
|
||||
item_types=item_types)
|
||||
if items:
|
||||
break
|
||||
elif 'onResponseReceivedEndpoints' in response:
|
||||
for endpoint in response.get('onResponseReceivedEndpoints', []):
|
||||
items, ctoken = extract_items_from_renderer_list(
|
||||
if ('onResponseReceivedEndpoints' in response
|
||||
or 'onResponseReceivedActions' in response):
|
||||
for endpoint in multi_get(response,
|
||||
'onResponseReceivedEndpoints',
|
||||
'onResponseReceivedActions',
|
||||
[]):
|
||||
new_items, new_ctoken = extract_items_from_renderer_list(
|
||||
multi_deep_get(
|
||||
endpoint,
|
||||
['reloadContinuationItemsCommand', 'continuationItems'],
|
||||
@@ -553,13 +790,17 @@ def extract_items(response, item_types=_item_types,
|
||||
),
|
||||
item_types=item_types,
|
||||
)
|
||||
if items:
|
||||
break
|
||||
elif 'contents' in response:
|
||||
items += new_items
|
||||
if (not ctoken) or (new_ctoken and new_items):
|
||||
ctoken = new_ctoken
|
||||
if 'contents' in response:
|
||||
renderer = get(response, 'contents', {})
|
||||
items, ctoken = extract_items_from_renderer(
|
||||
new_items, new_ctoken = extract_items_from_renderer(
|
||||
renderer,
|
||||
item_types=item_types)
|
||||
items += new_items
|
||||
if (not ctoken) or (new_ctoken and new_items):
|
||||
ctoken = new_ctoken
|
||||
|
||||
if search_engagement_panels and 'engagementPanels' in response:
|
||||
new_items, new_ctoken = extract_items_from_renderer_list(
|
||||
|
||||
@@ -9,7 +9,7 @@ import re
|
||||
import urllib
|
||||
from math import ceil
|
||||
|
||||
def extract_channel_info(polymer_json, tab):
|
||||
def extract_channel_info(polymer_json, tab, continuation=False):
|
||||
response, err = extract_response(polymer_json)
|
||||
if err:
|
||||
return {'error': err}
|
||||
@@ -23,7 +23,8 @@ def extract_channel_info(polymer_json, tab):
|
||||
|
||||
# channel doesn't exist or was terminated
|
||||
# example terminated channel: https://www.youtube.com/channel/UCnKJeK_r90jDdIuzHXC0Org
|
||||
if not metadata:
|
||||
# metadata and microformat are not present for continuation requests
|
||||
if not metadata and not continuation:
|
||||
if response.get('alerts'):
|
||||
error_string = ' '.join(
|
||||
extract_str(deep_get(alert, 'alertRenderer', 'text'), default='')
|
||||
@@ -44,7 +45,7 @@ def extract_channel_info(polymer_json, tab):
|
||||
info['approx_subscriber_count'] = extract_approx_int(deep_get(response,
|
||||
'header', 'c4TabbedHeaderRenderer', 'subscriberCountText'))
|
||||
|
||||
# stuff from microformat (info given by youtube for every page on channel)
|
||||
# stuff from microformat (info given by youtube for first page on channel)
|
||||
info['short_description'] = metadata.get('description')
|
||||
if info['short_description'] and len(info['short_description']) > 730:
|
||||
info['short_description'] = info['short_description'][0:730] + '...'
|
||||
@@ -69,10 +70,10 @@ def extract_channel_info(polymer_json, tab):
|
||||
info['ctoken'] = None
|
||||
|
||||
# empty channel
|
||||
if 'contents' not in response and 'continuationContents' not in response:
|
||||
return info
|
||||
#if 'contents' not in response and 'continuationContents' not in response:
|
||||
# return info
|
||||
|
||||
if tab in ('videos', 'playlists', 'search'):
|
||||
if tab in ('videos', 'shorts', 'streams', 'playlists', 'search'):
|
||||
items, ctoken = extract_items(response)
|
||||
additional_info = {
|
||||
'author': info['channel_name'],
|
||||
@@ -84,23 +85,84 @@ def extract_channel_info(polymer_json, tab):
|
||||
if tab in ('search', 'playlists'):
|
||||
info['is_last_page'] = (ctoken is None)
|
||||
elif tab == 'about':
|
||||
items, _ = extract_items(response, item_types={'channelAboutFullMetadataRenderer'})
|
||||
if not items:
|
||||
info['error'] = 'Could not find channelAboutFullMetadataRenderer'
|
||||
return info
|
||||
channel_metadata = items[0]['channelAboutFullMetadataRenderer']
|
||||
# Latest type
|
||||
items, _ = extract_items(response, item_types={'aboutChannelRenderer'})
|
||||
if items:
|
||||
a_metadata = deep_get(items, 0, 'aboutChannelRenderer',
|
||||
'metadata', 'aboutChannelViewModel')
|
||||
if not a_metadata:
|
||||
info['error'] = 'Could not find aboutChannelViewModel'
|
||||
return info
|
||||
|
||||
info['links'] = []
|
||||
for link_json in channel_metadata.get('primaryLinks', ()):
|
||||
url = remove_redirect(deep_get(link_json, 'navigationEndpoint', 'urlEndpoint', 'url'))
|
||||
if not (url.startswith('http://') or url.startswith('https://')):
|
||||
url = 'http://' + url
|
||||
text = extract_str(link_json.get('title'))
|
||||
info['links'].append( (text, url) )
|
||||
info['links'] = []
|
||||
for link_outer in a_metadata.get('links', ()):
|
||||
link = link_outer.get('channelExternalLinkViewModel') or {}
|
||||
link_content = extract_str(deep_get(link, 'link', 'content'))
|
||||
for run in deep_get(link, 'link', 'commandRuns') or ():
|
||||
url = remove_redirect(deep_get(run, 'onTap',
|
||||
'innertubeCommand', 'urlEndpoint', 'url'))
|
||||
if url and not (url.startswith('http://')
|
||||
or url.startswith('https://')):
|
||||
url = 'https://' + url
|
||||
if link_content is None or (link_content in url):
|
||||
break
|
||||
else: # didn't break
|
||||
url = link_content
|
||||
if url and not (url.startswith('http://')
|
||||
or url.startswith('https://')):
|
||||
url = 'https://' + url
|
||||
text = extract_str(deep_get(link, 'title', 'content'))
|
||||
info['links'].append( (text, url) )
|
||||
|
||||
info['date_joined'] = extract_date(channel_metadata.get('joinedDateText'))
|
||||
info['view_count'] = extract_int(channel_metadata.get('viewCountText'))
|
||||
info['description'] = extract_str(channel_metadata.get('description'), default='')
|
||||
info['date_joined'] = extract_date(
|
||||
a_metadata.get('joinedDateText')
|
||||
)
|
||||
info['view_count'] = extract_int(a_metadata.get('viewCountText'))
|
||||
info['approx_view_count'] = extract_approx_int(
|
||||
a_metadata.get('viewCountText')
|
||||
)
|
||||
info['description'] = extract_str(
|
||||
a_metadata.get('description'), default=''
|
||||
)
|
||||
info['approx_video_count'] = extract_approx_int(
|
||||
a_metadata.get('videoCountText')
|
||||
)
|
||||
info['approx_subscriber_count'] = extract_approx_int(
|
||||
a_metadata.get('subscriberCountText')
|
||||
)
|
||||
info['country'] = extract_str(a_metadata.get('country'))
|
||||
info['canonical_url'] = extract_str(
|
||||
a_metadata.get('canonicalChannelUrl')
|
||||
)
|
||||
|
||||
# Old type
|
||||
else:
|
||||
items, _ = extract_items(response,
|
||||
item_types={'channelAboutFullMetadataRenderer'})
|
||||
if not items:
|
||||
info['error'] = 'Could not find aboutChannelRenderer or channelAboutFullMetadataRenderer'
|
||||
return info
|
||||
a_metadata = items[0]['channelAboutFullMetadataRenderer']
|
||||
|
||||
info['links'] = []
|
||||
for link_json in a_metadata.get('primaryLinks', ()):
|
||||
url = remove_redirect(deep_get(link_json, 'navigationEndpoint',
|
||||
'urlEndpoint', 'url'))
|
||||
if url and not (url.startswith('http://')
|
||||
or url.startswith('https://')):
|
||||
url = 'https://' + url
|
||||
text = extract_str(link_json.get('title'))
|
||||
info['links'].append( (text, url) )
|
||||
|
||||
info['date_joined'] = extract_date(a_metadata.get('joinedDateText'))
|
||||
info['view_count'] = extract_int(a_metadata.get('viewCountText'))
|
||||
info['description'] = extract_str(a_metadata.get(
|
||||
'description'), default='')
|
||||
|
||||
info['approx_video_count'] = None
|
||||
info['approx_subscriber_count'] = None
|
||||
info['country'] = None
|
||||
info['canonical_url'] = None
|
||||
else:
|
||||
raise NotImplementedError('Unknown or unsupported channel tab: ' + tab)
|
||||
|
||||
@@ -156,39 +218,112 @@ def extract_playlist_metadata(polymer_json):
|
||||
return {'error': err}
|
||||
|
||||
metadata = {'error': None}
|
||||
header = deep_get(response, 'header', 'playlistHeaderRenderer', default={})
|
||||
metadata['title'] = extract_str(header.get('title'))
|
||||
metadata['title'] = None
|
||||
metadata['first_video_id'] = None
|
||||
metadata['thumbnail'] = None
|
||||
metadata['video_count'] = None
|
||||
metadata['description'] = ''
|
||||
metadata['author'] = None
|
||||
metadata['author_id'] = None
|
||||
metadata['author_url'] = None
|
||||
metadata['view_count'] = None
|
||||
metadata['like_count'] = None
|
||||
metadata['time_published'] = None
|
||||
|
||||
header = deep_get(response, 'header', 'playlistHeaderRenderer', default={})
|
||||
|
||||
if header:
|
||||
# Classic playlistHeaderRenderer format
|
||||
metadata['title'] = extract_str(header.get('title'))
|
||||
metadata['first_video_id'] = deep_get(header, 'playEndpoint', 'watchEndpoint', 'videoId')
|
||||
first_id = re.search(r'([a-z_\-]{11})', deep_get(header,
|
||||
'thumbnail', 'thumbnails', 0, 'url', default=''))
|
||||
if first_id:
|
||||
conservative_update(metadata, 'first_video_id', first_id.group(1))
|
||||
|
||||
metadata['video_count'] = extract_int(header.get('numVideosText'))
|
||||
metadata['description'] = extract_str(header.get('descriptionText'), default='')
|
||||
metadata['author'] = extract_str(header.get('ownerText'))
|
||||
metadata['author_id'] = multi_deep_get(header,
|
||||
['ownerText', 'runs', 0, 'navigationEndpoint', 'browseEndpoint', 'browseId'],
|
||||
['ownerEndpoint', 'browseEndpoint', 'browseId'])
|
||||
metadata['view_count'] = extract_int(header.get('viewCountText'))
|
||||
metadata['like_count'] = extract_int(header.get('likesCountWithoutLikeText'))
|
||||
for stat in header.get('stats', ()):
|
||||
text = extract_str(stat)
|
||||
if 'videos' in text or 'episodes' in text:
|
||||
conservative_update(metadata, 'video_count', extract_int(text))
|
||||
elif 'views' in text:
|
||||
conservative_update(metadata, 'view_count', extract_int(text))
|
||||
elif 'updated' in text:
|
||||
metadata['time_published'] = extract_date(text)
|
||||
else:
|
||||
# New pageHeaderRenderer format (YouTube 2024+)
|
||||
page_header = deep_get(response, 'header', 'pageHeaderRenderer', default={})
|
||||
metadata['title'] = page_header.get('pageTitle')
|
||||
view_model = deep_get(page_header, 'content', 'pageHeaderViewModel', default={})
|
||||
|
||||
# Extract title from viewModel if not found
|
||||
if not metadata['title']:
|
||||
metadata['title'] = deep_get(view_model,
|
||||
'title', 'dynamicTextViewModel', 'text', 'content')
|
||||
|
||||
# Extract metadata from rows (author, video count, views, etc.)
|
||||
meta_rows = deep_get(view_model,
|
||||
'metadata', 'contentMetadataViewModel', 'metadataRows', default=[])
|
||||
for row in meta_rows:
|
||||
for part in row.get('metadataParts', []):
|
||||
text_content = deep_get(part, 'text', 'content', default='')
|
||||
# Author from avatarStack
|
||||
avatar_stack = deep_get(part, 'avatarStack', 'avatarStackViewModel', default={})
|
||||
if avatar_stack:
|
||||
author_text = deep_get(avatar_stack, 'text', 'content')
|
||||
if author_text:
|
||||
metadata['author'] = author_text
|
||||
# Extract author_id from commandRuns
|
||||
for run in deep_get(avatar_stack, 'text', 'commandRuns', default=[]):
|
||||
browse_id = deep_get(run, 'onTap', 'innertubeCommand',
|
||||
'browseEndpoint', 'browseId')
|
||||
if browse_id:
|
||||
metadata['author_id'] = browse_id
|
||||
# Video/episode count
|
||||
if text_content and ('video' in text_content.lower() or 'episode' in text_content.lower()):
|
||||
conservative_update(metadata, 'video_count', extract_int(text_content))
|
||||
# View count
|
||||
elif text_content and 'view' in text_content.lower():
|
||||
conservative_update(metadata, 'view_count', extract_int(text_content))
|
||||
# Last updated
|
||||
elif text_content and 'updated' in text_content.lower():
|
||||
metadata['time_published'] = extract_date(text_content)
|
||||
|
||||
# Extract description from sidebar if available
|
||||
sidebar = deep_get(response, 'sidebar', 'playlistSidebarRenderer', 'items', default=[])
|
||||
for sidebar_item in sidebar:
|
||||
desc = deep_get(sidebar_item, 'playlistSidebarPrimaryInfoRenderer',
|
||||
'description', 'simpleText')
|
||||
if desc:
|
||||
metadata['description'] = desc
|
||||
|
||||
if metadata['author_id']:
|
||||
metadata['author_url'] = 'https://www.youtube.com/channel/' + metadata['author_id']
|
||||
|
||||
metadata['first_video_id'] = deep_get(header, 'playEndpoint', 'watchEndpoint', 'videoId')
|
||||
first_id = re.search(r'([a-z_\-]{11})', deep_get(header,
|
||||
'thumbnail', 'thumbnails', 0, 'url', default=''))
|
||||
if first_id:
|
||||
conservative_update(metadata, 'first_video_id', first_id.group(1))
|
||||
if metadata['first_video_id'] is None:
|
||||
metadata['thumbnail'] = None
|
||||
else:
|
||||
metadata['thumbnail'] = 'https://i.ytimg.com/vi/' + metadata['first_video_id'] + '/mqdefault.jpg'
|
||||
metadata['thumbnail'] = f"https://i.ytimg.com/vi/{metadata['first_video_id']}/hqdefault.jpg"
|
||||
|
||||
metadata['video_count'] = extract_int(header.get('numVideosText'))
|
||||
metadata['description'] = extract_str(header.get('descriptionText'), default='')
|
||||
metadata['author'] = extract_str(header.get('ownerText'))
|
||||
metadata['author_id'] = multi_deep_get(header,
|
||||
['ownerText', 'runs', 0, 'navigationEndpoint', 'browseEndpoint', 'browseId'],
|
||||
['ownerEndpoint', 'browseEndpoint', 'browseId'])
|
||||
if metadata['author_id']:
|
||||
metadata['author_url'] = 'https://www.youtube.com/channel/' + metadata['author_id']
|
||||
else:
|
||||
metadata['author_url'] = None
|
||||
metadata['view_count'] = extract_int(header.get('viewCountText'))
|
||||
metadata['like_count'] = extract_int(header.get('likesCountWithoutLikeText'))
|
||||
for stat in header.get('stats', ()):
|
||||
text = extract_str(stat)
|
||||
if 'videos' in text:
|
||||
conservative_update(metadata, 'video_count', extract_int(text))
|
||||
elif 'views' in text:
|
||||
conservative_update(metadata, 'view_count', extract_int(text))
|
||||
elif 'updated' in text:
|
||||
metadata['time_published'] = extract_date(text)
|
||||
microformat = deep_get(response, 'microformat', 'microformatDataRenderer',
|
||||
default={})
|
||||
conservative_update(
|
||||
metadata, 'title', extract_str(microformat.get('title'))
|
||||
)
|
||||
conservative_update(
|
||||
metadata, 'description', extract_str(microformat.get('description'))
|
||||
)
|
||||
conservative_update(
|
||||
metadata, 'thumbnail', deep_get(microformat, 'thumbnail',
|
||||
'thumbnails', -1, 'url')
|
||||
)
|
||||
|
||||
return metadata
|
||||
|
||||
@@ -197,13 +332,11 @@ def extract_playlist_info(polymer_json):
|
||||
if err:
|
||||
return {'error': err}
|
||||
info = {'error': None}
|
||||
first_page = 'continuationContents' not in response
|
||||
video_list, _ = extract_items(response)
|
||||
|
||||
info['items'] = [extract_item_info(renderer) for renderer in video_list]
|
||||
|
||||
if first_page:
|
||||
info['metadata'] = extract_playlist_metadata(polymer_json)
|
||||
info['metadata'] = extract_playlist_metadata(polymer_json)
|
||||
|
||||
return info
|
||||
|
||||
|
||||
@@ -111,14 +111,10 @@ _formats = {
|
||||
'_rtmp': {'protocol': 'rtmp'},
|
||||
|
||||
# av01 video only formats sometimes served with "unknown" codecs
|
||||
'394': {'ext': 'mp4', 'height': 144, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
|
||||
'395': {'ext': 'mp4', 'height': 240, 'format_note': 'DASH video', 'vcodec': 'av01.0.00M.08'},
|
||||
'396': {'ext': 'mp4', 'height': 360, 'format_note': 'DASH video', 'vcodec': 'av01.0.01M.08'},
|
||||
'397': {'ext': 'mp4', 'height': 480, 'format_note': 'DASH video', 'vcodec': 'av01.0.04M.08'},
|
||||
'398': {'ext': 'mp4', 'height': 720, 'format_note': 'DASH video', 'vcodec': 'av01.0.05M.08'},
|
||||
'399': {'ext': 'mp4', 'height': 1080, 'format_note': 'DASH video', 'vcodec': 'av01.0.08M.08'},
|
||||
'400': {'ext': 'mp4', 'height': 1440, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
|
||||
'401': {'ext': 'mp4', 'height': 2160, 'format_note': 'DASH video', 'vcodec': 'av01.0.12M.08'},
|
||||
'394': {'vcodec': 'av01.0.05M.08'},
|
||||
'395': {'vcodec': 'av01.0.05M.08'},
|
||||
'396': {'vcodec': 'av01.0.05M.08'},
|
||||
'397': {'vcodec': 'av01.0.05M.08'},
|
||||
}
|
||||
|
||||
|
||||
@@ -137,29 +133,59 @@ def _extract_from_video_information_renderer(renderer_content):
|
||||
return info
|
||||
|
||||
def _extract_likes_dislikes(renderer_content):
|
||||
info = {
|
||||
'like_count': None,
|
||||
}
|
||||
for button in renderer_content.get('buttons', ()):
|
||||
button_renderer = button.get('slimMetadataToggleButtonRenderer', {})
|
||||
|
||||
def extract_button_count(toggle_button_renderer):
|
||||
# all the digits can be found in the accessibility data
|
||||
count = extract_int(deep_get(
|
||||
button_renderer,
|
||||
'button', 'toggleButtonRenderer', 'defaultText',
|
||||
'accessibility', 'accessibilityData', 'label'))
|
||||
count = extract_int(multi_deep_get(
|
||||
toggle_button_renderer,
|
||||
['defaultText', 'accessibility', 'accessibilityData', 'label'],
|
||||
['accessibility', 'label'],
|
||||
['accessibilityData', 'accessibilityData', 'label'],
|
||||
['accessibilityText'],
|
||||
))
|
||||
|
||||
# this count doesn't have all the digits, it's like 53K for instance
|
||||
dumb_count = extract_int(extract_str(deep_get(
|
||||
button_renderer, 'button', 'toggleButtonRenderer', 'defaultText')))
|
||||
dumb_count = extract_int(extract_str(multi_get(
|
||||
toggle_button_renderer, ['defaultText', 'title'])))
|
||||
|
||||
# The accessibility text will be "No likes" or "No dislikes" or
|
||||
# something like that, but dumb count will be 0
|
||||
if dumb_count == 0:
|
||||
count = 0
|
||||
return count
|
||||
|
||||
if 'isLike' in button_renderer:
|
||||
info['like_count'] = count
|
||||
info = {
|
||||
'like_count': None,
|
||||
'dislike_count': None,
|
||||
}
|
||||
for button in renderer_content.get('buttons', ()):
|
||||
if 'slimMetadataToggleButtonRenderer' in button:
|
||||
button_renderer = button['slimMetadataToggleButtonRenderer']
|
||||
count = extract_button_count(deep_get(button_renderer,
|
||||
'button',
|
||||
'toggleButtonRenderer'))
|
||||
if 'isLike' in button_renderer:
|
||||
info['like_count'] = count
|
||||
elif 'isDislike' in button_renderer:
|
||||
info['dislike_count'] = count
|
||||
elif 'slimMetadataButtonRenderer' in button:
|
||||
button_renderer = button['slimMetadataButtonRenderer']
|
||||
liberal_update(info, 'like_count', extract_button_count(
|
||||
multi_deep_get(button_renderer,
|
||||
['button', 'segmentedLikeDislikeButtonRenderer',
|
||||
'likeButton', 'toggleButtonRenderer'],
|
||||
['button', 'segmentedLikeDislikeButtonViewModel',
|
||||
'likeButtonViewModel', 'likeButtonViewModel',
|
||||
'toggleButtonViewModel', 'toggleButtonViewModel',
|
||||
'defaultButtonViewModel', 'buttonViewModel']
|
||||
)
|
||||
))
|
||||
'''liberal_update(info, 'dislike_count', extract_button_count(
|
||||
deep_get(
|
||||
button_renderer, 'button',
|
||||
'segmentedLikeDislikeButtonRenderer',
|
||||
'dislikeButton', 'toggleButtonRenderer'
|
||||
)
|
||||
))'''
|
||||
return info
|
||||
|
||||
def _extract_from_owner_renderer(renderer_content):
|
||||
@@ -213,6 +239,36 @@ def _extract_metadata_row_info(renderer_content):
|
||||
|
||||
return info
|
||||
|
||||
def _extract_from_music_renderer(renderer_content):
|
||||
# latest format for the music list
|
||||
info = {
|
||||
'music_list': [],
|
||||
}
|
||||
|
||||
for carousel in renderer_content.get('carouselLockups', []):
|
||||
song = {}
|
||||
carousel = carousel.get('carouselLockupRenderer', {})
|
||||
video_renderer = carousel.get('videoLockup', {})
|
||||
video_renderer_info = extract_item_info(video_renderer)
|
||||
video_id = video_renderer_info.get('id')
|
||||
song['url'] = concat_or_none('https://www.youtube.com/watch?v=',
|
||||
video_id)
|
||||
song['title'] = video_renderer_info.get('title')
|
||||
for row in carousel.get('infoRows', []):
|
||||
row = row.get('infoRowRenderer', {})
|
||||
title = extract_str(row.get('title'))
|
||||
data = extract_str(row.get('defaultMetadata'))
|
||||
if title == 'SONG':
|
||||
song['title'] = data
|
||||
elif title == 'ARTIST':
|
||||
song['artist'] = data
|
||||
elif title == 'ALBUM':
|
||||
song['album'] = data
|
||||
elif title == 'WRITERS':
|
||||
song['writers'] = data
|
||||
info['music_list'].append(song)
|
||||
return info
|
||||
|
||||
def _extract_from_video_metadata(renderer_content):
|
||||
info = _extract_from_video_information_renderer(renderer_content)
|
||||
liberal_dict_update(info, _extract_likes_dislikes(renderer_content))
|
||||
@@ -236,6 +292,7 @@ visible_extraction_dispatch = {
|
||||
'slimVideoActionBarRenderer': _extract_likes_dislikes,
|
||||
'slimOwnerRenderer': _extract_from_owner_renderer,
|
||||
'videoDescriptionHeaderRenderer': _extract_from_video_header_renderer,
|
||||
'videoDescriptionMusicSectionRenderer': _extract_from_music_renderer,
|
||||
'expandableVideoDescriptionRenderer': _extract_from_description_renderer,
|
||||
'metadataRowContainerRenderer': _extract_metadata_row_info,
|
||||
# OR just this one, which contains SOME of the above inside it
|
||||
@@ -308,17 +365,18 @@ def _extract_watch_info_mobile(top_level):
|
||||
# https://www.androidpolice.com/2019/10/31/google-youtube-app-comment-section-below-videos/
|
||||
# https://www.youtube.com/watch?v=bR5Q-wD-6qo
|
||||
if header_type == 'commentsEntryPointHeaderRenderer':
|
||||
comment_count_text = extract_str(comment_info.get('headerText'))
|
||||
comment_count_text = extract_str(multi_get(
|
||||
comment_info, 'commentCount', 'headerText'))
|
||||
else:
|
||||
comment_count_text = extract_str(deep_get(comment_info,
|
||||
'header', 'commentSectionHeaderRenderer', 'countText'))
|
||||
if comment_count_text == 'Comments': # just this with no number, means 0 comments
|
||||
info['comment_count'] = 0
|
||||
info['comment_count'] = '0'
|
||||
else:
|
||||
info['comment_count'] = extract_int(comment_count_text)
|
||||
info['comment_count'] = extract_approx_int(comment_count_text)
|
||||
info['comments_disabled'] = False
|
||||
else: # no comment section present means comments are disabled
|
||||
info['comment_count'] = 0
|
||||
info['comment_count'] = '0'
|
||||
info['comments_disabled'] = True
|
||||
|
||||
# check for limited state
|
||||
@@ -354,8 +412,10 @@ def _extract_watch_info_desktop(top_level):
|
||||
likes_dislikes = deep_get(video_info, 'sentimentBar', 'sentimentBarRenderer', 'tooltip', default='').split('/')
|
||||
if len(likes_dislikes) == 2:
|
||||
info['like_count'] = extract_int(likes_dislikes[0])
|
||||
info['dislike_count'] = extract_int(likes_dislikes[1])
|
||||
else:
|
||||
info['like_count'] = None
|
||||
info['dislike_count'] = None
|
||||
|
||||
info['title'] = extract_str(video_info.get('title', None))
|
||||
info['author'] = extract_str(deep_get(video_info, 'owner', 'videoOwnerRenderer', 'title'))
|
||||
@@ -368,26 +428,28 @@ def _extract_watch_info_desktop(top_level):
|
||||
return info
|
||||
|
||||
def update_format_with_codec_info(fmt, codec):
|
||||
if (codec.startswith('av')
|
||||
or codec in ('vp9', 'vp8', 'vp8.0', 'h263', 'h264', 'mp4v')):
|
||||
if any(codec.startswith(c) for c in ('av', 'vp', 'h263', 'h264', 'mp4v')):
|
||||
if codec == 'vp8.0':
|
||||
codec = 'vp8'
|
||||
conservative_update(fmt, 'vcodec', codec)
|
||||
elif (codec.startswith('mp4a')
|
||||
or codec in ('opus', 'mp3', 'aac', 'dtse', 'ec-3', 'vorbis')):
|
||||
or codec in ('opus', 'mp3', 'aac', 'dtse', 'ec-3', 'vorbis',
|
||||
'ac-3')):
|
||||
conservative_update(fmt, 'acodec', codec)
|
||||
else:
|
||||
print('Warning: unrecognized codec: ' + codec)
|
||||
|
||||
fmt_type_re = re.compile(
|
||||
r'(text|audio|video)/([\w0-9]+); codecs="([\w0-9\.]+(?:, [\w0-9\.]+)*)"')
|
||||
r'(text|audio|video)/([\w0-9]+); codecs="([^"]+)"')
|
||||
def update_format_with_type_info(fmt, yt_fmt):
|
||||
# 'type' for invidious api format
|
||||
mime_type = multi_get(yt_fmt, 'mimeType', 'type')
|
||||
if mime_type is None:
|
||||
return
|
||||
match = re.fullmatch(fmt_type_re, mime_type)
|
||||
|
||||
if match is None:
|
||||
print('Warning: Could not read mimetype', mime_type)
|
||||
return
|
||||
type, fmt['ext'], codecs = match.groups()
|
||||
codecs = codecs.split(', ')
|
||||
for codec in codecs:
|
||||
@@ -410,7 +472,23 @@ def _extract_formats(info, player_response):
|
||||
for yt_fmt in yt_formats:
|
||||
itag = yt_fmt.get('itag')
|
||||
|
||||
# Translated audio track
|
||||
# Keep non-default tracks for multi-audio support
|
||||
# (they will be served via local proxy)
|
||||
|
||||
fmt = {}
|
||||
|
||||
# Audio track info
|
||||
audio_track = yt_fmt.get('audioTrack')
|
||||
if audio_track:
|
||||
fmt['audio_track_id'] = audio_track.get('id')
|
||||
fmt['audio_track_name'] = audio_track.get('displayName')
|
||||
fmt['audio_track_is_default'] = audio_track.get('audioIsDefault', True)
|
||||
else:
|
||||
fmt['audio_track_id'] = None
|
||||
fmt['audio_track_name'] = None
|
||||
fmt['audio_track_is_default'] = True
|
||||
|
||||
fmt['itag'] = itag
|
||||
fmt['ext'] = None
|
||||
fmt['audio_bitrate'] = None
|
||||
@@ -463,6 +541,61 @@ def _extract_formats(info, player_response):
|
||||
else:
|
||||
info['ip_address'] = None
|
||||
|
||||
|
||||
def parse_format(yt_fmt):
|
||||
'''Parse a single YouTube format dict into our internal format dict.'''
|
||||
itag = yt_fmt.get('itag')
|
||||
fmt = {}
|
||||
|
||||
audio_track = yt_fmt.get('audioTrack')
|
||||
if audio_track:
|
||||
fmt['audio_track_id'] = audio_track.get('id')
|
||||
fmt['audio_track_name'] = audio_track.get('displayName')
|
||||
fmt['audio_track_is_default'] = audio_track.get('audioIsDefault', True)
|
||||
else:
|
||||
fmt['audio_track_id'] = None
|
||||
fmt['audio_track_name'] = None
|
||||
fmt['audio_track_is_default'] = True
|
||||
|
||||
fmt['itag'] = itag
|
||||
fmt['ext'] = None
|
||||
fmt['audio_bitrate'] = None
|
||||
fmt['bitrate'] = yt_fmt.get('bitrate')
|
||||
fmt['acodec'] = None
|
||||
fmt['vcodec'] = None
|
||||
fmt['width'] = yt_fmt.get('width')
|
||||
fmt['height'] = yt_fmt.get('height')
|
||||
fmt['file_size'] = extract_int(yt_fmt.get('contentLength'))
|
||||
fmt['audio_sample_rate'] = extract_int(yt_fmt.get('audioSampleRate'))
|
||||
fmt['duration_ms'] = yt_fmt.get('approxDurationMs')
|
||||
fmt['fps'] = yt_fmt.get('fps')
|
||||
fmt['init_range'] = yt_fmt.get('initRange')
|
||||
fmt['index_range'] = yt_fmt.get('indexRange')
|
||||
for key in ('init_range', 'index_range'):
|
||||
if fmt[key]:
|
||||
fmt[key]['start'] = int(fmt[key]['start'])
|
||||
fmt[key]['end'] = int(fmt[key]['end'])
|
||||
update_format_with_type_info(fmt, yt_fmt)
|
||||
cipher = dict(urllib.parse.parse_qsl(multi_get(yt_fmt,
|
||||
'cipher', 'signatureCipher', default='')))
|
||||
if cipher:
|
||||
fmt['url'] = cipher.get('url')
|
||||
else:
|
||||
fmt['url'] = yt_fmt.get('url')
|
||||
fmt['s'] = cipher.get('s')
|
||||
fmt['sp'] = cipher.get('sp')
|
||||
|
||||
hardcoded_itag_info = _formats.get(str(itag), {})
|
||||
for key, value in hardcoded_itag_info.items():
|
||||
conservative_update(fmt, key, value)
|
||||
fmt['quality'] = hardcoded_itag_info.get('height')
|
||||
conservative_update(fmt, 'quality',
|
||||
extract_int(yt_fmt.get('quality'), whole_word=False))
|
||||
conservative_update(fmt, 'quality',
|
||||
extract_int(yt_fmt.get('qualityLabel'), whole_word=False))
|
||||
|
||||
return fmt
|
||||
|
||||
hls_regex = re.compile(r'[\w_-]+=(?:"[^"]+"|[^",]+),')
|
||||
def extract_hls_formats(hls_manifest):
|
||||
'''returns hls_formats, err'''
|
||||
@@ -559,6 +692,7 @@ def extract_watch_info(polymer_json):
|
||||
info['manual_caption_languages'] = []
|
||||
info['_manual_caption_language_names'] = {} # language name written in that language, needed in some cases to create the url
|
||||
info['translation_languages'] = []
|
||||
info['_caption_track_urls'] = {} # lang_code -> full baseUrl from player response
|
||||
captions_info = player_response.get('captions', {})
|
||||
info['_captions_base_url'] = normalize_url(deep_get(captions_info, 'playerCaptionsRenderer', 'baseUrl'))
|
||||
# Sometimes the above playerCaptionsRender is randomly missing
|
||||
@@ -589,6 +723,10 @@ def extract_watch_info(polymer_json):
|
||||
else:
|
||||
info['manual_caption_languages'].append(lang_code)
|
||||
base_url = caption_track.get('baseUrl', '')
|
||||
# Store the full URL from the player response (includes valid tokens)
|
||||
if base_url:
|
||||
normalized = normalize_url(base_url) if base_url.startswith('/') or not base_url.startswith('http') else base_url
|
||||
info['_caption_track_urls'][lang_code + ('_asr' if caption_track.get('kind') == 'asr' else '')] = normalized
|
||||
lang_name = deep_get(urllib.parse.parse_qs(urllib.parse.urlparse(base_url).query), 'name', 0)
|
||||
if lang_name:
|
||||
info['_manual_caption_language_names'][lang_code] = lang_name
|
||||
@@ -756,6 +894,21 @@ def captions_available(info):
|
||||
|
||||
def get_caption_url(info, language, format, automatic=False, translation_language=None):
|
||||
'''Gets the url for captions with the given language and format. If automatic is True, get the automatic captions for that language. If translation_language is given, translate the captions from `language` to `translation_language`. If automatic is true and translation_language is given, the automatic captions will be translated.'''
|
||||
# Try to use the direct URL from the player response first (has valid tokens)
|
||||
track_key = language + ('_asr' if automatic else '')
|
||||
direct_url = info.get('_caption_track_urls', {}).get(track_key)
|
||||
if direct_url:
|
||||
url = direct_url
|
||||
# Override format
|
||||
if '&fmt=' in url:
|
||||
url = re.sub(r'&fmt=[^&]*', '&fmt=' + format, url)
|
||||
else:
|
||||
url += '&fmt=' + format
|
||||
if translation_language:
|
||||
url += '&tlang=' + translation_language
|
||||
return url
|
||||
|
||||
# Fallback to base_url construction
|
||||
url = info['_captions_base_url']
|
||||
if not url:
|
||||
return None
|
||||
@@ -770,7 +923,7 @@ def get_caption_url(info, language, format, automatic=False, translation_languag
|
||||
url += '&tlang=' + translation_language
|
||||
return url
|
||||
|
||||
def update_with_age_restricted_info(info, player_response):
|
||||
def update_with_new_urls(info, player_response):
|
||||
'''Inserts urls from player_response json'''
|
||||
ERROR_PREFIX = 'Error getting missing player or bypassing age-restriction: '
|
||||
|
||||
|
||||
Reference in New Issue
Block a user