Revert "Add support for more qualities, merging video+audio using MSE"
This reverts commit d56df02e7b1eba86baf511289208295b1f6c5a50.
parent d56df02e7b
commit e4af99fd17
@@ -87,9 +87,6 @@ def proxy_site(env, start_response, video=False):
    response_headers = response.getheaders()
    if isinstance(response_headers, urllib3._collections.HTTPHeaderDict):
        response_headers = response_headers.items()
    if video:
        response_headers = (list(response_headers)
                            +[('Access-Control-Allow-Origin', '*')])

    if first_attempt:
        start_response(str(response.status) + ' ' + response.reason,
@@ -156,14 +156,9 @@ For security reasons, enabling this is not recommended.''',
        'default': 720,
        'comment': '',
        'options': [
            (144, '144p'),
            (240, '240p'),
            (360, '360p'),
            (480, '480p'),
            (720, '720p'),
            (1080, '1080p'),
            (1440, '1440p'),
            (2160, '2160p'),
        ],
        'category': 'playback',
    }),
@@ -1,445 +0,0 @@
// Heavily modified from
// https://github.com/nickdesaulniers/netfix/issues/4#issuecomment-578856471
// which was in turn modified from
// https://github.com/nickdesaulniers/netfix/blob/gh-pages/demo/bufferWhenNeeded.html

// Useful reading:
// https://stackoverflow.com/questions/35177797/what-exactly-is-fragmented-mp4fmp4-how-is-it-different-from-normal-mp4
// https://axel.isouard.fr/blog/2016/05/24/streaming-webm-video-over-html5-with-media-source

// We start by parsing the sidx (segment index) table in order to get the
// byte ranges of the segments. The byte range of the sidx table is provided
// by the indexRange variable by YouTube

// Useful info, as well as segments vs sequence mode (we use segments mode)
// https://joshuatz.com/posts/2020/appending-videos-in-javascript-with-mediasource-buffers/

// SourceBuffer data limits:
// https://developers.google.com/web/updates/2017/10/quotaexceedederror

// TODO: Better buffering algorithm
// TODO: Call abort to cancel in-progress appends?


var video_source = data['pair_sources'][data['pair_idx']][0];
var audio_source = data['pair_sources'][data['pair_idx']][1];

var audioStream = null;
var videoStream = null;
var seeking = false;

var video = document.querySelector('video');
var mediaSource = null;

setup();


function setup() {
    if ('MediaSource' in window
            && MediaSource.isTypeSupported(audio_source['mime_codec'])
            && MediaSource.isTypeSupported(video_source['mime_codec'])) {
        mediaSource = new MediaSource();
        video.src = URL.createObjectURL(mediaSource);
        mediaSource.addEventListener('sourceopen', sourceOpen);
    } else {
        reportError('Unsupported MIME type or codec: ',
                    audio_source['mime_codec'],
                    video_source['mime_codec']);
    }
}


function sourceOpen(_) {
    videoStream = new Stream(mediaSource, video_source);
    audioStream = new Stream(mediaSource, audio_source);

    videoStream.setup();
    audioStream.setup();

    video.addEventListener('timeupdate', checkBothBuffers);
    video.addEventListener('seeking', debounce(seek, 500));
    //video.addEventListener('seeked', function() {console.log('seeked')});
}


function Stream(mediaSource, source) {
    this.url = source['url'];
    this.mimeCodec = source['mime_codec']
    this.streamType = source['acodec'] ? 'audio' : 'video';

    this.initRange = source['init_range'];
    this.indexRange = source['index_range'];

    this.mediaSource = mediaSource;
    this.sidx = null;
    this.appendRetries = 0;
    this.appendQueue = []; // list of [segmentIdx, data]
    this.sourceBuffer = mediaSource.addSourceBuffer(this.mimeCodec);
    this.sourceBuffer.mode = 'segments';
    this.sourceBuffer.addEventListener('error', (e) => {
        this.reportError('sourceBuffer error', e);
    });
    this.sourceBuffer.addEventListener('updateend', (e) => {
        this.reportDebug('updateend', e);
        if (this.appendQueue.length != 0) {
            this.appendSegment(...this.appendQueue.pop());
        }
    });
}
Stream.prototype.setup = async function(){
    // Group requests together
    if (this.initRange.end+1 == this.indexRange.start){
        fetchRange(
            this.url,
            this.initRange.start,
            this.indexRange.end,
            (buffer) => {
                var init_end = this.initRange.end - this.initRange.start + 1;
                var index_start = this.indexRange.start - this.initRange.start;
                var index_end = this.indexRange.end - this.initRange.start + 1;
                this.appendSegment(null, buffer.slice(0, init_end));
                this.setupSegments(buffer.slice(index_start, index_end));
            }
        )
    } else {
        // initialization data
        await fetchRange(
            this.url,
            this.initRange.start,
            this.initRange.end,
            this.appendSegment.bind(this, null),
        );
        // sidx (segment index) table
        fetchRange(
            this.url,
            this.indexRange.start,
            this.indexRange.end,
            this.setupSegments.bind(this)
        );
    }
}
Stream.prototype.setupSegments = async function(sidxBox){
    var box = unbox(sidxBox);
    this.sidx = sidx_parse(box.data, this.indexRange.end+1);
    this.reportDebug('sidx', this.sidx);

    this.reportDebug('appending first segment');
    this.fetchSegmentIfNeeded(0);
}
Stream.prototype.appendSegment = function(segmentIdx, chunk) {
    // cannot append right now, schedule for updateend
    if (this.sourceBuffer.updating) {
        this.reportDebug('sourceBuffer updating, queueing for later');
        this.appendQueue.push([segmentIdx, chunk]);
        if (this.appendQueue.length > 2){
            this.reportWarning('appendQueue length:', this.appendQueue.length);
        }
        return;
    }
    try {
        this.sourceBuffer.appendBuffer(chunk);
        if (segmentIdx !== null)
            this.sidx.entries[segmentIdx].have = true;
        this.appendRetries = 0;
    } catch (e) {
        if (e.name !== 'QuotaExceededError') {
            throw e;
        }
        // Delete 3 segments (arbitrary) from beginning of buffer, making sure
        // not to delete current one
        var currentSegment = this.getSegmentIdx(video.currentTime);
        this.reportDebug('QuotaExceededError. Deleting segments.');
        var numDeleted = 0;
        var i = 0;
        while (numDeleted < 3 && i < currentSegment) {
            let entry = this.sidx.entries[i];
            let start = entry.tickStart/this.sidx.timeScale;
            let end = entry.tickEnd/this.sidx.timeScale;
            if (entry.have) {
                this.reportDebug('Deleting segment', i);
                this.sourceBuffer.remove(start, end);
            }
        }
    }
}
Stream.prototype.getSegmentIdx = function(videoTime) {
    // get an estimate
    var currentTick = videoTime * this.sidx.timeScale;
    var firstSegmentDuration = this.sidx.entries[0].subSegmentDuration;
    var index = 1 + Math.floor(currentTick / firstSegmentDuration);
    var index = clamp(index, 0, this.sidx.entries.length - 1);

    var increment = 1;
    if (currentTick < this.sidx.entries[index].tickStart){
        increment = -1;
    }

    // go up or down to find correct index
    while (index >= 0 && index < this.sidx.entries.length) {
        var entry = this.sidx.entries[index];
        if (entry.tickStart <= currentTick && entry.tickEnd >= currentTick){
            return index;
        }
        index = index + increment;
    }
    this.reportError('Could not find segment index for time', videoTime);
    return 0;
}
Stream.prototype.shouldFetchNextSegment = function(nextSegment) {
    // > 15% done with current segment
    if (nextSegment >= this.sidx.entries.length){
        return false;
    }
    var entry = this.sidx.entries[nextSegment - 1];
    var currentTick = video.currentTime * this.sidx.timeScale;
    return currentTick > (entry.tickStart + entry.subSegmentDuration*0.15);
}
Stream.prototype.checkBuffer = async function() {
    this.reportDebug('check Buffer');
    if (seeking) {
        return;
    }
    var nextSegment = this.getSegmentIdx(video.currentTime) + 1;

    if (this.shouldFetchNextSegment(nextSegment)) {
        this.fetchSegmentIfNeeded(nextSegment);
    }
}
Stream.prototype.segmentInBuffer = function(segmentIdx) {
    var entry = this.sidx.entries[segmentIdx];
    // allow for 0.01 second error
    var timeStart = entry.tickStart/this.sidx.timeScale + 0.01;
    var timeEnd = entry.tickEnd/this.sidx.timeScale - 0.01;
    var timeRanges = this.sourceBuffer.buffered;
    for (var i=0; i < timeRanges.length; i++) {
        if (timeRanges.start(i) <= timeStart && timeEnd <= timeRanges.end(i)) {
            return true;
        }
    }
    return false;
}
Stream.prototype.fetchSegmentIfNeeded = function(segmentIdx) {
    entry = this.sidx.entries[segmentIdx];
    // check if we had it before, but it was deleted by the browser
    if (entry.have && !this.segmentInBuffer(segmentIdx)) {
        this.reportDebug('segment', segmentIdx, 'deleted by browser');
        entry.have = false;
        entry.requested = false;
    }
    if (entry.requested) {
        return;
    }
    if (segmentIdx < 0 || segmentIdx >= this.sidx.entries.length){
        return;
    }
    entry.requested = true;

    fetchRange(
        this.url,
        entry.start,
        entry.end,
        this.appendSegment.bind(this, segmentIdx),
    );
}
Stream.prototype.handleSeek = async function() {
    var segmentIdx = this.getSegmentIdx(video.currentTime);
    this.fetchSegmentIfNeeded(segmentIdx);
}
Stream.prototype.reportDebug = function(...args) {
    reportDebug(String(this.streamType) + ':', ...args);
}
Stream.prototype.reportWarning = function(...args) {
    reportWarning(String(this.streamType) + ':', ...args);
}
Stream.prototype.reportError = function(...args) {
    reportError(String(this.streamType) + ':', ...args);
}

function checkBothBuffers() {
    audioStream.checkBuffer();
    videoStream.checkBuffer();
}

function seek(e) {
    if (mediaSource.readyState === 'open') {
        seeking = true;
        audioStream.handleSeek();
        videoStream.handleSeek();
        seeking = false;
    } else {
        this.reportWarning('seek but not open? readyState:',
                           mediaSource.readyState);
    }
}


// Utility functions
function fetchRange(url, start, end, cb) {
    reportDebug('fetchRange', start, end);
    return new Promise((resolve, reject) => {
        var xhr = new XMLHttpRequest();
        xhr.open('get', url);
        xhr.responseType = 'arraybuffer';
        xhr.setRequestHeader('Range', 'bytes=' + start + '-' + end);
        xhr.onload = function() {
            reportDebug('fetched bytes: ', start, end);
            //bytesFetched += end - start + 1;
            resolve(cb(xhr.response));
        };
        xhr.send();
    });
}

function debounce(func, wait, immediate) {
    var timeout;
    return function() {
        var context = this;
        var args = arguments;
        var later = function() {
            timeout = null;
            if (!immediate) func.apply(context, args);
        };
        var callNow = immediate && !timeout;
        clearTimeout(timeout);
        timeout = setTimeout(later, wait);
        if (callNow) func.apply(context, args);
    };
}

function clamp(number, min, max) {
    return Math.max(min, Math.min(number, max));
}

function reportWarning(...args){
    console.log(...args);
}
function reportError(...args){
    console.log(...args);
}
function reportDebug(...args){
    console.log(...args);
}

function byteArrayToIntegerLittleEndian(unsignedByteArray){
    var result = 0;
    for (byte of unsignedByteArray){
        result = result*256;
        result += byte
    }
    return result;
}
function ByteParser(data){
    this.curIndex = 0;
    this.data = new Uint8Array(data);
}
ByteParser.prototype.readInteger = function(nBytes){
    var result = byteArrayToIntegerLittleEndian(
        this.data.slice(this.curIndex, this.curIndex + nBytes)
    );
    this.curIndex += nBytes;
    return result;
}
ByteParser.prototype.readBufferBytes = function(nBytes){
    var result = this.data.slice(this.curIndex, this.curIndex + nBytes);
    this.curIndex += nBytes;
    return result;
}

// BEGIN iso-bmff-parser-stream/lib/box/sidx.js (modified)
// https://github.com/necccc/iso-bmff-parser-stream/blob/master/lib/box/sidx.js
/* The MIT License (MIT)

Copyright (c) 2014 Szabolcs Szabolcsi-Toth

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.*/
function sidx_parse (data, offset) {
    var bp = new ByteParser(data),
        version = bp.readInteger(1),
        flags = bp.readInteger(3),
        referenceId = bp.readInteger(4),
        timeScale = bp.readInteger(4),
        earliestPresentationTime = bp.readInteger(version === 0 ? 4 : 8),
        firstOffset = bp.readInteger(4),
        __reserved = bp.readInteger(2),
        entryCount = bp.readInteger(2),
        entries = [];

    var totalBytesOffset = firstOffset + offset;
    var totalTicks = 0;
    for (var i = entryCount; i > 0; i=i-1 ) {
        let referencedSize = bp.readInteger(4),
            subSegmentDuration = bp.readInteger(4),
            unused = bp.readBufferBytes(4)
        entries.push({
            referencedSize: referencedSize,
            subSegmentDuration: subSegmentDuration,
            unused: unused,
            start: totalBytesOffset,
            end: totalBytesOffset + referencedSize - 1, // inclusive
            tickStart: totalTicks,
            tickEnd: totalTicks + subSegmentDuration - 1,
            requested: false,
            have: false,
        });
        totalBytesOffset = totalBytesOffset + referencedSize;
        totalTicks = totalTicks + subSegmentDuration;
    }

    return {
        version: version,
        flags: flags,
        referenceId: referenceId,
        timeScale: timeScale,
        earliestPresentationTime: earliestPresentationTime,
        firstOffset: firstOffset,
        entries: entries
    };
}
// END sidx.js

// BEGIN iso-bmff-parser-stream/lib/unbox.js (same license), modified
function unbox(buf) {
    var bp = new ByteParser(buf),
        bufferLength = buf.length,
        length,
        typeData,
        boxData

    length = bp.readInteger(4); // length of entire box,
    typeData = bp.readInteger(4);

    if (bufferLength - length < 0) {
        reportWarning('Warning: sidx table is cut off');
        return {
            currentLength: bufferLength,
            length: length,
            type: typeData,
            data: bp.readBufferBytes(bufferLength)
        };
    }

    boxData = bp.readBufferBytes(length - 8);

    return {
        length: length,
        type: typeData,
        data: boxData
    };
}
// END unbox.js
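For reference, the file removed above follows the standard Media Source Extensions pattern: check codec support, attach a MediaSource to the video element, add one SourceBuffer per stream, then fetch byte ranges of the fragmented MP4 with HTTP Range requests and append them. The following is a minimal illustrative sketch of that pattern only; the function names and the single-segment flow are simplifications, not the buffering logic above.

// Minimal MSE + Range-request sketch (illustrative; heavily simplified).
// Assumes `url`, `mimeCodec`, and the byte offsets are already known.
function playFragmented(videoElement, url, mimeCodec, initEnd, firstSegmentEnd) {
    if (!('MediaSource' in window) || !MediaSource.isTypeSupported(mimeCodec)) {
        console.log('Unsupported MIME type or codec:', mimeCodec);
        return;
    }
    var mediaSource = new MediaSource();
    videoElement.src = URL.createObjectURL(mediaSource);
    mediaSource.addEventListener('sourceopen', function() {
        var sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
        sourceBuffer.mode = 'segments';
        // Append the initialization segment first, then the first media segment
        // once the previous append has finished (updateend).
        fetchBytes(url, 0, initEnd, function(initData) {
            sourceBuffer.addEventListener('updateend', function onInit() {
                sourceBuffer.removeEventListener('updateend', onInit);
                fetchBytes(url, initEnd + 1, firstSegmentEnd, function(segment) {
                    sourceBuffer.appendBuffer(segment);
                });
            });
            sourceBuffer.appendBuffer(initData);
        });
    });
}

function fetchBytes(url, start, end, cb) {
    // Inclusive byte range, same convention as fetchRange above
    var xhr = new XMLHttpRequest();
    xhr.open('get', url);
    xhr.responseType = 'arraybuffer';
    xhr.setRequestHeader('Range', 'bytes=' + start + '-' + end);
    xhr.onload = function() { cb(xhr.response); };
    xhr.send();
}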
@@ -3,7 +3,7 @@
<head>
    <meta charset="UTF-8"/>
    <meta name="viewport" content="width=device-width, initial-scale=1"/>
    <meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' blob: https://*.googlevideo.com; {{ "img-src 'self' https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com;" if not settings.proxy_images else "" }}">
    <meta http-equiv="Content-Security-Policy" content="default-src 'self' 'unsafe-inline' 'unsafe-eval'; media-src 'self' https://*.googlevideo.com; {{ "img-src 'self' https://*.googleusercontent.com https://*.ggpht.com https://*.ytimg.com;" if not settings.proxy_images else "" }}"/>
    <title>{{ page_title }}</title>
    <link title="YT Local" href="/youtube.com/opensearch.xml" rel="search" type="application/opensearchdescription+xml"/>
    <link href="/youtube.com/static/favicon.ico" type="image/x-icon" rel="icon"/>
@@ -29,7 +29,7 @@
            {% endif %}
        </span>
    </div>
{% elif (uni_sources.__len__() == 0 or live) and hls_formats.__len__() != 0 %}
{% elif (video_sources.__len__() == 0 or live) and hls_formats.__len__() != 0 %}
    <div class="live-url-choices">
        <span>Copy a url into your video player:</span>
        <ol>
@@ -41,9 +41,9 @@
{% else %}
    <figure class="sc-video">
        <video id="js-video-player" playsinline controls>
            {% if uni_sources %}
            <source src="{{ uni_sources[uni_idx]['url'] }}" type="{{ uni_sources[uni_idx]['type'] }}" data-res="{{ uni_sources[uni_idx]['quality'] }}">
            {% endif %}
            {% for video_source in video_sources %}
            <source src="{{ video_source['src'] }}" type="{{ video_source['type'] }}" data-res="{{ video_source['quality'] }}">
            {% endfor %}

            {% for source in subtitle_sources %}
                {% if source['on'] %}
@@ -55,10 +55,6 @@
        </video>
    </figure>

    {% if pair_sources and (not uni_sources or pair_sources[pair_idx][0]['quality'] != uni_sources[uni_idx]['quality']) %}
        <script src="/youtube.com/static/js/av-merge.js"></script>
    {% endif %}

    {% if time_start != 0 %}
        <script>
            document.getElementById('js-video-player').currentTime = {{ time_start|tojson }};
youtube/watch.py
@@ -24,97 +24,25 @@ except FileNotFoundError:


def get_video_sources(info):
    '''return dict with organized sources: {
        'uni_sources': [{}, ...], # video and audio in one file
        'uni_idx': int, # default unified source index
        'pair_sources': [({video}, {audio}), ...],
        'pair_idx': int, # default pair source index
    }
    '''
    audio_sources = []
    video_only_sources = []
    uni_sources = []
    pair_sources = []
    target_resolution = settings.default_resolution
    video_sources = []
    max_resolution = settings.default_resolution
    for fmt in info['formats']:
        if not all(fmt[attr] for attr in ('ext', 'url')):
        if not all(fmt[attr] for attr in ('quality', 'width', 'ext', 'url')):
            continue
        if fmt['ext'] != 'mp4': # temporary until webm support
            continue

        # unified source
        if fmt['acodec'] and fmt['vcodec']:
            source = {
        if (fmt['acodec'] and fmt['vcodec']
                and fmt['quality'] <= max_resolution):
            video_sources.append({
                'src': fmt['url'],
                'type': 'video/' + fmt['ext'],
            }
            source.update(fmt)
            uni_sources.append(source)
            continue
                'quality': fmt['quality'],
                'height': fmt['height'],
                'width': fmt['width'],
            })

        if not (fmt['init_range'] and fmt['index_range']):
            continue
    # order the videos sources so the preferred resolution is first #
    video_sources.sort(key=lambda source: source['quality'], reverse=True)

        # audio source
        if fmt['acodec'] and not fmt['vcodec'] and fmt['audio_bitrate']:
            source = {
                'type': 'audio/' + fmt['ext'],
                'bitrate': fmt['audio_bitrate'],
            }
            source.update(fmt)
            source['mime_codec'] = (source['type'] + '; codecs="'
                                    + source['acodec'] + '"')
            audio_sources.append(source)
        # video-only source, include audio source
        elif all(fmt[attr] for attr in ('vcodec', 'quality', 'width')):
            source = {
                'type': 'video/' + fmt['ext'],
            }
            source.update(fmt)
            source['mime_codec'] = (source['type'] + '; codecs="'
                                    + source['vcodec'] + '"')
            video_only_sources.append(source)

    audio_sources.sort(key=lambda source: source['audio_bitrate'])
    video_only_sources.sort(key=lambda src: src['quality'])
    uni_sources.sort(key=lambda src: src['quality'])

    for source in video_only_sources:
        # choose an audio source to go with it
        # 0.15 is semiarbitrary empirical constant to spread audio sources
        # between 144p and 1080p. Use something better eventually.
        target_audio_bitrate = source['quality']*source.get('fps', 30)/30*0.15
        compat_audios = [a for a in audio_sources if a['ext'] == source['ext']]
        if compat_audios:
            closest_audio_source = compat_audios[0]
            best_err = target_audio_bitrate - compat_audios[0]['audio_bitrate']
            best_err = abs(best_err)
            for audio_source in compat_audios[1:]:
                err = abs(audio_source['audio_bitrate'] - target_audio_bitrate)
                # once err gets worse we have passed the closest one
                if err > best_err:
                    break
                best_err = err
                closest_audio_source = audio_source
            pair_sources.append((source, closest_audio_source))

    uni_idx = 0 if uni_sources else None
    for i, source in enumerate(uni_sources):
        if source['quality'] > target_resolution:
            break
        uni_idx = i

    pair_idx = 0 if pair_sources else None
    for i, source_pair in enumerate(pair_sources):
        if source_pair[0]['quality'] > target_resolution:
            break
        pair_idx = i

    return {
        'uni_sources': uni_sources,
        'uni_idx': uni_idx,
        'pair_sources': pair_sources,
        'pair_idx': pair_idx,
    }
    return video_sources


def make_caption_src(info, lang, auto=False, trans_lang=None):
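For reference, the audio/video pairing removed above targets an audio bitrate of roughly quality x (fps/30) x 0.15 kbps and then picks the compatible audio source closest to that target, so a 1080p 30 fps stream aims for about 162 kbps while 144p aims for about 22 kbps. The sketch below illustrates only that arithmetic; the removed helper is Python, JavaScript is used here just to match the other example, the function name is made up, and the bitrate values are hypothetical.

// Illustrative only: the pairing heuristic from the removed get_video_sources.
// target kbps = quality * (fps / 30) * 0.15, then choose the closest bitrate.
function pickAudioBitrate(quality, fps, audioBitrates) {
    var target = quality * (fps / 30) * 0.15;
    return audioBitrates.reduce(function(best, bitrate) {
        return Math.abs(bitrate - target) < Math.abs(best - target) ? bitrate : best;
    });
}
// Hypothetical bitrates, in kbps:
// pickAudioBitrate(1080, 30, [48, 128, 160]) -> 160  (target 162)
// pickAudioBitrate(144, 30, [48, 128, 160])  -> 48   (target 21.6)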
@@ -510,11 +438,10 @@ def get_watch_page(video_id=None):
            item['url'] += '&index=' + str(item['index'])
        info['playlist']['author_url'] = util.prefix_url(
            info['playlist']['author_url'])
    if settings.img_prefix:
        # Don't prefix hls_formats for now because the urls inside the manifest
        # would need to be prefixed as well.
        for fmt in info['formats']:
            fmt['url'] = util.prefix_url(fmt['url'])
    # Don't prefix hls_formats for now because the urls inside the manifest
    # would need to be prefixed as well.
    for fmt in info['formats']:
        fmt['url'] = util.prefix_url(fmt['url'])

    # Add video title to end of url path so it has a filename other than just
    # "videoplayback" when downloaded
@ -550,14 +477,9 @@ def get_watch_page(video_id=None):
            'codecs': codecs_string,
        })

    source_info = get_video_sources(info)
    uni_idx = source_info['uni_idx']
    video_height = yt_data_extract.deep_get(source_info, 'uni_sources',
                                            uni_idx, 'height',
                                            default=360)
    video_width = yt_data_extract.deep_get(source_info, 'uni_sources',
                                           uni_idx, 'width',
                                           default=640)
    video_sources = get_video_sources(info)
    video_height = yt_data_extract.deep_get(video_sources, 0, 'height', default=360)
    video_width = yt_data_extract.deep_get(video_sources, 0, 'width', default=640)
    # 1 second per pixel, or the actual video width
    theater_video_target_width = max(640, info['duration'] or 0, video_width)
@ -602,6 +524,7 @@ def get_watch_page(video_id=None):
        download_formats = download_formats,
        other_downloads = other_downloads,
        video_info = json.dumps(video_info),
        video_sources = video_sources,
        hls_formats = info['hls_formats'],
        subtitle_sources = subtitle_sources,
        related = info['related_videos'],
@ -634,14 +557,12 @@ def get_watch_page(video_id=None):
        time_start = time_start,

        js_data = {
            'video_id': info['id'],
            'video_duration': info['duration'],
            'video_id': video_info['id'],
            'settings': settings.current_settings_dict,
            'has_manual_captions': any(s.get('on') for s in subtitle_sources),
            **source_info,
        },
        # for embed page
        font_family=youtube.font_choices[settings.font],
        **source_info,
    )
@@ -415,21 +415,13 @@ def _extract_formats(info, player_response):
        fmt['itag'] = itag
        fmt['ext'] = None
        fmt['audio_bitrate'] = None
        fmt['bitrate'] = yt_fmt.get('bitrate')
        fmt['acodec'] = None
        fmt['vcodec'] = None
        fmt['width'] = yt_fmt.get('width')
        fmt['height'] = yt_fmt.get('height')
        fmt['file_size'] = yt_fmt.get('contentLength')
        fmt['audio_sample_rate'] = extract_int(yt_fmt.get('audioSampleRate'))
        fmt['duration_ms'] = yt_fmt.get('approxDurationMs')
        fmt['audio_sample_rate'] = yt_fmt.get('audioSampleRate')
        fmt['fps'] = yt_fmt.get('fps')
        fmt['init_range'] = yt_fmt.get('initRange')
        fmt['index_range'] = yt_fmt.get('indexRange')
        for key in ('init_range', 'index_range'):
            if fmt[key]:
                fmt[key]['start'] = int(fmt[key]['start'])
                fmt[key]['end'] = int(fmt[key]['end'])
        update_format_with_type_info(fmt, yt_fmt)
        cipher = dict(urllib.parse.parse_qsl(multi_get(yt_fmt,
            'cipher', 'signatureCipher', default='')))
@ -467,7 +459,7 @@ def extract_hls_formats(hls_manifest):
        if lines[i].startswith('#EXT-X-STREAM-INF'):
            fmt = {'acodec': None, 'vcodec': None, 'height': None,
                   'width': None, 'fps': None, 'audio_bitrate': None,
                   'itag': None, 'file_size': None, 'duration_ms': None,
                   'itag': None, 'file_size': None,
                   'audio_sample_rate': None, 'url': None}
            properties = lines[i].split(':')[1]
            properties += ',' # make regex work for last key-value pair
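The init_range/index_range dicts normalized in the _extract_formats hunk above are the inclusive byte ranges that the removed av-merge.js requests: the initialization segment and the sidx (segment index) table. When the two ranges are adjacent (init end + 1 equals index start), Stream.prototype.setup issues a single Range request and slices the result. The sketch below only illustrates that slicing arithmetic, using made-up byte offsets.

// Illustrative, with hypothetical byte offsets; mirrors the request grouping
// in Stream.prototype.setup above. `buffer` is the ArrayBuffer returned by a
// single 'Range: bytes=0-1345' request covering both adjacent ranges.
var initRange  = {start: 0,   end: 741};    // initialization segment (inclusive)
var indexRange = {start: 742, end: 1345};   // sidx (segment index) table

function splitInitAndIndex(buffer, initRange, indexRange) {
    var initEnd = initRange.end - initRange.start + 1;        // 742
    var indexStart = indexRange.start - initRange.start;      // 742
    var indexEnd = indexRange.end - initRange.start + 1;      // 1346
    return {
        init: buffer.slice(0, initEnd),           // bytes 0..741
        sidx: buffer.slice(indexStart, indexEnd)  // bytes 742..1345
    };
}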