Merge branch 'transcoding_progress'

This probably broke stuff
Boris Bobrov 2018-07-12 18:49:35 +02:00
commit 23af1d8cbc
24 changed files with 576 additions and 80 deletions

.gitignore vendored

@ -64,3 +64,4 @@ venv*
/extlib/leaflet/
/extlib/tinymce/
/extlib/video.js/
/extlib/videojs-resolution-switcher


@ -13,7 +13,8 @@
],
"dependencies": {
"jquery": "~2.1.3",
"video.js": "~4.11.4",
"video.js": "~5.20.1",
"videojs-resolution-switcher": "~0.4.2",
"leaflet": "~0.7.3"
}
}


@ -154,6 +154,7 @@ CELERY_RESULT_DBURI = string(default="sqlite:///%(here)s/celery.db")
# default kombu stuff
BROKER_URL = string(default="amqp://")
CELERY_DEFAULT_QUEUE = string(default="default")
# known booleans
CELERY_RESULT_PERSISTENT = boolean()
@ -165,7 +166,7 @@ CELERY_EAGER_PROPAGATES_EXCEPTIONS = boolean()
CELERY_IGNORE_RESULT = boolean()
CELERY_TRACK_STARTED = boolean()
CELERY_DISABLE_RATE_LIMITS = boolean()
CELERY_ACKS_LATE = boolean()
CELERY_ACKS_LATE = boolean(default=True)
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = boolean()
CELERY_SEND_TASK_ERROR_EMAILS = boolean()
CELERY_SEND_EVENTS = boolean()
@ -174,8 +175,8 @@ CELERYD_LOG_COLOR = boolean()
CELERY_REDIRECT_STDOUTS = boolean()
# known ints
CELERYD_CONCURRENCY = integer()
CELERYD_PREFETCH_MULTIPLIER = integer()
CELERYD_CONCURRENCY = integer(default=1)
CELERYD_PREFETCH_MULTIPLIER = integer(default=1)
CELERY_AMQP_TASK_RESULT_EXPIRES = integer()
CELERY_AMQP_TASK_RESULT_CONNECTION_MAX = integer()
REDIS_PORT = integer()
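The new defaults above suit long-running transcodes: a worker acknowledges a task only after it has finished and no longer prefetches a batch of tasks it may not get to. A minimal sketch of the equivalent settings applied directly to a Celery app, assuming a Celery version that still accepts the old-style uppercase option names used by this config spec:

    from celery import Celery

    app = Celery('sketch', broker='amqp://')
    app.conf.update(
        CELERY_ACKS_LATE=True,          # ack only after the transcode completes,
                                        # so a crashed worker does not drop it
        CELERYD_CONCURRENCY=1,          # a single worker process
        CELERYD_PREFETCH_MULTIPLIER=1,  # reserve only one task at a time
    )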


@ -0,0 +1,28 @@
"""add main transcoding progress column to MediaEntry
Revision ID: cc3651803714
Revises: 228916769bd2
Create Date: 2017-08-21 23:33:01.401589
"""
# revision identifiers, used by Alembic.
revision = 'cc3651803714'
down_revision = '228916769bd2'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
"""
The main_transcoding_progress column stores the transcoding progress of the
default resolution separately from the total progress of the video.
"""
op.add_column('core__media_entries', sa.Column('main_transcoding_progress', sa.Float(), default=0))
def downgrade():
pass
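The downgrade above is deliberately a no-op. If a reverse migration were wanted, a minimal sketch (an assumption, not part of this commit) could drop the column again, using Alembic's batch mode so it also works on SQLite:

    def downgrade():
        # Hypothetical reverse of upgrade(): remove the progress column again.
        with op.batch_alter_table('core__media_entries') as batch_op:
            batch_op.drop_column('main_transcoding_progress')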


@ -249,6 +249,33 @@ class MediaEntryMixin(GenerateSlugMixin, GeneratePublicIDMixin):
if media_size in media_sizes:
return media_size, self.media_files[media_size]
def get_all_media(self):
"""
Returns all available qualities of a media entry
"""
fetch_order = self.media_manager.media_fetch_order
# No fetching order found? well, give up!
if not fetch_order:
return None
media_sizes = self.media_files.keys()
all_media_path = []
for media_size in fetch_order:
if media_size in media_sizes:
file_metadata = self.get_file_metadata(media_size)
size = file_metadata['medium_size']
if media_size != 'webm_video':
all_media_path.append((media_size[5:], size,
self.media_files[media_size]))
else:
all_media_path.append(('default', size,
self.media_files[media_size]))
return all_media_path
def main_mediafile(self):
pass
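To give a rough idea of what templates receive from get_all_media() above, here is a hedged illustration of its return value; the entry id and file names are invented, and the sizes are the stored medium_size lists:

    # Hypothetical result for an entry transcoded to three resolutions:
    #
    #   [('480p', [858, 480],  [u'media_entries', u'42', u'clip.480p.webm']),
    #    ('360p', [480, 360],  [u'media_entries', u'42', u'clip.360p.webm']),
    #    ('720p', [1280, 720], [u'media_entries', u'42', u'clip.720p.webm'])]
    #
    # An older entry that only has the legacy 'webm_video' file instead yields
    # a single ('default', ...) tuple.  Given a MediaEntry instance `entry`:
    for label, size, path in entry.get_all_media() or []:
        print(label, size, path)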


@ -25,7 +25,7 @@ import datetime
from sqlalchemy import Column, Integer, Unicode, UnicodeText, DateTime, \
Boolean, ForeignKey, UniqueConstraint, PrimaryKeyConstraint, \
SmallInteger, Date, types
SmallInteger, Date, types, Float
from sqlalchemy.orm import relationship, backref, with_polymorphic, validates, \
class_mapper
from sqlalchemy.orm.collections import attribute_mapped_collection
@ -543,7 +543,8 @@ class MediaEntry(Base, MediaEntryMixin, CommentingMixin):
fail_error = Column(Unicode)
fail_metadata = Column(JSONEncoded)
transcoding_progress = Column(SmallInteger)
transcoding_progress = Column(Float, default=0)
main_transcoding_progress = Column(Float, default=0)
queued_media_file = Column(PathTupleWithSlashes)


@ -22,6 +22,7 @@ import logging
import six
from celery import Celery
from kombu import Exchange, Queue
from mediagoblin.tools.pluginapi import hook_runall
@ -32,6 +33,7 @@ MANDATORY_CELERY_IMPORTS = [
'mediagoblin.processing.task',
'mediagoblin.notifications.task',
'mediagoblin.submit.task',
'mediagoblin.media_types.video.processing',
]
DEFAULT_SETTINGS_MODULE = 'mediagoblin.init.celery.dummy_settings_module'
@ -47,6 +49,12 @@ def get_celery_settings_dict(app_config, global_config,
else:
celery_conf = {}
# Add x-max-priority to config
celery_conf['CELERY_QUEUES'] = (
Queue('default', Exchange('default'), routing_key='default',
queue_arguments={'x-max-priority': 10}),
)
celery_settings = {}
# Add all celery settings from config
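The default queue above is declared with an x-max-priority argument so that the per-task priority set later in VideoProcessingManager.workflow() is actually honoured by RabbitMQ, which otherwise ignores priorities silently. A self-contained sketch of the pattern (toy task, placeholder broker URL):

    from celery import Celery
    from kombu import Exchange, Queue

    app = Celery('sketch', broker='amqp://')
    app.conf.CELERY_QUEUES = (
        Queue('default', Exchange('default'), routing_key='default',
              queue_arguments={'x-max-priority': 10}),
    )

    @app.task
    def transcode(resolution):
        return resolution

    # Higher numbers are delivered first, so the default-resolution transcode
    # jumps ahead of the complementary ones.
    transcode.apply_async(args=('480p',), queue='default', priority=4)
    transcode.apply_async(args=('720p',), queue='default', priority=2)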


@ -431,6 +431,7 @@ class ImageProcessingManager(ProcessingManager):
self.add_processor(Resizer)
self.add_processor(MetadataProcessing)
if __name__ == '__main__':
import sys
import pprint


@ -14,6 +14,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin import mg_globals as mgg
from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.video.processing import (VideoProcessingManager,
sniff_handler, sniffer)
@ -31,9 +32,17 @@ class VideoMediaManager(MediaManagerBase):
type_icon = "images/type_icons/video.png"
# Used by the media_entry.get_display_media method
media_fetch_order = [u'webm_video', u'original']
default_webm_type = 'video/webm; codecs="vp8, vorbis"'
@property
def media_fetch_order(self):
video_config = mgg.global_config['plugins'][MEDIA_TYPE]
video_res = video_config['available_resolutions']
video_res.remove(video_config['default_resolution'])
video_res.insert(0, video_config['default_resolution'])
video_res = ['webm_{}'.format(x) for x in video_res]
return ([u'webm_video'] + video_res + [u'original'])
def get_media_type_and_manager(ext):
if ext in ACCEPTED_EXTENSIONS:
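A worked example for the media_fetch_order property above, assuming the shipped defaults (available_resolutions of 480p, 360p and 720p with 480p as the default): the default resolution is moved to the front, and the legacy webm_video key plus the original file bracket the list.

    # Reproduces the reordering done by the property with the default values:
    video_res = ['480p', '360p', '720p']
    default_resolution = '480p'
    video_res.remove(default_resolution)
    video_res.insert(0, default_resolution)
    fetch_order = (['webm_video'] +
                   ['webm_{}'.format(x) for x in video_res] +
                   ['original'])
    # -> ['webm_video', 'webm_480p', 'webm_360p', 'webm_720p', 'original']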


@ -12,6 +12,14 @@ vorbis_quality = float(default=0.3)
# Autoplay the video when page is loaded?
auto_play = boolean(default=False)
# List of resolutions that the video should be transcoded to
# Choose among ['144p', '240p', '360p', '480p', '720p', '1080p'],
# preferably in the order of transcoding.
available_resolutions = string_list(default=list('480p', '360p', '720p'))
# Default resolution of video
default_resolution = string(default='480p')
[[skip_transcode]]
mime_types = string_list(default=list("video/webm"))
container_formats = string_list(default=list("Matroska"))
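As a hedged illustration of how a deployment would use the new options (the section names below follow MediaGoblin's [plugins] configuration layout and are an assumption here), note that default_resolution should be one of available_resolutions, since the fetch-order code removes and re-inserts it:

    # In the site's mediagoblin configuration:
    #
    #   [[mediagoblin.media_types.video]]
    #   available_resolutions = 360p, 480p, 720p, 1080p
    #   default_resolution = 720p
    #
    # which the processing code then sees roughly as:
    video_config = {
        'available_resolutions': ['360p', '480p', '720p', '1080p'],
        'default_resolution': '720p',   # must appear in available_resolutions
    }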


@ -18,21 +18,23 @@ import argparse
import os.path
import logging
import datetime
import celery
import six
from celery import group
from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
FilenameBuilder, BaseProcessingFail,
ProgressCallback, MediaProcessor,
ProcessingManager, request_from_args,
get_process_filename, store_public,
copy_original)
copy_original, get_entry_and_processing_manager)
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.media_types import MissingComponents
from . import transcoders
from .util import skip_transcode
from .util import skip_transcode, ACCEPTED_RESOLUTIONS
_log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG)
@ -173,13 +175,67 @@ def store_metadata(media_entry, metadata):
media_entry.media_data_init(orig_metadata=stored_metadata)
@celery.task()
def main_task(entry_id, resolution, medium_size, **process_info):
"""
Main celery task to transcode the video to the default resolution
and store original video metadata.
"""
_log.debug('MediaEntry processing')
entry, manager = get_entry_and_processing_manager(entry_id)
with CommonVideoProcessor(manager, entry) as processor:
processor.common_setup(resolution)
processor.transcode(medium_size=tuple(medium_size),
vp8_quality=process_info['vp8_quality'],
vp8_threads=process_info['vp8_threads'],
vorbis_quality=process_info['vorbis_quality'])
processor.generate_thumb(thumb_size=process_info['thumb_size'])
processor.store_orig_metadata()
# Mark the entry state as processed
entry.state = u'processed'
entry.save()
_log.info(u'MediaEntry ID {0} is processed (transcoded to default'
' resolution): {1}'.format(entry.id, medium_size))
_log.debug('MediaEntry processed')
@celery.task()
def complementary_task(entry_id, resolution, medium_size, **process_info):
"""
Complementary celery task to transcode the video to the other resolutions
"""
entry, manager = get_entry_and_processing_manager(entry_id)
with CommonVideoProcessor(manager, entry) as processor:
processor.common_setup(resolution)
processor.transcode(medium_size=tuple(medium_size),
vp8_quality=process_info['vp8_quality'],
vp8_threads=process_info['vp8_threads'],
vorbis_quality=process_info['vorbis_quality'])
_log.info(u'MediaEntry ID {0} is transcoded to {1}'.format(
entry.id, medium_size))
@celery.task()
def processing_cleanup(entry_id):
_log.debug('Entered processing_cleanup')
entry, manager = get_entry_and_processing_manager(entry_id)
with CommonVideoProcessor(manager, entry) as processor:
# no need to specify a resolution here
processor.common_setup()
processor.copy_original()
processor.keep_best()
processor.delete_queue_file()
_log.debug('Deleted queue_file')
class CommonVideoProcessor(MediaProcessor):
"""
Provides a base for various video processing steps
"""
acceptable_files = ['original', 'best_quality', 'webm_video']
acceptable_files = ['original, best_quality', 'webm_144p', 'webm_360p',
'webm_480p', 'webm_720p', 'webm_1080p', 'webm_video']
def common_setup(self):
def common_setup(self, resolution=None):
self.video_config = mgg \
.global_config['plugins'][MEDIA_TYPE]
@ -191,24 +247,47 @@ class CommonVideoProcessor(MediaProcessor):
self.transcoder = transcoders.VideoTranscoder()
self.did_transcode = False
if resolution:
self.curr_file = 'webm_' + str(resolution)
self.part_filename = (self.name_builder.fill('{basename}.' +
str(resolution) + '.webm'))
else:
self.curr_file = 'webm_video'
self.part_filename = self.name_builder.fill('{basename}.medium.webm')
def copy_original(self):
# If we didn't transcode, then we need to keep the original
self.did_transcode = False
for each_res in self.video_config['available_resolutions']:
if ('webm_' + str(each_res)) in self.entry.media_files:
self.did_transcode = True
break
if not self.did_transcode or \
(self.video_config['keep_original'] and self.did_transcode):
copy_original(
self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def _keep_best(self):
def keep_best(self):
"""
If there is no original, keep the best file that we have
"""
best_file = None
best_file_dim = (0, 0)
for each_res in self.video_config['available_resolutions']:
curr_dim = ACCEPTED_RESOLUTIONS[each_res]
if curr_dim[0] >= best_file_dim[0] and curr_dim[1] >= best_file_dim[1]:
best_file = each_res
best_file_dim = curr_dim
if not self.entry.media_files.get('best_quality'):
# Save the best quality file if no original?
if not self.entry.media_files.get('original') and \
self.entry.media_files.get('webm_video'):
self.entry.media_files.get(str(best_file)):
self.entry.media_files['best_quality'] = self.entry \
.media_files['webm_video']
.media_files[str(best_file)]
def _skip_processing(self, keyname, **kwargs):
file_metadata = self.entry.get_file_metadata(keyname)
@ -217,7 +296,7 @@ class CommonVideoProcessor(MediaProcessor):
return False
skip = True
if keyname == 'webm_video':
if 'webm' in keyname:
if kwargs.get('medium_size') != file_metadata.get('medium_size'):
skip = False
elif kwargs.get('vp8_quality') != file_metadata.get('vp8_quality'):
@ -237,8 +316,7 @@ class CommonVideoProcessor(MediaProcessor):
def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
vorbis_quality=None):
progress_callback = ProgressCallback(self.entry)
tmp_dst = os.path.join(self.workbench.dir,
self.name_builder.fill('{basename}.medium.webm'))
tmp_dst = os.path.join(self.workbench.dir, self.part_filename)
if not medium_size:
medium_size = (
@ -256,17 +334,10 @@ class CommonVideoProcessor(MediaProcessor):
'vp8_quality': vp8_quality,
'vorbis_quality': vorbis_quality}
if self._skip_processing('webm_video', **file_metadata):
if self._skip_processing(self.curr_file, **file_metadata):
return
# Extract metadata and keep a record of it
metadata = transcoders.discover(self.process_filename)
# metadata's stream info here is a DiscovererContainerInfo instance,
# it gets split into DiscovererAudioInfo and DiscovererVideoInfo;
# metadata itself has container-related data in tags, like video-codec
store_metadata(self.entry, metadata)
orig_dst_dimensions = (metadata.get_video_streams()[0].get_width(),
metadata.get_video_streams()[0].get_height())
@ -275,45 +346,37 @@ class CommonVideoProcessor(MediaProcessor):
if skip_transcode(metadata, medium_size):
_log.debug('Skipping transcoding')
dst_dimensions = orig_dst_dimensions
# If there is an original and transcoded, delete the transcoded
# since it must be of lower quality than the original
if self.entry.media_files.get('original') and \
self.entry.media_files.get('webm_video'):
self.entry.media_files['webm_video'].delete()
self.entry.media_files.get(self.curr_file):
self.entry.media_files[self.curr_file].delete()
else:
_log.debug('Entered transcoder')
video_config = (mgg.global_config['plugins']
['mediagoblin.media_types.video'])
num_res = len(video_config['available_resolutions'])
default_res = video_config['default_resolution']
self.transcoder.transcode(self.process_filename, tmp_dst,
default_res, num_res,
vp8_quality=vp8_quality,
vp8_threads=vp8_threads,
vorbis_quality=vorbis_quality,
progress_callback=progress_callback,
dimensions=tuple(medium_size))
if self.transcoder.dst_data:
video_info = self.transcoder.dst_data.get_video_streams()[0]
dst_dimensions = (video_info.get_width(),
video_info.get_height())
self._keep_best()
# Push transcoded video to public storage
_log.debug('Saving medium...')
store_public(self.entry, 'webm_video', tmp_dst,
self.name_builder.fill('{basename}.medium.webm'))
store_public(self.entry, self.curr_file, tmp_dst, self.part_filename)
_log.debug('Saved medium')
self.entry.set_file_metadata('webm_video', **file_metadata)
self.entry.set_file_metadata(self.curr_file, **file_metadata)
self.did_transcode = True
else:
dst_dimensions = orig_dst_dimensions
# Save the width and height of the transcoded video
self.entry.media_data_init(
width=dst_dimensions[0],
height=dst_dimensions[1])
def generate_thumb(self, thumb_size=None):
_log.debug("Enter generate_thumb()")
# Temporary file for the video thumbnail (cleaned up with workbench)
tmp_thumb = os.path.join(self.workbench.dir,
self.name_builder.fill(
@ -343,6 +406,17 @@ class CommonVideoProcessor(MediaProcessor):
self.entry.set_file_metadata('thumb', thumb_size=thumb_size)
def store_orig_metadata(self):
# Extract metadata and keep a record of it
metadata = transcoders.discover(self.process_filename)
# metadata's stream info here is a DiscovererContainerInfo instance,
# it gets split into DiscovererAudioInfo and DiscovererVideoInfo;
# metadata itself has container-related data in tags, like video-codec
store_metadata(self.entry, metadata)
_log.debug("Stored original video metadata")
class InitialProcessor(CommonVideoProcessor):
"""
Initial processing steps for new video
@ -399,13 +473,12 @@ class InitialProcessor(CommonVideoProcessor):
'vorbis_quality', 'thumb_size'])
def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
vorbis_quality=None, thumb_size=None):
self.common_setup()
vorbis_quality=None, thumb_size=None, resolution=None):
self.common_setup(resolution=resolution)
self.store_orig_metadata()
self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)
self.copy_original()
self.generate_thumb(thumb_size=thumb_size)
self.delete_queue_file()
@ -516,3 +589,43 @@ class VideoProcessingManager(ProcessingManager):
self.add_processor(InitialProcessor)
self.add_processor(Resizer)
self.add_processor(Transcoder)
def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):
video_config = mgg.global_config['plugins'][MEDIA_TYPE]
def_res = video_config['default_resolution']
priority_num = len(video_config['available_resolutions']) + 1
entry.state = u'processing'
entry.save()
reprocess_info = reprocess_info or {}
if 'vp8_quality' not in reprocess_info:
reprocess_info['vp8_quality'] = None
if 'vorbis_quality' not in reprocess_info:
reprocess_info['vorbis_quality'] = None
if 'vp8_threads' not in reprocess_info:
reprocess_info['vp8_threads'] = None
if 'thumb_size' not in reprocess_info:
reprocess_info['thumb_size'] = None
tasks_list = [main_task.signature(args=(entry.id, def_res,
ACCEPTED_RESOLUTIONS[def_res]),
kwargs=reprocess_info, queue='default',
priority=priority_num, immutable=True)]
for comp_res in video_config['available_resolutions']:
if comp_res != def_res:
priority_num += -1
tasks_list.append(
complementary_task.signature(args=(entry.id, comp_res,
ACCEPTED_RESOLUTIONS[comp_res]),
kwargs=reprocess_info, queue='default',
priority=priority_num, immutable=True)
)
transcoding_tasks = group(tasks_list)
cleanup_task = processing_cleanup.signature(args=(entry.id,),
queue='default', immutable=True)
return (transcoding_tasks, cleanup_task)
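To make the priority scheme concrete, a worked example under the default configuration (three available resolutions, 480p as default): priority_num starts at len(available_resolutions) + 1, the main task keeps that value, and each complementary task gets one less, so the default resolution finishes first.

    # Sketch of the priorities workflow() assigns under the default config:
    resolutions = ['480p', '360p', '720p']
    default = '480p'
    priority = len(resolutions) + 1
    print('main_task', default, priority)          # main_task 480p 4
    for res in resolutions:
        if res != default:
            priority -= 1
            print('complementary_task', res, priority)
    # complementary_task 360p 3
    # complementary_task 720p 2
    # processing_cleanup then runs as the chord callback once all of them finish.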


@ -21,8 +21,10 @@ import sys
import logging
import multiprocessing
from mediagoblin import mg_globals as mgg
from mediagoblin.media_types.tools import discover
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from .util import ACCEPTED_RESOLUTIONS
#os.environ['GST_DEBUG'] = '4,python:4'
@ -153,10 +155,10 @@ class VideoTranscoder(object):
'''
def __init__(self):
_log.info('Initializing VideoTranscoder...')
self.progress_percentage = None
self.progress_percentage = 0
self.loop = GLib.MainLoop()
def transcode(self, src, dst, **kwargs):
def transcode(self, src, dst, default_res, num_res, **kwargs):
'''
Transcode a video file into a 'medium'-sized version.
'''
@ -184,6 +186,10 @@ class VideoTranscoder(object):
self._progress_callback = kwargs.get('progress_callback') or None
# Get number of resolutions available for the video
self.num_of_resolutions = num_res
self.default_resolution = default_res
if not type(self.destination_dimensions) == tuple:
raise Exception('dimensions must be tuple: (width, height)')
@ -354,10 +360,19 @@ class VideoTranscoder(object):
# Update progress state if it has changed
(success, percent) = structure.get_int('percent')
if self.progress_percentage != percent and success:
# FIXME: the code below is a workaround for structure.get_int('percent')
# returning 0 once the transcoding is complete (100%)
if self.progress_percentage > percent and percent == 0:
percent = 100
percent_increment = percent - self.progress_percentage
self.progress_percentage = percent
if self._progress_callback:
self._progress_callback(percent)
_log.info('{percent}% done...'.format(percent=percent))
if ACCEPTED_RESOLUTIONS[self.default_resolution] == self.destination_dimensions:
self._progress_callback(percent_increment/self.num_of_resolutions, percent)
else:
self._progress_callback(percent_increment/self.num_of_resolutions)
_log.info('{percent}% of {dest} resolution done..'
'.'.format(percent=percent, dest=self.destination_dimensions))
elif message.type == Gst.MessageType.ERROR:
_log.error('Got error: {0}'.format(message.parse_error()))
self.dst_data = None


@ -18,6 +18,16 @@ import logging
from mediagoblin import mg_globals as mgg
ACCEPTED_RESOLUTIONS = {
'144p': (256, 144),
'240p': (352, 240),
'360p': (480, 360),
'480p': (858, 480),
'720p': (1280, 720),
'1080p': (1920, 1080),
'webm': (640, 640),
}
_log = logging.getLogger(__name__)
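The table above serves both as the target dimensions handed to the transcoder and as the per-file medium_size recorded in the database; the extra 'webm' entry appears to cover the legacy single-file 640x640 "medium" rendition. A small usage sketch:

    from mediagoblin.media_types.video.util import ACCEPTED_RESOLUTIONS

    width, height = ACCEPTED_RESOLUTIONS['480p']   # (858, 480)
    # e.g. passed on as transcode(..., dimensions=(width, height))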


@ -39,9 +39,14 @@ class ProgressCallback(object):
def __init__(self, entry):
self.entry = entry
def __call__(self, progress):
def __call__(self, progress, default_quality_progress=None):
if progress:
self.entry.transcoding_progress = progress
if 100 - (self.entry.transcoding_progress + progress) < 0.01:
self.entry.transcoding_progress = 100
else:
self.entry.transcoding_progress += round(progress, 2)
if default_quality_progress:
self.entry.main_transcoding_progress = default_quality_progress
self.entry.save()
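The transcoder now reports progress as increments scaled by the number of resolutions, and this callback accumulates them; the optional second argument carries the unscaled percentage of the default resolution. A rough worked example, assuming three resolutions and Python 3 division:

    # 480p (the default) advances from 20% to 30% in one GStreamer message:
    #     callback(10 / 3, 30)  ->  entry.transcoding_progress      += 3.33
    #                               entry.main_transcoding_progress  = 30
    # 360p advancing by the same amount only reports the scaled increment:
    #     callback(10 / 3)      ->  entry.transcoding_progress      += 3.33
    #
    # transcoding_progress therefore approximates the combined progress of all
    # resolutions, while main_transcoding_progress mirrors just the default one;
    # the < 0.01 check above is meant to snap the accumulated value to exactly
    # 100 once the last increment arrives.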
@ -257,6 +262,12 @@ class ProcessingManager(object):
return processor
def workflow(self, entry, feed_url, reprocess_action, reprocess_info=None):
"""
Returns the Celery command needed to proceed with media processing
"""
return None
def request_from_args(args, which_args):
"""


@ -1 +1 @@
../../../extlib/video.js/dist/video-js
../../../extlib/video.js/dist/


@ -0,0 +1 @@
../../../extlib/videojs-resolution-switcher/lib/


@ -0,0 +1,27 @@
var glplayer;
$(document).ready(function()
{
// fire up the plugin
glplayer = videojs('video_1', {
controls: true,
muted: true,
height: 400,
width: 700,
plugins: {
videoJsResolutionSwitcher: {
ui: true,
default: 'low', // Default resolution [{Number}, 'low', 'high'],
dynamicLabel: true // Display dynamic labels or gear symbol
}
}
}, function(){
var player = this;
window.player = player
player.on('resolutionchange', function(){
console.info('Source changed to %s', player.src());
console.log(player.currentTime());
})
})
});


@ -20,6 +20,8 @@ from os.path import splitext
import six
from celery import chord
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
@ -28,7 +30,7 @@ from mediagoblin.tools.response import json_response
from mediagoblin.tools.text import convert_to_tag_list_of_dicts
from mediagoblin.tools.federation import create_activity, create_generator
from mediagoblin.db.models import Collection, MediaEntry, ProcessingMetaData
from mediagoblin.processing import mark_entry_failed
from mediagoblin.processing import mark_entry_failed, get_entry_and_processing_manager
from mediagoblin.processing.task import ProcessMedia
from mediagoblin.notifications import add_comment_subscription
from mediagoblin.media_types import sniff_media
@ -262,10 +264,17 @@ def run_process_media(entry, feed_url=None,
:param reprocess_action: What particular action should be run.
:param reprocess_info: A dict containing all of the necessary reprocessing
info for the given media_type"""
entry, manager = get_entry_and_processing_manager(entry.id)
try:
wf = manager.workflow(entry, feed_url, reprocess_action, reprocess_info)
if wf is None:
ProcessMedia().apply_async(
[entry.id, feed_url, reprocess_action, reprocess_info], {},
task_id=entry.queued_task_id)
else:
chord(wf[0])(wf[1])
except BaseException as exc:
# The purpose of this section is because when running in "lazy"
# or always-eager-with-exceptions-propagated celery mode that
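run_process_media() above now prefers the workflow returned by the processing manager and falls back to the old single ProcessMedia task when a media type provides no workflow. A minimal, self-contained sketch of the chord pattern it uses (toy tasks; broker and result-backend URLs are placeholders, and chords need a result backend to synchronise):

    from celery import Celery, chord, group

    app = Celery('sketch', broker='amqp://', backend='rpc://')

    @app.task
    def transcode(resolution):
        return resolution

    @app.task
    def cleanup(results):
        # Runs once, after every task in the group has finished -- the same way
        # processing_cleanup follows the per-resolution transcodes.
        return results

    chord(group(transcode.s(r) for r in ('480p', '360p', '720p')))(cleanup.s())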


@ -22,15 +22,19 @@
{{ super() }}
<script type="text/javascript" src="{{
request.staticdirect('/extlib/video-js/video.js') }}"></script>
<script type="text/javascript" src="{{
request.staticdirect('/extlib/videojs-resolution-switcher/videojs-resolution-switcher.js') }}">
</script>
<script type="text/javascript"
src="{{ request.staticdirect('/js/change-video-resolution.js') }}"></script>
{# Sadly commented out till we can get the mediagoblin skin ported over
# to the newest video.js release ;\ #}
{#
<link href="{{ request.staticdirect('/css/vjs-mg-skin.css') }}"
rel="stylesheet">
#}
<link href="{{
request.staticdirect('/extlib/video-js/video-js.css') }}"
rel="stylesheet">
<link href="{{
request.staticdirect('/extlib/videojs-resolution-switcher/videojs-resolution-switcher.css') }}"
rel="stylesheet">
<style type="text/css">
.vjs-default-skin .vjs-big-play-button
@ -43,27 +47,29 @@
background-color: #86D4B1 !important;
}
</style>
{%- endblock %}
{% block mediagoblin_media %}
<div class="media_other_container">
{% set display_type, display_path = media.get_display_media() %}
{% set all_media_path = media.get_all_media() %}
<video controls
{% if global_config['plugins']['mediagoblin.media_types.video']['auto_play'] %}autoplay{% endif %}
preload="auto" class="video-js vjs-default-skin"
data-setup='{"height": {{ media.media_data.height }},
"width": {{ media.media_data.width }} }'>
<source src="{{ request.app.public_store.file_url(display_path) }}"
preload="auto" class="video-js vjs-default-skin" id="video_1">
{% for each_media_path in all_media_path %}
<source src="{{ request.app.public_store.file_url(each_media_path[2]) }}"
{% if media.media_data %}
type="{{ media.media_data.source_type() }}"
{% else %}
type="{{ media.media_manager['default_webm_type'] }}"
{% endif %} />
{% endif %}
label="{{ each_media_path[0] }}" res="{{ each_media_path[1][1] }}" />
{%- for subtitle in media.subtitle_files %}
<track src="{{ request.app.public_store.file_url(subtitle.filepath) }}"
label="{{ subtitle.name }}" kind="subtitles">
{%- endfor %}
{% endfor %}
<div class="no_html5">
{%- trans -%}Sorry, this video will not work because
your web browser does not support HTML5


@ -23,6 +23,7 @@
{% trans %}Media processing panel{% endtrans %} &mdash; {{ super() }}
{%- endblock %}
{% block mediagoblin_content %}
<h1>{% trans %}Media processing panel{% endtrans %}</h1>
@ -50,7 +51,8 @@ Show:
<th width="210">Thumbnail</th>
<th>Title</th>
<th width="20%">When submitted</th>
<th width="200">Transcoding progress</th>
<th width="200">Total transcoding progress</th>
<th width="200">Default resolution transcoding progress</th>
</tr>
{% for media_entry in entries %}
<tr>
@ -84,6 +86,11 @@ Show:
{% else %}
<td>Unknown</td>
{% endif %}
{% if media_entry.main_transcoding_progress %}
<td>{{ media_entry.main_transcoding_progress }}%</td>
{% else %}
<td>Unknown</td>
{% endif %}
{% endif %}
</tr>
{% endfor %}
@ -93,3 +100,4 @@ Show:
<p><em>{% trans %}You have not uploaded anything yet!{% endtrans %}</em></p>
{% endif %}
{% endblock %}


@ -48,8 +48,9 @@ def test_setup_celery_from_config():
assert isinstance(fake_celery_module.CELERYD_ETA_SCHEDULER_PRECISION, float)
assert fake_celery_module.CELERY_RESULT_PERSISTENT is True
assert fake_celery_module.CELERY_IMPORTS == [
'foo.bar.baz', 'this.is.an.import', 'mediagoblin.processing.task', \
'mediagoblin.notifications.task', 'mediagoblin.submit.task']
'foo.bar.baz', 'this.is.an.import', 'mediagoblin.processing.task',
'mediagoblin.notifications.task', 'mediagoblin.submit.task',
'mediagoblin.media_types.video.processing']
assert fake_celery_module.CELERY_RESULT_BACKEND == 'database'
assert fake_celery_module.CELERY_RESULT_DBURI == (
'sqlite:///' +


@ -47,17 +47,27 @@ import os
import pytest
import webtest.forms
import pkg_resources
try:
import mock
except ImportError:
import unittest.mock as mock
import six.moves.urllib.parse as urlparse
from celery import Signature
from mediagoblin.tests.tools import (
fixture_add_user, fixture_add_collection, get_app)
from mediagoblin import mg_globals
from mediagoblin.db.models import MediaEntry, User, LocalUser, Activity
from mediagoblin.db.models import MediaEntry, User, LocalUser, Activity, MediaFile
from mediagoblin.db.base import Session
from mediagoblin.tools import template
from mediagoblin.media_types.image import ImageMediaManager
from mediagoblin.media_types.pdf.processing import check_prerequisites as pdf_check_prerequisites
from mediagoblin.media_types.video.processing import (
VideoProcessingManager, main_task, complementary_task, group,
processing_cleanup, CommonVideoProcessor)
from mediagoblin.media_types.video.util import ACCEPTED_RESOLUTIONS
from mediagoblin.submit.lib import new_upload_entry, run_process_media
from .resources import GOOD_JPG, GOOD_PNG, EVIL_FILE, EVIL_JPG, EVIL_PNG, \
BIG_BLUE, GOOD_PDF, GPS_JPG, MED_PNG, BIG_PNG
@ -101,6 +111,16 @@ def pdf_plugin_app(request):
'mediagoblin.tests',
'test_mgoblin_app_pdf.ini'))
def get_sample_entry(user, media_type):
entry = new_upload_entry(user)
entry.media_type = media_type
entry.title = 'testentry'
entry.description = u""
entry.license = None
entry.media_metadata = {}
entry.save()
return entry
class BaseTestSubmission:
@pytest.fixture(autouse=True)
@ -523,6 +543,7 @@ class TestSubmissionVideo(BaseTestSubmission):
@pytest.fixture(autouse=True)
def setup(self, video_plugin_app):
self.test_app = video_plugin_app
self.media_type = 'mediagoblin.media_types.video'
# TODO: Possibly abstract into a decorator like:
# @as_authenticated_user('chris')
@ -536,6 +557,179 @@ class TestSubmissionVideo(BaseTestSubmission):
with create_av(make_video=True) as path:
self.check_normal_upload('Video', path)
media = mg_globals.database.MediaEntry.query.filter_by(
title=u'Video').first()
video_config = mg_globals.global_config['plugins'][self.media_type]
for each_res in video_config['available_resolutions']:
assert (('webm_' + str(each_res)) in media.media_files)
@pytest.mark.skipif(SKIP_VIDEO,
reason="Dependencies for video not met")
def test_get_all_media(self, video_plugin_app):
"""Test if the get_all_media function returns sensible things
"""
with create_av(make_video=True) as path:
self.check_normal_upload('testgetallmedia', path)
media = mg_globals.database.MediaEntry.query.filter_by(
title=u'testgetallmedia').first()
result = media.get_all_media()
video_config = mg_globals.global_config['plugins'][self.media_type]
for media_file in result:
# checking that each returned media file list has 3 elements
assert len(media_file) == 3
# result[0][0] is the video label of the first video in the list
if result[0][0] == 'default':
media_file = MediaFile.query.filter_by(media_entry=media.id,
name=('webm_video')).first()
# only one media file has to be present in this case
assert len(result) == 1
# check dimensions of media_file
assert result[0][1] == list(ACCEPTED_RESOLUTIONS['webm'])
# check media_file path
assert result[0][2] == media_file.file_path
else:
assert len(result) == len(video_config['available_resolutions'])
for i in range(len(video_config['available_resolutions'])):
media_file = MediaFile.query.filter_by(media_entry=media.id,
name=('webm_{0}'.format(str(result[i][0])))).first()
# check media_file label
assert result[i][0] == video_config['available_resolutions'][i]
# check dimensions of media_file
assert result[i][1] == list(ACCEPTED_RESOLUTIONS[
video_config['available_resolutions'][i]])
# check media_file path
assert result[i][2] == media_file.file_path
@mock.patch('mediagoblin.media_types.video.processing.processing_cleanup.signature')
@mock.patch('mediagoblin.media_types.video.processing.complementary_task.signature')
@mock.patch('mediagoblin.media_types.video.processing.main_task.signature')
def test_celery_tasks(self, mock_main_task, mock_comp_task, mock_cleanup):
# create a new entry and get video manager
entry = get_sample_entry(self.our_user(), self.media_type)
manager = VideoProcessingManager()
# prepare things for testing
video_config = mg_globals.global_config['plugins'][entry.media_type]
def_res = video_config['default_resolution']
priority_num = len(video_config['available_resolutions']) + 1
main_priority = priority_num
calls = []
reprocess_info = {
'vorbis_quality': None,
'vp8_threads': None,
'thumb_size': None,
'vp8_quality': None
}
for comp_res in video_config['available_resolutions']:
if comp_res != def_res:
priority_num += -1
calls.append(
mock.call(args=(entry.id, comp_res, ACCEPTED_RESOLUTIONS[comp_res]),
kwargs=reprocess_info, queue='default',
priority=priority_num, immutable=True)
)
# call workflow method
manager.workflow(entry, feed_url=None, reprocess_action='initial')
# test section
mock_main_task.assert_called_once_with(args=(entry.id, def_res,
ACCEPTED_RESOLUTIONS[def_res]),
kwargs=reprocess_info, queue='default',
priority=main_priority, immutable=True)
mock_comp_task.assert_has_calls(calls)
mock_cleanup.assert_called_once_with(args=(entry.id,), queue='default',
immutable=True)
assert entry.state == u'processing'
# delete the entry
entry.delete()
def test_workflow(self):
entry = get_sample_entry(self.our_user(), self.media_type)
manager = VideoProcessingManager()
wf = manager.workflow(entry, feed_url=None, reprocess_action='initial')
assert type(wf) == tuple
assert len(wf) == 2
assert isinstance(wf[0], group)
assert isinstance(wf[1], Signature)
# more precise testing
video_config = mg_globals.global_config['plugins'][entry.media_type]
def_res = video_config['default_resolution']
priority_num = len(video_config['available_resolutions']) + 1
reprocess_info = {
'vorbis_quality': None,
'vp8_threads': None,
'thumb_size': None,
'vp8_quality': None
}
tasks_list = [main_task.signature(args=(entry.id, def_res,
ACCEPTED_RESOLUTIONS[def_res]),
kwargs=reprocess_info, queue='default',
priority=priority_num, immutable=True)]
for comp_res in video_config['available_resolutions']:
if comp_res != def_res:
priority_num += -1
tasks_list.append(
complementary_task.signature(args=(entry.id, comp_res,
ACCEPTED_RESOLUTIONS[comp_res]),
kwargs=reprocess_info, queue='default',
priority=priority_num, immutable=True)
)
transcoding_tasks = group(tasks_list)
cleanup_task = processing_cleanup.signature(args=(entry.id,),
queue='default', immutable=True)
assert wf[0] == transcoding_tasks
assert wf[1] == cleanup_task
entry.delete()
@mock.patch('mediagoblin.submit.lib.ProcessMedia.apply_async')
@mock.patch('mediagoblin.submit.lib.chord')
def test_celery_chord(self, mock_chord, mock_process_media):
entry = get_sample_entry(self.our_user(), self.media_type)
# prepare things for testing
video_config = mg_globals.global_config['plugins'][entry.media_type]
def_res = video_config['default_resolution']
priority_num = len(video_config['available_resolutions']) + 1
reprocess_info = {
'vorbis_quality': None,
'vp8_threads': None,
'thumb_size': None,
'vp8_quality': None
}
tasks_list = [main_task.signature(args=(entry.id, def_res,
ACCEPTED_RESOLUTIONS[def_res]),
kwargs=reprocess_info, queue='default',
priority=priority_num, immutable=True)]
for comp_res in video_config['available_resolutions']:
if comp_res != def_res:
priority_num += -1
tasks_list.append(
complementary_task.signature(args=(entry.id, comp_res,
ACCEPTED_RESOLUTIONS[comp_res]),
kwargs=reprocess_info, queue='default',
priority=priority_num, immutable=True)
)
transcoding_tasks = group(tasks_list)
run_process_media(entry)
mock_chord.assert_called_once_with(transcoding_tasks)
entry.delete()
def test_accepted_files(self):
entry = get_sample_entry(self.our_user(), 'mediagoblin.media_types.video')
manager = VideoProcessingManager()
processor = CommonVideoProcessor(manager, entry)
acceptable_files = ['original, best_quality', 'webm_144p', 'webm_360p',
'webm_480p', 'webm_720p', 'webm_1080p', 'webm_video']
assert processor.acceptable_files == acceptable_files
class TestSubmissionAudio(BaseTestSubmission):
@pytest.fixture(autouse=True)
@ -591,3 +785,4 @@ class TestSubmissionPDF(BaseTestSubmission):
**self.upload_data(GOOD_PDF))
self.check_url(response, '/u/{0}/'.format(self.our_user().username))
assert 'mediagoblin/user_pages/user.html' in context


@ -30,7 +30,9 @@ Gst.init(None)
from mediagoblin.media_types.video.transcoders import (capture_thumb,
VideoTranscoder)
from mediagoblin.media_types.video.util import ACCEPTED_RESOLUTIONS
from mediagoblin.media_types.tools import discover
from mediagoblin.tests.tools import get_app
@contextmanager
def create_data(suffix=None, make_audio=False):
@ -114,6 +116,7 @@ def test_transcoder():
transcoder = VideoTranscoder()
transcoder.transcode(
video_name, result_name,
'480p', 1,
vp8_quality=8,
vp8_threads=0, # autodetect
vorbis_quality=0.3,
@ -124,9 +127,22 @@ def test_transcoder():
transcoder = VideoTranscoder()
transcoder.transcode(
video_name, result_name,
'480p', 1,
vp8_quality=8,
vp8_threads=0, # autodetect
vorbis_quality=0.3,
dimensions=(640, 640))
assert len(discover(result_name).get_video_streams()) == 1
assert len(discover(result_name).get_audio_streams()) == 1
def test_accepted_resolutions():
accepted_resolutions = {
'144p': (256, 144),
'240p': (352, 240),
'360p': (480, 360),
'480p': (858, 480),
'720p': (1280, 720),
'1080p': (1920, 1080),
'webm': (640, 640),
}
assert accepted_resolutions == ACCEPTED_RESOLUTIONS


@ -66,8 +66,7 @@ def user_home(request, page):
{'user': user})
cursor = MediaEntry.query.\
filter_by(actor = user.id,
state = u'processed').order_by(MediaEntry.created.desc())
filter_by(actor = user.id).order_by(MediaEntry.created.desc())
pagination = Pagination(page, cursor)
media_entries = pagination()