Porting video to GStreamer 1.0

Porting includes:
 - thumbnailer
 - transcoder
 - metadata handling
 - a new common discoverer for media
 - new tests that generate the test video in memory
 - handling of files regardless of whether they contain audio
 - Pythonic Gst pipelines (a short sketch follows the commit header)
Boris Bobrov 2014-06-05 15:42:12 +04:00
parent 7e266d5a37
commit 91f5f5e791
6 changed files with 301 additions and 306 deletions
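On the last bullet: throughout this diff, 0.10-style helpers such as gst.element_factory_make and gst.element_link_many give way to the introspected Gst 1.0 API, where elements are created with Gst.ElementFactory.make and linked pairwise. A minimal sketch of the pattern (the element names here are illustrative, not taken from the diff):

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)

# build: videotestsrc ! videoconvert ! autovideosink
pipeline = Gst.Pipeline.new('example')
src = Gst.ElementFactory.make('videotestsrc', None)
convert = Gst.ElementFactory.make('videoconvert', None)
sink = Gst.ElementFactory.make('autovideosink', None)
for element in (src, convert, sink):
    pipeline.add(element)  # in 1.0, add() takes one element at a time
src.link(convert)          # element_link_many() is gone; link pairwise
convert.link(sink)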

View File

@@ -17,6 +17,11 @@ import logging
from mediagoblin import mg_globals
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, GstPbutils, GLib
Gst.init(None)
_log = logging.getLogger(__name__)
@@ -25,3 +30,19 @@ def media_type_warning():
_log.warning('Media_types have been converted to plugins. Old'
' media_types will no longer work. Please convert them'
' to plugins to continue using them.')
def discover(src):
'''
Discover properties about a media file
'''
_log.info('Discovering {0}...'.format(src))
uri = 'file://{0}'.format(src)
discoverer = GstPbutils.Discoverer.new(60 * Gst.SECOND)
try:
info = discoverer.discover_uri(uri)
except GLib.GError as e:
_log.warning(u'Exception: {0}'.format(e))
info = None
_log.info('Done')
return info
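For reference, discover() returns a GstPbutils.DiscovererInfo (or None on failure); a hedged sketch of how the rest of this commit queries it (the path is hypothetical):

info = discover('/tmp/example.webm')
if info is not None:
    video_streams = info.get_video_streams()  # DiscovererVideoInfo objects
    audio_streams = info.get_audio_streams()  # DiscovererAudioInfo objects
    duration = info.get_duration()            # in nanoseconds (Gst time)
    if video_streams:
        video = video_streams[0]
        print(video.get_width(), video.get_height())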

View File

@@ -47,15 +47,7 @@ class VideoTranscodingFail(BaseProcessingFail):
EXCLUDED_EXTS = ["nef", "cr2"]
def sniff_handler(media_file, filename):
name, ext = os.path.splitext(filename)
clean_ext = ext.lower()[1:]
if clean_ext in EXCLUDED_EXTS:
# We don't handle this filetype, though gstreamer might think we can
return None
transcoder = transcoders.VideoTranscoder()
data = transcoder.discover(media_file.name)
data = transcoders.discover(media_file.name)
_log.info('Sniffing {0}'.format(MEDIA_TYPE))
_log.debug('Discovered: {0}'.format(data))
@@ -64,7 +56,7 @@ def sniff_handler(media_file, filename):
_log.error('Could not discover {0}'.format(filename))
return None
if data['is_video'] is True:
if data.get_video_streams():
return MEDIA_TYPE
return None
@@ -82,51 +74,50 @@ def store_metadata(media_entry, metadata):
# video is always there
video_info = metadata.get_video_streams()[0]
# Let's pull out the easy, not having to be converted ones first
stored_metadata = dict(
[(key, metadata[key])
for key in [
"videoheight", "videolength", "videowidth",
"audiorate", "audiolength", "audiochannels", "audiowidth",
"mimetype"]
if key in metadata])
stored_metadata = dict()
audio_info_list = metadata.get_audio_streams()
if audio_info_list:
audio_info = audio_info_list[0]
stored_metadata['audiochannels'] = audio_info.get_channels()
# video is always there
video_info = metadata.get_video_streams()[0]
# Let's pull out the easy, not having to be converted ones first
stored_metadata['videoheight'] = video_info.get_height()
stored_metadata['videowidth'] = video_info.get_width()
stored_metadata['videolength'] = metadata.get_duration()
stored_metadata['mimetype'] = metadata.get_tags().get_string('mimetype')[1]
# We have to convert videorate into a sequence because it's
# normally a special fraction type
stored_metadata['videorate'] = [video_info.get_framerate_num(),
video_info.get_framerate_denom()]
if "videorate" in metadata:
videorate = metadata["videorate"]
stored_metadata["videorate"] = [videorate.num, videorate.denom]
# Also make a whitelist conversion of the tags.
if "tags" in metadata:
tags_metadata = metadata['tags']
if metadata.get_tags():
tags_metadata = metadata.get_tags()
# we don't use *all* of these, but we know these ones are
# safe...
# get_string returns (success, value) tuple
tags = dict(
[(key, tags_metadata[key])
[(key, tags_metadata.get_string(key)[1])
for key in [
"application-name", "artist", "audio-codec", "bitrate",
"container-format", "copyright", "encoder",
"encoder-version", "license", "nominal-bitrate", "title",
"video-codec"]
if key in tags_metadata])
if 'date' in tags_metadata:
date = tags_metadata['date']
if tags_metadata.get_string(key)[0]])
(success, date) = tags_metadata.get_date('date')
if success:
tags['date'] = "%s-%s-%s" % (
date.year, date.month, date.day)
# TODO: handle timezone info; gst.get_time_zone_offset +
# python's tzinfo should help
if 'datetime' in tags_metadata:
dt = tags_metadata['datetime']
(success, dt) = tags_metadata.get_date_time('datetime')
if success:
tags['datetime'] = datetime.datetime(
dt.get_year(), dt.get_month(), dt.get_day(), dt.get_hour(),
dt.get_minute(), dt.get_second(),
dt.get_microsecond()).isoformat()
stored_metadata['tags'] = tags
# Only save this field if there's something to save
if len(stored_metadata):
media_entry.media_data_init(
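The (success, value) pairs above are the main gotcha of the introspected API: Gst.TagList getters return a tuple rather than raising on a missing tag. A minimal sketch, assuming metadata is a DiscovererInfo as above:

tags = metadata.get_tags()  # a Gst.TagList
(ok, title) = tags.get_string('title')
if ok:
    print('title: {0}'.format(title))
else:
    print('no title tag present')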
@@ -220,7 +211,10 @@ class CommonVideoProcessor(MediaProcessor):
return
# Extract metadata and keep a record of it
metadata = self.transcoder.discover(self.process_filename)
metadata = transcoders.discover(self.process_filename)
# metadata is a DiscovererInfo; its stream info is a DiscovererContainerInfo
# that splits into DiscovererAudioInfo and DiscovererVideoInfo streams,
# while container-level data such as video-codec lives in the tags
store_metadata(self.entry, metadata)
# Figure out whether or not we need to transcode this video or
@@ -243,10 +237,8 @@ class CommonVideoProcessor(MediaProcessor):
vorbis_quality=vorbis_quality,
progress_callback=progress_callback,
dimensions=tuple(medium_size))
dst_dimensions = self.transcoder.dst_data.videowidth,\
self.transcoder.dst_data.videoheight
video_info = self.transcoder.dst_data.get_video_streams()[0]
dst_dimensions = (video_info.get_width(), video_info.get_height())
self._keep_best()
# Push transcoded video to public storage

View File

@@ -19,16 +19,18 @@ from __future__ import division
import os
import sys
import logging
import urllib
import multiprocessing
import gobject
from mediagoblin.media_types.tools import discover
#os.environ['GST_DEBUG'] = '4,python:4'
old_argv = sys.argv
sys.argv = []
import pygst
pygst.require('0.10')
import gst
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, GstPbutils
Gst.init(None)
sys.argv = old_argv
import struct
@@ -37,12 +39,8 @@ try:
except ImportError:
import Image
from gst.extend import discoverer
_log = logging.getLogger(__name__)
gobject.threads_init()
CPU_COUNT = 2
try:
@@ -53,57 +51,70 @@ except NotImplementedError:
os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
def pixbuf_to_pilbuf(buf):
data = list()
for i in range(0, len(buf)-4, 4):
r, g, b, x = struct.unpack('BBBB', buf[i:i + 4])
# XXX: can something be done with the 'X' part of RGBX?
data.append((r, g, b))
return data
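One thing to double-check here: capture_thumb below requests caps with format 'RGB' (three bytes per pixel), while this helper unpacks four-byte RGBX groups. If the helper is still called on RGB buffers, it would need a three-byte stride; a sketch under that assumption (rgb_buf_to_pilbuf is hypothetical):

def rgb_buf_to_pilbuf(buf):
    # hypothetical 24-bit RGB variant of pixbuf_to_pilbuf
    data = list()
    for i in range(0, len(buf) - 2, 3):
        r, g, b = struct.unpack('BBB', buf[i:i + 3])
        data.append((r, g, b))
    return data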
def capture_thumb(video_path, dest_path, width=None, height=None, percent=0.5):
def pad_added(element, pad, connect_to):
caps = pad.get_caps()
name = caps[0].get_name()
'''Callback to dynamically link a newly created pad into the pipeline'''
caps = pad.query_caps(None)
name = caps.to_string()
_log.debug('on_pad_added: {0}'.format(name))
if name.startswith('video') and not connect_to.is_linked():
pad.link(connect_to)
# construct pipeline: uridecodebin ! ffmpegcolorspace ! videoscale ! \
# construct pipeline: uridecodebin ! videoconvert ! videoscale ! \
# ! CAPS ! appsink
pipeline = gst.Pipeline()
uridecodebin = gst.element_factory_make('uridecodebin')
pipeline = Gst.Pipeline()
uridecodebin = Gst.ElementFactory.make('uridecodebin', None)
uridecodebin.set_property('uri', 'file://{0}'.format(video_path))
ffmpegcolorspace = gst.element_factory_make('ffmpegcolorspace')
videoconvert = Gst.ElementFactory.make('videoconvert', None)
uridecodebin.connect('pad-added', pad_added,
ffmpegcolorspace.get_pad('sink'))
videoscale = gst.element_factory_make('videoscale')
filter = gst.element_factory_make('capsfilter', 'filter')
videoconvert.get_static_pad('sink'))
videoscale = Gst.ElementFactory.make('videoscale', None)
# create caps for video scaling
caps_struct = gst.Structure('video/x-raw-rgb')
caps_struct.set_value('pixel-aspect-ratio', gst.Fraction(1, 1))
caps_struct = Gst.Structure.new_empty('video/x-raw')
caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
caps_struct.set_value('format', 'RGB')
if height:
caps_struct.set_value('height', height)
if width:
caps_struct.set_value('width', width)
caps = gst.Caps(caps_struct)
filter.set_property('caps', caps)
appsink = gst.element_factory_make('appsink')
pipeline.add(uridecodebin, ffmpegcolorspace, videoscale, filter, appsink)
gst.element_link_many(ffmpegcolorspace, videoscale, filter, appsink)
caps = Gst.Caps.new_empty()
caps.append_structure(caps_struct)
# sink everything to memory
appsink = Gst.ElementFactory.make('appsink', None)
appsink.set_property('caps', caps)
# add everything to pipeline
elements = [uridecodebin, videoconvert, videoscale, appsink]
for e in elements:
pipeline.add(e)
videoconvert.link(videoscale)
videoscale.link(appsink)
# pipeline constructed; a few preparations before starting playback
if pipeline.set_state(gst.STATE_PAUSED) == gst.STATE_CHANGE_FAILURE:
_log.warning('state change failed')
pipeline.get_state()
duration = pipeline.query_duration(gst.FORMAT_TIME, None)[0]
if duration == gst.CLOCK_TIME_NONE:
# seeking (by default to 50% of the file) requires the PAUSED state
pipeline.set_state(Gst.State.PAUSED)
# timeout of 3 seconds below was set experimentally
state = pipeline.get_state(Gst.SECOND * 3)
if state[0] != Gst.StateChangeReturn.SUCCESS:
_log.warning('state change failed, {0}'.format(state))
return
# get duration
(success, duration) = pipeline.query_duration(Gst.Format.TIME)
if not success:
_log.warning('query_duration failed')
duration = 0 # XXX
return
seek_to = int(duration * int(percent * 100) / 100)
_log.debug('Seeking to {0} of {1}'.format(
seek_to / gst.SECOND, duration / gst.SECOND))
seek = pipeline.seek_simple(gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH, seek_to)
float(seek_to) / Gst.SECOND, float(duration) / Gst.SECOND))
seek = pipeline.seek_simple(Gst.Format.TIME, Gst.SeekFlags.FLUSH, seek_to)
if not seek:
_log.warning('seek failed')
return
# get sample, retrieve its format and save
sample = appsink.emit("pull-preroll")
if not sample:
@@ -112,16 +123,20 @@ def capture_thumb(video_path, dest_path, width=None, height=None, percent=0.5):
caps = sample.get_caps()
if not caps:
_log.warning('could not get snapshot format')
return
structure = caps.get_structure(0)
(success, width) = structure.get_int('width')
(success, height) = structure.get_int('height')
buffer = sample.get_buffer()
# get the image from the buffer and save it to disk
im = Image.frombytes('RGB', (width, height),
buffer.extract_dup(0, buffer.get_size()))
im.save(dest_path)
_log.info('thumbnail saved to {0}'.format(dest_path))
# cleanup
pipeline.set_state(gst.STATE_NULL)
pipeline.set_state(Gst.State.NULL)
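Calling the thumbnailer is then a one-liner; a minimal usage sketch (the paths are hypothetical):

# thumbnail from the frame at 50% of the video, scaled to 180px wide
capture_thumb('/tmp/input.webm', '/tmp/thumb.png', width=180, percent=0.5)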
class VideoTranscoder(object):
@@ -130,16 +145,12 @@ class VideoTranscoder(object):
Transcodes the SRC video file to a VP8 WebM video file at DST
- Does the same thing as VideoThumbnailer, but produces a WebM vp8
and vorbis video file.
- The VideoTranscoder exceeds the VideoThumbnailer in the way
that it was refined afterwards and therefore is done more
correctly.
- Produces a WebM vp8 and vorbis video file.
'''
def __init__(self):
_log.info('Initializing VideoTranscoder...')
self.progress_percentage = None
self.loop = gobject.MainLoop()
self.loop = GObject.MainLoop()
def transcode(self, src, dst, **kwargs):
'''
@@ -172,152 +183,85 @@ class VideoTranscoder(object):
if not type(self.destination_dimensions) == tuple:
raise Exception('dimensions must be tuple: (width, height)')
self._setup()
self._run()
# XXX: This could be a static method.
def discover(self, src):
'''
Discover properties about a media file
'''
_log.info('Discovering {0}'.format(src))
self.source_path = src
self._setup_discover(discovered_callback=self.__on_discovered)
self.discoverer.discover()
self.loop.run()
if hasattr(self, '_discovered_data'):
return self._discovered_data.__dict__
else:
return None
def __on_discovered(self, data, is_media):
_log.debug('Discovered: {0}'.format(data))
if not is_media:
self.__stop()
raise Exception('Could not discover {0}'.format(self.source_path))
self._discovered_data = data
self.__stop_mainloop()
def _setup(self):
self._setup_discover()
self._setup_pipeline()
def _run(self):
_log.info('Discovering...')
self.discoverer.discover()
_log.info('Done')
self.data = discover(self.source_path)
self._link_elements()
self.__setup_videoscale_capsfilter()
self.pipeline.set_state(Gst.State.PLAYING)
_log.info('Transcoding...')
_log.debug('Initializing MainLoop()')
self.loop.run()
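For context: the transcoder blocks in this GLib main loop and is driven entirely by bus messages. The _setup_bus call wired up at the end of _link_elements boils down to roughly this sketch (only the first line is visible in the hunk further down; add_signal_watch is assumed, as it is the standard way to get 'message' signals):

self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()  # make the bus emit 'message' signals
self.bus.connect('message', self._on_message)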
def _setup_discover(self, **kw):
_log.debug('Setting up discoverer')
self.discoverer = discoverer.Discoverer(self.source_path)
# Connect self.__discovered to the 'discovered' event
self.discoverer.connect(
'discovered',
kw.get('discovered_callback', self.__discovered))
def __discovered(self, data, is_media):
'''
Callback for media discoverer.
'''
if not is_media:
self.__stop()
raise Exception('Could not discover {0}'.format(self.source_path))
_log.debug('__discovered, data: {0}'.format(data.__dict__))
self.data = data
# Launch things that should be done after discovery
self._link_elements()
self.__setup_videoscale_capsfilter()
# Tell the transcoding pipeline to start running
self.pipeline.set_state(gst.STATE_PLAYING)
_log.info('Transcoding...')
def _setup_pipeline(self):
_log.debug('Setting up transcoding pipeline')
# Create the pipeline bin.
self.pipeline = gst.Pipeline('VideoTranscoderPipeline')
self.pipeline = Gst.Pipeline.new('VideoTranscoderPipeline')
# Create all GStreamer elements, starting with
# filesrc & decoder
self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
self.filesrc = Gst.ElementFactory.make('filesrc', 'filesrc')
self.filesrc.set_property('location', self.source_path)
self.pipeline.add(self.filesrc)
self.decoder = gst.element_factory_make('decodebin2', 'decoder')
self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
self.decoder = Gst.ElementFactory.make('decodebin', 'decoder')
self.decoder.connect('pad-added', self._on_dynamic_pad)
self.pipeline.add(self.decoder)
# Video elements
self.videoqueue = gst.element_factory_make('queue', 'videoqueue')
self.videoqueue = Gst.ElementFactory.make('queue', 'videoqueue')
self.pipeline.add(self.videoqueue)
self.videorate = gst.element_factory_make('videorate', 'videorate')
self.videorate = Gst.ElementFactory.make('videorate', 'videorate')
self.pipeline.add(self.videorate)
self.ffmpegcolorspace = gst.element_factory_make(
'ffmpegcolorspace', 'ffmpegcolorspace')
self.pipeline.add(self.ffmpegcolorspace)
self.videoconvert = Gst.ElementFactory.make('videoconvert',
'videoconvert')
self.pipeline.add(self.videoconvert)
self.videoscale = gst.element_factory_make('ffvideoscale', 'videoscale')
#self.videoscale.set_property('method', 2) # I'm not sure this works
#self.videoscale.set_property('add-borders', 0)
self.videoscale = Gst.ElementFactory.make('videoscale', 'videoscale')
self.pipeline.add(self.videoscale)
self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
self.capsfilter = Gst.ElementFactory.make('capsfilter', 'capsfilter')
self.pipeline.add(self.capsfilter)
self.vp8enc = gst.element_factory_make('vp8enc', 'vp8enc')
self.vp8enc.set_property('quality', self.vp8_quality)
self.vp8enc = Gst.ElementFactory.make('vp8enc', 'vp8enc')
self.vp8enc.set_property('threads', self.vp8_threads)
self.vp8enc.set_property('max-latency', 25)
self.pipeline.add(self.vp8enc)
# Audio elements
self.audioqueue = gst.element_factory_make('queue', 'audioqueue')
self.audioqueue = Gst.ElementFactory.make('queue', 'audioqueue')
self.pipeline.add(self.audioqueue)
self.audiorate = gst.element_factory_make('audiorate', 'audiorate')
self.audiorate = Gst.ElementFactory.make('audiorate', 'audiorate')
self.audiorate.set_property('tolerance', 80000000)
self.pipeline.add(self.audiorate)
self.audioconvert = gst.element_factory_make('audioconvert', 'audioconvert')
self.audioconvert = Gst.ElementFactory.make('audioconvert', 'audioconvert')
self.pipeline.add(self.audioconvert)
self.audiocapsfilter = gst.element_factory_make('capsfilter',
'audiocapsfilter')
audiocaps = ['audio/x-raw-float']
self.audiocapsfilter.set_property(
'caps',
gst.caps_from_string(
','.join(audiocaps)))
self.audiocapsfilter = Gst.ElementFactory.make('capsfilter',
'audiocapsfilter')
audiocaps = Gst.Caps.new_empty()
audiocaps_struct = Gst.Structure.new_empty('audio/x-raw')
audiocaps.append_structure(audiocaps_struct)
self.audiocapsfilter.set_property('caps', audiocaps)
self.pipeline.add(self.audiocapsfilter)
self.vorbisenc = gst.element_factory_make('vorbisenc', 'vorbisenc')
self.vorbisenc = Gst.ElementFactory.make('vorbisenc', 'vorbisenc')
self.vorbisenc.set_property('quality', self.vorbis_quality)
self.pipeline.add(self.vorbisenc)
# WebMmux & filesink
self.webmmux = gst.element_factory_make('webmmux', 'webmmux')
self.webmmux = Gst.ElementFactory.make('webmmux', 'webmmux')
self.pipeline.add(self.webmmux)
self.filesink = gst.element_factory_make('filesink', 'filesink')
self.filesink = Gst.ElementFactory.make('filesink', 'filesink')
self.filesink.set_property('location', self.destination_path)
self.pipeline.add(self.filesink)
# Progressreport
self.progressreport = gst.element_factory_make(
self.progressreport = Gst.ElementFactory.make(
'progressreport', 'progressreport')
# Update every second
self.progressreport.set_property('update-freq', 1)
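Note that the 0.10-era 'quality' property is dropped above with no replacement: vp8enc in 1.0 has no such property. If constant-quality encoding is wanted, something like the following is the usual substitute; the property names come from the 1.0 vp8enc but should be verified with gst-inspect-1.0 (treat this as an assumption):

# hypothetical mapping of the old 0.10 'quality' setting
self.vp8enc.set_property('end-usage', 'cq')  # constant-quality mode
self.vp8enc.set_property('cq-level', 10)     # lower generally means higher quality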
@@ -336,48 +280,41 @@ class VideoTranscoder(object):
# 'new-decoded-pad' which links decoded src pads to either a video
# or audio sink
self.filesrc.link(self.decoder)
# Link all the video elements in a row to webmmux
gst.element_link_many(
self.videoqueue,
self.videorate,
self.ffmpegcolorspace,
self.videoscale,
self.capsfilter,
self.vp8enc,
self.webmmux)
# link the rest
self.videoqueue.link(self.videorate)
self.videorate.link(self.videoconvert)
self.videoconvert.link(self.videoscale)
self.videoscale.link(self.capsfilter)
self.capsfilter.link(self.vp8enc)
self.vp8enc.link(self.webmmux)
if self.data.get_audio_streams():
# Link all the audio elements in a row to webmux
gst.element_link_many(
self.audioqueue,
self.audiorate,
self.audioconvert,
self.audiocapsfilter,
self.vorbisenc,
self.webmmux)
gst.element_link_many(
self.webmmux,
self.progressreport,
self.filesink)
# Link all the audio elements in a row to webmmux
self.audioqueue.link(self.audiorate)
self.audiorate.link(self.audioconvert)
self.audioconvert.link(self.audiocapsfilter)
self.audiocapsfilter.link(self.vorbisenc)
self.vorbisenc.link(self.webmmux)
self.webmmux.link(self.progressreport)
self.progressreport.link(self.filesink)
# Setup the message bus and connect _on_message to the pipeline
self._setup_bus()
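A caveat with the pairwise linking above: Gst.Element.link() returns a boolean instead of raising, so a failed link passes silently. A small defensive sketch (not part of the commit):

if not self.videoqueue.link(self.videorate):
    _log.error('could not link videoqueue to videorate')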
def _on_dynamic_pad(self, dbin, pad, islast):
def _on_dynamic_pad(self, dbin, pad):
'''
Callback called when ``decodebin2`` has a pad that we can connect to
Callback called when ``decodebin`` has a pad that we can connect to
'''
# Intersect the capabilities of the video sink and the pad src
# Then check if they have no common capabilities.
if self.ffmpegcolorspace.get_pad_template('sink')\
.get_caps().intersect(pad.get_caps()).is_empty():
if (self.videorate.get_static_pad('sink').get_pad_template()
.get_caps().intersect(pad.query_caps()).is_empty()):
# It is NOT a video src pad.
pad.link(self.audioqueue.get_pad('sink'))
pad.link(self.audioqueue.get_static_pad('sink'))
else:
# It IS a video src pad.
pad.link(self.videoqueue.get_pad('sink'))
_log.debug('linking video to the pad dynamically')
pad.link(self.videoqueue.get_static_pad('sink'))
def _setup_bus(self):
self.bus = self.pipeline.get_bus()
@@ -388,73 +325,53 @@ class VideoTranscoder(object):
'''
Sets up the output format (width, height) for the video
'''
caps = ['video/x-raw-yuv', 'pixel-aspect-ratio=1/1', 'framerate=30/1']
if self.data.videoheight > self.data.videowidth:
# Whoa! We have ourselves a portrait video!
caps.append('height={0}'.format(
self.destination_dimensions[1]))
caps_struct = Gst.Structure.new_empty('video/x-raw')
caps_struct.set_value('pixel-aspect-ratio', Gst.Fraction(1, 1))
caps_struct.set_value('framerate', Gst.Fraction(30, 1))
video_info = self.data.get_video_streams()[0]
if video_info.get_height() > video_info.get_width():
# portrait
caps_struct.set_value('height', self.destination_dimensions[1])
else:
# It's a landscape, phew, how normal.
caps.append('width={0}'.format(
self.destination_dimensions[0]))
self.capsfilter.set_property(
'caps',
gst.caps_from_string(
','.join(caps)))
# landscape
caps_struct.set_value('width', self.destination_dimensions[0])
caps = Gst.Caps.new_empty()
caps.append_structure(caps_struct)
self.capsfilter.set_property('caps', caps)
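Building the caps through Gst.Structure.set_value works; for reference, the same caps can also be written in one line with Gst.Caps.from_string (a sketch, assuming a 640-pixel-wide landscape target):

caps = Gst.Caps.from_string(
    'video/x-raw,pixel-aspect-ratio=1/1,framerate=30/1,width=640')
self.capsfilter.set_property('caps', caps)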
def _on_message(self, bus, message):
_log.debug((bus, message, message.type))
t = message.type
if message.type == gst.MESSAGE_EOS:
self._discover_dst_and_stop()
_log.info('Done')
elif message.type == gst.MESSAGE_ELEMENT:
if message.structure.get_name() == 'progress':
data = dict(message.structure)
# Update progress state if it has changed
if self.progress_percentage != data.get('percent'):
self.progress_percentage = data.get('percent')
if self._progress_callback:
self._progress_callback(data.get('percent'))
_log.info('{percent}% done...'.format(
percent=data.get('percent')))
_log.debug(data)
elif t == gst.MESSAGE_ERROR:
_log.error((bus, message))
if message.type == Gst.MessageType.EOS:
self.dst_data = discover(self.destination_path)
self.__stop()
_log.info('Done')
elif message.type == Gst.MessageType.ELEMENT:
if message.has_name('progress'):
structure = message.get_structure()
# Update progress state if it has changed
(success, percent) = structure.get_int('percent')
if self.progress_percentage != percent and success:
self.progress_percentage = percent
if self._progress_callback:
self._progress_callback(percent)
_log.info('{percent}% done...'.format(percent=percent))
elif message.type == Gst.MessageType.ERROR:
_log.error('Got error: {0}'.format(message.parse_error()))
self.__stop()
def _discover_dst_and_stop(self):
self.dst_discoverer = discoverer.Discoverer(self.destination_path)
self.dst_discoverer.connect('discovered', self.__dst_discovered)
self.dst_discoverer.discover()
def __dst_discovered(self, data, is_media):
self.dst_data = data
self.__stop()
def __stop(self):
_log.debug(self.loop)
if hasattr(self, 'pipeline'):
# Stop executing the pipeline
self.pipeline.set_state(gst.STATE_NULL)
self.pipeline.set_state(Gst.State.NULL)
# This kills the loop, mercifully
gobject.idle_add(self.__stop_mainloop)
GObject.idle_add(self.__stop_mainloop)
def __stop_mainloop(self):
'''
Wrapper for gobject.MainLoop.quit()
Wrapper for GObject.MainLoop.quit()
This wrapper lets us see whether self.loop.quit has been called
'''

View File

@@ -33,27 +33,33 @@ def skip_transcode(metadata, size):
medium_config = mgg.global_config['media:medium']
_log.debug('skip_transcode config: {0}'.format(config))
if config['mime_types'] and metadata.get('mimetype'):
if not metadata['mimetype'] in config['mime_types']:
tags = metadata.get_tags()
# get_string returns a (success, value) tuple
if config['mime_types'] and tags.get_string('mimetype')[0]:
if tags.get_string('mimetype')[1] not in config['mime_types']:
return False
if config['container_formats'] and metadata['tags'].get('container-format'):
if not metadata['tags']['container-format'] in config['container_formats']:
if config['container_formats'] and tags.get_string('container-format')[0]:
if (tags.get_string('container-format')[1] not in
config['container_formats']):
return False
if config['video_codecs'] and metadata['tags'].get('video-codec'):
if not metadata['tags']['video-codec'] in config['video_codecs']:
if (config['video_codecs'] and
tags.get_string('video-codec')[0]):
if (tags.get_string('video-codec')[1] not in
config['video_codecs']):
return False
if config['audio_codecs'] and metadata['tags'].get('audio-codec'):
if not metadata['tags']['audio-codec'] in config['audio_codecs']:
if (config['audio_codecs'] and
tags.get_string('audio-codec')[0]):
if (tags.get_string('audio-codec')[1] not in
config['audio_codecs']):
return False
video_info = metadata.get_video_streams()[0]
if config['dimensions_match']:
if not metadata['videoheight'] <= size[1]:
if not video_info.get_height() <= size[1]:
return False
if not metadata['videowidth'] <= size[0]:
if not video_info.get_width() <= size[0]:
return False
return True

View File

@@ -378,12 +378,11 @@ def store_public(entry, keyname, local_file, target_name=None,
entry.media_files[keyname], target_filepath)
if delete_if_exists:
mgg.public_store.delete_file(entry.media_files[keyname])
try:
mgg.public_store.copy_local_to_storage(local_file, target_filepath)
except:
except Exception as e:
_log.error(u'Exception happened: {0}'.format(e))
raise PublicStoreFail(keyname=keyname)
# raise an error if the file failed to copy
if not mgg.public_store.file_exists(target_filepath):
raise PublicStoreFail(keyname=keyname)

View File

@@ -15,57 +15,117 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import tempfile
import shutil
import os
import pytest
from contextlib import contextmanager
import logging
import imghdr
#TODO: this should be skipped if video plugin is not enabled
import pygst
pygst.require('0.10')
import gst
#os.environ['GST_DEBUG'] = '4,python:4'
from mediagoblin.media_types.video.transcoders import capture_thumb
#TODO: this should be skipped if video plugin is not enabled
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
Gst.init(None)
from mediagoblin.media_types.video.transcoders import (capture_thumb,
VideoTranscoder)
from mediagoblin.media_types.tools import discover
@contextmanager
def create_data(suffix):
def create_data(suffix=None, make_audio=False):
video = tempfile.NamedTemporaryFile()
src = gst.element_factory_make('videotestsrc')
src.set_property('num-buffers', 50)
enc = gst.element_factory_make('theoraenc')
mux = gst.element_factory_make('oggmux')
dst = gst.element_factory_make('filesink')
src = Gst.ElementFactory.make('videotestsrc', None)
src.set_property('num-buffers', 10)
videorate = Gst.ElementFactory.make('videorate', None)
enc = Gst.ElementFactory.make('theoraenc', None)
mux = Gst.ElementFactory.make('oggmux', None)
dst = Gst.ElementFactory.make('filesink', None)
dst.set_property('location', video.name)
pipeline = gst.Pipeline()
pipeline.add(src, enc, mux, dst)
gst.element_link_many(src, enc, mux, dst)
pipeline.set_state(gst.STATE_PLAYING)
# wait for finish
pipeline = Gst.Pipeline()
pipeline.add(src)
pipeline.add(videorate)
pipeline.add(enc)
pipeline.add(mux)
pipeline.add(dst)
src.link(videorate)
videorate.link(enc)
enc.link(mux)
mux.link(dst)
if make_audio:
audio_src = Gst.ElementFactory.make('audiotestsrc', None)
audio_src.set_property('num-buffers', 10)
audiorate = Gst.ElementFactory.make('audiorate', None)
audio_enc = Gst.ElementFactory.make('vorbisenc', None)
pipeline.add(audio_src)
pipeline.add(audio_enc)
pipeline.add(audiorate)
audio_src.link(audiorate)
audiorate.link(audio_enc)
audio_enc.link(mux)
pipeline.set_state(Gst.State.PLAYING)
state = pipeline.get_state(3 * Gst.SECOND)
assert state[0] == Gst.StateChangeReturn.SUCCESS
bus = pipeline.get_bus()
message = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE,
gst.MESSAGE_ERROR | gst.MESSAGE_EOS)
thumb = tempfile.NamedTemporaryFile(suffix=suffix)
pipeline.set_state(gst.STATE_NULL)
yield (video.name, thumb.name)
message = bus.timed_pop_filtered(
3 * Gst.SECOND,
Gst.MessageType.ERROR | Gst.MessageType.EOS)
pipeline.set_state(Gst.State.NULL)
if suffix:
result = tempfile.NamedTemporaryFile(suffix=suffix)
else:
result = tempfile.NamedTemporaryFile()
yield (video.name, result.name)
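Since create_data is a context manager built on NamedTemporaryFile, both files disappear when the block exits; a hedged usage sketch:

with create_data(suffix='.png', make_audio=True) as (video_path, thumb_path):
    capture_thumb(video_path, thumb_path, width=40)
# the temporary video and thumbnail are cleaned up here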
#TODO: this should be skipped if video plugin is not enabled
def test_thumbnails():
'''
Test thumbnails generation.
1. Create a video from gst's videotestsrc
3. Capture thumbnail
4. Remove it
1. Create a video (+audio) from gst's videotestsrc
2. Capture thumbnail
3. Everything should get removed because of temp files usage
'''
test_formats = [('.png', 'png'), ('.jpg', 'jpeg'), ('.gif', 'gif')]
for suffix, format in test_formats:
with create_data(suffix) as (video_name, thumbnail_name):
capture_thumb(video_name, thumbnail_name, width=40)
# check if png
# check result file format
assert imghdr.what(thumbnail_name) == format
# TODO: check height and width
# FIXME: it doesn't work with small width, say, 10px. This should be
# fixed somehow
suffix, format = test_formats[0]
with create_data(suffix, True) as (video_name, thumbnail_name):
capture_thumb(video_name, thumbnail_name, width=40)
assert imghdr.what(thumbnail_name) == format
with create_data(suffix, True) as (video_name, thumbnail_name):
capture_thumb(video_name, thumbnail_name, width=10) # smaller width
assert imghdr.what(thumbnail_name) == format
with create_data(suffix, True) as (video_name, thumbnail_name):
capture_thumb(video_name, thumbnail_name, width=100) # bigger width
assert imghdr.what(thumbnail_name) == format
def test_transcoder():
# test without audio
with create_data() as (video_name, result_name):
transcoder = VideoTranscoder()
transcoder.transcode(
video_name, result_name,
vp8_quality=8,
vp8_threads=0, # autodetect
vorbis_quality=0.3,
dimensions=(640, 640))
assert len(discover(result_name).get_video_streams()) == 1
# test with audio
with create_data(make_audio=True) as (video_name, result_name):
transcoder = VideoTranscoder()
transcoder.transcode(
video_name, result_name,
vp8_quality=8,
vp8_threads=0, # autodetect
vorbis_quality=0.3,
dimensions=(640, 640))
assert len(discover(result_name).get_video_streams()) == 1
assert len(discover(result_name).get_audio_streams()) == 1