Added new thumbnailer, renamed old.

Joar Wandborg 2011-10-27 02:55:12 +02:00
parent 959b0df398
commit 206ef74943
2 changed files with 474 additions and 194 deletions


@@ -116,118 +116,6 @@ def process_video(entry):
    # Save the MediaEntry
    entry.save()
def __create_thumbnail(info):
thumbnail = tempfile.NamedTemporaryFile()
logger.info('thumbnailing...')
transcoders.VideoThumbnailer(info['tmp_file'].name, thumbnail.name)
logger.debug('Done thumbnailing')
os.remove(info['tmp_file'].name)
mgg.public_store.get_file(info['thumb_filepath'], 'wb').write(
thumbnail.read())
info['entry']['media_files']['thumb'] = info['thumb_filepath']
info['entry'].save()
def __close_processing(queue, qentry, info, **kwargs):
'''
Updates MediaEntry, moves files, handles errors
'''
if not kwargs.get('error'):
logger.info('Transcoding successful')
qentry.transcoder.stop()
gobject.idle_add(info['loop'].quit)
info['loop'].quit() # Do I have to do this again?
logger.info('Saving files...')
# Write the transcoded media to the storage system
with info['tmp_file'] as tmp_file:
mgg.public_store.get_file(info['medium_filepath'], 'wb').write(
tmp_file.read())
info['entry']['media_files']['medium'] = info['medium_filepath']
# we have to re-read because unlike PIL, not everything reads
# things in string representation :)
queued_file = file(info['queued_filename'], 'rb')
with queued_file:
original_filepath = create_pub_filepath(
info['entry'],
info['queued_filepath'][-1])
with mgg.public_store.get_file(original_filepath, 'wb') as \
original_file:
original_file.write(queued_file.read())
mgg.queue_store.delete_file(info['queued_filepath'])
logger.debug('...Done')
info['entry']['queued_media_file'] = []
media_files_dict = info['entry'].setdefault('media_files', {})
media_files_dict['original'] = original_filepath
info['entry']['state'] = u'processed'
info['entry']['media_data'][u'preset'] = info['preset'].name
__create_thumbnail(info)
info['entry'].save()
else:
qentry.transcoder.stop()
gobject.idle_add(info['loop'].quit)
info['loop'].quit()
info['entry']['state'] = u'failed'
info['entry'].save()
# clean up workbench
info['workbench'].destroy_self()
def _transcoding_start(queue, qentry, info):
logger.info('-> Starting transcoding')
logger.debug((queue, qentry, info))
def _transcoding_complete(*args):
__close_processing(*args)
logger.debug(*args)
def _transcoding_error(queue, qentry, arg, info):
logger.info('Error')
__close_processing(queue, qentry, info, error=True)
def _transcoding_pass_setup(queue, qentry, options):
logger.info('Pass setup')
logger.debug((queue, qentry, options))
def check_interrupted():
"""
Check whether we have been interrupted by Ctrl-C and stop the
transcoder.
"""
if interrupted:
try:
source = transcoder.pipe.get_by_name("source")
source.send_event(gst.event_new_eos())
except:
# Something pretty bad happened... just exit!
gobject.idle_add(loop.quit)
return False
return True
def create_pub_filepath(entry, filename):
    return mgg.public_store.get_unique_filepath(


@@ -15,20 +15,27 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import division
import os
os.environ["GST_DEBUG_DUMP_DOT_DIR"] = "/tmp"
os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
import sys
import logging
import pdb
import urllib
_log = logging.getLogger(__name__)
logging.basicConfig()
_log.setLevel(logging.DEBUG)
import gtk
try:
    import gobject
    gobject.threads_init()
except:
-    _log.error('Could not import gobject')
-    raise Exception()
+    raise Exception('gobject could not be found')
try:
    import pygst
@@ -37,11 +44,288 @@ try:
    from gst import pbutils
    from gst.extend import discoverer
except:
-    _log.error('pygst could not be imported')
-    raise Exception()
+    raise Exception('gst/pygst 0.10 could not be found')


class VideoThumbnailer:
# Declaration of thumbnailer states
STATE_NULL = 0
STATE_HALTING = 1
STATE_PROCESSING = 2
# The current thumbnailer state
state = STATE_NULL
# This will contain the thumbnailing pipeline
thumbnail_pipeline = None
buffer_probes = {}
errors = []
def __init__(self, source_path, dest_path):
'''
Set up playbin pipeline in order to get video properties.
Initializes and runs the gobject.MainLoop()
'''
self.source_path = source_path
self.dest_path = dest_path
self.loop = gobject.MainLoop()
# Set up the playbin. It will be used to discover certain
# properties of the input file
self.playbin = gst.element_factory_make('playbin')
self.videosink = gst.element_factory_make('fakesink', 'videosink')
self.playbin.set_property('video-sink', self.videosink)
#self.audiosink = gst.element_factory_make('fakesink', 'audiosink')
#self.playbin.set_property('audio-sink', self.audiosink)
self.bus = self.playbin.get_bus()
self.bus.add_signal_watch()
self.watch_id = self.bus.connect('message', self._on_bus_message)
self.playbin.set_property('uri', 'file:{0}'.format(
urllib.pathname2url(self.source_path)))
self.playbin.set_state(gst.STATE_PAUSED)
self.run()
def run(self):
self.loop.run()
def _on_bus_message(self, bus, message):
_log.debug(' BUS MESSAGE: {0}'.format(message))
if message.type == gst.MESSAGE_ERROR:
gobject.idle_add(self._on_bus_error)
elif message.type == gst.MESSAGE_STATE_CHANGED:
# The pipeline state has changed
# Parse state changing data
_prev, state, _pending = message.parse_state_changed()
_log.debug('State changed: {0}'.format(state))
if state == gst.STATE_PAUSED:
if message.src == self.playbin:
gobject.idle_add(self._on_bus_paused)
def _on_bus_paused(self):
'''
Set up thumbnailing pipeline
'''
current_video = self.playbin.get_property('current-video')
if current_video == 0:
_log.debug('Found current video from playbin')
else:
_log.error('Could not get any current video from playbin!')
self.duration = self._get_duration(self.playbin)
_log.info('Video length: {0}'.format(self.duration / gst.SECOND))
_log.info('Setting up thumbnailing pipeline')
self.thumbnail_pipeline = gst.parse_launch(
'filesrc location="{0}" ! decodebin ! '
'ffmpegcolorspace ! videoscale ! '
'video/x-raw-rgb,depth=24,bpp=24,pixel-aspect-ratio=1/1,width=180 ! '
'fakesink signal-handoffs=True'.format(self.source_path))
self.thumbnail_bus = self.thumbnail_pipeline.get_bus()
self.thumbnail_bus.add_signal_watch()
self.thumbnail_watch_id = self.thumbnail_bus.connect(
'message', self._on_thumbnail_bus_message)
self.thumbnail_pipeline.set_state(gst.STATE_PAUSED)
#gobject.timeout_add(3000, self._on_timeout)
return False
def _on_thumbnail_bus_message(self, bus, message):
_log.debug('Thumbnail bus called, message: {0}'.format(message))
if message.type == gst.MESSAGE_ERROR:
_log.error(message)
gobject.idle_add(self._on_bus_error)
if message.type == gst.MESSAGE_STATE_CHANGED:
_prev, state, _pending = message.parse_state_changed()
if (state == gst.STATE_PAUSED and
not self.state == self.STATE_PROCESSING and
message.src == self.thumbnail_pipeline):
_log.info('Pipeline paused, processing')
self.state = self.STATE_PROCESSING
for sink in self.thumbnail_pipeline.sinks():
name = sink.get_name()
factoryname = sink.get_factory().get_name()
if factoryname == 'fakesink':
sinkpad = sink.get_pad('sink')
self.buffer_probes[name] = sinkpad.add_buffer_probe(
self.buffer_probe_handler, name)
_log.info('Added buffer probe')
break
# Apply the wadsworth constant, fallback to 1 second
seek_amount = max(self.duration / 100 * 30, 1 * gst.SECOND)
_log.debug('seek amount: {0}'.format(seek_amount))
seek_result = self.thumbnail_pipeline.seek(
1.0,
gst.FORMAT_TIME,
gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
gst.SEEK_TYPE_SET,
seek_amount,
gst.SEEK_TYPE_NONE,
0)
'''
seek_result = self.thumbnail_pipeline.seek_simple(
gst.FORMAT_TIME,
gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
seek_amount)
'''
if not seek_result:
self.errors.append('COULD_NOT_SEEK')
_log.error('Couldn\'t seek! result: {0}'.format(
seek_result))
_log.info(message)
self.shutdown()
else:
pass
#self.thumbnail_pipeline.set_state(gst.STATE_PAUSED)
#pdb.set_trace()
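A worked example of the seek target above, assuming gst.SECOND is the usual GStreamer nanosecond constant (10**9): the pipeline seeks to 30% of the clip (the "Wadsworth constant"), but never earlier than one second.

    # hypothetical 60-second clip
    duration = 60 * gst.SECOND
    seek_amount = max(duration / 100 * 30, 1 * gst.SECOND)
    # -> 18 * gst.SECOND, i.e. the thumbnail frame is pulled
    #    from 18 seconds into the video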
def buffer_probe_handler_real(self, pad, buff, name):
'''
Capture buffers as gdk_pixbufs when told to.
'''
try:
caps = buff.caps
if caps is None:
_log.error('No caps passed to buffer probe handler!')
self.shutdown()
return False
_log.debug('caps: {0}'.format(caps))
filters = caps[0]
width = filters["width"]
height = filters["height"]
pixbuf = gtk.gdk.pixbuf_new_from_data(
buff.data, gtk.gdk.COLORSPACE_RGB, False, 8,
width, height, width * 3)
# NOTE: 200x136 is sort of arbitrary. it's larger than what
# the ui uses at the time of this writing.
# new_width, new_height = scaled_size((width, height), (200, 136))
#pixbuf = pixbuf.scale_simple(
#new_width, new_height, gtk.gdk.INTERP_BILINEAR)
pixbuf.save(self.dest_path, 'jpeg')
_log.info('Saved thumbnail')
del pixbuf
self.shutdown()
except gst.QueryError:
pass
return False
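A note on the pixbuf call above: the thumbnail pipeline's caps force 24 bpp RGB, so every pixel is exactly 3 bytes, which is where the width * 3 rowstride comes from. A minimal sketch with the 180-pixel width requested by those caps:

    # assuming the 180 px wide, 24 bpp RGB frames negotiated above
    width = 180
    bytes_per_pixel = 3                    # 24 bpp / 8
    rowstride = width * bytes_per_pixel    # 540 bytes per row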
def buffer_probe_handler(self, pad, buff, name):
'''
Proxy function for buffer_probe_handler_real
'''
gobject.idle_add(
lambda: self.buffer_probe_handler_real(pad, buff, name))
return True
def _get_duration(self, pipeline, retries=0):
'''
Get the duration of a pipeline.
Retries 5 times.
'''
if retries == 5:
return 0
try:
return pipeline.query_duration(gst.FORMAT_TIME)[0]
except gst.QueryError:
return self._get_duration(pipeline, retries + 1)
def _on_timeout(self):
_log.error('TIMEOUT! DROP EVERYTHING!')
self.shutdown()
def _on_bus_error(self, *args):
_log.error('AHAHAHA! Error! args: {0}'.format(args))
def shutdown(self):
'''
Tell gobject to call __halt when the mainloop is idle.
'''
_log.info('Shutting down')
self.__halt()
def __halt(self):
'''
Halt all pipelines and shut down the main loop
'''
_log.info('Halting...')
self.state = self.STATE_HALTING
self.__disconnect()
gobject.idle_add(self.__halt_final)
def __disconnect(self):
_log.debug('Disconnecting...')
if not self.playbin is None:
self.playbin.set_state(gst.STATE_NULL)
for sink in self.playbin.sinks():
name = sink.get_name()
factoryname = sink.get_factory().get_name()
_log.debug('Disconnecting {0}'.format(name))
if factoryname == "fakesink":
pad = sink.get_pad("sink")
pad.remove_buffer_probe(self.buffer_probes[name])
del self.buffer_probes[name]
self.playbin = None
if self.bus is not None:
self.bus.disconnect(self.watch_id)
self.bus = None
def __halt_final(self):
_log.info('Done')
if self.errors:
_log.error(','.join(self.errors))
self.loop.quit()
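A minimal usage sketch of the class added above. The constructor blocks until its internal gobject.MainLoop finishes, so a single call both thumbnails and returns; the module path and file names here are placeholders, not part of this commit.

    # hypothetical caller; adjust the import to wherever this module lives
    from transcoders import VideoThumbnailer

    VideoThumbnailer('/tmp/example-video.webm', '/tmp/example-thumb.jpg')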
class DeprecatedVideoThumbnailer:
    '''
    Creates a video thumbnail
@@ -50,14 +334,18 @@ class VideoThumbnailer:
    - Launches gobject.MainLoop, this triggers the discoverer to start running
    - Once the discoverer is done, it calls the __discovered callback function
    - The __discovered callback function launches the transcoding process
    - The _on_message callback is called from the transcoding process until it
      gets a message of type gst.MESSAGE_EOS, then it calls __stop which shuts
      down the gobject.MainLoop
    '''
WADSWORTH_CONSTANT = 30 # percent
    def __init__(self, src, dst, **kwargs):
        _log.info('Initializing VideoThumbnailer...')

        self.loop = gobject.MainLoop()
        self.source_path = src
        self.destination_path = dst
@@ -70,7 +358,7 @@ class VideoThumbnailer:
        self._run()

    def _setup(self):
-        self._setup_pass()
+        self._setup_pipeline()
        self._setup_discover()

    def _run(self):
@@ -99,28 +387,30 @@ class VideoThumbnailer:
        self.data = data
# Run any tasks that depend on the info from the discovery
        self._on_discovered()

        # Tell the transcoding pipeline to start running
self.pipeline.set_state(gst.STATE_PLAYING)
        _log.info('Transcoding...')

    def _on_discovered(self):
        self.__setup_capsfilter()

-    def _setup_pass(self):
+    def _setup_pipeline(self):
# Create a new pipeline
        self.pipeline = gst.Pipeline('VideoThumbnailerPipeline')
# Create the elements in the pipeline
        self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
        self.filesrc.set_property('location', self.source_path)
        self.pipeline.add(self.filesrc)

        self.decoder = gst.element_factory_make('decodebin2', 'decoder')
        self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
        self.pipeline.add(self.decoder)

        self.ffmpegcolorspace = gst.element_factory_make(
'ffmpegcolorspace', 'ffmpegcolorspace')
        self.pipeline.add(self.ffmpegcolorspace)

        self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
@@ -132,70 +422,113 @@ class VideoThumbnailer:
        self.jpegenc = gst.element_factory_make('jpegenc', 'jpegenc')
        self.pipeline.add(self.jpegenc)
#self.filesink = gst.element_factory_make('filesink', 'filesink')
#self.filesink.set_property('location', self.destination_path)
#self.pipeline.add(self.filesink)
-        self.filesink = gst.element_factory_make('filesink', 'filesink')
-        self.filesink.set_property('location', self.destination_path)
-        self.pipeline.add(self.filesink)
+        self.appsink = gst.element_factory_make('appsink', 'appsink')
+        self.appsink.set_property('emit-signals', True)
+        self.appsink.connect('new-preroll', self.__on_sink_preroll)
self.pipeline.add(self.appsink)
self.progressreport = gst.element_factory_make(
'progressreport', 'progressreport')
self.progressreport.set_property('update-freq', 1)
self.pipeline.add(self.progressreport)
self.identity = gst.element_factory_make('identity', 'id')
self.pipeline.add(self.identity)
        # Link all the elements together
        self.filesrc.link(self.decoder)
        self.ffmpegcolorspace.link(self.videoscale)
        self.videoscale.link(self.capsfilter)
        self.capsfilter.link(self.jpegenc)
-        self.jpegenc.link(self.filesink)
+        self.jpegenc.link(self.progressreport)
self.progressreport.link(self.identity)
#self.identity.link(self.filesink)
self.identity.link(self.appsink)
self.pipeline.set_state(gst.STATE_PAUSED)
        self._setup_bus()
def __on_sink_preroll(self, sink):
_log.debug('SINK PREROLL!!!!')
def _on_dynamic_pad(self, dbin, pad, islast):
'''
Callback called when ``decodebin2`` has a pad that we can connect to
'''
# Intersect the capabilities of the video sink and the pad src
# Then check if they have no common capabilities.
if not self.ffmpegcolorspace.get_pad_template('sink')\
.get_caps().intersect(pad.get_caps()).is_empty():
# It IS a video src pad.
pad.link(self.ffmpegcolorspace.get_pad('sink'))
gst.DEBUG_BIN_TO_DOT_FILE(
self.pipeline,
gst.DEBUG_GRAPH_SHOW_ALL,
'ss')
    def _setup_bus(self):
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self._on_message)

    def __setup_capsfilter(self):
-        thumbsizes = self.calculate_resize()  # Returns tuple with (width, height)
+        caps = ['video/x-raw-rgb', 'pixel-aspect-ratio=1/1']
+
+        if self.data.videoheight > self.data.videowidth:
+            # Whoa! We have ourselves a portrait video!
+            caps.append('height={0}'.format(
+                    self.destination_dimensions[1]))
+        else:
+            # It's a landscape, phew, how normal.
+            caps.append('width={0}'.format(
+                    self.destination_dimensions[0]))

        self.capsfilter.set_property(
            'caps',
-            gst.caps_from_string('video/x-raw-rgb, width={width}, height={height}'.format(
-                width=thumbsizes[0],
-                height=thumbsizes[1]
-            )))
+            gst.caps_from_string(
+                ', '.join(caps)))

-    def calculate_resize(self):
-        x_ratio = self.destination_dimensions[0] / self.data.videowidth
-        y_ratio = self.destination_dimensions[1] / self.data.videoheight
-
-        if self.data.videoheight > self.data.videowidth:
-            # We're dealing with a portrait!
-            dimensions = (
-                int(self.data.videowidth * y_ratio),
-                180)
-        else:
-            dimensions = (
-                180,
-                int(self.data.videoheight * x_ratio))
-
-        return dimensions
+    def __find_wadsworth(self):
+        if self.decoder.seek_simple(
+            gst.FORMAT_PERCENT,
+            gst.SEEK_FLAG_NONE,
+            0 * 10000):
+            _log.info('Found wadsworth')
+            #pdb.set_trace()
+            #self.pipeline.set_state(gst.STATE_PLAYING)
+            self.__get_buffer()
+            self.__stop()
+        else:
+            pdb.set_trace()
+
+    def __get_buffer(self):
+        buffer = self.appsink.emit('pull-preroll')
+        open(self.destination_path, 'wb').write(buffer)
    def _on_message(self, bus, message):
-        _log.debug((bus, message))
        t = message.type
+        _log.debug((
+            t == gst.MESSAGE_ASYNC_DONE,
+            bus,
+            message))

        if t == gst.MESSAGE_EOS:
            self.__stop()
-            _log.info('Done')
+            _log.info('Got EOS')
elif t == gst.MESSAGE_ASYNC_DONE:
#pdb.set_trace()
self.__find_wadsworth()
        elif t == gst.MESSAGE_ERROR:
            _log.error((bus, message))
            self.__stop()
def _on_dynamic_pad(self, dbin, pad, islast):
'''
Callback called when ``decodebin2`` has a pad that we can connect to
'''
pad.link(
self.ffmpegcolorspace.get_pad('sink'))
    def __stop(self):
        _log.debug(self.loop)
@@ -210,8 +543,11 @@ class VideoTranscoder:
    Transcodes the SRC video file to a VP8 WebM video file at DST

-    TODO:
-    - Audio pipeline
+    - Does the same thing as VideoThumbnailer, but produces a WebM vp8
+      and vorbis video file.
+    - The VideoTranscoder exceeds the VideoThumbnailer in the way
+      that it was refined afterwards and therefore is done more
+      correctly.
    '''
    def __init__(self, src, dst, **kwargs):
        _log.info('Initializing VideoTranscoder...')
@@ -220,7 +556,9 @@ class VideoTranscoder:
        self.source_path = src
        self.destination_path = dst

        # Options
        self.destination_dimensions = kwargs.get('dimensions') or (640, 640)
        self._progress_callback = kwargs.get('progress_callback') or None

        if not type(self.destination_dimensions) == tuple:
            raise Exception('dimensions must be tuple: (width, height)')
@@ -229,8 +567,8 @@ class VideoTranscoder:
        self._run()

    def _setup(self):
-        self._setup_pass()
        self._setup_discover()
+        self._setup_pipeline()

    def _run(self):
        _log.info('Discovering...')

@@ -241,6 +579,7 @@ class VideoTranscoder:
        self.loop.run()

    def _setup_discover(self):
        _log.debug('Setting up discoverer')
        self.discoverer = discoverer.Discoverer(self.source_path)

        # Connect self.__discovered to the 'discovered' event
@@ -254,32 +593,36 @@ class VideoTranscoder:
            self.__stop()
            raise Exception('Could not discover {0}'.format(self.source_path))

-        _log.debug('__discovered, data: {0}'.format(data))
+        _log.debug('__discovered, data: {0}'.format(data.__dict__))

        self.data = data

-        self._on_discovered()
+        # Launch things that should be done after discovery
+        self._link_elements()
+        self.__setup_videoscale_capsfilter()

        # Tell the transcoding pipeline to start running
        self.pipeline.set_state(gst.STATE_PLAYING)
        _log.info('Transcoding...')

-    def _on_discovered(self):
-        self.__setup_videoscale_capsfilter()
-
-    def _setup_pass(self):
+    def _setup_pipeline(self):
+        _log.debug('Setting up transcoding pipeline')
+        # Create the pipeline bin.
        self.pipeline = gst.Pipeline('VideoTranscoderPipeline')

+        # Create all GStreamer elements, starting with
+        # filesrc & decoder
        self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
        self.filesrc.set_property('location', self.source_path)
        self.pipeline.add(self.filesrc)

        self.decoder = gst.element_factory_make('decodebin2', 'decoder')
        self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
        self.pipeline.add(self.decoder)

-        self.ffmpegcolorspace = gst.element_factory_make('ffmpegcolorspace', 'ffmpegcolorspace')
+        # Video elements
+        self.ffmpegcolorspace = gst.element_factory_make(
+            'ffmpegcolorspace', 'ffmpegcolorspace')
        self.pipeline.add(self.ffmpegcolorspace)

        self.videoscale = gst.element_factory_make('videoscale', 'videoscale')
@@ -293,11 +636,9 @@ class VideoTranscoder:
        self.vp8enc = gst.element_factory_make('vp8enc', 'vp8enc')
        self.vp8enc.set_property('quality', 6)
        self.vp8enc.set_property('threads', 2)
self.vp8enc.set_property('speed', 2)
        self.pipeline.add(self.vp8enc)
# Audio elements
# Audio
        self.audioconvert = gst.element_factory_make('audioconvert', 'audioconvert')
        self.pipeline.add(self.audioconvert)
@@ -305,7 +646,7 @@ class VideoTranscoder:
        self.vorbisenc.set_property('quality', 0.7)
        self.pipeline.add(self.vorbisenc)
# WebMmux & filesink
        self.webmmux = gst.element_factory_make('webmmux', 'webmmux')
        self.pipeline.add(self.webmmux)
@@ -313,35 +654,60 @@ class VideoTranscoder:
        self.filesink.set_property('location', self.destination_path)
        self.pipeline.add(self.filesink)
# Progressreport
self.progressreport = gst.element_factory_make(
'progressreport', 'progressreport')
# Update every second
self.progressreport.set_property('update-freq', 1)
self.progressreport.set_property('silent', True)
self.pipeline.add(self.progressreport)
def _link_elements(self):
'''
Link all the elements
This code depends on data from the discoverer and is called
from __discovered
'''
_log.debug('linking elements')
# Link the filesrc element to the decoder. The decoder then emits
# 'new-decoded-pad' which links decoded src pads to either a video
# or audio sink
        self.filesrc.link(self.decoder)

        # Link all the video elements in a link to webmux
        self.ffmpegcolorspace.link(self.videoscale)
        self.videoscale.link(self.capsfilter)
        #self.capsfilter.link(self.xvimagesink)
        self.capsfilter.link(self.vp8enc)
        self.vp8enc.link(self.webmmux)

-        # Audio
-        self.audioconvert.link(self.vorbisenc)
-        self.vorbisenc.link(self.webmmux)
+        if self.data.is_audio:
+            # Link all the audio elements in a line to webmux
+            #self.audioconvert.link(self.alsasink)
+            self.audioconvert.link(self.vorbisenc)
+            self.vorbisenc.link(self.webmmux)

-        self.webmmux.link(self.filesink)
+        self.webmmux.link(self.progressreport)
+        self.progressreport.link(self.filesink)

        # Setup the message bus and connect _on_message to the pipeline
        self._setup_bus()
    def _on_dynamic_pad(self, dbin, pad, islast):
        '''
        Callback called when ``decodebin2`` has a pad that we can connect to
        '''
-        _log.debug('Linked {0}'.format(pad))
+        # Intersect the capabilities of the video sink and the pad src
+        # Then check if they have no common capabilities.
+        #pdb.set_trace()
        if self.ffmpegcolorspace.get_pad_template('sink')\
                .get_caps().intersect(pad.get_caps()).is_empty():
-            pad.link(
-                self.audioconvert.get_pad('sink'))
+            # It is NOT a video src pad.
+            pad.link(self.audioconvert.get_pad('sink'))
        else:
-            pad.link(
-                self.ffmpegcolorspace.get_pad('sink'))
+            # It IS a video src pad.
+            pad.link(self.ffmpegcolorspace.get_pad('sink'))
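The intersect/is_empty test above is the usual pygst 0.10 idiom for telling audio pads apart from video pads when decodebin2 exposes a new pad; a stripped-down sketch of the same decision (element names assumed to match the ones created in _setup_pipeline):

    def route_decoded_pad(colorspace, audioconvert, pad):
        # caps the video branch can accept
        video_caps = colorspace.get_pad_template('sink').get_caps()
        if video_caps.intersect(pad.get_caps()).is_empty():
            pad.link(audioconvert.get_pad('sink'))   # no overlap -> audio pad
        else:
            pad.link(colorspace.get_pad('sink'))     # overlap -> video pad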
    def _setup_bus(self):
        self.bus = self.pipeline.get_bus()
@@ -349,6 +715,9 @@ class VideoTranscoder:
        self.bus.connect('message', self._on_message)

    def __setup_videoscale_capsfilter(self):
'''
Sets up the output format (width, height) for the video
'''
        caps = ['video/x-raw-yuv', 'pixel-aspect-ratio=1/1']

        if self.data.videoheight > self.data.videowidth:
@@ -364,19 +733,31 @@
            'caps',
            gst.caps_from_string(
                ', '.join(caps)))
gst.DEBUG_BIN_TO_DOT_FILE (
self.pipeline,
gst.DEBUG_GRAPH_SHOW_ALL,
'supersimple-debug-graph')
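Concretely, for a landscape source (say 1280x720 as reported by the discoverer) and the default destination_dimensions of (640, 640), the list built above joins into a single caps string; a hedged illustration:

    # hypothetical landscape input, default dimensions
    caps = ['video/x-raw-yuv', 'pixel-aspect-ratio=1/1', 'width=640']
    gst.caps_from_string(', '.join(caps))
    # equivalent to: video/x-raw-yuv, pixel-aspect-ratio=1/1, width=640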
    def _on_message(self, bus, message):
-        _log.debug((bus, message))
+        _log.debug((bus, message, message.type))
        t = message.type

        if t == gst.MESSAGE_EOS:
            self._discover_dst_and_stop()
            _log.info('Done')
elif t == gst.MESSAGE_ELEMENT:
if message.structure.get_name() == 'progress':
data = {
'structure': message.structure,
'percent': message.structure['percent'],
'total': message.structure['total'],
'current': message.structure['current']}
if self._progress_callback:
self._progress_callback(data)
_log.info('{percent}% done...'.format(
percent=data['percent']))
_log.debug(data)
        elif t == gst.MESSAGE_ERROR:
            _log.error((bus, message))
            self.__stop()
@@ -397,15 +778,25 @@ class VideoTranscoder:
    def __stop(self):
        _log.debug(self.loop)
# Stop executing the pipeline
        self.pipeline.set_state(gst.STATE_NULL)

-        gobject.idle_add(self.loop.quit)
+        # This kills the loop, mercifully
gobject.idle_add(self.__stop_mainloop)
def __stop_mainloop(self):
'''
Wrapper for gobject.MainLoop.quit()
This wrapper makes us able to see if self.loop.quit has been called
'''
_log.info('Terminating MainLoop')
self.loop.quit()
if __name__ == '__main__':
-    import os
-    os.environ["GST_DEBUG_DUMP_DOT_DIR"] = "/tmp"
-    os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
+    os.nice(19)
    from optparse import OptionParser

    parser = OptionParser(
@@ -444,5 +835,6 @@ if __name__ == '__main__':
    if options.action == 'thumbnail':
        VideoThumbnailer(*args)
    elif options.action == 'video':
-        transcoder = VideoTranscoder(*args)
-        pdb.set_trace()
+        def cb(data):
+            print('I\'m a callback!')
+        transcoder = VideoTranscoder(*args, progress_callback=cb)
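For completeness, a hedged sketch of driving the transcoder from other code rather than via this command-line block; the paths are placeholders, while the dimensions and progress_callback keyword arguments and the 'percent' key are the ones introduced in this commit.

    def on_progress(data):
        # data carries the progressreport structure plus
        # 'percent', 'total' and 'current'
        print('{0}% done'.format(data['percent']))

    transcoder = VideoTranscoder(
        '/tmp/example-input.ogv',
        '/tmp/example-output.webm',
        dimensions=(640, 640),
        progress_callback=on_progress)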