Merge remote-tracking branch 'joar/media-fixes'

Joar Wandborg 2012-04-09 20:36:50 +02:00
commit d684af2766
8 changed files with 87 additions and 46 deletions

@@ -86,12 +86,24 @@ max_height = integer(default=180)
 # Should we keep the original file?
 keep_original = boolean(default=False)
+# 0 means autodetect, autodetect means number_of_CPUs - 1
+vp8_threads = integer(default=0)
+# Range: 0..10
+vp8_quality = integer(default=8)
+# Range: -0.1..1
+vorbis_quality = float(default=0.3)
 
 [media_type:mediagoblin.media_types.audio]
 # vorbisenc qualiy
 quality = float(default=0.3)
 create_spectrogram = boolean(default=True)
+spectrogram_fft_size = integer(default=4096)
+
+[media_type:mediagoblin.media_types.ascii]
+thumbnail_font = string(default=None)
 
 [beaker.cache]
 type = string(default="file")
 data_dir = string(default="%(here)s/user_dev/beaker/cache/data")
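A minimal sketch (not part of this commit) of how the new keys are consumed at processing time. The spec above is presumably MediaGoblin's configobj spec, and the video section name below is inferred from the audio/ascii sections shown here rather than stated in the diff:

    # Sketch only; section names other than the audio/ascii ones above are inferred.
    from mediagoblin import mg_globals as mgg

    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']
    audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']

    vp8_threads = video_config['vp8_threads']        # 0 means auto-detect
    vp8_quality = video_config['vp8_quality']        # range 0..10
    vorbis_quality = video_config['vorbis_quality']  # range -0.1..1
    fft_size = audio_config['spectrogram_fft_size']  # defaults to 4096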

@@ -34,31 +34,12 @@ class AsciiToImage(object):
     - font_size: Font size, ``int``
       default: 11
     '''
-    # Font file path
-    _font = None
-    _font_size = 11
-
-    # ImageFont instance
-    _if = None
-
-    # ImageFont
-    _if_dims = None
-
-    # Image instance
-    _im = None
-
     def __init__(self, **kw):
-        if kw.get('font'):
-            self._font = kw.get('font')
-        else:
-            self._font = pkg_resources.resource_filename(
-                'mediagoblin.media_types.ascii',
-                os.path.join('fonts', 'Inconsolata.otf'))
-        if kw.get('font_size'):
-            self._font_size = kw.get('font_size')
+        self._font = kw.get('font', pkg_resources.resource_filename(
+            'mediagoblin.media_types.ascii',
+            os.path.join('fonts', 'Inconsolata.otf')))
+
+        self._font_size = kw.get('font_size', 11)
 
         self._if = ImageFont.truetype(
             self._font,

@@ -42,6 +42,7 @@ def process_ascii(entry):
     '''
     Code to process a txt file
     '''
+    ascii_config = mgg.global_config['media_type:mediagoblin.media_types.ascii']
     workbench = mgg.workbench_manager.create_workbench()
     # Conversions subdirectory to avoid collisions
     conversions_subdir = os.path.join(
@@ -77,7 +78,14 @@ def process_ascii(entry):
     tmp_thumb_filename = os.path.join(
         conversions_subdir, thumb_filepath[-1])
 
-    converter = asciitoimage.AsciiToImage()
+    ascii_converter_args = {}
+
+    if ascii_config['thumbnail_font']:
+        ascii_converter_args.update(
+            {'font': ascii_config['thumbnail_font']})
+
+    converter = asciitoimage.AsciiToImage(
+        **ascii_converter_args)
 
     thumb = converter._create_image(
         queued_file.read())
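The kwargs dict above is built conditionally so an unset thumbnail_font never reaches AsciiToImage; together with the kw.get(..., default) rewrite earlier in this merge, the bundled Inconsolata font remains the fallback. A small sketch of the pattern (the font path is a made-up example):

    # Sketch: forward an optional config value as a keyword argument only
    # when it is actually set. Passing font=None explicitly would defeat
    # kw.get('font', default), which only falls back when the key is absent.
    def build_converter_args(config):
        args = {}
        if config.get('thumbnail_font'):
            args['font'] = config['thumbnail_font']
        return args

    converter_args = build_converter_args(
        {'thumbnail_font': '/usr/share/fonts/TTF/DejaVuSansMono.ttf'})  # hypothetical path
    # converter = asciitoimage.AsciiToImage(**converter_args)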

@@ -27,7 +27,7 @@ from mediagoblin.media_types.audio.transcoders import AudioTranscoder, \
 _log = logging.getLogger(__name__)
 
 def sniff_handler(media_file, **kw):
     try:
         transcoder = AudioTranscoder()
         data = transcoder.discover(media_file.name)
     except BadMediaFail:
@@ -94,7 +94,8 @@ def process_audio(entry):
             thumbnailer.spectrogram(
                 wav_tmp.name,
                 spectrogram_tmp.name,
-                width=mgg.global_config['media:medium']['max_width'])
+                width=mgg.global_config['media:medium']['max_width'],
+                fft_size=audio_config['spectrogram_fft_size'])
 
             _log.debug('Saving spectrogram...')
             mgg.public_store.get_file(spectrogram_filepath, 'wb').write(
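The new fft_size argument sets the FFT window used when drawing the spectrogram; a larger window gives finer frequency resolution at the cost of time resolution. Rough numbers for the 4096 default, assuming a 44.1 kHz source (an illustration, not taken from this commit):

    # Illustration of the trade-off behind spectrogram_fft_size.
    sample_rate = 44100                                # Hz, assumed source rate
    fft_size = 4096                                    # new config default

    freq_resolution = sample_rate / float(fft_size)    # ~10.8 Hz per bin
    window_duration = fft_size / float(sample_rate)    # ~93 ms per analysis window
    num_bins = fft_size // 2 + 1                       # 2049 usable frequency bins
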
@@ -121,7 +122,7 @@ def process_audio(entry):
             entry.media_files['thumb'] = thumb_filepath
         else:
             entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']
 
         mgg.queue_store.delete_file(queued_filepath)
 
         entry.save()

@@ -16,15 +16,12 @@
 import tempfile
 import logging
-import os
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import mark_entry_failed, \
+from mediagoblin.processing import \
     create_pub_filepath, FilenameBuilder
 from . import transcoders
 
-logging.basicConfig()
 _log = logging.getLogger(__name__)
 _log.setLevel(logging.DEBUG)
@@ -73,7 +70,10 @@ def process_video(entry):
     with tmp_dst:
         # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
         transcoder = transcoders.VideoTranscoder()
-        transcoder.transcode(queued_filename, tmp_dst.name)
+        transcoder.transcode(queued_filename, tmp_dst.name,
+                             vp8_quality=video_config['vp8_quality'],
+                             vp8_threads=video_config['vp8_threads'],
+                             vorbis_quality=video_config['vorbis_quality'])
 
         # Push transcoded video to public storage
         _log.debug('Saving medium...')
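For reference, a hypothetical standalone call using the new keyword arguments; the file paths are placeholders, the import path is inferred from the relative import above, and video_config presumably comes from mgg.global_config as in the audio and ascii processors in this merge:

    # Hypothetical usage sketch; paths and values are placeholders.
    from mediagoblin.media_types.video import transcoders

    transcoder = transcoders.VideoTranscoder()
    transcoder.transcode('/tmp/queued-video.avi', '/tmp/transcoded.webm',
                         vp8_quality=8,       # config default, range 0..10
                         vp8_threads=0,       # 0 means auto-detect (see transcoders.py below)
                         vorbis_quality=0.3)  # config default, range -0.1..1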

@@ -72,6 +72,11 @@ class VideoThumbnailer:
         Set up playbin pipeline in order to get video properties.
 
         Initializes and runs the gobject.MainLoop()
+
+        Abstract
+        - Set up a playbin with a fake audio sink and video sink. Load the video
+          into the playbin
+        - Initialize
         '''
 
         self.errors = []
@@ -105,9 +110,10 @@ class VideoThumbnailer:
         self.loop.run()
 
     def _on_bus_message(self, bus, message):
-        _log.debug(' BUS MESSAGE: {0}'.format(message))
+        _log.debug(' thumbnail playbin: {0}'.format(message))
 
         if message.type == gst.MESSAGE_ERROR:
+            _log.error('thumbnail playbin: {0}'.format(message))
             gobject.idle_add(self._on_bus_error)
 
         elif message.type == gst.MESSAGE_STATE_CHANGED:
@@ -154,13 +160,14 @@ class VideoThumbnailer:
         return False
 
     def _on_thumbnail_bus_message(self, bus, message):
-        _log.debug('Thumbnail bus called, message: {0}'.format(message))
+        _log.debug('thumbnail: {0}'.format(message))
 
         if message.type == gst.MESSAGE_ERROR:
            _log.error(message)
            gobject.idle_add(self._on_bus_error)
 
        if message.type == gst.MESSAGE_STATE_CHANGED:
+           _log.debug('State changed')
            _prev, state, _pending = message.parse_state_changed()
 
            if (state == gst.STATE_PAUSED and
@@ -184,6 +191,7 @@ class VideoThumbnailer:
                 break
 
         # Apply the wadsworth constant, fallback to 1 second
+        # TODO: Will break if video is shorter than 1 sec
         seek_amount = max(self.duration / 100 * 30, 1 * gst.SECOND)
 
         _log.debug('seek amount: {0}'.format(seek_amount))
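A quick illustration of the TODO above, with made-up numbers: for a clip shorter than one second the 1-second fallback wins and the seek target lands past the end of the stream.

    # Illustration only. gst.SECOND is 10**9 (nanoseconds) in gst-python 0.10.
    GST_SECOND = 10 ** 9

    duration = int(0.5 * GST_SECOND)                       # a 0.5 s clip
    seek_amount = max(duration / 100 * 30, 1 * GST_SECOND)

    assert seek_amount == 1 * GST_SECOND                   # fallback dominates
    assert seek_amount > duration                          # i.e. a seek past EOS
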
@@ -204,14 +212,19 @@
                 _log.info(message)
                 self.shutdown()
             else:
-                pass
-                #self.thumbnail_pipeline.set_state(gst.STATE_PAUSED)
+                _log.debug('Seek successful')
+                self.thumbnail_pipeline.set_state(gst.STATE_PAUSED)
                 #pdb.set_trace()
+        else:
+            _log.debug('Won\'t seek: \t{0}\n\t{1}'.format(
+                self.state,
+                message.src))
 
     def buffer_probe_handler_real(self, pad, buff, name):
         '''
         Capture buffers as gdk_pixbufs when told to.
         '''
+        _log.info('Capturing frame')
         try:
             caps = buff.caps
             if caps is None:
@@ -237,14 +250,16 @@
                 self.shutdown()
 
-        except gst.QueryError:
-            pass
+        except gst.QueryError as e:
+            _log.error('QueryError: {0}'.format(e))
+
         return False
 
     def buffer_probe_handler(self, pad, buff, name):
         '''
         Proxy function for buffer_probe_handler_real
         '''
+        _log.debug('Attaching real buffer handler to gobject idle event')
         gobject.idle_add(
             lambda: self.buffer_probe_handler_real(pad, buff, name))
@@ -265,7 +280,7 @@
             return self._get_duration(pipeline, retries + 1)
 
     def _on_timeout(self):
-        _log.error('TIMEOUT! DROP EVERYTHING!')
+        _log.error('Timeout in thumbnailer!')
         self.shutdown()
 
     def _on_bus_error(self, *args):
@@ -342,8 +357,25 @@ class VideoTranscoder:
         self.source_path = src
         self.destination_path = dst
 
-        # Options
-        self.destination_dimensions = kwargs.get('dimensions') or (640, 640)
+        # vp8enc options
+        self.destination_dimensions = kwargs.get('dimensions', (640, 640))
+        self.vp8_quality = kwargs.get('vp8_quality', 8)
+        # Number of threads used by vp8enc:
+        # number of real cores - 1 as per recommendation on
+        # <http://www.webmproject.org/tools/encoder-parameters/#6-multi-threaded-encode-and-decode>
+        self.vp8_threads = kwargs.get('vp8_threads', CPU_COUNT - 1)
+
+        # 0 means auto-detect, but dict.get() only falls back to CPU_COUNT
+        # if value is None, this will correct our incompatibility with
+        # dict.get()
+        # This will also correct cases where there's only 1 CPU core, see
+        # original self.vp8_threads assignment above.
+        if self.vp8_threads == 0:
+            self.vp8_threads = CPU_COUNT
+
+        # vorbisenc options
+        self.vorbis_quality = kwargs.get('vorbis_quality', 0.3)
+
         self._progress_callback = kwargs.get('progress_callback') or None
 
         if not type(self.destination_dimensions) == tuple:
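CPU_COUNT is referenced above but not defined in this hunk; a condensed sketch of the thread-count resolution, assuming CPU_COUNT comes from multiprocessing.cpu_count() with a conservative fallback:

    # Sketch mirroring the __init__ logic above; the CPU_COUNT definition is
    # an assumption, not shown in this diff.
    try:
        import multiprocessing
        CPU_COUNT = multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        CPU_COUNT = 2

    def resolve_vp8_threads(vp8_threads=None):
        # kwargs.get('vp8_threads', CPU_COUNT - 1): only a missing value
        # falls back to "cores - 1".
        threads = CPU_COUNT - 1 if vp8_threads is None else vp8_threads
        # An explicit 0 (the config default, meaning auto-detect) and the
        # single-core case (where cores - 1 would be 0) both become CPU_COUNT.
        if threads == 0:
            threads = CPU_COUNT
        return threads
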
@@ -456,8 +488,9 @@
         self.pipeline.add(self.capsfilter)
 
         self.vp8enc = gst.element_factory_make('vp8enc', 'vp8enc')
-        self.vp8enc.set_property('quality', 6)
-        self.vp8enc.set_property('threads', 2)
+        self.vp8enc.set_property('quality', self.vp8_quality)
+        self.vp8enc.set_property('threads', self.vp8_threads)
+        self.vp8enc.set_property('max-latency', 25)
         self.pipeline.add(self.vp8enc)
 
         # Audio elements
@@ -480,7 +513,7 @@
         self.pipeline.add(self.audiocapsfilter)
 
         self.vorbisenc = gst.element_factory_make('vorbisenc', 'vorbisenc')
-        self.vorbisenc.set_property('quality', 1)
+        self.vorbisenc.set_property('quality', self.vorbis_quality)
         self.pipeline.add(self.vorbisenc)
 
         # WebMmux & filesink

@@ -80,5 +80,5 @@
     transition: opacity .1s ease-in-out;
 }
 .audio-spectrogram:hover .audio-volume {
-    opacity: 1;
+    opacity: 0.7;
 }

@@ -210,8 +210,14 @@ var audioPlayer = new Object();
         $('<div class="seekbar"></div>').appendTo(im.parent());
         $('<div class="audio-control-play-pause paused">▶</div>').appendTo(im.parent());
         $('<div class="audio-currentTime">00:00</div>').appendTo(im.parent());
-        $('<input placeholder="Range input not supported" class="audio-volume"'
-          +'type="range" min="0" max="1" step="0.01" />').appendTo(im.parent());
+        if (navigator && /Firefox/.test(navigator.userAgent)) {
+            $('<p class="message_warning">Sorry, Firefox does not support the '
+              + 'range input type, you won\'t be able to change the volume</p>')
+                .appendTo(im.parent().parent());
+        } else {
+            $('<input type="range" class="audio-volume"'
+              +'value="1" min="0" max="1" step="0.001" />').appendTo(im.parent());
+        }
         $('.audio-spectrogram').trigger('attachedControls');
     };
 })(audioPlayer);