Panel improvements

- Added progress meter for video and audio media types.
- Changed the __repr__ method of MediaEntry to display a more
  useful description of the entry.
- Added a new MediaEntry.state, 'processing', which indicates that the
  processor task is currently running on the item.
- Fixed some PEP8 issues in user_pages/views.py
- Fixed the ATOM TAG URI to show the correct year.
This commit is contained in:
Joar Wandborg 2012-07-11 00:36:42 +02:00
parent 51eb0267d9
commit 6471291575
10 changed files with 101 additions and 27 deletions

View File

@ -14,7 +14,7 @@
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
from sqlalchemy import MetaData, Table, Column, Boolean from sqlalchemy import MetaData, Table, Column, Boolean, SmallInteger
from mediagoblin.db.sql.util import RegisterMigration from mediagoblin.db.sql.util import RegisterMigration
@ -47,3 +47,15 @@ def add_wants_notification_column(db_conn):
default=True, nullable=True) default=True, nullable=True)
col.create(users, populate_defaults=True) col.create(users, populate_defaults=True)
db_conn.commit() db_conn.commit()
@RegisterMigration(3, MIGRATIONS)
def add_transcoding_progress(db_conn):
metadata = MetaData(bind=db_conn.bind)
media_entry = Table('core__media_entries', metadata, autoload=True,
autoload_with=db_conn.bind)
col = Column('transcoding_progress', SmallInteger)
col.create(media_entry)
db_conn.commit()

View File

@ -107,6 +107,8 @@ class MediaEntry(Base, MediaEntryMixin):
fail_error = Column(Unicode) fail_error = Column(Unicode)
fail_metadata = Column(JSONEncoded) fail_metadata = Column(JSONEncoded)
transcoding_progress = Column(SmallInteger)
queued_media_file = Column(PathTupleWithSlashes) queued_media_file = Column(PathTupleWithSlashes)
queued_task_id = Column(Unicode) queued_task_id = Column(Unicode)
@ -209,6 +211,12 @@ class MediaEntry(Base, MediaEntryMixin):
__import__(models_module) __import__(models_module)
return sys.modules[models_module].DATA_MODEL return sys.modules[models_module].DATA_MODEL
def __repr__(self):
return '<{classname} {id}: {title}>'.format(
classname=self.__class__.__name__,
id=self.id,
title=self.title)
class FileKeynames(Base): class FileKeynames(Base):
""" """

View File

@ -20,13 +20,14 @@ import os
from mediagoblin import mg_globals as mgg from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (create_pub_filepath, BadMediaFail, from mediagoblin.processing import (create_pub_filepath, BadMediaFail,
FilenameBuilder) FilenameBuilder, ProgressCallback)
from mediagoblin.media_types.audio.transcoders import (AudioTranscoder, from mediagoblin.media_types.audio.transcoders import (AudioTranscoder,
AudioThumbnailer) AudioThumbnailer)
_log = logging.getLogger(__name__) _log = logging.getLogger(__name__)
def sniff_handler(media_file, **kw): def sniff_handler(media_file, **kw):
try: try:
transcoder = AudioTranscoder() transcoder = AudioTranscoder()
@ -40,6 +41,7 @@ def sniff_handler(media_file, **kw):
return False return False
def process_audio(entry): def process_audio(entry):
audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio'] audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']
@ -72,11 +74,13 @@ def process_audio(entry):
transcoder = AudioTranscoder() transcoder = AudioTranscoder()
with tempfile.NamedTemporaryFile() as webm_audio_tmp: with tempfile.NamedTemporaryFile() as webm_audio_tmp:
progress_callback = ProgressCallback(entry)
transcoder.transcode( transcoder.transcode(
queued_filename, queued_filename,
webm_audio_tmp.name, webm_audio_tmp.name,
quality=audio_config['quality']) quality=audio_config['quality'],
progress_callback=progress_callback)
transcoder.discover(webm_audio_tmp.name) transcoder.discover(webm_audio_tmp.name)

View File

@ -206,7 +206,7 @@ class AudioTranscoder(object):
data = dict(message.structure) data = dict(message.structure)
if self.__on_progress: if self.__on_progress:
self.__on_progress(data) self.__on_progress(data.get('percent'))
_log.info('{0}% done...'.format( _log.info('{0}% done...'.format(
data.get('percent'))) data.get('percent')))

View File

@ -19,7 +19,7 @@ import logging
from mediagoblin import mg_globals as mgg from mediagoblin import mg_globals as mgg
from mediagoblin.processing import \ from mediagoblin.processing import \
create_pub_filepath, FilenameBuilder, BaseProcessingFail create_pub_filepath, FilenameBuilder, BaseProcessingFail, ProgressCallback
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _ from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from . import transcoders from . import transcoders
@ -78,11 +78,13 @@ def process_video(entry):
with tmp_dst: with tmp_dst:
# Transcode queued file to a VP8/vorbis file that fits in a 640x640 square # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
progress_callback = ProgressCallback(entry)
transcoder = transcoders.VideoTranscoder() transcoder = transcoders.VideoTranscoder()
transcoder.transcode(queued_filename, tmp_dst.name, transcoder.transcode(queued_filename, tmp_dst.name,
vp8_quality=video_config['vp8_quality'], vp8_quality=video_config['vp8_quality'],
vp8_threads=video_config['vp8_threads'], vp8_threads=video_config['vp8_threads'],
vorbis_quality=video_config['vorbis_quality']) vorbis_quality=video_config['vorbis_quality'],
progress_callback=progress_callback)
# Push transcoded video to public storage # Push transcoded video to public storage
_log.debug('Saving medium...') _log.debug('Saving medium...')

View File

@ -625,7 +625,7 @@ class VideoTranscoder:
data = dict(message.structure) data = dict(message.structure)
if self._progress_callback: if self._progress_callback:
self._progress_callback(data) self._progress_callback(data.get('percent'))
_log.info('{percent}% done...'.format( _log.info('{percent}% done...'.format(
percent=data.get('percent'))) percent=data.get('percent')))

View File

@ -25,12 +25,23 @@ from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
_log = logging.getLogger(__name__) _log = logging.getLogger(__name__)
class ProgressCallback(object):
def __init__(self, entry):
self.entry = entry
def __call__(self, progress):
if progress:
self.entry.transcoding_progress = progress
self.entry.save()
def create_pub_filepath(entry, filename): def create_pub_filepath(entry, filename):
return mgg.public_store.get_unique_filepath( return mgg.public_store.get_unique_filepath(
['media_entries', ['media_entries',
unicode(entry._id), unicode(entry._id),
filename]) filename])
class FilenameBuilder(object): class FilenameBuilder(object):
"""Easily slice and dice filenames. """Easily slice and dice filenames.

View File

@ -44,20 +44,24 @@ class ProcessMedia(Task):
entry = mgg.database.MediaEntry.one( entry = mgg.database.MediaEntry.one(
{'_id': ObjectId(media_id)}) {'_id': ObjectId(media_id)})
_log.info('Running task {0} on media {1}: {2}'.format(
self.name,
entry._id,
entry.title))
# Try to process, and handle expected errors. # Try to process, and handle expected errors.
try: try:
#__import__(entry.media_type)
manager = get_media_manager(entry.media_type) manager = get_media_manager(entry.media_type)
entry.state = u'processing'
entry.save()
_log.debug('Processing {0}'.format(entry)) _log.debug('Processing {0}'.format(entry))
manager['processor'](entry) manager['processor'](entry)
entry.state = u'processed'
entry.save()
except BaseProcessingFail as exc: except BaseProcessingFail as exc:
mark_entry_failed(entry._id, exc) mark_entry_failed(entry._id, exc)
return return
except ImportError as exc: except ImportError as exc:
_log.error( _log.error(
'Entry {0} failed to process due to an import error: {1}'\ 'Entry {0} failed to process due to an import error: {1}'\
@ -67,9 +71,6 @@ class ProcessMedia(Task):
mark_entry_failed(entry._id, exc) mark_entry_failed(entry._id, exc)
entry.state = u'processed'
entry.save()
def on_failure(self, exc, task_id, args, kwargs, einfo): def on_failure(self, exc, task_id, args, kwargs, einfo):
""" """
If the processing failed we should mark that in the database. If the processing failed we should mark that in the database.

View File

@ -33,19 +33,23 @@
<th>ID</th> <th>ID</th>
<th>Title</th> <th>Title</th>
<th>When submitted</th> <th>When submitted</th>
<th>Status</th> <th>Transcoding progress</th>
</tr> </tr>
{% for media_entry in processing_entries %} {% for media_entry in processing_entries %}
<tr> <tr>
<td>{{ media_entry._id }}</td> <td>{{ media_entry._id }}</td>
<td>{{ media_entry.title }}</td> <td>{{ media_entry.title }}</td>
<td>{{ media_entry.created.strftime("%m-%d-%Y %I:%M %p") }}</td> <td>{{ media_entry.created.strftime("%m-%d-%Y %I:%M %p") }}</td>
<td></td> {% if media_entry.transcoding_progress %}
<td>{{ media_entry.transcoding_progress }}%</td>
{% else %}
<td>Unknown</td>
{% endif %}
</tr> </tr>
{% endfor %} {% endfor %}
</table> </table>
{% else %} {% else %}
<p><i>{% trans %}No media in-processing{% endtrans %}</i></p> <p><em>{% trans %}No media in-processing{% endtrans %}</em></p>
{% endif %} {% endif %}
{% if failed_entries.count() %} {% if failed_entries.count() %}
@ -74,5 +78,27 @@
</tr> </tr>
{% endfor %} {% endfor %}
</table> </table>
{% else %}
<p><em>{% trans %}No failed entries!{% endtrans %}</em></p>
{% endif %}
{% if processed_entries.count() %}
<h2>{% trans %}Your last 10 successful uploads{% endtrans %}</h2>
<table class="media_panel processed">
<tr>
<th>ID</th>
<th>Title</th>
<th>Submitted</th>
</tr>
{% for entry in processed_entries %}
<tr>
<td>{{ entry._id }}</td>
<td><a href="{{ entry.url_for_self(request.urlgen) }}">{{ entry.title }}</a></td>
<td>{{ entry.created.strftime("%m-%d-%Y %I:%M %p") }}</td>
</tr>
{% endfor %}
</table>
{% else %}
<p><em>{% trans %}No processed entries, yet!{% endtrans %}</em></p>
{% endif %} {% endif %}
{% endblock %} {% endblock %}

View File

@ -16,6 +16,7 @@
from webob import exc from webob import exc
import logging import logging
import datetime
from mediagoblin import messages, mg_globals from mediagoblin import messages, mg_globals
from mediagoblin.db.util import DESCENDING, ObjectId from mediagoblin.db.util import DESCENDING, ObjectId
@ -37,6 +38,7 @@ from mediagoblin.media_types import get_media_manager
_log = logging.getLogger(__name__) _log = logging.getLogger(__name__)
_log.setLevel(logging.DEBUG) _log.setLevel(logging.DEBUG)
@uses_pagination @uses_pagination
def user_home(request, page): def user_home(request, page):
"""'Homepage' of a User()""" """'Homepage' of a User()"""
@ -251,10 +253,11 @@ def atom_feed(request):
atomlinks = [{ atomlinks = [{
'href': request.urlgen( 'href': request.urlgen(
'mediagoblin.user_pages.user_home', 'mediagoblin.user_pages.user_home',
qualified=True,user=request.matchdict['user']), qualified=True, user=request.matchdict['user']),
'rel': 'alternate', 'rel': 'alternate',
'type': 'text/html' 'type': 'text/html'
}]; }]
if mg_globals.app_config["push_urls"]: if mg_globals.app_config["push_urls"]:
for push_url in mg_globals.app_config["push_urls"]: for push_url in mg_globals.app_config["push_urls"]:
atomlinks.append({ atomlinks.append({
@ -264,14 +267,16 @@ def atom_feed(request):
feed = AtomFeed( feed = AtomFeed(
"MediaGoblin: Feed for user '%s'" % request.matchdict['user'], "MediaGoblin: Feed for user '%s'" % request.matchdict['user'],
feed_url=request.url, feed_url=request.url,
id='tag:'+request.host+',2011:gallery.user-'+request.matchdict['user'], id='tag:{host},{year}:gallery.user-{user}'.format(
host=request.host,
year=datetime.datetime.today().strftime('%Y'),
user=request.matchdict['user']),
links=atomlinks) links=atomlinks)
for entry in cursor: for entry in cursor:
feed.add(entry.get('title'), feed.add(entry.get('title'),
entry.description_html, entry.description_html,
id=entry.url_for_self(request.urlgen,qualified=True), id=entry.url_for_self(request.urlgen, qualified=True),
content_type='html', content_type='html',
author={ author={
'name': entry.get_uploader.username, 'name': entry.get_uploader.username,
@ -323,17 +328,22 @@ def processing_panel(request):
# Get media entries which are in-processing # Get media entries which are in-processing
processing_entries = request.db.MediaEntry.find( processing_entries = request.db.MediaEntry.find(
{'uploader': user._id, {'uploader': user._id,
'state': u'unprocessed'}).sort('created', DESCENDING) 'state': u'processing'}).sort('created', DESCENDING)
# Get media entries which have failed to process # Get media entries which have failed to process
failed_entries = request.db.MediaEntry.find( failed_entries = request.db.MediaEntry.find(
{'uploader': user._id, {'uploader': user._id,
'state': u'failed'}).sort('created', DESCENDING) 'state': u'failed'}).sort('created', DESCENDING)
processed_entries = request.db.MediaEntry.find(
{'uploader': user._id,
'state': u'processed'}).sort('created', DESCENDING).limit(10)
# Render to response # Render to response
return render_to_response( return render_to_response(
request, request,
'mediagoblin/user_pages/processing_panel.html', 'mediagoblin/user_pages/processing_panel.html',
{'user': user, {'user': user,
'processing_entries': processing_entries, 'processing_entries': processing_entries,
'failed_entries': failed_entries}) 'failed_entries': failed_entries,
'processed_entries': processed_entries})