diff --git a/mediagoblin/config_spec.ini b/mediagoblin/config_spec.ini
index eef6f6e0..e5e059c9 100644
--- a/mediagoblin/config_spec.ini
+++ b/mediagoblin/config_spec.ini
@@ -50,6 +50,9 @@ allow_attachments = boolean(default=False)
# Cookie stuff
csrf_cookie_name = string(default='mediagoblin_csrftoken')
+# Media types
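+# Set enable_video = true in your local config (e.g. mediagoblin_local.ini)
+# to turn on the experimental video media type; video processing requires
+# GStreamer 0.10 and its Python bindings.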
+enable_video = boolean(default=False)
+
[storage:publicstore]
storage_class = string(default="mediagoblin.storage.filestorage:BasicFileStorage")
base_dir = string(default="%(here)s/user_dev/media/public")
diff --git a/mediagoblin/db/migrations.py b/mediagoblin/db/migrations.py
index edaf5630..cfc01287 100644
--- a/mediagoblin/db/migrations.py
+++ b/mediagoblin/db/migrations.py
@@ -100,3 +100,11 @@ def user_add_forgot_password_token_and_expires(database):
"""
add_table_field(database, 'users', 'fp_verification_key', None)
add_table_field(database, 'users', 'fp_token_expire', None)
+
+
+@RegisterMigration(7)
+def media_type_image_to_multimedia_type_image(database):
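+    """
+    Rename the 'image' media type to its full module path,
+    'mediagoblin.media_types.image'.
+    """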
+ database['media_entries'].update(
+ {'media_type': 'image'},
+ {'$set': {'media_type': 'mediagoblin.media_types.image'}},
+ multi=True)
diff --git a/mediagoblin/gmg_commands/import_export.py b/mediagoblin/gmg_commands/import_export.py
index 30112969..4ec17d47 100644
--- a/mediagoblin/gmg_commands/import_export.py
+++ b/mediagoblin/gmg_commands/import_export.py
@@ -211,10 +211,12 @@ def _export_media(db, args):
_log.info(u'Exporting {0} - {1}'.format(
entry['title'],
name))
-
- mc_file = media_cache.get_file(path, mode='wb')
- mc_file.write(
- mg_globals.public_store.get_file(path, mode='rb').read())
+ try:
+ mc_file = media_cache.get_file(path, mode='wb')
+ mc_file.write(
+ mg_globals.public_store.get_file(path, mode='rb').read())
+        except Exception as e:
+ _log.error('Failed: {0}'.format(e))
_log.info('...Media exported')
diff --git a/mediagoblin/init/celery/__init__.py b/mediagoblin/init/celery/__init__.py
index f7ef9f39..1eb21d7a 100644
--- a/mediagoblin/init/celery/__init__.py
+++ b/mediagoblin/init/celery/__init__.py
@@ -18,7 +18,7 @@ import os
import sys
-MANDATORY_CELERY_IMPORTS = ['mediagoblin.process_media']
+MANDATORY_CELERY_IMPORTS = ['mediagoblin.processing']
DEFAULT_SETTINGS_MODULE = 'mediagoblin.init.celery.dummy_settings_module'
diff --git a/mediagoblin/media_types/__init__.py b/mediagoblin/media_types/__init__.py
new file mode 100644
index 00000000..f56fd942
--- /dev/null
+++ b/mediagoblin/media_types/__init__.py
@@ -0,0 +1,73 @@
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011 MediaGoblin contributors. See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+
+from mediagoblin import mg_globals
+from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
+
+
+class FileTypeNotSupported(Exception):
+ pass
+
+class InvalidFileType(Exception):
+ pass
+
+# This should be more dynamic in the future. Perhaps put it in the .ini?
+# -- Joar
+MEDIA_TYPES = [
+ 'mediagoblin.media_types.image']
+
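+# Note: MEDIA_TYPES is extended at import time, so mg_globals.app_config must
+# already be loaded when this module is first imported.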
+if mg_globals.app_config['enable_video']:
+ MEDIA_TYPES.append('mediagoblin.media_types.video')
+
+
+def get_media_types():
+ '''
+ Generator that returns the available media types
+ '''
+ for media_type in MEDIA_TYPES:
+ yield media_type
+
+
+def get_media_managers():
+ '''
+ Generator that returns all available media managers
+ '''
+ for media_type in get_media_types():
+ __import__(media_type)
+
+ yield media_type, sys.modules[media_type].MEDIA_MANAGER
+
+
+def get_media_manager(_media_type=None):
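+    '''
+    Return the MEDIA_MANAGER dict for the given media type string
+    '''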
+ for media_type, manager in get_media_managers():
+ if media_type in _media_type:
+ return manager
+
+
+def get_media_type_and_manager(filename):
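+    '''
+    Find the media type and manager for a filename based on its extension.
+
+    Raises InvalidFileType if the filename has no usable extension.
+    '''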
+ for media_type, manager in get_media_managers():
+ if filename.find('.') > 0:
+ ext = os.path.splitext(filename)[1].lower()
+ else:
+ raise InvalidFileType(
+ _('Could not find any file extension in "{filename}"').format(
+ filename=filename))
+
+ if ext[1:] in manager['accepted_extensions']:
+ return media_type, manager
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
new file mode 100644
index 00000000..3b63d8eb
--- /dev/null
+++ b/mediagoblin/media_types/image/__init__.py
@@ -0,0 +1,26 @@
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011 MediaGoblin contributors. See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from mediagoblin.media_types.image.processing import process_image
+
+
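+# A media manager is a plain dict describing a media type: its processing
+# entry point, display template, default thumbnail and accepted extensions.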
+MEDIA_MANAGER = {
+ "human_readable": "Image",
+ "processor": process_image, # alternately a string,
+ # 'mediagoblin.media_types.image.processing'?
+ "display_template": "mediagoblin/media_displays/image.html",
+ "default_thumb": "images/media_thumbs/image.jpg",
+ "accepted_extensions": ["jpg", "jpeg", "png", "gif", "tiff"]}
diff --git a/mediagoblin/process_media/__init__.py b/mediagoblin/media_types/image/processing.py
similarity index 57%
rename from mediagoblin/process_media/__init__.py
rename to mediagoblin/media_types/image/processing.py
index 54c0c493..2932c455 100644
--- a/mediagoblin/process_media/__init__.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -14,104 +14,23 @@
# You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
+import Image
import os
-import Image
from celery.task import Task
from celery import registry
from mediagoblin.db.util import ObjectId
from mediagoblin import mg_globals as mgg
-from mediagoblin.process_media.errors import BaseProcessingFail, BadMediaFail
-
-
-THUMB_SIZE = 180, 180
-MEDIUM_SIZE = 640, 640
-
-
-def create_pub_filepath(entry, filename):
- return mgg.public_store.get_unique_filepath(
- ['media_entries',
- unicode(entry._id),
- filename])
+from mediagoblin.processing import BaseProcessingFail, \
+ mark_entry_failed, BadMediaFail, create_pub_filepath, THUMB_SIZE, \
+ MEDIUM_SIZE
################################
# Media processing initial steps
################################
-class ProcessMedia(Task):
- """
- Pass this entry off for processing.
- """
- def run(self, media_id):
- """
- Pass the media entry off to the appropriate processing function
- (for now just process_image...)
- """
- entry = mgg.database.MediaEntry.one(
- {'_id': ObjectId(media_id)})
-
- # Try to process, and handle expected errors.
- try:
- process_image(entry)
- except BaseProcessingFail, exc:
- mark_entry_failed(entry._id, exc)
- return
-
- entry['state'] = u'processed'
- entry.save()
-
- def on_failure(self, exc, task_id, args, kwargs, einfo):
- """
- If the processing failed we should mark that in the database.
-
- Assuming that the exception raised is a subclass of
- BaseProcessingFail, we can use that to get more information
- about the failure and store that for conveying information to
- users about the failure, etc.
- """
- entry_id = args[0]
- mark_entry_failed(entry_id, exc)
-
-
-process_media = registry.tasks[ProcessMedia.name]
-
-
-def mark_entry_failed(entry_id, exc):
- """
- Mark a media entry as having failed in its conversion.
-
- Uses the exception that was raised to mark more information. If
- the exception is a derivative of BaseProcessingFail then we can
- store extra information that can be useful for users telling them
- why their media failed to process.
-
- Args:
- - entry_id: The id of the media entry
-
- """
- # Was this a BaseProcessingFail? In other words, was this a
- # type of error that we know how to handle?
- if isinstance(exc, BaseProcessingFail):
- # Looks like yes, so record information about that failure and any
- # metadata the user might have supplied.
- mgg.database['media_entries'].update(
- {'_id': entry_id},
- {'$set': {u'state': u'failed',
- u'fail_error': exc.exception_path,
- u'fail_metadata': exc.metadata}})
- else:
- # Looks like no, so just mark it as failed and don't record a
- # failure_error (we'll assume it wasn't handled) and don't record
- # metadata (in fact overwrite it if somehow it had previous info
- # here)
- mgg.database['media_entries'].update(
- {'_id': entry_id},
- {'$set': {u'state': u'failed',
- u'fail_error': None,
- u'fail_metadata': {}}})
-
def process_image(entry):
"""
diff --git a/mediagoblin/media_types/video/__init__.py b/mediagoblin/media_types/video/__init__.py
new file mode 100644
index 00000000..a970ab01
--- /dev/null
+++ b/mediagoblin/media_types/video/__init__.py
@@ -0,0 +1,27 @@
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011 MediaGoblin contributors. See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from mediagoblin.media_types.video.processing import process_video
+
+
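+# This media type is only registered when enable_video is set in the config
+# (see mediagoblin.media_types.MEDIA_TYPES).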
+MEDIA_MANAGER = {
+ "human_readable": "Video",
+ "processor": process_video, # alternately a string,
+                                # 'mediagoblin.media_types.video.processing'?
+ "display_template": "mediagoblin/media_displays/video.html",
+ "default_thumb": "images/media_thumbs/video.jpg",
+ "accepted_extensions": [
+ "mp4", "mov", "webm", "avi", "3gp", "3gpp", "mkv", "ogv", "ogg"]}
diff --git a/mediagoblin/media_types/video/devices/web-advanced.json b/mediagoblin/media_types/video/devices/web-advanced.json
new file mode 100644
index 00000000..ce1d22ff
--- /dev/null
+++ b/mediagoblin/media_types/video/devices/web-advanced.json
@@ -0,0 +1,505 @@
+{
+ "make": "Generic",
+ "model": "Web Browser (Advanced)",
+ "description": "Media for World Wide Web",
+ "version": "0.1",
+ "author": {
+ "name": "Dionisio E Alonso",
+ "email": "dealonso@gmail.com"
+ },
+ "icon": "file://web.svg",
+ "default": "WebM 480p",
+ "presets": [
+ {
+ "name": "H.264 720p",
+ "extension": "mp4",
+ "container": "qtmux",
+ "vcodec": {
+ "name": "x264enc",
+ "container": "qtmux",
+ "width": [
+ 960, 1280
+ ],
+ "height": [
+ 720, 720
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "pass=qual quantizer=23 subme=6 cabac=0 threads=0"
+ ]
+ },
+ "acodec": {
+ "name": "faac",
+ "container": "qtmux",
+ "width": [
+ 8, 24
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "bitrate=131072 profile=LC"
+ ]
+ }
+ },
+ {
+ "name": "WebM 720p",
+ "extension": "webm",
+ "container": "webmmux",
+ "icon": "file://web-webm.svg",
+ "vcodec": {
+ "name": "vp8enc",
+ "container": "webmmux",
+ "width": [
+ 960, 1280
+ ],
+ "height": [
+ 720, 720
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "quality=5.75 threads=%(threads)s speed=2"
+ ]
+ },
+ "acodec": {
+ "name": "vorbisenc",
+ "container": "webmmux",
+ "width": [
+ 8, 32
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "quality=0.3"
+ ]
+ }
+ },
+ {
+ "name": "Flash Video 720p",
+ "extension": "flv",
+ "icon": "file://web-flv.png",
+ "container": "flvmux",
+ "vcodec": {
+ "name": "x264enc",
+ "container": "flvmux",
+ "width": [
+ 960, 1280
+ ],
+ "height": [
+ 720, 720
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "pass=qual quantizer=23 subme=6 cabac=0 threads=0"
+ ]
+ },
+ "acodec": {
+ "name": "faac",
+ "container": "flvmux",
+ "width": [
+ 8, 24
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "bitrate=131072 profile=LC"
+ ]
+ }
+ },
+
+ {
+ "name": "H.264 576p",
+ "extension": "mp4",
+ "container": "qtmux",
+ "vcodec": {
+ "name": "x264enc",
+ "container": "qtmux",
+ "width": [
+ 768, 1024
+ ],
+ "height": [
+ 576, 576
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "pass=qual quantizer=23 subme=6 cabac=0 threads=0"
+ ]
+ },
+ "acodec": {
+ "name": "faac",
+ "container": "qtmux",
+ "width": [
+ 8, 24
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "bitrate=131072 profile=LC"
+ ]
+ }
+ },
+ {
+ "name": "WebM 576p",
+ "extension": "webm",
+ "container": "webmmux",
+ "icon": "file://web-webm.svg",
+ "vcodec": {
+ "name": "vp8enc",
+ "container": "webmmux",
+ "width": [
+ 768, 1024
+ ],
+ "height": [
+ 576, 576
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "quality=5.75 threads=%(threads)s speed=2"
+ ]
+ },
+ "acodec": {
+ "name": "vorbisenc",
+ "container": "webmmux",
+ "width": [
+ 8, 32
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "quality=0.3"
+ ]
+ }
+ },
+ {
+ "name": "Flash Video 576p",
+ "extension": "flv",
+ "icon": "file://web-flv.png",
+ "container": "flvmux",
+ "vcodec": {
+ "name": "x264enc",
+ "container": "flvmux",
+ "width": [
+ 768, 1024
+ ],
+ "height": [
+ 576, 576
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "pass=qual quantizer=23 subme=6 cabac=0 threads=0"
+ ]
+ },
+ "acodec": {
+ "name": "faac",
+ "container": "flvmux",
+ "width": [
+ 8, 24
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "bitrate=131072 profile=LC"
+ ]
+ }
+ },
+
+ {
+ "name": "H.264 480p",
+ "extension": "mp4",
+ "container": "qtmux",
+ "vcodec": {
+ "name": "x264enc",
+ "container": "qtmux",
+ "width": [
+ 640, 854
+ ],
+ "height": [
+ 480, 480
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "pass=qual quantizer=23 subme=6 cabac=0 threads=0"
+ ]
+ },
+ "acodec": {
+ "name": "faac",
+ "container": "qtmux",
+ "width": [
+ 8, 24
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "bitrate=131072 profile=LC"
+ ]
+ }
+ },
+ {
+ "name": "WebM 480p",
+ "extension": "webm",
+ "container": "webmmux",
+ "icon": "file://web-webm.svg",
+ "vcodec": {
+ "name": "vp8enc",
+ "container": "webmmux",
+ "width": [
+ 640, 854
+ ],
+ "height": [
+ 480, 480
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "quality=5.75 threads=%(threads)s speed=2"
+ ]
+ },
+ "acodec": {
+ "name": "vorbisenc",
+ "container": "webmmux",
+ "width": [
+ 8, 32
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "quality=0.3"
+ ]
+ }
+ },
+ {
+ "name": "Flash Video 480p",
+ "extension": "flv",
+ "icon": "file://web-flv.png",
+ "container": "flvmux",
+ "vcodec": {
+ "name": "x264enc",
+ "container": "flvmux",
+ "width": [
+ 640, 854
+ ],
+ "height": [
+ 480, 480
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "pass=qual quantizer=23 subme=6 cabac=0 threads=0"
+ ]
+ },
+ "acodec": {
+ "name": "faac",
+ "container": "flvmux",
+ "width": [
+ 8, 24
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "bitrate=131072 profile=LC"
+ ]
+ }
+ },
+
+ {
+ "name": "H.264 360p",
+ "extension": "mp4",
+ "container": "qtmux",
+ "vcodec": {
+ "name": "x264enc",
+ "container": "qtmux",
+ "width": [
+ 480, 640
+ ],
+ "height": [
+ 360, 360
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "pass=qual quantizer=23 subme=6 cabac=0 threads=0"
+ ]
+ },
+ "acodec": {
+ "name": "faac",
+ "container": "qtmux",
+ "width": [
+ 8, 24
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "bitrate=131072 profile=LC"
+ ]
+ }
+ },
+ {
+ "name": "WebM 360p",
+ "extension": "webm",
+ "container": "webmmux",
+ "icon": "file://web-webm.svg",
+ "vcodec": {
+ "name": "vp8enc",
+ "container": "webmmux",
+ "width": [
+ 480, 640
+ ],
+ "height": [
+ 360, 360
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "quality=5.75 threads=%(threads)s speed=2"
+ ]
+ },
+ "acodec": {
+ "name": "vorbisenc",
+ "container": "webmmux",
+ "width": [
+ 8, 32
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "quality=0.3"
+ ]
+ }
+ },
+ {
+ "name": "Flash Video 360p",
+ "extension": "flv",
+ "icon": "file://web-flv.png",
+ "container": "flvmux",
+ "vcodec": {
+ "name": "x264enc",
+ "container": "flvmux",
+ "width": [
+ 480, 640
+ ],
+ "height": [
+ 360, 360
+ ],
+ "rate": [
+ 1, 30
+ ],
+ "passes": [
+ "pass=qual quantizer=23 subme=6 cabac=0 threads=0"
+ ]
+ },
+ "acodec": {
+ "name": "faac",
+ "container": "flvmux",
+ "width": [
+ 8, 24
+ ],
+ "depth": [
+ 8, 24
+ ],
+ "rate": [
+ 8000, 96000
+ ],
+ "channels": [
+ 1, 2
+ ],
+ "passes": [
+ "bitrate=131072 profile=LC"
+ ]
+ }
+ }
+ ]
+}
diff --git a/mediagoblin/media_types/video/devices/web-flv.png b/mediagoblin/media_types/video/devices/web-flv.png
new file mode 100644
index 00000000..b75699f4
Binary files /dev/null and b/mediagoblin/media_types/video/devices/web-flv.png differ
diff --git a/mediagoblin/media_types/video/devices/web-webm.svg b/mediagoblin/media_types/video/devices/web-webm.svg
new file mode 100644
index 00000000..4e5b3e97
--- /dev/null
+++ b/mediagoblin/media_types/video/devices/web-webm.svg
@@ -0,0 +1,259 @@
diff --git a/mediagoblin/media_types/video/devices/web.svg b/mediagoblin/media_types/video/devices/web.svg
new file mode 100644
index 00000000..c0c68244
--- /dev/null
+++ b/mediagoblin/media_types/video/devices/web.svg
@@ -0,0 +1,982 @@
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
new file mode 100644
index 00000000..6125e49c
--- /dev/null
+++ b/mediagoblin/media_types/video/processing.py
@@ -0,0 +1,118 @@
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011 MediaGoblin contributors. See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import tempfile
+import logging
+import os
+
+from mediagoblin import mg_globals as mgg
+from mediagoblin.processing import mark_entry_failed, \
+ THUMB_SIZE, MEDIUM_SIZE, create_pub_filepath
+from . import transcoders
+
+logging.basicConfig()
+
+_log = logging.getLogger(__name__)
+_log.setLevel(logging.DEBUG)
+
+
+def process_video(entry):
+ """
+    Process a video file: transcode it to a WebM (VP8/Vorbis) video that
+    fits in a 640x640 square and generate a JPEG thumbnail.
+
+    Much of this code is derived from the arista-transcoder script in
+    the arista PyPI package, changed to match the needs of MediaGoblin.
+
+    The actual work is done by transcoders.VideoTranscoder and
+    transcoders.VideoThumbnailer, each of which runs a GStreamer pipeline
+    in a GObject main loop and returns once its output has been written.
+ """
+ workbench = mgg.workbench_manager.create_workbench()
+
+ queued_filepath = entry['queued_media_file']
+ queued_filename = workbench.localized_file(
+ mgg.queue_store, queued_filepath,
+ 'source')
+
+ medium_filepath = create_pub_filepath(
+ entry,
+ '{original}-640p.webm'.format(
+ original=os.path.splitext(
+                queued_filepath[-1])[0] # Select the file name, sans extension
+ ))
+
+ thumbnail_filepath = create_pub_filepath(
+ entry, 'thumbnail.jpg')
+
+
+ # Create a temporary file for the video destination
+ tmp_dst = tempfile.NamedTemporaryFile()
+
+ with tmp_dst:
+ # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
+ transcoder = transcoders.VideoTranscoder(queued_filename, tmp_dst.name)
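+        # The VideoTranscoder constructor runs a GObject main loop and only
+        # returns once transcoding has finished, so tmp_dst is complete here.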
+
+ # Push transcoded video to public storage
+ _log.debug('Saving medium...')
+ mgg.public_store.get_file(medium_filepath, 'wb').write(
+ tmp_dst.read())
+ _log.debug('Saved medium')
+
+ entry['media_files']['webm_640'] = medium_filepath
+
+ # Save the width and height of the transcoded video
+ entry['media_data']['video'] = {
+ u'width': transcoder.dst_data.videowidth,
+ u'height': transcoder.dst_data.videoheight}
+
+ # Create a temporary file for the video thumbnail
+ tmp_thumb = tempfile.NamedTemporaryFile()
+
+ with tmp_thumb:
+ # Create a thumbnail.jpg that fits in a 180x180 square
+ transcoders.VideoThumbnailer(queued_filename, tmp_thumb.name)
+
+ # Push the thumbnail to public storage
+ _log.debug('Saving thumbnail...')
+ mgg.public_store.get_file(thumbnail_filepath, 'wb').write(
+ tmp_thumb.read())
+ _log.debug('Saved thumbnail')
+
+ entry['media_files']['thumb'] = thumbnail_filepath
+
+
+ # Push original file to public storage
+ queued_file = file(queued_filename, 'rb')
+
+ with queued_file:
+ original_filepath = create_pub_filepath(
+ entry,
+ queued_filepath[-1])
+
+ with mgg.public_store.get_file(original_filepath, 'wb') as \
+ original_file:
+ _log.debug('Saving original...')
+ original_file.write(queued_file.read())
+ _log.debug('Saved original')
+
+ entry['media_files']['original'] = original_filepath
+
+ mgg.queue_store.delete_file(queued_filepath)
+
+
+ # Save the MediaEntry
+ entry.save()
diff --git a/mediagoblin/media_types/video/transcoders.py b/mediagoblin/media_types/video/transcoders.py
new file mode 100644
index 00000000..d7ed14ca
--- /dev/null
+++ b/mediagoblin/media_types/video/transcoders.py
@@ -0,0 +1,658 @@
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011 MediaGoblin contributors. See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import division
+
+import os
+os.putenv('GST_DEBUG_DUMP_DOT_DIR', '/tmp')
+
+import sys
+import logging
+import pdb
+import urllib
+
+_log = logging.getLogger(__name__)
+logging.basicConfig()
+_log.setLevel(logging.DEBUG)
+
+CPU_COUNT = 2
+try:
+ import multiprocessing
+ try:
+ CPU_COUNT = multiprocessing.cpu_count()
+ except NotImplementedError:
+ _log.warning('multiprocessing.cpu_count not implemented')
+ pass
+except ImportError:
+ _log.warning('Could not import multiprocessing, defaulting to 2 CPU cores')
+ pass
+
+try:
+ import gtk
+except:
+ raise Exception('Could not find pygtk')
+
+try:
+ import gobject
+ gobject.threads_init()
+except:
+ raise Exception('gobject could not be found')
+
+try:
+ import pygst
+ pygst.require('0.10')
+ import gst
+ from gst.extend import discoverer
+except:
+ raise Exception('gst/pygst 0.10 could not be found')
+
+
+class VideoThumbnailer:
+ # Declaration of thumbnailer states
+ STATE_NULL = 0
+ STATE_HALTING = 1
+ STATE_PROCESSING = 2
+
+ # The current thumbnailer state
+ state = STATE_NULL
+
+ # This will contain the thumbnailing pipeline
+ thumbnail_pipeline = None
+
+ buffer_probes = {}
+
+ errors = []
+
+ def __init__(self, source_path, dest_path):
+ '''
+ Set up playbin pipeline in order to get video properties.
+
+ Initializes and runs the gobject.MainLoop()
+ '''
+ self.source_path = source_path
+ self.dest_path = dest_path
+
+ self.loop = gobject.MainLoop()
+
+ # Set up the playbin. It will be used to discover certain
+ # properties of the input file
+ self.playbin = gst.element_factory_make('playbin')
+
+ self.videosink = gst.element_factory_make('fakesink', 'videosink')
+ self.playbin.set_property('video-sink', self.videosink)
+
+ self.audiosink = gst.element_factory_make('fakesink', 'audiosink')
+ self.playbin.set_property('audio-sink', self.audiosink)
+
+ self.bus = self.playbin.get_bus()
+ self.bus.add_signal_watch()
+ self.watch_id = self.bus.connect('message', self._on_bus_message)
+
+ self.playbin.set_property('uri', 'file:{0}'.format(
+ urllib.pathname2url(self.source_path)))
+
+ self.playbin.set_state(gst.STATE_PAUSED)
+
+ self.run()
+
+ def run(self):
+ self.loop.run()
+
+ def _on_bus_message(self, bus, message):
+ _log.debug(' BUS MESSAGE: {0}'.format(message))
+
+ if message.type == gst.MESSAGE_ERROR:
+ gobject.idle_add(self._on_bus_error)
+
+ elif message.type == gst.MESSAGE_STATE_CHANGED:
+ # The pipeline state has changed
+ # Parse state changing data
+ _prev, state, _pending = message.parse_state_changed()
+
+ _log.debug('State changed: {0}'.format(state))
+
+ if state == gst.STATE_PAUSED:
+ if message.src == self.playbin:
+ gobject.idle_add(self._on_bus_paused)
+
+ def _on_bus_paused(self):
+ '''
+ Set up thumbnailing pipeline
+ '''
+ current_video = self.playbin.get_property('current-video')
+
+ if current_video == 0:
+ _log.debug('Found current video from playbin')
+ else:
+ _log.error('Could not get any current video from playbin!')
+
+ self.duration = self._get_duration(self.playbin)
+ _log.info('Video length: {0}'.format(self.duration / gst.SECOND))
+
+ _log.info('Setting up thumbnailing pipeline')
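+        # Build the pipeline from a gst-launch style description: decode the
+        # source, convert and scale it to 180px-wide RGB frames, and feed a
+        # fakesink so a buffer probe can grab a single frame as the thumbnail.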
+ self.thumbnail_pipeline = gst.parse_launch(
+ 'filesrc location="{0}" ! decodebin ! '
+ 'ffmpegcolorspace ! videoscale ! '
+ 'video/x-raw-rgb,depth=24,bpp=24,pixel-aspect-ratio=1/1,width=180 ! '
+ 'fakesink signal-handoffs=True'.format(self.source_path))
+
+ self.thumbnail_bus = self.thumbnail_pipeline.get_bus()
+ self.thumbnail_bus.add_signal_watch()
+ self.thumbnail_watch_id = self.thumbnail_bus.connect(
+ 'message', self._on_thumbnail_bus_message)
+
+ self.thumbnail_pipeline.set_state(gst.STATE_PAUSED)
+
+ #gobject.timeout_add(3000, self._on_timeout)
+
+ return False
+
+ def _on_thumbnail_bus_message(self, bus, message):
+ _log.debug('Thumbnail bus called, message: {0}'.format(message))
+
+ if message.type == gst.MESSAGE_ERROR:
+ _log.error(message)
+ gobject.idle_add(self._on_bus_error)
+
+ if message.type == gst.MESSAGE_STATE_CHANGED:
+ _prev, state, _pending = message.parse_state_changed()
+
+ if (state == gst.STATE_PAUSED and
+ not self.state == self.STATE_PROCESSING and
+ message.src == self.thumbnail_pipeline):
+ _log.info('Pipeline paused, processing')
+ self.state = self.STATE_PROCESSING
+
+ for sink in self.thumbnail_pipeline.sinks():
+ name = sink.get_name()
+ factoryname = sink.get_factory().get_name()
+
+ if factoryname == 'fakesink':
+ sinkpad = sink.get_pad('sink')
+
+ self.buffer_probes[name] = sinkpad.add_buffer_probe(
+ self.buffer_probe_handler, name)
+
+ _log.info('Added buffer probe')
+
+ break
+
+                    # Seek 30% in (the Wadsworth constant), at least 1 second
+ seek_amount = max(self.duration / 100 * 30, 1 * gst.SECOND)
+
+ _log.debug('seek amount: {0}'.format(seek_amount))
+
+ seek_result = self.thumbnail_pipeline.seek(
+ 1.0,
+ gst.FORMAT_TIME,
+ gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_ACCURATE,
+ gst.SEEK_TYPE_SET,
+ seek_amount,
+ gst.SEEK_TYPE_NONE,
+ 0)
+
+ if not seek_result:
+ self.errors.append('COULD_NOT_SEEK')
+ _log.error('Couldn\'t seek! result: {0}'.format(
+ seek_result))
+ _log.info(message)
+ self.shutdown()
+ else:
+ pass
+ #self.thumbnail_pipeline.set_state(gst.STATE_PAUSED)
+ #pdb.set_trace()
+
+ def buffer_probe_handler_real(self, pad, buff, name):
+ '''
+ Capture buffers as gdk_pixbufs when told to.
+ '''
+ try:
+ caps = buff.caps
+ if caps is None:
+ _log.error('No caps passed to buffer probe handler!')
+ self.shutdown()
+ return False
+
+ _log.debug('caps: {0}'.format(caps))
+
+ filters = caps[0]
+ width = filters["width"]
+ height = filters["height"]
+
+ pixbuf = gtk.gdk.pixbuf_new_from_data(
+ buff.data, gtk.gdk.COLORSPACE_RGB, False, 8,
+ width, height, width * 3)
+
+ # NOTE: 200x136 is sort of arbitrary. it's larger than what
+ # the ui uses at the time of this writing.
+ # new_width, new_height = scaled_size((width, height), (200, 136))
+
+ #pixbuf = pixbuf.scale_simple(
+ #new_width, new_height, gtk.gdk.INTERP_BILINEAR)
+
+ pixbuf.save(self.dest_path, 'jpeg')
+ _log.info('Saved thumbnail')
+ del pixbuf
+ self.shutdown()
+ except gst.QueryError:
+ pass
+ return False
+
+ def buffer_probe_handler(self, pad, buff, name):
+ '''
+ Proxy function for buffer_probe_handler_real
+ '''
+ gobject.idle_add(
+ lambda: self.buffer_probe_handler_real(pad, buff, name))
+
+ return True
+
+ def _get_duration(self, pipeline, retries=0):
+ '''
+ Get the duration of a pipeline.
+
+ Retries 5 times.
+ '''
+ if retries == 5:
+ return 0
+
+ try:
+ return pipeline.query_duration(gst.FORMAT_TIME)[0]
+ except gst.QueryError:
+ return self._get_duration(pipeline, retries + 1)
+
+ def _on_timeout(self):
+ _log.error('TIMEOUT! DROP EVERYTHING!')
+ self.shutdown()
+
+ def _on_bus_error(self, *args):
+ _log.error('AHAHAHA! Error! args: {0}'.format(args))
+
+ def shutdown(self):
+ '''
+ Tell gobject to call __halt when the mainloop is idle.
+ '''
+ _log.info('Shutting down')
+ self.__halt()
+
+ def __halt(self):
+ '''
+ Halt all pipelines and shut down the main loop
+ '''
+ _log.info('Halting...')
+ self.state = self.STATE_HALTING
+
+ self.__disconnect()
+
+ gobject.idle_add(self.__halt_final)
+
+ def __disconnect(self):
+ _log.debug('Disconnecting...')
+        if self.playbin is not None:
+ self.playbin.set_state(gst.STATE_NULL)
+ for sink in self.playbin.sinks():
+ name = sink.get_name()
+ factoryname = sink.get_factory().get_name()
+
+ _log.debug('Disconnecting {0}'.format(name))
+
+ if factoryname == "fakesink":
+ pad = sink.get_pad("sink")
+ pad.remove_buffer_probe(self.buffer_probes[name])
+ del self.buffer_probes[name]
+
+ self.playbin = None
+
+ if self.bus is not None:
+ self.bus.disconnect(self.watch_id)
+ self.bus = None
+
+
+ def __halt_final(self):
+ _log.info('Done')
+ if self.errors:
+ _log.error(','.join(self.errors))
+
+ self.loop.quit()
+
+
+class VideoTranscoder:
+ '''
+ Video transcoder
+
+ Transcodes the SRC video file to a VP8 WebM video file at DST
+
+ - Does the same thing as VideoThumbnailer, but produces a WebM vp8
+ and vorbis video file.
+ - The VideoTranscoder exceeds the VideoThumbnailer in the way
+ that it was refined afterwards and therefore is done more
+ correctly.
+ '''
+ def __init__(self, src, dst, **kwargs):
+ _log.info('Initializing VideoTranscoder...')
+
+ self.loop = gobject.MainLoop()
+ self.source_path = src
+ self.destination_path = dst
+
+ # Options
+ self.destination_dimensions = kwargs.get('dimensions') or (640, 640)
+ self._progress_callback = kwargs.get('progress_callback') or None
+
+ if not type(self.destination_dimensions) == tuple:
+ raise Exception('dimensions must be tuple: (width, height)')
+
+ self._setup()
+ self._run()
+
+ def _setup(self):
+ self._setup_discover()
+ self._setup_pipeline()
+
+ def _run(self):
+ _log.info('Discovering...')
+ self.discoverer.discover()
+ _log.info('Done')
+
+ _log.debug('Initializing MainLoop()')
+ self.loop.run()
+
+ def _setup_discover(self):
+ _log.debug('Setting up discoverer')
+ self.discoverer = discoverer.Discoverer(self.source_path)
+
+ # Connect self.__discovered to the 'discovered' event
+ self.discoverer.connect('discovered', self.__discovered)
+
+ def __discovered(self, data, is_media):
+ '''
+ Callback for media discoverer.
+ '''
+ if not is_media:
+ self.__stop()
+ raise Exception('Could not discover {0}'.format(self.source_path))
+
+ _log.debug('__discovered, data: {0}'.format(data.__dict__))
+
+ self.data = data
+
+ # Launch things that should be done after discovery
+ self._link_elements()
+ self.__setup_videoscale_capsfilter()
+
+ # Tell the transcoding pipeline to start running
+ self.pipeline.set_state(gst.STATE_PLAYING)
+ _log.info('Transcoding...')
+
+ def _setup_pipeline(self):
+ _log.debug('Setting up transcoding pipeline')
+ # Create the pipeline bin.
+ self.pipeline = gst.Pipeline('VideoTranscoderPipeline')
+
+ # Create all GStreamer elements, starting with
+ # filesrc & decoder
+ self.filesrc = gst.element_factory_make('filesrc', 'filesrc')
+ self.filesrc.set_property('location', self.source_path)
+ self.pipeline.add(self.filesrc)
+
+ self.decoder = gst.element_factory_make('decodebin2', 'decoder')
+ self.decoder.connect('new-decoded-pad', self._on_dynamic_pad)
+ self.pipeline.add(self.decoder)
+
+ # Video elements
+ self.videoqueue = gst.element_factory_make('queue', 'videoqueue')
+ self.pipeline.add(self.videoqueue)
+
+ self.videorate = gst.element_factory_make('videorate', 'videorate')
+ self.pipeline.add(self.videorate)
+
+ self.ffmpegcolorspace = gst.element_factory_make(
+ 'ffmpegcolorspace', 'ffmpegcolorspace')
+ self.pipeline.add(self.ffmpegcolorspace)
+
+ self.videoscale = gst.element_factory_make('ffvideoscale', 'videoscale')
+ #self.videoscale.set_property('method', 2) # I'm not sure this works
+ #self.videoscale.set_property('add-borders', 0)
+ self.pipeline.add(self.videoscale)
+
+ self.capsfilter = gst.element_factory_make('capsfilter', 'capsfilter')
+ self.pipeline.add(self.capsfilter)
+
+ self.vp8enc = gst.element_factory_make('vp8enc', 'vp8enc')
+ self.vp8enc.set_property('quality', 6)
+ self.vp8enc.set_property('threads', 2)
+ self.pipeline.add(self.vp8enc)
+
+ # Audio elements
+ self.audioqueue = gst.element_factory_make('queue', 'audioqueue')
+ self.pipeline.add(self.audioqueue)
+
+ self.audiorate = gst.element_factory_make('audiorate', 'audiorate')
+ self.audiorate.set_property('tolerance', 80000000)
+ self.pipeline.add(self.audiorate)
+
+ self.audioconvert = gst.element_factory_make('audioconvert', 'audioconvert')
+ self.pipeline.add(self.audioconvert)
+
+ self.audiocapsfilter = gst.element_factory_make('capsfilter', 'audiocapsfilter')
+ audiocaps = ['audio/x-raw-float']
+ self.audiocapsfilter.set_property(
+ 'caps',
+ gst.caps_from_string(
+ ','.join(audiocaps)))
+ self.pipeline.add(self.audiocapsfilter)
+
+ self.vorbisenc = gst.element_factory_make('vorbisenc', 'vorbisenc')
+ self.vorbisenc.set_property('quality', 1)
+ self.pipeline.add(self.vorbisenc)
+
+ # WebMmux & filesink
+ self.webmmux = gst.element_factory_make('webmmux', 'webmmux')
+ self.pipeline.add(self.webmmux)
+
+ self.filesink = gst.element_factory_make('filesink', 'filesink')
+ self.filesink.set_property('location', self.destination_path)
+ self.pipeline.add(self.filesink)
+
+ # Progressreport
+ self.progressreport = gst.element_factory_make(
+ 'progressreport', 'progressreport')
+ # Update every second
+ self.progressreport.set_property('update-freq', 1)
+ self.progressreport.set_property('silent', True)
+ self.pipeline.add(self.progressreport)
+
+ def _link_elements(self):
+ '''
+ Link all the elements
+
+ This code depends on data from the discoverer and is called
+ from __discovered
+ '''
+ _log.debug('linking elements')
+ # Link the filesrc element to the decoder. The decoder then emits
+ # 'new-decoded-pad' which links decoded src pads to either a video
+ # or audio sink
+ self.filesrc.link(self.decoder)
+
+ # Link all the video elements in a row to webmmux
+ gst.element_link_many(
+ self.videoqueue,
+ self.videorate,
+ self.ffmpegcolorspace,
+ self.videoscale,
+ self.capsfilter,
+ self.vp8enc,
+ self.webmmux)
+
+ if self.data.is_audio:
+ # Link all the audio elements in a row to webmux
+ gst.element_link_many(
+ self.audioqueue,
+ self.audiorate,
+ self.audioconvert,
+ self.audiocapsfilter,
+ self.vorbisenc,
+ self.webmmux)
+
+ gst.element_link_many(
+ self.webmmux,
+ self.progressreport,
+ self.filesink)
+
+ # Setup the message bus and connect _on_message to the pipeline
+ self._setup_bus()
+
+
+ def _on_dynamic_pad(self, dbin, pad, islast):
+ '''
+ Callback called when ``decodebin2`` has a pad that we can connect to
+ '''
+ # Intersect the capabilities of the video sink and the pad src
+ # Then check if they have no common capabilities.
+ if self.ffmpegcolorspace.get_pad_template('sink')\
+ .get_caps().intersect(pad.get_caps()).is_empty():
+ # It is NOT a video src pad.
+ pad.link(self.audioqueue.get_pad('sink'))
+ else:
+ # It IS a video src pad.
+ pad.link(self.videoqueue.get_pad('sink'))
+
+ def _setup_bus(self):
+ self.bus = self.pipeline.get_bus()
+ self.bus.add_signal_watch()
+ self.bus.connect('message', self._on_message)
+
+ def __setup_videoscale_capsfilter(self):
+ '''
+ Sets up the output format (width, height) for the video
+ '''
+ caps = ['video/x-raw-yuv', 'pixel-aspect-ratio=1/1', 'framerate=30/1']
+
+ if self.data.videoheight > self.data.videowidth:
+ # Whoa! We have ourselves a portrait video!
+ caps.append('height={0}'.format(
+ self.destination_dimensions[1]))
+ else:
+ # It's a landscape, phew, how normal.
+ caps.append('width={0}'.format(
+ self.destination_dimensions[0]))
+
+ self.capsfilter.set_property(
+ 'caps',
+ gst.caps_from_string(
+ ','.join(caps)))
+
+ def _on_message(self, bus, message):
+ _log.debug((bus, message, message.type))
+
+ t = message.type
+
+ if t == gst.MESSAGE_EOS:
+ self._discover_dst_and_stop()
+ _log.info('Done')
+
+ elif t == gst.MESSAGE_ELEMENT:
+ if message.structure.get_name() == 'progress':
+ data = dict(message.structure)
+
+ if self._progress_callback:
+ self._progress_callback(data)
+
+ _log.info('{percent}% done...'.format(
+ percent=data.get('percent')))
+ _log.debug(data)
+
+ elif t == gst.MESSAGE_ERROR:
+ _log.error((bus, message))
+ self.__stop()
+
+ def _discover_dst_and_stop(self):
+ self.dst_discoverer = discoverer.Discoverer(self.destination_path)
+
+ self.dst_discoverer.connect('discovered', self.__dst_discovered)
+
+ self.dst_discoverer.discover()
+
+
+ def __dst_discovered(self, data, is_media):
+ self.dst_data = data
+
+ self.__stop()
+
+ def __stop(self):
+ _log.debug(self.loop)
+
+ # Stop executing the pipeline
+ self.pipeline.set_state(gst.STATE_NULL)
+
+ # This kills the loop, mercifully
+ gobject.idle_add(self.__stop_mainloop)
+
+ def __stop_mainloop(self):
+ '''
+ Wrapper for gobject.MainLoop.quit()
+
+ This wrapper makes us able to see if self.loop.quit has been called
+ '''
+ _log.info('Terminating MainLoop')
+
+ self.loop.quit()
+
+
+if __name__ == '__main__':
+ os.nice(19)
+ from optparse import OptionParser
+
+ parser = OptionParser(
+ usage='%prog [-v] -a [ video | thumbnail ] SRC DEST')
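+    # Example invocation (assumes GStreamer 0.10, pygst and pygtk installed):
+    #   python transcoders.py -a video input.avi output.webm
+    #   python transcoders.py -a thumbnail input.avi thumb.jpg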
+
+ parser.add_option('-a', '--action',
+ dest='action',
+ help='One of "video" or "thumbnail"')
+
+ parser.add_option('-v',
+ dest='verbose',
+ action='store_true',
+ help='Output debug information')
+
+ parser.add_option('-q',
+ dest='quiet',
+ action='store_true',
+ help='Dear program, please be quiet unless *error*')
+
+ (options, args) = parser.parse_args()
+
+ if options.verbose:
+ _log.setLevel(logging.DEBUG)
+ else:
+ _log.setLevel(logging.INFO)
+
+ if options.quiet:
+ _log.setLevel(logging.ERROR)
+
+ _log.debug(args)
+
+ if not len(args) == 2:
+ parser.print_help()
+ sys.exit()
+
+ if options.action == 'thumbnail':
+ VideoThumbnailer(*args)
+ elif options.action == 'video':
+ def cb(data):
+ print('I\'m a callback!')
+ transcoder = VideoTranscoder(*args, progress_callback=cb)
diff --git a/mediagoblin/process_media/errors.py b/mediagoblin/process_media/errors.py
deleted file mode 100644
index 4224a3e1..00000000
--- a/mediagoblin/process_media/errors.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# GNU MediaGoblin -- federated, autonomous media hosting
-# Copyright (C) 2011 MediaGoblin contributors. See AUTHORS.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
-
-
-class BaseProcessingFail(Exception):
- """
- Base exception that all other processing failure messages should
- subclass from.
-
- You shouldn't call this itself; instead you should subclass it
- and provid the exception_path and general_message applicable to
- this error.
- """
- general_message = u''
-
- @property
- def exception_path(self):
- return u"%s:%s" % (
- self.__class__.__module__, self.__class__.__name__)
-
- def __init__(self, **metadata):
- self.metadata = metadata or {}
-
-
-class BadMediaFail(BaseProcessingFail):
- """
- Error that should be raised when an inappropriate file was given
- for the media type specified.
- """
- general_message = _(u'Invalid file given for media type.')
diff --git a/mediagoblin/processing.py b/mediagoblin/processing.py
new file mode 100644
index 00000000..89c4ac89
--- /dev/null
+++ b/mediagoblin/processing.py
@@ -0,0 +1,143 @@
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011 MediaGoblin contributors. See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from celery.task import Task
+
+from mediagoblin.db.util import ObjectId
+from mediagoblin import mg_globals as mgg
+
+from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
+
+from mediagoblin.media_types import get_media_manager
+
+
+THUMB_SIZE = 180, 180
+MEDIUM_SIZE = 640, 640
+
+
+def create_pub_filepath(entry, filename):
+ return mgg.public_store.get_unique_filepath(
+ ['media_entries',
+ unicode(entry._id),
+ filename])
+
+
+################################
+# Media processing initial steps
+################################
+
+class ProcessMedia(Task):
+ """
+ DEPRECATED -- This now resides in the individual media plugins
+
+ Pass this entry off for processing.
+ """
+ def run(self, media_id):
+ """
+ Pass the media entry off to the appropriate processing function
+        (looked up via the entry's media_type and its media manager)
+ """
+ entry = mgg.database.MediaEntry.one(
+ {'_id': ObjectId(media_id)})
+
+ # Try to process, and handle expected errors.
+ try:
+ #__import__(entry['media_type'])
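+            # get_media_manager() imports the media type module as a side
+            # effect, so no explicit __import__ is needed here.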
+ manager = get_media_manager(entry['media_type'])
+ manager['processor'](entry)
+ except BaseProcessingFail, exc:
+ mark_entry_failed(entry._id, exc)
+ return
+        except ImportError, exc:
+            mark_entry_failed(entry[u'_id'], exc)
+            return
+
+ entry['state'] = u'processed'
+ entry.save()
+
+ def on_failure(self, exc, task_id, args, kwargs, einfo):
+ """
+ If the processing failed we should mark that in the database.
+
+ Assuming that the exception raised is a subclass of
+ BaseProcessingFail, we can use that to get more information
+ about the failure and store that for conveying information to
+ users about the failure, etc.
+ """
+ entry_id = args[0]
+ mark_entry_failed(entry_id, exc)
+
+
+def mark_entry_failed(entry_id, exc):
+ """
+ Mark a media entry as having failed in its conversion.
+
+ Uses the exception that was raised to mark more information. If
+ the exception is a derivative of BaseProcessingFail then we can
+ store extra information that can be useful for users telling them
+ why their media failed to process.
+
+ Args:
+ - entry_id: The id of the media entry
+
+ """
+ # Was this a BaseProcessingFail? In other words, was this a
+ # type of error that we know how to handle?
+ if isinstance(exc, BaseProcessingFail):
+ # Looks like yes, so record information about that failure and any
+ # metadata the user might have supplied.
+ mgg.database['media_entries'].update(
+ {'_id': entry_id},
+ {'$set': {u'state': u'failed',
+ u'fail_error': exc.exception_path,
+ u'fail_metadata': exc.metadata}})
+ else:
+ # Looks like no, so just mark it as failed and don't record a
+ # failure_error (we'll assume it wasn't handled) and don't record
+ # metadata (in fact overwrite it if somehow it had previous info
+ # here)
+ mgg.database['media_entries'].update(
+ {'_id': entry_id},
+ {'$set': {u'state': u'failed',
+ u'fail_error': None,
+ u'fail_metadata': {}}})
+
+
+class BaseProcessingFail(Exception):
+ """
+ Base exception that all other processing failure messages should
+ subclass from.
+
+ You shouldn't call this itself; instead you should subclass it
+    and provide the exception_path and general_message applicable to
+ this error.
+ """
+ general_message = u''
+
+ @property
+ def exception_path(self):
+ return u"%s:%s" % (
+ self.__class__.__module__, self.__class__.__name__)
+
+ def __init__(self, **metadata):
+ self.metadata = metadata or {}
+
+
+class BadMediaFail(BaseProcessingFail):
+ """
+ Error that should be raised when an inappropriate file was given
+ for the media type specified.
+ """
+ general_message = _(u'Invalid file given for media type.')
diff --git a/mediagoblin/static/images/media_thumbs/video.jpg b/mediagoblin/static/images/media_thumbs/video.jpg
new file mode 100644
index 00000000..841dc796
Binary files /dev/null and b/mediagoblin/static/images/media_thumbs/video.jpg differ
diff --git a/mediagoblin/storage/cloudfiles.py b/mediagoblin/storage/cloudfiles.py
index 0d3cc3df..51b73579 100644
--- a/mediagoblin/storage/cloudfiles.py
+++ b/mediagoblin/storage/cloudfiles.py
@@ -98,8 +98,14 @@ class CloudFilesStorage(StorageInterface):
def delete_file(self, filepath):
# TODO: Also delete unused directories if empty (safely, with
# checks to avoid race conditions).
- self.container.delete_object(
- self._resolve_filepath(filepath))
+ try:
+ self.container.delete_object(
+ self._resolve_filepath(filepath))
+ except cloudfiles.container.ResponseError:
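+            # A failed delete (e.g. the object is already gone) shouldn't
+            # bring down the caller, so swallow the response error.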
+ pass
+ finally:
+ pass
+
def file_url(self, filepath):
return '/'.join([
diff --git a/mediagoblin/submit/views.py b/mediagoblin/submit/views.py
index 139b1d1d..3def44ce 100644
--- a/mediagoblin/submit/views.py
+++ b/mediagoblin/submit/views.py
@@ -19,6 +19,8 @@ import uuid
from os.path import splitext
from cgi import FieldStorage
+from celery import registry
+
from werkzeug.utils import secure_filename
from mediagoblin.db.util import ObjectId
@@ -27,8 +29,9 @@ from mediagoblin.tools.translate import pass_to_ugettext as _
from mediagoblin.tools.response import render_to_response, redirect
from mediagoblin.decorators import require_active_login
from mediagoblin.submit import forms as submit_forms, security
-from mediagoblin.process_media import process_media, mark_entry_failed
+from mediagoblin.processing import mark_entry_failed, ProcessMedia
from mediagoblin.messages import add_message, SUCCESS
+from mediagoblin.media_types import get_media_type_and_manager, InvalidFileType
@require_active_login
@@ -44,86 +47,90 @@ def submit_start(request):
and request.POST['file'].file):
submit_form.file.errors.append(
_(u'You must provide a file.'))
- elif not security.check_filetype(request.POST['file']):
- submit_form.file.errors.append(
- _(u"The file doesn't seem to be an image!"))
else:
- filename = request.POST['file'].filename
-
- # create entry and save in database
- entry = request.db.MediaEntry()
- entry['_id'] = ObjectId()
- entry['title'] = (
- unicode(request.POST['title'])
- or unicode(splitext(filename)[0]))
-
- entry['description'] = unicode(request.POST.get('description'))
- entry['description_html'] = cleaned_markdown_conversion(
- entry['description'])
-
- entry['media_type'] = u'image' # heh
- entry['uploader'] = request.user._id
-
- # Process the user's folksonomy "tags"
- entry['tags'] = convert_to_tag_list_of_dicts(
- request.POST.get('tags'))
-
- # Generate a slug from the title
- entry.generate_slug()
-
- # Now store generate the queueing related filename
- queue_filepath = request.app.queue_store.get_unique_filepath(
- ['media_entries',
- unicode(entry._id),
- secure_filename(filename)])
-
- # queue appropriately
- queue_file = request.app.queue_store.get_file(
- queue_filepath, 'wb')
-
- with queue_file:
- queue_file.write(request.POST['file'].file.read())
-
- # Add queued filename to the entry
- entry['queued_media_file'] = queue_filepath
-
- # We generate this ourselves so we know what the taks id is for
- # retrieval later.
-
- # (If we got it off the task's auto-generation, there'd be
- # a risk of a race condition when we'd save after sending
- # off the task)
- task_id = unicode(uuid.uuid4())
- entry['queued_task_id'] = task_id
-
- # Save now so we have this data before kicking off processing
- entry.save(validate=True)
-
- # Pass off to processing
- #
- # (... don't change entry after this point to avoid race
- # conditions with changes to the document via processing code)
try:
- process_media.apply_async(
- [unicode(entry._id)], {},
- task_id=task_id)
- except BaseException as exc:
- # The purpose of this section is because when running in "lazy"
- # or always-eager-with-exceptions-propagated celery mode that
- # the failure handling won't happen on Celery end. Since we
- # expect a lot of users to run things in this way we have to
- # capture stuff here.
+ filename = request.POST['file'].filename
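+                # get_media_type_and_manager picks a media type based on the
+                # file extension; it raises InvalidFileType when the filename
+                # has no usable extension, which is turned into a form error
+                # at the end of this block.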
+ media_type, media_manager = get_media_type_and_manager(filename)
+
+ # create entry and save in database
+ entry = request.db.MediaEntry()
+ entry['_id'] = ObjectId()
+ entry['media_type'] = unicode(media_type)
+ entry['title'] = (
+ unicode(request.POST['title'])
+ or unicode(splitext(filename)[0]))
+
+ entry['description'] = unicode(request.POST.get('description'))
+ entry['description_html'] = cleaned_markdown_conversion(
+ entry['description'])
+
+ entry['uploader'] = request.user['_id']
+
+ # Process the user's folksonomy "tags"
+ entry['tags'] = convert_to_tag_list_of_dicts(
+ request.POST.get('tags'))
+
+ # Generate a slug from the title
+ entry.generate_slug()
+
+
+            # Now generate the queueing-related filename
+ queue_filepath = request.app.queue_store.get_unique_filepath(
+ ['media_entries',
+ unicode(entry._id),
+ secure_filename(filename)])
+
+ # queue appropriately
+ queue_file = request.app.queue_store.get_file(
+ queue_filepath, 'wb')
+
+ with queue_file:
+ queue_file.write(request.POST['file'].file.read())
+
+ # Add queued filename to the entry
+ entry['queued_media_file'] = queue_filepath
+
+            # We generate this ourselves so we know what the task id is for
+ # retrieval later.
+
+ # (If we got it off the task's auto-generation, there'd be
+ # a risk of a race condition when we'd save after sending
+ # off the task)
+ task_id = unicode(uuid.uuid4())
+ entry['queued_task_id'] = task_id
+
+ # Save now so we have this data before kicking off processing
+ entry.save(validate=True)
+
+ # Pass off to processing
#
- # ... not completely the diaper pattern because the
- # exception is re-raised :)
- mark_entry_failed(entry._id, exc)
- # re-raise the exception
- raise
+ # (... don't change entry after this point to avoid race
+ # conditions with changes to the document via processing code)
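+            # The ProcessMedia task is looked up in the celery registry here
+            # rather than imported as a module-level process_media object.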
+ process_media = registry.tasks[ProcessMedia.name]
+ try:
+ process_media.apply_async(
+ [unicode(entry._id)], {},
+ task_id=task_id)
+ except BaseException as exc:
+ # The purpose of this section is because when running in "lazy"
+ # or always-eager-with-exceptions-propagated celery mode that
+ # the failure handling won't happen on Celery end. Since we
+ # expect a lot of users to run things in this way we have to
+ # capture stuff here.
+ #
+ # ... not completely the diaper pattern because the
+ # exception is re-raised :)
+ mark_entry_failed(entry._id, exc)
+ # re-raise the exception
+ raise
- add_message(request, SUCCESS, _('Woohoo! Submitted!'))
+ add_message(request, SUCCESS, _('Woohoo! Submitted!'))
- return redirect(request, "mediagoblin.user_pages.user_home",
- user=request.user['username'])
+ return redirect(request, "mediagoblin.user_pages.user_home",
+ user=request.user['username'])
+ except InvalidFileType, exc:
+ submit_form.file.errors.append(
+ _(u'Invalid file type.'))
return render_to_response(
request,
diff --git a/mediagoblin/templates/mediagoblin/base.html b/mediagoblin/templates/mediagoblin/base.html
index ede5f5c6..3dd9c8ff 100644
--- a/mediagoblin/templates/mediagoblin/base.html
+++ b/mediagoblin/templates/mediagoblin/base.html
@@ -28,8 +28,15 @@
href="{{ request.staticdirect('/css/extlib/960_16_col.css') }}"/>
{% block mediagoblin_head %}
{% endblock mediagoblin_head %}
diff --git a/mediagoblin/templates/mediagoblin/media_displays/image.html b/mediagoblin/templates/mediagoblin/media_displays/image.html
new file mode 100644
index 00000000..ad60fa94
--- /dev/null
+++ b/mediagoblin/templates/mediagoblin/media_displays/image.html
@@ -0,0 +1 @@
+{% extends 'mediagoblin/user_pages/media.html' %}
diff --git a/mediagoblin/templates/mediagoblin/media_displays/video.html b/mediagoblin/templates/mediagoblin/media_displays/video.html
new file mode 100644
index 00000000..5b8ec789
--- /dev/null
+++ b/mediagoblin/templates/mediagoblin/media_displays/video.html
@@ -0,0 +1,25 @@
+{% extends 'mediagoblin/user_pages/media.html' %}
+{% block mediagoblin_media %}
+