From aa387fc57ec1c176a739df8c5f4cedaf70ae9af1 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 10:23:37 -0700
Subject: [PATCH 001/160] use parser.parse_known_args() instead of
parser.parse_args()
---
mediagoblin/gmg_commands/__init__.py | 12 +++++------
mediagoblin/gmg_commands/assetlink.py | 2 +-
mediagoblin/gmg_commands/dbupdate.py | 2 +-
mediagoblin/gmg_commands/import_export.py | 26 +++++++++++------------
mediagoblin/gmg_commands/shell.py | 4 ++--
mediagoblin/gmg_commands/users.py | 26 +++++++++++------------
6 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/mediagoblin/gmg_commands/__init__.py b/mediagoblin/gmg_commands/__init__.py
index d8156126..dc3409f9 100644
--- a/mediagoblin/gmg_commands/__init__.py
+++ b/mediagoblin/gmg_commands/__init__.py
@@ -92,16 +92,16 @@ def main_cli():
subparser.set_defaults(func=exec_func)
- args = parser.parse_args()
- args.orig_conf_file = args.conf_file
- if args.conf_file is None:
+ args = parser.parse_known_args()
+ args[0].orig_conf_file = args[0].conf_file
+ if args[0].conf_file is None:
if os.path.exists('mediagoblin_local.ini') \
and os.access('mediagoblin_local.ini', os.R_OK):
- args.conf_file = 'mediagoblin_local.ini'
+ args[0].conf_file = 'mediagoblin_local.ini'
else:
- args.conf_file = 'mediagoblin.ini'
+ args[0].conf_file = 'mediagoblin.ini'
- args.func(args)
+ args[0].func(args)
if __name__ == '__main__':
diff --git a/mediagoblin/gmg_commands/assetlink.py b/mediagoblin/gmg_commands/assetlink.py
index 148ebe9e..49e27e33 100644
--- a/mediagoblin/gmg_commands/assetlink.py
+++ b/mediagoblin/gmg_commands/assetlink.py
@@ -138,7 +138,7 @@ def assetlink(args):
"""
Link the asset directory of the currently installed theme and plugins
"""
- mgoblin_app = commands_util.setup_app(args)
+ mgoblin_app = commands_util.setup_app(args[0])
app_config = mg_globals.app_config
# link theme
diff --git a/mediagoblin/gmg_commands/dbupdate.py b/mediagoblin/gmg_commands/dbupdate.py
index 961752f6..fb533d0a 100644
--- a/mediagoblin/gmg_commands/dbupdate.py
+++ b/mediagoblin/gmg_commands/dbupdate.py
@@ -147,5 +147,5 @@ def run_all_migrations(db, app_config, global_config):
def dbupdate(args):
- global_config, app_config = setup_global_and_app_config(args.conf_file)
+ global_config, app_config = setup_global_and_app_config(args[0].conf_file)
run_dbupdate(app_config, global_config)
diff --git a/mediagoblin/gmg_commands/import_export.py b/mediagoblin/gmg_commands/import_export.py
index 98ec617d..2a624b96 100644
--- a/mediagoblin/gmg_commands/import_export.py
+++ b/mediagoblin/gmg_commands/import_export.py
@@ -96,27 +96,27 @@ def env_import(args):
'''
Restore mongo database and media files from a tar archive
'''
- if not args.cache_path:
- args.cache_path = tempfile.mkdtemp()
+ if not args[0].cache_path:
+ args[0].cache_path = tempfile.mkdtemp()
- setup_global_and_app_config(args.conf_file)
+ setup_global_and_app_config(args[0].conf_file)
# Creates mg_globals.public_store and mg_globals.queue_store
setup_storage()
- global_config, app_config = setup_global_and_app_config(args.conf_file)
+ global_config, app_config = setup_global_and_app_config(args[0].conf_file)
db = setup_connection_and_db_from_config(
app_config)
tf = tarfile.open(
- args.tar_file,
+ args[0].tar_file,
mode='r|gz')
- tf.extractall(args.cache_path)
+ tf.extractall(args[0].cache_path)
- args.cache_path = os.path.join(
- args.cache_path, 'mediagoblin-data')
- args = _setup_paths(args)
+ args[0].cache_path = os.path.join(
+ args[0].cache_path, 'mediagoblin-data')
+ args = _setup_paths(args[0])
# Import database from extracted data
_import_database(db, args)
@@ -224,16 +224,16 @@ def env_export(args):
Export database and media files to a tar archive
'''
if args.cache_path:
- if os.path.exists(args.cache_path):
+ if os.path.exists(args[0].cache_path):
_log.error('The cache directory must not exist '
'before you run this script')
- _log.error('Cache directory: {0}'.format(args.cache_path))
+ _log.error('Cache directory: {0}'.format(args[0].cache_path))
return False
else:
- args.cache_path = tempfile.mkdtemp()
+ args[0].cache_path = tempfile.mkdtemp()
- args = _setup_paths(args)
+ args = _setup_paths(args[0])
if not _export_check(args):
_log.error('Checks did not pass, exiting')
diff --git a/mediagoblin/gmg_commands/shell.py b/mediagoblin/gmg_commands/shell.py
index 4998acd7..b19af837 100644
--- a/mediagoblin/gmg_commands/shell.py
+++ b/mediagoblin/gmg_commands/shell.py
@@ -65,10 +65,10 @@ def shell(args):
"""
user_namespace = {
'mg_globals': mg_globals,
- 'mgoblin_app': commands_util.setup_app(args),
+ 'mgoblin_app': commands_util.setup_app(args[0]),
'db': mg_globals.database}
- if args.ipython:
+ if args[0].ipython:
ipython_shell(**user_namespace)
else:
# Try ipython_shell first and fall back if not available
diff --git a/mediagoblin/gmg_commands/users.py b/mediagoblin/gmg_commands/users.py
index e44b0aa9..c2a4dddb 100644
--- a/mediagoblin/gmg_commands/users.py
+++ b/mediagoblin/gmg_commands/users.py
@@ -32,16 +32,16 @@ def adduser_parser_setup(subparser):
def adduser(args):
#TODO: Let's trust admins; this does not validate emails :)
- commands_util.setup_app(args)
+ commands_util.setup_app(args[0])
- args.username = commands_util.prompt_if_not_set(args.username, "Username:")
- args.password = commands_util.prompt_if_not_set(args.password, "Password:",True)
- args.email = commands_util.prompt_if_not_set(args.email, "Email:")
+ args[0].username = commands_util.prompt_if_not_set(args[0].username, "Username:")
+ args[0].password = commands_util.prompt_if_not_set(args[0].password, "Password:",True)
+ args[0].email = commands_util.prompt_if_not_set(args[0].email, "Email:")
db = mg_globals.database
users_with_username = \
db.User.query.filter_by(
- username=args.username.lower()
+ username=args[0].username.lower()
).count()
if users_with_username:
@@ -50,9 +50,9 @@ def adduser(args):
else:
# Create the user
entry = db.User()
- entry.username = unicode(args.username.lower())
- entry.email = unicode(args.email)
- entry.pw_hash = auth.gen_password_hash(args.password)
+ entry.username = unicode(args[0].username.lower())
+ entry.email = unicode(args[0].email)
+ entry.pw_hash = auth.gen_password_hash(args[0].password)
entry.status = u'active'
entry.email_verified = True
entry.save()
@@ -67,12 +67,12 @@ def makeadmin_parser_setup(subparser):
def makeadmin(args):
- commands_util.setup_app(args)
+ commands_util.setup_app(args[0])
db = mg_globals.database
user = db.User.query.filter_by(
- username=unicode(args.username.lower())).one()
+ username=unicode(args[0].username.lower())).one()
if user:
user.is_admin = True
user.save()
@@ -91,14 +91,14 @@ def changepw_parser_setup(subparser):
def changepw(args):
- commands_util.setup_app(args)
+ commands_util.setup_app(args[0])
db = mg_globals.database
user = db.User.query.filter_by(
- username=unicode(args.username.lower())).one()
+ username=unicode(args[0].username.lower())).one()
if user:
- user.pw_hash = auth.gen_password_hash(args.password)
+ user.pw_hash = auth.gen_password_hash(args[0].password)
user.save()
print 'Password successfully changed'
else:
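The behaviour this patch builds on: parser.parse_known_args() returns a (namespace, leftovers) tuple rather than a bare namespace, which is why every access above becomes args[0].something. A minimal, standalone sketch of that difference using only stdlib argparse (the option name mirrors --conf_file from the hunk above; the sample values, including --resize, are purely illustrative):

    import argparse

    parser = argparse.ArgumentParser(prog='gmg')
    parser.add_argument('--conf_file', default=None)

    # parse_args() would abort on the unknown --resize flag;
    # parse_known_args() sets it aside for a later parser instead.
    args = parser.parse_known_args(['--conf_file', 'mediagoblin.ini',
                                    '--resize', 'thumb'])
    print(args[0].conf_file)   # 'mediagoblin.ini' -- the parsed Namespace
    print(args[1])             # ['--resize', 'thumb'] -- leftover, unrecognized tokens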
From 262c789754931d95a4bb567fc59a3ffb833ed1bb Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 11:11:04 -0700
Subject: [PATCH 002/160] Throw an error if there are unrecognized arguments
---
mediagoblin/gmg_commands/assetlink.py | 1 +
mediagoblin/gmg_commands/dbupdate.py | 2 ++
mediagoblin/gmg_commands/import_export.py | 3 +++
mediagoblin/gmg_commands/shell.py | 1 +
mediagoblin/gmg_commands/users.py | 3 +++
mediagoblin/gmg_commands/util.py | 9 ++++++++-
6 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/gmg_commands/assetlink.py b/mediagoblin/gmg_commands/assetlink.py
index 49e27e33..dff737ff 100644
--- a/mediagoblin/gmg_commands/assetlink.py
+++ b/mediagoblin/gmg_commands/assetlink.py
@@ -138,6 +138,7 @@ def assetlink(args):
"""
Link the asset directory of the currently installed theme and plugins
"""
+ commands_util.check_unrecognized_args(args)
mgoblin_app = commands_util.setup_app(args[0])
app_config = mg_globals.app_config
diff --git a/mediagoblin/gmg_commands/dbupdate.py b/mediagoblin/gmg_commands/dbupdate.py
index fb533d0a..b2efa5de 100644
--- a/mediagoblin/gmg_commands/dbupdate.py
+++ b/mediagoblin/gmg_commands/dbupdate.py
@@ -20,6 +20,7 @@ from sqlalchemy.orm import sessionmaker
from mediagoblin.db.open import setup_connection_and_db_from_config
from mediagoblin.db.migration_tools import MigrationManager
+from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.init import setup_global_and_app_config
from mediagoblin.tools.common import import_component
@@ -147,5 +148,6 @@ def run_all_migrations(db, app_config, global_config):
def dbupdate(args):
+ commands_util.check_unrecognized_args(args)
global_config, app_config = setup_global_and_app_config(args[0].conf_file)
run_dbupdate(app_config, global_config)
diff --git a/mediagoblin/gmg_commands/import_export.py b/mediagoblin/gmg_commands/import_export.py
index 2a624b96..1d4ae1f7 100644
--- a/mediagoblin/gmg_commands/import_export.py
+++ b/mediagoblin/gmg_commands/import_export.py
@@ -16,6 +16,7 @@
from mediagoblin import mg_globals
from mediagoblin.db.open import setup_connection_and_db_from_config
+from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.storage.filestorage import BasicFileStorage
from mediagoblin.init import setup_storage, setup_global_and_app_config
@@ -96,6 +97,7 @@ def env_import(args):
'''
Restore mongo database and media files from a tar archive
'''
+ commands_util.check_unrecognized_args(args)
if not args[0].cache_path:
args[0].cache_path = tempfile.mkdtemp()
@@ -223,6 +225,7 @@ def env_export(args):
'''
Export database and media files to a tar archive
'''
+ commands_util.check_unrecognized_args(args)
if args.cache_path:
if os.path.exists(args[0].cache_path):
_log.error('The cache directory must not exist '
diff --git a/mediagoblin/gmg_commands/shell.py b/mediagoblin/gmg_commands/shell.py
index b19af837..03e08b23 100644
--- a/mediagoblin/gmg_commands/shell.py
+++ b/mediagoblin/gmg_commands/shell.py
@@ -63,6 +63,7 @@ def shell(args):
"""
Setup a shell for the user either a normal Python shell or an IPython one
"""
+ commands_util.check_unrecognized_args(args)
user_namespace = {
'mg_globals': mg_globals,
'mgoblin_app': commands_util.setup_app(args[0]),
diff --git a/mediagoblin/gmg_commands/users.py b/mediagoblin/gmg_commands/users.py
index c2a4dddb..b164e672 100644
--- a/mediagoblin/gmg_commands/users.py
+++ b/mediagoblin/gmg_commands/users.py
@@ -32,6 +32,7 @@ def adduser_parser_setup(subparser):
def adduser(args):
#TODO: Let's trust admins; this does not validate emails :)
+ commands_util.check_unrecognized_args(args)
commands_util.setup_app(args[0])
args[0].username = commands_util.prompt_if_not_set(args[0].username, "Username:")
@@ -67,6 +68,7 @@ def makeadmin_parser_setup(subparser):
def makeadmin(args):
+ commands_util.check_unrecognized_args(args)
commands_util.setup_app(args[0])
db = mg_globals.database
@@ -91,6 +93,7 @@ def changepw_parser_setup(subparser):
def changepw(args):
+ commands_util.check_unrecognized_args(args)
commands_util.setup_app(args[0])
db = mg_globals.database
diff --git a/mediagoblin/gmg_commands/util.py b/mediagoblin/gmg_commands/util.py
index 6a6853d5..8b057996 100644
--- a/mediagoblin/gmg_commands/util.py
+++ b/mediagoblin/gmg_commands/util.py
@@ -17,6 +17,7 @@
from mediagoblin import app
import getpass
+import argparse
def setup_app(args):
@@ -36,5 +37,11 @@ def prompt_if_not_set(variable, text, password=False):
variable=raw_input(text + u' ')
else:
variable=getpass.getpass(text + u' ')
-
+
return variable
+
+
+def check_unrecognized_args(args):
+ if args[1]:
+ parser = argparse.ArgumentParser()
+ parser.error('unrecognized arguments: {}'.format(args[1]))
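A usage sketch of the new helper: the leftovers list from parse_known_args() (args[1]) is what check_unrecognized_args() inspects, and the throwaway ArgumentParser exists only for its error() method, which prints a usage line and exits with status 2. The helper body below repeats the hunk above; the surrounding parser and the --bogus token are invented stand-ins for illustration:

    import argparse

    def check_unrecognized_args(args):
        if args[1]:
            parser = argparse.ArgumentParser()
            parser.error('unrecognized arguments: {}'.format(args[1]))

    parser = argparse.ArgumentParser(prog='gmg')
    parser.add_argument('--conf_file', default=None)
    args = parser.parse_known_args(['--bogus'])

    check_unrecognized_args(args)   # prints usage plus the error, exits with status 2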
From c30714805b1936497ea846ab96e9104f5e1176ef Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 12:32:49 -0700
Subject: [PATCH 003/160] Beginnings of a reprocess command
---
mediagoblin/gmg_commands/__init__.py | 4 +++
mediagoblin/gmg_commands/reprocess.py | 41 +++++++++++++++++++++++++++
2 files changed, 45 insertions(+)
create mode 100644 mediagoblin/gmg_commands/reprocess.py
diff --git a/mediagoblin/gmg_commands/__init__.py b/mediagoblin/gmg_commands/__init__.py
index dc3409f9..d3b28a3d 100644
--- a/mediagoblin/gmg_commands/__init__.py
+++ b/mediagoblin/gmg_commands/__init__.py
@@ -45,6 +45,10 @@ SUBCOMMAND_MAP = {
'setup': 'mediagoblin.gmg_commands.assetlink:assetlink_parser_setup',
'func': 'mediagoblin.gmg_commands.assetlink:assetlink',
'help': 'Link assets for themes and plugins for static serving'},
+ 'reprocess': {
+ 'setup': 'mediagoblin.gmg_commands.reprocess:reprocess_parser_setup',
+ 'func': 'mediagoblin.gmg_commands.reprocess:reprocess',
+ 'help': 'Reprocess media entries'},
# 'theme': {
# 'setup': 'mediagoblin.gmg_commands.theme:theme_parser_setup',
# 'func': 'mediagoblin.gmg_commands.theme:theme',
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
new file mode 100644
index 00000000..1cc9f71a
--- /dev/null
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -0,0 +1,41 @@
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+def reprocess_parser_setup(subparser):
+ subparser.add_argument(
+ '--available', '-a',
+ action="store_true",
+ help="List available actions for a given media entry")
+ subparser.add_argument(
+ '--all', '-A',
+ action="store_true",
+ help="Reprocess all media entries")
+ subparser.add_argument(
+ '--state', '-s',
+ help="Reprocess media entries in this state"
+ " such as 'failed' or 'processed'")
+ subparser.add_argument(
+ '--type', '-t',
+ help="The type of media to be reprocessed such as 'video' or 'image'")
+ subparser.add_argument(
+ 'media_id',
+ nargs='*',
+ help="The media_entry id(s) you wish to reprocess.")
+
+
+def reprocess(args):
+ pass
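The new SUBCOMMAND_MAP entry registers the subcommand lazily as "dotted.module:callable" strings, so the module is only imported when the subcommand is actually invoked. MediaGoblin has an import_component() helper for resolving such strings (see the dbupdate import later in this series); the function below is only an illustrative stand-in for how that resolution can work, not its real implementation:

    import importlib

    def load_component(spec):
        """Resolve a 'dotted.module:callable' string to the object it names."""
        module_name, attr_name = spec.split(':', 1)
        return getattr(importlib.import_module(module_name), attr_name)

    # e.g. load_component('mediagoblin.gmg_commands.reprocess:reprocess')
    # would import the new module added above and return its reprocess() function.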
From 99b34c4ce68b636583657bccec137d04875a5bf1 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 12:35:49 -0700
Subject: [PATCH 004/160] Added a set_media_type function that has checks to
only reprocess one media_type at a time
---
mediagoblin/gmg_commands/reprocess.py | 48 ++++++++++++++++++++++++++-
1 file changed, 47 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 1cc9f71a..9dbadefb 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -13,6 +13,9 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from mediagoblin.db.models import MediaEntry
+from mediagoblin.gmg_commands import util as commands_util
+from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
def reprocess_parser_setup(subparser):
@@ -37,5 +40,48 @@ def reprocess_parser_setup(subparser):
help="The media_entry id(s) you wish to reprocess.")
-def reprocess(args):
+class MismatchingMediaTypes(Exception):
+ """
+ Error that should be raised if the media_types are not the same
+ """
pass
+
+
+def _set_media_type(args):
+ if len(args[0].media_id) == 1:
+ media_type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
+ .first().media_type.split('.')[-1]
+
+ if not args[0].type:
+ args[0].type = media_type
+ elif args[0].type != media_type:
+ raise MismatchingMediaTypes(_('The type that you set does not'
+ ' match the type of the given'
+ ' media_id.'))
+ elif len(args[0].media_id) > 1:
+ media_types = []
+
+ for id in args[0].media_id:
+ media_types.append(MediaEntry.query.filter_by(id=id).first()\
+ .media_type.split('.')[-1])
+ for type in media_types:
+ if media_types[0] != type:
+ raise MismatchingMediaTypes((u'You cannot reprocess different'
+ ' media_types at the same time.'))
+
+ if not args[0].type:
+ args[0].type = media_types[0]
+ elif args[0].type != media_types[0]:
+ raise MismatchingMediaTypes(_('The type that you set does not'
+ ' match the type of the given'
+ ' media_ids.'))
+
+ elif not args[0].type:
+ raise MismatchingMediaTypes(_('You must provide either a media_id or'
+ ' set the --type flag'))
+
+
+def reprocess(args):
+ commands_util.setup_app(args[0])
+
+ _set_media_type(args)
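For context on the split('.')[-1] above: MediaEntry.media_type stores the full dotted module path of the media type, while the --type flag takes just the short suffix. A tiny illustration (the stored value matches the MEDIA_TYPE constant the image plugin defines later in this series):

    media_type = 'mediagoblin.media_types.image'   # as stored on a MediaEntry
    print(media_type.split('.')[-1])               # 'image', comparable to args[0].type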
From 6fc8aaf65f483ab523cab74489dd3421314e2b7e Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 13:23:40 -0700
Subject: [PATCH 005/160] add reprocess_all function. still need to add code to
reprocess all failed entries
---
mediagoblin/gmg_commands/reprocess.py | 28 +++++++++++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 9dbadefb..f458cd1d 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -16,6 +16,7 @@
from mediagoblin.db.models import MediaEntry
from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
+from mediagoblin.tools.pluginapi import hook_handle
def reprocess_parser_setup(subparser):
@@ -81,7 +82,34 @@ def _set_media_type(args):
' set the --type flag'))
+def _reprocess_all(args):
+ if not args[0].type:
+ if args[0].state == 'failed':
+ if args[0].available:
+ print '\n Available reprocess actions for all failed' \
+ ' media_entries: \n \t --initial_processing'
+ return
+ else:
+ #TODO reprocess all failed entries
+ pass
+ else:
+ raise Exception(_('You must set --type when trying to reprocess'
+ ' all media_entries, unless you set --state'
+ ' to "failed".'))
+
+ if args[0].available:
+ return hook_handle(('reprocess_action', args[0].type), args)
+ else:
+ return hook_handle(('media_reprocess', args[0].type), args)
+
+
def reprocess(args):
commands_util.setup_app(args[0])
+ if not args[0].state:
+ args[0].state = 'processed'
+
+ if args[0].all:
+ return _reprocess_all(args)
+
_set_media_type(args)
From 7c1f6a6aeea47c27a56e3f39e0a6cf33d9dd2486 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 13:47:44 -0700
Subject: [PATCH 006/160] added a _run_reprocessing function which handles the
hook calls
---
mediagoblin/gmg_commands/reprocess.py | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index f458cd1d..50434bd2 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -97,6 +97,10 @@ def _reprocess_all(args):
' all media_entries, unless you set --state'
' to "failed".'))
+ _run_reprocessing(args)
+
+
+def _run_reprocessing(args):
if args[0].available:
return hook_handle(('reprocess_action', args[0].type), args)
else:
@@ -113,3 +117,5 @@ def reprocess(args):
return _reprocess_all(args)
_set_media_type(args)
+
+ return _run_reprocessing(args)
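Both branches dispatch through pluginapi.hook_handle() keyed on a (hook name, media type) tuple, and media type plugins register handlers under the same tuple keys (patch 014 below does this for 'image'). The snippet is a simplified, hypothetical registry meant only to show the shape of that lookup, not MediaGoblin's real hook machinery:

    # Hypothetical registry standing in for mediagoblin.tools.pluginapi.
    HOOKS = {}

    def register(key, handler):
        HOOKS[key] = handler

    def hook_handle_sketch(key, *call_args):
        handler = HOOKS.get(key)
        if handler is None:
            return None              # no plugin registered for this (hook, type) pair
        return handler(*call_args)

    register(('media_reprocess', 'image'), lambda args: 'would reprocess images')
    print(hook_handle_sketch(('media_reprocess', 'image'), ('fake-namespace', [])))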
From 81d880b16adb2d6c872e0ad37ffe34bf7bfaba6c Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 13:51:36 -0700
Subject: [PATCH 007/160] Just raise standard exception. Pass print statement
to gettext
---
mediagoblin/gmg_commands/reprocess.py | 29 +++++++++------------------
1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 50434bd2..2158d36e 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -41,13 +41,6 @@ def reprocess_parser_setup(subparser):
help="The media_entry id(s) you wish to reprocess.")
-class MismatchingMediaTypes(Exception):
- """
- Error that should be raised if the media_types are not the same
- """
- pass
-
-
def _set_media_type(args):
if len(args[0].media_id) == 1:
media_type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
@@ -56,9 +49,8 @@ def _set_media_type(args):
if not args[0].type:
args[0].type = media_type
elif args[0].type != media_type:
- raise MismatchingMediaTypes(_('The type that you set does not'
- ' match the type of the given'
- ' media_id.'))
+ raise Exception(_('The type that you set does not match the type'
+ ' of the given media_id.'))
elif len(args[0].media_id) > 1:
media_types = []
@@ -67,27 +59,26 @@ def _set_media_type(args):
.media_type.split('.')[-1])
for type in media_types:
if media_types[0] != type:
- raise MismatchingMediaTypes((u'You cannot reprocess different'
- ' media_types at the same time.'))
+ raise Exception((u'You cannot reprocess different media_types'
+ ' at the same time.'))
if not args[0].type:
args[0].type = media_types[0]
elif args[0].type != media_types[0]:
- raise MismatchingMediaTypes(_('The type that you set does not'
- ' match the type of the given'
- ' media_ids.'))
+ raise Exception(_('The type that you set does not match the type'
+ ' of the given media_ids.'))
elif not args[0].type:
- raise MismatchingMediaTypes(_('You must provide either a media_id or'
- ' set the --type flag'))
+ raise Exception(_('You must provide either a media_id or set the'
+ ' --type flag'))
def _reprocess_all(args):
if not args[0].type:
if args[0].state == 'failed':
if args[0].available:
- print '\n Available reprocess actions for all failed' \
- ' media_entries: \n \t --initial_processing'
+ print _('\n Available reprocess actions for all failed' \
+ ' media_entries: \n \t --initial_processing')
return
else:
#TODO reprocess all failed entries
From 4a36407d39a18f295e5fc09125fac8e2a7252f55 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 13:53:20 -0700
Subject: [PATCH 008/160] Pep 8
---
mediagoblin/gmg_commands/reprocess.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 2158d36e..9390861f 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -44,7 +44,7 @@ def reprocess_parser_setup(subparser):
def _set_media_type(args):
if len(args[0].media_id) == 1:
media_type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
- .first().media_type.split('.')[-1]
+ .first().media_type.split('.')[-1]
if not args[0].type:
args[0].type = media_type
@@ -55,7 +55,7 @@ def _set_media_type(args):
media_types = []
for id in args[0].media_id:
- media_types.append(MediaEntry.query.filter_by(id=id).first()\
+ media_types.append(MediaEntry.query.filter_by(id=id).first()
.media_type.split('.')[-1])
for type in media_types:
if media_types[0] != type:
@@ -77,8 +77,8 @@ def _reprocess_all(args):
if not args[0].type:
if args[0].state == 'failed':
if args[0].available:
- print _('\n Available reprocess actions for all failed' \
- ' media_entries: \n \t --initial_processing')
+ print _('\n Available reprocess actions for all failed'
+ ' media_entries: \n \t --initial_processing')
return
else:
#TODO reprocess all failed entries
From 243756e0205b2c8c009b6ac3e96eca8923508c38 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 14:47:24 -0700
Subject: [PATCH 009/160] added a set_media_state function. removed the --all
flag (just don't enter any media_ids to process all media). slight refactor
---
mediagoblin/gmg_commands/reprocess.py | 44 +++++++++++++++++----------
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 9390861f..cad75c45 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -24,10 +24,6 @@ def reprocess_parser_setup(subparser):
'--available', '-a',
action="store_true",
help="List available actions for a given media entry")
- subparser.add_argument(
- '--all', '-A',
- action="store_true",
- help="Reprocess all media entries")
subparser.add_argument(
'--state', '-s',
help="Reprocess media entries in this state"
@@ -49,7 +45,7 @@ def _set_media_type(args):
if not args[0].type:
args[0].type = media_type
elif args[0].type != media_type:
- raise Exception(_('The type that you set does not match the type'
+ raise Exception(_('The --type that you set does not match the type'
' of the given media_id.'))
elif len(args[0].media_id) > 1:
media_types = []
@@ -65,13 +61,9 @@ def _set_media_type(args):
if not args[0].type:
args[0].type = media_types[0]
elif args[0].type != media_types[0]:
- raise Exception(_('The type that you set does not match the type'
+ raise Exception(_('The --type that you set does not match the type'
' of the given media_ids.'))
- elif not args[0].type:
- raise Exception(_('You must provide either a media_id or set the'
- ' --type flag'))
-
def _reprocess_all(args):
if not args[0].type:
@@ -98,15 +90,35 @@ def _run_reprocessing(args):
return hook_handle(('media_reprocess', args[0].type), args)
+def _set_media_state(args):
+ if len(args[0].media_id) == 1:
+ args[0].state = MediaEntry.query.filter_by(id=args[0].media_id[0])\
+ .first().state
+
+ elif len(args[0].media_id) > 1:
+ media_states = []
+
+ for id in args[0].media_id:
+ media_states.append(MediaEntry.query.filter_by(id=id).first()
+ .state)
+ for state in media_states:
+ if state != media_states[0]:
+ raise Exception(_('You can only reprocess media that is in the'
+ ' same state.'))
+
+ args[0].state = media_states[0]
+
+ elif not args[0].state:
+ args[0].state = 'processed'
+
+
def reprocess(args):
commands_util.setup_app(args[0])
- if not args[0].state:
- args[0].state = 'processed'
-
- if args[0].all:
- return _reprocess_all(args)
-
+ _set_media_state(args)
_set_media_type(args)
+ if not args[0].media_id:
+ return _reprocess_all(args)
+
return _run_reprocessing(args)
From 11a99d787f899808b10410e271491cbdfb69e55f Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 15:24:34 -0700
Subject: [PATCH 010/160] refactored _run_reprocessing
---
mediagoblin/gmg_commands/reprocess.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index cad75c45..60df697f 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -85,7 +85,15 @@ def _reprocess_all(args):
def _run_reprocessing(args):
if args[0].available:
- return hook_handle(('reprocess_action', args[0].type), args)
+ if args[0].state == 'failed':
+ print _('\n Available reprocess actions for all failed'
+ ' media_entries: \n \t --initial_processing')
+ else:
+ result = hook_handle(('reprocess_action', args[0].type), args)
+ if not result:
+ print _('Sorry there is no available reprocessing for {}'
+ ' entries in the {} state'.format(args[0].type,
+ args[0].state))
else:
return hook_handle(('media_reprocess', args[0].type), args)
From 842ba30529040fb47ac9905df6373d7c1f0286ed Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 15:40:26 -0700
Subject: [PATCH 011/160] make media_id an optional argument
---
mediagoblin/gmg_commands/reprocess.py | 76 ++++++++++++++-------------
1 file changed, 39 insertions(+), 37 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 60df697f..f3445ea6 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -32,37 +32,38 @@ def reprocess_parser_setup(subparser):
'--type', '-t',
help="The type of media to be reprocessed such as 'video' or 'image'")
subparser.add_argument(
- 'media_id',
+ '--media_id',
nargs='*',
help="The media_entry id(s) you wish to reprocess.")
def _set_media_type(args):
- if len(args[0].media_id) == 1:
- media_type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
- .first().media_type.split('.')[-1]
+ if args[0].media_id:
+ if len(args[0].media_id) == 1:
+ media_type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
+ .first().media_type.split('.')[-1]
- if not args[0].type:
- args[0].type = media_type
- elif args[0].type != media_type:
- raise Exception(_('The --type that you set does not match the type'
- ' of the given media_id.'))
- elif len(args[0].media_id) > 1:
- media_types = []
+ if not args[0].type:
+ args[0].type = media_type
+ elif args[0].type != media_type:
+ raise Exception(_('The --type that you set does not match the type'
+ ' of the given media_id.'))
+ elif len(args[0].media_id) > 1:
+ media_types = []
- for id in args[0].media_id:
- media_types.append(MediaEntry.query.filter_by(id=id).first()
- .media_type.split('.')[-1])
- for type in media_types:
- if media_types[0] != type:
- raise Exception((u'You cannot reprocess different media_types'
- ' at the same time.'))
+ for id in args[0].media_id:
+ media_types.append(MediaEntry.query.filter_by(id=id).first()
+ .media_type.split('.')[-1])
+ for type in media_types:
+ if media_types[0] != type:
+ raise Exception((u'You cannot reprocess different media_types'
+ ' at the same time.'))
- if not args[0].type:
- args[0].type = media_types[0]
- elif args[0].type != media_types[0]:
- raise Exception(_('The --type that you set does not match the type'
- ' of the given media_ids.'))
+ if not args[0].type:
+ args[0].type = media_types[0]
+ elif args[0].type != media_types[0]:
+ raise Exception(_('The --type that you set does not match the type'
+ ' of the given media_ids.'))
def _reprocess_all(args):
@@ -99,24 +100,25 @@ def _run_reprocessing(args):
def _set_media_state(args):
- if len(args[0].media_id) == 1:
- args[0].state = MediaEntry.query.filter_by(id=args[0].media_id[0])\
- .first().state
+ if args[0].media_id:
+ if len(args[0].media_id) == 1:
+ args[0].state = MediaEntry.query.filter_by(id=args[0].media_id[0])\
+ .first().state
- elif len(args[0].media_id) > 1:
- media_states = []
+ elif len(args[0].media_id) > 1:
+ media_states = []
- for id in args[0].media_id:
- media_states.append(MediaEntry.query.filter_by(id=id).first()
- .state)
- for state in media_states:
- if state != media_states[0]:
- raise Exception(_('You can only reprocess media that is in the'
- ' same state.'))
+ for id in args[0].media_id:
+ media_states.append(MediaEntry.query.filter_by(id=id).first()
+ .state)
+ for state in media_states:
+ if state != media_states[0]:
+ raise Exception(_('You can only reprocess media that is in the'
+ ' same state.'))
- args[0].state = media_states[0]
+ args[0].state = media_states[0]
- elif not args[0].state:
+ if not args[0].state:
args[0].state = 'processed'
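The practical effect of turning media_id into an optional --media_id with nargs='*': omitting it leaves the attribute as None, which the command now treats as "reprocess everything", while passing it collects the ids into a list. A stdlib-only check:

    import argparse

    parser = argparse.ArgumentParser(prog='gmg reprocess')
    parser.add_argument('--media_id', nargs='*',
                        help="The media_entry id(s) you wish to reprocess.")

    print(parser.parse_args([]).media_id)                        # None -> no ids given
    print(parser.parse_args(['--media_id', '1', '2']).media_id)  # ['1', '2']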
From 065db04730936286bbe213133a3175d48950fd32 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 15:55:39 -0700
Subject: [PATCH 012/160] add command option for regenerating all thumbnails
---
mediagoblin/gmg_commands/reprocess.py | 21 ++++++++++++++++++---
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index f3445ea6..b45543e4 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -35,6 +35,10 @@ def reprocess_parser_setup(subparser):
'--media_id',
nargs='*',
help="The media_entry id(s) you wish to reprocess.")
+ subparser.add_argument(
+ '--thumbnails',
+ action="store_true",
+ help="Regenerate thumbnails for all processed media")
def _set_media_type(args):
@@ -68,20 +72,31 @@ def _set_media_type(args):
def _reprocess_all(args):
if not args[0].type:
- if args[0].state == 'failed':
+ if args[0].thumbnails:
+ if args[0].available:
+ print _('Available options for regenerating all processed'
+ ' media thumbnails: \n'
+ '\t --size: max_width max_height'
+ ' (defaults to config specs)')
+ else:
+ #TODO regenerate all thumbnails
+ pass
+
+ elif args[0].state == 'failed':
if args[0].available:
print _('\n Available reprocess actions for all failed'
' media_entries: \n \t --initial_processing')
- return
else:
#TODO reprocess all failed entries
pass
+
else:
raise Exception(_('You must set --type when trying to reprocess'
' all media_entries, unless you set --state'
' to "failed".'))
- _run_reprocessing(args)
+ else:
+ _run_reprocessing(args)
def _run_reprocessing(args):
From bf909ab048d2741b91542ad2b00789f30e51c2cf Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 15:58:25 -0700
Subject: [PATCH 013/160] pep 8
---
mediagoblin/gmg_commands/reprocess.py | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index b45543e4..0390c48d 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -50,24 +50,24 @@ def _set_media_type(args):
if not args[0].type:
args[0].type = media_type
elif args[0].type != media_type:
- raise Exception(_('The --type that you set does not match the type'
- ' of the given media_id.'))
+ raise Exception(_('The --type that you set does not match the'
+ ' type of the given media_id.'))
elif len(args[0].media_id) > 1:
media_types = []
for id in args[0].media_id:
media_types.append(MediaEntry.query.filter_by(id=id).first()
- .media_type.split('.')[-1])
+ .media_type.split('.')[-1])
for type in media_types:
if media_types[0] != type:
- raise Exception((u'You cannot reprocess different media_types'
- ' at the same time.'))
+ raise Exception((u'You cannot reprocess different'
+ ' media_types at the same time.'))
if not args[0].type:
args[0].type = media_types[0]
elif args[0].type != media_types[0]:
- raise Exception(_('The --type that you set does not match the type'
- ' of the given media_ids.'))
+ raise Exception(_('The --type that you set does not match the'
+ ' type of the given media_ids.'))
def _reprocess_all(args):
@@ -128,8 +128,8 @@ def _set_media_state(args):
.state)
for state in media_states:
if state != media_states[0]:
- raise Exception(_('You can only reprocess media that is in the'
- ' same state.'))
+ raise Exception(_('You can only reprocess media that is in'
+ ' the same state.'))
args[0].state = media_states[0]
From e36b9f035f73caf9cb8a7ec62643e346461433fc Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 1 Aug 2013 15:58:57 -0700
Subject: [PATCH 014/160] end of day commit
---
mediagoblin/media_types/image/__init__.py | 17 +++++++++++++++++
1 file changed, 17 insertions(+)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index 1bb9c6f3..9e2d4ad7 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -19,6 +19,7 @@ from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.image.processing import process_image, \
sniff_handler
from mediagoblin.tools import pluginapi
+from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
ACCEPTED_EXTENSIONS = ["jpg", "jpeg", "png", "gif", "tiff"]
@@ -64,9 +65,25 @@ def get_media_type_and_manager(ext):
return MEDIA_TYPE, ImageMediaManager
+def reprocess_action(args):
+ if args[0].state == 'processed':
+ print _('\n Available reprocessing actions for processed images:'
+ '\n \t --resize: thumbnail or medium'
+ '\n Options:'
+ '\n \t --size: max_width max_height (defaults to config specs)')
+ return True
+
+
+def media_reprocess(args):
+ import ipdb
+ ipdb.set_trace()
+
+
hooks = {
'setup': setup_plugin,
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: ImageMediaManager,
+ ('reprocess_action', 'image'): reprocess_action,
+ ('media_reprocess', 'image'): media_reprocess,
}
From 663b378b25228e81eb654a7f42e80be7e3d2907e Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Fri, 2 Aug 2013 07:44:03 -0700
Subject: [PATCH 015/160] added a parser for the image media_type reprocessing
---
mediagoblin/media_types/image/__init__.py | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index 9e2d4ad7..2ad76111 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -13,6 +13,7 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
import datetime
from mediagoblin.media_types import MediaManagerBase
@@ -74,7 +75,24 @@ def reprocess_action(args):
return True
+def _parser(args):
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--resize',
+ action='store_true')
+ parser.add_argument(
+ '--size',
+ nargs=2)
+ parser.add_argument(
+ '--initial_processing',
+ action='store_true')
+
+ return parser.parse_args(args[1])
+
+
def media_reprocess(args):
+ reprocess_args = _parser(args)
+ args = args[0]
import ipdb
ipdb.set_trace()
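How the two parsers cooperate: the top-level gmg parser keeps the options it knows and the image plugin feeds the leftovers (args[1]) into its own ArgumentParser. A standalone sketch with both parsers reduced to the flags shown so far (at this point --resize is still a store_true flag; a later patch turns it into a value argument), with all concrete values invented for illustration:

    import argparse

    top = argparse.ArgumentParser(prog='gmg reprocess')
    top.add_argument('--media_id', nargs='*')
    top.add_argument('--type', '-t')

    image = argparse.ArgumentParser(prog='image reprocess')
    image.add_argument('--resize', action='store_true')
    image.add_argument('--size', nargs=2)
    image.add_argument('--initial_processing', action='store_true')

    args = top.parse_known_args(['--media_id', '1', '--type', 'image',
                                 '--resize', '--size', '640', '480'])
    reprocess_args = image.parse_args(args[1])   # args[1] == ['--resize', '--size', '640', '480']
    print(reprocess_args.resize)                 # True
    print(reprocess_args.size)                   # ['640', '480']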
From 273c79513d82b03b99035dcfa47e839c61322483 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Fri, 2 Aug 2013 08:02:14 -0700
Subject: [PATCH 016/160] added a check_eligible function to image reprocessing
---
mediagoblin/media_types/image/__init__.py | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index 2ad76111..a1b43479 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -90,9 +90,27 @@ def _parser(args):
return parser.parse_args(args[1])
+def _check_eligible(entry_args, reprocess_args):
+ if entry_args.state == 'processed':
+ if reprocess_args.initial_processing:
+ raise Exception(_('You can not run --initial_processing on media'
+ ' that has already been processed.'))
+
+ if entry_args.state == 'failed':
+ if reprocess_args.resize:
+ raise Exception(_('You can not run --resize on media that has not'
+ 'been processed.'))
+
+ if entry_args.state == 'processing':
+ raise Exception(_('We currently do not support reprocessing on media'
+ 'that is in the "processing" state.'))
+
+
def media_reprocess(args):
reprocess_args = _parser(args)
- args = args[0]
+ entry_args = args[0]
+
+ _check_eligible(entry_args, reprocess_args)
import ipdb
ipdb.set_trace()
From 9a2c66ca9ef763fa68dc09a483c02fe2ee02d78f Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Fri, 2 Aug 2013 11:40:41 -0700
Subject: [PATCH 017/160] added image reprocessing
---
mediagoblin/gmg_commands/reprocess.py | 3 +
mediagoblin/media_types/image/__init__.py | 47 ++++++++-
mediagoblin/media_types/image/processing.py | 104 +++++++++++++-------
mediagoblin/processing/__init__.py | 17 ++++
mediagoblin/processing/task.py | 6 +-
mediagoblin/submit/lib.py | 8 +-
6 files changed, 139 insertions(+), 46 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 0390c48d..f6b9e653 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -13,6 +13,7 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+from mediagoblin import mg_globals
from mediagoblin.db.models import MediaEntry
from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
@@ -143,6 +144,8 @@ def reprocess(args):
_set_media_state(args)
_set_media_type(args)
+ import ipdb
+ ipdb.set_trace()
if not args[0].media_id:
return _reprocess_all(args)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index a1b43479..3a056718 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -15,13 +15,18 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import datetime
+import logging
+from mediagoblin.db.models import MediaEntry
from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.image.processing import process_image, \
sniff_handler
+from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools import pluginapi
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
+_log = logging.getLogger(__name__)
+
ACCEPTED_EXTENSIONS = ["jpg", "jpeg", "png", "gif", "tiff"]
MEDIA_TYPE = 'mediagoblin.media_types.image'
@@ -69,7 +74,7 @@ def get_media_type_and_manager(ext):
def reprocess_action(args):
if args[0].state == 'processed':
print _('\n Available reprocessing actions for processed images:'
- '\n \t --resize: thumbnail or medium'
+ '\n \t --resize: thumb or medium'
'\n Options:'
'\n \t --size: max_width max_height (defaults to config specs)')
return True
@@ -78,8 +83,7 @@ def reprocess_action(args):
def _parser(args):
parser = argparse.ArgumentParser()
parser.add_argument(
- '--resize',
- action='store_true')
+ '--resize')
parser.add_argument(
'--size',
nargs=2)
@@ -100,6 +104,9 @@ def _check_eligible(entry_args, reprocess_args):
if reprocess_args.resize:
raise Exception(_('You can not run --resize on media that has not'
'been processed.'))
+ if reprocess_args.size:
+ _log.warn('With --initial_processing, the --size flag will be'
+ ' ignored.')
if entry_args.state == 'processing':
raise Exception(_('We currently do not support reprocessing on media'
@@ -111,8 +118,38 @@ def media_reprocess(args):
entry_args = args[0]
_check_eligible(entry_args, reprocess_args)
- import ipdb
- ipdb.set_trace()
+ if reprocess_args.initial_processing:
+ for id in entry_args.media_id:
+ entry = MediaEntry.query.filter_by(id=id).first()
+ # Should we get the feed_url?
+ run_process_media(entry)
+
+ elif reprocess_args.resize:
+ if reprocess_args.resize == 'medium' or reprocess_args.resize == \
+ 'thumb':
+ for id in entry_args.media_id:
+ entry = MediaEntry.query.filter_by(id=id).first()
+
+ # For now we can only reprocess with the original file
+ if not entry.media_files.get('original'):
+ raise Exception(_('The original file for this media entry'
+ 'does not exist.'))
+
+ reprocess_info = {'resize': reprocess_args.resize}
+
+ if reprocess_args.size and len(reprocess_args.size) == 2:
+ reprocess_info['max_width'] = reprocess_args.size[0]
+ reprocess_info['max_height'] = reprocess_args.size[1]
+
+ run_process_media(entry, reprocess_info)
+
+ else:
+ raise Exception(_('The --resize flag must set either "thumb"'
+ ' or "medium".'))
+
+ else:
+ _log.warn('You must set either --resize or --initial_processing flag'
+ ' to reprocess an image.')
hooks = {
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index baf2ac7e..4f619f47 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -74,11 +74,13 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
def resize_tool(proc_state, force, keyname, target_name,
- conversions_subdir, exif_tags):
+ conversions_subdir, exif_tags, new_size=None):
# filename -- the filename of the original image being resized
filename = proc_state.get_queued_filename()
- max_width = mgg.global_config['media:' + keyname]['max_width']
- max_height = mgg.global_config['media:' + keyname]['max_height']
+ if not new_size:
+ max_width = mgg.global_config['media:' + keyname]['max_width']
+ max_height = mgg.global_config['media:' + keyname]['max_height']
+ new_size = (max_width, max_height)
# If the size of the original file exceeds the specified size for the resized
# file, a target_name file is created and later associated with the media
# entry.
@@ -93,7 +95,7 @@ def resize_tool(proc_state, force, keyname, target_name,
or exif_image_needs_rotation(exif_tags):
resize_image(
proc_state, im, unicode(keyname), target_name,
- (max_width, max_height),
+ new_size,
exif_tags, conversions_subdir)
@@ -119,7 +121,7 @@ def sniff_handler(media_file, **kw):
return None
-def process_image(proc_state):
+def process_image(proc_state, reprocess_info=None):
"""Code to process an image. Will be run by celery.
A Workbench() represents a local temporary dir. It is automatically
@@ -127,45 +129,75 @@ def process_image(proc_state):
"""
entry = proc_state.entry
workbench = proc_state.workbench
-
+ import ipdb
+ ipdb.set_trace()
# Conversions subdirectory to avoid collisions
conversions_subdir = os.path.join(
workbench.dir, 'conversions')
os.mkdir(conversions_subdir)
- queued_filename = proc_state.get_queued_filename()
- name_builder = FilenameBuilder(queued_filename)
+ if reprocess_info:
+ _reprocess_image(proc_state, reprocess_info, conversions_subdir)
- # EXIF extraction
- exif_tags = extract_exif(queued_filename)
- gps_data = get_gps_data(exif_tags)
+ else:
+ queued_filename = proc_state.get_queued_filename()
+ name_builder = FilenameBuilder(queued_filename)
- # Always create a small thumbnail
- resize_tool(proc_state, True, 'thumb',
+ # EXIF extraction
+ exif_tags = extract_exif(queued_filename)
+ gps_data = get_gps_data(exif_tags)
+
+ # Always create a small thumbnail
+ resize_tool(proc_state, True, 'thumb',
+ name_builder.fill('{basename}.thumbnail{ext}'),
+ conversions_subdir, exif_tags)
+
+ # Possibly create a medium
+ resize_tool(proc_state, False, 'medium',
+ name_builder.fill('{basename}.medium{ext}'),
+ conversions_subdir, exif_tags)
+
+ # Copy our queued local workbench to its final destination
+ proc_state.copy_original(name_builder.fill('{basename}{ext}'))
+
+ # Remove queued media file from storage and database
+ proc_state.delete_queue_file()
+
+ # Insert exif data into database
+ exif_all = clean_exif(exif_tags)
+
+ if len(exif_all):
+ entry.media_data_init(exif_all=exif_all)
+
+ if len(gps_data):
+ for key in list(gps_data.keys()):
+ gps_data['gps_' + key] = gps_data.pop(key)
+ entry.media_data_init(**gps_data)
+
+
+def _reprocess_image(proc_state, reprocess_info, conversions_subdir):
+ reprocess_filename = proc_state.get_reprocess_filename()
+ name_builder = FilenameBuilder(reprocess_filename)
+
+ exif_tags = extract_exif(reprocess_filename)
+
+ if reprocess_info.get('max_width'):
+ max_width = reprocess_info['max_width']
+ else:
+ max_width = mgg.global_config \
+ ['media:' + reprocess_info['resize']]['max_width']
+
+ if reprocess_info.get('max_height'):
+ max_height = reprocess_info['max_height']
+ else:
+ max_height = mgg.global_config \
+ ['media:' + reprocess_info['resize']]['max_height']
+
+ new_size = (max_width, max_height)
+
+ resize_tool(proc_state, False, reprocess_info['resize'],
name_builder.fill('{basename}.thumbnail{ext}'),
- conversions_subdir, exif_tags)
-
- # Possibly create a medium
- resize_tool(proc_state, False, 'medium',
- name_builder.fill('{basename}.medium{ext}'),
- conversions_subdir, exif_tags)
-
- # Copy our queued local workbench to its final destination
- proc_state.copy_original(name_builder.fill('{basename}{ext}'))
-
- # Remove queued media file from storage and database
- proc_state.delete_queue_file()
-
- # Insert exif data into database
- exif_all = clean_exif(exif_tags)
-
- if len(exif_all):
- entry.media_data_init(exif_all=exif_all)
-
- if len(gps_data):
- for key in list(gps_data.keys()):
- gps_data['gps_' + key] = gps_data.pop(key)
- entry.media_data_init(**gps_data)
+ conversions_subdir, exif_tags, new_size)
if __name__ == '__main__':
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index f3a85940..bbe9f364 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -87,6 +87,7 @@ class ProcessingState(object):
self.entry = entry
self.workbench = None
self.queued_filename = None
+ self.reprocess_filename = None
def set_workbench(self, wb):
self.workbench = wb
@@ -128,6 +129,22 @@ class ProcessingState(object):
mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
self.entry.queued_media_file = []
+ def get_reprocess_filename(self):
+ """
+ Get the filename to use during reprocessing
+ """
+ # Currently only returns the original file, but eventually will return
+ # the highest quality file if the original doesn't exist
+ if self.reprocess_filename is not None:
+ return self.reprocess_filename
+
+ reprocess_filepath = self.entry.media_files['original'][2]
+ reprocess_filename = self.workbench.local_file(
+ mgg.public_store, reprocess_filepath,
+ 'original')
+ self.reprocess_filename = reprocess_filename
+ return reprocess_filename
+
def mark_entry_failed(entry_id, exc):
"""
diff --git a/mediagoblin/processing/task.py b/mediagoblin/processing/task.py
index 9af192ed..c0dfb9b4 100644
--- a/mediagoblin/processing/task.py
+++ b/mediagoblin/processing/task.py
@@ -68,13 +68,15 @@ class ProcessMedia(task.Task):
"""
Pass this entry off for processing.
"""
- def run(self, media_id, feed_url):
+ def run(self, media_id, feed_url, reprocess_info=None):
"""
Pass the media entry off to the appropriate processing function
(for now just process_image...)
:param feed_url: The feed URL that the PuSH server needs to be
updated for.
+ :param reprocess_info: A dict containing all of the necessary reprocessing
+ info for the media_type.
"""
entry = MediaEntry.query.get(media_id)
@@ -89,7 +91,7 @@ class ProcessMedia(task.Task):
with mgg.workbench_manager.create() as workbench:
proc_state.set_workbench(workbench)
# run the processing code
- entry.media_manager.processor(proc_state)
+ entry.media_manager.processor(proc_state, reprocess_info)
# We set the state to processed and save the entry here so there's
# no need to save at the end of the processing stage, probably ;)
diff --git a/mediagoblin/submit/lib.py b/mediagoblin/submit/lib.py
index 7e85696b..3619a329 100644
--- a/mediagoblin/submit/lib.py
+++ b/mediagoblin/submit/lib.py
@@ -76,17 +76,19 @@ def prepare_queue_task(app, entry, filename):
return queue_file
-def run_process_media(entry, feed_url=None):
+def run_process_media(entry, feed_url=None, reprocess_info=None):
"""Process the media asynchronously
:param entry: MediaEntry() instance to be processed.
:param feed_url: A string indicating the feed_url that the PuSH servers
should be notified of. This will be sth like: `request.urlgen(
'mediagoblin.user_pages.atom_feed',qualified=True,
- user=request.user.username)`"""
+ user=request.user.username)`
+ :param reprocess_info: A dict containing all of the necessary reprocessing
+ info for the given media_type"""
try:
process_media.apply_async(
- [entry.id, feed_url], {},
+ [entry.id, feed_url, reprocess_info], {},
task_id=entry.queued_task_id)
except BaseException as exc:
# The purpose of this section is because when running in "lazy"
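The thread running through this patch: media_reprocess() builds a small reprocess_info dict, run_process_media() forwards it to the celery task, ProcessMedia.run() hands it to entry.media_manager.processor(), and process_image() finally branches on it. The keys below are the ones constructed in media_reprocess() above; the concrete values are only illustrative (note that --size values arrive as strings until a later patch adds type=int):

    reprocess_info = {
        'resize': 'thumb',     # which derivative to regenerate: 'thumb' or 'medium'
        'max_width': '640',    # present only when --size was passed on the CLI
        'max_height': '480',   # (strings here; type=int comes later in the series)
    }
    # run_process_media(entry, reprocess_info=reprocess_info)
    #   -> process_media.apply_async([entry.id, feed_url, reprocess_info], {}, ...)
    #   -> ProcessMedia.run(media_id, feed_url, reprocess_info)
    #   -> entry.media_manager.processor(proc_state, reprocess_info)
    #   -> _reprocess_image(proc_state, reprocess_info, conversions_subdir)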
From f30fbfe60c5cd84cdf5df653d2c281a0bb05bae8 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Fri, 2 Aug 2013 12:06:50 -0700
Subject: [PATCH 018/160] add option to not run eagerly
---
mediagoblin/gmg_commands/reprocess.py | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index f6b9e653..9d8ede24 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -13,6 +13,8 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import os
+
from mediagoblin import mg_globals
from mediagoblin.db.models import MediaEntry
from mediagoblin.gmg_commands import util as commands_util
@@ -40,6 +42,10 @@ def reprocess_parser_setup(subparser):
'--thumbnails',
action="store_true",
help="Regenerate thumbnails for all processed media")
+ subparser.add_argument(
+ '--celery',
+ action='store_true',
+ help="Don't process eagerly, pass off to celery")
def _set_media_type(args):
@@ -139,13 +145,13 @@ def _set_media_state(args):
def reprocess(args):
+ if not args[0].celery:
+ os.environ['CELERY_ALWAYS_EAGER'] = 'true'
commands_util.setup_app(args[0])
_set_media_state(args)
_set_media_type(args)
- import ipdb
- ipdb.set_trace()
if not args[0].media_id:
return _reprocess_all(args)
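What the new --celery flag controls: without it, the command exports CELERY_ALWAYS_EAGER before setup_app() configures celery, which MediaGoblin's celery setup appears to honour so that process_media.apply_async() runs the task synchronously in the gmg process; with --celery the variable stays unset and the task is queued for a separate worker. A small sketch of just that toggle (configure_celery_mode is a hypothetical helper name, not part of the patch):

    import os

    def configure_celery_mode(use_celery):
        # Mirrors the toggle at the top of reprocess(): eager unless --celery is given.
        if not use_celery:
            os.environ['CELERY_ALWAYS_EAGER'] = 'true'

    configure_celery_mode(use_celery=False)
    print(os.environ.get('CELERY_ALWAYS_EAGER'))   # 'true'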
From 49db7785797a251ee408c62c0954ccd71af9d088 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Fri, 2 Aug 2013 13:18:35 -0700
Subject: [PATCH 019/160] very rough working version of image reprocessing
---
mediagoblin/media_types/image/__init__.py | 13 ++++++------
mediagoblin/media_types/image/processing.py | 22 ++++++++-------------
mediagoblin/processing/__init__.py | 6 +++---
3 files changed, 18 insertions(+), 23 deletions(-)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index 3a056718..1aff21d4 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -86,7 +86,8 @@ def _parser(args):
'--resize')
parser.add_argument(
'--size',
- nargs=2)
+ nargs=2,
+ type=int)
parser.add_argument(
'--initial_processing',
action='store_true')
@@ -103,14 +104,14 @@ def _check_eligible(entry_args, reprocess_args):
if entry_args.state == 'failed':
if reprocess_args.resize:
raise Exception(_('You can not run --resize on media that has not'
- 'been processed.'))
+ ' been processed.'))
if reprocess_args.size:
_log.warn('With --initial_processing, the --size flag will be'
' ignored.')
if entry_args.state == 'processing':
raise Exception(_('We currently do not support reprocessing on media'
- 'that is in the "processing" state.'))
+ ' that is in the "processing" state.'))
def media_reprocess(args):
@@ -133,15 +134,15 @@ def media_reprocess(args):
# For now we can only reprocess with the original file
if not entry.media_files.get('original'):
raise Exception(_('The original file for this media entry'
- 'does not exist.'))
+ ' does not exist.'))
reprocess_info = {'resize': reprocess_args.resize}
- if reprocess_args.size and len(reprocess_args.size) == 2:
+ if reprocess_args.size:
reprocess_info['max_width'] = reprocess_args.size[0]
reprocess_info['max_height'] = reprocess_args.size[1]
- run_process_media(entry, reprocess_info)
+ run_process_media(entry, reprocess_info=reprocess_info)
else:
raise Exception(_('The --resize flag must set either "thumb"'
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 4f619f47..18b8bd4e 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -73,10 +73,8 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
proc_state.store_public(keyname, tmp_resized_filename, target_name)
-def resize_tool(proc_state, force, keyname, target_name,
+def resize_tool(proc_state, force, keyname, filename, target_name,
conversions_subdir, exif_tags, new_size=None):
- # filename -- the filename of the original image being resized
- filename = proc_state.get_queued_filename()
if not new_size:
max_width = mgg.global_config['media:' + keyname]['max_width']
max_height = mgg.global_config['media:' + keyname]['max_height']
@@ -90,8 +88,8 @@ def resize_tool(proc_state, force, keyname, target_name,
except IOError:
raise BadMediaFail()
if force \
- or im.size[0] > max_width \
- or im.size[1] > max_height \
+ or im.size[0] > new_size[0]\
+ or im.size[1] > new_size[1]\
or exif_image_needs_rotation(exif_tags):
resize_image(
proc_state, im, unicode(keyname), target_name,
@@ -129,8 +127,7 @@ def process_image(proc_state, reprocess_info=None):
"""
entry = proc_state.entry
workbench = proc_state.workbench
- import ipdb
- ipdb.set_trace()
+
# Conversions subdirectory to avoid collisions
conversions_subdir = os.path.join(
workbench.dir, 'conversions')
@@ -148,12 +145,12 @@ def process_image(proc_state, reprocess_info=None):
gps_data = get_gps_data(exif_tags)
# Always create a small thumbnail
- resize_tool(proc_state, True, 'thumb',
+ resize_tool(proc_state, True, 'thumb', queued_filename,
name_builder.fill('{basename}.thumbnail{ext}'),
conversions_subdir, exif_tags)
# Possibly create a medium
- resize_tool(proc_state, False, 'medium',
+ resize_tool(proc_state, False, 'medium', queued_filename,
name_builder.fill('{basename}.medium{ext}'),
conversions_subdir, exif_tags)
@@ -183,19 +180,16 @@ def _reprocess_image(proc_state, reprocess_info, conversions_subdir):
if reprocess_info.get('max_width'):
max_width = reprocess_info['max_width']
+ max_height = reprocess_info['max_height']
else:
max_width = mgg.global_config \
['media:' + reprocess_info['resize']]['max_width']
-
- if reprocess_info.get('max_height'):
- max_height = reprocess_info['max_height']
- else:
max_height = mgg.global_config \
['media:' + reprocess_info['resize']]['max_height']
new_size = (max_width, max_height)
- resize_tool(proc_state, False, reprocess_info['resize'],
+ resize_tool(proc_state, False, reprocess_info['resize'], reprocess_filename,
name_builder.fill('{basename}.thumbnail{ext}'),
conversions_subdir, exif_tags, new_size)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index bbe9f364..13c677eb 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -138,10 +138,10 @@ class ProcessingState(object):
if self.reprocess_filename is not None:
return self.reprocess_filename
- reprocess_filepath = self.entry.media_files['original'][2]
- reprocess_filename = self.workbench.local_file(
+ reprocess_filepath = self.entry.media_files['original']
+ reprocess_filename = self.workbench.localized_file(
mgg.public_store, reprocess_filepath,
- 'original')
+ 'source')
self.reprocess_filename = reprocess_filename
return reprocess_filename
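
The hunk above makes get_reprocess_filename() pull the stored 'original' into the local workbench via localized_file() and cache the resulting path on the ProcessingState. A toy, self-contained version of that memoized-localization pattern (StubWorkbench and its localized_file signature are invented stand-ins for the real Workbench and mgg.public_store, not the actual API):

class StubWorkbench(object):
    def localized_file(self, storage, filepath, filename_if_copying=None):
        # The real workbench copies the file out of `storage` into its
        # temporary directory; this stub just pretends and returns a path.
        print('localizing %r' % (filepath,))
        return '/tmp/workbench/source'


class ReprocessState(object):
    """Sketch of the caching done by get_reprocess_filename() above."""

    def __init__(self, media_files, workbench, store):
        self.media_files = media_files
        self.workbench = workbench
        self.store = store
        self.reprocess_filename = None

    def get_reprocess_filename(self):
        if self.reprocess_filename is not None:
            return self.reprocess_filename          # already localized
        self.reprocess_filename = self.workbench.localized_file(
            self.store, self.media_files['original'], 'source')
        return self.reprocess_filename


state = ReprocessState({'original': ['media', '1', 'original.png']},
                       StubWorkbench(), store=None)
state.get_reprocess_filename()   # copies once ('localizing ...' is printed)
state.get_reprocess_filename()   # second call hits the cache
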
From 3e9faf85da1ee2971e9ff2fde12b192ea470d806 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Fri, 2 Aug 2013 15:12:07 -0700
Subject: [PATCH 020/160] added comments and did a little refactoring. not sure
if it is actually any clearer though
---
mediagoblin/gmg_commands/reprocess.py | 45 ++++++++---
mediagoblin/media_types/image/__init__.py | 48 ++++++-----
mediagoblin/media_types/image/processing.py | 90 ++++++++++-----------
mediagoblin/processing/task.py | 12 ++-
4 files changed, 118 insertions(+), 77 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 9d8ede24..4df0d581 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -49,16 +49,18 @@ def reprocess_parser_setup(subparser):
def _set_media_type(args):
+ """
+ This will verify that all media ids are of the same media_type. If the
+ --type flag is set, it will be replaced by the media type of the given
+ media ids.
+
+ If they are trying to process different media types, an Exception will be
+ raised.
+ """
if args[0].media_id:
if len(args[0].media_id) == 1:
- media_type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
+ args[0].type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
.first().media_type.split('.')[-1]
- if not args[0].type:
- args[0].type = media_type
- elif args[0].type != media_type:
- raise Exception(_('The --type that you set does not match the'
- 'type of the given media_id.'))
elif len(args[0].media_id) > 1:
media_types = []
@@ -70,15 +72,17 @@ def _set_media_type(args):
raise Exception((u'You cannot reprocess different'
' media_types at the same time.'))
- if not args[0].type:
- args[0].type = media_types[0]
- elif args[0].type != media_types[0]:
- raise Exception(_('The --type that you set does not match the'
- ' type of the given media_ids.'))
+ args[0].type = media_types[0]
def _reprocess_all(args):
+ """
+ This handles reprocessing if no media_id's are given.
+ """
if not args[0].type:
+ # If no media type is given, we can either regenerate all thumbnails,
+ # or try to reprocess all failed media
+
if args[0].thumbnails:
if args[0].available:
print _('Available options for regenerating all processed'
@@ -89,6 +93,7 @@ def _reprocess_all(args):
#TODO regenerate all thumbnails
pass
+ # Reprocess all failed media
elif args[0].state == 'failed':
if args[0].available:
print _('\n Available reprocess actions for all failed'
@@ -97,6 +102,8 @@ def _reprocess_all(args):
#TODO reprocess all failed entries
pass
+ # If here, they didn't set the --type flag and were trying to do
+ # something other than generating thumbnails or initial_processing
else:
raise Exception(_('You must set --type when trying to reprocess'
' all media_entries, unless you set --state'
@@ -107,6 +114,8 @@ def _reprocess_all(args):
def _run_reprocessing(args):
+ # Are they just asking for the available reprocessing options for the given
+ # media?
if args[0].available:
if args[0].state == 'failed':
print _('\n Available reprocess actions for all failed'
@@ -118,11 +127,20 @@ def _run_reprocessing(args):
' entries in the {} state'.format(args[0].type,
args[0].state))
else:
+ # Run media reprocessing
return hook_handle(('media_reprocess', args[0].type), args)
def _set_media_state(args):
+ """
+ This will verify that all media ids are in the same state. If the
+ --state flag is set, it will be replaced by the state of the given
+ media ids.
+
+ If they are trying to process different media states, an Exception will be
+ raised.
+ """
if args[0].media_id:
+ # Only check if we are given media_ids
if len(args[0].media_id) == 1:
args[0].state = MediaEntry.query.filter_by(id=args[0].media_id[0])\
.first().state
@@ -133,6 +151,8 @@ def _set_media_state(args):
for id in args[0].media_id:
media_states.append(MediaEntry.query.filter_by(id=id).first()
.state)
+
+ # Make sure that all media are in the same state
for state in media_states:
if state != media_states[0]:
raise Exception(_('You can only reprocess media that is in'
@@ -140,11 +160,13 @@ def _set_media_state(args):
args[0].state = media_states[0]
+ # If no state was set, then we will default to the processed state
if not args[0].state:
args[0].state = 'processed'
def reprocess(args):
+ # Run eagerly unless explicitly set not to
if not args[0].celery:
os.environ['CELERY_ALWAYS_EAGER'] = 'true'
commands_util.setup_app(args[0])
@@ -152,6 +174,7 @@ def reprocess(args):
_set_media_state(args)
_set_media_type(args)
+ # If no media_ids were given, then try to reprocess all entries
if not args[0].media_id:
return _reprocess_all(args)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index 1aff21d4..de7de3ac 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -72,6 +72,9 @@ def get_media_type_and_manager(ext):
def reprocess_action(args):
+ """
+ List the available actions for media in a given state
+ """
if args[0].state == 'processed':
print _('\n Available reprocessing actions for processed images:'
'\n \t --resize: thumb or medium'
@@ -81,9 +84,13 @@ def reprocess_action(args):
def _parser(args):
+ """
+ Parses the unknown args from the gmg parser
+ """
parser = argparse.ArgumentParser()
parser.add_argument(
- '--resize')
+ '--resize',
+ choices=['thumb', 'medium'])
parser.add_argument(
'--size',
nargs=2,
@@ -96,6 +103,10 @@ def _parser(args):
def _check_eligible(entry_args, reprocess_args):
+ """
+ Check to see if we can actually process the given media as requested
+ """
+
if entry_args.state == 'processed':
if reprocess_args.initial_processing:
raise Exception(_('You can not run --initial_processing on media'
@@ -118,36 +129,37 @@ def media_reprocess(args):
reprocess_args = _parser(args)
entry_args = args[0]
+ # Can we actually process the given media as requested?
_check_eligible(entry_args, reprocess_args)
+
+ # Do we want to re-try initial processing?
if reprocess_args.initial_processing:
for id in entry_args.media_id:
entry = MediaEntry.query.filter_by(id=id).first()
- # Should we get the feed_url?
run_process_media(entry)
+ # Are we wanting to resize the thumbnail or medium?
elif reprocess_args.resize:
- if reprocess_args.resize == 'medium' or reprocess_args.resize == \
- 'thumb':
- for id in entry_args.media_id:
- entry = MediaEntry.query.filter_by(id=id).first()
- # For now we can only reprocess with the original file
- if not entry.media_files.get('original'):
- raise Exception(_('The original file for this media entry'
- ' does not exist.'))
+ # reprocess all given media entries
+ for id in entry_args.media_id:
+ entry = MediaEntry.query.filter_by(id=id).first()
- reprocess_info = {'resize': reprocess_args.resize}
+ # For now we can only reprocess with the original file
+ if not entry.media_files.get('original'):
+ raise Exception(_('The original file for this media entry'
+ ' does not exist.'))
- if reprocess_args.size:
- reprocess_info['max_width'] = reprocess_args.size[0]
- reprocess_info['max_height'] = reprocess_args.size[1]
+ reprocess_info = {'resize': reprocess_args.resize}
- run_process_media(entry, reprocess_info=reprocess_info)
+ if reprocess_args.size:
+ reprocess_info['max_width'] = reprocess_args.size[0]
+ reprocess_info['max_height'] = reprocess_args.size[1]
- else:
- raise Exception(_('The --resize flag must set either "thumb"'
- ' or "medium".'))
+ run_process_media(entry, reprocess_info=reprocess_info)
+
+ # If we are here, they forgot to tell us how to reprocess
else:
_log.warn('You must set either --resize or --initial_processing flag'
' to reprocess an image.')
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 18b8bd4e..078ab0d8 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -73,12 +73,17 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
proc_state.store_public(keyname, tmp_resized_filename, target_name)
-def resize_tool(proc_state, force, keyname, filename, target_name,
+def resize_tool(proc_state, force, keyname, target_name,
conversions_subdir, exif_tags, new_size=None):
+ # Get the filename of the original file
+ filename = proc_state.get_orig_filename()
+
+ # Use the default size if new_size was not given
if not new_size:
max_width = mgg.global_config['media:' + keyname]['max_width']
max_height = mgg.global_config['media:' + keyname]['max_height']
new_size = (max_width, max_height)
+
# If the size of the original file exceeds the specified size for the desired
# file, a target_name file is created and later associated with the media
# entry.
@@ -125,74 +130,67 @@ def process_image(proc_state, reprocess_info=None):
A Workbench() represents a local temporary dir. It is automatically
cleaned up when this function exits.
"""
- entry = proc_state.entry
- workbench = proc_state.workbench
+ def init(self, proc_state):
+ self.proc_state = proc_state
+ self.entry = proc_state.entry
+ self.workbench = proc_state.workbench
- # Conversions subdirectory to avoid collisions
- conversions_subdir = os.path.join(
- workbench.dir, 'conversions')
- os.mkdir(conversions_subdir)
+ # Conversions subdirectory to avoid collisions
+ self.conversions_subdir = os.path.join(
+ self.workbench.dir, 'convirsions')
- if reprocess_info:
- _reprocess_image(proc_state, reprocess_info, conversions_subdir)
+ self.orig_filename = proc_state.get_orig_filename()
+ self.name_builder = FilenameBuilder(self.orig_filename)
- else:
- queued_filename = proc_state.get_queued_filename()
- name_builder = FilenameBuilder(queued_filename)
+ # Exif extraction
+ self.exif_tags = extract_exif(self.orig_filename)
- # EXIF extraction
- exif_tags = extract_exif(queued_filename)
- gps_data = get_gps_data(exif_tags)
+ os.mkdir(self.conversions_subdir)
- # Always create a small thumbnail
- resize_tool(proc_state, True, 'thumb', queued_filename,
- name_builder.fill('{basename}.thumbnail{ext}'),
- conversions_subdir, exif_tags)
+ def initial_processing(self):
+ # Is there any GPS data
+ gps_data = get_gps_data(self.exif_tags)
+
+ # Always create a small thumbnail
+ resize_tool(self.proc_state, True, 'thumb', self.orig_filename,
+ self.name_builder.fill('{basename}.thumbnail{ext}'),
+ self.conversions_subdir, self.exif_tags)
# Possibly create a medium
- resize_tool(proc_state, False, 'medium', queued_filename,
- name_builder.fill('{basename}.medium{ext}'),
- conversions_subdir, exif_tags)
+ resize_tool(self.proc_state, False, 'medium', self.orig_filename,
+ self.name_builder.fill('{basename}.medium{ext}'),
+ self.conversions_subdir, self.exif_tags)
# Copy our queued local workbench to its final destination
- proc_state.copy_original(name_builder.fill('{basename}{ext}'))
+ self.proc_state.copy_original(self.name_builder.fill('{basename}{ext}'))
# Remove queued media file from storage and database
- proc_state.delete_queue_file()
+ self.proc_state.delete_queue_file()
# Insert exif data into database
- exif_all = clean_exif(exif_tags)
+ exif_all = clean_exif(self.exif_tags)
if len(exif_all):
- entry.media_data_init(exif_all=exif_all)
+ self.entry.media_data_init(exif_all=exif_all)
if len(gps_data):
for key in list(gps_data.keys()):
gps_data['gps_' + key] = gps_data.pop(key)
- entry.media_data_init(**gps_data)
+ self.entry.media_data_init(**gps_data)
+ def reprocess(self, reprocess_info):
+ new_size = None
-def _reprocess_image(proc_state, reprocess_info, conversions_subdir):
- reprocess_filename = proc_state.get_reprocess_filename()
- name_builder = FilenameBuilder(reprocess_filename)
+ # Did they specify a size?
+ if reprocess_info.get('max_width'):
+ max_width = reprocess_info['max_width']
+ max_height = reprocess_info['max_height']
- exif_tags = extract_exif(reprocess_filename)
-
- if reprocess_info.get('max_width'):
- max_width = reprocess_info['max_width']
- max_height = reprocess_info['max_height']
- else:
- max_width = mgg.global_config \
- ['media:' + reprocess_info['resize']]['max_width']
- max_height = mgg.global_config \
- ['media:' + reprocess_info['resize']]['max_height']
-
- new_size = (max_width, max_height)
-
- resize_tool(proc_state, False, reprocess_info['resize'], reprocess_filename,
- name_builder.fill('{basename}.thumbnail{ext}'),
- conversions_subdir, exif_tags, new_size)
+ new_size = (max_width, max_height)
+ resize_tool(self.proc_state, False, reprocess_info['resize'],
+ self.name_builder.fill('{basename}.medium{ext}'),
+ self.conversions_subdir, self.exif_tags, new_size)
if __name__ == '__main__':
import sys
diff --git a/mediagoblin/processing/task.py b/mediagoblin/processing/task.py
index c0dfb9b4..36ee31fd 100644
--- a/mediagoblin/processing/task.py
+++ b/mediagoblin/processing/task.py
@@ -89,9 +89,17 @@ class ProcessMedia(task.Task):
proc_state = ProcessingState(entry)
with mgg.workbench_manager.create() as workbench:
+
proc_state.set_workbench(workbench)
- # run the processing code
- entry.media_manager.processor(proc_state, reprocess_info)
+ processor = entry.media_manager.processor(proc_state)
+
+ # If we have reprocess_info, let's reprocess
+ if reprocess_info:
+ processor.reprocess(reprocess_info)
+
+ # Run initial processing
+ else:
+ processor.initial_processing()
# We set the state to processed and save the entry here so there's
# no need to save at the end of the processing stage, probably ;)
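
The task-side change above reduces to a two-way dispatch on reprocess_info: construct the media manager's processor class with the ProcessingState, then call either reprocess() or initial_processing(). A self-contained sketch of that control flow (ImageLikeProcessor is an invented stand-in for entry.media_manager.processor):

class ImageLikeProcessor(object):
    """Stand-in for the class stored on entry.media_manager.processor."""

    def __init__(self, proc_state):
        self.proc_state = proc_state

    def initial_processing(self):
        print('running initial processing')

    def reprocess(self, reprocess_info):
        print('reprocessing with %r' % (reprocess_info,))


def run_processing(processor_class, proc_state, reprocess_info=None):
    # Mirrors the branch added to the ProcessMedia celery task above
    processor = processor_class(proc_state)
    if reprocess_info:
        # e.g. {'resize': 'thumb', 'max_width': 200, 'max_height': 200}
        processor.reprocess(reprocess_info)
    else:
        processor.initial_processing()


run_processing(ImageLikeProcessor, proc_state=None)
run_processing(ImageLikeProcessor, proc_state=None,
               reprocess_info={'resize': 'medium'})
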
From 45b20dce1ac5a8d9fb045faf67e796a8092f65e4 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Fri, 2 Aug 2013 15:20:59 -0700
Subject: [PATCH 021/160] change get_queued_filename to get_orig_filename and
modify the function
---
mediagoblin/media_types/image/processing.py | 2 +-
mediagoblin/processing/__init__.py | 48 +++++++++------------
2 files changed, 22 insertions(+), 28 deletions(-)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 078ab0d8..665a2931 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -75,7 +75,7 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
def resize_tool(proc_state, force, keyname, target_name,
conversions_subdir, exif_tags, new_size=None):
- # Get the filename of the original file
+ # filename -- the filename of the original image being resized
filename = proc_state.get_orig_filename()
# Use the default size if new_size was not given
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 13c677eb..60565e09 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -86,27 +86,37 @@ class ProcessingState(object):
def __init__(self, entry):
self.entry = entry
self.workbench = None
- self.queued_filename = None
- self.reprocess_filename = None
+ self.orig_filename = None
def set_workbench(self, wb):
self.workbench = wb
- def get_queued_filename(self):
+ def get_orig_filename(self):
"""
Get a filename for the original, on local storage
+
+ If the media entry has a queued_media_file, use that, otherwise
+ use the original.
+
+ In the future, this will return the highest quality file available
+ if neither the original nor the queued file is available
"""
- if self.queued_filename is not None:
- return self.queued_filename
- queued_filepath = self.entry.queued_media_file
- queued_filename = self.workbench.localized_file(
- mgg.queue_store, queued_filepath,
+ if self.orig_filename is not None:
+ return self.orig_filename
+
+ if self.entry.queued_media_file:
+ orig_filepath = self.entry.queued_media_file
+ else:
+ orig_filepath = self.entry.media_files['original']
+
+ orig_filename = self.workbench.localized_file(
+ mgg.queue_store, orig_filepath,
'source')
- self.queued_filename = queued_filename
- return queued_filename
+ self.orig_filename = orig_filename
+ return orig_filename
def copy_original(self, target_name, keyname=u"original"):
- self.store_public(keyname, self.get_queued_filename(), target_name)
+ self.store_public(keyname, self.get_orig_filename(), target_name)
def store_public(self, keyname, local_file, target_name=None):
if target_name is None:
@@ -129,22 +139,6 @@ class ProcessingState(object):
mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
self.entry.queued_media_file = []
- def get_reprocess_filename(self):
- """
- Get the filename to use during reprocessing
- """
- # Currently only returns the original file, but eventually will return
- # the highest quality file if the original doesn't exist
- if self.reprocess_filename is not None:
- return self.reprocess_filename
-
- reprocess_filepath = self.entry.media_files['original']
- reprocess_filename = self.workbench.localized_file(
- mgg.public_store, reprocess_filepath,
- 'source')
- self.reprocess_filename = reprocess_filename
- return reprocess_filename
-
def mark_entry_failed(entry_id, exc):
"""
From 3988c9d66db47ded3df012bb2e8d8bf04f2f4fd4 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 6 Aug 2013 09:17:37 -0700
Subject: [PATCH 022/160] forgot to change to a class from a function after the
refactor
---
mediagoblin/media_types/image/__init__.py | 4 ++--
mediagoblin/media_types/image/processing.py | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index de7de3ac..e2b00e50 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -19,7 +19,7 @@ import logging
from mediagoblin.db.models import MediaEntry
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.image.processing import process_image, \
+from mediagoblin.media_types.image.processing import ProcessImage, \
sniff_handler
from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools import pluginapi
@@ -38,7 +38,7 @@ def setup_plugin():
class ImageMediaManager(MediaManagerBase):
human_readable = "Image"
- processor = staticmethod(process_image)
+ processor = ProcessImage
display_template = "mediagoblin/media_displays/image.html"
default_thumb = "images/media_thumbs/image.png"
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 665a2931..8126ea2d 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -124,7 +124,7 @@ def sniff_handler(media_file, **kw):
return None
-def process_image(proc_state, reprocess_info=None):
+class ProcessImage(object):
"""Code to process an image. Will be run by celery.
A Workbench() represents a local temporary dir. It is automatically
From 7ac66a3dd095c8358e7392702f2948265135dc1c Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 6 Aug 2013 09:47:09 -0700
Subject: [PATCH 023/160] Refactor processing/reprocessing functions into
ProcessImage class
---
mediagoblin/media_types/image/__init__.py | 102 +-----------------
mediagoblin/media_types/image/processing.py | 110 +++++++++++++++++++-
2 files changed, 111 insertions(+), 101 deletions(-)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index e2b00e50..072611e7 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -13,17 +13,13 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-import argparse
import datetime
import logging
-from mediagoblin.db.models import MediaEntry
from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.image.processing import ProcessImage, \
sniff_handler
-from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools import pluginapi
-from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
_log = logging.getLogger(__name__)
@@ -71,105 +67,11 @@ def get_media_type_and_manager(ext):
return MEDIA_TYPE, ImageMediaManager
-def reprocess_action(args):
- """
- List the available actions for media in a given state
- """
- if args[0].state == 'processed':
- print _('\n Available reprocessing actions for processed images:'
- '\n \t --resize: thumb or medium'
- '\n Options:'
- '\n \t --size: max_width max_height (defaults to config specs)')
- return True
-
-
-def _parser(args):
- """
- Parses the unknown args from the gmg parser
- """
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--resize',
- choices=['thumb', 'medium'])
- parser.add_argument(
- '--size',
- nargs=2,
- type=int)
- parser.add_argument(
- '--initial_processing',
- action='store_true')
-
- return parser.parse_args(args[1])
-
-
-def _check_eligible(entry_args, reprocess_args):
- """
- Check to see if we can actually process the given media as requested
- """
-
- if entry_args.state == 'processed':
- if reprocess_args.initial_processing:
- raise Exception(_('You can not run --initial_processing on media'
- ' that has already been processed.'))
-
- if entry_args.state == 'failed':
- if reprocess_args.resize:
- raise Exception(_('You can not run --resize on media that has not'
- ' been processed.'))
- if reprocess_args.size:
- _log.warn('With --initial_processing, the --size flag will be'
- ' ignored.')
-
- if entry_args.state == 'processing':
- raise Exception(_('We currently do not support reprocessing on media'
- ' that is in the "processing" state.'))
-
-
-def media_reprocess(args):
- reprocess_args = _parser(args)
- entry_args = args[0]
-
- # Can we actually process the given media as requested?
- _check_eligible(entry_args, reprocess_args)
-
- # Do we want to re-try initial processing?
- if reprocess_args.initial_processing:
- for id in entry_args.media_id:
- entry = MediaEntry.query.filter_by(id=id).first()
- run_process_media(entry)
-
- # Are we wanting to resize the thumbnail or medium?
- elif reprocess_args.resize:
-
- # reprocess all given media entries
- for id in entry_args.media_id:
- entry = MediaEntry.query.filter_by(id=id).first()
-
- # For now we can only reprocess with the original file
- if not entry.media_files.get('original'):
- raise Exception(_('The original file for this media entry'
- ' does not exist.'))
-
- reprocess_info = {'resize': reprocess_args.resize}
-
- if reprocess_args.size:
- reprocess_info['max_width'] = reprocess_args.size[0]
- reprocess_info['max_height'] = reprocess_args.size[1]
-
- run_process_media(entry, reprocess_info=reprocess_info)
-
-
- # If we are here, they forgot to tell us how to reprocess
- else:
- _log.warn('You must set either --resize or --initial_processing flag'
- ' to reprocess an image.')
-
-
hooks = {
'setup': setup_plugin,
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: ImageMediaManager,
- ('reprocess_action', 'image'): reprocess_action,
- ('media_reprocess', 'image'): media_reprocess,
+ ('reprocess_action', 'image'): ProcessImage().reprocess_action,
+ ('media_reprocess', 'image'): ProcessImage().media_reprocess,
}
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 8126ea2d..7ac3ac17 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -20,12 +20,16 @@ except ImportError:
import Image
import os
import logging
+import argparse
from mediagoblin import mg_globals as mgg
+from mediagoblin.db.models import MediaEntry
from mediagoblin.processing import BadMediaFail, FilenameBuilder
+from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools.exif import exif_fix_image_orientation, \
extract_exif, clean_exif, get_gps_data, get_useful, \
exif_image_needs_rotation
+from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
_log = logging.getLogger(__name__)
@@ -147,6 +151,58 @@ class ProcessImage(object):
os.mkdir(self.conversions_subdir)
+ def reprocess_action(self, args):
+ """
+ List the available actions for media in a given state
+ """
+ if args[0].state == 'processed':
+ print _('\n Available reprocessing actions for processed images:'
+ '\n \t --resize: thumb or medium'
+ '\n Options:'
+ '\n \t --size: max_width max_height (defaults to'
+ ' config specs)')
+ return True
+
+ def _parser(self, args):
+ """
+ Parses the unknown args from the gmg parser
+ """
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ '--resize',
+ choices=['thumb', 'medium'])
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ type=int)
+ parser.add_argument(
+ '--initial_processing',
+ action='store_true')
+
+ return parser.parse_args(args[1])
+
+ def _check_eligible(self, entry_args, reprocess_args):
+ """
+ Check to see if we can actually process the given media as requested
+ """
+
+ if entry_args.state == 'processed':
+ if reprocess_args.initial_processing:
+ raise Exception(_('You can not run --initial_processing on'
+ ' media that has already been processed.'))
+
+ if entry_args.state == 'failed':
+ if reprocess_args.resize:
+ raise Exception(_('You can not run --resize on media that has'
+ ' not been processed.'))
+ if reprocess_args.size:
+ _log.warn('With --initial_processing, the --size flag will be'
+ ' ignored.')
+
+ if entry_args.state == 'processing':
+ raise Exception(_('We currently do not support reprocessing on'
+ ' media that is in the "processing" state.'))
+
def initial_processing(self):
# Is there any GPS data
gps_data = get_gps_data(self.exif_tags)
@@ -179,9 +235,14 @@ class ProcessImage(object):
self.entry.media_data_init(**gps_data)
def reprocess(self, reprocess_info):
+ """
+ This function actually does the reprocessing when called by
+ ProcessMedia in gmg/processing/task.py
+ """
new_size = None
- # Did they specify a size?
+ # Did they specify a size? They must specify either both or none, so
+ # we only need to check if one is present
if reprocess_info.get('max_width'):
max_width = reprocess_info['max_width']
max_height = reprocess_info['max_height']
@@ -192,6 +253,53 @@ class ProcessImage(object):
self.name_builder.fill('{basename}.medium{ext}'),
self.conversions_subdir, self.exif_tags, new_size)
+ def media_reprocess(self, args):
+ """
+ This function handles all of the reprocessing logic before calling
+ gmg/submit/lib/run_process_media
+ """
+ reprocess_args = self._parser(args)
+ entry_args = args[0]
+
+ # Can we actually process the given media as requested?
+ self._check_eligible(entry_args, reprocess_args)
+
+ # Do we want to re-try initial processing?
+ if reprocess_args.initial_processing:
+ for id in entry_args.media_id:
+ entry = MediaEntry.query.filter_by(id=id).first()
+ run_process_media(entry)
+
+ # Are we wanting to resize the thumbnail or medium?
+ elif reprocess_args.resize:
+
+ # reprocess all given media entries
+ for id in entry_args.media_id:
+ entry = MediaEntry.query.filter_by(id=id).first()
+
+ # For now we can only reprocess with the original file
+ if not entry.media_files.get('original'):
+ raise Exception(_('The original file for this media entry'
+ ' does not exist.'))
+
+ reprocess_info = self._get_reprocess_info(reprocess_args)
+ run_process_media(entry, reprocess_info=reprocess_info)
+
+ # If we are here, they forgot to tell us how to reprocess
+ else:
+ _log.warn('You must set either --resize or --initial_processing'
+ ' flag to reprocess an image.')
+
+ def _get_reprocess_info(self, args):
+ """ Returns a dict with the info needed for reprocessing"""
+ reprocess_info = {'resize': args.resize}
+
+ if args.size:
+ reprocess_info['max_width'] = args.size[0]
+ reprocess_info['max_height'] = args.size[1]
+
+ return reprocess_info
+
if __name__ == '__main__':
import sys
import pprint
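
After this refactor the CLI side of image reprocessing only has to assemble a small reprocess_info dict and hand it to run_process_media(). A standalone sketch of what _parser() and _get_reprocess_info() produce for a hypothetical `--resize medium --size 640 480` invocation (the option names come from the parser above; the values are made up):

import argparse

parser = argparse.ArgumentParser(prog='image-reprocess-sketch')
parser.add_argument('--resize', choices=['thumb', 'medium'])
parser.add_argument('--size', nargs=2, type=int)
parser.add_argument('--initial_processing', action='store_true')

args = parser.parse_args(['--resize', 'medium', '--size', '640', '480'])

# Same shape as ProcessImage._get_reprocess_info(args)
reprocess_info = {'resize': args.resize}
if args.size:
    reprocess_info['max_width'] = args.size[0]
    reprocess_info['max_height'] = args.size[1]

print(reprocess_info)
# e.g. {'resize': 'medium', 'max_width': 640, 'max_height': 480}
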
From c541fb71f7f92ce13783400cf9b22083f38ae189 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 6 Aug 2013 10:48:26 -0700
Subject: [PATCH 024/160] fix storage parameter in get_orig_filename(), fix
__init__ for ProcessImage, better description for --size flag
---
mediagoblin/media_types/image/__init__.py | 2 +-
mediagoblin/media_types/image/processing.py | 26 +++++++++++----------
mediagoblin/processing/__init__.py | 4 +++-
3 files changed, 18 insertions(+), 14 deletions(-)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index 072611e7..68376f7f 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -34,7 +34,7 @@ def setup_plugin():
class ImageMediaManager(MediaManagerBase):
human_readable = "Image"
- processor = ProcessImage
+ processor = staticmethod(ProcessImage)
display_template = "mediagoblin/media_displays/image.html"
default_thumb = "images/media_thumbs/image.png"
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 7ac3ac17..c97811b1 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -134,22 +134,23 @@ class ProcessImage(object):
A Workbench() represents a local temporary dir. It is automatically
cleaned up when this function exits.
"""
- def init(self, proc_state):
- self.proc_state = proc_state
- self.entry = proc_state.entry
- self.workbench = proc_state.workbench
+ def __init__(self, proc_state=None):
+ if proc_state:
+ self.proc_state = proc_state
+ self.entry = proc_state.entry
+ self.workbench = proc_state.workbench
- # Conversions subdirectory to avoid collisions
- self.conversions_subdir = os.path.join(
- self.workbench.dir, 'convirsions')
+ # Conversions subdirectory to avoid collisions
+ self.conversions_subdir = os.path.join(
+ self.workbench.dir, 'convirsions')
- self.orig_filename = proc_state.get_orig_filename()
- self.name_builder = FilenameBuilder(self.orig_filename)
+ self.orig_filename = proc_state.get_orig_filename()
+ self.name_builder = FilenameBuilder(self.orig_filename)
- # Exif extraction
- self.exif_tags = extract_exif(self.orig_filename)
+ # Exif extraction
+ self.exif_tags = extract_exif(self.orig_filename)
- os.mkdir(self.conversions_subdir)
+ os.mkdir(self.conversions_subdir)
def reprocess_action(self, args):
"""
@@ -174,6 +175,7 @@ class ProcessImage(object):
parser.add_argument(
'--size',
nargs=2,
+ metavar=('max_width', 'max_height'),
type=int)
parser.add_argument(
'--initial_processing',
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 60565e09..5ce9281b 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -106,11 +106,13 @@ class ProcessingState(object):
if self.entry.queued_media_file:
orig_filepath = self.entry.queued_media_file
+ storage = mgg.queue_store
else:
orig_filepath = self.entry.media_files['original']
+ storage = mgg.public_store
orig_filename = self.workbench.localized_file(
- mgg.queue_store, orig_filepath,
+ storage, orig_filepath,
'source')
self.orig_filename = orig_filename
return orig_filename
From 14565fb72022e015ee9ba64cf087befb33516b71 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Wed, 7 Aug 2013 15:01:46 -0500
Subject: [PATCH 025/160] started coding basics of new processing code
---
mediagoblin/processing/__init__.py | 52 ++++++++++++++++++++++++++++++
1 file changed, 52 insertions(+)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 5ce9281b..95f346d2 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -74,6 +74,58 @@ class FilenameBuilder(object):
ext=self.ext)
+
+
+class MediaProcessor(object):
+ # You MUST override this in the child MediaProcessor!
+ name = None
+
+ def __init__(self, manager):
+ self.manager = manager
+
+ def media_is_eligibile(self, media_entry):
+ raise NotImplementedError
+
+ def process(self):
+ raise NotImplementedError
+
+ def generate_parser(self):
+ raise NotImplementedError
+
+
+class ProcessingManager(object):
+ def __init__(self, entry):
+ self.entry = entry
+ # May merge these two classes soon....
+ self.state = ProcessingState(entry)
+
+ # Dict of all MediaProcessors of this media type
+ self.processors = {}
+
+ def add_processor(self, processor):
+ """
+ Add a processor class to this media type
+ """
+ name = processor.name
+ if name is None:
+ raise AttributeError("Processor class's .name attribute not set")
+
+ self.processors[name] = processor
+
+ def list_eligible_processors(self):
+ """
+ List all processors that this media entry is eligible to be processed
+ for.
+ """
+ return [
+ processor
+ for processor in self.processors.keys()
+ if processor.media_is_eligible(self.entry)]
+
+ def process(self, processor):
+ pass
+
+
class ProcessingState(object):
"""
The first and only argument to the "processor" of a media type
From 274a0f67fd9c36fe01950f2547425fb115c59aff Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Wed, 7 Aug 2013 17:07:19 -0500
Subject: [PATCH 026/160] Documentation for the MediaProcessor
---
mediagoblin/processing/__init__.py | 58 +++++++++++++++++++++++++++---
1 file changed, 54 insertions(+), 4 deletions(-)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 95f346d2..41028fbb 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -75,25 +75,75 @@ class FilenameBuilder(object):
-
class MediaProcessor(object):
+ """A particular processor for this media type.
+
+ While the ProcessingManager handles all types of MediaProcessing
+ possible for a particular media type, a MediaProcessor can be
+ thought of as a *particular* processing action for a media type.
+ For example, you may have separate MediaProcessors for:
+
+ - initial_processing: the initial processing of a media
+ - gen_thumb: generate a thumbnail
+ - resize: resize an image
+ - transcode: transcode a video
+
+ ... etc.
+
+ Some information on producing a new MediaProcessor for your media type:
+
+ - You *must* supply a name attribute. This must be a class level
+ attribute, and a string. This will be used to determine the
+ subcommand of your process
+ - It's recommended that you supply a class level description
+ attribute.
+ - Supply a media_is_eligible classmethod. This will be used to
+ determine whether or not a media entry is eligible to use this
+ processor type. See the method documentation for details.
+ - To give "./bin/gmg reprocess run" abilities to this media type,
+ supply both generate_parser and parser_to_request classmethods.
+ - The process method will be what actually processes your media.
+ """
# You MUST override this in the child MediaProcessor!
name = None
+ # Optional, but will be used in various places to describe the
+ # action this MediaProcessor provides
+ description = None
+
def __init__(self, manager):
self.manager = manager
+ def process(self, **kwargs):
+ """
+ Actually process this media entry.
+ """
+ raise NotImplementedError
+
+ @classmethod
def media_is_eligibile(self, media_entry):
raise NotImplementedError
- def process(self):
- raise NotImplementedError
+ ###############################
+ # Command line interface things
+ ###############################
+ @classmethod
def generate_parser(self):
raise NotImplementedError
+ @classmethod
+ def parser_to_request(self, parser):
+ raise NotImplementedError
+
+ ##########################################
+ # THE FUTURE: web interface things here :)
+ ##########################################
+
class ProcessingManager(object):
+ """
+ """
def __init__(self, entry):
self.entry = entry
# May merge these two classes soon....
@@ -122,7 +172,7 @@ class ProcessingManager(object):
for processor in self.processors.keys()
if processor.media_is_eligible(self.entry)]
- def process(self, processor):
+ def process(self, directive, request):
pass
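
The docstring above spells out the contract a child MediaProcessor is expected to satisfy: a class-level name, an optional description, a media_is_eligible classmethod, and generate_parser / parser_to_request classmethods for the CLI. A minimal hypothetical subclass following that contract (GenThumb and its --size option are invented for illustration; a real media type would subclass mediagoblin.processing.MediaProcessor rather than object):

import argparse


class GenThumb(object):          # would subclass MediaProcessor in practice
    # Required: used as the reprocess subcommand name for this action
    name = 'gen_thumb'
    # Recommended: shown when listing the available actions
    description = 'Regenerate the thumbnail'

    @classmethod
    def media_is_eligible(cls, media_entry):
        # Only media that already finished processing can be re-thumbnailed
        return media_entry.state == 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(description=cls.description)
        parser.add_argument('--size', nargs=2, type=int)
        return parser

    @classmethod
    def parser_to_request(cls, parser):
        # Translate parsed CLI arguments into whatever process() expects
        raise NotImplementedError

    def process(self, **kwargs):
        # The actual thumbnail regeneration would happen here
        raise NotImplementedError
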
From e4bdc9091c392bf31d40bcb5ae12b92e17a6cb2a Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Thu, 8 Aug 2013 13:53:28 -0500
Subject: [PATCH 027/160] More steps towards a working reprocessing system.
Fleshing out the base classes and setting up some docstrings. Not
everything is totally clear yet, but I think it's on a good track, and
getting clearer.
This commit sponsored by Ben Finney, on behalf of Free Software Melbourne.
Thank you all!
---
mediagoblin/processing/__init__.py | 28 +++++++++++++++++++---------
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 41028fbb..66ef2a53 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -114,6 +114,10 @@ class MediaProcessor(object):
def __init__(self, manager):
self.manager = manager
+ # Should be initialized at time of processing, at least
+ self.workbench = None
+
+ # @with_workbench
def process(self, **kwargs):
"""
Actually process this media entry.
@@ -142,13 +146,12 @@ class MediaProcessor(object):
class ProcessingManager(object):
- """
- """
- def __init__(self, entry):
- self.entry = entry
- # May merge these two classes soon....
- self.state = ProcessingState(entry)
+ """Manages all the processing actions available for a media type
+ Specific processing actions, MediaProcessor subclasses, are added
+ to the ProcessingManager.
+ """
+ def __init__(self):
# Dict of all MediaProcessors of this media type
self.processors = {}
@@ -162,7 +165,7 @@ class ProcessingManager(object):
self.processors[name] = processor
- def list_eligible_processors(self):
+ def list_eligible_processors(self, entry):
"""
List all processors that this media entry is eligible to be processed
for.
@@ -170,9 +173,16 @@ class ProcessingManager(object):
return [
processor
for processor in self.processors.keys()
- if processor.media_is_eligible(self.entry)]
+ if processor.media_is_eligible(entry)]
- def process(self, directive, request):
+ def gen_process_request_via_cli(self, subparser):
+ # Got to figure out what actually goes here before I can write this properly
+ pass
+
+ def process(self, entry, directive, request):
+ """
+ Process a media entry.
+ """
pass
From 58bacb33aca6505673f90460d31811ed487bcb4c Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Fri, 9 Aug 2013 11:20:21 -0500
Subject: [PATCH 028/160] More progress towards the new reprocessing
infrastructure: args updating
This commit sponsored by Elizabeth Webber. Thanks, sis!
---
mediagoblin/gmg_commands/__init__.py | 12 ++--
mediagoblin/gmg_commands/reprocess.py | 82 ++++++++++++++++++-----
mediagoblin/media_types/image/__init__.py | 4 +-
3 files changed, 75 insertions(+), 23 deletions(-)
diff --git a/mediagoblin/gmg_commands/__init__.py b/mediagoblin/gmg_commands/__init__.py
index d3b28a3d..165a76fd 100644
--- a/mediagoblin/gmg_commands/__init__.py
+++ b/mediagoblin/gmg_commands/__init__.py
@@ -96,16 +96,16 @@ def main_cli():
subparser.set_defaults(func=exec_func)
- args = parser.parse_known_args()
- args[0].orig_conf_file = args[0].conf_file
- if args[0].conf_file is None:
+ args = parser.parse_args()
+ args.orig_conf_file = args.conf_file
+ if args.conf_file is None:
if os.path.exists('mediagoblin_local.ini') \
and os.access('mediagoblin_local.ini', os.R_OK):
- args[0].conf_file = 'mediagoblin_local.ini'
+ args.conf_file = 'mediagoblin_local.ini'
else:
- args[0].conf_file = 'mediagoblin.ini'
+ args.conf_file = 'mediagoblin.ini'
- args[0].func(args)
+ args.func(args)
if __name__ == '__main__':
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 4df0d581..30575033 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -13,6 +13,7 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
+import argparse
import os
from mediagoblin import mg_globals
@@ -23,30 +24,63 @@ from mediagoblin.tools.pluginapi import hook_handle
def reprocess_parser_setup(subparser):
- subparser.add_argument(
- '--available', '-a',
- action="store_true",
- help="List available actions for a given media entry")
- subparser.add_argument(
+ subparsers = subparser.add_subparsers(dest="reprocess_subcommand")
+
+ ###################
+ # available command
+ ###################
+ available_parser = subparsers.add_parser(
+ "available",
+ help="Find out what actions are available for this media")
+
+ available_parser.add_argument(
+ "id_or_type",
+ help="Media id or media type to check")
+
+
+ ############################################
+ # run command (TODO: and bulk_run command??)
+ ############################################
+
+ run_parser = subparsers.add_parser(
+ "run",
+ help="Run a reprocessing on one or more media")
+
+ run_parser.add_argument(
'--state', '-s',
help="Reprocess media entries in this state"
" such as 'failed' or 'processed'")
- subparser.add_argument(
+ run_parser.add_argument(
'--type', '-t',
help="The type of media to be reprocessed such as 'video' or 'image'")
- subparser.add_argument(
- '--media_id',
- nargs='*',
- help="The media_entry id(s) you wish to reprocess.")
- subparser.add_argument(
+ run_parser.add_argument(
'--thumbnails',
action="store_true",
help="Regenerate thumbnails for all processed media")
- subparser.add_argument(
+ run_parser.add_argument(
'--celery',
action='store_true',
help="Don't process eagerly, pass off to celery")
+ run_parser.add_argument(
+ 'media_id',
+ help="The media_entry id(s) you wish to reprocess.")
+
+ run_parser.add_argument(
+ 'reprocess_command',
+ help="The reprocess command you intend to run")
+
+ run_parser.add_argument(
+ 'reprocess_args',
+ nargs=argparse.REMAINDER,
+ help="rest of arguments to the reprocessing tool")
+
+
+ ###############
+ # help command?
+ ###############
+
+
def _set_media_type(args):
"""
@@ -165,11 +199,22 @@ def _set_media_state(args):
args[0].state = 'processed'
-def reprocess(args):
+def available(args):
+ # Get the media type, either by looking up media id, or by specific type
+
+ ### TODO: look up by id
+
+ #
+ pass
+
+
+def run(args):
+ ### OLD CODE, review
+
# Run eagerly unless explicitly set not to
- if not args[0].celery:
+ if not args.celery:
os.environ['CELERY_ALWAYS_EAGER'] = 'true'
- commands_util.setup_app(args[0])
+ commands_util.setup_app(args)
_set_media_state(args)
_set_media_type(args)
@@ -179,3 +224,10 @@ def reprocess(args):
return _reprocess_all(args)
return _run_reprocessing(args)
+
+
+def reprocess(args):
+ if args.reprocess_subcommand == "run":
+ run(args)
+ elif args.reprocess_subcommand == "available":
+ available(args)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index 68376f7f..17689393 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -72,6 +72,6 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: ImageMediaManager,
- ('reprocess_action', 'image'): ProcessImage().reprocess_action,
- ('media_reprocess', 'image'): ProcessImage().media_reprocess,
+ ('reprocess_action', MEDIA_TYPE): ProcessImage().reprocess_action,
+ ('media_reprocess', MEDIA_TYPE): ProcessImage().media_reprocess,
}
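
The new command layout splits `gmg reprocess` into `available` and `run` subcommands, with everything after the processor name passed through untouched via argparse.REMAINDER so each processor can parse its own options later. A stripped-down, standalone sketch of the same parser shape:

import argparse

parser = argparse.ArgumentParser(prog='reprocess')
subparsers = parser.add_subparsers(dest='reprocess_subcommand')

available_parser = subparsers.add_parser(
    'available', help='Find out what actions are available for this media')
available_parser.add_argument('id_or_type')

run_parser = subparsers.add_parser(
    'run', help='Run a reprocessing on one or more media')
run_parser.add_argument('media_id')
run_parser.add_argument('reprocess_command')
run_parser.add_argument('reprocess_args', nargs=argparse.REMAINDER)

args = parser.parse_args(['run', '42', 'initial', '--width', '640'])
print(args.reprocess_command)   # initial
print(args.reprocess_args)      # ['--width', '640']
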
From 85ead8ac3cf59aeee12ddd3b33ecfeb03c3aa946 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Fri, 9 Aug 2013 12:13:53 -0500
Subject: [PATCH 029/160] "initial" reprocessing subcommand now works!
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
We are on our way now to a working reprocessing system under this
redesign!
This commit sponsored by Bjarni Rúnar Einarsson. Thank you!
---
mediagoblin/gmg_commands/reprocess.py | 33 +++++++++--
mediagoblin/media_types/image/__init__.py | 6 +-
mediagoblin/media_types/image/processing.py | 64 ++++++++++++++++++++-
mediagoblin/processing/__init__.py | 8 ++-
4 files changed, 100 insertions(+), 11 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 30575033..d6ac99ac 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -199,13 +199,35 @@ def _set_media_state(args):
args[0].state = 'processed'
+def extract_entry_and_type(media_id):
+ raise NotImplementedError
+
+
def available(args):
# Get the media type, either by looking up media id, or by specific type
-
- ### TODO: look up by id
+ try:
+ media_id = int(args.id_or_type)
+ media_type, media_entry = extract_entry_and_type(media_id)
+ except ValueError:
+ media_type = args.id_or_type
+ media_entry = None
- #
- pass
+ manager_class = hook_handle(('reprocess_manager', media_type))
+ manager = manager_class()
+
+ if media_entry is None:
+ processors = manager.list_all_processors()
+ else:
+ processors = manager.list_eligible_processors(media_entry)
+
+ print "Available processors:"
+ print "---------------------"
+
+ for processor in processors:
+ if processor.description:
+ print " - %s: %s" % (processor.name, processor.description)
+ else:
+ print " - %s" % processor.name
def run(args):
@@ -214,7 +236,6 @@ def run(args):
# Run eagerly unless explicetly set not to
if not args.celery:
os.environ['CELERY_ALWAYS_EAGER'] = 'true'
- commands_util.setup_app(args)
_set_media_state(args)
_set_media_type(args)
@@ -227,6 +248,8 @@ def run(args):
def reprocess(args):
+ commands_util.setup_app(args)
+
if args.reprocess_subcommand == "run":
run(args)
elif args.reprocess_subcommand == "available":
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index 17689393..774b9bfa 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -18,7 +18,7 @@ import logging
from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.image.processing import ProcessImage, \
- sniff_handler
+ sniff_handler, ImageProcessingManager
from mediagoblin.tools import pluginapi
_log = logging.getLogger(__name__)
@@ -72,6 +72,6 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: ImageMediaManager,
- ('reprocess_action', MEDIA_TYPE): ProcessImage().reprocess_action,
- ('media_reprocess', MEDIA_TYPE): ProcessImage().media_reprocess,
+ ('reprocess_manager', MEDIA_TYPE): lambda: ImageProcessingManager,
+ # ('media_reprocess', MEDIA_TYPE): ProcessImage().media_reprocess,
}
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index c97811b1..ea372e76 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -24,7 +24,9 @@ import argparse
from mediagoblin import mg_globals as mgg
from mediagoblin.db.models import MediaEntry
-from mediagoblin.processing import BadMediaFail, FilenameBuilder
+from mediagoblin.processing import (
+ BadMediaFail, FilenameBuilder,
+ MediaProcessor, ProcessingManager)
from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools.exif import exif_fix_image_orientation, \
extract_exif, clean_exif, get_gps_data, get_useful, \
@@ -302,6 +304,66 @@ class ProcessImage(object):
return reprocess_info
+
+class CommonImageProcessor(MediaProcessor):
+ """
+ Provides a base for various media processing steps
+ """
+ # Common resizing step
+ def resize_step(self):
+ pass
+
+ def _add_width_height_args(self, parser):
+ parser.add_argument(
+ "--width", default=None,
+ help=(
+ "Width of the resized image (if not using defaults)"))
+ parser.add_argument(
+ "--height", default=None,
+ help=(
+ "Height of the resized image (if not using defaults)"))
+
+
+class InitialProcessor(CommonImageProcessor):
+ """
+ Initial processing step for new images
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligibile(self, media_entry):
+ """
+ Determine if this media type is eligible for processing
+ """
+ return media_entry.state in (
+ "unprocessed", "failed")
+
+ ###############################
+ # Command line interface things
+ ###############################
+
+ @classmethod
+ def generate_parser(self):
+ parser = argparse.ArgumentParser(
+ description=self.description)
+
+ self._add_width_height_args(parser)
+
+ return parser
+
+ @classmethod
+ def args_to_request(self, args):
+ raise NotImplementedError
+
+
+
+class ImageProcessingManager(ProcessingManager):
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
+
+
if __name__ == '__main__':
import sys
import pprint
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 66ef2a53..95622b9d 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -14,6 +14,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
+from collections import OrderedDict
import logging
import os
@@ -153,7 +154,7 @@ class ProcessingManager(object):
"""
def __init__(self):
# Dict of all MediaProcessors of this media type
- self.processors = {}
+ self.processors = OrderedDict()
def add_processor(self, processor):
"""
@@ -172,9 +173,12 @@ class ProcessingManager(object):
"""
return [
processor
- for processor in self.processors.keys()
+ for processor in self.processors.values()
if processor.media_is_eligible(entry)]
+ def list_all_processors(self):
+ return self.processors.values()
+
def gen_process_request_via_cli(self, subparser):
# Got to figure out what actually goes here before I can write this properly
pass
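
available() now accepts either a numeric media id or a media type name, and decides which it was given with a simple int() attempt. A toy version of just that decision (the real code goes on to load the MediaEntry with extract_entry_and_type() and to look up the manager through hook_handle):

def resolve_id_or_type(id_or_type):
    """Report whether the argument names a media id or a media type."""
    try:
        media_id = int(id_or_type)
    except ValueError:
        # Not an integer: treat the argument as a media type such as 'image'
        return ('type', id_or_type)
    return ('id', media_id)


print(resolve_id_or_type('image'))   # ('type', 'image')
print(resolve_id_or_type('42'))      # ('id', 42)
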
From 55a10fef0ae97cb33c8393a7a25487c2666b4cf1 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Fri, 9 Aug 2013 13:56:23 -0500
Subject: [PATCH 030/160] `gmg reprocess available --action-help` now tells you
processor arguments!
Every reprocessing action possible can inform you of its command line
argument stuff! Is that awesome or what?
---
mediagoblin/gmg_commands/reprocess.py | 26 ++++++++++++++++-----
mediagoblin/media_types/image/processing.py | 14 ++++++-----
mediagoblin/processing/__init__.py | 6 ++---
3 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index d6ac99ac..70163928 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -37,6 +37,11 @@ def reprocess_parser_setup(subparser):
"id_or_type",
help="Media id or media type to check")
+ available_parser.add_argument(
+ "--action-help",
+ action="store_true",
+ help="List argument help for each action available")
+
############################################
# run command (TODO: and bulk_run command??)
@@ -221,13 +226,22 @@ def available(args):
processors = manager.list_eligible_processors(media_entry)
print "Available processors:"
- print "---------------------"
+ print "====================="
- for processor in processors:
- if processor.description:
- print " - %s: %s" % (processor.name, processor.description)
- else:
- print " - %s" % processor.name
+ if args.action_help:
+ for processor in processors:
+ print processor.name
+ print "-" * len(processor.name)
+
+ parser = processor.generate_parser()
+ parser.print_help()
+
+ else:
+ for processor in processors:
+ if processor.description:
+ print " - %s: %s" % (processor.name, processor.description)
+ else:
+ print " - %s" % processor.name
def run(args):
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index ea372e76..f4ba4e5a 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -313,7 +313,8 @@ class CommonImageProcessor(MediaProcessor):
def resize_step(self):
pass
- def _add_width_height_args(self, parser):
+ @classmethod
+ def _add_width_height_args(cls, parser):
parser.add_argument(
"--width", default=None,
help=(
@@ -332,7 +333,7 @@ class InitialProcessor(CommonImageProcessor):
description = "Initial processing"
@classmethod
- def media_is_eligibile(self, media_entry):
+ def media_is_eligibile(cls, media_entry):
"""
Determine if this media type is eligible for processing
"""
@@ -344,16 +345,17 @@ class InitialProcessor(CommonImageProcessor):
###############################
@classmethod
- def generate_parser(self):
+ def generate_parser(cls):
parser = argparse.ArgumentParser(
- description=self.description)
+ description=cls.description,
+ prog=cls.name)
- self._add_width_height_args(parser)
+ cls._add_width_height_args(parser)
return parser
@classmethod
- def args_to_request(self, args):
+ def args_to_request(cls, args):
raise NotImplementedError
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 95622b9d..9e77d2b2 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -126,7 +126,7 @@ class MediaProcessor(object):
raise NotImplementedError
@classmethod
- def media_is_eligibile(self, media_entry):
+ def media_is_eligibile(cls, media_entry):
raise NotImplementedError
###############################
@@ -134,11 +134,11 @@ class MediaProcessor(object):
###############################
@classmethod
- def generate_parser(self):
+ def generate_parser(cls):
raise NotImplementedError
@classmethod
- def parser_to_request(self, parser):
+ def parser_to_request(cls, parser):
raise NotImplementedError
##########################################
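
For --action-help, each eligible processor's generate_parser() is built with prog set to the processor name and its help text is printed. A standalone sketch of roughly what that prints for the 'initial' processor above (argparse only; the option help strings are copied from the patch):

import argparse

# Build a parser the way InitialProcessor.generate_parser() does and show
# the help text, which is what --action-help dumps per processor.
parser = argparse.ArgumentParser(prog='initial', description='Initial processing')
parser.add_argument(
    '--width', default=None,
    help='Width of the resized image (if not using defaults)')
parser.add_argument(
    '--height', default=None,
    help='Height of the resized image (if not using defaults)')

print(parser.format_help())
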
From 19ed12b2790ec32d993e6e73c101ea820fe3ed29 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Fri, 9 Aug 2013 14:10:52 -0500
Subject: [PATCH 031/160] Whitespacin' it up.
---
mediagoblin/gmg_commands/reprocess.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 70163928..62a6d428 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -227,6 +227,7 @@ def available(args):
print "Available processors:"
print "====================="
+ print ""
if args.action_help:
for processor in processors:
@@ -235,6 +236,7 @@ def available(args):
parser = processor.generate_parser()
parser.print_help()
+ print ""
else:
for processor in processors:
@@ -266,5 +268,6 @@ def reprocess(args):
if args.reprocess_subcommand == "run":
run(args)
+
elif args.reprocess_subcommand == "available":
available(args)
From 7a414c8d42d63685655c0142345dcae6c66726af Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Fri, 9 Aug 2013 14:15:18 -0500
Subject: [PATCH 032/160] Moving celery setup to the right place
This commit sponsored by Jose Manuel Zueco Lazaro. Thank you!
---
mediagoblin/gmg_commands/reprocess.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 62a6d428..2af88847 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -249,10 +249,6 @@ def available(args):
def run(args):
### OLD CODE, review
- # Run eagerly unless explicitly set not to
- if not args.celery:
- os.environ['CELERY_ALWAYS_EAGER'] = 'true'
-
_set_media_state(args)
_set_media_type(args)
@@ -264,6 +260,10 @@ def run(args):
def reprocess(args):
+ # Run eagerly unless explicitly set not to
+ if not args.celery:
+ os.environ['CELERY_ALWAYS_EAGER'] = 'true'
+
commands_util.setup_app(args)
if args.reprocess_subcommand == "run":
From 4ba5bdd96ef3703d8da216ca3dd92f080214f164 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Fri, 9 Aug 2013 16:12:06 -0500
Subject: [PATCH 033/160] Steps toward working "run" reprocessing command.
This commit sponsored by Philippe Casteleyn. Thank you!
---
mediagoblin/gmg_commands/reprocess.py | 39 ++++++++++++++++-----
mediagoblin/media_types/image/processing.py | 2 +-
mediagoblin/processing/__init__.py | 26 +++++++++++++-
3 files changed, 57 insertions(+), 10 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 2af88847..0d8db858 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -21,6 +21,7 @@ from mediagoblin.db.models import MediaEntry
from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.tools.pluginapi import hook_handle
+from mediagoblin.processing import ProcessorDoesNotExist, ProcessorNotEligible
def reprocess_parser_setup(subparser):
@@ -204,8 +205,17 @@ def _set_media_state(args):
args[0].state = 'processed'
+class MediaEntryNotFound(Exception): pass
+
def extract_entry_and_type(media_id):
- raise NotImplementedError
+ """
+ Fetch a media entry, as well as its media type
+ """
+ entry = MediaEntry.query.filter_by(id=media_id).first()
+ if entry is None:
+ raise MediaEntryNotFound("Can't find media with id '%s'" % media_id)
+
+ return entry.media_type, entry
def available(args):
@@ -247,16 +257,29 @@ def available(args):
def run(args):
- ### OLD CODE, review
+ media_type, media_entry = extract_entry_and_type(args.media_id)
- _set_media_state(args)
- _set_media_type(args)
+ manager_class = hook_handle(('reprocess_manager', media_type))
+ manager = manager_class()
- # If no media_ids were given, then try to reprocess all entries
- if not args[0].media_id:
- return _reprocess_all(args)
+ # TOOD: Specify in error
+ try:
+ processor_class = manager.get_processor(
+ args.reprocess_command, media_entry)
+ except ProcessorDoesNotExist:
+ print 'No such processor "%s" for media with id "%s"' % (
+ args.reprocess_command, media_entry.id)
+ return
+ except ProcessorNotEligible:
+ print 'Processor "%s" exists but media "%s" is not eligible' % (
+ args.reprocess_command, media_entry.id)
+ return
- return _run_reprocessing(args)
+ reprocess_parser = processor_class.generate_parser()
+ reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
+
+ import pdb
+ pdb.set_trace()
def reprocess(args):
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index f4ba4e5a..575e9f5f 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -333,7 +333,7 @@ class InitialProcessor(CommonImageProcessor):
description = "Initial processing"
@classmethod
- def media_is_eligibile(cls, media_entry):
+ def media_is_eligible(cls, media_entry):
"""
Determine if this media type is eligible for processing
"""
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 9e77d2b2..6ef203cb 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -126,7 +126,7 @@ class MediaProcessor(object):
raise NotImplementedError
@classmethod
- def media_is_eligibile(cls, media_entry):
+ def media_is_eligible(cls, media_entry):
raise NotImplementedError
###############################
@@ -146,6 +146,11 @@ class MediaProcessor(object):
##########################################
+class ProcessingKeyError(Exception): pass
+class ProcessorDoesNotExist(ProcessingKeyError): pass
+class ProcessorNotEligible(ProcessingKeyError): pass
+
+
class ProcessingManager(object):
"""Manages all the processing actions available for a media type
@@ -183,6 +188,25 @@ class ProcessingManager(object):
# Got to figure out what actually goes here before I can write this properly
pass
+ def get_processor(self, key, entry=None):
+ """
+ Get the processor with this key.
+
+ If entry supplied, make sure this entry is actually compatible;
+ otherwise raise error.
+ """
+ try:
+ processor = self.processors[key]
+ except KeyError:
+ raise ProcessorDoesNotExist(
+ "'%s' processor does not exist for this media type" % key)
+
+ if entry and not processor.media_is_eligible(entry):
+ raise ProcessorNotEligible(
+ "This entry is not eligible for processor with name '%s'" % key)
+
+ return processor
+
def process(self, entry, directive, request):
"""
Process a media entry.
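One thing the hunks above establish but do not spell out: ProcessorDoesNotExist and ProcessorNotEligible share the ProcessingKeyError base, so callers can either tell the two failures apart, as run() does, or catch the base class in a single branch. A small self-contained sketch of that calling pattern, with a plain dict standing in for a real ProcessingManager:

    class ProcessingKeyError(Exception): pass
    class ProcessorDoesNotExist(ProcessingKeyError): pass
    class ProcessorNotEligible(ProcessingKeyError): pass

    def get_processor(processors, key, entry=None):
        # same shape as ProcessingManager.get_processor() in the hunk above
        try:
            processor = processors[key]
        except KeyError:
            raise ProcessorDoesNotExist(
                "'%s' processor does not exist for this media type" % key)
        if entry and not processor.media_is_eligible(entry):
            raise ProcessorNotEligible(
                "This entry is not eligible for processor with name '%s'" % key)
        return processor

    try:
        get_processor({}, 'resize')
    except ProcessingKeyError as exc:
        # callers that don't care which lookup step failed catch the base
        print('lookup failed: %s' % exc)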
From d1e9913b71a6f3b7bb41f5eee1051093b92fcd8a Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Fri, 9 Aug 2013 17:30:52 -0500
Subject: [PATCH 034/160] Should be enough to get to the point where you can
actually initialize a processing command now.
However, it doesn't celery task-ify it...
This commit sponsored by Catalin Cosovanu. Thank you!
---
mediagoblin/gmg_commands/reprocess.py | 9 +++++----
mediagoblin/media_types/image/processing.py | 6 ++++--
mediagoblin/processing/__init__.py | 16 ++++++++++++++--
3 files changed, 23 insertions(+), 8 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 0d8db858..6d04427e 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -262,7 +262,8 @@ def run(args):
manager_class = hook_handle(('reprocess_manager', media_type))
manager = manager_class()
- # TOOD: Specify in error
+ # TODO: (maybe?) This could probably be handled entirely by the
+ # processor class...
try:
processor_class = manager.get_processor(
args.reprocess_command, media_entry)
@@ -277,9 +278,9 @@ def run(args):
reprocess_parser = processor_class.generate_parser()
reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
-
- import pdb
- pdb.set_trace()
+ reprocess_request = processor_class.args_to_request(reprocess_args)
+ processor = processor_class(manager, media_entry)
+ processor.process(**reprocess_request)
def reprocess(args):
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 575e9f5f..83b4adff 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -26,7 +26,8 @@ from mediagoblin import mg_globals as mgg
from mediagoblin.db.models import MediaEntry
from mediagoblin.processing import (
BadMediaFail, FilenameBuilder,
- MediaProcessor, ProcessingManager)
+ MediaProcessor, ProcessingManager,
+ request_from_args)
from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools.exif import exif_fix_image_orientation, \
extract_exif, clean_exif, get_gps_data, get_useful, \
@@ -356,7 +357,8 @@ class InitialProcessor(CommonImageProcessor):
@classmethod
def args_to_request(cls, args):
- raise NotImplementedError
+ return request_from_args(
+ args, ['width', 'height'])
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 6ef203cb..1c8f7202 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -112,8 +112,9 @@ class MediaProcessor(object):
# action this MediaProcessor provides
description = None
- def __init__(self, manager):
+ def __init__(self, manager, media_entry):
self.manager = manager
+ self.media_entry = media_entry
# Should be initialized at time of processing, at least
self.workbench = None
@@ -138,7 +139,7 @@ class MediaProcessor(object):
raise NotImplementedError
@classmethod
- def parser_to_request(cls, parser):
+ def args_to_request(cls, args):
raise NotImplementedError
##########################################
@@ -214,6 +215,17 @@ class ProcessingManager(object):
pass
+def request_from_args(args, which_args):
+ """
+ Generate a request from the values of some argparse parsed args
+ """
+ request = {}
+ for arg in which_args:
+ request[arg] = getattr(args, arg)
+
+ return request
+
+
class ProcessingState(object):
"""
The first and only argument to the "processor" of a media type
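The request_from_args() helper added above is a selective Namespace-to-dict copy: generate_parser() builds an argparse parser, args_to_request() picks the relevant attributes off the parsed result, and run() splats the dict into processor.process(**reprocess_request). A self-contained illustration; the helper body is copied from the hunk, and the --width/--height options mirror what InitialProcessor.args_to_request() asks for at this point in the series:

    import argparse

    def request_from_args(args, which_args):
        # copied from the hunk above: pull selected attributes off an
        # argparse Namespace into a plain dict
        request = {}
        for arg in which_args:
            request[arg] = getattr(args, arg)
        return request

    parser = argparse.ArgumentParser(prog='initial')
    parser.add_argument('--width', type=int)
    parser.add_argument('--height', type=int)

    args = parser.parse_args(['--width', '640', '--height', '480'])
    print(request_from_args(args, ['width', 'height']))
    # -> {'width': 640, 'height': 480} (key order may vary), ready to be
    #    passed along as processor.process(**request)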
From 77ea4c9bd1e8372fb7206596ca5125738033ced5 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Sun, 11 Aug 2013 14:34:45 -0500
Subject: [PATCH 035/160] Updating to the point where we can allllmost run with
the new reprocessing code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This commit sponsored by Odin Hørthe Omdal. Thank you!
---
mediagoblin/gmg_commands/reprocess.py | 38 +++++++++----------------
mediagoblin/processing/__init__.py | 40 +++++++++++++++++++++++++--
mediagoblin/processing/task.py | 26 ++++++-----------
mediagoblin/submit/lib.py | 8 ++++--
4 files changed, 64 insertions(+), 48 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 6d04427e..24fcde37 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -19,9 +19,12 @@ import os
from mediagoblin import mg_globals
from mediagoblin.db.models import MediaEntry
from mediagoblin.gmg_commands import util as commands_util
+from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.tools.pluginapi import hook_handle
-from mediagoblin.processing import ProcessorDoesNotExist, ProcessorNotEligible
+from mediagoblin.processing import (
+ ProcessorDoesNotExist, ProcessorNotEligible,
+ get_entry_and_manager, get_manager_for_type)
def reprocess_parser_setup(subparser):
@@ -205,31 +208,16 @@ def _set_media_state(args):
args[0].state = 'processed'
-class MediaEntryNotFound(Exception): pass
-
-def extract_entry_and_type(media_id):
- """
- Fetch a media entry, as well as its media type
- """
- entry = MediaEntry.query.filter_by(id=media_id).first()
- if entry is None:
- raise MediaEntryNotFound("Can't find media with id '%s'" % media_id)
-
- return entry.media_type, entry
-
-
def available(args):
# Get the media type, either by looking up media id, or by specific type
try:
- media_id = int(args.id_or_type)
- media_type, media_entry = extract_entry_and_type(media_id)
+ media_entry, manager = get_entry_and_manager(args.id_or_type)
+ media_type = media_entry.type
except ValueError:
media_type = args.id_or_type
media_entry = None
+ manager = get_manager_for_type(media_type)
- manager_class = hook_handle(('reprocess_manager', media_type))
- manager = manager_class()
-
if media_entry is None:
processors = manager.list_all_processors()
else:
@@ -257,10 +245,7 @@ def available(args):
def run(args):
- media_type, media_entry = extract_entry_and_type(args.media_id)
-
- manager_class = hook_handle(('reprocess_manager', media_type))
- manager = manager_class()
+ media_entry, manager = get_entry_and_manager(args.media_id)
# TODO: (maybe?) This could probably be handled entirely by the
# processor class...
@@ -279,8 +264,11 @@ def run(args):
reprocess_parser = processor_class.generate_parser()
reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
reprocess_request = processor_class.args_to_request(reprocess_args)
- processor = processor_class(manager, media_entry)
- processor.process(**reprocess_request)
+ run_process_media(
+ media_entry,
+ reprocess_action=args.reprocess_command,
+ reprocess_info=reprocess_request)
+ manager.process(media_entry, args.reprocess_command, **reprocess_request)
def reprocess(args):
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 1c8f7202..b668baa7 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -18,9 +18,10 @@ from collections import OrderedDict
import logging
import os
-from mediagoblin.db.util import atomic_update
from mediagoblin import mg_globals as mgg
-
+from mediagoblin.db.util import atomic_update
+from mediagoblin.db.models import MediaEntry
+from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
_log = logging.getLogger(__name__)
@@ -208,7 +209,7 @@ class ProcessingManager(object):
return processor
- def process(self, entry, directive, request):
+ def process_from_args(self, entry, reprocess_command, request):
"""
Process a media entry.
"""
@@ -226,6 +227,39 @@ def request_from_args(args, which_args):
return request
+class MediaEntryNotFound(Exception): pass
+
+
+def get_manager_for_type(media_type):
+ """
+ Get the appropriate media manager for this type
+ """
+ manager_class = hook_handle(('reprocess_manager', media_type))
+ manager = manager_class()
+
+ return manager
+
+
+def get_entry_and_manager(media_id):
+ """
+ Get a MediaEntry, its media type, and its manager all in one go.
+
+ Returns a tuple of: `(entry, media_type, media_manager)`
+ """
+ entry = MediaEntry.query.filter_by(id=media_id).first()
+ if entry is None:
+ raise MediaEntryNotFound("Can't find media with id '%s'" % media_id)
+
+ manager = get_manager_for_type(entry.media_type)
+
+ return entry, manager
+
+
+################################################
+# TODO: This ProcessingState is OUTDATED,
+# and needs to be refactored into other tools!
+################################################
+
class ProcessingState(object):
"""
The first and only argument to the "processor" of a media type
diff --git a/mediagoblin/processing/task.py b/mediagoblin/processing/task.py
index 36ee31fd..240be4e5 100644
--- a/mediagoblin/processing/task.py
+++ b/mediagoblin/processing/task.py
@@ -21,9 +21,9 @@ import urllib2
from celery import registry, task
from mediagoblin import mg_globals as mgg
-from mediagoblin.db.models import MediaEntry
-from . import mark_entry_failed, BaseProcessingFail, ProcessingState
+from . import mark_entry_failed, BaseProcessingFail
from mediagoblin.tools.processing import json_processing_callback
+from mediagoblin.processing import get_entry_and_manager
_log = logging.getLogger(__name__)
logging.basicConfig()
@@ -68,7 +68,7 @@ class ProcessMedia(task.Task):
"""
Pass this entry off for processing.
"""
- def run(self, media_id, feed_url, reprocess_info=None):
+ def run(self, media_id, feed_url, reprocess_action, reprocess_info=None):
"""
Pass the media entry off to the appropriate processing function
(for now just process_image...)
@@ -78,28 +78,20 @@ class ProcessMedia(task.Task):
:param reprocess: A dict containing all of the necessary reprocessing
info for the media_type.
"""
- entry = MediaEntry.query.get(media_id)
+ reprocess_info = reprocess_info or {}
+ entry, manager = get_entry_and_manager(media_id)
# Try to process, and handle expected errors.
try:
+ processor_class = manager.get_processor(reprocess_action, entry)
+
entry.state = u'processing'
entry.save()
_log.debug('Processing {0}'.format(entry))
- proc_state = ProcessingState(entry)
- with mgg.workbench_manager.create() as workbench:
-
- proc_state.set_workbench(workbench)
- processor = entry.media_manager.processor(proc_state)
-
- # If we have reprocess_info, let's reprocess
- if reprocess_info:
- processor.reprocess(reprocess_info)
-
- # Run initial processing
- else:
- processor.initial_processing()
+ with processor_class(manager, entry) as processor:
+ processor.process(**reprocess_info)
# We set the state to processed and save the entry here so there's
# no need to save at the end of the processing stage, probably ;)
diff --git a/mediagoblin/submit/lib.py b/mediagoblin/submit/lib.py
index 3619a329..ad37203d 100644
--- a/mediagoblin/submit/lib.py
+++ b/mediagoblin/submit/lib.py
@@ -76,7 +76,8 @@ def prepare_queue_task(app, entry, filename):
return queue_file
-def run_process_media(entry, feed_url=None, reprocess_info=None):
+def run_process_media(entry, feed_url=None,
+ reprocess_action="inital", reprocess_info=None):
"""Process the media asynchronously
:param entry: MediaEntry() instance to be processed.
@@ -84,11 +85,12 @@ def run_process_media(entry, feed_url=None, reprocess_info=None):
should be notified of. This will be sth like: `request.urlgen(
'mediagoblin.user_pages.atom_feed',qualified=True,
user=request.user.username)`
- :param reprocess: A dict containing all of the necessary reprocessing
+ :param reprocess_action: What particular action should be run.
+ :param reprocess_info: A dict containing all of the necessary reprocessing
info for the given media_type"""
try:
process_media.apply_async(
- [entry.id, feed_url, reprocess_info], {},
+ [entry.id, feed_url, reprocess_action, reprocess_info], {},
task_id=entry.queued_task_id)
except BaseException as exc:
# The purpose of this section is because when running in "lazy"
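With this patch the command-line run() no longer drives a processor directly: it hands the entry to run_process_media(), which packs (entry.id, feed_url, reprocess_action, reprocess_info) into the ProcessMedia task, and the task looks the entry, manager and processor back up by id before calling processor.process(**reprocess_info). Roughly, the CLI side reduces to the call sketched below; the action name and size are made up for illustration, and media_entry stands for a MediaEntry instance already fetched by get_entry_and_manager():

    from mediagoblin.submit.lib import run_process_media

    # queue (or, with CELERY_ALWAYS_EAGER, immediately run) one entry;
    # the keyword arguments travel to ProcessMedia.run() unchanged
    run_process_media(
        media_entry,
        reprocess_action='resize',                # hypothetical action name
        reprocess_info={'size': (640, 480)})      # becomes process(**info)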
From a59f92f3eca79b8070c2142a2be0ee5a48fe4611 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Sun, 11 Aug 2013 14:48:21 -0500
Subject: [PATCH 036/160] That manager.process() line no longer made sense
---
mediagoblin/gmg_commands/reprocess.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 24fcde37..9a9196bb 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -268,7 +268,6 @@ def run(args):
media_entry,
reprocess_action=args.reprocess_command,
reprocess_info=reprocess_request)
- manager.process(media_entry, args.reprocess_command, **reprocess_request)
def reprocess(args):
From 55cfa3406390732173195bb920bf3f86bd1ce9f4 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Sun, 11 Aug 2013 15:22:43 -0500
Subject: [PATCH 037/160] Renaming the processing manager stuff to be less
ambiguous.
BONUS COMMIT to Ben Finney and the Free Software Melbourne crew. :)
IRONY: Initially I committed this as "media manager".
---
mediagoblin/gmg_commands/reprocess.py | 8 ++++----
mediagoblin/processing/__init__.py | 13 ++++++++++---
mediagoblin/processing/task.py | 4 ++--
3 files changed, 16 insertions(+), 9 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 9a9196bb..10ab50ab 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -24,7 +24,7 @@ from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.processing import (
ProcessorDoesNotExist, ProcessorNotEligible,
- get_entry_and_manager, get_manager_for_type)
+ get_entry_and_processing_manager, get_processing_manager_for_type)
def reprocess_parser_setup(subparser):
@@ -211,12 +211,12 @@ def _set_media_state(args):
def available(args):
# Get the media type, either by looking up media id, or by specific type
try:
- media_entry, manager = get_entry_and_manager(args.id_or_type)
+ media_entry, manager = get_entry_and_processing_manager(args.id_or_type)
media_type = media_entry.type
except ValueError:
media_type = args.id_or_type
media_entry = None
- manager = get_manager_for_type(media_type)
+ manager = get_processing_manager_for_type(media_type)
if media_entry is None:
processors = manager.list_all_processors()
@@ -245,7 +245,7 @@ def available(args):
def run(args):
- media_entry, manager = get_entry_and_manager(args.media_id)
+ media_entry, manager = get_entry_and_processing_manager(args.media_id)
# TODO: (maybe?) This could probably be handled entirely by the
# processor class...
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index b668baa7..02dba2f9 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -120,6 +120,13 @@ class MediaProcessor(object):
# Should be initialized at time of processing, at least
self.workbench = None
+ def __enter__(self):
+ self.workbench = mgg.workbench_manager.create()
+
+ def __exit__(self, *args):
+ self.workbench.destroy()
+ self.workbench = None
+
# @with_workbench
def process(self, **kwargs):
"""
@@ -230,7 +237,7 @@ def request_from_args(args, which_args):
class MediaEntryNotFound(Exception): pass
-def get_manager_for_type(media_type):
+def get_processing_manager_for_type(media_type):
"""
Get the appropriate media manager for this type
"""
@@ -240,7 +247,7 @@ def get_manager_for_type(media_type):
return manager
-def get_entry_and_manager(media_id):
+def get_entry_and_processing_manager(media_id):
"""
Get a MediaEntry, its media type, and its manager all in one go.
@@ -250,7 +257,7 @@ def get_entry_and_manager(media_id):
if entry is None:
raise MediaEntryNotFound("Can't find media with id '%s'" % media_id)
- manager = get_manager_for_type(entry.media_type)
+ manager = get_processing_manager_for_type(entry.media_type)
return entry, manager
diff --git a/mediagoblin/processing/task.py b/mediagoblin/processing/task.py
index 240be4e5..397514d0 100644
--- a/mediagoblin/processing/task.py
+++ b/mediagoblin/processing/task.py
@@ -23,7 +23,7 @@ from celery import registry, task
from mediagoblin import mg_globals as mgg
from . import mark_entry_failed, BaseProcessingFail
from mediagoblin.tools.processing import json_processing_callback
-from mediagoblin.processing import get_entry_and_manager
+from mediagoblin.processing import get_entry_and_processing_manager
_log = logging.getLogger(__name__)
logging.basicConfig()
@@ -79,7 +79,7 @@ class ProcessMedia(task.Task):
info for the media_type.
"""
reprocess_info = reprocess_info or {}
- entry, manager = get_entry_and_manager(media_id)
+ entry, manager = get_entry_and_processing_manager(media_id)
# Try to process, and handle expected errors.
try:
From 2fa7b7f81a308210e6f7a6556df18e24466732af Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Sun, 11 Aug 2013 16:53:37 -0500
Subject: [PATCH 038/160] Marking the initial steps for processing
---
mediagoblin/media_types/image/processing.py | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 83b4adff..b8ac1a60 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -325,6 +325,18 @@ class CommonImageProcessor(MediaProcessor):
help=(
"Height of the resized image (if not using defaults)"))
+ def fetch_original(self):
+ pass
+
+ def generate_medium_if_applicable(self, size=None):
+ pass
+
+ def generate_thumb(self, size=None):
+ pass
+
+ def extract_metadata(self):
+ pass
+
class InitialProcessor(CommonImageProcessor):
"""
@@ -361,6 +373,12 @@ class InitialProcessor(CommonImageProcessor):
args, ['width', 'height'])
+ def process(self, size=None, thumb_size=None):
+ self.fetch_original()
+ self.generate_medium_if_applicable(size=size)
+ self.generate_thumb(size=thumb_size)
+ self.extract_metadata()
+
class ImageProcessingManager(ProcessingManager):
def __init__(self):
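The stubs above carve image processing into small named steps (fetch_original, generate_medium_if_applicable, generate_thumb, extract_metadata), with InitialProcessor.process() only sequencing them. The apparent point of the split is reuse: a narrower action can run just the steps it needs. Nothing like that exists yet at this point in the series (a resize-only processor shows up later), so the subclass below is purely illustrative:

    class ThumbOnlyProcessor(CommonImageProcessor):   # hypothetical
        name = 'thumb_only'
        description = 'Regenerate just the thumbnail'

        def process(self, thumb_size=None):
            # reuse the shared steps rather than reimplementing them
            self.fetch_original()
            self.generate_thumb(size=thumb_size)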
From 22479c39a0fff75208309e437f5fdaf57730cf0e Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 08:22:14 -0500
Subject: [PATCH 039/160] Record the original state of the media entry in the
processor
This allows our processor to make some informed decisions based on the
state by still having access to the original state.
This commit sponsored by William Rico. Thank you!
---
mediagoblin/processing/__init__.py | 1 +
mediagoblin/processing/task.py | 12 +++++++-----
2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 02dba2f9..47f0b84e 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -116,6 +116,7 @@ class MediaProcessor(object):
def __init__(self, manager, media_entry):
self.manager = manager
self.media_entry = media_entry
+ self.entry_orig_state = media_entry.state
# Should be initialized at time of processing, at least
self.workbench = None
diff --git a/mediagoblin/processing/task.py b/mediagoblin/processing/task.py
index 397514d0..d3770588 100644
--- a/mediagoblin/processing/task.py
+++ b/mediagoblin/processing/task.py
@@ -85,12 +85,14 @@ class ProcessMedia(task.Task):
try:
processor_class = manager.get_processor(reprocess_action, entry)
- entry.state = u'processing'
- entry.save()
-
- _log.debug('Processing {0}'.format(entry))
-
with processor_class(manager, entry) as processor:
+ # Initial state change has to be here because
+ # the entry.state gets recorded on processor_class init
+ entry.state = u'processing'
+ entry.save()
+
+ _log.debug('Processing {0}'.format(entry))
+
processor.process(**reprocess_info)
# We set the state to processed and save the entry here so there's
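The snapshot matters because of the reordering in task.py just above: entry.state is flipped to u'processing' only after the processor object has been built, so inside process() the live state no longer says where the entry started. A processor that wants to behave differently for a fresh submission versus a re-run over already published media can consult the snapshot instead. A hypothetical sketch (no processor in this series uses the attribute yet):

    from mediagoblin.processing import MediaProcessor

    class ExampleProcessor(MediaProcessor):       # illustrative subclass only
        def process(self, **kwargs):
            # self.media_entry.state is already u'processing' here; the
            # value captured in __init__ still reflects the starting state
            if self.entry_orig_state == 'processed':
                print('re-running over already published media')
            else:
                print('first-time processing of a fresh upload')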
From eb372949a13c67962e7460e2411f389ff87d2661 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 08:57:56 -0500
Subject: [PATCH 040/160] Factored the get_orig_filename from processing state
and put it to use.
This commit sponsored by Vincent Demeester. Thank you!
---
mediagoblin/media_types/image/processing.py | 12 ++++++--
mediagoblin/processing/__init__.py | 34 +++++++++++++++++++++
2 files changed, 44 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index b8ac1a60..35069af4 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -27,7 +27,7 @@ from mediagoblin.db.models import MediaEntry
from mediagoblin.processing import (
BadMediaFail, FilenameBuilder,
MediaProcessor, ProcessingManager,
- request_from_args)
+ request_from_args, get_orig_filename)
from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools.exif import exif_fix_image_orientation, \
extract_exif, clean_exif, get_gps_data, get_useful, \
@@ -325,8 +325,15 @@ class CommonImageProcessor(MediaProcessor):
help=(
"Height of the resized image (if not using defaults)"))
+ def setup_workbench_subdirs(self):
+ # Conversions subdirectory to avoid collisions
+ self.conversions_subdir = os.path.join(
+ self.workbench.dir, 'convirsions')
+
def fetch_original(self):
- pass
+ self.orig_filename = get_orig_filename(
+ self.entry, self.workbench)
+ self.name_builder = FilenameBuilder(self.orig_filename)
def generate_medium_if_applicable(self, size=None):
pass
@@ -374,6 +381,7 @@ class InitialProcessor(CommonImageProcessor):
def process(self, size=None, thumb_size=None):
+ self.setup_workbench_subdirs()
self.fetch_original()
self.generate_medium_if_applicable(size=size)
self.generate_thumb(size=thumb_size)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 47f0b84e..9466aec6 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -372,6 +372,40 @@ def mark_entry_failed(entry_id, exc):
u'fail_metadata': {}})
+###############################################################################
+# refactoring procstate stuff here
+
+
+def get_orig_filename(entry, workbench):
+ """
+ Get the a filename for the original, on local storage
+
+ If the media entry has a queued_media_file, use that, otherwise
+ use the original.
+
+ In the future, this will return the highest quality file available
+ if neither the original or queued file are available by checking
+ some ordered list of preferred keys.
+ """
+ if entry.queued_media_file:
+ orig_filepath = entry.queued_media_file
+ storage = mgg.queue_store
+ else:
+ orig_filepath = entry.media_files['original']
+ storage = mgg.public_store
+
+ orig_filename = workbench.localized_file(
+ storage, orig_filepath,
+ 'source')
+
+ return orig_filename
+
+
+# end refactoring
+###############################################################################
+
+
+
class BaseProcessingFail(Exception):
"""
Base exception that all other processing failure messages should
From 5fd239fa581780134e5d5f6547bb2f50f139e30d Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 10:40:07 -0500
Subject: [PATCH 041/160] Theoretically the last steps to get reprocessing
working for initial & images
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Haven't tested it yet though :)
This commit sponsored by Samuel Bächler. Thank you!
---
mediagoblin/media_types/image/processing.py | 60 +++++++++++++++------
mediagoblin/processing/__init__.py | 44 +++++++++++----
2 files changed, 79 insertions(+), 25 deletions(-)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 35069af4..affa7dc9 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -27,7 +27,8 @@ from mediagoblin.db.models import MediaEntry
from mediagoblin.processing import (
BadMediaFail, FilenameBuilder,
MediaProcessor, ProcessingManager,
- request_from_args, get_orig_filename)
+ request_from_args, get_orig_filename,
+ store_public, copy_original)
from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools.exif import exif_fix_image_orientation, \
extract_exif, clean_exif, get_gps_data, get_useful, \
@@ -45,7 +46,7 @@ PIL_FILTERS = {
MEDIA_TYPE = 'mediagoblin.media_types.image'
-def resize_image(proc_state, resized, keyname, target_name, new_size,
+def resize_image(entry, resized, keyname, target_name, new_size,
exif_tags, workdir):
"""
Store a resized version of an image and return its pathname.
@@ -77,13 +78,14 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
tmp_resized_filename = os.path.join(workdir, target_name)
with file(tmp_resized_filename, 'w') as resized_file:
resized.save(resized_file, quality=config['quality'])
- proc_state.store_public(keyname, tmp_resized_filename, target_name)
+ store_public(entry, keyname, tmp_resized_filename, target_name)
-def resize_tool(proc_state, force, keyname, target_name,
+def resize_tool(entry, orig_filename,
+ force, keyname, target_name,
conversions_subdir, exif_tags, new_size=None):
# filename -- the filename of the original image being resized
- filename = proc_state.get_orig_filename()
+ filename = orig_filename
# Use the default size if new_size was not given
if not new_size:
@@ -104,7 +106,7 @@ def resize_tool(proc_state, force, keyname, target_name,
or im.size[1] > new_size[1]\
or exif_image_needs_rotation(exif_tags):
resize_image(
- proc_state, im, unicode(keyname), target_name,
+ entry, im, unicode(keyname), target_name,
new_size,
exif_tags, conversions_subdir)
@@ -325,24 +327,53 @@ class CommonImageProcessor(MediaProcessor):
help=(
"Height of the resized image (if not using defaults)"))
- def setup_workbench_subdirs(self):
+ def common_setup(self):
+ """
+ Set up the workbench directory and pull down the original file
+ """
+ ## @@: Should this be two functions?
# Conversions subdirectory to avoid collisions
self.conversions_subdir = os.path.join(
self.workbench.dir, 'convirsions')
+ os.mkdir(self.conversions_subdir)
- def fetch_original(self):
+ # Pull down and set up the original file
self.orig_filename = get_orig_filename(
self.entry, self.workbench)
self.name_builder = FilenameBuilder(self.orig_filename)
def generate_medium_if_applicable(self, size=None):
- pass
+ resize_tool(self.entry, False, 'medium', self.orig_filename,
+ self.name_builder.fill('{basename}.medium{ext}'),
+ self.conversions_subdir, self.exif_tags)
def generate_thumb(self, size=None):
- pass
+ resize_tool(self.entry, True, 'thumb', self.orig_filename,
+ self.name_builder.fill('{basename}.thumbnail{ext}'),
+ self.conversions_subdir, self.exif_tags)
+
+ def copy_original(self):
+ copy_original(
+ self.entry, self.orig_filename,
+ self.name_builder.fill('{basename}{ext}'))
def extract_metadata(self):
- pass
+ # Exif extraction
+ exif_tags = extract_exif(self.orig_filename)
+
+ # Is there any GPS data
+ gps_data = get_gps_data(exif_tags)
+
+ # Insert exif data into database
+ exif_all = clean_exif(exif_tags)
+
+ if len(exif_all):
+ self.entry.media_data_init(exif_all=exif_all)
+
+ if len(gps_data):
+ for key in list(gps_data.keys()):
+ gps_data['gps_' + key] = gps_data.pop(key)
+ self.entry.media_data_init(**gps_data)
class InitialProcessor(CommonImageProcessor):
@@ -353,11 +384,11 @@ class InitialProcessor(CommonImageProcessor):
description = "Initial processing"
@classmethod
- def media_is_eligible(cls, media_entry):
+ def media_is_eligible(cls, entry):
"""
Determine if this media type is eligible for processing
"""
- return media_entry.state in (
+ return entry.state in (
"unprocessed", "failed")
###############################
@@ -381,8 +412,7 @@ class InitialProcessor(CommonImageProcessor):
def process(self, size=None, thumb_size=None):
- self.setup_workbench_subdirs()
- self.fetch_original()
+ self.common_setup()
self.generate_medium_if_applicable(size=size)
self.generate_thumb(size=thumb_size)
self.extract_metadata()
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 9466aec6..bfb78780 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -113,10 +113,10 @@ class MediaProcessor(object):
# action this MediaProcessor provides
description = None
- def __init__(self, manager, media_entry):
+ def __init__(self, manager, entry):
self.manager = manager
- self.media_entry = media_entry
- self.entry_orig_state = media_entry.state
+ self.entry = entry
+ self.entry_orig_state = entry.state
# Should be initialized at time of processing, at least
self.workbench = None
@@ -136,7 +136,7 @@ class MediaProcessor(object):
raise NotImplementedError
@classmethod
- def media_is_eligible(cls, media_entry):
+ def media_is_eligible(cls, entry):
raise NotImplementedError
###############################
@@ -155,6 +155,20 @@ class MediaProcessor(object):
# THE FUTURE: web interface things here :)
##########################################
+ #####################
+ # Some common "steps"
+ #####################
+
+ def delete_queue_file(self):
+ # Remove queued media file from storage and database.
+ # queued_filepath is in the task_id directory which should
+ # be removed too, but fail if the directory is not empty to be on
+ # the super-safe side.
+ queued_filepath = self.entry.queued_media_file
+ mgg.queue_store.delete_file(queued_filepath) # rm file
+ mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
+ self.entry.queued_media_file = []
+
class ProcessingKeyError(Exception): pass
class ProcessorDoesNotExist(ProcessingKeyError): pass
@@ -217,12 +231,6 @@ class ProcessingManager(object):
return processor
- def process_from_args(self, entry, reprocess_command, request):
- """
- Process a media entry.
- """
- pass
-
def request_from_args(args, which_args):
"""
@@ -401,6 +409,22 @@ def get_orig_filename(entry, workbench):
return orig_filename
+def store_public(entry, keyname, local_file, target_name=None):
+ if target_name is None:
+ target_name = os.path.basename(local_file)
+ target_filepath = create_pub_filepath(entry, target_name)
+ if keyname in entry.media_files:
+ _log.warn("store_public: keyname %r already used for file %r, "
+ "replacing with %r", keyname,
+ entry.media_files[keyname], target_filepath)
+ mgg.public_store.copy_local_to_storage(local_file, target_filepath)
+ entry.media_files[keyname] = target_filepath
+
+
+def copy_original(entry, orig_filename, target_name, keyname=u"original"):
+ store_public(entry, keyname, orig_filename, target_name)
+
+
# end refactoring
###############################################################################
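store_public() and copy_original() above are the storage half of the old ProcessingState being broken apart: store_public() copies a local workbench file into public storage under a keyname, records the resulting filepath in entry.media_files, and warns (then replaces) if the keyname was already in use; copy_original() is just store_public() under the fixed u"original" key. A rough usage sketch; the entry variable, orig_filename and file names are illustrative, not taken from the series:

    from mediagoblin.processing import store_public, copy_original

    # after a processor has written a derivative into its workbench:
    store_public(entry, 'thumb',
                 '/tmp/workbench/conversions/photo.thumbnail.jpg',
                 'photo.thumbnail.jpg')
    # file copied to public storage, entry.media_files['thumb'] updated

    # the original gets the same treatment under a fixed key:
    copy_original(entry, orig_filename, 'photo.jpg')
    # shorthand for store_public(entry, u'original', orig_filename, 'photo.jpg')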
From 7a85bf985db67482a56e8987e28a6139b5e087fd Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 10:48:07 -0500
Subject: [PATCH 042/160] Fixing the MediaProcessor context manager so it
actually, you know, works :)
This commit sponsored by Mikiya Okuno. Thank you!
---
mediagoblin/processing/__init__.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index bfb78780..9493091b 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -123,6 +123,7 @@ class MediaProcessor(object):
def __enter__(self):
self.workbench = mgg.workbench_manager.create()
+ return self
def __exit__(self, *args):
self.workbench.destroy()
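The one-line fix above matters because of how the with statement works: it binds whatever __enter__() returns to the name after "as", so without the return self the task code in processing/task.py would have been handed None and crashed on processor.process(...). A minimal self-contained demonstration of the difference:

    class Broken(object):
        def __enter__(self):
            self.workbench = 'workbench'    # sets up, but forgets to return self
        def __exit__(self, *args):
            pass

    class Fixed(Broken):
        def __enter__(self):
            self.workbench = 'workbench'
            return self                     # the whole fix

    with Broken() as p:
        print(p)                            # None: nothing was returned

    with Fixed() as p:
        print(p.workbench)                  # 'workbench'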
From 5b546d6533ce80a55f28546098bfff7fa5caa474 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 10:53:15 -0500
Subject: [PATCH 043/160] A couple of fixes to stupid things I did while coding
this. And it WORKS!
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This commit sponsored by José María Serralde Ruiz. Thank you!
---
mediagoblin/media_types/image/processing.py | 28 ++++++++++++++-------
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index affa7dc9..e0ff928d 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -81,11 +81,11 @@ def resize_image(entry, resized, keyname, target_name, new_size,
store_public(entry, keyname, tmp_resized_filename, target_name)
-def resize_tool(entry, orig_filename,
+def resize_tool(entry,
force, keyname, target_name,
conversions_subdir, exif_tags, new_size=None):
# filename -- the filename of the original image being resized
- filename = orig_filename
+ filename = target_name
# Use the default size if new_size was not given
if not new_size:
@@ -342,6 +342,10 @@ class CommonImageProcessor(MediaProcessor):
self.entry, self.workbench)
self.name_builder = FilenameBuilder(self.orig_filename)
+ # Exif extraction
+ self.exif_tags = extract_exif(self.orig_filename)
+
+
def generate_medium_if_applicable(self, size=None):
resize_tool(self.entry, False, 'medium', self.orig_filename,
self.name_builder.fill('{basename}.medium{ext}'),
@@ -358,14 +362,11 @@ class CommonImageProcessor(MediaProcessor):
self.name_builder.fill('{basename}{ext}'))
def extract_metadata(self):
- # Exif extraction
- exif_tags = extract_exif(self.orig_filename)
-
# Is there any GPS data
- gps_data = get_gps_data(exif_tags)
+ gps_data = get_gps_data(self.exif_tags)
# Insert exif data into database
- exif_all = clean_exif(exif_tags)
+ exif_all = clean_exif(self.exif_tags)
if len(exif_all):
self.entry.media_data_init(exif_all=exif_all)
@@ -401,14 +402,23 @@ class InitialProcessor(CommonImageProcessor):
description=cls.description,
prog=cls.name)
- cls._add_width_height_args(parser)
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--thumb-size',
+ nargs=2,
+ type=int)
return parser
@classmethod
def args_to_request(cls, args):
return request_from_args(
- args, ['width', 'height'])
+ args, ['size', 'thumb_size'])
def process(self, size=None, thumb_size=None):
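With the parser above, --size and --thumb-size each take two integers (nargs=2); argparse exposes them as lists on the resulting Namespace (attributes size and thumb_size), and args_to_request() turns those into exactly the keyword arguments process(size=None, thumb_size=None) expects. A self-contained walk-through of the round trip, with the parser rebuilt inline to match the hunk:

    import argparse

    parser = argparse.ArgumentParser(prog='initial',
                                     description='Initial processing')
    parser.add_argument('--size', nargs=2,
                        metavar=('max_width', 'max_height'), type=int)
    parser.add_argument('--thumb-size', nargs=2, type=int)

    args = parser.parse_args(['--size', '800', '600'])
    print(args.size)          # [800, 600]
    print(args.thumb_size)    # None ('--thumb-size' maps to the thumb_size attr)

    request = {'size': args.size, 'thumb_size': args.thumb_size}
    # same result as request_from_args(args, ['size', 'thumb_size']);
    # run() then calls processor.process(**request), i.e.
    # process(size=[800, 600], thumb_size=None)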
From 98d1fa3beddfc602c541fe7f538ca882ad6c7e9c Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 11:00:15 -0500
Subject: [PATCH 044/160] Fixing normal submission of media (well for images
anyway)
---
mediagoblin/processing/__init__.py | 2 ++
mediagoblin/submit/lib.py | 2 +-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 9493091b..684ffe04 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -223,6 +223,8 @@ class ProcessingManager(object):
try:
processor = self.processors[key]
except KeyError:
+ import pdb
+ pdb.set_trace()
raise ProcessorDoesNotExist(
"'%s' processor does not exist for this media type" % key)
diff --git a/mediagoblin/submit/lib.py b/mediagoblin/submit/lib.py
index ad37203d..1a45e447 100644
--- a/mediagoblin/submit/lib.py
+++ b/mediagoblin/submit/lib.py
@@ -77,7 +77,7 @@ def prepare_queue_task(app, entry, filename):
def run_process_media(entry, feed_url=None,
- reprocess_action="inital", reprocess_info=None):
+ reprocess_action="initial", reprocess_info=None):
"""Process the media asynchronously
:param entry: MediaEntry() instance to be processed.
From ff12ecef347d0fdb29534eb2bc0390cf183c10ba Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 11:13:00 -0500
Subject: [PATCH 045/160] Revert "use parser.parse_known_args() instead of
parser.parse_args()"
This reverts commit 029e779c468ba1a6bfd893679cfaae7f418f45dd.
(and a bit more!)
This wasn't needed anymore because we did a "rest" capture and passed
that over to the reprocess run command.
Conflicts:
mediagoblin/gmg_commands/assetlink.py
mediagoblin/gmg_commands/dbupdate.py
mediagoblin/gmg_commands/import_export.py
mediagoblin/gmg_commands/users.py
---
mediagoblin/gmg_commands/assetlink.py | 3 +--
mediagoblin/gmg_commands/dbupdate.py | 4 +---
mediagoblin/gmg_commands/import_export.py | 27 ++++++++++-----------
mediagoblin/gmg_commands/shell.py | 5 ++--
mediagoblin/gmg_commands/users.py | 29 ++++++++++-------------
mediagoblin/gmg_commands/util.py | 7 ------
6 files changed, 30 insertions(+), 45 deletions(-)
diff --git a/mediagoblin/gmg_commands/assetlink.py b/mediagoblin/gmg_commands/assetlink.py
index dff737ff..148ebe9e 100644
--- a/mediagoblin/gmg_commands/assetlink.py
+++ b/mediagoblin/gmg_commands/assetlink.py
@@ -138,8 +138,7 @@ def assetlink(args):
"""
Link the asset directory of the currently installed theme and plugins
"""
- commands_util.check_unrecognized_args(args)
- mgoblin_app = commands_util.setup_app(args[0])
+ mgoblin_app = commands_util.setup_app(args)
app_config = mg_globals.app_config
# link theme
diff --git a/mediagoblin/gmg_commands/dbupdate.py b/mediagoblin/gmg_commands/dbupdate.py
index b2efa5de..961752f6 100644
--- a/mediagoblin/gmg_commands/dbupdate.py
+++ b/mediagoblin/gmg_commands/dbupdate.py
@@ -20,7 +20,6 @@ from sqlalchemy.orm import sessionmaker
from mediagoblin.db.open import setup_connection_and_db_from_config
from mediagoblin.db.migration_tools import MigrationManager
-from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.init import setup_global_and_app_config
from mediagoblin.tools.common import import_component
@@ -148,6 +147,5 @@ def run_all_migrations(db, app_config, global_config):
def dbupdate(args):
- commands_util.check_unrecognized_args(args)
- global_config, app_config = setup_global_and_app_config(args[0].conf_file)
+ global_config, app_config = setup_global_and_app_config(args.conf_file)
run_dbupdate(app_config, global_config)
diff --git a/mediagoblin/gmg_commands/import_export.py b/mediagoblin/gmg_commands/import_export.py
index 1d4ae1f7..fbac09f6 100644
--- a/mediagoblin/gmg_commands/import_export.py
+++ b/mediagoblin/gmg_commands/import_export.py
@@ -97,28 +97,27 @@ def env_import(args):
'''
Restore mongo database and media files from a tar archive
'''
- commands_util.check_unrecognized_args(args)
- if not args[0].cache_path:
- args[0].cache_path = tempfile.mkdtemp()
+ if not args.cache_path:
+ args.cache_path = tempfile.mkdtemp()
- setup_global_and_app_config(args[0].conf_file)
+ setup_global_and_app_config(args.conf_file)
# Creates mg_globals.public_store and mg_globals.queue_store
setup_storage()
- global_config, app_config = setup_global_and_app_config(args[0].conf_file)
+ global_config, app_config = setup_global_and_app_config(args.conf_file)
db = setup_connection_and_db_from_config(
app_config)
tf = tarfile.open(
- args[0].tar_file,
+ args.tar_file,
mode='r|gz')
- tf.extractall(args[0].cache_path)
+ tf.extractall(args.cache_path)
- args[0].cache_path = os.path.join(
- args[0].cache_path, 'mediagoblin-data')
- args = _setup_paths(args[0])
+ args.cache_path = os.path.join(
+ args.cache_path, 'mediagoblin-data')
+ args = _setup_paths(args)
# Import database from extracted data
_import_database(db, args)
@@ -227,16 +226,16 @@ def env_export(args):
'''
commands_util.check_unrecognized_args(args)
if args.cache_path:
- if os.path.exists(args[0].cache_path):
+ if os.path.exists(args.cache_path):
_log.error('The cache directory must not exist '
'before you run this script')
- _log.error('Cache directory: {0}'.format(args[0].cache_path))
+ _log.error('Cache directory: {0}'.format(args.cache_path))
return False
else:
- args[0].cache_path = tempfile.mkdtemp()
+ args.cache_path = tempfile.mkdtemp()
- args = _setup_paths(args[0])
+ args = _setup_paths(args)
if not _export_check(args):
_log.error('Checks did not pass, exiting')
diff --git a/mediagoblin/gmg_commands/shell.py b/mediagoblin/gmg_commands/shell.py
index 03e08b23..4998acd7 100644
--- a/mediagoblin/gmg_commands/shell.py
+++ b/mediagoblin/gmg_commands/shell.py
@@ -63,13 +63,12 @@ def shell(args):
"""
Setup a shell for the user either a normal Python shell or an IPython one
"""
- commands_util.check_unrecognized_args(args)
user_namespace = {
'mg_globals': mg_globals,
- 'mgoblin_app': commands_util.setup_app(args[0]),
+ 'mgoblin_app': commands_util.setup_app(args),
'db': mg_globals.database}
- if args[0].ipython:
+ if args.ipython:
ipython_shell(**user_namespace)
else:
# Try ipython_shell first and fall back if not available
diff --git a/mediagoblin/gmg_commands/users.py b/mediagoblin/gmg_commands/users.py
index b164e672..e44b0aa9 100644
--- a/mediagoblin/gmg_commands/users.py
+++ b/mediagoblin/gmg_commands/users.py
@@ -32,17 +32,16 @@ def adduser_parser_setup(subparser):
def adduser(args):
#TODO: Lets trust admins this do not validate Emails :)
- commands_util.check_unrecognized_args(args)
- commands_util.setup_app(args[0])
+ commands_util.setup_app(args)
- args[0].username = commands_util.prompt_if_not_set(args[0].username, "Username:")
- args[0].password = commands_util.prompt_if_not_set(args[0].password, "Password:",True)
- args[0].email = commands_util.prompt_if_not_set(args[0].email, "Email:")
+ args.username = commands_util.prompt_if_not_set(args.username, "Username:")
+ args.password = commands_util.prompt_if_not_set(args.password, "Password:",True)
+ args.email = commands_util.prompt_if_not_set(args.email, "Email:")
db = mg_globals.database
users_with_username = \
db.User.query.filter_by(
- username=args[0].username.lower()
+ username=args.username.lower()
).count()
if users_with_username:
@@ -51,9 +50,9 @@ def adduser(args):
else:
# Create the user
entry = db.User()
- entry.username = unicode(args[0].username.lower())
- entry.email = unicode(args[0].email)
- entry.pw_hash = auth.gen_password_hash(args[0].password)
+ entry.username = unicode(args.username.lower())
+ entry.email = unicode(args.email)
+ entry.pw_hash = auth.gen_password_hash(args.password)
entry.status = u'active'
entry.email_verified = True
entry.save()
@@ -68,13 +67,12 @@ def makeadmin_parser_setup(subparser):
def makeadmin(args):
- commands_util.check_unrecognized_args(args)
- commands_util.setup_app(args[0])
+ commands_util.setup_app(args)
db = mg_globals.database
user = db.User.query.filter_by(
- username=unicode(args[0].username.lower())).one()
+ username=unicode(args.username.lower())).one()
if user:
user.is_admin = True
user.save()
@@ -93,15 +91,14 @@ def changepw_parser_setup(subparser):
def changepw(args):
- commands_util.check_unrecognized_args(args)
- commands_util.setup_app(args[0])
+ commands_util.setup_app(args)
db = mg_globals.database
user = db.User.query.filter_by(
- username=unicode(args[0].username.lower())).one()
+ username=unicode(args.username.lower())).one()
if user:
- user.pw_hash = auth.gen_password_hash(args[0].password)
+ user.pw_hash = auth.gen_password_hash(args.password)
user.save()
print 'Password successfully changed'
else:
diff --git a/mediagoblin/gmg_commands/util.py b/mediagoblin/gmg_commands/util.py
index 8b057996..63e39ca9 100644
--- a/mediagoblin/gmg_commands/util.py
+++ b/mediagoblin/gmg_commands/util.py
@@ -17,7 +17,6 @@
from mediagoblin import app
import getpass
-import argparse
def setup_app(args):
@@ -39,9 +38,3 @@ def prompt_if_not_set(variable, text, password=False):
variable=getpass.getpass(text + u' ')
return variable
-
-
-def check_unrecognized_args(args):
- if args[1]:
- parser = argparse.ArgumentParser()
- parser.error('unrecognized arguments: {}'.format(args[1]))
From de332ab9f5c56589349494d1ccbbf2cfc65424ca Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 11:19:20 -0500
Subject: [PATCH 046/160] Trying to fix the bug that's happening to rodney757
but not to me ;)
---
mediagoblin/gmg_commands/reprocess.py | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 10ab50ab..4678ca6f 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -28,6 +28,11 @@ from mediagoblin.processing import (
def reprocess_parser_setup(subparser):
+ subparser.add_argument(
+ '--celery',
+ action='store_true',
+ help="Don't process eagerly, pass off to celery")
+
subparsers = subparser.add_subparsers(dest="reprocess_subcommand")
###################
@@ -66,10 +71,6 @@ def reprocess_parser_setup(subparser):
'--thumbnails',
action="store_true",
help="Regenerate thumbnails for all processed media")
- run_parser.add_argument(
- '--celery',
- action='store_true',
- help="Don't process eagerly, pass off to celery")
run_parser.add_argument(
'media_id',
From c100b8b284456e5102de4b05d77a66f829a34939 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 11:27:43 -0500
Subject: [PATCH 047/160] Fixing ./bin/gmg reprocess available, which I broke
:)
---
mediagoblin/gmg_commands/reprocess.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 4678ca6f..bd8039b5 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -212,8 +212,9 @@ def _set_media_state(args):
def available(args):
# Get the media type, either by looking up media id, or by specific type
try:
- media_entry, manager = get_entry_and_processing_manager(args.id_or_type)
- media_type = media_entry.type
+ media_id = int(args.id_or_type)
+ media_entry, manager = get_entry_and_processing_manager(media_id)
+ media_type = media_entry.media_type
except ValueError:
media_type = args.id_or_type
media_entry = None
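The fix above restores the id-or-type dispatch in available(): the positional argument is first tried as an integer media id, and if int() raises ValueError it is treated as a media-type string; it also switches to media_entry.media_type, the attribute used everywhere else in this series, rather than .type. The dispatch itself is plain Python:

    def classify(id_or_type):
        # mirrors the try/except in available() above
        try:
            return 'media id', int(id_or_type)
        except ValueError:
            return 'media type', id_or_type

    print(classify('42'))
    # ('media id', 42): look the entry up, then ask its manager
    print(classify('mediagoblin.media_types.image'))
    # ('media type', 'mediagoblin.media_types.image'): go straight to
    # get_processing_manager_for_type()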
From 916db96edba66a4deb0ec290e378031a85e2e7e0 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 11:40:55 -0500
Subject: [PATCH 048/160] Don't forget to run copy_original()! That's
critical!
This commit sponsored by Tony Schmidt. Thank you!
---
mediagoblin/media_types/image/processing.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index e0ff928d..6c6c7708 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -425,6 +425,7 @@ class InitialProcessor(CommonImageProcessor):
self.common_setup()
self.generate_medium_if_applicable(size=size)
self.generate_thumb(size=thumb_size)
+ self.copy_original()
self.extract_metadata()
From af51c423fb4c808b46afbec12d3aa832c53c0034 Mon Sep 17 00:00:00 2001
From: Christopher Allan Webber
Date: Mon, 12 Aug 2013 11:57:52 -0500
Subject: [PATCH 049/160] Fix (by Rodney Ewing) for processing, specifically
the way resize_tool was being called
Thanks for fixing, Rodney :)
---
mediagoblin/media_types/image/processing.py | 7 ++-----
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 6c6c7708..01e5cce5 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -82,11 +82,8 @@ def resize_image(entry, resized, keyname, target_name, new_size,
def resize_tool(entry,
- force, keyname, target_name,
+ force, keyname, orig_file, target_name,
conversions_subdir, exif_tags, new_size=None):
- # filename -- the filename of the original image being resized
- filename = target_name
-
# Use the default size if new_size was not given
if not new_size:
max_width = mgg.global_config['media:' + keyname]['max_width']
@@ -98,7 +95,7 @@ def resize_tool(entry,
# entry.
# Also created if the file needs rotation, or if forced.
try:
- im = Image.open(filename)
+ im = Image.open(orig_file)
except IOError:
raise BadMediaFail()
if force \
From 583501415acce95ff458e5cf12733d1b61332e0e Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Mon, 12 Aug 2013 11:37:59 -0700
Subject: [PATCH 050/160] Add image resizer and some cleanup of old code
---
mediagoblin/media_types/image/__init__.py | 5 +-
mediagoblin/media_types/image/processing.py | 236 +++++---------------
mediagoblin/processing/__init__.py | 4 +-
3 files changed, 58 insertions(+), 187 deletions(-)
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index 774b9bfa..f8c4a7d1 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -17,8 +17,8 @@ import datetime
import logging
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.image.processing import ProcessImage, \
- sniff_handler, ImageProcessingManager
+from mediagoblin.media_types.image.processing import sniff_handler, \
+ ImageProcessingManager
from mediagoblin.tools import pluginapi
_log = logging.getLogger(__name__)
@@ -34,7 +34,6 @@ def setup_plugin():
class ImageMediaManager(MediaManagerBase):
human_readable = "Image"
- processor = staticmethod(ProcessImage)
display_template = "mediagoblin/media_displays/image.html"
default_thumb = "images/media_thumbs/image.png"
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 01e5cce5..6eb8302c 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -23,17 +23,14 @@ import logging
import argparse
from mediagoblin import mg_globals as mgg
-from mediagoblin.db.models import MediaEntry
from mediagoblin.processing import (
BadMediaFail, FilenameBuilder,
MediaProcessor, ProcessingManager,
request_from_args, get_orig_filename,
store_public, copy_original)
-from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools.exif import exif_fix_image_orientation, \
extract_exif, clean_exif, get_gps_data, get_useful, \
exif_image_needs_rotation
-from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
_log = logging.getLogger(__name__)
@@ -130,181 +127,6 @@ def sniff_handler(media_file, **kw):
return None
-class ProcessImage(object):
- """Code to process an image. Will be run by celery.
-
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
- """
- def __init__(self, proc_state=None):
- if proc_state:
- self.proc_state = proc_state
- self.entry = proc_state.entry
- self.workbench = proc_state.workbench
-
- # Conversions subdirectory to avoid collisions
- self.conversions_subdir = os.path.join(
- self.workbench.dir, 'convirsions')
-
- self.orig_filename = proc_state.get_orig_filename()
- self.name_builder = FilenameBuilder(self.orig_filename)
-
- # Exif extraction
- self.exif_tags = extract_exif(self.orig_filename)
-
- os.mkdir(self.conversions_subdir)
-
- def reprocess_action(self, args):
- """
- List the available actions for media in a given state
- """
- if args[0].state == 'processed':
- print _('\n Available reprocessing actions for processed images:'
- '\n \t --resize: thumb or medium'
- '\n Options:'
- '\n \t --size: max_width max_height (defaults to'
- 'config specs)')
- return True
-
- def _parser(self, args):
- """
- Parses the unknown args from the gmg parser
- """
- parser = argparse.ArgumentParser()
- parser.add_argument(
- '--resize',
- choices=['thumb', 'medium'])
- parser.add_argument(
- '--size',
- nargs=2,
- metavar=('max_width', 'max_height'),
- type=int)
- parser.add_argument(
- '--initial_processing',
- action='store_true')
-
- return parser.parse_args(args[1])
-
- def _check_eligible(self, entry_args, reprocess_args):
- """
- Check to see if we can actually process the given media as requested
- """
-
- if entry_args.state == 'processed':
- if reprocess_args.initial_processing:
- raise Exception(_('You can not run --initial_processing on'
- ' media that has already been processed.'))
-
- if entry_args.state == 'failed':
- if reprocess_args.resize:
- raise Exception(_('You can not run --resize on media that has'
- ' not been processed.'))
- if reprocess_args.size:
- _log.warn('With --initial_processing, the --size flag will be'
- ' ignored.')
-
- if entry_args.state == 'processing':
- raise Exception(_('We currently do not support reprocessing on'
- ' media that is in the "processing" state.'))
-
- def initial_processing(self):
- # Is there any GPS data
- gps_data = get_gps_data(self.exif_tags)
-
- # Always create a small thumbnail
- resize_tool(self.proc_state, True, 'thumb', self.orig_filename,
- self.name_builder.fill('{basename}.thumbnail{ext}'),
- self.conversions_subdir, self.exif_tags)
-
- # Possibly create a medium
- resize_tool(self.proc_state, False, 'medium', self.orig_filename,
- self.name_builder.fill('{basename}.medium{ext}'),
- self.conversions_subdir, self.exif_tags)
-
- # Copy our queued local workbench to its final destination
- self.proc_state.copy_original(self.name_builder.fill('{basename}{ext}'))
-
- # Remove queued media file from storage and database
- self.proc_state.delete_queue_file()
-
- # Insert exif data into database
- exif_all = clean_exif(self.exif_tags)
-
- if len(exif_all):
- self.entry.media_data_init(exif_all=exif_all)
-
- if len(gps_data):
- for key in list(gps_data.keys()):
- gps_data['gps_' + key] = gps_data.pop(key)
- self.entry.media_data_init(**gps_data)
-
- def reprocess(self, reprocess_info):
- """
- This function actually does the reprocessing when called by
- ProcessMedia in gmg/processing/task.py
- """
- new_size = None
-
- # Did they specify a size? They must specify either both or none, so
- # we only need to check if one is present
- if reprocess_info.get('max_width'):
- max_width = reprocess_info['max_width']
- max_height = reprocess_info['max_height']
-
- new_size = (max_width, max_height)
-
- resize_tool(self.proc_state, False, reprocess_info['resize'],
- self.name_builder.fill('{basename}.medium{ext}'),
- self.conversions_subdir, self.exif_tags, new_size)
-
- def media_reprocess(self, args):
- """
- This function handles the all of the reprocessing logic, before calling
- gmg/submit/lib/run_process_media
- """
- reprocess_args = self._parser(args)
- entry_args = args[0]
-
- # Can we actually process the given media as requested?
- self._check_eligible(entry_args, reprocess_args)
-
- # Do we want to re-try initial processing?
- if reprocess_args.initial_processing:
- for id in entry_args.media_id:
- entry = MediaEntry.query.filter_by(id=id).first()
- run_process_media(entry)
-
- # Are we wanting to resize the thumbnail or medium?
- elif reprocess_args.resize:
-
- # reprocess all given media entries
- for id in entry_args.media_id:
- entry = MediaEntry.query.filter_by(id=id).first()
-
- # For now we can only reprocess with the original file
- if not entry.media_files.get('original'):
- raise Exception(_('The original file for this media entry'
- ' does not exist.'))
-
- reprocess_info = self._get_reprocess_info(reprocess_args)
- run_process_media(entry, reprocess_info=reprocess_info)
-
- # If we are here, they forgot to tell us how to reprocess
- else:
- _log.warn('You must set either --resize or --initial_processing'
- ' flag to reprocess an image.')
-
- def _get_reprocess_info(self, args):
- """ Returns a dict with the info needed for reprocessing"""
- reprocess_info = {'resize': args.resize}
-
- if args.size:
- reprocess_info['max_width'] = args.size[0]
- reprocess_info['max_height'] = args.size[1]
-
- return reprocess_info
-
-
class CommonImageProcessor(MediaProcessor):
"""
Provides a base for various media processing steps
@@ -342,16 +164,15 @@ class CommonImageProcessor(MediaProcessor):
# Exif extraction
self.exif_tags = extract_exif(self.orig_filename)
-
def generate_medium_if_applicable(self, size=None):
resize_tool(self.entry, False, 'medium', self.orig_filename,
self.name_builder.fill('{basename}.medium{ext}'),
- self.conversions_subdir, self.exif_tags)
+ self.conversions_subdir, self.exif_tags, size)
def generate_thumb(self, size=None):
resize_tool(self.entry, True, 'thumb', self.orig_filename,
self.name_builder.fill('{basename}.thumbnail{ext}'),
- self.conversions_subdir, self.exif_tags)
+ self.conversions_subdir, self.exif_tags, size)
def copy_original(self):
copy_original(
@@ -408,6 +229,7 @@ class InitialProcessor(CommonImageProcessor):
parser.add_argument(
'--thumb-size',
nargs=2,
+ metavar=('max_width', 'max_height'),
type=int)
return parser
@@ -417,19 +239,69 @@ class InitialProcessor(CommonImageProcessor):
return request_from_args(
args, ['size', 'thumb_size'])
-
def process(self, size=None, thumb_size=None):
self.common_setup()
self.generate_medium_if_applicable(size=size)
self.generate_thumb(size=thumb_size)
self.copy_original()
self.extract_metadata()
+ self.delete_queue_file()
+
+
+class Resizer(CommonImageProcessor):
+ """
+ Resizing process steps for processed media
+ """
+ name = 'resize'
+ description = 'Resize image'
+
+ @classmethod
+ def media_is_eligible(cls, entry):
+ """
+ Determine if this media type is eligible for processing
+ """
+ return entry.state in 'processed'
+
+ ###############################
+ # Command line interface things
+ ###############################
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ 'file',
+ choices=['medium', 'thumb'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'file'])
+
+ def process(self, file, size=None):
+ self.common_setup()
+ if file == 'medium':
+ self.generate_medium_if_applicable(size=size)
+ elif file == 'thumb':
+ self.generate_thumb(size=size)
class ImageProcessingManager(ProcessingManager):
def __init__(self):
super(self.__class__, self).__init__()
self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
if __name__ == '__main__':
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 684ffe04..d5ec1fba 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -90,7 +90,7 @@ class MediaProcessor(object):
- resize: resize an image
- transcode: transcode a video
- ... etc.
+ ... etc.
Some information on producing a new MediaProcessor for your media type:
@@ -193,7 +193,7 @@ class ProcessingManager(object):
name = processor.name
if name is None:
raise AttributeError("Processor class's .name attribute not set")
-
+
self.processors[name] = processor
def list_eligible_processors(self, entry):
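As a rough illustration of how the Resizer added above is meant to be driven, the sketch below maps resize arguments onto a process() call. The driver lines are an assumption based on the reprocess plumbing that appears later in this series (run_process_media lives in gmg/submit/lib per the docstrings above); the entry and the sizes are examples only.

    from mediagoblin.media_types.image.processing import Resizer
    from mediagoblin.submit.lib import run_process_media

    parser = Resizer.generate_parser()
    args = parser.parse_args(['thumb', '--size', '200', '200'])
    # request_from_args is assumed to yield something like
    # {'file': 'thumb', 'size': [200, 200]}
    request = Resizer.args_to_request(args)
    run_process_media(entry, reprocess_action=Resizer.name,
                      reprocess_info=request)   # entry: a processed MediaEntry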
From fb56676bf49de8e25487b938dc9a56f8440086f5 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Mon, 12 Aug 2013 11:55:00 -0700
Subject: [PATCH 051/160] delete existing file in store_public
---
mediagoblin/processing/__init__.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index d5ec1fba..aadee78b 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -412,7 +412,8 @@ def get_orig_filename(entry, workbench):
return orig_filename
-def store_public(entry, keyname, local_file, target_name=None):
+def store_public(entry, keyname, local_file, target_name=None,
+ delete_if_exists=True):
if target_name is None:
target_name = os.path.basename(local_file)
target_filepath = create_pub_filepath(entry, target_name)
@@ -420,6 +421,8 @@ def store_public(entry, keyname, local_file, target_name=None):
_log.warn("store_public: keyname %r already used for file %r, "
"replacing with %r", keyname,
entry.media_files[keyname], target_filepath)
+ if delete_if_exists:
+ mgg.public_store.delete_file(entry.media_files[keyname])
mgg.public_store.copy_local_to_storage(local_file, target_filepath)
entry.media_files[keyname] = target_filepath
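A brief usage sketch of the new keyword; the entry, key name and paths here are illustrative, not taken from this patch.

    from mediagoblin.processing import store_public

    # Default behaviour: any file already stored under this key is deleted
    # before the replacement is copied into public storage.
    store_public(entry, 'thumb', '/tmp/workbench/song-thumbnail.jpg',
                 'song.thumbnail.jpg')

    # Opt out when the old public file should be left in place.
    store_public(entry, 'thumb', '/tmp/workbench/song-thumbnail.jpg',
                 'song.thumbnail.jpg', delete_if_exists=False)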
From 455f71d24c7d5e3163b1cc25682161fe1c7f7cc6 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Mon, 12 Aug 2013 11:57:47 -0700
Subject: [PATCH 052/160] remove ProcessingState
---
mediagoblin/processing/__init__.py | 82 ------------------------------
1 file changed, 82 deletions(-)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index aadee78b..19e88199 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -274,79 +274,6 @@ def get_entry_and_processing_manager(media_id):
return entry, manager
-################################################
-# TODO: This ProcessingState is OUTDATED,
-# and needs to be refactored into other tools!
-################################################
-
-class ProcessingState(object):
- """
- The first and only argument to the "processor" of a media type
-
- This could be thought of as a "request" to the processor
- function. It has the main info for the request (media entry)
- and a bunch of tools for the request on it.
- It can get more fancy without impacting old media types.
- """
- def __init__(self, entry):
- self.entry = entry
- self.workbench = None
- self.orig_filename = None
-
- def set_workbench(self, wb):
- self.workbench = wb
-
- def get_orig_filename(self):
- """
- Get the a filename for the original, on local storage
-
- If the media entry has a queued_media_file, use that, otherwise
- use the original.
-
- In the future, this will return the highest quality file available
- if neither the original or queued file are available
- """
- if self.orig_filename is not None:
- return self.orig_filename
-
- if self.entry.queued_media_file:
- orig_filepath = self.entry.queued_media_file
- storage = mgg.queue_store
- else:
- orig_filepath = self.entry.media_files['original']
- storage = mgg.public_store
-
- orig_filename = self.workbench.localized_file(
- storage, orig_filepath,
- 'source')
- self.orig_filename = orig_filename
- return orig_filename
-
- def copy_original(self, target_name, keyname=u"original"):
- self.store_public(keyname, self.get_orig_filename(), target_name)
-
- def store_public(self, keyname, local_file, target_name=None):
- if target_name is None:
- target_name = os.path.basename(local_file)
- target_filepath = create_pub_filepath(self.entry, target_name)
- if keyname in self.entry.media_files:
- _log.warn("store_public: keyname %r already used for file %r, "
- "replacing with %r", keyname,
- self.entry.media_files[keyname], target_filepath)
- mgg.public_store.copy_local_to_storage(local_file, target_filepath)
- self.entry.media_files[keyname] = target_filepath
-
- def delete_queue_file(self):
- # Remove queued media file from storage and database.
- # queued_filepath is in the task_id directory which should
- # be removed too, but fail if the directory is not empty to be on
- # the super-safe side.
- queued_filepath = self.entry.queued_media_file
- mgg.queue_store.delete_file(queued_filepath) # rm file
- mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
- self.entry.queued_media_file = []
-
-
def mark_entry_failed(entry_id, exc):
"""
Mark a media entry as having failed in its conversion.
@@ -383,10 +310,6 @@ def mark_entry_failed(entry_id, exc):
u'fail_metadata': {}})
-###############################################################################
-# refactoring procstate stuff here
-
-
def get_orig_filename(entry, workbench):
"""
     Get a filename for the original, on local storage
@@ -431,11 +354,6 @@ def copy_original(entry, orig_filename, target_name, keyname=u"original"):
store_public(entry, keyname, orig_filename, target_name)
-# end refactoring
-###############################################################################
-
-
-
class BaseProcessingFail(Exception):
"""
Base exception that all other processing failure messages should
From 7584080bf7d7b2d74087d31ca781e1111c2024da Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Mon, 12 Aug 2013 14:28:03 -0700
Subject: [PATCH 053/160] add bulk_run, thumbs, and initial sub_commands
---
mediagoblin/gmg_commands/reprocess.py | 96 +++++++++++++++++----
mediagoblin/media_types/image/processing.py | 12 ++-
mediagoblin/processing/__init__.py | 15 +++-
3 files changed, 98 insertions(+), 25 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index bd8039b5..34311f6d 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -41,7 +41,7 @@ def reprocess_parser_setup(subparser):
available_parser = subparsers.add_parser(
"available",
help="Find out what actions are available for this media")
-
+
available_parser.add_argument(
"id_or_type",
help="Media id or media type to check")
@@ -51,27 +51,19 @@ def reprocess_parser_setup(subparser):
action="store_true",
help="List argument help for each action available")
-
- ############################################
- # run command (TODO: and bulk_run command??)
- ############################################
-
+ available_parser.add_argument(
+ "--state",
+ help="The state of media you would like to reprocess")
+
+
+ #############
+ # run command
+ #############
+
run_parser = subparsers.add_parser(
"run",
help="Run a reprocessing on one or more media")
- run_parser.add_argument(
- '--state', '-s',
- help="Reprocess media entries in this state"
- " such as 'failed' or 'processed'")
- run_parser.add_argument(
- '--type', '-t',
- help="The type of media to be reprocessed such as 'video' or 'image'")
- run_parser.add_argument(
- '--thumbnails',
- action="store_true",
- help="Regenerate thumbnails for all processed media")
-
run_parser.add_argument(
'media_id',
help="The media_entry id(s) you wish to reprocess.")
@@ -86,6 +78,48 @@ def reprocess_parser_setup(subparser):
help="rest of arguments to the reprocessing tool")
+ ################
+ # thumbs command
+ ################
+ thumbs = subparsers.add_parser(
+ 'thumbs',
+ help='Regenerate thumbs for all processed media')
+
+ thumbs.add_argument(
+ '--size',
+ nargs=2,
+ type=int,
+ metavar=('max_width', 'max_height'))
+
+ #################
+ # initial command
+ #################
+ subparsers.add_parser(
+ 'initial',
+ help='Reprocess all failed media')
+
+ ##################
+ # bulk_run command
+ ##################
+ bulk_run_parser = subparsers.add_parser(
+ 'bulk_run',
+ help='Run reprocessing on a given media type or state')
+
+ bulk_run_parser.add_argument(
+ 'type',
+ help='The type of media you would like to process')
+
+ bulk_run_parser.add_argument(
+ 'state',
+ default='processed',
+ help='The state of the media you would like to process. Defaults to' \
+ " 'processed'")
+
+ bulk_run_parser.add_argument(
+ 'reprocess_args',
+ nargs=argparse.REMAINDER,
+ help='The rest of the arguments to the reprocessing tool')
+
###############
# help command?
###############
@@ -220,7 +254,9 @@ def available(args):
media_entry = None
manager = get_processing_manager_for_type(media_type)
- if media_entry is None:
+ if args.state:
+ processors = manager.list_all_processors_by_state(args.state)
+ elif media_entry is None:
processors = manager.list_all_processors()
else:
processors = manager.list_eligible_processors(media_entry)
@@ -271,6 +307,19 @@ def run(args):
reprocess_action=args.reprocess_command,
reprocess_info=reprocess_request)
+def bulk_run(args):
+ pass
+
+
+def thumbs(args):
+ #TODO regenerate thumbs for all processed media
+ pass
+
+
+def initial(args):
+ #TODO initial processing on all failed media
+ pass
+
def reprocess(args):
     # Run eagerly unless explicitly set not to
@@ -284,3 +333,12 @@ def reprocess(args):
elif args.reprocess_subcommand == "available":
available(args)
+
+ elif args.reprocess_subcommand == "bulk_run":
+ bulk_run(args)
+
+ elif args.reprocess_subcommand == "thumbs":
+ thumbs(args)
+
+ elif args.reprocess_subcommand == "initial":
+ initial(args)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 6eb8302c..c3dfc5fe 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -203,11 +203,13 @@ class InitialProcessor(CommonImageProcessor):
description = "Initial processing"
@classmethod
- def media_is_eligible(cls, entry):
+ def media_is_eligible(cls, entry=None, state=None):
"""
Determine if this media type is eligible for processing
"""
- return entry.state in (
+ if not state:
+ state = entry.state
+ return state in (
"unprocessed", "failed")
###############################
@@ -256,11 +258,13 @@ class Resizer(CommonImageProcessor):
description = 'Resize image'
@classmethod
- def media_is_eligible(cls, entry):
+ def media_is_eligible(cls, entry=None, state=None):
"""
Determine if this media type is eligible for processing
"""
- return entry.state in 'processed'
+ if not state:
+ state = entry.state
+ return state in 'processed'
###############################
# Command line interface things
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 19e88199..1930a480 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -137,7 +137,7 @@ class MediaProcessor(object):
raise NotImplementedError
@classmethod
- def media_is_eligible(cls, entry):
+ def media_is_eligible(cls, entry=None, state=None):
raise NotImplementedError
###############################
@@ -204,7 +204,18 @@ class ProcessingManager(object):
return [
processor
for processor in self.processors.values()
- if processor.media_is_eligible(entry)]
+ if processor.media_is_eligible(entry=entry)]
+
+ def list_all_processors_by_state(self, state):
+ """
+ List all processors that this media state is eligible to be processed
+ for.
+ """
+ return [
+ processor
+ for processor in self.processors.values()
+ if processor.media_is_eligible(state=state)]
+
def list_all_processors(self):
return self.processors.values()
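For the new --state option on `gmg reprocess available`, a small sketch of the lookup it relies on; the media type string and state are examples.

    from mediagoblin.processing import get_processing_manager_for_type

    manager = get_processing_manager_for_type('mediagoblin.media_types.image')
    for processor in manager.list_all_processors_by_state(u'processed'):
        print " - %s" % processor.name   # e.g. 'resize'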
From 4e6013689beded08121c0d139565ffccbf3c0000 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Mon, 12 Aug 2013 14:54:02 -0700
Subject: [PATCH 054/160] run initial processing on all failed entries
---
mediagoblin/gmg_commands/reprocess.py | 19 ++++++++++++++++---
mediagoblin/processing/__init__.py | 5 +++++
2 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 34311f6d..a3c732b9 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -24,7 +24,8 @@ from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.processing import (
ProcessorDoesNotExist, ProcessorNotEligible,
- get_entry_and_processing_manager, get_processing_manager_for_type)
+ get_entry_and_processing_manager, get_processing_manager_for_type,
+ ProcessingManagerDoesNotExist)
def reprocess_parser_setup(subparser):
@@ -307,6 +308,7 @@ def run(args):
reprocess_action=args.reprocess_command,
reprocess_info=reprocess_request)
+
def bulk_run(args):
pass
@@ -317,8 +319,19 @@ def thumbs(args):
def initial(args):
- #TODO initial processing on all failed media
- pass
+ """
+ Reprocess all failed media
+ """
+ query = MediaEntry.query.filter_by(state='failed')
+
+ for entry in query:
+ try:
+ media_entry, manager = get_entry_and_processing_manager(entry.id)
+ run_process_media(
+ media_entry,
+ reprocess_action='initial')
+ except ProcessingManagerDoesNotExist:
+ print 'No such processing manager for {0}'.format(entry.media_type)
def reprocess(args):
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 1930a480..0c13e807 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -174,6 +174,8 @@ class MediaProcessor(object):
class ProcessingKeyError(Exception): pass
class ProcessorDoesNotExist(ProcessingKeyError): pass
class ProcessorNotEligible(ProcessingKeyError): pass
+class ProcessingManagerDoesNotExist(ProcessingKeyError): pass
+
class ProcessingManager(object):
@@ -265,6 +267,9 @@ def get_processing_manager_for_type(media_type):
Get the appropriate media manager for this type
"""
manager_class = hook_handle(('reprocess_manager', media_type))
+ if not manager_class:
+ raise ProcessingManagerDoesNotExist(
+ "A processing manager does not exist for {0}".format(media_type))
manager = manager_class()
return manager
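The new ProcessingManagerDoesNotExist error surfaces when no 'reprocess_manager' hook is registered for a media type; a hedged example, with a deliberately bogus media type to force the failure.

    from mediagoblin.processing import (
        get_processing_manager_for_type, ProcessingManagerDoesNotExist)

    try:
        manager = get_processing_manager_for_type('mediagoblin.media_types.bogus')
    except ProcessingManagerDoesNotExist as exc:
        # "A processing manager does not exist for mediagoblin.media_types.bogus"
        print exc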
From 441ed10de0cbc50ef2f40bae8dc25ea8493e8f8f Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Mon, 12 Aug 2013 14:57:45 -0700
Subject: [PATCH 055/160] wrap get_entry_and_processing_manager in try, except
block
---
mediagoblin/gmg_commands/reprocess.py | 50 ++++++++++++++++-----------
1 file changed, 29 insertions(+), 21 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index a3c732b9..579ba478 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -254,6 +254,9 @@ def available(args):
media_type = args.id_or_type
media_entry = None
manager = get_processing_manager_for_type(media_type)
+ except ProcessingManagerDoesNotExist:
+ entry = MediaEntry.query.filter_by(id=args.id_or_type).first()
+ print 'No such processing manager for {0}'.format(entry.media_type)
if args.state:
processors = manager.list_all_processors_by_state(args.state)
@@ -284,29 +287,34 @@ def available(args):
def run(args):
- media_entry, manager = get_entry_and_processing_manager(args.media_id)
-
- # TODO: (maybe?) This could probably be handled entirely by the
- # processor class...
try:
- processor_class = manager.get_processor(
- args.reprocess_command, media_entry)
- except ProcessorDoesNotExist:
- print 'No such processor "%s" for media with id "%s"' % (
- args.reprocess_command, media_entry.id)
- return
- except ProcessorNotEligible:
- print 'Processor "%s" exists but media "%s" is not eligible' % (
- args.reprocess_command, media_entry.id)
- return
+ media_entry, manager = get_entry_and_processing_manager(args.media_id)
- reprocess_parser = processor_class.generate_parser()
- reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
- reprocess_request = processor_class.args_to_request(reprocess_args)
- run_process_media(
- media_entry,
- reprocess_action=args.reprocess_command,
- reprocess_info=reprocess_request)
+ # TODO: (maybe?) This could probably be handled entirely by the
+ # processor class...
+ try:
+ processor_class = manager.get_processor(
+ args.reprocess_command, media_entry)
+ except ProcessorDoesNotExist:
+ print 'No such processor "%s" for media with id "%s"' % (
+ args.reprocess_command, media_entry.id)
+ return
+ except ProcessorNotEligible:
+ print 'Processor "%s" exists but media "%s" is not eligible' % (
+ args.reprocess_command, media_entry.id)
+ return
+
+ reprocess_parser = processor_class.generate_parser()
+ reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
+ reprocess_request = processor_class.args_to_request(reprocess_args)
+ run_process_media(
+ media_entry,
+ reprocess_action=args.reprocess_command,
+ reprocess_info=reprocess_request)
+
+ except ProcessingManagerDoesNotExist:
+ entry = MediaEntry.query.filter_by(id=args.media_id).first()
+ print 'No such processing manager for {0}'.format(entry.media_type)
def bulk_run(args):
From 0c4b68a8049acdef5f75e5fe7e95be2360d524eb Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Mon, 12 Aug 2013 15:25:52 -0700
Subject: [PATCH 056/160] Resize all processed thumbs
---
mediagoblin/gmg_commands/reprocess.py | 42 +++++++++++++++++++++++++--
1 file changed, 40 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 579ba478..55aa6cc9 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -322,8 +322,46 @@ def bulk_run(args):
def thumbs(args):
- #TODO regenerate thumbs for all processed media
- pass
+ """
+ Regenerate thumbs for all processed media
+ """
+ query = MediaEntry.query.filter_by(state='processed')
+
+ for entry in query:
+ try:
+ media_entry, manager = get_entry_and_processing_manager(entry.id)
+
+ # TODO: (maybe?) This could probably be handled entirely by the
+ # processor class...
+ try:
+ processor_class = manager.get_processor(
+ 'resize', media_entry)
+ except ProcessorDoesNotExist:
+ print 'No such processor "%s" for media with id "%s"' % (
+ 'resize', media_entry.id)
+ return
+ except ProcessorNotEligible:
+ print 'Processor "%s" exists but media "%s" is not eligible' % (
+ 'resize', media_entry.id)
+ return
+
+ reprocess_parser = processor_class.generate_parser()
+
+ # prepare filetype and size to be passed into reprocess_parser
+ if args.size:
+ extra_args = 'thumb --size {0} {1}'.format(args.size[0], args.size[1])
+ else:
+ extra_args = 'thumb'
+
+ reprocess_args = reprocess_parser.parse_args(extra_args.split())
+ reprocess_request = processor_class.args_to_request(reprocess_args)
+ run_process_media(
+ media_entry,
+ reprocess_action='resize',
+ reprocess_info=reprocess_request)
+
+ except ProcessingManagerDoesNotExist:
+ print 'No such processing manager for {0}'.format(entry.media_type)
def initial(args):
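To make the extra_args trick above concrete, this is roughly what one loop iteration feeds the image Resizer; the Namespace and dict shapes are assumptions about argparse and request_from_args, and the sizes are examples.

    extra_args = 'thumb --size 200 200'
    reprocess_args = reprocess_parser.parse_args(extra_args.split())
    # -> Namespace(file='thumb', size=[200, 200])
    reprocess_request = processor_class.args_to_request(reprocess_args)
    # -> roughly {'file': 'thumb', 'size': [200, 200]}, which ProcessMedia
    #    eventually unpacks into Resizer.process(file='thumb', size=[200, 200])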
From a7f426368dfce15f86fa0ef56200ec1228f6e4af Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Mon, 12 Aug 2013 15:45:09 -0700
Subject: [PATCH 057/160] bulk_run reprocessing complete
---
mediagoblin/gmg_commands/reprocess.py | 24 +++++++++++++++++++-----
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 55aa6cc9..3ba5d92c 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -111,11 +111,16 @@ def reprocess_parser_setup(subparser):
help='The type of media you would like to process')
bulk_run_parser.add_argument(
- 'state',
+ '--state',
default='processed',
+ nargs='?',
help='The state of the media you would like to process. Defaults to' \
" 'processed'")
+ bulk_run_parser.add_argument(
+ 'reprocess_command',
+ help='The reprocess command you intend to run')
+
bulk_run_parser.add_argument(
'reprocess_args',
nargs=argparse.REMAINDER,
@@ -286,9 +291,11 @@ def available(args):
print " - %s" % processor.name
-def run(args):
+def run(args, media_id=None):
+ if not media_id:
+ media_id = args.media_id
try:
- media_entry, manager = get_entry_and_processing_manager(args.media_id)
+ media_entry, manager = get_entry_and_processing_manager(media_id)
# TODO: (maybe?) This could probably be handled entirely by the
# processor class...
@@ -313,12 +320,19 @@ def run(args):
reprocess_info=reprocess_request)
except ProcessingManagerDoesNotExist:
- entry = MediaEntry.query.filter_by(id=args.media_id).first()
+ entry = MediaEntry.query.filter_by(id=media_id).first()
print 'No such processing manager for {0}'.format(entry.media_type)
def bulk_run(args):
- pass
+ """
+ Bulk reprocessing of a given media_type
+ """
+ query = MediaEntry.query.filter_by(media_type=args.type,
+ state=args.state)
+
+ for entry in query:
+ run(args, entry.id)
def thumbs(args):
From 36c17b85c12546076067e039e5f55c7780dc54f3 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Mon, 12 Aug 2013 15:48:40 -0700
Subject: [PATCH 058/160] remove old code
---
mediagoblin/gmg_commands/reprocess.py | 118 ----------------------
mediagoblin/media_types/image/__init__.py | 1 -
2 files changed, 119 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 3ba5d92c..5285942e 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -131,124 +131,6 @@ def reprocess_parser_setup(subparser):
###############
-
-def _set_media_type(args):
- """
- This will verify that all media id's are of the same media_type. If the
- --type flag is set, it will be replaced by the given media id's type.
-
- If they are trying to process different media types, an Exception will be
- raised.
- """
- if args[0].media_id:
- if len(args[0].media_id) == 1:
- args[0].type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
- .first().media_type.split('.')[-1]
-
- elif len(args[0].media_id) > 1:
- media_types = []
-
- for id in args[0].media_id:
- media_types.append(MediaEntry.query.filter_by(id=id).first()
- .media_type.split('.')[-1])
- for type in media_types:
- if media_types[0] != type:
- raise Exception((u'You cannot reprocess different'
- ' media_types at the same time.'))
-
- args[0].type = media_types[0]
-
-
-def _reprocess_all(args):
- """
- This handles reprocessing if no media_id's are given.
- """
- if not args[0].type:
- # If no media type is given, we can either regenerate all thumbnails,
- # or try to reprocess all failed media
-
- if args[0].thumbnails:
- if args[0].available:
- print _('Available options for regenerating all processed'
- ' media thumbnails: \n'
- '\t --size: max_width max_height'
- ' (defaults to config specs)')
- else:
- #TODO regenerate all thumbnails
- pass
-
- # Reprocess all failed media
- elif args[0].state == 'failed':
- if args[0].available:
- print _('\n Available reprocess actions for all failed'
- ' media_entries: \n \t --initial_processing')
- else:
- #TODO reprocess all failed entries
- pass
-
- # If here, they didn't set the --type flag and were trying to do
- # something other the generating thumbnails or initial_processing
- else:
- raise Exception(_('You must set --type when trying to reprocess'
- ' all media_entries, unless you set --state'
- ' to "failed".'))
-
- else:
- _run_reprocessing(args)
-
-
-def _run_reprocessing(args):
- # Are they just asking for the available reprocessing options for the given
- # media?
- if args[0].available:
- if args[0].state == 'failed':
- print _('\n Available reprocess actions for all failed'
- ' media_entries: \n \t --initial_processing')
- else:
- result = hook_handle(('reprocess_action', args[0].type), args)
- if not result:
- print _('Sorry there is no available reprocessing for {}'
- ' entries in the {} state'.format(args[0].type,
- args[0].state))
- else:
- # Run media reprocessing
- return hook_handle(('media_reprocess', args[0].type), args)
-
-
-def _set_media_state(args):
- """
- This will verify that all media id's are in the same state. If the
- --state flag is set, it will be replaced by the given media id's state.
-
- If they are trying to process different media states, an Exception will be
- raised.
- """
- if args[0].media_id:
- # Only check if we are given media_ids
- if len(args[0].media_id) == 1:
- args[0].state = MediaEntry.query.filter_by(id=args[0].media_id[0])\
- .first().state
-
- elif len(args[0].media_id) > 1:
- media_states = []
-
- for id in args[0].media_id:
- media_states.append(MediaEntry.query.filter_by(id=id).first()
- .state)
-
- # Make sure that all media are in the same state
- for state in media_states:
- if state != media_states[0]:
- raise Exception(_('You can only reprocess media that is in'
- ' the same state.'))
-
- args[0].state = media_states[0]
-
- # If no state was set, then we will default to the processed state
- if not args[0].state:
- args[0].state = 'processed'
-
-
def available(args):
# Get the media type, either by looking up media id, or by specific type
try:
diff --git a/mediagoblin/media_types/image/__init__.py b/mediagoblin/media_types/image/__init__.py
index f8c4a7d1..99643409 100644
--- a/mediagoblin/media_types/image/__init__.py
+++ b/mediagoblin/media_types/image/__init__.py
@@ -72,5 +72,4 @@ hooks = {
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: ImageMediaManager,
('reprocess_manager', MEDIA_TYPE): lambda: ImageProcessingManager,
- # ('media_reprocess', MEDIA_TYPE): ProcessImage().media_reprocess,
}
From 5ac1fe806483de28656a056f10314ecc6a10aed4 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 09:57:35 -0700
Subject: [PATCH 059/160] Audio Initial Processor
---
mediagoblin/gmg_commands/reprocess.py | 3 +-
mediagoblin/media_types/audio/__init__.py | 5 +-
mediagoblin/media_types/audio/processing.py | 201 +++++++++++++++++++-
3 files changed, 202 insertions(+), 7 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 5285942e..375d9ff2 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -245,7 +245,8 @@ def thumbs(args):
# prepare filetype and size to be passed into reprocess_parser
if args.size:
- extra_args = 'thumb --size {0} {1}'.format(args.size[0], args.size[1])
+ extra_args = 'thumb --size {0} {1}'.format(args.size[0],
+ args.size[1])
else:
extra_args = 'thumb'
diff --git a/mediagoblin/media_types/audio/__init__.py b/mediagoblin/media_types/audio/__init__.py
index c7ed8d2d..6ad473c8 100644
--- a/mediagoblin/media_types/audio/__init__.py
+++ b/mediagoblin/media_types/audio/__init__.py
@@ -15,7 +15,7 @@
# along with this program. If not, see .
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.audio.processing import process_audio, \
+from mediagoblin.media_types.audio.processing import AudioProcessingManager, \
sniff_handler
from mediagoblin.tools import pluginapi
@@ -32,8 +32,8 @@ def setup_plugin():
class AudioMediaManager(MediaManagerBase):
human_readable = "Audio"
- processor = staticmethod(process_audio)
display_template = "mediagoblin/media_displays/audio.html"
+ default_thumb = "images/media_thumbs/image.png"
def get_media_type_and_manager(ext):
@@ -45,4 +45,5 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: AudioMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: AudioProcessingManager,
}
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 22383bc1..3299c72d 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -14,16 +14,20 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
+import argparse
import logging
from tempfile import NamedTemporaryFile
import os
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import (create_pub_filepath, BadMediaFail,
- FilenameBuilder, ProgressCallback)
+from mediagoblin.processing import (
+ create_pub_filepath, BadMediaFail, FilenameBuilder,
+ ProgressCallback, MediaProcessor, ProcessingManager,
+ request_from_args, get_orig_filename,
+ store_public, copy_original)
-from mediagoblin.media_types.audio.transcoders import (AudioTranscoder,
- AudioThumbnailer)
+from mediagoblin.media_types.audio.transcoders import (
+ AudioTranscoder,AudioThumbnailer)
_log = logging.getLogger(__name__)
@@ -157,3 +161,192 @@ def process_audio(proc_state):
mgg.queue_store.delete_file(queued_filepath) # rm file
mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
entry.queued_media_file = []
+
+
+class CommonAudioProcessor(MediaProcessor):
+ """
+ Provides a base for various audio processing steps
+ """
+
+ def common_setup(self):
+ """
+ """
+ self.audio_config = mgg \
+ .global_config['media_type:mediagoblin.media_types.audio']
+
+ # Pull down and set up the original file
+ self.orig_filename = get_orig_filename(
+ self.entry, self.workbench)
+ self.name_builder = FilenameBuilder(self.orig_filename)
+
+ self.spectrogram_tmp = os.path.join(self.workbench.dir,
+ self.name_builder.fill(
+ '{basename}-spectrogram.jpg'))
+
+ self.transcoder = AudioTranscoder()
+ self.thumbnailer = AudioThumbnailer()
+
+ def copy_original(self):
+ if self.audio_config['keep_original']:
+ copy_original(
+ self.entry, self.orig_filename,
+ self.name_builder.fill('{basename}{ext}'))
+
+ def transcode(self, quality=None):
+ if not quality:
+ quality = self.audio_config['quality']
+
+ progress_callback = ProgressCallback(self.entry)
+ webm_audio_tmp = os.path.join(self.workbench.dir,
+ self.name_builder.fill(
+ '{basename}{ext}'))
+
+ webm_audio_filepath = create_pub_filepath(
+ self.entry,
+ '{original}.webm'.format(
+ original=os.path.splitext(
+ self.orig_filename[-1])[0]))
+
+ self.transcoder.transcode(
+ self.orig_filename,
+ webm_audio_tmp,
+ quality=quality,
+ progress_callback=progress_callback)
+
+ self.transcoder.discover(webm_audio_tmp)
+
+ _log.debug('Saving medium...')
+ store_public(self.entry, 'medium', webm_audio_tmp,
+ webm_audio_filepath)
+
+ def create_spectrogram(self, quality=None, max_width=None, fft_size=None):
+ if not quality:
+ quality = self.audio_config['quality']
+ if not max_width:
+ max_width = mgg.global_config['media:medium']['max_width']
+ if not fft_size:
+ fft_size = self.audio_config['spectrogram_fft_size']
+
+ spectrogram_filepath = create_pub_filepath(
+ self.entry,
+ '{original}-spectrogram.jpg'.format(
+ original=os.path.splitext(
+ self.orig_filename[-1])[0]))
+
+ wav_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
+ '{basename}.ogg'))
+
+ _log.info('Creating OGG source for spectrogram')
+ self.transcoder.transcode(
+ self.orig_filename,
+ wav_tmp,
+ mux_string='vorbisenc quality={0} ! oggmux'.format(quality))
+
+ self.thumbnailer.spectrogram(
+ wav_tmp,
+ self.spectrogram_tmp,
+ width=max_width,
+ fft_size=fft_size)
+
+ _log.debug('Saving spectrogram...')
+ store_public(self.entry, 'spectrogram', self.spectrogram_tmp,
+ spectrogram_filepath)
+
+ def generate_thumb(self, size=None):
+ if not size:
+ max_width = mgg.global_config['medium:thumb']['max_width']
+ max_height = mgg.global_config['medium:thumb']['max_height']
+ size = (max_width, max_height)
+
+ thumb_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
+ '{basename}-thumbnail.jpg'))
+
+ self.thumbnailer.thumbnail_spectrogram(
+ self.spectrogram_tmp,
+ thumb_tmp,
+ size)
+
+ thumb_filepath = create_pub_filepath(
+ self.entry,
+ '{original}-thumbnail.jpg'.format(
+ original=os.path.splitext(
+ self.orig_filename[-1])[0]))
+
+ store_public(self.entry, 'thumb', thumb_tmp, thumb_filepath)
+
+
+class InitialProcessor(CommonAudioProcessor):
+ """
+ Initial processing steps for new audio
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--quality',
+ help='vorbisenc quality')
+
+ parser.add_argument(
+ '--fft_size',
+ type=int,
+ help='spectrogram fft size')
+
+ parser.add_argument(
+ '--thumb_size',
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--medium_width',
+ type=int,
+            help='The width of the spectrogram')
+
+ parser.add_argument(
+ '--create_spectrogram',
+ action='store_true',
+            help='Create spectrogram and thumbnail')
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['create_spectrogram', 'quality', 'fft_size',
+ 'thumb_size', 'medium_width'])
+
+ def process(self, quality=None, fft_size=None, thumb_size=None,
+ create_spectrogram=None, medium_width=None):
+ if not create_spectrogram:
+ create_spectrogram = self.audio_config['create_spectrogram']
+
+ self.common_setup()
+ self.transcode(quality=quality)
+ self.copy_original()
+
+ if create_spectrogram:
+ self.create_spectrogram(quality=quality, max_width=medium_width,
+ fft_size=fft_size)
+ self.generate_thumb(size=thumb_size)
+ self.delete_queue_file()
+
+
+class AudioProcessingManager(ProcessingManager):
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
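A minimal sketch of pushing one entry through the new audio InitialProcessor by way of the reprocess machinery; the flag values are examples and the run_process_media wiring is assumed from earlier patches in this series.

    from mediagoblin.media_types.audio.processing import InitialProcessor
    from mediagoblin.submit.lib import run_process_media

    parser = InitialProcessor.generate_parser()
    args = parser.parse_args(['--create_spectrogram', '--medium_width', '500'])
    run_process_media(media_entry,                      # a failed/unprocessed entry
                      reprocess_action=InitialProcessor.name,
                      reprocess_info=InitialProcessor.args_to_request(args))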
From 550af89fc1502f8f695a00fec7b40783415c04c9 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 10:13:26 -0700
Subject: [PATCH 060/160] common_setup() must be the first processing step
---
mediagoblin/media_types/audio/processing.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 3299c72d..6945ee08 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -332,10 +332,11 @@ class InitialProcessor(CommonAudioProcessor):
def process(self, quality=None, fft_size=None, thumb_size=None,
create_spectrogram=None, medium_width=None):
+ self.common_setup()
+
if not create_spectrogram:
create_spectrogram = self.audio_config['create_spectrogram']
- self.common_setup()
self.transcode(quality=quality)
self.copy_original()
From c6eaa555de22329e6aa8657bd52c8a4e0a7fe00e Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 10:28:12 -0700
Subject: [PATCH 061/160] use name_builder with store_public, not
create_pub_filepath
---
mediagoblin/media_types/audio/processing.py | 37 +++++++++++----------
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 6945ee08..6c565eb4 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -201,11 +201,11 @@ class CommonAudioProcessor(MediaProcessor):
self.name_builder.fill(
'{basename}{ext}'))
- webm_audio_filepath = create_pub_filepath(
- self.entry,
- '{original}.webm'.format(
- original=os.path.splitext(
- self.orig_filename[-1])[0]))
+ #webm_audio_filepath = create_pub_filepath(
+ # self.entry,
+ # '{original}.webm'.format(
+ # original=os.path.splitext(
+ # self.orig_filename[-1])[0]))
self.transcoder.transcode(
self.orig_filename,
@@ -217,7 +217,7 @@ class CommonAudioProcessor(MediaProcessor):
_log.debug('Saving medium...')
store_public(self.entry, 'medium', webm_audio_tmp,
- webm_audio_filepath)
+ self.name_builder.fill('{basename}.medium{ext}'))
def create_spectrogram(self, quality=None, max_width=None, fft_size=None):
if not quality:
@@ -227,11 +227,11 @@ class CommonAudioProcessor(MediaProcessor):
if not fft_size:
fft_size = self.audio_config['spectrogram_fft_size']
- spectrogram_filepath = create_pub_filepath(
- self.entry,
- '{original}-spectrogram.jpg'.format(
- original=os.path.splitext(
- self.orig_filename[-1])[0]))
+ #spectrogram_filepath = create_pub_filepath(
+ # self.entry,
+ # '{original}-spectrogram.jpg'.format(
+ # original=os.path.splitext(
+ # self.orig_filename[-1])[0]))
wav_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
'{basename}.ogg'))
@@ -250,7 +250,7 @@ class CommonAudioProcessor(MediaProcessor):
_log.debug('Saving spectrogram...')
store_public(self.entry, 'spectrogram', self.spectrogram_tmp,
- spectrogram_filepath)
+ self.name_builder.fill('{basename}.spectrogram.jpg'))
def generate_thumb(self, size=None):
if not size:
@@ -266,13 +266,14 @@ class CommonAudioProcessor(MediaProcessor):
thumb_tmp,
size)
- thumb_filepath = create_pub_filepath(
- self.entry,
- '{original}-thumbnail.jpg'.format(
- original=os.path.splitext(
- self.orig_filename[-1])[0]))
+ #thumb_filepath = create_pub_filepath(
+ # self.entry,
+ # '{original}-thumbnail.jpg'.format(
+ # original=os.path.splitext(
+ # self.orig_filename[-1])[0]))
- store_public(self.entry, 'thumb', thumb_tmp, thumb_filepath)
+ store_public(self.entry, 'thumb', thumb_tmp,
+ self.name_builder.fill('{basename}.thumbnail.jpg'))
class InitialProcessor(CommonAudioProcessor):
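The point of the switch is that public filenames are now derived from the original upload rather than built through create_pub_filepath; assuming FilenameBuilder fills {basename} and {ext} from the source path, the targets look roughly like this (the source filename is an example).

    from mediagoblin.processing import FilenameBuilder

    nb = FilenameBuilder('/tmp/workbench/source/song.flac')
    nb.fill('{basename}.medium{ext}')       # -> 'song.medium.flac'
    nb.fill('{basename}.spectrogram.jpg')   # -> 'song.spectrogram.jpg'
    nb.fill('{basename}.thumbnail.jpg')     # -> 'song.thumbnail.jpg'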
From 776e4d7adc45bfd94399165853fb23c94f31501b Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 10:32:40 -0700
Subject: [PATCH 062/160] media:thumb not medium:thumb :)
---
mediagoblin/media_types/audio/processing.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 6c565eb4..67bf63b4 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -254,8 +254,8 @@ class CommonAudioProcessor(MediaProcessor):
def generate_thumb(self, size=None):
if not size:
- max_width = mgg.global_config['medium:thumb']['max_width']
- max_height = mgg.global_config['medium:thumb']['max_height']
+ max_width = mgg.global_config['media:thumb']['max_width']
+ max_height = mgg.global_config['media:thumb']['max_height']
size = (max_width, max_height)
thumb_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
From 9448a98eb27fd1260b9e3b7603c3cc4492b2f569 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 10:36:30 -0700
Subject: [PATCH 063/160] should store file as webm_audio instead of medium
---
mediagoblin/media_types/audio/processing.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 67bf63b4..74167ace 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -216,8 +216,8 @@ class CommonAudioProcessor(MediaProcessor):
self.transcoder.discover(webm_audio_tmp)
_log.debug('Saving medium...')
- store_public(self.entry, 'medium', webm_audio_tmp,
- self.name_builder.fill('{basename}.medium{ext}'))
+ store_public(self.entry, 'webm_audio', webm_audio_tmp,
+ self.name_builder.fill('{basename}.medium.webm'))
def create_spectrogram(self, quality=None, max_width=None, fft_size=None):
if not quality:
From 440e33aa54d5b6b3a9219283f4d0c31054323762 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 10:51:21 -0700
Subject: [PATCH 064/160] audio processing code cleanup
---
mediagoblin/media_types/audio/processing.py | 142 +-------------------
1 file changed, 6 insertions(+), 136 deletions(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 74167ace..f6d0cc03 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -16,18 +16,17 @@
import argparse
import logging
-from tempfile import NamedTemporaryFile
import os
from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
- create_pub_filepath, BadMediaFail, FilenameBuilder,
+ BadMediaFail, FilenameBuilder,
ProgressCallback, MediaProcessor, ProcessingManager,
request_from_args, get_orig_filename,
store_public, copy_original)
from mediagoblin.media_types.audio.transcoders import (
- AudioTranscoder,AudioThumbnailer)
+ AudioTranscoder, AudioThumbnailer)
_log = logging.getLogger(__name__)
@@ -43,126 +42,12 @@ def sniff_handler(media_file, **kw):
_log.debug('Audio discovery raised BadMediaFail')
return None
- if data.is_audio == True and data.is_video == False:
+ if data.is_audio is True and data.is_video is False:
return MEDIA_TYPE
return None
-def process_audio(proc_state):
- """Code to process uploaded audio. Will be run by celery.
-
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
- """
- entry = proc_state.entry
- workbench = proc_state.workbench
- audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']
-
- queued_filepath = entry.queued_media_file
- queued_filename = workbench.localized_file(
- mgg.queue_store, queued_filepath,
- 'source')
- name_builder = FilenameBuilder(queued_filename)
-
- webm_audio_filepath = create_pub_filepath(
- entry,
- '{original}.webm'.format(
- original=os.path.splitext(
- queued_filepath[-1])[0]))
-
- if audio_config['keep_original']:
- with open(queued_filename, 'rb') as queued_file:
- original_filepath = create_pub_filepath(
- entry, name_builder.fill('{basename}{ext}'))
-
- with mgg.public_store.get_file(original_filepath, 'wb') as \
- original_file:
- _log.debug('Saving original...')
- original_file.write(queued_file.read())
-
- entry.media_files['original'] = original_filepath
-
- transcoder = AudioTranscoder()
-
- with NamedTemporaryFile(dir=workbench.dir) as webm_audio_tmp:
- progress_callback = ProgressCallback(entry)
-
- transcoder.transcode(
- queued_filename,
- webm_audio_tmp.name,
- quality=audio_config['quality'],
- progress_callback=progress_callback)
-
- transcoder.discover(webm_audio_tmp.name)
-
- _log.debug('Saving medium...')
- mgg.public_store.get_file(webm_audio_filepath, 'wb').write(
- webm_audio_tmp.read())
-
- entry.media_files['webm_audio'] = webm_audio_filepath
-
- # entry.media_data_init(length=int(data.audiolength))
-
- if audio_config['create_spectrogram']:
- spectrogram_filepath = create_pub_filepath(
- entry,
- '{original}-spectrogram.jpg'.format(
- original=os.path.splitext(
- queued_filepath[-1])[0]))
-
- with NamedTemporaryFile(dir=workbench.dir, suffix='.ogg') as wav_tmp:
- _log.info('Creating OGG source for spectrogram')
- transcoder.transcode(
- queued_filename,
- wav_tmp.name,
- mux_string='vorbisenc quality={0} ! oggmux'.format(
- audio_config['quality']))
-
- thumbnailer = AudioThumbnailer()
-
- with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as spectrogram_tmp:
- thumbnailer.spectrogram(
- wav_tmp.name,
- spectrogram_tmp.name,
- width=mgg.global_config['media:medium']['max_width'],
- fft_size=audio_config['spectrogram_fft_size'])
-
- _log.debug('Saving spectrogram...')
- mgg.public_store.get_file(spectrogram_filepath, 'wb').write(
- spectrogram_tmp.read())
-
- entry.media_files['spectrogram'] = spectrogram_filepath
-
- with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as thumb_tmp:
- thumbnailer.thumbnail_spectrogram(
- spectrogram_tmp.name,
- thumb_tmp.name,
- (mgg.global_config['media:thumb']['max_width'],
- mgg.global_config['media:thumb']['max_height']))
-
- thumb_filepath = create_pub_filepath(
- entry,
- '{original}-thumbnail.jpg'.format(
- original=os.path.splitext(
- queued_filepath[-1])[0]))
-
- mgg.public_store.get_file(thumb_filepath, 'wb').write(
- thumb_tmp.read())
-
- entry.media_files['thumb'] = thumb_filepath
- else:
- entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']
-
- # Remove queued media file from storage and database.
- # queued_filepath is in the task_id directory which should
- # be removed too, but fail if the directory is not empty to be on
- # the super-safe side.
- mgg.queue_store.delete_file(queued_filepath) # rm file
- mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
- entry.queued_media_file = []
-
-
class CommonAudioProcessor(MediaProcessor):
"""
Provides a base for various audio processing steps
@@ -170,6 +55,8 @@ class CommonAudioProcessor(MediaProcessor):
def common_setup(self):
"""
+ Setup the workbench directory and pull down the original file, add
+ the audio_config, transcoder, thumbnailer and spectrogram_tmp path
"""
self.audio_config = mgg \
.global_config['media_type:mediagoblin.media_types.audio']
@@ -179,6 +66,7 @@ class CommonAudioProcessor(MediaProcessor):
self.entry, self.workbench)
self.name_builder = FilenameBuilder(self.orig_filename)
+        # spectrogram_tmp is used for thumbnails and spectrograms
self.spectrogram_tmp = os.path.join(self.workbench.dir,
self.name_builder.fill(
'{basename}-spectrogram.jpg'))
@@ -201,12 +89,6 @@ class CommonAudioProcessor(MediaProcessor):
self.name_builder.fill(
'{basename}{ext}'))
- #webm_audio_filepath = create_pub_filepath(
- # self.entry,
- # '{original}.webm'.format(
- # original=os.path.splitext(
- # self.orig_filename[-1])[0]))
-
self.transcoder.transcode(
self.orig_filename,
webm_audio_tmp,
@@ -227,12 +109,6 @@ class CommonAudioProcessor(MediaProcessor):
if not fft_size:
fft_size = self.audio_config['spectrogram_fft_size']
- #spectrogram_filepath = create_pub_filepath(
- # self.entry,
- # '{original}-spectrogram.jpg'.format(
- # original=os.path.splitext(
- # self.orig_filename[-1])[0]))
-
wav_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
'{basename}.ogg'))
@@ -266,12 +142,6 @@ class CommonAudioProcessor(MediaProcessor):
thumb_tmp,
size)
- #thumb_filepath = create_pub_filepath(
- # self.entry,
- # '{original}-thumbnail.jpg'.format(
- # original=os.path.splitext(
- # self.orig_filename[-1])[0]))
-
store_public(self.entry, 'thumb', thumb_tmp,
self.name_builder.fill('{basename}.thumbnail.jpg'))
From 757376e34a645eda95df974906c103fb8421ab35 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 11:02:36 -0700
Subject: [PATCH 065/160] forgot nargs=2
---
mediagoblin/media_types/audio/processing.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index f6d0cc03..25ec39e6 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -180,6 +180,7 @@ class InitialProcessor(CommonAudioProcessor):
parser.add_argument(
'--thumb_size',
+ nargs=2,
metavar=('max_width', 'max_height'),
type=int)
From d8f886dcb45972881977be6e60fbdaa870ea8115 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 11:29:44 -0700
Subject: [PATCH 066/160] fetch spectrogram for thumbnail creation, create
spectrogram if not found
---
mediagoblin/media_types/audio/processing.py | 26 ++++++++++++++-------
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 25ec39e6..b70f7841 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -66,11 +66,6 @@ class CommonAudioProcessor(MediaProcessor):
self.entry, self.workbench)
self.name_builder = FilenameBuilder(self.orig_filename)
-        # spectrogram_tmp is used for thumbnails and spectrograms
- self.spectrogram_tmp = os.path.join(self.workbench.dir,
- self.name_builder.fill(
- '{basename}-spectrogram.jpg'))
-
self.transcoder = AudioTranscoder()
self.thumbnailer = AudioThumbnailer()
@@ -118,14 +113,18 @@ class CommonAudioProcessor(MediaProcessor):
wav_tmp,
mux_string='vorbisenc quality={0} ! oggmux'.format(quality))
+ spectrogram_tmp = os.path.join(self.workbench.dir,
+ self.name_builder.fill(
+ '{basename}-spectrogram.jpg'))
+
self.thumbnailer.spectrogram(
wav_tmp,
- self.spectrogram_tmp,
+ spectrogram_tmp,
width=max_width,
fft_size=fft_size)
_log.debug('Saving spectrogram...')
- store_public(self.entry, 'spectrogram', self.spectrogram_tmp,
+ store_public(self.entry, 'spectrogram', spectrogram_tmp,
self.name_builder.fill('{basename}.spectrogram.jpg'))
def generate_thumb(self, size=None):
@@ -137,8 +136,17 @@ class CommonAudioProcessor(MediaProcessor):
thumb_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
'{basename}-thumbnail.jpg'))
+ # We need the spectrogram to create a thumbnail
+ spectrogram = self.entry.media_files.get('spectrogram')
+ if not spectrogram:
+ _log.info('No spectrogram found, we will create one.')
+ self.create_spectrogram()
+ spectrogram = self.entry.media_files['spectrogram']
+
+ spectrogram_filepath = mgg.public_store.get_local_path(spectrogram)
+
self.thumbnailer.thumbnail_spectrogram(
- self.spectrogram_tmp,
+ spectrogram_filepath,
thumb_tmp,
size)
@@ -171,7 +179,7 @@ class InitialProcessor(CommonAudioProcessor):
parser.add_argument(
'--quality',
- help='vorbisenc quality')
+ help='vorbisenc quality. Range: -0.1..1')
parser.add_argument(
'--fft_size',
From 2e50e4b5f374f10630e078216e1a15b99a7354fd Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 11:30:18 -0700
Subject: [PATCH 067/160] add audio thumbnail and spectrogram resizer
---
mediagoblin/media_types/audio/processing.py | 64 +++++++++++++++++++++
1 file changed, 64 insertions(+)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index b70f7841..362d2d3c 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -227,7 +227,71 @@ class InitialProcessor(CommonAudioProcessor):
self.delete_queue_file()
+class Resizer(CommonAudioProcessor):
+ """
+    Thumbnail and spectrogram resizing process steps for processed audio
+ """
+ name = 'resize'
+    description = 'Resize audio thumbnail or spectrogram'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media entry is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--quality',
+ help='vorbisenc quality. Range: -0.1..1')
+
+ parser.add_argument(
+ '--fft_size',
+ type=int,
+ help='spectrogram fft size')
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--medium_width',
+ type=int,
+            help='The width of the spectrogram')
+
+ parser.add_argument(
+ 'file',
+ choices=['thumb', 'spectrogram'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['thumb_size', 'file', 'quality', 'fft_size', 'medium_width'])
+
+ def process(self, thumb_size=None, file=None, quality=None, fft_size=None,
+ medium_width=None):
+ self.common_setup()
+ if file == 'thumb':
+ self.generate_thumb(size=thumb_size)
+ elif file == 'spectrogram':
+ self.create_spectrogram(quality=quality, max_width=medium_width,
+ fft_size=fft_size)
+
+
class AudioProcessingManager(ProcessingManager):
def __init__(self):
super(self.__class__, self).__init__()
self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
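Illustrative only: regenerating just the spectrogram for one processed audio entry with the new Resizer; the width value and the driver call are assumptions.

    parser = Resizer.generate_parser()
    args = parser.parse_args(['spectrogram', '--medium_width', '800'])
    run_process_media(media_entry, reprocess_action=Resizer.name,
                      reprocess_info=Resizer.args_to_request(args))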
From ad80fc8ac7fbd3ab8ceff31fea6cf9055a4e7d5f Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 11:34:05 -0700
Subject: [PATCH 068/160] just use the default quality when creating a
thumbnail or spectrogram
---
mediagoblin/media_types/audio/processing.py | 24 ++++++++-------------
1 file changed, 9 insertions(+), 15 deletions(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 362d2d3c..2f965e45 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -96,9 +96,7 @@ class CommonAudioProcessor(MediaProcessor):
store_public(self.entry, 'webm_audio', webm_audio_tmp,
self.name_builder.fill('{basename}.medium.webm'))
- def create_spectrogram(self, quality=None, max_width=None, fft_size=None):
- if not quality:
- quality = self.audio_config['quality']
+ def create_spectrogram(self, max_width=None, fft_size=None):
if not max_width:
max_width = mgg.global_config['media:medium']['max_width']
if not fft_size:
@@ -111,7 +109,8 @@ class CommonAudioProcessor(MediaProcessor):
self.transcoder.transcode(
self.orig_filename,
wav_tmp,
- mux_string='vorbisenc quality={0} ! oggmux'.format(quality))
+ mux_string='vorbisenc quality={0} ! oggmux'.format(
+ self.audio_config['quality']))
spectrogram_tmp = os.path.join(self.workbench.dir,
self.name_builder.fill(
@@ -221,8 +220,7 @@ class InitialProcessor(CommonAudioProcessor):
self.copy_original()
if create_spectrogram:
- self.create_spectrogram(quality=quality, max_width=medium_width,
- fft_size=fft_size)
+ self.create_spectrogram(max_width=medium_width, fft_size=fft_size)
self.generate_thumb(size=thumb_size)
self.delete_queue_file()
@@ -232,7 +230,7 @@ class Resizer(CommonAudioProcessor):
Thumbnail and spectrogram resizing process steps for processed audio
"""
name = 'resize'
- description = 'Resize audio thumbnail or spectrogram'
+ description = 'Resize thumbnail or spectrogram'
@classmethod
def media_is_eligible(cls, entry=None, state=None):
@@ -249,10 +247,6 @@ class Resizer(CommonAudioProcessor):
description=cls.description,
prog=cls.name)
- parser.add_argument(
- '--quality',
- help='vorbisenc quality. Range: -0.1..1')
-
parser.add_argument(
'--fft_size',
type=int,
@@ -278,16 +272,16 @@ class Resizer(CommonAudioProcessor):
@classmethod
def args_to_request(cls, args):
return request_from_args(
- args, ['thumb_size', 'file', 'quality', 'fft_size', 'medium_width'])
+ args, ['thumb_size', 'file', 'fft_size', 'medium_width'])
- def process(self, thumb_size=None, file=None, quality=None, fft_size=None,
+ def process(self, thumb_size=None, file=None, fft_size=None,
medium_width=None):
self.common_setup()
+
if file == 'thumb':
self.generate_thumb(size=thumb_size)
elif file == 'spectrogram':
- self.create_spectrogram(quality=quality, max_width=medium_width,
- fft_size=fft_size)
+ self.create_spectrogram(max_width=medium_width, fft_size=fft_size)
class AudioProcessingManager(ProcessingManager):
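With this change the vorbisenc quality always comes from the media-type configuration rather than a per-run flag. For reference, the mux_string handed to the transcoder is just a formatted GStreamer pipeline fragment; a quick illustration (0.3 is a made-up value standing in for audio_config['quality']):

    quality = 0.3  # hypothetical value read from the audio media-type config
    mux_string = 'vorbisenc quality={0} ! oggmux'.format(quality)
    # mux_string == 'vorbisenc quality=0.3 ! oggmux'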
From 0c509b1b7e6f7c8ba52aec4860dc0cae1dc0de80 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 11:42:42 -0700
Subject: [PATCH 069/160] added audio reprocessing transcoder
---
mediagoblin/media_types/audio/processing.py | 38 ++++++++++++++++++++-
1 file changed, 37 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 2f965e45..7d8fd2d2 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -274,7 +274,7 @@ class Resizer(CommonAudioProcessor):
return request_from_args(
args, ['thumb_size', 'file', 'fft_size', 'medium_width'])
- def process(self, thumb_size=None, file=None, fft_size=None,
+ def process(self, file, thumb_size=None, fft_size=None,
medium_width=None):
self.common_setup()
@@ -284,8 +284,44 @@ class Resizer(CommonAudioProcessor):
self.create_spectrogram(max_width=medium_width, fft_size=fft_size)
+class Transcoder(CommonAudioProcessor):
+ """
+ Transcoding processing steps for processed audio
+ """
+ name = 'transcode'
+ description = 'Re-transcode audio'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--quality',
+ help='vorbisenc quality. Range: -0.1..1')
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['quality'])
+
+ def process(self, quality=None):
+ self.common_setup()
+ self.transcode(quality=quality)
+
+
class AudioProcessingManager(ProcessingManager):
def __init__(self):
super(self.__class__, self).__init__()
self.add_processor(InitialProcessor)
self.add_processor(Resizer)
+ self.add_processor(Transcoder)
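The audio manager now registers three named steps: 'initial', 'resize' and 'transcode'. A rough sketch (not MediaGoblin's actual ProcessingManager code) of how a reprocessing request could be routed to one of them, gated on media_is_eligible():

    def pick_processor(processors, name, entry):
        # processors: the classes passed to add_processor(); entry: a
        # MediaEntry-like object with a .state attribute.  Purely illustrative.
        by_name = dict((proc.name, proc) for proc in processors)
        processor = by_name[name]  # e.g. 'initial', 'resize' or 'transcode'
        if not processor.media_is_eligible(entry=entry):
            raise ValueError('{0} cannot run on an entry in state {1!r}'.format(
                name, entry.state))
        return processor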
From 347ef583829f0943abf18a8b42d953b185ae2a46 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 13:19:52 -0700
Subject: [PATCH 070/160] Added Initial processor for video
---
mediagoblin/media_types/audio/processing.py | 4 +-
mediagoblin/media_types/video/processing.py | 280 +++++++++++++-------
2 files changed, 180 insertions(+), 104 deletions(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 7d8fd2d2..f7c0a234 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -113,8 +113,8 @@ class CommonAudioProcessor(MediaProcessor):
self.audio_config['quality']))
spectrogram_tmp = os.path.join(self.workbench.dir,
- self.name_builder.fill(
- '{basename}-spectrogram.jpg'))
+ self.name_builder.fill(
+ '{basename}-spectrogram.jpg'))
self.thumbnailer.spectrogram(
wav_tmp,
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index 857c1647..b9725401 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -19,8 +19,12 @@ import logging
import datetime
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import \
- create_pub_filepath, FilenameBuilder, BaseProcessingFail, ProgressCallback
+from mediagoblin.processing import (
+ FilenameBuilder, BaseProcessingFail,
+ ProgressCallback, MediaProcessor,
+ ProcessingManager, request_from_args,
+ get_orig_filename, store_public,
+ copy_original)
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from . import transcoders
@@ -57,106 +61,6 @@ def sniff_handler(media_file, **kw):
return None
-def process_video(proc_state):
- """
- Process a video entry, transcode the queued media files (originals) and
- create a thumbnail for the entry.
-
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
- """
- entry = proc_state.entry
- workbench = proc_state.workbench
- video_config = mgg.global_config['media_type:mediagoblin.media_types.video']
-
- queued_filepath = entry.queued_media_file
- queued_filename = proc_state.get_queued_filename()
- name_builder = FilenameBuilder(queued_filename)
-
- medium_basename = name_builder.fill('{basename}-640p.webm')
- medium_filepath = create_pub_filepath(entry, medium_basename)
-
- thumbnail_basename = name_builder.fill('{basename}.thumbnail.jpg')
- thumbnail_filepath = create_pub_filepath(entry, thumbnail_basename)
-
- # Create a temporary file for the video destination (cleaned up with workbench)
- tmp_dst = os.path.join(workbench.dir, medium_basename)
- # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
- progress_callback = ProgressCallback(entry)
-
- dimensions = (
- mgg.global_config['media:medium']['max_width'],
- mgg.global_config['media:medium']['max_height'])
-
- # Extract metadata and keep a record of it
- metadata = transcoders.VideoTranscoder().discover(queued_filename)
- store_metadata(entry, metadata)
-
- # Figure out whether or not we need to transcode this video or
- # if we can skip it
- if skip_transcode(metadata):
- _log.debug('Skipping transcoding')
-
- dst_dimensions = metadata['videowidth'], metadata['videoheight']
-
- # Push original file to public storage
- _log.debug('Saving original...')
- proc_state.copy_original(queued_filepath[-1])
-
- did_transcode = False
- else:
- transcoder = transcoders.VideoTranscoder()
-
- transcoder.transcode(queued_filename, tmp_dst,
- vp8_quality=video_config['vp8_quality'],
- vp8_threads=video_config['vp8_threads'],
- vorbis_quality=video_config['vorbis_quality'],
- progress_callback=progress_callback,
- dimensions=dimensions)
-
- dst_dimensions = transcoder.dst_data.videowidth,\
- transcoder.dst_data.videoheight
-
- # Push transcoded video to public storage
- _log.debug('Saving medium...')
- mgg.public_store.copy_local_to_storage(tmp_dst, medium_filepath)
- _log.debug('Saved medium')
-
- entry.media_files['webm_640'] = medium_filepath
-
- did_transcode = True
-
- # Save the width and height of the transcoded video
- entry.media_data_init(
- width=dst_dimensions[0],
- height=dst_dimensions[1])
-
- # Temporary file for the video thumbnail (cleaned up with workbench)
- tmp_thumb = os.path.join(workbench.dir, thumbnail_basename)
-
- # Create a thumbnail.jpg that fits in a 180x180 square
- transcoders.VideoThumbnailerMarkII(
- queued_filename,
- tmp_thumb,
- 180)
-
- # Push the thumbnail to public storage
- _log.debug('Saving thumbnail...')
- mgg.public_store.copy_local_to_storage(tmp_thumb, thumbnail_filepath)
- entry.media_files['thumb'] = thumbnail_filepath
-
- # save the original... but only if we did a transcoding
- # (if we skipped transcoding and just kept the original anyway as the main
- # media, then why would we save the original twice?)
- if video_config['keep_original'] and did_transcode:
- # Push original file to public storage
- _log.debug('Saving original...')
- proc_state.copy_original(queued_filepath[-1])
-
- # Remove queued media file from storage and database
- proc_state.delete_queue_file()
-
-
def store_metadata(media_entry, metadata):
"""
Store metadata from this video for this media entry.
@@ -211,3 +115,175 @@ def store_metadata(media_entry, metadata):
if len(stored_metadata):
media_entry.media_data_init(
orig_metadata=stored_metadata)
+
+
+class CommonVideoProcessor(MediaProcessor):
+ """
+ Provides a base for various video processing steps
+ """
+
+ def common_setup(self):
+ self.video_config = mgg \
+ .global_config['media_type:mediagoblin.media_types.audio']
+
+ # Pull down and set up the original file
+ self.orig_filename = get_orig_filename(
+ self.entry, self.workbench)
+ self.name_builder = FilenameBuilder(self.orig_filename)
+
+ self.transcoder = transcoders.VideoTranscoder()
+ self.did_transcode = False
+
+ def copy_original(self):
+ # If we didn't transcode, then we need to keep the original
+ if not self.did_transcode or \
+ (self.video_config['keep_original'] and self.did_transcode):
+ copy_original(
+ self.entry, self.orig_filename,
+ self.name_builder.fill('{basename}{ext}'))
+
+ def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
+ vorbis_quality=None):
+ progress_callback = ProgressCallback(entry)
+ tmp_dst = os.path.join(self.workbench.dir,
+ self.name_builder.fill('{basename}-640p.webm'))
+
+ if not medium_size:
+ medium_size = (
+ mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
+ if not vp8_quality:
+ vp8_quality = self.video_config['vp8_quality']
+ if not vp8_threads:
+ vp8_threads = self.video_config['vp8_threads']
+ if not vorbis_quality:
+ vorbis_quality = self.video_config['vorbis_quality']
+
+ # Extract metadata and keep a record of it
+ metadata = self.transcoder.discover(self.orig_filename)
+ store_metadata(self.entry, metadata)
+
+ # Figure out whether or not we need to transcode this video or
+ # if we can skip it
+ if skip_transcode(metadata):
+ _log.debug('Skipping transcoding')
+
+ dst_dimensions = metadata['videowidth'], metadata['videoheight']
+
+ else:
+ self.transcoder.transcode(self.orig_filename, tmp_dst,
+ vp8_quality=vp8_quality,
+ vp8_threads=vp8_threads,
+ vorbis_quality=vorbis_quality,
+ progress_callback=progress_callback,
+ dimensions=medium_size)
+
+ dst_dimensions = self.transcoder.dst_data.videowidth,\
+ self.transcoder.dst_data.videoheight
+
+ # Push transcoded video to public storage
+ _log.debug('Saving medium...')
+ store_public(self.entry, 'webm_640', tmp_dst,
+ self.name_builder.fill('{basename}-640p.webm'))
+ _log.debug('Saved medium')
+
+ self.did_transcode = True
+
+ # Save the width and height of the transcoded video
+ self.entry.media_data_init(
+ width=dst_dimensions[0],
+ height=dst_dimensions[1])
+
+ def generate_thumb(self, thumb_size=None):
+ # Temporary file for the video thumbnail (cleaned up with workbench)
+ tmp_thumb = os.path.join(self.workbench.dir,
+ self.name_builder.fill(
+ '{basename}.thumbnail.jpg'))
+
+ if not thumb_size:
+ thumb_size = (mgg.global_config['media:thumb']['max_width'],
+ mgg.global_config['media:thumb']['max_height'])
+
+ transcoders.VideoThumbnailerMarkII(
+ self.orig_filename,
+ tmp_thumb,
+ thumb_size[0],
+ thumb_size[1])
+
+ # Push the thumbnail to public storage
+ _log.debug('Saving thumbnail...')
+ store_public(self.entry, 'thumb', tmp_thumb,
+ self.name_builder.fill('{basename}.thumbnail.jpg'))
+
+
+class InitialProcessor(CommonVideoProcessor):
+ """
+ Initial processing steps for new video
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--medium_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--vp8_quality',
+ type=int,
+ help='Range 0..10')
+
+ parser.add_argument(
+ '--vp8_threads',
+ type=int,
+ help='0 means number_of_CPUs - 1')
+
+ parser.add_argument(
+ '--vorbis_quality',
+ type=float,
+ help='Range -0.1..1')
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['medium_size', 'vp8_quality', 'vp8_threads',
+ 'vorbis_quality', 'thumb_size'])
+
+ def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
+ vorbis_quality=None, thumb_size=None):
+ self.common_setup()
+
+ self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
+ vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)
+
+ self.copy_original()
+ self.generate_thumb(thumb_size=thumb_size)
+ self.delete_queue_file()
+
+
+class VideoProcessingManager(ProcessingManager):
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
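One detail worth noting in CommonVideoProcessor.copy_original(): the guard keeps the original file whenever transcoding was skipped, or when it ran and keep_original is set. The expression reduces to a simpler form, which a quick truth-table check confirms:

    for did_transcode in (False, True):
        for keep_original in (False, True):
            original_form = not did_transcode or (keep_original and did_transcode)
            reduced_form = not did_transcode or keep_original
            assert original_form == reduced_form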
From 52e9770466336bf66585e00088b3fe3eca163562 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 13:27:51 -0700
Subject: [PATCH 071/160] use type=float for audio vorbis quality
---
mediagoblin/media_types/audio/processing.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index f7c0a234..7000b3c4 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -178,6 +178,7 @@ class InitialProcessor(CommonAudioProcessor):
parser.add_argument(
'--quality',
+ type=float,
help='vorbisenc quality. Range: -0.1..1')
parser.add_argument(
From 371bcc24d2d937174bcf3132baad491c2f8d31fa Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 13:31:48 -0700
Subject: [PATCH 072/160] Added thumbnail resizer for video
---
mediagoblin/media_types/video/processing.py | 36 +++++++++++++++++++++
1 file changed, 36 insertions(+)
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index b9725401..8b8fdac9 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -283,7 +283,43 @@ class InitialProcessor(CommonVideoProcessor):
self.delete_queue_file()
+class Resizer(CommonVideoProcessor):
+ """
+ Video thumbnail resizing process steps for processed media
+ """
+ name = 'resize'
+ description = 'Resize thumbnail'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['thumb_size'])
+
+ def process(self, thumb_size=None):
+ self.common_setup()
+ self.generate_thumb(thumb_size=thumb_size)
+
+
class VideoProcessingManager(ProcessingManager):
def __init__(self):
super(self.__class__, self).__init__()
self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
From 57d1cb3cef44f53283a3c11b7e16a48dc3057062 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 13:40:16 -0700
Subject: [PATCH 073/160] added video transcoder
---
mediagoblin/media_types/video/__init__.py | 4 +-
mediagoblin/media_types/video/processing.py | 81 ++++++++++++++++++---
2 files changed, 72 insertions(+), 13 deletions(-)
diff --git a/mediagoblin/media_types/video/__init__.py b/mediagoblin/media_types/video/__init__.py
index e8a4308b..03ffcb1f 100644
--- a/mediagoblin/media_types/video/__init__.py
+++ b/mediagoblin/media_types/video/__init__.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.video.processing import process_video, \
+from mediagoblin.media_types.video.processing import VideoProcessingManager, \
sniff_handler
from mediagoblin.tools import pluginapi
@@ -30,7 +30,6 @@ def setup_plugin():
class VideoMediaManager(MediaManagerBase):
human_readable = "Video"
- processor = staticmethod(process_video)
display_template = "mediagoblin/media_displays/video.html"
default_thumb = "images/media_thumbs/video.jpg"
@@ -48,4 +47,5 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: VideoMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: VideoProcessingManager,
}
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index 8b8fdac9..3f96dc66 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -14,6 +14,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
import os.path
import logging
import datetime
@@ -52,10 +53,10 @@ def sniff_handler(media_file, **kw):
if not data:
_log.error('Could not discover {0}'.format(
- kw.get('media')))
+ kw.get('media')))
return None
- if data['is_video'] == True:
+ if data['is_video'] is True:
return MEDIA_TYPE
return None
@@ -69,9 +70,9 @@ def store_metadata(media_entry, metadata):
stored_metadata = dict(
[(key, metadata[key])
for key in [
- "videoheight", "videolength", "videowidth",
- "audiorate", "audiolength", "audiochannels", "audiowidth",
- "mimetype"]
+ "videoheight", "videolength", "videowidth",
+ "audiorate", "audiolength", "audiochannels", "audiowidth",
+ "mimetype"]
if key in metadata])
# We have to convert videorate into a sequence because it's a
@@ -90,10 +91,10 @@ def store_metadata(media_entry, metadata):
tags = dict(
[(key, tags_metadata[key])
for key in [
- "application-name", "artist", "audio-codec", "bitrate",
- "container-format", "copyright", "encoder",
- "encoder-version", "license", "nominal-bitrate", "title",
- "video-codec"]
+ "application-name", "artist", "audio-codec", "bitrate",
+ "container-format", "copyright", "encoder",
+ "encoder-version", "license", "nominal-bitrate", "title",
+ "video-codec"]
if key in tags_metadata])
if 'date' in tags_metadata:
date = tags_metadata['date']
@@ -144,7 +145,7 @@ class CommonVideoProcessor(MediaProcessor):
def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
vorbis_quality=None):
- progress_callback = ProgressCallback(entry)
+ progress_callback = ProgressCallback(self.entry)
tmp_dst = os.path.join(self.workbench.dir,
self.name_builder.fill('{basename}-640p.webm'))
@@ -299,7 +300,7 @@ class Resizer(CommonVideoProcessor):
@classmethod
def generate_parser(cls):
parser = argparse.ArgumentParser(
- description=description,
+ description=cls.description,
prog=cls.name)
parser.add_argument(
@@ -308,6 +309,8 @@ class Resizer(CommonVideoProcessor):
metavar=('max_width', 'max_height'),
type=int)
+ return parser
+
@classmethod
def args_to_request(cls, args):
return request_from_args(
@@ -318,8 +321,64 @@ class Resizer(CommonVideoProcessor):
self.generate_thumb(thumb_size=thumb_size)
+class Transcoder(CommonVideoProcessor):
+ """
+ Transcoding processing steps for processed video
+ """
+ name = 'transcode'
+ description = 'Re-transcode video'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--medium_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--vp8_quality',
+ type=int,
+ help='Range 0..10')
+
+ parser.add_argument(
+ '--vp8_threads',
+ type=int,
+ help='0 means number_of_CPUs - 1')
+
+ parser.add_argument(
+ '--vorbis_quality',
+ type=float,
+ help='Range -0.1..1')
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['medium_size', 'vp8_threads', 'vp8_quality',
+ 'vorbis_quality'])
+
+ def process(self, medium_size=None, vp8_quality=None, vp8_threads=None,
+ vorbis_quality=None):
+ self.common_setup()
+ self.transcode(medium_size=medium_size, vp8_threads=vp8_threads,
+ vp8_quality=vp8_quality, vorbis_quality=vorbis_quality)
+
+
class VideoProcessingManager(ProcessingManager):
def __init__(self):
super(self.__class__, self).__init__()
self.add_processor(InitialProcessor)
self.add_processor(Resizer)
+ self.add_processor(Transcoder)
From 35d6a95008ac63a00cc2e4d7fac8187bc58eea9a Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 17:32:59 -0700
Subject: [PATCH 074/160] Added initial processor for ascii media
---
mediagoblin/media_types/ascii/processing.py | 201 ++++++++++++--------
1 file changed, 124 insertions(+), 77 deletions(-)
diff --git a/mediagoblin/media_types/ascii/processing.py b/mediagoblin/media_types/ascii/processing.py
index aca784e8..4cf8081a 100644
--- a/mediagoblin/media_types/ascii/processing.py
+++ b/mediagoblin/media_types/ascii/processing.py
@@ -13,6 +13,7 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
import chardet
import os
try:
@@ -22,7 +23,11 @@ except ImportError:
import logging
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import create_pub_filepath
+from mediagoblin.processing import (
+ create_pub_filepath, FilenameBuilder,
+ MediaProcessor, ProcessingManager,
+ get_orig_filename, copy_original,
+ store_public, request_from_args)
from mediagoblin.media_types.ascii import asciitoimage
_log = logging.getLogger(__name__)
@@ -43,106 +48,148 @@ def sniff_handler(media_file, **kw):
return None
-def process_ascii(proc_state):
- """Code to process a txt file. Will be run by celery.
-
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
+class CommonAsciiProcessor(MediaProcessor):
"""
- entry = proc_state.entry
- workbench = proc_state.workbench
- ascii_config = mgg.global_config['media_type:mediagoblin.media_types.ascii']
- # Conversions subdirectory to avoid collisions
- conversions_subdir = os.path.join(
- workbench.dir, 'conversions')
- os.mkdir(conversions_subdir)
+ Provides a base for various ascii processing steps
+ """
+ def common_setup(self):
+ self.ascii_config = mgg.global_config[
+ 'media_type:mediagoblin.media_types.ascii']
- queued_filepath = entry.queued_media_file
- queued_filename = workbench.localized_file(
- mgg.queue_store, queued_filepath,
- 'source')
+ # Conversions subdirectory to avoid collisions
+ self.conversions_subdir = os.path.join(
+ self.workbench.dir, 'conversions')
+ os.mkdir(self.conversions_subdir)
- queued_file = file(queued_filename, 'rb')
+ # Pull down and set up the original file
+ self.orig_filename = get_orig_filename(
+ self.entry, self.workbench)
+ self.name_builder = FilenameBuilder(self.orig_filename)
- with queued_file:
- queued_file_charset = chardet.detect(queued_file.read())
+ self.charset = None
+
+ def copy_original(self):
+ copy_original(
+ self.entry, self.orig_filename,
+ self.name_builder.fill('{basename}{ext}'))
+
+ def _detect_charset(self, orig_file):
+ d_charset = chardet.detect(orig_file.read())
# Only select a non-utf-8 charset if chardet is *really* sure
- # Tested with "Feli\x0109an superjaron", which was detecte
- if queued_file_charset['confidence'] < 0.9:
- interpreted_charset = 'utf-8'
+ # Tested with "Feli\x0109an superjaron", which was detected
+ if d_charset['confidence'] < 0.9:
+ self.charset = 'utf-8'
else:
- interpreted_charset = queued_file_charset['encoding']
+ self.charset = d_charset['encoding']
_log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
- queued_file_charset,
- interpreted_charset))
+ d_charset,
+ self.charset))
- queued_file.seek(0) # Rewind the queued file
+ def store_unicode_file(self):
+ with file(self.orig_filename, 'rb') as orig_file:
+ self._detect_charset(orig_file)
+ unicode_filepath = create_pub_filepath(self.entry,
+ 'ascii-portable.txt')
- thumb_filepath = create_pub_filepath(
- entry, 'thumbnail.png')
+ with mgg.public_store.get_file(unicode_filepath, 'wb') \
+ as unicode_file:
+ # Decode the original file from its detected charset (or UTF8)
+ # Encode the unicode instance to ASCII and replace any
+ # non-ASCII with an HTML entity (
+ unicode_file.write(
+ unicode(orig_file.read().decode(
+ self.charset)).encode(
+ 'ascii',
+ 'xmlcharrefreplace'))
- tmp_thumb_filename = os.path.join(
- conversions_subdir, thumb_filepath[-1])
+ self.entry.media_files['unicode'] = unicode_filepath
- ascii_converter_args = {}
+ def generate_thumb(self, font=None, thumb_size=None):
+ with file(self.orig_filename, 'rb') as orig_file:
+ # If no font kwarg, check config
+ if not font:
+ font = self.ascii_config.get('thumbnail_font', None)
+ if not thumb_size:
+ thumb_size = (mgg.global_config['media:thumb']['max_width'],
+ mgg.global_config['media:thumb']['max_height'])
- if ascii_config['thumbnail_font']:
- ascii_converter_args.update(
- {'font': ascii_config['thumbnail_font']})
+ tmp_thumb = os.path.join(
+ self.conversions_subdir,
+ self.name_builder.fill('{basename}.thumbnail.png'))
- converter = asciitoimage.AsciiToImage(
- **ascii_converter_args)
+ ascii_converter_args = {}
- thumb = converter._create_image(
- queued_file.read())
+ # If there is a font from either the config or kwarg, update
+ # ascii_converter_args
+ if font:
+ ascii_converter_args.update(
+ {'font': self.ascii_config['thumbnail_font']})
- with file(tmp_thumb_filename, 'w') as thumb_file:
- thumb.thumbnail(
- (mgg.global_config['media:thumb']['max_width'],
- mgg.global_config['media:thumb']['max_height']),
- Image.ANTIALIAS)
- thumb.save(thumb_file)
+ converter = asciitoimage.AsciiToImage(
+ **ascii_converter_args)
- _log.debug('Copying local file to public storage')
- mgg.public_store.copy_local_to_storage(
- tmp_thumb_filename, thumb_filepath)
+ thumb = converter._create_image(
+ orig_file.read())
- queued_file.seek(0)
+ with file(tmp_thumb, 'w') as thumb_file:
+ thumb.thumbnail(
+ thumb_size,
+ Image.ANTIALIAS)
+ thumb.save(thumb_file)
- original_filepath = create_pub_filepath(entry, queued_filepath[-1])
+ _log.debug('Copying local file to public storage')
+ store_public(self.entry, 'thumb', tmp_thumb,
+ self.name_builder.fill('{basename}.thumbnail.jpg'))
- with mgg.public_store.get_file(original_filepath, 'wb') \
- as original_file:
- original_file.write(queued_file.read())
- queued_file.seek(0) # Rewind *again*
+class InitialProcessor(CommonAsciiProcessor):
+ """
+ Initial processing step for new ascii media
+ """
+ name = "initial"
+ description = "Initial processing"
- unicode_filepath = create_pub_filepath(entry, 'ascii-portable.txt')
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
- with mgg.public_store.get_file(unicode_filepath, 'wb') \
- as unicode_file:
- # Decode the original file from its detected charset (or UTF8)
- # Encode the unicode instance to ASCII and replace any non-ASCII
- # with an HTML entity (
- unicode_file.write(
- unicode(queued_file.read().decode(
- interpreted_charset)).encode(
- 'ascii',
- 'xmlcharrefreplace'))
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
- # Remove queued media file from storage and database.
- # queued_filepath is in the task_id directory which should
- # be removed too, but fail if the directory is not empty to be on
- # the super-safe side.
- mgg.queue_store.delete_file(queued_filepath) # rm file
- mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
- entry.queued_media_file = []
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
- media_files_dict = entry.setdefault('media_files', {})
- media_files_dict['thumb'] = thumb_filepath
- media_files_dict['unicode'] = unicode_filepath
- media_files_dict['original'] = original_filepath
+ parser.add_argument(
+ '--font',
+ help='the thumbnail font')
- entry.save()
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['thumb_size', 'font'])
+
+ def process(self, thumb_size=None, font=None):
+ self.common_setup()
+ self.store_unicode_file()
+ self.generate_thumb(thumb_size=thumb_size, font=font)
+ self.copy_original()
+ self.delete_queue_file()
+
+
+class AsciiProcessingManager(ProcessingManager):
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
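The two text-handling steps above are easy to try in isolation: chardet picks a charset (falling back to utf-8 when its confidence is below 0.9), and the decoded text is then re-encoded as pure ASCII with HTML character references. A small illustration using the Esperanto test string mentioned in the comments:

    import chardet

    raw = u'Feli\u0109an superjaron'.encode('utf-8')
    detected = chardet.detect(raw)
    charset = detected['encoding'] if detected['confidence'] >= 0.9 else 'utf-8'
    ascii_bytes = raw.decode(charset).encode('ascii', 'xmlcharrefreplace')
    # the xmlcharrefreplace handler turns the non-ASCII letter into &#265;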
From 698c7a8bc602ead69fed2f0307d74c3528a38e0d Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 13 Aug 2013 17:42:42 -0700
Subject: [PATCH 075/160] added file argument to video resizer and added ascii
resizer
---
mediagoblin/media_types/ascii/processing.py | 46 +++++++++++++++++++++
mediagoblin/media_types/video/processing.py | 10 ++++-
2 files changed, 54 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/ascii/processing.py b/mediagoblin/media_types/ascii/processing.py
index 4cf8081a..82ee9cd7 100644
--- a/mediagoblin/media_types/ascii/processing.py
+++ b/mediagoblin/media_types/ascii/processing.py
@@ -189,6 +189,52 @@ class InitialProcessor(CommonAsciiProcessor):
self.delete_queue_file()
+class Resizer(CommonAsciiProcessor):
+ """
+ Resizing process steps for processed media
+ """
+ name = 'resize'
+ description = 'Resize thumbnail'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--thumb_size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ # Needed for gmg reprocess thumbs to work
+ parser.add_argument(
+ 'file',
+ nargs='?',
+ default='thumb')
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'file'])
+
+ def process(self, thumb_size=None, file=None):
+ self.common_setup()
+ self.generate_thumb(thumb_size=thumb_size)
+
+
class AsciiProcessingManager(ProcessingManager):
def __init__(self):
super(self.__class__, self).__init__()
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index 3f96dc66..ab78e8ed 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -309,14 +309,20 @@ class Resizer(CommonVideoProcessor):
metavar=('max_width', 'max_height'),
type=int)
+ # Needed for gmg reprocess thumbs to work
+ parser.add_argument(
+ 'file',
+ nargs='?',
+ default='thumb')
+
return parser
@classmethod
def args_to_request(cls, args):
return request_from_args(
- args, ['thumb_size'])
+ args, ['thumb_size', 'file'])
- def process(self, thumb_size=None):
+ def process(self, thumb_size=None, file=None):
self.common_setup()
self.generate_thumb(thumb_size=thumb_size)
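The new positional argument is deliberately optional: with nargs='?' and a default of 'thumb', the resize step still receives file='thumb' when no file name is given on the command line, which is what the "gmg reprocess thumbs" comment relies on. A minimal argparse check:

    import argparse

    parser = argparse.ArgumentParser(prog='resize')
    parser.add_argument('--thumb_size', nargs=2,
                        metavar=('max_width', 'max_height'), type=int)
    parser.add_argument('file', nargs='?', default='thumb')

    assert parser.parse_args([]).file == 'thumb'
    assert parser.parse_args(['thumb']).file == 'thumb'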
From 5fabbcc4975bb8987a9975626d382e662fad6634 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 09:09:35 -0700
Subject: [PATCH 076/160] Added pdf Initial processor
---
mediagoblin/media_types/pdf/processing.py | 169 +++++++++++++++++-----
1 file changed, 130 insertions(+), 39 deletions(-)
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index f35b4376..7ee17e9d 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -230,51 +230,142 @@ def pdf_info(original):
return ret_dict
-def process_pdf(proc_state):
- """Code to process a pdf file. Will be run by celery.
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
+class CommonPdfProcessor(MediaProcessor):
"""
- entry = proc_state.entry
- workbench = proc_state.workbench
+ Provides a base for various pdf processing steps
+ """
+ def common_setup(self):
+ """
+ Set up common pdf processing steps
+ """
+ # Pull down and set up the original file
+ self.orig_filename = get_orig_filename(
+ self.entry, self.workbench)
+ self.name_builder = FilenameBuilder(self.orig_filename)
- queued_filename = proc_state.get_queued_filename()
- name_builder = FilenameBuilder(queued_filename)
+ self._set_pdf_filename()
- # Copy our queued local workbench to its final destination
- original_dest = name_builder.fill('{basename}{ext}')
- proc_state.copy_original(original_dest)
+ def _set_pdf_filename(self):
+ if self.name_builder.ext == 'pdf':
+ self.pdf_filename = self.orig_filename
+ else:
+ self.pdf_filename = self.name_builder.fill('{basename}.pdf')
- # Create a pdf if this is a different doc, store pdf for viewer
- ext = queued_filename.rsplit('.', 1)[-1].lower()
- if ext == 'pdf':
- pdf_filename = queued_filename
- else:
- pdf_filename = queued_filename.rsplit('.', 1)[0] + '.pdf'
- unoconv = where('unoconv')
- Popen(executable=unoconv,
- args=[unoconv, '-v', '-f', 'pdf', queued_filename]).wait()
- if not os.path.exists(pdf_filename):
- _log.debug('unoconv failed to convert file to pdf')
- raise BadMediaFail()
- proc_state.store_public(keyname=u'pdf', local_file=pdf_filename)
+ def copy_original(self):
+ copy_original(
+ self.entry, self.orig_filename,
+ self.name_builder.fill('{basename}{ext}'))
- pdf_info_dict = pdf_info(pdf_filename)
+ def generate_thumb(self, thumb_size=None):
+ if not thumb_size:
+ thumb_size = (mgg.global_config['media:thumb']['max_width'],
+ mgg.global_config['media:thumb']['max_height'])
- for name, width, height in [
- (u'thumb', mgg.global_config['media:thumb']['max_width'],
- mgg.global_config['media:thumb']['max_height']),
- (u'medium', mgg.global_config['media:medium']['max_width'],
- mgg.global_config['media:medium']['max_height']),
- ]:
- filename = name_builder.fill('{basename}.%s.png' % name)
- path = workbench.joinpath(filename)
- create_pdf_thumb(pdf_filename, path, width, height)
- assert(os.path.exists(path))
- proc_state.store_public(keyname=name, local_file=path)
+ # Note: pdftocairo adds '.png', so don't include an ext
+ thumb_filename = self.name_builder.fill('{basename}.thumbnail')
- proc_state.delete_queue_file()
+ executable = where('pdftocairo')
+ args = [executable, '-scale-to', str(thumb_size),
+ '-singlefile', '-png', self.pdf_filename, thumb_filename]
- entry.media_data_init(**pdf_info_dict)
- entry.save()
+ _log.debug('calling {0}'.format(repr(' '.join(args))))
+ Popen(executable=executable, args=args).wait()
+
+ store_public(self.entry, 'thumb', thumb_filename,
+ self.name_builder.fill('{basename}.thumbnail.png'))
+
+ def generate_pdf(self):
+ """
+ Store the pdf. If the file is not a pdf, make it a pdf
+ """
+ if self.name_builder.ext != 'pdf':
+ unoconv = where('unoconv')
+ Popen(executable=unoconv,
+ args=[unoconv, '-v', '-f', 'pdf', self.orig_filename]).wait()
+
+ if not os.path.exists(self.pdf_filename):
+ _log.debug('unoconv failed to convert file to pdf')
+ raise BadMediaFail()
+
+ store_public(self.entry, 'pdf', self.pdf_filename,
+ self.name_builder.fill('{basename}.pdf'))
+
+ def extract_pdf_info(self):
+ pdf_info_dict = pdf_info(self.pdf_filename)
+ entry.media_data_init(**pdf_info_dict)
+
+ def generate_medium(self, size=None):
+ if not size:
+ size = (mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
+
+ # Note: pdftocairo adds '.png', so don't include an ext
+ filename = self.name_builder.fill('{basename}.medium')
+
+ executable = where('pdftocairo')
+ args = [executable, '-scale-to', str(size),
+ '-singlefile', '-png', self.pdf_filename, filename]
+
+ _log.debug('calling {0}'.format(repr(' '.join(args))))
+ Popen(executable=executable, args=args).wait()
+
+ store_public(self.entry, 'thumb', filename,
+ self.name_builder.fill('{basename}.medium.png'))
+
+class InitialProcessor(CommonPdfProcessor):
+ """
+ Initial processing step for new pdfs
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--thumb-size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'thumb_size'])
+
+ def process(self, size=None, thumb_size=None):
+ self.common_setup()
+ self.generate_pdf()
+ self.extract_pdf_info()
+ self.copy_original()
+ self.generate_medium(size=size)
+ self.generate_thumb(thumb_size=thumb_size)
+ self.delete_queue_file()
+
+
+class PdfProcessingManager(ProcessingManager):
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
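generate_thumb(), generate_medium() and generate_pdf() all follow the same shell-out pattern: resolve the binary on PATH, build an argv list, and Popen(...).wait() until it finishes. An illustrative, self-contained version of that pattern (distutils' find_executable stands in for the module's own where() helper, and the file names are made up):

    from distutils.spawn import find_executable
    from subprocess import Popen

    executable = find_executable('pdftocairo')   # None when pdftocairo is missing
    if executable is not None:
        args = [executable, '-scale-to', '180', '-singlefile', '-png',
                'input.pdf', 'input.thumbnail']  # pdftocairo appends .png itself
        Popen(executable=executable, args=args).wait()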
From 696b0ec64bd07c98b8049e56c8742dd455888cd9 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 09:13:25 -0700
Subject: [PATCH 077/160] added pdf Resizer
---
mediagoblin/media_types/pdf/processing.py | 53 ++++++++++++++++++++++-
1 file changed, 52 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index 7ee17e9d..5e82f672 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -20,7 +20,10 @@ from subprocess import PIPE, Popen
from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (create_pub_filepath,
- FilenameBuilder, BadMediaFail)
+ FilenameBuilder, BadMediaFail,
+ MediaProcessor, ProcessingManager,
+ request_from_args, get_orig_filename,
+ store_public, copy_original)
from mediagoblin.tools.translate import fake_ugettext_passthrough as _
_log = logging.getLogger(__name__)
@@ -365,7 +368,55 @@ class InitialProcessor(CommonPdfProcessor):
self.delete_queue_file()
+class Resizer(CommonImageProcessor):
+ """
+ Resizing process steps for processed pdfs
+ """
+ name = 'resize'
+ description = 'Resize thumbnail and medium'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ 'file',
+ choices=['medium', 'thumb'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'file'])
+
+ def process(self, file, size=None):
+ self.common_setup()
+ if file == 'medium':
+ self.generate_medium(size=size)
+ elif file == 'thumb':
+ self.generate_thumb(size=size)
+
+
class PdfProcessingManager(ProcessingManager):
def __init__(self):
super(self.__class__, self).__init__()
self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
From 40554b339505a66a0adfd98158fb5094005c6577 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 09:40:47 -0700
Subject: [PATCH 078/160] Use workbench for temp files, refactor
_set_pdf_filename, and correct imports
---
mediagoblin/media_types/pdf/processing.py | 47 ++++++++++++++---------
1 file changed, 28 insertions(+), 19 deletions(-)
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index 5e82f672..8294fbe2 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -13,17 +13,18 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
import os
import logging
import dateutil.parser
from subprocess import PIPE, Popen
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import (create_pub_filepath,
- FilenameBuilder, BadMediaFail,
- MediaProcessor, ProcessingManager,
- request_from_args, get_orig_filename,
- store_public, copy_original)
+from mediagoblin.processing import (
+ FilenameBuilder, BadMediaFail,
+ MediaProcessor, ProcessingManager,
+ request_from_args, get_orig_filename,
+ store_public, copy_original)
from mediagoblin.tools.translate import fake_ugettext_passthrough as _
_log = logging.getLogger(__name__)
@@ -252,8 +253,11 @@ class CommonPdfProcessor(MediaProcessor):
def _set_pdf_filename(self):
if self.name_builder.ext == 'pdf':
self.pdf_filename = self.orig_filename
+ elif self.entry.media_files.get('pdf'):
+ self.pdf_filename = self.workbench.local_file(
+ mgg.public_store, self.entry.media_files['pdf'])
else:
- self.pdf_filename = self.name_builder.fill('{basename}.pdf')
+ self.pdf_filename = self._generate_pdf()
def copy_original(self):
copy_original(
@@ -266,7 +270,9 @@ class CommonPdfProcessor(MediaProcessor):
mgg.global_config['media:thumb']['max_height'])
# Note: pdftocairo adds '.png', so don't include an ext
- thumb_filename = self.name_builder.fill('{basename}.thumbnail')
+ thumb_filename = os.path.join(self.workbench.dir,
+ self.name_builder.fill(
+ '{basename}.thumbnail'))
executable = where('pdftocairo')
args = [executable, '-scale-to', str(thumb_size),
@@ -278,25 +284,27 @@ class CommonPdfProcessor(MediaProcessor):
store_public(self.entry, 'thumb', thumb_filename,
self.name_builder.fill('{basename}.thumbnail.png'))
- def generate_pdf(self):
+ def _generate_pdf(self):
"""
Store the pdf. If the file is not a pdf, make it a pdf
"""
- if self.name_builder.ext != 'pdf':
- unoconv = where('unoconv')
- Popen(executable=unoconv,
- args=[unoconv, '-v', '-f', 'pdf', self.orig_filename]).wait()
+ unoconv = where('unoconv')
+ Popen(executable=unoconv,
+ args=[unoconv, '-v', '-f', 'pdf', self.orig_filename]).wait()
- if not os.path.exists(self.pdf_filename):
- _log.debug('unoconv failed to convert file to pdf')
- raise BadMediaFail()
+ if not os.path.exists(self.pdf_filename):
+ _log.debug('unoconv failed to convert file to pdf')
+ raise BadMediaFail()
store_public(self.entry, 'pdf', self.pdf_filename,
self.name_builder.fill('{basename}.pdf'))
+ return self.workbench.local_file(
+ mgg.public_store, self.entry.media_files['pdf'])
+
def extract_pdf_info(self):
pdf_info_dict = pdf_info(self.pdf_filename)
- entry.media_data_init(**pdf_info_dict)
+ self.entry.media_data_init(**pdf_info_dict)
def generate_medium(self, size=None):
if not size:
@@ -304,7 +312,8 @@ class CommonPdfProcessor(MediaProcessor):
mgg.global_config['media:medium']['max_height'])
# Note: pdftocairo adds '.png', so don't include an ext
- filename = self.name_builder.fill('{basename}.medium')
+ filename = os.path.join(self.workbench.dir,
+ self.name_builder.fill('{basename}.medium'))
executable = where('pdftocairo')
args = [executable, '-scale-to', str(size),
@@ -316,6 +325,7 @@ class CommonPdfProcessor(MediaProcessor):
store_public(self.entry, 'thumb', filename,
self.name_builder.fill('{basename}.medium.png'))
+
class InitialProcessor(CommonPdfProcessor):
"""
Initial processing step for new pdfs
@@ -360,7 +370,6 @@ class InitialProcessor(CommonPdfProcessor):
def process(self, size=None, thumb_size=None):
self.common_setup()
- self.generate_pdf()
self.extract_pdf_info()
self.copy_original()
self.generate_medium(size=size)
@@ -368,7 +377,7 @@ class InitialProcessor(CommonPdfProcessor):
self.delete_queue_file()
-class Resizer(CommonImageProcessor):
+class Resizer(CommonPdfProcessor):
"""
Resizing process steps for processed pdfs
"""
From ab64ca3474af63c5d6187580a702095ff222ab75 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 09:46:44 -0700
Subject: [PATCH 079/160] add reprocess_manager to __init__
---
mediagoblin/media_types/pdf/__init__.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/pdf/__init__.py b/mediagoblin/media_types/pdf/__init__.py
index 67509ddc..bc5c373b 100644
--- a/mediagoblin/media_types/pdf/__init__.py
+++ b/mediagoblin/media_types/pdf/__init__.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.pdf.processing import process_pdf, \
+from mediagoblin.media_types.pdf.processing import PdfProcessingManager, \
sniff_handler
from mediagoblin.tools import pluginapi
@@ -29,7 +29,6 @@ def setup_plugin():
class PDFMediaManager(MediaManagerBase):
human_readable = "PDF"
- processor = staticmethod(process_pdf)
display_template = "mediagoblin/media_displays/pdf.html"
default_thumb = "images/media_thumbs/pdf.jpg"
@@ -44,4 +43,5 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: PDFMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: PdfProcessingManager,
}
From 77daec9224bf78a71d49a5ddfca72f9bb10efb11 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 10:38:13 -0700
Subject: [PATCH 080/160] Added initial stl processor
---
mediagoblin/media_types/stl/processing.py | 266 ++++++++++++++--------
1 file changed, 175 insertions(+), 91 deletions(-)
diff --git a/mediagoblin/media_types/stl/processing.py b/mediagoblin/media_types/stl/processing.py
index 53751416..6adc68ae 100644
--- a/mediagoblin/media_types/stl/processing.py
+++ b/mediagoblin/media_types/stl/processing.py
@@ -14,6 +14,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import argparse
import os
import json
import logging
@@ -21,8 +22,11 @@ import subprocess
import pkg_resources
from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import create_pub_filepath, \
- FilenameBuilder
+from mediagoblin.processing import (
+ FilenameBuilder, MediaProcessor,
+ ProcessingManager, request_from_args,
+ get_orig_filename, store_public,
+ copy_original)
from mediagoblin.media_types.stl import model_loader
@@ -75,49 +79,60 @@ def blender_render(config):
env=env)
-def process_stl(proc_state):
- """Code to process an stl or obj model. Will be run by celery.
-
- A Workbench() represents a local tempory dir. It is automatically
- cleaned up when this function exits.
+class CommonStlProcessor(MediaProcessor):
+ """
+ Provides a common base for various stl processing steps
"""
- entry = proc_state.entry
- workbench = proc_state.workbench
- queued_filepath = entry.queued_media_file
- queued_filename = workbench.localized_file(
- mgg.queue_store, queued_filepath, 'source')
- name_builder = FilenameBuilder(queued_filename)
+ def common_setup(self):
+ # Pull down and set up the original file
+ self.orig_filename = get_orig_filename(
+ self.entry, self.workbench)
+ self.name_builder = FilenameBuilder(self.orig_filename)
- ext = queued_filename.lower().strip()[-4:]
- if ext.startswith("."):
- ext = ext[1:]
- else:
- ext = None
+ self._set_ext()
+ self._set_model()
+ self._set_greatest()
- # Attempt to parse the model file and divine some useful
- # information about it.
- with open(queued_filename, 'rb') as model_file:
- model = model_loader.auto_detect(model_file, ext)
+ def _set_ext(self):
+ ext = self.name_builder.ext
- # generate preview images
- greatest = [model.width, model.height, model.depth]
- greatest.sort()
- greatest = greatest[-1]
+ if not ext:
+ ext = None
- def snap(name, camera, width=640, height=640, project="ORTHO"):
- filename = name_builder.fill(name)
- workbench_path = workbench.joinpath(filename)
+ self.ext = ext
+
+ def _set_model(self):
+ """
+ Attempt to parse the model file and divine some useful
+ information about it.
+ """
+ with open(self.orig_filename, 'rb') as model_file:
+ self.model = model_loader.auto_detect(model_file, self.ext)
+
+ def _set_greatest(self):
+ greatest = [self.model.width, self.model.height, self.model.depth]
+ greatest.sort()
+ self.greatest = greatest[-1]
+
+ def copy_original(self):
+ copy_original(
+ self.entry, self.orig_filename,
+ self.name_builder.fill('{basename}{ext}'))
+
+ def _snap(self, keyname, name, camera, size, project="ORTHO"):
+ filename = self.name_builder.fill(name)
+ workbench_path = self.workbench.joinpath(filename)
shot = {
- "model_path": queued_filename,
- "model_ext": ext,
+ "model_path": self.orig_filename,
+ "model_ext": self.ext,
"camera_coord": camera,
- "camera_focus": model.average,
- "camera_clip": greatest*10,
- "greatest": greatest,
+ "camera_focus": self.model.average,
+ "camera_clip": self.greatest*10,
+ "greatest": self.greatest,
"projection": project,
- "width": width,
- "height": height,
+ "width": size[0],
+ "height": size[1],
"out_file": workbench_path,
}
blender_render(shot)
@@ -126,70 +141,139 @@ def process_stl(proc_state):
assert os.path.exists(workbench_path)
# copy it up!
- with open(workbench_path, 'rb') as rendered_file:
- public_path = create_pub_filepath(entry, filename)
+ store_public(self.entry, keyname, workbench_path, filename)
- with mgg.public_store.get_file(public_path, "wb") as public_file:
- public_file.write(rendered_file.read())
+ def generate_thumb(self, thumb_size=None):
+ if not thumb_size:
+ thumb_size = (mgg.global_config['media:thumb']['max_width'],
+ mgg.global_config['media:thumb']['max_height'])
- return public_path
+ self._snap(
+ "thumb",
+ "{basename}.thumb.jpg",
+ [0, self.greatest*-1.5, self.greatest],
+ thumb_size,
+ project="PERSP")
- thumb_path = snap(
- "{basename}.thumb.jpg",
- [0, greatest*-1.5, greatest],
- mgg.global_config['media:thumb']['max_width'],
- mgg.global_config['media:thumb']['max_height'],
- project="PERSP")
+ def generate_perspective(self, size=None):
+ if not size:
+ size = (mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
- perspective_path = snap(
- "{basename}.perspective.jpg",
- [0, greatest*-1.5, greatest], project="PERSP")
+ self._snap(
+ "perspective",
+ "{basename}.perspective.jpg",
+ [0, self.greatest*-1.5, self.greatest],
+ size,
+ project="PERSP")
- topview_path = snap(
- "{basename}.top.jpg",
- [model.average[0], model.average[1], greatest*2])
+ def generate_topview(self, size=None):
+ if not size:
+ size = (mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
- frontview_path = snap(
- "{basename}.front.jpg",
- [model.average[0], greatest*-2, model.average[2]])
+ self._snap(
+ "top",
+ "{basename}.top.jpg",
+ [self.model.average[0], self.model.average[1],
+ self.greatest*2],
+ size)
- sideview_path = snap(
- "{basename}.side.jpg",
- [greatest*-2, model.average[1], model.average[2]])
+ def generate_frontview(self, size=None):
+ if not size:
+ size = (mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
- ## Save the public file stuffs
- model_filepath = create_pub_filepath(
- entry, name_builder.fill('{basename}{ext}'))
+ self._snap(
+ "front",
+ "{basename}.front.jpg",
+ [self.model.average[0], self.greatest*-2,
+ self.model.average[2]],
+ size)
- with mgg.public_store.get_file(model_filepath, 'wb') as model_file:
- with open(queued_filename, 'rb') as queued_file:
- model_file.write(queued_file.read())
+ def generate_sideview(self, size=None):
+ if not size:
+ size = (mgg.global_config['media:medium']['max_width'],
+ mgg.global_config['media:medium']['max_height'])
- # Remove queued media file from storage and database.
- # queued_filepath is in the task_id directory which should
- # be removed too, but fail if the directory is not empty to be on
- # the super-safe side.
- mgg.queue_store.delete_file(queued_filepath) # rm file
- mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
- entry.queued_media_file = []
+ self._snap(
+ "side",
+ "{basename}.side.jpg",
+ [self.greatest*-2, self.model.average[1],
+ self.model.average[2]],
+ size)
- # Insert media file information into database
- media_files_dict = entry.setdefault('media_files', {})
- media_files_dict[u'original'] = model_filepath
- media_files_dict[u'thumb'] = thumb_path
- media_files_dict[u'perspective'] = perspective_path
- media_files_dict[u'top'] = topview_path
- media_files_dict[u'side'] = sideview_path
- media_files_dict[u'front'] = frontview_path
+ def store_dimensions(self):
+ """
+ Put model dimensions into the database
+ """
+ dimensions = {
+ "center_x": self.model.average[0],
+ "center_y": self.model.average[1],
+ "center_z": self.model.average[2],
+ "width": self.model.width,
+ "height": self.model.height,
+ "depth": self.model.depth,
+ "file_type": self.ext,
+ }
+ self.entry.media_data_init(**dimensions)
- # Put model dimensions into the database
- dimensions = {
- "center_x" : model.average[0],
- "center_y" : model.average[1],
- "center_z" : model.average[2],
- "width" : model.width,
- "height" : model.height,
- "depth" : model.depth,
- "file_type" : ext,
- }
- entry.media_data_init(**dimensions)
+
+class InitialProcessor(CommonStlProcessor):
+ """
+ Initial processing step for new stls
+ """
+ name = "initial"
+ description = "Initial processing"
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in (
+ "unprocessed", "failed")
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ '--thumb-size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'thumb_size'])
+
+ def process(self, size=None, thumb_size=None):
+ self.common_setup()
+ self.generate_thumb(thumb_size=thumb_size)
+ self.generate_perspective(size=size)
+ self.generate_topview(size=size)
+ self.generate_frontview(size=size)
+ self.generate_sideview(size=size)
+ self.store_dimensions()
+ self.copy_original()
+ self.delete_queue_file()
+
+
+class StlProcessingManager(ProcessingManager):
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.add_processor(InitialProcessor)
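The geometry set-up above boils down to two numbers per shot: "greatest", the largest of the model's three dimensions (sorted(...)[-1] is simply max(...)), and a camera coordinate derived from it. A small numeric illustration with made-up dimensions:

    width, height, depth = 40.0, 25.0, 10.0
    greatest = max(width, height, depth)            # 40.0, same as sorted([...])[-1]
    thumb_camera = [0, greatest * -1.5, greatest]   # [0, -60.0, 40.0], the thumb shot
    camera_clip = greatest * 10                     # 400.0, far clip handed to Blender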
From a3cc93c6af284c7f3cd6712611bfc6fc0f040a4f Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 10:40:14 -0700
Subject: [PATCH 081/160] added resizer for stl media
---
mediagoblin/media_types/stl/processing.py | 51 +++++++++++++++++++++++
1 file changed, 51 insertions(+)
diff --git a/mediagoblin/media_types/stl/processing.py b/mediagoblin/media_types/stl/processing.py
index 6adc68ae..2ec4241c 100644
--- a/mediagoblin/media_types/stl/processing.py
+++ b/mediagoblin/media_types/stl/processing.py
@@ -273,7 +273,58 @@ class InitialProcessor(CommonStlProcessor):
self.delete_queue_file()
+class Resizer(CommonStlProcessor):
+ """
+ Resizing process steps for processed stls
+ """
+ name = 'resize'
+ description = 'Resize thumbnail and mediums'
+
+ @classmethod
+ def media_is_eligible(cls, entry=None, state=None):
+ """
+ Determine if this media type is eligible for processing
+ """
+ if not state:
+ state = entry.state
+ return state in 'processed'
+
+ @classmethod
+ def generate_parser(cls):
+ parser = argparse.ArgumentParser(
+ description=cls.description,
+ prog=cls.name)
+
+ parser.add_argument(
+ '--size',
+ nargs=2,
+ metavar=('max_width', 'max_height'),
+ type=int)
+
+ parser.add_argument(
+ 'file',
+ choices=['medium', 'thumb'])
+
+ return parser
+
+ @classmethod
+ def args_to_request(cls, args):
+ return request_from_args(
+ args, ['size', 'file'])
+
+ def process(self, file, size=None):
+ self.common_setup()
+ if file == 'medium':
+ self.generate_perspective(size=size)
+ self.generate_topview(size=size)
+ self.generate_frontview(size=size)
+ self.generate_sideview(size=size)
+ elif file == 'thumb':
+ self.generate_thumb(size=size)
+
+
class StlProcessingManager(ProcessingManager):
def __init__(self):
super(self.__class__, self).__init__()
self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
From 2834d84c0cd57c7fd6294678d7872ecf55dc55ba Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 10:41:16 -0700
Subject: [PATCH 082/160] Cleanup image common processor
---
mediagoblin/media_types/image/processing.py | 15 ---------------
1 file changed, 15 deletions(-)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index c3dfc5fe..3b3c2b0f 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -131,21 +131,6 @@ class CommonImageProcessor(MediaProcessor):
"""
Provides a base for various media processing steps
"""
- # Common resizing step
- def resize_step(self):
- pass
-
- @classmethod
- def _add_width_height_args(cls, parser):
- parser.add_argument(
- "--width", default=None,
- help=(
- "Width of the resized image (if not using defaults)"))
- parser.add_argument(
- "--height", default=None,
- help=(
- "Height of the resized image (if not using defaults)"))
-
def common_setup(self):
"""
Set up the workbench directory and pull down the original file
From 2e90b2be16d401507301dfe2369624f2d5cba87f Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 10:47:04 -0700
Subject: [PATCH 083/160] forgot to change __init__ for new processing managers
---
mediagoblin/media_types/ascii/__init__.py | 4 ++--
mediagoblin/media_types/stl/__init__.py | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/mediagoblin/media_types/ascii/__init__.py b/mediagoblin/media_types/ascii/__init__.py
index 4baf8dd3..b0f7551d 100644
--- a/mediagoblin/media_types/ascii/__init__.py
+++ b/mediagoblin/media_types/ascii/__init__.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.ascii.processing import process_ascii, \
+from mediagoblin.media_types.ascii.processing import AsciiProcessingManager, \
sniff_handler
from mediagoblin.tools import pluginapi
@@ -29,7 +29,6 @@ def setup_plugin():
class ASCIIMediaManager(MediaManagerBase):
human_readable = "ASCII"
- processor = staticmethod(process_ascii)
display_template = "mediagoblin/media_displays/ascii.html"
default_thumb = "images/media_thumbs/ascii.jpg"
@@ -43,5 +42,6 @@ hooks = {
'setup': setup_plugin,
'get_media_type_and_manager': get_media_type_and_manager,
('media_manager', MEDIA_TYPE): lambda: ASCIIMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: AsciiProcessingManager,
'sniff_handler': sniff_handler,
}
diff --git a/mediagoblin/media_types/stl/__init__.py b/mediagoblin/media_types/stl/__init__.py
index 1d2a8478..7170a45b 100644
--- a/mediagoblin/media_types/stl/__init__.py
+++ b/mediagoblin/media_types/stl/__init__.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.stl.processing import process_stl, \
+from mediagoblin.media_types.stl.processing import StlProcessingManager, \
sniff_handler
from mediagoblin.tools import pluginapi
@@ -29,7 +29,6 @@ def setup_plugin():
class STLMediaManager(MediaManagerBase):
human_readable = "stereo lithographics"
- processor = staticmethod(process_stl)
display_template = "mediagoblin/media_displays/stl.html"
default_thumb = "images/media_thumbs/video.jpg"
@@ -43,4 +42,5 @@ hooks = {
'get_media_type_and_manager': get_media_type_and_manager,
'sniff_handler': sniff_handler,
('media_manager', MEDIA_TYPE): lambda: STLMediaManager,
+ ('reprocess_manager', MEDIA_TYPE): lambda: StlProcessingManager,
}
From 61b3fc5078751523adeb4b8b5920667dd65ca356 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 10:52:37 -0700
Subject: [PATCH 084/160] don't include the '.' in the ext
---
mediagoblin/media_types/stl/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mediagoblin/media_types/stl/processing.py b/mediagoblin/media_types/stl/processing.py
index 2ec4241c..55938c06 100644
--- a/mediagoblin/media_types/stl/processing.py
+++ b/mediagoblin/media_types/stl/processing.py
@@ -95,7 +95,7 @@ class CommonStlProcessor(MediaProcessor):
self._set_greatest()
def _set_ext(self):
- ext = self.name_builder.ext
+ ext = self.name_builder.ext[1:]
if not ext:
ext = None
From e7672e5b48687f75c08c360ee86f71ec43566512 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 10:58:31 -0700
Subject: [PATCH 085/160] use a tmp_pdf filename
---
mediagoblin/media_types/pdf/processing.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index 8294fbe2..17a3246c 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -288,15 +288,17 @@ class CommonPdfProcessor(MediaProcessor):
"""
Store the pdf. If the file is not a pdf, make it a pdf
"""
+ tmp_pdf = self.orig_filename
+
unoconv = where('unoconv')
Popen(executable=unoconv,
args=[unoconv, '-v', '-f', 'pdf', self.orig_filename]).wait()
- if not os.path.exists(self.pdf_filename):
+ if not os.path.exists(tmp_pdf):
_log.debug('unoconv failed to convert file to pdf')
raise BadMediaFail()
- store_public(self.entry, 'pdf', self.pdf_filename,
+ store_public(self.entry, 'pdf', tmp_pdf,
self.name_builder.fill('{basename}.pdf'))
return self.workbench.local_file(
From 96109a58dab80fc66310d40f0fdeb83fc8c5a5d0 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 11:00:05 -0700
Subject: [PATCH 086/160] oops, localized_file not local_file
---
mediagoblin/media_types/pdf/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index 17a3246c..906be436 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -301,7 +301,7 @@ class CommonPdfProcessor(MediaProcessor):
store_public(self.entry, 'pdf', tmp_pdf,
self.name_builder.fill('{basename}.pdf'))
- return self.workbench.local_file(
+ return self.workbench.localized_file(
mgg.public_store, self.entry.media_files['pdf'])
def extract_pdf_info(self):
From d4380b52b6e49cb0fc1942e77dc8662f38f48ed6 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 11:34:31 -0700
Subject: [PATCH 087/160] include '.png' in filename
---
mediagoblin/media_types/pdf/processing.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index 906be436..c7c85e20 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -281,7 +281,9 @@ class CommonPdfProcessor(MediaProcessor):
_log.debug('calling {0}'.format(repr(' '.join(args))))
Popen(executable=executable, args=args).wait()
- store_public(self.entry, 'thumb', thumb_filename,
+ # since pdftocairo added '.png', we need to include it with the
+ # filename
+ store_public(self.entry, 'thumb', thumb_filename + '.png',
self.name_builder.fill('{basename}.thumbnail.png'))
def _generate_pdf(self):
@@ -324,7 +326,9 @@ class CommonPdfProcessor(MediaProcessor):
_log.debug('calling {0}'.format(repr(' '.join(args))))
Popen(executable=executable, args=args).wait()
- store_public(self.entry, 'thumb', filename,
+ # since pdftocairo added '.png', we need to include it with the
+ # filename
+ store_public(self.entry, 'thumb', filename + '.png',
self.name_builder.fill('{basename}.medium.png'))
From 7a89d27c80c89b646ffda44fe1baf98a0a7b91bc Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 12:13:06 -0700
Subject: [PATCH 088/160] -scale-to only takes 1 size, so choose the smallest
---
mediagoblin/media_types/pdf/processing.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index c7c85e20..f8aea7bb 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -275,7 +275,7 @@ class CommonPdfProcessor(MediaProcessor):
'{basename}.thumbnail'))
executable = where('pdftocairo')
- args = [executable, '-scale-to', str(thumb_size),
+ args = [executable, '-scale-to', str(min(thumb_size)),
'-singlefile', '-png', self.pdf_filename, thumb_filename]
_log.debug('calling {0}'.format(repr(' '.join(args))))
@@ -320,7 +320,7 @@ class CommonPdfProcessor(MediaProcessor):
self.name_builder.fill('{basename}.medium'))
executable = where('pdftocairo')
- args = [executable, '-scale-to', str(size),
+ args = [executable, '-scale-to', str(min(size)),
'-singlefile', '-png', self.pdf_filename, filename]
_log.debug('calling {0}'.format(repr(' '.join(args))))
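pdftocairo's -scale-to flag takes a single pixel value, so the patch passes the smaller of the requested (max_width, max_height) bounds to keep the rendered page inside the box. A small sketch of the argument construction; the pdftocairo_args helper is illustrative:

def pdftocairo_args(executable, pdf_filename, out_filename, size):
    """Build the pdftocairo command line from a (max_width, max_height) pair."""
    # -scale-to accepts one number, so use the smaller bound
    return [executable, '-scale-to', str(min(size)),
            '-singlefile', '-png', pdf_filename, out_filename]

# a (200, 180) thumbnail request becomes '-scale-to 180'
print(pdftocairo_args('/usr/bin/pdftocairo', 'doc.pdf', 'doc.thumbnail', (200, 180)))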
From 8a528add8bbd4f448833311da612e8a1df6bef28 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 12:25:49 -0700
Subject: [PATCH 089/160] rewind the file to the beginning
---
mediagoblin/media_types/ascii/processing.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/mediagoblin/media_types/ascii/processing.py b/mediagoblin/media_types/ascii/processing.py
index 82ee9cd7..d69af1bf 100644
--- a/mediagoblin/media_types/ascii/processing.py
+++ b/mediagoblin/media_types/ascii/processing.py
@@ -87,6 +87,9 @@ class CommonAsciiProcessor(MediaProcessor):
d_charset,
self.charset))
+ # Rewind the file
+ orig_file.seek(0)
+
def store_unicode_file(self):
with file(self.orig_filename, 'rb') as orig_file:
self._detect_charset(orig_file)
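Charset detection reads the whole queued file and leaves the file position at the end, so the read in store_unicode_file() would otherwise come back empty. A standalone illustration of why the seek(0) is needed (using BytesIO in place of the real file):

from io import BytesIO

orig_file = BytesIO(b'some ascii art')

orig_file.read()                  # charset detection consumes the stream
assert orig_file.read() == b''    # ...so a second read sees nothing

orig_file.seek(0)                 # rewind, as the patch does
assert orig_file.read() == b'some ascii art'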
From b95cc59bb9a4e5bf63009d26b33e07002d934cb9 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 12:41:03 -0700
Subject: [PATCH 090/160] size should be a tuple
---
mediagoblin/media_types/audio/processing.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 7000b3c4..0bcac52c 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -147,7 +147,7 @@ class CommonAudioProcessor(MediaProcessor):
self.thumbnailer.thumbnail_spectrogram(
spectrogram_filepath,
thumb_tmp,
- size)
+ tuple(size))
store_public(self.entry, 'thumb', thumb_tmp,
self.name_builder.fill('{basename}.thumbnail.jpg'))
@@ -200,7 +200,7 @@ class InitialProcessor(CommonAudioProcessor):
parser.add_argument(
'--create_spectrogram',
action='store_true',
- help='Create spectogram and thumbnail')
+ help='Create spectogram and thumbnail, will default to config')
return parser
From 100a73a298f739d345cc41d9e5260c170a408c03 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 13:06:08 -0700
Subject: [PATCH 091/160] force thumbnail creation of audio
---
mediagoblin/media_types/audio/transcoders.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/audio/transcoders.py b/mediagoblin/media_types/audio/transcoders.py
index 84e6af7e..150dad8e 100644
--- a/mediagoblin/media_types/audio/transcoders.py
+++ b/mediagoblin/media_types/audio/transcoders.py
@@ -122,8 +122,7 @@ class AudioThumbnailer(object):
int(start_x), 0,
int(stop_x), int(im_h)))
- if th.size[0] > th_w or th.size[1] > th_h:
- th.thumbnail(thumb_size, Image.ANTIALIAS)
+ th.thumbnail(thumb_size, Image.ANTIALIAS)
th.save(dst)
From 79f84d7e479f6b370709c6826c85070ab1996ea6 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 13:47:39 -0700
Subject: [PATCH 092/160] raise an error if the file failed to copy to public
storage
catch copy_local_to_storage errors and raise PublicStoreFail, saving the keyname
---
mediagoblin/processing/__init__.py | 20 +++++++++++++++++++-
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index 0c13e807..e31b70bb 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -356,13 +356,24 @@ def store_public(entry, keyname, local_file, target_name=None,
if target_name is None:
target_name = os.path.basename(local_file)
target_filepath = create_pub_filepath(entry, target_name)
+
if keyname in entry.media_files:
_log.warn("store_public: keyname %r already used for file %r, "
"replacing with %r", keyname,
entry.media_files[keyname], target_filepath)
if delete_if_exists:
mgg.public_store.delete_file(entry.media_files[keyname])
- mgg.public_store.copy_local_to_storage(local_file, target_filepath)
+
+ try:
+ mgg.public_store.copy_local_to_storage(local_file, target_filepath)
+ except:
+ raise PublicStoreFail(keyname=keyname)
+
+ # raise an error if the file failed to copy
+ copied_filepath = mgg.public_store.get_local_path(target_filepath)
+ if not os.path.exists(copied_filepath):
+ raise PublicStoreFail(keyname=keyname)
+
entry.media_files[keyname] = target_filepath
@@ -396,3 +407,10 @@ class BadMediaFail(BaseProcessingFail):
for the media type specified.
"""
general_message = _(u'Invalid file given for media type.')
+
+
+class PublicStoreFail(BaseProcessingFail):
+ """
+ Error that should be raised when copying to public store fails
+ """
+ general_message = _('Copying to public storage failed.')
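With this change, any storage error inside store_public() surfaces as PublicStoreFail carrying the keyname, and a copy that silently produced nothing is caught by the existence check. A hedged sketch of how caller code might react to it; push_to_public is illustrative, while store_public, mark_entry_failed and PublicStoreFail are the names from this module:

from mediagoblin.processing import (
    store_public, mark_entry_failed, PublicStoreFail)

def push_to_public(entry, keyname, local_file, target_name):
    """Hypothetical caller-side handling of the new failure class."""
    try:
        store_public(entry, keyname, local_file, target_name)
    except PublicStoreFail as exc:
        # the keyname passed to the exception is saved with it, so the
        # failure record names the file that could not be copied
        mark_entry_failed(entry.id, exc)
        raise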
From 7d3fda06b03691601bc08b5d88baf1da1c3f83fc Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 14:54:10 -0700
Subject: [PATCH 093/160] catch processing exceptions and ignore them if
entry_orig_state is 'processed'
---
mediagoblin/processing/task.py | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/processing/task.py b/mediagoblin/processing/task.py
index d3770588..df44dd7a 100644
--- a/mediagoblin/processing/task.py
+++ b/mediagoblin/processing/task.py
@@ -93,7 +93,18 @@ class ProcessMedia(task.Task):
_log.debug('Processing {0}'.format(entry))
- processor.process(**reprocess_info)
+ try:
+ processor.process(**reprocess_info)
+ except Exception as exc:
+ if processor.entry_orig_state == 'processed':
+ _log.error(
+ 'Entry {0} failed to process due to the following'
+ ' error: {1}'.format(entry.id, exc))
+ _log.info(
+ 'Setting entry.state back to "processed"')
+ pass
+ else:
+ raise
# We set the state to processed and save the entry here so there's
# no need to save at the end of the processing stage, probably ;)
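The guard only swallows failures when the entry was already 'processed' before this run started, so a botched reprocess never downgrades a working entry, while a first-time upload still propagates the error and gets marked failed. A standalone sketch of that decision (handle_processing_error is illustrative):

def handle_processing_error(entry_orig_state, exc):
    """Ignore reprocessing failures for entries that were already good."""
    if entry_orig_state == 'processed':
        print('ignoring reprocessing error: %s' % exc)
        return 'processed'
    raise exc

# reprocessing an entry that was fine before: keep the old state
assert handle_processing_error('processed', RuntimeError('boom')) == 'processed'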
From d63f78fad9f64646c2cc1917862605bd4a1e55ba Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 15:11:03 -0700
Subject: [PATCH 094/160] forgot to add the ascii resizer to the list of
processors
---
mediagoblin/media_types/ascii/processing.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/mediagoblin/media_types/ascii/processing.py b/mediagoblin/media_types/ascii/processing.py
index d69af1bf..e11ac91e 100644
--- a/mediagoblin/media_types/ascii/processing.py
+++ b/mediagoblin/media_types/ascii/processing.py
@@ -242,3 +242,4 @@ class AsciiProcessingManager(ProcessingManager):
def __init__(self):
super(self.__class__, self).__init__()
self.add_processor(InitialProcessor)
+ self.add_processor(Resizer)
From a2f501982a8ca7d902f6e3ad8f5934e1e72bbae7 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 16:06:14 -0700
Subject: [PATCH 095/160] add quality and filter args to image processors
---
mediagoblin/media_types/image/processing.py | 78 +++++++++++++++------
1 file changed, 57 insertions(+), 21 deletions(-)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 3b3c2b0f..ef415496 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -44,7 +44,7 @@ MEDIA_TYPE = 'mediagoblin.media_types.image'
def resize_image(entry, resized, keyname, target_name, new_size,
- exif_tags, workdir):
+ exif_tags, workdir, quality, filter):
"""
Store a resized version of an image and return its pathname.
@@ -56,17 +56,16 @@ def resize_image(entry, resized, keyname, target_name, new_size,
exif_tags -- EXIF data for the original image
workdir -- directory path for storing converted image files
new_size -- 2-tuple size for the resized image
+ quality -- level of compression used when resizing images
+ filter -- One of BICUBIC, BILINEAR, NEAREST, ANTIALIAS
"""
- config = mgg.global_config['media_type:mediagoblin.media_types.image']
-
resized = exif_fix_image_orientation(resized, exif_tags) # Fix orientation
- filter_config = config['resize_filter']
try:
- resize_filter = PIL_FILTERS[filter_config.upper()]
+ resize_filter = PIL_FILTERS[filter.upper()]
except KeyError:
raise Exception('Filter "{0}" not found, choose one of {1}'.format(
- unicode(filter_config),
+ unicode(filter),
u', '.join(PIL_FILTERS.keys())))
resized.thumbnail(new_size, resize_filter)
@@ -74,13 +73,13 @@ def resize_image(entry, resized, keyname, target_name, new_size,
# Copy the new file to the conversion subdir, then remotely.
tmp_resized_filename = os.path.join(workdir, target_name)
with file(tmp_resized_filename, 'w') as resized_file:
- resized.save(resized_file, quality=config['quality'])
+ resized.save(resized_file, quality=quality)
store_public(entry, keyname, tmp_resized_filename, target_name)
def resize_tool(entry,
force, keyname, orig_file, target_name,
- conversions_subdir, exif_tags, new_size=None):
+ conversions_subdir, exif_tags, quality, filter, new_size=None):
# Use the default size if new_size was not given
if not new_size:
max_width = mgg.global_config['media:' + keyname]['max_width']
@@ -102,7 +101,8 @@ def resize_tool(entry,
resize_image(
entry, im, unicode(keyname), target_name,
new_size,
- exif_tags, conversions_subdir)
+ exif_tags, conversions_subdir,
+ quality, filter)
SUPPORTED_FILETYPES = ['png', 'gif', 'jpg', 'jpeg', 'tiff']
@@ -135,6 +135,9 @@ class CommonImageProcessor(MediaProcessor):
"""
Set up the workbench directory and pull down the original file
"""
+ self.image_config = mgg.global_config[
+ 'media_type:mediagoblin.media_types.image']
+
## @@: Should this be two functions?
# Conversions subdirectory to avoid collisions
self.conversions_subdir = os.path.join(
@@ -149,15 +152,28 @@ class CommonImageProcessor(MediaProcessor):
# Exif extraction
self.exif_tags = extract_exif(self.orig_filename)
- def generate_medium_if_applicable(self, size=None):
+ def generate_medium_if_applicable(self, size=None, quality=None,
+ filter=None):
+ if not quality:
+ quality = self.image_config['quality']
+ if not filter:
+ filter = self.image_config['resize_filter']
+
resize_tool(self.entry, False, 'medium', self.orig_filename,
self.name_builder.fill('{basename}.medium{ext}'),
- self.conversions_subdir, self.exif_tags, size)
+ self.conversions_subdir, self.exif_tags, quality,
+ filter, size)
+
+ def generate_thumb(self, size=None, quality=None, filter=None):
+ if not quality:
+ quality = self.image_config['quality']
+ if not filter:
+ filter = self.image_config['filter']
- def generate_thumb(self, size=None):
resize_tool(self.entry, True, 'thumb', self.orig_filename,
self.name_builder.fill('{basename}.thumbnail{ext}'),
- self.conversions_subdir, self.exif_tags, size)
+ self.conversions_subdir, self.exif_tags, quality,
+ filter, size)
def copy_original(self):
copy_original(
@@ -219,17 +235,27 @@ class InitialProcessor(CommonImageProcessor):
metavar=('max_width', 'max_height'),
type=int)
+ parser.add_argument(
+ '--filter',
+ choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+ parser.add_argument(
+ '--quality',
+ type=int,
+ help='level of compression used when resizing images')
+
return parser
@classmethod
def args_to_request(cls, args):
return request_from_args(
- args, ['size', 'thumb_size'])
+ args, ['size', 'thumb_size', 'filter', 'quality'])
- def process(self, size=None, thumb_size=None):
+ def process(self, size=None, thumb_size=None, quality=None, filter=None):
self.common_setup()
- self.generate_medium_if_applicable(size=size)
- self.generate_thumb(size=thumb_size)
+ self.generate_medium_if_applicable(size=size, filter=filter,
+ quality=quality)
+ self.generate_thumb(size=thumb_sizei, filter=filter, quality=quality)
self.copy_original()
self.extract_metadata()
self.delete_queue_file()
@@ -267,6 +293,15 @@ class Resizer(CommonImageProcessor):
metavar=('max_width', 'max_height'),
type=int)
+ parser.add_argument(
+ '--filter',
+ choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+ parser.add_argument(
+ '--quality',
+ type=int,
+ help='level of compression used when resizing images')
+
parser.add_argument(
'file',
choices=['medium', 'thumb'])
@@ -276,14 +311,15 @@ class Resizer(CommonImageProcessor):
@classmethod
def args_to_request(cls, args):
return request_from_args(
- args, ['size', 'file'])
+ args, ['size', 'file', 'quality', 'filter'])
- def process(self, file, size=None):
+ def process(self, file, size=None, filter=None, quality=None):
self.common_setup()
if file == 'medium':
- self.generate_medium_if_applicable(size=size)
+ self.generate_medium_if_applicable(size=size, filter=filter,
+ quality=quality)
elif file == 'thumb':
- self.generate_thumb(size=size)
+ self.generate_thumb(size=size, filter=filter, quality=quality)
class ImageProcessingManager(ProcessingManager):
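At the PIL level the new arguments map onto the resample filter passed to Image.thumbnail() and the quality passed to Image.save(); when neither flag is given, the values still fall back to the media-type config. A self-contained sketch of that mapping (file paths and sizes here are made up):

from PIL import Image

PIL_FILTERS = {
    'NEAREST': Image.NEAREST,
    'BILINEAR': Image.BILINEAR,
    'BICUBIC': Image.BICUBIC,
    'ANTIALIAS': Image.ANTIALIAS,
}

def resize(src_path, dst_path, new_size, quality=90, filter='ANTIALIAS'):
    # look up the PIL resample filter by name, as resize_image() does
    resize_filter = PIL_FILTERS[filter.upper()]
    im = Image.open(src_path)
    im.thumbnail(new_size, resize_filter)   # in place, keeps aspect ratio
    im.save(dst_path, quality=quality)      # quality matters for JPEG output

# resize('input.jpg', 'output.medium.jpg', (640, 640), quality=85, filter='BICUBIC')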
From 63021eb6092d080178215ff3647469fee27d789b Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 16:07:27 -0700
Subject: [PATCH 096/160] typos
---
mediagoblin/media_types/ascii/processing.py | 5 +++--
mediagoblin/media_types/image/processing.py | 4 ++--
mediagoblin/media_types/pdf/processing.py | 4 ++--
mediagoblin/media_types/stl/processing.py | 4 ++--
4 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/mediagoblin/media_types/ascii/processing.py b/mediagoblin/media_types/ascii/processing.py
index e11ac91e..6ba432e2 100644
--- a/mediagoblin/media_types/ascii/processing.py
+++ b/mediagoblin/media_types/ascii/processing.py
@@ -224,14 +224,15 @@ class Resizer(CommonAsciiProcessor):
parser.add_argument(
'file',
nargs='?',
- default='thumb')
+ default='thumb',
+ choices=['thumb'])
return parser
@classmethod
def args_to_request(cls, args):
return request_from_args(
- args, ['size', 'file'])
+ args, ['thumb_size', 'file'])
def process(self, thumb_size=None, file=None):
self.common_setup()
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index ef415496..c29030c9 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -168,7 +168,7 @@ class CommonImageProcessor(MediaProcessor):
if not quality:
quality = self.image_config['quality']
if not filter:
- filter = self.image_config['filter']
+ filter = self.image_config['resize_filter']
resize_tool(self.entry, True, 'thumb', self.orig_filename,
self.name_builder.fill('{basename}.thumbnail{ext}'),
@@ -255,7 +255,7 @@ class InitialProcessor(CommonImageProcessor):
self.common_setup()
self.generate_medium_if_applicable(size=size, filter=filter,
quality=quality)
- self.generate_thumb(size=thumb_sizei, filter=filter, quality=quality)
+ self.generate_thumb(size=thumb_size, filter=filter, quality=quality)
self.copy_original()
self.extract_metadata()
self.delete_queue_file()
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index f8aea7bb..fdf4b161 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -254,7 +254,7 @@ class CommonPdfProcessor(MediaProcessor):
if self.name_builder.ext == 'pdf':
self.pdf_filename = self.orig_filename
elif self.entry.media_files.get('pdf'):
- self.pdf_filename = self.workbench.local_file(
+ self.pdf_filename = self.workbench.localized_file(
mgg.public_store, self.entry.media_files['pdf'])
else:
self.pdf_filename = self._generate_pdf()
@@ -328,7 +328,7 @@ class CommonPdfProcessor(MediaProcessor):
# since pdftocairo added '.png', we need to include it with the
# filename
- store_public(self.entry, 'thumb', filename + '.png',
+ store_public(self.entry, 'medium', filename + '.png',
self.name_builder.fill('{basename}.medium.png'))
diff --git a/mediagoblin/media_types/stl/processing.py b/mediagoblin/media_types/stl/processing.py
index 55938c06..fc8c10b6 100644
--- a/mediagoblin/media_types/stl/processing.py
+++ b/mediagoblin/media_types/stl/processing.py
@@ -249,7 +249,7 @@ class InitialProcessor(CommonStlProcessor):
type=int)
parser.add_argument(
- '--thumb-size',
+ '--thumb_size',
nargs=2,
metavar=('max_width', 'max_height'),
type=int)
@@ -320,7 +320,7 @@ class Resizer(CommonStlProcessor):
self.generate_frontview(size=size)
self.generate_sideview(size=size)
elif file == 'thumb':
- self.generate_thumb(size=size)
+ self.generate_thumb(thumb_size=size)
class StlProcessingManager(ProcessingManager):
From 3225008f04e263c618186da881e3e01439578900 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 16:25:44 -0700
Subject: [PATCH 097/160] include a thumb_size string on each Resizer so that
gmg reprocess thumbs can pass the right size argument
---
mediagoblin/gmg_commands/reprocess.py | 6 ++++--
mediagoblin/media_types/ascii/processing.py | 1 +
mediagoblin/media_types/audio/processing.py | 1 +
mediagoblin/media_types/image/processing.py | 1 +
mediagoblin/media_types/pdf/processing.py | 1 +
mediagoblin/media_types/stl/processing.py | 1 +
mediagoblin/media_types/video/processing.py | 1 +
7 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/gmg_commands/reprocess.py b/mediagoblin/gmg_commands/reprocess.py
index 375d9ff2..e2f19ea3 100644
--- a/mediagoblin/gmg_commands/reprocess.py
+++ b/mediagoblin/gmg_commands/reprocess.py
@@ -245,8 +245,10 @@ def thumbs(args):
# prepare filetype and size to be passed into reprocess_parser
if args.size:
- extra_args = 'thumb --size {0} {1}'.format(args.size[0],
- args.size[1])
+ extra_args = 'thumb --{0} {1} {2}'.format(
+ processor_class.thumb_size,
+ args.size[0],
+ args.size[1])
else:
extra_args = 'thumb'
diff --git a/mediagoblin/media_types/ascii/processing.py b/mediagoblin/media_types/ascii/processing.py
index 6ba432e2..ab89f4ad 100644
--- a/mediagoblin/media_types/ascii/processing.py
+++ b/mediagoblin/media_types/ascii/processing.py
@@ -198,6 +198,7 @@ class Resizer(CommonAsciiProcessor):
"""
name = 'resize'
description = 'Resize thumbnail'
+ thumb_size = 'thumb_size'
@classmethod
def media_is_eligible(cls, entry=None, state=None):
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 0bcac52c..77647d12 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -232,6 +232,7 @@ class Resizer(CommonAudioProcessor):
"""
name = 'resize'
description = 'Resize thumbnail or spectogram'
+ thumb_size = 'thumb_size'
@classmethod
def media_is_eligible(cls, entry=None, state=None):
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index c29030c9..81b4d449 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -267,6 +267,7 @@ class Resizer(CommonImageProcessor):
"""
name = 'resize'
description = 'Resize image'
+ thumb_size = 'size'
@classmethod
def media_is_eligible(cls, entry=None, state=None):
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index fdf4b161..22bbf01a 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -389,6 +389,7 @@ class Resizer(CommonPdfProcessor):
"""
name = 'resize'
description = 'Resize thumbnail and medium'
+ thumb_size = 'size'
@classmethod
def media_is_eligible(cls, entry=None, state=None):
diff --git a/mediagoblin/media_types/stl/processing.py b/mediagoblin/media_types/stl/processing.py
index fc8c10b6..fddb94a2 100644
--- a/mediagoblin/media_types/stl/processing.py
+++ b/mediagoblin/media_types/stl/processing.py
@@ -279,6 +279,7 @@ class Resizer(CommonStlProcessor):
"""
name = 'resize'
description = 'Resize thumbnail and mediums'
+ thumb_size = 'size'
@classmethod
def media_is_eligible(cls, entry=None, state=None):
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index ab78e8ed..fefbebfe 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -290,6 +290,7 @@ class Resizer(CommonVideoProcessor):
"""
name = 'resize'
description = 'Resize thumbnail'
+ thumb_size = 'thumb_size'
@classmethod
def media_is_eligible(cls, entry=None, state=None):
From 23a3703a1dc9207ac39354721a007920a53894f8 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 16:30:52 -0700
Subject: [PATCH 098/160] use thumb_size
---
mediagoblin/media_types/pdf/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index 22bbf01a..19ab54b2 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -428,7 +428,7 @@ class Resizer(CommonPdfProcessor):
if file == 'medium':
self.generate_medium(size=size)
elif file == 'thumb':
- self.generate_thumb(size=size)
+ self.generate_thumb(thumb_size=size)
class PdfProcessingManager(ProcessingManager):
From 882779f547f4bc20887e8af7d4973d5bbb8bf147 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 14 Aug 2013 16:39:01 -0700
Subject: [PATCH 099/160] only try to delete the queue file if it exists
---
mediagoblin/processing/__init__.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index e31b70bb..e2415fd5 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -166,9 +166,10 @@ class MediaProcessor(object):
# be removed too, but fail if the directory is not empty to be on
# the super-safe side.
queued_filepath = self.entry.queued_media_file
- mgg.queue_store.delete_file(queued_filepath) # rm file
- mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
- self.entry.queued_media_file = []
+ if queued_filepath:
+ mgg.queue_store.delete_file(queued_filepath) # rm file
+ mgg.queue_store.delete_dir(queued_filepath[:-1]) # rm dir
+ self.entry.queued_media_file = []
class ProcessingKeyError(Exception): pass
From 8bb0df62d45ca5f8c774180e73b6441efd4df6c8 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 15 Aug 2013 08:10:00 -0700
Subject: [PATCH 100/160] check the medium_size arg in skip_transcode, not the config
---
mediagoblin/media_types/video/processing.py | 7 ++++---
mediagoblin/media_types/video/util.py | 6 +++---
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index fefbebfe..b91503ca 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -125,7 +125,7 @@ class CommonVideoProcessor(MediaProcessor):
def common_setup(self):
self.video_config = mgg \
- .global_config['media_type:mediagoblin.media_types.audio']
+ .global_config['media_type:mediagoblin.media_types.video']
# Pull down and set up the original file
self.orig_filename = get_orig_filename(
@@ -166,7 +166,7 @@ class CommonVideoProcessor(MediaProcessor):
# Figure out whether or not we need to transcode this video or
# if we can skip it
- if skip_transcode(metadata):
+ if skip_transcode(metadata, medium_size):
_log.debug('Skipping transcoding')
dst_dimensions = metadata['videowidth'], metadata['videoheight']
@@ -314,7 +314,8 @@ class Resizer(CommonVideoProcessor):
parser.add_argument(
'file',
nargs='?',
- default='thumb')
+ default='thumb',
+ choices=['thumb'])
return parser
diff --git a/mediagoblin/media_types/video/util.py b/mediagoblin/media_types/video/util.py
index 5765ecfb..c33cce5a 100644
--- a/mediagoblin/media_types/video/util.py
+++ b/mediagoblin/media_types/video/util.py
@@ -21,7 +21,7 @@ from mediagoblin import mg_globals as mgg
_log = logging.getLogger(__name__)
-def skip_transcode(metadata):
+def skip_transcode(metadata, size):
'''
Checks video metadata against configuration values for skip_transcode.
@@ -51,9 +51,9 @@ def skip_transcode(metadata):
return False
if config['dimensions_match']:
- if not metadata['videoheight'] <= medium_config['max_height']:
+ if not metadata['videoheight'] <= size[1]:
return False
- if not metadata['videowidth'] <= medium_config['max_width']:
+ if not metadata['videowidth'] <= size[0]:
return False
return True
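skip_transcode() now judges the source dimensions against the size requested for this particular run rather than the configured medium maximums, so a reprocess with a custom --size is evaluated against that size. A reduced standalone version of just the dimension check (the surrounding config flags are omitted):

def dimensions_allow_skip(metadata, size):
    """True if the source video already fits inside the requested size."""
    return (metadata['videowidth'] <= size[0] and
            metadata['videoheight'] <= size[1])

assert dimensions_allow_skip({'videowidth': 640, 'videoheight': 360}, (640, 640))
assert not dimensions_allow_skip({'videowidth': 1920, 'videoheight': 1080}, (640, 640))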
From 9b1317e3e29b87a6e9959583ff27a62c844f48b3 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 15 Aug 2013 08:11:29 -0700
Subject: [PATCH 101/160] make medium dimensions a tuple
---
mediagoblin/media_types/video/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index b91503ca..4d7d9921 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -177,7 +177,7 @@ class CommonVideoProcessor(MediaProcessor):
vp8_threads=vp8_threads,
vorbis_quality=vorbis_quality,
progress_callback=progress_callback,
- dimensions=medium_size)
+ dimensions=tuple(medium_size))
dst_dimensions = self.transcoder.dst_data.videowidth,\
self.transcoder.dst_data.videoheight
From 1cefccc7554a5df4c9bb126ef3b80b53f9e41cd7 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 15 Aug 2013 08:54:09 -0700
Subject: [PATCH 102/160] refactor get_orig_filename to return an acceptable
filename to the processor.
If there is an original video file and we skip transcoding, delete the webm_640 file
---
mediagoblin/media_types/ascii/processing.py | 18 +++++----
mediagoblin/media_types/audio/processing.py | 17 +++++----
mediagoblin/media_types/image/processing.py | 21 ++++++-----
mediagoblin/media_types/pdf/processing.py | 20 +++++-----
mediagoblin/media_types/stl/processing.py | 17 +++++----
mediagoblin/media_types/video/processing.py | 25 ++++++++-----
mediagoblin/processing/__init__.py | 41 ++++++++++++++-------
7 files changed, 94 insertions(+), 65 deletions(-)
diff --git a/mediagoblin/media_types/ascii/processing.py b/mediagoblin/media_types/ascii/processing.py
index ab89f4ad..9b6b3ad4 100644
--- a/mediagoblin/media_types/ascii/processing.py
+++ b/mediagoblin/media_types/ascii/processing.py
@@ -26,7 +26,7 @@ from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
create_pub_filepath, FilenameBuilder,
MediaProcessor, ProcessingManager,
- get_orig_filename, copy_original,
+ get_process_filename, copy_original,
store_public, request_from_args)
from mediagoblin.media_types.ascii import asciitoimage
@@ -52,6 +52,8 @@ class CommonAsciiProcessor(MediaProcessor):
"""
Provides a base for various ascii processing steps
"""
+ acceptable_files = ['original', 'unicode']
+
def common_setup(self):
self.ascii_config = mgg.global_config[
'media_type:mediagoblin.media_types.ascii']
@@ -61,16 +63,16 @@ class CommonAsciiProcessor(MediaProcessor):
self.workbench.dir, 'convirsions')
os.mkdir(self.conversions_subdir)
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
self.charset = None
def copy_original(self):
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def _detect_charset(self, orig_file):
@@ -91,7 +93,7 @@ class CommonAsciiProcessor(MediaProcessor):
orig_file.seek(0)
def store_unicode_file(self):
- with file(self.orig_filename, 'rb') as orig_file:
+ with file(self.process_filename, 'rb') as orig_file:
self._detect_charset(orig_file)
unicode_filepath = create_pub_filepath(self.entry,
'ascii-portable.txt')
@@ -110,7 +112,7 @@ class CommonAsciiProcessor(MediaProcessor):
self.entry.media_files['unicode'] = unicode_filepath
def generate_thumb(self, font=None, thumb_size=None):
- with file(self.orig_filename, 'rb') as orig_file:
+ with file(self.process_filename, 'rb') as orig_file:
# If no font kwarg, check config
if not font:
font = self.ascii_config.get('thumbnail_font', None)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 77647d12..8f1ddcbd 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -22,7 +22,7 @@ from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
BadMediaFail, FilenameBuilder,
ProgressCallback, MediaProcessor, ProcessingManager,
- request_from_args, get_orig_filename,
+ request_from_args, get_process_filename,
store_public, copy_original)
from mediagoblin.media_types.audio.transcoders import (
@@ -52,6 +52,7 @@ class CommonAudioProcessor(MediaProcessor):
"""
Provides a base for various audio processing steps
"""
+ acceptable_files = ['original', 'webm_audio']
def common_setup(self):
"""
@@ -61,10 +62,10 @@ class CommonAudioProcessor(MediaProcessor):
self.audio_config = mgg \
.global_config['media_type:mediagoblin.media_types.audio']
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
self.transcoder = AudioTranscoder()
self.thumbnailer = AudioThumbnailer()
@@ -72,7 +73,7 @@ class CommonAudioProcessor(MediaProcessor):
def copy_original(self):
if self.audio_config['keep_original']:
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def transcode(self, quality=None):
@@ -85,7 +86,7 @@ class CommonAudioProcessor(MediaProcessor):
'{basename}{ext}'))
self.transcoder.transcode(
- self.orig_filename,
+ self.process_filename,
webm_audio_tmp,
quality=quality,
progress_callback=progress_callback)
@@ -107,7 +108,7 @@ class CommonAudioProcessor(MediaProcessor):
_log.info('Creating OGG source for spectrogram')
self.transcoder.transcode(
- self.orig_filename,
+ self.process_filename,
wav_tmp,
mux_string='vorbisenc quality={0} ! oggmux'.format(
self.audio_config['quality']))
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index 81b4d449..eaa19261 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -26,7 +26,7 @@ from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
BadMediaFail, FilenameBuilder,
MediaProcessor, ProcessingManager,
- request_from_args, get_orig_filename,
+ request_from_args, get_process_filename,
store_public, copy_original)
from mediagoblin.tools.exif import exif_fix_image_orientation, \
extract_exif, clean_exif, get_gps_data, get_useful, \
@@ -131,6 +131,9 @@ class CommonImageProcessor(MediaProcessor):
"""
Provides a base for various media processing steps
"""
+ # list of acceptable file keys in order of prefrence for reprocessing
+ acceptable_files = ['original', 'medium']
+
def common_setup(self):
"""
Set up the workbench directory and pull down the original file
@@ -144,13 +147,13 @@ class CommonImageProcessor(MediaProcessor):
self.workbench.dir, 'convirsions')
os.mkdir(self.conversions_subdir)
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
# Exif extraction
- self.exif_tags = extract_exif(self.orig_filename)
+ self.exif_tags = extract_exif(self.process_filename)
def generate_medium_if_applicable(self, size=None, quality=None,
filter=None):
@@ -159,7 +162,7 @@ class CommonImageProcessor(MediaProcessor):
if not filter:
filter = self.image_config['resize_filter']
- resize_tool(self.entry, False, 'medium', self.orig_filename,
+ resize_tool(self.entry, False, 'medium', self.process_filename,
self.name_builder.fill('{basename}.medium{ext}'),
self.conversions_subdir, self.exif_tags, quality,
filter, size)
@@ -170,14 +173,14 @@ class CommonImageProcessor(MediaProcessor):
if not filter:
filter = self.image_config['resize_filter']
- resize_tool(self.entry, True, 'thumb', self.orig_filename,
+ resize_tool(self.entry, True, 'thumb', self.process_filename,
self.name_builder.fill('{basename}.thumbnail{ext}'),
self.conversions_subdir, self.exif_tags, quality,
filter, size)
def copy_original(self):
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def extract_metadata(self):
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index 19ab54b2..6ef95a72 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -23,7 +23,7 @@ from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
FilenameBuilder, BadMediaFail,
MediaProcessor, ProcessingManager,
- request_from_args, get_orig_filename,
+ request_from_args, get_process_filename,
store_public, copy_original)
from mediagoblin.tools.translate import fake_ugettext_passthrough as _
@@ -239,20 +239,22 @@ class CommonPdfProcessor(MediaProcessor):
"""
Provides a base for various pdf processing steps
"""
+ acceptable_files = ['original', 'pdf']
+
def common_setup(self):
"""
Set up common pdf processing steps
"""
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
self._set_pdf_filename()
def _set_pdf_filename(self):
if self.name_builder.ext == 'pdf':
- self.pdf_filename = self.orig_filename
+ self.pdf_filename = self.process_filename
elif self.entry.media_files.get('pdf'):
self.pdf_filename = self.workbench.localized_file(
mgg.public_store, self.entry.media_files['pdf'])
@@ -261,7 +263,7 @@ class CommonPdfProcessor(MediaProcessor):
def copy_original(self):
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def generate_thumb(self, thumb_size=None):
@@ -290,11 +292,11 @@ class CommonPdfProcessor(MediaProcessor):
"""
Store the pdf. If the file is not a pdf, make it a pdf
"""
- tmp_pdf = self.orig_filename
+ tmp_pdf = self.process_filename
unoconv = where('unoconv')
Popen(executable=unoconv,
- args=[unoconv, '-v', '-f', 'pdf', self.orig_filename]).wait()
+ args=[unoconv, '-v', '-f', 'pdf', self.process_filename]).wait()
if not os.path.exists(tmp_pdf):
_log.debug('unoconv failed to convert file to pdf')
diff --git a/mediagoblin/media_types/stl/processing.py b/mediagoblin/media_types/stl/processing.py
index fddb94a2..77d3d86e 100644
--- a/mediagoblin/media_types/stl/processing.py
+++ b/mediagoblin/media_types/stl/processing.py
@@ -25,7 +25,7 @@ from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (
FilenameBuilder, MediaProcessor,
ProcessingManager, request_from_args,
- get_orig_filename, store_public,
+ get_process_filename, store_public,
copy_original)
from mediagoblin.media_types.stl import model_loader
@@ -83,12 +83,13 @@ class CommonStlProcessor(MediaProcessor):
"""
Provides a common base for various stl processing steps
"""
+ acceptable_files = ['original']
def common_setup(self):
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
self._set_ext()
self._set_model()
@@ -107,7 +108,7 @@ class CommonStlProcessor(MediaProcessor):
Attempt to parse the model file and divine some useful
information about it.
"""
- with open(self.orig_filename, 'rb') as model_file:
+ with open(self.process_filename, 'rb') as model_file:
self.model = model_loader.auto_detect(model_file, self.ext)
def _set_greatest(self):
@@ -117,14 +118,14 @@ class CommonStlProcessor(MediaProcessor):
def copy_original(self):
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def _snap(self, keyname, name, camera, size, project="ORTHO"):
filename = self.name_builder.fill(name)
workbench_path = self.workbench.joinpath(filename)
shot = {
- "model_path": self.orig_filename,
+ "model_path": self.process_filename,
"model_ext": self.ext,
"camera_coord": camera,
"camera_focus": self.model.average,
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index 4d7d9921..bb854ffb 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -24,7 +24,7 @@ from mediagoblin.processing import (
FilenameBuilder, BaseProcessingFail,
ProgressCallback, MediaProcessor,
ProcessingManager, request_from_args,
- get_orig_filename, store_public,
+ get_process_filename, store_public,
copy_original)
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
@@ -122,15 +122,16 @@ class CommonVideoProcessor(MediaProcessor):
"""
Provides a base for various video processing steps
"""
+ acceptable_files = ['original', 'webm_640']
def common_setup(self):
self.video_config = mgg \
.global_config['media_type:mediagoblin.media_types.video']
- # Pull down and set up the original file
- self.orig_filename = get_orig_filename(
- self.entry, self.workbench)
- self.name_builder = FilenameBuilder(self.orig_filename)
+ # Pull down and set up the processing file
+ self.process_filename = get_process_filename(
+ self.entry, self.workbench, self.acceptable_files)
+ self.name_builder = FilenameBuilder(self.process_filename)
self.transcoder = transcoders.VideoTranscoder()
self.did_transcode = False
@@ -140,7 +141,7 @@ class CommonVideoProcessor(MediaProcessor):
if not self.did_transcode or \
(self.video_config['keep_original'] and self.did_transcode):
copy_original(
- self.entry, self.orig_filename,
+ self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
@@ -161,7 +162,7 @@ class CommonVideoProcessor(MediaProcessor):
vorbis_quality = self.video_config['vorbis_quality']
# Extract metadata and keep a record of it
- metadata = self.transcoder.discover(self.orig_filename)
+ metadata = self.transcoder.discover(self.process_filename)
store_metadata(self.entry, metadata)
# Figure out whether or not we need to transcode this video or
@@ -171,8 +172,14 @@ class CommonVideoProcessor(MediaProcessor):
dst_dimensions = metadata['videowidth'], metadata['videoheight']
+ # If there is an original and transcoded, delete the transcoded
+ # since it must be of lower quality then the original
+ if self.entry.media_files.get('original') and \
+ self.entry.media_files.get('webm_640'):
+ self.entry.media_files['webm_640'].delete()
+
else:
- self.transcoder.transcode(self.orig_filename, tmp_dst,
+ self.transcoder.transcode(self.process_filename, tmp_dst,
vp8_quality=vp8_quality,
vp8_threads=vp8_threads,
vorbis_quality=vorbis_quality,
@@ -206,7 +213,7 @@ class CommonVideoProcessor(MediaProcessor):
mgg.global_config['media:thumb']['max_height'])
transcoders.VideoThumbnailerMarkII(
- self.orig_filename,
+ self.process_filename,
tmp_thumb,
thumb_size[0],
thumb_size[1])
diff --git a/mediagoblin/processing/__init__.py b/mediagoblin/processing/__init__.py
index e2415fd5..746f4d8e 100644
--- a/mediagoblin/processing/__init__.py
+++ b/mediagoblin/processing/__init__.py
@@ -327,29 +327,34 @@ def mark_entry_failed(entry_id, exc):
u'fail_metadata': {}})
-def get_orig_filename(entry, workbench):
+def get_process_filename(entry, workbench, acceptable_files):
"""
- Get the a filename for the original, on local storage
+ Try and get the queued file if available, otherwise return the first file
+ in the acceptable_files that we have.
- If the media entry has a queued_media_file, use that, otherwise
- use the original.
-
- In the future, this will return the highest quality file available
- if neither the original or queued file are available by checking
- some ordered list of preferred keys.
+ If no acceptable_files, raise ProcessFileNotFound
"""
if entry.queued_media_file:
- orig_filepath = entry.queued_media_file
+ filepath = entry.queued_media_file
storage = mgg.queue_store
else:
- orig_filepath = entry.media_files['original']
- storage = mgg.public_store
+ for keyname in acceptable_files:
+ if entry.media_files.get(keyname):
+ filepath = entry.media_files[keyname]
+ storage = mgg.public_store
+ break
- orig_filename = workbench.localized_file(
- storage, orig_filepath,
+ if not filepath:
+ raise ProcessFileNotFound()
+
+ filename = workbench.localized_file(
+ storage, filepath,
'source')
- return orig_filename
+ if not os.path.exists(filename):
+ raise ProcessFileNotFound()
+
+ return filename
def store_public(entry, keyname, local_file, target_name=None,
@@ -415,3 +420,11 @@ class PublicStoreFail(BaseProcessingFail):
Error that should be raised when copying to public store fails
"""
general_message = _('Copying to public storage failed.')
+
+
+class ProcessFileNotFound(BaseProcessingFail):
+ """
+ Error that should be raised when an acceptable file for processing
+ is not found.
+ """
+ general_message = _(u'An acceptable processing file was not found')
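get_process_filename() prefers the queued file (a fresh upload) and otherwise walks acceptable_files in order, which is why each processor lists its keys from best to worst. A standalone sketch of just that selection order, with the storage and workbench handling left out (pick_process_file and the sample filepath are illustrative):

class ProcessFileNotFound(Exception):
    """Stand-in for the new BaseProcessingFail subclass."""

def pick_process_file(queued_media_file, media_files, acceptable_files):
    """Return the stored filepath a processor should work from."""
    if queued_media_file:
        return queued_media_file
    for keyname in acceptable_files:
        if media_files.get(keyname):
            return media_files[keyname]
    raise ProcessFileNotFound()

# no original was kept, so the transcoded file is the best we can reprocess from
media_files = {'webm_640': ['media_entries', '42', 'video.medium.webm']}
picked = pick_process_file(None, media_files,
                           ['original', 'best_quality', 'webm_640'])
assert picked == media_files['webm_640']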
From 7674b9c05b29178e280c9cbd19d8dc276f0e71ea Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 15 Aug 2013 09:29:11 -0700
Subject: [PATCH 103/160] for some reason, the minimum thumbnail size for
videos is 100 x 100
---
mediagoblin/media_types/audio/processing.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 8f1ddcbd..42855265 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -191,7 +191,8 @@ class InitialProcessor(CommonAudioProcessor):
'--thumb_size',
nargs=2,
metavar=('max_width', 'max_height'),
- type=int)
+ type=int,
+ help='minimum size is 100 x 100')
parser.add_argument(
'--medium_width',
@@ -259,7 +260,8 @@ class Resizer(CommonAudioProcessor):
'--thumb_size',
nargs=2,
metavar=('max_width', 'max_height'),
- type=int)
+ type=int,
+ help='minimum size is 100 x 100')
parser.add_argument(
'--medium_width',
From 0a8c0c704d27fbff7328b94650b797ccb2383f16 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 15 Aug 2013 12:40:19 -0700
Subject: [PATCH 104/160] Keep the best quality file if there's no original
---
mediagoblin/media_types/audio/processing.py | 15 ++++++++++++++-
mediagoblin/media_types/pdf/processing.py | 2 +-
mediagoblin/media_types/video/processing.py | 16 +++++++++++++++-
3 files changed, 30 insertions(+), 3 deletions(-)
diff --git a/mediagoblin/media_types/audio/processing.py b/mediagoblin/media_types/audio/processing.py
index 42855265..6a506741 100644
--- a/mediagoblin/media_types/audio/processing.py
+++ b/mediagoblin/media_types/audio/processing.py
@@ -52,7 +52,7 @@ class CommonAudioProcessor(MediaProcessor):
"""
Provides a base for various audio processing steps
"""
- acceptable_files = ['original', 'webm_audio']
+ acceptable_files = ['original', 'best_quality', 'webm_audio']
def common_setup(self):
"""
@@ -76,6 +76,17 @@ class CommonAudioProcessor(MediaProcessor):
self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
+ def _keep_best(self):
+ """
+ If there is no original, keep the best file that we have
+ """
+ if not self.entry.media_files.get('best_quality'):
+ # Save the best quality file if no original?
+ if not self.entry.media_files.get('original') and \
+ self.entry.media_files.get('webm_audio'):
+ self.entry.media_files['best_quality'] = self.entry \
+ .media_files['webm_audio']
+
def transcode(self, quality=None):
if not quality:
quality = self.audio_config['quality']
@@ -93,6 +104,8 @@ class CommonAudioProcessor(MediaProcessor):
self.transcoder.discover(webm_audio_tmp)
+ self._keep_best()
+
_log.debug('Saving medium...')
store_public(self.entry, 'webm_audio', webm_audio_tmp,
self.name_builder.fill('{basename}.medium.webm'))
diff --git a/mediagoblin/media_types/pdf/processing.py b/mediagoblin/media_types/pdf/processing.py
index 6ef95a72..549def69 100644
--- a/mediagoblin/media_types/pdf/processing.py
+++ b/mediagoblin/media_types/pdf/processing.py
@@ -253,7 +253,7 @@ class CommonPdfProcessor(MediaProcessor):
self._set_pdf_filename()
def _set_pdf_filename(self):
- if self.name_builder.ext == 'pdf':
+ if self.name_builder.ext == '.pdf':
self.pdf_filename = self.process_filename
elif self.entry.media_files.get('pdf'):
self.pdf_filename = self.workbench.localized_file(
diff --git a/mediagoblin/media_types/video/processing.py b/mediagoblin/media_types/video/processing.py
index bb854ffb..a3aa9bcf 100644
--- a/mediagoblin/media_types/video/processing.py
+++ b/mediagoblin/media_types/video/processing.py
@@ -122,7 +122,7 @@ class CommonVideoProcessor(MediaProcessor):
"""
Provides a base for various video processing steps
"""
- acceptable_files = ['original', 'webm_640']
+ acceptable_files = ['original', 'best_quality', 'webm_640']
def common_setup(self):
self.video_config = mgg \
@@ -144,6 +144,18 @@ class CommonVideoProcessor(MediaProcessor):
self.entry, self.process_filename,
self.name_builder.fill('{basename}{ext}'))
+ def _keep_best(self):
+ """
+ If there is no original, keep the best file that we have
+ """
+ if not self.entry.media_files.get('best_quality'):
+ # Save the best quality file if no original?
+ if not self.entry.media_files.get('original') and \
+ self.entry.media_files.get('webm_640'):
+ self.entry.media_files['best_quality'] = self.entry \
+ .media_files['webm_640']
+
+
def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
vorbis_quality=None):
progress_callback = ProgressCallback(self.entry)
@@ -189,6 +201,8 @@ class CommonVideoProcessor(MediaProcessor):
dst_dimensions = self.transcoder.dst_data.videowidth,\
self.transcoder.dst_data.videoheight
+ self._keep_best()
+
# Push transcoded video to public storage
_log.debug('Saving medium...')
store_public(self.entry, 'webm_640', tmp_dst,
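_keep_best() records a 'best_quality' pointer only when no original was kept, so the soon-to-be-replaced transcode is not lost as a reprocessing source; 'best_quality' also sits right after 'original' in acceptable_files above. A small standalone sketch of the effect on entry.media_files (keep_best and the sample filepaths are illustrative):

def keep_best(media_files, transcoded_key):
    """Mirror of _keep_best(): remember the transcode when no original exists."""
    if not media_files.get('best_quality'):
        if not media_files.get('original') and media_files.get(transcoded_key):
            media_files['best_quality'] = media_files[transcoded_key]

# no original kept: the existing webm becomes 'best_quality'
media_files = {'webm_640': ['media_entries', '7', 'video.medium.webm']}
keep_best(media_files, 'webm_640')
assert media_files['best_quality'] == media_files['webm_640']

# with an original around, nothing changes
media_files = {'original': ['media_entries', '8', 'video.webm'],
               'webm_640': ['media_entries', '8', 'video.medium.webm']}
keep_best(media_files, 'webm_640')
assert 'best_quality' not in media_files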
From 931fa43fbce7d702eb4b2447cefeaf73dcecf7ac Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Fri, 16 Aug 2013 15:08:21 -0700
Subject: [PATCH 105/160] make sure size is a tuple
---
mediagoblin/media_types/image/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/mediagoblin/media_types/image/processing.py b/mediagoblin/media_types/image/processing.py
index eaa19261..088979bc 100644
--- a/mediagoblin/media_types/image/processing.py
+++ b/mediagoblin/media_types/image/processing.py
@@ -100,7 +100,7 @@ def resize_tool(entry,
or exif_image_needs_rotation(exif_tags):
resize_image(
entry, im, unicode(keyname), target_name,
- new_size,
+ tuple(new_size),
exif_tags, conversions_subdir,
quality, filter)
From d0708da727b72cfc95defe14dd6780d2cba0a0b7 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 7 Aug 2013 12:44:43 -0700
Subject: [PATCH 106/160] add the ability to mark all notifications read.
---
mediagoblin/notifications/routing.py | 4 ++++
mediagoblin/notifications/views.py | 16 +++++++++++++++-
mediagoblin/static/js/notifications.js | 13 +++++++++++++
.../fragments/header_notifications.html | 4 ++++
4 files changed, 36 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/notifications/routing.py b/mediagoblin/notifications/routing.py
index e57956d3..cd7bbc21 100644
--- a/mediagoblin/notifications/routing.py
+++ b/mediagoblin/notifications/routing.py
@@ -23,3 +23,7 @@ add_route('mediagoblin.notifications.subscribe_comments',
add_route('mediagoblin.notifications.silence_comments',
'/u//m//notifications/silence/',
'mediagoblin.notifications.views:silence_comments')
+
+add_route('mediagoblin.notifications.mark_all_comment_notifications_seen',
+ '/notifications/comments/mark_all_seen/',
+ 'mediagoblin.notifications.views:mark_all_comment_notifications_seen')
diff --git a/mediagoblin/notifications/views.py b/mediagoblin/notifications/views.py
index d275bc92..cda7f0af 100644
--- a/mediagoblin/notifications/views.py
+++ b/mediagoblin/notifications/views.py
@@ -24,7 +24,7 @@ from mediagoblin.decorators import (uses_pagination, get_user_media_entry,
from mediagoblin import messages
from mediagoblin.notifications import add_comment_subscription, \
- silence_comment_subscription
+ silence_comment_subscription, mark_comment_notification_seen
from werkzeug.exceptions import BadRequest
@@ -52,3 +52,17 @@ def silence_comments(request, media):
' %s.') % media.title)
return redirect(request, location=media.url_for_self(request.urlgen))
+
+
+@require_active_login
+def mark_all_comment_notifications_seen(request):
+ """
+ Marks all comment notifications seen.
+ """
+ for comment in request.notifications.get_notifications(request.user.id):
+ mark_comment_notification_seen(comment.subject_id, request.user)
+
+ if request.GET.get('next'):
+ return redirect(request, location=request.GET.get('next'))
+ else:
+ return redirect(request, 'index')
diff --git a/mediagoblin/static/js/notifications.js b/mediagoblin/static/js/notifications.js
index 0153463a..c1c06a43 100644
--- a/mediagoblin/static/js/notifications.js
+++ b/mediagoblin/static/js/notifications.js
@@ -33,4 +33,17 @@ var notifications = {};
$(document).ready(function () {
notifications.init();
+
+ var mark_all_comments_seen = document.getElementById('mark_all_comments_seen');
+
+ if (mark_all_comments_seen) {
+ mark_all_comments_seen.href = '#';
+ mark_all_comments_seen.onclick = function() {
+ $.ajax({
+ type: 'GET',
+ url: '/notifications/comments/mark_all_seen/',
+ success: function(res, status, xhr) { window.location.reload(); },
+ });
+ }
+ }
});
diff --git a/mediagoblin/templates/mediagoblin/fragments/header_notifications.html b/mediagoblin/templates/mediagoblin/fragments/header_notifications.html
index 70d7935a..55759a39 100644
--- a/mediagoblin/templates/mediagoblin/fragments/header_notifications.html
+++ b/mediagoblin/templates/mediagoblin/fragments/header_notifications.html
@@ -36,5 +36,9 @@
{% endfor %}
+
{% endif %}
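
The new endpoint is a plain GET that marks everything seen and then redirects, honouring an optional ?next= parameter. A standalone sketch of just the redirect-target selection, built on a bare werkzeug request (MediaGoblin's request object wraps werkzeug, and request.GET in the view plays the role of werkzeug's args here); the '/u/chris/' value is only illustrative:

    from werkzeug.test import EnvironBuilder
    from werkzeug.wrappers import Request

    def redirect_target(request, default='/'):
        # mirrors the view: go to ?next= when present, otherwise the index
        return request.args.get('next') or default

    environ = EnvironBuilder(
        path='/notifications/comments/mark_all_seen/',
        query_string='next=/u/chris/').get_environ()
    print(redirect_target(Request(environ)))   # '/u/chris/'
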
From 1cca2a6857d529bb3927b9e4b0d41ac1534ae06b Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 7 Aug 2013 12:50:59 -0700
Subject: [PATCH 107/160] PEP 8
---
mediagoblin/notifications/views.py | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/mediagoblin/notifications/views.py b/mediagoblin/notifications/views.py
index cda7f0af..5a67c1ba 100644
--- a/mediagoblin/notifications/views.py
+++ b/mediagoblin/notifications/views.py
@@ -14,19 +14,14 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from mediagoblin.tools.response import render_to_response, render_404, redirect
+from mediagoblin.tools.response import redirect
from mediagoblin.tools.translate import pass_to_ugettext as _
-from mediagoblin.decorators import (uses_pagination, get_user_media_entry,
- get_media_entry_by_id,
- require_active_login, user_may_delete_media, user_may_alter_collection,
- get_user_collection, get_user_collection_item, active_user_from_url)
-
+from mediagoblin.decorators import get_user_media_entry, require_active_login
from mediagoblin import messages
from mediagoblin.notifications import add_comment_subscription, \
- silence_comment_subscription, mark_comment_notification_seen
+ silence_comment_subscription, mark_comment_notification_seen
-from werkzeug.exceptions import BadRequest
@get_user_media_entry
@require_active_login
@@ -41,6 +36,7 @@ def subscribe_comments(request, media):
return redirect(request, location=media.url_for_self(request.urlgen))
+
@get_user_media_entry
@require_active_login
def silence_comments(request, media):
From 04d8ced5c620c29abd42d6892d900e620da1e5f3 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Wed, 7 Aug 2013 15:21:08 -0700
Subject: [PATCH 108/160] added test for mark all comment notifications seen
---
mediagoblin/tests/test_notifications.py | 53 +++++++++++++++++++++++++
1 file changed, 53 insertions(+)
diff --git a/mediagoblin/tests/test_notifications.py b/mediagoblin/tests/test_notifications.py
index d52b8d5a..e075d475 100644
--- a/mediagoblin/tests/test_notifications.py
+++ b/mediagoblin/tests/test_notifications.py
@@ -149,3 +149,56 @@ otherperson@example.com\n\nSGkgb3RoZXJwZXJzb24sCmNocmlzIGNvbW1lbnRlZCBvbiB5b3VyI
# User should not have been notified
assert len(notifications) == 1
+
+ def test_mark_all_comment_notifications_seen(self):
+ """ Test that mark_all_comments_seen works"""
+
+ user = fixture_add_user('otherperson', password='nosreprehto')
+
+ media_entry = fixture_media_entry(uploader=user.id, state=u'processed')
+
+ fixture_comment_subscription(media_entry)
+
+ media_uri_id = '/u/{0}/m/{1}/'.format(user.username,
+ media_entry.id)
+
+ # add 2 comments
+ self.test_app.post(
+ media_uri_id + 'comment/add/',
+ {
+ 'comment_content': u'Test comment #43'
+ }
+ )
+
+ self.test_app.post(
+ media_uri_id + 'comment/add/',
+ {
+ 'comment_content': u'Test comment #44'
+ }
+ )
+
+ notifications = Notification.query.filter_by(
+ user_id=user.id).all()
+
+ assert len(notifications) == 2
+
+ # both comments should not be marked seen
+ assert notifications[0].seen == False
+ assert notifications[1].seen == False
+
+ # login with other user to mark notifications seen
+ self.logout()
+ self.login('otherperson', 'nosreprehto')
+
+ # mark all comment notifications seen
+ res = self.test_app.get('/notifications/comments/mark_all_seen/')
+ res.follow()
+
+ assert urlparse.urlsplit(res.location)[2] == '/'
+
+ notifications = Notification.query.filter_by(
+ user_id=user.id).all()
+
+ # both notifications should be marked seen
+ assert notifications[0].seen == True
+ assert notifications[1].seen == True
From 4a2aa93c6abeb4831a03a9e8bd7089d0a6f2470e Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 8 Aug 2013 11:23:16 -0700
Subject: [PATCH 109/160] use urlgen and store it in a variable
---
mediagoblin/static/js/notifications.js | 2 +-
mediagoblin/templates/mediagoblin/base.html | 3 +++
2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/mediagoblin/static/js/notifications.js b/mediagoblin/static/js/notifications.js
index c1c06a43..78694f59 100644
--- a/mediagoblin/static/js/notifications.js
+++ b/mediagoblin/static/js/notifications.js
@@ -41,7 +41,7 @@ $(document).ready(function () {
mark_all_comments_seen.onclick = function() {
$.ajax({
type: 'GET',
- url: '/notifications/comments/mark_all_seen/',
+ url: mark_all_comments_seen_url,
success: function(res, status, xhr) { window.location.reload(); },
});
}
diff --git a/mediagoblin/templates/mediagoblin/base.html b/mediagoblin/templates/mediagoblin/base.html
index f7e2dff0..f9deb2ad 100644
--- a/mediagoblin/templates/mediagoblin/base.html
+++ b/mediagoblin/templates/mediagoblin/base.html
@@ -37,6 +37,9 @@
src="{{ request.staticdirect('/js/header_dropdown.js') }}">
+
{# For clarification, the difference between the extra_head.html template
# and the head template hook is that the former should be used by
From 93d805ad6b0e5324c515323d2fc0a4a7ea3f1dad Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 8 Aug 2013 15:07:07 -0700
Subject: [PATCH 110/160] add user preference for in-site notifications
---
mediagoblin/db/migrations.py | 16 ++++++++++++++--
mediagoblin/db/models.py | 1 +
mediagoblin/edit/forms.py | 2 ++
mediagoblin/edit/views.py | 4 +++-
mediagoblin/notifications/__init__.py | 18 ++++++++++++++++--
5 files changed, 36 insertions(+), 5 deletions(-)
diff --git a/mediagoblin/db/migrations.py b/mediagoblin/db/migrations.py
index 374ab4c8..d542d7b9 100644
--- a/mediagoblin/db/migrations.py
+++ b/mediagoblin/db/migrations.py
@@ -425,7 +425,7 @@ class RequestToken_v0(declarative_base()):
callback = Column(Unicode, nullable=False, default=u"oob")
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
updated = Column(DateTime, nullable=False, default=datetime.datetime.now)
-
+
class AccessToken_v0(declarative_base()):
"""
Model for representing the access tokens
@@ -438,7 +438,7 @@ class AccessToken_v0(declarative_base()):
request_token = Column(Unicode, ForeignKey(RequestToken_v0.token))
created = Column(DateTime, nullable=False, default=datetime.datetime.now)
updated = Column(DateTime, nullable=False, default=datetime.datetime.now)
-
+
class NonceTimestamp_v0(declarative_base()):
"""
@@ -460,3 +460,15 @@ def create_oauth1_tables(db):
NonceTimestamp_v0.__table__.create(db.bind)
db.commit()
+
+
+@RegisterMigration(15, MIGRATIONS)
+def wants_notifications(db):
+ """Add a wants_notifications field to User model"""
+ metadata = MetaData(bind=db.bind)
+ user_table = inspect_table(metadata, "core__users")
+
+ col = Column('wants_notifications', Boolean, default=True)
+ col.create(user_table)
+
+ db.commit()
diff --git a/mediagoblin/db/models.py b/mediagoblin/db/models.py
index 9cb39ff4..4341e086 100644
--- a/mediagoblin/db/models.py
+++ b/mediagoblin/db/models.py
@@ -69,6 +69,7 @@ class User(Base, UserMixin):
# Intended to be nullable=False, but migrations would not work for it
# set to nullable=True implicitly.
wants_comment_notification = Column(Boolean, default=True)
+ wants_notifications = Column(Boolean, default=True)
license_preference = Column(Unicode)
is_admin = Column(Boolean, default=False, nullable=False)
url = Column(Unicode)
diff --git a/mediagoblin/edit/forms.py b/mediagoblin/edit/forms.py
index 85c243a0..5de1bf96 100644
--- a/mediagoblin/edit/forms.py
+++ b/mediagoblin/edit/forms.py
@@ -67,6 +67,8 @@ class EditAccountForm(wtforms.Form):
normalize_user_or_email_field(allow_user=False)])
wants_comment_notification = wtforms.BooleanField(
description=_("Email me when others comment on my media"))
+ wants_notifications = wtforms.BooleanField(
+ description=_("Enable/Disable insite notifications"))
license_preference = wtforms.SelectField(
_('License preference'),
[
diff --git a/mediagoblin/edit/views.py b/mediagoblin/edit/views.py
index 6aa2acd9..a11cb932 100644
--- a/mediagoblin/edit/views.py
+++ b/mediagoblin/edit/views.py
@@ -228,10 +228,12 @@ def edit_account(request):
user = request.user
form = forms.EditAccountForm(request.form,
wants_comment_notification=user.wants_comment_notification,
- license_preference=user.license_preference)
+ license_preference=user.license_preference,
+ wants_notifications=user.wants_notifications)
if request.method == 'POST' and form.validate():
user.wants_comment_notification = form.wants_comment_notification.data
+ user.wants_notifications = form.wants_notifications.data
user.license_preference = form.license_preference.data
diff --git a/mediagoblin/notifications/__init__.py b/mediagoblin/notifications/__init__.py
index ed9f8d78..b6f9f478 100644
--- a/mediagoblin/notifications/__init__.py
+++ b/mediagoblin/notifications/__init__.py
@@ -17,7 +17,8 @@
import logging
from mediagoblin.db.models import Notification, \
- CommentNotification, CommentSubscription
+ CommentNotification, CommentSubscription, User
+from mediagoblin.notifications.task import email_notification_task
from mediagoblin.notifications.tools import generate_comment_message
_log = logging.getLogger(__name__)
@@ -121,6 +122,12 @@ NOTIFICATION_FETCH_LIMIT = 100
def get_notifications(user_id, only_unseen=True):
query = Notification.query.filter_by(user_id=user_id)
+ wants_notifications = User.query.filter_by(id=user_id).first()\
+ .wants_notifications
+
+ # If the user does not want notifications, don't return any
+ if not wants_notifications:
+ return None
if only_unseen:
query = query.filter_by(seen=False)
@@ -130,12 +137,19 @@ def get_notifications(user_id, only_unseen=True):
return notifications
+
def get_notification_count(user_id, only_unseen=True):
query = Notification.query.filter_by(user_id=user_id)
+ wants_notifications = User.query.filter_by(id=user_id).first()\
+ .wants_notifications
if only_unseen:
query = query.filter_by(seen=False)
- count = query.count()
+ # If the user doesn't want notifications, don't show any
+ if not wants_notifications:
+ count = None
+ else:
+ count = query.count()
return count
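
With this change get_notifications() and get_notification_count() both return None for users who have opted out, rather than an empty list or zero, so anything consuming them has to guard before iterating or formatting. A hedged sketch of the kind of guard a caller needs; the helper name is illustrative, not MediaGoblin API:

    def render_notification_badge(count):
        """Format a header badge; count may be None when notifications are off."""
        if not count:          # covers both None and 0
            return ''
        return '(%d)' % count

    print(repr(render_notification_badge(None)))   # ''
    print(render_notification_badge(3))            # '(3)'
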
From a30d2d8b6ca0837737e92b09bac74c4504ba0182 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Tue, 20 Aug 2013 08:25:26 -0700
Subject: [PATCH 111/160] request object no longer contains notification
functions
---
mediagoblin/notifications/views.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/mediagoblin/notifications/views.py b/mediagoblin/notifications/views.py
index 5a67c1ba..cfe66b2e 100644
--- a/mediagoblin/notifications/views.py
+++ b/mediagoblin/notifications/views.py
@@ -19,8 +19,9 @@ from mediagoblin.tools.translate import pass_to_ugettext as _
from mediagoblin.decorators import get_user_media_entry, require_active_login
from mediagoblin import messages
-from mediagoblin.notifications import add_comment_subscription, \
- silence_comment_subscription, mark_comment_notification_seen
+from mediagoblin.notifications import (add_comment_subscription,
+ silence_comment_subscription, mark_comment_notification_seen,
+ get_notifications)
@get_user_media_entry
@@ -55,7 +56,7 @@ def mark_all_comment_notifications_seen(request):
"""
Marks all comment notifications seen.
"""
- for comment in request.notifications.get_notifications(request.user.id):
+ for comment in get_notifications(request.user.id):
mark_comment_notification_seen(comment.subject_id, request.user)
if request.GET.get('next'):
From 402f43601164e26390cf7c78a383537eb009e499 Mon Sep 17 00:00:00 2001
From: Rodney Ewing
Date: Thu, 11 Jul 2013 16:16:41 -0700
Subject: [PATCH 112/160] maybe have change password and email on the same page
---
mediagoblin/edit/forms.py | 16 +++--
mediagoblin/edit/routing.py | 2 +
mediagoblin/edit/views.py | 68 ++++++++++++-------
.../mediagoblin/edit/change_email.html | 45 ++++++++++++
.../mediagoblin/edit/edit_account.html | 5 ++
5 files changed, 109 insertions(+), 27 deletions(-)
create mode 100644 mediagoblin/templates/mediagoblin/edit/change_email.html
diff --git a/mediagoblin/edit/forms.py b/mediagoblin/edit/forms.py
index 85c243a0..71f30520 100644
--- a/mediagoblin/edit/forms.py
+++ b/mediagoblin/edit/forms.py
@@ -61,10 +61,6 @@ class EditProfileForm(wtforms.Form):
class EditAccountForm(wtforms.Form):
- new_email = wtforms.TextField(
- _('New email address'),
- [wtforms.validators.Optional(),
- normalize_user_or_email_field(allow_user=False)])
wants_comment_notification = wtforms.BooleanField(
description=_("Email me when others comment on my media"))
license_preference = wtforms.SelectField(
@@ -111,3 +107,15 @@ class ChangePassForm(wtforms.Form):
[wtforms.validators.Required(),
wtforms.validators.Length(min=6, max=30)],
id="password")
+
+
+class ChangeEmailForm(wtforms.Form):
+ new_email = wtforms.TextField(
+ _('New email address'),
+ [wtforms.validators.Required(),
+ normalize_user_or_email_field(allow_user=False)])
+ password = wtforms.PasswordField(
+ _('Password'),
+ [wtforms.validators.Required()],
+ description=_(
+ "Enter your password to prove you own this account."))
diff --git a/mediagoblin/edit/routing.py b/mediagoblin/edit/routing.py
index 3592f708..75f5a6d8 100644
--- a/mediagoblin/edit/routing.py
+++ b/mediagoblin/edit/routing.py
@@ -28,3 +28,5 @@ add_route('mediagoblin.edit.pass', '/edit/password/',
'mediagoblin.edit.views:change_pass')
add_route('mediagoblin.edit.verify_email', '/edit/verify_email/',
'mediagoblin.edit.views:verify_email')
+add_route('mediagoblin.edit.email', '/edit/email/',
+ 'mediagoblin.edit.views:change_email')
diff --git a/mediagoblin/edit/views.py b/mediagoblin/edit/views.py
index 6aa2acd9..82cec8da 100644
--- a/mediagoblin/edit/views.py
+++ b/mediagoblin/edit/views.py
@@ -425,30 +425,52 @@ def verify_email(request):
user=user.username)
-def _update_email(request, form, user):
- new_email = form.new_email.data
- users_with_email = User.query.filter_by(
- email=new_email).count()
+def change_email(request):
+ """ View to change the user's email """
+ form = forms.ChangeEmailForm(request.form)
+ user = request.user
- if users_with_email:
- form.new_email.errors.append(
- _('Sorry, a user with that email address'
- ' already exists.'))
+ # If no password authentication, no need to enter a password
+ if 'pass_auth' not in request.template_env.globals or not user.pw_hash:
+ form.__delitem__('password')
- elif not users_with_email:
- verification_key = get_timed_signer_url(
- 'mail_verification_token').dumps({
- 'user': user.id,
- 'email': new_email})
+ if request.method == 'POST' and form.validate():
+ new_email = form.new_email.data
+ users_with_email = User.query.filter_by(
+ email=new_email).count()
- rendered_email = render_template(
- request, 'mediagoblin/edit/verification.txt',
- {'username': user.username,
- 'verification_url': EMAIL_VERIFICATION_TEMPLATE.format(
- uri=request.urlgen('mediagoblin.edit.verify_email',
- qualified=True),
- verification_key=verification_key)})
+ if users_with_email:
+ form.new_email.errors.append(
+ _('Sorry, a user with that email address'
+ ' already exists.'))
- email_debug_message(request)
- auth_tools.send_verification_email(user, request, new_email,
- rendered_email)
+ if user.pw_hash and not auth.check_password(
+ form.password.data, user.pw_hash):
+ form.password.errors.append(
+ _('Wrong password'))
+
+ if not form.errors:
+ verification_key = get_timed_signer_url(
+ 'mail_verification_token').dumps({
+ 'user': user.id,
+ 'email': new_email})
+
+ rendered_email = render_template(
+ request, 'mediagoblin/edit/verification.txt',
+ {'username': user.username,
+ 'verification_url': EMAIL_VERIFICATION_TEMPLATE.format(
+ uri=request.urlgen('mediagoblin.edit.verify_email',
+ qualified=True),
+ verification_key=verification_key)})
+
+ email_debug_message(request)
+ auth_tools.send_verification_email(user, request, new_email,
+ rendered_email)
+
+ return redirect(request, 'mediagoblin.edit.account')
+
+ return render_to_response(
+ request,
+ 'mediagoblin/edit/change_email.html',
+ {'form': form,
+ 'user': user})
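
The verification_key round-trip in the new change_email view relies on a timed, signed serializer (get_timed_signer_url appears to wrap an itsdangerous-style serializer). A minimal standalone sketch of that dump-and-verify cycle using itsdangerous directly; the secret key and max_age are placeholders, not MediaGoblin configuration:

    from itsdangerous import URLSafeTimedSerializer

    signer = URLSafeTimedSerializer('placeholder-secret-key')

    # What the view dumps into the verification link...
    token = signer.dumps({'user': 1, 'email': 'new@example.com'})

    # ...and what the verify_email view can later recover, with an age limit.
    payload = signer.loads(token, max_age=60 * 60 * 24)
    print(payload)   # {'user': 1, 'email': 'new@example.com'}
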
diff --git a/mediagoblin/templates/mediagoblin/edit/change_email.html b/mediagoblin/templates/mediagoblin/edit/change_email.html
new file mode 100644
index 00000000..76cc4771
--- /dev/null
+++ b/mediagoblin/templates/mediagoblin/edit/change_email.html
@@ -0,0 +1,45 @@
+{#
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#}
+{% extends "mediagoblin/base.html" %}
+
+{% import "/mediagoblin/utils/wtforms.html" as wtforms_util %}
+
+{% block title -%}
+ {% trans username=user.username -%}
+ Changing {{ username }}'s email
+ {%- endtrans %} — {{ super() }}
+{%- endblock %}
+
+{% block mediagoblin_content %}
+
+{% endblock %}
diff --git a/mediagoblin/templates/mediagoblin/edit/edit_account.html b/mediagoblin/templates/mediagoblin/edit/edit_account.html
index 51293acb..04f9230f 100644
--- a/mediagoblin/templates/mediagoblin/edit/edit_account.html
+++ b/mediagoblin/templates/mediagoblin/edit/edit_account.html
@@ -48,6 +48,11 @@
{% endif %}
+
+
+ {% trans %}Change your email.{% endtrans %}
+
+
{% template_hook("edit_link") %}
{{ wtforms_util.render_divs(form, True) }}