Merge remote-tracking branch 'refs/remotes/rodney757/reprocessing'
Conflicts:
    mediagoblin/processing/task.py
    mediagoblin/submit/lib.py
commit f6497ce572
@@ -45,6 +45,10 @@ SUBCOMMAND_MAP = {
         'setup': 'mediagoblin.gmg_commands.assetlink:assetlink_parser_setup',
         'func': 'mediagoblin.gmg_commands.assetlink:assetlink',
         'help': 'Link assets for themes and plugins for static serving'},
+    'reprocess': {
+        'setup': 'mediagoblin.gmg_commands.reprocess:reprocess_parser_setup',
+        'func': 'mediagoblin.gmg_commands.reprocess:reprocess',
+        'help': 'Reprocess media entries'},
     # 'theme': {
     #     'setup': 'mediagoblin.gmg_commands.theme:theme_parser_setup',
     #     'func': 'mediagoblin.gmg_commands.theme:theme',
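
Each SUBCOMMAND_MAP entry wires a subcommand to its parser-setup and
entry-point callables via "module:callable" strings. A resolver along these
lines would load them (an illustrative sketch; the real loader in
mediagoblin/gmg_commands is not shown in this diff and may differ):

    # Sketch: resolve a "module:callable" spec such as
    # 'mediagoblin.gmg_commands.reprocess:reprocess'.
    def import_callable(spec):
        modpath, funcname = spec.split(':', 1)
        module = __import__(modpath, fromlist=[funcname])
        return getattr(module, funcname)

    reprocess_func = import_callable(
        'mediagoblin.gmg_commands.reprocess:reprocess')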
@@ -16,6 +16,7 @@
 
 from mediagoblin import mg_globals
 from mediagoblin.db.open import setup_connection_and_db_from_config
+from mediagoblin.gmg_commands import util as commands_util
 from mediagoblin.storage.filestorage import BasicFileStorage
 from mediagoblin.init import setup_storage, setup_global_and_app_config
 
@@ -223,6 +224,7 @@ def env_export(args):
     '''
     Export database and media files to a tar archive
     '''
+    commands_util.check_unrecognized_args(args)
     if args.cache_path:
         if os.path.exists(args.cache_path):
             _log.error('The cache directory must not exist '
mediagoblin/gmg_commands/reprocess.py (new file, 302 lines)
@@ -0,0 +1,302 @@
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011, 2012 MediaGoblin contributors.  See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import argparse
+import os
+
+from mediagoblin import mg_globals
+from mediagoblin.db.models import MediaEntry
+from mediagoblin.gmg_commands import util as commands_util
+from mediagoblin.submit.lib import run_process_media
+from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
+from mediagoblin.tools.pluginapi import hook_handle
+from mediagoblin.processing import (
+    ProcessorDoesNotExist, ProcessorNotEligible,
+    get_entry_and_processing_manager, get_processing_manager_for_type,
+    ProcessingManagerDoesNotExist)
+
+
+def reprocess_parser_setup(subparser):
+    subparser.add_argument(
+        '--celery',
+        action='store_true',
+        help="Don't process eagerly, pass off to celery")
+
+    subparsers = subparser.add_subparsers(dest="reprocess_subcommand")
+
+    ###################
+    # available command
+    ###################
+    available_parser = subparsers.add_parser(
+        "available",
+        help="Find out what actions are available for this media")
+
+    available_parser.add_argument(
+        "id_or_type",
+        help="Media id or media type to check")
+
+    available_parser.add_argument(
+        "--action-help",
+        action="store_true",
+        help="List argument help for each action available")
+
+    available_parser.add_argument(
+        "--state",
+        help="The state of media you would like to reprocess")
+
+    #############
+    # run command
+    #############
+    run_parser = subparsers.add_parser(
+        "run",
+        help="Run a reprocessing on one or more media")
+
+    run_parser.add_argument(
+        'media_id',
+        help="The media_entry id(s) you wish to reprocess.")
+
+    run_parser.add_argument(
+        'reprocess_command',
+        help="The reprocess command you intend to run")
+
+    run_parser.add_argument(
+        'reprocess_args',
+        nargs=argparse.REMAINDER,
+        help="rest of arguments to the reprocessing tool")
+
+    ################
+    # thumbs command
+    ################
+    thumbs = subparsers.add_parser(
+        'thumbs',
+        help='Regenerate thumbs for all processed media')
+
+    thumbs.add_argument(
+        '--size',
+        nargs=2,
+        type=int,
+        metavar=('max_width', 'max_height'))
+
+    #################
+    # initial command
+    #################
+    subparsers.add_parser(
+        'initial',
+        help='Reprocess all failed media')
+
+    ##################
+    # bulk_run command
+    ##################
+    bulk_run_parser = subparsers.add_parser(
+        'bulk_run',
+        help='Run reprocessing on a given media type or state')
+
+    bulk_run_parser.add_argument(
+        'type',
+        help='The type of media you would like to process')
+
+    bulk_run_parser.add_argument(
+        '--state',
+        default='processed',
+        nargs='?',
+        help="The state of the media you would like to process. Defaults to"
+             " 'processed'")
+
+    bulk_run_parser.add_argument(
+        'reprocess_command',
+        help='The reprocess command you intend to run')
+
+    bulk_run_parser.add_argument(
+        'reprocess_args',
+        nargs=argparse.REMAINDER,
+        help='The rest of the arguments to the reprocessing tool')
+
+    ###############
+    # help command?
+    ###############
+
+
+def available(args):
+    # Get the media type, either by looking up media id, or by specific type
+    try:
+        media_id = int(args.id_or_type)
+        media_entry, manager = get_entry_and_processing_manager(media_id)
+        media_type = media_entry.media_type
+    except ValueError:
+        media_type = args.id_or_type
+        media_entry = None
+        manager = get_processing_manager_for_type(media_type)
+    except ProcessingManagerDoesNotExist:
+        entry = MediaEntry.query.filter_by(id=args.id_or_type).first()
+        print 'No such processing manager for {0}'.format(entry.media_type)
+
+    if args.state:
+        processors = manager.list_all_processors_by_state(args.state)
+    elif media_entry is None:
+        processors = manager.list_all_processors()
+    else:
+        processors = manager.list_eligible_processors(media_entry)
+
+    print "Available processors:"
+    print "====================="
+    print ""
+
+    if args.action_help:
+        for processor in processors:
+            print processor.name
+            print "-" * len(processor.name)
+
+            parser = processor.generate_parser()
+            parser.print_help()
+            print ""
+
+    else:
+        for processor in processors:
+            if processor.description:
+                print " - %s: %s" % (processor.name, processor.description)
+            else:
+                print " - %s" % processor.name
+
+
+def run(args, media_id=None):
+    if not media_id:
+        media_id = args.media_id
+    try:
+        media_entry, manager = get_entry_and_processing_manager(media_id)
+
+        # TODO: (maybe?) This could probably be handled entirely by the
+        # processor class...
+        try:
+            processor_class = manager.get_processor(
+                args.reprocess_command, media_entry)
+        except ProcessorDoesNotExist:
+            print 'No such processor "%s" for media with id "%s"' % (
+                args.reprocess_command, media_entry.id)
+            return
+        except ProcessorNotEligible:
+            print 'Processor "%s" exists but media "%s" is not eligible' % (
+                args.reprocess_command, media_entry.id)
+            return
+
+        reprocess_parser = processor_class.generate_parser()
+        reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
+        reprocess_request = processor_class.args_to_request(reprocess_args)
+        run_process_media(
+            media_entry,
+            reprocess_action=args.reprocess_command,
+            reprocess_info=reprocess_request)
+
+    except ProcessingManagerDoesNotExist:
+        entry = MediaEntry.query.filter_by(id=media_id).first()
+        print 'No such processing manager for {0}'.format(entry.media_type)
+
+
+def bulk_run(args):
+    """
+    Bulk reprocessing of a given media_type
+    """
+    query = MediaEntry.query.filter_by(media_type=args.type,
+                                       state=args.state)
+
+    for entry in query:
+        run(args, entry.id)
+
+
+def thumbs(args):
+    """
+    Regenerate thumbs for all processed media
+    """
+    query = MediaEntry.query.filter_by(state='processed')
+
+    for entry in query:
+        try:
+            media_entry, manager = get_entry_and_processing_manager(entry.id)
+
+            # TODO: (maybe?) This could probably be handled entirely by the
+            # processor class...
+            try:
+                processor_class = manager.get_processor(
+                    'resize', media_entry)
+            except ProcessorDoesNotExist:
+                print 'No such processor "%s" for media with id "%s"' % (
+                    'resize', media_entry.id)
+                return
+            except ProcessorNotEligible:
+                print 'Processor "%s" exists but media "%s" is not eligible' % (
+                    'resize', media_entry.id)
+                return
+
+            reprocess_parser = processor_class.generate_parser()
+
+            # prepare filetype and size to be passed into reprocess_parser
+            if args.size:
+                extra_args = 'thumb --{0} {1} {2}'.format(
+                    processor_class.thumb_size,
+                    args.size[0],
+                    args.size[1])
+            else:
+                extra_args = 'thumb'
+
+            reprocess_args = reprocess_parser.parse_args(extra_args.split())
+            reprocess_request = processor_class.args_to_request(reprocess_args)
+            run_process_media(
+                media_entry,
+                reprocess_action='resize',
+                reprocess_info=reprocess_request)
+
+        except ProcessingManagerDoesNotExist:
+            print 'No such processing manager for {0}'.format(entry.media_type)
+
+
+def initial(args):
+    """
+    Reprocess all failed media
+    """
+    query = MediaEntry.query.filter_by(state='failed')
+
+    for entry in query:
+        try:
+            media_entry, manager = get_entry_and_processing_manager(entry.id)
+            run_process_media(
+                media_entry,
+                reprocess_action='initial')
+        except ProcessingManagerDoesNotExist:
+            print 'No such processing manager for {0}'.format(entry.media_type)
+
+
+def reprocess(args):
+    # Run eagerly unless explicitly set not to
+    if not args.celery:
+        os.environ['CELERY_ALWAYS_EAGER'] = 'true'
+
+    commands_util.setup_app(args)
+
+    if args.reprocess_subcommand == "run":
+        run(args)
+
+    elif args.reprocess_subcommand == "available":
+        available(args)
+
+    elif args.reprocess_subcommand == "bulk_run":
+        bulk_run(args)
+
+    elif args.reprocess_subcommand == "thumbs":
+        thumbs(args)
+
+    elif args.reprocess_subcommand == "initial":
+        initial(args)
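
Taken together, the new file gives the gmg tool a reprocess subcommand.
Illustrative invocations (the media id 1 is hypothetical; which processors
and flags exist depends on the media type, as shown in the processor diffs
below):

    gmg reprocess available 1
    gmg reprocess available 1 --action-help
    gmg reprocess run 1 resize thumb --size 200 200
    gmg reprocess bulk_run mediagoblin.media_types.image resize medium
    gmg reprocess thumbs --size 100 100
    gmg reprocess initial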
@@ -15,7 +15,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.ascii.processing import process_ascii, \
+from mediagoblin.media_types.ascii.processing import AsciiProcessingManager, \
     sniff_handler
 from mediagoblin.tools import pluginapi
 
@@ -29,7 +29,6 @@ def setup_plugin():
 
 class ASCIIMediaManager(MediaManagerBase):
     human_readable = "ASCII"
-    processor = staticmethod(process_ascii)
     display_template = "mediagoblin/media_displays/ascii.html"
     default_thumb = "images/media_thumbs/ascii.jpg"
 
@@ -43,5 +42,6 @@ hooks = {
     'setup': setup_plugin,
     'get_media_type_and_manager': get_media_type_and_manager,
     ('media_manager', MEDIA_TYPE): lambda: ASCIIMediaManager,
+    ('reprocess_manager', MEDIA_TYPE): lambda: AsciiProcessingManager,
     'sniff_handler': sniff_handler,
 }
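
For context, callers reach a media type's manager through the plugin API.
Given the hook registered above, something like the following yields
AsciiProcessingManager (a sketch assuming hook_handle simply invokes the
registered zero-argument lambda and returns its result):

    from mediagoblin.tools.pluginapi import hook_handle

    MEDIA_TYPE = 'mediagoblin.media_types.ascii'
    manager_class = hook_handle(('reprocess_manager', MEDIA_TYPE))
    manager = manager_class()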
@@ -13,6 +13,7 @@
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+import argparse
 import chardet
 import os
 try:
@@ -22,7 +23,11 @@ except ImportError:
 import logging
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import create_pub_filepath
+from mediagoblin.processing import (
+    create_pub_filepath, FilenameBuilder,
+    MediaProcessor, ProcessingManager,
+    get_process_filename, copy_original,
+    store_public, request_from_args)
 from mediagoblin.media_types.ascii import asciitoimage
 
 _log = logging.getLogger(__name__)
@@ -43,106 +48,202 @@ def sniff_handler(media_file, **kw):
     return None
 
 
-def process_ascii(proc_state):
-    """Code to process a txt file. Will be run by celery.
-
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-    ascii_config = mgg.global_config['media_type:mediagoblin.media_types.ascii']
-
-    # Conversions subdirectory to avoid collisions
-    conversions_subdir = os.path.join(
-        workbench.dir, 'conversions')
-    os.mkdir(conversions_subdir)
-
-    queued_filepath = entry.queued_media_file
-    queued_filename = workbench.localized_file(
-        mgg.queue_store, queued_filepath,
-        'source')
-
-    queued_file = file(queued_filename, 'rb')
-
-    with queued_file:
-        queued_file_charset = chardet.detect(queued_file.read())
-
-        # Only select a non-utf-8 charset if chardet is *really* sure
-        # Tested with "Feli\x0109an superjaron", which was detecte
-        if queued_file_charset['confidence'] < 0.9:
-            interpreted_charset = 'utf-8'
-        else:
-            interpreted_charset = queued_file_charset['encoding']
-
-        _log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
-            queued_file_charset,
-            interpreted_charset))
-
-        queued_file.seek(0)  # Rewind the queued file
-
-        thumb_filepath = create_pub_filepath(
-            entry, 'thumbnail.png')
-
-        tmp_thumb_filename = os.path.join(
-            conversions_subdir, thumb_filepath[-1])
-
-        ascii_converter_args = {}
-
-        if ascii_config['thumbnail_font']:
-            ascii_converter_args.update(
-                {'font': ascii_config['thumbnail_font']})
-
-        converter = asciitoimage.AsciiToImage(
-            **ascii_converter_args)
-
-        thumb = converter._create_image(
-            queued_file.read())
-
-        with file(tmp_thumb_filename, 'w') as thumb_file:
-            thumb.thumbnail(
-                (mgg.global_config['media:thumb']['max_width'],
-                 mgg.global_config['media:thumb']['max_height']),
-                Image.ANTIALIAS)
-            thumb.save(thumb_file)
-
-        _log.debug('Copying local file to public storage')
-        mgg.public_store.copy_local_to_storage(
-            tmp_thumb_filename, thumb_filepath)
-
-        queued_file.seek(0)
-
-        original_filepath = create_pub_filepath(entry, queued_filepath[-1])
-
-        with mgg.public_store.get_file(original_filepath, 'wb') \
-                as original_file:
-            original_file.write(queued_file.read())
-
-        queued_file.seek(0)  # Rewind *again*
-
-        unicode_filepath = create_pub_filepath(entry, 'ascii-portable.txt')
-
-        with mgg.public_store.get_file(unicode_filepath, 'wb') \
-                as unicode_file:
-            # Decode the original file from its detected charset (or UTF8)
-            # Encode the unicode instance to ASCII and replace any non-ASCII
-            # with an HTML entity (&#
-            unicode_file.write(
-                unicode(queued_file.read().decode(
-                    interpreted_charset)).encode(
-                        'ascii',
-                        'xmlcharrefreplace'))
-
-    # Remove queued media file from storage and database.
-    # queued_filepath is in the task_id directory which should
-    # be removed too, but fail if the directory is not empty to be on
-    # the super-safe side.
-    mgg.queue_store.delete_file(queued_filepath)      # rm file
-    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
-    entry.queued_media_file = []
-
-    media_files_dict = entry.setdefault('media_files', {})
-    media_files_dict['thumb'] = thumb_filepath
-    media_files_dict['unicode'] = unicode_filepath
-    media_files_dict['original'] = original_filepath
-
-    entry.save()
+class CommonAsciiProcessor(MediaProcessor):
+    """
+    Provides a base for various ascii processing steps
+    """
+    acceptable_files = ['original', 'unicode']
+
+    def common_setup(self):
+        self.ascii_config = mgg.global_config[
+            'media_type:mediagoblin.media_types.ascii']
+
+        # Conversions subdirectory to avoid collisions
+        self.conversions_subdir = os.path.join(
+            self.workbench.dir, 'conversions')
+        os.mkdir(self.conversions_subdir)
+
+        # Pull down and set up the processing file
+        self.process_filename = get_process_filename(
+            self.entry, self.workbench, self.acceptable_files)
+        self.name_builder = FilenameBuilder(self.process_filename)
+
+        self.charset = None
+
+    def copy_original(self):
+        copy_original(
+            self.entry, self.process_filename,
+            self.name_builder.fill('{basename}{ext}'))
+
+    def _detect_charset(self, orig_file):
+        d_charset = chardet.detect(orig_file.read())
+
+        # Only select a non-utf-8 charset if chardet is *really* sure
+        # Tested with "Feli\x0109an superjaron", which was detected
+        if d_charset['confidence'] < 0.9:
+            self.charset = 'utf-8'
+        else:
+            self.charset = d_charset['encoding']
+
+        _log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
+            d_charset,
+            self.charset))
+
+        # Rewind the file
+        orig_file.seek(0)
+
+    def store_unicode_file(self):
+        with file(self.process_filename, 'rb') as orig_file:
+            self._detect_charset(orig_file)
+            unicode_filepath = create_pub_filepath(self.entry,
+                                                   'ascii-portable.txt')
+
+            with mgg.public_store.get_file(unicode_filepath, 'wb') \
+                    as unicode_file:
+                # Decode the original file from its detected charset (or UTF8)
+                # Encode the unicode instance to ASCII and replace any
+                # non-ASCII with an HTML entity (&#
+                unicode_file.write(
+                    unicode(orig_file.read().decode(
+                        self.charset)).encode(
+                            'ascii',
+                            'xmlcharrefreplace'))
+
+        self.entry.media_files['unicode'] = unicode_filepath
+
+    def generate_thumb(self, font=None, thumb_size=None):
+        with file(self.process_filename, 'rb') as orig_file:
+            # If no font kwarg, check config
+            if not font:
+                font = self.ascii_config.get('thumbnail_font', None)
+            if not thumb_size:
+                thumb_size = (mgg.global_config['media:thumb']['max_width'],
+                              mgg.global_config['media:thumb']['max_height'])
+
+            tmp_thumb = os.path.join(
+                self.conversions_subdir,
+                self.name_builder.fill('{basename}.thumbnail.png'))
+
+            ascii_converter_args = {}
+
+            # If there is a font from either the config or kwarg, update
+            # ascii_converter_args
+            if font:
+                ascii_converter_args.update(
+                    {'font': self.ascii_config['thumbnail_font']})
+
+            converter = asciitoimage.AsciiToImage(
+                **ascii_converter_args)
+
+            thumb = converter._create_image(
+                orig_file.read())
+
+            with file(tmp_thumb, 'w') as thumb_file:
+                thumb.thumbnail(
+                    thumb_size,
+                    Image.ANTIALIAS)
+                thumb.save(thumb_file)
+
+            _log.debug('Copying local file to public storage')
+            store_public(self.entry, 'thumb', tmp_thumb,
+                         self.name_builder.fill('{basename}.thumbnail.jpg'))
+
+
+class InitialProcessor(CommonAsciiProcessor):
+    """
+    Initial processing step for new ascii media
+    """
+    name = "initial"
+    description = "Initial processing"
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        if not state:
+            state = entry.state
+        return state in (
+            "unprocessed", "failed")
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--font',
+            help='the thumbnail font')
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['thumb_size', 'font'])
+
+    def process(self, thumb_size=None, font=None):
+        self.common_setup()
+        self.store_unicode_file()
+        self.generate_thumb(thumb_size=thumb_size, font=font)
+        self.copy_original()
+        self.delete_queue_file()
+
+
+class Resizer(CommonAsciiProcessor):
+    """
+    Resizing process steps for processed media
+    """
+    name = 'resize'
+    description = 'Resize thumbnail'
+    thumb_size = 'thumb_size'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        # Needed for gmg reprocess thumbs to work
+        parser.add_argument(
+            'file',
+            nargs='?',
+            default='thumb',
+            choices=['thumb'])
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['thumb_size', 'file'])
+
+    def process(self, thumb_size=None, file=None):
+        self.common_setup()
+        self.generate_thumb(thumb_size=thumb_size)
+
+
+class AsciiProcessingManager(ProcessingManager):
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.add_processor(InitialProcessor)
+        self.add_processor(Resizer)
@@ -15,7 +15,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.audio.processing import process_audio, \
+from mediagoblin.media_types.audio.processing import AudioProcessingManager, \
     sniff_handler
 from mediagoblin.tools import pluginapi
 
@@ -32,8 +32,8 @@ def setup_plugin():
 
 class AudioMediaManager(MediaManagerBase):
     human_readable = "Audio"
-    processor = staticmethod(process_audio)
     display_template = "mediagoblin/media_displays/audio.html"
+    default_thumb = "images/media_thumbs/image.png"
 
 
 def get_media_type_and_manager(ext):
@@ -45,4 +45,5 @@ hooks = {
     'get_media_type_and_manager': get_media_type_and_manager,
     'sniff_handler': sniff_handler,
     ('media_manager', MEDIA_TYPE): lambda: AudioMediaManager,
+    ('reprocess_manager', MEDIA_TYPE): lambda: AudioProcessingManager,
 }
@@ -14,16 +14,19 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import argparse
 import logging
-from tempfile import NamedTemporaryFile
 import os
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import (create_pub_filepath, BadMediaFail,
-    FilenameBuilder, ProgressCallback)
+from mediagoblin.processing import (
+    BadMediaFail, FilenameBuilder,
+    ProgressCallback, MediaProcessor, ProcessingManager,
+    request_from_args, get_process_filename,
+    store_public, copy_original)
 
-from mediagoblin.media_types.audio.transcoders import (AudioTranscoder,
-    AudioThumbnailer)
+from mediagoblin.media_types.audio.transcoders import (
+    AudioTranscoder, AudioThumbnailer)
 
 _log = logging.getLogger(__name__)
 
@@ -39,121 +42,304 @@ def sniff_handler(media_file, **kw):
         _log.debug('Audio discovery raised BadMediaFail')
         return None
 
-    if data.is_audio == True and data.is_video == False:
+    if data.is_audio is True and data.is_video is False:
         return MEDIA_TYPE
 
     return None
 
 
-def process_audio(proc_state):
-    """Code to process uploaded audio. Will be run by celery.
-
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-    audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']
-
-    queued_filepath = entry.queued_media_file
-    queued_filename = workbench.localized_file(
-        mgg.queue_store, queued_filepath,
-        'source')
-    name_builder = FilenameBuilder(queued_filename)
-
-    webm_audio_filepath = create_pub_filepath(
-        entry,
-        '{original}.webm'.format(
-            original=os.path.splitext(
-                queued_filepath[-1])[0]))
-
-    if audio_config['keep_original']:
-        with open(queued_filename, 'rb') as queued_file:
-            original_filepath = create_pub_filepath(
-                entry, name_builder.fill('{basename}{ext}'))
-
-            with mgg.public_store.get_file(original_filepath, 'wb') as \
-                    original_file:
-                _log.debug('Saving original...')
-                original_file.write(queued_file.read())
-
-            entry.media_files['original'] = original_filepath
-
-    transcoder = AudioTranscoder()
-
-    with NamedTemporaryFile(dir=workbench.dir) as webm_audio_tmp:
-        progress_callback = ProgressCallback(entry)
-
-        transcoder.transcode(
-            queued_filename,
-            webm_audio_tmp.name,
-            quality=audio_config['quality'],
-            progress_callback=progress_callback)
-
-        transcoder.discover(webm_audio_tmp.name)
-
-        _log.debug('Saving medium...')
-        mgg.public_store.get_file(webm_audio_filepath, 'wb').write(
-            webm_audio_tmp.read())
-
-        entry.media_files['webm_audio'] = webm_audio_filepath
-
-        # entry.media_data_init(length=int(data.audiolength))
-
-    if audio_config['create_spectrogram']:
-        spectrogram_filepath = create_pub_filepath(
-            entry,
-            '{original}-spectrogram.jpg'.format(
-                original=os.path.splitext(
-                    queued_filepath[-1])[0]))
-
-        with NamedTemporaryFile(dir=workbench.dir, suffix='.ogg') as wav_tmp:
-            _log.info('Creating OGG source for spectrogram')
-            transcoder.transcode(
-                queued_filename,
-                wav_tmp.name,
-                mux_string='vorbisenc quality={0} ! oggmux'.format(
-                    audio_config['quality']))
-
-            thumbnailer = AudioThumbnailer()
-
-            with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as spectrogram_tmp:
-                thumbnailer.spectrogram(
-                    wav_tmp.name,
-                    spectrogram_tmp.name,
-                    width=mgg.global_config['media:medium']['max_width'],
-                    fft_size=audio_config['spectrogram_fft_size'])
-
-                _log.debug('Saving spectrogram...')
-                mgg.public_store.get_file(spectrogram_filepath, 'wb').write(
-                    spectrogram_tmp.read())
-
-                entry.media_files['spectrogram'] = spectrogram_filepath
-
-                with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as thumb_tmp:
-                    thumbnailer.thumbnail_spectrogram(
-                        spectrogram_tmp.name,
-                        thumb_tmp.name,
-                        (mgg.global_config['media:thumb']['max_width'],
-                         mgg.global_config['media:thumb']['max_height']))
-
-                    thumb_filepath = create_pub_filepath(
-                        entry,
-                        '{original}-thumbnail.jpg'.format(
-                            original=os.path.splitext(
-                                queued_filepath[-1])[0]))
-
-                    mgg.public_store.get_file(thumb_filepath, 'wb').write(
-                        thumb_tmp.read())
-
-                    entry.media_files['thumb'] = thumb_filepath
-    else:
-        entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']
-
-    # Remove queued media file from storage and database.
-    # queued_filepath is in the task_id directory which should
-    # be removed too, but fail if the directory is not empty to be on
-    # the super-safe side.
-    mgg.queue_store.delete_file(queued_filepath)      # rm file
-    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
-    entry.queued_media_file = []
+class CommonAudioProcessor(MediaProcessor):
+    """
+    Provides a base for various audio processing steps
+    """
+    acceptable_files = ['original', 'best_quality', 'webm_audio']
+
+    def common_setup(self):
+        """
+        Set up the workbench directory and pull down the original file, add
+        the audio_config, transcoder, thumbnailer and spectrogram_tmp path
+        """
+        self.audio_config = mgg \
+            .global_config['media_type:mediagoblin.media_types.audio']
+
+        # Pull down and set up the processing file
+        self.process_filename = get_process_filename(
+            self.entry, self.workbench, self.acceptable_files)
+        self.name_builder = FilenameBuilder(self.process_filename)
+
+        self.transcoder = AudioTranscoder()
+        self.thumbnailer = AudioThumbnailer()
+
+    def copy_original(self):
+        if self.audio_config['keep_original']:
+            copy_original(
+                self.entry, self.process_filename,
+                self.name_builder.fill('{basename}{ext}'))
+
+    def _keep_best(self):
+        """
+        If there is no original, keep the best file that we have
+        """
+        if not self.entry.media_files.get('best_quality'):
+            # Save the best quality file if no original?
+            if not self.entry.media_files.get('original') and \
+                    self.entry.media_files.get('webm_audio'):
+                self.entry.media_files['best_quality'] = self.entry \
+                    .media_files['webm_audio']
+
+    def transcode(self, quality=None):
+        if not quality:
+            quality = self.audio_config['quality']
+
+        progress_callback = ProgressCallback(self.entry)
+        webm_audio_tmp = os.path.join(self.workbench.dir,
+                                      self.name_builder.fill(
+                                          '{basename}{ext}'))
+
+        self.transcoder.transcode(
+            self.process_filename,
+            webm_audio_tmp,
+            quality=quality,
+            progress_callback=progress_callback)
+
+        self.transcoder.discover(webm_audio_tmp)
+
+        self._keep_best()
+
+        _log.debug('Saving medium...')
+        store_public(self.entry, 'webm_audio', webm_audio_tmp,
+                     self.name_builder.fill('{basename}.medium.webm'))
+
+    def create_spectrogram(self, max_width=None, fft_size=None):
+        if not max_width:
+            max_width = mgg.global_config['media:medium']['max_width']
+        if not fft_size:
+            fft_size = self.audio_config['spectrogram_fft_size']
+
+        wav_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
+            '{basename}.ogg'))
+
+        _log.info('Creating OGG source for spectrogram')
+        self.transcoder.transcode(
+            self.process_filename,
+            wav_tmp,
+            mux_string='vorbisenc quality={0} ! oggmux'.format(
+                self.audio_config['quality']))
+
+        spectrogram_tmp = os.path.join(self.workbench.dir,
+                                       self.name_builder.fill(
+                                           '{basename}-spectrogram.jpg'))
+
+        self.thumbnailer.spectrogram(
+            wav_tmp,
+            spectrogram_tmp,
+            width=max_width,
+            fft_size=fft_size)
+
+        _log.debug('Saving spectrogram...')
+        store_public(self.entry, 'spectrogram', spectrogram_tmp,
+                     self.name_builder.fill('{basename}.spectrogram.jpg'))
+
+    def generate_thumb(self, size=None):
+        if not size:
+            max_width = mgg.global_config['media:thumb']['max_width']
+            max_height = mgg.global_config['media:thumb']['max_height']
+            size = (max_width, max_height)
+
+        thumb_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
+            '{basename}-thumbnail.jpg'))
+
+        # We need the spectrogram to create a thumbnail
+        spectrogram = self.entry.media_files.get('spectrogram')
+        if not spectrogram:
+            _log.info('No spectrogram found, we will create one.')
+            self.create_spectrogram()
+            spectrogram = self.entry.media_files['spectrogram']
+
+        spectrogram_filepath = mgg.public_store.get_local_path(spectrogram)
+
+        self.thumbnailer.thumbnail_spectrogram(
+            spectrogram_filepath,
+            thumb_tmp,
+            tuple(size))
+
+        store_public(self.entry, 'thumb', thumb_tmp,
+                     self.name_builder.fill('{basename}.thumbnail.jpg'))
+
+
+class InitialProcessor(CommonAudioProcessor):
+    """
+    Initial processing steps for new audio
+    """
+    name = "initial"
+    description = "Initial processing"
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in (
+            "unprocessed", "failed")
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--quality',
+            type=float,
+            help='vorbisenc quality. Range: -0.1..1')
+
+        parser.add_argument(
+            '--fft_size',
+            type=int,
+            help='spectrogram fft size')
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int,
+            help='minimum size is 100 x 100')
+
+        parser.add_argument(
+            '--medium_width',
+            type=int,
+            help='The width of the spectrogram')
+
+        parser.add_argument(
+            '--create_spectrogram',
+            action='store_true',
+            help='Create spectrogram and thumbnail, will default to config')
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['create_spectrogram', 'quality', 'fft_size',
+                   'thumb_size', 'medium_width'])
+
+    def process(self, quality=None, fft_size=None, thumb_size=None,
+                create_spectrogram=None, medium_width=None):
+        self.common_setup()
+
+        if not create_spectrogram:
+            create_spectrogram = self.audio_config['create_spectrogram']
+
+        self.transcode(quality=quality)
+        self.copy_original()
+
+        if create_spectrogram:
+            self.create_spectrogram(max_width=medium_width, fft_size=fft_size)
+            self.generate_thumb(size=thumb_size)
+        self.delete_queue_file()
+
+
+class Resizer(CommonAudioProcessor):
+    """
+    Thumbnail and spectrogram resizing process steps for processed audio
+    """
+    name = 'resize'
+    description = 'Resize thumbnail or spectrogram'
+    thumb_size = 'thumb_size'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media entry is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--fft_size',
+            type=int,
+            help='spectrogram fft size')
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int,
+            help='minimum size is 100 x 100')
+
+        parser.add_argument(
+            '--medium_width',
+            type=int,
+            help='The width of the spectrogram')
+
+        parser.add_argument(
+            'file',
+            choices=['thumb', 'spectrogram'])
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['thumb_size', 'file', 'fft_size', 'medium_width'])
+
+    def process(self, file, thumb_size=None, fft_size=None,
+                medium_width=None):
+        self.common_setup()
+
+        if file == 'thumb':
+            self.generate_thumb(size=thumb_size)
+        elif file == 'spectrogram':
+            self.create_spectrogram(max_width=medium_width, fft_size=fft_size)
+
+
+class Transcoder(CommonAudioProcessor):
+    """
+    Transcoding processing steps for processed audio
+    """
+    name = 'transcode'
+    description = 'Re-transcode audio'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--quality',
+            help='vorbisenc quality. Range: -0.1..1')
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['quality'])
+
+    def process(self, quality=None):
+        self.common_setup()
+        self.transcode(quality=quality)
+
+
+class AudioProcessingManager(ProcessingManager):
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.add_processor(InitialProcessor)
+        self.add_processor(Resizer)
+        self.add_processor(Transcoder)
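
With Transcoder registered alongside InitialProcessor and Resizer,
already-processed audio can be re-encoded from the command line, for example
(hypothetical media id; --quality is the vorbisenc setting defined above):

    gmg reprocess run 42 transcode --quality 0.3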
@@ -122,7 +122,6 @@ class AudioThumbnailer(object):
             int(start_x), 0,
             int(stop_x), int(im_h)))
 
-        if th.size[0] > th_w or th.size[1] > th_h:
-            th.thumbnail(thumb_size, Image.ANTIALIAS)
+        th.thumbnail(thumb_size, Image.ANTIALIAS)
 
         th.save(dst)
@@ -14,12 +14,15 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 import datetime
+import logging
 
 from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.image.processing import process_image, \
-    sniff_handler
+from mediagoblin.media_types.image.processing import sniff_handler, \
+    ImageProcessingManager
 from mediagoblin.tools import pluginapi
 
+_log = logging.getLogger(__name__)
+
+
 ACCEPTED_EXTENSIONS = ["jpg", "jpeg", "png", "gif", "tiff"]
 MEDIA_TYPE = 'mediagoblin.media_types.image'
@@ -31,7 +34,6 @@ def setup_plugin():
 
 class ImageMediaManager(MediaManagerBase):
     human_readable = "Image"
-    processor = staticmethod(process_image)
     display_template = "mediagoblin/media_displays/image.html"
     default_thumb = "images/media_thumbs/image.png"
 
@@ -69,4 +71,5 @@ hooks = {
     'get_media_type_and_manager': get_media_type_and_manager,
     'sniff_handler': sniff_handler,
     ('media_manager', MEDIA_TYPE): lambda: ImageMediaManager,
+    ('reprocess_manager', MEDIA_TYPE): lambda: ImageProcessingManager,
 }
@@ -20,9 +20,14 @@ except ImportError:
     import Image
 import os
 import logging
+import argparse
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import BadMediaFail, FilenameBuilder
+from mediagoblin.processing import (
+    BadMediaFail, FilenameBuilder,
+    MediaProcessor, ProcessingManager,
+    request_from_args, get_process_filename,
+    store_public, copy_original)
 from mediagoblin.tools.exif import exif_fix_image_orientation, \
     extract_exif, clean_exif, get_gps_data, get_useful, \
     exif_image_needs_rotation
@@ -38,8 +43,8 @@ PIL_FILTERS = {
 MEDIA_TYPE = 'mediagoblin.media_types.image'
 
 
-def resize_image(proc_state, resized, keyname, target_name, new_size,
-                 exif_tags, workdir):
+def resize_image(entry, resized, keyname, target_name, new_size,
+                 exif_tags, workdir, quality, filter):
     """
     Store a resized version of an image and return its pathname.
 
@@ -51,17 +56,16 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
     exif_tags -- EXIF data for the original image
     workdir -- directory path for storing converted image files
     new_size -- 2-tuple size for the resized image
+    quality -- level of compression used when resizing images
+    filter -- One of BICUBIC, BILINEAR, NEAREST, ANTIALIAS
     """
-    config = mgg.global_config['media_type:mediagoblin.media_types.image']
-
     resized = exif_fix_image_orientation(resized, exif_tags)  # Fix orientation
 
-    filter_config = config['resize_filter']
     try:
-        resize_filter = PIL_FILTERS[filter_config.upper()]
+        resize_filter = PIL_FILTERS[filter.upper()]
     except KeyError:
         raise Exception('Filter "{0}" not found, choose one of {1}'.format(
-            unicode(filter_config),
+            unicode(filter),
             u', '.join(PIL_FILTERS.keys())))
 
     resized.thumbnail(new_size, resize_filter)
@@ -69,32 +73,36 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
     # Copy the new file to the conversion subdir, then remotely.
     tmp_resized_filename = os.path.join(workdir, target_name)
     with file(tmp_resized_filename, 'w') as resized_file:
-        resized.save(resized_file, quality=config['quality'])
-    proc_state.store_public(keyname, tmp_resized_filename, target_name)
+        resized.save(resized_file, quality=quality)
+    store_public(entry, keyname, tmp_resized_filename, target_name)
 
 
-def resize_tool(proc_state, force, keyname, target_name,
-                conversions_subdir, exif_tags):
-    # filename -- the filename of the original image being resized
-    filename = proc_state.get_queued_filename()
-    max_width = mgg.global_config['media:' + keyname]['max_width']
-    max_height = mgg.global_config['media:' + keyname]['max_height']
+def resize_tool(entry,
+                force, keyname, orig_file, target_name,
+                conversions_subdir, exif_tags, quality, filter, new_size=None):
+    # Use the default size if new_size was not given
+    if not new_size:
+        max_width = mgg.global_config['media:' + keyname]['max_width']
+        max_height = mgg.global_config['media:' + keyname]['max_height']
+        new_size = (max_width, max_height)
 
     # If the size of the original file exceeds the specified size for the desired
     # file, a target_name file is created and later associated with the media
     # entry.
     # Also created if the file needs rotation, or if forced.
     try:
-        im = Image.open(filename)
+        im = Image.open(orig_file)
     except IOError:
         raise BadMediaFail()
     if force \
-            or im.size[0] > max_width \
-            or im.size[1] > max_height \
+            or im.size[0] > new_size[0]\
+            or im.size[1] > new_size[1]\
             or exif_image_needs_rotation(exif_tags):
         resize_image(
-            proc_state, im, unicode(keyname), target_name,
-            (max_width, max_height),
-            exif_tags, conversions_subdir)
+            entry, im, unicode(keyname), target_name,
+            tuple(new_size),
+            exif_tags, conversions_subdir,
+            quality, filter)
 
 
 SUPPORTED_FILETYPES = ['png', 'gif', 'jpg', 'jpeg', 'tiff']
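
Under the new signature, callers pass the source file plus quality and filter
explicitly, with new_size optional. A call mirroring generate_thumb in
image/processing.py would look like this (values illustrative):

    resize_tool(entry, True, 'thumb', process_filename,
                name_builder.fill('{basename}.thumbnail{ext}'),
                conversions_subdir, exif_tags, 90, 'ANTIALIAS',
                new_size=(180, 180))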
@@ -119,53 +127,210 @@ def sniff_handler(media_file, **kw):
     return None
 
 
-def process_image(proc_state):
-    """Code to process an image. Will be run by celery.
-
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-
-    # Conversions subdirectory to avoid collisions
-    conversions_subdir = os.path.join(
-        workbench.dir, 'conversions')
-    os.mkdir(conversions_subdir)
-
-    queued_filename = proc_state.get_queued_filename()
-    name_builder = FilenameBuilder(queued_filename)
-
-    # EXIF extraction
-    exif_tags = extract_exif(queued_filename)
-    gps_data = get_gps_data(exif_tags)
-
-    # Always create a small thumbnail
-    resize_tool(proc_state, True, 'thumb',
-                name_builder.fill('{basename}.thumbnail{ext}'),
-                conversions_subdir, exif_tags)
-
-    # Possibly create a medium
-    resize_tool(proc_state, False, 'medium',
-                name_builder.fill('{basename}.medium{ext}'),
-                conversions_subdir, exif_tags)
-
-    # Copy our queued local workbench to its final destination
-    proc_state.copy_original(name_builder.fill('{basename}{ext}'))
-
-    # Remove queued media file from storage and database
-    proc_state.delete_queue_file()
-
-    # Insert exif data into database
-    exif_all = clean_exif(exif_tags)
-
-    if len(exif_all):
-        entry.media_data_init(exif_all=exif_all)
-
-    if len(gps_data):
-        for key in list(gps_data.keys()):
-            gps_data['gps_' + key] = gps_data.pop(key)
-        entry.media_data_init(**gps_data)
+class CommonImageProcessor(MediaProcessor):
+    """
+    Provides a base for various media processing steps
+    """
+    # list of acceptable file keys in order of preference for reprocessing
+    acceptable_files = ['original', 'medium']
+
+    def common_setup(self):
+        """
+        Set up the workbench directory and pull down the original file
+        """
+        self.image_config = mgg.global_config[
+            'media_type:mediagoblin.media_types.image']
+
+        ## @@: Should this be two functions?
+        # Conversions subdirectory to avoid collisions
+        self.conversions_subdir = os.path.join(
+            self.workbench.dir, 'conversions')
+        os.mkdir(self.conversions_subdir)
+
+        # Pull down and set up the processing file
+        self.process_filename = get_process_filename(
+            self.entry, self.workbench, self.acceptable_files)
+        self.name_builder = FilenameBuilder(self.process_filename)
+
+        # Exif extraction
+        self.exif_tags = extract_exif(self.process_filename)
+
+    def generate_medium_if_applicable(self, size=None, quality=None,
+                                      filter=None):
+        if not quality:
+            quality = self.image_config['quality']
+        if not filter:
+            filter = self.image_config['resize_filter']
+
+        resize_tool(self.entry, False, 'medium', self.process_filename,
+                    self.name_builder.fill('{basename}.medium{ext}'),
+                    self.conversions_subdir, self.exif_tags, quality,
+                    filter, size)
+
+    def generate_thumb(self, size=None, quality=None, filter=None):
+        if not quality:
+            quality = self.image_config['quality']
+        if not filter:
+            filter = self.image_config['resize_filter']
+
+        resize_tool(self.entry, True, 'thumb', self.process_filename,
+                    self.name_builder.fill('{basename}.thumbnail{ext}'),
+                    self.conversions_subdir, self.exif_tags, quality,
+                    filter, size)
+
+    def copy_original(self):
+        copy_original(
+            self.entry, self.process_filename,
+            self.name_builder.fill('{basename}{ext}'))
+
+    def extract_metadata(self):
+        # Is there any GPS data
+        gps_data = get_gps_data(self.exif_tags)
+
+        # Insert exif data into database
+        exif_all = clean_exif(self.exif_tags)
+
+        if len(exif_all):
+            self.entry.media_data_init(exif_all=exif_all)
+
+        if len(gps_data):
+            for key in list(gps_data.keys()):
+                gps_data['gps_' + key] = gps_data.pop(key)
+            self.entry.media_data_init(**gps_data)
+
+
+class InitialProcessor(CommonImageProcessor):
+    """
+    Initial processing step for new images
+    """
+    name = "initial"
+    description = "Initial processing"
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in (
+            "unprocessed", "failed")
+
+    ###############################
+    # Command line interface things
+    ###############################
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--thumb-size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--filter',
+            choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+        parser.add_argument(
+            '--quality',
+            type=int,
+            help='level of compression used when resizing images')
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['size', 'thumb_size', 'filter', 'quality'])
+
+    def process(self, size=None, thumb_size=None, quality=None, filter=None):
+        self.common_setup()
+        self.generate_medium_if_applicable(size=size, filter=filter,
+                                           quality=quality)
+        self.generate_thumb(size=thumb_size, filter=filter, quality=quality)
+        self.copy_original()
+        self.extract_metadata()
+        self.delete_queue_file()
+
+
+class Resizer(CommonImageProcessor):
+    """
+    Resizing process steps for processed media
+    """
+    name = 'resize'
+    description = 'Resize image'
+    thumb_size = 'size'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    ###############################
+    # Command line interface things
+    ###############################
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--filter',
+            choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+        parser.add_argument(
+            '--quality',
+            type=int,
+            help='level of compression used when resizing images')
+
+        parser.add_argument(
+            'file',
+            choices=['medium', 'thumb'])
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['size', 'file', 'quality', 'filter'])
+
+    def process(self, file, size=None, filter=None, quality=None):
+        self.common_setup()
+        if file == 'medium':
+            self.generate_medium_if_applicable(size=size, filter=filter,
|
||||||
|
quality=quality)
|
||||||
|
elif file == 'thumb':
|
||||||
|
self.generate_thumb(size=size, filter=filter, quality=quality)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageProcessingManager(ProcessingManager):
|
||||||
|
def __init__(self):
|
||||||
|
super(self.__class__, self).__init__()
|
||||||
|
self.add_processor(InitialProcessor)
|
||||||
|
self.add_processor(Resizer)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
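
How the new class-based image pipeline is driven is easiest to see in a
small sketch (not part of this commit; the manager/entry lookup here is
assumed, and the real call sites are the reprocess command and the celery
task):

    from mediagoblin.media_types.image.processing import ImageProcessingManager

    manager = ImageProcessingManager()
    # Processors are registered by name; 'initial' and 'resize' come from
    # the add_processor calls above.
    processor_class = manager.processors['resize']

    # MediaProcessor subclasses are context managers: __enter__ creates a
    # workbench, __exit__ destroys it (see the mediagoblin/processing diff
    # further down).
    with processor_class(manager, entry) as processor:  # entry: a MediaEntry
        processor.process(file='thumb', size=(180, 180))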
mediagoblin/media_types/pdf/__init__.py
@@ -15,7 +15,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.pdf.processing import process_pdf, \
+from mediagoblin.media_types.pdf.processing import PdfProcessingManager, \
     sniff_handler
 from mediagoblin.tools import pluginapi
 
@@ -29,7 +29,6 @@ def setup_plugin():
 
 class PDFMediaManager(MediaManagerBase):
     human_readable = "PDF"
-    processor = staticmethod(process_pdf)
     display_template = "mediagoblin/media_displays/pdf.html"
     default_thumb = "images/media_thumbs/pdf.jpg"
 
@@ -44,4 +43,5 @@ hooks = {
     'get_media_type_and_manager': get_media_type_and_manager,
     'sniff_handler': sniff_handler,
     ('media_manager', MEDIA_TYPE): lambda: PDFMediaManager,
+    ('reprocess_manager', MEDIA_TYPE): lambda: PdfProcessingManager,
 }
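
The new ('reprocess_manager', MEDIA_TYPE) hook is what lets core code find
this manager. A sketch of the lookup, assuming the standard hook_handle
helper (the exact core-side function is not shown in this excerpt):

    from mediagoblin.tools.pluginapi import hook_handle

    def get_manager_for_type(media_type):
        # hook_handle returns the first hook result; here that is the
        # lambda registered above, which we call to get the manager class.
        manager_class = hook_handle(('reprocess_manager', media_type))
        return manager_class() if manager_class else None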
mediagoblin/media_types/pdf/processing.py
@@ -13,14 +13,18 @@
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+import argparse
 import os
 import logging
 import dateutil.parser
 from subprocess import PIPE, Popen
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import (create_pub_filepath,
-                                    FilenameBuilder, BadMediaFail)
+from mediagoblin.processing import (
+    FilenameBuilder, BadMediaFail,
+    MediaProcessor, ProcessingManager,
+    request_from_args, get_process_filename,
+    store_public, copy_original)
 from mediagoblin.tools.translate import fake_ugettext_passthrough as _
 
 _log = logging.getLogger(__name__)
@@ -230,51 +234,207 @@ def pdf_info(original):
 
     return ret_dict
 
-def process_pdf(proc_state):
-    """Code to process a pdf file. Will be run by celery.
 
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-
-    queued_filename = proc_state.get_queued_filename()
-    name_builder = FilenameBuilder(queued_filename)
-
-    # Copy our queued local workbench to its final destination
-    original_dest = name_builder.fill('{basename}{ext}')
-    proc_state.copy_original(original_dest)
-
-    # Create a pdf if this is a different doc, store pdf for viewer
-    ext = queued_filename.rsplit('.', 1)[-1].lower()
-    if ext == 'pdf':
-        pdf_filename = queued_filename
-    else:
-        pdf_filename = queued_filename.rsplit('.', 1)[0] + '.pdf'
+class CommonPdfProcessor(MediaProcessor):
+    """
+    Provides a base for various pdf processing steps
+    """
+    acceptable_files = ['original', 'pdf']
+
+    def common_setup(self):
+        """
+        Set up common pdf processing steps
+        """
+        # Pull down and set up the processing file
+        self.process_filename = get_process_filename(
+            self.entry, self.workbench, self.acceptable_files)
+        self.name_builder = FilenameBuilder(self.process_filename)
+
+        self._set_pdf_filename()
+
+    def _set_pdf_filename(self):
+        if self.name_builder.ext == '.pdf':
+            self.pdf_filename = self.process_filename
+        elif self.entry.media_files.get('pdf'):
+            self.pdf_filename = self.workbench.localized_file(
+                mgg.public_store, self.entry.media_files['pdf'])
+        else:
+            self.pdf_filename = self._generate_pdf()
+
+    def copy_original(self):
+        copy_original(
+            self.entry, self.process_filename,
+            self.name_builder.fill('{basename}{ext}'))
+
+    def generate_thumb(self, thumb_size=None):
+        if not thumb_size:
+            thumb_size = (mgg.global_config['media:thumb']['max_width'],
+                          mgg.global_config['media:thumb']['max_height'])
+
+        # Note: pdftocairo adds '.png', so don't include an ext
+        thumb_filename = os.path.join(self.workbench.dir,
+                                      self.name_builder.fill(
+                                          '{basename}.thumbnail'))
+
+        executable = where('pdftocairo')
+        args = [executable, '-scale-to', str(min(thumb_size)),
+                '-singlefile', '-png', self.pdf_filename, thumb_filename]
+
+        _log.debug('calling {0}'.format(repr(' '.join(args))))
+        Popen(executable=executable, args=args).wait()
+
+        # since pdftocairo added '.png', we need to include it with the
+        # filename
+        store_public(self.entry, 'thumb', thumb_filename + '.png',
+                     self.name_builder.fill('{basename}.thumbnail.png'))
+
+    def _generate_pdf(self):
+        """
+        Store the pdf. If the file is not a pdf, make it a pdf
+        """
+        tmp_pdf = self.process_filename
+
         unoconv = where('unoconv')
         Popen(executable=unoconv,
-              args=[unoconv, '-v', '-f', 'pdf', queued_filename]).wait()
-        if not os.path.exists(pdf_filename):
+              args=[unoconv, '-v', '-f', 'pdf', self.process_filename]).wait()
+
+        if not os.path.exists(tmp_pdf):
             _log.debug('unoconv failed to convert file to pdf')
             raise BadMediaFail()
-        proc_state.store_public(keyname=u'pdf', local_file=pdf_filename)
-
-    pdf_info_dict = pdf_info(pdf_filename)
-
-    for name, width, height in [
-        (u'thumb', mgg.global_config['media:thumb']['max_width'],
-                   mgg.global_config['media:thumb']['max_height']),
-        (u'medium', mgg.global_config['media:medium']['max_width'],
-                    mgg.global_config['media:medium']['max_height']),
-        ]:
-        filename = name_builder.fill('{basename}.%s.png' % name)
-        path = workbench.joinpath(filename)
-        create_pdf_thumb(pdf_filename, path, width, height)
-        assert(os.path.exists(path))
-        proc_state.store_public(keyname=name, local_file=path)
-
-    proc_state.delete_queue_file()
-
-    entry.media_data_init(**pdf_info_dict)
-    entry.save()
+
+        store_public(self.entry, 'pdf', tmp_pdf,
+                     self.name_builder.fill('{basename}.pdf'))
+
+        return self.workbench.localized_file(
+            mgg.public_store, self.entry.media_files['pdf'])
+
+    def extract_pdf_info(self):
+        pdf_info_dict = pdf_info(self.pdf_filename)
+        self.entry.media_data_init(**pdf_info_dict)
+
+    def generate_medium(self, size=None):
+        if not size:
+            size = (mgg.global_config['media:medium']['max_width'],
+                    mgg.global_config['media:medium']['max_height'])
+
+        # Note: pdftocairo adds '.png', so don't include an ext
+        filename = os.path.join(self.workbench.dir,
+                                self.name_builder.fill('{basename}.medium'))
+
+        executable = where('pdftocairo')
+        args = [executable, '-scale-to', str(min(size)),
+                '-singlefile', '-png', self.pdf_filename, filename]
+
+        _log.debug('calling {0}'.format(repr(' '.join(args))))
+        Popen(executable=executable, args=args).wait()
+
+        # since pdftocairo added '.png', we need to include it with the
+        # filename
+        store_public(self.entry, 'medium', filename + '.png',
+                     self.name_builder.fill('{basename}.medium.png'))
+
+
+class InitialProcessor(CommonPdfProcessor):
+    """
+    Initial processing step for new pdfs
+    """
+    name = "initial"
+    description = "Initial processing"
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in (
+            "unprocessed", "failed")
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--thumb-size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['size', 'thumb_size'])
+
+    def process(self, size=None, thumb_size=None):
+        self.common_setup()
+        self.extract_pdf_info()
+        self.copy_original()
+        self.generate_medium(size=size)
+        self.generate_thumb(thumb_size=thumb_size)
+        self.delete_queue_file()
+
+
+class Resizer(CommonPdfProcessor):
+    """
+    Resizing process steps for processed pdfs
+    """
+    name = 'resize'
+    description = 'Resize thumbnail and medium'
+    thumb_size = 'size'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            'file',
+            choices=['medium', 'thumb'])
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['size', 'file'])
+
+    def process(self, file, size=None):
+        self.common_setup()
+        if file == 'medium':
+            self.generate_medium(size=size)
+        elif file == 'thumb':
+            self.generate_thumb(thumb_size=size)
+
+
+class PdfProcessingManager(ProcessingManager):
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.add_processor(InitialProcessor)
+        self.add_processor(Resizer)
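
Both generate_thumb and generate_medium shell out to pdftocairo with
-scale-to set to the smaller edge of the target box. Expressed standalone
(paths invented for illustration):

    from subprocess import Popen

    executable = '/usr/bin/pdftocairo'       # what where('pdftocairo') finds
    args = [executable, '-scale-to', '180',  # str(min(thumb_size))
            '-singlefile', '-png',
            '/tmp/workbench/doc.pdf',        # self.pdf_filename
            '/tmp/workbench/doc.thumbnail']  # no ext; pdftocairo appends .png
    Popen(executable=executable, args=args).wait()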
mediagoblin/media_types/stl/__init__.py
@@ -15,7 +15,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.stl.processing import process_stl, \
+from mediagoblin.media_types.stl.processing import StlProcessingManager, \
     sniff_handler
 from mediagoblin.tools import pluginapi
 
@@ -29,7 +29,6 @@ def setup_plugin():
 
 class STLMediaManager(MediaManagerBase):
     human_readable = "stereo lithographics"
-    processor = staticmethod(process_stl)
    display_template = "mediagoblin/media_displays/stl.html"
     default_thumb = "images/media_thumbs/video.jpg"
 
@@ -43,4 +42,5 @@ hooks = {
     'get_media_type_and_manager': get_media_type_and_manager,
     'sniff_handler': sniff_handler,
     ('media_manager', MEDIA_TYPE): lambda: STLMediaManager,
+    ('reprocess_manager', MEDIA_TYPE): lambda: StlProcessingManager,
 }
mediagoblin/media_types/stl/processing.py
@@ -14,6 +14,7 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import argparse
 import os
 import json
 import logging
@@ -21,8 +22,11 @@ import subprocess
 import pkg_resources
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import create_pub_filepath, \
-    FilenameBuilder
+from mediagoblin.processing import (
+    FilenameBuilder, MediaProcessor,
+    ProcessingManager, request_from_args,
+    get_process_filename, store_public,
+    copy_original)
 
 from mediagoblin.media_types.stl import model_loader
 
@@ -75,49 +79,61 @@ def blender_render(config):
         env=env)
 
 
-def process_stl(proc_state):
-    """Code to process an stl or obj model. Will be run by celery.
-
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-
-    queued_filepath = entry.queued_media_file
-    queued_filename = workbench.localized_file(
-        mgg.queue_store, queued_filepath, 'source')
-    name_builder = FilenameBuilder(queued_filename)
-
-    ext = queued_filename.lower().strip()[-4:]
-    if ext.startswith("."):
-        ext = ext[1:]
-    else:
-        ext = None
-
-    # Attempt to parse the model file and divine some useful
-    # information about it.
-    with open(queued_filename, 'rb') as model_file:
-        model = model_loader.auto_detect(model_file, ext)
-
-    # generate preview images
-    greatest = [model.width, model.height, model.depth]
-    greatest.sort()
-    greatest = greatest[-1]
-
-    def snap(name, camera, width=640, height=640, project="ORTHO"):
-        filename = name_builder.fill(name)
-        workbench_path = workbench.joinpath(filename)
+class CommonStlProcessor(MediaProcessor):
+    """
+    Provides a common base for various stl processing steps
+    """
+    acceptable_files = ['original']
+
+    def common_setup(self):
+        # Pull down and set up the processing file
+        self.process_filename = get_process_filename(
+            self.entry, self.workbench, self.acceptable_files)
+        self.name_builder = FilenameBuilder(self.process_filename)
+
+        self._set_ext()
+        self._set_model()
+        self._set_greatest()
+
+    def _set_ext(self):
+        ext = self.name_builder.ext[1:]
+
+        if not ext:
+            ext = None
+
+        self.ext = ext
+
+    def _set_model(self):
+        """
+        Attempt to parse the model file and divine some useful
+        information about it.
+        """
+        with open(self.process_filename, 'rb') as model_file:
+            self.model = model_loader.auto_detect(model_file, self.ext)
+
+    def _set_greatest(self):
+        greatest = [self.model.width, self.model.height, self.model.depth]
+        greatest.sort()
+        self.greatest = greatest[-1]
+
+    def copy_original(self):
+        copy_original(
+            self.entry, self.process_filename,
+            self.name_builder.fill('{basename}{ext}'))
+
+    def _snap(self, keyname, name, camera, size, project="ORTHO"):
+        filename = self.name_builder.fill(name)
+        workbench_path = self.workbench.joinpath(filename)
         shot = {
-            "model_path": queued_filename,
-            "model_ext": ext,
+            "model_path": self.process_filename,
+            "model_ext": self.ext,
             "camera_coord": camera,
-            "camera_focus": model.average,
-            "camera_clip": greatest*10,
-            "greatest": greatest,
+            "camera_focus": self.model.average,
+            "camera_clip": self.greatest*10,
+            "greatest": self.greatest,
             "projection": project,
-            "width": width,
-            "height": height,
+            "width": size[0],
+            "height": size[1],
            "out_file": workbench_path,
            }
         blender_render(shot)
@@ -126,70 +142,191 @@ def process_stl(proc_state):
         assert os.path.exists(workbench_path)
 
         # copy it up!
-        with open(workbench_path, 'rb') as rendered_file:
-            public_path = create_pub_filepath(entry, filename)
-
-            with mgg.public_store.get_file(public_path, "wb") as public_file:
-                public_file.write(rendered_file.read())
-
-        return public_path
-
-    thumb_path = snap(
-        "{basename}.thumb.jpg",
-        [0, greatest*-1.5, greatest],
-        mgg.global_config['media:thumb']['max_width'],
-        mgg.global_config['media:thumb']['max_height'],
-        project="PERSP")
-
-    perspective_path = snap(
-        "{basename}.perspective.jpg",
-        [0, greatest*-1.5, greatest], project="PERSP")
-
-    topview_path = snap(
-        "{basename}.top.jpg",
-        [model.average[0], model.average[1], greatest*2])
-
-    frontview_path = snap(
-        "{basename}.front.jpg",
-        [model.average[0], greatest*-2, model.average[2]])
-
-    sideview_path = snap(
-        "{basename}.side.jpg",
-        [greatest*-2, model.average[1], model.average[2]])
-
-    ## Save the public file stuffs
-    model_filepath = create_pub_filepath(
-        entry, name_builder.fill('{basename}{ext}'))
-
-    with mgg.public_store.get_file(model_filepath, 'wb') as model_file:
-        with open(queued_filename, 'rb') as queued_file:
-            model_file.write(queued_file.read())
-
-    # Remove queued media file from storage and database.
-    # queued_filepath is in the task_id directory which should
-    # be removed too, but fail if the directory is not empty to be on
-    # the super-safe side.
-    mgg.queue_store.delete_file(queued_filepath)      # rm file
-    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
-    entry.queued_media_file = []
-
-    # Insert media file information into database
-    media_files_dict = entry.setdefault('media_files', {})
-    media_files_dict[u'original'] = model_filepath
-    media_files_dict[u'thumb'] = thumb_path
-    media_files_dict[u'perspective'] = perspective_path
-    media_files_dict[u'top'] = topview_path
-    media_files_dict[u'side'] = sideview_path
-    media_files_dict[u'front'] = frontview_path
-
-    # Put model dimensions into the database
-    dimensions = {
-        "center_x" : model.average[0],
-        "center_y" : model.average[1],
-        "center_z" : model.average[2],
-        "width" : model.width,
-        "height" : model.height,
-        "depth" : model.depth,
-        "file_type" : ext,
-        }
-    entry.media_data_init(**dimensions)
+        store_public(self.entry, keyname, workbench_path, filename)
+
+    def generate_thumb(self, thumb_size=None):
+        if not thumb_size:
+            thumb_size = (mgg.global_config['media:thumb']['max_width'],
+                          mgg.global_config['media:thumb']['max_height'])
+
+        self._snap(
+            "thumb",
+            "{basename}.thumb.jpg",
+            [0, self.greatest*-1.5, self.greatest],
+            thumb_size,
+            project="PERSP")
+
+    def generate_perspective(self, size=None):
+        if not size:
+            size = (mgg.global_config['media:medium']['max_width'],
+                    mgg.global_config['media:medium']['max_height'])
+
+        self._snap(
+            "perspective",
+            "{basename}.perspective.jpg",
+            [0, self.greatest*-1.5, self.greatest],
+            size,
+            project="PERSP")
+
+    def generate_topview(self, size=None):
+        if not size:
+            size = (mgg.global_config['media:medium']['max_width'],
+                    mgg.global_config['media:medium']['max_height'])
+
+        self._snap(
+            "top",
+            "{basename}.top.jpg",
+            [self.model.average[0], self.model.average[1],
+             self.greatest*2],
+            size)
+
+    def generate_frontview(self, size=None):
+        if not size:
+            size = (mgg.global_config['media:medium']['max_width'],
+                    mgg.global_config['media:medium']['max_height'])
+
+        self._snap(
+            "front",
+            "{basename}.front.jpg",
+            [self.model.average[0], self.greatest*-2,
+             self.model.average[2]],
+            size)
+
+    def generate_sideview(self, size=None):
+        if not size:
+            size = (mgg.global_config['media:medium']['max_width'],
+                    mgg.global_config['media:medium']['max_height'])
+
+        self._snap(
+            "side",
+            "{basename}.side.jpg",
+            [self.greatest*-2, self.model.average[1],
+             self.model.average[2]],
+            size)
+
+    def store_dimensions(self):
+        """
+        Put model dimensions into the database
+        """
+        dimensions = {
+            "center_x": self.model.average[0],
+            "center_y": self.model.average[1],
+            "center_z": self.model.average[2],
+            "width": self.model.width,
+            "height": self.model.height,
+            "depth": self.model.depth,
+            "file_type": self.ext,
+            }
+        self.entry.media_data_init(**dimensions)
+
+
+class InitialProcessor(CommonStlProcessor):
+    """
+    Initial processing step for new stls
+    """
+    name = "initial"
+    description = "Initial processing"
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in (
+            "unprocessed", "failed")
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['size', 'thumb_size'])
+
+    def process(self, size=None, thumb_size=None):
+        self.common_setup()
+        self.generate_thumb(thumb_size=thumb_size)
+        self.generate_perspective(size=size)
+        self.generate_topview(size=size)
+        self.generate_frontview(size=size)
+        self.generate_sideview(size=size)
+        self.store_dimensions()
+        self.copy_original()
+        self.delete_queue_file()
+
+
+class Resizer(CommonStlProcessor):
+    """
+    Resizing process steps for processed stls
+    """
+    name = 'resize'
+    description = 'Resize thumbnail and mediums'
+    thumb_size = 'size'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            'file',
+            choices=['medium', 'thumb'])
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['size', 'file'])
+
+    def process(self, file, size=None):
+        self.common_setup()
+        if file == 'medium':
+            self.generate_perspective(size=size)
+            self.generate_topview(size=size)
+            self.generate_frontview(size=size)
+            self.generate_sideview(size=size)
+        elif file == 'thumb':
+            self.generate_thumb(thumb_size=size)
+
+
+class StlProcessingManager(ProcessingManager):
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.add_processor(InitialProcessor)
+        self.add_processor(Resizer)
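
Each of the five views is just a different camera position fed to _snap,
which packs it into the shot dict for blender_render. For a model whose
largest dimension is self.greatest, the top view amounts to (values
illustrative only):

    self._snap(
        "top",                      # media_files keyname
        "{basename}.top.jpg",       # FilenameBuilder template
        [self.model.average[0],     # camera centered above the model
         self.model.average[1],
         self.greatest * 2],
        (640, 640))                 # default ORTHO projection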
mediagoblin/media_types/video/__init__.py
@@ -15,7 +15,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.video.processing import process_video, \
+from mediagoblin.media_types.video.processing import VideoProcessingManager, \
     sniff_handler
 from mediagoblin.tools import pluginapi
 
@@ -30,12 +30,11 @@ def setup_plugin():
 
 class VideoMediaManager(MediaManagerBase):
     human_readable = "Video"
-    processor = staticmethod(process_video)
     display_template = "mediagoblin/media_displays/video.html"
     default_thumb = "images/media_thumbs/video.jpg"
 
     # Used by the media_entry.get_display_media method
-    media_fetch_order = [u'webm_640', u'original']
+    media_fetch_order = [u'webm_video', u'original']
     default_webm_type = 'video/webm; codecs="vp8, vorbis"'
 
 
@@ -48,4 +47,5 @@ hooks = {
     'get_media_type_and_manager': get_media_type_and_manager,
     'sniff_handler': sniff_handler,
     ('media_manager', MEDIA_TYPE): lambda: VideoMediaManager,
+    ('reprocess_manager', MEDIA_TYPE): lambda: VideoProcessingManager,
 }
mediagoblin/media_types/video/migrations.py
@@ -20,6 +20,7 @@ from sqlalchemy import MetaData, Column, Unicode
 
 MIGRATIONS = {}
 
+
 @RegisterMigration(1, MIGRATIONS)
 def add_orig_metadata_column(db_conn):
     metadata = MetaData(bind=db_conn.bind)
@@ -30,3 +31,19 @@ def add_orig_metadata_column(db_conn):
                  default=None, nullable=True)
     col.create(vid_data)
     db_conn.commit()
+
+
+@RegisterMigration(2, MIGRATIONS)
+def webm_640_to_webm_video(db):
+    metadata = MetaData(bind=db.bind)
+
+    file_keynames = inspect_table(metadata, 'core__file_keynames')
+
+    for row in db.execute(file_keynames.select()):
+        if row.name == 'webm_640':
+            db.execute(
+                file_keynames.update(). \
+                    where(file_keynames.c.id==row.id).\
+                    values(name='webm_video'))
+
+    db.commit()
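
Migration 2 renames the stored file keyname so that entries processed
before this branch keep working with the new 'webm_video' key. The loop is
equivalent to a single UPDATE (sketch; note that inspect_table is assumed
to be imported from mediagoblin.db.migration_tools, which this hunk does
not show):

    db.execute(
        file_keynames.update().
            where(file_keynames.c.name == 'webm_640').
            values(name='webm_video'))
    db.commit()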
mediagoblin/media_types/video/models.py
@@ -68,7 +68,7 @@ class VideoData(Base):
         """
         orig_metadata = self.orig_metadata or {}
 
-        if "webm_640" not in self.get_media_entry.media_files \
+        if "webm_video" not in self.get_media_entry.media_files \
                 and "mimetype" in orig_metadata \
                 and "tags" in orig_metadata \
                 and "audio-codec" in orig_metadata["tags"] \
mediagoblin/media_types/video/processing.py
@@ -14,13 +14,18 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import argparse
 import os.path
 import logging
 import datetime
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import \
-    create_pub_filepath, FilenameBuilder, BaseProcessingFail, ProgressCallback
+from mediagoblin.processing import (
+    FilenameBuilder, BaseProcessingFail,
+    ProgressCallback, MediaProcessor,
+    ProcessingManager, request_from_args,
+    get_process_filename, store_public,
+    copy_original)
 from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
 
 from . import transcoders
@@ -51,112 +56,12 @@ def sniff_handler(media_file, **kw):
             kw.get('media')))
         return None
 
-    if data['is_video'] == True:
+    if data['is_video'] is True:
         return MEDIA_TYPE
 
     return None
 
 
-def process_video(proc_state):
-    """
-    Process a video entry, transcode the queued media files (originals) and
-    create a thumbnail for the entry.
-
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']
-
-    queued_filepath = entry.queued_media_file
-    queued_filename = proc_state.get_queued_filename()
-    name_builder = FilenameBuilder(queued_filename)
-
-    medium_basename = name_builder.fill('{basename}-640p.webm')
-    medium_filepath = create_pub_filepath(entry, medium_basename)
-
-    thumbnail_basename = name_builder.fill('{basename}.thumbnail.jpg')
-    thumbnail_filepath = create_pub_filepath(entry, thumbnail_basename)
-
-    # Create a temporary file for the video destination (cleaned up with workbench)
-    tmp_dst = os.path.join(workbench.dir, medium_basename)
-    # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
-    progress_callback = ProgressCallback(entry)
-
-    dimensions = (
-        mgg.global_config['media:medium']['max_width'],
-        mgg.global_config['media:medium']['max_height'])
-
-    # Extract metadata and keep a record of it
-    metadata = transcoders.VideoTranscoder().discover(queued_filename)
-    store_metadata(entry, metadata)
-
-    # Figure out whether or not we need to transcode this video or
-    # if we can skip it
-    if skip_transcode(metadata):
-        _log.debug('Skipping transcoding')
-
-        dst_dimensions = metadata['videowidth'], metadata['videoheight']
-
-        # Push original file to public storage
-        _log.debug('Saving original...')
-        proc_state.copy_original(queued_filepath[-1])
-
-        did_transcode = False
-    else:
-        transcoder = transcoders.VideoTranscoder()
-
-        transcoder.transcode(queued_filename, tmp_dst,
-                             vp8_quality=video_config['vp8_quality'],
-                             vp8_threads=video_config['vp8_threads'],
-                             vorbis_quality=video_config['vorbis_quality'],
-                             progress_callback=progress_callback,
-                             dimensions=dimensions)
-
-        dst_dimensions = transcoder.dst_data.videowidth,\
-            transcoder.dst_data.videoheight
-
-        # Push transcoded video to public storage
-        _log.debug('Saving medium...')
-        mgg.public_store.copy_local_to_storage(tmp_dst, medium_filepath)
-        _log.debug('Saved medium')
-
-        entry.media_files['webm_640'] = medium_filepath
-
-        did_transcode = True
-
-    # Save the width and height of the transcoded video
-    entry.media_data_init(
-        width=dst_dimensions[0],
-        height=dst_dimensions[1])
-
-    # Temporary file for the video thumbnail (cleaned up with workbench)
-    tmp_thumb = os.path.join(workbench.dir, thumbnail_basename)
-
-    # Create a thumbnail.jpg that fits in a 180x180 square
-    transcoders.VideoThumbnailerMarkII(
-        queued_filename,
-        tmp_thumb,
-        180)
-
-    # Push the thumbnail to public storage
-    _log.debug('Saving thumbnail...')
-    mgg.public_store.copy_local_to_storage(tmp_thumb, thumbnail_filepath)
-    entry.media_files['thumb'] = thumbnail_filepath
-
-    # save the original... but only if we did a transcoding
-    # (if we skipped transcoding and just kept the original anyway as the main
-    #  media, then why would we save the original twice?)
-    if video_config['keep_original'] and did_transcode:
-        # Push original file to public storage
-        _log.debug('Saving original...')
-        proc_state.copy_original(queued_filepath[-1])
-
-    # Remove queued media file from storage and database
-    proc_state.delete_queue_file()
-
-
 def store_metadata(media_entry, metadata):
     """
     Store metadata from this video for this media entry.
@@ -211,3 +116,298 @@ def store_metadata(media_entry, metadata):
     if len(stored_metadata):
         media_entry.media_data_init(
             orig_metadata=stored_metadata)
+
+
+class CommonVideoProcessor(MediaProcessor):
+    """
+    Provides a base for various video processing steps
+    """
+    acceptable_files = ['original', 'best_quality', 'webm_video']
+
+    def common_setup(self):
+        self.video_config = mgg \
+            .global_config['media_type:mediagoblin.media_types.video']
+
+        # Pull down and set up the processing file
+        self.process_filename = get_process_filename(
+            self.entry, self.workbench, self.acceptable_files)
+        self.name_builder = FilenameBuilder(self.process_filename)
+
+        self.transcoder = transcoders.VideoTranscoder()
+        self.did_transcode = False
+
+    def copy_original(self):
+        # If we didn't transcode, then we need to keep the original
+        if not self.did_transcode or \
+           (self.video_config['keep_original'] and self.did_transcode):
+            copy_original(
+                self.entry, self.process_filename,
+                self.name_builder.fill('{basename}{ext}'))
+
+    def _keep_best(self):
+        """
+        If there is no original, keep the best file that we have
+        """
+        if not self.entry.media_files.get('best_quality'):
+            # Save the best quality file if no original?
+            if not self.entry.media_files.get('original') and \
+                    self.entry.media_files.get('webm_video'):
+                self.entry.media_files['best_quality'] = self.entry \
+                    .media_files['webm_video']
+
+    def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
+                  vorbis_quality=None):
+        progress_callback = ProgressCallback(self.entry)
+        tmp_dst = os.path.join(self.workbench.dir,
+                               self.name_builder.fill('{basename}.medium.webm'))
+
+        if not medium_size:
+            medium_size = (
+                mgg.global_config['media:medium']['max_width'],
+                mgg.global_config['media:medium']['max_height'])
+        if not vp8_quality:
+            vp8_quality = self.video_config['vp8_quality']
+        if not vp8_threads:
+            vp8_threads = self.video_config['vp8_threads']
+        if not vorbis_quality:
+            vorbis_quality = self.video_config['vorbis_quality']
+
+        # Extract metadata and keep a record of it
+        metadata = self.transcoder.discover(self.process_filename)
+        store_metadata(self.entry, metadata)
+
+        # Figure out whether or not we need to transcode this video or
+        # if we can skip it
+        if skip_transcode(metadata, medium_size):
+            _log.debug('Skipping transcoding')
+
+            dst_dimensions = metadata['videowidth'], metadata['videoheight']
+
+            # If there is an original and transcoded, delete the transcoded
+            # since it must be of lower quality than the original
+            if self.entry.media_files.get('original') and \
+               self.entry.media_files.get('webm_video'):
+                self.entry.media_files['webm_video'].delete()
+
+        else:
+            self.transcoder.transcode(self.process_filename, tmp_dst,
+                                      vp8_quality=vp8_quality,
+                                      vp8_threads=vp8_threads,
+                                      vorbis_quality=vorbis_quality,
+                                      progress_callback=progress_callback,
+                                      dimensions=tuple(medium_size))
+
+            dst_dimensions = self.transcoder.dst_data.videowidth,\
+                self.transcoder.dst_data.videoheight
+
+            self._keep_best()
+
+            # Push transcoded video to public storage
+            _log.debug('Saving medium...')
+            store_public(self.entry, 'webm_video', tmp_dst,
+                         self.name_builder.fill('{basename}.medium.webm'))
+            _log.debug('Saved medium')
+
+            self.did_transcode = True
+
+        # Save the width and height of the transcoded video
+        self.entry.media_data_init(
+            width=dst_dimensions[0],
+            height=dst_dimensions[1])
+
+    def generate_thumb(self, thumb_size=None):
+        # Temporary file for the video thumbnail (cleaned up with workbench)
+        tmp_thumb = os.path.join(self.workbench.dir,
+                                 self.name_builder.fill(
+                                     '{basename}.thumbnail.jpg'))
+
+        if not thumb_size:
+            thumb_size = (mgg.global_config['media:thumb']['max_width'],
+                          mgg.global_config['media:thumb']['max_height'])
+
+        transcoders.VideoThumbnailerMarkII(
+            self.process_filename,
+            tmp_thumb,
+            thumb_size[0],
+            thumb_size[1])
+
+        # Push the thumbnail to public storage
+        _log.debug('Saving thumbnail...')
+        store_public(self.entry, 'thumb', tmp_thumb,
+                     self.name_builder.fill('{basename}.thumbnail.jpg'))
+
+
+class InitialProcessor(CommonVideoProcessor):
+    """
+    Initial processing steps for new video
+    """
+    name = "initial"
+    description = "Initial processing"
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        if not state:
+            state = entry.state
+        return state in (
+            "unprocessed", "failed")
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--medium_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--vp8_quality',
+            type=int,
+            help='Range 0..10')
+
+        parser.add_argument(
+            '--vp8_threads',
+            type=int,
+            help='0 means number_of_CPUs - 1')
+
+        parser.add_argument(
+            '--vorbis_quality',
+            type=float,
+            help='Range -0.1..1')
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['medium_size', 'vp8_quality', 'vp8_threads',
+                   'vorbis_quality', 'thumb_size'])
+
+    def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
+                vorbis_quality=None, thumb_size=None):
+        self.common_setup()
+
+        self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
+                       vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)
+
+        self.copy_original()
+        self.generate_thumb(thumb_size=thumb_size)
+        self.delete_queue_file()
+
+
+class Resizer(CommonVideoProcessor):
+    """
+    Video thumbnail resizing process steps for processed media
+    """
+    name = 'resize'
+    description = 'Resize thumbnail'
+    thumb_size = 'thumb_size'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        # Needed for gmg reprocess thumbs to work
+        parser.add_argument(
+            'file',
+            nargs='?',
+            default='thumb',
+            choices=['thumb'])
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['thumb_size', 'file'])
+
+    def process(self, thumb_size=None, file=None):
+        self.common_setup()
+        self.generate_thumb(thumb_size=thumb_size)
+
+
+class Transcoder(CommonVideoProcessor):
+    """
+    Transcoding processing steps for processed video
+    """
+    name = 'transcode'
+    description = 'Re-transcode video'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--medium_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--vp8_quality',
+            type=int,
+            help='Range 0..10')
+
+        parser.add_argument(
+            '--vp8_threads',
+            type=int,
+            help='0 means number_of_CPUs - 1')
+
+        parser.add_argument(
+            '--vorbis_quality',
+            type=float,
+            help='Range -0.1..1')
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['medium_size', 'vp8_threads', 'vp8_quality',
+                   'vorbis_quality'])
+
+    def process(self, medium_size=None, vp8_quality=None, vp8_threads=None,
+                vorbis_quality=None):
+        self.common_setup()
+        self.transcode(medium_size=medium_size, vp8_threads=vp8_threads,
+                       vp8_quality=vp8_quality, vorbis_quality=vorbis_quality)
+
+
+class VideoProcessingManager(ProcessingManager):
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.add_processor(InitialProcessor)
+        self.add_processor(Resizer)
+        self.add_processor(Transcoder)
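
With three processors registered, a processed entry can be re-transcoded
without touching its thumbnail. A sketch of driving the Transcoder step
directly (how entry is fetched is outside this diff):

    manager = VideoProcessingManager()
    with manager.processors['transcode'](manager, entry) as processor:
        processor.process(medium_size=(640, 640), vp8_quality=8,
                          vp8_threads=2, vorbis_quality=0.3)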
mediagoblin/media_types/video/util.py
@@ -21,7 +21,7 @@ from mediagoblin import mg_globals as mgg
 _log = logging.getLogger(__name__)
 
 
-def skip_transcode(metadata):
+def skip_transcode(metadata, size):
     '''
     Checks video metadata against configuration values for skip_transcode.
 
@@ -51,9 +51,9 @@ def skip_transcode(metadata):
             return False
 
     if config['dimensions_match']:
-        if not metadata['videoheight'] <= medium_config['max_height']:
+        if not metadata['videoheight'] <= size[1]:
             return False
-        if not metadata['videowidth'] <= medium_config['max_width']:
+        if not metadata['videowidth'] <= size[0]:
             return False
 
     return True
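
Callers now pass the target box explicitly, so a reprocess run with a
custom --medium_size is honored instead of always comparing against the
media:medium config. For example (assuming the earlier skip_transcode
config checks pass):

    metadata = {'videowidth': 1280, 'videoheight': 720}  # trimmed-down example
    skip_transcode(metadata, (640, 640))  # False: 720 > 640, so transcode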
@ -14,12 +14,14 @@
|
|||||||
# You should have received a copy of the GNU Affero General Public License
|
# You should have received a copy of the GNU Affero General Public License
|
||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
from collections import OrderedDict
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from mediagoblin.db.util import atomic_update
|
|
||||||
from mediagoblin import mg_globals as mgg
|
from mediagoblin import mg_globals as mgg
|
||||||
|
from mediagoblin.db.util import atomic_update
|
||||||
|
from mediagoblin.db.models import MediaEntry
|
||||||
|
from mediagoblin.tools.pluginapi import hook_handle
|
||||||
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
|
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
|
||||||
|
|
||||||
_log = logging.getLogger(__name__)
|
 _log = logging.getLogger(__name__)

@ -74,49 +76,89 @@ class FilenameBuilder(object):
                                  ext=self.ext)
 
 
-class ProcessingState(object):
-    """
-    The first and only argument to the "processor" of a media type
-
-    This could be thought of as a "request" to the processor
-    function. It has the main info for the request (media entry)
-    and a bunch of tools for the request on it.
-
-    It can get more fancy without impacting old media types.
-    """
-    def __init__(self, entry):
-        self.entry = entry
-        self.workbench = None
-        self.queued_filename = None
-
-    def set_workbench(self, wb):
-        self.workbench = wb
-
-    def get_queued_filename(self):
-        """
-        Get a filename for the original, on local storage
-        """
-        if self.queued_filename is not None:
-            return self.queued_filename
-        queued_filepath = self.entry.queued_media_file
-        queued_filename = self.workbench.localized_file(
-            mgg.queue_store, queued_filepath,
-            'source')
-        self.queued_filename = queued_filename
-        return queued_filename
-
-    def copy_original(self, target_name, keyname=u"original"):
-        self.store_public(keyname, self.get_queued_filename(), target_name)
-
-    def store_public(self, keyname, local_file, target_name=None):
-        if target_name is None:
-            target_name = os.path.basename(local_file)
-        target_filepath = create_pub_filepath(self.entry, target_name)
-        if keyname in self.entry.media_files:
-            _log.warn("store_public: keyname %r already used for file %r, "
-                      "replacing with %r", keyname,
-                      self.entry.media_files[keyname], target_filepath)
-        mgg.public_store.copy_local_to_storage(local_file, target_filepath)
-        self.entry.media_files[keyname] = target_filepath
+class MediaProcessor(object):
+    """A particular processor for this media type.
+
+    While the ProcessingManager handles all types of MediaProcessing
+    possible for a particular media type, a MediaProcessor can be
+    thought of as a *particular* processing action for a media type.
+    For example, you may have separate MediaProcessors for:
+
+    - initial_processing: the initial processing of a media
+    - gen_thumb: generate a thumbnail
+    - resize: resize an image
+    - transcode: transcode a video
+
+    ... etc.
+
+    Some information on producing a new MediaProcessor for your media type:
+
+    - You *must* supply a name attribute.  This must be a class level
+      attribute, and a string.  This will be used to determine the
+      subcommand of your process.
+    - It's recommended that you supply a class level description
+      attribute.
+    - Supply a media_is_eligible classmethod.  This will be used to
+      determine whether or not a media entry is eligible to use this
+      processor type.  See the method documentation for details.
+    - To give "./bin/gmg reprocess run" abilities to this media type,
+      supply both generate_parser and args_to_request classmethods.
+    - The process method will be what actually processes your media.
+    """
+    # You MUST override this in the child MediaProcessor!
+    name = None
+
+    # Optional, but will be used in various places to describe the
+    # action this MediaProcessor provides
+    description = None
+
+    def __init__(self, manager, entry):
+        self.manager = manager
+        self.entry = entry
+        self.entry_orig_state = entry.state
+
+        # Should be initialized at time of processing, at least
+        self.workbench = None
+
+    def __enter__(self):
+        self.workbench = mgg.workbench_manager.create()
+        return self
+
+    def __exit__(self, *args):
+        self.workbench.destroy()
+        self.workbench = None
+
+    # @with_workbench
+    def process(self, **kwargs):
+        """
+        Actually process this media entry.
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        raise NotImplementedError
+
+    ###############################
+    # Command line interface things
+    ###############################
+
+    @classmethod
+    def generate_parser(cls):
+        raise NotImplementedError
+
+    @classmethod
+    def args_to_request(cls, args):
+        raise NotImplementedError
+
+    ##########################################
+    # THE FUTURE: web interface things here :)
+    ##########################################
+
+    #####################
+    # Some common "steps"
+    #####################
 
     def delete_queue_file(self):
         # Remove queued media file from storage and database.
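
To make the subclassing checklist above concrete, here is a minimal sketch of a MediaProcessor; the Resizer class, its --size option, and the empty process() body are illustrative assumptions, not code from this commit:

    import argparse

    from mediagoblin.processing import MediaProcessor, request_from_args


    class Resizer(MediaProcessor):
        """Hypothetical processor: regenerate a resized rendition."""
        name = 'resize'
        description = 'Resize an already-processed image'

        @classmethod
        def media_is_eligible(cls, entry=None, state=None):
            # Only entries that finished initial processing qualify
            if entry:
                state = entry.state
            return state == 'processed'

        @classmethod
        def generate_parser(cls):
            parser = argparse.ArgumentParser(
                description=cls.description, prog=cls.name)
            parser.add_argument('--size', nargs=2, type=int,
                                metavar=('width', 'height'))
            return parser

        @classmethod
        def args_to_request(cls, args):
            # Turn parsed CLI args into the reprocess_info dict
            return request_from_args(args, ['size'])

        def process(self, size=None):
            # self.workbench is live here: the task enters the processor
            # as a context manager before calling process()
            pass
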
@ -124,11 +166,131 @@ class ProcessingState(object):
         # be removed too, but fail if the directory is not empty to be on
         # the super-safe side.
         queued_filepath = self.entry.queued_media_file
-        mgg.queue_store.delete_file(queued_filepath)      # rm file
-        mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
-        self.entry.queued_media_file = []
+        if queued_filepath:
+            mgg.queue_store.delete_file(queued_filepath)      # rm file
+            mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
+            self.entry.queued_media_file = []
+
+
+class ProcessingKeyError(Exception): pass
+class ProcessorDoesNotExist(ProcessingKeyError): pass
+class ProcessorNotEligible(ProcessingKeyError): pass
+class ProcessingManagerDoesNotExist(ProcessingKeyError): pass
+
+
+class ProcessingManager(object):
+    """Manages all the processing actions available for a media type
+
+    Specific processing actions, MediaProcessor subclasses, are added
+    to the ProcessingManager.
+    """
+    def __init__(self):
+        # Dict of all MediaProcessors of this media type
+        self.processors = OrderedDict()
+
+    def add_processor(self, processor):
+        """
+        Add a processor class to this media type
+        """
+        name = processor.name
+        if name is None:
+            raise AttributeError("Processor class's .name attribute not set")
+
+        self.processors[name] = processor
+
+    def list_eligible_processors(self, entry):
+        """
+        List all processors that this media entry is eligible to be processed
+        for.
+        """
+        return [
+            processor
+            for processor in self.processors.values()
+            if processor.media_is_eligible(entry=entry)]
+
+    def list_all_processors_by_state(self, state):
+        """
+        List all processors that this media state is eligible to be processed
+        for.
+        """
+        return [
+            processor
+            for processor in self.processors.values()
+            if processor.media_is_eligible(state=state)]
+
+    def list_all_processors(self):
+        return self.processors.values()
+
+    def gen_process_request_via_cli(self, subparser):
+        # Got to figure out what actually goes here before I can write this properly
+        pass
+
+    def get_processor(self, key, entry=None):
+        """
+        Get the processor with this key.
+
+        If entry supplied, make sure this entry is actually compatible;
+        otherwise raise error.
+        """
+        try:
+            processor = self.processors[key]
+        except KeyError:
+            raise ProcessorDoesNotExist(
+                "'%s' processor does not exist for this media type" % key)
+
+        if entry and not processor.media_is_eligible(entry):
+            raise ProcessorNotEligible(
+                "This entry is not eligible for processor with name '%s'" % key)
+
+        return processor
+
+
+def request_from_args(args, which_args):
+    """
+    Generate a request from the values of some argparse parsed args
+    """
+    request = {}
+    for arg in which_args:
+        request[arg] = getattr(args, arg)
+
+    return request
+
+
+class MediaEntryNotFound(Exception): pass
+
+
+def get_processing_manager_for_type(media_type):
+    """
+    Get the appropriate media manager for this type
+    """
+    manager_class = hook_handle(('reprocess_manager', media_type))
+    if not manager_class:
+        raise ProcessingManagerDoesNotExist(
+            "A processing manager does not exist for {0}".format(media_type))
+    manager = manager_class()
+
+    return manager
+
+
+def get_entry_and_processing_manager(media_id):
+    """
+    Get a MediaEntry and its processing manager in one go.
+
+    Returns a tuple of: `(entry, manager)`
+    """
+    entry = MediaEntry.query.filter_by(id=media_id).first()
+    if entry is None:
+        raise MediaEntryNotFound("Can't find media with id '%s'" % media_id)
+
+    manager = get_processing_manager_for_type(entry.media_type)
+
+    return entry, manager
 
 
 def mark_entry_failed(entry_id, exc):
     """
     Mark a media entry as having failed in its conversion.
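
A media type would register its processors on a ProcessingManager subclass and expose it through the ('reprocess_manager', media_type) hook that get_processing_manager_for_type() resolves with hook_handle(). A rough sketch reusing the hypothetical Resizer from above; the hook registration shape shown here is an assumption, not taken from this diff:

    from mediagoblin.processing import ProcessingManager


    class ImageProcessingManager(ProcessingManager):
        def __init__(self):
            super(ImageProcessingManager, self).__init__()
            # OrderedDict preserves registration order for the listings
            self.add_processor(Resizer)


    # Hypothetical plugin hook so hook_handle() can find the manager:
    hooks = {
        ('reprocess_manager', 'mediagoblin.media_types.image'):
            lambda: ImageProcessingManager,
    }

With that wiring, manager.get_processor('resize', entry) returns the processor class, or raises ProcessorDoesNotExist / ProcessorNotEligible as defined above.
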
@ -165,6 +327,66 @@ def mark_entry_failed(entry_id, exc):
             u'fail_metadata': {}})
 
 
+def get_process_filename(entry, workbench, acceptable_files):
+    """
+    Try and get the queued file if available, otherwise return the first file
+    in the acceptable_files that we have.
+
+    If no acceptable_files, raise ProcessFileNotFound
+    """
+    if entry.queued_media_file:
+        filepath = entry.queued_media_file
+        storage = mgg.queue_store
+    else:
+        filepath = None
+        for keyname in acceptable_files:
+            if entry.media_files.get(keyname):
+                filepath = entry.media_files[keyname]
+                storage = mgg.public_store
+                break
+
+    if not filepath:
+        raise ProcessFileNotFound()
+
+    filename = workbench.localized_file(
+        storage, filepath,
+        'source')
+
+    if not os.path.exists(filename):
+        raise ProcessFileNotFound()
+
+    return filename
+
+
+def store_public(entry, keyname, local_file, target_name=None,
+                 delete_if_exists=True):
+    if target_name is None:
+        target_name = os.path.basename(local_file)
+    target_filepath = create_pub_filepath(entry, target_name)
+
+    if keyname in entry.media_files:
+        _log.warn("store_public: keyname %r already used for file %r, "
+                  "replacing with %r", keyname,
+                  entry.media_files[keyname], target_filepath)
+        if delete_if_exists:
+            mgg.public_store.delete_file(entry.media_files[keyname])
+
+    try:
+        mgg.public_store.copy_local_to_storage(local_file, target_filepath)
+    except Exception:
+        raise PublicStoreFail(keyname=keyname)
+
+    # raise an error if the file failed to copy
+    copied_filepath = mgg.public_store.get_local_path(target_filepath)
+    if not os.path.exists(copied_filepath):
+        raise PublicStoreFail(keyname=keyname)
+
+    entry.media_files[keyname] = target_filepath
+
+
+def copy_original(entry, orig_filename, target_name, keyname=u"original"):
+    store_public(entry, keyname, orig_filename, target_name)
+
+
 class BaseProcessingFail(Exception):
     """
     Base exception that all other processing failure messages should
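
Inside a processor these helpers pair naturally: locate a local working copy, do the work, publish the result. A sketch of how the hypothetical Resizer.process() might use them; the keynames and the do_resize() step are stand-ins, not part of this commit:

    def process(self, size=None):
        # Prefer the queued original, else the first acceptable published
        # keyname; raises ProcessFileNotFound if nothing is available.
        orig_filename = get_process_filename(
            self.entry, self.workbench, [u'original', u'medium'])

        resized_filename = do_resize(orig_filename, size)  # hypothetical

        # Publish under u'medium': the previous u'medium' file is deleted
        # first (delete_if_exists=True), and the copy is verified on disk.
        store_public(self.entry, u'medium', resized_filename)
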
@ -190,3 +412,18 @@ class BadMediaFail(BaseProcessingFail):
     for the media type specified.
     """
     general_message = _(u'Invalid file given for media type.')
+
+
+class PublicStoreFail(BaseProcessingFail):
+    """
+    Error that should be raised when copying to public store fails
+    """
+    general_message = _('Copying to public storage failed.')
+
+
+class ProcessFileNotFound(BaseProcessingFail):
+    """
+    Error that should be raised when an acceptable file for processing
+    is not found.
+    """
+    general_message = _(u'An acceptable processing file was not found')
@ -22,10 +22,9 @@ import celery
 from celery.registry import tasks
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.db.models import MediaEntry
-from mediagoblin.processing import (mark_entry_failed, BaseProcessingFail,
-    ProcessingState)
+from . import mark_entry_failed, BaseProcessingFail
 from mediagoblin.tools.processing import json_processing_callback
+from mediagoblin.processing import get_entry_and_processing_manager
 
 _log = logging.getLogger(__name__)
 logging.basicConfig()
@ -70,30 +69,43 @@ class ProcessMedia(celery.Task):
     """
     Pass this entry off for processing.
     """
-    track_started=True
-
-    def run(self, media_id, feed_url):
+    def run(self, media_id, feed_url, reprocess_action, reprocess_info=None):
         """
         Pass the media entry off to the appropriate processing function
         (for now just process_image...)
 
         :param feed_url: The feed URL that the PuSH server needs to be
             updated for.
+        :param reprocess_info: A dict containing all of the necessary
+            reprocessing info for the media_type.
         """
-        entry = MediaEntry.query.get(media_id)
+        reprocess_info = reprocess_info or {}
+        entry, manager = get_entry_and_processing_manager(media_id)
 
         # Try to process, and handle expected errors.
         try:
-            entry.state = u'processing'
-            entry.save()
-
-            _log.debug('Processing {0}'.format(entry))
-
-            proc_state = ProcessingState(entry)
-            with mgg.workbench_manager.create() as workbench:
-                proc_state.set_workbench(workbench)
-                # run the processing code
-                entry.media_manager.processor(proc_state)
+            processor_class = manager.get_processor(reprocess_action, entry)
+
+            with processor_class(manager, entry) as processor:
+                # Initial state change has to be here because
+                # the entry.state gets recorded on processor_class init
+                entry.state = u'processing'
+                entry.save()
+
+                _log.debug('Processing {0}'.format(entry))
+
+                try:
+                    processor.process(**reprocess_info)
+                except Exception as exc:
+                    if processor.entry_orig_state == 'processed':
+                        _log.error(
+                            'Entry {0} failed to process due to the following'
+                            ' error: {1}'.format(entry.id, exc))
+                        _log.info(
+                            'Setting entry.state back to "processed"')
+                        pass
+                    else:
+                        raise
 
             # We set the state to processed and save the entry here so there's
             # no need to save at the end of the processing stage, probably ;)
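
To trace the new arguments: reprocess_action names the MediaProcessor to fetch from the manager, and reprocess_info becomes the keyword arguments of its process() method. With the hypothetical resizer from earlier, a task invocation would unfold roughly like this (illustrative values):

    media_id = 42                          # hypothetical entry id
    reprocess_action = 'resize'            # -> manager.get_processor('resize')
    reprocess_info = {'size': [640, 480]}  # -> processor.process(size=[640, 480])

    # If process() raises for an entry whose original state was already
    # 'processed', run() logs the error and the entry is returned to the
    # 'processed' state instead of being marked failed.
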
@ -76,17 +76,21 @@ def prepare_queue_task(app, entry, filename):
     return queue_file
 
 
-def run_process_media(entry, feed_url=None):
+def run_process_media(entry, feed_url=None,
+                      reprocess_action="initial", reprocess_info=None):
     """Process the media asynchronously
 
     :param entry: MediaEntry() instance to be processed.
     :param feed_url: A string indicating the feed_url that the PuSH servers
         should be notified of. This will be something like: `request.urlgen(
         'mediagoblin.user_pages.atom_feed', qualified=True,
         user=request.user.username)`
-        user=request.user.username)`"""
+    :param reprocess_action: What particular action should be run.
+    :param reprocess_info: A dict containing all of the necessary reprocessing
+        info for the given media_type"""
     try:
         ProcessMedia().apply_async(
-            [entry.id, feed_url], {},
+            [entry.id, feed_url, reprocess_action, reprocess_info], {},
             task_id=entry.queued_task_id)
     except BaseException as exc:
         # The purpose of this section is because when running in "lazy"
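
For callers, the defaults keep the plain submission path unchanged while opening the reprocessing path; the action name and info dict below are illustrative, matching the hypothetical resizer used throughout:

    from mediagoblin.submit.lib import run_process_media

    # Initial processing, as the submit views already do:
    run_process_media(entry, feed_url=feed_url)

    # A later reprocessing pass; the dict must match what the chosen
    # processor's process() accepts (hypothetical values shown):
    run_process_media(entry, reprocess_action='resize',
                      reprocess_info={'size': [640, 480]})
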
@ -62,11 +62,11 @@
           </a>
         </li>
       {% endif %}
-      {% if 'webm_640' in media.media_files %}
+      {% if 'webm_video' in media.media_files %}
         <li>
           <a href="{{ request.app.public_store.file_url(
-                        media.media_files.webm_640) }}">
-            {%- trans %}WebM file (640p; VP8/Vorbis){% endtrans -%}
+                        media.media_files.webm_video) }}">
+            {%- trans %}WebM file (VP8/Vorbis){% endtrans -%}
           </a>
         </li>
       {% endif %}