Added comments and did a little refactoring. Not sure if it is actually any clearer, though.

This commit is contained in:
Rodney Ewing 2013-08-02 15:12:07 -07:00
parent 49db778579
commit 3e9faf85da
4 changed files with 118 additions and 77 deletions

View File

@ -49,16 +49,18 @@ def reprocess_parser_setup(subparser):
def _set_media_type(args): def _set_media_type(args):
"""
This will verify that all media id's are of the same media_type. If the
--type flag is set, it will be replaced by the given media id's type.
If they are trying to process different media types, an Exception will be
raised.
"""
if args[0].media_id: if args[0].media_id:
if len(args[0].media_id) == 1: if len(args[0].media_id) == 1:
media_type = MediaEntry.query.filter_by(id=args[0].media_id[0])\ args[0].type = MediaEntry.query.filter_by(id=args[0].media_id[0])\
.first().media_type.split('.')[-1] .first().media_type.split('.')[-1]
if not args[0].type:
args[0].type = media_type
elif args[0].type != media_type:
raise Exception(_('The --type that you set does not match the'
'type of the given media_id.'))
elif len(args[0].media_id) > 1: elif len(args[0].media_id) > 1:
media_types = [] media_types = []
@ -70,15 +72,17 @@ def _set_media_type(args):
raise Exception((u'You cannot reprocess different' raise Exception((u'You cannot reprocess different'
' media_types at the same time.')) ' media_types at the same time.'))
if not args[0].type: args[0].type = media_types[0]
args[0].type = media_types[0]
elif args[0].type != media_types[0]:
raise Exception(_('The --type that you set does not match the'
' type of the given media_ids.'))
def _reprocess_all(args): def _reprocess_all(args):
"""
This handles reprocessing if no media_id's are given.
"""
if not args[0].type: if not args[0].type:
# If no media type is given, we can either regenerate all thumbnails,
# or try to reprocess all failed media
if args[0].thumbnails: if args[0].thumbnails:
if args[0].available: if args[0].available:
print _('Available options for regenerating all processed' print _('Available options for regenerating all processed'
@ -89,6 +93,7 @@ def _reprocess_all(args):
#TODO regenerate all thumbnails #TODO regenerate all thumbnails
pass pass
# Reprocess all failed media
elif args[0].state == 'failed': elif args[0].state == 'failed':
if args[0].available: if args[0].available:
print _('\n Available reprocess actions for all failed' print _('\n Available reprocess actions for all failed'
@ -97,6 +102,8 @@ def _reprocess_all(args):
#TODO reprocess all failed entries #TODO reprocess all failed entries
pass pass
# If here, they didn't set the --type flag and were trying to do
# something other than generating thumbnails or initial_processing
else: else:
raise Exception(_('You must set --type when trying to reprocess' raise Exception(_('You must set --type when trying to reprocess'
' all media_entries, unless you set --state' ' all media_entries, unless you set --state'
@ -107,6 +114,8 @@ def _reprocess_all(args):
def _run_reprocessing(args): def _run_reprocessing(args):
# Are they just asking for the available reprocessing options for the given
# media?
if args[0].available: if args[0].available:
if args[0].state == 'failed': if args[0].state == 'failed':
print _('\n Available reprocess actions for all failed' print _('\n Available reprocess actions for all failed'
@ -118,11 +127,20 @@ def _run_reprocessing(args):
' entries in the {} state'.format(args[0].type, ' entries in the {} state'.format(args[0].type,
args[0].state)) args[0].state))
else: else:
# Run media reprocessing
return hook_handle(('media_reprocess', args[0].type), args) return hook_handle(('media_reprocess', args[0].type), args)
def _set_media_state(args): def _set_media_state(args):
"""
This will verify that all media id's are in the same state. If the
--state flag is set, it will be replaced by the given media id's state.
If they are trying to process different media states, an Exception will be
raised.
"""
if args[0].media_id: if args[0].media_id:
# Only check if we are given media_ids
if len(args[0].media_id) == 1: if len(args[0].media_id) == 1:
args[0].state = MediaEntry.query.filter_by(id=args[0].media_id[0])\ args[0].state = MediaEntry.query.filter_by(id=args[0].media_id[0])\
.first().state .first().state
@ -133,6 +151,8 @@ def _set_media_state(args):
for id in args[0].media_id: for id in args[0].media_id:
media_states.append(MediaEntry.query.filter_by(id=id).first() media_states.append(MediaEntry.query.filter_by(id=id).first()
.state) .state)
# Make sure that all media are in the same state
for state in media_states: for state in media_states:
if state != media_states[0]: if state != media_states[0]:
raise Exception(_('You can only reprocess media that is in' raise Exception(_('You can only reprocess media that is in'
@ -140,11 +160,13 @@ def _set_media_state(args):
args[0].state = media_states[0] args[0].state = media_states[0]
# If no state was set, then we will default to the processed state
if not args[0].state: if not args[0].state:
args[0].state = 'processed' args[0].state = 'processed'
def reprocess(args): def reprocess(args):
# Run eagerly unless explicitly set not to
if not args[0].celery: if not args[0].celery:
os.environ['CELERY_ALWAYS_EAGER'] = 'true' os.environ['CELERY_ALWAYS_EAGER'] = 'true'
commands_util.setup_app(args[0]) commands_util.setup_app(args[0])
@ -152,6 +174,7 @@ def reprocess(args):
_set_media_state(args) _set_media_state(args)
_set_media_type(args) _set_media_type(args)
# If no media_ids were given, then try to reprocess all entries
if not args[0].media_id: if not args[0].media_id:
return _reprocess_all(args) return _reprocess_all(args)

View File

@ -72,6 +72,9 @@ def get_media_type_and_manager(ext):
def reprocess_action(args): def reprocess_action(args):
"""
List the available actions for media in a given state
"""
if args[0].state == 'processed': if args[0].state == 'processed':
print _('\n Available reprocessing actions for processed images:' print _('\n Available reprocessing actions for processed images:'
'\n \t --resize: thumb or medium' '\n \t --resize: thumb or medium'
@ -81,9 +84,13 @@ def reprocess_action(args):
def _parser(args): def _parser(args):
"""
Parses the unknown args from the gmg parser
"""
parser = argparse.ArgumentParser() parser = argparse.ArgumentParser()
parser.add_argument( parser.add_argument(
'--resize') '--resize',
choices=['thumb', 'medium'])
parser.add_argument( parser.add_argument(
'--size', '--size',
nargs=2, nargs=2,
@ -96,6 +103,10 @@ def _parser(args):
def _check_eligible(entry_args, reprocess_args): def _check_eligible(entry_args, reprocess_args):
"""
Check to see if we can actually process the given media as requested
"""
if entry_args.state == 'processed': if entry_args.state == 'processed':
if reprocess_args.initial_processing: if reprocess_args.initial_processing:
raise Exception(_('You can not run --initial_processing on media' raise Exception(_('You can not run --initial_processing on media'
@ -118,36 +129,37 @@ def media_reprocess(args):
reprocess_args = _parser(args) reprocess_args = _parser(args)
entry_args = args[0] entry_args = args[0]
# Can we actually process the given media as requested?
_check_eligible(entry_args, reprocess_args) _check_eligible(entry_args, reprocess_args)
# Do we want to re-try initial processing?
if reprocess_args.initial_processing: if reprocess_args.initial_processing:
for id in entry_args.media_id: for id in entry_args.media_id:
entry = MediaEntry.query.filter_by(id=id).first() entry = MediaEntry.query.filter_by(id=id).first()
# Should we get the feed_url?
run_process_media(entry) run_process_media(entry)
# Are we wanting to resize the thumbnail or medium?
elif reprocess_args.resize: elif reprocess_args.resize:
if reprocess_args.resize == 'medium' or reprocess_args.resize == \
'thumb':
for id in entry_args.media_id:
entry = MediaEntry.query.filter_by(id=id).first()
# For now we can only reprocess with the original file # reprocess all given media entries
if not entry.media_files.get('original'): for id in entry_args.media_id:
raise Exception(_('The original file for this media entry' entry = MediaEntry.query.filter_by(id=id).first()
' does not exist.'))
reprocess_info = {'resize': reprocess_args.resize} # For now we can only reprocess with the original file
if not entry.media_files.get('original'):
raise Exception(_('The original file for this media entry'
' does not exist.'))
if reprocess_args.size: reprocess_info = {'resize': reprocess_args.resize}
reprocess_info['max_width'] = reprocess_args.size[0]
reprocess_info['max_height'] = reprocess_args.size[1]
run_process_media(entry, reprocess_info=reprocess_info) if reprocess_args.size:
reprocess_info['max_width'] = reprocess_args.size[0]
reprocess_info['max_height'] = reprocess_args.size[1]
else: run_process_media(entry, reprocess_info=reprocess_info)
raise Exception(_('The --resize flag must set either "thumb"'
' or "medium".'))
# If we are here, they forgot to tell us how to reprocess
else: else:
_log.warn('You must set either --resize or --initial_processing flag' _log.warn('You must set either --resize or --initial_processing flag'
' to reprocess an image.') ' to reprocess an image.')

View File

@ -73,12 +73,17 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
proc_state.store_public(keyname, tmp_resized_filename, target_name) proc_state.store_public(keyname, tmp_resized_filename, target_name)
def resize_tool(proc_state, force, keyname, filename, target_name, def resize_tool(proc_state, force, keyname, target_name,
conversions_subdir, exif_tags, new_size=None): conversions_subdir, exif_tags, new_size=None):
# Get the filename of the original file
filename = proc_state.get_orig_filename()
# Use the default size if new_size was not given
if not new_size: if not new_size:
max_width = mgg.global_config['media:' + keyname]['max_width'] max_width = mgg.global_config['media:' + keyname]['max_width']
max_height = mgg.global_config['media:' + keyname]['max_height'] max_height = mgg.global_config['media:' + keyname]['max_height']
new_size = (max_width, max_height) new_size = (max_width, max_height)
# If the size of the original file exceeds the specified size for the desired # file, a target_name file is created and later associated with the media
# file, a target_name file is created and later associated with the media # file, a target_name file is created and later associated with the media
# entry. # entry.
@ -125,74 +130,67 @@ def process_image(proc_state, reprocess_info=None):
A Workbench() represents a local temporary dir. It is automatically A Workbench() represents a local temporary dir. It is automatically
cleaned up when this function exits. cleaned up when this function exits.
""" """
entry = proc_state.entry def init(self, proc_state):
workbench = proc_state.workbench self.proc_state = proc_state
self.entry = proc_state.entry
self.workbench = proc_state.workbench
# Conversions subdirectory to avoid collisions # Conversions subdirectory to avoid collisions
conversions_subdir = os.path.join( self.conversions_subdir = os.path.join(
workbench.dir, 'conversions') self.workbench.dir, 'convirsions')
os.mkdir(conversions_subdir)
if reprocess_info: self.orig_filename = proc_state.get_orig_filename()
_reprocess_image(proc_state, reprocess_info, conversions_subdir) self.name_builder = FilenameBuilder(self.orig_filename)
else: # Exif extraction
queued_filename = proc_state.get_queued_filename() self.exif_tags = extract_exif(self.orig_filename)
name_builder = FilenameBuilder(queued_filename)
# EXIF extraction os.mkdir(self.conversions_subdir)
exif_tags = extract_exif(queued_filename)
gps_data = get_gps_data(exif_tags)
# Always create a small thumbnail def initial_processing(self):
resize_tool(proc_state, True, 'thumb', queued_filename, # Is there any GPS data
name_builder.fill('{basename}.thumbnail{ext}'), gps_data = get_gps_data(self.exif_tags)
conversions_subdir, exif_tags)
# Always create a small thumbnail
resize_tool(self.proc_state, True, 'thumb', self.orig_filename,
self.name_builder.fill('{basename}.thumbnail{ext}'),
self.conversions_subdir, self.exif_tags)
# Possibly create a medium # Possibly create a medium
resize_tool(proc_state, False, 'medium', queued_filename, resize_tool(self.proc_state, False, 'medium', self.orig_filename,
name_builder.fill('{basename}.medium{ext}'), self.name_builder.fill('{basename}.medium{ext}'),
conversions_subdir, exif_tags) self.conversions_subdir, self.exif_tags)
# Copy our queued local workbench to its final destination # Copy our queued local workbench to its final destination
proc_state.copy_original(name_builder.fill('{basename}{ext}')) self.proc_state.copy_original(self.name_builder.fill('{basename}{ext}'))
# Remove queued media file from storage and database # Remove queued media file from storage and database
proc_state.delete_queue_file() self.proc_state.delete_queue_file()
# Insert exif data into database # Insert exif data into database
exif_all = clean_exif(exif_tags) exif_all = clean_exif(self.exif_tags)
if len(exif_all): if len(exif_all):
entry.media_data_init(exif_all=exif_all) self.entry.media_data_init(exif_all=exif_all)
if len(gps_data): if len(gps_data):
for key in list(gps_data.keys()): for key in list(gps_data.keys()):
gps_data['gps_' + key] = gps_data.pop(key) gps_data['gps_' + key] = gps_data.pop(key)
entry.media_data_init(**gps_data) self.entry.media_data_init(**gps_data)
def reprocess(self, reprocess_info):
new_size = None
def _reprocess_image(proc_state, reprocess_info, conversions_subdir): # Did they specify a size?
reprocess_filename = proc_state.get_reprocess_filename() if reprocess_info.get('max_width'):
name_builder = FilenameBuilder(reprocess_filename) max_width = reprocess_info['max_width']
max_height = reprocess_info['max_height']
exif_tags = extract_exif(reprocess_filename) new_size = (max_width, max_height)
if reprocess_info.get('max_width'):
max_width = reprocess_info['max_width']
max_height = reprocess_info['max_height']
else:
max_width = mgg.global_config \
['media:' + reprocess_info['resize']]['max_width']
max_height = mgg.global_config \
['media:' + reprocess_info['resize']]['max_height']
new_size = (max_width, max_height)
resize_tool(proc_state, False, reprocess_info['resize'], reprocess_filename,
name_builder.fill('{basename}.thumbnail{ext}'),
conversions_subdir, exif_tags, new_size)
resize_tool(self.proc_state, False, reprocess_info['resize'],
self.name_builder.fill('{basename}.medium{ext}'),
self.conversions_subdir, self.exif_tags, new_size)
if __name__ == '__main__': if __name__ == '__main__':
import sys import sys

View File

@ -89,9 +89,17 @@ class ProcessMedia(task.Task):
proc_state = ProcessingState(entry) proc_state = ProcessingState(entry)
with mgg.workbench_manager.create() as workbench: with mgg.workbench_manager.create() as workbench:
proc_state.set_workbench(workbench) proc_state.set_workbench(workbench)
# run the processing code processor = entry.media_manager.processor(proc_state)
entry.media_manager.processor(proc_state, reprocess_info)
# If we have reprocess_info, let's reprocess
if reprocess_info:
processor.reprocess(reprocess_info)
# Run initial processing
else:
processor.initial_processing()
# We set the state to processed and save the entry here so there's # We set the state to processed and save the entry here so there's
# no need to save at the end of the processing stage, probably ;) # no need to save at the end of the processing stage, probably ;)