Merge branch 'master' of git://gitorious.org/mediagoblin/mediagoblin

Aditi 2013-08-25 09:24:13 +05:30
commit 7d9dbe32cc
42 changed files with 2240 additions and 585 deletions

View File

@@ -152,7 +152,7 @@ CELERY_RESULT_DBURI = string(default="sqlite:///%(here)s/celery.db")
 # default kombu stuff
 BROKER_TRANSPORT = string(default="sqlalchemy")
-BROKER_HOST = string(default="sqlite:///%(here)s/kombu.db")
+BROKER_URL = string(default="sqlite:///%(here)s/kombu.db")
 # known booleans
 CELERY_RESULT_PERSISTENT = boolean()

View File

@@ -365,6 +365,8 @@ def add_new_notification_tables(db):
     CommentNotification_v0.__table__.create(db.bind)
     ProcessingNotification_v0.__table__.create(db.bind)
 
+    db.commit()
+
 
 @RegisterMigration(13, MIGRATIONS)
 def pw_hash_nullable(db):

@@ -425,7 +427,7 @@ class RequestToken_v0(declarative_base()):
     callback = Column(Unicode, nullable=False, default=u"oob")
     created = Column(DateTime, nullable=False, default=datetime.datetime.now)
     updated = Column(DateTime, nullable=False, default=datetime.datetime.now)
 
 class AccessToken_v0(declarative_base()):
     """
     Model for representing the access tokens

@@ -438,7 +440,7 @@ class AccessToken_v0(declarative_base()):
     request_token = Column(Unicode, ForeignKey(RequestToken_v0.token))
     created = Column(DateTime, nullable=False, default=datetime.datetime.now)
     updated = Column(DateTime, nullable=False, default=datetime.datetime.now)
 
 class NonceTimestamp_v0(declarative_base()):
     """

@@ -460,3 +462,15 @@ def create_oauth1_tables(db):
     NonceTimestamp_v0.__table__.create(db.bind)
 
     db.commit()
+
+
+@RegisterMigration(15, MIGRATIONS)
+def wants_notifications(db):
+    """Add a wants_notifications field to User model"""
+    metadata = MetaData(bind=db.bind)
+    user_table = inspect_table(metadata, "core__users")
+    col = Column('wants_notifications', Boolean, default=True)
+    col.create(user_table)
+
+    db.commit()
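Deployments pick up migration 15 the same way as any other schema change, by running MediaGoblin's database update command from the install root (assuming the standard gmg wrapper):

    ./bin/gmg dbupdate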

View File

@@ -69,6 +69,7 @@ class User(Base, UserMixin):
     # Intended to be nullable=False, but migrations would not work for it
     # set to nullable=True implicitly.
     wants_comment_notification = Column(Boolean, default=True)
+    wants_notifications = Column(Boolean, default=True)
     license_preference = Column(Unicode)
     is_admin = Column(Boolean, default=False, nullable=False)
     url = Column(Unicode)

@@ -146,7 +147,7 @@ class RequestToken(Base):
     callback = Column(Unicode, nullable=False, default=u"oob")
     created = Column(DateTime, nullable=False, default=datetime.datetime.now)
     updated = Column(DateTime, nullable=False, default=datetime.datetime.now)
 
 class AccessToken(Base):
     """
     Model for representing the access tokens

@@ -159,7 +160,7 @@ class AccessToken(Base):
     request_token = Column(Unicode, ForeignKey(RequestToken.token))
     created = Column(DateTime, nullable=False, default=datetime.datetime.now)
     updated = Column(DateTime, nullable=False, default=datetime.datetime.now)
 
 class NonceTimestamp(Base):
     """

@@ -646,13 +647,13 @@ with_polymorphic(
     [ProcessingNotification, CommentNotification])
 
 MODELS = [
     User, Client, RequestToken, AccessToken, NonceTimestamp, MediaEntry, Tag,
     MediaTag, MediaComment, Collection, CollectionItem, MediaFile, FileKeynames,
     MediaAttachmentFile, ProcessingMetaData, Notification, CommentNotification,
     ProcessingNotification, CommentSubscription]
 
 """
 Foundations are the default rows that are created immediately after the tables
 are initialized. Each entry to this dictionary should be in the format of:
     ModelConstructorObject:List of Dictionaries
 (Each Dictionary represents a row on the Table to be created, containing each

View File

@@ -67,6 +67,8 @@ class EditAccountForm(wtforms.Form):
          normalize_user_or_email_field(allow_user=False)])
     wants_comment_notification = wtforms.BooleanField(
         description=_("Email me when others comment on my media"))
+    wants_notifications = wtforms.BooleanField(
+        description=_("Enable/Disable in-site notifications"))
     license_preference = wtforms.SelectField(
         _('License preference'),
         [

View File

@@ -228,10 +228,12 @@ def edit_account(request):
     user = request.user
     form = forms.EditAccountForm(request.form,
         wants_comment_notification=user.wants_comment_notification,
-        license_preference=user.license_preference)
+        license_preference=user.license_preference,
+        wants_notifications=user.wants_notifications)
 
     if request.method == 'POST' and form.validate():
         user.wants_comment_notification = form.wants_comment_notification.data
+        user.wants_notifications = form.wants_notifications.data
         user.license_preference = form.license_preference.data

View File

@@ -45,6 +45,10 @@ SUBCOMMAND_MAP = {
         'setup': 'mediagoblin.gmg_commands.assetlink:assetlink_parser_setup',
         'func': 'mediagoblin.gmg_commands.assetlink:assetlink',
         'help': 'Link assets for themes and plugins for static serving'},
+    'reprocess': {
+        'setup': 'mediagoblin.gmg_commands.reprocess:reprocess_parser_setup',
+        'func': 'mediagoblin.gmg_commands.reprocess:reprocess',
+        'help': 'Reprocess media entries'},
     # 'theme': {
     #     'setup': 'mediagoblin.gmg_commands.theme:theme_parser_setup',
     #     'func': 'mediagoblin.gmg_commands.theme:theme',
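Each SUBCOMMAND_MAP entry points at its parser-setup and command functions as
'module:callable' strings, so a subcommand module is only imported when it is
actually invoked. A minimal sketch of how such a spec can be resolved
(illustrative only; the real loader in mediagoblin/gmg_commands may differ):

    import importlib

    def import_command(spec):
        # spec looks like 'mediagoblin.gmg_commands.reprocess:reprocess'
        module_name, attr_name = spec.split(':', 1)
        module = importlib.import_module(module_name)
        return getattr(module, attr_name)

    # e.g.: import_command(SUBCOMMAND_MAP['reprocess']['func'])(args)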

View File

@@ -16,6 +16,7 @@
 
 from mediagoblin import mg_globals
 from mediagoblin.db.open import setup_connection_and_db_from_config
+from mediagoblin.gmg_commands import util as commands_util
 from mediagoblin.storage.filestorage import BasicFileStorage
 from mediagoblin.init import setup_storage, setup_global_and_app_config

@@ -223,6 +224,7 @@ def env_export(args):
     '''
     Export database and media files to a tar archive
     '''
+    commands_util.check_unrecognized_args(args)
     if args.cache_path:
         if os.path.exists(args.cache_path):
             _log.error('The cache directory must not exist '

View File

@@ -0,0 +1,302 @@
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors.  See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import argparse
import os

from mediagoblin import mg_globals
from mediagoblin.db.models import MediaEntry
from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.processing import (
    ProcessorDoesNotExist, ProcessorNotEligible,
    get_entry_and_processing_manager, get_processing_manager_for_type,
    ProcessingManagerDoesNotExist)


def reprocess_parser_setup(subparser):
    subparser.add_argument(
        '--celery',
        action='store_true',
        help="Don't process eagerly, pass off to celery")

    subparsers = subparser.add_subparsers(dest="reprocess_subcommand")

    ###################
    # available command
    ###################
    available_parser = subparsers.add_parser(
        "available",
        help="Find out what actions are available for this media")

    available_parser.add_argument(
        "id_or_type",
        help="Media id or media type to check")

    available_parser.add_argument(
        "--action-help",
        action="store_true",
        help="List argument help for each action available")

    available_parser.add_argument(
        "--state",
        help="The state of media you would like to reprocess")

    #############
    # run command
    #############
    run_parser = subparsers.add_parser(
        "run",
        help="Run a reprocessing on one or more media")

    run_parser.add_argument(
        'media_id',
        help="The media_entry id(s) you wish to reprocess.")

    run_parser.add_argument(
        'reprocess_command',
        help="The reprocess command you intend to run")

    run_parser.add_argument(
        'reprocess_args',
        nargs=argparse.REMAINDER,
        help="The rest of the arguments to the reprocessing tool")

    ################
    # thumbs command
    ################
    thumbs = subparsers.add_parser(
        'thumbs',
        help='Regenerate thumbs for all processed media')

    thumbs.add_argument(
        '--size',
        nargs=2,
        type=int,
        metavar=('max_width', 'max_height'))

    #################
    # initial command
    #################
    subparsers.add_parser(
        'initial',
        help='Reprocess all failed media')

    ##################
    # bulk_run command
    ##################
    bulk_run_parser = subparsers.add_parser(
        'bulk_run',
        help='Run reprocessing on a given media type or state')

    bulk_run_parser.add_argument(
        'type',
        help='The type of media you would like to process')

    bulk_run_parser.add_argument(
        '--state',
        default='processed',
        nargs='?',
        help="The state of the media you would like to process. Defaults to"
             " 'processed'")

    bulk_run_parser.add_argument(
        'reprocess_command',
        help='The reprocess command you intend to run')

    bulk_run_parser.add_argument(
        'reprocess_args',
        nargs=argparse.REMAINDER,
        help='The rest of the arguments to the reprocessing tool')

    ###############
    # help command?
    ###############


def available(args):
    # Get the media type, either by looking up media id, or by specific type
    try:
        media_id = int(args.id_or_type)
        media_entry, manager = get_entry_and_processing_manager(media_id)
        media_type = media_entry.media_type
    except ValueError:
        media_type = args.id_or_type
        media_entry = None
        manager = get_processing_manager_for_type(media_type)
    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=args.id_or_type).first()
        print 'No such processing manager for {0}'.format(entry.media_type)

    if args.state:
        processors = manager.list_all_processors_by_state(args.state)
    elif media_entry is None:
        processors = manager.list_all_processors()
    else:
        processors = manager.list_eligible_processors(media_entry)

    print "Available processors:"
    print "====================="
    print ""

    if args.action_help:
        for processor in processors:
            print processor.name
            print "-" * len(processor.name)

            parser = processor.generate_parser()
            parser.print_help()
            print ""
    else:
        for processor in processors:
            if processor.description:
                print " - %s: %s" % (processor.name, processor.description)
            else:
                print " - %s" % processor.name


def run(args, media_id=None):
    if not media_id:
        media_id = args.media_id
    try:
        media_entry, manager = get_entry_and_processing_manager(media_id)

        # TODO: (maybe?) This could probably be handled entirely by the
        # processor class...
        try:
            processor_class = manager.get_processor(
                args.reprocess_command, media_entry)
        except ProcessorDoesNotExist:
            print 'No such processor "%s" for media with id "%s"' % (
                args.reprocess_command, media_entry.id)
            return
        except ProcessorNotEligible:
            print 'Processor "%s" exists but media "%s" is not eligible' % (
                args.reprocess_command, media_entry.id)
            return

        reprocess_parser = processor_class.generate_parser()
        reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
        reprocess_request = processor_class.args_to_request(reprocess_args)
        run_process_media(
            media_entry,
            reprocess_action=args.reprocess_command,
            reprocess_info=reprocess_request)

    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=media_id).first()
        print 'No such processing manager for {0}'.format(entry.media_type)


def bulk_run(args):
    """
    Bulk reprocessing of a given media_type
    """
    query = MediaEntry.query.filter_by(media_type=args.type,
                                       state=args.state)

    for entry in query:
        run(args, entry.id)


def thumbs(args):
    """
    Regenerate thumbs for all processed media
    """
    query = MediaEntry.query.filter_by(state='processed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)

            # TODO: (maybe?) This could probably be handled entirely by the
            # processor class...
            try:
                processor_class = manager.get_processor(
                    'resize', media_entry)
            except ProcessorDoesNotExist:
                print 'No such processor "%s" for media with id "%s"' % (
                    'resize', media_entry.id)
                return
            except ProcessorNotEligible:
                print 'Processor "%s" exists but media "%s" is not eligible' % (
                    'resize', media_entry.id)
                return

            reprocess_parser = processor_class.generate_parser()

            # prepare filetype and size to be passed into reprocess_parser
            if args.size:
                extra_args = 'thumb --{0} {1} {2}'.format(
                    processor_class.thumb_size,
                    args.size[0],
                    args.size[1])
            else:
                extra_args = 'thumb'

            reprocess_args = reprocess_parser.parse_args(extra_args.split())
            reprocess_request = processor_class.args_to_request(reprocess_args)

            run_process_media(
                media_entry,
                reprocess_action='resize',
                reprocess_info=reprocess_request)

        except ProcessingManagerDoesNotExist:
            print 'No such processing manager for {0}'.format(entry.media_type)


def initial(args):
    """
    Reprocess all failed media
    """
    query = MediaEntry.query.filter_by(state='failed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)
            run_process_media(
                media_entry,
                reprocess_action='initial')
        except ProcessingManagerDoesNotExist:
            print 'No such processing manager for {0}'.format(entry.media_type)


def reprocess(args):
    # Run eagerly unless explicitly set not to
    if not args.celery:
        os.environ['CELERY_ALWAYS_EAGER'] = 'true'

    commands_util.setup_app(args)

    if args.reprocess_subcommand == "run":
        run(args)

    elif args.reprocess_subcommand == "available":
        available(args)

    elif args.reprocess_subcommand == "bulk_run":
        bulk_run(args)

    elif args.reprocess_subcommand == "thumbs":
        thumbs(args)

    elif args.reprocess_subcommand == "initial":
        initial(args)
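Taken together, the parsers above give the new tool a command-line surface
along these lines (media ids, types, and sizes here are illustrative;
processor names and their arguments come from each media type's processing
manager):

    ./bin/gmg reprocess available 42 --action-help
    ./bin/gmg reprocess run 42 resize thumb --thumb_size 200 200
    ./bin/gmg reprocess thumbs --size 200 200
    ./bin/gmg reprocess bulk_run mediagoblin.media_types.image resize thumb
    ./bin/gmg reprocess initial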

View File

@@ -36,5 +36,5 @@ def prompt_if_not_set(variable, text, password=False):
         variable=raw_input(text + u' ')
     else:
         variable=getpass.getpass(text + u' ')
 
     return variable

View File

@@ -15,7 +15,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.ascii.processing import process_ascii, \
+from mediagoblin.media_types.ascii.processing import AsciiProcessingManager, \
     sniff_handler
 from mediagoblin.tools import pluginapi

@@ -29,7 +29,6 @@ def setup_plugin():
 
 class ASCIIMediaManager(MediaManagerBase):
     human_readable = "ASCII"
-    processor = staticmethod(process_ascii)
     display_template = "mediagoblin/media_displays/ascii.html"
     default_thumb = "images/media_thumbs/ascii.jpg"

@@ -43,5 +42,6 @@ hooks = {
     'setup': setup_plugin,
     'get_media_type_and_manager': get_media_type_and_manager,
     ('media_manager', MEDIA_TYPE): lambda: ASCIIMediaManager,
+    ('reprocess_manager', MEDIA_TYPE): lambda: AsciiProcessingManager,
     'sniff_handler': sniff_handler,
 }
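The ('reprocess_manager', MEDIA_TYPE) hook is what the reprocess command looks
up when it needs a manager for a media type. A plausible sketch of that
resolution (the actual implementation lives in mediagoblin.processing and may
differ in details):

    from mediagoblin.tools.pluginapi import hook_handle

    def get_processing_manager_for_type(media_type):
        # hook_handle invokes the registered callable; here that is the
        # lambda above, which returns the manager class to instantiate
        manager_class = hook_handle(('reprocess_manager', media_type))
        if not manager_class:
            raise ProcessingManagerDoesNotExist(
                "No such processing manager for %s" % media_type)
        return manager_class()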

View File

@@ -13,6 +13,7 @@
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import argparse
 import chardet
 import os
 try:

@@ -22,7 +23,11 @@ except ImportError:
 import logging
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import create_pub_filepath
+from mediagoblin.processing import (
+    create_pub_filepath, FilenameBuilder,
+    MediaProcessor, ProcessingManager,
+    get_process_filename, copy_original,
+    store_public, request_from_args)
 from mediagoblin.media_types.ascii import asciitoimage
 
 _log = logging.getLogger(__name__)

@@ -43,106 +48,202 @@ def sniff_handler(media_file, **kw):
     return None
 
-def process_ascii(proc_state):
-    """Code to process a txt file. Will be run by celery.
-
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-    ascii_config = mgg.global_config['media_type:mediagoblin.media_types.ascii']
-    # Conversions subdirectory to avoid collisions
-    conversions_subdir = os.path.join(
-        workbench.dir, 'conversions')
-    os.mkdir(conversions_subdir)
-
-    queued_filepath = entry.queued_media_file
-    queued_filename = workbench.localized_file(
-        mgg.queue_store, queued_filepath,
-        'source')
-
-    queued_file = file(queued_filename, 'rb')
-
-    with queued_file:
-        queued_file_charset = chardet.detect(queued_file.read())
-
-        # Only select a non-utf-8 charset if chardet is *really* sure
-        # Tested with "Feli\x0109an superjaron", which was detecte
-        if queued_file_charset['confidence'] < 0.9:
-            interpreted_charset = 'utf-8'
-        else:
-            interpreted_charset = queued_file_charset['encoding']
-
-        _log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
-            queued_file_charset,
-            interpreted_charset))
-
-        queued_file.seek(0)  # Rewind the queued file
-
-        thumb_filepath = create_pub_filepath(
-            entry, 'thumbnail.png')
-
-        tmp_thumb_filename = os.path.join(
-            conversions_subdir, thumb_filepath[-1])
-
-        ascii_converter_args = {}
-
-        if ascii_config['thumbnail_font']:
-            ascii_converter_args.update(
-                {'font': ascii_config['thumbnail_font']})
-
-        converter = asciitoimage.AsciiToImage(
-            **ascii_converter_args)
-
-        thumb = converter._create_image(
-            queued_file.read())
-
-        with file(tmp_thumb_filename, 'w') as thumb_file:
-            thumb.thumbnail(
-                (mgg.global_config['media:thumb']['max_width'],
-                 mgg.global_config['media:thumb']['max_height']),
-                Image.ANTIALIAS)
-            thumb.save(thumb_file)
-
-        _log.debug('Copying local file to public storage')
-        mgg.public_store.copy_local_to_storage(
-            tmp_thumb_filename, thumb_filepath)
-
-        queued_file.seek(0)
-
-        original_filepath = create_pub_filepath(entry, queued_filepath[-1])
-
-        with mgg.public_store.get_file(original_filepath, 'wb') \
-                as original_file:
-            original_file.write(queued_file.read())
-
-        queued_file.seek(0)  # Rewind *again*
-
-        unicode_filepath = create_pub_filepath(entry, 'ascii-portable.txt')
-
-        with mgg.public_store.get_file(unicode_filepath, 'wb') \
-                as unicode_file:
-            # Decode the original file from its detected charset (or UTF8)
-            # Encode the unicode instance to ASCII and replace any non-ASCII
-            # with an HTML entity (&#
-            unicode_file.write(
-                unicode(queued_file.read().decode(
-                    interpreted_charset)).encode(
-                        'ascii',
-                        'xmlcharrefreplace'))
-
-    # Remove queued media file from storage and database.
-    # queued_filepath is in the task_id directory which should
-    # be removed too, but fail if the directory is not empty to be on
-    # the super-safe side.
-    mgg.queue_store.delete_file(queued_filepath)  # rm file
-    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
-    entry.queued_media_file = []
-
-    media_files_dict = entry.setdefault('media_files', {})
-    media_files_dict['thumb'] = thumb_filepath
-    media_files_dict['unicode'] = unicode_filepath
-    media_files_dict['original'] = original_filepath
-
-    entry.save()
+class CommonAsciiProcessor(MediaProcessor):
+    """
+    Provides a base for various ascii processing steps
+    """
+    acceptable_files = ['original', 'unicode']
+
+    def common_setup(self):
+        self.ascii_config = mgg.global_config[
+            'media_type:mediagoblin.media_types.ascii']
+
+        # Conversions subdirectory to avoid collisions
+        self.conversions_subdir = os.path.join(
+            self.workbench.dir, 'conversions')
+        os.mkdir(self.conversions_subdir)
+
+        # Pull down and set up the processing file
+        self.process_filename = get_process_filename(
+            self.entry, self.workbench, self.acceptable_files)
+        self.name_builder = FilenameBuilder(self.process_filename)
+
+        self.charset = None
+
+    def copy_original(self):
+        copy_original(
+            self.entry, self.process_filename,
+            self.name_builder.fill('{basename}{ext}'))
+
+    def _detect_charset(self, orig_file):
+        d_charset = chardet.detect(orig_file.read())
+
+        # Only select a non-utf-8 charset if chardet is *really* sure
+        # Tested with "Feli\x0109an superjaron", which was detected
+        if d_charset['confidence'] < 0.9:
+            self.charset = 'utf-8'
+        else:
+            self.charset = d_charset['encoding']
+
+        _log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
+            d_charset,
+            self.charset))
+
+        # Rewind the file
+        orig_file.seek(0)
+
+    def store_unicode_file(self):
+        with file(self.process_filename, 'rb') as orig_file:
+            self._detect_charset(orig_file)
+            unicode_filepath = create_pub_filepath(self.entry,
+                                                   'ascii-portable.txt')
+
+            with mgg.public_store.get_file(unicode_filepath, 'wb') \
+                    as unicode_file:
+                # Decode the original file from its detected charset (or UTF8)
+                # Encode the unicode instance to ASCII and replace any
+                # non-ASCII with an HTML entity (&#
+                unicode_file.write(
+                    unicode(orig_file.read().decode(
+                        self.charset)).encode(
+                            'ascii',
+                            'xmlcharrefreplace'))
+
+        self.entry.media_files['unicode'] = unicode_filepath
+
+    def generate_thumb(self, font=None, thumb_size=None):
+        with file(self.process_filename, 'rb') as orig_file:
+            # If no font kwarg, check config
+            if not font:
+                font = self.ascii_config.get('thumbnail_font', None)
+            if not thumb_size:
+                thumb_size = (mgg.global_config['media:thumb']['max_width'],
+                              mgg.global_config['media:thumb']['max_height'])
+
+            tmp_thumb = os.path.join(
+                self.conversions_subdir,
+                self.name_builder.fill('{basename}.thumbnail.png'))
+
+            ascii_converter_args = {}
+
+            # If there is a font from either the config or kwarg, update
+            # ascii_converter_args
+            if font:
+                ascii_converter_args.update(
+                    {'font': self.ascii_config['thumbnail_font']})
+
+            converter = asciitoimage.AsciiToImage(
+                **ascii_converter_args)
+
+            thumb = converter._create_image(
+                orig_file.read())
+
+            with file(tmp_thumb, 'w') as thumb_file:
+                thumb.thumbnail(
+                    thumb_size,
+                    Image.ANTIALIAS)
+                thumb.save(thumb_file)
+
+            _log.debug('Copying local file to public storage')
+            store_public(self.entry, 'thumb', tmp_thumb,
+                         self.name_builder.fill('{basename}.thumbnail.jpg'))
+
+
+class InitialProcessor(CommonAsciiProcessor):
+    """
+    Initial processing step for new ascii media
+    """
+    name = "initial"
+    description = "Initial processing"
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        if not state:
+            state = entry.state
+        return state in (
+            "unprocessed", "failed")
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--font',
+            help='the thumbnail font')
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['thumb_size', 'font'])
+
+    def process(self, thumb_size=None, font=None):
+        self.common_setup()
+        self.store_unicode_file()
+        self.generate_thumb(thumb_size=thumb_size, font=font)
+        self.copy_original()
+        self.delete_queue_file()
+
+
+class Resizer(CommonAsciiProcessor):
+    """
+    Resizing process steps for processed media
+    """
+    name = 'resize'
+    description = 'Resize thumbnail'
+    thumb_size = 'thumb_size'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        # Needed for gmg reprocess thumbs to work
+        parser.add_argument(
+            'file',
+            nargs='?',
+            default='thumb',
+            choices=['thumb'])
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['thumb_size', 'file'])
+
+    def process(self, thumb_size=None, file=None):
+        self.common_setup()
+        self.generate_thumb(thumb_size=thumb_size)
+
+
+class AsciiProcessingManager(ProcessingManager):
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.add_processor(InitialProcessor)
+        self.add_processor(Resizer)

View File

@@ -15,7 +15,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.audio.processing import process_audio, \
+from mediagoblin.media_types.audio.processing import AudioProcessingManager, \
     sniff_handler
 from mediagoblin.tools import pluginapi

@@ -32,8 +32,8 @@ def setup_plugin():
 
 class AudioMediaManager(MediaManagerBase):
     human_readable = "Audio"
-    processor = staticmethod(process_audio)
     display_template = "mediagoblin/media_displays/audio.html"
+    default_thumb = "images/media_thumbs/image.png"
 
 
 def get_media_type_and_manager(ext):

@@ -45,4 +45,5 @@ hooks = {
     'get_media_type_and_manager': get_media_type_and_manager,
     'sniff_handler': sniff_handler,
     ('media_manager', MEDIA_TYPE): lambda: AudioMediaManager,
+    ('reprocess_manager', MEDIA_TYPE): lambda: AudioProcessingManager,
 }

View File

@@ -14,16 +14,19 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import argparse
 import logging
-from tempfile import NamedTemporaryFile
 import os
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import (create_pub_filepath, BadMediaFail,
-    FilenameBuilder, ProgressCallback)
-from mediagoblin.media_types.audio.transcoders import (AudioTranscoder,
-    AudioThumbnailer)
+from mediagoblin.processing import (
+    BadMediaFail, FilenameBuilder,
+    ProgressCallback, MediaProcessor, ProcessingManager,
+    request_from_args, get_process_filename,
+    store_public, copy_original)
+from mediagoblin.media_types.audio.transcoders import (
+    AudioTranscoder, AudioThumbnailer)
 
 _log = logging.getLogger(__name__)

@@ -39,121 +42,304 @@ def sniff_handler(media_file, **kw):
         _log.debug('Audio discovery raised BadMediaFail')
         return None
 
-    if data.is_audio == True and data.is_video == False:
+    if data.is_audio is True and data.is_video is False:
         return MEDIA_TYPE
 
     return None
 
-def process_audio(proc_state):
-    """Code to process uploaded audio. Will be run by celery.
-
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-    audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']
-
-    queued_filepath = entry.queued_media_file
-    queued_filename = workbench.localized_file(
-        mgg.queue_store, queued_filepath,
-        'source')
-    name_builder = FilenameBuilder(queued_filename)
-
-    webm_audio_filepath = create_pub_filepath(
-        entry,
-        '{original}.webm'.format(
-            original=os.path.splitext(
-                queued_filepath[-1])[0]))
-
-    if audio_config['keep_original']:
-        with open(queued_filename, 'rb') as queued_file:
-            original_filepath = create_pub_filepath(
-                entry, name_builder.fill('{basename}{ext}'))
-
-            with mgg.public_store.get_file(original_filepath, 'wb') as \
-                    original_file:
-                _log.debug('Saving original...')
-                original_file.write(queued_file.read())
-
-            entry.media_files['original'] = original_filepath
-
-    transcoder = AudioTranscoder()
-
-    with NamedTemporaryFile(dir=workbench.dir) as webm_audio_tmp:
-        progress_callback = ProgressCallback(entry)
-
-        transcoder.transcode(
-            queued_filename,
-            webm_audio_tmp.name,
-            quality=audio_config['quality'],
-            progress_callback=progress_callback)
-
-        transcoder.discover(webm_audio_tmp.name)
-
-        _log.debug('Saving medium...')
-        mgg.public_store.get_file(webm_audio_filepath, 'wb').write(
-            webm_audio_tmp.read())
-
-        entry.media_files['webm_audio'] = webm_audio_filepath
-
-        # entry.media_data_init(length=int(data.audiolength))
-
-    if audio_config['create_spectrogram']:
-        spectrogram_filepath = create_pub_filepath(
-            entry,
-            '{original}-spectrogram.jpg'.format(
-                original=os.path.splitext(
-                    queued_filepath[-1])[0]))
-
-        with NamedTemporaryFile(dir=workbench.dir, suffix='.ogg') as wav_tmp:
-            _log.info('Creating OGG source for spectrogram')
-            transcoder.transcode(
-                queued_filename,
-                wav_tmp.name,
-                mux_string='vorbisenc quality={0} ! oggmux'.format(
-                    audio_config['quality']))
-
-            thumbnailer = AudioThumbnailer()
-
-            with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as spectrogram_tmp:
-                thumbnailer.spectrogram(
-                    wav_tmp.name,
-                    spectrogram_tmp.name,
-                    width=mgg.global_config['media:medium']['max_width'],
-                    fft_size=audio_config['spectrogram_fft_size'])
-
-                _log.debug('Saving spectrogram...')
-                mgg.public_store.get_file(spectrogram_filepath, 'wb').write(
-                    spectrogram_tmp.read())
-
-                entry.media_files['spectrogram'] = spectrogram_filepath
-
-                with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as thumb_tmp:
-                    thumbnailer.thumbnail_spectrogram(
-                        spectrogram_tmp.name,
-                        thumb_tmp.name,
-                        (mgg.global_config['media:thumb']['max_width'],
-                         mgg.global_config['media:thumb']['max_height']))
-
-                    thumb_filepath = create_pub_filepath(
-                        entry,
-                        '{original}-thumbnail.jpg'.format(
-                            original=os.path.splitext(
-                                queued_filepath[-1])[0]))
-
-                    mgg.public_store.get_file(thumb_filepath, 'wb').write(
-                        thumb_tmp.read())
-
-                    entry.media_files['thumb'] = thumb_filepath
-    else:
-        entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']
-
-    # Remove queued media file from storage and database.
-    # queued_filepath is in the task_id directory which should
-    # be removed too, but fail if the directory is not empty to be on
-    # the super-safe side.
-    mgg.queue_store.delete_file(queued_filepath)  # rm file
-    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
-    entry.queued_media_file = []
+class CommonAudioProcessor(MediaProcessor):
+    """
+    Provides a base for various audio processing steps
+    """
+    acceptable_files = ['original', 'best_quality', 'webm_audio']
+
+    def common_setup(self):
+        """
+        Setup the workbench directory and pull down the original file, add
+        the audio_config, transcoder, thumbnailer and spectrogram_tmp path
+        """
+        self.audio_config = mgg \
+            .global_config['media_type:mediagoblin.media_types.audio']
+
+        # Pull down and set up the processing file
+        self.process_filename = get_process_filename(
+            self.entry, self.workbench, self.acceptable_files)
+        self.name_builder = FilenameBuilder(self.process_filename)
+
+        self.transcoder = AudioTranscoder()
+        self.thumbnailer = AudioThumbnailer()
+
+    def copy_original(self):
+        if self.audio_config['keep_original']:
+            copy_original(
+                self.entry, self.process_filename,
+                self.name_builder.fill('{basename}{ext}'))
+
+    def _keep_best(self):
+        """
+        If there is no original, keep the best file that we have
+        """
+        if not self.entry.media_files.get('best_quality'):
+            # Save the best quality file if no original?
+            if not self.entry.media_files.get('original') and \
+                    self.entry.media_files.get('webm_audio'):
+                self.entry.media_files['best_quality'] = self.entry \
+                    .media_files['webm_audio']
+
+    def transcode(self, quality=None):
+        if not quality:
+            quality = self.audio_config['quality']
+
+        progress_callback = ProgressCallback(self.entry)
+        webm_audio_tmp = os.path.join(self.workbench.dir,
+                                      self.name_builder.fill(
+                                          '{basename}{ext}'))
+
+        self.transcoder.transcode(
+            self.process_filename,
+            webm_audio_tmp,
+            quality=quality,
+            progress_callback=progress_callback)
+
+        self.transcoder.discover(webm_audio_tmp)
+
+        self._keep_best()
+
+        _log.debug('Saving medium...')
+        store_public(self.entry, 'webm_audio', webm_audio_tmp,
+                     self.name_builder.fill('{basename}.medium.webm'))
+
+    def create_spectrogram(self, max_width=None, fft_size=None):
+        if not max_width:
+            max_width = mgg.global_config['media:medium']['max_width']
+        if not fft_size:
+            fft_size = self.audio_config['spectrogram_fft_size']
+
+        wav_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
+            '{basename}.ogg'))
+
+        _log.info('Creating OGG source for spectrogram')
+        self.transcoder.transcode(
+            self.process_filename,
+            wav_tmp,
+            mux_string='vorbisenc quality={0} ! oggmux'.format(
+                self.audio_config['quality']))
+
+        spectrogram_tmp = os.path.join(self.workbench.dir,
+                                       self.name_builder.fill(
+                                           '{basename}-spectrogram.jpg'))
+
+        self.thumbnailer.spectrogram(
+            wav_tmp,
+            spectrogram_tmp,
+            width=max_width,
+            fft_size=fft_size)
+
+        _log.debug('Saving spectrogram...')
+        store_public(self.entry, 'spectrogram', spectrogram_tmp,
+                     self.name_builder.fill('{basename}.spectrogram.jpg'))
+
+    def generate_thumb(self, size=None):
+        if not size:
+            max_width = mgg.global_config['media:thumb']['max_width']
+            max_height = mgg.global_config['media:thumb']['max_height']
+            size = (max_width, max_height)
+
+        thumb_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
+            '{basename}-thumbnail.jpg'))
+
+        # We need the spectrogram to create a thumbnail
+        spectrogram = self.entry.media_files.get('spectrogram')
+        if not spectrogram:
+            _log.info('No spectrogram found, we will create one.')
+            self.create_spectrogram()
+            spectrogram = self.entry.media_files['spectrogram']
+
+        spectrogram_filepath = mgg.public_store.get_local_path(spectrogram)
+
+        self.thumbnailer.thumbnail_spectrogram(
+            spectrogram_filepath,
+            thumb_tmp,
+            tuple(size))
+
+        store_public(self.entry, 'thumb', thumb_tmp,
+                     self.name_builder.fill('{basename}.thumbnail.jpg'))
+
+
+class InitialProcessor(CommonAudioProcessor):
+    """
+    Initial processing steps for new audio
+    """
+    name = "initial"
+    description = "Initial processing"
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in (
+            "unprocessed", "failed")
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--quality',
+            type=float,
+            help='vorbisenc quality. Range: -0.1..1')
+
+        parser.add_argument(
+            '--fft_size',
+            type=int,
+            help='spectrogram fft size')
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int,
+            help='minimum size is 100 x 100')
+
+        parser.add_argument(
+            '--medium_width',
+            type=int,
+            help='The width of the spectrogram')
+
+        parser.add_argument(
+            '--create_spectrogram',
+            action='store_true',
+            help='Create spectrogram and thumbnail, will default to config')
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['create_spectrogram', 'quality', 'fft_size',
+                   'thumb_size', 'medium_width'])
+
+    def process(self, quality=None, fft_size=None, thumb_size=None,
+                create_spectrogram=None, medium_width=None):
+        self.common_setup()
+
+        if not create_spectrogram:
+            create_spectrogram = self.audio_config['create_spectrogram']
+
+        self.transcode(quality=quality)
+        self.copy_original()
+
+        if create_spectrogram:
+            self.create_spectrogram(max_width=medium_width,
+                                    fft_size=fft_size)
+            self.generate_thumb(size=thumb_size)
+        self.delete_queue_file()
+
+
+class Resizer(CommonAudioProcessor):
+    """
+    Thumbnail and spectrogram resizing process steps for processed audio
+    """
+    name = 'resize'
+    description = 'Resize thumbnail or spectrogram'
+    thumb_size = 'thumb_size'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media entry is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--fft_size',
+            type=int,
+            help='spectrogram fft size')
+
+        parser.add_argument(
+            '--thumb_size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int,
+            help='minimum size is 100 x 100')
+
+        parser.add_argument(
+            '--medium_width',
+            type=int,
+            help='The width of the spectrogram')
+
+        parser.add_argument(
+            'file',
+            choices=['thumb', 'spectrogram'])
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['thumb_size', 'file', 'fft_size', 'medium_width'])
+
+    def process(self, file, thumb_size=None, fft_size=None,
+                medium_width=None):
+        self.common_setup()
+
+        if file == 'thumb':
+            self.generate_thumb(size=thumb_size)
+        elif file == 'spectrogram':
+            self.create_spectrogram(max_width=medium_width,
+                                    fft_size=fft_size)
+
+
+class Transcoder(CommonAudioProcessor):
+    """
+    Transcoding processing steps for processed audio
+    """
+    name = 'transcode'
+    description = 'Re-transcode audio'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--quality',
+            help='vorbisenc quality. Range: -0.1..1')
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['quality'])
+
+    def process(self, quality=None):
+        self.common_setup()
+        self.transcode(quality=quality)
+
+
+class AudioProcessingManager(ProcessingManager):
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.add_processor(InitialProcessor)
+        self.add_processor(Resizer)
+        self.add_processor(Transcoder)
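With Transcoder registered alongside InitialProcessor and Resizer, a processed
audio entry can presumably be re-encoded at a different Vorbis quality via the
new CLI (the media id here is illustrative):

    ./bin/gmg reprocess run 7 transcode --quality 0.9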

View File

@@ -122,8 +122,7 @@ class AudioThumbnailer(object):
                 int(start_x), 0,
                 int(stop_x), int(im_h)))
 
-        if th.size[0] > th_w or th.size[1] > th_h:
-            th.thumbnail(thumb_size, Image.ANTIALIAS)
+        th.thumbnail(thumb_size, Image.ANTIALIAS)
 
         th.save(dst)

View File

@@ -14,12 +14,15 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import datetime
+import logging
 
 from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.image.processing import process_image, \
-    sniff_handler
+from mediagoblin.media_types.image.processing import sniff_handler, \
+    ImageProcessingManager
 from mediagoblin.tools import pluginapi
 
+_log = logging.getLogger(__name__)
+
 ACCEPTED_EXTENSIONS = ["jpg", "jpeg", "png", "gif", "tiff"]
 MEDIA_TYPE = 'mediagoblin.media_types.image'

@@ -31,7 +34,6 @@ def setup_plugin():
 
 class ImageMediaManager(MediaManagerBase):
     human_readable = "Image"
-    processor = staticmethod(process_image)
     display_template = "mediagoblin/media_displays/image.html"
     default_thumb = "images/media_thumbs/image.png"

@@ -69,4 +71,5 @@ hooks = {
     'get_media_type_and_manager': get_media_type_and_manager,
     'sniff_handler': sniff_handler,
     ('media_manager', MEDIA_TYPE): lambda: ImageMediaManager,
+    ('reprocess_manager', MEDIA_TYPE): lambda: ImageProcessingManager,
 }

View File

@@ -20,9 +20,14 @@ except ImportError:
     import Image
 
 import os
 import logging
+import argparse
 
 from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import BadMediaFail, FilenameBuilder
+from mediagoblin.processing import (
+    BadMediaFail, FilenameBuilder,
+    MediaProcessor, ProcessingManager,
+    request_from_args, get_process_filename,
+    store_public, copy_original)
 from mediagoblin.tools.exif import exif_fix_image_orientation, \
     extract_exif, clean_exif, get_gps_data, get_useful, \
     exif_image_needs_rotation

@@ -38,8 +43,8 @@ PIL_FILTERS = {
 MEDIA_TYPE = 'mediagoblin.media_types.image'
 
 
-def resize_image(proc_state, resized, keyname, target_name, new_size,
-                 exif_tags, workdir):
+def resize_image(entry, resized, keyname, target_name, new_size,
+                 exif_tags, workdir, quality, filter):
     """
     Store a resized version of an image and return its pathname.

@@ -51,17 +56,16 @@ def resize_image(entry, resized, keyname, target_name, new_size,
     exif_tags -- EXIF data for the original image
     workdir -- directory path for storing converted image files
     new_size -- 2-tuple size for the resized image
+    quality -- level of compression used when resizing images
+    filter -- One of BICUBIC, BILINEAR, NEAREST, ANTIALIAS
     """
-    config = mgg.global_config['media_type:mediagoblin.media_types.image']
     resized = exif_fix_image_orientation(resized, exif_tags)  # Fix orientation
 
-    filter_config = config['resize_filter']
     try:
-        resize_filter = PIL_FILTERS[filter_config.upper()]
+        resize_filter = PIL_FILTERS[filter.upper()]
     except KeyError:
         raise Exception('Filter "{0}" not found, choose one of {1}'.format(
-            unicode(filter_config),
+            unicode(filter),
             u', '.join(PIL_FILTERS.keys())))
 
     resized.thumbnail(new_size, resize_filter)

@@ -69,32 +73,36 @@ def resize_image(entry, resized, keyname, target_name, new_size,
     # Copy the new file to the conversion subdir, then remotely.
     tmp_resized_filename = os.path.join(workdir, target_name)
     with file(tmp_resized_filename, 'w') as resized_file:
-        resized.save(resized_file, quality=config['quality'])
-    proc_state.store_public(keyname, tmp_resized_filename, target_name)
+        resized.save(resized_file, quality=quality)
+    store_public(entry, keyname, tmp_resized_filename, target_name)
 
 
-def resize_tool(proc_state, force, keyname, target_name,
-                conversions_subdir, exif_tags):
-    # filename -- the filename of the original image being resized
-    filename = proc_state.get_queued_filename()
-    max_width = mgg.global_config['media:' + keyname]['max_width']
-    max_height = mgg.global_config['media:' + keyname]['max_height']
+def resize_tool(entry,
+                force, keyname, orig_file, target_name,
+                conversions_subdir, exif_tags, quality, filter, new_size=None):
+    # Use the default size if new_size was not given
+    if not new_size:
+        max_width = mgg.global_config['media:' + keyname]['max_width']
+        max_height = mgg.global_config['media:' + keyname]['max_height']
+        new_size = (max_width, max_height)
 
     # If the size of the original file exceeds the specified size for the
     # desired file, a target_name file is created and later associated with
     # the media entry.
     # Also created if the file needs rotation, or if forced.
     try:
-        im = Image.open(filename)
+        im = Image.open(orig_file)
     except IOError:
         raise BadMediaFail()
     if force \
-        or im.size[0] > max_width \
-        or im.size[1] > max_height \
+        or im.size[0] > new_size[0]\
+        or im.size[1] > new_size[1]\
        or exif_image_needs_rotation(exif_tags):
         resize_image(
-            proc_state, im, unicode(keyname), target_name,
-            (max_width, max_height),
-            exif_tags, conversions_subdir)
+            entry, im, unicode(keyname), target_name,
+            tuple(new_size),
+            exif_tags, conversions_subdir,
+            quality, filter)
 
 
 SUPPORTED_FILETYPES = ['png', 'gif', 'jpg', 'jpeg', 'tiff']

@@ -119,53 +127,210 @@ def sniff_handler(media_file, **kw):
     return None
 
-def process_image(proc_state):
-    """Code to process an image. Will be run by celery.
-
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-
-    # Conversions subdirectory to avoid collisions
-    conversions_subdir = os.path.join(
-        workbench.dir, 'conversions')
-    os.mkdir(conversions_subdir)
-
-    queued_filename = proc_state.get_queued_filename()
-    name_builder = FilenameBuilder(queued_filename)
-
-    # EXIF extraction
-    exif_tags = extract_exif(queued_filename)
-    gps_data = get_gps_data(exif_tags)
-
-    # Always create a small thumbnail
-    resize_tool(proc_state, True, 'thumb',
-                name_builder.fill('{basename}.thumbnail{ext}'),
-                conversions_subdir, exif_tags)
-
-    # Possibly create a medium
-    resize_tool(proc_state, False, 'medium',
-                name_builder.fill('{basename}.medium{ext}'),
-                conversions_subdir, exif_tags)
-
-    # Copy our queued local workbench to its final destination
-    proc_state.copy_original(name_builder.fill('{basename}{ext}'))
-
-    # Remove queued media file from storage and database
-    proc_state.delete_queue_file()
-
-    # Insert exif data into database
-    exif_all = clean_exif(exif_tags)
-
-    if len(exif_all):
-        entry.media_data_init(exif_all=exif_all)
-
-    if len(gps_data):
-        for key in list(gps_data.keys()):
-            gps_data['gps_' + key] = gps_data.pop(key)
-        entry.media_data_init(**gps_data)
+class CommonImageProcessor(MediaProcessor):
+    """
+    Provides a base for various media processing steps
+    """
+    # list of acceptable file keys in order of preference for reprocessing
+    acceptable_files = ['original', 'medium']
+
+    def common_setup(self):
+        """
+        Set up the workbench directory and pull down the original file
+        """
+        self.image_config = mgg.global_config[
+            'media_type:mediagoblin.media_types.image']
+
+        ## @@: Should this be two functions?
+        # Conversions subdirectory to avoid collisions
+        self.conversions_subdir = os.path.join(
+            self.workbench.dir, 'conversions')
+        os.mkdir(self.conversions_subdir)
+
+        # Pull down and set up the processing file
+        self.process_filename = get_process_filename(
+            self.entry, self.workbench, self.acceptable_files)
+        self.name_builder = FilenameBuilder(self.process_filename)
+
+        # Exif extraction
+        self.exif_tags = extract_exif(self.process_filename)
+
+    def generate_medium_if_applicable(self, size=None, quality=None,
+                                      filter=None):
+        if not quality:
+            quality = self.image_config['quality']
+        if not filter:
+            filter = self.image_config['resize_filter']
+
+        resize_tool(self.entry, False, 'medium', self.process_filename,
+                    self.name_builder.fill('{basename}.medium{ext}'),
+                    self.conversions_subdir, self.exif_tags, quality,
+                    filter, size)
+
+    def generate_thumb(self, size=None, quality=None, filter=None):
+        if not quality:
+            quality = self.image_config['quality']
+        if not filter:
+            filter = self.image_config['resize_filter']
+
+        resize_tool(self.entry, True, 'thumb', self.process_filename,
+                    self.name_builder.fill('{basename}.thumbnail{ext}'),
+                    self.conversions_subdir, self.exif_tags, quality,
+                    filter, size)
+
+    def copy_original(self):
+        copy_original(
+            self.entry, self.process_filename,
+            self.name_builder.fill('{basename}{ext}'))
+
+    def extract_metadata(self):
+        # Is there any GPS data
+        gps_data = get_gps_data(self.exif_tags)
+
+        # Insert exif data into database
+        exif_all = clean_exif(self.exif_tags)
+
+        if len(exif_all):
+            self.entry.media_data_init(exif_all=exif_all)
+
+        if len(gps_data):
+            for key in list(gps_data.keys()):
+                gps_data['gps_' + key] = gps_data.pop(key)
+            self.entry.media_data_init(**gps_data)
+
+
+class InitialProcessor(CommonImageProcessor):
+    """
+    Initial processing step for new images
+    """
+    name = "initial"
+    description = "Initial processing"
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in (
+            "unprocessed", "failed")
+
+    ###############################
+    # Command line interface things
+    ###############################
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--thumb-size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--filter',
+            choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+        parser.add_argument(
+            '--quality',
+            type=int,
+            help='level of compression used when resizing images')
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['size', 'thumb_size', 'filter', 'quality'])
+
+    def process(self, size=None, thumb_size=None, quality=None, filter=None):
+        self.common_setup()
+        self.generate_medium_if_applicable(size=size, filter=filter,
+                                           quality=quality)
+        self.generate_thumb(size=thumb_size, filter=filter, quality=quality)
+        self.copy_original()
+        self.extract_metadata()
+        self.delete_queue_file()
+
+
+class Resizer(CommonImageProcessor):
+    """
+    Resizing process steps for processed media
+    """
+    name = 'resize'
+    description = 'Resize image'
+    thumb_size = 'size'
+
+    @classmethod
+    def media_is_eligible(cls, entry=None, state=None):
+        """
+        Determine if this media type is eligible for processing
+        """
+        if not state:
+            state = entry.state
+        return state in 'processed'
+
+    ###############################
+    # Command line interface things
+    ###############################
+
+    @classmethod
+    def generate_parser(cls):
+        parser = argparse.ArgumentParser(
+            description=cls.description,
+            prog=cls.name)
+
+        parser.add_argument(
+            '--size',
+            nargs=2,
+            metavar=('max_width', 'max_height'),
+            type=int)
+
+        parser.add_argument(
+            '--filter',
+            choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])
+
+        parser.add_argument(
+            '--quality',
+            type=int,
+            help='level of compression used when resizing images')
+
+        parser.add_argument(
+            'file',
+            choices=['medium', 'thumb'])
+
+        return parser
+
+    @classmethod
+    def args_to_request(cls, args):
+        return request_from_args(
+            args, ['size', 'file', 'quality', 'filter'])
+
+    def process(self, file, size=None, filter=None, quality=None):
+        self.common_setup()
+        if file == 'medium':
+            self.generate_medium_if_applicable(size=size, filter=filter,
+                                               quality=quality)
+        elif file == 'thumb':
+            self.generate_thumb(size=size, filter=filter, quality=quality)
+
+
+class ImageProcessingManager(ProcessingManager):
+    def __init__(self):
+        super(self.__class__, self).__init__()
+        self.add_processor(InitialProcessor)
+        self.add_processor(Resizer)
 
 
 if __name__ == '__main__':
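With these processors registered, regenerating a single image's thumbnail with
explicit size, filter, and quality would look roughly like this (the media id
is illustrative):

    ./bin/gmg reprocess run 7 resize thumb --size 100 100 --filter ANTIALIAS --quality 90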

View File

@@ -15,7 +15,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.pdf.processing import process_pdf, \
+from mediagoblin.media_types.pdf.processing import PdfProcessingManager, \
     sniff_handler
 from mediagoblin.tools import pluginapi

@@ -29,7 +29,6 @@ def setup_plugin():
 
 class PDFMediaManager(MediaManagerBase):
     human_readable = "PDF"
-    processor = staticmethod(process_pdf)
     display_template = "mediagoblin/media_displays/pdf.html"
     default_thumb = "images/media_thumbs/pdf.jpg"

@@ -44,4 +43,5 @@ hooks = {
     'get_media_type_and_manager': get_media_type_and_manager,
     'sniff_handler': sniff_handler,
     ('media_manager', MEDIA_TYPE): lambda: PDFMediaManager,
+    ('reprocess_manager', MEDIA_TYPE): lambda: PdfProcessingManager,
 }

View File

@ -13,14 +13,18 @@
# #
# You should have received a copy of the GNU Affero General Public License # You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os import os
import logging import logging
import dateutil.parser import dateutil.parser
from subprocess import PIPE, Popen from subprocess import PIPE, Popen
from mediagoblin import mg_globals as mgg from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (create_pub_filepath, from mediagoblin.processing import (
FilenameBuilder, BadMediaFail) FilenameBuilder, BadMediaFail,
MediaProcessor, ProcessingManager,
request_from_args, get_process_filename,
store_public, copy_original)
from mediagoblin.tools.translate import fake_ugettext_passthrough as _ from mediagoblin.tools.translate import fake_ugettext_passthrough as _
_log = logging.getLogger(__name__) _log = logging.getLogger(__name__)
@@ -230,51 +234,207 @@ def pdf_info(original):
    return ret_dict

-def process_pdf(proc_state):
-    """Code to process a pdf file. Will be run by celery.
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-    queued_filename = proc_state.get_queued_filename()
-    name_builder = FilenameBuilder(queued_filename)
-    # Copy our queued local workbench to its final destination
-    original_dest = name_builder.fill('{basename}{ext}')
-    proc_state.copy_original(original_dest)
-    # Create a pdf if this is a different doc, store pdf for viewer
-    ext = queued_filename.rsplit('.', 1)[-1].lower()
-    if ext == 'pdf':
-        pdf_filename = queued_filename
-    else:
-        pdf_filename = queued_filename.rsplit('.', 1)[0] + '.pdf'
-        unoconv = where('unoconv')
-        Popen(executable=unoconv,
-              args=[unoconv, '-v', '-f', 'pdf', queued_filename]).wait()
-        if not os.path.exists(pdf_filename):
-            _log.debug('unoconv failed to convert file to pdf')
-            raise BadMediaFail()
-        proc_state.store_public(keyname=u'pdf', local_file=pdf_filename)
-    pdf_info_dict = pdf_info(pdf_filename)
-    for name, width, height in [
-            (u'thumb', mgg.global_config['media:thumb']['max_width'],
-             mgg.global_config['media:thumb']['max_height']),
-            (u'medium', mgg.global_config['media:medium']['max_width'],
-             mgg.global_config['media:medium']['max_height']),
-            ]:
-        filename = name_builder.fill('{basename}.%s.png' % name)
-        path = workbench.joinpath(filename)
-        create_pdf_thumb(pdf_filename, path, width, height)
-        assert(os.path.exists(path))
-        proc_state.store_public(keyname=name, local_file=path)
-    proc_state.delete_queue_file()
-    entry.media_data_init(**pdf_info_dict)
-    entry.save()


class CommonPdfProcessor(MediaProcessor):
    """
    Provides a base for various pdf processing steps
    """
    acceptable_files = ['original', 'pdf']

    def common_setup(self):
        """
        Set up common pdf processing steps
        """
        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self._set_pdf_filename()

    def _set_pdf_filename(self):
        if self.name_builder.ext == '.pdf':
            self.pdf_filename = self.process_filename
        elif self.entry.media_files.get('pdf'):
            self.pdf_filename = self.workbench.localized_file(
                mgg.public_store, self.entry.media_files['pdf'])
        else:
            self.pdf_filename = self._generate_pdf()

    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))

    def generate_thumb(self, thumb_size=None):
        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],
                          mgg.global_config['media:thumb']['max_height'])

        # Note: pdftocairo adds '.png', so don't include an ext
        thumb_filename = os.path.join(self.workbench.dir,
                                      self.name_builder.fill(
                                          '{basename}.thumbnail'))

        executable = where('pdftocairo')
        args = [executable, '-scale-to', str(min(thumb_size)),
                '-singlefile', '-png', self.pdf_filename, thumb_filename]

        _log.debug('calling {0}'.format(repr(' '.join(args))))
        Popen(executable=executable, args=args).wait()

        # since pdftocairo added '.png', we need to include it with the
        # filename
        store_public(self.entry, 'thumb', thumb_filename + '.png',
                     self.name_builder.fill('{basename}.thumbnail.png'))

    def _generate_pdf(self):
        """
        Store the pdf. If the file is not a pdf, make it a pdf
        """
        tmp_pdf = self.process_filename

        unoconv = where('unoconv')
        Popen(executable=unoconv,
              args=[unoconv, '-v', '-f', 'pdf', self.process_filename]).wait()

        if not os.path.exists(tmp_pdf):
            _log.debug('unoconv failed to convert file to pdf')
            raise BadMediaFail()

        store_public(self.entry, 'pdf', tmp_pdf,
                     self.name_builder.fill('{basename}.pdf'))

        return self.workbench.localized_file(
            mgg.public_store, self.entry.media_files['pdf'])

    def extract_pdf_info(self):
        pdf_info_dict = pdf_info(self.pdf_filename)
        self.entry.media_data_init(**pdf_info_dict)

    def generate_medium(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

        # Note: pdftocairo adds '.png', so don't include an ext
        filename = os.path.join(self.workbench.dir,
                                self.name_builder.fill('{basename}.medium'))

        executable = where('pdftocairo')
        args = [executable, '-scale-to', str(min(size)),
                '-singlefile', '-png', self.pdf_filename, filename]

        _log.debug('calling {0}'.format(repr(' '.join(args))))
        Popen(executable=executable, args=args).wait()

        # since pdftocairo added '.png', we need to include it with the
        # filename
        store_public(self.entry, 'medium', filename + '.png',
                     self.name_builder.fill('{basename}.medium.png'))
class InitialProcessor(CommonPdfProcessor):
    """
    Initial processing step for new pdfs
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--thumb-size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['size', 'thumb_size'])

    def process(self, size=None, thumb_size=None):
        self.common_setup()
        self.extract_pdf_info()
        self.copy_original()
        self.generate_medium(size=size)
        self.generate_thumb(thumb_size=thumb_size)
        self.delete_queue_file()


class Resizer(CommonPdfProcessor):
    """
    Resizing process steps for processed pdfs
    """
    name = 'resize'
    description = 'Resize thumbnail and medium'
    thumb_size = 'size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            'file',
            choices=['medium', 'thumb'])

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['size', 'file'])

    def process(self, file, size=None):
        self.common_setup()
        if file == 'medium':
            self.generate_medium(size=size)
        elif file == 'thumb':
            self.generate_thumb(thumb_size=size)


class PdfProcessingManager(ProcessingManager):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)
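
The generate_parser/args_to_request pair is what gives "./bin/gmg reprocess run" its subcommands (per the MediaProcessor docstring later in this diff); a sketch of that round trip for the pdf Resizer, with made-up argv and entry id:

    parser = Resizer.generate_parser()
    args = parser.parse_args(['--size', '640', '480', 'medium'])
    request = Resizer.args_to_request(args)
    # request == {'size': [640, 480], 'file': 'medium'}

    entry, manager = get_entry_and_processing_manager(42)
    with Resizer(manager, entry) as processor:
        processor.process(**request)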

View File

@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.stl.processing import process_stl, \
from mediagoblin.media_types.stl.processing import StlProcessingManager, \
    sniff_handler
from mediagoblin.tools import pluginapi
@@ -29,7 +29,6 @@ def setup_plugin():
class STLMediaManager(MediaManagerBase):
    human_readable = "stereo lithographics"
-    processor = staticmethod(process_stl)
    display_template = "mediagoblin/media_displays/stl.html"
    default_thumb = "images/media_thumbs/video.jpg"
@@ -43,4 +42,5 @@ hooks = {
    'get_media_type_and_manager': get_media_type_and_manager,
    'sniff_handler': sniff_handler,
    ('media_manager', MEDIA_TYPE): lambda: STLMediaManager,
    ('reprocess_manager', MEDIA_TYPE): lambda: StlProcessingManager,
}

View File

@@ -14,6 +14,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import os
import json
import logging
@@ -21,8 +22,11 @@ import subprocess
import pkg_resources

from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import create_pub_filepath, \
-    FilenameBuilder
from mediagoblin.processing import (
    FilenameBuilder, MediaProcessor,
    ProcessingManager, request_from_args,
    get_process_filename, store_public,
    copy_original)
from mediagoblin.media_types.stl import model_loader
@@ -75,49 +79,61 @@ def blender_render(config):
        env=env)

-def process_stl(proc_state):
-    """Code to process an stl or obj model. Will be run by celery.
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-    queued_filepath = entry.queued_media_file
-    queued_filename = workbench.localized_file(
-        mgg.queue_store, queued_filepath, 'source')
-    name_builder = FilenameBuilder(queued_filename)
-    ext = queued_filename.lower().strip()[-4:]
-    if ext.startswith("."):
-        ext = ext[1:]
-    else:
-        ext = None
-    # Attempt to parse the model file and divine some useful
-    # information about it.
-    with open(queued_filename, 'rb') as model_file:
-        model = model_loader.auto_detect(model_file, ext)
-    # generate preview images
-    greatest = [model.width, model.height, model.depth]
-    greatest.sort()
-    greatest = greatest[-1]
-    def snap(name, camera, width=640, height=640, project="ORTHO"):
-        filename = name_builder.fill(name)
-        workbench_path = workbench.joinpath(filename)


class CommonStlProcessor(MediaProcessor):
    """
    Provides a common base for various stl processing steps
    """
    acceptable_files = ['original']

    def common_setup(self):
        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self._set_ext()
        self._set_model()
        self._set_greatest()

    def _set_ext(self):
        ext = self.name_builder.ext[1:]

        if not ext:
            ext = None

        self.ext = ext

    def _set_model(self):
        """
        Attempt to parse the model file and divine some useful
        information about it.
        """
        with open(self.process_filename, 'rb') as model_file:
            self.model = model_loader.auto_detect(model_file, self.ext)

    def _set_greatest(self):
        greatest = [self.model.width, self.model.height, self.model.depth]
        greatest.sort()
        self.greatest = greatest[-1]

    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))

    def _snap(self, keyname, name, camera, size, project="ORTHO"):
        filename = self.name_builder.fill(name)
        workbench_path = self.workbench.joinpath(filename)

        shot = {
-            "model_path": queued_filename,
            "model_path": self.process_filename,
-            "model_ext": ext,
            "model_ext": self.ext,
            "camera_coord": camera,
-            "camera_focus": model.average,
            "camera_focus": self.model.average,
-            "camera_clip": greatest*10,
            "camera_clip": self.greatest*10,
-            "greatest": greatest,
            "greatest": self.greatest,
            "projection": project,
-            "width": width,
            "width": size[0],
-            "height": height,
            "height": size[1],
            "out_file": workbench_path,
        }
        blender_render(shot)
@@ -126,70 +142,191 @@ def process_stl(proc_state):
        assert os.path.exists(workbench_path)

        # copy it up!
-        with open(workbench_path, 'rb') as rendered_file:
-            public_path = create_pub_filepath(entry, filename)
-            with mgg.public_store.get_file(public_path, "wb") as public_file:
-                public_file.write(rendered_file.read())
-        return public_path
        store_public(self.entry, keyname, workbench_path, filename)

-    thumb_path = snap(
-        "{basename}.thumb.jpg",
-        [0, greatest*-1.5, greatest],
-        mgg.global_config['media:thumb']['max_width'],
-        mgg.global_config['media:thumb']['max_height'],
-        project="PERSP")
-    perspective_path = snap(
-        "{basename}.perspective.jpg",
-        [0, greatest*-1.5, greatest], project="PERSP")
-    topview_path = snap(
-        "{basename}.top.jpg",
-        [model.average[0], model.average[1], greatest*2])
-    frontview_path = snap(
-        "{basename}.front.jpg",
-        [model.average[0], greatest*-2, model.average[2]])
-    sideview_path = snap(
-        "{basename}.side.jpg",
-        [greatest*-2, model.average[1], model.average[2]])
-    ## Save the public file stuffs
-    model_filepath = create_pub_filepath(
-        entry, name_builder.fill('{basename}{ext}'))
-    with mgg.public_store.get_file(model_filepath, 'wb') as model_file:
-        with open(queued_filename, 'rb') as queued_file:
-            model_file.write(queued_file.read())
-    # Remove queued media file from storage and database.
-    # queued_filepath is in the task_id directory which should
-    # be removed too, but fail if the directory is not empty to be on
-    # the super-safe side.
-    mgg.queue_store.delete_file(queued_filepath)      # rm file
-    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
-    entry.queued_media_file = []
-    # Insert media file information into database
-    media_files_dict = entry.setdefault('media_files', {})
-    media_files_dict[u'original'] = model_filepath
-    media_files_dict[u'thumb'] = thumb_path
-    media_files_dict[u'perspective'] = perspective_path
-    media_files_dict[u'top'] = topview_path
-    media_files_dict[u'side'] = sideview_path
-    media_files_dict[u'front'] = frontview_path
-    # Put model dimensions into the database
-    dimensions = {
-        "center_x": model.average[0],
-        "center_y": model.average[1],
-        "center_z": model.average[2],
-        "width": model.width,
-        "height": model.height,
-        "depth": model.depth,
-        "file_type": ext,
-    }
-    entry.media_data_init(**dimensions)

    def generate_thumb(self, thumb_size=None):
        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],
                          mgg.global_config['media:thumb']['max_height'])

        self._snap(
            "thumb",
            "{basename}.thumb.jpg",
            [0, self.greatest*-1.5, self.greatest],
            thumb_size,
            project="PERSP")

    def generate_perspective(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

        self._snap(
            "perspective",
            "{basename}.perspective.jpg",
            [0, self.greatest*-1.5, self.greatest],
            size,
            project="PERSP")

    def generate_topview(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

        self._snap(
            "top",
            "{basename}.top.jpg",
            [self.model.average[0], self.model.average[1],
             self.greatest*2],
            size)

    def generate_frontview(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

        self._snap(
            "front",
            "{basename}.front.jpg",
            [self.model.average[0], self.greatest*-2,
             self.model.average[2]],
            size)

    def generate_sideview(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

        self._snap(
            "side",
            "{basename}.side.jpg",
            [self.greatest*-2, self.model.average[1],
             self.model.average[2]],
            size)

    def store_dimensions(self):
        """
        Put model dimensions into the database
        """
        dimensions = {
            "center_x": self.model.average[0],
            "center_y": self.model.average[1],
            "center_z": self.model.average[2],
            "width": self.model.width,
            "height": self.model.height,
            "depth": self.model.depth,
            "file_type": self.ext,
        }
        self.entry.media_data_init(**dimensions)


class InitialProcessor(CommonStlProcessor):
    """
    Initial processing step for new stls
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")
    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['size', 'thumb_size'])

    def process(self, size=None, thumb_size=None):
        self.common_setup()
        self.generate_thumb(thumb_size=thumb_size)
        self.generate_perspective(size=size)
        self.generate_topview(size=size)
        self.generate_frontview(size=size)
        self.generate_sideview(size=size)
        self.store_dimensions()
        self.copy_original()
        self.delete_queue_file()


class Resizer(CommonStlProcessor):
    """
    Resizing process steps for processed stls
    """
    name = 'resize'
    description = 'Resize thumbnail and mediums'
    thumb_size = 'size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            'file',
            choices=['medium', 'thumb'])

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['size', 'file'])

    def process(self, file, size=None):
        self.common_setup()
        if file == 'medium':
            self.generate_perspective(size=size)
            self.generate_topview(size=size)
            self.generate_frontview(size=size)
            self.generate_sideview(size=size)
        elif file == 'thumb':
            self.generate_thumb(thumb_size=size)


class StlProcessingManager(ProcessingManager):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)
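
Note how media_is_eligible accepts either an entry or a bare state string, so a manager can filter processors without loading entries; a small illustration using the states above:

    InitialProcessor.media_is_eligible(state='unprocessed')  # True
    InitialProcessor.media_is_eligible(state='processed')    # False
    # Caveat: `state in 'processed'` is a substring test, so the
    # Resizer also accepts any substring of 'processed'
    Resizer.media_is_eligible(state='processed')             # True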

View File

@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from mediagoblin.media_types import MediaManagerBase
-from mediagoblin.media_types.video.processing import process_video, \
from mediagoblin.media_types.video.processing import VideoProcessingManager, \
    sniff_handler
from mediagoblin.tools import pluginapi
@@ -30,12 +30,11 @@ def setup_plugin():
class VideoMediaManager(MediaManagerBase):
    human_readable = "Video"
-    processor = staticmethod(process_video)
    display_template = "mediagoblin/media_displays/video.html"
    default_thumb = "images/media_thumbs/video.jpg"

    # Used by the media_entry.get_display_media method
-    media_fetch_order = [u'webm_640', u'original']
    media_fetch_order = [u'webm_video', u'original']
    default_webm_type = 'video/webm; codecs="vp8, vorbis"'
@@ -48,4 +47,5 @@ hooks = {
    'get_media_type_and_manager': get_media_type_and_manager,
    'sniff_handler': sniff_handler,
    ('media_manager', MEDIA_TYPE): lambda: VideoMediaManager,
    ('reprocess_manager', MEDIA_TYPE): lambda: VideoProcessingManager,
}

View File

@@ -20,6 +20,7 @@ from sqlalchemy import MetaData, Column, Unicode

MIGRATIONS = {}


@RegisterMigration(1, MIGRATIONS)
def add_orig_metadata_column(db_conn):
    metadata = MetaData(bind=db_conn.bind)
@@ -30,3 +31,19 @@ def add_orig_metadata_column(db_conn):
                      default=None, nullable=True)
    col.create(vid_data)
    db_conn.commit()


@RegisterMigration(2, MIGRATIONS)
def webm_640_to_webm_video(db):
    metadata = MetaData(bind=db.bind)
    file_keynames = inspect_table(metadata, 'core__file_keynames')
    for row in db.execute(file_keynames.select()):
        if row.name == 'webm_640':
            db.execute(
                file_keynames.update().
                where(file_keynames.c.id == row.id).
                values(name='webm_video'))
    db.commit()

View File

@@ -36,12 +36,12 @@ class VideoData(Base):
    - orig_metadata: A loose json structure containing metadata gstreamer
        pulled from the original video.
        This field is NOT GUARANTEED to exist!

        Likely metadata extracted:
          "videoheight", "videolength", "videowidth",
          "audiorate", "audiolength", "audiochannels", "audiowidth",
          "mimetype", "tags"

        TODO: document the above better.
    """
    __tablename__ = "video__mediadata"
@@ -68,7 +68,7 @@ class VideoData(Base):
        """
        orig_metadata = self.orig_metadata or {}

-        if "webm_640" not in self.get_media_entry.media_files \
        if "webm_video" not in self.get_media_entry.media_files \
                and "mimetype" in orig_metadata \
                and "tags" in orig_metadata \
                and "audio-codec" in orig_metadata["tags"] \

View File

@@ -14,13 +14,18 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import os.path
import logging
import datetime

from mediagoblin import mg_globals as mgg
-from mediagoblin.processing import \
-    create_pub_filepath, FilenameBuilder, BaseProcessingFail, ProgressCallback
from mediagoblin.processing import (
    FilenameBuilder, BaseProcessingFail,
    ProgressCallback, MediaProcessor,
    ProcessingManager, request_from_args,
    get_process_filename, store_public,
    copy_original)
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _

from . import transcoders
@@ -48,115 +53,15 @@ def sniff_handler(media_file, **kw):
    if not data:
        _log.error('Could not discover {0}'.format(
            kw.get('media')))
        return None

-    if data['is_video'] == True:
    if data['is_video'] is True:
        return MEDIA_TYPE
    return None
-def process_video(proc_state):
-    """
-    Process a video entry, transcode the queued media files (originals) and
-    create a thumbnail for the entry.
-    A Workbench() represents a local tempory dir. It is automatically
-    cleaned up when this function exits.
-    """
-    entry = proc_state.entry
-    workbench = proc_state.workbench
-    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']
-    queued_filepath = entry.queued_media_file
-    queued_filename = proc_state.get_queued_filename()
-    name_builder = FilenameBuilder(queued_filename)
-    medium_basename = name_builder.fill('{basename}-640p.webm')
-    medium_filepath = create_pub_filepath(entry, medium_basename)
-    thumbnail_basename = name_builder.fill('{basename}.thumbnail.jpg')
-    thumbnail_filepath = create_pub_filepath(entry, thumbnail_basename)
-    # Create a temporary file for the video destination (cleaned up with workbench)
-    tmp_dst = os.path.join(workbench.dir, medium_basename)
-    # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
-    progress_callback = ProgressCallback(entry)
-    dimensions = (
-        mgg.global_config['media:medium']['max_width'],
-        mgg.global_config['media:medium']['max_height'])
-    # Extract metadata and keep a record of it
-    metadata = transcoders.VideoTranscoder().discover(queued_filename)
-    store_metadata(entry, metadata)
-    # Figure out whether or not we need to transcode this video or
-    # if we can skip it
-    if skip_transcode(metadata):
-        _log.debug('Skipping transcoding')
-        dst_dimensions = metadata['videowidth'], metadata['videoheight']
-        # Push original file to public storage
-        _log.debug('Saving original...')
-        proc_state.copy_original(queued_filepath[-1])
-        did_transcode = False
-    else:
-        transcoder = transcoders.VideoTranscoder()
-        transcoder.transcode(queued_filename, tmp_dst,
-                             vp8_quality=video_config['vp8_quality'],
-                             vp8_threads=video_config['vp8_threads'],
-                             vorbis_quality=video_config['vorbis_quality'],
-                             progress_callback=progress_callback,
-                             dimensions=dimensions)
-        dst_dimensions = transcoder.dst_data.videowidth,\
-            transcoder.dst_data.videoheight
-        # Push transcoded video to public storage
-        _log.debug('Saving medium...')
-        mgg.public_store.copy_local_to_storage(tmp_dst, medium_filepath)
-        _log.debug('Saved medium')
-        entry.media_files['webm_640'] = medium_filepath
-        did_transcode = True
-    # Save the width and height of the transcoded video
-    entry.media_data_init(
-        width=dst_dimensions[0],
-        height=dst_dimensions[1])
-    # Temporary file for the video thumbnail (cleaned up with workbench)
-    tmp_thumb = os.path.join(workbench.dir, thumbnail_basename)
-    # Create a thumbnail.jpg that fits in a 180x180 square
-    transcoders.VideoThumbnailerMarkII(
-        queued_filename,
-        tmp_thumb,
-        180)
-    # Push the thumbnail to public storage
-    _log.debug('Saving thumbnail...')
-    mgg.public_store.copy_local_to_storage(tmp_thumb, thumbnail_filepath)
-    entry.media_files['thumb'] = thumbnail_filepath
-    # save the original... but only if we did a transcoding
-    # (if we skipped transcoding and just kept the original anyway as the main
-    # media, then why would we save the original twice?)
-    if video_config['keep_original'] and did_transcode:
-        # Push original file to public storage
-        _log.debug('Saving original...')
-        proc_state.copy_original(queued_filepath[-1])
-    # Remove queued media file from storage and database
-    proc_state.delete_queue_file()
def store_metadata(media_entry, metadata):
    """
    Store metadata from this video for this media entry.
@@ -165,9 +70,9 @@ def store_metadata(media_entry, metadata):
    stored_metadata = dict(
        [(key, metadata[key])
         for key in [
             "videoheight", "videolength", "videowidth",
             "audiorate", "audiolength", "audiochannels", "audiowidth",
             "mimetype"]
         if key in metadata])

    # We have to convert videorate into a sequence because it's a
@@ -186,10 +91,10 @@ def store_metadata(media_entry, metadata):
        tags = dict(
            [(key, tags_metadata[key])
             for key in [
                 "application-name", "artist", "audio-codec", "bitrate",
                 "container-format", "copyright", "encoder",
                 "encoder-version", "license", "nominal-bitrate", "title",
                 "video-codec"]
             if key in tags_metadata])
        if 'date' in tags_metadata:
            date = tags_metadata['date']
@@ -211,3 +116,297 @@ def store_metadata(media_entry, metadata):
    if len(stored_metadata):
        media_entry.media_data_init(
            orig_metadata=stored_metadata)
class CommonVideoProcessor(MediaProcessor):
    """
    Provides a base for various video processing steps
    """
    acceptable_files = ['original', 'best_quality', 'webm_video']

    def common_setup(self):
        self.video_config = mgg \
            .global_config['media_type:mediagoblin.media_types.video']

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.transcoder = transcoders.VideoTranscoder()
        self.did_transcode = False

    def copy_original(self):
        # If we didn't transcode, then we need to keep the original
        if not self.did_transcode or \
           (self.video_config['keep_original'] and self.did_transcode):
            copy_original(
                self.entry, self.process_filename,
                self.name_builder.fill('{basename}{ext}'))

    def _keep_best(self):
        """
        If there is no original, keep the best file that we have
        """
        if not self.entry.media_files.get('best_quality'):
            # Save the best quality file if no original?
            if not self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_video'):
                self.entry.media_files['best_quality'] = self.entry \
                    .media_files['webm_video']

    def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                  vorbis_quality=None):
        progress_callback = ProgressCallback(self.entry)
        tmp_dst = os.path.join(self.workbench.dir,
                               self.name_builder.fill('{basename}.medium.webm'))

        if not medium_size:
            medium_size = (
                mgg.global_config['media:medium']['max_width'],
                mgg.global_config['media:medium']['max_height'])
        if not vp8_quality:
            vp8_quality = self.video_config['vp8_quality']
        if not vp8_threads:
            vp8_threads = self.video_config['vp8_threads']
        if not vorbis_quality:
            vorbis_quality = self.video_config['vorbis_quality']

        # Extract metadata and keep a record of it
        metadata = self.transcoder.discover(self.process_filename)
        store_metadata(self.entry, metadata)

        # Figure out whether or not we need to transcode this video or
        # if we can skip it
        if skip_transcode(metadata, medium_size):
            _log.debug('Skipping transcoding')

            dst_dimensions = metadata['videowidth'], metadata['videoheight']

            # If there is an original and transcoded, delete the transcoded
            # since it must be of lower quality than the original
            if self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_video'):
                self.entry.media_files['webm_video'].delete()

        else:
            self.transcoder.transcode(self.process_filename, tmp_dst,
                                      vp8_quality=vp8_quality,
                                      vp8_threads=vp8_threads,
                                      vorbis_quality=vorbis_quality,
                                      progress_callback=progress_callback,
                                      dimensions=tuple(medium_size))

            dst_dimensions = self.transcoder.dst_data.videowidth,\
                self.transcoder.dst_data.videoheight

            self._keep_best()

            # Push transcoded video to public storage
            _log.debug('Saving medium...')
            store_public(self.entry, 'webm_video', tmp_dst,
                         self.name_builder.fill('{basename}.medium.webm'))
            _log.debug('Saved medium')

            self.did_transcode = True

        # Save the width and height of the transcoded video
        self.entry.media_data_init(
            width=dst_dimensions[0],
            height=dst_dimensions[1])

    def generate_thumb(self, thumb_size=None):
        # Temporary file for the video thumbnail (cleaned up with workbench)
        tmp_thumb = os.path.join(self.workbench.dir,
                                 self.name_builder.fill(
                                     '{basename}.thumbnail.jpg'))

        if not thumb_size:
            # Note: a one-item tuple, since only the width is used below
            thumb_size = (mgg.global_config['media:thumb']['max_width'],)

        # We will only use the width so that the correct scale is kept
        transcoders.VideoThumbnailerMarkII(
            self.process_filename,
            tmp_thumb,
            thumb_size[0])

        # Push the thumbnail to public storage
        _log.debug('Saving thumbnail...')
        store_public(self.entry, 'thumb', tmp_thumb,
                     self.name_builder.fill('{basename}.thumbnail.jpg'))


class InitialProcessor(CommonVideoProcessor):
    """
    Initial processing steps for new video
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--medium_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--vp8_quality',
            type=int,
            help='Range 0..10')

        parser.add_argument(
            '--vp8_threads',
            type=int,
            help='0 means number_of_CPUs - 1')

        parser.add_argument(
            '--vorbis_quality',
            type=float,
            help='Range -0.1..1')

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['medium_size', 'vp8_quality', 'vp8_threads',
                   'vorbis_quality', 'thumb_size'])

    def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
                vorbis_quality=None, thumb_size=None):
        self.common_setup()

        self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
                       vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)

        self.copy_original()
        self.generate_thumb(thumb_size=thumb_size)
        self.delete_queue_file()


class Resizer(CommonVideoProcessor):
    """
    Video thumbnail resizing process steps for processed media
    """
    name = 'resize'
    description = 'Resize thumbnail'
    thumb_size = 'thumb_size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        # Needed for gmg reprocess thumbs to work
        parser.add_argument(
            'file',
            nargs='?',
            default='thumb',
            choices=['thumb'])

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['thumb_size', 'file'])

    def process(self, thumb_size=None, file=None):
        self.common_setup()
        self.generate_thumb(thumb_size=thumb_size)


class Transcoder(CommonVideoProcessor):
    """
    Transcoding processing steps for processed video
    """
    name = 'transcode'
    description = 'Re-transcode video'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--medium_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--vp8_quality',
            type=int,
            help='Range 0..10')

        parser.add_argument(
            '--vp8_threads',
            type=int,
            help='0 means number_of_CPUs - 1')

        parser.add_argument(
            '--vorbis_quality',
            type=float,
            help='Range -0.1..1')

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['medium_size', 'vp8_threads', 'vp8_quality',
                   'vorbis_quality'])

    def process(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                vorbis_quality=None):
        self.common_setup()
        self.transcode(medium_size=medium_size, vp8_threads=vp8_threads,
                       vp8_quality=vp8_quality, vorbis_quality=vorbis_quality)


class VideoProcessingManager(ProcessingManager):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)
        self.add_processor(Transcoder)
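
Putting the video pieces together, a re-transcode of an already-processed entry could look roughly like this, using get_entry_and_processing_manager from mediagoblin/processing/__init__.py (later in this diff); the entry id and quality numbers are illustrative:

    entry, manager = get_entry_and_processing_manager(42)
    processor_class = manager.get_processor('transcode', entry)
    with processor_class(manager, entry) as processor:
        processor.process(medium_size=(640, 640), vp8_quality=8,
                          vp8_threads=0, vorbis_quality=0.3)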

View File

@@ -21,7 +21,7 @@ from mediagoblin import mg_globals as mgg
_log = logging.getLogger(__name__)

-def skip_transcode(metadata):
def skip_transcode(metadata, size):
    '''
    Checks video metadata against configuration values for skip_transcode.
@@ -51,9 +51,9 @@ def skip_transcode(metadata):
            return False
    if config['dimensions_match']:
-        if not metadata['videoheight'] <= medium_config['max_height']:
        if not metadata['videoheight'] <= size[1]:
            return False
-        if not metadata['videowidth'] <= medium_config['max_width']:
        if not metadata['videowidth'] <= size[0]:
            return False

    return True
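
A worked example of the new size-aware check, assuming dimensions_match is enabled and the earlier config checks pass (metadata dicts here are hypothetical):

    metadata = {'videowidth': 1280, 'videoheight': 720}
    skip_transcode(metadata, (640, 640))   # False: 1280 > 640 and 720 > 640

    metadata = {'videowidth': 480, 'videoheight': 360}
    skip_transcode(metadata, (640, 640))   # True, if the other checks pass too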

View File

@@ -17,7 +17,8 @@
import logging

from mediagoblin.db.models import Notification, \
-    CommentNotification, CommentSubscription
    CommentNotification, CommentSubscription, User
from mediagoblin.notifications.task import email_notification_task
from mediagoblin.notifications.tools import generate_comment_message

_log = logging.getLogger(__name__)
@@ -121,6 +122,12 @@ NOTIFICATION_FETCH_LIMIT = 100
def get_notifications(user_id, only_unseen=True):
    query = Notification.query.filter_by(user_id=user_id)
    wants_notifications = User.query.filter_by(id=user_id).first()\
        .wants_notifications

    # If the user does not want notifications, don't return any
    if not wants_notifications:
        return None

    if only_unseen:
        query = query.filter_by(seen=False)
@@ -130,12 +137,19 @@ def get_notifications(user_id, only_unseen=True):
    return notifications

def get_notification_count(user_id, only_unseen=True):
    query = Notification.query.filter_by(user_id=user_id)
    wants_notifications = User.query.filter_by(id=user_id).first()\
        .wants_notifications

    if only_unseen:
        query = query.filter_by(seen=False)

-    count = query.count()
    # If the user doesn't want notifications, don't show any
    if not wants_notifications:
        count = None
    else:
        count = query.count()

    return count
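
The new wants_notifications column (added by migration 15) is what both helpers consult; a sketch with a hypothetical user:

    user = User.query.filter_by(username=u'somebody').first()
    user.wants_notifications = False
    user.save()

    # Both helpers now short-circuit for this user
    get_notifications(user.id)         # None, regardless of unseen rows
    get_notification_count(user.id)    # None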

View File

@@ -23,3 +23,7 @@ add_route('mediagoblin.notifications.subscribe_comments',
add_route('mediagoblin.notifications.silence_comments',
          '/u/<string:user>/m/<string:media>/notifications/silence/',
          'mediagoblin.notifications.views:silence_comments')

add_route('mediagoblin.notifications.mark_all_comment_notifications_seen',
          '/notifications/comments/mark_all_seen/',
          'mediagoblin.notifications.views:mark_all_comment_notifications_seen')

View File

@@ -14,19 +14,15 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

-from mediagoblin.tools.response import render_to_response, render_404, redirect
from mediagoblin.tools.response import redirect
from mediagoblin.tools.translate import pass_to_ugettext as _
-from mediagoblin.decorators import (uses_pagination, get_user_media_entry,
-    get_media_entry_by_id,
-    require_active_login, user_may_delete_media, user_may_alter_collection,
-    get_user_collection, get_user_collection_item, active_user_from_url)
from mediagoblin.decorators import get_user_media_entry, require_active_login
from mediagoblin import messages
-from mediagoblin.notifications import add_comment_subscription, \
-    silence_comment_subscription
from mediagoblin.notifications import (add_comment_subscription,
    silence_comment_subscription, mark_comment_notification_seen,
    get_notifications)
-from werkzeug.exceptions import BadRequest

@get_user_media_entry
@require_active_login
@@ -41,6 +37,7 @@ def subscribe_comments(request, media):
    return redirect(request, location=media.url_for_self(request.urlgen))

@get_user_media_entry
@require_active_login
def silence_comments(request, media):
@@ -52,3 +49,17 @@ def silence_comments(request, media):
                          ' %s.') % media.title)
    return redirect(request, location=media.url_for_self(request.urlgen))
@require_active_login
def mark_all_comment_notifications_seen(request):
    """
    Marks all comment notifications seen.
    """
    for comment in get_notifications(request.user.id):
        mark_comment_notification_seen(comment.subject_id, request.user)

    if request.GET.get('next'):
        return redirect(request, location=request.GET.get('next'))
    else:
        return redirect(request, 'index')

View File

@@ -14,12 +14,14 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from collections import OrderedDict
import logging
import os

-from mediagoblin.db.util import atomic_update
from mediagoblin import mg_globals as mgg
from mediagoblin.db.util import atomic_update
from mediagoblin.db.models import MediaEntry
from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _

_log = logging.getLogger(__name__)
@@ -74,49 +76,89 @@ class FilenameBuilder(object):
            ext=self.ext)

-class ProcessingState(object):
-    """
-    The first and only argument to the "processor" of a media type
-    This could be thought of as a "request" to the processor
-    function. It has the main info for the request (media entry)
-    and a bunch of tools for the request on it.
-    It can get more fancy without impacting old media types.
-    """
-    def __init__(self, entry):
-        self.entry = entry
-        self.workbench = None
-        self.queued_filename = None
-    def set_workbench(self, wb):
-        self.workbench = wb
-    def get_queued_filename(self):
-        """
-        Get the a filename for the original, on local storage
-        """
-        if self.queued_filename is not None:
-            return self.queued_filename
-        queued_filepath = self.entry.queued_media_file
-        queued_filename = self.workbench.localized_file(
-            mgg.queue_store, queued_filepath,
-            'source')
-        self.queued_filename = queued_filename
-        return queued_filename
-    def copy_original(self, target_name, keyname=u"original"):
-        self.store_public(keyname, self.get_queued_filename(), target_name)
-    def store_public(self, keyname, local_file, target_name=None):
-        if target_name is None:
-            target_name = os.path.basename(local_file)
-        target_filepath = create_pub_filepath(self.entry, target_name)
-        if keyname in self.entry.media_files:
-            _log.warn("store_public: keyname %r already used for file %r, "
-                      "replacing with %r", keyname,
-                      self.entry.media_files[keyname], target_filepath)
-        mgg.public_store.copy_local_to_storage(local_file, target_filepath)
-        self.entry.media_files[keyname] = target_filepath


class MediaProcessor(object):
    """A particular processor for this media type.

    While the ProcessingManager handles all types of MediaProcessing
    possible for a particular media type, a MediaProcessor can be
    thought of as a *particular* processing action for a media type.
    For example, you may have separate MediaProcessors for:

    - initial_processing: the initial processing of a media
    - gen_thumb: generate a thumbnail
    - resize: resize an image
    - transcode: transcode a video

    ... etc.

    Some information on producing a new MediaProcessor for your media type:

    - You *must* supply a name attribute.  This must be a class level
      attribute, and a string.  This will be used to determine the
      subcommand of your process
    - It's recommended that you supply a class level description
      attribute.
    - Supply a media_is_eligible classmethod.  This will be used to
      determine whether or not a media entry is eligible to use this
      processor type.  See the method documentation for details.
    - To give "./bin/gmg reprocess run" abilities to this media type,
      supply both generate_parser and args_to_request classmethods.
    - The process method will be what actually processes your media.
    """
    # You MUST override this in the child MediaProcessor!
    name = None

    # Optional, but will be used in various places to describe the
    # action this MediaProcessor provides
    description = None

    def __init__(self, manager, entry):
        self.manager = manager
        self.entry = entry
        self.entry_orig_state = entry.state

        # Should be initialized at time of processing, at least
        self.workbench = None

    def __enter__(self):
        self.workbench = mgg.workbench_manager.create()
        return self

    def __exit__(self, *args):
        self.workbench.destroy()
        self.workbench = None

    # @with_workbench
    def process(self, **kwargs):
        """
        Actually process this media entry.
        """
        raise NotImplementedError

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        raise NotImplementedError

    ###############################
    # Command line interface things
    ###############################

    @classmethod
    def generate_parser(cls):
        raise NotImplementedError

    @classmethod
    def args_to_request(cls, args):
        raise NotImplementedError

    ##########################################
    # THE FUTURE: web interface things here :)
    ##########################################

    #####################
    # Some common "steps"
    #####################

    def delete_queue_file(self):
        # Remove queued media file from storage and database.
@@ -124,9 +166,129 @@ class ProcessingState(object):
        # queued_filepath is in the task_id directory which should
        # be removed too, but fail if the directory is not empty to be on
        # the super-safe side.
        queued_filepath = self.entry.queued_media_file
-        mgg.queue_store.delete_file(queued_filepath)      # rm file
-        mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
-        self.entry.queued_media_file = []
        if queued_filepath:
            mgg.queue_store.delete_file(queued_filepath)      # rm file
            mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
            self.entry.queued_media_file = []
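
To make that contract concrete, a minimal, hypothetical processor that follows the rules above (it assumes argparse is imported, as the media-type modules in this diff do):

    class GenThumb(MediaProcessor):
        """Hypothetical example: regenerate only the thumbnail"""
        name = 'gen_thumb'
        description = 'Generate the thumbnail'

        @classmethod
        def media_is_eligible(cls, entry=None, state=None):
            if not state:
                state = entry.state
            return state == 'processed'

        @classmethod
        def generate_parser(cls):
            parser = argparse.ArgumentParser(
                description=cls.description, prog=cls.name)
            parser.add_argument('--thumb_size', nargs=2, type=int)
            return parser

        @classmethod
        def args_to_request(cls, args):
            return request_from_args(args, ['thumb_size'])

        def process(self, thumb_size=None):
            # A real processor would build files in self.workbench and
            # push them out with store_public() here
            pass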
class ProcessingKeyError(Exception): pass
class ProcessorDoesNotExist(ProcessingKeyError): pass
class ProcessorNotEligible(ProcessingKeyError): pass
class ProcessingManagerDoesNotExist(ProcessingKeyError): pass
class ProcessingManager(object):
    """Manages all the processing actions available for a media type

    Specific processing actions, MediaProcessor subclasses, are added
    to the ProcessingManager.
    """
    def __init__(self):
        # Dict of all MediaProcessors of this media type
        self.processors = OrderedDict()

    def add_processor(self, processor):
        """
        Add a processor class to this media type
        """
        name = processor.name
        if name is None:
            raise AttributeError("Processor class's .name attribute not set")

        self.processors[name] = processor

    def list_eligible_processors(self, entry):
        """
        List all processors that this media entry is eligible to be processed
        for.
        """
        return [
            processor
            for processor in self.processors.values()
            if processor.media_is_eligible(entry=entry)]

    def list_all_processors_by_state(self, state):
        """
        List all processors that this media state is eligible to be processed
        for.
        """
        return [
            processor
            for processor in self.processors.values()
            if processor.media_is_eligible(state=state)]

    def list_all_processors(self):
        return self.processors.values()

    def gen_process_request_via_cli(self, subparser):
        # Got to figure out what actually goes here before I can write this properly
        pass

    def get_processor(self, key, entry=None):
        """
        Get the processor with this key.

        If entry supplied, make sure this entry is actually compatible;
        otherwise raise error.
        """
        try:
            processor = self.processors[key]
        except KeyError:
            raise ProcessorDoesNotExist(
                "'%s' processor does not exist for this media type" % key)

        if entry and not processor.media_is_eligible(entry):
            raise ProcessorNotEligible(
                "This entry is not eligible for processor with name '%s'" % key)

        return processor


def request_from_args(args, which_args):
    """
    Generate a request from the values of some argparse parsed args
    """
    request = {}
    for arg in which_args:
        request[arg] = getattr(args, arg)

    return request
class MediaEntryNotFound(Exception): pass


def get_processing_manager_for_type(media_type):
    """
    Get the appropriate processing manager for this media type
    """
    manager_class = hook_handle(('reprocess_manager', media_type))
    if not manager_class:
        raise ProcessingManagerDoesNotExist(
            "A processing manager does not exist for {0}".format(media_type))
    manager = manager_class()

    return manager


def get_entry_and_processing_manager(media_id):
    """
    Get a MediaEntry and its processing manager in one go.

    Returns a tuple of: `(entry, manager)`
    """
    entry = MediaEntry.query.filter_by(id=media_id).first()
    if entry is None:
        raise MediaEntryNotFound("Can't find media with id '%s'" % media_id)

    manager = get_processing_manager_for_type(entry.media_type)

    return entry, manager
def mark_entry_failed(entry_id, exc):
@@ -165,6 +327,66 @@ def mark_entry_failed(entry_id, exc):
            u'fail_metadata': {}})
def get_process_filename(entry, workbench, acceptable_files):
    """
    Try and get the queued file if available, otherwise return the first file
    in the acceptable_files that we have.

    If no acceptable_files, raise ProcessFileNotFound
    """
    if entry.queued_media_file:
        filepath = entry.queued_media_file
        storage = mgg.queue_store
    else:
        for keyname in acceptable_files:
            if entry.media_files.get(keyname):
                filepath = entry.media_files[keyname]
                storage = mgg.public_store
                break

    if not filepath:
        raise ProcessFileNotFound()

    filename = workbench.localized_file(
        storage, filepath,
        'source')

    if not os.path.exists(filename):
        raise ProcessFileNotFound()

    return filename
def store_public(entry, keyname, local_file, target_name=None,
                 delete_if_exists=True):
    if target_name is None:
        target_name = os.path.basename(local_file)
    target_filepath = create_pub_filepath(entry, target_name)

    if keyname in entry.media_files:
        _log.warn("store_public: keyname %r already used for file %r, "
                  "replacing with %r", keyname,
                  entry.media_files[keyname], target_filepath)
        if delete_if_exists:
            mgg.public_store.delete_file(entry.media_files[keyname])

    try:
        mgg.public_store.copy_local_to_storage(local_file, target_filepath)
    except Exception:
        raise PublicStoreFail(keyname=keyname)

    # raise an error if the file failed to copy
    copied_filepath = mgg.public_store.get_local_path(target_filepath)
    if not os.path.exists(copied_filepath):
        raise PublicStoreFail(keyname=keyname)

    entry.media_files[keyname] = target_filepath


def copy_original(entry, orig_filename, target_name, keyname=u"original"):
    store_public(entry, keyname, orig_filename, target_name)
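
In practice a processor calls these helpers with workbench-local paths; a minimal sketch from inside some hypothetical MediaProcessor step, after common_setup():

    tmp_out = os.path.join(self.workbench.dir, 'output.thumbnail.jpg')
    # ... render tmp_out somehow ...
    store_public(self.entry, 'thumb', tmp_out,
                 self.name_builder.fill('{basename}.thumbnail.jpg'))

    # and keep the source file around under the 'original' keyname
    copy_original(self.entry, self.process_filename,
                  self.name_builder.fill('{basename}{ext}'))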
class BaseProcessingFail(Exception):
    """
    Base exception that all other processing failure messages should
@@ -184,10 +406,24 @@ class BaseProcessingFail(Exception):
    def __init__(self, **metadata):
        self.metadata = metadata or {}


class BadMediaFail(BaseProcessingFail):
    """
    Error that should be raised when an inappropriate file was given
    for the media type specified.
    """
    general_message = _(u'Invalid file given for media type.')
class PublicStoreFail(BaseProcessingFail):
    """
    Error that should be raised when copying to public store fails
    """
    general_message = _('Copying to public storage failed.')


class ProcessFileNotFound(BaseProcessingFail):
    """
    Error that should be raised when an acceptable file for processing
    is not found.
    """
    general_message = _(u'An acceptable processing file was not found')

View File

@@ -18,19 +18,20 @@ import logging
import urllib
import urllib2

-from celery import registry, task
import celery
from celery.registry import tasks

from mediagoblin import mg_globals as mgg
-from mediagoblin.db.models import MediaEntry
-from . import mark_entry_failed, BaseProcessingFail, ProcessingState
from . import mark_entry_failed, BaseProcessingFail
from mediagoblin.tools.processing import json_processing_callback
from mediagoblin.processing import get_entry_and_processing_manager

_log = logging.getLogger(__name__)
logging.basicConfig()
_log.setLevel(logging.DEBUG)

-@task.task(default_retry_delay=2 * 60)
@celery.task(default_retry_delay=2 * 60)
def handle_push_urls(feed_url):
    """Subtask, notifying the PuSH servers of new content
@@ -60,36 +61,51 @@ def handle_push_urls(feed_url):
                  'Giving up.'.format(feed_url))
        return False

################################
# Media processing initial steps
################################

-class ProcessMedia(task.Task):
class ProcessMedia(celery.Task):
    """
    Pass this entry off for processing.
    """
-    def run(self, media_id, feed_url):
    def run(self, media_id, feed_url, reprocess_action, reprocess_info=None):
        """
        Pass the media entry off to the appropriate processing function
        (for now just process_image...)

        :param feed_url: The feed URL that the PuSH server needs to be
            updated for.
        :param reprocess_action: What particular action should be run.
        :param reprocess_info: A dict containing all of the necessary reprocessing
            info for the media_type.
        """
-        entry = MediaEntry.query.get(media_id)
        reprocess_info = reprocess_info or {}
        entry, manager = get_entry_and_processing_manager(media_id)

        # Try to process, and handle expected errors.
        try:
-            entry.state = u'processing'
-            entry.save()
-            _log.debug('Processing {0}'.format(entry))
-            proc_state = ProcessingState(entry)
-            with mgg.workbench_manager.create() as workbench:
-                proc_state.set_workbench(workbench)
-                # run the processing code
-                entry.media_manager.processor(proc_state)
            processor_class = manager.get_processor(reprocess_action, entry)

            with processor_class(manager, entry) as processor:
                # Initial state change has to be here because
                # the entry.state gets recorded on processor_class init
                entry.state = u'processing'
                entry.save()

                _log.debug('Processing {0}'.format(entry))

                try:
                    processor.process(**reprocess_info)
                except Exception as exc:
                    if processor.entry_orig_state == 'processed':
                        _log.error(
                            'Entry {0} failed to process due to the following'
                            ' error: {1}'.format(entry.id, exc))
                        _log.info(
                            'Setting entry.state back to "processed"')
                        pass
                    else:
                        raise

        # We set the state to processed and save the entry here so there's
        # no need to save at the end of the processing stage, probably ;)
@@ -140,6 +156,4 @@ class ProcessMedia(task.Task):
        entry = mgg.database.MediaEntry.query.filter_by(id=entry_id).first()
        json_processing_callback(entry)

-# Register the task
-process_media = registry.tasks[ProcessMedia.name]
tasks.register(ProcessMedia)

View File

@@ -33,4 +33,17 @@ var notifications = {};

$(document).ready(function () {
    notifications.init();

    var mark_all_comments_seen = document.getElementById('mark_all_comments_seen');

    if (mark_all_comments_seen) {
        mark_all_comments_seen.href = '#';
        mark_all_comments_seen.onclick = function() {
            $.ajax({
                type: 'GET',
                url: mark_all_comments_seen_url,
                success: function(res, status, xhr) { window.location.reload(); }
            });
        };
    }
});

View File

@@ -21,7 +21,7 @@ from werkzeug.datastructures import FileStorage

 from mediagoblin.db.models import MediaEntry
 from mediagoblin.processing import mark_entry_failed
-from mediagoblin.processing.task import process_media
+from mediagoblin.processing.task import ProcessMedia

 _log = logging.getLogger(__name__)

@@ -76,17 +76,21 @@ def prepare_queue_task(app, entry, filename):
     return queue_file

-def run_process_media(entry, feed_url=None):
+def run_process_media(entry, feed_url=None,
+                      reprocess_action="initial", reprocess_info=None):
     """Process the media asynchronously

     :param entry: MediaEntry() instance to be processed.
     :param feed_url: A string indicating the feed_url that the PuSH servers
         should be notified of. This will be something like: `request.urlgen(
             'mediagoblin.user_pages.atom_feed', qualified=True,
-            user=request.user.username)`"""
+            user=request.user.username)`
+    :param reprocess_action: What particular action should be run.
+    :param reprocess_info: A dict containing all of the necessary reprocessing
+        info for the given media_type."""
     try:
-        process_media.apply_async(
-            [entry.id, feed_url], {},
+        ProcessMedia().apply_async(
+            [entry.id, feed_url, reprocess_action, reprocess_info], {},
             task_id=entry.queued_task_id)
     except BaseException as exc:
         # The purpose of this section is because when running in "lazy"
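
Because the new keyword arguments are defaulted, existing callers of run_process_media keep working while reprocessing gains the same entry point. A hedged usage sketch; the 'resize' action and the 'size' key are illustrative only, since real actions and info keys come from each media type's processing manager:

    from mediagoblin.submit.lib import run_process_media

    # entry is a MediaEntry; feed_url comes from request.urlgen as in
    # submit/views.py below.

    # Initial processing right after upload (defaults apply):
    run_process_media(entry, feed_url=feed_url)

    # Re-running a single step on an existing entry (action and info
    # invented for illustration):
    run_process_media(entry, reprocess_action='resize',
                      reprocess_info={'size': [640, 480]})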

View File

@@ -89,7 +89,7 @@ def submit_start(request):
             # Save now so we have this data before kicking off processing
             entry.save()

-            # Pass off to processing
+            # Pass off to async processing
             #
             # (... don't change entry after this point to avoid race
             # conditions with changes to the document via processing code)
@@ -97,6 +97,7 @@ def submit_start(request):
                 'mediagoblin.user_pages.atom_feed',
                 qualified=True, user=request.user.username)
             run_process_media(entry, feed_url)
+
             add_message(request, SUCCESS, _('Woohoo! Submitted!'))

             add_comment_subscription(request.user, entry)

View File

@@ -37,6 +37,9 @@
       src="{{ request.staticdirect('/js/header_dropdown.js') }}"></script>
     <script type="text/javascript"
       src="{{ request.staticdirect('/js/notifications.js') }}"></script>
+    <script>
+      var mark_all_comments_seen_url = "{{ request.urlgen('mediagoblin.notifications.mark_all_comment_notifications_seen') }}"
+    </script>

     {# For clarification, the difference between the extra_head.html template
      # and the head template hook is that the former should be used by
@@ -63,11 +66,11 @@
           {% set notification_count = get_notification_count(request.user.id) %}
           {% if notification_count %}
-            <a href="#notifications" class="notification-gem button_action" title="Notifications">
+            <a href="javascript:;" class="notification-gem button_action" title="Notifications">
               {{ notification_count }}</a>
           {% endif %}
-          <a href="#header" class="button_action header_dropdown_down">&#9660;</a>
-          <a href="#no_header" class="button_action header_dropdown_up">&#9650;</a>
+          <a href="javascript:;" class="button_action header_dropdown_down">&#9660;</a>
+          <a href="javascript:;" class="button_action header_dropdown_up">&#9650;</a>
         {% elif request.user and request.user.status == "needs_email_verification" %}
           {# the following link should only appear when verification is needed #}
           <a href="{{ request.urlgen('mediagoblin.user_pages.user_home',

View File

@@ -36,5 +36,9 @@
       </li>
     {% endfor %}
   </ul>
+  <a href="{{ request.urlgen('mediagoblin.notifications.mark_all_comment_notifications_seen') }}?next={{
+        request.base_url|urlencode }}" id="mark_all_comments_seen">
+    {% trans %}Mark all read{% endtrans %}
+  </a>
 </div>
 {% endif %}
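
Both the AJAX call in notifications.js and this fallback link (with its ?next= parameter) point at the same mark_all_comment_notifications_seen route. The view itself is not part of this excerpt; a minimal sketch of what it plausibly does, with the body assumed:

    # Hedged sketch, not MediaGoblin's actual view; Notification and
    # redirect do exist in the tree, but this implementation is a guess.
    from mediagoblin.db.models import Notification
    from mediagoblin.tools.response import redirect


    def mark_all_comment_notifications_seen(request):
        """Mark every unseen notification for the current user as seen."""
        if request.user:
            for notification in Notification.query.filter_by(
                    user_id=request.user.id, seen=False):
                notification.seen = True
                notification.save()

        # Honour the ?next= fallback from the template; the test further
        # down expects a redirect to '/' when no next is supplied.
        return redirect(request, location=request.GET.get('next', '/'))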

View File

@@ -62,11 +62,11 @@
         </a>
       </li>
     {% endif %}
-    {% if 'webm_640' in media.media_files %}
+    {% if 'webm_video' in media.media_files %}
       <li>
         <a href="{{ request.app.public_store.file_url(
-                       media.media_files.webm_640) }}">
-          {%- trans %}WebM file (640p; VP8/Vorbis){% endtrans -%}
+                       media.media_files.webm_video) }}">
+          {%- trans %}WebM file (VP8/Vorbis){% endtrans -%}
         </a>
       </li>
     {% endif %}

View File

@@ -55,6 +55,6 @@ def test_setup_celery_from_config():
         pkg_resources.resource_filename('mediagoblin.tests', 'celery.db'))
     assert fake_celery_module.BROKER_TRANSPORT == 'sqlalchemy'
-    assert fake_celery_module.BROKER_HOST == (
+    assert fake_celery_module.BROKER_URL == (
         'sqlite:///' +
         pkg_resources.resource_filename('mediagoblin.tests', 'kombu.db'))

View File

@@ -23,7 +23,7 @@ base_dir = %(here)s/user_dev/media/queue

 [celery]
 CELERY_ALWAYS_EAGER = true
 CELERY_RESULT_DBURI = "sqlite:///%(here)s/user_dev/celery.db"
-BROKER_HOST = "sqlite:///%(here)s/user_dev/kombu.db"
+BROKER_URL = "sqlite:///%(here)s/test_user_dev/kombu.db"

 [plugins]
 [[mediagoblin.plugins.api]]
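
Together with the unpinned celery and kombu in setup.py further down, these renames track Celery's own configuration change: a single BROKER_URL setting superseded the older BROKER_HOST family of options. Illustratively, as a Python-style celeryconfig (values assumed from the test config above):

    # Illustrative celeryconfig module: one URL-style broker setting
    # replaces the old BROKER_HOST / BROKER_PORT / BROKER_USER /
    # BROKER_PASSWORD components.
    BROKER_TRANSPORT = 'sqlalchemy'   # kombu's SQLAlchemy transport
    BROKER_URL = 'sqlite:///test_user_dev/kombu.db'
    CELERY_RESULT_DBURI = 'sqlite:///test_user_dev/celery.db'
    CELERY_ALWAYS_EAGER = True        # run tasks inline, useful in tests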

View File

@@ -149,3 +149,56 @@ otherperson@example.com\n\nSGkgb3RoZXJwZXJzb24sCmNocmlzIGNvbW1lbnRlZCBvbiB5b3VyI

         # User should not have been notified
         assert len(notifications) == 1
+
+    def test_mark_all_comment_notifications_seen(self):
+        """Test that mark_all_comments_seen works"""
+
+        user = fixture_add_user('otherperson', password='nosreprehto')
+
+        media_entry = fixture_media_entry(uploader=user.id, state=u'processed')
+
+        fixture_comment_subscription(media_entry)
+
+        media_uri_id = '/u/{0}/m/{1}/'.format(user.username,
+                                              media_entry.id)
+
+        # add 2 comments
+        self.test_app.post(
+            media_uri_id + 'comment/add/',
+            {
+                'comment_content': u'Test comment #43'
+            }
+        )
+
+        self.test_app.post(
+            media_uri_id + 'comment/add/',
+            {
+                'comment_content': u'Test comment #44'
+            }
+        )
+
+        notifications = Notification.query.filter_by(
+            user_id=user.id).all()
+
+        assert len(notifications) == 2
+
+        # neither comment notification should be marked seen yet
+        assert notifications[0].seen == False
+        assert notifications[1].seen == False
+
+        # log in as the notified user to mark the notifications seen
+        self.logout()
+        self.login('otherperson', 'nosreprehto')
+
+        # mark all comment notifications seen
+        res = self.test_app.get('/notifications/comments/mark_all_seen/')
+        res.follow()
+
+        assert urlparse.urlsplit(res.location)[2] == '/'
+
+        notifications = Notification.query.filter_by(
+            user_id=user.id).all()
+
+        # both notifications should now be marked seen
+        assert notifications[0].seen == True
+        assert notifications[1].seen == True

View File

@@ -18,6 +18,8 @@ import pkg_resources
 import pytest
 import mock

+pytest.importorskip("requests")
+
 from mediagoblin import mg_globals
 from mediagoblin.db.base import Session
 from mediagoblin.tests.tools import get_app
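
A short usage note: pytest.importorskip both imports and guards. If requests is missing, every test in the module is skipped rather than erroring at import time, and the call returns the imported module itself:

    import pytest

    # Skip this whole test module when requests is not installed.
    requests = pytest.importorskip("requests")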

View File

@@ -323,8 +323,9 @@ def media_confirm_delete(request):
         if not location:
             location=media.url_to_prev(request.urlgen)
         if not location:
-            location="mediagoblin.user_pages.user_home"
-        return redirect(request, location=location, user=username)
+            location=request.urlgen("mediagoblin.user_pages.user_home",
+                                    user=username)
+        return redirect(request, location=location)
     else:
         messages.add_message(
             request, messages.ERROR,
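
The bug fixed here was mixing the two calling conventions of MediaGoblin's redirect helper: positional arguments are passed through request.urlgen as a route name, while location= is treated as a ready-made URL. The old code stuffed a route name into location=, so the user= kwarg never reached urlgen. A sketch of the two styles (the helper's exact signature is assumed from its usage in this diff):

    from mediagoblin.tools.response import redirect

    # route name resolved through request.urlgen with its kwargs:
    redirect(request, 'mediagoblin.user_pages.user_home', user=username)
    # already a URL, used as-is; urlgen kwargs don't belong here:
    redirect(request, location='/u/chris/')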

View File

@@ -48,8 +48,8 @@ setup(
         'pytest>=2.3.1',
         'pytest-xdist',
         'werkzeug>=0.7',
-        'celery==2.5.3',
-        'kombu==2.1.7',
+        'celery',
+        'kombu',
         'jinja2',
         'sphinx',
         'Babel<1.0',