Merge branch 'master' into OPW-Moderation-Update
Conflicts: mediagoblin/db/migrations.py
@@ -23,4 +23,4 @@
# see http://www.python.org/dev/peps/pep-0386/
__version__ = "0.5.0.dev"
__version__ = "0.6.0.dev"

@@ -341,7 +341,7 @@ def verify_forgot_password(request):
        messages.add_message(
            request, messages.ERROR,
            _('You are no longer an active user. Please contact the system'
              ' admin to reactivate your accoutn.'))
              ' admin to reactivate your account.'))
        return redirect(
            request, 'index')

@@ -104,47 +104,6 @@ max_height = integer(default=640)
max_width = integer(default=180)
max_height = integer(default=180)

[media_type:mediagoblin.media_types.image]
# One of BICUBIC, BILINEAR, NEAREST, ANTIALIAS
resize_filter = string(default="ANTIALIAS")
#level of compression used when resizing images
quality = integer(default=90)

[media_type:mediagoblin.media_types.video]
# Should we keep the original file?
keep_original = boolean(default=False)

# 0 means autodetect, autodetect means number_of_CPUs - 1
vp8_threads = integer(default=0)
# Range: 0..10
vp8_quality = integer(default=8)
# Range: -0.1..1
vorbis_quality = float(default=0.3)

# Autoplay the video when page is loaded?
auto_play = boolean(default=False)

[[skip_transcode]]
mime_types = string_list(default=list("video/webm"))
container_formats = string_list(default=list("Matroska"))
video_codecs = string_list(default=list("VP8 video"))
audio_codecs = string_list(default=list("Vorbis"))
dimensions_match = boolean(default=True)

[media_type:mediagoblin.media_types.audio]
keep_original = boolean(default=True)
# vorbisenc quality
quality = float(default=0.3)
create_spectrogram = boolean(default=True)
spectrogram_fft_size = integer(default=4096)

[media_type:mediagoblin.media_types.ascii]
thumbnail_font = string(default=None)

[media_type:mediagoblin.media_types.pdf]
pdf_js = boolean(default=True)


[celery]
# default result stuff
CELERY_RESULT_BACKEND = string(default="database")
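Note: the [media_type:...] sections removed here reappear later in this commit as per-media-type config_spec.ini files carrying a [plugin_spec] section, and the rewritten processors read them via mgg.global_config['plugins'][...]. A hypothetical site override under the new layout (the exact override location is an assumption inferred from those lookups, and the values are made up):

    [plugins]
    [[mediagoblin.media_types.image]]
    # level of compression used when resizing images
    quality = 95
    resize_filter = "BILINEAR"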
@@ -301,7 +301,6 @@ def drop_token_related_User_columns(db):
    metadata = MetaData(bind=db.bind)
    user_table = inspect_table(metadata, 'core__users')

    verification_key = user_table.columns['verification_key']
    fp_verification_key = user_table.columns['fp_verification_key']
    fp_token_expire = user_table.columns['fp_token_expire']

@@ -323,7 +322,6 @@ class CommentSubscription_v0(declarative_base()):
    user_id = Column(Integer, ForeignKey(User.id), nullable=False)

    notify = Column(Boolean, nullable=False, default=True)
    send_email = Column(Boolean, nullable=False, default=True)

@@ -369,6 +367,8 @@ def add_new_notification_tables(db):
    CommentNotification_v0.__table__.create(db.bind)
    ProcessingNotification_v0.__table__.create(db.bind)

    db.commit()


@RegisterMigration(13, MIGRATIONS)
def pw_hash_nullable(db):

@@ -384,6 +384,9 @@ def pw_hash_nullable(db):
        constraint = UniqueConstraint('username', table=user_table)
        constraint.create()

    db.commit()


# oauth1 migrations
class Client_v0(declarative_base()):
    """

@@ -462,6 +465,16 @@ def create_oauth1_tables(db):
    db.commit()


@RegisterMigration(15, MIGRATIONS)
def wants_notifications(db):
    """Add a wants_notifications field to User model"""
    metadata = MetaData(bind=db.bind)
    user_table = inspect_table(metadata, "core__users")
    col = Column('wants_notifications', Boolean, default=True)
    col.create(user_table)
    db.commit()


class ReportBase_v0(declarative_base()):
    __tablename__ = 'core__reports'
    id = Column(Integer, primary_key=True)

@@ -483,6 +496,8 @@ class CommentReport_v0(ReportBase_v0):
        primary_key=True)
    comment_id = Column(Integer, ForeignKey(MediaComment.id), nullable=False)


class MediaReport_v0(ReportBase_v0):
    __tablename__ = 'core__reports_on_media'
    __mapper_args__ = {'polymorphic_identity': 'media_report'}

@@ -515,7 +530,7 @@ class PrivilegeUserAssociation_v0(declarative_base()):
        ForeignKey(Privilege.id),
        primary_key=True)

@RegisterMigration(15, MIGRATIONS)
@RegisterMigration(16, MIGRATIONS)
def create_moderation_tables(db):
    ReportBase_v0.__table__.create(db.bind)
    CommentReport_v0.__table__.create(db.bind)

@@ -531,7 +546,7 @@ def create_moderation_tables(db):
        p.save()


@RegisterMigration(16, MIGRATIONS)
@RegisterMigration(17, MIGRATIONS)
def update_user_privilege_columns(db):
    # first, create the privileges which would be created by foundations
    default_privileges = Privilege.query.filter(
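Note: the two @RegisterMigration renumberings above (15 to 16, and 16 to 17) are the substance of the conflict named in the commit message: master and the moderation branch had both claimed migration number 15 (wants_notifications vs. create_moderation_tables), and migration ids must stay unique and ordered. Schematically (a sketch of the registration pattern, not code from this commit):

    @RegisterMigration(15, MIGRATIONS)   # from master
    def wants_notifications(db):
        pass

    @RegisterMigration(16, MIGRATIONS)   # moderation branch, bumped from 15
    def create_moderation_tables(db):
        pass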
@@ -70,6 +70,7 @@ class User(Base, UserMixin):
    # Intented to be nullable=False, but migrations would not work for it
    # set to nullable=True implicitly.
    wants_comment_notification = Column(Boolean, default=True)
    wants_notifications = Column(Boolean, default=True)
    license_preference = Column(Unicode)
    #--column admin is VESTIGIAL with privileges and should not be used------------
    #--should be dropped ASAP though a bug in sqlite3 prevents this atm------------
@@ -61,12 +61,10 @@ class EditProfileForm(wtforms.Form):


class EditAccountForm(wtforms.Form):
    new_email = wtforms.TextField(
        _('New email address'),
        [wtforms.validators.Optional(),
         normalize_user_or_email_field(allow_user=False)])
    wants_comment_notification = wtforms.BooleanField(
        description=_("Email me when others comment on my media"))
    wants_notifications = wtforms.BooleanField(
        description=_("Enable insite notifications about events."))
    license_preference = wtforms.SelectField(
        _('License preference'),
        [

@@ -111,3 +109,15 @@ class ChangePassForm(wtforms.Form):
        [wtforms.validators.Required(),
         wtforms.validators.Length(min=6, max=30)],
        id="password")


class ChangeEmailForm(wtforms.Form):
    new_email = wtforms.TextField(
        _('New email address'),
        [wtforms.validators.Required(),
         normalize_user_or_email_field(allow_user=False)])
    password = wtforms.PasswordField(
        _('Password'),
        [wtforms.validators.Required()],
        description=_(
            "Enter your password to prove you own this account."))
@@ -28,3 +28,5 @@ add_route('mediagoblin.edit.pass', '/edit/password/',
          'mediagoblin.edit.views:change_pass')
add_route('mediagoblin.edit.verify_email', '/edit/verify_email/',
          'mediagoblin.edit.views:verify_email')
add_route('mediagoblin.edit.email', '/edit/email/',
          'mediagoblin.edit.views:change_email')
@@ -228,24 +228,22 @@ def edit_account(request):
    user = request.user
    form = forms.EditAccountForm(request.form,
        wants_comment_notification=user.wants_comment_notification,
        license_preference=user.license_preference)
        license_preference=user.license_preference,
        wants_notifications=user.wants_notifications)

    if request.method == 'POST' and form.validate():
        user.wants_comment_notification = form.wants_comment_notification.data
        user.wants_notifications = form.wants_notifications.data

        user.license_preference = form.license_preference.data

        if form.new_email.data:
            _update_email(request, form, user)

        if not form.errors:
            user.save()
            messages.add_message(request,
                                 messages.SUCCESS,
                                 _("Account settings saved"))
            return redirect(request,
                            'mediagoblin.user_pages.user_home',
                            user=user.username)
        user.save()
        messages.add_message(request,
                             messages.SUCCESS,
                             _("Account settings saved"))
        return redirect(request,
                        'mediagoblin.user_pages.user_home',
                        user=user.username)

    return render_to_response(
        request,

@@ -425,30 +423,52 @@ def verify_email(request):
        user=user.username)


def _update_email(request, form, user):
    new_email = form.new_email.data
    users_with_email = User.query.filter_by(
        email=new_email).count()
def change_email(request):
    """ View to change the user's email """
    form = forms.ChangeEmailForm(request.form)
    user = request.user

    if users_with_email:
        form.new_email.errors.append(
            _('Sorry, a user with that email address'
              ' already exists.'))
    # If no password authentication, no need to enter a password
    if 'pass_auth' not in request.template_env.globals or not user.pw_hash:
        form.__delitem__('password')

    elif not users_with_email:
        verification_key = get_timed_signer_url(
            'mail_verification_token').dumps({
                'user': user.id,
                'email': new_email})
    if request.method == 'POST' and form.validate():
        new_email = form.new_email.data
        users_with_email = User.query.filter_by(
            email=new_email).count()

        rendered_email = render_template(
            request, 'mediagoblin/edit/verification.txt',
            {'username': user.username,
             'verification_url': EMAIL_VERIFICATION_TEMPLATE.format(
                 uri=request.urlgen('mediagoblin.edit.verify_email',
                                    qualified=True),
                 verification_key=verification_key)})
        if users_with_email:
            form.new_email.errors.append(
                _('Sorry, a user with that email address'
                  ' already exists.'))

        email_debug_message(request)
        auth_tools.send_verification_email(user, request, new_email,
                                           rendered_email)
        if form.password and user.pw_hash and not auth.check_password(
                form.password.data, user.pw_hash):
            form.password.errors.append(
                _('Wrong password'))

        if not form.errors:
            verification_key = get_timed_signer_url(
                'mail_verification_token').dumps({
                    'user': user.id,
                    'email': new_email})

            rendered_email = render_template(
                request, 'mediagoblin/edit/verification.txt',
                {'username': user.username,
                 'verification_url': EMAIL_VERIFICATION_TEMPLATE.format(
                     uri=request.urlgen('mediagoblin.edit.verify_email',
                                        qualified=True),
                     verification_key=verification_key)})

            email_debug_message(request)
            auth_tools.send_verification_email(user, request, new_email,
                                               rendered_email)

            return redirect(request, 'mediagoblin.edit.account')

    return render_to_response(
        request,
        'mediagoblin/edit/change_email.html',
        {'form': form,
         'user': user})
@@ -45,6 +45,10 @@ SUBCOMMAND_MAP = {
        'setup': 'mediagoblin.gmg_commands.assetlink:assetlink_parser_setup',
        'func': 'mediagoblin.gmg_commands.assetlink:assetlink',
        'help': 'Link assets for themes and plugins for static serving'},
    'reprocess': {
        'setup': 'mediagoblin.gmg_commands.reprocess:reprocess_parser_setup',
        'func': 'mediagoblin.gmg_commands.reprocess:reprocess',
        'help': 'Reprocess media entries'},
    # 'theme': {
    #     'setup': 'mediagoblin.gmg_commands.theme:theme_parser_setup',
    #     'func': 'mediagoblin.gmg_commands.theme:theme',

@@ -16,6 +16,7 @@
from mediagoblin import mg_globals
from mediagoblin.db.open import setup_connection_and_db_from_config
from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.storage.filestorage import BasicFileStorage
from mediagoblin.init import setup_storage, setup_global_and_app_config

@@ -223,6 +224,7 @@ def env_export(args):
    '''
    Export database and media files to a tar archive
    '''
    commands_util.check_unrecognized_args(args)
    if args.cache_path:
        if os.path.exists(args.cache_path):
            _log.error('The cache directory must not exist '
mediagoblin/gmg_commands/reprocess.py (new file, 302 lines)
@@ -0,0 +1,302 @@
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os

from mediagoblin import mg_globals
from mediagoblin.db.models import MediaEntry
from mediagoblin.gmg_commands import util as commands_util
from mediagoblin.submit.lib import run_process_media
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
from mediagoblin.tools.pluginapi import hook_handle
from mediagoblin.processing import (
    ProcessorDoesNotExist, ProcessorNotEligible,
    get_entry_and_processing_manager, get_processing_manager_for_type,
    ProcessingManagerDoesNotExist)


def reprocess_parser_setup(subparser):
    subparser.add_argument(
        '--celery',
        action='store_true',
        help="Don't process eagerly, pass off to celery")

    subparsers = subparser.add_subparsers(dest="reprocess_subcommand")

    ###################
    # available command
    ###################
    available_parser = subparsers.add_parser(
        "available",
        help="Find out what actions are available for this media")

    available_parser.add_argument(
        "id_or_type",
        help="Media id or media type to check")

    available_parser.add_argument(
        "--action-help",
        action="store_true",
        help="List argument help for each action available")

    available_parser.add_argument(
        "--state",
        help="The state of media you would like to reprocess")

    #############
    # run command
    #############
    run_parser = subparsers.add_parser(
        "run",
        help="Run a reprocessing on one or more media")

    run_parser.add_argument(
        'media_id',
        help="The media_entry id(s) you wish to reprocess.")

    run_parser.add_argument(
        'reprocess_command',
        help="The reprocess command you intend to run")

    run_parser.add_argument(
        'reprocess_args',
        nargs=argparse.REMAINDER,
        help="rest of arguments to the reprocessing tool")

    ################
    # thumbs command
    ################
    thumbs = subparsers.add_parser(
        'thumbs',
        help='Regenerate thumbs for all processed media')

    thumbs.add_argument(
        '--size',
        nargs=2,
        type=int,
        metavar=('max_width', 'max_height'))

    #################
    # initial command
    #################
    subparsers.add_parser(
        'initial',
        help='Reprocess all failed media')

    ##################
    # bulk_run command
    ##################
    bulk_run_parser = subparsers.add_parser(
        'bulk_run',
        help='Run reprocessing on a given media type or state')

    bulk_run_parser.add_argument(
        'type',
        help='The type of media you would like to process')

    bulk_run_parser.add_argument(
        '--state',
        default='processed',
        nargs='?',
        help='The state of the media you would like to process. Defaults to' \
             " 'processed'")

    bulk_run_parser.add_argument(
        'reprocess_command',
        help='The reprocess command you intend to run')

    bulk_run_parser.add_argument(
        'reprocess_args',
        nargs=argparse.REMAINDER,
        help='The rest of the arguments to the reprocessing tool')

    ###############
    # help command?
    ###############


def available(args):
    # Get the media type, either by looking up media id, or by specific type
    try:
        media_id = int(args.id_or_type)
        media_entry, manager = get_entry_and_processing_manager(media_id)
        media_type = media_entry.media_type
    except ValueError:
        media_type = args.id_or_type
        media_entry = None
        manager = get_processing_manager_for_type(media_type)
    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=args.id_or_type).first()
        print 'No such processing manager for {0}'.format(entry.media_type)

    if args.state:
        processors = manager.list_all_processors_by_state(args.state)
    elif media_entry is None:
        processors = manager.list_all_processors()
    else:
        processors = manager.list_eligible_processors(media_entry)

    print "Available processors:"
    print "====================="
    print ""

    if args.action_help:
        for processor in processors:
            print processor.name
            print "-" * len(processor.name)

            parser = processor.generate_parser()
            parser.print_help()
            print ""

    else:
        for processor in processors:
            if processor.description:
                print " - %s: %s" % (processor.name, processor.description)
            else:
                print " - %s" % processor.name


def run(args, media_id=None):
    if not media_id:
        media_id = args.media_id
    try:
        media_entry, manager = get_entry_and_processing_manager(media_id)

        # TODO: (maybe?) This could probably be handled entirely by the
        # processor class...
        try:
            processor_class = manager.get_processor(
                args.reprocess_command, media_entry)
        except ProcessorDoesNotExist:
            print 'No such processor "%s" for media with id "%s"' % (
                args.reprocess_command, media_entry.id)
            return
        except ProcessorNotEligible:
            print 'Processor "%s" exists but media "%s" is not eligible' % (
                args.reprocess_command, media_entry.id)
            return

        reprocess_parser = processor_class.generate_parser()
        reprocess_args = reprocess_parser.parse_args(args.reprocess_args)
        reprocess_request = processor_class.args_to_request(reprocess_args)
        run_process_media(
            media_entry,
            reprocess_action=args.reprocess_command,
            reprocess_info=reprocess_request)

    except ProcessingManagerDoesNotExist:
        entry = MediaEntry.query.filter_by(id=media_id).first()
        print 'No such processing manager for {0}'.format(entry.media_type)


def bulk_run(args):
    """
    Bulk reprocessing of a given media_type
    """
    query = MediaEntry.query.filter_by(media_type=args.type,
                                       state=args.state)

    for entry in query:
        run(args, entry.id)


def thumbs(args):
    """
    Regenerate thumbs for all processed media
    """
    query = MediaEntry.query.filter_by(state='processed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)

            # TODO: (maybe?) This could probably be handled entirely by the
            # processor class...
            try:
                processor_class = manager.get_processor(
                    'resize', media_entry)
            except ProcessorDoesNotExist:
                print 'No such processor "%s" for media with id "%s"' % (
                    'resize', media_entry.id)
                return
            except ProcessorNotEligible:
                print 'Processor "%s" exists but media "%s" is not eligible' % (
                    'resize', media_entry.id)
                return

            reprocess_parser = processor_class.generate_parser()

            # prepare filetype and size to be passed into reprocess_parser
            if args.size:
                extra_args = 'thumb --{0} {1} {2}'.format(
                    processor_class.thumb_size,
                    args.size[0],
                    args.size[1])
            else:
                extra_args = 'thumb'

            reprocess_args = reprocess_parser.parse_args(extra_args.split())
            reprocess_request = processor_class.args_to_request(reprocess_args)
            run_process_media(
                media_entry,
                reprocess_action='resize',
                reprocess_info=reprocess_request)

        except ProcessingManagerDoesNotExist:
            print 'No such processing manager for {0}'.format(entry.media_type)


def initial(args):
    """
    Reprocess all failed media
    """
    query = MediaEntry.query.filter_by(state='failed')

    for entry in query:
        try:
            media_entry, manager = get_entry_and_processing_manager(entry.id)
            run_process_media(
                media_entry,
                reprocess_action='initial')
        except ProcessingManagerDoesNotExist:
            print 'No such processing manager for {0}'.format(entry.media_type)


def reprocess(args):
    # Run eagerly unless explicetly set not to
    if not args.celery:
        os.environ['CELERY_ALWAYS_EAGER'] = 'true'

    commands_util.setup_app(args)

    if args.reprocess_subcommand == "run":
        run(args)

    elif args.reprocess_subcommand == "available":
        available(args)

    elif args.reprocess_subcommand == "bulk_run":
        bulk_run(args)

    elif args.reprocess_subcommand == "thumbs":
        thumbs(args)

    elif args.reprocess_subcommand == "initial":
        initial(args)
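Note: to make the new command concrete, here are some hypothetical invocations, assuming a media entry with id 1 (the ids and sizes are made up; the subcommands and flags come straight from the parsers above):

    gmg reprocess available 1 --action-help
    gmg reprocess run 1 resize thumb --thumb_size 200 200
    gmg reprocess bulk_run mediagoblin.media_types.image resize thumb
    gmg reprocess thumbs --size 200 200
    gmg reprocess --celery initial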
@@ -36,5 +36,5 @@ def prompt_if_not_set(variable, text, password=False):
            variable=raw_input(text + u' ')
        else:
            variable=getpass.getpass(text + u' ')

    return variable
(i18n catalog updates omitted from this view: the binary mediagoblin.mo files are not shown and the oversized mediagoblin.po diffs are suppressed. New locales bg, vi and vi_VN are added (1558-1559 lines each); numerous existing locales are regenerated.)
@@ -15,21 +15,15 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.ascii.processing import process_ascii, \
from mediagoblin.media_types.ascii.processing import AsciiProcessingManager, \
    sniff_handler
from mediagoblin.tools import pluginapi

ACCEPTED_EXTENSIONS = ["txt", "asc", "nfo"]
MEDIA_TYPE = 'mediagoblin.media_types.ascii'


def setup_plugin():
    config = pluginapi.get_config(MEDIA_TYPE)


class ASCIIMediaManager(MediaManagerBase):
    human_readable = "ASCII"
    processor = staticmethod(process_ascii)
    display_template = "mediagoblin/media_displays/ascii.html"
    default_thumb = "images/media_thumbs/ascii.jpg"

@@ -40,8 +34,8 @@ def get_media_type_and_manager(ext):


hooks = {
    'setup': setup_plugin,
    'get_media_type_and_manager': get_media_type_and_manager,
    ('media_manager', MEDIA_TYPE): lambda: ASCIIMediaManager,
    ('reprocess_manager', MEDIA_TYPE): lambda: AsciiProcessingManager,
    'sniff_handler': sniff_handler,
}
mediagoblin/media_types/ascii/config_spec.ini (new file, 4 lines)
@@ -0,0 +1,4 @@
[plugin_spec]
thumbnail_font = string(default=None)
@@ -13,6 +13,7 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import chardet
import os
try:

@@ -22,7 +23,11 @@ except ImportError:
import logging

from mediagoblin import mg_globals as mgg
from mediagoblin.processing import create_pub_filepath
from mediagoblin.processing import (
    create_pub_filepath, FilenameBuilder,
    MediaProcessor, ProcessingManager,
    get_process_filename, copy_original,
    store_public, request_from_args)
from mediagoblin.media_types.ascii import asciitoimage

_log = logging.getLogger(__name__)
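Note: the rewritten processors below lean on helpers newly imported from mediagoblin.processing whose bodies are not part of this diff. A rough sketch of their contracts, inferred from the call sites here (an assumption, not the actual implementation):

    # Assumed contracts, inferred from how these helpers are called below:
    def get_process_filename(entry, workbench, acceptable_files):
        """Localize the best available source file (e.g. 'original') into
        the workbench and return its local path."""

    def copy_original(entry, process_filename, target_name):
        """Store the source file publicly as target_name and record it as
        entry.media_files['original']."""

    def store_public(entry, keyname, local_file, target_name):
        """Copy a workbench file into public storage as target_name and
        record it under entry.media_files[keyname]."""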
@@ -43,106 +48,202 @@ def sniff_handler(media_file, **kw):
    return None


def process_ascii(proc_state):
    """Code to process a txt file. Will be run by celery.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.
class CommonAsciiProcessor(MediaProcessor):
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    ascii_config = mgg.global_config['media_type:mediagoblin.media_types.ascii']
    # Conversions subdirectory to avoid collisions
    conversions_subdir = os.path.join(
        workbench.dir, 'conversions')
    os.mkdir(conversions_subdir)
    Provides a base for various ascii processing steps
    """
    acceptable_files = ['original', 'unicode']

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath,
        'source')
    def common_setup(self):
        self.ascii_config = mgg.global_config['plugins'][
            'mediagoblin.media_types.ascii']

        queued_file = file(queued_filename, 'rb')
        # Conversions subdirectory to avoid collisions
        self.conversions_subdir = os.path.join(
            self.workbench.dir, 'conversions')
        os.mkdir(self.conversions_subdir)

    with queued_file:
        queued_file_charset = chardet.detect(queued_file.read())
        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.charset = None

    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))

    def _detect_charset(self, orig_file):
        d_charset = chardet.detect(orig_file.read())

        # Only select a non-utf-8 charset if chardet is *really* sure
        # Tested with "Feli\x0109an superjaron", which was detecte
        if queued_file_charset['confidence'] < 0.9:
            interpreted_charset = 'utf-8'
        # Tested with "Feli\x0109an superjaron", which was detected
        if d_charset['confidence'] < 0.9:
            self.charset = 'utf-8'
        else:
            interpreted_charset = queued_file_charset['encoding']
            self.charset = d_charset['encoding']

        _log.info('Charset detected: {0}\nWill interpret as: {1}'.format(
            queued_file_charset,
            interpreted_charset))
            d_charset,
            self.charset))

        queued_file.seek(0)  # Rewind the queued file
        # Rewind the file
        orig_file.seek(0)

        thumb_filepath = create_pub_filepath(
            entry, 'thumbnail.png')
    def store_unicode_file(self):
        with file(self.process_filename, 'rb') as orig_file:
            self._detect_charset(orig_file)
            unicode_filepath = create_pub_filepath(self.entry,
                                                   'ascii-portable.txt')

        tmp_thumb_filename = os.path.join(
            conversions_subdir, thumb_filepath[-1])
            with mgg.public_store.get_file(unicode_filepath, 'wb') \
                    as unicode_file:
                # Decode the original file from its detected charset (or UTF8)
                # Encode the unicode instance to ASCII and replace any
                # non-ASCII with an HTML entity (&#
                unicode_file.write(
                    unicode(orig_file.read().decode(
                        self.charset)).encode(
                            'ascii',
                            'xmlcharrefreplace'))

        ascii_converter_args = {}
        self.entry.media_files['unicode'] = unicode_filepath

        if ascii_config['thumbnail_font']:
            ascii_converter_args.update(
                {'font': ascii_config['thumbnail_font']})
    def generate_thumb(self, font=None, thumb_size=None):
        with file(self.process_filename, 'rb') as orig_file:
            # If no font kwarg, check config
            if not font:
                font = self.ascii_config.get('thumbnail_font', None)
            if not thumb_size:
                thumb_size = (mgg.global_config['media:thumb']['max_width'],
                              mgg.global_config['media:thumb']['max_height'])

        converter = asciitoimage.AsciiToImage(
            **ascii_converter_args)
            tmp_thumb = os.path.join(
                self.conversions_subdir,
                self.name_builder.fill('{basename}.thumbnail.png'))

        thumb = converter._create_image(
            queued_file.read())
            ascii_converter_args = {}

        with file(tmp_thumb_filename, 'w') as thumb_file:
            thumb.thumbnail(
                (mgg.global_config['media:thumb']['max_width'],
                 mgg.global_config['media:thumb']['max_height']),
                Image.ANTIALIAS)
            thumb.save(thumb_file)
            # If there is a font from either the config or kwarg, update
            # ascii_converter_args
            if font:
                ascii_converter_args.update(
                    {'font': self.ascii_config['thumbnail_font']})

        _log.debug('Copying local file to public storage')
        mgg.public_store.copy_local_to_storage(
            tmp_thumb_filename, thumb_filepath)
            converter = asciitoimage.AsciiToImage(
                **ascii_converter_args)

        queued_file.seek(0)
            thumb = converter._create_image(
                orig_file.read())

        original_filepath = create_pub_filepath(entry, queued_filepath[-1])
            with file(tmp_thumb, 'w') as thumb_file:
                thumb.thumbnail(
                    thumb_size,
                    Image.ANTIALIAS)
                thumb.save(thumb_file)

        with mgg.public_store.get_file(original_filepath, 'wb') \
                as original_file:
            original_file.write(queued_file.read())
            _log.debug('Copying local file to public storage')
            store_public(self.entry, 'thumb', tmp_thumb,
                         self.name_builder.fill('{basename}.thumbnail.jpg'))

        queued_file.seek(0)  # Rewind *again*

        unicode_filepath = create_pub_filepath(entry, 'ascii-portable.txt')
class InitialProcessor(CommonAsciiProcessor):
    """
    Initial processing step for new ascii media
    """
    name = "initial"
    description = "Initial processing"

        with mgg.public_store.get_file(unicode_filepath, 'wb') \
                as unicode_file:
            # Decode the original file from its detected charset (or UTF8)
            # Encode the unicode instance to ASCII and replace any non-ASCII
            # with an HTML entity (&#
            unicode_file.write(
                unicode(queued_file.read().decode(
                    interpreted_charset)).encode(
                        'ascii',
                        'xmlcharrefreplace'))
    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

        # Remove queued media file from storage and database.
        # queued_filepath is in the task_id directory which should
        # be removed too, but fail if the directory is not empty to be on
        # the super-safe side.
        mgg.queue_store.delete_file(queued_filepath)      # rm file
        mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
        entry.queued_media_file = []
    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        media_files_dict = entry.setdefault('media_files', {})
        media_files_dict['thumb'] = thumb_filepath
        media_files_dict['unicode'] = unicode_filepath
        media_files_dict['original'] = original_filepath
        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_width'),
            type=int)

        entry.save()
        parser.add_argument(
            '--font',
            help='the thumbnail font')

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['thumb_size', 'font'])

    def process(self, thumb_size=None, font=None):
        self.common_setup()
        self.store_unicode_file()
        self.generate_thumb(thumb_size=thumb_size, font=font)
        self.copy_original()
        self.delete_queue_file()


class Resizer(CommonAsciiProcessor):
    """
    Resizing process steps for processed media
    """
    name = 'resize'
    description = 'Resize thumbnail'
    thumb_size = 'thumb_size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        # Needed for gmg reprocess thumbs to work
        parser.add_argument(
            'file',
            nargs='?',
            default='thumb',
            choices=['thumb'])

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['thumb_size', 'file'])

    def process(self, thumb_size=None, file=None):
        self.common_setup()
        self.generate_thumb(thumb_size=thumb_size)


class AsciiProcessingManager(ProcessingManager):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)
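Note: a processing manager can also be driven directly from Python; this hypothetical snippet mirrors what run() in the new reprocess command does (media_entry is assumed to be an already-fetched MediaEntry):

    manager = AsciiProcessingManager()
    processor_class = manager.get_processor('resize', media_entry)
    parser = processor_class.generate_parser()
    args = parser.parse_args(['thumb', '--thumb_size', '100', '100'])
    run_process_media(media_entry,
                      reprocess_action='resize',
                      reprocess_info=processor_class.args_to_request(args))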
@@ -15,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.audio.processing import process_audio, \
from mediagoblin.media_types.audio.processing import AudioProcessingManager, \
    sniff_handler
from mediagoblin.tools import pluginapi

@@ -32,8 +32,8 @@ def setup_plugin():

class AudioMediaManager(MediaManagerBase):
    human_readable = "Audio"
    processor = staticmethod(process_audio)
    display_template = "mediagoblin/media_displays/audio.html"
    default_thumb = "images/media_thumbs/image.png"


def get_media_type_and_manager(ext):

@@ -45,4 +45,5 @@ hooks = {
    'get_media_type_and_manager': get_media_type_and_manager,
    'sniff_handler': sniff_handler,
    ('media_manager', MEDIA_TYPE): lambda: AudioMediaManager,
    ('reprocess_manager', MEDIA_TYPE): lambda: AudioProcessingManager,
}
mediagoblin/media_types/audio/config_spec.ini (new file, 8 lines)
@@ -0,0 +1,8 @@
[plugin_spec]
keep_original = boolean(default=True)
# vorbisenc quality
quality = float(default=0.3)
create_spectrogram = boolean(default=True)
spectrogram_fft_size = integer(default=4096)
@@ -14,16 +14,19 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import logging
from tempfile import NamedTemporaryFile
import os

from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (create_pub_filepath, BadMediaFail,
                                    FilenameBuilder, ProgressCallback)
from mediagoblin.processing import (
    BadMediaFail, FilenameBuilder,
    ProgressCallback, MediaProcessor, ProcessingManager,
    request_from_args, get_process_filename,
    store_public, copy_original)

from mediagoblin.media_types.audio.transcoders import (AudioTranscoder,
                                                       AudioThumbnailer)
from mediagoblin.media_types.audio.transcoders import (
    AudioTranscoder, AudioThumbnailer)

_log = logging.getLogger(__name__)
@@ -39,121 +42,304 @@ def sniff_handler(media_file, **kw):
        _log.debug('Audio discovery raised BadMediaFail')
        return None

    if data.is_audio == True and data.is_video == False:
    if data.is_audio is True and data.is_video is False:
        return MEDIA_TYPE

    return None


def process_audio(proc_state):
    """Code to process uploaded audio. Will be run by celery.

    A Workbench() represents a local tempory dir. It is automatically
    cleaned up when this function exits.
class CommonAudioProcessor(MediaProcessor):
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    audio_config = mgg.global_config['media_type:mediagoblin.media_types.audio']
    Provides a base for various audio processing steps
    """
    acceptable_files = ['original', 'best_quality', 'webm_audio']

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath,
        'source')
    name_builder = FilenameBuilder(queued_filename)
    def common_setup(self):
        """
        Setup the workbench directory and pull down the original file, add
        the audio_config, transcoder, thumbnailer and spectrogram_tmp path
        """
        self.audio_config = mgg \
            .global_config['plugins']['mediagoblin.media_types.audio']

    webm_audio_filepath = create_pub_filepath(
        entry,
        '{original}.webm'.format(
            original=os.path.splitext(
                queued_filepath[-1])[0]))
        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

    if audio_config['keep_original']:
        with open(queued_filename, 'rb') as queued_file:
            original_filepath = create_pub_filepath(
                entry, name_builder.fill('{basename}{ext}'))
        self.transcoder = AudioTranscoder()
        self.thumbnailer = AudioThumbnailer()

            with mgg.public_store.get_file(original_filepath, 'wb') as \
                    original_file:
                _log.debug('Saving original...')
                original_file.write(queued_file.read())
    def copy_original(self):
        if self.audio_config['keep_original']:
            copy_original(
                self.entry, self.process_filename,
                self.name_builder.fill('{basename}{ext}'))

            entry.media_files['original'] = original_filepath
    def _keep_best(self):
        """
        If there is no original, keep the best file that we have
        """
        if not self.entry.media_files.get('best_quality'):
            # Save the best quality file if no original?
            if not self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_audio'):
                self.entry.media_files['best_quality'] = self.entry \
                    .media_files['webm_audio']

    transcoder = AudioTranscoder()
    def transcode(self, quality=None):
        if not quality:
            quality = self.audio_config['quality']

    with NamedTemporaryFile(dir=workbench.dir) as webm_audio_tmp:
        progress_callback = ProgressCallback(entry)
        progress_callback = ProgressCallback(self.entry)
        webm_audio_tmp = os.path.join(self.workbench.dir,
                                      self.name_builder.fill(
                                          '{basename}{ext}'))

        transcoder.transcode(
            queued_filename,
            webm_audio_tmp.name,
            quality=audio_config['quality'],
        self.transcoder.transcode(
            self.process_filename,
            webm_audio_tmp,
            quality=quality,
            progress_callback=progress_callback)

        transcoder.discover(webm_audio_tmp.name)
        self.transcoder.discover(webm_audio_tmp)

        self._keep_best()

        _log.debug('Saving medium...')
        mgg.public_store.get_file(webm_audio_filepath, 'wb').write(
            webm_audio_tmp.read())
        store_public(self.entry, 'webm_audio', webm_audio_tmp,
                     self.name_builder.fill('{basename}.medium.webm'))

        entry.media_files['webm_audio'] = webm_audio_filepath
    def create_spectrogram(self, max_width=None, fft_size=None):
        if not max_width:
            max_width = mgg.global_config['media:medium']['max_width']
        if not fft_size:
            fft_size = self.audio_config['spectrogram_fft_size']

        # entry.media_data_init(length=int(data.audiolength))
        wav_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
            '{basename}.ogg'))

    if audio_config['create_spectrogram']:
        spectrogram_filepath = create_pub_filepath(
            entry,
            '{original}-spectrogram.jpg'.format(
                original=os.path.splitext(
                    queued_filepath[-1])[0]))
        _log.info('Creating OGG source for spectrogram')
        self.transcoder.transcode(
            self.process_filename,
            wav_tmp,
            mux_string='vorbisenc quality={0} ! oggmux'.format(
                self.audio_config['quality']))

        with NamedTemporaryFile(dir=workbench.dir, suffix='.ogg') as wav_tmp:
            _log.info('Creating OGG source for spectrogram')
            transcoder.transcode(
                queued_filename,
                wav_tmp.name,
                mux_string='vorbisenc quality={0} ! oggmux'.format(
                    audio_config['quality']))
        spectrogram_tmp = os.path.join(self.workbench.dir,
                                       self.name_builder.fill(
                                           '{basename}-spectrogram.jpg'))

            thumbnailer = AudioThumbnailer()
        self.thumbnailer.spectrogram(
            wav_tmp,
            spectrogram_tmp,
            width=max_width,
            fft_size=fft_size)

            with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as spectrogram_tmp:
                thumbnailer.spectrogram(
                    wav_tmp.name,
                    spectrogram_tmp.name,
                    width=mgg.global_config['media:medium']['max_width'],
                    fft_size=audio_config['spectrogram_fft_size'])
        _log.debug('Saving spectrogram...')
        store_public(self.entry, 'spectrogram', spectrogram_tmp,
                     self.name_builder.fill('{basename}.spectrogram.jpg'))

                _log.debug('Saving spectrogram...')
                mgg.public_store.get_file(spectrogram_filepath, 'wb').write(
                    spectrogram_tmp.read())
    def generate_thumb(self, size=None):
        if not size:
            max_width = mgg.global_config['media:thumb']['max_width']
            max_height = mgg.global_config['media:thumb']['max_height']
            size = (max_width, max_height)

                entry.media_files['spectrogram'] = spectrogram_filepath
        thumb_tmp = os.path.join(self.workbench.dir, self.name_builder.fill(
            '{basename}-thumbnail.jpg'))

                with NamedTemporaryFile(dir=workbench.dir, suffix='.jpg') as thumb_tmp:
                    thumbnailer.thumbnail_spectrogram(
                        spectrogram_tmp.name,
                        thumb_tmp.name,
                        (mgg.global_config['media:thumb']['max_width'],
                         mgg.global_config['media:thumb']['max_height']))
        # We need the spectrogram to create a thumbnail
        spectrogram = self.entry.media_files.get('spectrogram')
        if not spectrogram:
            _log.info('No spectrogram found, we will create one.')
            self.create_spectrogram()
            spectrogram = self.entry.media_files['spectrogram']

                    thumb_filepath = create_pub_filepath(
                        entry,
                        '{original}-thumbnail.jpg'.format(
                            original=os.path.splitext(
                                queued_filepath[-1])[0]))
        spectrogram_filepath = mgg.public_store.get_local_path(spectrogram)

                    mgg.public_store.get_file(thumb_filepath, 'wb').write(
                        thumb_tmp.read())
        self.thumbnailer.thumbnail_spectrogram(
            spectrogram_filepath,
            thumb_tmp,
            tuple(size))

                    entry.media_files['thumb'] = thumb_filepath
    else:
        entry.media_files['thumb'] = ['fake', 'thumb', 'path.jpg']
        store_public(self.entry, 'thumb', thumb_tmp,
                     self.name_builder.fill('{basename}.thumbnail.jpg'))

    # Remove queued media file from storage and database.
    # queued_filepath is in the task_id directory which should
    # be removed too, but fail if the directory is not empty to be on
    # the super-safe side.
    mgg.queue_store.delete_file(queued_filepath)      # rm file
    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
    entry.queued_media_file = []

class InitialProcessor(CommonAudioProcessor):
    """
    Initial processing steps for new audio
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--quality',
            type=float,
            help='vorbisenc quality. Range: -0.1..1')

        parser.add_argument(
            '--fft_size',
            type=int,
            help='spectrogram fft size')

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int,
            help='minimum size is 100 x 100')

        parser.add_argument(
            '--medium_width',
            type=int,
            help='The width of the spectogram')

        parser.add_argument(
            '--create_spectrogram',
            action='store_true',
            help='Create spectogram and thumbnail, will default to config')

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['create_spectrogram', 'quality', 'fft_size',
                   'thumb_size', 'medium_width'])

    def process(self, quality=None, fft_size=None, thumb_size=None,
                create_spectrogram=None, medium_width=None):
        self.common_setup()

        if not create_spectrogram:
            create_spectrogram = self.audio_config['create_spectrogram']

        self.transcode(quality=quality)
        self.copy_original()

        if create_spectrogram:
            self.create_spectrogram(max_width=medium_width, fft_size=fft_size)
            self.generate_thumb(size=thumb_size)
        self.delete_queue_file()


class Resizer(CommonAudioProcessor):
    """
    Thumbnail and spectogram resizing process steps for processed audio
    """
    name = 'resize'
    description = 'Resize thumbnail or spectogram'
    thumb_size = 'thumb_size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media entry is eligible for processing
        """
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--fft_size',
            type=int,
            help='spectrogram fft size')

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int,
            help='minimum size is 100 x 100')

        parser.add_argument(
            '--medium_width',
            type=int,
            help='The width of the spectogram')

        parser.add_argument(
            'file',
            choices=['thumb', 'spectrogram'])

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['thumb_size', 'file', 'fft_size', 'medium_width'])

    def process(self, file, thumb_size=None, fft_size=None,
                medium_width=None):
        self.common_setup()

        if file == 'thumb':
            self.generate_thumb(size=thumb_size)
        elif file == 'spectrogram':
            self.create_spectrogram(max_width=medium_width, fft_size=fft_size)


class Transcoder(CommonAudioProcessor):
    """
    Transcoding processing steps for processed audio
    """
    name = 'transcode'
    description = 'Re-transcode audio'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--quality',
            help='vorbisenc quality. Range: -0.1..1')

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['quality'])

    def process(self, quality=None):
        self.common_setup()
        self.transcode(quality=quality)


class AudioProcessingManager(ProcessingManager):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)
        self.add_processor(Transcoder)
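Note: audio is the only media type in this commit that registers a third processor, so already-processed audio can be re-transcoded without touching its thumbnails; a hypothetical invocation (the media id and quality are made up):

    gmg reprocess run 1 transcode --quality 0.5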
@@ -122,8 +122,7 @@ class AudioThumbnailer(object):
                int(start_x), 0,
                int(stop_x), int(im_h)))

        if th.size[0] > th_w or th.size[1] > th_h:
            th.thumbnail(thumb_size, Image.ANTIALIAS)
        th.thumbnail(thumb_size, Image.ANTIALIAS)

        th.save(dst)
@@ -14,24 +14,22 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging

from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.image.processing import process_image, \
    sniff_handler
from mediagoblin.tools import pluginapi
from mediagoblin.media_types.image.processing import sniff_handler, \
    ImageProcessingManager


_log = logging.getLogger(__name__)


ACCEPTED_EXTENSIONS = ["jpg", "jpeg", "png", "gif", "tiff"]
MEDIA_TYPE = 'mediagoblin.media_types.image'


def setup_plugin():
    config = pluginapi.get_config('mediagoblin.media_types.image')


class ImageMediaManager(MediaManagerBase):
    human_readable = "Image"
    processor = staticmethod(process_image)
    display_template = "mediagoblin/media_displays/image.html"
    default_thumb = "images/media_thumbs/image.png"

@@ -65,8 +63,8 @@ def get_media_type_and_manager(ext):


hooks = {
    'setup': setup_plugin,
    'get_media_type_and_manager': get_media_type_and_manager,
    'sniff_handler': sniff_handler,
    ('media_manager', MEDIA_TYPE): lambda: ImageMediaManager,
    ('reprocess_manager', MEDIA_TYPE): lambda: ImageProcessingManager,
}
mediagoblin/media_types/image/config_spec.ini (new file, 7 lines)
@@ -0,0 +1,7 @@
[plugin_spec]
# One of BICUBIC, BILINEAR, NEAREST, ANTIALIAS
resize_filter = string(default="ANTIALIAS")
#level of compression used when resizing images
quality = integer(default=90)
@@ -20,9 +20,14 @@ except ImportError:
    import Image
import os
import logging
import argparse

from mediagoblin import mg_globals as mgg
from mediagoblin.processing import BadMediaFail, FilenameBuilder
from mediagoblin.processing import (
    BadMediaFail, FilenameBuilder,
    MediaProcessor, ProcessingManager,
    request_from_args, get_process_filename,
    store_public, copy_original)
from mediagoblin.tools.exif import exif_fix_image_orientation, \
    extract_exif, clean_exif, get_gps_data, get_useful, \
    exif_image_needs_rotation

@@ -38,8 +43,8 @@ PIL_FILTERS = {
MEDIA_TYPE = 'mediagoblin.media_types.image'


def resize_image(proc_state, resized, keyname, target_name, new_size,
                 exif_tags, workdir):
def resize_image(entry, resized, keyname, target_name, new_size,
                 exif_tags, workdir, quality, filter):
    """
    Store a resized version of an image and return its pathname.

@@ -51,17 +56,16 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
    exif_tags -- EXIF data for the original image
    workdir -- directory path for storing converted image files
    new_size -- 2-tuple size for the resized image
    quality -- level of compression used when resizing images
    filter -- One of BICUBIC, BILINEAR, NEAREST, ANTIALIAS
    """
    config = mgg.global_config['media_type:mediagoblin.media_types.image']

    resized = exif_fix_image_orientation(resized, exif_tags)  # Fix orientation

    filter_config = config['resize_filter']
    try:
        resize_filter = PIL_FILTERS[filter_config.upper()]
        resize_filter = PIL_FILTERS[filter.upper()]
    except KeyError:
        raise Exception('Filter "{0}" not found, choose one of {1}'.format(
            unicode(filter_config),
            unicode(filter),
            u', '.join(PIL_FILTERS.keys())))

    resized.thumbnail(new_size, resize_filter)

@@ -69,32 +73,36 @@ def resize_image(proc_state, resized, keyname, target_name, new_size,
    # Copy the new file to the conversion subdir, then remotely.
    tmp_resized_filename = os.path.join(workdir, target_name)
    with file(tmp_resized_filename, 'w') as resized_file:
        resized.save(resized_file, quality=config['quality'])
    proc_state.store_public(keyname, tmp_resized_filename, target_name)
        resized.save(resized_file, quality=quality)
    store_public(entry, keyname, tmp_resized_filename, target_name)


def resize_tool(proc_state, force, keyname, target_name,
                conversions_subdir, exif_tags):
    # filename -- the filename of the original image being resized
    filename = proc_state.get_queued_filename()
    max_width = mgg.global_config['media:' + keyname]['max_width']
    max_height = mgg.global_config['media:' + keyname]['max_height']
def resize_tool(entry,
                force, keyname, orig_file, target_name,
                conversions_subdir, exif_tags, quality, filter, new_size=None):
    # Use the default size if new_size was not given
    if not new_size:
        max_width = mgg.global_config['media:' + keyname]['max_width']
        max_height = mgg.global_config['media:' + keyname]['max_height']
        new_size = (max_width, max_height)

    # If the size of the original file exceeds the specified size for the desized
    # file, a target_name file is created and later associated with the media
    # entry.
    # Also created if the file needs rotation, or if forced.
    try:
        im = Image.open(filename)
        im = Image.open(orig_file)
    except IOError:
        raise BadMediaFail()
    if force \
        or im.size[0] > max_width \
        or im.size[1] > max_height \
        or im.size[0] > new_size[0]\
        or im.size[1] > new_size[1]\
        or exif_image_needs_rotation(exif_tags):
        resize_image(
            proc_state, im, unicode(keyname), target_name,
            (max_width, max_height),
            exif_tags, conversions_subdir)
            entry, im, unicode(keyname), target_name,
            tuple(new_size),
|
||||
exif_tags, conversions_subdir,
|
||||
quality, filter)
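
A standalone sketch of the resize path that `resize_image`/`resize_tool` now take (assumes PIL/Pillow is installed; the file names are illustrative):

try:
    from PIL import Image
except ImportError:
    import Image

PIL_FILTERS = {
    'BICUBIC': Image.BICUBIC,
    'BILINEAR': Image.BILINEAR,
    'NEAREST': Image.NEAREST,
    'ANTIALIAS': Image.ANTIALIAS,
}

def shrink(src, dst, new_size=(640, 640), quality=90, filter='ANTIALIAS'):
    im = Image.open(src)
    im.thumbnail(new_size, PIL_FILTERS[filter.upper()])  # resizes in place
    im.save(dst, quality=quality)  # quality matters for JPEG output

shrink('example.jpg', 'example.medium.jpg')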

SUPPORTED_FILETYPES = ['png', 'gif', 'jpg', 'jpeg', 'tiff']

@@ -119,53 +127,210 @@ def sniff_handler(media_file, **kw):
    return None


def process_image(proc_state):
    """Code to process an image. Will be run by celery.

    A Workbench() represents a local temporary dir. It is automatically
    cleaned up when this function exits.
class CommonImageProcessor(MediaProcessor):
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    Provides a base for various media processing steps
    """
    # list of acceptable file keys in order of preference for reprocessing
    acceptable_files = ['original', 'medium']

    # Conversions subdirectory to avoid collisions
    conversions_subdir = os.path.join(
        workbench.dir, 'conversions')
    os.mkdir(conversions_subdir)
    def common_setup(self):
        """
        Set up the workbench directory and pull down the original file
        """
        self.image_config = mgg.global_config['plugins'][
            'mediagoblin.media_types.image']

    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)
        ## @@: Should this be two functions?
        # Conversions subdirectory to avoid collisions
        self.conversions_subdir = os.path.join(
            self.workbench.dir, 'conversions')
        os.mkdir(self.conversions_subdir)

    # EXIF extraction
    exif_tags = extract_exif(queued_filename)
    gps_data = get_gps_data(exif_tags)
        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

    # Always create a small thumbnail
    resize_tool(proc_state, True, 'thumb',
                name_builder.fill('{basename}.thumbnail{ext}'),
                conversions_subdir, exif_tags)
        # Exif extraction
        self.exif_tags = extract_exif(self.process_filename)

    # Possibly create a medium
    resize_tool(proc_state, False, 'medium',
                name_builder.fill('{basename}.medium{ext}'),
                conversions_subdir, exif_tags)
    def generate_medium_if_applicable(self, size=None, quality=None,
                                      filter=None):
        if not quality:
            quality = self.image_config['quality']
        if not filter:
            filter = self.image_config['resize_filter']

    # Copy our queued local workbench to its final destination
    proc_state.copy_original(name_builder.fill('{basename}{ext}'))
        resize_tool(self.entry, False, 'medium', self.process_filename,
                    self.name_builder.fill('{basename}.medium{ext}'),
                    self.conversions_subdir, self.exif_tags, quality,
                    filter, size)

    # Remove queued media file from storage and database
    proc_state.delete_queue_file()
    def generate_thumb(self, size=None, quality=None, filter=None):
        if not quality:
            quality = self.image_config['quality']
        if not filter:
            filter = self.image_config['resize_filter']

    # Insert exif data into database
    exif_all = clean_exif(exif_tags)
        resize_tool(self.entry, True, 'thumb', self.process_filename,
                    self.name_builder.fill('{basename}.thumbnail{ext}'),
                    self.conversions_subdir, self.exif_tags, quality,
                    filter, size)

    if len(exif_all):
        entry.media_data_init(exif_all=exif_all)
    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))

    if len(gps_data):
        for key in list(gps_data.keys()):
            gps_data['gps_' + key] = gps_data.pop(key)
        entry.media_data_init(**gps_data)
    def extract_metadata(self):
        # Is there any GPS data
        gps_data = get_gps_data(self.exif_tags)

        # Insert exif data into database
        exif_all = clean_exif(self.exif_tags)

        if len(exif_all):
            self.entry.media_data_init(exif_all=exif_all)

        if len(gps_data):
            for key in list(gps_data.keys()):
                gps_data['gps_' + key] = gps_data.pop(key)
            self.entry.media_data_init(**gps_data)
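
The gps key renaming in `extract_metadata` is easy to check in isolation (the coordinate values here are invented):

gps_data = {'latitude': 59.3, 'longitude': 18.1}
for key in list(gps_data.keys()):
    gps_data['gps_' + key] = gps_data.pop(key)
assert sorted(gps_data) == ['gps_latitude', 'gps_longitude']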


class InitialProcessor(CommonImageProcessor):
    """
    Initial processing step for new images
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

    ###############################
    # Command line interface things
    ###############################

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--thumb-size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--filter',
            choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])

        parser.add_argument(
            '--quality',
            type=int,
            help='level of compression used when resizing images')

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['size', 'thumb_size', 'filter', 'quality'])

    def process(self, size=None, thumb_size=None, quality=None, filter=None):
        self.common_setup()
        self.generate_medium_if_applicable(size=size, filter=filter,
                                           quality=quality)
        self.generate_thumb(size=thumb_size, filter=filter, quality=quality)
        self.copy_original()
        self.extract_metadata()
        self.delete_queue_file()
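
The parser/`args_to_request` pair gives each processor a small CLI. A hedged sketch of driving the pieces together (flag values invented; the exact shape of what `request_from_args` returns is assumed to be a mapping of the listed attribute names):

from mediagoblin.media_types.image.processing import InitialProcessor

parser = InitialProcessor.generate_parser()
args = parser.parse_args(['--size', '800', '600', '--quality', '80'])
request = InitialProcessor.args_to_request(args)
# request should now carry size=[800, 600] and quality=80, ready to be
# fed into a processor's process() as keyword arguments.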


class Resizer(CommonImageProcessor):
    """
    Resizing process steps for processed media
    """
    name = 'resize'
    description = 'Resize image'
    thumb_size = 'size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in 'processed'

    ###############################
    # Command line interface things
    ###############################

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--filter',
            choices=['BICUBIC', 'BILINEAR', 'NEAREST', 'ANTIALIAS'])

        parser.add_argument(
            '--quality',
            type=int,
            help='level of compression used when resizing images')

        parser.add_argument(
            'file',
            choices=['medium', 'thumb'])

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['size', 'file', 'quality', 'filter'])

    def process(self, file, size=None, filter=None, quality=None):
        self.common_setup()
        if file == 'medium':
            self.generate_medium_if_applicable(size=size, filter=filter,
                                               quality=quality)
        elif file == 'thumb':
            self.generate_thumb(size=size, filter=filter, quality=quality)


class ImageProcessingManager(ProcessingManager):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)


if __name__ == '__main__':

@@ -15,21 +15,16 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.pdf.processing import process_pdf, \
from mediagoblin.media_types.pdf.processing import PdfProcessingManager, \
    sniff_handler
from mediagoblin.tools import pluginapi


ACCEPTED_EXTENSIONS = ['pdf']
MEDIA_TYPE = 'mediagoblin.media_types.pdf'


def setup_plugin():
    config = pluginapi.get_config(MEDIA_TYPE)


class PDFMediaManager(MediaManagerBase):
    human_readable = "PDF"
    processor = staticmethod(process_pdf)
    display_template = "mediagoblin/media_displays/pdf.html"
    default_thumb = "images/media_thumbs/pdf.jpg"

@@ -40,8 +35,8 @@ def get_media_type_and_manager(ext):

hooks = {
    'setup': setup_plugin,
    'get_media_type_and_manager': get_media_type_and_manager,
    'sniff_handler': sniff_handler,
    ('media_manager', MEDIA_TYPE): lambda: PDFMediaManager,
    ('reprocess_manager', MEDIA_TYPE): lambda: PdfProcessingManager,
}

mediagoblin/media_types/pdf/config_spec.ini (new file, 5 lines)
@@ -0,0 +1,5 @@
[plugin_spec]
pdf_js = boolean(default=True)


@@ -13,14 +13,18 @@
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os
import logging
import dateutil.parser
from subprocess import PIPE, Popen

from mediagoblin import mg_globals as mgg
from mediagoblin.processing import (create_pub_filepath,
                                    FilenameBuilder, BadMediaFail)
from mediagoblin.processing import (
    FilenameBuilder, BadMediaFail,
    MediaProcessor, ProcessingManager,
    request_from_args, get_process_filename,
    store_public, copy_original)
from mediagoblin.tools.translate import fake_ugettext_passthrough as _

_log = logging.getLogger(__name__)

@@ -230,51 +234,207 @@ def pdf_info(original):

    return ret_dict

def process_pdf(proc_state):
    """Code to process a pdf file. Will be run by celery.

    A Workbench() represents a local temporary dir. It is automatically
    cleaned up when this function exits.
class CommonPdfProcessor(MediaProcessor):
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    Provides a base for various pdf processing steps
    """
    acceptable_files = ['original', 'pdf']

    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)
    def common_setup(self):
        """
        Set up common pdf processing steps
        """
        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

    # Copy our queued local workbench to its final destination
    original_dest = name_builder.fill('{basename}{ext}')
    proc_state.copy_original(original_dest)
        self._set_pdf_filename()

    def _set_pdf_filename(self):
        if self.name_builder.ext == '.pdf':
            self.pdf_filename = self.process_filename
        elif self.entry.media_files.get('pdf'):
            self.pdf_filename = self.workbench.localized_file(
                mgg.public_store, self.entry.media_files['pdf'])
        else:
            self.pdf_filename = self._generate_pdf()

    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))

    def generate_thumb(self, thumb_size=None):
        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],
                          mgg.global_config['media:thumb']['max_height'])

        # Note: pdftocairo adds '.png', so don't include an ext
        thumb_filename = os.path.join(self.workbench.dir,
                                      self.name_builder.fill(
                                          '{basename}.thumbnail'))

        executable = where('pdftocairo')
        args = [executable, '-scale-to', str(min(thumb_size)),
                '-singlefile', '-png', self.pdf_filename, thumb_filename]

        _log.debug('calling {0}'.format(repr(' '.join(args))))
        Popen(executable=executable, args=args).wait()

        # since pdftocairo added '.png', we need to include it with the
        # filename
        store_public(self.entry, 'thumb', thumb_filename + '.png',
                     self.name_builder.fill('{basename}.thumbnail.png'))
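
The pdftocairo call above is plain subprocess work; a self-contained sketch (assumes poppler's pdftocairo is on PATH, file names illustrative):

from subprocess import Popen

args = ['pdftocairo', '-scale-to', '180',
        '-singlefile', '-png', 'example.pdf', 'example.thumbnail']
Popen(args).wait()
# pdftocairo appends the extension itself, so the output lands in
# example.thumbnail.png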

    def _generate_pdf(self):
        """
        Store the pdf. If the file is not a pdf, make it a pdf
        """
        tmp_pdf = self.process_filename

    # Create a pdf if this is a different doc, store pdf for viewer
    ext = queued_filename.rsplit('.', 1)[-1].lower()
    if ext == 'pdf':
        pdf_filename = queued_filename
    else:
        pdf_filename = queued_filename.rsplit('.', 1)[0] + '.pdf'
        unoconv = where('unoconv')
        Popen(executable=unoconv,
              args=[unoconv, '-v', '-f', 'pdf', queued_filename]).wait()
        if not os.path.exists(pdf_filename):
              args=[unoconv, '-v', '-f', 'pdf', self.process_filename]).wait()

        if not os.path.exists(tmp_pdf):
            _log.debug('unoconv failed to convert file to pdf')
            raise BadMediaFail()
        proc_state.store_public(keyname=u'pdf', local_file=pdf_filename)

    pdf_info_dict = pdf_info(pdf_filename)
        store_public(self.entry, 'pdf', tmp_pdf,
                     self.name_builder.fill('{basename}.pdf'))

    for name, width, height in [
            (u'thumb', mgg.global_config['media:thumb']['max_width'],
                       mgg.global_config['media:thumb']['max_height']),
            (u'medium', mgg.global_config['media:medium']['max_width'],
                        mgg.global_config['media:medium']['max_height']),
            ]:
        filename = name_builder.fill('{basename}.%s.png' % name)
        path = workbench.joinpath(filename)
        create_pdf_thumb(pdf_filename, path, width, height)
        assert(os.path.exists(path))
        proc_state.store_public(keyname=name, local_file=path)
        return self.workbench.localized_file(
            mgg.public_store, self.entry.media_files['pdf'])
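
Likewise, the unoconv fallback boils down to one subprocess call; a sketch (assumes unoconv is installed, input name illustrative):

import os
from subprocess import Popen

Popen(['unoconv', '-v', '-f', 'pdf', 'report.odt']).wait()
# unoconv writes the converted file alongside the input
assert os.path.exists('report.pdf')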

    proc_state.delete_queue_file()
    def extract_pdf_info(self):
        pdf_info_dict = pdf_info(self.pdf_filename)
        self.entry.media_data_init(**pdf_info_dict)

    entry.media_data_init(**pdf_info_dict)
    entry.save()
    def generate_medium(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

        # Note: pdftocairo adds '.png', so don't include an ext
        filename = os.path.join(self.workbench.dir,
                                self.name_builder.fill('{basename}.medium'))

        executable = where('pdftocairo')
        args = [executable, '-scale-to', str(min(size)),
                '-singlefile', '-png', self.pdf_filename, filename]

        _log.debug('calling {0}'.format(repr(' '.join(args))))
        Popen(executable=executable, args=args).wait()

        # since pdftocairo added '.png', we need to include it with the
        # filename
        store_public(self.entry, 'medium', filename + '.png',
                     self.name_builder.fill('{basename}.medium.png'))


class InitialProcessor(CommonPdfProcessor):
    """
    Initial processing step for new pdfs
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--thumb-size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['size', 'thumb_size'])

    def process(self, size=None, thumb_size=None):
        self.common_setup()
        self.extract_pdf_info()
        self.copy_original()
        self.generate_medium(size=size)
        self.generate_thumb(thumb_size=thumb_size)
        self.delete_queue_file()


class Resizer(CommonPdfProcessor):
    """
    Resizing process steps for processed pdfs
    """
    name = 'resize'
    description = 'Resize thumbnail and medium'
    thumb_size = 'size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            'file',
            choices=['medium', 'thumb'])

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['size', 'file'])

    def process(self, file, size=None):
        self.common_setup()
        if file == 'medium':
            self.generate_medium(size=size)
        elif file == 'thumb':
            self.generate_thumb(thumb_size=size)


class PdfProcessingManager(ProcessingManager):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)

@@ -15,21 +15,16 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.stl.processing import process_stl, \
from mediagoblin.media_types.stl.processing import StlProcessingManager, \
    sniff_handler
from mediagoblin.tools import pluginapi


MEDIA_TYPE = 'mediagoblin.media_types.stl'
ACCEPTED_EXTENSIONS = ["obj", "stl"]


def setup_plugin():
    config = pluginapi.get_config(MEDIA_TYPE)


class STLMediaManager(MediaManagerBase):
    human_readable = "stereo lithographics"
    processor = staticmethod(process_stl)
    display_template = "mediagoblin/media_displays/stl.html"
    default_thumb = "images/media_thumbs/video.jpg"

@@ -39,8 +34,8 @@ def get_media_type_and_manager(ext):
    return MEDIA_TYPE, STLMediaManager

hooks = {
    'setup': setup_plugin,
    'get_media_type_and_manager': get_media_type_and_manager,
    'sniff_handler': sniff_handler,
    ('media_manager', MEDIA_TYPE): lambda: STLMediaManager,
    ('reprocess_manager', MEDIA_TYPE): lambda: StlProcessingManager,
}

@@ -14,6 +14,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import os
import json
import logging
@@ -21,8 +22,11 @@ import subprocess
import pkg_resources

from mediagoblin import mg_globals as mgg
from mediagoblin.processing import create_pub_filepath, \
    FilenameBuilder
from mediagoblin.processing import (
    FilenameBuilder, MediaProcessor,
    ProcessingManager, request_from_args,
    get_process_filename, store_public,
    copy_original)

from mediagoblin.media_types.stl import model_loader

@@ -75,49 +79,61 @@ def blender_render(config):
        env=env)


def process_stl(proc_state):
    """Code to process an stl or obj model. Will be run by celery.

    A Workbench() represents a local temporary dir. It is automatically
    cleaned up when this function exits.
class CommonStlProcessor(MediaProcessor):
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    Provides a common base for various stl processing steps
    """
    acceptable_files = ['original']

    queued_filepath = entry.queued_media_file
    queued_filename = workbench.localized_file(
        mgg.queue_store, queued_filepath, 'source')
    name_builder = FilenameBuilder(queued_filename)
    def common_setup(self):
        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

    ext = queued_filename.lower().strip()[-4:]
    if ext.startswith("."):
        ext = ext[1:]
    else:
        ext = None
        self._set_ext()
        self._set_model()
        self._set_greatest()

    # Attempt to parse the model file and divine some useful
    # information about it.
    with open(queued_filename, 'rb') as model_file:
        model = model_loader.auto_detect(model_file, ext)
    def _set_ext(self):
        ext = self.name_builder.ext[1:]

    # generate preview images
    greatest = [model.width, model.height, model.depth]
    greatest.sort()
    greatest = greatest[-1]
        if not ext:
            ext = None

    def snap(name, camera, width=640, height=640, project="ORTHO"):
        filename = name_builder.fill(name)
        workbench_path = workbench.joinpath(filename)
        self.ext = ext

    def _set_model(self):
        """
        Attempt to parse the model file and divine some useful
        information about it.
        """
        with open(self.process_filename, 'rb') as model_file:
            self.model = model_loader.auto_detect(model_file, self.ext)

    def _set_greatest(self):
        greatest = [self.model.width, self.model.height, self.model.depth]
        greatest.sort()
        self.greatest = greatest[-1]
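
`_set_greatest` just picks the largest model extent, which later scales the camera clip distance; a tiny check with invented extents:

width, height, depth = 2.0, 5.0, 3.0
greatest = sorted([width, height, depth])[-1]  # same as max(...)
assert greatest == 5.0  # camera_clip then becomes greatest * 10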

    def copy_original(self):
        copy_original(
            self.entry, self.process_filename,
            self.name_builder.fill('{basename}{ext}'))

    def _snap(self, keyname, name, camera, size, project="ORTHO"):
        filename = self.name_builder.fill(name)
        workbench_path = self.workbench.joinpath(filename)
        shot = {
            "model_path": queued_filename,
            "model_ext": ext,
            "model_path": self.process_filename,
            "model_ext": self.ext,
            "camera_coord": camera,
            "camera_focus": model.average,
            "camera_clip": greatest*10,
            "greatest": greatest,
            "camera_focus": self.model.average,
            "camera_clip": self.greatest*10,
            "greatest": self.greatest,
            "projection": project,
            "width": width,
            "height": height,
            "width": size[0],
            "height": size[1],
            "out_file": workbench_path,
        }
        blender_render(shot)
@@ -126,70 +142,191 @@ def process_stl(proc_state):
        assert os.path.exists(workbench_path)

    # copy it up!
    with open(workbench_path, 'rb') as rendered_file:
        public_path = create_pub_filepath(entry, filename)
        store_public(self.entry, keyname, workbench_path, filename)

        with mgg.public_store.get_file(public_path, "wb") as public_file:
            public_file.write(rendered_file.read())
    def generate_thumb(self, thumb_size=None):
        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],
                          mgg.global_config['media:thumb']['max_height'])

        return public_path
        self._snap(
            "thumb",
            "{basename}.thumb.jpg",
            [0, self.greatest*-1.5, self.greatest],
            thumb_size,
            project="PERSP")

    thumb_path = snap(
        "{basename}.thumb.jpg",
        [0, greatest*-1.5, greatest],
        mgg.global_config['media:thumb']['max_width'],
        mgg.global_config['media:thumb']['max_height'],
        project="PERSP")
    def generate_perspective(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

    perspective_path = snap(
        "{basename}.perspective.jpg",
        [0, greatest*-1.5, greatest], project="PERSP")
        self._snap(
            "perspective",
            "{basename}.perspective.jpg",
            [0, self.greatest*-1.5, self.greatest],
            size,
            project="PERSP")

    topview_path = snap(
        "{basename}.top.jpg",
        [model.average[0], model.average[1], greatest*2])
    def generate_topview(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

    frontview_path = snap(
        "{basename}.front.jpg",
        [model.average[0], greatest*-2, model.average[2]])
        self._snap(
            "top",
            "{basename}.top.jpg",
            [self.model.average[0], self.model.average[1],
             self.greatest*2],
            size)

    sideview_path = snap(
        "{basename}.side.jpg",
        [greatest*-2, model.average[1], model.average[2]])
    def generate_frontview(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

    ## Save the public file stuffs
    model_filepath = create_pub_filepath(
        entry, name_builder.fill('{basename}{ext}'))
        self._snap(
            "front",
            "{basename}.front.jpg",
            [self.model.average[0], self.greatest*-2,
             self.model.average[2]],
            size)

    with mgg.public_store.get_file(model_filepath, 'wb') as model_file:
        with open(queued_filename, 'rb') as queued_file:
            model_file.write(queued_file.read())
    def generate_sideview(self, size=None):
        if not size:
            size = (mgg.global_config['media:medium']['max_width'],
                    mgg.global_config['media:medium']['max_height'])

    # Remove queued media file from storage and database.
    # queued_filepath is in the task_id directory which should
    # be removed too, but fail if the directory is not empty to be on
    # the super-safe side.
    mgg.queue_store.delete_file(queued_filepath)  # rm file
    mgg.queue_store.delete_dir(queued_filepath[:-1])  # rm dir
    entry.queued_media_file = []
        self._snap(
            "side",
            "{basename}.side.jpg",
            [self.greatest*-2, self.model.average[1],
             self.model.average[2]],
            size)

    # Insert media file information into database
    media_files_dict = entry.setdefault('media_files', {})
    media_files_dict[u'original'] = model_filepath
    media_files_dict[u'thumb'] = thumb_path
    media_files_dict[u'perspective'] = perspective_path
    media_files_dict[u'top'] = topview_path
    media_files_dict[u'side'] = sideview_path
    media_files_dict[u'front'] = frontview_path
    def store_dimensions(self):
        """
        Put model dimensions into the database
        """
        dimensions = {
            "center_x": self.model.average[0],
            "center_y": self.model.average[1],
            "center_z": self.model.average[2],
            "width": self.model.width,
            "height": self.model.height,
            "depth": self.model.depth,
            "file_type": self.ext,
        }
        self.entry.media_data_init(**dimensions)

    # Put model dimensions into the database
    dimensions = {
        "center_x": model.average[0],
        "center_y": model.average[1],
        "center_z": model.average[2],
        "width": model.width,
        "height": model.height,
        "depth": model.depth,
        "file_type": ext,
    }
    entry.media_data_init(**dimensions)

class InitialProcessor(CommonStlProcessor):
    """
    Initial processing step for new stls
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['size', 'thumb_size'])

    def process(self, size=None, thumb_size=None):
        self.common_setup()
        self.generate_thumb(thumb_size=thumb_size)
        self.generate_perspective(size=size)
        self.generate_topview(size=size)
        self.generate_frontview(size=size)
        self.generate_sideview(size=size)
        self.store_dimensions()
        self.copy_original()
        self.delete_queue_file()


class Resizer(CommonStlProcessor):
    """
    Resizing process steps for processed stls
    """
    name = 'resize'
    description = 'Resize thumbnail and mediums'
    thumb_size = 'size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        """
        Determine if this media type is eligible for processing
        """
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            'file',
            choices=['medium', 'thumb'])

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['size', 'file'])

    def process(self, file, size=None):
        self.common_setup()
        if file == 'medium':
            self.generate_perspective(size=size)
            self.generate_topview(size=size)
            self.generate_frontview(size=size)
            self.generate_sideview(size=size)
        elif file == 'thumb':
            self.generate_thumb(thumb_size=size)


class StlProcessingManager(ProcessingManager):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)

@@ -15,27 +15,22 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from mediagoblin.media_types import MediaManagerBase
from mediagoblin.media_types.video.processing import process_video, \
from mediagoblin.media_types.video.processing import VideoProcessingManager, \
    sniff_handler
from mediagoblin.tools import pluginapi


MEDIA_TYPE = 'mediagoblin.media_types.video'
ACCEPTED_EXTENSIONS = [
    "mp4", "mov", "webm", "avi", "3gp", "3gpp", "mkv", "ogv", "m4v"]


def setup_plugin():
    config = pluginapi.get_config(MEDIA_TYPE)


class VideoMediaManager(MediaManagerBase):
    human_readable = "Video"
    processor = staticmethod(process_video)
    display_template = "mediagoblin/media_displays/video.html"
    default_thumb = "images/media_thumbs/video.jpg"

    # Used by the media_entry.get_display_media method
    media_fetch_order = [u'webm_640', u'original']
    media_fetch_order = [u'webm_video', u'original']
    default_webm_type = 'video/webm; codecs="vp8, vorbis"'
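
A sketch of how a fetch order like this is typically applied when picking the file to display (the lookup loop is assumed, not quoted from get_display_media):

media_fetch_order = [u'webm_video', u'original']
media_files = {u'original': u'/files/example.mkv'}

display_key = next(
    (key for key in media_fetch_order if key in media_files), None)
assert display_key == u'original'  # used when no webm_video exists yet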

@@ -44,8 +39,8 @@ def get_media_type_and_manager(ext):
    return MEDIA_TYPE, VideoMediaManager

hooks = {
    'setup': setup_plugin,
    'get_media_type_and_manager': get_media_type_and_manager,
    'sniff_handler': sniff_handler,
    ('media_manager', MEDIA_TYPE): lambda: VideoMediaManager,
    ('reprocess_manager', MEDIA_TYPE): lambda: VideoProcessingManager,
}

mediagoblin/media_types/video/config_spec.ini (new file, 22 lines)
@@ -0,0 +1,22 @@
[plugin_spec]
# Should we keep the original file?
keep_original = boolean(default=False)

# 0 means autodetect, autodetect means number_of_CPUs - 1
vp8_threads = integer(default=0)
# Range: 0..10
vp8_quality = integer(default=8)
# Range: -0.1..1
vorbis_quality = float(default=0.3)

# Autoplay the video when page is loaded?
auto_play = boolean(default=False)

[[skip_transcode]]
mime_types = string_list(default=list("video/webm"))
container_formats = string_list(default=list("Matroska"))
video_codecs = string_list(default=list("VP8 video"))
audio_codecs = string_list(default=list("Vorbis"))
dimensions_match = boolean(default=True)

@@ -20,6 +20,7 @@ from sqlalchemy import MetaData, Column, Unicode

MIGRATIONS = {}


@RegisterMigration(1, MIGRATIONS)
def add_orig_metadata_column(db_conn):
    metadata = MetaData(bind=db_conn.bind)
@@ -30,3 +31,19 @@ def add_orig_metadata_column(db_conn):
                 default=None, nullable=True)
    col.create(vid_data)
    db_conn.commit()


@RegisterMigration(2, MIGRATIONS)
def webm_640_to_webm_video(db):
    metadata = MetaData(bind=db.bind)

    file_keynames = inspect_table(metadata, 'core__file_keynames')

    for row in db.execute(file_keynames.select()):
        if row.name == 'webm_640':
            db.execute(
                file_keynames.update(). \
                where(file_keynames.c.id==row.id).\
                values(name='webm_video'))

    db.commit()
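
The rename pattern in migration 2 works on any SQLAlchemy table; a self-contained sketch against an in-memory database (table layout is illustrative, and the API shown is the 0.8-era bound-metadata style this codebase uses):

from sqlalchemy import (create_engine, MetaData, Table,
                        Column, Integer, Unicode)

engine = create_engine('sqlite://')
metadata = MetaData(bind=engine)
keynames = Table('file_keynames', metadata,
                 Column('id', Integer, primary_key=True),
                 Column('name', Unicode))
metadata.create_all()

engine.execute(keynames.insert().values(name=u'webm_640'))
engine.execute(keynames.update().
               where(keynames.c.name == u'webm_640').
               values(name=u'webm_video'))
# The old keyname is gone; only webm_video rows remain.
assert engine.execute(keynames.select()).first().name == u'webm_video'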

@@ -36,12 +36,12 @@ class VideoData(Base):
    - orig_metadata: A loose json structure containing metadata gstreamer
      pulled from the original video.
      This field is NOT GUARANTEED to exist!

      Likely metadata extracted:
        "videoheight", "videolength", "videowidth",
        "audiorate", "audiolength", "audiochannels", "audiowidth",
        "mimetype", "tags"

      TODO: document the above better.
    """
    __tablename__ = "video__mediadata"
@@ -68,7 +68,7 @@ class VideoData(Base):
        """
        orig_metadata = self.orig_metadata or {}

        if "webm_640" not in self.get_media_entry.media_files \
        if "webm_video" not in self.get_media_entry.media_files \
                and "mimetype" in orig_metadata \
                and "tags" in orig_metadata \
                and "audio-codec" in orig_metadata["tags"] \

@@ -14,13 +14,18 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import os.path
import logging
import datetime

from mediagoblin import mg_globals as mgg
from mediagoblin.processing import \
    create_pub_filepath, FilenameBuilder, BaseProcessingFail, ProgressCallback
from mediagoblin.processing import (
    FilenameBuilder, BaseProcessingFail,
    ProgressCallback, MediaProcessor,
    ProcessingManager, request_from_args,
    get_process_filename, store_public,
    copy_original)
from mediagoblin.tools.translate import lazy_pass_to_ugettext as _

from . import transcoders
@@ -48,115 +53,15 @@ def sniff_handler(media_file, **kw):

    if not data:
        _log.error('Could not discover {0}'.format(
                kw.get('media')))
            kw.get('media')))
        return None

    if data['is_video'] == True:
    if data['is_video'] is True:
        return MEDIA_TYPE

    return None


def process_video(proc_state):
    """
    Process a video entry, transcode the queued media files (originals) and
    create a thumbnail for the entry.

    A Workbench() represents a local temporary dir. It is automatically
    cleaned up when this function exits.
    """
    entry = proc_state.entry
    workbench = proc_state.workbench
    video_config = mgg.global_config['media_type:mediagoblin.media_types.video']

    queued_filepath = entry.queued_media_file
    queued_filename = proc_state.get_queued_filename()
    name_builder = FilenameBuilder(queued_filename)

    medium_basename = name_builder.fill('{basename}-640p.webm')
    medium_filepath = create_pub_filepath(entry, medium_basename)

    thumbnail_basename = name_builder.fill('{basename}.thumbnail.jpg')
    thumbnail_filepath = create_pub_filepath(entry, thumbnail_basename)

    # Create a temporary file for the video destination (cleaned up with workbench)
    tmp_dst = os.path.join(workbench.dir, medium_basename)
    # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
    progress_callback = ProgressCallback(entry)

    dimensions = (
        mgg.global_config['media:medium']['max_width'],
        mgg.global_config['media:medium']['max_height'])

    # Extract metadata and keep a record of it
    metadata = transcoders.VideoTranscoder().discover(queued_filename)
    store_metadata(entry, metadata)

    # Figure out whether or not we need to transcode this video or
    # if we can skip it
    if skip_transcode(metadata):
        _log.debug('Skipping transcoding')

        dst_dimensions = metadata['videowidth'], metadata['videoheight']

        # Push original file to public storage
        _log.debug('Saving original...')
        proc_state.copy_original(queued_filepath[-1])

        did_transcode = False
    else:
        transcoder = transcoders.VideoTranscoder()

        transcoder.transcode(queued_filename, tmp_dst,
                             vp8_quality=video_config['vp8_quality'],
                             vp8_threads=video_config['vp8_threads'],
                             vorbis_quality=video_config['vorbis_quality'],
                             progress_callback=progress_callback,
                             dimensions=dimensions)

        dst_dimensions = transcoder.dst_data.videowidth,\
            transcoder.dst_data.videoheight

        # Push transcoded video to public storage
        _log.debug('Saving medium...')
        mgg.public_store.copy_local_to_storage(tmp_dst, medium_filepath)
        _log.debug('Saved medium')

        entry.media_files['webm_640'] = medium_filepath

        did_transcode = True

    # Save the width and height of the transcoded video
    entry.media_data_init(
        width=dst_dimensions[0],
        height=dst_dimensions[1])

    # Temporary file for the video thumbnail (cleaned up with workbench)
    tmp_thumb = os.path.join(workbench.dir, thumbnail_basename)

    # Create a thumbnail.jpg that fits in a 180x180 square
    transcoders.VideoThumbnailerMarkII(
        queued_filename,
        tmp_thumb,
        180)

    # Push the thumbnail to public storage
    _log.debug('Saving thumbnail...')
    mgg.public_store.copy_local_to_storage(tmp_thumb, thumbnail_filepath)
    entry.media_files['thumb'] = thumbnail_filepath

    # save the original... but only if we did a transcoding
    # (if we skipped transcoding and just kept the original anyway as the main
    # media, then why would we save the original twice?)
    if video_config['keep_original'] and did_transcode:
        # Push original file to public storage
        _log.debug('Saving original...')
        proc_state.copy_original(queued_filepath[-1])

    # Remove queued media file from storage and database
    proc_state.delete_queue_file()


def store_metadata(media_entry, metadata):
    """
    Store metadata from this video for this media entry.
@@ -165,9 +70,9 @@ def store_metadata(media_entry, metadata):
    stored_metadata = dict(
        [(key, metadata[key])
         for key in [
            "videoheight", "videolength", "videowidth",
            "audiorate", "audiolength", "audiochannels", "audiowidth",
            "mimetype"]
             "videoheight", "videolength", "videowidth",
             "audiorate", "audiolength", "audiochannels", "audiowidth",
             "mimetype"]
         if key in metadata])

    # We have to convert videorate into a sequence because it's a
@@ -186,10 +91,10 @@ def store_metadata(media_entry, metadata):
    tags = dict(
        [(key, tags_metadata[key])
         for key in [
            "application-name", "artist", "audio-codec", "bitrate",
            "container-format", "copyright", "encoder",
            "encoder-version", "license", "nominal-bitrate", "title",
            "video-codec"]
             "application-name", "artist", "audio-codec", "bitrate",
             "container-format", "copyright", "encoder",
             "encoder-version", "license", "nominal-bitrate", "title",
             "video-codec"]
         if key in tags_metadata])
    if 'date' in tags_metadata:
        date = tags_metadata['date']
@@ -211,3 +116,297 @@ def store_metadata(media_entry, metadata):
    if len(stored_metadata):
        media_entry.media_data_init(
            orig_metadata=stored_metadata)
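
The whitelist filtering in store_metadata is a plain dict build; a quick illustration with invented values:

metadata = {'videoheight': 360, 'videowidth': 640, 'scratch': object()}
wanted = ['videoheight', 'videolength', 'videowidth', 'mimetype']
stored = dict((key, metadata[key]) for key in wanted if key in metadata)
assert stored == {'videoheight': 360, 'videowidth': 640}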


class CommonVideoProcessor(MediaProcessor):
    """
    Provides a base for various video processing steps
    """
    acceptable_files = ['original', 'best_quality', 'webm_video']

    def common_setup(self):
        self.video_config = mgg \
            .global_config['plugins'][MEDIA_TYPE]

        # Pull down and set up the processing file
        self.process_filename = get_process_filename(
            self.entry, self.workbench, self.acceptable_files)
        self.name_builder = FilenameBuilder(self.process_filename)

        self.transcoder = transcoders.VideoTranscoder()
        self.did_transcode = False

    def copy_original(self):
        # If we didn't transcode, then we need to keep the original
        if not self.did_transcode or \
           (self.video_config['keep_original'] and self.did_transcode):
            copy_original(
                self.entry, self.process_filename,
                self.name_builder.fill('{basename}{ext}'))

    def _keep_best(self):
        """
        If there is no original, keep the best file that we have
        """
        if not self.entry.media_files.get('best_quality'):
            # Save the best quality file if no original?
            if not self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_video'):
                self.entry.media_files['best_quality'] = self.entry \
                    .media_files['webm_video']
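
The _keep_best fallback can be followed with a plain dict standing in for entry.media_files (the stored path values are invented):

media_files = {'webm_video': ['media_entries', '1', 'example.medium.webm']}
if not media_files.get('best_quality'):
    if not media_files.get('original') and media_files.get('webm_video'):
        media_files['best_quality'] = media_files['webm_video']
assert media_files['best_quality'] == media_files['webm_video']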


    def transcode(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                  vorbis_quality=None):
        progress_callback = ProgressCallback(self.entry)
        tmp_dst = os.path.join(self.workbench.dir,
                               self.name_builder.fill('{basename}.medium.webm'))

        if not medium_size:
            medium_size = (
                mgg.global_config['media:medium']['max_width'],
                mgg.global_config['media:medium']['max_height'])
        if not vp8_quality:
            vp8_quality = self.video_config['vp8_quality']
        if not vp8_threads:
            vp8_threads = self.video_config['vp8_threads']
        if not vorbis_quality:
            vorbis_quality = self.video_config['vorbis_quality']

        # Extract metadata and keep a record of it
        metadata = self.transcoder.discover(self.process_filename)
        store_metadata(self.entry, metadata)

        # Figure out whether or not we need to transcode this video or
        # if we can skip it
        if skip_transcode(metadata, medium_size):
            _log.debug('Skipping transcoding')

            dst_dimensions = metadata['videowidth'], metadata['videoheight']

            # If there is an original and transcoded, delete the transcoded
            # since it must be of lower quality than the original
            if self.entry.media_files.get('original') and \
                    self.entry.media_files.get('webm_video'):
                self.entry.media_files['webm_video'].delete()

        else:
            self.transcoder.transcode(self.process_filename, tmp_dst,
                                      vp8_quality=vp8_quality,
                                      vp8_threads=vp8_threads,
                                      vorbis_quality=vorbis_quality,
                                      progress_callback=progress_callback,
                                      dimensions=tuple(medium_size))

            dst_dimensions = self.transcoder.dst_data.videowidth,\
                self.transcoder.dst_data.videoheight

            self._keep_best()

            # Push transcoded video to public storage
            _log.debug('Saving medium...')
            store_public(self.entry, 'webm_video', tmp_dst,
                         self.name_builder.fill('{basename}.medium.webm'))
            _log.debug('Saved medium')

            self.did_transcode = True

        # Save the width and height of the transcoded video
        self.entry.media_data_init(
            width=dst_dimensions[0],
            height=dst_dimensions[1])

    def generate_thumb(self, thumb_size=None):
        # Temporary file for the video thumbnail (cleaned up with workbench)
        tmp_thumb = os.path.join(self.workbench.dir,
                                 self.name_builder.fill(
                                     '{basename}.thumbnail.jpg'))

        if not thumb_size:
            thumb_size = (mgg.global_config['media:thumb']['max_width'],)

        # We will only use the width so that the correct scale is kept
        transcoders.VideoThumbnailerMarkII(
            self.process_filename,
            tmp_thumb,
            thumb_size[0])

        # Push the thumbnail to public storage
        _log.debug('Saving thumbnail...')
        store_public(self.entry, 'thumb', tmp_thumb,
                     self.name_builder.fill('{basename}.thumbnail.jpg'))


class InitialProcessor(CommonVideoProcessor):
    """
    Initial processing steps for new video
    """
    name = "initial"
    description = "Initial processing"

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state in (
            "unprocessed", "failed")

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--medium_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--vp8_quality',
            type=int,
            help='Range 0..10')

        parser.add_argument(
            '--vp8_threads',
            type=int,
            help='0 means number_of_CPUs - 1')

        parser.add_argument(
            '--vorbis_quality',
            type=float,
            help='Range -0.1..1')

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['medium_size', 'vp8_quality', 'vp8_threads',
                   'vorbis_quality', 'thumb_size'])

    def process(self, medium_size=None, vp8_threads=None, vp8_quality=None,
                vorbis_quality=None, thumb_size=None):
        self.common_setup()

        self.transcode(medium_size=medium_size, vp8_quality=vp8_quality,
                       vp8_threads=vp8_threads, vorbis_quality=vorbis_quality)

        self.copy_original()
        self.generate_thumb(thumb_size=thumb_size)
        self.delete_queue_file()


class Resizer(CommonVideoProcessor):
    """
    Video thumbnail resizing process steps for processed media
    """
    name = 'resize'
    description = 'Resize thumbnail'
    thumb_size = 'thumb_size'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--thumb_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        # Needed for gmg reprocess thumbs to work
        parser.add_argument(
            'file',
            nargs='?',
            default='thumb',
            choices=['thumb'])

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['thumb_size', 'file'])

    def process(self, thumb_size=None, file=None):
        self.common_setup()
        self.generate_thumb(thumb_size=thumb_size)


class Transcoder(CommonVideoProcessor):
    """
    Transcoding processing steps for processed video
    """
    name = 'transcode'
    description = 'Re-transcode video'

    @classmethod
    def media_is_eligible(cls, entry=None, state=None):
        if not state:
            state = entry.state
        return state in 'processed'

    @classmethod
    def generate_parser(cls):
        parser = argparse.ArgumentParser(
            description=cls.description,
            prog=cls.name)

        parser.add_argument(
            '--medium_size',
            nargs=2,
            metavar=('max_width', 'max_height'),
            type=int)

        parser.add_argument(
            '--vp8_quality',
            type=int,
            help='Range 0..10')

        parser.add_argument(
            '--vp8_threads',
            type=int,
            help='0 means number_of_CPUs - 1')

        parser.add_argument(
            '--vorbis_quality',
            type=float,
            help='Range -0.1..1')

        return parser

    @classmethod
    def args_to_request(cls, args):
        return request_from_args(
            args, ['medium_size', 'vp8_threads', 'vp8_quality',
                   'vorbis_quality'])

    def process(self, medium_size=None, vp8_quality=None, vp8_threads=None,
                vorbis_quality=None):
        self.common_setup()
        self.transcode(medium_size=medium_size, vp8_threads=vp8_threads,
                       vp8_quality=vp8_quality, vorbis_quality=vorbis_quality)


class VideoProcessingManager(ProcessingManager):
    def __init__(self):
        super(self.__class__, self).__init__()
        self.add_processor(InitialProcessor)
        self.add_processor(Resizer)
        self.add_processor(Transcoder)

@@ -21,13 +21,13 @@ from mediagoblin import mg_globals as mgg
_log = logging.getLogger(__name__)


def skip_transcode(metadata):
def skip_transcode(metadata, size):
    '''
    Checks video metadata against configuration values for skip_transcode.

    Returns True if the video matches the requirements in the configuration.
    '''
    config = mgg.global_config['media_type:mediagoblin.media_types.video']\
    config = mgg.global_config['plugins']['mediagoblin.media_types.video']\
            ['skip_transcode']

    medium_config = mgg.global_config['media:medium']
@@ -51,9 +51,9 @@ def skip_transcode(metadata):
            return False

    if config['dimensions_match']:
        if not metadata['videoheight'] <= medium_config['max_height']:
        if not metadata['videoheight'] <= size[1]:
            return False
        if not metadata['videowidth'] <= medium_config['max_width']:
        if not metadata['videowidth'] <= size[0]:
            return False

    return True
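
The new size argument makes the dimensions check depend on the requested medium size instead of the global config; a self-contained sketch of that predicate:

def dimensions_fit(metadata, size):
    """True when the source already fits inside the requested box."""
    return (metadata['videowidth'] <= size[0] and
            metadata['videoheight'] <= size[1])

assert dimensions_fit({'videowidth': 640, 'videoheight': 360}, (640, 640))
assert not dimensions_fit({'videowidth': 1280, 'videoheight': 720},
                          (640, 640))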