Merge branch 'master' into 419_cherrypick_large_uploads
commit f415c35b4e
@@ -103,6 +103,13 @@ vorbis_quality = float(default=0.3)
 # Autoplay the video when page is loaded?
 auto_play = boolean(default=True)
 
+[[skip_transcode]]
+mime_types = string_list(default=list("video/webm"))
+container_formats = string_list(default=list("Matroska"))
+video_codecs = string_list(default=list("VP8 video"))
+audio_codecs = string_list(default=list("Vorbis"))
+dimensions_match = boolean(default=True)
+
 
 [media_type:mediagoblin.media_types.audio]
 keep_original = boolean(default=True)
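The [[skip_transcode]] defaults declared above are read back at runtime through mgg.global_config; the new mediagoblin/media_types/video/util.py later in this diff performs exactly this lookup. A minimal sketch, assuming a fully configured MediaGoblin app (the helper name is invented):

    # Sketch only: reading the [[skip_transcode]] defaults declared above.
    from mediagoblin import mg_globals as mgg

    def read_skip_transcode_config():
        # Hypothetical helper; the section names come from config_spec.ini.
        config = mgg.global_config[
            'media_type:mediagoblin.media_types.video']['skip_transcode']
        return config['mime_types'], config['dimensions_match']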
@@ -17,6 +17,9 @@
 from mediagoblin.tools.common import simple_printer
 from sqlalchemy import Table
 
+class TableAlreadyExists(Exception):
+    pass
+
 
 class MigrationManager(object):
     """
@@ -128,7 +131,10 @@ class MigrationManager(object):
         # sanity check before we proceed, none of these should be created
         for model in self.models:
             # Maybe in the future just print out a "Yikes!" or something?
-            assert not model.__table__.exists(self.session.bind)
+            if model.__table__.exists(self.session.bind):
+                raise TableAlreadyExists(
+                    u"Intended to create table '%s' but it already exists" %
+                    model.__table__.name)
 
         self.migration_model.metadata.create_all(
             self.session.bind,
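Replacing the bare assert with TableAlreadyExists lets callers react to the condition instead of dying on an AssertionError. A hedged sketch of what that enables; the init_tables() method name and the import path are assumptions, not something this diff shows:

    from mediagoblin.db.migration_tools import TableAlreadyExists

    def create_tables_or_report(manager):
        # 'manager' is assumed to be an already configured MigrationManager.
        try:
            manager.init_tables()
        except TableAlreadyExists as error:
            # Previously an assert; now the condition is reportable.
            print('Skipping table creation: %s' % error)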
@@ -126,24 +126,28 @@ class MediaEntryMixin(object):
         """
         return cleaned_markdown_conversion(self.description)
 
-    def get_display_media(self, media_map,
-                          fetch_order=common.DISPLAY_IMAGE_FETCHING_ORDER):
-        """
-        Find the best media for display.
+    def get_display_media(self):
+        """Find the best media for display.
 
-        Args:
-        - media_map: a dict like
-          {u'image_size': [u'dir1', u'dir2', u'image.jpg']}
-        - fetch_order: the order we should try fetching images in
+        We try checking self.media_manager.fetching_order if it exists to
+        pull down the order.
 
         Returns:
-        (media_size, media_path)
-        """
-        media_sizes = media_map.keys()
+          (media_size, media_path)
+          or, if not found, None.
 
-        for media_size in common.DISPLAY_IMAGE_FETCHING_ORDER:
+        """
+        fetch_order = self.media_manager.get("media_fetch_order")
+
+        # No fetching order found? well, give up!
+        if not fetch_order:
+            return None
+
+        media_sizes = self.media_files.keys()
+
+        for media_size in fetch_order:
             if media_size in media_sizes:
-                return media_map[media_size]
+                return media_size, self.media_files[media_size]
 
     def main_mediafile(self):
         pass
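The new signature drops the media_map and fetch_order arguments: callers now receive a (media_size, media_path) tuple, or None when no fetch order is configured or nothing matches, with the order coming from the media manager's "media_fetch_order". A short sketch of the new calling convention, where entry stands in for any MediaEntry:

    result = entry.get_display_media()

    if result is None:
        # No media_fetch_order configured, or nothing in media_files matched.
        display_path = None
    else:
        display_size, display_path = result  # e.g. (u'webm_640', <storage path>)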
@@ -14,7 +14,6 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-import pdb
 import logging
 import Image
 
@@ -233,5 +232,3 @@ if __name__ == '__main__':
     thumbnailer = AudioThumbnailer()
 
     thumbnailer.spectrogram(*sys.argv[1:], width=640)
-
-    pdb.set_trace()
@@ -25,4 +25,8 @@ MEDIA_MANAGER = {
     "sniff_handler": sniff_handler,
     "display_template": "mediagoblin/media_displays/image.html",
     "default_thumb": "images/media_thumbs/image.png",
-    "accepted_extensions": ["jpg", "jpeg", "png", "gif", "tiff"]}
+    "accepted_extensions": ["jpg", "jpeg", "png", "gif", "tiff"],
+
+    # Used by the media_entry.get_display_media method
+    "media_fetch_order": [u'medium', u'original', u'thumb'],
+    }
@@ -26,4 +26,9 @@ MEDIA_MANAGER = {
     "display_template": "mediagoblin/media_displays/video.html",
     "default_thumb": "images/media_thumbs/video.jpg",
     "accepted_extensions": [
-        "mp4", "mov", "webm", "avi", "3gp", "3gpp", "mkv", "ogv", "m4v"]}
+        "mp4", "mov", "webm", "avi", "3gp", "3gpp", "mkv", "ogv", "m4v"],
+
+    # Used by the media_entry.get_display_media method
+    "media_fetch_order": [u'webm_640', u'original'],
+    "default_webm_type": 'video/webm; codecs="vp8, vorbis"',
+    }
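The media_fetch_order list is walked in order against the keys of an entry's media_files; the first size that is present wins. An illustration with an invented media_files dict:

    fetch_order = [u'webm_640', u'original']           # from MEDIA_MANAGER above
    media_files = {u'thumb': ['t.jpg'], u'original': ['v.ogv']}  # invented entry

    chosen = None
    for media_size in fetch_order:
        if media_size in media_files:
            chosen = media_size, media_files[media_size]
            break
    # chosen == (u'original', ['v.ogv']) for this invented entry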
@@ -14,4 +14,19 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+from mediagoblin.db.migration_tools import RegisterMigration, inspect_table
+
+from sqlalchemy import MetaData, Column, Unicode
+
 MIGRATIONS = {}
+
+@RegisterMigration(1, MIGRATIONS)
+def add_orig_metadata_column(db_conn):
+    metadata = MetaData(bind=db_conn.bind)
+
+    vid_data = inspect_table(metadata, "video__mediadata")
+
+    col = Column('orig_metadata', Unicode,
+                 default=None, nullable=True)
+    col.create(vid_data)
+    db_conn.commit()
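MIGRATIONS is the registry that future schema changes keep appending to, each under the next version number. A purely hypothetical follow-up, only to show the pattern (it reuses the imports above):

    @RegisterMigration(2, MIGRATIONS)
    def hypothetical_followup_migration(db_conn):
        metadata = MetaData(bind=db_conn.bind)
        vid_data = inspect_table(metadata, "video__mediadata")
        # ... alter vid_data here, then commit as above:
        db_conn.commit()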
@@ -20,12 +20,30 @@ from mediagoblin.db.base import Base
 from sqlalchemy import (
     Column, Integer, SmallInteger, ForeignKey)
 from sqlalchemy.orm import relationship, backref
+from mediagoblin.db.extratypes import JSONEncoded
+from mediagoblin.media_types import video
 
 
 BACKREF_NAME = "video__media_data"
 
 
 class VideoData(Base):
+    """
+    Attributes:
+     - media_data: the originating media entry (of course)
+     - width: width of the transcoded video
+     - height: height of the transcoded video
+     - orig_metadata: A loose json structure containing metadata gstreamer
+       pulled from the original video.
+       This field is NOT GUARANTEED to exist!
+
+       Likely metadata extracted:
+         "videoheight", "videolength", "videowidth",
+         "audiorate", "audiolength", "audiochannels", "audiowidth",
+         "mimetype", "tags"
+
+       TODO: document the above better.
+    """
     __tablename__ = "video__mediadata"
 
     # The primary key *and* reference to the main media_entry
@@ -38,6 +56,35 @@ class VideoData(Base):
     width = Column(SmallInteger)
     height = Column(SmallInteger)
 
+    orig_metadata = Column(JSONEncoded)
+
+    def source_type(self):
+        """
+        Construct a useful type=... that is to say, used like:
+          <video><source type="{{ entry.media_data.source_type() }}" /></video>
+
+        Try to construct it out of self.orig_metadata... if we fail we
+        just dope'ily fall back on DEFAULT_WEBM_TYPE
+        """
+        orig_metadata = self.orig_metadata or {}
+
+        if "webm_640" not in self.get_media_entry.media_files \
+                and "mimetype" in orig_metadata \
+                and "tags" in orig_metadata \
+                and "audio-codec" in orig_metadata["tags"] \
+                and "video-codec" in orig_metadata["tags"]:
+            if orig_metadata['mimetype'] == 'application/ogg':
+                # stupid ambiguous .ogg extension
+                mimetype = "video/ogg"
+            else:
+                mimetype = orig_metadata['mimetype']
+            return '%s; codecs="%s, %s"' % (
+                mimetype,
+                orig_metadata["tags"]["video-codec"].lower(),
+                orig_metadata["tags"]["audio-codec"].lower())
+        else:
+            return video.MEDIA_MANAGER["default_webm_type"]
+
 
 DATA_MODEL = VideoData
 MODELS = [VideoData]
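What source_type() produces, using invented metadata and assuming the entry has no webm_640 file, so the original's codecs are reported:

    data = VideoData(orig_metadata={
        "mimetype": "application/ogg",
        "tags": {"video-codec": "Theora", "audio-codec": "Vorbis"}})
    # The ambiguous Ogg mimetype is rewritten and the codec tags lowercased:
    #   data.source_type()  ->  'video/ogg; codecs="theora, vorbis"'
    # If the entry does have a webm_640 file, or the metadata is missing,
    # the method falls back to video.MEDIA_MANAGER["default_webm_type"],
    # i.e. 'video/webm; codecs="vp8, vorbis"'.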
@@ -23,6 +23,7 @@ from mediagoblin.processing import \
 from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
 
 from . import transcoders
+from .util import skip_transcode
 
 _log = logging.getLogger(__name__)
 _log.setLevel(logging.DEBUG)
@@ -79,24 +80,53 @@ def process_video(proc_state):
     with tmp_dst:
         # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square
         progress_callback = ProgressCallback(entry)
-        transcoder = transcoders.VideoTranscoder()
-        transcoder.transcode(queued_filename, tmp_dst.name,
-                vp8_quality=video_config['vp8_quality'],
-                vp8_threads=video_config['vp8_threads'],
-                vorbis_quality=video_config['vorbis_quality'],
-                progress_callback=progress_callback)
 
-        # Push transcoded video to public storage
-        _log.debug('Saving medium...')
-        mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)
-        _log.debug('Saved medium')
+        dimensions = (
+            mgg.global_config['media:medium']['max_width'],
+            mgg.global_config['media:medium']['max_height'])
 
-        entry.media_files['webm_640'] = medium_filepath
+        # Extract metadata and keep a record of it
+        metadata = transcoders.VideoTranscoder().discover(queued_filename)
+        store_metadata(entry, metadata)
 
-        # Save the width and height of the transcoded video
-        entry.media_data_init(
-            width=transcoder.dst_data.videowidth,
-            height=transcoder.dst_data.videoheight)
+        # Figure out whether or not we need to transcode this video or
+        # if we can skip it
+        if skip_transcode(metadata):
+            _log.debug('Skipping transcoding')
+
+            dst_dimensions = metadata['videowidth'], metadata['videoheight']
+
+            # Push original file to public storage
+            _log.debug('Saving original...')
+            proc_state.copy_original(queued_filepath[-1])
+
+            did_transcode = False
+        else:
+            transcoder = transcoders.VideoTranscoder()
+
+            transcoder.transcode(queued_filename, tmp_dst.name,
+                    vp8_quality=video_config['vp8_quality'],
+                    vp8_threads=video_config['vp8_threads'],
+                    vorbis_quality=video_config['vorbis_quality'],
+                    progress_callback=progress_callback,
+                    dimensions=dimensions)
+
+            dst_dimensions = transcoder.dst_data.videowidth,\
+                    transcoder.dst_data.videoheight
+
+            # Push transcoded video to public storage
+            _log.debug('Saving medium...')
+            mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)
+            _log.debug('Saved medium')
+
+            entry.media_files['webm_640'] = medium_filepath
+
+            did_transcode = True
+
+        # Save the width and height of the transcoded video
+        entry.media_data_init(
+            width=dst_dimensions[0],
+            height=dst_dimensions[1])
 
     # Temporary file for the video thumbnail (cleaned up with workbench)
     tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)
@@ -108,15 +138,44 @@
         tmp_thumb.name,
         180)
 
     # Push the thumbnail to public storage
     _log.debug('Saving thumbnail...')
     mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)
     entry.media_files['thumb'] = thumbnail_filepath
 
-    if video_config['keep_original']:
+    # save the original... but only if we did a transcoding
+    # (if we skipped transcoding and just kept the original anyway as the main
+    #  media, then why would we save the original twice?)
+    if video_config['keep_original'] and did_transcode:
         # Push original file to public storage
         _log.debug('Saving original...')
         proc_state.copy_original(queued_filepath[-1])
 
     # Remove queued media file from storage and database
     proc_state.delete_queue_file()
+
+
+def store_metadata(media_entry, metadata):
+    """
+    Store metadata from this video for this media entry.
+    """
+    # Let's pull out the easy, not having to be converted ones first
+    stored_metadata = dict(
+        [(key, metadata[key])
+         for key in [
+             "videoheight", "videolength", "videowidth",
+             "audiorate", "audiolength", "audiochannels", "audiowidth",
+             "mimetype", "tags"]
+         if key in metadata])
+
+    # We have to convert videorate into a sequence because it's a
+    # special type normally..
+
+    if "videorate" in metadata:
+        videorate = metadata["videorate"]
+        stored_metadata["videorate"] = [videorate.num, videorate.denom]
+
+    # Only save this field if there's something to save
+    if len(stored_metadata):
+        media_entry.media_data_init(
+            orig_metadata=stored_metadata)
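store_metadata() keeps only the whitelisted keys shown above and flattens the GStreamer videorate fraction into a JSON-friendly [num, denom] pair before stashing the result in orig_metadata. An invented example of its input:

    discovered = {
        "videowidth": 640, "videoheight": 360, "videolength": 42,
        "mimetype": "video/webm",
        "tags": {"video-codec": "VP8 video", "audio-codec": "Vorbis"},
        "pixel-aspect-ratio": "1/1",   # not whitelisted, so it is dropped
    }
    # store_metadata(entry, discovered) ends up calling
    # entry.media_data_init(orig_metadata={...only the whitelisted keys...})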
@@ -700,6 +700,7 @@ class VideoTranscoder:
         self._setup()
         self._run()
 
+    # XXX: This could be a static method.
     def discover(self, src):
         '''
         Discover properties about a media file
@@ -820,7 +821,8 @@ class VideoTranscoder:
         self.audioconvert = gst.element_factory_make('audioconvert', 'audioconvert')
         self.pipeline.add(self.audioconvert)
 
-        self.audiocapsfilter = gst.element_factory_make('capsfilter', 'audiocapsfilter')
+        self.audiocapsfilter = gst.element_factory_make('capsfilter',
+                                                        'audiocapsfilter')
         audiocaps = ['audio/x-raw-float']
         self.audiocapsfilter.set_property(
             'caps',
mediagoblin/media_types/video/util.py (new file, 59 lines)
@@ -0,0 +1,59 @@
+# GNU MediaGoblin -- federated, autonomous media hosting
+# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+
+from mediagoblin import mg_globals as mgg
+
+_log = logging.getLogger(__name__)
+
+
+def skip_transcode(metadata):
+    '''
+    Checks video metadata against configuration values for skip_transcode.
+
+    Returns True if the video matches the requirements in the configuration.
+    '''
+    config = mgg.global_config['media_type:mediagoblin.media_types.video']\
+            ['skip_transcode']
+
+    medium_config = mgg.global_config['media:medium']
+
+    _log.debug('skip_transcode config: {0}'.format(config))
+
+    if config['mime_types'] and metadata.get('mimetype'):
+        if not metadata['mimetype'] in config['mime_types']:
+            return False
+
+    if config['container_formats'] and metadata['tags'].get('container-format'):
+        if not metadata['tags']['container-format'] in config['container_formats']:
+            return False
+
+    if config['video_codecs'] and metadata['tags'].get('video-codec'):
+        if not metadata['tags']['video-codec'] in config['video_codecs']:
+            return False
+
+    if config['audio_codecs'] and metadata['tags'].get('audio-codec'):
+        if not metadata['tags']['audio-codec'] in config['audio_codecs']:
+            return False
+
+    if config['dimensions_match']:
+        if not metadata['videoheight'] <= medium_config['max_height']:
+            return False
+        if not metadata['videowidth'] <= medium_config['max_width']:
+            return False
+
+    return True
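A hedged usage sketch for skip_transcode(); the metadata dict below is invented but uses the keys the function inspects. With the [[skip_transcode]] defaults from the top of this diff (video/webm, Matroska, VP8, Vorbis, dimensions_match) and the stock media:medium size limits, it would report that transcoding can be skipped (a loaded app config is assumed):

    discovered = {
        "mimetype": "video/webm",
        "videowidth": 640, "videoheight": 360,
        "tags": {
            "container-format": "Matroska",
            "video-codec": "VP8 video",
            "audio-codec": "Vorbis",
        },
    }
    assert skip_transcode(discovered)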
@@ -23,7 +23,7 @@
 
 
 {% set model_download = request.app.public_store.file_url(
-       media.get_display_media(media.media_files)) %}
+       media.media_files['original']) %}
 {% set perspective_view = request.app.public_store.file_url(
        media.media_files['perspective']) %}
 {% set top_view = request.app.public_store.file_url(
@@ -22,19 +22,24 @@
   {{ super() }}
   <script type="text/javascript" src="{{
        request.staticdirect('/extlib/video-js/video.min.js') }}"></script>
-  <link href="{{ request.staticdirect('/css/vjs-mg-skin.css')
-        }}" rel="stylesheet">
+  <link href="{{ request.staticdirect('/css/vjs-mg-skin.css') }}"
+        rel="stylesheet">
 {%- endblock %}
 
 {% block mediagoblin_media %}
+  {% set display_type, display_path = media.get_display_media() %}
+
   <video controls
          {% if global_config['media_type:mediagoblin.media_types.video']['auto_play'] %}autoplay{% endif %}
          preload="auto" class="video-js vjs-mg-skin"
-         data-setup='{"height": {{ media.media_data.height }},
-                      "width": {{ media.media_data.width }} }'>
-    <source src="{{ request.app.public_store.file_url(
-                      media.media_files['webm_640']) }}"
-            type="video/webm; codecs="vp8, vorbis"" />
+         data-setup='{"height": {{ media.media_data.height }},
+                      "width": {{ media.media_data.width }} }'>
+    <source src="{{ request.app.public_store.file_url(display_path) }}"
+            {% if media.media_data %}
+            type="{{ media.media_data.source_type() }}"
+            {% else %}
+            type="{{ media.media_manager['default_webm_type'] }}"
+            {% endif %} />
     <div class="no_html5">
       {%- trans -%}Sorry, this video will not work because
        your web browser does not support HTML5
@@ -50,10 +55,20 @@
   <h3>{% trans %}Download{% endtrans %}</h3>
   <ul>
     {% if 'original' in media.media_files %}
-      <li><a href="{{ request.app.public_store.file_url(
-                media.media_files.original) }}">{% trans %}Original file{% endtrans %}</a>
+      <li>
+        <a href="{{ request.app.public_store.file_url(
+              media.media_files.original) }}">
+          {%- trans %}Original file{% endtrans -%}
+        </a>
+      </li>
     {% endif %}
-    <li><a href="{{ request.app.public_store.file_url(
-              media.media_files.webm_640) }}">{% trans %}WebM file (640p; VP8/Vorbis){% endtrans %}</a>
+    {% if 'webm_640' in media.media_files %}
+      <li>
+        <a href="{{ request.app.public_store.file_url(
+              media.media_files.webm_640) }}">
+          {%- trans %}WebM file (640p; VP8/Vorbis){% endtrans -%}
+        </a>
+      </li>
+    {% endif %}
   </ul>
 {% endblock %}
@@ -47,7 +47,7 @@
   <div class="media_image_container">
     {% block mediagoblin_media %}
       {% set display_media = request.app.public_store.file_url(
-         media.get_display_media(media.media_files)) %}
+         media.get_display_media()[1]) %}
       {# if there's a medium file size, that means the medium size
        # isn't the original... so link to the original!
        #}
@@ -16,7 +16,6 @@
 
 import sys
 
-DISPLAY_IMAGE_FETCHING_ORDER = [u'medium', u'original', u'thumb']
 
 global TESTS_ENABLED
 TESTS_ENABLED = False
@@ -227,7 +227,8 @@ def media_collect(request, media):
     # Otherwise, use the collection selected from the drop-down
     else:
         collection = Collection.query.filter_by(
-            id=request.form.get('collection')).first()
+            id=form.collection.data,
+            creator=request.user.id).first()
 
     # Make sure the user actually selected a collection
     if not collection:
@@ -236,7 +237,7 @@ def media_collect(request, media):
             _('You have to select or add a collection'))
         return redirect(request, "mediagoblin.user_pages.media_collect",
                         user=media.get_uploader.username,
-                        media=media.id)
+                        media_id=media.id)
 
 
     # Check whether media already exists in collection
@@ -250,7 +251,6 @@ def media_collect(request, media):
         collection_item = request.db.CollectionItem()
         collection_item.collection = collection.id
         collection_item.media_entry = media.id
-        collection_item.author = request.user.id
         collection_item.note = request.form['note']
         collection_item.save()
 