Now store metadata info from processing into the media type.
This comes in several parts:
- Store the metadata from gstreamer during processing
- Add a new JSONEncoded field to the VideoData table
- And, of course, add a migration for that field!

This commit sponsored by Julius Tuomisto. Thank you, Julius!
This commit is contained in:
parent
ddbf6af1e2
commit
29adab4651
@ -14,4 +14,19 @@
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from mediagoblin.db.migration_tools import RegisterMigration, inspect_table
|
||||
|
||||
from sqlalchemy import MetaData, Column, Unicode
|
||||
|
||||
# Registry of schema migrations for the video media type, keyed by
# migration version number.
MIGRATIONS = {}


@RegisterMigration(1, MIGRATIONS)
def add_orig_metadata_column(db_conn):
    """
    Migration 1: add the nullable ``orig_metadata`` column to the
    ``video__mediadata`` table.

    The column is created as Unicode because the model-level JSONEncoded
    type serializes to text in the database.
    """
    metadata = MetaData(bind=db_conn.bind)

    # Reflect the existing table so the new column can be attached to it.
    vid_data = inspect_table(metadata, "video__mediadata")

    new_column = Column('orig_metadata', Unicode,
                        default=None, nullable=True)
    new_column.create(vid_data)
    db_conn.commit()
|
||||
|
@ -20,12 +20,29 @@ from mediagoblin.db.base import Base
|
||||
from sqlalchemy import (
|
||||
Column, Integer, SmallInteger, ForeignKey)
|
||||
from sqlalchemy.orm import relationship, backref
|
||||
from mediagoblin.db.extratypes import JSONEncoded
|
||||
|
||||
|
||||
# Backref name for this media type's data row; note it is NOT the same
# string as the table name ("video__mediadata") — the backref has an
# extra underscore.
BACKREF_NAME = "video__media_data"
|
||||
|
||||
|
||||
class VideoData(Base):
|
||||
"""
|
||||
Attributes:
|
||||
- media_data: the originating media entry (of course)
|
||||
- width: width of the transcoded video
|
||||
- height: height of the transcoded video
|
||||
- orig_metadata: A loose json structure containing metadata gstreamer
|
||||
pulled from the original video.
|
||||
This field is NOT GUARANTEED to exist!
|
||||
|
||||
Likely metadata extracted:
|
||||
"videoheight", "videolength", "videowidth",
|
||||
"audiorate", "audiolength", "audiochannels", "audiowidth",
|
||||
"mimetype", "tags"
|
||||
|
||||
TODO: document the above better.
|
||||
"""
|
||||
__tablename__ = "video__mediadata"
|
||||
|
||||
# The primary key *and* reference to the main media_entry
|
||||
@ -38,6 +55,8 @@ class VideoData(Base):
|
||||
width = Column(SmallInteger)
|
||||
height = Column(SmallInteger)
|
||||
|
||||
orig_metadata = Column(JSONEncoded)
|
||||
|
||||
|
||||
# Hooks the video media type's data model into the plugin machinery:
# DATA_MODEL is the per-entry media-data class, MODELS lists every
# table this media type defines (presumably consumed by the model
# registration code — confirm against the media-type loader).
DATA_MODEL = VideoData
MODELS = [VideoData]
|
||||
|
@ -86,8 +86,12 @@ def process_video(proc_state):
|
||||
mgg.global_config['media:medium']['max_width'],
|
||||
mgg.global_config['media:medium']['max_height'])
|
||||
|
||||
# Extract metadata and keep a record of it
|
||||
metadata = transcoders.VideoTranscoder().discover(queued_filename)
|
||||
store_metadata(entry, metadata)
|
||||
|
||||
# Figure out whether or not we need to transcode this video or
|
||||
# if we can skip it
|
||||
if skip_transcode(metadata):
|
||||
_log.debug('Skipping transcoding')
|
||||
# Just push the submitted file to the tmp_dst
|
||||
@ -152,3 +156,27 @@ def process_video(proc_state):
|
||||
|
||||
# Remove queued media file from storage and database
|
||||
proc_state.delete_queue_file()
|
||||
|
||||
|
||||
def store_metadata(media_entry, metadata):
    """
    Store metadata from this video for this media entry.

    Copies the directly JSON-serializable keys from ``metadata`` into a
    plain dict, flattens the special ``videorate`` value into a
    two-element list, and saves the result on the media entry's
    media_data row as ``orig_metadata``.
    """
    # Keys whose values can be stored as-is, no conversion required.
    simple_keys = (
        "videoheight", "videolength", "videowidth",
        "audiorate", "audiolength", "audiochannels", "audiowidth",
        "mimetype", "tags")

    stored_metadata = {}
    for key in simple_keys:
        if key in metadata:
            stored_metadata[key] = metadata[key]

    # videorate is a fraction-like object (it has .num and .denom), a
    # special type normally — flatten it to a plain [num, denom] list
    # so it serializes cleanly.
    if "videorate" in metadata:
        rate = metadata["videorate"]
        stored_metadata["videorate"] = [rate.num, rate.denom]

    media_entry.media_data_init(
        orig_metadata=stored_metadata)
|
||||
|
Loading…
x
Reference in New Issue
Block a user