Feature 477 - Support Cloud Files public storage

*   Added configuration options to mediagoblin.ini (a hedged construction
    sketch follows the commit metadata below).
*   process_media now supports python-cloudfiles' almost-file-like objects by
    wrapping them in a contextlib.contextmanager-decorated function (a short
    usage sketch follows the process_media diff below).
*   storage now has the CloudFilesStorage class.
*   New dependency added to setup.py: `python-cloudfiles`.
Author: Joar Wandborg
Date:   2011-08-04 01:32:34 +02:00
parent 4d74812dfc
commit 851c51a354
4 changed files with 94 additions and 4 deletions
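
The exact mediagoblin.ini option names are not visible in this excerpt, but
`CloudFilesStorage.__init__` below reads the keyword arguments shown here, so
the public store would presumably be wired up roughly as follows. This is a
hedged sketch, not part of the commit: the import path and all values are
assumptions/placeholders, and actually connecting requires valid Rackspace
credentials.

```python
# Hedged sketch only -- not part of this commit. Option names mirror the
# kwargs read in CloudFilesStorage.__init__; all values are placeholders.
from mediagoblin.storage import CloudFilesStorage  # assumed import path

public_store = CloudFilesStorage(
    cloudfiles_user='myrackspaceuser',       # Cloud Files account username
    cloudfiles_api_key='myrackspaceapikey',  # matching API key
    cloudfiles_container='mediagoblin',      # container that holds public media
    cloudfiles_host='',                      # empty: defaults to Rackspace US
    cloudfiles_use_servicenet='false')       # 'true' to route over ServiceNet
```

Whatever the final .ini key names turn out to be, they would map onto these
constructor kwargs.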

process_media module (diff):

@@ -19,6 +19,7 @@ from mediagoblin.db.util import ObjectId
 from celery.task import task
 from mediagoblin import mg_globals as mgg
+from contextlib import contextmanager
 THUMB_SIZE = 180, 180
@@ -31,6 +32,12 @@ def create_pub_filepath(entry, filename):
             unicode(entry['_id']),
             filename])
+@contextmanager
+def closing(callback):
+    try:
+        yield callback
+    finally:
+        pass
 @task
 def process_media_initial(media_id):
@@ -53,7 +60,7 @@ def process_media_initial(media_id):
     thumb_filepath = create_pub_filepath(entry, 'thumbnail.jpg')
     thumb_file = mgg.public_store.get_file(thumb_filepath, 'w')
-    with thumb_file:
+    with closing(thumb_file):
         thumb.save(thumb_file, "JPEG", quality=90)
     """
@@ -73,7 +80,7 @@ def process_media_initial(media_id):
         medium_filepath = create_pub_filepath(entry, 'medium.jpg')
         medium_file = mgg.public_store.get_file(medium_filepath, 'w')
-        with medium_file:
+        with closing(medium_file):
             medium.save(medium_file, "JPEG", quality=90)
             medium_processed = True
@@ -84,7 +91,7 @@ def process_media_initial(media_id):
     with queued_file:
         original_filepath = create_pub_filepath(entry, queued_filepath[-1])
-        with mgg.public_store.get_file(original_filepath, 'wb') as original_file:
+        with closing(mgg.public_store.get_file(original_filepath, 'wb')) as original_file:
             original_file.write(queued_file.read())
     mgg.queue_store.delete_file(queued_filepath)
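
For context on the changes above: python-cloudfiles hands back "almost-file-like"
objects that can be written to but do not implement the context-manager protocol,
so a bare `with thumb_file:` fails; the `closing()` helper added above simply
yields the object unchanged. A minimal, self-contained sketch (not part of the
commit; `DummyStorageObject` is a hypothetical stand-in, not a real cloudfiles
class):

```python
from contextlib import contextmanager

@contextmanager
def closing(callback):
    # Same shape as the helper added above: yield the wrapped object and do
    # nothing on exit; the "almost-file-like" object manages its own lifetime.
    try:
        yield callback
    finally:
        pass

class DummyStorageObject(object):
    """Hypothetical stand-in for a cloudfiles storage object: writable, but
    not a context manager, so a bare `with DummyStorageObject():` would fail."""
    def write(self, data):
        print('wrote %d bytes' % len(data))

# The wrapper lets the not-quite-file-like object be used in a with block.
with closing(DummyStorageObject()) as dest:
    dest.write(b'thumbnail bytes')
```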

storage module (diff):

@@ -19,6 +19,7 @@ import re
 import shutil
 import urlparse
 import uuid
+import cloudfiles
 from werkzeug.utils import secure_filename
@@ -161,6 +162,61 @@ class StorageInterface(object):
             dest_file.write(source_file.read())
+class CloudFilesStorage(StorageInterface):
+    def __init__(self, **kwargs):
+        self.param_container = kwargs.get('cloudfiles_container')
+        self.param_user = kwargs.get('cloudfiles_user')
+        self.param_api_key = kwargs.get('cloudfiles_api_key')
+        self.param_host = kwargs.get('cloudfiles_host')
+        self.param_use_servicenet = kwargs.get('cloudfiles_use_servicenet')
+        if not self.param_host:
+            print('No CloudFiles host URL specified, defaulting to Rackspace US')
+        self.connection = cloudfiles.get_connection(
+            username=self.param_user,
+            api_key=self.param_api_key,
+            servicenet=True if self.param_use_servicenet == 'true' or \
+                self.param_use_servicenet == True else False)
+        if not self.param_container in [self.connection.get_container(self.param_container)]:
+            self.container = self.connection.create_container(self.param_container)
+            self.container.make_public(
+                ttl=60 * 60 * 2)
+        else:
+            self.container = self.connection.get_container(self.param_container)
+    def _resolve_filepath(self, filepath):
+        return '-'.join(
+            clean_listy_filepath(filepath))
+    def file_exists(self, filepath):
+        try:
+            object = self.container.get_object(
+                self._resolve_filepath(filepath))
+            return True
+        except cloudfiles.errors.NoSuchObject:
+            return False
+    def get_file(self, filepath, mode='r'):
+        try:
+            obj = self.container.get_object(
+                self._resolve_filepath(filepath))
+        except cloudfiles.errors.NoSuchObject:
+            obj = self.container.create_object(
+                self._resolve_filepath(filepath))
+        return obj
+    def delete_file(self, filepath):
+        # TODO: Also delete unused directories if empty (safely, with
+        # checks to avoid race conditions).
+        self.container.delete_object(filepath)
+    def file_url(self, filepath):
+        return self.get_file(filepath).public_uri()
 class BasicFileStorage(StorageInterface):
     """
     Basic local filesystem implementation of storage API