Replace all unicode() calls with six.text_type().

Fixes #5329.
Berker Peksag 2015-06-25 22:24:03 +03:00
parent a17845d42e
commit 896d00fbf2
6 changed files with 18 additions and 10 deletions
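
For context: the substitution is mechanical because six aliases these names to the matching built-ins on each interpreter (text_type is unicode on Python 2 and str on Python 3; string_types is (basestring,) and (str,) respectively). A minimal sketch, not part of this commit, of the equivalences the changed call sites rely on:

import six

# six.text_type is unicode on Python 2 and str on Python 3, so stringifying
# an exception (as the audio/video sniffers do) yields a text string on both:
message = six.text_type(ValueError("bad input"))
assert isinstance(message, six.text_type)

# The two-argument form decodes bytes, matching unicode(cell, 'utf-8') in
# unicode_csv_reader below:
assert six.text_type(b"caf\xc3\xa9", "utf-8") == u"caf\u00e9"

# six.string_types stands in for basestring in isinstance() checks, as in
# get_tags() in the video diff below:
assert isinstance(u"some tag", six.string_types)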

View File

@@ -219,10 +219,10 @@ def edit_profile(request, url_user=None):
         # Save location
         if form.location.data and user.location is None:
-            user.get_location = Location(name=unicode(form.location.data))
+            user.get_location = Location(name=six.text_type(form.location.data))
         elif form.location.data:
             location = user.get_location
-            location.name = unicode(form.location.data)
+            location.name = six.text_type(form.location.data)
             location.save()
         user.save()

View File

@@ -178,11 +178,12 @@ u"FAIL: This file is larger than the upload limits for this site."))
 def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
     # csv.py doesn't do Unicode; encode temporarily as UTF-8:
+    # TODO: this probably won't be necessary in Python 3
     csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
                             dialect=dialect, **kwargs)
     for row in csv_reader:
         # decode UTF-8 back to Unicode, cell by cell:
-        yield [unicode(cell, 'utf-8') for cell in row]
+        yield [six.text_type(cell, 'utf-8') for cell in row]


 def utf_8_encoder(unicode_csv_data):
     for line in unicode_csv_data:

View File

@@ -38,7 +38,7 @@ def adduser(args):
     #TODO: Lets trust admins this do not validate Emails :)
     commands_util.setup_app(args)
-    args.username = unicode(commands_util.prompt_if_not_set(args.username, "Username:"))
+    args.username = six.text_type(commands_util.prompt_if_not_set(args.username, "Username:"))
     args.password = commands_util.prompt_if_not_set(args.password, "Password:",True)
     args.email = commands_util.prompt_if_not_set(args.email, "Email:")

View File

@@ -18,6 +18,8 @@ import argparse
 import logging
 import os

+import six
+
 from mediagoblin import mg_globals as mgg
 from mediagoblin.processing import (
     BadMediaFail, FilenameBuilder,
@@ -39,7 +41,7 @@ def sniff_handler(media_file, filename):
     try:
         data = discover(media_file.name)
     except Exception as e:
-        _log.info(unicode(e))
+        _log.info(six.text_type(e))
         return None
     if data and data.get_audio_streams() and not data.get_video_streams():
         return MEDIA_TYPE

View File

@@ -19,6 +19,8 @@ import os.path
 import logging
 import datetime

+import six
+
 from mediagoblin import mg_globals as mgg
 from mediagoblin.processing import (
     FilenameBuilder, BaseProcessingFail,
@@ -52,8 +54,8 @@ def sniffer(media_file):
         data = transcoders.discover(media_file.name)
     except Exception as e:
         # this is usually GLib.GError, but we don't really care which one
-        _log.warning(u'GStreamer: {0}'.format(unicode(e)))
-        raise MissingComponents(u'GStreamer: {0}'.format(unicode(e)))
+        _log.warning(u'GStreamer: {0}'.format(six.text_type(e)))
+        raise MissingComponents(u'GStreamer: {0}'.format(six.text_type(e)))
     _log.debug('Discovered: {0}'.format(data))

     if not data.get_video_streams():
@@ -110,7 +112,7 @@ def get_tags(stream_info):
                     dt.get_microsecond()).isoformat()
     for k, v in tags.items():
         # types below are accepted by json; others must not present
-        if not isinstance(v, (dict, list, basestring, int, float, bool,
+        if not isinstance(v, (dict, list, six.string_types, int, float, bool,
                               type(None))):
             del tags[k]
     return dict(tags)

View File

@@ -13,6 +13,9 @@
 #
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import six
+
 from mediagoblin.db.models import MediaEntry, User
 from mediagoblin.plugins.archivalook.models import FeaturedMedia
 from mediagoblin.tools.translate import lazy_pass_to_ugettext as _
@@ -53,7 +56,7 @@ def parse_url(url):
        who uploaded the piece of media, slug is
        the media entry's url slug.
     """
-    url = unicode(url)
+    url = six.text_type(url)
     u_end, m_start, m_end, end = (url.find('/u/') + 3,
                                   url.find('/m/'),
                                   url.find('/m/') + 3,
@@ -84,7 +87,7 @@ def split_featured_media_list(featured_media):
        or tertiary)
     """
-    featured_media = unicode(featured_media)
+    featured_media = six.text_type(featured_media)
     featured_media_list = featured_media.split("\n")
     display_type = 0
     media_already_featured = []