From e158c4842b123944137b985e5d436ecebd2f51f4 Mon Sep 17 00:00:00 2001 From: James Taylor Date: Fri, 15 Feb 2019 20:27:37 -0800 Subject: [PATCH 01/47] subscriptions: basic database code --- youtube/subscriptions.py | 82 ++++++++++++++++++++++++++++++++++------ 1 file changed, 70 insertions(+), 12 deletions(-) diff --git a/youtube/subscriptions.py b/youtube/subscriptions.py index 47f1ea3..39957bf 100644 --- a/youtube/subscriptions.py +++ b/youtube/subscriptions.py @@ -1,18 +1,76 @@ -import urllib +from youtube import common, settings +import sqlite3 +import os -with open("subscriptions.txt", 'r', encoding='utf-8') as file: - subscriptions = file.read() - -# Line format: "channel_id channel_name" -# Example: -# UCYO_jab_esuFRV4b17AJtAw 3Blue1Brown +# https://stackabuse.com/a-sqlite-tutorial-with-python/ -subscriptions = ((line[0:24], line[25: ]) for line in subscriptions.splitlines()) +database_path = os.path.join(settings.data_dir, "subscriptions.sqlite") -def get_new_videos(): - for channel_id, channel_name in subscriptions: - +def open_database(): + if not os.path.exists(settings.data_dir): + os.makedirs(settings.data_dir) + connection = sqlite3.connect(database_path) + # Create tables if they don't exist + try: + cursor = connection.cursor() + cursor.execute('''CREATE TABLE IF NOT EXISTS subscribed_channels ( + id integer PRIMARY KEY, + channel_id text NOT NULL, + channel_name text NOT NULL, + time_last_checked integer + )''') + cursor.execute('''CREATE TABLE IF NOT EXISTS videos ( + id integer PRIMARY KEY, + uploader_id integer NOT NULL REFERENCES subscribed_channels(id) ON UPDATE CASCADE ON DELETE CASCADE, + video_id text NOT NULL, + title text NOT NULL, + time_published integer NOT NULL, + description text, + )''') + connection.commit() + except: + connection.rollback() + connection.close() + raise + return connection -def get_subscriptions_page(): +def _subscribe(channel_id, channel_name): + connection = open_database() + try: + cursor = connection.cursor() + cursor.execute("INSERT INTO subscribed_channels (channel_id, name) VALUES (?, ?)", (channel_id, channel_name)) + connection.commit() + except: + connection.rollback() + raise + finally: + connection.close() + +def _unsubscribe(channel_id): + connection = open_database() + try: + cursor = connection.cursor() + cursor.execute("DELETE FROM subscribed_channels WHERE channel_id=?", (channel_id, )) + connection.commit() + except: + connection.rollback() + raise + finally: + connection.close() + +def _get_videos(number, offset): + connection = open_database() + try: + cursor = connection.cursor() + cursor.execute('''SELECT video_id, title, time_published, description, channel_id, channel_name + FROM videos + INNER JOIN subscribed_channels on videos.uploader_id = subscribed_channels.id + ORDER BY time_published DESC + LIMIT ? 
OFFSET ?''', number, offset) + except: + connection.rollback() + raise + finally: + connection.close() From c65df7d27df64049e1597e245758f70e808173e1 Mon Sep 17 00:00:00 2001 From: James Taylor Date: Sat, 16 Feb 2019 14:00:06 -0800 Subject: [PATCH 02/47] subscriptions: Basic new videos checking function for channel --- youtube/subscriptions.py | 103 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 102 insertions(+), 1 deletion(-) diff --git a/youtube/subscriptions.py b/youtube/subscriptions.py index 39957bf..0d31bd4 100644 --- a/youtube/subscriptions.py +++ b/youtube/subscriptions.py @@ -1,6 +1,14 @@ -from youtube import common, settings +from youtube import common, settings, channel import sqlite3 import os +import secrets +import datetime + +# so as to not completely break on people who have updated but don't know of new dependency +try: + import atoma +except ModuleNotFoundError: + print('Error: atoma not installed, subscriptions will not work') # https://stackabuse.com/a-sqlite-tutorial-with-python/ @@ -74,3 +82,96 @@ def _get_videos(number, offset): raise finally: connection.close() + + + +units = { + 'year': 31536000, # 365*24*3600 + 'month': 2592000, # 30*24*3600 + 'week': 604800, # 7*24*3600 + 'day': 86400, # 24*3600 + 'hour': 3600, + 'minute': 60, + 'second': 1, +} +def youtube_timestamp_to_posix(dumb_timestamp): + ''' Given a dumbed down timestamp such as 1 year ago, 3 hours ago, + approximates the unix time (seconds since 1/1/1970) ''' + dumb_timestamp = dumb_timestamp.lower() + now = time.time() + if dumb_timestamp == "just now": + return now + split = dumb_timestamp.split(' ') + number, unit = int(split[0]), split[1] + if number > 1: + unit = unit[:-1] # remove s from end + return now - number*units[unit] + + +weekdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun') +months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec') +def _get_upstream_videos(channel_id, channel_name, time_last_checked): + feed_url = "https://www.youtube.com/feeds/videos.xml?channel_id=" + channel_id + headers = {} + + # randomly change time_last_checked up to one day earlier to make tracking harder + time_last_checked = time_last_checked - secrets.randbelow(24*3600) + + # If-Modified-Since header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Modified-Since + struct_time = time.gmtime(time_last_checked) + weekday = weekdays[struct_time.tm_wday] # dumb requirement + month = months[struct_time.tm_mon - 1] + headers['If-Modified-Since'] = time.strftime(weekday + ', %d ' + month + ' %Y %H:%M:%S GMT', struct_time) + print(headers['If-Modified-Since']) + + + headers['User-Agent'] = 'Python-urllib' # Don't leak python version + headers['Accept-Encoding'] = 'gzip, br' + req = urllib.request.Request(url, headers=headers) + if settings.route_tor: + opener = urllib.request.build_opener(sockshandler.SocksiPyHandler(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150)) + else: + opener = urllib.request.build_opener() + response = opener.open(req, timeout=15) + + + if response.getcode == '304': + print('No new videos for ' + channel_id) + return [] + + + content = response.read() + print('Retrieved videos for ' + channel_id) + content = common.decode_content(content, response.getheader('Content-Encoding', default='identity')) + + + feed = atoma.parse_atom_bytes(content) + atom_videos = {} + for entry in feed.entries: + video_id = entry.id_[9:] # example of id_: yt:video:q6EoRBvdVPQ + + # standard names used in this program for purposes of html templating + 
atom_videos[video_id] = { + 'title': entry.title.value, + 'author': entry.authors[0].name, + #'description': '', # Not supported by atoma + #'duration': '', # Youtube's atom feeds don't provide it.. very frustrating + 'published': entry.published.strftime('%m/%d/%Y'), + 'time_published': int(entry.published.timestamp()), + } + + + # final list + videos = [] + + # Now check channel page to retrieve missing information for videos + json_channel_videos = channel.get_grid_items(channel.get_channel_tab(channel_id)[1]['response']) + for json_video in json_channel_videos: + info = renderer_info(json_video) + if info['id'] in atom_videos: + info.update(atom_videos[info['id']]) + else: + info['author'] = channel_name + info['time published'] = youtube_timestamp_to_posix(info['published']) + videos.append(info) + return videos From 4a54c4fe301f37ae63e98defe8240a5d526361c6 Mon Sep 17 00:00:00 2001 From: James Taylor Date: Sat, 16 Feb 2019 16:11:53 -0800 Subject: [PATCH 03/47] subscriptions: store video duration in database --- youtube/subscriptions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/youtube/subscriptions.py b/youtube/subscriptions.py index 0d31bd4..a0f7e48 100644 --- a/youtube/subscriptions.py +++ b/youtube/subscriptions.py @@ -33,6 +33,7 @@ def open_database(): uploader_id integer NOT NULL REFERENCES subscribed_channels(id) ON UPDATE CASCADE ON DELETE CASCADE, video_id text NOT NULL, title text NOT NULL, + duration text, time_published integer NOT NULL, description text, )''') @@ -72,7 +73,7 @@ def _get_videos(number, offset): connection = open_database() try: cursor = connection.cursor() - cursor.execute('''SELECT video_id, title, time_published, description, channel_id, channel_name + cursor.execute('''SELECT video_id, title, duration, time_published, description, channel_id, channel_name FROM videos INNER JOIN subscribed_channels on videos.uploader_id = subscribed_channels.id ORDER BY time_published DESC From 24642455d0dc5841ddec99f456598c4f763c1e8a Mon Sep 17 00:00:00 2001 From: James Taylor Date: Sat, 16 Feb 2019 16:56:46 -0800 Subject: [PATCH 04/47] subscriptions page --- youtube/subscriptions.py | 37 ++++++++++++++++++++++++++++++---- youtube/youtube.py | 4 +++- yt_subscriptions_template.html | 24 ++++++++++++++++++++++ 3 files changed, 60 insertions(+), 5 deletions(-) create mode 100644 yt_subscriptions_template.html diff --git a/youtube/subscriptions.py b/youtube/subscriptions.py index a0f7e48..82916dd 100644 --- a/youtube/subscriptions.py +++ b/youtube/subscriptions.py @@ -1,4 +1,6 @@ -from youtube import common, settings, channel +from youtube import common, channel +import settings +from string import Template import sqlite3 import os import secrets @@ -10,6 +12,10 @@ try: except ModuleNotFoundError: print('Error: atoma not installed, subscriptions will not work') +with open('yt_subscriptions_template.html', 'r', encoding='utf-8') as f: + subscriptions_template = Template(f.read()) + + # https://stackabuse.com/a-sqlite-tutorial-with-python/ database_path = os.path.join(settings.data_dir, "subscriptions.sqlite") @@ -35,7 +41,7 @@ def open_database(): title text NOT NULL, duration text, time_published integer NOT NULL, - description text, + description text )''') connection.commit() except: @@ -73,11 +79,19 @@ def _get_videos(number, offset): connection = open_database() try: cursor = connection.cursor() - cursor.execute('''SELECT video_id, title, duration, time_published, description, channel_id, channel_name + db_videos = cursor.execute('''SELECT 
video_id, title, duration, channel_name FROM videos INNER JOIN subscribed_channels on videos.uploader_id = subscribed_channels.id ORDER BY time_published DESC - LIMIT ? OFFSET ?''', number, offset) + LIMIT ? OFFSET ?''', (number, offset)) + + for db_video in db_videos: + yield { + 'id': db_video[0], + 'title': db_video[1], + 'duration': db_video[2], + 'author': db_video[3], + } except: connection.rollback() raise @@ -176,3 +190,18 @@ def _get_upstream_videos(channel_id, channel_name, time_last_checked): info['time published'] = youtube_timestamp_to_posix(info['published']) videos.append(info) return videos + +def get_subscriptions_page(env, start_response): + items_html = '''''' + + start_response('200 OK', [('Content-type','text/html'),]) + return subscriptions_template.substitute( + header = common.get_header(), + items = items_html, + page_buttons = '', + ).encode('utf-8') + diff --git a/youtube/youtube.py b/youtube/youtube.py index b6b12fb..ad73a6e 100644 --- a/youtube/youtube.py +++ b/youtube/youtube.py @@ -1,7 +1,7 @@ import mimetypes import urllib.parse import os -from youtube import local_playlist, watch, search, playlist, channel, comments, common, post_comment, accounts +from youtube import local_playlist, watch, search, playlist, channel, comments, common, post_comment, accounts, subscriptions import settings YOUTUBE_FILES = ( "/shared.css", @@ -24,6 +24,8 @@ get_handlers = { 'post_comment': post_comment.get_post_comment_page, 'delete_comment': post_comment.get_delete_comment_page, 'login': accounts.get_account_login_page, + + 'subscriptions': subscriptions.get_subscriptions_page, } post_handlers = { 'edit_playlist': local_playlist.edit_playlist, diff --git a/yt_subscriptions_template.html b/yt_subscriptions_template.html new file mode 100644 index 0000000..8477d25 --- /dev/null +++ b/yt_subscriptions_template.html @@ -0,0 +1,24 @@ + + + + + Subscriptions + + + + + + +$header +
+$items + +
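
Two details in the patches above are easy to get wrong. First, sqlite3 accepts bind values only as a single sequence, which is why PATCH 04's hunk passes the `(number, offset)` tuple where PATCH 01 passed `number, offset` as separate arguments; note also that the schema declares a `channel_name` column while PATCH 01's `_subscribe` inserts into `name`. Second, the If-Modified-Since header built in PATCH 02 must use English weekday and month names regardless of locale, which is what its `weekdays`/`months` tuples work around. The standalone sketch below illustrates both points against a throwaway in-memory database; the table layout mirrors PATCH 01 and 03, but the sample channel and the use of email.utils.formatdate as an alternative header formatter are illustrative assumptions, not code from the patch series.

# Sketch only -- mirrors the subscriptions schema, not part of the patches.
import sqlite3
import time
import email.utils

connection = sqlite3.connect(':memory:')
cursor = connection.cursor()
cursor.execute('''CREATE TABLE subscribed_channels (
                      id integer PRIMARY KEY,
                      channel_id text NOT NULL,
                      channel_name text NOT NULL,
                      time_last_checked integer
                  )''')
cursor.execute('''CREATE TABLE videos (
                      id integer PRIMARY KEY,
                      uploader_id integer NOT NULL REFERENCES subscribed_channels(id),
                      video_id text NOT NULL,
                      title text NOT NULL,
                      duration text,
                      time_published integer NOT NULL,
                      description text
                  )''')

# sqlite3 wants the parameters as one sequence: a single tuple, not separate arguments.
cursor.execute('INSERT INTO subscribed_channels (channel_id, channel_name) VALUES (?, ?)',
               ('UCYO_jab_esuFRV4b17AJtAw', '3Blue1Brown'))
cursor.execute('SELECT channel_id, channel_name FROM subscribed_channels LIMIT ? OFFSET ?',
               (10, 0))
print(cursor.fetchall())

# HTTP dates always use English names regardless of locale; email.utils.formatdate
# with usegmt=True yields a value usable as an If-Modified-Since header.
time_last_checked = time.time() - 24 * 3600
print(email.utils.formatdate(time_last_checked, usegmt=True))
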
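PATCH 02 above already calls atoma.parse_atom_bytes on a channel's Atom feed and reads entry.id_, entry.title.value, entry.authors[0].name and entry.published; the vendored atoma package that provides that API arrives in the next patch. Below is a minimal sketch of what those parsed objects look like, fed a hand-written document: the XML itself is an illustrative assumption, not a real YouTube feed response.

import atoma  # the copy bundled under python/atoma in the next patch

feed_xml = b'''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
  <id>yt:channel:UCYO_jab_esuFRV4b17AJtAw</id>
  <entry>
    <id>yt:video:q6EoRBvdVPQ</id>
    <title>Example upload</title>
    <published>2019-02-16T14:00:06+00:00</published>
    <author><name>3Blue1Brown</name></author>
  </entry>
</feed>'''

feed = atoma.parse_atom_bytes(feed_xml)
entry = feed.entries[0]
print(entry.id_[9:])                     # 'q6EoRBvdVPQ' -- id_ looks like yt:video:q6EoRBvdVPQ
print(entry.title.value)                 # 'Example upload'
print(entry.authors[0].name)             # '3Blue1Brown'
print(int(entry.published.timestamp()))  # unix time, as stored in the time_published column
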
+ + From 3905e7e64059b45479894ba1fdfb0ef9cef64475 Mon Sep 17 00:00:00 2001 From: James Taylor Date: Sat, 16 Feb 2019 23:41:52 -0800 Subject: [PATCH 05/47] basic subscriptions system --- python/atoma/__init__.py | 12 + python/atoma/atom.py | 284 +++ python/atoma/const.py | 1 + python/atoma/exceptions.py | 14 + python/atoma/json_feed.py | 223 ++ python/atoma/opml.py | 107 + python/atoma/rss.py | 221 ++ python/atoma/simple.py | 224 ++ python/atoma/utils.py | 84 + python/attr/__init__.py | 65 + python/attr/__init__.pyi | 252 ++ python/attr/_compat.py | 163 ++ python/attr/_config.py | 23 + python/attr/_funcs.py | 290 +++ python/attr/_make.py | 2034 +++++++++++++++++ python/attr/converters.py | 78 + python/attr/converters.pyi | 12 + python/attr/exceptions.py | 57 + python/attr/exceptions.pyi | 7 + python/attr/filters.py | 52 + python/attr/filters.pyi | 5 + python/attr/py.typed | 0 python/attr/validators.py | 170 ++ python/attr/validators.pyi | 14 + python/dateutil/__init__.py | 2 + python/dateutil/_common.py | 34 + python/dateutil/_version.py | 10 + python/dateutil/easter.py | 89 + python/dateutil/parser.py | 1374 +++++++++++ python/dateutil/relativedelta.py | 549 +++++ python/dateutil/rrule.py | 1610 +++++++++++++ python/dateutil/tz/__init__.py | 5 + python/dateutil/tz/_common.py | 394 ++++ python/dateutil/tz/tz.py | 1511 ++++++++++++ python/dateutil/tz/win.py | 332 +++ python/dateutil/tzwin.py | 2 + python/dateutil/zoneinfo/__init__.py | 183 ++ .../zoneinfo/dateutil-zoneinfo.tar.gz | Bin 0 -> 138881 bytes python/dateutil/zoneinfo/rebuild.py | 52 + python/defusedxml/ElementTree.py | 112 + python/defusedxml/__init__.py | 45 + python/defusedxml/cElementTree.py | 30 + python/defusedxml/common.py | 120 + python/defusedxml/expatbuilder.py | 110 + python/defusedxml/expatreader.py | 59 + python/defusedxml/lxml.py | 153 ++ python/defusedxml/minidom.py | 42 + python/defusedxml/pulldom.py | 34 + python/defusedxml/sax.py | 49 + python/defusedxml/xmlrpc.py | 157 ++ python/six.py | 891 ++++++++ youtube/channel.py | 4 + youtube/subscriptions.py | 71 +- youtube/youtube.py | 2 + yt_channel_about_template.html | 21 +- yt_channel_items_template.html | 20 +- yt_subscriptions_template.html | 4 + 57 files changed, 12440 insertions(+), 23 deletions(-) create mode 100644 python/atoma/__init__.py create mode 100644 python/atoma/atom.py create mode 100644 python/atoma/const.py create mode 100644 python/atoma/exceptions.py create mode 100644 python/atoma/json_feed.py create mode 100644 python/atoma/opml.py create mode 100644 python/atoma/rss.py create mode 100644 python/atoma/simple.py create mode 100644 python/atoma/utils.py create mode 100644 python/attr/__init__.py create mode 100644 python/attr/__init__.pyi create mode 100644 python/attr/_compat.py create mode 100644 python/attr/_config.py create mode 100644 python/attr/_funcs.py create mode 100644 python/attr/_make.py create mode 100644 python/attr/converters.py create mode 100644 python/attr/converters.pyi create mode 100644 python/attr/exceptions.py create mode 100644 python/attr/exceptions.pyi create mode 100644 python/attr/filters.py create mode 100644 python/attr/filters.pyi create mode 100644 python/attr/py.typed create mode 100644 python/attr/validators.py create mode 100644 python/attr/validators.pyi create mode 100644 python/dateutil/__init__.py create mode 100644 python/dateutil/_common.py create mode 100644 python/dateutil/_version.py create mode 100644 python/dateutil/easter.py create mode 100644 python/dateutil/parser.py create mode 100644 
python/dateutil/relativedelta.py create mode 100644 python/dateutil/rrule.py create mode 100644 python/dateutil/tz/__init__.py create mode 100644 python/dateutil/tz/_common.py create mode 100644 python/dateutil/tz/tz.py create mode 100644 python/dateutil/tz/win.py create mode 100644 python/dateutil/tzwin.py create mode 100644 python/dateutil/zoneinfo/__init__.py create mode 100644 python/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz create mode 100644 python/dateutil/zoneinfo/rebuild.py create mode 100644 python/defusedxml/ElementTree.py create mode 100644 python/defusedxml/__init__.py create mode 100644 python/defusedxml/cElementTree.py create mode 100644 python/defusedxml/common.py create mode 100644 python/defusedxml/expatbuilder.py create mode 100644 python/defusedxml/expatreader.py create mode 100644 python/defusedxml/lxml.py create mode 100644 python/defusedxml/minidom.py create mode 100644 python/defusedxml/pulldom.py create mode 100644 python/defusedxml/sax.py create mode 100644 python/defusedxml/xmlrpc.py create mode 100644 python/six.py diff --git a/python/atoma/__init__.py b/python/atoma/__init__.py new file mode 100644 index 0000000..0768081 --- /dev/null +++ b/python/atoma/__init__.py @@ -0,0 +1,12 @@ +from .atom import parse_atom_file, parse_atom_bytes +from .rss import parse_rss_file, parse_rss_bytes +from .json_feed import ( + parse_json_feed, parse_json_feed_file, parse_json_feed_bytes +) +from .opml import parse_opml_file, parse_opml_bytes +from .exceptions import ( + FeedParseError, FeedDocumentError, FeedXMLError, FeedJSONError +) +from .const import VERSION + +__version__ = VERSION diff --git a/python/atoma/atom.py b/python/atoma/atom.py new file mode 100644 index 0000000..d4e676c --- /dev/null +++ b/python/atoma/atom.py @@ -0,0 +1,284 @@ +from datetime import datetime +import enum +from io import BytesIO +from typing import Optional, List +from xml.etree.ElementTree import Element + +import attr + +from .utils import ( + parse_xml, get_child, get_text, get_datetime, FeedParseError, ns +) + + +class AtomTextType(enum.Enum): + text = "text" + html = "html" + xhtml = "xhtml" + + +@attr.s +class AtomTextConstruct: + text_type: str = attr.ib() + lang: Optional[str] = attr.ib() + value: str = attr.ib() + + +@attr.s +class AtomEntry: + title: AtomTextConstruct = attr.ib() + id_: str = attr.ib() + + # Should be mandatory but many feeds use published instead + updated: Optional[datetime] = attr.ib() + + authors: List['AtomPerson'] = attr.ib() + contributors: List['AtomPerson'] = attr.ib() + links: List['AtomLink'] = attr.ib() + categories: List['AtomCategory'] = attr.ib() + published: Optional[datetime] = attr.ib() + rights: Optional[AtomTextConstruct] = attr.ib() + summary: Optional[AtomTextConstruct] = attr.ib() + content: Optional[AtomTextConstruct] = attr.ib() + source: Optional['AtomFeed'] = attr.ib() + + +@attr.s +class AtomFeed: + title: Optional[AtomTextConstruct] = attr.ib() + id_: str = attr.ib() + + # Should be mandatory but many feeds do not include it + updated: Optional[datetime] = attr.ib() + + authors: List['AtomPerson'] = attr.ib() + contributors: List['AtomPerson'] = attr.ib() + links: List['AtomLink'] = attr.ib() + categories: List['AtomCategory'] = attr.ib() + generator: Optional['AtomGenerator'] = attr.ib() + subtitle: Optional[AtomTextConstruct] = attr.ib() + rights: Optional[AtomTextConstruct] = attr.ib() + icon: Optional[str] = attr.ib() + logo: Optional[str] = attr.ib() + + entries: List[AtomEntry] = attr.ib() + + +@attr.s +class AtomPerson: + name: str = 
attr.ib() + uri: Optional[str] = attr.ib() + email: Optional[str] = attr.ib() + + +@attr.s +class AtomLink: + href: str = attr.ib() + rel: Optional[str] = attr.ib() + type_: Optional[str] = attr.ib() + hreflang: Optional[str] = attr.ib() + title: Optional[str] = attr.ib() + length: Optional[int] = attr.ib() + + +@attr.s +class AtomCategory: + term: str = attr.ib() + scheme: Optional[str] = attr.ib() + label: Optional[str] = attr.ib() + + +@attr.s +class AtomGenerator: + name: str = attr.ib() + uri: Optional[str] = attr.ib() + version: Optional[str] = attr.ib() + + +def _get_generator(element: Element, name, + optional: bool=True) -> Optional[AtomGenerator]: + child = get_child(element, name, optional) + if child is None: + return None + + return AtomGenerator( + child.text.strip(), + child.attrib.get('uri'), + child.attrib.get('version'), + ) + + +def _get_text_construct(element: Element, name, + optional: bool=True) -> Optional[AtomTextConstruct]: + child = get_child(element, name, optional) + if child is None: + return None + + try: + text_type = AtomTextType(child.attrib['type']) + except KeyError: + text_type = AtomTextType.text + + try: + lang = child.lang + except AttributeError: + lang = None + + if child.text is None: + if optional: + return None + + raise FeedParseError( + 'Could not parse atom feed: "{}" text is required but is empty' + .format(name) + ) + + return AtomTextConstruct( + text_type, + lang, + child.text.strip() + ) + + +def _get_person(element: Element) -> Optional[AtomPerson]: + try: + return AtomPerson( + get_text(element, 'feed:name', optional=False), + get_text(element, 'feed:uri'), + get_text(element, 'feed:email') + ) + except FeedParseError: + return None + + +def _get_link(element: Element) -> AtomLink: + length = element.attrib.get('length') + length = int(length) if length else None + return AtomLink( + element.attrib['href'], + element.attrib.get('rel'), + element.attrib.get('type'), + element.attrib.get('hreflang'), + element.attrib.get('title'), + length + ) + + +def _get_category(element: Element) -> AtomCategory: + return AtomCategory( + element.attrib['term'], + element.attrib.get('scheme'), + element.attrib.get('label'), + ) + + +def _get_entry(element: Element, + default_authors: List[AtomPerson]) -> AtomEntry: + root = element + + # Mandatory + title = _get_text_construct(root, 'feed:title') + id_ = get_text(root, 'feed:id') + + # Optional + try: + source = _parse_atom(get_child(root, 'feed:source', optional=False), + parse_entries=False) + except FeedParseError: + source = None + source_authors = [] + else: + source_authors = source.authors + + authors = [_get_person(e) + for e in root.findall('feed:author', ns)] or default_authors + authors = [a for a in authors if a is not None] + authors = authors or default_authors or source_authors + + contributors = [_get_person(e) + for e in root.findall('feed:contributor', ns) if e] + contributors = [c for c in contributors if c is not None] + + links = [_get_link(e) for e in root.findall('feed:link', ns)] + categories = [_get_category(e) for e in root.findall('feed:category', ns)] + + updated = get_datetime(root, 'feed:updated') + published = get_datetime(root, 'feed:published') + rights = _get_text_construct(root, 'feed:rights') + summary = _get_text_construct(root, 'feed:summary') + content = _get_text_construct(root, 'feed:content') + + return AtomEntry( + title, + id_, + updated, + authors, + contributors, + links, + categories, + published, + rights, + summary, + content, + source + ) + + +def 
_parse_atom(root: Element, parse_entries: bool=True) -> AtomFeed: + # Mandatory + id_ = get_text(root, 'feed:id', optional=False) + + # Optional + title = _get_text_construct(root, 'feed:title') + updated = get_datetime(root, 'feed:updated') + authors = [_get_person(e) + for e in root.findall('feed:author', ns) if e] + authors = [a for a in authors if a is not None] + contributors = [_get_person(e) + for e in root.findall('feed:contributor', ns) if e] + contributors = [c for c in contributors if c is not None] + links = [_get_link(e) + for e in root.findall('feed:link', ns)] + categories = [_get_category(e) + for e in root.findall('feed:category', ns)] + + generator = _get_generator(root, 'feed:generator') + subtitle = _get_text_construct(root, 'feed:subtitle') + rights = _get_text_construct(root, 'feed:rights') + icon = get_text(root, 'feed:icon') + logo = get_text(root, 'feed:logo') + + if parse_entries: + entries = [_get_entry(e, authors) + for e in root.findall('feed:entry', ns)] + else: + entries = [] + + atom_feed = AtomFeed( + title, + id_, + updated, + authors, + contributors, + links, + categories, + generator, + subtitle, + rights, + icon, + logo, + entries + ) + return atom_feed + + +def parse_atom_file(filename: str) -> AtomFeed: + """Parse an Atom feed from a local XML file.""" + root = parse_xml(filename).getroot() + return _parse_atom(root) + + +def parse_atom_bytes(data: bytes) -> AtomFeed: + """Parse an Atom feed from a byte-string containing XML data.""" + root = parse_xml(BytesIO(data)).getroot() + return _parse_atom(root) diff --git a/python/atoma/const.py b/python/atoma/const.py new file mode 100644 index 0000000..d52d0f6 --- /dev/null +++ b/python/atoma/const.py @@ -0,0 +1 @@ +VERSION = '0.0.13' diff --git a/python/atoma/exceptions.py b/python/atoma/exceptions.py new file mode 100644 index 0000000..88170c5 --- /dev/null +++ b/python/atoma/exceptions.py @@ -0,0 +1,14 @@ +class FeedParseError(Exception): + """Document is an invalid feed.""" + + +class FeedDocumentError(Exception): + """Document is not a supported file.""" + + +class FeedXMLError(FeedDocumentError): + """Document is not valid XML.""" + + +class FeedJSONError(FeedDocumentError): + """Document is not valid JSON.""" diff --git a/python/atoma/json_feed.py b/python/atoma/json_feed.py new file mode 100644 index 0000000..410ff4a --- /dev/null +++ b/python/atoma/json_feed.py @@ -0,0 +1,223 @@ +from datetime import datetime, timedelta +import json +from typing import Optional, List + +import attr + +from .exceptions import FeedParseError, FeedJSONError +from .utils import try_parse_date + + +@attr.s +class JSONFeedAuthor: + + name: Optional[str] = attr.ib() + url: Optional[str] = attr.ib() + avatar: Optional[str] = attr.ib() + + +@attr.s +class JSONFeedAttachment: + + url: str = attr.ib() + mime_type: str = attr.ib() + title: Optional[str] = attr.ib() + size_in_bytes: Optional[int] = attr.ib() + duration: Optional[timedelta] = attr.ib() + + +@attr.s +class JSONFeedItem: + + id_: str = attr.ib() + url: Optional[str] = attr.ib() + external_url: Optional[str] = attr.ib() + title: Optional[str] = attr.ib() + content_html: Optional[str] = attr.ib() + content_text: Optional[str] = attr.ib() + summary: Optional[str] = attr.ib() + image: Optional[str] = attr.ib() + banner_image: Optional[str] = attr.ib() + date_published: Optional[datetime] = attr.ib() + date_modified: Optional[datetime] = attr.ib() + author: Optional[JSONFeedAuthor] = attr.ib() + + tags: List[str] = attr.ib() + attachments: List[JSONFeedAttachment] = 
attr.ib() + + +@attr.s +class JSONFeed: + + version: str = attr.ib() + title: str = attr.ib() + home_page_url: Optional[str] = attr.ib() + feed_url: Optional[str] = attr.ib() + description: Optional[str] = attr.ib() + user_comment: Optional[str] = attr.ib() + next_url: Optional[str] = attr.ib() + icon: Optional[str] = attr.ib() + favicon: Optional[str] = attr.ib() + author: Optional[JSONFeedAuthor] = attr.ib() + expired: bool = attr.ib() + + items: List[JSONFeedItem] = attr.ib() + + +def _get_items(root: dict) -> List[JSONFeedItem]: + rv = [] + items = root.get('items', []) + if not items: + return rv + + for item in items: + rv.append(_get_item(item)) + + return rv + + +def _get_item(item_dict: dict) -> JSONFeedItem: + return JSONFeedItem( + id_=_get_text(item_dict, 'id', optional=False), + url=_get_text(item_dict, 'url'), + external_url=_get_text(item_dict, 'external_url'), + title=_get_text(item_dict, 'title'), + content_html=_get_text(item_dict, 'content_html'), + content_text=_get_text(item_dict, 'content_text'), + summary=_get_text(item_dict, 'summary'), + image=_get_text(item_dict, 'image'), + banner_image=_get_text(item_dict, 'banner_image'), + date_published=_get_datetime(item_dict, 'date_published'), + date_modified=_get_datetime(item_dict, 'date_modified'), + author=_get_author(item_dict), + tags=_get_tags(item_dict, 'tags'), + attachments=_get_attachments(item_dict, 'attachments') + ) + + +def _get_attachments(root, name) -> List[JSONFeedAttachment]: + rv = list() + for attachment_dict in root.get(name, []): + rv.append(JSONFeedAttachment( + _get_text(attachment_dict, 'url', optional=False), + _get_text(attachment_dict, 'mime_type', optional=False), + _get_text(attachment_dict, 'title'), + _get_int(attachment_dict, 'size_in_bytes'), + _get_duration(attachment_dict, 'duration_in_seconds') + )) + return rv + + +def _get_tags(root, name) -> List[str]: + tags = root.get(name, []) + return [tag for tag in tags if isinstance(tag, str)] + + +def _get_datetime(root: dict, name, optional: bool=True) -> Optional[datetime]: + text = _get_text(root, name, optional) + if text is None: + return None + + return try_parse_date(text) + + +def _get_expired(root: dict) -> bool: + if root.get('expired') is True: + return True + + return False + + +def _get_author(root: dict) -> Optional[JSONFeedAuthor]: + author_dict = root.get('author') + if not author_dict: + return None + + rv = JSONFeedAuthor( + name=_get_text(author_dict, 'name'), + url=_get_text(author_dict, 'url'), + avatar=_get_text(author_dict, 'avatar'), + ) + if rv.name is None and rv.url is None and rv.avatar is None: + return None + + return rv + + +def _get_int(root: dict, name: str, optional: bool=True) -> Optional[int]: + rv = root.get(name) + if not optional and rv is None: + raise FeedParseError('Could not parse feed: "{}" int is required but ' + 'is empty'.format(name)) + + if optional and rv is None: + return None + + if not isinstance(rv, int): + raise FeedParseError('Could not parse feed: "{}" is not an int' + .format(name)) + + return rv + + +def _get_duration(root: dict, name: str, + optional: bool=True) -> Optional[timedelta]: + duration = _get_int(root, name, optional) + if duration is None: + return None + + return timedelta(seconds=duration) + + +def _get_text(root: dict, name: str, optional: bool=True) -> Optional[str]: + rv = root.get(name) + if not optional and rv is None: + raise FeedParseError('Could not parse feed: "{}" text is required but ' + 'is empty'.format(name)) + + if optional and rv is None: + return None 
+ + if not isinstance(rv, str): + raise FeedParseError('Could not parse feed: "{}" is not a string' + .format(name)) + + return rv + + +def parse_json_feed(root: dict) -> JSONFeed: + return JSONFeed( + version=_get_text(root, 'version', optional=False), + title=_get_text(root, 'title', optional=False), + home_page_url=_get_text(root, 'home_page_url'), + feed_url=_get_text(root, 'feed_url'), + description=_get_text(root, 'description'), + user_comment=_get_text(root, 'user_comment'), + next_url=_get_text(root, 'next_url'), + icon=_get_text(root, 'icon'), + favicon=_get_text(root, 'favicon'), + author=_get_author(root), + expired=_get_expired(root), + items=_get_items(root) + ) + + +def parse_json_feed_file(filename: str) -> JSONFeed: + """Parse a JSON feed from a local json file.""" + with open(filename) as f: + try: + root = json.load(f) + except json.decoder.JSONDecodeError: + raise FeedJSONError('Not a valid JSON document') + + return parse_json_feed(root) + + +def parse_json_feed_bytes(data: bytes) -> JSONFeed: + """Parse a JSON feed from a byte-string containing JSON data.""" + try: + root = json.loads(data) + except json.decoder.JSONDecodeError: + raise FeedJSONError('Not a valid JSON document') + + return parse_json_feed(root) diff --git a/python/atoma/opml.py b/python/atoma/opml.py new file mode 100644 index 0000000..a73105e --- /dev/null +++ b/python/atoma/opml.py @@ -0,0 +1,107 @@ +from datetime import datetime +from io import BytesIO +from typing import Optional, List +from xml.etree.ElementTree import Element + +import attr + +from .utils import parse_xml, get_text, get_int, get_datetime + + +@attr.s +class OPMLOutline: + text: Optional[str] = attr.ib() + type: Optional[str] = attr.ib() + xml_url: Optional[str] = attr.ib() + description: Optional[str] = attr.ib() + html_url: Optional[str] = attr.ib() + language: Optional[str] = attr.ib() + title: Optional[str] = attr.ib() + version: Optional[str] = attr.ib() + + outlines: List['OPMLOutline'] = attr.ib() + + +@attr.s +class OPML: + title: Optional[str] = attr.ib() + owner_name: Optional[str] = attr.ib() + owner_email: Optional[str] = attr.ib() + date_created: Optional[datetime] = attr.ib() + date_modified: Optional[datetime] = attr.ib() + expansion_state: Optional[str] = attr.ib() + + vertical_scroll_state: Optional[int] = attr.ib() + window_top: Optional[int] = attr.ib() + window_left: Optional[int] = attr.ib() + window_bottom: Optional[int] = attr.ib() + window_right: Optional[int] = attr.ib() + + outlines: List[OPMLOutline] = attr.ib() + + +def _get_outlines(element: Element) -> List[OPMLOutline]: + rv = list() + + for outline in element.findall('outline'): + rv.append(OPMLOutline( + outline.attrib.get('text'), + outline.attrib.get('type'), + outline.attrib.get('xmlUrl'), + outline.attrib.get('description'), + outline.attrib.get('htmlUrl'), + outline.attrib.get('language'), + outline.attrib.get('title'), + outline.attrib.get('version'), + _get_outlines(outline) + )) + + return rv + + +def _parse_opml(root: Element) -> OPML: + head = root.find('head') + body = root.find('body') + + return OPML( + get_text(head, 'title'), + get_text(head, 'ownerName'), + get_text(head, 'ownerEmail'), + get_datetime(head, 'dateCreated'), + get_datetime(head, 'dateModified'), + get_text(head, 'expansionState'), + get_int(head, 'vertScrollState'), + get_int(head, 'windowTop'), + get_int(head, 'windowLeft'), + get_int(head, 'windowBottom'), + get_int(head, 'windowRight'), + outlines=_get_outlines(body) + ) + + +def parse_opml_file(filename: str) -> 
OPML: + """Parse an OPML document from a local XML file.""" + root = parse_xml(filename).getroot() + return _parse_opml(root) + + +def parse_opml_bytes(data: bytes) -> OPML: + """Parse an OPML document from a byte-string containing XML data.""" + root = parse_xml(BytesIO(data)).getroot() + return _parse_opml(root) + + +def get_feed_list(opml_obj: OPML) -> List[str]: + """Walk an OPML document to extract the list of feed it contains.""" + rv = list() + + def collect(obj): + for outline in obj.outlines: + if outline.type == 'rss' and outline.xml_url: + rv.append(outline.xml_url) + + if outline.outlines: + collect(outline) + + collect(opml_obj) + return rv diff --git a/python/atoma/rss.py b/python/atoma/rss.py new file mode 100644 index 0000000..f447a2f --- /dev/null +++ b/python/atoma/rss.py @@ -0,0 +1,221 @@ +from datetime import datetime +from io import BytesIO +from typing import Optional, List +from xml.etree.ElementTree import Element + +import attr + +from .utils import ( + parse_xml, get_child, get_text, get_int, get_datetime, FeedParseError +) + + +@attr.s +class RSSImage: + url: str = attr.ib() + title: Optional[str] = attr.ib() + link: str = attr.ib() + width: int = attr.ib() + height: int = attr.ib() + description: Optional[str] = attr.ib() + + +@attr.s +class RSSEnclosure: + url: str = attr.ib() + length: Optional[int] = attr.ib() + type: Optional[str] = attr.ib() + + +@attr.s +class RSSSource: + title: str = attr.ib() + url: Optional[str] = attr.ib() + + +@attr.s +class RSSItem: + title: Optional[str] = attr.ib() + link: Optional[str] = attr.ib() + description: Optional[str] = attr.ib() + author: Optional[str] = attr.ib() + categories: List[str] = attr.ib() + comments: Optional[str] = attr.ib() + enclosures: List[RSSEnclosure] = attr.ib() + guid: Optional[str] = attr.ib() + pub_date: Optional[datetime] = attr.ib() + source: Optional[RSSSource] = attr.ib() + + # Extension + content_encoded: Optional[str] = attr.ib() + + +@attr.s +class RSSChannel: + title: Optional[str] = attr.ib() + link: Optional[str] = attr.ib() + description: Optional[str] = attr.ib() + language: Optional[str] = attr.ib() + copyright: Optional[str] = attr.ib() + managing_editor: Optional[str] = attr.ib() + web_master: Optional[str] = attr.ib() + pub_date: Optional[datetime] = attr.ib() + last_build_date: Optional[datetime] = attr.ib() + categories: List[str] = attr.ib() + generator: Optional[str] = attr.ib() + docs: Optional[str] = attr.ib() + ttl: Optional[int] = attr.ib() + image: Optional[RSSImage] = attr.ib() + + items: List[RSSItem] = attr.ib() + + # Extension + content_encoded: Optional[str] = attr.ib() + + +def _get_image(element: Element, name, + optional: bool=True) -> Optional[RSSImage]: + child = get_child(element, name, optional) + if child is None: + return None + + return RSSImage( + get_text(child, 'url', optional=False), + get_text(child, 'title'), + get_text(child, 'link', optional=False), + get_int(child, 'width') or 88, + get_int(child, 'height') or 31, + get_text(child, 'description') + ) + + +def _get_source(element: Element, name, + optional: bool=True) -> Optional[RSSSource]: + child = get_child(element, name, optional) + if child is None: + return None + + return RSSSource( + child.text.strip(), + child.attrib.get('url'), + ) + + +def _get_enclosure(element: Element) -> RSSEnclosure: + length = element.attrib.get('length') + try: + length = int(length) + except (TypeError, ValueError): + length = None + + return RSSEnclosure( + element.attrib['url'], + length, + 
element.attrib.get('type'), + ) + + +def _get_link(element: Element) -> Optional[str]: + """Attempt to retrieve item link. + + Use the GUID as a fallback if it is a permalink. + """ + link = get_text(element, 'link') + if link is not None: + return link + + guid = get_child(element, 'guid') + if guid is not None and guid.attrib.get('isPermaLink') == 'true': + return get_text(element, 'guid') + + return None + + +def _get_item(element: Element) -> RSSItem: + root = element + + title = get_text(root, 'title') + link = _get_link(root) + description = get_text(root, 'description') + author = get_text(root, 'author') + categories = [e.text for e in root.findall('category')] + comments = get_text(root, 'comments') + enclosure = [_get_enclosure(e) for e in root.findall('enclosure')] + guid = get_text(root, 'guid') + pub_date = get_datetime(root, 'pubDate') + source = _get_source(root, 'source') + + content_encoded = get_text(root, 'content:encoded') + + return RSSItem( + title, + link, + description, + author, + categories, + comments, + enclosure, + guid, + pub_date, + source, + content_encoded + ) + + +def _parse_rss(root: Element) -> RSSChannel: + rss_version = root.get('version') + if rss_version != '2.0': + raise FeedParseError('Cannot process RSS feed version "{}"' + .format(rss_version)) + + root = root.find('channel') + + title = get_text(root, 'title') + link = get_text(root, 'link') + description = get_text(root, 'description') + language = get_text(root, 'language') + copyright = get_text(root, 'copyright') + managing_editor = get_text(root, 'managingEditor') + web_master = get_text(root, 'webMaster') + pub_date = get_datetime(root, 'pubDate') + last_build_date = get_datetime(root, 'lastBuildDate') + categories = [e.text for e in root.findall('category')] + generator = get_text(root, 'generator') + docs = get_text(root, 'docs') + ttl = get_int(root, 'ttl') + + image = _get_image(root, 'image') + items = [_get_item(e) for e in root.findall('item')] + + content_encoded = get_text(root, 'content:encoded') + + return RSSChannel( + title, + link, + description, + language, + copyright, + managing_editor, + web_master, + pub_date, + last_build_date, + categories, + generator, + docs, + ttl, + image, + items, + content_encoded + ) + + +def parse_rss_file(filename: str) -> RSSChannel: + """Parse an RSS feed from a local XML file.""" + root = parse_xml(filename).getroot() + return _parse_rss(root) + + +def parse_rss_bytes(data: bytes) -> RSSChannel: + """Parse an RSS feed from a byte-string containing XML data.""" + root = parse_xml(BytesIO(data)).getroot() + return _parse_rss(root) diff --git a/python/atoma/simple.py b/python/atoma/simple.py new file mode 100644 index 0000000..98bb3e1 --- /dev/null +++ b/python/atoma/simple.py @@ -0,0 +1,224 @@ +"""Simple API that abstracts away the differences between feed types.""" + +from datetime import datetime, timedelta +import html +import os +from typing import Optional, List, Tuple +import urllib.parse + +import attr + +from . 
import atom, rss, json_feed +from .exceptions import ( + FeedParseError, FeedDocumentError, FeedXMLError, FeedJSONError +) + + +@attr.s +class Attachment: + link: str = attr.ib() + mime_type: Optional[str] = attr.ib() + title: Optional[str] = attr.ib() + size_in_bytes: Optional[int] = attr.ib() + duration: Optional[timedelta] = attr.ib() + + +@attr.s +class Article: + id: str = attr.ib() + title: Optional[str] = attr.ib() + link: Optional[str] = attr.ib() + content: str = attr.ib() + published_at: Optional[datetime] = attr.ib() + updated_at: Optional[datetime] = attr.ib() + attachments: List[Attachment] = attr.ib() + + +@attr.s +class Feed: + title: str = attr.ib() + subtitle: Optional[str] = attr.ib() + link: Optional[str] = attr.ib() + updated_at: Optional[datetime] = attr.ib() + articles: List[Article] = attr.ib() + + +def _adapt_atom_feed(atom_feed: atom.AtomFeed) -> Feed: + articles = list() + for entry in atom_feed.entries: + if entry.content is not None: + content = entry.content.value + elif entry.summary is not None: + content = entry.summary.value + else: + content = '' + published_at, updated_at = _get_article_dates(entry.published, + entry.updated) + # Find article link and attachments + article_link = None + attachments = list() + for candidate_link in entry.links: + if candidate_link.rel in ('alternate', None): + article_link = candidate_link.href + elif candidate_link.rel == 'enclosure': + attachments.append(Attachment( + title=_get_attachment_title(candidate_link.title, + candidate_link.href), + link=candidate_link.href, + mime_type=candidate_link.type_, + size_in_bytes=candidate_link.length, + duration=None + )) + + if entry.title is None: + entry_title = None + elif entry.title.text_type in (atom.AtomTextType.html, + atom.AtomTextType.xhtml): + entry_title = html.unescape(entry.title.value).strip() + else: + entry_title = entry.title.value + + articles.append(Article( + entry.id_, + entry_title, + article_link, + content, + published_at, + updated_at, + attachments + )) + + # Find feed link + link = None + for candidate_link in atom_feed.links: + if candidate_link.rel == 'self': + link = candidate_link.href + break + + return Feed( + atom_feed.title.value if atom_feed.title else atom_feed.id_, + atom_feed.subtitle.value if atom_feed.subtitle else None, + link, + atom_feed.updated, + articles + ) + + +def _adapt_rss_channel(rss_channel: rss.RSSChannel) -> Feed: + articles = list() + for item in rss_channel.items: + attachments = [ + Attachment(link=e.url, mime_type=e.type, size_in_bytes=e.length, + title=_get_attachment_title(None, e.url), duration=None) + for e in item.enclosures + ] + articles.append(Article( + item.guid or item.link, + item.title, + item.link, + item.content_encoded or item.description or '', + item.pub_date, + None, + attachments + )) + + if rss_channel.title is None and rss_channel.link is None: + raise FeedParseError('RSS feed does not have a title nor a link') + + return Feed( + rss_channel.title if rss_channel.title else rss_channel.link, + rss_channel.description, + rss_channel.link, + rss_channel.pub_date, + articles + ) + + +def _adapt_json_feed(json_feed: json_feed.JSONFeed) -> Feed: + articles = list() + for item in json_feed.items: + attachments = [ + Attachment(a.url, a.mime_type, + _get_attachment_title(a.title, a.url), + a.size_in_bytes, a.duration) + for a in item.attachments + ] + articles.append(Article( + item.id_, + item.title, + item.url, + item.content_html or item.content_text or '', + item.date_published, + item.date_modified, + 
attachments + )) + + return Feed( + json_feed.title, + json_feed.description, + json_feed.feed_url, + None, + articles + ) + + +def _get_article_dates(published_at: Optional[datetime], + updated_at: Optional[datetime] + ) -> Tuple[Optional[datetime], Optional[datetime]]: + if published_at and updated_at: + return published_at, updated_at + + if updated_at: + return updated_at, None + + if published_at: + return published_at, None + + raise FeedParseError('Article does not have proper dates') + + +def _get_attachment_title(attachment_title: Optional[str], link: str) -> str: + if attachment_title: + return attachment_title + + parsed_link = urllib.parse.urlparse(link) + return os.path.basename(parsed_link.path) + + +def _simple_parse(pairs, content) -> Feed: + is_xml = True + is_json = True + for parser, adapter in pairs: + try: + return adapter(parser(content)) + except FeedXMLError: + is_xml = False + except FeedJSONError: + is_json = False + except FeedParseError: + continue + + if not is_xml and not is_json: + raise FeedDocumentError('File is not a supported feed type') + + raise FeedParseError('File is not a valid supported feed') + + +def simple_parse_file(filename: str) -> Feed: + """Parse an Atom, RSS or JSON feed from a local file.""" + pairs = ( + (rss.parse_rss_file, _adapt_rss_channel), + (atom.parse_atom_file, _adapt_atom_feed), + (json_feed.parse_json_feed_file, _adapt_json_feed) + ) + return _simple_parse(pairs, filename) + + +def simple_parse_bytes(data: bytes) -> Feed: + """Parse an Atom, RSS or JSON feed from a byte-string containing data.""" + pairs = ( + (rss.parse_rss_bytes, _adapt_rss_channel), + (atom.parse_atom_bytes, _adapt_atom_feed), + (json_feed.parse_json_feed_bytes, _adapt_json_feed) + ) + return _simple_parse(pairs, data) diff --git a/python/atoma/utils.py b/python/atoma/utils.py new file mode 100644 index 0000000..4dc1ab5 --- /dev/null +++ b/python/atoma/utils.py @@ -0,0 +1,84 @@ +from datetime import datetime, timezone +from xml.etree.ElementTree import Element +from typing import Optional + +import dateutil.parser +from defusedxml.ElementTree import parse as defused_xml_parse, ParseError + +from .exceptions import FeedXMLError, FeedParseError + +ns = { + 'content': 'http://purl.org/rss/1.0/modules/content/', + 'feed': 'http://www.w3.org/2005/Atom' +} + + +def parse_xml(xml_content): + try: + return defused_xml_parse(xml_content) + except ParseError: + raise FeedXMLError('Not a valid XML document') + + +def get_child(element: Element, name, + optional: bool=True) -> Optional[Element]: + child = element.find(name, namespaces=ns) + + if child is None and not optional: + raise FeedParseError( + 'Could not parse feed: "{}" does not have a "{}"' + .format(element.tag, name) + ) + + elif child is None: + return None + + return child + + +def get_text(element: Element, name, optional: bool=True) -> Optional[str]: + child = get_child(element, name, optional) + if child is None: + return None + + if child.text is None: + if optional: + return None + + raise FeedParseError( + 'Could not parse feed: "{}" text is required but is empty' + .format(name) + ) + + return child.text.strip() + + +def get_int(element: Element, name, optional: bool=True) -> Optional[int]: + text = get_text(element, name, optional) + if text is None: + return None + + return int(text) + + +def get_datetime(element: Element, name, + optional: bool=True) -> Optional[datetime]: + text = get_text(element, name, optional) + if text is None: + return None + + return try_parse_date(text) + + +def 
try_parse_date(date_str: str) -> Optional[datetime]: + try: + date = dateutil.parser.parse(date_str, fuzzy=True) + except (ValueError, OverflowError): + return None + + if date.tzinfo is None: + # TZ naive datetime, make it a TZ aware datetime by assuming it + # contains UTC time + date = date.replace(tzinfo=timezone.utc) + + return date diff --git a/python/attr/__init__.py b/python/attr/__init__.py new file mode 100644 index 0000000..debfd57 --- /dev/null +++ b/python/attr/__init__.py @@ -0,0 +1,65 @@ +from __future__ import absolute_import, division, print_function + +from functools import partial + +from . import converters, exceptions, filters, validators +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, evolve, has +from ._make import ( + NOTHING, + Attribute, + Factory, + attrib, + attrs, + fields, + fields_dict, + make_class, + validate, +) + + +__version__ = "18.2.0" + +__title__ = "attrs" +__description__ = "Classes Without Boilerplate" +__url__ = "https://www.attrs.org/" +__uri__ = __url__ +__doc__ = __description__ + " <" + __uri__ + ">" + +__author__ = "Hynek Schlawack" +__email__ = "hs@ox.cx" + +__license__ = "MIT" +__copyright__ = "Copyright (c) 2015 Hynek Schlawack" + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + +__all__ = [ + "Attribute", + "Factory", + "NOTHING", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "converters", + "evolve", + "exceptions", + "fields", + "fields_dict", + "filters", + "get_run_validators", + "has", + "ib", + "make_class", + "s", + "set_run_validators", + "validate", + "validators", +] diff --git a/python/attr/__init__.pyi b/python/attr/__init__.pyi new file mode 100644 index 0000000..492fb85 --- /dev/null +++ b/python/attr/__init__.pyi @@ -0,0 +1,252 @@ +from typing import ( + Any, + Callable, + Dict, + Generic, + List, + Optional, + Sequence, + Mapping, + Tuple, + Type, + TypeVar, + Union, + overload, +) + +# `import X as X` is required to make these public +from . import exceptions as exceptions +from . import filters as filters +from . import converters as converters +from . import validators as validators + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_ValidatorType = Callable[[Any, Attribute, _T], Any] +_ConverterType = Callable[[Any], _T] +_FilterType = Callable[[Attribute, Any], bool] +# FIXME: in reality, if multiple validators are passed they must be in a list or tuple, +# but those are invariant and so would prevent subtypes of _ValidatorType from working +# when passed in a list or tuple. +_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]] + +# _make -- + +NOTHING: object + +# NOTE: Factory lies about its return type to make this possible: `x: List[int] = Factory(list)` +# Work around mypy issue #4554 in the common case by using an overload. +@overload +def Factory(factory: Callable[[], _T]) -> _T: ... +@overload +def Factory( + factory: Union[Callable[[Any], _T], Callable[[], _T]], + takes_self: bool = ..., +) -> _T: ... + +class Attribute(Generic[_T]): + name: str + default: Optional[_T] + validator: Optional[_ValidatorType[_T]] + repr: bool + cmp: bool + hash: Optional[bool] + init: bool + converter: Optional[_ConverterType[_T]] + metadata: Dict[Any, Any] + type: Optional[Type[_T]] + kw_only: bool + def __lt__(self, x: Attribute) -> bool: ... + def __le__(self, x: Attribute) -> bool: ... + def __gt__(self, x: Attribute) -> bool: ... 
+ def __ge__(self, x: Attribute) -> bool: ... + +# NOTE: We had several choices for the annotation to use for type arg: +# 1) Type[_T] +# - Pros: Handles simple cases correctly +# - Cons: Might produce less informative errors in the case of conflicting TypeVars +# e.g. `attr.ib(default='bad', type=int)` +# 2) Callable[..., _T] +# - Pros: Better error messages than #1 for conflicting TypeVars +# - Cons: Terrible error messages for validator checks. +# e.g. attr.ib(type=int, validator=validate_str) +# -> error: Cannot infer function type argument +# 3) type (and do all of the work in the mypy plugin) +# - Pros: Simple here, and we could customize the plugin with our own errors. +# - Cons: Would need to write mypy plugin code to handle all the cases. +# We chose option #1. + +# `attr` lies about its return type to make the following possible: +# attr() -> Any +# attr(8) -> int +# attr(validator=) -> Whatever the callable expects. +# This makes this type of assignments possible: +# x: int = attr(8) +# +# This form catches explicit None or no default but with no other arguments returns Any. +@overload +def attrib( + default: None = ..., + validator: None = ..., + repr: bool = ..., + cmp: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + convert: None = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the other arguments. +@overload +def attrib( + default: None = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: bool = ..., + cmp: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + convert: Optional[_ConverterType[_T]] = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType[_T]] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def attrib( + default: _T, + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: bool = ..., + cmp: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + convert: Optional[_ConverterType[_T]] = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: Optional[Type[_T]] = ..., + converter: Optional[_ConverterType[_T]] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def attrib( + default: Optional[_T] = ..., + validator: Optional[_ValidatorArgType[_T]] = ..., + repr: bool = ..., + cmp: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + convert: Optional[_ConverterType[_T]] = ..., + metadata: Optional[Mapping[Any, Any]] = ..., + type: object = ..., + converter: Optional[_ConverterType[_T]] = ..., + factory: Optional[Callable[[], _T]] = ..., + kw_only: bool = ..., +) -> Any: ... +@overload +def attrs( + maybe_cls: _C, + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., +) -> _C: ... 
+@overload +def attrs( + maybe_cls: None = ..., + these: Optional[Dict[str, Any]] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., +) -> Callable[[_C], _C]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +class _Fields(Tuple[Attribute, ...]): + def __getattr__(self, name: str) -> Attribute: ... + +def fields(cls: type) -> _Fields: ... +def fields_dict(cls: type) -> Dict[str, Attribute]: ... +def validate(inst: Any) -> None: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', [attr.ib()])` is valid +def make_class( + name: str, + attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]], + bases: Tuple[type, ...] = ..., + repr_ns: Optional[str] = ..., + repr: bool = ..., + cmp: bool = ..., + hash: Optional[bool] = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. waiting on one of these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +def asdict( + inst: Any, + recurse: bool = ..., + filter: Optional[_FilterType] = ..., + dict_factory: Type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., +) -> Dict[str, Any]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: Any, + recurse: bool = ..., + filter: Optional[_FilterType] = ..., + tuple_factory: Type[Sequence] = ..., + retain_collection_types: bool = ..., +) -> Tuple[Any, ...]: ... +def has(cls: type) -> bool: ... +def assoc(inst: _T, **changes: Any) -> _T: ... +def evolve(inst: _T, **changes: Any) -> _T: ... + +# _config -- + +def set_run_validators(run: bool) -> None: ... +def get_run_validators() -> bool: ... + +# aliases -- + +s = attributes = attrs +ib = attr = attrib +dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/python/attr/_compat.py b/python/attr/_compat.py new file mode 100644 index 0000000..5bb0659 --- /dev/null +++ b/python/attr/_compat.py @@ -0,0 +1,163 @@ +from __future__ import absolute_import, division, print_function + +import platform +import sys +import types +import warnings + + +PY2 = sys.version_info[0] == 2 +PYPY = platform.python_implementation() == "PyPy" + + +if PYPY or sys.version_info[:2] >= (3, 6): + ordered_dict = dict +else: + from collections import OrderedDict + + ordered_dict = OrderedDict + + +if PY2: + from UserDict import IterableUserDict + + # We 'bundle' isclass instead of using inspect as importing inspect is + # fairly expensive (order of 10-15 ms for a modern machine in 2016) + def isclass(klass): + return isinstance(klass, (type, types.ClassType)) + + # TYPE is used in exceptions, repr(int) is different on Python 2 and 3. + TYPE = "type" + + def iteritems(d): + return d.iteritems() + + # Python 2 is bereft of a read-only dict proxy, so we make one! + class ReadOnlyDict(IterableUserDict): + """ + Best-effort read-only dict wrapper. 
+ """ + + def __setitem__(self, key, val): + # We gently pretend we're a Python 3 mappingproxy. + raise TypeError( + "'mappingproxy' object does not support item assignment" + ) + + def update(self, _): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError( + "'mappingproxy' object has no attribute 'update'" + ) + + def __delitem__(self, _): + # We gently pretend we're a Python 3 mappingproxy. + raise TypeError( + "'mappingproxy' object does not support item deletion" + ) + + def clear(self): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError( + "'mappingproxy' object has no attribute 'clear'" + ) + + def pop(self, key, default=None): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError( + "'mappingproxy' object has no attribute 'pop'" + ) + + def popitem(self): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError( + "'mappingproxy' object has no attribute 'popitem'" + ) + + def setdefault(self, key, default=None): + # We gently pretend we're a Python 3 mappingproxy. + raise AttributeError( + "'mappingproxy' object has no attribute 'setdefault'" + ) + + def __repr__(self): + # Override to be identical to the Python 3 version. + return "mappingproxy(" + repr(self.data) + ")" + + def metadata_proxy(d): + res = ReadOnlyDict() + res.data.update(d) # We blocked update, so we have to do it like this. + return res + + +else: + + def isclass(klass): + return isinstance(klass, type) + + TYPE = "class" + + def iteritems(d): + return d.items() + + def metadata_proxy(d): + return types.MappingProxyType(dict(d)) + + +def import_ctypes(): + """ + Moved into a function for testability. + """ + import ctypes + + return ctypes + + +if not PY2: + + def just_warn(*args, **kw): + """ + We only warn on Python 3 because we are not aware of any concrete + consequences of not setting the cell on Python 2. + """ + warnings.warn( + "Missing ctypes. Some features like bare super() or accessing " + "__class__ will not work with slots classes.", + RuntimeWarning, + stacklevel=2, + ) + + +else: + + def just_warn(*args, **kw): # pragma: nocover + """ + We only warn on Python 3 because we are not aware of any concrete + consequences of not setting the cell on Python 2. + """ + + +def make_set_closure_cell(): + """ + Moved into a function for testability. + """ + if PYPY: # pragma: no cover + + def set_closure_cell(cell, value): + cell.__setstate__((value,)) + + else: + try: + ctypes = import_ctypes() + + set_closure_cell = ctypes.pythonapi.PyCell_Set + set_closure_cell.argtypes = (ctypes.py_object, ctypes.py_object) + set_closure_cell.restype = ctypes.c_int + except Exception: + # We try best effort to set the cell, but sometimes it's not + # possible. For example on Jython or on GAE. + set_closure_cell = just_warn + return set_closure_cell + + +set_closure_cell = make_set_closure_cell() diff --git a/python/attr/_config.py b/python/attr/_config.py new file mode 100644 index 0000000..8ec9209 --- /dev/null +++ b/python/attr/_config.py @@ -0,0 +1,23 @@ +from __future__ import absolute_import, division, print_function + + +__all__ = ["set_run_validators", "get_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + """ + if not isinstance(run, bool): + raise TypeError("'run' must be bool.") + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. 
+ """ + return _run_validators diff --git a/python/attr/_funcs.py b/python/attr/_funcs.py new file mode 100644 index 0000000..b61d239 --- /dev/null +++ b/python/attr/_funcs.py @@ -0,0 +1,290 @@ +from __future__ import absolute_import, division, print_function + +import copy + +from ._compat import iteritems +from ._make import NOTHING, _obj_setattr, fields +from .exceptions import AttrsAttributeNotFoundError + + +def asdict( + inst, + recurse=True, + filter=None, + dict_factory=dict, + retain_collection_types=False, +): + """ + Return the ``attrs`` attribute values of *inst* as a dict. + + Optionally recurse into other ``attrs``-decorated classes. + + :param inst: Instance of an ``attrs``-decorated class. + :param bool recurse: Recurse into classes that are also + ``attrs``-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the :class:`attr.Attribute` as the first argument and the + value as the second argument. + :param callable dict_factory: A callable to produce dictionaries from. For + example, to produce ordered dictionaries instead of normal Python + dictionaries, pass in ``collections.OrderedDict``. + :param bool retain_collection_types: Do not convert to ``list`` when + encountering an attribute whose type is ``tuple`` or ``set``. Only + meaningful if ``recurse`` is ``True``. + + :rtype: return type of *dict_factory* + + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 16.0.0 *dict_factory* + .. versionadded:: 16.1.0 *retain_collection_types* + """ + attrs = fields(inst.__class__) + rv = dict_factory() + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + if recurse is True: + if has(v.__class__): + rv[a.name] = asdict( + v, True, filter, dict_factory, retain_collection_types + ) + elif isinstance(v, (tuple, list, set)): + cf = v.__class__ if retain_collection_types is True else list + rv[a.name] = cf( + [ + _asdict_anything( + i, filter, dict_factory, retain_collection_types + ) + for i in v + ] + ) + elif isinstance(v, dict): + df = dict_factory + rv[a.name] = df( + ( + _asdict_anything( + kk, filter, df, retain_collection_types + ), + _asdict_anything( + vv, filter, df, retain_collection_types + ), + ) + for kk, vv in iteritems(v) + ) + else: + rv[a.name] = v + else: + rv[a.name] = v + return rv + + +def _asdict_anything(val, filter, dict_factory, retain_collection_types): + """ + ``asdict`` only works on attrs instances, this works on anything. + """ + if getattr(val.__class__, "__attrs_attrs__", None) is not None: + # Attrs class. + rv = asdict(val, True, filter, dict_factory, retain_collection_types) + elif isinstance(val, (tuple, list, set)): + cf = val.__class__ if retain_collection_types is True else list + rv = cf( + [ + _asdict_anything( + i, filter, dict_factory, retain_collection_types + ) + for i in val + ] + ) + elif isinstance(val, dict): + df = dict_factory + rv = df( + ( + _asdict_anything(kk, filter, df, retain_collection_types), + _asdict_anything(vv, filter, df, retain_collection_types), + ) + for kk, vv in iteritems(val) + ) + else: + rv = val + return rv + + +def astuple( + inst, + recurse=True, + filter=None, + tuple_factory=tuple, + retain_collection_types=False, +): + """ + Return the ``attrs`` attribute values of *inst* as a tuple. + + Optionally recurse into other ``attrs``-decorated classes. 
+ + :param inst: Instance of an ``attrs``-decorated class. + :param bool recurse: Recurse into classes that are also + ``attrs``-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the :class:`attr.Attribute` as the first argument and the + value as the second argument. + :param callable tuple_factory: A callable to produce tuples from. For + example, to produce lists instead of tuples. + :param bool retain_collection_types: Do not convert to ``list`` + or ``dict`` when encountering an attribute which type is + ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is + ``True``. + + :rtype: return type of *tuple_factory* + + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 16.2.0 + """ + attrs = fields(inst.__class__) + rv = [] + retain = retain_collection_types # Very long. :/ + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + if recurse is True: + if has(v.__class__): + rv.append( + astuple( + v, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + ) + elif isinstance(v, (tuple, list, set)): + cf = v.__class__ if retain is True else list + rv.append( + cf( + [ + astuple( + j, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(j.__class__) + else j + for j in v + ] + ) + ) + elif isinstance(v, dict): + df = v.__class__ if retain is True else dict + rv.append( + df( + ( + astuple( + kk, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(kk.__class__) + else kk, + astuple( + vv, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(vv.__class__) + else vv, + ) + for kk, vv in iteritems(v) + ) + ) + else: + rv.append(v) + else: + rv.append(v) + return rv if tuple_factory is list else tuple_factory(rv) + + +def has(cls): + """ + Check whether *cls* is a class with ``attrs`` attributes. + + :param type cls: Class to introspect. + :raise TypeError: If *cls* is not a class. + + :rtype: :class:`bool` + """ + return getattr(cls, "__attrs_attrs__", None) is not None + + +def assoc(inst, **changes): + """ + Copy *inst* and apply *changes*. + + :param inst: Instance of a class with ``attrs`` attributes. + :param changes: Keyword changes in the new copy. + + :return: A copy of inst with *changes* incorporated. + + :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't + be found on *cls*. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. deprecated:: 17.1.0 + Use :func:`evolve` instead. + """ + import warnings + + warnings.warn( + "assoc is deprecated and will be removed after 2018/01.", + DeprecationWarning, + stacklevel=2, + ) + new = copy.copy(inst) + attrs = fields(inst.__class__) + for k, v in iteritems(changes): + a = getattr(attrs, k, NOTHING) + if a is NOTHING: + raise AttrsAttributeNotFoundError( + "{k} is not an attrs attribute on {cl}.".format( + k=k, cl=new.__class__ + ) + ) + _obj_setattr(new, k, v) + return new + + +def evolve(inst, **changes): + """ + Create a new instance, based on *inst* with *changes* applied. + + :param inst: Instance of a class with ``attrs`` attributes. + :param changes: Keyword changes in the new copy. + + :return: A copy of inst with *changes* incorporated. 
+ + :raise TypeError: If *attr_name* couldn't be found in the class + ``__init__``. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 17.1.0 + """ + cls = inst.__class__ + attrs = fields(cls) + for a in attrs: + if not a.init: + continue + attr_name = a.name # To deal with private attributes. + init_name = attr_name if attr_name[0] != "_" else attr_name[1:] + if init_name not in changes: + changes[init_name] = getattr(inst, attr_name) + return cls(**changes) diff --git a/python/attr/_make.py b/python/attr/_make.py new file mode 100644 index 0000000..f7fd05e --- /dev/null +++ b/python/attr/_make.py @@ -0,0 +1,2034 @@ +from __future__ import absolute_import, division, print_function + +import copy +import hashlib +import linecache +import sys +import threading +import warnings + +from operator import itemgetter + +from . import _config +from ._compat import ( + PY2, + isclass, + iteritems, + metadata_proxy, + ordered_dict, + set_closure_cell, +) +from .exceptions import ( + DefaultAlreadySetError, + FrozenInstanceError, + NotAnAttrsClassError, + PythonTooOldError, + UnannotatedAttributeError, +) + + +# This is used at least twice, so cache it here. +_obj_setattr = object.__setattr__ +_init_converter_pat = "__attr_converter_{}" +_init_factory_pat = "__attr_factory_{}" +_tuple_property_pat = ( + " {attr_name} = _attrs_property(_attrs_itemgetter({index}))" +) +_classvar_prefixes = ("typing.ClassVar", "t.ClassVar", "ClassVar") +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_hash_cache_field = "_attrs_cached_hash" + +_empty_metadata_singleton = metadata_proxy({}) + + +class _Nothing(object): + """ + Sentinel class to indicate the lack of a value when ``None`` is ambiguous. + + ``_Nothing`` is a singleton. There is only ever one of it. + """ + + _singleton = None + + def __new__(cls): + if _Nothing._singleton is None: + _Nothing._singleton = super(_Nothing, cls).__new__(cls) + return _Nothing._singleton + + def __repr__(self): + return "NOTHING" + + +NOTHING = _Nothing() +""" +Sentinel to indicate the lack of a value when ``None`` is ambiguous. +""" + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=True, + hash=None, + init=True, + convert=None, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, +): + """ + Create a new attribute on a class. + + .. warning:: + + Does *not* do anything unless the class is also decorated with + :func:`attr.s`! + + :param default: A value that is used if an ``attrs``-generated ``__init__`` + is used and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of :class:`Factory`, its callable will be + used to construct a new value (useful for mutable data types like lists + or dicts). + + If a default is not set (or set manually to ``attr.NOTHING``), a value + *must* be supplied when instantiating; otherwise a :exc:`TypeError` + will be raised. + + The default can also be set using decorator notation as shown below. + + :type default: Any value. + + :param callable factory: Syntactic sugar for + ``default=attr.Factory(callable)``. + + :param validator: :func:`callable` that is called by ``attrs``-generated + ``__init__`` methods after the instance has been initialized. They + receive the initialized instance, the :class:`Attribute`, and the + passed value. 
+ + The return value is *not* inspected so the validator has to throw an + exception itself. + + If a ``list`` is passed, its items are treated as validators and must + all pass. + + Validators can be globally disabled and re-enabled using + :func:`get_run_validators`. + + The validator can also be set using decorator notation as shown below. + + :type validator: ``callable`` or a ``list`` of ``callable``\\ s. + + :param bool repr: Include this attribute in the generated ``__repr__`` + method. + :param bool cmp: Include this attribute in the generated comparison methods + (``__eq__`` et al). + :param hash: Include this attribute in the generated ``__hash__`` + method. If ``None`` (default), mirror *cmp*'s value. This is the + correct behavior according the Python spec. Setting this value to + anything else than ``None`` is *discouraged*. + :type hash: ``bool`` or ``None`` + :param bool init: Include this attribute in the generated ``__init__`` + method. It is possible to set this to ``False`` and set a default + value. In that case this attributed is unconditionally initialized + with the specified default value or factory. + :param callable converter: :func:`callable` that is called by + ``attrs``-generated ``__init__`` methods to converter attribute's value + to the desired format. It is given the passed-in value, and the + returned value will be used as the new value of the attribute. The + value is converted before being passed to the validator, if any. + :param metadata: An arbitrary mapping, to be used by third-party + components. See :ref:`extending_metadata`. + :param type: The type of the attribute. In Python 3.6 or greater, the + preferred method to specify the type is using a variable annotation + (see `PEP 526 `_). + This argument is provided for backward compatibility. + Regardless of the approach used, the type will be stored on + ``Attribute.type``. + + Please note that ``attrs`` doesn't do anything with this metadata by + itself. You can use it as part of your own code or for + :doc:`static type checking `. + :param kw_only: Make this attribute keyword-only (Python 3+) + in the generated ``__init__`` (if ``init`` is ``False``, this + parameter is ignored). + + .. versionadded:: 15.2.0 *convert* + .. versionadded:: 16.3.0 *metadata* + .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. + .. versionchanged:: 17.1.0 + *hash* is ``None`` and therefore mirrors *cmp* by default. + .. versionadded:: 17.3.0 *type* + .. deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated + *convert* to achieve consistency with other noun-based arguments. + .. versionadded:: 18.1.0 + ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. + .. versionadded:: 18.2.0 *kw_only* + """ + if hash is not None and hash is not True and hash is not False: + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + + if convert is not None: + if converter is not None: + raise RuntimeError( + "Can't pass both `convert` and `converter`. " + "Please use `converter` only." + ) + warnings.warn( + "The `convert` argument is deprecated in favor of `converter`. " + "It will be removed after 2019/01.", + DeprecationWarning, + stacklevel=2, + ) + converter = convert + + if factory is not None: + if default is not NOTHING: + raise ValueError( + "The `default` and `factory` arguments are mutually " + "exclusive." 
+ ) + if not callable(factory): + raise ValueError("The `factory` argument must be a callable.") + default = Factory(factory) + + if metadata is None: + metadata = {} + + return _CountingAttr( + default=default, + validator=validator, + repr=repr, + cmp=cmp, + hash=hash, + init=init, + converter=converter, + metadata=metadata, + type=type, + kw_only=kw_only, + ) + + +def _make_attr_tuple_class(cls_name, attr_names): + """ + Create a tuple subclass to hold `Attribute`s for an `attrs` class. + + The subclass is a bare tuple with properties for names. + + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = "{}Attributes".format(cls_name) + attr_class_template = [ + "class {}(tuple):".format(attr_class_name), + " __slots__ = ()", + ] + if attr_names: + for i, attr_name in enumerate(attr_names): + attr_class_template.append( + _tuple_property_pat.format(index=i, attr_name=attr_name) + ) + else: + attr_class_template.append(" pass") + globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} + eval(compile("\n".join(attr_class_template), "", "exec"), globs) + + return globs[attr_class_name] + + +# Tuple class for extracted attributes from a class definition. +# `base_attrs` is a subset of `attrs`. +_Attributes = _make_attr_tuple_class( + "_Attributes", + [ + # all attributes to build dunder methods for + "attrs", + # attributes that have been inherited + "base_attrs", + # map inherited attributes to their originating classes + "base_attrs_map", + ], +) + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. + """ + return str(annot).startswith(_classvar_prefixes) + + +def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + anns = getattr(cls, "__annotations__", None) + if anns is None: + return {} + + # Verify that the annotations aren't merely inherited. + for base_cls in cls.__mro__[1:]: + if anns is getattr(base_cls, "__annotations__", None): + return {} + + return anns + + +def _counter_getter(e): + """ + Key function for sorting to avoid re-creating a lambda for every class. + """ + return e[1].counter + + +def _transform_attrs(cls, these, auto_attribs, kw_only): + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = [(name, ca) for name, ca in iteritems(these)] + + if not isinstance(these, ordered_dict): + ca_list.sort(key=_counter_getter) + elif auto_attribs is True: + ca_names = { + name + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + if not isinstance(a, _CountingAttr): + if a is NOTHING: + a = attrib() + else: + a = attrib(default=a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if len(unannotated) > 0: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join( + sorted(unannotated, key=lambda n: cd.get(n).counter) + ) + + "." 
+ ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + ), + key=lambda e: e[1].counter, + ) + + own_attrs = [ + Attribute.from_counting_attr( + name=attr_name, ca=ca, type=anns.get(attr_name) + ) + for attr_name, ca in ca_list + ] + + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + taken_attr_names = {a.name: a for a in own_attrs} + + # Traverse the MRO and collect attributes. + for base_cls in cls.__mro__[1:-1]: + sub_attrs = getattr(base_cls, "__attrs_attrs__", None) + if sub_attrs is not None: + for a in sub_attrs: + prev_a = taken_attr_names.get(a.name) + # Only add an attribute if it hasn't been defined before. This + # allows for overwriting attribute definitions by subclassing. + if prev_a is None: + base_attrs.append(a) + taken_attr_names[a.name] = a + base_attr_map[a.name] = base_cls + + attr_names = [a.name for a in base_attrs + own_attrs] + + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + if kw_only: + own_attrs = [a._assoc(kw_only=True) for a in own_attrs] + base_attrs = [a._assoc(kw_only=True) for a in base_attrs] + + attrs = AttrsClass(base_attrs + own_attrs) + + had_default = False + was_kw_only = False + for a in attrs: + if ( + was_kw_only is False + and had_default is True + and a.default is NOTHING + and a.init is True + and a.kw_only is False + ): + raise ValueError( + "No mandatory attributes allowed after an attribute with a " + "default value or factory. Attribute in question: %r" % (a,) + ) + elif ( + had_default is False + and a.default is not NOTHING + and a.init is not False + and + # Keyword-only attributes without defaults can be specified + # after keyword-only attributes with defaults. + a.kw_only is False + ): + had_default = True + if was_kw_only is True and a.kw_only is False: + raise ValueError( + "Non keyword-only attributes are not allowed after a " + "keyword-only attribute. Attribute in question: {a!r}".format( + a=a + ) + ) + if was_kw_only is False and a.init is True and a.kw_only is True: + was_kw_only = True + + return _Attributes((attrs, base_attrs, base_attr_map)) + + +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + raise FrozenInstanceError() + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + raise FrozenInstanceError() + + +class _ClassBuilder(object): + """ + Iteratively build *one* class. 
+ """ + + __slots__ = ( + "_cls", + "_cls_dict", + "_attrs", + "_base_names", + "_attr_names", + "_slots", + "_frozen", + "_weakref_slot", + "_cache_hash", + "_has_post_init", + "_delete_attribs", + "_base_attr_map", + ) + + def __init__( + self, + cls, + these, + slots, + frozen, + weakref_slot, + auto_attribs, + kw_only, + cache_hash, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, these, auto_attribs, kw_only + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._base_names = set(a.name for a in base_attrs) + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen or _has_frozen_base_class(cls) + self._weakref_slot = weakref_slot + self._cache_hash = cache_hash + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + def __repr__(self): + return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__) + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + else: + return self._patch_original_class() + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). + if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, None) is not None + ): + try: + delattr(cls, name) + except AttributeError: + # This can happen if a base class defines a class + # variable and we want to set an attribute with the + # same name by using only a type annotation. + pass + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + base_names = self._base_names + cd = { + k: v + for k, v in iteritems(self._cls_dict) + if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") + } + + weakref_inherited = False + + # Traverse the MRO to check for an existing __weakref__. + for base_cls in self._cls.__mro__[1:-1]: + if "__weakref__" in getattr(base_cls, "__dict__", ()): + weakref_inherited = True + break + + names = self._attr_names + if ( + self._weakref_slot + and "__weakref__" not in getattr(self._cls, "__slots__", ()) + and "__weakref__" not in names + and not weakref_inherited + ): + names += ("__weakref__",) + + # We only add the names of attributes that aren't inherited. + # Settings __slots__ to inherited attributes wastes memory. + slot_names = [name for name in names if name not in base_names] + if self._cache_hash: + slot_names.append(_hash_cache_field) + cd["__slots__"] = tuple(slot_names) + + qualname = getattr(self._cls, "__qualname__", None) + if qualname is not None: + cd["__qualname__"] = qualname + + # __weakref__ is not writable. + state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. 
+ """ + return tuple(getattr(self, name) for name in state_attr_names) + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + + # slots and frozen require __getstate__/__setstate__ to work + cd["__getstate__"] = slots_getstate + cd["__setstate__"] = slots_setstate + + # Create new class based on old class and our methods. + cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) + + # The following is a fix for + # https://github.com/python-attrs/attrs/issues/102. On Python 3, + # if a method mentions `__class__` or uses the no-arg super(), the + # compiler will bake a reference to the class in the method itself + # as `method.__closure__`. Since we replace the class with a + # clone, we rewrite these references so it keeps working. + for item in cls.__dict__.values(): + if isinstance(item, (classmethod, staticmethod)): + # Class- and staticmethods hide their functions inside. + # These might need to be rewritten as well. + closure_cells = getattr(item.__func__, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + if cell.cell_contents is self._cls: + set_closure_cell(cell, cls) + + return cls + + def add_repr(self, ns): + self._cls_dict["__repr__"] = self._add_method_dunders( + _make_repr(self._attrs, ns=ns) + ) + return self + + def add_str(self): + repr = self._cls_dict.get("__repr__") + if repr is None: + raise ValueError( + "__str__ can only be generated if a __repr__ exists." + ) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + self._cls_dict["__hash__"] = self._add_method_dunders( + _make_hash( + self._attrs, frozen=self._frozen, cache_hash=self._cache_hash + ) + ) + + return self + + def add_init(self): + self._cls_dict["__init__"] = self._add_method_dunders( + _make_init( + self._attrs, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + ) + ) + + return self + + def add_cmp(self): + cd = self._cls_dict + + cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd[ + "__gt__" + ], cd["__ge__"] = ( + self._add_method_dunders(meth) for meth in _make_cmp(self._attrs) + ) + + return self + + def _add_method_dunders(self, method): + """ + Add __module__ and __qualname__ to a *method* if possible. + """ + try: + method.__module__ = self._cls.__module__ + except AttributeError: + pass + + try: + method.__qualname__ = ".".join( + (self._cls.__qualname__, method.__name__) + ) + except AttributeError: + pass + + return method + + +def attrs( + maybe_cls=None, + these=None, + repr_ns=None, + repr=True, + cmp=True, + hash=None, + init=True, + slots=False, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=False, + kw_only=False, + cache_hash=False, +): + r""" + A class decorator that adds `dunder + `_\ -methods according to the + specified attributes using :func:`attr.ib` or the *these* argument. + + :param these: A dictionary of name to :func:`attr.ib` mappings. This is + useful to avoid the definition of your attributes within the class body + because you can't (e.g. if you want to add ``__repr__`` methods to + Django models) or don't want to. 
+ + If *these* is not ``None``, ``attrs`` will *not* search the class body + for attributes and will *not* remove any attributes from it. + + If *these* is an ordered dict (:class:`dict` on Python 3.6+, + :class:`collections.OrderedDict` otherwise), the order is deduced from + the order of the attributes inside *these*. Otherwise the order + of the definition of the attributes is used. + + :type these: :class:`dict` of :class:`str` to :func:`attr.ib` + + :param str repr_ns: When using nested classes, there's no way in Python 2 + to automatically detect that. Therefore it's possible to set the + namespace explicitly for a more meaningful ``repr`` output. + :param bool repr: Create a ``__repr__`` method with a human readable + representation of ``attrs`` attributes.. + :param bool str: Create a ``__str__`` method that is identical to + ``__repr__``. This is usually not necessary except for + :class:`Exception`\ s. + :param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``, + ``__gt__``, and ``__ge__`` methods that compare the class as if it were + a tuple of its ``attrs`` attributes. But the attributes are *only* + compared, if the types of both classes are *identical*! + :param hash: If ``None`` (default), the ``__hash__`` method is generated + according how *cmp* and *frozen* are set. + + 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. + 2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to + None, marking it unhashable (which it is). + 3. If *cmp* is False, ``__hash__`` will be left untouched meaning the + ``__hash__`` method of the base class will be used (if base class is + ``object``, this means it will fall back to id-based hashing.). + + Although not recommended, you can decide for yourself and force + ``attrs`` to create one (e.g. if the class is immutable even though you + didn't freeze it programmatically) by passing ``True`` or not. Both of + these cases are rather special and should be used carefully. + + See the `Python documentation \ + `_ + and the `GitHub issue that led to the default behavior \ + `_ for more details. + :type hash: ``bool`` or ``None`` + :param bool init: Create a ``__init__`` method that initializes the + ``attrs`` attributes. Leading underscores are stripped for the + argument name. If a ``__attrs_post_init__`` method exists on the + class, it will be called after the class is fully initialized. + :param bool slots: Create a slots_-style class that's more + memory-efficient. See :ref:`slots` for further ramifications. + :param bool frozen: Make instances immutable after initialization. If + someone attempts to modify a frozen instance, + :exc:`attr.exceptions.FrozenInstanceError` is raised. + + Please note: + + 1. This is achieved by installing a custom ``__setattr__`` method + on your class so you can't implement an own one. + + 2. True immutability is impossible in Python. + + 3. This *does* have a minor a runtime performance :ref:`impact + ` when initializing new instances. In other words: + ``__init__`` is slightly slower with ``frozen=True``. + + 4. If a class is frozen, you cannot modify ``self`` in + ``__attrs_post_init__`` or a self-written ``__init__``. You can + circumvent that limitation by using + ``object.__setattr__(self, "attribute_name", value)``. + + .. _slots: https://docs.python.org/3/reference/datamodel.html#slots + :param bool weakref_slot: Make instances weak-referenceable. This has no + effect unless ``slots`` is also enabled. 
+ :param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes + (Python 3.6 and later only) from the class body. + + In this case, you **must** annotate every field. If ``attrs`` + encounters a field that is set to an :func:`attr.ib` but lacks a type + annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is + raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't + want to set a type. + + If you assign a value to those attributes (e.g. ``x: int = 42``), that + value becomes the default value like if it were passed using + ``attr.ib(default=42)``. Passing an instance of :class:`Factory` also + works as expected. + + Attributes annotated as :data:`typing.ClassVar` are **ignored**. + + .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ + :param bool kw_only: Make all attributes keyword-only (Python 3+) + in the generated ``__init__`` (if ``init`` is ``False``, this + parameter is ignored). + :param bool cache_hash: Ensure that the object's hash code is computed + only once and stored on the object. If this is set to ``True``, + hashing must be either explicitly or implicitly enabled for this + class. If the hash code is cached, then no attributes of this + class which participate in hash code computation may be mutated + after object creation. + + + .. versionadded:: 16.0.0 *slots* + .. versionadded:: 16.1.0 *frozen* + .. versionadded:: 16.3.0 *str* + .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. + .. versionchanged:: 17.1.0 + *hash* supports ``None`` as value which is also the default now. + .. versionadded:: 17.3.0 *auto_attribs* + .. versionchanged:: 18.1.0 + If *these* is passed, no attributes are deleted from the class body. + .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. + .. versionadded:: 18.2.0 *weakref_slot* + .. deprecated:: 18.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a + :class:`DeprecationWarning` if the classes compared are subclasses of + each other. ``__eq`` and ``__ne__`` never tried to compared subclasses + to each other. + .. versionadded:: 18.2.0 *kw_only* + .. versionadded:: 18.2.0 *cache_hash* + """ + + def wrap(cls): + if getattr(cls, "__class__", None) is None: + raise TypeError("attrs only works with new-style classes.") + + builder = _ClassBuilder( + cls, + these, + slots, + frozen, + weakref_slot, + auto_attribs, + kw_only, + cache_hash, + ) + + if repr is True: + builder.add_repr(repr_ns) + if str is True: + builder.add_str() + if cmp is True: + builder.add_cmp() + + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + elif hash is False or (hash is None and cmp is False): + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." + ) + elif hash is True or (hash is None and cmp is True and frozen is True): + builder.add_hash() + else: + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." + ) + builder.make_unhashable() + + if init is True: + builder.add_init() + else: + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " init must be True." + ) + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. 
It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + else: + return wrap(maybe_cls) + + +_attrs = attrs +""" +Internal alias so we can use it in functions that take an argument called +*attrs*. +""" + + +if PY2: + + def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return ( + getattr(cls.__setattr__, "__module__", None) + == _frozen_setattrs.__module__ + and cls.__setattr__.__name__ == _frozen_setattrs.__name__ + ) + + +else: + + def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return cls.__setattr__ == _frozen_setattrs + + +def _attrs_to_tuple(obj, attrs): + """ + Create a tuple of all values of *obj*'s *attrs*. + """ + return tuple(getattr(obj, a.name) for a in attrs) + + +def _make_hash(attrs, frozen, cache_hash): + attrs = tuple( + a + for a in attrs + if a.hash is True or (a.hash is None and a.cmp is True) + ) + + tab = " " + + # We cache the generated hash methods for the same kinds of attributes. + sha1 = hashlib.sha1() + sha1.update(repr(attrs).encode("utf-8")) + unique_filename = "" % (sha1.hexdigest(),) + type_hash = hash(unique_filename) + + method_lines = ["def __hash__(self):"] + + def append_hash_computation_lines(prefix, indent): + """ + Generate the code for actually computing the hash code. + Below this will either be returned directly or used to compute + a value which is then cached, depending on the value of cache_hash + """ + method_lines.extend( + [indent + prefix + "hash((", indent + " %d," % (type_hash,)] + ) + + for a in attrs: + method_lines.append(indent + " self.%s," % a.name) + + method_lines.append(indent + " ))") + + if cache_hash: + method_lines.append(tab + "if self.%s is None:" % _hash_cache_field) + if frozen: + append_hash_computation_lines( + "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2 + ) + method_lines.append(tab * 2 + ")") # close __setattr__ + else: + append_hash_computation_lines( + "self.%s = " % _hash_cache_field, tab * 2 + ) + method_lines.append(tab + "return self.%s" % _hash_cache_field) + else: + append_hash_computation_lines("return ", tab) + + script = "\n".join(method_lines) + globs = {} + locs = {} + bytecode = compile(script, unique_filename, "exec") + eval(bytecode, globs, locs) + + # In order of debuggers like PDB being able to step through the code, + # we add a fake linecache entry. + linecache.cache[unique_filename] = ( + len(script), + None, + script.splitlines(True), + unique_filename, + ) + + return locs["__hash__"] + + +def _add_hash(cls, attrs): + """ + Add a hash method to *cls*. + """ + cls.__hash__ = _make_hash(attrs, frozen=False, cache_hash=False) + return cls + + +def __ne__(self, other): + """ + Check equality and either forward a NotImplemented or return the result + negated. + """ + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + + return not result + + +WARNING_CMP_ISINSTANCE = ( + "Comparision of subclasses using __%s__ is deprecated and will be removed " + "in 2019." +) + + +def _make_cmp(attrs): + attrs = [a for a in attrs if a.cmp] + + # We cache the generated eq methods for the same kinds of attributes. 
+ sha1 = hashlib.sha1() + sha1.update(repr(attrs).encode("utf-8")) + unique_filename = "" % (sha1.hexdigest(),) + lines = [ + "def __eq__(self, other):", + " if other.__class__ is not self.__class__:", + " return NotImplemented", + ] + # We can't just do a big self.x = other.x and... clause due to + # irregularities like nan == nan is false but (nan,) == (nan,) is true. + if attrs: + lines.append(" return (") + others = [" ) == ("] + for a in attrs: + lines.append(" self.%s," % (a.name,)) + others.append(" other.%s," % (a.name,)) + + lines += others + [" )"] + else: + lines.append(" return True") + + script = "\n".join(lines) + globs = {} + locs = {} + bytecode = compile(script, unique_filename, "exec") + eval(bytecode, globs, locs) + + # In order of debuggers like PDB being able to step through the code, + # we add a fake linecache entry. + linecache.cache[unique_filename] = ( + len(script), + None, + script.splitlines(True), + unique_filename, + ) + eq = locs["__eq__"] + ne = __ne__ + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return _attrs_to_tuple(obj, attrs) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if isinstance(other, self.__class__): + if other.__class__ is not self.__class__: + warnings.warn( + WARNING_CMP_ISINSTANCE % ("lt",), DeprecationWarning + ) + return attrs_to_tuple(self) < attrs_to_tuple(other) + else: + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if isinstance(other, self.__class__): + if other.__class__ is not self.__class__: + warnings.warn( + WARNING_CMP_ISINSTANCE % ("le",), DeprecationWarning + ) + return attrs_to_tuple(self) <= attrs_to_tuple(other) + else: + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if isinstance(other, self.__class__): + if other.__class__ is not self.__class__: + warnings.warn( + WARNING_CMP_ISINSTANCE % ("gt",), DeprecationWarning + ) + return attrs_to_tuple(self) > attrs_to_tuple(other) + else: + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. + """ + if isinstance(other, self.__class__): + if other.__class__ is not self.__class__: + warnings.warn( + WARNING_CMP_ISINSTANCE % ("ge",), DeprecationWarning + ) + return attrs_to_tuple(self) >= attrs_to_tuple(other) + else: + return NotImplemented + + return eq, ne, __lt__, __le__, __gt__, __ge__ + + +def _add_cmp(cls, attrs=None): + """ + Add comparison methods to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__eq__, cls.__ne__, cls.__lt__, cls.__le__, cls.__gt__, cls.__ge__ = _make_cmp( # noqa + attrs + ) + + return cls + + +_already_repring = threading.local() + + +def _make_repr(attrs, ns): + """ + Make a repr method for *attr_names* adding *ns* to the full name. + """ + attr_names = tuple(a.name for a in attrs if a.repr) + + def __repr__(self): + """ + Automatically created by attrs. + """ + try: + working_set = _already_repring.working_set + except AttributeError: + working_set = set() + _already_repring.working_set = working_set + + if id(self) in working_set: + return "..." + real_cls = self.__class__ + if ns is None: + qualname = getattr(real_cls, "__qualname__", None) + if qualname is not None: + class_name = qualname.rsplit(">.", 1)[-1] + else: + class_name = real_cls.__name__ + else: + class_name = ns + "." 
+ real_cls.__name__ + + # Since 'self' remains on the stack (i.e.: strongly referenced) for the + # duration of this call, it's safe to depend on id(...) stability, and + # not need to track the instance and therefore worry about properties + # like weakref- or hash-ability. + working_set.add(id(self)) + try: + result = [class_name, "("] + first = True + for name in attr_names: + if first: + first = False + else: + result.append(", ") + result.extend((name, "=", repr(getattr(self, name, NOTHING)))) + return "".join(result) + ")" + finally: + working_set.remove(id(self)) + + return __repr__ + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__repr__ = _make_repr(attrs, ns) + return cls + + +def _make_init(attrs, post_init, frozen, slots, cache_hash, base_attr_map): + attrs = [a for a in attrs if a.init or a.default is not NOTHING] + + # We cache the generated init methods for the same kinds of attributes. + sha1 = hashlib.sha1() + sha1.update(repr(attrs).encode("utf-8")) + unique_filename = "".format(sha1.hexdigest()) + + script, globs, annotations = _attrs_to_init_script( + attrs, frozen, slots, post_init, cache_hash, base_attr_map + ) + locs = {} + bytecode = compile(script, unique_filename, "exec") + attr_dict = dict((a.name, a) for a in attrs) + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + if frozen is True: + # Save the lookup overhead in __init__ if we need to circumvent + # immutability. + globs["_cached_setattr"] = _obj_setattr + eval(bytecode, globs, locs) + + # In order of debuggers like PDB being able to step through the code, + # we add a fake linecache entry. + linecache.cache[unique_filename] = ( + len(script), + None, + script.splitlines(True), + unique_filename, + ) + + __init__ = locs["__init__"] + __init__.__annotations__ = annotations + return __init__ + + +def _add_init(cls, frozen): + """ + Add a __init__ method to *cls*. If *frozen* is True, make it immutable. + """ + cls.__init__ = _make_init( + cls.__attrs_attrs__, + getattr(cls, "__attrs_post_init__", False), + frozen, + _is_slot_cls(cls), + cache_hash=False, + base_attr_map={}, + ) + return cls + + +def fields(cls): + """ + Return the tuple of ``attrs`` attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + :rtype: tuple (with name accessors) of :class:`attr.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. + """ + if not isclass(cls): + raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + raise NotAnAttrsClassError( + "{cls!r} is not an attrs-decorated class.".format(cls=cls) + ) + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of ``attrs`` attributes for a class, whose + keys are the attribute names. + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + :rtype: an ordered dict where keys are attribute names and values are + :class:`attr.Attribute`\\ s. This will be a :class:`dict` if it's + naturally ordered like on Python 3.6+ or an + :class:`~collections.OrderedDict` otherwise. + + .. 
versionadded:: 18.1.0 + """ + if not isclass(cls): + raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + raise NotAnAttrsClassError( + "{cls!r} is not an attrs-decorated class.".format(cls=cls) + ) + return ordered_dict(((a.name, a) for a in attrs)) + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Leaves all exceptions through. + + :param inst: Instance of a class with ``attrs`` attributes. + """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_cls(cls): + return "__slots__" in cls.__dict__ + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) + + +def _attrs_to_init_script( + attrs, frozen, slots, post_init, cache_hash, base_attr_map +): + """ + Return a script of an initializer for *attrs* and a dict of globals. + + The globals are expected by the generated script. + + If *frozen* is True, we cannot set the attributes directly so we use + a cached ``object.__setattr__``. + """ + lines = [] + any_slot_ancestors = any( + _is_slot_attr(a.name, base_attr_map) for a in attrs + ) + if frozen is True: + if slots is True: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup per + # assignment. + # Note _setattr will be used again below if cache_hash is True + "_setattr = _cached_setattr.__get__(self, self.__class__)" + ) + + def fmt_setter(attr_name, value_var): + return "_setattr('%(attr_name)s', %(value_var)s)" % { + "attr_name": attr_name, + "value_var": value_var, + } + + def fmt_setter_with_converter(attr_name, value_var): + conv_name = _init_converter_pat.format(attr_name) + return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % { + "attr_name": attr_name, + "value_var": value_var, + "conv": conv_name, + } + + else: + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. + # Note _inst_dict will be used again below if cache_hash is True + lines.append("_inst_dict = self.__dict__") + if any_slot_ancestors: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup + # per assignment. + "_setattr = _cached_setattr.__get__(self, self.__class__)" + ) + + def fmt_setter(attr_name, value_var): + if _is_slot_attr(attr_name, base_attr_map): + res = "_setattr('%(attr_name)s', %(value_var)s)" % { + "attr_name": attr_name, + "value_var": value_var, + } + else: + res = "_inst_dict['%(attr_name)s'] = %(value_var)s" % { + "attr_name": attr_name, + "value_var": value_var, + } + return res + + def fmt_setter_with_converter(attr_name, value_var): + conv_name = _init_converter_pat.format(attr_name) + if _is_slot_attr(attr_name, base_attr_map): + tmpl = "_setattr('%(attr_name)s', %(c)s(%(value_var)s))" + else: + tmpl = "_inst_dict['%(attr_name)s'] = %(c)s(%(value_var)s)" + return tmpl % { + "attr_name": attr_name, + "value_var": value_var, + "c": conv_name, + } + + else: + # Not frozen. 
+ def fmt_setter(attr_name, value): + return "self.%(attr_name)s = %(value)s" % { + "attr_name": attr_name, + "value": value, + } + + def fmt_setter_with_converter(attr_name, value_var): + conv_name = _init_converter_pat.format(attr_name) + return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % { + "attr_name": attr_name, + "value_var": value_var, + "conv": conv_name, + } + + args = [] + kw_only_args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. + names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + attr_name = a.name + arg_name = a.name.lstrip("_") + has_factory = isinstance(a.default, Factory) + if has_factory and a.default.takes_self: + maybe_self = "self" + else: + maybe_self = "" + if a.init is False: + if has_factory: + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + "({0})".format(maybe_self), + ) + ) + conv_name = _init_converter_pat.format(a.name) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + "({0})".format(maybe_self), + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + "attr_dict['{attr_name}'].default".format( + attr_name=attr_name + ), + ) + ) + conv_name = _init_converter_pat.format(a.name) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + "attr_dict['{attr_name}'].default".format( + attr_name=attr_name + ), + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = "{arg_name}=attr_dict['{attr_name}'].default".format( + arg_name=arg_name, attr_name=attr_name + ) + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + if a.converter is not None: + lines.append(fmt_setter_with_converter(attr_name, arg_name)) + names_for_globals[ + _init_converter_pat.format(a.name) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name)) + elif has_factory: + arg = "{arg_name}=NOTHING".format(arg_name=arg_name) + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + lines.append( + "if {arg_name} is not NOTHING:".format(arg_name=arg_name) + ) + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append( + " " + fmt_setter_with_converter(attr_name, arg_name) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "({0})".format(maybe_self), + ) + ) + names_for_globals[ + _init_converter_pat.format(a.name) + ] = a.converter + else: + lines.append(" " + fmt_setter(attr_name, arg_name)) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "({0})".format(maybe_self), + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + if a.converter is not None: + lines.append(fmt_setter_with_converter(attr_name, arg_name)) + names_for_globals[ + _init_converter_pat.format(a.name) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name)) + + if a.init is True and a.converter is None and a.type is not None: + annotations[arg_name] = a.type + + if attrs_to_validate: 
# we can skip this if there are no validators. + names_for_globals["_config"] = _config + lines.append("if _config._run_validators is True:") + for a in attrs_to_validate: + val_name = "__attr_validator_{}".format(a.name) + attr_name = "__attr_{}".format(a.name) + lines.append( + " {}(self, {}, self.{})".format(val_name, attr_name, a.name) + ) + names_for_globals[val_name] = a.validator + names_for_globals[attr_name] = a + if post_init: + lines.append("self.__attrs_post_init__()") + + # because this is set only after __attrs_post_init is called, a crash + # will result if post-init tries to access the hash code. This seemed + # preferable to setting this beforehand, in which case alteration to + # field values during post-init combined with post-init accessing the + # hash code would result in silent bugs. + if cache_hash: + if frozen: + if slots: + # if frozen and slots, then _setattr defined above + init_hash_cache = "_setattr('%s', %s)" + else: + # if frozen and not slots, then _inst_dict defined above + init_hash_cache = "_inst_dict['%s'] = %s" + else: + init_hash_cache = "self.%s = %s" + lines.append(init_hash_cache % (_hash_cache_field, "None")) + + args = ", ".join(args) + if kw_only_args: + if PY2: + raise PythonTooOldError( + "Keyword-only arguments only work on Python 3 and later." + ) + + args += "{leading_comma}*, {kw_only_args}".format( + leading_comma=", " if args else "", + kw_only_args=", ".join(kw_only_args), + ) + return ( + """\ +def __init__(self, {args}): + {lines} +""".format( + args=args, lines="\n ".join(lines) if lines else "pass" + ), + names_for_globals, + annotations, + ) + + +class Attribute(object): + """ + *Read-only* representation of an attribute. + + :attribute name: The name of the attribute. + + Plus *all* arguments of :func:`attr.ib`. + + For the version history of the fields, see :func:`attr.ib`. + """ + + __slots__ = ( + "name", + "default", + "validator", + "repr", + "cmp", + "hash", + "init", + "metadata", + "type", + "converter", + "kw_only", + ) + + def __init__( + self, + name, + default, + validator, + repr, + cmp, + hash, + init, + convert=None, + metadata=None, + type=None, + converter=None, + kw_only=False, + ): + # Cache this descriptor here to speed things up later. + bound_setattr = _obj_setattr.__get__(self, Attribute) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + if convert is not None: + if converter is not None: + raise RuntimeError( + "Can't pass both `convert` and `converter`. " + "Please use `converter` only." + ) + warnings.warn( + "The `convert` argument is deprecated in favor of `converter`." + " It will be removed after 2019/01.", + DeprecationWarning, + stacklevel=2, + ) + converter = convert + + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("cmp", cmp) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr( + "metadata", + ( + metadata_proxy(metadata) + if metadata + else _empty_metadata_singleton + ), + ) + bound_setattr("type", type) + bound_setattr("kw_only", kw_only) + + def __setattr__(self, name, value): + raise FrozenInstanceError() + + @property + def convert(self): + warnings.warn( + "The `convert` attribute is deprecated in favor of `converter`. 
" + "It will be removed after 2019/01.", + DeprecationWarning, + stacklevel=2, + ) + return self.converter + + @classmethod + def from_counting_attr(cls, name, ca, type=None): + # type holds the annotated value. deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + raise ValueError( + "Type annotation and type argument cannot both be present" + ) + inst_dict = { + k: getattr(ca, k) + for k in Attribute.__slots__ + if k + not in ( + "name", + "validator", + "default", + "type", + "convert", + ) # exclude methods and deprecated alias + } + return cls( + name=name, + validator=ca._validator, + default=ca._default, + type=type, + **inst_dict + ) + + # Don't use attr.assoc since fields(Attribute) doesn't work + def _assoc(self, **changes): + """ + Copy *self* and apply *changes*. + """ + new = copy.copy(self) + + new._setattrs(changes.items()) + + return new + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + metadata_proxy(value) + if value + else _empty_metadata_singleton, + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=True, + hash=(name != "metadata"), + init=True, + ) + for name in Attribute.__slots__ + if name != "convert" # XXX: remove once `convert` is gone +] + +Attribute = _add_hash( + _add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a), + attrs=[a for a in _a if a.hash], +) + + +class _CountingAttr(object): + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. Running into is most + likely the result of a bug like a forgotten `@attr.s` decorator. + """ + + __slots__ = ( + "counter", + "_default", + "repr", + "cmp", + "hash", + "init", + "metadata", + "_validator", + "converter", + "type", + "kw_only", + ) + __attrs_attrs__ = tuple( + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=True, + hash=True, + init=True, + kw_only=False, + ) + for name in ("counter", "_default", "repr", "cmp", "hash", "init") + ) + ( + Attribute( + name="metadata", + default=None, + validator=None, + repr=True, + cmp=True, + hash=False, + init=True, + kw_only=False, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + # If validator is a list/tuple, wrap it using helper validator. + if validator and isinstance(validator, (list, tuple)): + self._validator = and_(*validator) + else: + self._validator = validator + self.repr = repr + self.cmp = cmp + self.hash = hash + self.init = init + self.converter = converter + self.metadata = metadata + self.type = type + self.kw_only = kw_only + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. 
versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. + + :raises DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError() + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_cmp(_add_repr(_CountingAttr)) + + +@attrs(slots=True, init=False, hash=True) +class Factory(object): + """ + Stores a factory callable. + + If passed as the default value to :func:`attr.ib`, the factory is used to + generate a new value. + + :param callable factory: A callable that takes either none or exactly one + mandatory positional argument depending on *takes_self*. + :param bool takes_self: Pass the partially initialized instance that is + being initialized as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + factory = attrib() + takes_self = attrib() + + def __init__(self, factory, takes_self=False): + """ + `Factory` is part of the default machinery so if we want a default + value here, we have to implement it ourselves. + """ + self.factory = factory + self.takes_self = takes_self + + +def make_class(name, attrs, bases=(object,), **attributes_arguments): + """ + A quick way to create a new class called *name* with *attrs*. + + :param name: The name for the new class. + :type name: str + + :param attrs: A list of names or a dictionary of mappings of names to + attributes. + + If *attrs* is a list or an ordered dict (:class:`dict` on Python 3.6+, + :class:`collections.OrderedDict` otherwise), the order is deduced from + the order of the names or attributes inside *attrs*. Otherwise the + order of the definition of the attributes is used. + :type attrs: :class:`list` or :class:`dict` + + :param tuple bases: Classes that the new class will subclass. + + :param attributes_arguments: Passed unmodified to :func:`attr.s`. + + :return: A new class with *attrs*. + :rtype: type + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + """ + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = dict((a, attrib()) for a in attrs) + else: + raise TypeError("attrs argument must be a dict or a list.") + + post_init = cls_dict.pop("__attrs_post_init__", None) + type_ = type( + name, + bases, + {} if post_init is None else {"__attrs_post_init__": post_init}, + ) + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + except (AttributeError, ValueError): + pass + + return _attrs(these=cls_dict, **attributes_arguments)(type_) + + +# These are required by within this module so we define them here and merely +# import into .validators. + + +@attrs(slots=True, hash=True) +class _AndValidator(object): + """ + Compose many validators to a single one. 
+ """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + :param validators: Arbitrary number of validators. + :type validators: callables + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) diff --git a/python/attr/converters.py b/python/attr/converters.py new file mode 100644 index 0000000..37c4a07 --- /dev/null +++ b/python/attr/converters.py @@ -0,0 +1,78 @@ +""" +Commonly useful converters. +""" + +from __future__ import absolute_import, division, print_function + +from ._make import NOTHING, Factory + + +def optional(converter): + """ + A converter that allows an attribute to be optional. An optional attribute + is one which can be set to ``None``. + + :param callable converter: the converter that is used for non-``None`` + values. + + .. versionadded:: 17.1.0 + """ + + def optional_converter(val): + if val is None: + return None + return converter(val) + + return optional_converter + + +def default_if_none(default=NOTHING, factory=None): + """ + A converter that allows to replace ``None`` values by *default* or the + result of *factory*. + + :param default: Value to be used if ``None`` is passed. Passing an instance + of :class:`attr.Factory` is supported, however the ``takes_self`` option + is *not*. + :param callable factory: A callable that takes not parameters whose result + is used if ``None`` is passed. + + :raises TypeError: If **neither** *default* or *factory* is passed. + :raises TypeError: If **both** *default* and *factory* are passed. + :raises ValueError: If an instance of :class:`attr.Factory` is passed with + ``takes_self=True``. + + .. versionadded:: 18.2.0 + """ + if default is NOTHING and factory is None: + raise TypeError("Must pass either `default` or `factory`.") + + if default is not NOTHING and factory is not None: + raise TypeError( + "Must pass either `default` or `factory` but not both." + ) + + if factory is not None: + default = Factory(factory) + + if isinstance(default, Factory): + if default.takes_self: + raise ValueError( + "`takes_self` is not supported by default_if_none." + ) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter diff --git a/python/attr/converters.pyi b/python/attr/converters.pyi new file mode 100644 index 0000000..63b2a38 --- /dev/null +++ b/python/attr/converters.pyi @@ -0,0 +1,12 @@ +from typing import TypeVar, Optional, Callable, overload +from . import _ConverterType + +_T = TypeVar("_T") + +def optional( + converter: _ConverterType[_T] +) -> _ConverterType[Optional[_T]]: ... +@overload +def default_if_none(default: _T) -> _ConverterType[_T]: ... +@overload +def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType[_T]: ... 
diff --git a/python/attr/exceptions.py b/python/attr/exceptions.py new file mode 100644 index 0000000..b12e41e --- /dev/null +++ b/python/attr/exceptions.py @@ -0,0 +1,57 @@ +from __future__ import absolute_import, division, print_function + + +class FrozenInstanceError(AttributeError): + """ + A frozen/immutable instance has been attempted to be modified. + + It mirrors the behavior of ``namedtuples`` by using the same error message + and subclassing :exc:`AttributeError`. + + .. versionadded:: 16.1.0 + """ + + msg = "can't set attribute" + args = [msg] + + +class AttrsAttributeNotFoundError(ValueError): + """ + An ``attrs`` function couldn't find an attribute that the user asked for. + + .. versionadded:: 16.2.0 + """ + + +class NotAnAttrsClassError(ValueError): + """ + A non-``attrs`` class has been passed into an ``attrs`` function. + + .. versionadded:: 16.2.0 + """ + + +class DefaultAlreadySetError(RuntimeError): + """ + A default has been set using ``attr.ib()`` and is attempted to be reset + using the decorator. + + .. versionadded:: 17.1.0 + """ + + +class UnannotatedAttributeError(RuntimeError): + """ + A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type + annotation. + + .. versionadded:: 17.3.0 + """ + + +class PythonTooOldError(RuntimeError): + """ + An ``attrs`` feature requiring a more recent python version has been used. + + .. versionadded:: 18.2.0 + """ diff --git a/python/attr/exceptions.pyi b/python/attr/exceptions.pyi new file mode 100644 index 0000000..48fffcc --- /dev/null +++ b/python/attr/exceptions.pyi @@ -0,0 +1,7 @@ +class FrozenInstanceError(AttributeError): + msg: str = ... + +class AttrsAttributeNotFoundError(ValueError): ... +class NotAnAttrsClassError(ValueError): ... +class DefaultAlreadySetError(RuntimeError): ... +class UnannotatedAttributeError(RuntimeError): ... diff --git a/python/attr/filters.py b/python/attr/filters.py new file mode 100644 index 0000000..f1c69b8 --- /dev/null +++ b/python/attr/filters.py @@ -0,0 +1,52 @@ +""" +Commonly useful filters for :func:`attr.asdict`. +""" + +from __future__ import absolute_import, division, print_function + +from ._compat import isclass +from ._make import Attribute + + +def _split_what(what): + """ + Returns a tuple of `frozenset`s of classes and attributes. + """ + return ( + frozenset(cls for cls in what if isclass(cls)), + frozenset(cls for cls in what if isinstance(cls, Attribute)), + ) + + +def include(*what): + """ + Whitelist *what*. + + :param what: What to whitelist. + :type what: :class:`list` of :class:`type` or :class:`attr.Attribute`\\ s + + :rtype: :class:`callable` + """ + cls, attrs = _split_what(what) + + def include_(attribute, value): + return value.__class__ in cls or attribute in attrs + + return include_ + + +def exclude(*what): + """ + Blacklist *what*. + + :param what: What to blacklist. + :type what: :class:`list` of classes or :class:`attr.Attribute`\\ s. + + :rtype: :class:`callable` + """ + cls, attrs = _split_what(what) + + def exclude_(attribute, value): + return value.__class__ not in cls and attribute not in attrs + + return exclude_ diff --git a/python/attr/filters.pyi b/python/attr/filters.pyi new file mode 100644 index 0000000..a618140 --- /dev/null +++ b/python/attr/filters.pyi @@ -0,0 +1,5 @@ +from typing import Union +from . import Attribute, _FilterType + +def include(*what: Union[type, Attribute]) -> _FilterType: ... +def exclude(*what: Union[type, Attribute]) -> _FilterType: ... 
diff --git a/python/attr/py.typed b/python/attr/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/python/attr/validators.py b/python/attr/validators.py new file mode 100644 index 0000000..f12d0aa --- /dev/null +++ b/python/attr/validators.py @@ -0,0 +1,170 @@ +""" +Commonly useful validators. +""" + +from __future__ import absolute_import, division, print_function + +from ._make import _AndValidator, and_, attrib, attrs + + +__all__ = ["and_", "in_", "instance_of", "optional", "provides"] + + +@attrs(repr=False, slots=True, hash=True) +class _InstanceOfValidator(object): + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not isinstance(value, self.type): + raise TypeError( + "'{name}' must be {type!r} (got {value!r} that is a " + "{actual!r}).".format( + name=attr.name, + type=self.type, + actual=value.__class__, + value=value, + ), + attr, + self.type, + value, + ) + + def __repr__(self): + return "".format( + type=self.type + ) + + +def instance_of(type): + """ + A validator that raises a :exc:`TypeError` if the initializer is called + with a wrong type for this particular attribute (checks are performed using + :func:`isinstance` therefore it's also valid to pass a tuple of types). + + :param type: The type to check for. + :type type: type or tuple of types + + :raises TypeError: With a human readable error message, the attribute + (of type :class:`attr.Attribute`), the expected type, and the value it + got. + """ + return _InstanceOfValidator(type) + + +@attrs(repr=False, slots=True, hash=True) +class _ProvidesValidator(object): + interface = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.interface.providedBy(value): + raise TypeError( + "'{name}' must provide {interface!r} which {value!r} " + "doesn't.".format( + name=attr.name, interface=self.interface, value=value + ), + attr, + self.interface, + value, + ) + + def __repr__(self): + return "".format( + interface=self.interface + ) + + +def provides(interface): + """ + A validator that raises a :exc:`TypeError` if the initializer is called + with an object that does not provide the requested *interface* (checks are + performed using ``interface.providedBy(value)`` (see `zope.interface + `_). + + :param zope.interface.Interface interface: The interface to check for. + + :raises TypeError: With a human readable error message, the attribute + (of type :class:`attr.Attribute`), the expected interface, and the + value it got. + """ + return _ProvidesValidator(interface) + + +@attrs(repr=False, slots=True, hash=True) +class _OptionalValidator(object): + validator = attrib() + + def __call__(self, inst, attr, value): + if value is None: + return + + self.validator(inst, attr, value) + + def __repr__(self): + return "".format( + what=repr(self.validator) + ) + + +def optional(validator): + """ + A validator that makes an attribute optional. An optional attribute is one + which can be set to ``None`` in addition to satisfying the requirements of + the sub-validator. + + :param validator: A validator (or a list of validators) that is used for + non-``None`` values. + :type validator: callable or :class:`list` of callables. + + .. versionadded:: 15.1.0 + .. versionchanged:: 17.1.0 *validator* can be a list of validators. 
+ """ + if isinstance(validator, list): + return _OptionalValidator(_AndValidator(validator)) + return _OptionalValidator(validator) + + +@attrs(repr=False, slots=True, hash=True) +class _InValidator(object): + options = attrib() + + def __call__(self, inst, attr, value): + try: + in_options = value in self.options + except TypeError as e: # e.g. `1 in "abc"` + in_options = False + + if not in_options: + raise ValueError( + "'{name}' must be in {options!r} (got {value!r})".format( + name=attr.name, options=self.options, value=value + ) + ) + + def __repr__(self): + return "".format( + options=self.options + ) + + +def in_(options): + """ + A validator that raises a :exc:`ValueError` if the initializer is called + with a value that does not belong in the options provided. The check is + performed using ``value in options``. + + :param options: Allowed options. + :type options: list, tuple, :class:`enum.Enum`, ... + + :raises ValueError: With a human readable error message, the attribute (of + type :class:`attr.Attribute`), the expected options, and the value it + got. + + .. versionadded:: 17.1.0 + """ + return _InValidator(options) diff --git a/python/attr/validators.pyi b/python/attr/validators.pyi new file mode 100644 index 0000000..abbaedf --- /dev/null +++ b/python/attr/validators.pyi @@ -0,0 +1,14 @@ +from typing import Container, List, Union, TypeVar, Type, Any, Optional, Tuple +from . import _ValidatorType + +_T = TypeVar("_T") + +def instance_of( + type: Union[Tuple[Type[_T], ...], Type[_T]] +) -> _ValidatorType[_T]: ... +def provides(interface: Any) -> _ValidatorType[Any]: ... +def optional( + validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]]] +) -> _ValidatorType[Optional[_T]]: ... +def in_(options: Container[_T]) -> _ValidatorType[_T]: ... +def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... diff --git a/python/dateutil/__init__.py b/python/dateutil/__init__.py new file mode 100644 index 0000000..796ef3d --- /dev/null +++ b/python/dateutil/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +from ._version import VERSION as __version__ diff --git a/python/dateutil/_common.py b/python/dateutil/_common.py new file mode 100644 index 0000000..e8b4af7 --- /dev/null +++ b/python/dateutil/_common.py @@ -0,0 +1,34 @@ +""" +Common code used in multiple modules. +""" + + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + __hash__ = None + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) diff --git a/python/dateutil/_version.py b/python/dateutil/_version.py new file mode 100644 index 0000000..c1a0357 --- /dev/null +++ b/python/dateutil/_version.py @@ -0,0 +1,10 @@ +""" +Contains information about the dateutil version. 
+""" + +VERSION_MAJOR = 2 +VERSION_MINOR = 6 +VERSION_PATCH = 1 + +VERSION_TUPLE = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH) +VERSION = '.'.join(map(str, VERSION_TUPLE)) diff --git a/python/dateutil/easter.py b/python/dateutil/easter.py new file mode 100644 index 0000000..e4def97 --- /dev/null +++ b/python/dateutil/easter.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic easter computing method for any given year, using +Western, Orthodox or Julian algorithms. +""" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. + + This algorithm implements three different easter + calculation methods: + + 1 - Original calculation in Julian calendar, valid in + dates after 326 AD + 2 - Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3 - Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + * ``EASTER_JULIAN = 1`` + * ``EASTER_ORTHODOX = 2`` + * ``EASTER_WESTERN = 3`` + + The default method is method 3. + + More about the algorithm may be found at: + + http://users.chariot.net.au/~gmarts/eastalg.htm + + and + + http://www.tondering.dk/claus/calendar.html + + """ + + if not (1 <= method <= 3): + raise ValueError("invalid method") + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g + 15) % 30 + j = (y + y//4 + i) % 7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e + y//100 - 16 - (y//100 - 16)//4 + else: + # New method + c = y//100 + h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 + i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) + j = (y + y//4 + i + 2 - c + c//4) % 7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i - j + e + d = 1 + (p + 27 + (p + 6)//40) % 31 + m = 3 + (p + 26)//30 + return datetime.date(int(y), int(m), int(d)) diff --git a/python/dateutil/parser.py b/python/dateutil/parser.py new file mode 100644 index 0000000..595331f --- /dev/null +++ b/python/dateutil/parser.py @@ -0,0 +1,1374 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic date/time string parser which is able to parse +most known formats to represent a date and/or time. + +This module attempts to be forgiving with regards to unlikely input formats, +returning a datetime object even for dates which are ambiguous. If an element +of a date/time stamp is omitted, the following rules are applied: +- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour + on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is + specified. 
+- If a time zone is omitted, a timezone-naive datetime is returned. + +If any other elements are missing, they are taken from the +:class:`datetime.datetime` object passed to the parameter ``default``. If this +results in a day number exceeding the valid number of days per month, the +value falls back to the end of the month. + +Additional resources about date/time string formats can be found below: + +- `A summary of the international standard date and time notation + `_ +- `W3C Date and Time Formats `_ +- `Time Formats (Planetary Rings Node) `_ +- `CPAN ParseDate module + `_ +- `Java SimpleDateFormat Class + `_ +""" +from __future__ import unicode_literals + +import datetime +import string +import time +import collections +import re +from io import StringIO +from calendar import monthrange + +from six import text_type, binary_type, integer_types + +from . import relativedelta +from . import tz + +__all__ = ["parse", "parserinfo"] + + +class _timelex(object): + # Fractional seconds are sometimes split by a comma + _split_decimal = re.compile("([.,])") + + def __init__(self, instream): + if isinstance(instream, binary_type): + instream = instream.decode() + + if isinstance(instream, text_type): + instream = StringIO(instream) + + if getattr(instream, 'read', None) is None: + raise TypeError('Parser must be a string or character stream, not ' + '{itype}'.format(itype=instream.__class__.__name__)) + + self.instream = instream + self.charstack = [] + self.tokenstack = [] + self.eof = False + + def get_token(self): + """ + This function breaks the time string into lexical units (tokens), which + can be parsed by the parser. Lexical units are demarcated by changes in + the character set, so any continuous string of letters is considered + one unit, any continuous string of numbers is considered one unit. + + The main complication arises from the fact that dots ('.') can be used + both as separators (e.g. "Sep.20.2009") or decimal points (e.g. + "4:30:21.447"). As such, it is necessary to read the full context of + any dot-separated strings before breaking it into tokens; as such, this + function maintains a "token stack", for when the ambiguous context + demands that multiple tokens be parsed at once. + """ + if self.tokenstack: + return self.tokenstack.pop(0) + + seenletters = False + token = None + state = None + + while not self.eof: + # We only realize that we've reached the end of a token when we + # find a character that's not part of the current token - since + # that character may be part of the next token, it's stored in the + # charstack. + if self.charstack: + nextchar = self.charstack.pop(0) + else: + nextchar = self.instream.read(1) + while nextchar == '\x00': + nextchar = self.instream.read(1) + + if not nextchar: + self.eof = True + break + elif not state: + # First character of the token - determines if we're starting + # to parse a word, a number or something else. + token = nextchar + if self.isword(nextchar): + state = 'a' + elif self.isnum(nextchar): + state = '0' + elif self.isspace(nextchar): + token = ' ' + break # emit token + else: + break # emit token + elif state == 'a': + # If we've already started reading a word, we keep reading + # letters until we find something that's not part of a word. + seenletters = True + if self.isword(nextchar): + token += nextchar + elif nextchar == '.': + token += nextchar + state = 'a.' 
+ else: + self.charstack.append(nextchar) + break # emit token + elif state == '0': + # If we've already started reading a number, we keep reading + # numbers until we find something that doesn't fit. + if self.isnum(nextchar): + token += nextchar + elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == 'a.': + # If we've seen some letters and a dot separator, continue + # parsing, and the tokens will be broken up later. + seenletters = True + if nextchar == '.' or self.isword(nextchar): + token += nextchar + elif self.isnum(nextchar) and token[-1] == '.': + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0.': + # If we've seen at least one dot separator, keep going, we'll + # break up the tokens later. + if nextchar == '.' or self.isnum(nextchar): + token += nextchar + elif self.isword(nextchar) and token[-1] == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + + if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or + token[-1] in '.,')): + l = self._split_decimal.split(token) + token = l[0] + for tok in l[1:]: + if tok: + self.tokenstack.append(tok) + + if state == '0.' and token.count('.') == 0: + token = token.replace(',', '.') + + return token + + def __iter__(self): + return self + + def __next__(self): + token = self.get_token() + if token is None: + raise StopIteration + + return token + + def next(self): + return self.__next__() # Python 2.x support + + @classmethod + def split(cls, s): + return list(cls(s)) + + @classmethod + def isword(cls, nextchar): + """ Whether or not the next character is part of a word """ + return nextchar.isalpha() + + @classmethod + def isnum(cls, nextchar): + """ Whether the next character is part of a number """ + return nextchar.isdigit() + + @classmethod + def isspace(cls, nextchar): + """ Whether the next character is whitespace """ + return nextchar.isspace() + + +class _resultbase(object): + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def _repr(self, classname): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (classname, ", ".join(l)) + + def __len__(self): + return (sum(getattr(self, attr) is not None + for attr in self.__slots__)) + + def __repr__(self): + return self._repr(self.__class__.__name__) + + +class parserinfo(object): + """ + Class which handles what inputs are accepted. Subclass this to customize + the language and acceptable values for each parameter. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. Default is ``False``. + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + Default is ``False``. 
+ """ + + # m from a.m/p.m, t from ISO T separator + JUMP = [" ", ".", ",", ";", "-", "/", "'", + "at", "on", "and", "ad", "m", "t", "of", + "st", "nd", "rd", "th"] + + WEEKDAYS = [("Mon", "Monday"), + ("Tue", "Tuesday"), + ("Wed", "Wednesday"), + ("Thu", "Thursday"), + ("Fri", "Friday"), + ("Sat", "Saturday"), + ("Sun", "Sunday")] + MONTHS = [("Jan", "January"), + ("Feb", "February"), + ("Mar", "March"), + ("Apr", "April"), + ("May", "May"), + ("Jun", "June"), + ("Jul", "July"), + ("Aug", "August"), + ("Sep", "Sept", "September"), + ("Oct", "October"), + ("Nov", "November"), + ("Dec", "December")] + HMS = [("h", "hour", "hours"), + ("m", "minute", "minutes"), + ("s", "second", "seconds")] + AMPM = [("am", "a"), + ("pm", "p")] + UTCZONE = ["UTC", "GMT", "Z"] + PERTAIN = ["of"] + TZOFFSET = {} + + def __init__(self, dayfirst=False, yearfirst=False): + self._jump = self._convert(self.JUMP) + self._weekdays = self._convert(self.WEEKDAYS) + self._months = self._convert(self.MONTHS) + self._hms = self._convert(self.HMS) + self._ampm = self._convert(self.AMPM) + self._utczone = self._convert(self.UTCZONE) + self._pertain = self._convert(self.PERTAIN) + + self.dayfirst = dayfirst + self.yearfirst = yearfirst + + self._year = time.localtime().tm_year + self._century = self._year // 100 * 100 + + def _convert(self, lst): + dct = {} + for i, v in enumerate(lst): + if isinstance(v, tuple): + for v in v: + dct[v.lower()] = i + else: + dct[v.lower()] = i + return dct + + def jump(self, name): + return name.lower() in self._jump + + def weekday(self, name): + if len(name) >= min(len(n) for n in self._weekdays.keys()): + try: + return self._weekdays[name.lower()] + except KeyError: + pass + return None + + def month(self, name): + if len(name) >= min(len(n) for n in self._months.keys()): + try: + return self._months[name.lower()] + 1 + except KeyError: + pass + return None + + def hms(self, name): + try: + return self._hms[name.lower()] + except KeyError: + return None + + def ampm(self, name): + try: + return self._ampm[name.lower()] + except KeyError: + return None + + def pertain(self, name): + return name.lower() in self._pertain + + def utczone(self, name): + return name.lower() in self._utczone + + def tzoffset(self, name): + if name in self._utczone: + return 0 + + return self.TZOFFSET.get(name) + + def convertyear(self, year, century_specified=False): + if year < 100 and not century_specified: + year += self._century + if abs(year - self._year) >= 50: + if year < self._year: + year += 100 + else: + year -= 100 + return year + + def validate(self, res): + # move to info + if res.year is not None: + res.year = self.convertyear(res.year, res.century_specified) + + if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z': + res.tzname = "UTC" + res.tzoffset = 0 + elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): + res.tzoffset = 0 + return True + + +class _ymd(list): + def __init__(self, tzstr, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + self.century_specified = False + self.tzstr = tzstr + + @staticmethod + def token_could_be_year(token, year): + try: + return int(token) == year + except ValueError: + return False + + @staticmethod + def find_potential_year_tokens(year, tokens): + return [token for token in tokens if _ymd.token_could_be_year(token, year)] + + def find_probable_year_index(self, tokens): + """ + attempt to deduce if a pre 100 year was lost + due to padded zeros being taken off + """ + for index, token in enumerate(self): + 
potential_year_tokens = _ymd.find_potential_year_tokens(token, tokens) + if len(potential_year_tokens) == 1 and len(potential_year_tokens[0]) > 2: + return index + + def append(self, val): + if hasattr(val, '__len__'): + if val.isdigit() and len(val) > 2: + self.century_specified = True + elif val > 100: + self.century_specified = True + + super(self.__class__, self).append(int(val)) + + def resolve_ymd(self, mstridx, yearfirst, dayfirst): + len_ymd = len(self) + year, month, day = (None, None, None) + + if len_ymd > 3: + raise ValueError("More than three YMD values") + elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2): + # One member, or two members with a month string + if mstridx != -1: + month = self[mstridx] + del self[mstridx] + + if len_ymd > 1 or mstridx == -1: + if self[0] > 31: + year = self[0] + else: + day = self[0] + + elif len_ymd == 2: + # Two members with numbers + if self[0] > 31: + # 99-01 + year, month = self + elif self[1] > 31: + # 01-99 + month, year = self + elif dayfirst and self[1] <= 12: + # 13-01 + day, month = self + else: + # 01-13 + month, day = self + + elif len_ymd == 3: + # Three members + if mstridx == 0: + month, day, year = self + elif mstridx == 1: + if self[0] > 31 or (yearfirst and self[2] <= 31): + # 99-Jan-01 + year, month, day = self + else: + # 01-Jan-01 + # Give precendence to day-first, since + # two-digit years is usually hand-written. + day, month, year = self + + elif mstridx == 2: + # WTF!? + if self[1] > 31: + # 01-99-Jan + day, year, month = self + else: + # 99-01-Jan + year, day, month = self + + else: + if self[0] > 31 or \ + self.find_probable_year_index(_timelex.split(self.tzstr)) == 0 or \ + (yearfirst and self[1] <= 12 and self[2] <= 31): + # 99-01-01 + if dayfirst and self[2] <= 12: + year, day, month = self + else: + year, month, day = self + elif self[0] > 12 or (dayfirst and self[1] <= 12): + # 13-01-01 + day, month, year = self + else: + # 01-13-01 + month, day, year = self + + return year, month, day + + +class parser(object): + def __init__(self, info=None): + self.info = info or parserinfo() + + def parse(self, timestr, default=None, ignoretz=False, tzinfos=None, **kwargs): + """ + Parse the date/time string into a :class:`datetime.datetime` object. + + :param timestr: + Any date/time string using the supported formats. + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a + naive :class:`datetime.datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in minutes or a :class:`tzinfo` object. + + .. 
doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param **kwargs: + Keyword arguments as passed to ``_parse()``. + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises TypeError: + Raised for non-string or character stream input. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. + """ + + if default is None: + default = datetime.datetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) + + res, skipped_tokens = self._parse(timestr, **kwargs) + + if res is None: + raise ValueError("Unknown string format") + + if len(res) == 0: + raise ValueError("String does not contain a date.") + + repl = {} + for attr in ("year", "month", "day", "hour", + "minute", "second", "microsecond"): + value = getattr(res, attr) + if value is not None: + repl[attr] = value + + if 'day' not in repl: + # If the default day exceeds the last day of the month, fall back to + # the end of the month. + cyear = default.year if res.year is None else res.year + cmonth = default.month if res.month is None else res.month + cday = default.day if res.day is None else res.day + + if cday > monthrange(cyear, cmonth)[1]: + repl['day'] = monthrange(cyear, cmonth)[1] + + ret = default.replace(**repl) + + if res.weekday is not None and not res.day: + ret = ret+relativedelta.relativedelta(weekday=res.weekday) + + if not ignoretz: + if (isinstance(tzinfos, collections.Callable) or + tzinfos and res.tzname in tzinfos): + + if isinstance(tzinfos, collections.Callable): + tzdata = tzinfos(res.tzname, res.tzoffset) + else: + tzdata = tzinfos.get(res.tzname) + + if isinstance(tzdata, datetime.tzinfo): + tzinfo = tzdata + elif isinstance(tzdata, text_type): + tzinfo = tz.tzstr(tzdata) + elif isinstance(tzdata, integer_types): + tzinfo = tz.tzoffset(res.tzname, tzdata) + else: + raise ValueError("Offset must be tzinfo subclass, " + "tz string, or int offset.") + ret = ret.replace(tzinfo=tzinfo) + elif res.tzname and res.tzname in time.tzname: + ret = ret.replace(tzinfo=tz.tzlocal()) + elif res.tzoffset == 0: + ret = ret.replace(tzinfo=tz.tzutc()) + elif res.tzoffset: + ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) + + if kwargs.get('fuzzy_with_tokens', False): + return ret, skipped_tokens + else: + return ret + + class _result(_resultbase): + __slots__ = ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond", + "tzname", "tzoffset", "ampm"] + + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, + fuzzy_with_tokens=False): + """ + Private method which performs the heavy lifting of parsing, called from + ``parse()``, which passes on its ``kwargs`` to this function. 
+ + :param timestr: + The string to parse. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. If set to ``None``, this value is retrieved from the + current :class:`parserinfo` object (which itself defaults to + ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + If this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + """ + if fuzzy_with_tokens: + fuzzy = True + + info = self.info + + if dayfirst is None: + dayfirst = info.dayfirst + + if yearfirst is None: + yearfirst = info.yearfirst + + res = self._result() + l = _timelex.split(timestr) # Splits the timestr into tokens + + # keep up with the last token skipped so we can recombine + # consecutively skipped tokens (-2 for when i begins at 0). 
+ last_skipped_token_i = -2 + skipped_tokens = list() + + try: + # year/month/day list + ymd = _ymd(timestr) + + # Index of the month string in ymd + mstridx = -1 + + len_l = len(l) + i = 0 + while i < len_l: + + # Check if it's a number + try: + value_repr = l[i] + value = float(value_repr) + except ValueError: + value = None + + if value is not None: + # Token is a number + len_li = len(l[i]) + i += 1 + + if (len(ymd) == 3 and len_li in (2, 4) + and res.hour is None and (i >= len_l or (l[i] != ':' and + info.hms(l[i]) is None))): + # 19990101T23[59] + s = l[i-1] + res.hour = int(s[:2]) + + if len_li == 4: + res.minute = int(s[2:]) + + elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6): + # YYMMDD or HHMMSS[.ss] + s = l[i-1] + + if not ymd and l[i-1].find('.') == -1: + #ymd.append(info.convertyear(int(s[:2]))) + + ymd.append(s[:2]) + ymd.append(s[2:4]) + ymd.append(s[4:]) + else: + # 19990101T235959[.59] + res.hour = int(s[:2]) + res.minute = int(s[2:4]) + res.second, res.microsecond = _parsems(s[4:]) + + elif len_li in (8, 12, 14): + # YYYYMMDD + s = l[i-1] + ymd.append(s[:4]) + ymd.append(s[4:6]) + ymd.append(s[6:8]) + + if len_li > 8: + res.hour = int(s[8:10]) + res.minute = int(s[10:12]) + + if len_li > 12: + res.second = int(s[12:]) + + elif ((i < len_l and info.hms(l[i]) is not None) or + (i+1 < len_l and l[i] == ' ' and + info.hms(l[i+1]) is not None)): + + # HH[ ]h or MM[ ]m or SS[.ss][ ]s + if l[i] == ' ': + i += 1 + + idx = info.hms(l[i]) + + while True: + if idx == 0: + res.hour = int(value) + + if value % 1: + res.minute = int(60*(value % 1)) + + elif idx == 1: + res.minute = int(value) + + if value % 1: + res.second = int(60*(value % 1)) + + elif idx == 2: + res.second, res.microsecond = \ + _parsems(value_repr) + + i += 1 + + if i >= len_l or idx == 2: + break + + # 12h00 + try: + value_repr = l[i] + value = float(value_repr) + except ValueError: + break + else: + i += 1 + idx += 1 + + if i < len_l: + newidx = info.hms(l[i]) + + if newidx is not None: + idx = newidx + + elif (i == len_l and l[i-2] == ' ' and + info.hms(l[i-3]) is not None): + # X h MM or X m SS + idx = info.hms(l[i-3]) + + if idx == 0: # h + res.minute = int(value) + + sec_remainder = value % 1 + if sec_remainder: + res.second = int(60 * sec_remainder) + elif idx == 1: # m + res.second, res.microsecond = \ + _parsems(value_repr) + + # We don't need to advance the tokens here because the + # i == len_l call indicates that we're looking at all + # the tokens already. 
+ + elif i+1 < len_l and l[i] == ':': + # HH:MM[:SS[.ss]] + res.hour = int(value) + i += 1 + value = float(l[i]) + res.minute = int(value) + + if value % 1: + res.second = int(60*(value % 1)) + + i += 1 + + if i < len_l and l[i] == ':': + res.second, res.microsecond = _parsems(l[i+1]) + i += 2 + + elif i < len_l and l[i] in ('-', '/', '.'): + sep = l[i] + ymd.append(value_repr) + i += 1 + + if i < len_l and not info.jump(l[i]): + try: + # 01-01[-01] + ymd.append(l[i]) + except ValueError: + # 01-Jan[-01] + value = info.month(l[i]) + + if value is not None: + ymd.append(value) + assert mstridx == -1 + mstridx = len(ymd)-1 + else: + return None, None + + i += 1 + + if i < len_l and l[i] == sep: + # We have three members + i += 1 + value = info.month(l[i]) + + if value is not None: + ymd.append(value) + mstridx = len(ymd)-1 + assert mstridx == -1 + else: + ymd.append(l[i]) + + i += 1 + elif i >= len_l or info.jump(l[i]): + if i+1 < len_l and info.ampm(l[i+1]) is not None: + # 12 am + res.hour = int(value) + + if res.hour < 12 and info.ampm(l[i+1]) == 1: + res.hour += 12 + elif res.hour == 12 and info.ampm(l[i+1]) == 0: + res.hour = 0 + + i += 1 + else: + # Year, month or day + ymd.append(value) + i += 1 + elif info.ampm(l[i]) is not None: + + # 12am + res.hour = int(value) + + if res.hour < 12 and info.ampm(l[i]) == 1: + res.hour += 12 + elif res.hour == 12 and info.ampm(l[i]) == 0: + res.hour = 0 + i += 1 + + elif not fuzzy: + return None, None + else: + i += 1 + continue + + # Check weekday + value = info.weekday(l[i]) + if value is not None: + res.weekday = value + i += 1 + continue + + # Check month name + value = info.month(l[i]) + if value is not None: + ymd.append(value) + assert mstridx == -1 + mstridx = len(ymd)-1 + + i += 1 + if i < len_l: + if l[i] in ('-', '/'): + # Jan-01[-99] + sep = l[i] + i += 1 + ymd.append(l[i]) + i += 1 + + if i < len_l and l[i] == sep: + # Jan-01-99 + i += 1 + ymd.append(l[i]) + i += 1 + + elif (i+3 < len_l and l[i] == l[i+2] == ' ' + and info.pertain(l[i+1])): + # Jan of 01 + # In this case, 01 is clearly year + try: + value = int(l[i+3]) + except ValueError: + # Wrong guess + pass + else: + # Convert it here to become unambiguous + ymd.append(str(info.convertyear(value))) + i += 4 + continue + + # Check am/pm + value = info.ampm(l[i]) + if value is not None: + # For fuzzy parsing, 'a' or 'am' (both valid English words) + # may erroneously trigger the AM/PM flag. Deal with that + # here. + val_is_ampm = True + + # If there's already an AM/PM flag, this one isn't one. 
+ if fuzzy and res.ampm is not None: + val_is_ampm = False + + # If AM/PM is found and hour is not, raise a ValueError + if res.hour is None: + if fuzzy: + val_is_ampm = False + else: + raise ValueError('No hour specified with ' + + 'AM or PM flag.') + elif not 0 <= res.hour <= 12: + # If AM/PM is found, it's a 12 hour clock, so raise + # an error for invalid range + if fuzzy: + val_is_ampm = False + else: + raise ValueError('Invalid hour specified for ' + + '12-hour clock.') + + if val_is_ampm: + if value == 1 and res.hour < 12: + res.hour += 12 + elif value == 0 and res.hour == 12: + res.hour = 0 + + res.ampm = value + + elif fuzzy: + last_skipped_token_i = self._skip_token(skipped_tokens, + last_skipped_token_i, i, l) + i += 1 + continue + + # Check for a timezone name + if (res.hour is not None and len(l[i]) <= 5 and + res.tzname is None and res.tzoffset is None and + not [x for x in l[i] if x not in + string.ascii_uppercase]): + res.tzname = l[i] + res.tzoffset = info.tzoffset(res.tzname) + i += 1 + + # Check for something like GMT+3, or BRST+3. Notice + # that it doesn't mean "I am 3 hours after GMT", but + # "my time +3 is GMT". If found, we reverse the + # logic so that timezone parsing code will get it + # right. + if i < len_l and l[i] in ('+', '-'): + l[i] = ('+', '-')[l[i] == '+'] + res.tzoffset = None + if info.utczone(res.tzname): + # With something like GMT+3, the timezone + # is *not* GMT. + res.tzname = None + + continue + + # Check for a numbered timezone + if res.hour is not None and l[i] in ('+', '-'): + signal = (-1, 1)[l[i] == '+'] + i += 1 + len_li = len(l[i]) + + if len_li == 4: + # -0300 + res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60 + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + res.tzoffset = int(l[i])*3600+int(l[i+2])*60 + i += 2 + elif len_li <= 2: + # -[0]3 + res.tzoffset = int(l[i][:2])*3600 + else: + return None, None + i += 1 + + res.tzoffset *= signal + + # Look for a timezone name between parenthesis + if (i+3 < len_l and + info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and + 3 <= len(l[i+2]) <= 5 and + not [x for x in l[i+2] + if x not in string.ascii_uppercase]): + # -0300 (BRST) + res.tzname = l[i+2] + i += 4 + continue + + # Check jumps + if not (info.jump(l[i]) or fuzzy): + return None, None + + last_skipped_token_i = self._skip_token(skipped_tokens, + last_skipped_token_i, i, l) + i += 1 + + # Process year/month/day + year, month, day = ymd.resolve_ymd(mstridx, yearfirst, dayfirst) + if year is not None: + res.year = year + res.century_specified = ymd.century_specified + + if month is not None: + res.month = month + + if day is not None: + res.day = day + + except (IndexError, ValueError, AssertionError): + return None, None + + if not info.validate(res): + return None, None + + if fuzzy_with_tokens: + return res, tuple(skipped_tokens) + else: + return res, None + + @staticmethod + def _skip_token(skipped_tokens, last_skipped_token_i, i, l): + if last_skipped_token_i == i - 1: + # recombine the tokens + skipped_tokens[-1] += l[i] + else: + # just append + skipped_tokens.append(l[i]) + last_skipped_token_i = i + return last_skipped_token_i + + +DEFAULTPARSER = parser() + + +def parse(timestr, parserinfo=None, **kwargs): + """ + + Parse a string in one of the supported formats, using the + ``parserinfo`` parameters. + + :param timestr: + A string containing a date/time stamp. + + :param parserinfo: + A :class:`parserinfo` object containing parameters for the parser. 
+ If ``None``, the default arguments to the :class:`parserinfo` + constructor are used. + + The ``**kwargs`` parameter takes the following keyword arguments: + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in minutes or a :class:`tzinfo` object. + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -10800, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -10800)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM and + YMD. If set to ``None``, this value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken to + be the year, otherwise the last number is taken to be the year. If + this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. 
+ """ + if parserinfo: + return parser(parserinfo).parse(timestr, **kwargs) + else: + return DEFAULTPARSER.parse(timestr, **kwargs) + + +class _tzparser(object): + + class _result(_resultbase): + + __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", + "start", "end"] + + class _attr(_resultbase): + __slots__ = ["month", "week", "weekday", + "yday", "jyday", "day", "time"] + + def __repr__(self): + return self._repr("") + + def __init__(self): + _resultbase.__init__(self) + self.start = self._attr() + self.end = self._attr() + + def parse(self, tzstr): + res = self._result() + l = _timelex.split(tzstr) + try: + + len_l = len(l) + + i = 0 + while i < len_l: + # BRST+3[BRDT[+2]] + j = i + while j < len_l and not [x for x in l[j] + if x in "0123456789:,-+"]: + j += 1 + if j != i: + if not res.stdabbr: + offattr = "stdoffset" + res.stdabbr = "".join(l[i:j]) + else: + offattr = "dstoffset" + res.dstabbr = "".join(l[i:j]) + i = j + if (i < len_l and (l[i] in ('+', '-') or l[i][0] in + "0123456789")): + if l[i] in ('+', '-'): + # Yes, that's right. See the TZ variable + # documentation. + signal = (1, -1)[l[i] == '+'] + i += 1 + else: + signal = -1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + setattr(res, offattr, (int(l[i][:2])*3600 + + int(l[i][2:])*60)*signal) + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + setattr(res, offattr, + (int(l[i])*3600+int(l[i+2])*60)*signal) + i += 2 + elif len_li <= 2: + # -[0]3 + setattr(res, offattr, + int(l[i][:2])*3600*signal) + else: + return None + i += 1 + if res.dstabbr: + break + else: + break + + if i < len_l: + for j in range(i, len_l): + if l[j] == ';': + l[j] = ',' + + assert l[i] == ',' + + i += 1 + + if i >= len_l: + pass + elif (8 <= l.count(',') <= 9 and + not [y for x in l[i:] if x != ',' + for y in x if y not in "0123456789"]): + # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] + for x in (res.start, res.end): + x.month = int(l[i]) + i += 2 + if l[i] == '-': + value = int(l[i+1])*-1 + i += 1 + else: + value = int(l[i]) + i += 2 + if value: + x.week = value + x.weekday = (int(l[i])-1) % 7 + else: + x.day = int(l[i]) + i += 2 + x.time = int(l[i]) + i += 2 + if i < len_l: + if l[i] in ('-', '+'): + signal = (-1, 1)[l[i] == "+"] + i += 1 + else: + signal = 1 + res.dstoffset = (res.stdoffset+int(l[i]))*signal + elif (l.count(',') == 2 and l[i:].count('/') <= 2 and + not [y for x in l[i:] if x not in (',', '/', 'J', 'M', + '.', '-', ':') + for y in x if y not in "0123456789"]): + for x in (res.start, res.end): + if l[i] == 'J': + # non-leap year day (1 based) + i += 1 + x.jyday = int(l[i]) + elif l[i] == 'M': + # month[-.]week[-.]weekday + i += 1 + x.month = int(l[i]) + i += 1 + assert l[i] in ('-', '.') + i += 1 + x.week = int(l[i]) + if x.week == 5: + x.week = -1 + i += 1 + assert l[i] in ('-', '.') + i += 1 + x.weekday = (int(l[i])-1) % 7 + else: + # year day (zero based) + x.yday = int(l[i])+1 + + i += 1 + + if i < len_l and l[i] == '/': + i += 1 + # start time + len_li = len(l[i]) + if len_li == 4: + # -0300 + x.time = (int(l[i][:2])*3600+int(l[i][2:])*60) + elif i+1 < len_l and l[i+1] == ':': + # -03:00 + x.time = int(l[i])*3600+int(l[i+2])*60 + i += 2 + if i+1 < len_l and l[i+1] == ':': + i += 2 + x.time += int(l[i]) + elif len_li <= 2: + # -[0]3 + x.time = (int(l[i][:2])*3600) + else: + return None + i += 1 + + assert i == len_l or l[i] == ',' + + i += 1 + + assert i >= len_l + + except (IndexError, ValueError, AssertionError): + return None + + return res + + +DEFAULTTZPARSER = _tzparser() + + +def _parsetz(tzstr): + return 
DEFAULTTZPARSER.parse(tzstr) + + +def _parsems(value): + """Parse a I[.F] seconds value into (seconds, microseconds).""" + if "." not in value: + return int(value), 0 + else: + i, f = value.split(".") + return int(i), int(f.ljust(6, "0")[:6]) + + +# vim:ts=4:sw=4:et diff --git a/python/dateutil/relativedelta.py b/python/dateutil/relativedelta.py new file mode 100644 index 0000000..0e66afc --- /dev/null +++ b/python/dateutil/relativedelta.py @@ -0,0 +1,549 @@ +# -*- coding: utf-8 -*- +import datetime +import calendar + +import operator +from math import copysign + +from six import integer_types +from warnings import warn + +from ._common import weekday + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + + +class relativedelta(object): + """ + The relativedelta type is based on the specification of the excellent + work done by M.-A. Lemburg in his + `mx.DateTime `_ extension. + However, notice that this type does *NOT* implement the same algorithm as + his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. + + There are two different ways to build a relativedelta instance. The + first one is passing it two date/datetime classes:: + + relativedelta(datetime1, datetime2) + + The second one is passing it any number of the following keyword arguments:: + + relativedelta(arg1=x,arg2=y,arg3=z...) + + year, month, day, hour, minute, second, microsecond: + Absolute information (argument is singular); adding or subtracting a + relativedelta with absolute information does not perform an aritmetic + operation, but rather REPLACES the corresponding value in the + original datetime with the value(s) in relativedelta. + + years, months, weeks, days, hours, minutes, seconds, microseconds: + Relative information, may be negative (argument is plural); adding + or subtracting a relativedelta with relative information performs + the corresponding aritmetic operation on the original datetime value + with the information in the relativedelta. + + weekday: + One of the weekday instances (MO, TU, etc). These instances may + receive a parameter N, specifying the Nth weekday, which could + be positive or negative (like MO(+1) or MO(-2). Not specifying + it is the same as specifying +1. You can also use an integer, + where 0=MO. + + leapdays: + Will add given days to the date found, if year is a leap + year, and the date found is post 28 of february. + + yearday, nlyearday: + Set the yearday or the non-leap year day (jump leap days). + These are converted to day/month/leapdays information. + + Here is the behavior of operations with relativedelta: + + 1. Calculate the absolute year, using the 'year' argument, or the + original datetime year, if the argument is not present. + + 2. Add the relative 'years' argument to the absolute year. + + 3. Do steps 1 and 2 for month/months. + + 4. Calculate the absolute day, using the 'day' argument, or the + original datetime day, if the argument is not present. Then, + subtract from the day until it fits in the year and month + found after their operations. + + 5. Add the relative 'days' argument to the absolute day. Notice + that the 'weeks' argument is multiplied by 7 and added to + 'days'. + + 6. Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds, + microsecond/microseconds. + + 7. If the 'weekday' argument is present, calculate the weekday, + with the given (wday, nth) tuple. 
wday is the index of the + weekday (0-6, 0=Mon), and nth is the number of weeks to add + forward or backward, depending on its signal. Notice that if + the calculated date is already Monday, for example, using + (0, 1) or (0, -1) won't change the day. + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + + # Check for non-integer values in integer-only quantities + if any(x is not None and x != int(x) for x in (years, months)): + raise ValueError("Non-integer years and months are " + "ambiguous and not currently supported.") + + if dt1 and dt2: + # datetime is a subclass of date. So both must be date + if not (isinstance(dt1, datetime.date) and + isinstance(dt2, datetime.date)): + raise TypeError("relativedelta only diffs datetime/date") + + # We allow two dates, or two datetimes, so we coerce them to be + # of the same type + if (isinstance(dt1, datetime.datetime) != + isinstance(dt2, datetime.datetime)): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + # Get year / month delta between the two + months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) + self._set_months(months) + + # Remove the year/month delta so the timedelta is just well-defined + # time units (seconds, days and microseconds) + dtm = self.__radd__(dt2) + + # If we've overshot our target, make an adjustment + if dt1 < dt2: + compare = operator.gt + increment = 1 + else: + compare = operator.lt + increment = -1 + + while compare(dt1, dtm): + months += increment + self._set_months(months) + dtm = self.__radd__(dt2) + + # Get the timedelta between the "months-adjusted" date and dt1 + delta = dt1 - dtm + self.seconds = delta.seconds + delta.days * 86400 + self.microseconds = delta.microseconds + else: + # Relative information + self.years = years + self.months = months + self.days = days + weeks * 7 + self.leapdays = leapdays + self.hours = hours + self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + + # Absolute information + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if any(x is not None and int(x) != x + for x in (year, month, day, hour, + minute, second, microsecond)): + # For now we'll deprecate floats - later it'll be an error. + warn("Non-integer value passed as absolute information. 
" + + "This is not a well-defined condition and will raise " + + "errors in future versions.", DeprecationWarning) + + if isinstance(weekday, integer_types): + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31, 59, 90, 120, 151, 181, 212, + 243, 273, 304, 334, 366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError("invalid year day (%d)" % yday) + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = _sign(self.microseconds) + div, mod = divmod(self.microseconds * s, 1000000) + self.microseconds = mod * s + self.seconds += div * s + if abs(self.seconds) > 59: + s = _sign(self.seconds) + div, mod = divmod(self.seconds * s, 60) + self.seconds = mod * s + self.minutes += div * s + if abs(self.minutes) > 59: + s = _sign(self.minutes) + div, mod = divmod(self.minutes * s, 60) + self.minutes = mod * s + self.hours += div * s + if abs(self.hours) > 23: + s = _sign(self.hours) + div, mod = divmod(self.hours * s, 24) + self.hours = mod * s + self.days += div * s + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years += div * s + if (self.hours or self.minutes or self.seconds or self.microseconds + or self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + @property + def weeks(self): + return self.days // 7 + + @weeks.setter + def weeks(self, value): + self.days = self.days - (self.weeks * 7) + value * 7 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years = div * s + else: + self.years = 0 + + def normalized(self): + """ + Return a version of this object represented entirely using integer + values for the relative attributes. + + >>> relativedelta(days=1.5, hours=2).normalized() + relativedelta(days=1, hours=14) + + :return: + Returns a :class:`dateutil.relativedelta.relativedelta` object. 
+ """ + # Cascade remainders down (rounding each to roughly nearest microsecond) + days = int(self.days) + + hours_f = round(self.hours + 24 * (self.days - days), 11) + hours = int(hours_f) + + minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) + minutes = int(minutes_f) + + seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) + seconds = int(seconds_f) + + microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) + + # Constructor carries overflow back up with call to _fix() + return self.__class__(years=self.years, months=self.months, + days=days, hours=hours, minutes=minutes, + seconds=seconds, microseconds=microseconds, + leapdays=self.leapdays, year=self.year, + month=self.month, day=self.day, + weekday=self.weekday, hour=self.hour, + minute=self.minute, second=self.second, + microsecond=self.microsecond) + + def __add__(self, other): + if isinstance(other, relativedelta): + return self.__class__(years=other.years + self.years, + months=other.months + self.months, + days=other.days + self.days, + hours=other.hours + self.hours, + minutes=other.minutes + self.minutes, + seconds=other.seconds + self.seconds, + microseconds=(other.microseconds + + self.microseconds), + leapdays=other.leapdays or self.leapdays, + year=(other.year if other.year is not None + else self.year), + month=(other.month if other.month is not None + else self.month), + day=(other.day if other.day is not None + else self.day), + weekday=(other.weekday if other.weekday is not None + else self.weekday), + hour=(other.hour if other.hour is not None + else self.hour), + minute=(other.minute if other.minute is not None + else self.minute), + second=(other.second if other.second is not None + else self.second), + microsecond=(other.microsecond if other.microsecond + is not None else + self.microsecond)) + if isinstance(other, datetime.timedelta): + return self.__class__(years=self.years, + months=self.months, + days=self.days + other.days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds + other.seconds, + microseconds=self.microseconds + other.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + if not isinstance(other, datetime.date): + return NotImplemented + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth) - 1) * 7 + if nth > 0: + jumpdays += (7 - ret.weekday() + weekday) % 7 + else: + jumpdays += (ret.weekday() - weekday) % 7 + jumpdays *= -1 + ret += 
datetime.timedelta(days=jumpdays) + return ret + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented # In case the other object defines __rsub__ + return self.__class__(years=self.years - other.years, + months=self.months - other.months, + days=self.days - other.days, + hours=self.hours - other.hours, + minutes=self.minutes - other.minutes, + seconds=self.seconds - other.seconds, + microseconds=self.microseconds - other.microseconds, + leapdays=self.leapdays or other.leapdays, + year=(self.year if self.year is not None + else other.year), + month=(self.month if self.month is not None else + other.month), + day=(self.day if self.day is not None else + other.day), + weekday=(self.weekday if self.weekday is not None else + other.weekday), + hour=(self.hour if self.hour is not None else + other.hour), + minute=(self.minute if self.minute is not None else + other.minute), + second=(self.second if self.second is not None else + other.second), + microsecond=(self.microsecond if self.microsecond + is not None else + other.microsecond)) + + def __neg__(self): + return self.__class__(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __bool__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + # Compatibility with Python 2.x + __nonzero__ = __bool__ + + def __mul__(self, other): + try: + f = float(other) + except TypeError: + return NotImplemented + + return self.__class__(years=int(self.years * f), + months=int(self.months * f), + days=int(self.days * f), + hours=int(self.hours * f), + minutes=int(self.minutes * f), + seconds=int(self.seconds * f), + microseconds=int(self.microseconds * f), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + __rmul__ = __mul__ + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + self.months == other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.microseconds == other.microseconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + 
self.microsecond == other.microsecond) + + __hash__ = None + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + try: + reciprocal = 1 / float(other) + except TypeError: + return NotImplemented + + return self.__mul__(reciprocal) + + __truediv__ = __div__ + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("{attr}={value:+g}".format(attr=attr, value=value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("{attr}={value}".format(attr=attr, value=repr(value))) + return "{classname}({attrs})".format(classname=self.__class__.__name__, + attrs=", ".join(l)) + + +def _sign(x): + return int(copysign(1, x)) + +# vim:ts=4:sw=4:et diff --git a/python/dateutil/rrule.py b/python/dateutil/rrule.py new file mode 100644 index 0000000..429f8fc --- /dev/null +++ b/python/dateutil/rrule.py @@ -0,0 +1,1610 @@ +# -*- coding: utf-8 -*- +""" +The rrule module offers a small, complete, and very fast, implementation of +the recurrence rules documented in the +`iCalendar RFC `_, +including support for caching of results. +""" +import itertools +import datetime +import calendar +import sys + +try: + from math import gcd +except ImportError: + from fractions import gcd + +from six import advance_iterator, integer_types +from six.moves import _thread, range +import heapq + +from ._common import weekday as weekdaybase + +# For warning about deprecation of until and count +from warnings import warn + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. +M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) +WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY'] + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = list(range(7)) + +# Imported on demand. +easter = None +parser = None + + +class weekday(weekdaybase): + """ + This version of weekday does not allow n = 0. + """ + def __init__(self, wkday, n=None): + if n == 0: + raise ValueError("Can't create weekday with n==0") + + super(weekday, self).__init__(wkday, n) + + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + + +def _invalidates_cache(f): + """ + Decorator for rruleset methods which may invalidate the + cached length. 
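For orientation, a minimal use of the module-level API described in the module docstring above (an illustrative sketch, assuming upstream-equivalent behaviour of this vendored copy; output wrapped for readability):

    >>> from datetime import datetime
    >>> from dateutil.rrule import rrule, WEEKLY
    >>> list(rrule(WEEKLY, count=3, dtstart=datetime(2019, 2, 4)))
    [datetime.datetime(2019, 2, 4, 0, 0),
     datetime.datetime(2019, 2, 11, 0, 0),
     datetime.datetime(2019, 2, 18, 0, 0)]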
+ """ + def inner_func(self, *args, **kwargs): + rv = f(self, *args, **kwargs) + self._invalidate_cache() + return rv + + return inner_func + + +class rrulebase(object): + def __init__(self, cache=False): + if cache: + self._cache = [] + self._cache_lock = _thread.allocate_lock() + self._invalidate_cache() + else: + self._cache = None + self._cache_complete = False + self._len = None + + def __iter__(self): + if self._cache_complete: + return iter(self._cache) + elif self._cache is None: + return self._iter() + else: + return self._iter_cached() + + def _invalidate_cache(self): + if self._cache is not None: + self._cache = [] + self._cache_complete = False + self._cache_gen = self._iter() + + if self._cache_lock.locked(): + self._cache_lock.release() + + self._len = None + + def _iter_cached(self): + i = 0 + gen = self._cache_gen + cache = self._cache + acquire = self._cache_lock.acquire + release = self._cache_lock.release + while gen: + if i == len(cache): + acquire() + if self._cache_complete: + break + try: + for j in range(10): + cache.append(advance_iterator(gen)) + except StopIteration: + self._cache_gen = gen = None + self._cache_complete = True + break + release() + yield cache[i] + i += 1 + while i < self._len: + yield cache[i] + i += 1 + + def __getitem__(self, item): + if self._cache_complete: + return self._cache[item] + elif isinstance(item, slice): + if item.step and item.step < 0: + return list(iter(self))[item] + else: + return list(itertools.islice(self, + item.start or 0, + item.stop or sys.maxsize, + item.step or 1)) + elif item >= 0: + gen = iter(self) + try: + for i in range(item+1): + res = advance_iterator(gen) + except StopIteration: + raise IndexError + return res + else: + return list(iter(self))[item] + + def __contains__(self, item): + if self._cache_complete: + return item in self._cache + else: + for i in self: + if i == item: + return True + elif i > item: + return False + return False + + # __len__() introduces a large performance penality. + def count(self): + """ Returns the number of recurrences in this set. It will have go + trough the whole recurrence, if this hasn't been done before. """ + if self._len is None: + for x in self: + pass + return self._len + + def before(self, dt, inc=False): + """ Returns the last recurrence before the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + last = None + if inc: + for i in gen: + if i > dt: + break + last = i + else: + for i in gen: + if i >= dt: + break + last = i + return last + + def after(self, dt, inc=False): + """ Returns the first recurrence after the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + if inc: + for i in gen: + if i >= dt: + return i + else: + for i in gen: + if i > dt: + return i + return None + + def xafter(self, dt, count=None, inc=False): + """ + Generator which yields up to `count` recurrences after the given + datetime instance, equivalent to `after`. + + :param dt: + The datetime at which to start generating recurrences. + + :param count: + The maximum number of recurrences to generate. If `None` (default), + dates are generated until the recurrence rule is exhausted. 
+ + :param inc: + If `dt` is an instance of the rule and `inc` is `True`, it is + included in the output. + + :yields: Yields a sequence of `datetime` objects. + """ + + if self._cache_complete: + gen = self._cache + else: + gen = self + + # Select the comparison function + if inc: + comp = lambda dc, dtc: dc >= dtc + else: + comp = lambda dc, dtc: dc > dtc + + # Generate dates + n = 0 + for d in gen: + if comp(d, dt): + if count is not None: + n += 1 + if n > count: + break + + yield d + + def between(self, after, before, inc=False, count=1): + """ Returns all the occurrences of the rrule between after and before. + The inc keyword defines what happens if after and/or before are + themselves occurrences. With inc=True, they will be included in the + list, if they are found in the recurrence set. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + + +class rrule(rrulebase): + """ + That's the base of the rrule operation. It accepts all the keywords + defined in the RFC as its constructor parameters (except byday, + which was renamed to byweekday) and more. The constructor prototype is:: + + rrule(freq) + + Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, + or SECONDLY. + + .. note:: + Per RFC section 3.3.10, recurrence instances falling on invalid dates + and times are ignored rather than coerced: + + Recurrence rules may generate recurrence instances with an invalid + date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM + on a day where the local time is moved forward by an hour at 1:00 + AM). Such recurrence instances MUST be ignored and MUST NOT be + counted as part of the recurrence set. + + This can lead to possibly surprising behavior when, for example, the + start date occurs at the end of the month: + + >>> from dateutil.rrule import rrule, MONTHLY + >>> from datetime import datetime + >>> start_date = datetime(2014, 12, 31) + >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) + ... # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2014, 12, 31, 0, 0), + datetime.datetime(2015, 1, 31, 0, 0), + datetime.datetime(2015, 3, 31, 0, 0), + datetime.datetime(2015, 5, 31, 0, 0)] + + Additionally, it supports the following keyword arguments: + + :param cache: + If given, it must be a boolean value specifying to enable or disable + caching of results. If you will use the same rrule instance multiple + times, enabling caching will improve the performance considerably. + :param dtstart: + The recurrence start. Besides being the base for the recurrence, + missing parameters in the final recurrence instances will also be + extracted from this date. If not given, datetime.now() will be used + instead. + :param interval: + The interval between each freq iteration. For example, when using + YEARLY, an interval of 2 means once every two years, but with HOURLY, + it means once every two hours. The default interval is 1. + :param wkst: + The week start day. Must be one of the MO, TU, WE constants, or an + integer, specifying the first day of the week. This will affect + recurrences based on weekly periods. 
The default week start is got + from calendar.firstweekday(), and may be modified by + calendar.setfirstweekday(). + :param count: + How many occurrences will be generated. + + .. note:: + As of version 2.5.0, the use of the ``until`` keyword together + with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10. + :param until: + If given, this must be a datetime instance, that will specify the + limit of the recurrence. The last recurrence in the rule is the greatest + datetime that is less than or equal to the value specified in the + ``until`` parameter. + + .. note:: + As of version 2.5.0, the use of the ``until`` keyword together + with the ``count`` keyword is deprecated per RFC-2445 Sec. 4.3.10. + :param bysetpos: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each given integer will specify an occurrence + number, corresponding to the nth occurrence of the rule inside the + frequency period. For example, a bysetpos of -1 if combined with a + MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will + result in the last work day of every month. + :param bymonth: + If given, it must be either an integer, or a sequence of integers, + meaning the months to apply the recurrence to. + :param bymonthday: + If given, it must be either an integer, or a sequence of integers, + meaning the month days to apply the recurrence to. + :param byyearday: + If given, it must be either an integer, or a sequence of integers, + meaning the year days to apply the recurrence to. + :param byweekno: + If given, it must be either an integer, or a sequence of integers, + meaning the week numbers to apply the recurrence to. Week numbers + have the meaning described in ISO8601, that is, the first week of + the year is that containing at least four days of the new year. + :param byweekday: + If given, it must be either an integer (0 == MO), a sequence of + integers, one of the weekday constants (MO, TU, etc), or a sequence + of these constants. When given, these variables will define the + weekdays where the recurrence will be applied. It's also possible to + use an argument n for the weekday instances, which will mean the nth + occurrence of this weekday in the period. For example, with MONTHLY, + or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the + first friday of the month where the recurrence happens. Notice that in + the RFC documentation, this is specified as BYDAY, but was renamed to + avoid the ambiguity of that keyword. + :param byhour: + If given, it must be either an integer, or a sequence of integers, + meaning the hours to apply the recurrence to. + :param byminute: + If given, it must be either an integer, or a sequence of integers, + meaning the minutes to apply the recurrence to. + :param bysecond: + If given, it must be either an integer, or a sequence of integers, + meaning the seconds to apply the recurrence to. + :param byeaster: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each integer will define an offset from the + Easter Sunday. Passing the offset 0 to byeaster will yield the Easter + Sunday itself. This is an extension to the RFC specification. 
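The ``bysetpos`` description above ("last work day of every month") is easiest to follow with a concrete run; an illustrative sketch, assuming this copy matches upstream dateutil (output wrapped for readability):

    >>> from datetime import datetime
    >>> from dateutil.rrule import rrule, MONTHLY, MO, TU, WE, TH, FR
    >>> last_workday = rrule(MONTHLY, count=3, bysetpos=-1,
    ...                      byweekday=(MO, TU, WE, TH, FR),
    ...                      dtstart=datetime(2019, 1, 1))
    >>> list(last_workday)
    [datetime.datetime(2019, 1, 31, 0, 0),
     datetime.datetime(2019, 2, 28, 0, 0),
     datetime.datetime(2019, 3, 29, 0, 0)]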
+ """ + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + super(rrule, self).__init__(cache) + global easter + if not dtstart: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + + # Cache the original byxxx rules, if they are provided, as the _byxxx + # attributes do not necessarily map to the inputs, and this can be + # a problem in generating the strings. Only store things if they've + # been supplied (the string retrieval will just use .get()) + self._original_rule = {} + + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + + if count is not None and until: + warn("Using both 'count' and 'until' is inconsistent with RFC 2445" + " and has been deprecated in dateutil. Future versions will " + "raise an error.", DeprecationWarning) + + if wkst is None: + self._wkst = calendar.firstweekday() + elif isinstance(wkst, integer_types): + self._wkst = wkst + else: + self._wkst = wkst.weekday + + if bysetpos is None: + self._bysetpos = None + elif isinstance(bysetpos, integer_types): + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + + if self._bysetpos: + self._original_rule['bysetpos'] = self._bysetpos + + if (byweekno is None and byyearday is None and bymonthday is None and + byweekday is None and byeaster is None): + if freq == YEARLY: + if bymonth is None: + bymonth = dtstart.month + self._original_rule['bymonth'] = None + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == MONTHLY: + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == WEEKLY: + byweekday = dtstart.weekday() + self._original_rule['byweekday'] = None + + # bymonth + if bymonth is None: + self._bymonth = None + else: + if isinstance(bymonth, integer_types): + bymonth = (bymonth,) + + self._bymonth = tuple(sorted(set(bymonth))) + + if 'bymonth' not in self._original_rule: + self._original_rule['bymonth'] = self._bymonth + + # byyearday + if byyearday is None: + self._byyearday = None + else: + if isinstance(byyearday, integer_types): + byyearday = (byyearday,) + + self._byyearday = tuple(sorted(set(byyearday))) + self._original_rule['byyearday'] = self._byyearday + + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if isinstance(byeaster, integer_types): + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(sorted(byeaster)) + + self._original_rule['byeaster'] = self._byeaster + else: + self._byeaster = None + + # bymonthday + if bymonthday is None: + self._bymonthday = () + self._bynmonthday = () + else: + if isinstance(bymonthday, integer_types): + bymonthday = (bymonthday,) + + bymonthday = set(bymonthday) # 
Ensure it's unique + + self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) + self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) + + # Storing positive numbers first, then negative numbers + if 'bymonthday' not in self._original_rule: + self._original_rule['bymonthday'] = tuple( + itertools.chain(self._bymonthday, self._bynmonthday)) + + # byweekno + if byweekno is None: + self._byweekno = None + else: + if isinstance(byweekno, integer_types): + byweekno = (byweekno,) + + self._byweekno = tuple(sorted(set(byweekno))) + + self._original_rule['byweekno'] = self._byweekno + + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + else: + # If it's one of the valid non-sequence types, convert to a + # single-element sequence before the iterator that builds the + # byweekday set. + if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): + byweekday = (byweekday,) + + self._byweekday = set() + self._bynweekday = set() + for wday in byweekday: + if isinstance(wday, integer_types): + self._byweekday.add(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.add(wday.weekday) + else: + self._bynweekday.add((wday.weekday, wday.n)) + + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + + if self._byweekday is not None: + self._byweekday = tuple(sorted(self._byweekday)) + orig_byweekday = [weekday(x) for x in self._byweekday] + else: + orig_byweekday = tuple() + + if self._bynweekday is not None: + self._bynweekday = tuple(sorted(self._bynweekday)) + orig_bynweekday = [weekday(*x) for x in self._bynweekday] + else: + orig_bynweekday = tuple() + + if 'byweekday' not in self._original_rule: + self._original_rule['byweekday'] = tuple(itertools.chain( + orig_byweekday, orig_bynweekday)) + + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = set((dtstart.hour,)) + else: + self._byhour = None + else: + if isinstance(byhour, integer_types): + byhour = (byhour,) + + if freq == HOURLY: + self._byhour = self.__construct_byset(start=dtstart.hour, + byxxx=byhour, + base=24) + else: + self._byhour = set(byhour) + + self._byhour = tuple(sorted(self._byhour)) + self._original_rule['byhour'] = self._byhour + + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = set((dtstart.minute,)) + else: + self._byminute = None + else: + if isinstance(byminute, integer_types): + byminute = (byminute,) + + if freq == MINUTELY: + self._byminute = self.__construct_byset(start=dtstart.minute, + byxxx=byminute, + base=60) + else: + self._byminute = set(byminute) + + self._byminute = tuple(sorted(self._byminute)) + self._original_rule['byminute'] = self._byminute + + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = ((dtstart.second,)) + else: + self._bysecond = None + else: + if isinstance(bysecond, integer_types): + bysecond = (bysecond,) + + self._bysecond = set(bysecond) + + if freq == SECONDLY: + self._bysecond = self.__construct_byset(start=dtstart.second, + byxxx=bysecond, + base=60) + else: + self._bysecond = set(bysecond) + + self._bysecond = tuple(sorted(self._bysecond)) + self._original_rule['bysecond'] = self._bysecond + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = 
tuple(self._timeset) + + def __str__(self): + """ + Output a string that would generate this RRULE if passed to rrulestr. + This is mostly compatible with RFC2445, except for the + dateutil-specific extension BYEASTER. + """ + + output = [] + h, m, s = [None] * 3 + if self._dtstart: + output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) + h, m, s = self._dtstart.timetuple()[3:6] + + parts = ['FREQ=' + FREQNAMES[self._freq]] + if self._interval != 1: + parts.append('INTERVAL=' + str(self._interval)) + + if self._wkst: + parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) + + if self._count is not None: + parts.append('COUNT=' + str(self._count)) + + if self._until: + parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) + + if self._original_rule.get('byweekday') is not None: + # The str() method on weekday objects doesn't generate + # RFC2445-compliant strings, so we should modify that. + original_rule = dict(self._original_rule) + wday_strings = [] + for wday in original_rule['byweekday']: + if wday.n: + wday_strings.append('{n:+d}{wday}'.format( + n=wday.n, + wday=repr(wday)[0:2])) + else: + wday_strings.append(repr(wday)) + + original_rule['byweekday'] = wday_strings + else: + original_rule = self._original_rule + + partfmt = '{name}={vals}' + for name, key in [('BYSETPOS', 'bysetpos'), + ('BYMONTH', 'bymonth'), + ('BYMONTHDAY', 'bymonthday'), + ('BYYEARDAY', 'byyearday'), + ('BYWEEKNO', 'byweekno'), + ('BYDAY', 'byweekday'), + ('BYHOUR', 'byhour'), + ('BYMINUTE', 'byminute'), + ('BYSECOND', 'bysecond'), + ('BYEASTER', 'byeaster')]: + value = original_rule.get(key) + if value: + parts.append(partfmt.format(name=name, vals=(','.join(str(v) + for v in value)))) + + output.append(';'.join(parts)) + return '\n'.join(output) + + def replace(self, **kwargs): + """Return new rrule with same attributes except for those attributes given new + values by whichever keyword arguments are specified.""" + new_kwargs = {"interval": self._interval, + "count": self._count, + "dtstart": self._dtstart, + "freq": self._freq, + "until": self._until, + "wkst": self._wkst, + "cache": False if self._cache is None else True } + new_kwargs.update(self._original_rule) + new_kwargs.update(kwargs) + return rrule(**new_kwargs) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY: ii.ydayset, + MONTHLY: ii.mdayset, + WEEKLY: ii.wdayset, + DAILY: ii.ddayset, + HOURLY: ii.ddayset, + MINUTELY: ii.ddayset, + SECONDLY: ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY: ii.htimeset, + MINUTELY: ii.mtimeset, + SECONDLY: ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: 
+ # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday and + -ii.yearlen+i not in byyearday) or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and + -ii.nextyearlen+i-ii.yearlen not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + total += 1 + yield res + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal + i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + + total += 1 + yield res + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + + if byhour: + ndays, hour = self.__mod_distance(value=hour, + byxxx=self._byhour, + base=24) + else: + ndays, hour = divmod(hour+interval, 24) + + if ndays: + day += ndays + fixday = True + + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + + valid = False + rep_rate = (24*60) + for j in range(rep_rate // gcd(interval, rep_rate)): + if byminute: + nhours, minute = \ + self.__mod_distance(value=minute, + byxxx=self._byminute, + base=60) + else: + nhours, minute = divmod(minute+interval, 60) + + div, hour = divmod(hour+nhours, 24) + if div: + day += div + fixday = True + filtered = False + + if not byhour or hour in byhour: + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval and ' + + 'byhour resulting in empty rule.') + + timeset = 
gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399 - (hour * 3600 + minute * 60 + second)) + // interval) * interval) + + rep_rate = (24 * 3600) + valid = False + for j in range(0, rep_rate // gcd(interval, rep_rate)): + if bysecond: + nminutes, second = \ + self.__mod_distance(value=second, + byxxx=self._bysecond, + base=60) + else: + nminutes, second = divmod(second+interval, 60) + + div, minute = divmod(minute+nminutes, 60) + if div: + hour += div + div, hour = divmod(hour, 24) + if div: + day += div + fixday = True + + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval, ' + + 'byhour and byminute resulting in empty' + + ' rule.') + + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + + def __construct_byset(self, start, byxxx, base): + """ + If a `BYXXX` sequence is passed to the constructor at the same level as + `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some + specifications which cannot be reached given some starting conditions. + + This occurs whenever the interval is not coprime with the base of a + given unit and the difference between the starting position and the + ending position is not coprime with the greatest common denominator + between the interval and the base. For example, with a FREQ of hourly + starting at 17:00 and an interval of 4, the only valid values for + BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not + coprime. + + :param start: + Specifies the starting position. + :param byxxx: + An iterable containing the list of allowed values. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + This does not preserve the type of the iterable, returning a set, since + the values should be unique and the order is irrelevant, this will + speed up later lookups. + + In the event of an empty set, raises a :exception:`ValueError`, as this + results in an empty rrule. + """ + + cset = set() + + # Support a single byxxx value. + if isinstance(byxxx, integer_types): + byxxx = (byxxx, ) + + for num in byxxx: + i_gcd = gcd(self._interval, base) + # Use divmod rather than % because we need to wrap negative nums. + if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: + cset.add(num) + + if len(cset) == 0: + raise ValueError("Invalid rrule byxxx generates an empty set.") + + return cset + + def __mod_distance(self, value, byxxx, base): + """ + Calculates the next value in a sequence where the `FREQ` parameter is + specified along with a `BYXXX` parameter at the same "level" + (e.g. `HOURLY` specified with `BYHOUR`). + + :param value: + The old value of the component. + :param byxxx: + The `BYXXX` set, which should have been generated by + `rrule._construct_byset`, or something else which checks that a + valid rule is present. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). 
+ + If a valid value is not found after `base` iterations (the maximum + number before the sequence would start to repeat), this raises a + :exception:`ValueError`, as no valid values were found. + + This returns a tuple of `divmod(n*interval, base)`, where `n` is the + smallest number of `interval` repetitions until the next specified + value in `byxxx` is found. + """ + accumulator = 0 + for ii in range(1, base + 1): + # Using divmod() over % to account for negative intervals + div, value = divmod(value + self._interval, base) + accumulator += div + if value in byxxx: + return (accumulator, value) + + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, month): + # Every mask is 7 days longer to handle cross-year weekly periods. + rr = self.rrule + if year != self.lastyear: + self.yearlen = 365 + calendar.isleap(year) + self.nextyearlen = 365 + calendar.isleap(year + 1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. 
+ if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1, 1, 1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst) % 7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen + + (lyearweekday-rr._wkst) % 7) % 7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and (month != self.lastmonth or + year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday) % 7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday) % 7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return list(range(self.yearlen)), 0, self.yearlen + + def mdayset(self, year, month, day): + dset = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + dset[i] = i + return dset, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. + dset = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + dset[i] = i + i += 1 + # if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return dset, start, i + + def ddayset(self, year, month, day): + dset = [None] * self.yearlen + i = datetime.date(year, month, day).toordinal() - self.yearordinal + dset[i] = i + return dset, i, i + 1 + + def htimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def mtimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + """ The rruleset type allows more complex recurrence setups, mixing + multiple rules, dates, exclusion rules, and exclusion dates. The type + constructor takes the following keyword arguments: + + :param cache: If True, caching of results will be enabled, improving + performance of multiple queries considerably. 
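A small illustration of mixing an inclusion rule with an exclusion date, as described above (a sketch assuming upstream-equivalent behaviour; the dates are arbitrary):

    >>> from datetime import datetime
    >>> from dateutil.rrule import rrule, rruleset, DAILY
    >>> rset = rruleset()
    >>> rset.rrule(rrule(DAILY, count=4, dtstart=datetime(2019, 2, 4)))
    >>> rset.exdate(datetime(2019, 2, 6))
    >>> list(rset)
    [datetime.datetime(2019, 2, 4, 0, 0),
     datetime.datetime(2019, 2, 5, 0, 0),
     datetime.datetime(2019, 2, 7, 0, 0)]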
""" + + class _genitem(object): + def __init__(self, genlist, gen): + try: + self.dt = advance_iterator(gen) + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def __next__(self): + try: + self.dt = advance_iterator(self.gen) + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + next = __next__ + + def __lt__(self, other): + return self.dt < other.dt + + def __gt__(self, other): + return self.dt > other.dt + + def __eq__(self, other): + return self.dt == other.dt + + def __ne__(self, other): + return self.dt != other.dt + + def __init__(self, cache=False): + super(rruleset, self).__init__(cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + @_invalidates_cache + def rrule(self, rrule): + """ Include the given :py:class:`rrule` instance in the recurrence set + generation. """ + self._rrule.append(rrule) + + @_invalidates_cache + def rdate(self, rdate): + """ Include the given :py:class:`datetime` instance in the recurrence + set generation. """ + self._rdate.append(rdate) + + @_invalidates_cache + def exrule(self, exrule): + """ Include the given rrule instance in the recurrence set exclusion + list. Dates which are part of the given recurrence rules will not + be generated, even if some inclusive rrule or rdate matches them. + """ + self._exrule.append(exrule) + + @_invalidates_cache + def exdate(self, exdate): + """ Include the given datetime instance in the recurrence set + exclusion list. Dates included that way will not be generated, + even if some inclusive rrule or rdate matches them. """ + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate)) + for gen in [iter(x) for x in self._rrule]: + self._genitem(rlist, gen) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate)) + for gen in [iter(x) for x in self._exrule]: + self._genitem(exlist, gen) + lastdt = None + total = 0 + heapq.heapify(rlist) + heapq.heapify(exlist) + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + advance_iterator(exitem) + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + advance_iterator(ritem) + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + + +class _rrulestr(object): + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, + "FR": 4, "SA": 5, "SU": 6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + 
rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError("invalid until date") + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): + """ + Two ways to specify this: +1MO or MO(+1) + """ + l = [] + for wday in value.split(','): + if '(' in wday: + # If it's of the form TH(+1), etc. + splt = wday.split('(') + w = splt[0] + n = int(splt[1][:-1]) + elif len(wday): + # If it's of the form +1MO + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: + n = int(n) + else: + raise ValueError("Invalid (empty) BYDAY specification.") + + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError("unknown parameter name") + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError("unknown parameter '%s'" % name) + except (KeyError, ValueError): + raise ValueError("invalid '%s': %s" % (name, value)) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + s = s.upper() + if not s.strip(): + raise ValueError("empty string") + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and (s.find(':') == -1 or + s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError("unsupported RRULE parm: "+parm) + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError("unsupported EXRULE parm: "+parm) + exrulevals.append(value) + elif name == "EXDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported EXDATE parm: "+parm) + exdatevals.append(value) + elif name == "DTSTART": + for parm in parms: + raise ValueError("unsupported DTSTART parm: "+parm) + if not 
parser: + from dateutil import parser + dtstart = parser.parse(value, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + raise ValueError("unsupported property: "+name) + if (forceset or len(rrulevals) > 1 or rdatevals + or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + rset = rruleset(cache=cache) + for value in rrulevals: + rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + rset.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + for datestr in value.split(','): + rset.exdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + if compatible and dtstart: + rset.rdate(dtstart) + return rset + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/python/dateutil/tz/__init__.py b/python/dateutil/tz/__init__.py new file mode 100644 index 0000000..b0a5043 --- /dev/null +++ b/python/dateutil/tz/__init__.py @@ -0,0 +1,5 @@ +from .tz import * + +__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", + "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz", + "enfold", "datetime_ambiguous", "datetime_exists"] diff --git a/python/dateutil/tz/_common.py b/python/dateutil/tz/_common.py new file mode 100644 index 0000000..f1cf2af --- /dev/null +++ b/python/dateutil/tz/_common.py @@ -0,0 +1,394 @@ +from six import PY3 + +from functools import wraps + +from datetime import datetime, timedelta, tzinfo + + +ZERO = timedelta(0) + +__all__ = ['tzname_in_python2', 'enfold'] + + +def tzname_in_python2(namefunc): + """Change unicode output into bytestrings in Python 2 + + tzname() API changed in Python 3. It used to return bytes, but was changed + to unicode strings + """ + def adjust_encoding(*args, **kwargs): + name = namefunc(*args, **kwargs) + if name is not None and not PY3: + name = name.encode() + + return name + + return adjust_encoding + + +# The following is adapted from Alexander Belopolsky's tz library +# https://github.com/abalkin/tz +if hasattr(datetime, 'fold'): + # This is the pre-python 3.6 fold situation + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. versionadded:: 2.6.0 + """ + return dt.replace(fold=fold) + +else: + class _DatetimeWithFold(datetime): + """ + This is a class designed to provide a PEP 495-compliant interface for + Python versions before 3.6. It is used only for dates in a fold, so + the ``fold`` attribute is fixed at ``1``. + + .. 
versionadded:: 2.6.0 + """ + __slots__ = () + + @property + def fold(self): + return 1 + + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. versionadded:: 2.6.0 + """ + if getattr(dt, 'fold', 0) == fold: + return dt + + args = dt.timetuple()[:6] + args += (dt.microsecond, dt.tzinfo) + + if fold: + return _DatetimeWithFold(*args) + else: + return datetime(*args) + + +def _validate_fromutc_inputs(f): + """ + The CPython version of ``fromutc`` checks that the input is a ``datetime`` + object and that ``self`` is attached as its ``tzinfo``. + """ + @wraps(f) + def fromutc(self, dt): + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + return f(self, dt) + + return fromutc + + +class _tzinfo(tzinfo): + """ + Base class for all ``dateutil`` ``tzinfo`` objects. + """ + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + + dt = dt.replace(tzinfo=self) + + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) + + return same_dt and not same_offset + + def _fold_status(self, dt_utc, dt_wall): + """ + Determine the fold status of a "wall" datetime, given a representation + of the same datetime as a (naive) UTC datetime. This is calculated based + on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all + datetimes, and that this offset is the actual number of hours separating + ``dt_utc`` and ``dt_wall``. + + :param dt_utc: + Representation of the datetime as UTC + + :param dt_wall: + Representation of the datetime as "wall time". This parameter must + either have a `fold` attribute or have a fold-naive + :class:`datetime.tzinfo` attached, otherwise the calculation may + fail. + """ + if self.is_ambiguous(dt_wall): + delta_wall = dt_wall - dt_utc + _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) + else: + _fold = 0 + + return _fold + + def _fold(self, dt): + return getattr(dt, 'fold', 0) + + def _fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. + + Since this is the one time that we *know* we have an unambiguous + datetime object, we take this opportunity to determine whether the + datetime is ambiguous and in a "fold" state (e.g. if it's the first + occurence, chronologically, of the ambiguous datetime). + + :param dt: + A timezone-aware :class:`datetime.datetime` object. 
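        :return:
            Returns a :class:`datetime.datetime` expressing the same moment
            in this zone's wall time; the public :func:`fromutc` wrapper
            then attaches the appropriate ``fold`` value via :func:`enfold`.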
+ """ + + # Re-implement the algorithm from Python's datetime.py + dtoff = dt.utcoffset() + if dtoff is None: + raise ValueError("fromutc() requires a non-None utcoffset() " + "result") + + # The original datetime.py code assumes that `dst()` defaults to + # zero during ambiguous times. PEP 495 inverts this presumption, so + # for pre-PEP 495 versions of python, we need to tweak the algorithm. + dtdst = dt.dst() + if dtdst is None: + raise ValueError("fromutc() requires a non-None dst() result") + delta = dtoff - dtdst + + dt += delta + # Set fold=1 so we can default to being in the fold for + # ambiguous dates. + dtdst = enfold(dt, fold=1).dst() + if dtdst is None: + raise ValueError("fromutc(): dt.dst gave inconsistent " + "results; cannot convert") + return dt + dtdst + + @_validate_fromutc_inputs + def fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. + + Since this is the one time that we *know* we have an unambiguous + datetime object, we take this opportunity to determine whether the + datetime is ambiguous and in a "fold" state (e.g. if it's the first + occurance, chronologically, of the ambiguous datetime). + + :param dt: + A timezone-aware :class:`datetime.datetime` object. + """ + dt_wall = self._fromutc(dt) + + # Calculate the fold status given the two datetimes. + _fold = self._fold_status(dt, dt_wall) + + # Set the default fold value for ambiguous dates + return enfold(dt_wall, fold=_fold) + + +class tzrangebase(_tzinfo): + """ + This is an abstract base class for time zones represented by an annual + transition into and out of DST. Child classes should implement the following + methods: + + * ``__init__(self, *args, **kwargs)`` + * ``transitions(self, year)`` - this is expected to return a tuple of + datetimes representing the DST on and off transitions in standard + time. + + A fully initialized ``tzrangebase`` subclass should also provide the + following attributes: + * ``hasdst``: Boolean whether or not the zone uses DST. + * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects + representing the respective UTC offsets. + * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short + abbreviations in DST and STD, respectively. + * ``_hasdst``: Whether or not the zone has DST. + + .. 
versionadded:: 2.6.0 + """ + def __init__(self): + raise NotImplementedError('tzrangebase is an abstract base class') + + def utcoffset(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_base_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + if self._isdst(dt): + return self._dst_abbr + else: + return self._std_abbr + + def fromutc(self, dt): + """ Given a datetime in UTC, return local time """ + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # Get transitions - if there are none, fixed offset + transitions = self.transitions(dt.year) + if transitions is None: + return dt + self.utcoffset(dt) + + # Get the transition times in UTC + dston, dstoff = transitions + + dston -= self._std_offset + dstoff -= self._std_offset + + utc_transitions = (dston, dstoff) + dt_utc = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt_utc, utc_transitions) + + if isdst: + dt_wall = dt + self._dst_offset + else: + dt_wall = dt + self._std_offset + + _fold = int(not isdst and self.is_ambiguous(dt_wall)) + + return enfold(dt_wall, fold=_fold) + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if not self.hasdst: + return False + + start, end = self.transitions(dt.year) + + dt = dt.replace(tzinfo=None) + return (end <= dt < end + self._dst_base_offset) + + def _isdst(self, dt): + if not self.hasdst: + return False + elif dt is None: + return None + + transitions = self.transitions(dt.year) + + if transitions is None: + return False + + dt = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt, transitions) + + # Handle ambiguous dates + if not isdst and self.is_ambiguous(dt): + return not self._fold(dt) + else: + return isdst + + def _naive_isdst(self, dt, transitions): + dston, dstoff = transitions + + dt = dt.replace(tzinfo=None) + + if dston < dstoff: + isdst = dston <= dt < dstoff + else: + isdst = not dstoff <= dt < dston + + return isdst + + @property + def _dst_base_offset(self): + return self._dst_offset - self._std_offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(...)" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +def _total_seconds(td): + # Python 2.6 doesn't have a total_seconds() method on timedelta objects + return ((td.seconds + td.days * 86400) * 1000000 + + td.microseconds) // 1000000 + + +_total_seconds = getattr(timedelta, 'total_seconds', _total_seconds) diff --git a/python/dateutil/tz/tz.py b/python/dateutil/tz/tz.py new file mode 100644 index 0000000..9468282 --- /dev/null +++ b/python/dateutil/tz/tz.py @@ -0,0 +1,1511 @@ +# -*- coding: utf-8 -*- +""" +This module offers timezone implementations subclassing the abstract +:py:`datetime.tzinfo` type. 
There are classes to handle tzfile format files +(usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, etc), TZ +environment string (in all known formats), given ranges (with help from +relative deltas), local machine timezone, fixed offset timezone, and UTC +timezone. +""" +import datetime +import struct +import time +import sys +import os +import bisect + +from six import string_types +from ._common import tzname_in_python2, _tzinfo, _total_seconds +from ._common import tzrangebase, enfold +from ._common import _validate_fromutc_inputs + +try: + from .win import tzwin, tzwinlocal +except ImportError: + tzwin = tzwinlocal = None + +ZERO = datetime.timedelta(0) +EPOCH = datetime.datetime.utcfromtimestamp(0) +EPOCHORDINAL = EPOCH.toordinal() + + +class tzutc(datetime.tzinfo): + """ + This is a tzinfo object that represents the UTC time zone. + """ + def utcoffset(self, dt): + return ZERO + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return "UTC" + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + return False + + @_validate_fromutc_inputs + def fromutc(self, dt): + """ + Fast track version of fromutc() returns the original ``dt`` object for + any valid :py:class:`datetime.datetime` object. + """ + return dt + + def __eq__(self, other): + if not isinstance(other, (tzutc, tzoffset)): + return NotImplemented + + return (isinstance(other, tzutc) or + (isinstance(other, tzoffset) and other._offset == ZERO)) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class tzoffset(datetime.tzinfo): + """ + A simple class for representing a fixed offset from UTC. + + :param name: + The timezone name, to be returned when ``tzname()`` is called. + + :param offset: + The time zone offset in seconds, or (since version 2.6.0, represented + as a :py:class:`datetime.timedelta` object. + """ + def __init__(self, name, offset): + self._name = name + + try: + # Allow a timedelta + offset = _total_seconds(offset) + except (TypeError, AttributeError): + pass + self._offset = datetime.timedelta(seconds=offset) + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._name + + @_validate_fromutc_inputs + def fromutc(self, dt): + return dt + self._offset + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + return False + + def __eq__(self, other): + if not isinstance(other, tzoffset): + return NotImplemented + + return self._offset == other._offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, + repr(self._name), + int(_total_seconds(self._offset))) + + __reduce__ = object.__reduce__ + + +class tzlocal(_tzinfo): + """ + A :class:`tzinfo` subclass built around the ``time`` timezone functions. 
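    A minimal usage sketch (illustrative):

        from dateutil import tz
        import datetime

        # Current local time with the system time zone attached.
        now = datetime.datetime.now(tz.tzlocal())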
+ """ + def __init__(self): + super(tzlocal, self).__init__() + + self._std_offset = datetime.timedelta(seconds=-time.timezone) + if time.daylight: + self._dst_offset = datetime.timedelta(seconds=-time.altzone) + else: + self._dst_offset = self._std_offset + + self._dst_saved = self._dst_offset - self._std_offset + self._hasdst = bool(self._dst_saved) + + def utcoffset(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset - self._std_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return time.tzname[self._isdst(dt)] + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + naive_dst = self._naive_is_dst(dt) + return (not naive_dst and + (naive_dst != self._naive_is_dst(dt - self._dst_saved))) + + def _naive_is_dst(self, dt): + timestamp = _datetime_to_timestamp(dt) + return time.localtime(timestamp + time.timezone).tm_isdst + + def _isdst(self, dt, fold_naive=True): + # We can't use mktime here. It is unstable when deciding if + # the hour near to a change is DST or not. + # + # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, + # dt.minute, dt.second, dt.weekday(), 0, -1)) + # return time.localtime(timestamp).tm_isdst + # + # The code above yields the following result: + # + # >>> import tz, datetime + # >>> t = tz.tzlocal() + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # + # Here is a more stable implementation: + # + if not self._hasdst: + return False + + # Check for ambiguous times: + dstval = self._naive_is_dst(dt) + fold = getattr(dt, 'fold', None) + + if self.is_ambiguous(dt): + if fold is not None: + return not self._fold(dt) + else: + return True + + return dstval + + def __eq__(self, other): + if not isinstance(other, tzlocal): + return NotImplemented + + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class _ttinfo(object): + __slots__ = ["offset", "delta", "isdst", "abbr", + "isstd", "isgmt", "dstoffset"] + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def __repr__(self): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + + def __eq__(self, other): + if not isinstance(other, _ttinfo): + return NotImplemented + + return (self.offset == other.offset and + self.delta == other.delta and + self.isdst == other.isdst and + self.abbr == other.abbr and + self.isstd == other.isstd and + self.isgmt == other.isgmt and + self.dstoffset == other.dstoffset) + + __hash__ = None + + def __ne__(self, other): + 
return not (self == other) + + def __getstate__(self): + state = {} + for name in self.__slots__: + state[name] = getattr(self, name, None) + return state + + def __setstate__(self, state): + for name in self.__slots__: + if name in state: + setattr(self, name, state[name]) + + +class _tzfile(object): + """ + Lightweight class for holding the relevant transition and time zone + information read from binary tzfiles. + """ + attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list', + 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first'] + + def __init__(self, **kwargs): + for attr in self.attrs: + setattr(self, attr, kwargs.get(attr, None)) + + +class tzfile(_tzinfo): + """ + This is a ``tzinfo`` subclass thant allows one to use the ``tzfile(5)`` + format timezone files to extract current and historical zone information. + + :param fileobj: + This can be an opened file stream or a file name that the time zone + information can be read from. + + :param filename: + This is an optional parameter specifying the source of the time zone + information in the event that ``fileobj`` is a file object. If omitted + and ``fileobj`` is a file stream, this parameter will be set either to + ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``. + + See `Sources for Time Zone and Daylight Saving Time Data + `_ for more information. Time zone + files can be compiled from the `IANA Time Zone database files + `_ with the `zic time zone compiler + `_ + """ + + def __init__(self, fileobj, filename=None): + super(tzfile, self).__init__() + + file_opened_here = False + if isinstance(fileobj, string_types): + self._filename = fileobj + fileobj = open(fileobj, 'rb') + file_opened_here = True + elif filename is not None: + self._filename = filename + elif hasattr(fileobj, "name"): + self._filename = fileobj.name + else: + self._filename = repr(fileobj) + + if fileobj is not None: + if not file_opened_here: + fileobj = _ContextWrapper(fileobj) + + with fileobj as file_stream: + tzobj = self._read_tzfile(file_stream) + + self._set_tzdata(tzobj) + + def _set_tzdata(self, tzobj): + """ Set the time zone data of this object from a _tzfile object """ + # Copy the relevant attributes over as private attributes + for attr in _tzfile.attrs: + setattr(self, '_' + attr, getattr(tzobj, attr)) + + def _read_tzfile(self, fileobj): + out = _tzfile() + + # From tzfile(5): + # + # The time zone information files used by tzset(3) + # begin with the magic characters "TZif" to identify + # them as time zone information files, followed by + # sixteen bytes reserved for future use, followed by + # six four-byte values of type long, written in a + # ``standard'' byte order (the high-order byte + # of the value is written first). + if fileobj.read(4).decode() != "TZif": + raise ValueError("magic not found") + + fileobj.read(16) + + ( + # The number of UTC/local indicators stored in the file. + ttisgmtcnt, + + # The number of standard/wall indicators stored in the file. + ttisstdcnt, + + # The number of leap seconds for which data is + # stored in the file. + leapcnt, + + # The number of "transition times" for which data + # is stored in the file. + timecnt, + + # The number of "local time types" for which data + # is stored in the file (must not be zero). + typecnt, + + # The number of characters of "time zone + # abbreviation strings" stored in the file. 
+ charcnt, + + ) = struct.unpack(">6l", fileobj.read(24)) + + # The above header is followed by tzh_timecnt four-byte + # values of type long, sorted in ascending order. + # These values are written in ``standard'' byte order. + # Each is used as a transition time (as returned by + # time(2)) at which the rules for computing local time + # change. + + if timecnt: + out.trans_list_utc = list(struct.unpack(">%dl" % timecnt, + fileobj.read(timecnt*4))) + else: + out.trans_list_utc = [] + + # Next come tzh_timecnt one-byte values of type unsigned + # char; each one tells which of the different types of + # ``local time'' types described in the file is associated + # with the same-indexed transition time. These values + # serve as indices into an array of ttinfo structures that + # appears next in the file. + + if timecnt: + out.trans_idx = struct.unpack(">%dB" % timecnt, + fileobj.read(timecnt)) + else: + out.trans_idx = [] + + # Each ttinfo structure is written as a four-byte value + # for tt_gmtoff of type long, in a standard byte + # order, followed by a one-byte value for tt_isdst + # and a one-byte value for tt_abbrind. In each + # structure, tt_gmtoff gives the number of + # seconds to be added to UTC, tt_isdst tells whether + # tm_isdst should be set by localtime(3), and + # tt_abbrind serves as an index into the array of + # time zone abbreviation characters that follow the + # ttinfo structure(s) in the file. + + ttinfo = [] + + for i in range(typecnt): + ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) + + abbr = fileobj.read(charcnt).decode() + + # Then there are tzh_leapcnt pairs of four-byte + # values, written in standard byte order; the + # first value of each pair gives the time (as + # returned by time(2)) at which a leap second + # occurs; the second gives the total number of + # leap seconds to be applied after the given time. + # The pairs of values are sorted in ascending order + # by time. + + # Not used, for now (but seek for correct file position) + if leapcnt: + fileobj.seek(leapcnt * 8, os.SEEK_CUR) + + # Then there are tzh_ttisstdcnt standard/wall + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as standard + # time or wall clock time, and are used when + # a time zone file is used in handling POSIX-style + # time zone environment variables. + + if ttisstdcnt: + isstd = struct.unpack(">%db" % ttisstdcnt, + fileobj.read(ttisstdcnt)) + + # Finally, there are tzh_ttisgmtcnt UTC/local + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as UTC or + # local time, and are used when a time zone file + # is used in handling POSIX-style time zone envi- + # ronment variables. + + if ttisgmtcnt: + isgmt = struct.unpack(">%db" % ttisgmtcnt, + fileobj.read(ttisgmtcnt)) + + # Build ttinfo list + out.ttinfo_list = [] + for i in range(typecnt): + gmtoff, isdst, abbrind = ttinfo[i] + # Round to full-minutes if that's not the case. Python's + # datetime doesn't accept sub-minute timezones. Check + # http://python.org/sf/1447945 for some information. 
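            # e.g. a historical local-mean-time offset of -4931 seconds
            # becomes 60 * ((-4931 + 30) // 60) == -4920, i.e. -82 whole
            # minutes.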
+ gmtoff = 60 * ((gmtoff + 30) // 60) + tti = _ttinfo() + tti.offset = gmtoff + tti.dstoffset = datetime.timedelta(0) + tti.delta = datetime.timedelta(seconds=gmtoff) + tti.isdst = isdst + tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] + tti.isstd = (ttisstdcnt > i and isstd[i] != 0) + tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) + out.ttinfo_list.append(tti) + + # Replace ttinfo indexes for ttinfo objects. + out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx] + + # Set standard, dst, and before ttinfos. before will be + # used when a given time is before any transitions, + # and will be set to the first non-dst ttinfo, or to + # the first dst, if all of them are dst. + out.ttinfo_std = None + out.ttinfo_dst = None + out.ttinfo_before = None + if out.ttinfo_list: + if not out.trans_list_utc: + out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0] + else: + for i in range(timecnt-1, -1, -1): + tti = out.trans_idx[i] + if not out.ttinfo_std and not tti.isdst: + out.ttinfo_std = tti + elif not out.ttinfo_dst and tti.isdst: + out.ttinfo_dst = tti + + if out.ttinfo_std and out.ttinfo_dst: + break + else: + if out.ttinfo_dst and not out.ttinfo_std: + out.ttinfo_std = out.ttinfo_dst + + for tti in out.ttinfo_list: + if not tti.isdst: + out.ttinfo_before = tti + break + else: + out.ttinfo_before = out.ttinfo_list[0] + + # Now fix transition times to become relative to wall time. + # + # I'm not sure about this. In my tests, the tz source file + # is setup to wall time, and in the binary file isstd and + # isgmt are off, so it should be in wall time. OTOH, it's + # always in gmt time. Let me know if you have comments + # about this. + laststdoffset = None + out.trans_list = [] + for i, tti in enumerate(out.trans_idx): + if not tti.isdst: + offset = tti.offset + laststdoffset = offset + else: + if laststdoffset is not None: + # Store the DST offset as well and update it in the list + tti.dstoffset = tti.offset - laststdoffset + out.trans_idx[i] = tti + + offset = laststdoffset or 0 + + out.trans_list.append(out.trans_list_utc[i] + offset) + + # In case we missed any DST offsets on the way in for some reason, make + # a second pass over the list, looking for the /next/ DST offset. + laststdoffset = None + for i in reversed(range(len(out.trans_idx))): + tti = out.trans_idx[i] + if tti.isdst: + if not (tti.dstoffset or laststdoffset is None): + tti.dstoffset = tti.offset - laststdoffset + else: + laststdoffset = tti.offset + + if not isinstance(tti.dstoffset, datetime.timedelta): + tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset) + + out.trans_idx[i] = tti + + out.trans_idx = tuple(out.trans_idx) + out.trans_list = tuple(out.trans_list) + out.trans_list_utc = tuple(out.trans_list_utc) + + return out + + def _find_last_transition(self, dt, in_utc=False): + # If there's no list, there are no transitions to find + if not self._trans_list: + return None + + timestamp = _datetime_to_timestamp(dt) + + # Find where the timestamp fits in the transition list - if the + # timestamp is a transition time, it's part of the "after" period. 
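        # e.g. with trans_list == [t0, t1, t2] and timestamp == t1,
        # bisect_right() returns 2, so idx - 1 == 1 selects the period
        # that begins at t1.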
+ trans_list = self._trans_list_utc if in_utc else self._trans_list + idx = bisect.bisect_right(trans_list, timestamp) + + # We want to know when the previous transition was, so subtract off 1 + return idx - 1 + + def _get_ttinfo(self, idx): + # For no list or after the last transition, default to _ttinfo_std + if idx is None or (idx + 1) >= len(self._trans_list): + return self._ttinfo_std + + # If there is a list and the time is before it, return _ttinfo_before + if idx < 0: + return self._ttinfo_before + + return self._trans_idx[idx] + + def _find_ttinfo(self, dt): + idx = self._resolve_ambiguous_time(dt) + + return self._get_ttinfo(idx) + + def fromutc(self, dt): + """ + The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`. + + :param dt: + A :py:class:`datetime.datetime` object. + + :raises TypeError: + Raised if ``dt`` is not a :py:class:`datetime.datetime` object. + + :raises ValueError: + Raised if this is called with a ``dt`` which does not have this + ``tzinfo`` attached. + + :return: + Returns a :py:class:`datetime.datetime` object representing the + wall time in ``self``'s time zone. + """ + # These isinstance checks are in datetime.tzinfo, so we'll preserve + # them, even if we don't care about duck typing. + if not isinstance(dt, datetime.datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # First treat UTC as wall time and get the transition we're in. + idx = self._find_last_transition(dt, in_utc=True) + tti = self._get_ttinfo(idx) + + dt_out = dt + datetime.timedelta(seconds=tti.offset) + + fold = self.is_ambiguous(dt_out, idx=idx) + + return enfold(dt_out, fold=int(fold)) + + def is_ambiguous(self, dt, idx=None): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if idx is None: + idx = self._find_last_transition(dt) + + # Calculate the difference in offsets from current to previous + timestamp = _datetime_to_timestamp(dt) + tti = self._get_ttinfo(idx) + + if idx is None or idx <= 0: + return False + + od = self._get_ttinfo(idx - 1).offset - tti.offset + tt = self._trans_list[idx] # Transition time + + return timestamp < tt + od + + def _resolve_ambiguous_time(self, dt): + idx = self._find_last_transition(dt) + + # If we have no transitions, return the index + _fold = self._fold(dt) + if idx is None or idx == 0: + return idx + + # If it's ambiguous and we're in a fold, shift to a different index. + idx_offset = int(not _fold and self.is_ambiguous(dt, idx)) + + return idx - idx_offset + + def utcoffset(self, dt): + if dt is None: + return None + + if not self._ttinfo_std: + return ZERO + + return self._find_ttinfo(dt).delta + + def dst(self, dt): + if dt is None: + return None + + if not self._ttinfo_dst: + return ZERO + + tti = self._find_ttinfo(dt) + + if not tti.isdst: + return ZERO + + # The documentation says that utcoffset()-dst() must + # be constant for every dt. 
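        # dstoffset was computed in _read_tzfile() as this DST type's offset
        # minus the adjacent standard offset, so returning it here keeps
        # utcoffset() - dst() equal to the standard offset for every dt.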
+ return tti.dstoffset + + @tzname_in_python2 + def tzname(self, dt): + if not self._ttinfo_std or dt is None: + return None + return self._find_ttinfo(dt).abbr + + def __eq__(self, other): + if not isinstance(other, tzfile): + return NotImplemented + return (self._trans_list == other._trans_list and + self._trans_idx == other._trans_idx and + self._ttinfo_list == other._ttinfo_list) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) + + def __reduce__(self): + return self.__reduce_ex__(None) + + def __reduce_ex__(self, protocol): + return (self.__class__, (None, self._filename), self.__dict__) + + +class tzrange(tzrangebase): + """ + The ``tzrange`` object is a time zone specified by a set of offsets and + abbreviations, equivalent to the way the ``TZ`` variable can be specified + in POSIX-like systems, but using Python delta objects to specify DST + start, end and offsets. + + :param stdabbr: + The abbreviation for standard time (e.g. ``'EST'``). + + :param stdoffset: + An integer or :class:`datetime.timedelta` object or equivalent + specifying the base offset from UTC. + + If unspecified, +00:00 is used. + + :param dstabbr: + The abbreviation for DST / "Summer" time (e.g. ``'EDT'``). + + If specified, with no other DST information, DST is assumed to occur + and the default behavior or ``dstoffset``, ``start`` and ``end`` is + used. If unspecified and no other DST information is specified, it + is assumed that this zone has no DST. + + If this is unspecified and other DST information is *is* specified, + DST occurs in the zone but the time zone abbreviation is left + unchanged. + + :param dstoffset: + A an integer or :class:`datetime.timedelta` object or equivalent + specifying the UTC offset during DST. If unspecified and any other DST + information is specified, it is assumed to be the STD offset +1 hour. + + :param start: + A :class:`relativedelta.relativedelta` object or equivalent specifying + the time and time of year that daylight savings time starts. To specify, + for example, that DST starts at 2AM on the 2nd Sunday in March, pass: + + ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))`` + + If unspecified and any other DST information is specified, the default + value is 2 AM on the first Sunday in April. + + :param end: + A :class:`relativedelta.relativedelta` object or equivalent representing + the time and time of year that daylight savings time ends, with the + same specification method as in ``start``. One note is that this should + point to the first time in the *standard* zone, so if a transition + occurs at 2AM in the DST zone and the clocks are set back 1 hour to 1AM, + set the `hours` parameter to +1. + + + **Examples:** + + .. testsetup:: tzrange + + from dateutil.tz import tzrange, tzstr + + .. doctest:: tzrange + + >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") + True + + >>> from dateutil.relativedelta import * + >>> range1 = tzrange("EST", -18000, "EDT") + >>> range2 = tzrange("EST", -18000, "EDT", -14400, + ... relativedelta(hours=+2, month=4, day=1, + ... weekday=SU(+1)), + ... relativedelta(hours=+1, month=10, day=31, + ... 
weekday=SU(-1))) + >>> tzstr('EST5EDT') == range1 == range2 + True + + """ + def __init__(self, stdabbr, stdoffset=None, + dstabbr=None, dstoffset=None, + start=None, end=None): + + global relativedelta + from dateutil import relativedelta + + self._std_abbr = stdabbr + self._dst_abbr = dstabbr + + try: + stdoffset = _total_seconds(stdoffset) + except (TypeError, AttributeError): + pass + + try: + dstoffset = _total_seconds(dstoffset) + except (TypeError, AttributeError): + pass + + if stdoffset is not None: + self._std_offset = datetime.timedelta(seconds=stdoffset) + else: + self._std_offset = ZERO + + if dstoffset is not None: + self._dst_offset = datetime.timedelta(seconds=dstoffset) + elif dstabbr and stdoffset is not None: + self._dst_offset = self._std_offset + datetime.timedelta(hours=+1) + else: + self._dst_offset = ZERO + + if dstabbr and start is None: + self._start_delta = relativedelta.relativedelta( + hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) + else: + self._start_delta = start + + if dstabbr and end is None: + self._end_delta = relativedelta.relativedelta( + hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) + else: + self._end_delta = end + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = bool(self._start_delta) + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. + """ + if not self.hasdst: + return None + + base_year = datetime.datetime(year, 1, 1) + + start = base_year + self._start_delta + end = base_year + self._end_delta + + return (start, end) + + def __eq__(self, other): + if not isinstance(other, tzrange): + return NotImplemented + + return (self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr and + self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._start_delta == other._start_delta and + self._end_delta == other._end_delta) + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +class tzstr(tzrange): + """ + ``tzstr`` objects are time zone objects specified by a time-zone string as + it would be passed to a ``TZ`` variable on POSIX-style systems (see + the `GNU C Library: TZ Variable`_ for more details). + + There is one notable exception, which is that POSIX-style time zones use an + inverted offset format, so normally ``GMT+3`` would be parsed as an offset + 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an + offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX + behavior, pass a ``True`` value to ``posix_offset``. + + The :class:`tzrange` object provides the same functionality, but is + specified using :class:`relativedelta.relativedelta` objects. rather than + strings. + + :param s: + A time zone string in ``TZ`` variable format. This can be a + :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: :class:`unicode`) + or a stream emitting unicode characters (e.g. :class:`StringIO`). + + :param posix_offset: + Optional. If set to ``True``, interpret strings such as ``GMT+3`` or + ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the + POSIX standard. + + .. 
_`GNU C Library: TZ Variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + """ + def __init__(self, s, posix_offset=False): + global parser + from dateutil import parser + + self._s = s + + res = parser._parsetz(s) + if res is None: + raise ValueError("unknown string format") + + # Here we break the compatibility with the TZ variable handling. + # GMT-3 actually *means* the timezone -3. + if res.stdabbr in ("GMT", "UTC") and not posix_offset: + res.stdoffset *= -1 + + # We must initialize it first, since _delta() needs + # _std_offset and _dst_offset set. Use False in start/end + # to avoid building it two times. + tzrange.__init__(self, res.stdabbr, res.stdoffset, + res.dstabbr, res.dstoffset, + start=False, end=False) + + if not res.dstabbr: + self._start_delta = None + self._end_delta = None + else: + self._start_delta = self._delta(res.start) + if self._start_delta: + self._end_delta = self._delta(res.end, isend=1) + + self.hasdst = bool(self._start_delta) + + def _delta(self, x, isend=0): + from dateutil import relativedelta + kwargs = {} + if x.month is not None: + kwargs["month"] = x.month + if x.weekday is not None: + kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) + if x.week > 0: + kwargs["day"] = 1 + else: + kwargs["day"] = 31 + elif x.day: + kwargs["day"] = x.day + elif x.yday is not None: + kwargs["yearday"] = x.yday + elif x.jyday is not None: + kwargs["nlyearday"] = x.jyday + if not kwargs: + # Default is to start on first sunday of april, and end + # on last sunday of october. + if not isend: + kwargs["month"] = 4 + kwargs["day"] = 1 + kwargs["weekday"] = relativedelta.SU(+1) + else: + kwargs["month"] = 10 + kwargs["day"] = 31 + kwargs["weekday"] = relativedelta.SU(-1) + if x.time is not None: + kwargs["seconds"] = x.time + else: + # Default is 2AM. + kwargs["seconds"] = 7200 + if isend: + # Convert to standard time, to follow the documented way + # of working with the extra hour. See the documentation + # of the tzinfo class. + delta = self._dst_offset - self._std_offset + kwargs["seconds"] -= delta.seconds + delta.days * 86400 + return relativedelta.relativedelta(**kwargs) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +class _tzicalvtzcomp(object): + def __init__(self, tzoffsetfrom, tzoffsetto, isdst, + tzname=None, rrule=None): + self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) + self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) + self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom + self.isdst = isdst + self.tzname = tzname + self.rrule = rrule + + +class _tzicalvtz(_tzinfo): + def __init__(self, tzid, comps=[]): + super(_tzicalvtz, self).__init__() + + self._tzid = tzid + self._comps = comps + self._cachedate = [] + self._cachecomp = [] + + def _find_comp(self, dt): + if len(self._comps) == 1: + return self._comps[0] + + dt = dt.replace(tzinfo=None) + + try: + return self._cachecomp[self._cachedate.index((dt, self._fold(dt)))] + except ValueError: + pass + + lastcompdt = None + lastcomp = None + + for comp in self._comps: + compdt = self._find_compdt(comp, dt) + + if compdt and (not lastcompdt or lastcompdt < compdt): + lastcompdt = compdt + lastcomp = comp + + if not lastcomp: + # RFC says nothing about what to do when a given + # time is before the first onset date. We'll look for the + # first standard component, or the first component, if + # none is found. 
+ for comp in self._comps: + if not comp.isdst: + lastcomp = comp + break + else: + lastcomp = comp[0] + + self._cachedate.insert(0, (dt, self._fold(dt))) + self._cachecomp.insert(0, lastcomp) + + if len(self._cachedate) > 10: + self._cachedate.pop() + self._cachecomp.pop() + + return lastcomp + + def _find_compdt(self, comp, dt): + if comp.tzoffsetdiff < ZERO and self._fold(dt): + dt -= comp.tzoffsetdiff + + compdt = comp.rrule.before(dt, inc=True) + + return compdt + + def utcoffset(self, dt): + if dt is None: + return None + + return self._find_comp(dt).tzoffsetto + + def dst(self, dt): + comp = self._find_comp(dt) + if comp.isdst: + return comp.tzoffsetdiff + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._find_comp(dt).tzname + + def __repr__(self): + return "" % repr(self._tzid) + + __reduce__ = object.__reduce__ + + +class tzical(object): + """ + This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure + as set out in `RFC 2445`_ Section 4.6.5 into one or more `tzinfo` objects. + + :param `fileobj`: + A file or stream in iCalendar format, which should be UTF-8 encoded + with CRLF endings. + + .. _`RFC 2445`: https://www.ietf.org/rfc/rfc2445.txt + """ + def __init__(self, fileobj): + global rrule + from dateutil import rrule + + if isinstance(fileobj, string_types): + self._s = fileobj + # ical should be encoded in UTF-8 with CRLF + fileobj = open(fileobj, 'r') + else: + self._s = getattr(fileobj, 'name', repr(fileobj)) + fileobj = _ContextWrapper(fileobj) + + self._vtz = {} + + with fileobj as fobj: + self._parse_rfc(fobj.read()) + + def keys(self): + """ + Retrieves the available time zones as a list. + """ + return list(self._vtz.keys()) + + def get(self, tzid=None): + """ + Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``. + + :param tzid: + If there is exactly one time zone available, omitting ``tzid`` + or passing :py:const:`None` value returns it. Otherwise a valid + key (which can be retrieved from :func:`keys`) is required. + + :raises ValueError: + Raised if ``tzid`` is not specified but there are either more + or fewer than 1 zone defined. + + :returns: + Returns either a :py:class:`datetime.tzinfo` object representing + the relevant time zone or :py:const:`None` if the ``tzid`` was + not found. 
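        Illustrative sketch (the file name and TZID below are hypothetical):

            from dateutil.tz import tzical

            ical = tzical('zones.ics')        # iCalendar file with VTIMEZONEs
            ical.keys()                       # list the available TZIDs
            eastern = ical.get('US-Eastern')  # or ical.get() if only one zone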
+ """ + if tzid is None: + if len(self._vtz) == 0: + raise ValueError("no timezones defined") + elif len(self._vtz) > 1: + raise ValueError("more than one timezone available") + tzid = next(iter(self._vtz)) + + return self._vtz.get(tzid) + + def _parse_offset(self, s): + s = s.strip() + if not s: + raise ValueError("empty offset") + if s[0] in ('+', '-'): + signal = (-1, +1)[s[0] == '+'] + s = s[1:] + else: + signal = +1 + if len(s) == 4: + return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal + elif len(s) == 6: + return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal + else: + raise ValueError("invalid offset: " + s) + + def _parse_rfc(self, s): + lines = s.splitlines() + if not lines: + raise ValueError("empty string") + + # Unfold + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + + tzid = None + comps = [] + invtz = False + comptype = None + for line in lines: + if not line: + continue + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0].upper() + parms = parms[1:] + if invtz: + if name == "BEGIN": + if value in ("STANDARD", "DAYLIGHT"): + # Process component + pass + else: + raise ValueError("unknown component: "+value) + comptype = value + founddtstart = False + tzoffsetfrom = None + tzoffsetto = None + rrulelines = [] + tzname = None + elif name == "END": + if value == "VTIMEZONE": + if comptype: + raise ValueError("component not closed: "+comptype) + if not tzid: + raise ValueError("mandatory TZID not found") + if not comps: + raise ValueError( + "at least one component is needed") + # Process vtimezone + self._vtz[tzid] = _tzicalvtz(tzid, comps) + invtz = False + elif value == comptype: + if not founddtstart: + raise ValueError("mandatory DTSTART not found") + if tzoffsetfrom is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + if tzoffsetto is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + # Process component + rr = None + if rrulelines: + rr = rrule.rrulestr("\n".join(rrulelines), + compatible=True, + ignoretz=True, + cache=True) + comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, + (comptype == "DAYLIGHT"), + tzname, rr) + comps.append(comp) + comptype = None + else: + raise ValueError("invalid component end: "+value) + elif comptype: + if name == "DTSTART": + rrulelines.append(line) + founddtstart = True + elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): + rrulelines.append(line) + elif name == "TZOFFSETFROM": + if parms: + raise ValueError( + "unsupported %s parm: %s " % (name, parms[0])) + tzoffsetfrom = self._parse_offset(value) + elif name == "TZOFFSETTO": + if parms: + raise ValueError( + "unsupported TZOFFSETTO parm: "+parms[0]) + tzoffsetto = self._parse_offset(value) + elif name == "TZNAME": + if parms: + raise ValueError( + "unsupported TZNAME parm: "+parms[0]) + tzname = value + elif name == "COMMENT": + pass + else: + raise ValueError("unsupported property: "+name) + else: + if name == "TZID": + if parms: + raise ValueError( + "unsupported TZID parm: "+parms[0]) + tzid = value + elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): + pass + else: + raise ValueError("unsupported property: "+name) + elif name == "BEGIN" and value == "VTIMEZONE": + tzid = None + comps = [] + invtz = True + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +if sys.platform != "win32": + 
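    # Candidate tzfile locations tried by gettz() when no zone name (or the
    # bare ":" value) is given; relative entries are resolved against
    # TZPATHS below.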
TZFILES = ["/etc/localtime", "localtime"] + TZPATHS = ["/usr/share/zoneinfo", + "/usr/lib/zoneinfo", + "/usr/share/lib/zoneinfo", + "/etc/zoneinfo"] +else: + TZFILES = [] + TZPATHS = [] + + +def gettz(name=None): + tz = None + if not name: + try: + name = os.environ["TZ"] + except KeyError: + pass + if name is None or name == ":": + for filepath in TZFILES: + if not os.path.isabs(filepath): + filename = filepath + for path in TZPATHS: + filepath = os.path.join(path, filename) + if os.path.isfile(filepath): + break + else: + continue + if os.path.isfile(filepath): + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = tzlocal() + else: + if name.startswith(":"): + name = name[:-1] + if os.path.isabs(name): + if os.path.isfile(name): + tz = tzfile(name) + else: + tz = None + else: + for path in TZPATHS: + filepath = os.path.join(path, name) + if not os.path.isfile(filepath): + filepath = filepath.replace(' ', '_') + if not os.path.isfile(filepath): + continue + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = None + if tzwin is not None: + try: + tz = tzwin(name) + except WindowsError: + tz = None + + if not tz: + from dateutil.zoneinfo import get_zonefile_instance + tz = get_zonefile_instance().get(name) + + if not tz: + for c in name: + # name must have at least one offset to be a tzstr + if c in "0123456789": + try: + tz = tzstr(name) + except ValueError: + pass + break + else: + if name in ("GMT", "UTC"): + tz = tzutc() + elif name in time.tzname: + tz = tzlocal() + return tz + + +def datetime_exists(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + would fall in a gap. + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" exists in ``tz``. + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + tz = dt.tzinfo + + dt = dt.replace(tzinfo=None) + + # This is essentially a test of whether or not the datetime can survive + # a round trip to UTC. + dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz) + dt_rt = dt_rt.replace(tzinfo=None) + + return dt == dt_rt + + +def datetime_ambiguous(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + is ambiguous (i.e if there are two times differentiated only by their DST + status). + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" is ambiguous in + ``tz``. + + .. versionadded:: 2.6.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + + tz = dt.tzinfo + + # If a time zone defines its own "is_ambiguous" function, we'll use that. 
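    # Every tzinfo class in this module provides is_ambiguous(); the
    # getattr/except fallback below covers third-party tzinfo objects that
    # do not. Illustration (assumes the system zoneinfo knows
    # America/New_York): datetime_ambiguous(datetime.datetime(2011, 11, 6,
    # 1, 30), tz=gettz('America/New_York')) is True, because 01:30 occurs
    # twice when clocks fall back from EDT to EST.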
+ is_ambiguous_fn = getattr(tz, 'is_ambiguous', None) + if is_ambiguous_fn is not None: + try: + return tz.is_ambiguous(dt) + except: + pass + + # If it doesn't come out and tell us it's ambiguous, we'll just check if + # the fold attribute has any effect on this particular date and time. + dt = dt.replace(tzinfo=tz) + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dst = wall_0.dst() == wall_1.dst() + + return not (same_offset and same_dst) + + +def _datetime_to_timestamp(dt): + """ + Convert a :class:`datetime.datetime` object to an epoch timestamp in seconds + since January 1, 1970, ignoring the time zone. + """ + return _total_seconds((dt.replace(tzinfo=None) - EPOCH)) + + +class _ContextWrapper(object): + """ + Class for wrapping contexts so that they are passed through in a + with statement. + """ + def __init__(self, context): + self.context = context + + def __enter__(self): + return self.context + + def __exit__(*args, **kwargs): + pass + +# vim:ts=4:sw=4:et diff --git a/python/dateutil/tz/win.py b/python/dateutil/tz/win.py new file mode 100644 index 0000000..36a1c26 --- /dev/null +++ b/python/dateutil/tz/win.py @@ -0,0 +1,332 @@ +# This code was originally contributed by Jeffrey Harris. +import datetime +import struct + +from six.moves import winreg +from six import text_type + +try: + import ctypes + from ctypes import wintypes +except ValueError: + # ValueError is raised on non-Windows systems for some horrible reason. + raise ImportError("Running tzwin on non-Windows system") + +from ._common import tzrangebase + +__all__ = ["tzwin", "tzwinlocal", "tzres"] + +ONEWEEK = datetime.timedelta(7) + +TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" +TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" +TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" + + +def _settzkeyname(): + handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) + try: + winreg.OpenKey(handle, TZKEYNAMENT).Close() + TZKEYNAME = TZKEYNAMENT + except WindowsError: + TZKEYNAME = TZKEYNAME9X + handle.Close() + return TZKEYNAME + + +TZKEYNAME = _settzkeyname() + + +class tzres(object): + """ + Class for accessing `tzres.dll`, which contains timezone name related + resources. + + .. versionadded:: 2.5.0 + """ + p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char + + def __init__(self, tzres_loc='tzres.dll'): + # Load the user32 DLL so we can load strings from tzres + user32 = ctypes.WinDLL('user32') + + # Specify the LoadStringW function + user32.LoadStringW.argtypes = (wintypes.HINSTANCE, + wintypes.UINT, + wintypes.LPWSTR, + ctypes.c_int) + + self.LoadStringW = user32.LoadStringW + self._tzres = ctypes.WinDLL(tzres_loc) + self.tzres_loc = tzres_loc + + def load_name(self, offset): + """ + Load a timezone name from a DLL offset (integer). + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.load_name(112)) + 'Eastern Standard Time' + + :param offset: + A positive integer value referring to a string from the tzres dll. + + ..note: + Offsets found in the registry are generally of the form + `@tzres.dll,-114`. The offset in this case if 114, not -114. 
+ + """ + resource = self.p_wchar() + lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) + nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) + return resource[:nchar] + + def name_from_string(self, tzname_str): + """ + Parse strings as returned from the Windows registry into the time zone + name as defined in the registry. + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.name_from_string('@tzres.dll,-251')) + 'Dateline Daylight Time' + >>> print(tzr.name_from_string('Eastern Standard Time')) + 'Eastern Standard Time' + + :param tzname_str: + A timezone name string as returned from a Windows registry key. + + :return: + Returns the localized timezone string from tzres.dll if the string + is of the form `@tzres.dll,-offset`, else returns the input string. + """ + if not tzname_str.startswith('@'): + return tzname_str + + name_splt = tzname_str.split(',-') + try: + offset = int(name_splt[1]) + except: + raise ValueError("Malformed timezone string.") + + return self.load_name(offset) + + +class tzwinbase(tzrangebase): + """tzinfo class based on win32's timezones available in the registry.""" + def __init__(self): + raise NotImplementedError('tzwinbase is an abstract base class') + + def __eq__(self, other): + # Compare on all relevant dimensions, including name. + if not isinstance(other, tzwinbase): + return NotImplemented + + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._stddayofweek == other._stddayofweek and + self._dstdayofweek == other._dstdayofweek and + self._stdweeknumber == other._stdweeknumber and + self._dstweeknumber == other._dstweeknumber and + self._stdhour == other._stdhour and + self._dsthour == other._dsthour and + self._stdminute == other._stdminute and + self._dstminute == other._dstminute and + self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr) + + @staticmethod + def list(): + """Return a list of all time zones known to the system.""" + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZKEYNAME) as tzkey: + result = [winreg.EnumKey(tzkey, i) + for i in range(winreg.QueryInfoKey(tzkey)[0])] + return result + + def display(self): + return self._display + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. 
+ """ + + if not self.hasdst: + return None + + dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, + self._dsthour, self._dstminute, + self._dstweeknumber) + + dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, + self._stdhour, self._stdminute, + self._stdweeknumber) + + # Ambiguous dates default to the STD side + dstoff -= self._dst_base_offset + + return dston, dstoff + + def _get_hasdst(self): + return self._dstmonth != 0 + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +class tzwin(tzwinbase): + + def __init__(self, name): + self._name = name + + # multiple contexts only possible in 2.7 and 3.1, we still support 2.6 + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + keydict = valuestodict(tzkey) + + self._std_abbr = keydict["Std"] + self._dst_abbr = keydict["Dlt"] + + self._display = keydict["Display"] + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=3l16h", keydict["TZI"]) + stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 + dstoffset = stdoffset-tup[2] # + DaylightBias * -1 + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs + # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[4:9] + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[12:17] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwin(%s)" % repr(self._name) + + def __reduce__(self): + return (self.__class__, (self._name,)) + + +class tzwinlocal(tzwinbase): + def __init__(self): + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: + keydict = valuestodict(tzlocalkey) + + self._std_abbr = keydict["StandardName"] + self._dst_abbr = keydict["DaylightName"] + + try: + tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, + sn=self._std_abbr) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + _keydict = valuestodict(tzkey) + self._display = _keydict["Display"] + except OSError: + self._display = None + + stdoffset = -keydict["Bias"]-keydict["StandardBias"] + dstoffset = stdoffset-keydict["DaylightBias"] + + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # For reasons unclear, in this particular key, the day of week has been + # moved to the END of the SYSTEMTIME structure. 
+ tup = struct.unpack("=8h", keydict["StandardStart"]) + + (self._stdmonth, + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[1:5] + + self._stddayofweek = tup[7] + + tup = struct.unpack("=8h", keydict["DaylightStart"]) + + (self._dstmonth, + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[1:5] + + self._dstdayofweek = tup[7] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwinlocal()" + + def __str__(self): + # str will return the standard name, not the daylight name. + return "tzwinlocal(%s)" % repr(self._std_abbr) + + def __reduce__(self): + return (self.__class__, ()) + + +def picknthweekday(year, month, dayofweek, hour, minute, whichweek): + """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ + first = datetime.datetime(year, month, 1, hour, minute) + + # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), + # Because 7 % 7 = 0 + weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) + wd = weekdayone + ((whichweek - 1) * ONEWEEK) + if (wd.month != month): + wd -= ONEWEEK + + return wd + + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dout = {} + size = winreg.QueryInfoKey(key)[1] + tz_res = None + + for i in range(size): + key_name, value, dtype = winreg.EnumValue(key, i) + if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: + # If it's a DWORD (32-bit integer), it's stored as unsigned - convert + # that to a proper signed integer + if value & (1 << 31): + value = value - (1 << 32) + elif dtype == winreg.REG_SZ: + # If it's a reference to the tzres DLL, load the actual string + if value.startswith('@tzres'): + tz_res = tz_res or tzres() + value = tz_res.name_from_string(value) + + value = value.rstrip('\x00') # Remove trailing nulls + + dout[key_name] = value + + return dout diff --git a/python/dateutil/tzwin.py b/python/dateutil/tzwin.py new file mode 100644 index 0000000..cebc673 --- /dev/null +++ b/python/dateutil/tzwin.py @@ -0,0 +1,2 @@ +# tzwin has moved to dateutil.tz.win +from .tz.win import * diff --git a/python/dateutil/zoneinfo/__init__.py b/python/dateutil/zoneinfo/__init__.py new file mode 100644 index 0000000..a2ed4f9 --- /dev/null +++ b/python/dateutil/zoneinfo/__init__.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- +import warnings +import json + +from tarfile import TarFile +from pkgutil import get_data +from io import BytesIO +from contextlib import closing + +from dateutil.tz import tzfile + +__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata", "rebuild"] + +ZONEFILENAME = "dateutil-zoneinfo.tar.gz" +METADATA_FN = 'METADATA' + +# python2.6 compatability. Note that TarFile.__exit__ != TarFile.close, but +# it's close enough for python2.6 +tar_open = TarFile.open +if not hasattr(TarFile, '__exit__'): + def tar_open(*args, **kwargs): + return closing(TarFile.open(*args, **kwargs)) + + +class tzfile(tzfile): + def __reduce__(self): + return (gettz, (self._filename,)) + + +def getzoneinfofile_stream(): + try: + return BytesIO(get_data(__name__, ZONEFILENAME)) + except IOError as e: # TODO switch to FileNotFoundError? 
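+        # get_data() may raise IOError when the bundled tarball is missing;
+        # warn and return None so that ZoneInfoFile(None) falls back to an
+        # empty zone dictionary instead of crashing.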
+ warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror)) + return None + + +class ZoneInfoFile(object): + def __init__(self, zonefile_stream=None): + if zonefile_stream is not None: + with tar_open(fileobj=zonefile_stream, mode='r') as tf: + # dict comprehension does not work on python2.6 + # TODO: get back to the nicer syntax when we ditch python2.6 + # self.zones = {zf.name: tzfile(tf.extractfile(zf), + # filename = zf.name) + # for zf in tf.getmembers() if zf.isfile()} + self.zones = dict((zf.name, tzfile(tf.extractfile(zf), + filename=zf.name)) + for zf in tf.getmembers() + if zf.isfile() and zf.name != METADATA_FN) + # deal with links: They'll point to their parent object. Less + # waste of memory + # links = {zl.name: self.zones[zl.linkname] + # for zl in tf.getmembers() if zl.islnk() or zl.issym()} + links = dict((zl.name, self.zones[zl.linkname]) + for zl in tf.getmembers() if + zl.islnk() or zl.issym()) + self.zones.update(links) + try: + metadata_json = tf.extractfile(tf.getmember(METADATA_FN)) + metadata_str = metadata_json.read().decode('UTF-8') + self.metadata = json.loads(metadata_str) + except KeyError: + # no metadata in tar file + self.metadata = None + else: + self.zones = dict() + self.metadata = None + + def get(self, name, default=None): + """ + Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method + for retrieving zones from the zone dictionary. + + :param name: + The name of the zone to retrieve. (Generally IANA zone names) + + :param default: + The value to return in the event of a missing key. + + .. versionadded:: 2.6.0 + + """ + return self.zones.get(name, default) + + +# The current API has gettz as a module function, although in fact it taps into +# a stateful class. So as a workaround for now, without changing the API, we +# will create a new "global" class instance the first time a user requests a +# timezone. Ugly, but adheres to the api. +# +# TODO: Remove after deprecation period. +_CLASS_ZONE_INSTANCE = list() + + +def get_zonefile_instance(new_instance=False): + """ + This is a convenience function which provides a :class:`ZoneInfoFile` + instance using the data provided by the ``dateutil`` package. By default, it + caches a single instance of the ZoneInfoFile object and returns that. + + :param new_instance: + If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and + used as the cached instance for the next call. Otherwise, new instances + are created only as necessary. + + :return: + Returns a :class:`ZoneInfoFile` object. + + .. versionadded:: 2.6 + """ + if new_instance: + zif = None + else: + zif = getattr(get_zonefile_instance, '_cached_instance', None) + + if zif is None: + zif = ZoneInfoFile(getzoneinfofile_stream()) + + get_zonefile_instance._cached_instance = zif + + return zif + + +def gettz(name): + """ + This retrieves a time zone from the local zoneinfo tarball that is packaged + with dateutil. + + :param name: + An IANA-style time zone name, as found in the zoneinfo file. + + :return: + Returns a :class:`dateutil.tz.tzfile` time zone object. + + .. warning:: + It is generally inadvisable to use this function, and it is only + provided for API compatibility with earlier versions. This is *not* + equivalent to ``dateutil.tz.gettz()``, which selects an appropriate + time zone based on the inputs, favoring system zoneinfo. This is ONLY + for accessing the dateutil-specific zoneinfo (which may be out of + date compared to the system zoneinfo). + + .. 
deprecated:: 2.6 + If you need to use a specific zoneinfofile over the system zoneinfo, + instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call + :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead. + + Use :func:`get_zonefile_instance` to retrieve an instance of the + dateutil-provided zoneinfo. + """ + warnings.warn("zoneinfo.gettz() will be removed in future versions, " + "to use the dateutil-provided zoneinfo files, instantiate a " + "ZoneInfoFile object and use ZoneInfoFile.zones.get() " + "instead. See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].zones.get(name) + + +def gettz_db_metadata(): + """ Get the zonefile metadata + + See `zonefile_metadata`_ + + :returns: + A dictionary with the database metadata + + .. deprecated:: 2.6 + See deprecation warning in :func:`zoneinfo.gettz`. To get metadata, + query the attribute ``zoneinfo.ZoneInfoFile.metadata``. + """ + warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future " + "versions, to use the dateutil-provided zoneinfo files, " + "ZoneInfoFile object and query the 'metadata' attribute " + "instead. See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].metadata diff --git a/python/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/python/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..613c0ff3b4ef910935629a7d145354f40314150c GIT binary patch literal 138881 zcmX`SRa6{Z*R>r&0s#U9_r~4b-95Ow1$VbV65QPh?(QDk-QC^YyT7`h_aFbk9J}_K z*Ic`*PrAB#7h(99FZIM+S|1^%MlNQqE|xa*UiNlomUia$3@%1aAI|h(4bk@(GQU~< zfR?fsl@}Q;v@b>=Cd*;?=Cvv$@`d~>@$V@~;cwyu=)whv;-4&YV5tXSgjYV!gww){ zoqQxA0DDhjv4vcQcEWD^n@C!xp9qFAe zB?k*b?Znm-_6tLmWLIAqOJ|+rz$K>OW|{rjzaKC2IE=nO@b4={2P+iDhDH?qOsaC; zN!~krpI&|QL-EiFBhd&@(ot_dL?Vid{c0j*-r;kHH1#H|o3!7j5{VT8m1vMROE~uhf zPdyc3e{~Ia7annUixhSjUU+%BKFD~|)OFEBpo8NN&Z~;R$HyW-6ZA;oxx3cGTjmsK zR5U$!$HbA$ZdUBM+$6w#eX_=#x{ihRhq&EUKXqHV9Juem3{u{O>D8!Iue9lrNAEdGI1xapoI!Ir?ZXrWnAA=+Z zB+VkAnwXUz&X+e@k?^?Av>;J})Baw!k1vXs9u}vN+h%c>$n(?Ny;TOB=&>s2uyG`m zZpBCE=tk!auQ%^L|1Akar%?(+%uSwoyG`Qo=Sypnx<_8{>+7rOtsug)0oiVM(*n-w zJiAxga#_#AGGbL^ipuWIIXCAo<3oibmsKv7t0GV37>+71$*73u_mdtNfnh?7%j7pD z=*78c+TWUs^mMkg7vP!>u@bbTIL~Mu*$wIp?Gqnj@8jL>B$l>~hU9e{u2K5*6FR|D z42wV1qL~_1R05~oX+ZYI7j8mo4r~M>XsG_PIeO~&zC&w&MjpZoH8XK3mK~{ z>1@la%?%}MA6MaumWSWk*4uUs5A_vRnQ*jfRi8xJ#pTsHylvcG`X(J-I8t`+tT}s* z4fLI1o?M(^)-KQ4lI4}hDbsm=UAU>8+Lc_CySI00`zuwi7^-}3*zU8k%1e#2I!>3g z<;%&alO9sEa>$0AD4cgICGJMDsM8&tpQ16~5@oz}2x?<+3A)reu>tprS%hQQ^XOb? z|84ZLu4c_O^Gpx7Mq13^b~+fzq_odkdv-cRac_D{z4#0-@Ni?Wi4be54!6@QGMPWL z5wCkQ^Wd;y$JSf$lPr~m9)d#`gTma>Mm$M$>ff`b$3Z@X#)*lu3N-ASG$E5_p|Y0mrkP<;LUl zOp4i_cgU_VS(6!=khNe+WEQ;8vv=L)i5(i&NZ(*d? 
zj7-5FLPaCSNF@$XNpC}Ax%-FOjZN*CS-P-OO(MovB@S>&ZzE&52Z!2COfQ&OAV2$r zB@T#VnumsbOidM7SQv3q)3LMr6pypzzH`2$u5>O-oDq7Re#06~3>^y4N`chqd96X7 zZ<=T~CP}&9gI@(>Bg|~UkiF~2>wCB{0r2Gh6Rf1^^EKqxOSHjHe>d=B;DSGsF*yDO z84S%Cc=n>WKnO9qhR&5VrG~t>zh}=u&cFsqH-i2HzwE0_ZITbfRQz`bURec{z- z=LG~?qfNStHOdimtQ(_oD;gL}4=$dTd^6tBkdap@H#eIYJNTDt&SdSaSDHr~>w~jb z-PyW~Ud^M;*IS(b*e<3{R=fu3I-C*4OnMpm#hkBh;V=B6N8my7jH)H9^z3&eGeCZw z#%HyW^tUnw6tTq%Yo^!tJH@+)R+ayOQ5->n7EII$E z!>^>@s5^{U|1rAXw35bqp~tQ`WlTT0Zlv5f6CU9+c*?Vu;d%CXkH7JwWSKSs!`E62 zZMJ!B4-u_&KF8vQA7ig2{o0?U82?1c(d+N$tM_zxTS+fY)+>AB7A9r!jcdR3)z3Mt zA}x(h(%HRRyO#Q#ps|;74)A76M$+v05=z$T^4?9+Y!2rnPmXL?W{+-r&+Hut(#7bM zKZ)73oU(SzuTtJGj2;}@?(CSJCR}^1DpZ}G!$_Q-kL&D*B;|Mo$5q@%As!wP*r(ko zc%N!=>w)neGg?6F&dmZNHITQ9G|o-5N8LEp1!%QoKm0^0`H670maW}k*MmpGP0V9w zkFp55)Y<(s3PGB`>>@i{0|U+h$o2szS(k`Ngx zMU+hJRAnj^lnU%rZJ}H&Qk95U%Yh+PXw-G=RHq0zUWq~w>1=c?_0SNTsVNN$3jt1Q zT!fsgL?H#~Y+A`76* zMB-o*;G{{ugIqAoxRT%#V5J#;lHpJx#M%6%lbX4<84Y}%E24podwXL{gw{F`({c!A z7zx}Q&ZYBE$29An{2X@|TD$d8E8(+Xthm+{>l?l-u@{IexL7`(jHQ;2rIwAQmXD=Y zER4mNO21B(rB?T&G|RL(>2d-%p!HZiyK{l5t@Ej8V`XrlU##@kZeAS|1o`UzxdVcH zH#q^Rn}HmTdzfyhbM?sMLrww}O|(x->;zXjwVVeUovo_cUwdJ=W#u|z7s-RICv;n! z_ha*iKySb9|3w6BkpDpA*qi@uHS2L^9l6~(vYUIB-R|+5 zt1JPok9Z5kc~)V)p|sdjoE-B0Amtqp`}is5 z0fzJvMPh?CqTK@9^OETXBqAf^4Mz=iPM!a$z9|Nkua5sf!l0NS!FC=j-Q;Fl+YUKA zKSaCA2fv!1`eUB>S4O~E!Qre|TfdUF{(#-UF7RD7ZMij+GCTf0I++oC>Q6y1 zysp8o4Nh~rTZ5f2K`xUWoOLUs#Mi5j&z71{slV;i!7AX9=%Voz^bNXoiAFyU-sYVr z#M4ihNI}21DIlI{$t5f~=Ek%-UX14Qf7uRk0UJWy_bfg)grDMZGp@H&>iI!v4-@nB z)13S0*(W=qls9Cn%AOSz7S@4m`4x^P&TS-_&bpO%!wi1tXOveQo;`s0$;vZyAWYN8xploLkWB2 z{R+I2fQ8wPL_+GpB_JXqAd?XI{>9#<`rF3QPvZ7zVNs7f=wJYejzuQw!6y(?Jv$QW z>6q>Q?OX&MW+(=m7>i6tk-P6gL=dXdg|~@ox=xZL2rnw#YwQj;h{8iror9m z_FJv9^`SbexaSRrH>;MIVj z3UqXnc)NP#I`K>RH9<7XfGSwFN-1hjrx?HGJYu3Ysf@}u%LTtHgJRt`oejXZO8f)?9Ke&biGQh7c(s!Y(|Zf4av$i7LiJ2 zL9t40TA@ny3B5}FzX@kqw3_Z++eO|PC57G~x}3!)o@jmNbNY^jp6t7o*{EYzinF;6 zRF2ca;{8_l=)79RH|^!7Zi?lqb(iI?HyiGMl+sAef(jhTnT6?!5+lOyX!or;f%{&A zyr=hA_cM4lrDdEMOPcmnH z25HX!++^WKjIhr6+T7`|SNxuVlW(u`)PJFkz$Ly7Pkaw#%{an8sF&<5ze>95MUX%` zn6c>1xkB`KTq7G@`V@e_Zun<|^OA?Zu)T6He{$+|J$lh(R^sSgQQ`<1(|s1!4vz%c zt0Dl@RNTSWY#^?edBlipsbdgzsiO=!(Gynq(n60+OEMg)-+wQ5XvtkNCC@($PUPFU zQSvptA@Q8cl7Bfu?wlGdPpOC|sQFI#xWJ`1(%oNF)PDG~zFTt90y%?B#$!GD*g@W3 zdxJS&|9+-y+w)Ov8*SH9aHJl(bkLILo8={E@kb86RalEYLhnIFx*&f0cAMQo8e2l& zo$fDw$KuU}Dd=J^{ULxXFoN#4X1>~Y?Nb(;FQSojYre+vq5KLPF+z(oaI3LyR+h!_7C4*=qU41Axl zUAC!*n4JC^?HT^6TEXntjz}TZ)*q%D)?}5S5 zG@0AmvN^x|q2SkD;FWXJ{h)@rWn5<65%T~3spR?0;4(|?!R+)w6fZsyyY&=(XiBfh zkfQapm9F)aaWuCL#29|zMX2SnNNPH*ha4;J&M$OjduPDd zOoq_vxx$b6my{_$y&|`rptrNS&vS=BQ^&A6FWoEQ?$jD_YUg>{!ex{AQx!{MIF zD-!L_iiH*+h4qy{dW*n+!&&~oR15bZ4#m)v_1{p?&scdVQaor04%i5rZ`jMgourab zql|e#EFv^%I*bGdQiKgCCOjs<&=d`djI?{m{G(J*V66OSQal6+4y*_pChTQXrrK%4 zF%G=Re>Tn2oM}&#Rt+~5y9R@M`+8-GO`%O|`XO)rhNilzNelk(z*DDeE=aV&!^?!E=h{AeKfw7_#(Ctut|Vs!>paky=w zA;z7Ny5usIknYlVP_fZk*5&~&Tjq7cTXG%pLa`dKfjn=oqdJZJpvypbqf1LzsU{&1 zXW#0>HhB0logKG#7xJ$BGT*SGxapstxyh;5_B7*iyi;VIFPlx5z~KLs<+8t88Rxv` zaD1}zGSg70mw!4-BPF<4LAj2tqVZZF=UHMH`D)euhoGL)*_0FU*2kC8D&iR-N+AeQ;b4dDb4vRcj?aXfl}0-_p8Mz* z%7R{gcWR$<)Nza`xyWW++6^eT`8kj;sWm(IP(}%}J8cHp^v&OQXOp9em5e_N?a0#n z>`P7%Dj&&)G8G4=r9Y3y^1r5J$({WC6pj8dl%V7jDvvTenNvQd)Rq+U&wN~9;ErjI z^&)a4{VcpAK@->)nIJ}HXB|*ZMdFyw5Pm$8eM;ge+heVcO;qv~l~x&-Og*1Ns#wZV z%(y`UHFRENdW`5^*jOB%47F@qNSy?*C8WxwVq-L8X^3k`%tVaEPk#4>qQgtk+`GUf z;9V^q59J$@1dOCa$;nWlQZW;xV#t$y&kgMvp+GIZPLukpE<+L^;`g}#4Q`G$PU^4N zkjXq6TmqNY*D?b)lEh3YGfHI$MeGQYB!mXPW%_aM0SQb`>6L)uX{LzozwvC)R&D?x z+k-k8_gp8Zf_e8#1Qv#MBz z4k}T)x?@@QzP%8!IE 
z9qtH^q9q{#0w2yMih2~TVFT84kfGB>3BPaz z3!vLX6=^o~5h4M{L^epiQbfMrm+)Syga;>@`}9TB>J=Sc|8ACLC?%a{_Qqm8{Yh1J<1waVS{v$Jbt z@&r7jJR@Hv_MSAzcI>u2@simdI+Ll>yVrJ{S>P?nGyFJvB7>|PxECBgDn0XRW|l0C zoR8moIa( zXRLS3QuPQW9wm%O4wLf}XW${xroqG`@0uf5r2%coF`(>GC*Iw~$f_Yt%N#n#kM21v zj_8(ECi7=1(l!ZJ>4R?(wEw6b-iO_~S<4uEw#PCqA^>6sJ}mvtWsO$9WBkXpS}OWV$CbuXVNO8jcmHwjn8PP>&%>I?NRkBH^HEr}Pql0>belSGhx=z_7%sQ*-@(s6c~ z#%hnfNZET9w=G!Rk#*d^1Zk&V)U>*`<~CGpir6Qoh*}#|HiD+NCFytcjXLrm92L8% z+D?b?f>N1X3}o(}R}FKnIVu-!CnhDYzu%prE8+{Xl&trGCz(=>>Tdi_Gh`L+=ks3K zDVp@9TWtJ1^?xvZsgF3JZFsLT5qs>&V(z18rTI^y}J^rdhUu*n81Tx5Rn zCMC`{J<5u{U5;M=T9gqD_ot++Ph}wIYc8Wq)36RLQaF(=D72K;5uBA=lbMs4NG*!y znbE@IcWW1*D5&IUw5S)jcz0T<8aYn8g<~Fn0dAJ+iX5A@teW@oZS_XN&p&!3Ii@uh z;uKk)iXmGEQ4h?jRNG&WKA0id@H=Dv)B2hkS={uERLP!LJ$FN741IKzaEqN49St$0 zK**Q|_o7dIP9>y9$e13tvrm`JE3_kG%uvFdIVNRn$j01Mf|aEVr`x8Jx$XI(b0|1< z-^ZkoG!R)>Ny~U(_ZQkJPU>?CzRVeH+{eGKA~YY!$c0kK*oBBkiEoV5KhLQsiwlgM z{PcpS{&Yf37=e`(l<=|A=(pzMP*{K$d3IZ1Ke9@;Zg8C(qd z=X-Qzj5!r4t?!D&t^wVeW4wR@eAo|Z8hA2t@%P_z=u%-}FK{I^F$oy1=*k*EV`QDijP@? zB!O8u;0csFDk^mu-zS6J_ctRqkn$OwG5TQDzFqQcOg1sDwOXHVRS%$Ooj~L}74Q7z zp_XsE4A3DErvVO;Ua-k5=OL}@W~}|PBvKPC;quL7%ORD+p0i|kO%eVdWlS#kM>*G_ z%dQiu8HdiiAIcGVnt$M;l6iu8^}VJRm3nk%m3G!VbV_858kJY`Y|V77mPUN=t!Q6; zp>$fLvl?$ol5H`JZ&qn=-O^{y;ws`ASJamiCY8wDqVue_5u&VIb#3OI*&Lg(-ntL` zT1$I(XBIgo8J&2o&cI9R&XyQD75nkF4Vyz|Ww!?((lsE?9J7DbIW7-)Z3GBR2s+u5 zR^C2pbG@x^)PGJWKL66(*@yBZYZUza4=?KPFAW0M#jj3tujMfn;5J))!~-r{#9%L8 z+{ktNIv0Ce>ow^Tow-(Y;Fm~B{kM<4U)NTcT>OcO zLOyR)u%P7O@o!pLFw+r~T%0UiZmsZ{U+{IT%^O^0Zp^Cc*gazBS~EXQ-U_i%nO)#5 z&&!L}5Y|ZuUPjS73ahY>MQZG9y5TjhgjsnZ=-dlg*H~Hb@6{jtx!2Yhz^cv-b4+RL z*t_ZaRb8;Iq|tOWhGCH4I?3He^j9MnYQm@0>KSK(P-RJ-o%3XZUIKHSnQ4J2loB%= zQqCYQ33@n(h^)<34s$2!!6l8fm4W-VU5DY+UUu2bxcWO#^oW^!i}t&xJC^b&JAF&U zgK^+GgSphgFVB-Te5@5pu_Ae-M_>ZAM#V#lqMq;5+ z03i(!3}KW(GK{KnPbN z;RhnA%v=x(xmRxpYCq8P00_DQHZNd<02>*wgHgy|dVj`!`XCK6wM0t&6QzSXidObx z#F3Gst`@GL7-z`t2r3jQk0`!I5^ZUCWv0{ljn(NZXmT_TACI7#5+ zjpG8u4TuL2FCacZ{D1_GAuSIr^MkyI*50pk_qSEi_Ewr@YTt0}-_$GA{cHp~d@Om} zkRK@8B3Mun_^+srr7^q)qNv}-cbiwrrl9T-riAmQK=*B>=6d0;U-rezQq8ecx?j4#l)H`Awd~6R zW0#}P!$R!c+~-mJ>_Oi&aQ{;izCmMFB3O-Z{@OLO=6F@R({fdRC@D+bx|;s{AjR)B z+V{us1UtB;1t!@CJkS5wl_q_gaS>O)^(EfK442dKu;6MZpueQOW8*?Oq={SXmNj^P zL2vaItuaj?4dWJzit82SGvTDPHp2joP%fD|6e@iUd`xku+RyGC7jro!-JOshBSWhF zr9v9aEpgNcWGlvML`qmilB6#zEPrZbVtz!Z1&W!KqmuEMBoZm%mVF9c&?J|d8=w)+ zrQUa}e=JLlBGdfiu(=WHEyHzG;TE9a(SNMlKv|VdZ6Ynyb zgbw#LXrU$v-SWPODAh%9!wyk~!qzMg!5OrAakOALw+3YOJOrr6n|D2SJlfpla`#x-s!s4$v~H~3_3z!QXg@!lDMQ~T#E+TH zW**cb=l^Znb=Z#8);?K{H{0_#Xew>UTJ%y9o=H@EV+)-~)3H~Gv`SKcL+ z^G?%0b0(SM=iFM!UL6`*4gCYMu_kyt-ocVmzFssFRFfa5TaAz`HgnJGQ*H!#Oe&u( z-!JT)2QCErqd;Rb3-N%Ve`5=F5X&D##C+n zM>9YrXTBz{qM_(?N$TrMzAa68Qr194GjJ<0-U&~c>J!EEP?!6$lY@XP*NX-6=xC;+ zMmAx--ls20c_N`;yNyE?II>sg5=O3A(M0HAS3{cA5=RhZQMCh`FjR=kz_#pPjG(W5++P@VR{l;)uTzv! zV0ci~gqI>AT%He_zCnZ6Z@Q0aDEvOfQ|Zposd;*~ll{!!D`v4DIbNu8W#B7zC}@y5ne!k|0qbD>S*aMzkive21~f?-_9YcKXjU0Raz-Y9W9T%Yu9dh0$hBOL~b565+aK8CUH{ z&bkobPXdSM&TB&Ogf6|*yo+=CuitDg1*d1AqY@5}lM|>50Sk^BgIQ|Cmzuf%8zq*w zXbuYQq#zfks0lVN%agYvn&J{B>#@Ij;ld>`%7k{I}a>`ca@e(VY3DW zw7CPOr));Mox_^20-|Uf7rFyHS17<4^Y*=&8Ru0y$^s`u3CAlrhR&f-KbV@+Gcf;O zJKBW+G@U<@cw`5R;j`2~TY3iG{S7beXJhIz*94a!4IupND>(+}2a8+5w~v08khgZ< z+C~2^DzLrRfE)ET#BVw&;)f{6q2Aedlgd?8OH0PFO#*x=k9aB|_@v6O+Zf)5@^ZI? 
z`GUNCen;rNurTTIa%>@*K6)6|m^iy`J$$_Avw!OeKYaLzmCTsoI)bIKp1^3k%4m?~ zr8;q`X^GC&s>6`NGf!3TSfJ}M3@;8gEY&rwtwbkaR%O7%t7@Ms_@&FoCqj*aPL+K3 z^}CFkT~+dgh75IluK}a;_#~BgYrejnKV071VbeW)nH>(sA;Dr|Uz$uoa??>FFQ#hg zogjtAeOIokb*EOPzEAd!zDd*>cq?xExNtbD=mD2~;xWnEPS0$Phe^GP|4=V)0yq7F z-7Uq)t10w?;5KT9wZ8NMFYWJB1F)Z@&0Y1Q&&>TlKJ$KEhGZK74GFkbZ-vUX^DO1d zC!`iXC+fWJ!UCt>rc3Nd^G<3Vm~19oMa^7J%ewh5QC_0E|m^y7jLfI1*5;9ofiKfC)gD&46jpiC4h7+hWPV3zk7}2 z5U*-yvXD@A6g^gETHS@>R7zrG*oy_bP3JMnGa0X)9y7PS;?{VMoYsu?Mu05{PoN7W zX`#>e_k=@pWY$JtZ?>Yc!d@ChL8vhXp@eUL`0Ag?$#cd>QSZYVWkTScSD*xqzg}PqwHD!L)C*F*NmEZ!!~Fj&w3RPQ%7QC zb5J{TjiSdcXT#uv6Dpb`QhDu;uIiXnhEIxSXgo3+tK*H4F{Ofl+}Hl$;Lw7dKtkJo z?%I$FA!E9UNb&N|sLuZZj1#fqUk8ACKbPc!s-qo+IcuTiw>)O~Uz(AYxP3NqvqWwt zX#=o5(5$1(@+QBp{*AfGO&*c(G1Y-)Sp$E=nm?F$!BBr7E&EVtvG)y>GXG(gO#uOU ztn{ZPZZXirNx+^{-r+qzTB+%j|K;y2P&9u#4H<1~E9{^?v)GYy5w%+#bh9 z>yWL!x|p5H*D9^`nAMn`?%0?S;wIt(Q9Dtq;I$xh)^4FpC(nl_H2;jS(SfB2E;u6O{>F3z|c1L8PVtlJWqM0Ra2}5C{Mm z;s5|o#xAZP8U|0M1`H$cWUfFFK71`m6%$C&11TLK)eNLufz%CDuiBZJ?2G*E49H7YNl+GPkRX#UlI{@EVYfRciJTGB)D_FH=cPC!QE}Lv2aH})A zv_VReeSP^RGWm#+WG6HI5E$6TK?>CQ! zxdayNxFr@zXHL35I>xnjGASD4Jnc)#FXvnmcno^AUOX6eQuSyv&6?hR&^Ec{*S)R9 zMZp7qkkfWLEf*G#&&$M`5G1aitCg|2x~krfZkMamPpPlhb|25al#Vx9{yaI{+%(F3 z518ZFDPCZ3JFhD1*2~rmk-xkWWWilWIl#z@QIo>7xW_y!9(D0+4e4rtWqD=YKx;mN zK1bBO3wZd#a1%rJhJ18_lW)`t3O@3&ksW=>WwH6^nm|1%1}d< z{YWH$eJ=ilwGw~DSkJj_ppP<(=2^89UC56B5%IJN(&qB%n|t=Kl0zO=gKw>@!sP3Mv4<&SC}6tAQL6|-vH6p70)lar zUIAeY1Gkym?7n0pQzkVm$Sh^Z$R+1%eG5U3Up9BGy<*42Dw6r!$0KH6(%6JtCHzL) zbu!)f_N4VDvu(xxRu;=OU6l+TxEy?u)PGN@VCh;UTXyMhBBYvkDo$RM|czbq6o2S`34`YZ)qE zlgF?~2U{4@IWZRX$E6x{vv#5#yzeAU)q{>0F~d-?-j#%2di(BMWI|@!X9+`={q7_` z9wYbPw;o0Ram^Fde9`f*(Yb}h$`)`&|Rfy!vVg_?eefDY$uX3*Z^8zWvBT37xuiY# z{l=o1jIJfyRG^DvIuzA^N;9lob*g5HI>+tF8D>s}fzuSQLUp=|BB zx_&1S@Lg^eeG=M@{y$-UIC}+vnolxfZ*!Ij*vTJ+DTW~q+GWnLHNxbvHNynahM6uJVs`@miBKI7+ z>}G^Ur{K!i+E^w_+eJ>>({Z zm!N^7ZOMA_tLxlxL*EJe$gC0k>&gWI$NIL4;Ka#k$H0NcLBM*~kT(aUEWA8``_?MJ zTTk_tv7`PkS`&fmdGeeWZR_$0BhwtrfR#?~byabs#H6cc5KM7o1%j=-b|?Mk;0C4i z1zeIps!#pPGdH=?DI&B;|n|sG{tUjo*4@*%|&&$O|ja^c+Z)7i!j#fogI5; z0;?4Fnr(~A&T?oai}h*>I?<)ddnx=@moO~_0{n6F#ZJe{F2CI8I$NBrnD^1Ftz$Iy<;z6e`f|_wVKH?o(UP%nTs8@m&$t>Q{O`g^-#ZRh9r-ad_5sv@b+WWzR&vHiSPzIR_J_rKatKEc+3AisHENH6&O z)Cw3cw$3{G$mu*N1-7lE`lxjgLp}bK5u-F!d+ggfwrC$bK~WF#3@s`!euD0F*8kf7kF2 zC5<*!LarxNlvEG(8FmI1D3Ah$%>N1>XB52x6zDBsXiD<&iu2>#{>I6?{8#uA=i*D`YvRoc3rguJNym>`NeEf8}Wi()=-@{Y&BeN6oi%cGyhqG6#loet-c3 z4DN3Q`r~gr4f*0iGo&t7jA0l}<+4{WZ8Bl_0e5f>A~RHX5mH4{CBDeS!p0pUMM-Q3 z7$!3ngERyA20)9T0M2}qh;p-|skkgC>Dj3qdIo$HYdoF?&flcQk)lNpI@=+dr@%FI zhyqb$;5_+(i+TkL@1jMG0e{I)iGa2L0C5mYEk&-VZ0i>?TT@eCF@Qz|Xla0^od)9n z70`f!JW${T3h)0F(1C&yP>`L@A?N?oPb4oMoaBNbwGxIze@q+0h6&hOfNlL>1Mwm^ z7;QGxQwCtYj{h!R1`_`9OyY9k0S=2myNkz9UjS-3UIVg1&j(IThJan(qoX00ix6*E zFN;M;9SdaA=O*VU;@pXjf)EKmz7gUvT`!V;5R9QvY#X~-o3bbCN-xgSmydyHnWap3 zWxOKW5MJa#6>13@v?%hp$VgN-vEENe8$B~>IB;WI^c!Wbh~nVxw{|d0p>_&iUR@C! 
zFA8PtUp$maGiWfP1~aO`Aq;i&;XCA+Xm7QkQ6Gp_1%Ez|8F%HxL@H~#<9B{XPS0mg ziA$PsP&L-f6Mh^^x{#g0ZMv4RoGDh&u6|ren40D7=|O5_4)$N?DSr;KAE(Fc$XK}f z#YbI1QGsC1Uz^n;-*v}k-PD}WNDoucVC}$P;9KEogo3d?P5|$hzMyoHHgherQLV7q zWp>eiIbO|RfWhjnmzlP3o@Y+IzWL5wKG+v~kkz|3>(x)ESmUalgt@#%SQX|!TXuUz})P@Rjn<*S`9(f7*nTWFzHqSI> zd$mv1O3rqvl3mYAE7%q0BTLPBM!O{5spQG{+|rZI{Ik4~cB})H_nj z%RL(UR_FtZLkEvqwWQ4u1#ZwJXR#3KxB-;~^Pxj{Ru}V^6wKWwc*E5Y^DG)q!d1qFLI#RPf$NYKE_L$1`Lz-{M$pZ zG`tDnV)7IN0roe*{z4HQ@(Hj5he<`YpHNExTa(FMjDZLxB32qEUQ+DWFllY~9?CXg zxBxjmAV(+%Lit7!E%gPke-D$g^z5M!8{LE?0Jhv$3KGhgFYtAeWPgT9a{)&SaH;`E z8wfH03t~}3fB70ONftCrO47TBk_y;UfNcfX9Dt4I`1=bISsdU@-m+i0=}pv0_dpb587;4~2CEx`wecBGjm|QEL1~S} zt&G{_zqKIW1ywVHa2FFu*=Qs*dlz$Oq~9tRcD*7uI!hmyu2-Z}zP9)7(D`G6$sg7I zrRnXFYgY0|G`uz{%Eu?_|LwcgQ4(=%pBa7vkL!#aZwL<{|B9j(y5->q!e+E(_enIE6{Qh@K8XEeuA}&6a$IOR-!#v?Yz(EI`>aE&KHbaMH zcU(3qpP7Hc+wm~bDacnK{ad1sGeRQ3G@i=Gt=k%;`k(#Z91A|1?cevOeBq!TN<=nv z0n6%~D`{w?KZ>|6{~@9PLI|J5_QBE96Zf|eZiuFc=r0r%5lbOsI7CDl$JA%l_Ji)w z(XrVN{bmP6;qL>`7|E$c-C;oGCs47R+F`k6b8t-c#BFcRz(X|Dorr{Cj)7reIFp*OYoY#PjBO(JNCiL)b`Ea7))tvJ_w%`+4yUgtGg&>Fr_M5hyPloZPw>ik13<7J_jw7)=YFLCV1PM;% z-!J+Hqmtyg{iuDfLla)9Dc$|+p61?I4la{iZynlwCg0xg2tY-veQEwux+m9p8VI+k z`m0#k^t!7)mD6$khjxF8-c+w6`vnO~Qk}rtfBUhN;R|)&D#v$ZSu68(74VK6&7L~Z z4|f(o8oU+KEe~nKyRH3p;1&a=XuJ`tTgDtg@3nG{1)nmH{K0cwj@xs*Zjk!~8}lar zy!CYlKflU_y>0tbH@xgfFSi<|ti76oBTl-tV^nhnbtV4<{lxN#jZEcK2B-yEZ``vq z`+kVEGl1LRf-Tyo%Z9Tp^SzH0l0$y*akbC`Jr&24fyZOZ_!!xu2QtrFkoLXRfDs`R zd3F*$%vOE48uR@5cro>d|L!;QD6kJzf6EO|!IssMixf)caW%=xOpbVcL2vjE*>dcTwAU71LB`QWP3kMCj|wDG@4yOu49Di3ycDO z$cjP(HP~EwL=-U{T#ArJHq~#omd!*fGciyz)xrq{IneprzX0qDfS~{wIe;kuSU!Mh z60gimiPRTt{{eun0DuDk>;PZ~0DAziL_9JxC7MvM4VzCK0<8ZHU`PN)QkO3=h>Rg? zq!*PVB8Xyh12vHflpDb(hvQjn9E66bf$xA@TkXqcqa>4;gOI-+{y>9hG;O{fwaAyi zoO2vgEOs3u(H{}SPGhbmSigQ$ap;6|=YK}eCcwjE=V6oWr-cPM9;aM@d^VqijLbY; zlJIMPeqB9{&`RjZ6YQVlUzzYcbr(f~2sbwPGe0Rx5{JKb`8$B!4GbVc(${{Vojc@1 zK?2B1A=c#+@I%my5OV19X5ASxxJmi?B$&LrneQfLQ$K8=2iDaC$xNRm}8v_)Yxo=*+r{*-VU1 z(RcGC+!Mv;ZAJz!RulT!t*FBp*H{E%k+8Gz=_={ohfC;I1-@%_nsn9f>@JMRnEa0A z8_qBH8ni1{*TJ>k(ZI8=aE)-?)=RvnSYFzS|B;X0X>LQevb^N6ud_RwO=dP5TQ+jA zo0&jQ4VzF=2Exwn=u(i*)KY)uvkbcFA9@{0&XlAwXwduNV|!6(Q})_Y+i^>5@>=6F zz)4e_7+2^mtO_>aDBjYv2Rtxv59!>3;^jFGQ%gyXp|^ zR4qcjYZ(9f(tU}4ld~|IAd0LTN$R?x>~}7&*G%Q5!uv=?+0|;Bq3x}IS6Fz?(ix4+ zs(>u@-S3A+?-R2K=fo2_(*%cRWd`kONq!f)QU2z5)tifdh!c2;e7|S-;s0LBSXg8J zzn3zcBDOPlC0cNC?EZ4jw^s#js1^j~a+2!-n-B&CrqgBgNWE0@l5iiJ?3MNAZDkZD zCuA5-jmh0RTV+7Ar!raDBh=4(0WzKSuwm(S4RQXiIC($U^U*WBCaDs1RjK**5{Ui3 z4^qu=%v14*aD;hIa_XP>j8p0F2kQ5<%QGIg>M*KV)##rDCwd+0RGt3s<&Af+Ny}O- zhST#T9Vic_K#V~bVAhZgNyOt1To1*VU<4X%%Peyv41 zM(ekRa2*4R8uw||_Qwrw_*2%3yoHNewF-|iOXcYllT-4Cr7|1*Q6mD^)@GY9>84$7 zvu2Q+y8RE?WZYX|u4P@fG&=ztYS?x8xl=p1q*H>+@zr`7^{ zjE?KyL@m;Fx?T>dS}m3h&R$YizAajn1YSQ9FZZTKE%qH2C-<-TEuC%&F^A~YeMp+t z?o&5TDyW@K&2H~F935+2M?5-gix%(Qp!4n@0-*2z|93GP`0k@P<-HDf#`h+I!%y_= zJcjfZA?5D8M-=XyVMF{2`U&1qZ48fTjgpP`JXDfV+*OZu^}-jkCdnYRYiRD)Zuk!S z4OcHYPrYdA?3~9o!m7c-$f?)th$&dQi$dfz4l&T3HZ;GTlDDTrS6D00v;k@;*@)=4=h-7D^$*0XOv?`3FD{HB;c{cf7jCns8qS%;9< zI7i^3!tuqRYNr|Og;kXWrgT2=yBXJQfvodSc0q17+iuCfX-&cp<5W<}al29fyp7-U zdGqxkS{P{QU>vRRfc(LT^5KZ` z(TMW#i1JA;Xktq?1}#n=p_qI%8*3;la!e1)VO!}{Vs7&&E|02HsAZh@O z{DW@(L4gMJU;ffK+C?JTsAy0=bv*Ewh(VW##AFUWB^(+MWu=2rlDcx+? 
z+|fRgm$^Wr4gb(%RD89eX#TULP!A2!Bnw(@oU#i>sC%R|2#ZL*$TB0q`a_g54k#!E zw;_PV3|Pv51;GL=ZNM4^tW6TEeWF%Xx#HVUK&kFqoFy8fw%6Bq=U48i1UZH@@b)zQ z0}ua!!*4+XHP$u}RP}lBZ5WbZj|5TwWdt{p>r0eJ`caI2F8owR;2`*@u930KaT?JU z+X9#`a&-p!&lBR!8L|F2_B=xr^nC65V6ja$*zfW!-kgQ5PeUfAL?W^*no|FnV8?6M zXv=MZp?*km{1!W1pMgwFYc!?bGr`B#u1~DC1%~?}sqtG}bbS`jdn{#QMkFG~qAByA z2`Y2DmRoImkMwh8#BcGv3qO+?%`(u0nGMyi~%r##0(F?K#*;h3wCKlp9`?Ha6c!J|jT6D6gHp>}i$xy=ku4x|)!<1AN47nsgT7!{dJ5b7g25Ybk82dO z^Y03itakFlo+`KqbnSfZFi5Q9;}5ua*ygl&+vDZ%&1nt|Jh+SLAZYcY*vEyd&X?uz zoiXq>?Ep5*=RG!B9Spy{ixP2!o#xwqEg*%U-8$X|rd>kbR_yhM+50dr)VCe!t-|N@ zsI>w_TlNCe+2;2Klx~XKR|AY1nbwB)u1~?GVz3nWYSsm;)IhvdBemUjHPXKq!Sx5~ zceU3Imtq}C9DL5ZHw&G7jtrv=XZ?K&u~r$a4}2>&A{lejHV# zv9kJs`9uv;m)UuSp6;{xmtUu`eJHXpAa2|&f4x<$$KqO~>0{wlub?4ay0~Omp;ghY)&7`L>UScaAbV$=NxdgFV<(rN z$=hLDF!K;tsAs>gT5?TVoaIsUa{I!AC+bgwxV>7!!=B=iQmvb z78N_`Dx`tN%CpXxZ(;Ru-b!C;M!Ot-PX0Z}176GBBdYgCvjh1_e1txM1@&cw<81HK z%vFKyxx`xWk->Kt2t>qDvJ@{iB0P35Hxb6(ESm;lzD;P9Goh9Q!%@8xjfMPcMUcP<0XZv1$+|Nt%KtNo8iq zp6@?d7PrNT`-`TFV2cX^R2GN@qWws6K|mhu1@?0)NfHUYke9)KF#v%85EuYK>5R5O z1(5QC3pAq3-%Cug0Z?&|J1rD9M>_ks}$oT_LeW-yMliYU>7ssSASPH?%YF3SH%5dM+Rx1B7l|P)F%l7yzTF^ANRiyX}x==iP_s9na52*GuAc zD(!A<*|3v7LiU>uBePENlAN74krqL>=XfQP(Nmr&^2WuxWRFQJ??oL4Y@w^^L-4}s zyx14gBq7D1>!*!x|98l(J4XO&9Y^bslbBx@wYyhn`$>bxwQrw?A1n!4fy39cMA6YcyJ%w-vNGA2 zAzDz<$-8||wbl+2c{DlhA!0FdbKCz);|9ho-Hs-OmoGPfz>D1sw&+1*-=W7l;u{tlqrk|_F z(>8uLm^AZ#4#&2q&v*w{?PGeJ?5{d_UDfgIPZ#Mb0?f#D^RYY|Wx}*-WQ^@>zPfnT z_a14LLz>5Q%v1KNuf^x1J}7d zqziJ#K0hZjif?*SCFlP|n_jt_ji73SIBm&B?P=$o3}s*7 z^K1akN$=d&p&G>t;8n@B_2Ira0T_G(UmiKQ02w2q05#8bna0>!DOj7{5SSyd0AOE% zWj#&Wy1a#zljga$VOQz$uM^7)uM?L=t@V+AisahI5TUijjQfjX6wd?X-GTip^A{Ko z8W;ns1NIjf4>}m58$lukNW~8E+GuT`w$biCjjO`|VT>g6N%~vNNd|A}Nk(8yz?gxt z0Amf|oMZ$3B`|hi9Kbl$xWsITiMK317Fgz)y}YKdPyazyg;3KF5edPy_)18;<=tjg z0USo_4>B+0niE8Ms46BS3_1Jh-l{bfN&-n_R)gP!layL zh&+=TK4Zl{fM7L_hwy=Z1tq~VkKo-sCOwWcL#jKGoL*$)t9=&MUj-#UFcK^CXx}aV z1K0on0l+8#^r9lq=v_vTW(_n$%zyJ@>-Ygp7-|AqKEV-3rEZsAw8E=>t~j1&prj=z znE}E9GSaC$|6+JhT??R)(w zKE4L88iv1x&519;#f!IS2n2av2#NXssTCwX%Gn7LQ<;Xt9{g*^;1|mmo#IVj)OM$; zJL4f=%@%r(ljlxO%bb1AMzglVT0M&u&Y2PnzvybX>HJDG6uvFk)wE;WvzzZt&}U7} zWP)2MW}jBl?**t1C!E#>3^*}4>s)YlIIvdlR2Qz>durq^P_TzWEBFQo@rp<=Gpr;> zcjIj&N42)IEUpb0DjDf(=UD5j%wM}Uyx+ChcF-82r?b{LWuUXB*t3%yC2^k%$*lM* zufbdUP4D>Hy69lesj!76%le|}?tSSVseXbzrO`J%x5wpeJqjbm3fSP#fQQvzm8*jl zriB8f8gG6~`D;jzo<9X8-<__W&ch`?FDlVH2+FQlszWlC{;=?6`lrCsG`*X|%OYQ4 ztfJYVfRvJ-n`|XG(5hFwzK+JFg)shsuh0s4$IaH4f&xeF(?vb23b^M+j+o7rjA0~c zR+#Mx-9(aIiYJ=iN6cDU*rIuaJGW+N@^6Z1U%50oPo-Aor2N&1b)0O<2~`SIPXpZ?0+x}&I$=0$e}A5N3k?JD||1^QRiu1gBG$w@Pd?2#2+dxyS%~@ z&}EX}QGN4VA+m1H67oKOlUPcrLUpl-rqNc%;9=8=_wFr~n?_jZ*Vi~pkcHRFelizl z3~n#!BBSM~xpZrz+ZNeyV@;5s%dXce_r_$5T=
JhNxWz(jR;|ydeCa;C2AnpExwACw9*3D&&pc zh0xwV7p28|A!~HuVV9eov!)<{50QLtMa|9UQ>G(?h^?xZ!n{K5Xn_8JRMOSoe4%BWSAou`+ z?tvhvC!S1=-%g_#8@?+S8-5^>Cp<}?CjutXE*IPJ%f6^nE|z42cPLmc)~MS)89AQ% zs?RLhK#lW*hld~qc4?g;W?an$i4U~SLd z_)4jN`WwEDvX3v11@eeN9=lwuoRO*mQ^XsVEDJQ#%mTDR_XLUpC3->H0QK|qLUg%{ z&+0<*PbZlYUKFU%OUvcR#l(qK5NMB?=t|y8O@{o9*v8x^lqUiN7C?9p2$fH@NhBc7htkXBc!MQSqKJz^UlsL%s~{CIK! z!9KwNLmWL}k= zg0o>1vv!t64lG?meZ^XA@aT%89t*e!oJK@|;{_jw5XXxPT?4G!v4^1Vr-YAdbNqfV z6|H;t@LqLl1_TEVZ(ylg7(THscazOoHE;OB3zmkW1N zo?_?4cPLfWj}>*Y_#C97u6sUjPVi`)?!Il&h+4NG`N=K4i11V8uj7MMwHK;C%0#fS;e= zjGn+nQXW(&;T+v)Ph$L%``~LkL5x9guz}EV@C0tL^UyiW=w{IC$uJ7vEjW6Y-C7c9 zI*(01meFavB)CB56O@!Q1B(wF{QuSY{&!AyMy?R>b$%fS)%NBFzCy%ziRJ?9VD(a0 zzjpTyb%iv_N8dF2K$(RphAUU7BH503XN9(o$IuUf)22%LSxwgVHtDKm$anQZI9GEi zoVZZ$a>Vs&6H0VL?q+d=p7KP6U+G$;e(r}$N27t;A#48ZM~LsS;3>Js?7Vvwr2ENK ztCIU^suxjC?t725ag?5_0H>9;_I!f%#7gvCF1B6QW{b<_*9zk+ZmuhiH{X2wy!x*a z6`$eou_u8`oBC{cE|nHu--b+h{~~CY&!~|3FiqjB!P$*rDkDzW5rsJO$zhj@z4x6t&qVK z-ANL^(0x#umxzY;zQIQ6^4+1ULJB(ib|p3q^NMmsVynsrX3>VxI6s+y+`sAEk`47j zmQu>TDyN3RGk*xV4PsdZ*1~a*^JIi^L+-oW#sc8!-DfTId!3C7Hc)0K-rQ!(5Rl07f{2cFKd0Fztmo2AC zbG-!Yhp|sNaj>By?o(~jpbsY|hG(OqV}fIDt2Y4&{&s)lhw`FL`!23F+z1Nroy5=2 z>*Q>=BSp9b8$K3{^7N>gS{8iU#@YNcXk9RU67_rfVNymfL{Vi(&@#I0;)@uETLzby zSLCDAJ=WI1(Tn?#2u^YpOR0TA^k_cD5&QnV(1gY#!_u6@ElA+#^thZ;P2?3JbxL6s zZE185>xOwQt7zrNt6F39+auJRfQ_k`xicJL|L)aX6WiXa1Ydif?}lTaY5io1V28Ym z6L>yv=&@Ztd;WbJ_;leDWv(b)98=vWu4||nr9O_?=(w_dKZTwwdh^?Dq>HeA)a|G7 z*!^_0#BqvsxHuOPa;V!x@mT$A0MI9T(HU+a5rf|xEJEjs*enAQzJdfwkRVJ;8s`Ug zZNLEmmjDpx)e)Qf562s(cz5Zm-U`1R3u_)Y9PcQyA67F8Ye=Z%()OWReA&{Qx#MAD z{)gNxWiGc}YAjZ8_;6f(pWH1C=yHR+H*=|TQmoG#StJ85h3d9wtKPPEA`LszkI=3m!Yng8LOo(^YB{sGDaQa=uT*=5z)=;o`o3f+GsX@#J)lIl`oAOsc zV?6(lHvju@x`?bzxdQdn04@+p4$j)Kl9>1t^`5pw$IPbzRs02d+)tor>xTO`wRlBAi&b*_UuFKTnWwwm_ksS zGJ7Nl;Hq<>yd-)&G5LcXgl`HT)2lyZn`!%;(z)>v28A2Bx-;ydY7K@Kd2 z<2}K{yx)0XdO6+--*dW#hu*vhAr`6pgfm`eU;8l0t@Q_c_THw9utlF{P;FP13c2D5 zB)7Tf1U5@vR^HsuR$d9aFTWdDfKGeZ4>D7zE9g>eDW_%lX4dh^4y!ky1lk5%k7IF{ zE#4gLbr`ee7_XKFy0|vic*y8xHA@kj?yc|rcJ^h!Ub_~2d6ICAxfbd~QXBft)N7~l z_e#-=qk*L#ryip0M?<>hr$NGoLj#$WG^x3^R=x>=uWaKyZOlkvAKcN8`-2f+HlKYL zA^oG9=g>uLrU!~BrobO;nL<_(#a_omqYeu%9)tW45I5lP;{XN({n+PNo%X;g>9D%Z z$vx@72qWpKk2!*gIf9Kjf{*!8C23tcZWYaV#M?&{}lPL3$ighT*Sw?>3FNWmtTUuFH_2yf z)cgXra+B+=dGL@gXe$Na5ykf$&aoL}dbvLQsV_+fu50~LM_+eMC_nwDj-kqOn%0~9 zqB)Jt#(+`9)`G-MuUz3mhYdv+ubmmNx30N~$+;CWR~Q9#)p`b93se~!$aJJkJ^$oX z6w8atSaP2m0gLXQ5WS*9)+3Xp2>H`TOEq~8^$llw$WUYo#2`_*zm;e5(j-x#8cI~I zYNO9yFjKHLUS`kMcbTb{sURL38|Dg3tuyM3J9^E5>#qTaKq-1v9E%Q8(v`X5wBZJ( z;V#Y7MCYz2+>=R!Z-199U{2PTGjnm$sSg@2suJz;#zl(iYqeP$JlkJYySmzRd6bEu z?(iLGD8!+;t20s%viwHUIT;Y@_@!!RdMK4Gx~8=;V#k1!d}prwDO+2+0fDiPTDid6 zloJZ^#j*extgdPe#bN9*F$capq@xs!Ny4Nhr+67TF)3mcW8JBYnj15J9BL;A(U%BQ zsmg!5v1%bvRyVbrQNHEIfymW#ed&|JZx*LUXptnVxyt?xtyW1HXgYd`mR zkXT)gok#b0k%T4&*n?Gx5DS|~P0-1t*iKOA5z*A5lt1YH-1DdPoxtP>Ht|>9^1b&7 zw*WrF{cCvd9*^>jE^nq8krodAh7CW*cS>mqA+cf?3`4rj33Y4<(bXgh`MHq5Tdvp{ zVyQQJc3d&Qztyut4431lR*g@eYACOKE|l<&71HvFSzhraik9|McH5HBfdi~OM)vv- z0RAJd$d2&FwgmM%GkZM(ePJOJM${1YEZx37ZQGajnke=-GTvUwX2->umj(hn`!~h@VCE2hj$GJUSuyUkwn(Gm0oz{ghRz1GR@htl4u=m{DRRKvuOeA1 zqr1Ydw#L|3UY$OMe}y%0uJByUjzO0cNcp%*<+}ou0YTI8eU(Bn;4p|MXXgGav3@J;n?b3RxJAI_3oINVS)jEqK&cStbn* z4xJ5*1|3=?zq)}QnN4*Lw^$#|9{J6j=0~zIQ8FCq=PN9Be7Nl&%2SX(6;z1g&i4-` zrR%$?L;m}QwB@Z*Jzw&*n0x)vDiaFN0&8iY4^_`TMK6J(}hEEjkM8pJY27BVNPkX~=Tm89S{<*t7OMr67 zXa+&$enW<%Mk*hma+mDAJtTfK;*+jDEz?@|tu(fxZg1=|?mo&o3?ynSDesf4RYKRN zug_im!-}Xhggu)$JGMe0q;$yXlo87dr)KVg=W>5bNl1lr9yjDt<|s=Hz21k%rj3i# 
zFNN*owcNyhn77V}I+&~8eHfp|}U;LBR)+2GsaTJ$3RA$2nnfz6^m862$CBeF~~d}3p#YPuIS zdYZq|Vq$*#YUdL)-U%lt>`F1PvmKLp4ioeGE3X{k8=nsOii|M%ZAT85Z;P#2mvMhB zSIgC})kw%0B{%fZ^|ZUWEuU`Vq_o9P%%5~!b&D>0EnsJ`iyd>`OqHK4r%k0|1(j7w zou4|_l-d6gfhQmCi%M`ZUCciL$?a*+Q?e=P|NyeJdDzY2(oJU9Pw0Ik_~ z(fYYn^OyL~_yC~*M^9&u-5O450RNBcmG6^Yhf#B=$dmAQazXa(a;D35m0MP7R+O&k zR4>qyEM>#ZwbNKs{9{sIo3K{UAQ}{2(FIp*V<6rt#5@g#fCB+V<)^{*+bD?MihVB; zcL^ZK%RS-qinvdMJGT)K$=>0ANF9wwddbLy(Pi%X3E|zp1ItBvg!y+FCCVWa%2#vM}2=hu)XkE^(!3a-^ZIRanq`O?xg%Gl*rV(Fd zWj;rkV?k<2q(fO37L-Kj{dXXu#Ds8YD}?B+GKJaYpRSB>GUbj`ol`^@4Cl!TCYR8_ z{DI7q6s#wKe}H%_8l0{wl^eV$frUDauFm{jCQcI3;}a3q58?ttA^>!SfbIaU z7V{?1B|9<+&C?)aBch@zSA8;Q7W!-LCzf>BOD*O_>sm=Q&M4x@mW6WFZ~r1p8Dlt~ z7r)S`t})+y?~Mnmhyknc__qo=unKAxBGnt6Tpf}gtR6ogZzThfJpc5WI8sfmT=fS4 zvd7Fq^NInm4di9O3@7NKh_U?m*oc1WmaC#zf_j3Wo`U<=`a~d{Q?9C+mlNjk?HlHg zLZaO41W82RB!HKdtNK%d8swl8RTI3kx?0R=YTYqc$;4P!6ril3MX03UFRY%_sB+ah zVQ`>WQm!iEA3_{Ct*OPVKy4aI%a=qPDRf@0DpgP;sYaVk9BBt?rV~fL@(nBptJTc4 zFJA%6f?0)b-g&P+sfn$GBx{8Dj$-w6##T$38d!vGy0rA(k77yR(3Pvcl(P(_J;Pul zO4QKJ{X;T_)w2!?ZF_4m3l-H!x)_VF5t(z=NKR}63XG#h@&%Ye9;%_aa~D=99heCg zmPYDq}KD(Ad*wImlC z=+&@;7IUu!MAFm(bdw(d#kIP*Hp!qnNT53q(D-+r;E96|022VrxwIrt!B4Ad2j zB*q%mK62E_UH9*y4*PH5@!udjj5zXXOYf@e8J{?OWw(#?F=KuArz`&{0i<)_zAAN_ zi%w%J;bfR3ZfyyU?>lv0wSIPHQuVm?;u3xrx(6TI^|v%KhFcE%squThn_0b`!-S2n zFoa5XcZ`#UIZY@372@c4?*nPzxpi}~T}8`1YpFauZ06XD!LO1SN=})0H*9>sL=;a66%zCAaIi}b+KD+nuKf!p_v7dw}RTk1T?`a7PV zUYoN&(qH3qv%1lWMSnR_$hc*^io;T*^t@fw}ema(pL-OEUmfJn4k2GKu1xzz?YsB>63aG$uezKWV+>3&2Ou-j9=o_dZsvY}+s6AoO7m2sb?oDPptMSq zx3S{TLRDi`*k-_0fw#IYs3QDAryvvc=tE@G=#|n3nV!uv-fe? z^n?LEO^iXNX%gXpmRp}!+9UK!dmUPvsctbcTQpS3^#{_Flcan`<0Os~5`=cxN| zAs3Z7GXFb|B6ElJFDzs3(KySmuF_(j(N00EQHsn}qTcn*kC1N!@9PwF{R=)N_cj?u zbHczEd1ZynAA&_qHfN435&VRjEM+n+CTlV^2J(?Y5(6*Vo`?*gay&I=rxx+K1lAPf zGb&o}5NeS*1@;q!eOnBx)R;nycYU+9i0kr!H_wk=QEBiNnQJT{JbfO%uR}`#M! 
zjUY{NDOw0!CwI9!po5uzdrf8DEghm+WbOny?hZN*6`F$3f88ge;GmuUf6xF_Z3-VB z0zDlw7%5paU=2g{E*3sBfQsc08d2q2noJ5>MUp5+nxH~xK`{R_!Q%g$;LrXl| zp}hRWSQj?l*><58rD!K3ub3;!(C5i)9|gCW$WEYJ(>>H`8W~oyFITbdaBd+PX`E&1 zbZ|<&_*M_`OK%3dQR|yax|a#K-@wWx-MHCIf?KY16?X(riQV>6>1O#GV7rY0t1hPr ztL_1$_nE@_sRxUOJ_UX%AJQFFR`m}$pc+X5g51+EXYq<{3tVly;c)HYzhqY?>P$Q1 zzl#nw{@f47BNET#I$}eyppJ`+5L<&sPo>AU$rz@$oze4akTm|x^eyePwvRI>nXdhSH^6- z+JS|$dQIx_lPHI|1GV2418{XczEr%T!CkSKIey5~t-2r>>fErI5_isD>}rf%cy0|@ zJhNz8V0LR+^tN_pcrGgMSi}0xRtOTsI#!y8!pNxih9xCwz2AndU{8!;x-*`bmWJ#c*wdI;7?n1^zXz9;7r*`&&#X zA5LTjrNN5J8>ojUO>x6>>~Y?mTCsA`{BW5HWBZJ>`{OgA`L|owt?7{dB86XRveQ!V zNQKRYWIOc%Y=z01^n+^iHP-Guo<b1S=5L|%5Yn1tk7BPwmn%-0;3N*?|vbc2NZ8Z%6_`mBVEjHN1;)0&L9;aW^1 zgx|!q&K`^B94i>(FS)fLl1v9gTZA6B-_)3Aw!fnlwrtT@nlc4_4K2*1(ylJ|NgnK8 z)Tk@O{2BM99wGeqF9f29P{daR*vKE~Nzf4FUp|pY&>=%$h-XAlj6_J~DMuYI!03|i z)} zK@!C?xBej#9iX>idh{VF404WzXKt=Z-Rr6DO}3=DW9kqPni8e0>C@a)^{aaxAu0@V z_0EXH=wl^B;_TJ6h@pAzy6KTJ-$Tf!pWX8_4!4-0pTi)+^Jj!u7hL1IBkD*KC94TXc<^MQ=DKIjyIl0P?09ozu%SNGBC*rw@AqUYqjyO0eg zB3kO+bP_!COCsuCBlB(*=6A!7ct3_8`I(0w=gNj3Nm+&;*EG{$DjKOUmG^0|PSe*= z6$`*K8Gg($1>R!#k?^GzT#d07E*G^B^fqxJ2pQBt>O1O;?m6oG2qwl&0xtN+stqy* zEwBc4vaLW~@bF`&dK#=sGZi*vhKGnvTN}i;{rgw{QBY#f5nMWP0ZtIL0Jlk85T^(> z`)P6AsbhIfR5JY7O=JZ()U<-X2R;HUf|G6dF`wNE4h7?{U{YyV zwQyHZT$B;dd`x%vQJgCc=G>SHE6xy!7A<=XoqJ~mcS>4-KL^#IthI1?z)yK^1$P0S zCut$bFLVK}4rU<%;+;UxQOOEU2fPQ!;|6v4K>P#XIfgC-MGqczHvKy4ECEe72lN;~ljqP4T*KE0<@RWh0p!%Mppb+3ILANpnj^M~(LLY!I zB@mAObrh5dgiC%M!STUZ8Wd6lT_gnyB9&q8EcMqN4C2mhqSk9^hGJp9!(scYjCX^{ET8{5t z3)P>8Q<3>kJ)V9zy@$7!z=`aKAdcpR~=~CLBQ)Fee{BgD;vw88c z(Sw)Ct&?~%SQYYIO+bUZx_2~yhljfbbh zw9zQoEIKFTaTNE#caHIJI9ch}0;xEJgPwDe0}Zz(uyS0t)qqv0DKmq4J@~-Rv+e_X zS#w;2G71)k6H6%iw0GkANdsa*6!(&k7f7Si1-D{ zuP?ZjcGd3be6$|JM;gm6n=mP{D=GVTWW(dP8mPOqa480|iBYmeb4b0f+cee}z)el8 z$Ch}rMXM&guSN6$LkBQmK))DM#Lp8$#LtQ2>}VktO$fj50_0`zblEm+A3aMt3x7L|Zx+RBqocl@L}L;w?M=xjVvuT3Cmek;Ljbcc ziC7Y{mlx}qF0lMsZ`y}j*1ae7b}7+WGXzq{Z9i+ylb>+B&9M;W9$Tn2GE`n{s8!ad0(MRh#eO zR0O;sX)q}FRL7kh8pAtk)`Klr_01BV^lR-@ufMED^zZZIs+;Hj{U0FupTbl^jL%ym z#W(%aMzcA4qp4nePD%_VXJZm4M3%lau{4S7i56F36AI#RJij>1MRIX!nZkwqrGQ?lPdAt7`8*^yccmjam_j`-VlNDr~ZA z7)F;p_cp?0_YrICFjw=HTadcRF0~Dp8)&E7Y0+ypnkp_g@Jf6VYd5@ns*%>*IGNgP zteMt4x9KDLjU;=CbPISgs}o)yBfP>dd)E)weF@#eu(+<_fb5>(fJ9IMGb#;A@G;FV z2za!wX;AN^G$<|b`K;VGwqOC3D;yi8#~yo^^iyqu3}b#h0-eIo-V z{F!9zSkM`Kta96H>zV8w4iE-8zyBMl_YG%Vk|aPpG?dr>XzKmL?NzWM7Vv15UrEN0hJKK(akPAH`kQE0*-TjALKLm0Jqt=>-2my z+?t^ith{SINtxmXM9KOWVH7OoQ(1HaiQuV@|~9?egmQ%JLj0p-%6vlY6tq&E@-}7ol^= znJ%Z4wL=fR*Y(gPv1hv+wuR8vaqSTY(_}xd9n4)Pw`f1((Zs>WMvY^^v{)8&tKW+w zfB6P~7GOYgBk)+l^2-)trDtEqu4(rK{OU~g*?h3P8Au2lNnE;VR-syQ$Tmvb7ZZ1j zxacl;c8xtS#!8Bg+X$ZzNWIX=AcGbb873{?%b(=1@u+d69Bg)EE-kdV(sl*#wnBW^ z%RUKn=C`H6XMGx9&ApCNTHEiiKEk~;nyDex9%W%T5>gd^T1V;ai+5!TSD(?30+-yrksr=2*;l%=fIrjNru06TfG@U6 z+dXrfhlRRHT4-=#CuW)zUuT%iY!HLXMTDy)WHs0-$dB1P`d>V$SbIA3JIwaPU$ zJB_+;%s|7zSPF^XQXA z>{Rp^S-WEbtZ2lFA%dHS-6wrhgBf(%4xUUmkar%t`X_3+!o%OTpPfu3iNDfl#q%oO zb=01u&8{!rDOc}(|ICUeMNt1)&e9Cn=cKbUaKw0xNeE?8u?)YCK-ObU+yFH3r_LWMJ zJ&FQg>HsD}qBV?apZsGYNXX_nL`Nvp6%S?}xkr?0uoz5;`+ZtPLLQJF@cOhn2-Lv2ms*6R1o8AXLEizIb#e1 z)`ubt1SJXxVgU=1YAx*tIhGc*^(U=^h#xM=gR54`F)UKcG0NR*Y0pXrBD-Glvmp__ z(?aQO(na)EH${CuGm6z^j>U!m@zg?CWG+XLng;8aoSH^7$hEZsS>Cbe8q4A#wHwQ?o9< zO^FRrGo z?jDWo@(d;Zh<60$uC9ZhYHlrU!Z&u2ivgj1rDZC;ZV9AdL|Q15%jFo+T)OxlfaD+h z8rtz?P>VZ}7*QM$X70*4Muj! 
zfO)R`f4DjLA4)s)|KR^!-jG6krd2C ztcCRHs2oM4sG2}^U{pbZqNUetM4{^~78~M%rxw!5&VneBbM^+!KkuT26nTC`9Vs|= zhL=%}(slp@YX=m%Q25ypozu!uo_qkx-4+a8yC4wpOQFjgC}h9a!L%{AW-uWGr7=wy zzOw=U40uf7SxgutG<7g7`$w1RW5-^7Nm@up(g9Q-& zR6qb9VF=4nK6IFA{C`0D-yre-1B!B#y6^VRQ!dy_Vc@&Qf@An6sc{a5lZXO*>OD9q zCDFkZ9EyD!v=%Z^;`R3to(z{Y`NMe{1nwZF4R6l4A8SX=nSMqr!Vz^pmg&Xl(f?z7 zZm?-~qm*fW1 zF$;M}Wb5%P@i(`2irsBk9`hVb2~e8bX;c)d%B-|#y$9RG?EN3pD|VamNA+ajbHb2k)3U7Kmb=nB};N3rROLJfd-(k5GZ)Sch_K#D0u{hjh#XXye z7cV|4pL8wtGUX<-<>hIa?|8GblDI8AY0!1_fpWj=QQNZCtJ?iMe)1xg0bJkMyt$*e zV^7{JN->#3GUju3f+qcpB@tZO zPK&k?^!8gjUF2Lcs>TD)3|v!*2qyG#2Mjqw*-235dl3Tcwu@W4PKgsYbYEEP8BR zw`FS!e+)SbTVSFve(7=eI;(-R9fb4rwhQhyN9z_prN(zq6eQ7Ozq2G3IBDfv9 zwakxZ-1nNiROGg9Y)6^0vu#D1?>u~deOK$%h+-NbY;Ak4lupis?Uj+Tvllc)95rf6 z7&Y2>awyI8BTn|7y{#nY?us_&0+IC2CNZjQWSy3V)-7Hosi~iH&kWDmm*WCG%U~NS zZ?MfaWU%c<&v8*TXt4cxpPcYh)B{39lz-c>bO+;-ki@4C@oO(#;@94c#SZ+E?kE_R zeq{L^^??1F^zQTt$3?C($AyD%aKvwEv8YO}TxLo2_#)H@Y5at!x|edVBI7IyEZ^j* zg;XAfvRL@?_of zH!P9~2@R+b3IJjQoNBxT3=9ugFMwD8TN6M!Ha#KiW&y}zdLU`B_@F(jKMDkK2U zD4{Rn3X#{$xg zNQlx1N(xd^3kcF6E!_=L5=-p-1VvBa4u} zz;=cXO6fu=A1K9whEfc$!4x*6!G<*0aE9q(1(Us`;?q~y1K;06X%~;zriU7~L_Rsq zB}#vLWJmq&^Z0F!VC3PXAsA*&*LHZk)0%GY@*0qlO<@RF{b$h?U^kjjh~-=pksQuSnv)#1WFQ36)aYVg z?PRKK+(NE7T|J<8x=49>Ef6WV>ire{58(P9+)B?QD!x@Puzqal9+tn0?l8VjXQMeH zamcth=yqXu^^2QC(HX7^!r_`qX@Gq25^YAnd+lJBWbI)0%N3W_Tm6d|DlX-)+psz_ zjz7R4z6ET>R0i0EjX$`@X@5aEp>SJdfwqy~;R2$=duQv&;ezt>S=w1PXMexpB<|Nju&D&$+R0Rd-$4`Jio*>L@K!=D+63#Ksh7m#-lRsc2gwU)wVJ~gUz9>`(~wa z)WYkmX{DeO%;J?8cTUVRH|=pSXurX_z~6 zlA3Myf*xi3a&1$~anc6dmaEtZIc%(3`a`nsrCRjupn$<>49osAMF^B^)Dx&SOgDd* zhwbK8V|lmzH#ieaaWCq=@l?pYl{onNy1b*#Y$9Q*aK6f+s{C{nr)NKSc*5+_Wl#6A zEc}`c*Wuf+q4#ljW%d`324`cfoFK>)*POL}k@`djbX1 zT+NS;`_oRwAEHWxx{;-V-Kh%;EL&B#RP(LPK46=hZ-a{hSfAl{_9-oUhwgNm1fn8w zWoFlnkc{#4VI*AI@AQeTXtPhwSaDOFAVkT&PRZ<|Hz%@+ZB`GB0_&k@e!Jiz5l+jFf0U0(Z- zF9Z*}+wC=0$QEaZ&sIzSM%23R)$N(p5-MNJ6y_=o#h3}sDj~c<%VFd9dTHCut)-oR z7eTi&j~+Sk;E{}%uO4-rP*LqH;&QfdQ2~uhIR21ZKf5Lphkj}tuCg;TPvFO++-D1G z1_&dgx4e`l(ag!e4i|Rbt z%3fIxCODo8ihR1f>=$`)dD%O%duiT1GID8`QGOW{$-lI07x{Z}*($PdarxEnT~7~^ zF;};Le95DiNOAZ)5xfTZuk6exehD$EU0mCV0=73?BlXJkZ$#FVhSmnqZFi5#2 zdknK&i#>)(F2^3jAQxnhp_4PS$6S$-jIgf_FmETH9VFmq^58_W!v%my=sUb4na zpj)glW9S@f%m_Nj8Z(SGv&Iaf<*YINXijTPFPh96(}P~J!gQfqtS}wu94ky4I>-vs zf;O|lG^6FLFyGOfR+t7fnH2_yUb4j0qFXF6)#w~c%r|t9C8h#xW{D|7%dLUID;=Mr z>ohi1%&d29 z$5(iFek5N**^ZlcBBFz$R&VqmE(MGI)_^VFuJ0%_i_1vn8dbCgIi`GEk~N- z+s_|%GwM0MYrF3Dd?`XgV6CRuEVrA`Kyt*H^OH5tJRFp4LM3W7G|`JcvZ0kY*waXLm_3QrNV=KXo$~!9x3@NdWj8% zLd(d82Zz%mm<3S}3}xOn1v*bFlnl#7`PF6gOH% zRi&Ir^U=r|47S~w|-N*4(UqdK~5lohoRZC zTRs03O?s|DQ#%@w%Y@Fiqc3hZXnGuk$YVz5yV4i0LJsGNkU}cFRcj3iRmcrvV zhfT}`X4EgvWf#RQFBj>`M$P~JmHsS~NDs)@(U`U+iqQ+Ycz2ARaSVwn&}ePBBmGpT z!ZZ!}=K{~UPC-*kU7mCQ_aD}XCjiGwftGQ4uqvgBuK@vnYkw`%X+UM;u%2V2m7wimP--oxMj3}l@n4QV+(pgp2zVdU z8>IIEY`9&EZh++k-?bts;J$z5-M5MNW?P*F_AzsR1bN&R%hgW{=Kg@?ZG2+Gy;!DG z_%uDTq_%Y%AH9cNJ@k}5m(uo*k!jKTg&RkOZ&W|o+Z*nl-leL44o}c(y>q+Y-osZD zO1kWdMfM2f?I>;>ZR5wgrfy57R=dg2o0^YzvGv_ln6n}L*v3B%(;VHK+tn*c{HP-( zDK%&q?|gcAqVZoV{DIeeII5?}R8XPX+ta(Uo%gS)&TGKYQ!mzlz{_ z+`o3{ay$D9k*T=u|LmOmPe{qMd_Q$=mrPq$GyFiq%~TMS8Z7{X-KH;R%R_*?A|}eu zwQ`XVKJ^~>pPeWr&_($l>;sn~AfUVDrZd*mQVJ}sL-v&|k&Fw!?q;eEMOH2c+omF! 
zo6!+?JlpREV}=>&bJHQ|%`BQ|o*HAZ^D3;l4>zTDo_iI7fkrQ-+|Q|WL_%e^lkL5ox%mIhO6E5lQr!3nq6G=I}o6L znoJv-4+=hKYWLfMO!U#B(A4{|Kt6T4pt_H|D|-QZY6}eQebWc|AKk*b zTz+F4+&Rsv@W0Oux(f)ky)6HlQ+BU5?3#!tT@ zi&UF_MG{Fn{R()M+EPUmJu=J0M%^<%ijBHu#)*x(W(J6j zx@0bdBs)~(bGG)a^9p_HStiScXdB0xS8|q<`5@F4o|K~}zrK`xCslt&RKK*HinPD}_Da)1o#(@`_0e3!rInVYfztJkw zvPr%qnCQdxF!K7xuux+h7Azp|Bt3q;m-tI8YMN8?AyJ^p*{kf3uLAk^{uW-q!b|qB zU{$LJ!H17&5kAGn2)JY)7Rq43^>5)DEL8q2)RyKjZzb%Pl8`<>d(|{77`cK+LvGbRJCO0))THu$}vVOzCwTH2) zMT4|?y5d*VYMnpjyBDZw>>Wuj#4S5s(G8K5Gr3|_)5z1?^10^nsd9jNhE&GR<3Gh5 zMKZQ+77PO{YF$@{_R7fflSC>sMDh$uEEdg*o6l+^^t}uR!wbm2JdLp+Eejp>`lOzp zRD7jjv%tFk?xN1v^uK>wR2pN`*19$YZur;>9OnIU%o^_kb$Zv-*kpB{v- zIh}xv?%w`!86MDiSz78D%h76*eR1qOH^Vgc8MM3mtnC6l2VH@RV{pHy3EP6D3zpfP zO7$jspo$0CNLf0UiP?H_!qM1YTh)|2eo9>)-Y#vET1T+fD1PT_fs26R<;jk#mV9FjoV-e#W(699x^_>k z3<^B0)AqeH9_oms7wq3Rc64$AcSnN=-m7wa3a=R0cLziUqbw~W->r^bZVo8Z7X6fZ z^$thAc8rmUP5WiZHR=vk6Nv%_+I)*oIw8KPleK7$0>sj&8h{1qjdR0 zLXM@PyQ&@W>wTk7-E^&oPbmw$JWEF5Qi}~FE^^<+B=$j6Si&JP{#m@2qhltD#^Ocj zX}d~6WI$3}hE6-*^z1vSuea#zW6d5Ud}@$8$$EE5|3;6$^M!lVN$|tzRYAQ_y6U|P zXI?J6`)L(s_8|6b3PnVjD+dkdmg1+>W})=`Gd=!V8uzFfKW!_vrtc_zD)=FkzR2C< zzxwJP^$vs#qZEpTZC-M_cV-W=+j$&9q2K!6%IrA-mBm(}|C)h8aTDZB6x za|#6?cgZbA%S%1}Z(gKOP!76s2oKJ_duA44_8=tQg}rR4AnONXhfsPXZI8dHC5$%< zW|t3Ry$EAHGQUTio3o>MBO(R4cC*L-DvarA?mcP?{w>92g*dYZ-hUgmtjH~dd;IIZ zxNrTuu|6EIu z9Stx2`S`pX#S z7I8w)ZdA2G*tf#Ij=G9tEsvKbofE%Hss%>czYSG&6pdKtZ8?|gc$yRx%!Ws897JFz zoUwhD`;SjKM!4maN~M1CcJJ$xH0yrce_ojTacm@SLg=SzfwJ}IQ7UKky@w4?am@D% z8JW1nE$j5!R4r%NbKcULd%W7QG28e>T7LJt_Z#Fz9;3XmJKBZn`0y^=OiKyY+gzPw zYIvUXPR>h8rKfM1Ol%xQCh@sKEM5V5ZMhWP+0=TNwy&hONvIRX{Lo_`8e*#Wved23 zK4C-ocq=P~#&at88aG+*%(^BPj0}1#ET*a!m;Lg1VYBj&eVNwryKyaS{h%pftc35* z@StAFWyB=M~=;uT{V9TGfpMOr3no~DW83uQ$* zIxbHfe<*6KtnSY|Gdm!BeYt-pIaxw0(tc0Ta(#Vacz95}yyok{plRW+XB`Y4!b6Ln zRG&IqMSj`q5a*OCsZ84F{jlKxxYVtqObXwSH7)%j&F+bhON5)JuJ9zkKC}Ogl(XC6 zM?wdNT_)0=7(qrV>a`aeMH`ym8%K(T1(Hu8Bdf~-dfvw zPBJwIzoSGPV)Mgo{cOv=nx{{R`V5;DPKHw7eDowV!uy-3f2wSFecv~_TGC$>n-NDz z6w17B&YfRUQVSbQYf(k~t|lIEA^EgPYV9oUI(~5)XPrJ_++4V%Ts-yo)|8*piR+|Wj7^7PGg8)e!E*47OLeedg>=*Tx{|-xtUg8{<5}vj2j&XM+jf^2j22O znBrfJi+YKlu9r)fylFi{(P7vzXG5#)pu=14X=_$&5*5}QKLH!~M>MQ7*G!8|Dk7TW zt;EClEUlr0m09tE6H^#p(TJwiJ7c3_lZ@o%_}j1{6w;jKC4-%>ziverBF<`Hiu*Z9LlBBsOk?91{FHL=(k(lGv!TVecWR=ic7opry6 zSa_>ye$@TCW0ex9-@9QjIao=Vq!aa z4DnBcUiN@Ogr^2(<~WQ+X8ZP(-dVQGIz-VLm^+V$$rYh(X!9;?<)YO&B%m~kmm3f0 z?uW_3-A8$=I`#Feh;GvA#D5VGC1MEU?`h|)$}cHizNKi|yI=_e=>J8>L=-8TkiH&9 zDczExV8$874>ujTA0?Ymhl!YmEkaN#9!95elz3&-yTGl(wpFtH=HW1TJ#SUzGNW&zY=RdI&-YVp7V&eFVn3^=5w;g1 zkF+f~2S7^%yfdb(_bvidlqYegVDc1~G6aT~2vVKRr&w|PPNY1WfYBE|tU0G<;&89p z`pD6a#N3JXLFDQ*@3rAc)eJcav$k?49ZBDhA0&IIFmbd>cNGN2EO*P`%eDn8s@i`O zborOG7P)bzc`s)+1gvIxI_5)#CcjRId1rfK`!19}K1H|1WvY&mbh)=ti7uPxsfdqg zmSKVmMkORZ3^J--&O80tt$F%P=eupo_ijce&+w$p1k8}uVJ+)NkyvC>&EO|yZJk1) zb^kwh%Oc-wmISig+1DwBo6s;F%{p= zx9`nah8c1!*Q!Qa6eKvn=M(fw*i$j($vZK*Z8yF2`XO^Bb6#r-zivoRQ(hfc&6E+Z|<(fgV6_NuP(4>71X9-N}J&xL`Y17Mc>Ea)GXq?us))% zH2ps1%8De7FJ;Z8#ow7rXEg}xe~B=c=KPWFHvUxK`xfQ2^O@bZ=F%Q?{XXtbvx#Bj zCPk7Y zXsDb~ktDZ6k>siTJ&#%rAGY7>@@OCA%(bx7Nnd$4%2Ai!w~@FV!$)oQ{7-s+f>S`oS5d*sssH6$j0~fT_g7)K#*H1?-{sc43w{SXIj= zE{0AQ{>diJgh^1cXA^rFXA_%1I&-Vv=j-KupWIvhL={b6hk3>cB((6XL_;;>lv+aP zhLpZgzfUbx7hq5%8M^b1Gz)e(?d^A@O^|Ykzay2QPmw1Pjp18n{;~CbgoyA&=){(A z5i=bK%%m&)i(SkJE?0cYJlJ&@JlGDLC`b&MrD*D<3DHgQ`7 zmnYZW-IiqKH~D$!AA@xw1f88K1!C8E$Jc;|7#=(0)Ga`Bk3Z-HTL%mb2x3ayn#krG zde=5lHXg_KPxHh^!}M@8sH)UL9vdY$>2bkUxqI-%gGvG$+$rydAKyE+qTU{_)XETf zLDM518X_e_V9_O);??~F&?nmsU9(CVAN4G>RBDVjer_IH6gpuQ$%<9BOllpHpC6GN 
zZ1y8-T;3C=0~Bl9XRf#pCI{+T&xN_~DGh~qZ}QPu)|5-<<3I2ZCEe6~i_QwB+C%aV z{T|iM?=Q(bw41wGT$qZ}D6+cJi<9w~04|Z_(xsSaOt~ks7^eN&G~faxx`|?Qj^Ul-#~TLRj$!g5A9)LyfkWX&h3qk^JeQ=e`~ z?l+j**JoXrtxv#Rj`N)gwO2|WAS=VmUp@Ji;O<$V-sG>=vB&yRZ7oCuBeC7|(rdYl zMuN+-M$!sv>vh;Cw};S1heSz@Jw|@ZN=Up{8ejPI!WFr_S6tcuxIMedtnplN)cMdv~iLLcdsB{{6e*{_LA^ zN_UR8$ioxSG^}y27{s%v(TZH*iE8wMWv?R6XVDcX`_g<5Hb)O2RU+bjA6E6=vXj4~ zMS9U(uA`oH?-j%CEF>RE5KWfJbfh7T;1zS&zy=%M!iMPh(tlo-o>#O`=o87lp?px} zs>h$rt?VMNp7A@nL}r53I;861s*3*~_VZL+iZs{y5^soS-BNazH>=``?voi|EeI)j zXhCff^oOJUBG=Qm%zYoAz+EWdlCd7W0|hpqz$&$B(4QRgOFjq#pBv)R#LCV-BpK^O z6fz^kw814L!k0|QTv^&LUb_E^>gN5o%xQi%#Ou#Np?{zd6BN3eSQ0fye(AvrW_^E@ zG{y6M;VQJeLHqACOMlwz^)4<{(@m3u%;0C=-_|mmV#)6)j^y==JfKo`@Q6+UJD!m7)D?7{m*N30Wno{bIB%6bOR?ba4zm0vSw1 z22lUUjWcim>SvNiqaiy2vYDX17}U3i`ioG1;IBR#)R%(-QBVL01*D*WFVrU$MA2M^ z`dQ@B!3cvG$Yq0EImnHH>Kq}^{bi_r9jZTq>akFO6ACCnfpn;j{+vg2gFW}6-D3nx z+^4(s+_FAukW~bA4WVu))P=j*UbItq%gmAh`>t?HtVu`jP)(YJ!2V|Q8EWd*kL-rY zxXy~*UM?Fu3y1bY@owN92FNCAfcW(r17l#SnSj$k%!DE7chKib#ckc(JOz2b0h+xLlel{H3%N3NQs+(dOfkuw;~mYr4a2(eR0xH69W=# zz=Wy;yLr+=vWqSCTj81VCQP}W>BN3>N^>EaXTVP>R=&q$`%lDfw^oCGC{z+UH)i^u zktJN0me_7K&TJm*pR<7d0?i2kGmFGJ#Q!?UaPt4x$9D-nP)k5wO9Yjz!qOLUrzg1W zvx#7);Cn58lL3!hG{p*)ssl)Go=lPK0wKC=+t&Z(n={Y;%hy->o8e!!wph09 z*8j3?;A=xxSn!bTeT##G`V%QzgCj!7 z9`?W9D|5l8!o2Gzx&Vs+0LWJC-DKHUm}9)&A%&;*11sj~b3`yS7TCE(gv}8ACa1v> z>BN7X3(|&&b>P(V_1CJh=7&RM{@oTid3N{z0HRGs4W zX8vYv&OFUblIp|6OriBjJPw``8Rq_qk^kva+o8Ml9z?-r^YA58faj0{(2e}tcXG&Gv@f<1 zEc~FBCXltm1j&5dT>=gu1Z&v~`I^?%rB{<2+%cX3L`lP-VE zPV7NW+E4vIpsF92CjE;ecT0)U4+(WIC-<3C8AmkQpenCD{KO^G>z1l3>WaX}3%1aqlY=tSV zcMcd<+Cx}p2iMQ8XsNwTOh9lg2w2}Z50{`cr||Ji3KfQ9Ha{J?Z{Nmd%24Um8jtk_ zMtN!-xYgAvYn?n6s9k^FHteu!(s>0vGBMWVP)HZCW2qK=Wm1gupu{?!R>_cYDQL;cq`KR@2kbsy|g&9XS~Umf%&7tBSvKRX!P_E9!E?Q!pl z`-UzE@NnWPi23t#-pPIAmxD^Ofk#u&!=6V@;#kyBUX9L9`3O-)?l<~BRmt3Th}?TmA2`tq`_ts;2>&L#i+ z?uE)C$6YpT+O|;n%G1)ks9pFN-b?bAW7Gt-3cS>pP>|nQMgx?w$T4=nAUp z)rsRMk#)zUWeKf)A@}SMcL5wNdQ(zrxwdHNrr|?euThj}!c~!;u*fcN6zmS$2+=;P;Z+NSf(eq@0n1a~-t?*Jbwmsk;>gFP~~J zhl~Z%Kav#rlVzg(}4hpO8{WWu*D`c!dQEXpfj+j#$yS7Q@hM9!^@KOrP&rr1T1 z0bf>{>ZM6TKe^Ke$M&#dZDMA3-FzA^zA$MDAFf}=jXAXkSov<`qYTf(dYQ)HHh%>5 zc{YwKXhq)e#FsRz4L1$n7&OUz4{i%YP+w$YXZF^gogur9Jbe@GLoY}nXo%_)Aa4ss zP+w-_U`Hz|hDLv&7yP@t)UZjWIk+tpK~2lX!G~6S8J<`}FX$_1c-g24E-z^dM^H1e zafqN5t-=$>=>;1E4c|aU4tZN7g8CX8hXh*DGd%Hx9zk>yMXoQyQXj1EgCHhm^E-n+ zOBzmoksiTvefDBF`89fk^i5RovjQTPhG2a^1o1gGzw_v` zwBh9Z^a%Z%s8* zD#x7h$s}h9Lik3ae3;Q^?}ffUg)`VnXMZ*a*DE0E>DbQPM4vSXCvT!hgbSirjAWkuIIJKxo$}i+w^y`_ zP2+HwORTTw@O@IF@YKe$>CY=zdhM79sQ_Rd*)2^%G))@==LmK6xKuZ{zC z#J?LTeXt`}|9TucT#|IbjiIaQ3|~HRT4U7x`yGQUeB#UwaPsJF+ZzUq5(vTsq+L1ts^f^G(LSAMTLG`_r zCUf(hH!*>we(30uDu%w{SJAmzOick~m7lY5|1d=EMJLq>8&m2wnp{bVbwo`?J47Du} zR&;zl*HOMcoq&X*4OF~o1r^3_NQ$--spvN+KrrM)3U77&1|F0_B_4Ko81DdWsC>{d z57h1EK{9MVaGVF5LeZeYejYrBMvWlfg{c*gLH?$2G>D361;_O8fKiFJ&x5r`@PPDh zbW&WgV2TGuX~Y6>fwgEs764e~2%ysU1gybendgtii?M+rXYt^l$P*y??nH|H84cwQ z$dDlb;&&(DS;z@CSCJs4hyqf>(gfT$83Oib#Jqc`6QkT;`&UVRMEvtdY@Sn{t*F|~uIAg(SWklkO-Ee%i&4P#x? 
zz$S_hyQb88rzoa$_On0}72)}f7*LdxED_tDd@T7?9D6(R*iy-?;3iIJNq6GeT*@o* z{W;EpAEl}F&cc^0HP;l%x}-v#{GY!O(_?W5!+N=9t`GAMMjiEeXv6k&!m>zHxwIaN zXj_!72o`KBMEQEP_9gMQo*BFOz|sOq?98fSNlQl|njYt@EzR?V3}M(&wbVdcN~3)` z{=mZPbh^pxFN}M+_RESS>%c<< zG@h#XD3#FKStSpw7>5tJbpv6w&wR zQSbZ|bKh=|p?`%y*1~34D|JA0jx36q?Onn+>f*|X0%Ou?-tEqcg10wMs0C+g5`-Jy3Pc_bQT5HVXwC8~zGf=x z&8zeKbKo!P{QIfAM}L_;mw?_`efs0c9OT>pFcJ6BUVyb zAMsjhGhAGp4snT&R;h2t3{w)TO$y$+m}cDe8bO)=m?6w|j*Rr>pYYGF+p~SVqa=J; zLb%IHrDnPGf%7s$HpAfvQNZQ1v0;9~)O%4USh~gC7{z(O1V=wkzbHoI<0)1m;snbr zN055T29PZ=U^5>coe&J<1JAU=vBnv30+b*KkfQM5=rSH4Wp_YM>j8;i8;I>e`@I3! zNH&1exDIggC{Xlj9(;ue6=gmTkaMlT4AQS~bg&7-1JW2qJa#mYDC7wNcLloit3|%~ zaDr8cIl*4o_m+2E1ipXfK~yL_AQV(U$^>CeJs1u-NT+(?poIfhV7L_m>J-(P`vfdb zh9IR0{XazkWF{QDFChZyUI$W7V4d#`K(fPRZoC0KGp%6wN*myGfTp0?YG_W1rtL2t zJ@~$L7DCVMd7uNcR3EXVLBjZ99uSFGQ9#Lw`TT$ImDmb;*r8Wh0(b(k;Ke*hy*@?d zCriMR!c;V15HFx}`RgDRLh2IiC8O&*paRD3Q9OLZY95UJhf15Wbx8ln6C4sE+`Szj zBT6F)0Ry6T>d#iN3DZ-8>ER$QA@gFu6n_lpp>6|3@G~I_(FS_h{>3CL3Mwn^fYY{v z*LH6pPQ(D9bse6*NveRiCs;EG()JPmB2C=CNE3eoykW?t^B|h}k5YVFx%h z7_jnnpyVmb%JKLAeGVX{g8Dy@bY%%ksc^H|H3JO)i%1953AjC}Fvn}0g{~Gw3$MW| z)Gmm3Ec|MAxy*6uz%E&8Tn9oDle$4&(V}C}L`C!zrOYXhd=S_EMtF4j8>JLR!txC-c^R^WZECV??E0cJ? zm{-j2If|6ESX4x=&RfvZrHO0~)jMfRdlkr56Df35<&>(4tZHk)XOYS!z1wH20ajjro?k}qcza{tgHCgl5G0A>=^3ri< zveAmTYcYMCVVMlsLa<=3a(tqmnq{%?qrzXWsVwX%k1J<~KAN8@mAO#5n)=?}qHee> zS21@2=`3a&DGNVy zB*f2XthQT~>vS-FSZe1!QRxhMY1nqB{7I*EjAh%z&WA)7(^8Y860#-+_-&18_tPt8 z3&)zKQDM|e_Zl?s33t`xx%Zkkum60mD75vJ_&hr~2OWty$08Dsm5Mn5fOj6;fmHVb z9{YrM0r=|A18R{SAn_6oWlK1;D&fSYWH{XQ@Z3Bbz>dojrkZux5F_W#FtmYcsEmUn zwFzG6l1vnj3D{yZ0h$ITAf1EM$iR(d=ikrIwhoQ zkWT`wDM3_Q(}php0+VJd04#67lo=}kG4cd#LBNSr4xCjO!+IQa6cKg;oau^vK%fsK?+K}20G!9bF>J#Mq@=HdDL8qOiaR~Nabq6LLH}~luQ>Ey zq4Do5>cK-o>grvZt2^Kr1___-{0mpa|G^dF-`N(N2SIdeg!uS(0z-obgK!%3sTCqr z9rA402{s8$z|}(EXJSrp`p~B)^w|x4LiE(@fD{55_U{Ca0z%F0Ko$s`dgal;R z>O7K0Vh7~u%mXTmd4R+~oJxy<5Dl}=g}6130=T~zJo12m4Us2EO+sr4Fa~x!=!G%d zYy$`IbTmF9OesMhtKi9ik-;~<19C4yD%lRu{C3;0y$yr`BBrF_l}+MP?6o#vmkh48Vz(w_ z5zn6z^iIeUMTYTu6MA0RzizF!4W+d&!p+Dq630^h0lq+N<)o-&A8F7dUzVf({M4)p z12%u!ot_j~3<5^kDfZOiIJgic_@{a?($wWlJHDD*-BRE4%y3St?8_dU^Wp;)yJ|qt z+wp|Th_Z8OCJzj*1zyI!JyxQpo z5`nl%x`DLqDB~3OJJRCRB)fB~#LE3xUF(?2$yiHhJ??v1UMW`oCQ{iHAiV(2q6;{M z)@|ph_wH&7f+9~VR-FBhXER-Dn_PJB3jrrlvV?*$)6cfnu20>LJGBgU-zec-B#+S( zZ`Lxcys87zB6UAmdF*Q%ctkOuM2?(WeL6K2rxUN$SlIaVq}mHL;G$!BC~?3~I~k>y zUIXUt?Pa=(ff$F!67>2il97*_0OTNu9>>+ZdKlTf4{bUvUb(e>cFf!~4SX` zpLD{miENoY*&pASRQ3rZ^)8v=E-W1L`c$9iJ9b*u(c#ncjr0{JV_N(YN-~-%U^9Bn zI&HF<@dC@)HsA0>Jd zC3*`bDvS~pMTy=2oF#XwJdh&l*=`RVg&_8&_VyMqzXvbpcbJL&Y zev|b5CVc~P;@83Vzh62-hIk#+_1TBW(2<)Z_}7Q?u^7c+<8n_?_S6Bc(-= z(xFJ{QKSqgQpShmqB8&74!nd3rg!~^_3)A)5kin0A$SQPNRJR?VR-NiWvGubv~w7| zmMkeTQ&GDo;c4p%Y)P>T@~g2Q4DxnPund6i8g2V)u=d^{*+%I??uWd-U2NNaluMSG z$*+9#xb15TuYlb>c!%4bWb>q$v{^&ws};ivyyi@g$NHXM?Ju}C3m&pH?h&NmC=3L) z3>=4>6Y1k_qN|Zf0UHU8*kgi@cT}B z(Vb(l!aI30s((Idt~K`GiW+r#eD5(X_b6zrc9APda-n%&Vm5S0;%h3YXu+yl`Q?mr z3cnKa15J6@LWd4%pLFk3aV!?`n0h}lx>IPHzN;l#ENvn!H#sVo9K}t6%B4VYpGW1MM{!?3|AWf?2gOZ^%B4hcUqt0zL~&E0 za;Y9ZW085mD)Ry*^MXz0#dVn%>@qL1b9J)RYaRvF$wx6*dBt2i279vZV%sPQi8$8f zjuV3YsTj*d%zGNKPw;hb0>c1_@Y}@(Qe6mDP%75$FYx`I_W1ovhD>R9AY};%Be5mB z+$$1=L!jq9zXT8x=6#R~=HRZy5;nO6@RUsz6xBWZ4cAoQlEL%H6u5f&ip{~kgzC?= zsofp;W5v>1E#cbFH7bK2mmWmy9X+g6F2de1J$>QGyXpl$*ns-{R~fk^iLXbedS^Sp zGO+tCr{CMWg2(NQ0vkEBTA#e7Zk1do2qnK5@pBBvJtoB=(H|IFa4(Y(M!xvu=qM`J z3fz!GhZ=d7b%Lss6RDR#^-9?DKl=hUSW7JxDa#+Oa?a>06*w;8!@fQ2ZT4qHKq3=f zXztznT6*y~zX-5-N2pK5As>1C&eE_JJ|#cs?ou>RX~rh}PZ__S+to)qwRbknjf}cMsb*mg6?_ z?9`zL)P89UHPx_D`c#LswzZ}+Ti=rwOh)u+V8 
z{8>EadxWJKv7~1FC0d-|>elQ`$kU@lETFLY4uw!FnoR!;l<3}q*DnI0;0aISyi&wR5-0_3k`X|u(?}PkulTl{ghs|5l6GJ~z3i4OB zQwlRn=5H;9>we#+ZH)%}4GN@sE$IRjlL^AWz?IAw`E{`)cW3W2dV4bd+}o+ItiQ|VA9}mp?(Ls~*9jjFbT}_- z?(e>%X#4z$SFDhxg~!B|cSD6Zlem{V_%+z5`c*bYDmKTr(?!%?Y2 zL)2*kkCIorfJ#e8kduq2;-W1Afk_=2rmvWrp9ln2b!a%cOi@zu5)xeM$sy$GvQnri zMKEwzi$bCnPxF+O3OtkCTr!mx9eH`pYJ%0>DE3LqTbC%*MtQt4<=rvc)<5w-|D$^O zoBv;5t>3b6$N%*GTV_s+0d4E~?|;r?X3n{9;y>G0ezi*dzy8yi|Bk;R<96=bKKXxZ zme-yCi?3>boIn3a{nVJ}rjh^DmtC1_ay4lE;Y`NuZjB4tgtkWQ`OTM?CAu?w%JhG( zUFnTV-#RR79FGL;)rj7;PNijg`hVlgMtSS*$DR3KqWb>7@rtQ`LC31ETBmPwYsb0S zO8*O!yVH}?pV#cjhGyR{yXl&zxhs|4@(YPtoLW zPp{j2(hPq+b>Gje={-+>d@-!K`_gb))#OD-e|Z-FU~`^)W1o;Kf0?uI(o-KUYgGg- z4NC1>T_AMd>UQ{|jGc3TS3g|l$$K;ZZnaOvmL2Oi-#n5Vw|?8Z*H1G~Eqf!aQJu%r zbbW%rI%TCeZmrx*esu=(#Ii5%1LZn2gk7d^$9hiSy_qwCKh|Y};LL;x!b+#O_@{`^%$Xnw k#L`Nq_ynIm4`$S|ZqsNlA`=~tt@~qN(ipytVFoV)0IAMgvH$=8 literal 0 HcmV?d00001 diff --git a/python/dateutil/zoneinfo/rebuild.py b/python/dateutil/zoneinfo/rebuild.py new file mode 100644 index 0000000..9d53bb8 --- /dev/null +++ b/python/dateutil/zoneinfo/rebuild.py @@ -0,0 +1,52 @@ +import logging +import os +import tempfile +import shutil +import json +from subprocess import check_call + +from dateutil.zoneinfo import tar_open, METADATA_FN, ZONEFILENAME + + +def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): + """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* + + filename is the timezone tarball from ftp.iana.org/tz. + + """ + tmpdir = tempfile.mkdtemp() + zonedir = os.path.join(tmpdir, "zoneinfo") + moduledir = os.path.dirname(__file__) + try: + with tar_open(filename) as tf: + for name in zonegroups: + tf.extract(name, tmpdir) + filepaths = [os.path.join(tmpdir, n) for n in zonegroups] + try: + check_call(["zic", "-d", zonedir] + filepaths) + except OSError as e: + _print_on_nosuchfile(e) + raise + # write metadata file + with open(os.path.join(zonedir, METADATA_FN), 'w') as f: + json.dump(metadata, f, indent=4, sort_keys=True) + target = os.path.join(moduledir, ZONEFILENAME) + with tar_open(target, "w:%s" % format) as tf: + for entry in os.listdir(zonedir): + entrypath = os.path.join(zonedir, entry) + tf.add(entrypath, entry) + finally: + shutil.rmtree(tmpdir) + + +def _print_on_nosuchfile(e): + """Print helpful troubleshooting message + + e is an exception raised by subprocess.check_call() + + """ + if e.errno == 2: + logging.error( + "Could not find zic. Perhaps you need to install " + "libc-bin or some other package that provides it, " + "or it's not in your PATH?") diff --git a/python/defusedxml/ElementTree.py b/python/defusedxml/ElementTree.py new file mode 100644 index 0000000..41b2ea8 --- /dev/null +++ b/python/defusedxml/ElementTree.py @@ -0,0 +1,112 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/psf/license for licensing details. 
+"""Defused xml.etree.ElementTree facade +""" +from __future__ import print_function, absolute_import + +import sys +from xml.etree.ElementTree import TreeBuilder as _TreeBuilder +from xml.etree.ElementTree import parse as _parse +from xml.etree.ElementTree import tostring + +from .common import PY3 + + +if PY3: + import importlib +else: + from xml.etree.ElementTree import XMLParser as _XMLParser + from xml.etree.ElementTree import iterparse as _iterparse + from xml.etree.ElementTree import ParseError + + +from .common import (DTDForbidden, EntitiesForbidden, + ExternalReferenceForbidden, _generate_etree_functions) + +__origin__ = "xml.etree.ElementTree" + + +def _get_py3_cls(): + """Python 3.3 hides the pure Python code but defusedxml requires it. + + The code is based on test.support.import_fresh_module(). + """ + pymodname = "xml.etree.ElementTree" + cmodname = "_elementtree" + + pymod = sys.modules.pop(pymodname, None) + cmod = sys.modules.pop(cmodname, None) + + sys.modules[cmodname] = None + pure_pymod = importlib.import_module(pymodname) + if cmod is not None: + sys.modules[cmodname] = cmod + else: + sys.modules.pop(cmodname) + sys.modules[pymodname] = pymod + + _XMLParser = pure_pymod.XMLParser + _iterparse = pure_pymod.iterparse + ParseError = pure_pymod.ParseError + + return _XMLParser, _iterparse, ParseError + + +if PY3: + _XMLParser, _iterparse, ParseError = _get_py3_cls() + + +class DefusedXMLParser(_XMLParser): + + def __init__(self, html=0, target=None, encoding=None, + forbid_dtd=False, forbid_entities=True, + forbid_external=True): + # Python 2.x old style class + _XMLParser.__init__(self, html, target, encoding) + self.forbid_dtd = forbid_dtd + self.forbid_entities = forbid_entities + self.forbid_external = forbid_external + if PY3: + parser = self.parser + else: + parser = self._parser + if self.forbid_dtd: + parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl + if self.forbid_entities: + parser.EntityDeclHandler = self.defused_entity_decl + parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl + if self.forbid_external: + parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler + + def defused_start_doctype_decl(self, name, sysid, pubid, + has_internal_subset): + raise DTDForbidden(name, sysid, pubid) + + def defused_entity_decl(self, name, is_parameter_entity, value, base, + sysid, pubid, notation_name): + raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name) + + def defused_unparsed_entity_decl(self, name, base, sysid, pubid, + notation_name): + # expat 1.2 + raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) + + def defused_external_entity_ref_handler(self, context, base, sysid, + pubid): + raise ExternalReferenceForbidden(context, base, sysid, pubid) + + +# aliases +XMLTreeBuilder = XMLParse = DefusedXMLParser + +parse, iterparse, fromstring = _generate_etree_functions(DefusedXMLParser, + _TreeBuilder, _parse, + _iterparse) +XML = fromstring + + +__all__ = ['XML', 'XMLParse', 'XMLTreeBuilder', 'fromstring', 'iterparse', + 'parse', 'tostring'] diff --git a/python/defusedxml/__init__.py b/python/defusedxml/__init__.py new file mode 100644 index 0000000..590a5a9 --- /dev/null +++ b/python/defusedxml/__init__.py @@ -0,0 +1,45 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/psf/license for licensing details. 
+"""Defuse XML bomb denial of service vulnerabilities +""" +from __future__ import print_function, absolute_import + +from .common import (DefusedXmlException, DTDForbidden, EntitiesForbidden, + ExternalReferenceForbidden, NotSupportedError, + _apply_defusing) + + +def defuse_stdlib(): + """Monkey patch and defuse all stdlib packages + + :warning: The monkey patch is an EXPERIMETNAL feature. + """ + defused = {} + + from . import cElementTree + from . import ElementTree + from . import minidom + from . import pulldom + from . import sax + from . import expatbuilder + from . import expatreader + from . import xmlrpc + + xmlrpc.monkey_patch() + defused[xmlrpc] = None + + for defused_mod in [cElementTree, ElementTree, minidom, pulldom, sax, + expatbuilder, expatreader]: + stdlib_mod = _apply_defusing(defused_mod) + defused[defused_mod] = stdlib_mod + + return defused + + +__version__ = "0.5.0" + +__all__ = ['DefusedXmlException', 'DTDForbidden', 'EntitiesForbidden', + 'ExternalReferenceForbidden', 'NotSupportedError'] diff --git a/python/defusedxml/cElementTree.py b/python/defusedxml/cElementTree.py new file mode 100644 index 0000000..cc13689 --- /dev/null +++ b/python/defusedxml/cElementTree.py @@ -0,0 +1,30 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/psf/license for licensing details. +"""Defused xml.etree.cElementTree +""" +from __future__ import absolute_import + +from xml.etree.cElementTree import TreeBuilder as _TreeBuilder +from xml.etree.cElementTree import parse as _parse +from xml.etree.cElementTree import tostring +# iterparse from ElementTree! +from xml.etree.ElementTree import iterparse as _iterparse + +from .ElementTree import DefusedXMLParser +from .common import _generate_etree_functions + +__origin__ = "xml.etree.cElementTree" + + +XMLTreeBuilder = XMLParse = DefusedXMLParser + +parse, iterparse, fromstring = _generate_etree_functions(DefusedXMLParser, + _TreeBuilder, _parse, + _iterparse) +XML = fromstring + +__all__ = ['XML', 'XMLParse', 'XMLTreeBuilder', 'fromstring', 'iterparse', + 'parse', 'tostring'] diff --git a/python/defusedxml/common.py b/python/defusedxml/common.py new file mode 100644 index 0000000..668b609 --- /dev/null +++ b/python/defusedxml/common.py @@ -0,0 +1,120 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/psf/license for licensing details. 
+"""Common constants, exceptions and helpe functions +""" +import sys + +PY3 = sys.version_info[0] == 3 + + +class DefusedXmlException(ValueError): + """Base exception + """ + + def __repr__(self): + return str(self) + + +class DTDForbidden(DefusedXmlException): + """Document type definition is forbidden + """ + + def __init__(self, name, sysid, pubid): + super(DTDForbidden, self).__init__() + self.name = name + self.sysid = sysid + self.pubid = pubid + + def __str__(self): + tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})" + return tpl.format(self.name, self.sysid, self.pubid) + + +class EntitiesForbidden(DefusedXmlException): + """Entity definition is forbidden + """ + + def __init__(self, name, value, base, sysid, pubid, notation_name): + super(EntitiesForbidden, self).__init__() + self.name = name + self.value = value + self.base = base + self.sysid = sysid + self.pubid = pubid + self.notation_name = notation_name + + def __str__(self): + tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})" + return tpl.format(self.name, self.sysid, self.pubid) + + +class ExternalReferenceForbidden(DefusedXmlException): + """Resolving an external reference is forbidden + """ + + def __init__(self, context, base, sysid, pubid): + super(ExternalReferenceForbidden, self).__init__() + self.context = context + self.base = base + self.sysid = sysid + self.pubid = pubid + + def __str__(self): + tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})" + return tpl.format(self.sysid, self.pubid) + + +class NotSupportedError(DefusedXmlException): + """The operation is not supported + """ + + +def _apply_defusing(defused_mod): + assert defused_mod is sys.modules[defused_mod.__name__] + stdlib_name = defused_mod.__origin__ + __import__(stdlib_name, {}, {}, ["*"]) + stdlib_mod = sys.modules[stdlib_name] + stdlib_names = set(dir(stdlib_mod)) + for name, obj in vars(defused_mod).items(): + if name.startswith("_") or name not in stdlib_names: + continue + setattr(stdlib_mod, name, obj) + return stdlib_mod + + +def _generate_etree_functions(DefusedXMLParser, _TreeBuilder, + _parse, _iterparse): + """Factory for functions needed by etree, dependent on whether + cElementTree or ElementTree is used.""" + + def parse(source, parser=None, forbid_dtd=False, forbid_entities=True, + forbid_external=True): + if parser is None: + parser = DefusedXMLParser(target=_TreeBuilder(), + forbid_dtd=forbid_dtd, + forbid_entities=forbid_entities, + forbid_external=forbid_external) + return _parse(source, parser) + + def iterparse(source, events=None, parser=None, forbid_dtd=False, + forbid_entities=True, forbid_external=True): + if parser is None: + parser = DefusedXMLParser(target=_TreeBuilder(), + forbid_dtd=forbid_dtd, + forbid_entities=forbid_entities, + forbid_external=forbid_external) + return _iterparse(source, events, parser) + + def fromstring(text, forbid_dtd=False, forbid_entities=True, + forbid_external=True): + parser = DefusedXMLParser(target=_TreeBuilder(), + forbid_dtd=forbid_dtd, + forbid_entities=forbid_entities, + forbid_external=forbid_external) + parser.feed(text) + return parser.close() + + return parse, iterparse, fromstring diff --git a/python/defusedxml/expatbuilder.py b/python/defusedxml/expatbuilder.py new file mode 100644 index 0000000..0eb6b91 --- /dev/null +++ b/python/defusedxml/expatbuilder.py @@ -0,0 +1,110 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. 
+# See http://www.python.org/psf/license for licensing details. +"""Defused xml.dom.expatbuilder +""" +from __future__ import print_function, absolute_import + +from xml.dom.expatbuilder import ExpatBuilder as _ExpatBuilder +from xml.dom.expatbuilder import Namespaces as _Namespaces + +from .common import (DTDForbidden, EntitiesForbidden, + ExternalReferenceForbidden) + +__origin__ = "xml.dom.expatbuilder" + + +class DefusedExpatBuilder(_ExpatBuilder): + """Defused document builder""" + + def __init__(self, options=None, forbid_dtd=False, forbid_entities=True, + forbid_external=True): + _ExpatBuilder.__init__(self, options) + self.forbid_dtd = forbid_dtd + self.forbid_entities = forbid_entities + self.forbid_external = forbid_external + + def defused_start_doctype_decl(self, name, sysid, pubid, + has_internal_subset): + raise DTDForbidden(name, sysid, pubid) + + def defused_entity_decl(self, name, is_parameter_entity, value, base, + sysid, pubid, notation_name): + raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name) + + def defused_unparsed_entity_decl(self, name, base, sysid, pubid, + notation_name): + # expat 1.2 + raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) + + def defused_external_entity_ref_handler(self, context, base, sysid, + pubid): + raise ExternalReferenceForbidden(context, base, sysid, pubid) + + def install(self, parser): + _ExpatBuilder.install(self, parser) + + if self.forbid_dtd: + parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl + if self.forbid_entities: + # if self._options.entities: + parser.EntityDeclHandler = self.defused_entity_decl + parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl + if self.forbid_external: + parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler + + +class DefusedExpatBuilderNS(_Namespaces, DefusedExpatBuilder): + """Defused document builder that supports namespaces.""" + + def install(self, parser): + DefusedExpatBuilder.install(self, parser) + if self._options.namespace_declarations: + parser.StartNamespaceDeclHandler = ( + self.start_namespace_decl_handler) + + def reset(self): + DefusedExpatBuilder.reset(self) + self._initNamespaces() + + +def parse(file, namespaces=True, forbid_dtd=False, forbid_entities=True, + forbid_external=True): + """Parse a document, returning the resulting Document node. + + 'file' may be either a file name or an open file object. + """ + if namespaces: + build_builder = DefusedExpatBuilderNS + else: + build_builder = DefusedExpatBuilder + builder = build_builder(forbid_dtd=forbid_dtd, + forbid_entities=forbid_entities, + forbid_external=forbid_external) + + if isinstance(file, str): + fp = open(file, 'rb') + try: + result = builder.parseFile(fp) + finally: + fp.close() + else: + result = builder.parseFile(file) + return result + + +def parseString(string, namespaces=True, forbid_dtd=False, + forbid_entities=True, forbid_external=True): + """Parse a document from a string, returning the resulting + Document node. 
+ """ + if namespaces: + build_builder = DefusedExpatBuilderNS + else: + build_builder = DefusedExpatBuilder + builder = build_builder(forbid_dtd=forbid_dtd, + forbid_entities=forbid_entities, + forbid_external=forbid_external) + return builder.parseString(string) diff --git a/python/defusedxml/expatreader.py b/python/defusedxml/expatreader.py new file mode 100644 index 0000000..ef6bc39 --- /dev/null +++ b/python/defusedxml/expatreader.py @@ -0,0 +1,59 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/psf/license for licensing details. +"""Defused xml.sax.expatreader +""" +from __future__ import print_function, absolute_import + +from xml.sax.expatreader import ExpatParser as _ExpatParser + +from .common import (DTDForbidden, EntitiesForbidden, + ExternalReferenceForbidden) + +__origin__ = "xml.sax.expatreader" + + +class DefusedExpatParser(_ExpatParser): + """Defused SAX driver for the pyexpat C module.""" + + def __init__(self, namespaceHandling=0, bufsize=2 ** 16 - 20, + forbid_dtd=False, forbid_entities=True, + forbid_external=True): + _ExpatParser.__init__(self, namespaceHandling, bufsize) + self.forbid_dtd = forbid_dtd + self.forbid_entities = forbid_entities + self.forbid_external = forbid_external + + def defused_start_doctype_decl(self, name, sysid, pubid, + has_internal_subset): + raise DTDForbidden(name, sysid, pubid) + + def defused_entity_decl(self, name, is_parameter_entity, value, base, + sysid, pubid, notation_name): + raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name) + + def defused_unparsed_entity_decl(self, name, base, sysid, pubid, + notation_name): + # expat 1.2 + raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) + + def defused_external_entity_ref_handler(self, context, base, sysid, + pubid): + raise ExternalReferenceForbidden(context, base, sysid, pubid) + + def reset(self): + _ExpatParser.reset(self) + parser = self._parser + if self.forbid_dtd: + parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl + if self.forbid_entities: + parser.EntityDeclHandler = self.defused_entity_decl + parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl + if self.forbid_external: + parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler + + +def create_parser(*args, **kwargs): + return DefusedExpatParser(*args, **kwargs) diff --git a/python/defusedxml/lxml.py b/python/defusedxml/lxml.py new file mode 100644 index 0000000..7f3ee0b --- /dev/null +++ b/python/defusedxml/lxml.py @@ -0,0 +1,153 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/psf/license for licensing details. +"""Example code for lxml.etree protection + +The code has NO protection against decompression bombs. 
+""" +from __future__ import print_function, absolute_import + +import threading +from lxml import etree as _etree + +from .common import DTDForbidden, EntitiesForbidden, NotSupportedError + +LXML3 = _etree.LXML_VERSION[0] >= 3 + +__origin__ = "lxml.etree" + +tostring = _etree.tostring + + +class RestrictedElement(_etree.ElementBase): + """A restricted Element class that filters out instances of some classes + """ + __slots__ = () + # blacklist = (etree._Entity, etree._ProcessingInstruction, etree._Comment) + blacklist = _etree._Entity + + def _filter(self, iterator): + blacklist = self.blacklist + for child in iterator: + if isinstance(child, blacklist): + continue + yield child + + def __iter__(self): + iterator = super(RestrictedElement, self).__iter__() + return self._filter(iterator) + + def iterchildren(self, tag=None, reversed=False): + iterator = super(RestrictedElement, self).iterchildren( + tag=tag, reversed=reversed) + return self._filter(iterator) + + def iter(self, tag=None, *tags): + iterator = super(RestrictedElement, self).iter(tag=tag, *tags) + return self._filter(iterator) + + def iterdescendants(self, tag=None, *tags): + iterator = super(RestrictedElement, + self).iterdescendants(tag=tag, *tags) + return self._filter(iterator) + + def itersiblings(self, tag=None, preceding=False): + iterator = super(RestrictedElement, self).itersiblings( + tag=tag, preceding=preceding) + return self._filter(iterator) + + def getchildren(self): + iterator = super(RestrictedElement, self).__iter__() + return list(self._filter(iterator)) + + def getiterator(self, tag=None): + iterator = super(RestrictedElement, self).getiterator(tag) + return self._filter(iterator) + + +class GlobalParserTLS(threading.local): + """Thread local context for custom parser instances + """ + parser_config = { + 'resolve_entities': False, + # 'remove_comments': True, + # 'remove_pis': True, + } + + element_class = RestrictedElement + + def createDefaultParser(self): + parser = _etree.XMLParser(**self.parser_config) + element_class = self.element_class + if self.element_class is not None: + lookup = _etree.ElementDefaultClassLookup(element=element_class) + parser.set_element_class_lookup(lookup) + return parser + + def setDefaultParser(self, parser): + self._default_parser = parser + + def getDefaultParser(self): + parser = getattr(self, "_default_parser", None) + if parser is None: + parser = self.createDefaultParser() + self.setDefaultParser(parser) + return parser + + +_parser_tls = GlobalParserTLS() +getDefaultParser = _parser_tls.getDefaultParser + + +def check_docinfo(elementtree, forbid_dtd=False, forbid_entities=True): + """Check docinfo of an element tree for DTD and entity declarations + + The check for entity declarations needs lxml 3 or newer. lxml 2.x does + not support dtd.iterentities(). 
+ """ + docinfo = elementtree.docinfo + if docinfo.doctype: + if forbid_dtd: + raise DTDForbidden(docinfo.doctype, + docinfo.system_url, + docinfo.public_id) + if forbid_entities and not LXML3: + # lxml < 3 has no iterentities() + raise NotSupportedError("Unable to check for entity declarations " + "in lxml 2.x") + + if forbid_entities: + for dtd in docinfo.internalDTD, docinfo.externalDTD: + if dtd is None: + continue + for entity in dtd.iterentities(): + raise EntitiesForbidden(entity.name, entity.content, None, + None, None, None) + + +def parse(source, parser=None, base_url=None, forbid_dtd=False, + forbid_entities=True): + if parser is None: + parser = getDefaultParser() + elementtree = _etree.parse(source, parser, base_url=base_url) + check_docinfo(elementtree, forbid_dtd, forbid_entities) + return elementtree + + +def fromstring(text, parser=None, base_url=None, forbid_dtd=False, + forbid_entities=True): + if parser is None: + parser = getDefaultParser() + rootelement = _etree.fromstring(text, parser, base_url=base_url) + elementtree = rootelement.getroottree() + check_docinfo(elementtree, forbid_dtd, forbid_entities) + return rootelement + + +XML = fromstring + + +def iterparse(*args, **kwargs): + raise NotSupportedError("defused lxml.etree.iterparse not available") diff --git a/python/defusedxml/minidom.py b/python/defusedxml/minidom.py new file mode 100644 index 0000000..0fd8684 --- /dev/null +++ b/python/defusedxml/minidom.py @@ -0,0 +1,42 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/psf/license for licensing details. +"""Defused xml.dom.minidom +""" +from __future__ import print_function, absolute_import + +from xml.dom.minidom import _do_pulldom_parse +from . import expatbuilder as _expatbuilder +from . import pulldom as _pulldom + +__origin__ = "xml.dom.minidom" + + +def parse(file, parser=None, bufsize=None, forbid_dtd=False, + forbid_entities=True, forbid_external=True): + """Parse a file into a DOM by filename or file object.""" + if parser is None and not bufsize: + return _expatbuilder.parse(file, forbid_dtd=forbid_dtd, + forbid_entities=forbid_entities, + forbid_external=forbid_external) + else: + return _do_pulldom_parse(_pulldom.parse, (file,), + {'parser': parser, 'bufsize': bufsize, + 'forbid_dtd': forbid_dtd, 'forbid_entities': forbid_entities, + 'forbid_external': forbid_external}) + + +def parseString(string, parser=None, forbid_dtd=False, + forbid_entities=True, forbid_external=True): + """Parse a file into a DOM from a string.""" + if parser is None: + return _expatbuilder.parseString(string, forbid_dtd=forbid_dtd, + forbid_entities=forbid_entities, + forbid_external=forbid_external) + else: + return _do_pulldom_parse(_pulldom.parseString, (string,), + {'parser': parser, 'forbid_dtd': forbid_dtd, + 'forbid_entities': forbid_entities, + 'forbid_external': forbid_external}) diff --git a/python/defusedxml/pulldom.py b/python/defusedxml/pulldom.py new file mode 100644 index 0000000..fc9e466 --- /dev/null +++ b/python/defusedxml/pulldom.py @@ -0,0 +1,34 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/psf/license for licensing details. 
+"""Defused xml.dom.pulldom +""" +from __future__ import print_function, absolute_import + +from xml.dom.pulldom import parse as _parse +from xml.dom.pulldom import parseString as _parseString +from .sax import make_parser + +__origin__ = "xml.dom.pulldom" + + +def parse(stream_or_string, parser=None, bufsize=None, forbid_dtd=False, + forbid_entities=True, forbid_external=True): + if parser is None: + parser = make_parser() + parser.forbid_dtd = forbid_dtd + parser.forbid_entities = forbid_entities + parser.forbid_external = forbid_external + return _parse(stream_or_string, parser, bufsize) + + +def parseString(string, parser=None, forbid_dtd=False, + forbid_entities=True, forbid_external=True): + if parser is None: + parser = make_parser() + parser.forbid_dtd = forbid_dtd + parser.forbid_entities = forbid_entities + parser.forbid_external = forbid_external + return _parseString(string, parser) diff --git a/python/defusedxml/sax.py b/python/defusedxml/sax.py new file mode 100644 index 0000000..534d0ca --- /dev/null +++ b/python/defusedxml/sax.py @@ -0,0 +1,49 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/psf/license for licensing details. +"""Defused xml.sax +""" +from __future__ import print_function, absolute_import + +from xml.sax import InputSource as _InputSource +from xml.sax import ErrorHandler as _ErrorHandler + +from . import expatreader + +__origin__ = "xml.sax" + + +def parse(source, handler, errorHandler=_ErrorHandler(), forbid_dtd=False, + forbid_entities=True, forbid_external=True): + parser = make_parser() + parser.setContentHandler(handler) + parser.setErrorHandler(errorHandler) + parser.forbid_dtd = forbid_dtd + parser.forbid_entities = forbid_entities + parser.forbid_external = forbid_external + parser.parse(source) + + +def parseString(string, handler, errorHandler=_ErrorHandler(), + forbid_dtd=False, forbid_entities=True, + forbid_external=True): + from io import BytesIO + + if errorHandler is None: + errorHandler = _ErrorHandler() + parser = make_parser() + parser.setContentHandler(handler) + parser.setErrorHandler(errorHandler) + parser.forbid_dtd = forbid_dtd + parser.forbid_entities = forbid_entities + parser.forbid_external = forbid_external + + inpsrc = _InputSource() + inpsrc.setByteStream(BytesIO(string)) + parser.parse(inpsrc) + + +def make_parser(parser_list=[]): + return expatreader.create_parser() diff --git a/python/defusedxml/xmlrpc.py b/python/defusedxml/xmlrpc.py new file mode 100644 index 0000000..2a456e6 --- /dev/null +++ b/python/defusedxml/xmlrpc.py @@ -0,0 +1,157 @@ +# defusedxml +# +# Copyright (c) 2013 by Christian Heimes +# Licensed to PSF under a Contributor Agreement. +# See http://www.python.org/psf/license for licensing details. 
+"""Defused xmlrpclib + +Also defuses gzip bomb +""" +from __future__ import print_function, absolute_import + +import io + +from .common import ( + DTDForbidden, EntitiesForbidden, ExternalReferenceForbidden, PY3) + +if PY3: + __origin__ = "xmlrpc.client" + from xmlrpc.client import ExpatParser + from xmlrpc import client as xmlrpc_client + from xmlrpc import server as xmlrpc_server + from xmlrpc.client import gzip_decode as _orig_gzip_decode + from xmlrpc.client import GzipDecodedResponse as _OrigGzipDecodedResponse +else: + __origin__ = "xmlrpclib" + from xmlrpclib import ExpatParser + import xmlrpclib as xmlrpc_client + xmlrpc_server = None + from xmlrpclib import gzip_decode as _orig_gzip_decode + from xmlrpclib import GzipDecodedResponse as _OrigGzipDecodedResponse + +try: + import gzip +except ImportError: + gzip = None + + +# Limit maximum request size to prevent resource exhaustion DoS +# Also used to limit maximum amount of gzip decoded data in order to prevent +# decompression bombs +# A value of -1 or smaller disables the limit +MAX_DATA = 30 * 1024 * 1024 # 30 MB + + +def defused_gzip_decode(data, limit=None): + """gzip encoded data -> unencoded data + + Decode data using the gzip content encoding as described in RFC 1952 + """ + if not gzip: + raise NotImplementedError + if limit is None: + limit = MAX_DATA + f = io.BytesIO(data) + gzf = gzip.GzipFile(mode="rb", fileobj=f) + try: + if limit < 0: # no limit + decoded = gzf.read() + else: + decoded = gzf.read(limit + 1) + except IOError: + raise ValueError("invalid data") + f.close() + gzf.close() + if limit >= 0 and len(decoded) > limit: + raise ValueError("max gzipped payload length exceeded") + return decoded + + +class DefusedGzipDecodedResponse(gzip.GzipFile if gzip else object): + """a file-like object to decode a response encoded with the gzip + method, as described in RFC 1952. 
+ """ + + def __init__(self, response, limit=None): + # response doesn't support tell() and read(), required by + # GzipFile + if not gzip: + raise NotImplementedError + self.limit = limit = limit if limit is not None else MAX_DATA + if limit < 0: # no limit + data = response.read() + self.readlength = None + else: + data = response.read(limit + 1) + self.readlength = 0 + if limit >= 0 and len(data) > limit: + raise ValueError("max payload length exceeded") + self.stringio = io.BytesIO(data) + gzip.GzipFile.__init__(self, mode="rb", fileobj=self.stringio) + + def read(self, n): + if self.limit >= 0: + left = self.limit - self.readlength + n = min(n, left + 1) + data = gzip.GzipFile.read(self, n) + self.readlength += len(data) + if self.readlength > self.limit: + raise ValueError("max payload length exceeded") + return data + else: + return gzip.GzipFile.read(self, n) + + def close(self): + gzip.GzipFile.close(self) + self.stringio.close() + + +class DefusedExpatParser(ExpatParser): + + def __init__(self, target, forbid_dtd=False, forbid_entities=True, + forbid_external=True): + ExpatParser.__init__(self, target) + self.forbid_dtd = forbid_dtd + self.forbid_entities = forbid_entities + self.forbid_external = forbid_external + parser = self._parser + if self.forbid_dtd: + parser.StartDoctypeDeclHandler = self.defused_start_doctype_decl + if self.forbid_entities: + parser.EntityDeclHandler = self.defused_entity_decl + parser.UnparsedEntityDeclHandler = self.defused_unparsed_entity_decl + if self.forbid_external: + parser.ExternalEntityRefHandler = self.defused_external_entity_ref_handler + + def defused_start_doctype_decl(self, name, sysid, pubid, + has_internal_subset): + raise DTDForbidden(name, sysid, pubid) + + def defused_entity_decl(self, name, is_parameter_entity, value, base, + sysid, pubid, notation_name): + raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name) + + def defused_unparsed_entity_decl(self, name, base, sysid, pubid, + notation_name): + # expat 1.2 + raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name) + + def defused_external_entity_ref_handler(self, context, base, sysid, + pubid): + raise ExternalReferenceForbidden(context, base, sysid, pubid) + + +def monkey_patch(): + xmlrpc_client.FastParser = DefusedExpatParser + xmlrpc_client.GzipDecodedResponse = DefusedGzipDecodedResponse + xmlrpc_client.gzip_decode = defused_gzip_decode + if xmlrpc_server: + xmlrpc_server.gzip_decode = defused_gzip_decode + + +def unmonkey_patch(): + xmlrpc_client.FastParser = None + xmlrpc_client.GzipDecodedResponse = _OrigGzipDecodedResponse + xmlrpc_client.gzip_decode = _orig_gzip_decode + if xmlrpc_server: + xmlrpc_server.gzip_decode = _orig_gzip_decode diff --git a/python/six.py b/python/six.py new file mode 100644 index 0000000..6bf4fd3 --- /dev/null +++ b/python/six.py @@ -0,0 +1,891 @@ +# Copyright (c) 2010-2017 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Utilities for writing code that runs on Python 2 and 3""" + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.11.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. 
+ delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. 
+ + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + 
MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + 
"moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + 
+class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + 
callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, 
_locs_""") + + exec_("""def reraise(tp, value, tb=None): + try: + raise tp, value, tb + finally: + tb = None +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + try: + raise value from from_value + finally: + value = None +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. + if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. 
+ class metaclass(type): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. 
+sys.meta_path.append(_importer) diff --git a/youtube/channel.py b/youtube/channel.py index 9577525..c83d7d1 100644 --- a/youtube/channel.py +++ b/youtube/channel.py @@ -248,6 +248,7 @@ def channel_videos_html(polymer_json, current_page=1, current_sort=3, number_of_ return yt_channel_items_template.substitute( header = common.get_header(), channel_title = microformat['title'], + channel_id = channel_id, channel_tabs = channel_tabs_html(channel_id, 'Videos'), sort_buttons = channel_sort_buttons_html(channel_id, 'videos', current_sort), avatar = '/' + microformat['thumbnail']['thumbnails'][0]['url'], @@ -269,6 +270,7 @@ def channel_playlists_html(polymer_json, current_sort=3): return yt_channel_items_template.substitute( header = common.get_header(), channel_title = microformat['title'], + channel_id = channel_id, channel_tabs = channel_tabs_html(channel_id, 'Playlists'), sort_buttons = channel_sort_buttons_html(channel_id, 'playlists', current_sort), avatar = '/' + microformat['thumbnail']['thumbnails'][0]['url'], @@ -333,6 +335,7 @@ def channel_about_page(polymer_json): description = description, links = channel_links, stats = stats, + channel_id = channel_metadata['channelId'], channel_tabs = channel_tabs_html(channel_metadata['channelId'], 'About'), ) @@ -353,6 +356,7 @@ def channel_search_page(polymer_json, query, current_page=1, number_of_videos = return yt_channel_items_template.substitute( header = common.get_header(), channel_title = html.escape(microformat['title']), + channel_id = channel_id, channel_tabs = channel_tabs_html(channel_id, '', query), avatar = '/' + microformat['thumbnail']['thumbnails'][0]['url'], page_title = html.escape(query + ' - Channel search'), diff --git a/youtube/subscriptions.py b/youtube/subscriptions.py index 82916dd..ff7d0df 100644 --- a/youtube/subscriptions.py +++ b/youtube/subscriptions.py @@ -5,6 +5,10 @@ import sqlite3 import os import secrets import datetime +import itertools +import time +import urllib +import socks, sockshandler # so as to not completely break on people who have updated but don't know of new dependency try: @@ -51,11 +55,16 @@ def open_database(): return connection -def _subscribe(channel_id, channel_name): +def _subscribe(channels): + ''' channels is a list of (channel_id, channel_name) ''' + + # set time_last_checked to 0 on all channels being subscribed to + channels = ( (channel_id, channel_name, 0) for channel_id, channel_name in channels) + connection = open_database() try: cursor = connection.cursor() - cursor.execute("INSERT INTO subscribed_channels (channel_id, name) VALUES (?, ?)", (channel_id, channel_name)) + cursor.executemany("INSERT INTO subscribed_channels (channel_id, channel_name, time_last_checked) VALUES (?, ?, ?)", channels) connection.commit() except: connection.rollback() @@ -63,11 +72,12 @@ def _subscribe(channel_id, channel_name): finally: connection.close() -def _unsubscribe(channel_id): +def _unsubscribe(channel_ids): + ''' channel_ids is a list of channel_ids ''' connection = open_database() try: cursor = connection.cursor() - cursor.execute("DELETE FROM subscribed_channels WHERE channel_id=?", (channel_id, )) + cursor.executemany("DELETE FROM subscribed_channels WHERE channel_id=?", ((channel_id, ) for channel_id in channel_ids)) connection.commit() except: connection.rollback() @@ -125,12 +135,14 @@ def youtube_timestamp_to_posix(dumb_timestamp): weekdays = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun') months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec') 
-def _get_upstream_videos(channel_id, channel_name, time_last_checked): +def _get_upstream_videos(channel_id, time_last_checked): feed_url = "https://www.youtube.com/feeds/videos.xml?channel_id=" + channel_id headers = {} # randomly change time_last_checked up to one day earlier to make tracking harder time_last_checked = time_last_checked - secrets.randbelow(24*3600) + if time_last_checked < 0: # happens when time_last_checked is initialized to 0 when checking for first time + time_last_checked = 0 # If-Modified-Since header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-Modified-Since struct_time = time.gmtime(time_last_checked) @@ -142,7 +154,7 @@ def _get_upstream_videos(channel_id, channel_name, time_last_checked): headers['User-Agent'] = 'Python-urllib' # Don't leak python version headers['Accept-Encoding'] = 'gzip, br' - req = urllib.request.Request(url, headers=headers) + req = urllib.request.Request(feed_url, headers=headers) if settings.route_tor: opener = urllib.request.build_opener(sockshandler.SocksiPyHandler(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150)) else: @@ -165,13 +177,10 @@ def _get_upstream_videos(channel_id, channel_name, time_last_checked): for entry in feed.entries: video_id = entry.id_[9:] # example of id_: yt:video:q6EoRBvdVPQ - # standard names used in this program for purposes of html templating atom_videos[video_id] = { 'title': entry.title.value, - 'author': entry.authors[0].name, #'description': '', # Not supported by atoma #'duration': '', # Youtube's atom feeds don't provide it.. very frustrating - 'published': entry.published.strftime('%m/%d/%Y'), 'time_published': int(entry.published.timestamp()), } @@ -182,12 +191,13 @@ def _get_upstream_videos(channel_id, channel_name, time_last_checked): # Now check channel page to retrieve missing information for videos json_channel_videos = channel.get_grid_items(channel.get_channel_tab(channel_id)[1]['response']) for json_video in json_channel_videos: - info = renderer_info(json_video) + info = common.renderer_info(json_video['gridVideoRenderer']) + if 'description' not in info: + info['description'] = '' if info['id'] in atom_videos: info.update(atom_videos[info['id']]) else: - info['author'] = channel_name - info['time published'] = youtube_timestamp_to_posix(info['published']) + info['time_published'] = youtube_timestamp_to_posix(info['published']) videos.append(info) return videos @@ -195,7 +205,7 @@ def get_subscriptions_page(env, start_response): items_html = '''''' start_response('200 OK', [('Content-type','text/html'),]) @@ -205,3 +215,38 @@ def get_subscriptions_page(env, start_response): page_buttons = '', ).encode('utf-8') +def post_subscriptions_page(env, start_response): + params = env['parameters'] + action = params['action'][0] + if action == 'subscribe': + if len(params['channel_id']) != len(params['channel_name']): + start_response('400 Bad Request', ()) + return b'400 Bad Request, length of channel_id != length of channel_name' + _subscribe(zip(params['channel_id'], params['channel_name'])) + + elif action == 'unsubscribe': + _unsubscribe(params['channel_id']) + + elif action == 'refresh': + connection = open_database() + try: + cursor = connection.cursor() + for uploader_id, channel_id, time_last_checked in cursor.execute('''SELECT id, channel_id, time_last_checked FROM subscribed_channels'''): + db_videos = ( (uploader_id, info['id'], info['title'], info['duration'], info['time_published'], info['description']) for info in _get_upstream_videos(channel_id, time_last_checked) ) + 
cursor.executemany('''INSERT INTO videos (uploader_id, video_id, title, duration, time_published, description) VALUES (?, ?, ?, ?, ?, ?)''', db_videos) + + cursor.execute('''UPDATE subscribed_channels SET time_last_checked = ?''', ( int(time.time()), ) ) + connection.commit() + except: + connection.rollback() + raise + finally: + connection.close() + + start_response('303 See Other', [('Location', common.URL_ORIGIN + '/subscriptions'),] ) + return b'' + else: + start_response('400 Bad Request', ()) + return b'400 Bad Request' + start_response('204 No Content', ()) + return b'' diff --git a/youtube/youtube.py b/youtube/youtube.py index ad73a6e..288f68b 100644 --- a/youtube/youtube.py +++ b/youtube/youtube.py @@ -35,6 +35,8 @@ post_handlers = { 'comments': post_comment.post_comment, 'post_comment': post_comment.post_comment, 'delete_comment': post_comment.delete_comment, + + 'subscriptions': subscriptions.post_subscriptions_page, } def youtube(env, start_response): diff --git a/yt_channel_about_template.html b/yt_channel_about_template.html index 221b838..6ed7a03 100644 --- a/yt_channel_about_template.html +++ b/yt_channel_about_template.html @@ -18,12 +18,16 @@ height:200px; width:200px; } - main .title{ + .metadata{ grid-row:1; - grid-column:2; + grid-column:2; + margin-left: 10px; + display:grid; + align-content: start; + grid-row-gap:10px; } + main .channel-tabs{ - grid-row:2; grid-column: 1 / span 2; display:grid; @@ -34,7 +38,6 @@ padding: 3px; } main .channel-info{ - grid-row: 3; grid-column: 1 / span 3; } .tab{ @@ -51,7 +54,15 @@ $header
    [hunk body lost in extraction: HTML tags stripped; only "$channel_title" and one removed-line marker survive]
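The channel_id value that channel.py now hands to each template is what lets a channel page POST back to post_subscriptions_page, which expects an action plus parallel channel_id and channel_name lists. As a rough sketch of the subscribe branch's pairing step (the parameter values below are made up; the dict only mirrors the shape of env['parameters'] as the handler reads it):

# Illustrative only: mimics the data flow from post_subscriptions_page into
# _subscribe(); the channel ids and names are invented for the example.
params = {
    'action': ['subscribe'],
    'channel_id': ['UCxxxxxxxxxxxxxxxxxxxxxx', 'UCyyyyyyyyyyyyyyyyyyyyyy'],
    'channel_name': ['Example Channel A', 'Example Channel B'],
}

if len(params['channel_id']) != len(params['channel_name']):
    raise ValueError('length of channel_id != length of channel_name')

# zip() pairs the parallel form fields into (channel_id, channel_name) tuples;
# _subscribe() then appends time_last_checked = 0 so the next refresh fetches
# everything for the new channels, and feeds the rows to executemany().
rows = [(channel_id, channel_name, 0)
        for channel_id, channel_name in zip(params['channel_id'],
                                            params['channel_name'])]
# rows -> [('UCxxxxxxxxxxxxxxxxxxxxxx', 'Example Channel A', 0),
#          ('UCyyyyyyyyyyyyyyyyyyyyyy', 'Example Channel B', 0)]

Because zip() silently truncates to the shorter input, the explicit length check in the handler is what turns a mismatched form submission into a 400 response instead of quietly dropping channels.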
+ diff --git a/yt_channel_items_template.html b/yt_channel_items_template.html index 1a8551d..93c4b0a 100644 --- a/yt_channel_items_template.html +++ b/yt_channel_items_template.html @@ -18,12 +18,15 @@ height:200px; width:200px; } - main .title{ + .metadata{ grid-row:1; - grid-column:2; + grid-column:2; + margin-left: 10px; + display:grid; + align-content: start; + grid-row-gap:10px; } main .channel-tabs{ - grid-row:2; grid-column: 1 / span 2; display:grid; @@ -48,7 +51,6 @@ font-weight:bold; } .item-grid{ - grid-row:4; grid-column: 1 / span 2; } .item-list{ @@ -68,7 +70,15 @@ $header
    [hunk body lost in extraction: HTML tags stripped; only "$channel_title" and one removed-line marker survive]
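On the fetch side, _get_upstream_videos builds an If-Modified-Since header from time_last_checked so the feed server can answer with 304 Not Modified when nothing has changed. HTTP dates must use English weekday and month names regardless of the process locale, which is presumably why the weekdays and months tuples sit next to that function instead of relying on strftime's %a and %b. A standalone sketch of producing such a header value (the helper name is chosen here for illustration; the one-day jitter mirrors the comment in the hunk above):

import secrets
import time

WEEKDAYS = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
MONTHS = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')

def if_modified_since_value(time_last_checked):
    '''Format a unix timestamp as an HTTP date, shifted randomly up to one
    day earlier so the exact time of the last check is not revealed.'''
    time_last_checked -= secrets.randbelow(24*3600)
    if time_last_checked < 0:   # first check: the column starts at 0
        time_last_checked = 0
    t = time.gmtime(time_last_checked)
    # Spell out the English names; %a and %b would follow the locale.
    return time.strftime(WEEKDAYS[t.tm_wday] + ', %d ' + MONTHS[t.tm_mon - 1]
                         + ' %Y %H:%M:%S GMT', t)

# if_modified_since_value(0) -> 'Thu, 01 Jan 1970 00:00:00 GMT'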
+ diff --git a/yt_subscriptions_template.html b/yt_subscriptions_template.html index 8477d25..6395b6c 100644 --- a/yt_subscriptions_template.html +++ b/yt_subscriptions_template.html @@ -15,6 +15,10 @@ $header
    [hunk body lost in extraction: HTML tags stripped; four added-line markers and the context line "$items" survive]
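Pulling the pieces together, the refresh branch walks every row of subscribed_channels, fetches that channel's new uploads, bulk-inserts them into videos, and stamps time_last_checked. Below is a simplified, self-contained sketch of that cycle against an in-memory database; the column set is trimmed (no duration) and fetch_new_videos() stands in for _get_upstream_videos(), so read it as an outline of the flow rather than the patch's exact code:

import sqlite3
import time

def fetch_new_videos(channel_id, time_last_checked):
    # Stand-in for _get_upstream_videos(): the real function hits the channel's
    # Atom feed and channel page; this stub just returns one fake entry.
    return [{'id': 'abcdefghijk', 'title': 'Example upload',
             'time_published': int(time.time()), 'description': ''}]

connection = sqlite3.connect(':memory:')
cursor = connection.cursor()
cursor.execute('''CREATE TABLE subscribed_channels (
                      id integer PRIMARY KEY,
                      channel_id text, channel_name text,
                      time_last_checked integer)''')
cursor.execute('''CREATE TABLE videos (
                      id integer PRIMARY KEY,
                      uploader_id integer, video_id text, title text,
                      time_published integer, description text)''')
cursor.execute('INSERT INTO subscribed_channels (channel_id, channel_name, time_last_checked)'
               ' VALUES (?, ?, ?)',
               ('UCxxxxxxxxxxxxxxxxxxxxxx', 'Example Channel A', 0))
connection.commit()

# Read the channel list up front so the inserts below do not disturb a cursor
# that is still iterating over the SELECT.
channels = cursor.execute(
    'SELECT id, channel_id, time_last_checked FROM subscribed_channels').fetchall()
for uploader_id, channel_id, time_last_checked in channels:
    db_videos = ((uploader_id, info['id'], info['title'],
                  info['time_published'], info['description'])
                 for info in fetch_new_videos(channel_id, time_last_checked))
    cursor.executemany('INSERT INTO videos (uploader_id, video_id, title,'
                       ' time_published, description) VALUES (?, ?, ?, ?, ?)',
                       db_videos)
    # Scope the timestamp update to the channel that was just checked.
    cursor.execute('UPDATE subscribed_channels SET time_last_checked = ? WHERE id = ?',
                   (int(time.time()), uploader_id))
connection.commit()
connection.close()

Fetching the rows with fetchall() before writing, and adding WHERE id = ? to the UPDATE, keeps a failure partway through from marking channels that were never rechecked as up to date; both are deliberate deviations from the hunk above, made for the sake of a runnable example.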