From f69969f1e0483b885c9ff4af3e1377b144ab4da0 Mon Sep 17 00:00:00 2001 From: Luke Sneeringer Date: Mon, 3 Nov 2014 14:50:01 -0600 Subject: [PATCH] Add vendored Redis. --- awx/lib/site-packages/redis/CHANGES | 426 +++ awx/lib/site-packages/redis/INSTALL | 6 + awx/lib/site-packages/redis/LICENSE | 22 + awx/lib/site-packages/redis/MANIFEST.in | 7 + awx/lib/site-packages/redis/PKG-INFO | 696 +++++ awx/lib/site-packages/redis/README.rst | 673 +++++ awx/lib/site-packages/redis/redis/__init__.py | 34 + awx/lib/site-packages/redis/redis/_compat.py | 79 + awx/lib/site-packages/redis/redis/client.py | 2651 +++++++++++++++++ .../site-packages/redis/redis/connection.py | 1017 +++++++ .../site-packages/redis/redis/exceptions.py | 71 + awx/lib/site-packages/redis/redis/lock.py | 272 ++ awx/lib/site-packages/redis/redis/sentinel.py | 294 ++ awx/lib/site-packages/redis/redis/utils.py | 33 + awx/lib/site-packages/redis/setup.cfg | 5 + awx/lib/site-packages/redis/setup.py | 61 + awx/lib/site-packages/redis/tests/__init__.py | 0 awx/lib/site-packages/redis/tests/conftest.py | 46 + .../redis/tests/test_commands.py | 1441 +++++++++ .../redis/tests/test_connection_pool.py | 402 +++ .../redis/tests/test_encoding.py | 33 + .../site-packages/redis/tests/test_lock.py | 167 ++ .../redis/tests/test_pipeline.py | 226 ++ .../site-packages/redis/tests/test_pubsub.py | 392 +++ .../redis/tests/test_scripting.py | 82 + .../redis/tests/test_sentinel.py | 173 ++ requirements/dev.txt | 1 + requirements/dev_local.txt | 1 + requirements/prod.txt | 1 + requirements/prod_local.txt | 1 + requirements/redis-2.10.3.tar.gz | Bin 0 -> 86532 bytes 31 files changed, 9313 insertions(+) create mode 100644 awx/lib/site-packages/redis/CHANGES create mode 100644 awx/lib/site-packages/redis/INSTALL create mode 100644 awx/lib/site-packages/redis/LICENSE create mode 100644 awx/lib/site-packages/redis/MANIFEST.in create mode 100644 awx/lib/site-packages/redis/PKG-INFO create mode 100644 
awx/lib/site-packages/redis/README.rst create mode 100644 awx/lib/site-packages/redis/redis/__init__.py create mode 100644 awx/lib/site-packages/redis/redis/_compat.py create mode 100755 awx/lib/site-packages/redis/redis/client.py create mode 100755 awx/lib/site-packages/redis/redis/connection.py create mode 100644 awx/lib/site-packages/redis/redis/exceptions.py create mode 100644 awx/lib/site-packages/redis/redis/lock.py create mode 100644 awx/lib/site-packages/redis/redis/sentinel.py create mode 100644 awx/lib/site-packages/redis/redis/utils.py create mode 100644 awx/lib/site-packages/redis/setup.cfg create mode 100644 awx/lib/site-packages/redis/setup.py create mode 100644 awx/lib/site-packages/redis/tests/__init__.py create mode 100644 awx/lib/site-packages/redis/tests/conftest.py create mode 100644 awx/lib/site-packages/redis/tests/test_commands.py create mode 100644 awx/lib/site-packages/redis/tests/test_connection_pool.py create mode 100644 awx/lib/site-packages/redis/tests/test_encoding.py create mode 100644 awx/lib/site-packages/redis/tests/test_lock.py create mode 100644 awx/lib/site-packages/redis/tests/test_pipeline.py create mode 100644 awx/lib/site-packages/redis/tests/test_pubsub.py create mode 100644 awx/lib/site-packages/redis/tests/test_scripting.py create mode 100644 awx/lib/site-packages/redis/tests/test_sentinel.py create mode 100644 requirements/redis-2.10.3.tar.gz diff --git a/awx/lib/site-packages/redis/CHANGES b/awx/lib/site-packages/redis/CHANGES new file mode 100644 index 0000000000..c455f05a94 --- /dev/null +++ b/awx/lib/site-packages/redis/CHANGES @@ -0,0 +1,426 @@ +* 2.10.3 + * Fixed a bug with the bytearray support introduced in 2.10.2. Thanks + Josh Owen. +* 2.10.2 + * Added support for Hiredis's new bytearray support. Thanks + https://github.com/tzickel + * POSSIBLE BACKWARDS INCOMPATBLE CHANGE: Fixed a possible race condition + when multiple threads share the same Lock instance with a timeout. 
Lock + tokens are now stored in thread local storage by default. If you have + code that acquires a lock in one thread and passes that lock instance to + another thread to release it, you need to disable thread local storage. + Refer to the doc strings on the Lock class about the thread_local + argument information. + * Fixed a regression in from_url where "charset" and "errors" weren't + valid options. "encoding" and "encoding_errors" are still accepted + and preferred. + * The "charset" and "errors" options have been deprecated. Passing + either to StrictRedis.__init__ or from_url will still work but will + also emit a DeprecationWarning. Instead use the "encoding" and + "encoding_errors" options. + * Fixed a compatability bug with Python 3 when the server closes a + connection. + * Added BITPOS command. Thanks https://github.com/jettify. + * Fixed a bug when attempting to send large values to Redis in a Pipeline. +* 2.10.1 + * Fixed a bug where Sentinel connections to a server that's no longer a + master and receives a READONLY error will disconnect and reconnect to + the master. +* 2.10.0 + * Discontinuted support for Python 2.5. Upgrade. You'll be happier. + * The HiRedis parser will now properly raise ConnectionErrors. + * Completely refactored PubSub support. Fixes all known PubSub bugs and + adds a bunch of new features. Docs can be found in the README under the + new "Publish / Subscribe" section. + * Added the new HyperLogLog commanads (PFADD, PFCOUNT, PFMERGE). Thanks + Pepijn de Vos and Vincent Ohprecio. + * Updated TTL and PTTL commands with Redis 2.8+ semantics. Thanks Markus + Kaiserswerth. + * *SCAN commands now return a long (int on Python3) cursor value rather + than the string representation. This might be slightly backwards + incompatible in code using *SCAN commands loops such as + "while cursor != '0':". + * Added extra *SCAN commands that return iterators instead of the normal + [cursor, data] type. 
Use scan_iter, hscan_iter, sscan_iter, and + zscan_iter for iterators. Thanks Mathieu Longtin. + * Added support for SLOWLOG commands. Thanks Rick van Hattem. + * Added lexicographical commands ZRANGEBYLEX, ZREMRANGEBYLEX, and ZLEXCOUNT + for sorted sets. + * Connection objects now support an optional argument, socket_read_size, + indicating how much data to read during each socket.recv() call. After + benchmarking, increased the default size to 64k, which dramatically + improves performance when fetching large values, such as many results + in a pipeline or a large (>1MB) string value. + * Improved the pack_command and send_packed_command functions to increase + performance when sending large (>1MB) values. + * Sentinel Connections to master servers now detect when a READONLY error + is encountered and disconnect themselves and all other active connections + to the same master so that the new master can be discovered. + * Fixed Sentinel state parsing on Python 3. + * Added support for SENTINEL MONITOR, SENTINEL REMOVE, and SENTINEL SET + commands. Thanks Greg Murphy. + * INFO ouput that doesn't follow the "key:value" format will now be + appended to a key named "__raw__" in the INFO dictionary. Thanks Pedro + Larroy. + * The "vagrant" directory contains a complete vagrant environment for + redis-py developers. The environment runs a Redis master, a Redis slave, + and 3 Sentinels. Future iterations of the test sutie will incorporate + more integration style tests, ensuring things like failover happen + correctly. + * It's now possible to create connection pool instances from a URL. + StrictRedis.from_url() now uses this feature to create a connection pool + instance and use that when creating a new client instance. Thanks + https://github.com/chillipino + * When creating client instances or connection pool instances from an URL, + it's now possible to pass additional options to the connection pool with + querystring arguments. 
+ * Fixed a bug where some encodings (like utf-16) were unusable on Python 3 + as command names and literals would get encoded. + * Added an SSLConnection class that allows for secure connections through + stunnel or other means. Construct and SSL connection with the sll=True + option on client classes, using the rediss:// scheme from an URL, or + by passing the SSLConnection class to a connection pool's + connection_class argument. Thanks https://github.com/oranagra. + * Added a socket_connect_timeout option to control how long to wait while + establishing a TCP connection before timing out. This lets the client + fail fast when attempting to connect to a downed server while keeping + a more lenient timeout for all other socket operations. + * Added TCP Keep-alive support by passing use the socket_keepalive=True + option. Finer grain control can be achieved using the + socket_keepalive_options option which expects a dictionary with any of + the keys (socket.TCP_KEEPIDLE, socket.TCP_KEEPCNT, socket.TCP_KEEPINTVL) + and integers for values. Thanks Yossi Gottlieb. + * Added a `retry_on_timeout` option that controls how socket.timeout errors + are handled. By default it is set to False and will cause the client to + raise a TimeoutError anytime a socket.timeout is encountered. If + `retry_on_timeout` is set to True, the client will retry a command that + timed out once like other `socket.error`s. + * Completely refactored the Lock system. There is now a LuaLock class + that's used when the Redis server is capable of running Lua scripts along + with a fallback class for Redis servers < 2.6. The new locks fix several + subtle race consider that the old lock could face. In additional, a + new method, "extend" is available on lock instances that all a lock + owner to extend the amount of time they have the lock for. Thanks to + Eli Finkelshteyn and https://github.com/chillipino for contributions. +* 2.9.1 + * IPv6 support. 
Thanks https://github.com/amashinchi +* 2.9.0 + * Performance improvement for packing commands when using the PythonParser. + Thanks Guillaume Viot. + * Executing an empty pipeline transaction no longer sends MULTI/EXEC to + the server. Thanks EliFinkelshteyn. + * Errors when authenticating (incorrect password) and selecting a database + now close the socket. + * Full Sentinel support thanks to Vitja Makarov. Thanks! + * Better repr support for client and connection pool instances. Thanks + Mark Roberts. + * Error messages that the server sends to the client are now included + in the client error message. Thanks Sangjin Lim. + * Added the SCAN, SSCAN, HSCAN, and ZSCAN commands. Thanks Jingchao Hu. + * ResponseErrors generated by pipeline execution provide addition context + including the position of the command in the pipeline and the actual + command text generated the error. + * ConnectionPools now play nicer in threaded environments that fork. Thanks + Christian Joergensen. +* 2.8.0 + * redis-py should play better with gevent when a gevent Timeout is raised. + Thanks leifkb. + * Added SENTINEL command. Thanks Anna Janackova. + * Fixed a bug where pipelines could potentially corrupt a connection + if the MULTI command generated a ResponseError. Thanks EliFinkelshteyn + for the report. + * Connections now call socket.shutdown() prior to socket.close() to + ensure communication ends immediately per the note at + http://docs.python.org/2/library/socket.html#socket.socket.close + Thanks to David Martin for pointing this out. + * Lock checks are now based on floats rather than ints. Thanks + Vitja Makarov. +* 2.7.6 + * Added CONFIG RESETSTAT command. Thanks Yossi Gottlieb. + * Fixed a bug introduced in 2.7.3 that caused issues with script objects + and pipelines. Thanks Carpentier Pierre-Francois. + * Converted redis-py's test suite to use the awesome py.test library. 
+ * Fixed a bug introduced in 2.7.5 that prevented a ConnectionError from + being raised when the Redis server is LOADING data. + * Added a BusyLoadingError exception that's raised when the Redis server + is starting up and not accepting commands yet. BusyLoadingError + subclasses ConnectionError, which this state previously returned. + Thanks Yossi Gottlieb. +* 2.7.5 + * DEL, HDEL and ZREM commands now return the numbers of keys deleted + instead of just True/False. + * from_url now supports URIs with a port number. Thanks Aaron Westendorf. +* 2.7.4 + * Added missing INCRBY method. Thanks Krzysztof Dorosz. + * SET now accepts the EX, PX, NX and XX options from Redis 2.6.12. These + options will generate errors if these options are used when connected + to a Redis server < 2.6.12. Thanks George Yoshida. +* 2.7.3 + * Fixed a bug with BRPOPLPUSH and lists with empty strings. + * All empty except: clauses have been replaced to only catch Exception + subclasses. This prevents a KeyboardInterrupt from triggering exception + handlers. Thanks Lucian Branescu Mihaila. + * All exceptions that are the result of redis server errors now share a + command Exception subclass, ServerError. Thanks Matt Robenolt. + * Prevent DISCARD from being called if MULTI wasn't also called. Thanks + Pete Aykroyd. + * SREM now returns an integer indicating the number of items removed from + the set. Thanks http://github.com/ronniekk. + * Fixed a bug with BGSAVE and BGREWRITEAOF response callbacks with Python3. + Thanks Nathan Wan. + * Added CLIENT GETNAME and CLIENT SETNAME commands. + Thanks http://github.com/bitterb. + * It's now possible to use len() on a pipeline instance to determine the + number of commands that will be executed. Thanks Jon Parise. + * Fixed a bug in INFO's parse routine with floating point numbers. Thanks + Ali Onur Uyar. + * Fixed a bug with BITCOUNT to allow `start` and `end` to both be zero. + Thanks Tim Bart. 
+ * The transaction() method now accepts a boolean keyword argument, + value_from_callable. By default, or if False is passes, the transaction() + method will return the value of the pipelines execution. Otherwise, it + will return whatever func() returns. + * Python3 compatibility fix ensuring we're not already bytes(). Thanks + Salimane Adjao Moustapha. + * Added PSETEX. Thanks YAMAMOTO Takashi. + * Added a BlockingConnectionPool to limit the number of connections that + can be created. Thanks James Arthur. + * SORT now accepts a `groups` option that if specified, will return + tuples of n-length, where n is the number of keys specified in the GET + argument. This allows for convenient row-based iteration. Thanks + Ionuț Arțăriși. +* 2.7.2 + * Parse errors are now *always* raised on multi/exec pipelines, regardless + of the `raise_on_error` flag. See + https://groups.google.com/forum/?hl=en&fromgroups=#!topic/redis-db/VUiEFT8U8U0 + for more info. +* 2.7.1 + * Packaged tests with source code +* 2.7.0 + * Added BITOP and BITCOUNT commands. Thanks Mark Tozzi. + * Added the TIME command. Thanks Jason Knight. + * Added support for LUA scripting. Thanks to Angus Peart, Drew Smathers, + Issac Kelly, Louis-Philippe Perron, Sean Bleier, Jeffrey Kaditz, and + Dvir Volk for various patches and contributions to this feature. + * Changed the default error handling in pipelines. By default, the first + error in a pipeline will now be raised. A new parameter to the + pipeline's execute, `raise_on_error`, can be set to False to keep the + old behavior of embeedding the exception instances in the result. + * Fixed a bug with pipelines where parse errors won't corrupt the + socket. + * Added the optional `number` argument to SRANDMEMBER for use with + Redis 2.6+ servers. + * Added PEXPIRE/PEXPIREAT/PTTL commands. Thanks Luper Rouch. + * Added INCRBYFLOAT/HINCRBYFLOAT commands. Thanks Nikita Uvarov. 
+ * High precision floating point values won't lose their precision when + being sent to the Redis server. Thanks Jason Oster and Oleg Pudeyev. + * Added CLIENT LIST/CLIENT KILL commands +* 2.6.2 + * `from_url` is now available as a classmethod on client classes. Thanks + Jon Parise for the patch. + * Fixed several encoding errors resulting from the Python 3.x support. +* 2.6.1 + * Python 3.x support! Big thanks to Alex Grönholm. + * Fixed a bug in the PythonParser's read_response that could hide an error + from the client (#251). +* 2.6.0 + * Changed (p)subscribe and (p)unsubscribe to no longer return messages + indicating the channel was subscribed/unsubscribed to. These messages + are available in the listen() loop instead. This is to prevent the + following scenario: + * Client A is subscribed to "foo" + * Client B publishes message to "foo" + * Client A subscribes to channel "bar" at the same time. + Prior to this change, the subscribe() call would return the published + messages on "foo" rather than the subscription confirmation to "bar". + * Added support for GETRANGE, thanks Jean-Philippe Caruana + * A new setting "decode_responses" specifies whether return values from + Redis commands get decoded automatically using the client's charset + value. Thanks to Frankie Dintino for the patch. +* 2.4.13 + * redis.from_url() can take an URL representing a Redis connection string + and return a client object. Thanks Kenneth Reitz for the patch. +* 2.4.12 + * ConnectionPool is now fork-safe. Thanks Josiah Carson for the patch. +* 2.4.11 + * AuthenticationError will now be correctly raised if an invalid password + is supplied. + * If Hiredis is unavailable, the HiredisParser will raise a RedisError + if selected manually. + * Made the INFO command more tolerant of Redis changes formatting. Fix + for #217. +* 2.4.10 + * Buffer reads from socket in the PythonParser. Fix for a Windows-specific + bug (#205). + * Added the OBJECT and DEBUG OBJECT commands. 
+ * Added __del__ methods for classes that hold on to resources that need to + be cleaned up. This should prevent resource leakage when these objects + leave scope due to misuse or unhandled exceptions. Thanks David Wolever + for the suggestion. + * Added the ECHO command for completeness. + * Fixed a bug where attempting to subscribe to a PubSub channel of a Redis + server that's down would blow out the stack. Fixes #179 and #195. Thanks + Ovidiu Predescu for the test case. + * StrictRedis's TTL command now returns a -1 when querying a key with no + expiration. The Redis class continues to return None. + * ZADD and SADD now return integer values indicating the number of items + added. Thanks Homer Strong. + * Renamed the base client class to StrictRedis, replacing ZADD and LREM in + favor of their official argument order. The Redis class is now a subclass + of StrictRedis, implementing the legacy redis-py implementations of ZADD + and LREM. Docs have been updated to suggesting the use of StrictRedis. + * SETEX in StrictRedis is now compliant with official Redis SETEX command. + the name, value, time implementation moved to "Redis" for backwards + compatability. +* 2.4.9 + * Removed socket retry logic in Connection. This is the responsbility of + the caller to determine if the command is safe and can be retried. Thanks + David Wolver. + * Added some extra guards around various types of exceptions being raised + when sending or parsing data. Thanks David Wolver and Denis Bilenko. +* 2.4.8 + * Imported with_statement from __future__ for Python 2.5 compatability. +* 2.4.7 + * Fixed a bug where some connections were not getting released back to the + connection pool after pipeline execution. + * Pipelines can now be used as context managers. This is the preferred way + of use to ensure that connections get cleaned up properly. Thanks + David Wolever. + * Added a convenience method called transaction() on the base Redis class. 
+ This method eliminates much of the boilerplate used when using pipelines + to watch Redis keys. See the documentation for details on usage. +* 2.4.6 + * Variadic arguments for SADD, SREM, ZREN, HDEL, LPUSH, and RPUSH. Thanks + Raphaël Vinot. + * (CRITICAL) Fixed an error in the Hiredis parser that occasionally caused + the socket connection to become corrupted and unusable. This became + noticeable once connection pools started to be used. + * ZRANGE, ZREVRANGE, ZRANGEBYSCORE, and ZREVRANGEBYSCORE now take an + additional optional argument, score_cast_func, which is a callable used + to cast the score value in the return type. The default is float. + * Removed the PUBLISH method from the PubSub class. Connections that are + [P]SUBSCRIBEd cannot issue PUBLISH commands, so it doesn't make sense + to have it here. + * Pipelines now contain WATCH and UNWATCH. Calling WATCH or UNWATCH from + the base client class will result in a deprecation warning. After + WATCHing one or more keys, the pipeline will be placed in immediate + execution mode until UNWATCH or MULTI are called. Refer to the new + pipeline docs in the README for more information. Thanks to David Wolever + and Randall Leeds for greatly helping with this. +* 2.4.5 + * The PythonParser now works better when reading zero length strings. +* 2.4.4 + * Fixed a typo introduced in 2.4.3 +* 2.4.3 + * Fixed a bug in the UnixDomainSocketConnection caused when trying to + form an error message after a socket error. +* 2.4.2 + * Fixed a bug in pipeline that caused an exception while trying to + reconnect after a connection timeout. +* 2.4.1 + * Fixed a bug in the PythonParser if disconnect is called before connect. +* 2.4.0 + * WARNING: 2.4 contains several backwards incompatible changes. + * Completely refactored Connection objects. Moved much of the Redis + protocol packing for requests here, and eliminated the nasty dependencies + it had on the client to do AUTH and SELECT commands on connect. 
+ * Connection objects now have a parser attribute. Parsers are responsible + for reading data Redis sends. Two parsers ship with redis-py: a + PythonParser and the HiRedis parser. redis-py will automatically use the + HiRedis parser if you have the Python hiredis module installed, otherwise + it will fall back to the PythonParser. You can force or the other, or even + an external one by passing the `parser_class` argument to ConnectionPool. + * Added a UnixDomainSocketConnection for users wanting to talk to the Redis + instance running on a local machine only. You can use this connection + by passing it to the `connection_class` argument of the ConnectionPool. + * Connections no longer derive from threading.local. See threading.local + note below. + * ConnectionPool has been comletely refactored. The ConnectionPool now + maintains a list of connections. The redis-py client only hangs on to + a ConnectionPool instance, calling get_connection() anytime it needs to + send a command. When get_connection() is called, the command name and + any keys involved in the command are passed as arguments. Subclasses of + ConnectionPool could use this information to identify the shard the keys + belong to and return a connection to it. ConnectionPool also implements + disconnect() to force all connections in the pool to disconnect from + the Redis server. + * redis-py no longer support the SELECT command. You can still connect to + a specific database by specifing it when instantiating a client instance + or by creating a connection pool. If you need to talk to multiplate + databases within your application, you should use a separate client + instance for each database you want to talk to. + * Completely refactored Publish/Subscribe support. The subscribe and listen + commands are no longer available on the redis-py Client class. Instead, + the `pubsub` method returns an instance of the PubSub class which contains + all publish/subscribe support. 
Note, you can still PUBLISH from the + redis-py client class if you desire. + * Removed support for all previously deprecated commands or options. + * redis-py no longer uses threading.local in any way. Since the Client + class no longer holds on to a connection, it's no longer needed. You can + now pass client instances between threads, and commands run on those + threads will retrieve an available connection from the pool, use it and + release it. It should now be trivial to use redis-py with eventlet or + greenlet. + * ZADD now accepts pairs of value=score keyword arguments. This should help + resolve the long standing #72. The older value and score arguments have + been deprecated in favor of the keyword argument style. + * Client instances now get their own copy of RESPONSE_CALLBACKS. The new + set_response_callback method adds a user defined callback to the instance. + * Support Jython, fixing #97. Thanks to Adam Vandenberg for the patch. + * Using __getitem__ now properly raises a KeyError when the key is not + found. Thanks Ionuț Arțăriși for the patch. + * Newer Redis versions return a LOADING message for some commands while + the database is loading from disk during server start. This could cause + problems with SELECT. We now force a socket disconnection prior to + raising a ResponseError so subsuquent connections have to reconnect and + re-select the appropriate database. Thanks to Benjamin Anderson for + finding this and fixing. +* 2.2.4 + * WARNING: Potential backwards incompatible change - Changed order of + parameters of ZREVRANGEBYSCORE to match those of the actual Redis command. + This is only backwards-incompatible if you were passing max and min via + keyword args. If passing by normal args, nothing in user code should have + to change. Thanks Stéphane Angel for the fix. + * Fixed INFO to properly parse the Redis data correctly for both 2.2.x and + 2.3+. Thanks Stéphane Angel for the fix. 
+ * Lock objects now store their timeout value as a float. This allows floats + to be used as timeout values. No changes to existing code required. + * WATCH now supports multiple keys. Thanks Rich Schumacher. + * Broke out some code that was Python 2.4 incompatible. redis-py should + now be useable on 2.4, but this hasn't actually been tested. Thanks + Dan Colish for the patch. + * Optimized some code using izip and islice. Should have a pretty good + speed up on larger data sets. Thanks Dan Colish. + * Better error handling when submitting an empty mapping to HMSET. Thanks + Dan Colish. + * Subscription status is now reset after every (re)connection. +* 2.2.3 + * Added support for Hiredis. To use, simply "pip install hiredis" or + "easy_install hiredis". Thanks for Pieter Noordhuis for the hiredis-py + bindings and the patch to redis-py. + * The connection class is chosen based on whether hiredis is installed + or not. To force the use of the PythonConnection, simply create + your own ConnectionPool instance with the connection_class argument + assigned to to PythonConnection class. + * Added missing command ZREVRANGEBYSCORE. Thanks Jay Baird for the patch. + * The INFO command should be parsed correctly on 2.2.x server versions + and is backwards compatible with older versions. Thanks Brett Hoerner. +* 2.2.2 + * Fixed a bug in ZREVRANK where retriving the rank of a value not in + the zset would raise an error. + * Fixed a bug in Connection.send where the errno import was getting + overwritten by a local variable. + * Fixed a bug in SLAVEOF when promoting an existing slave to a master. + * Reverted change of download URL back to redis-VERSION.tar.gz. 2.2.1's + change of this actually broke Pypi for Pip installs. Sorry! +* 2.2.1 + * Changed archive name to redis-py-VERSION.tar.gz to not conflict + with the Redis server archive. 
+* 2.2.0 + * Implemented SLAVEOF + * Implemented CONFIG as config_get and config_set + * Implemented GETBIT/SETBIT + * Implemented BRPOPLPUSH + * Implemented STRLEN + * Implemented PERSIST + * Implemented SETRANGE diff --git a/awx/lib/site-packages/redis/INSTALL b/awx/lib/site-packages/redis/INSTALL new file mode 100644 index 0000000000..951f7dea8a --- /dev/null +++ b/awx/lib/site-packages/redis/INSTALL @@ -0,0 +1,6 @@ + +Please use + python setup.py install + +and report errors to Andy McCurdy (sedrik@gmail.com) + diff --git a/awx/lib/site-packages/redis/LICENSE b/awx/lib/site-packages/redis/LICENSE new file mode 100644 index 0000000000..29a3fe3845 --- /dev/null +++ b/awx/lib/site-packages/redis/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012 Andy McCurdy + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/awx/lib/site-packages/redis/MANIFEST.in b/awx/lib/site-packages/redis/MANIFEST.in new file mode 100644 index 0000000000..7aaee12a1d --- /dev/null +++ b/awx/lib/site-packages/redis/MANIFEST.in @@ -0,0 +1,7 @@ +include CHANGES +include INSTALL +include LICENSE +include README.rst +exclude __pycache__ +recursive-include tests * +recursive-exclude tests *.pyc diff --git a/awx/lib/site-packages/redis/PKG-INFO b/awx/lib/site-packages/redis/PKG-INFO new file mode 100644 index 0000000000..adde85d9eb --- /dev/null +++ b/awx/lib/site-packages/redis/PKG-INFO @@ -0,0 +1,696 @@ +Metadata-Version: 1.1 +Name: redis +Version: 2.10.3 +Summary: Python client for Redis key-value store +Home-page: http://github.com/andymccurdy/redis-py +Author: Andy McCurdy +Author-email: sedrik@gmail.com +License: MIT +Description: redis-py + ======== + + The Python interface to the Redis key-value store. + + .. image:: https://secure.travis-ci.org/andymccurdy/redis-py.png?branch=master + :target: http://travis-ci.org/andymccurdy/redis-py + + Installation + ------------ + + redis-py requires a running Redis server. See `Redis's quickstart + `_ for installation instructions. + + To install redis-py, simply: + + .. code-block:: bash + + $ sudo pip install redis + + or alternatively (you really should be using pip though): + + .. code-block:: bash + + $ sudo easy_install redis + + or from source: + + .. code-block:: bash + + $ sudo python setup.py install + + + Getting Started + --------------- + + .. code-block:: pycon + + >>> import redis + >>> r = redis.StrictRedis(host='localhost', port=6379, db=0) + >>> r.set('foo', 'bar') + True + >>> r.get('foo') + 'bar' + + API Reference + ------------- + + The `official Redis command documentation `_ does a + great job of explaining each command in detail. redis-py exposes two client + classes that implement these commands. The StrictRedis class attempts to adhere + to the official command syntax. 
There are a few exceptions: + + * **SELECT**: Not implemented. See the explanation in the Thread Safety section + below. + * **DEL**: 'del' is a reserved keyword in the Python syntax. Therefore redis-py + uses 'delete' instead. + * **CONFIG GET|SET**: These are implemented separately as config_get or config_set. + * **MULTI/EXEC**: These are implemented as part of the Pipeline class. The + pipeline is wrapped with the MULTI and EXEC statements by default when it + is executed, which can be disabled by specifying transaction=False. + See more about Pipelines below. + * **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as a separate + class as it places the underlying connection in a state where it can't + execute non-pubsub commands. Calling the pubsub method from the Redis client + will return a PubSub instance where you can subscribe to channels and listen + for messages. You can only call PUBLISH from the Redis client (see + `this comment on issue #151 + `_ + for details). + * **SCAN/SSCAN/HSCAN/ZSCAN**: The *SCAN commands are implemented as they + exist in the Redis documentation. In addition, each command has an equivilant + iterator method. These are purely for convenience so the user doesn't have + to keep track of the cursor while iterating. Use the + scan_iter/sscan_iter/hscan_iter/zscan_iter methods for this behavior. + + In addition to the changes above, the Redis class, a subclass of StrictRedis, + overrides several other commands to provide backwards compatibility with older + versions of redis-py: + + * **LREM**: Order of 'num' and 'value' arguments reversed such that 'num' can + provide a default value of zero. + * **ZADD**: Redis specifies the 'score' argument before 'value'. These were swapped + accidentally when being implemented and not discovered until after people + were already using it. 
The Redis class expects \*args in the form of: + `name1, score1, name2, score2, ...` + * **SETEX**: Order of 'time' and 'value' arguments reversed. + + + More Detail + ----------- + + Connection Pools + ^^^^^^^^^^^^^^^^ + + Behind the scenes, redis-py uses a connection pool to manage connections to + a Redis server. By default, each Redis instance you create will in turn create + its own connection pool. You can override this behavior and use an existing + connection pool by passing an already created connection pool instance to the + connection_pool argument of the Redis class. You may choose to do this in order + to implement client side sharding or have finer grain control of how + connections are managed. + + .. code-block:: pycon + + >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0) + >>> r = redis.Redis(connection_pool=pool) + + Connections + ^^^^^^^^^^^ + + ConnectionPools manage a set of Connection instances. redis-py ships with two + types of Connections. The default, Connection, is a normal TCP socket based + connection. The UnixDomainSocketConnection allows for clients running on the + same device as the server to connect via a unix domain socket. To use a + UnixDomainSocketConnection connection, simply pass the unix_socket_path + argument, which is a string to the unix domain socket file. Additionally, make + sure the unixsocket parameter is defined in your redis.conf file. It's + commented out by default. + + .. code-block:: pycon + + >>> r = redis.Redis(unix_socket_path='/tmp/redis.sock') + + You can create your own Connection subclasses as well. This may be useful if + you want to control the socket behavior within an async framework. To + instantiate a client class using your own connection, you need to create + a connection pool, passing your class to the connection_class argument. + Other keyword parameters your pass to the pool will be passed to the class + specified during initialization. + + .. 
code-block:: pycon + + >>> pool = redis.ConnectionPool(connection_class=YourConnectionClass, + your_arg='...', ...) + + Parsers + ^^^^^^^ + + Parser classes provide a way to control how responses from the Redis server + are parsed. redis-py ships with two parser classes, the PythonParser and the + HiredisParser. By default, redis-py will attempt to use the HiredisParser if + you have the hiredis module installed and will fallback to the PythonParser + otherwise. + + Hiredis is a C library maintained by the core Redis team. Pieter Noordhuis was + kind enough to create Python bindings. Using Hiredis can provide up to a + 10x speed improvement in parsing responses from the Redis server. The + performance increase is most noticeable when retrieving many pieces of data, + such as from LRANGE or SMEMBERS operations. + + Hiredis is available on PyPI, and can be installed via pip or easy_install + just like redis-py. + + .. code-block:: bash + + $ pip install hiredis + + or + + .. code-block:: bash + + $ easy_install hiredis + + Response Callbacks + ^^^^^^^^^^^^^^^^^^ + + The client class uses a set of callbacks to cast Redis responses to the + appropriate Python type. There are a number of these callbacks defined on + the Redis client class in a dictionary called RESPONSE_CALLBACKS. + + Custom callbacks can be added on a per-instance basis using the + set_response_callback method. This method accepts two arguments: a command + name and the callback. Callbacks added in this manner are only valid on the + instance the callback is added to. If you want to define or override a callback + globally, you should make a subclass of the Redis client and add your callback + to its REDIS_CALLBACKS class dictionary. + + Response callbacks take at least one parameter: the response from the Redis + server. Keyword arguments may also be accepted in order to further control + how to interpret the response. 
These keyword arguments are specified during the + command's call to execute_command. The ZRANGE implementation demonstrates the + use of response callback keyword arguments with its "withscores" argument. + + Thread Safety + ^^^^^^^^^^^^^ + + Redis client instances can safely be shared between threads. Internally, + connection instances are only retrieved from the connection pool during + command execution, and returned to the pool directly after. Command execution + never modifies state on the client instance. + + However, there is one caveat: the Redis SELECT command. The SELECT command + allows you to switch the database currently in use by the connection. That + database remains selected until another is selected or until the connection is + closed. This creates an issue in that connections could be returned to the pool + that are connected to a different database. + + As a result, redis-py does not implement the SELECT command on client + instances. If you use multiple Redis databases within the same application, you + should create a separate client instance (and possibly a separate connection + pool) for each database. + + It is not safe to pass PubSub or Pipeline objects between threads. + + Pipelines + ^^^^^^^^^ + + Pipelines are a subclass of the base Redis class that provide support for + buffering multiple commands to the server in a single request. They can be used + to dramatically increase the performance of groups of commands by reducing the + number of back-and-forth TCP packets between the client and server. + + Pipelines are quite simple to use: + + .. code-block:: pycon + + >>> r = redis.Redis(...) + >>> r.set('bing', 'baz') + >>> # Use the pipeline() method to create a pipeline instance + >>> pipe = r.pipeline() + >>> # The following SET commands are buffered + >>> pipe.set('foo', 'bar') + >>> pipe.get('bing') + >>> # the EXECUTE call sends all buffered commands to the server, returning + >>> # a list of responses, one for each command. 
+ >>> pipe.execute() + [True, 'baz'] + + For ease of use, all commands being buffered into the pipeline return the + pipeline object itself. Therefore calls can be chained like: + + .. code-block:: pycon + + >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute() + [True, True, 6] + + In addition, pipelines can also ensure the buffered commands are executed + atomically as a group. This happens by default. If you want to disable the + atomic nature of a pipeline but still want to buffer commands, you can turn + off transactions. + + .. code-block:: pycon + + >>> pipe = r.pipeline(transaction=False) + + A common issue occurs when requiring atomic transactions but needing to + retrieve values in Redis prior for use within the transaction. For instance, + let's assume that the INCR command didn't exist and we need to build an atomic + version of INCR in Python. + + The completely naive implementation could GET the value, increment it in + Python, and SET the new value back. However, this is not atomic because + multiple clients could be doing this at the same time, each getting the same + value from GET. + + Enter the WATCH command. WATCH provides the ability to monitor one or more keys + prior to starting a transaction. If any of those keys change prior the + execution of that transaction, the entire transaction will be canceled and a + WatchError will be raised. To implement our own client-side INCR command, we + could do something like this: + + .. code-block:: pycon + + >>> with r.pipeline() as pipe: + ... while 1: + ... try: + ... # put a WATCH on the key that holds our sequence value + ... pipe.watch('OUR-SEQUENCE-KEY') + ... # after WATCHing, the pipeline is put into immediate execution + ... # mode until we tell it to start buffering commands again. + ... # this allows us to get the current value of our sequence + ... current_value = pipe.get('OUR-SEQUENCE-KEY') + ... next_value = int(current_value) + 1 + ... 
# now we can put the pipeline back into buffered mode with MULTI + ... pipe.multi() + ... pipe.set('OUR-SEQUENCE-KEY', next_value) + ... # and finally, execute the pipeline (the set command) + ... pipe.execute() + ... # if a WatchError wasn't raised during execution, everything + ... # we just did happened atomically. + ... break + ... except WatchError: + ... # another client must have changed 'OUR-SEQUENCE-KEY' between + ... # the time we started WATCHing it and the pipeline's execution. + ... # our best bet is to just retry. + ... continue + + Note that, because the Pipeline must bind to a single connection for the + duration of a WATCH, care must be taken to ensure that the connection is + returned to the connection pool by calling the reset() method. If the + Pipeline is used as a context manager (as in the example above) reset() + will be called automatically. Of course you can do this the manual way by + explicity calling reset(): + + .. code-block:: pycon + + >>> pipe = r.pipeline() + >>> while 1: + ... try: + ... pipe.watch('OUR-SEQUENCE-KEY') + ... ... + ... pipe.execute() + ... break + ... except WatchError: + ... continue + ... finally: + ... pipe.reset() + + A convenience method named "transaction" exists for handling all the + boilerplate of handling and retrying watch errors. It takes a callable that + should expect a single parameter, a pipeline object, and any number of keys to + be WATCHed. Our client-side INCR command above can be written like this, + which is much easier to read: + + .. code-block:: pycon + + >>> def client_side_incr(pipe): + ... current_value = pipe.get('OUR-SEQUENCE-KEY') + ... next_value = int(current_value) + 1 + ... pipe.multi() + ... pipe.set('OUR-SEQUENCE-KEY', next_value) + >>> + >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY') + [True] + + Publish / Subscribe + ^^^^^^^^^^^^^^^^^^^ + + redis-py includes a `PubSub` object that subscribes to channels and listens + for new messages. 
Creating a `PubSub` object is easy. + + .. code-block:: pycon + + >>> r = redis.StrictRedis(...) + >>> p = r.pubsub() + + Once a `PubSub` instance is created, channels and patterns can be subscribed + to. + + .. code-block:: pycon + + >>> p.subscribe('my-first-channel', 'my-second-channel', ...) + >>> p.psubscribe('my-*', ...) + + The `PubSub` instance is now subscribed to those channels/patterns. The + subscription confirmations can be seen by reading messages from the `PubSub` + instance. + + .. code-block:: pycon + + >>> p.get_message() + {'pattern': None, 'type': 'subscribe', 'channel': 'my-second-channel', 'data': 1L} + >>> p.get_message() + {'pattern': None, 'type': 'subscribe', 'channel': 'my-first-channel', 'data': 2L} + >>> p.get_message() + {'pattern': None, 'type': 'psubscribe', 'channel': 'my-*', 'data': 3L} + + Every message read from a `PubSub` instance will be a dictionary with the + following keys. + + * **type**: One of the following: 'subscribe', 'unsubscribe', 'psubscribe', + 'punsubscribe', 'message', 'pmessage' + * **channel**: The channel [un]subscribed to or the channel a message was + published to + * **pattern**: The pattern that matched a published message's channel. Will be + `None` in all cases except for 'pmessage' types. + * **data**: The message data. With [un]subscribe messages, this value will be + the number of channels and patterns the connection is currently subscribed + to. With [p]message messages, this value will be the actual published + message. + + Let's send a message now. + + .. code-block:: pycon + + # the publish method returns the number matching channel and pattern + # subscriptions. 
'my-first-channel' matches both the 'my-first-channel' + # subscription and the 'my-*' pattern subscription, so this message will + # be delivered to 2 channels/patterns + >>> r.publish('my-first-channel', 'some data') + 2 + >>> p.get_message() + {'channel': 'my-first-channel', 'data': 'some data', 'pattern': None, 'type': 'message'} + >>> p.get_message() + {'channel': 'my-first-channel', 'data': 'some data', 'pattern': 'my-*', 'type': 'pmessage'} + + Unsubscribing works just like subscribing. If no arguments are passed to + [p]unsubscribe, all channels or patterns will be unsubscribed from. + + .. code-block:: pycon + + >>> p.unsubscribe() + >>> p.punsubscribe('my-*') + >>> p.get_message() + {'channel': 'my-second-channel', 'data': 2L, 'pattern': None, 'type': 'unsubscribe'} + >>> p.get_message() + {'channel': 'my-first-channel', 'data': 1L, 'pattern': None, 'type': 'unsubscribe'} + >>> p.get_message() + {'channel': 'my-*', 'data': 0L, 'pattern': None, 'type': 'punsubscribe'} + + redis-py also allows you to register callback functions to handle published + messages. Message handlers take a single argument, the message, which is a + dictionary just like the examples above. To subscribe to a channel or pattern + with a message handler, pass the channel or pattern name as a keyword argument + with its value being the callback function. + + When a message is read on a channel or pattern with a message handler, the + message dictionary is created and passed to the message handler. In this case, + a `None` value is returned from get_message() since the message was already + handled. + + .. code-block:: pycon + + >>> def my_handler(message): + ... 
print 'MY HANDLER: ', message['data'] + >>> p.subscribe(**{'my-channel': my_handler}) + # read the subscribe confirmation message + >>> p.get_message() + {'pattern': None, 'type': 'subscribe', 'channel': 'my-channel', 'data': 1L} + >>> r.publish('my-channel', 'awesome data') + 1 + # for the message handler to work, we need to tell the instance to read data. + # this can be done in several ways (read more below). we'll just use + # the familiar get_message() function for now + >>> message = p.get_message() + MY HANDLER: awesome data + # note here that the my_handler callback printed the string above. + # `message` is None because the message was handled by our handler. + >>> print message + None + + If your application is not interested in the (sometimes noisy) + subscribe/unsubscribe confirmation messages, you can ignore them by passing + `ignore_subscribe_messages=True` to `r.pubsub()`. This will cause all + subscribe/unsubscribe messages to be read, but they won't bubble up to your + application. + + .. code-block:: pycon + + >>> p = r.pubsub(ignore_subscribe_messages=True) + >>> p.subscribe('my-channel') + >>> p.get_message() # hides the subscribe message and returns None + >>> r.publish('my-channel', 'my data') + 1 + >>> p.get_message() + {'channel': 'my-channel', 'data': 'my data', 'pattern': None, 'type': 'message'} + + There are three different strategies for reading messages. + + The examples above have been using `pubsub.get_message()`. Behind the scenes, + `get_message()` uses the system's 'select' module to quickly poll the + connection's socket. If there's data available to be read, `get_message()` will + read it, format the message and return it or pass it to a message handler. If + there's no data to be read, `get_message()` will immediately return None. This + makes it trivial to integrate into an existing event loop inside your + application. + + .. 
code-block:: pycon + + >>> while True: + >>> message = p.get_message() + >>> if message: + >>> # do something with the message + >>> time.sleep(0.001) # be nice to the system :) + + Older versions of redis-py only read messages with `pubsub.listen()`. listen() + is a generator that blocks until a message is available. If your application + doesn't need to do anything else but receive and act on messages received from + redis, listen() is an easy way to get up and running. + + .. code-block:: pycon + + >>> for message in p.listen(): + ... # do something with the message + + The third option runs an event loop in a separate thread. + `pubsub.run_in_thread()` creates a new thread and starts the event loop. The + thread object is returned to the caller of `run_in_thread()`. The caller can + use the `thread.stop()` method to shut down the event loop and thread. Behind + the scenes, this is simply a wrapper around `get_message()` that runs in a + separate thread, essentially creating a tiny non-blocking event loop for you. + `run_in_thread()` takes an optional `sleep_time` argument. If specified, the + event loop will call `time.sleep()` with the value in each iteration of the + loop. + + Note: Since we're running in a separate thread, there's no way to handle + messages that aren't automatically handled with registered message handlers. + Therefore, redis-py prevents you from calling `run_in_thread()` if you're + subscribed to patterns or channels that don't have message handlers attached. + + .. code-block:: pycon + + >>> p.subscribe(**{'my-channel': my_handler}) + >>> thread = p.run_in_thread(sleep_time=0.001) + # the event loop is now running in the background processing messages + # when it's time to shut it down... + >>> thread.stop() + + A PubSub object adheres to the same encoding semantics as the client instance + it was created from. Any channel or pattern that's unicode will be encoded + using the `charset` specified on the client before being sent to Redis. 
If the + client's `decode_responses` flag is set to False (the default), the + 'channel', 'pattern' and 'data' values in message dictionaries will be byte + strings (str on Python 2, bytes on Python 3). If the client's + `decode_responses` is True, then the 'channel', 'pattern' and 'data' values + will be automatically decoded to unicode strings using the client's `charset`. + + PubSub objects remember what channels and patterns they are subscribed to. In + the event of a disconnection such as a network error or timeout, the + PubSub object will re-subscribe to all prior channels and patterns when + reconnecting. Messages that were published while the client was disconnected + cannot be delivered. When you're finished with a PubSub object, call its + `.close()` method to shutdown the connection. + + .. code-block:: pycon + + >>> p = r.pubsub() + >>> ... + >>> p.close() + + LUA Scripting + ^^^^^^^^^^^^^ + + redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there are + a number of edge cases that make these commands tedious to use in real world + scenarios. Therefore, redis-py exposes a Script object that makes scripting + much easier to use. + + To create a Script instance, use the `register_script` function on a client + instance passing the LUA code as the first argument. `register_script` returns + a Script instance that you can use throughout your code. + + The following trivial LUA script accepts two parameters: the name of a key and + a multiplier value. The script fetches the value stored in the key, multiplies + it with the multiplier value and returns the result. + + .. code-block:: pycon + + >>> r = redis.StrictRedis() + >>> lua = """ + ... local value = redis.call('GET', KEYS[1]) + ... value = tonumber(value) + ... return value * ARGV[1]""" + >>> multiply = r.register_script(lua) + + `multiply` is now a Script instance that is invoked by calling it like a + function. 
Script instances accept the following optional arguments: + + * **keys**: A list of key names that the script will access. This becomes the + KEYS list in LUA. + * **args**: A list of argument values. This becomes the ARGV list in LUA. + * **client**: A redis-py Client or Pipeline instance that will invoke the + script. If client isn't specified, the client that initially + created the Script instance (the one that `register_script` was + invoked from) will be used. + + Continuing the example from above: + + .. code-block:: pycon + + >>> r.set('foo', 2) + >>> multiply(keys=['foo'], args=[5]) + 10 + + The value of key 'foo' is set to 2. When multiply is invoked, the 'foo' key is + passed to the script along with the multiplier value of 5. LUA executes the + script and returns the result, 10. + + Script instances can be executed using a different client instance, even one + that points to a completely different Redis server. + + .. code-block:: pycon + + >>> r2 = redis.StrictRedis('redis2.example.com') + >>> r2.set('foo', 3) + >>> multiply(keys=['foo'], args=[5], client=r2) + 15 + + The Script object ensures that the LUA script is loaded into Redis's script + cache. In the event of a NOSCRIPT error, it will load the script and retry + executing it. + + Script objects can also be used in pipelines. The pipeline instance should be + passed as the client argument when calling the script. Care is taken to ensure + that the script is registered in Redis's script cache just prior to pipeline + execution. + + .. code-block:: pycon + + >>> pipe = r.pipeline() + >>> pipe.set('foo', 5) + >>> multiply(keys=['foo'], args=[5], client=pipe) + >>> pipe.execute() + [True, 25] + + Sentinel support + ^^^^^^^^^^^^^^^^ + + redis-py can be used together with `Redis Sentinel `_ + to discover Redis nodes. You need to have at least one Sentinel daemon running + in order to use redis-py's Sentinel support. + + Connecting redis-py to the Sentinel instance(s) is easy. 
You can use a + Sentinel connection to discover the master and slaves network addresses: + + .. code-block:: pycon + + >>> from redis.sentinel import Sentinel + >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) + >>> sentinel.discover_master('mymaster') + ('127.0.0.1', 6379) + >>> sentinel.discover_slaves('mymaster') + [('127.0.0.1', 6380)] + + You can also create Redis client connections from a Sentinel instance. You can + connect to either the master (for write operations) or a slave (for read-only + operations). + + .. code-block:: pycon + + >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) + >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1) + >>> master.set('foo', 'bar') + >>> slave.get('foo') + 'bar' + + The master and slave objects are normal StrictRedis instances with their + connection pool bound to the Sentinel instance. When a Sentinel backed client + attempts to establish a connection, it first queries the Sentinel servers to + determine an appropriate host to connect to. If no server is found, + a MasterNotFoundError or SlaveNotFoundError is raised. Both exceptions are + subclasses of ConnectionError. + + When trying to connect to a slave client, the Sentinel connection pool will + iterate over the list of slaves until it finds one that can be connected to. + If no slaves can be connected to, a connection will be established with the + master. + + See `Guidelines for Redis clients with support for Redis Sentinel + `_ to learn more about Redis Sentinel. + + Scan Iterators + ^^^^^^^^^^^^^^ + + The *SCAN commands introduced in Redis 2.8 can be cumbersome to use. While + these commands are fully supported, redis-py also exposes the following methods + that return Python iterators for convenience: `scan_iter`, `hscan_iter`, + `sscan_iter` and `zscan_iter`. + + .. code-block:: pycon + + >>> for key, value in (('A', '1'), ('B', '2'), ('C', '3')): + ... r.set(key, value) + >>> for key in r.scan_iter(): + ... 
print key, r.get(key) + A 1 + B 2 + C 3 + + Author + ^^^^^^ + + redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com). + It can be found here: http://github.com/andymccurdy/redis-py + + Special thanks to: + + * Ludovico Magnocavallo, author of the original Python Redis client, from + which some of the socket code is still used. + * Alexander Solovyov for ideas on the generic response callback system. + * Paul Hubbard for initial packaging support. + + +Keywords: Redis,key-value store +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 diff --git a/awx/lib/site-packages/redis/README.rst b/awx/lib/site-packages/redis/README.rst new file mode 100644 index 0000000000..41bb92e822 --- /dev/null +++ b/awx/lib/site-packages/redis/README.rst @@ -0,0 +1,673 @@ +redis-py +======== + +The Python interface to the Redis key-value store. + +.. image:: https://secure.travis-ci.org/andymccurdy/redis-py.png?branch=master + :target: http://travis-ci.org/andymccurdy/redis-py + +Installation +------------ + +redis-py requires a running Redis server. See `Redis's quickstart +`_ for installation instructions. + +To install redis-py, simply: + +.. code-block:: bash + + $ sudo pip install redis + +or alternatively (you really should be using pip though): + +.. code-block:: bash + + $ sudo easy_install redis + +or from source: + +.. code-block:: bash + + $ sudo python setup.py install + + +Getting Started +--------------- + +.. 
code-block:: pycon + + >>> import redis + >>> r = redis.StrictRedis(host='localhost', port=6379, db=0) + >>> r.set('foo', 'bar') + True + >>> r.get('foo') + 'bar' + +API Reference +------------- + +The `official Redis command documentation `_ does a +great job of explaining each command in detail. redis-py exposes two client +classes that implement these commands. The StrictRedis class attempts to adhere +to the official command syntax. There are a few exceptions: + +* **SELECT**: Not implemented. See the explanation in the Thread Safety section + below. +* **DEL**: 'del' is a reserved keyword in the Python syntax. Therefore redis-py + uses 'delete' instead. +* **CONFIG GET|SET**: These are implemented separately as config_get or config_set. +* **MULTI/EXEC**: These are implemented as part of the Pipeline class. The + pipeline is wrapped with the MULTI and EXEC statements by default when it + is executed, which can be disabled by specifying transaction=False. + See more about Pipelines below. +* **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as a separate + class as it places the underlying connection in a state where it can't + execute non-pubsub commands. Calling the pubsub method from the Redis client + will return a PubSub instance where you can subscribe to channels and listen + for messages. You can only call PUBLISH from the Redis client (see + `this comment on issue #151 + `_ + for details). +* **SCAN/SSCAN/HSCAN/ZSCAN**: The *SCAN commands are implemented as they + exist in the Redis documentation. In addition, each command has an equivalent + iterator method. These are purely for convenience so the user doesn't have + to keep track of the cursor while iterating. Use the + scan_iter/sscan_iter/hscan_iter/zscan_iter methods for this behavior. 
+ +In addition to the changes above, the Redis class, a subclass of StrictRedis, +overrides several other commands to provide backwards compatibility with older +versions of redis-py: + +* **LREM**: Order of 'num' and 'value' arguments reversed such that 'num' can + provide a default value of zero. +* **ZADD**: Redis specifies the 'score' argument before 'value'. These were swapped + accidentally when being implemented and not discovered until after people + were already using it. The Redis class expects \*args in the form of: + `name1, score1, name2, score2, ...` +* **SETEX**: Order of 'time' and 'value' arguments reversed. + + +More Detail +----------- + +Connection Pools +^^^^^^^^^^^^^^^^ + +Behind the scenes, redis-py uses a connection pool to manage connections to +a Redis server. By default, each Redis instance you create will in turn create +its own connection pool. You can override this behavior and use an existing +connection pool by passing an already created connection pool instance to the +connection_pool argument of the Redis class. You may choose to do this in order +to implement client side sharding or have finer grain control of how +connections are managed. + +.. code-block:: pycon + + >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0) + >>> r = redis.Redis(connection_pool=pool) + +Connections +^^^^^^^^^^^ + +ConnectionPools manage a set of Connection instances. redis-py ships with two +types of Connections. The default, Connection, is a normal TCP socket based +connection. The UnixDomainSocketConnection allows for clients running on the +same device as the server to connect via a unix domain socket. To use a +UnixDomainSocketConnection connection, simply pass the unix_socket_path +argument, which is a string to the unix domain socket file. Additionally, make +sure the unixsocket parameter is defined in your redis.conf file. It's +commented out by default. + +.. 
code-block:: pycon + + >>> r = redis.Redis(unix_socket_path='/tmp/redis.sock') + +You can create your own Connection subclasses as well. This may be useful if +you want to control the socket behavior within an async framework. To +instantiate a client class using your own connection, you need to create +a connection pool, passing your class to the connection_class argument. +Other keyword parameters your pass to the pool will be passed to the class +specified during initialization. + +.. code-block:: pycon + + >>> pool = redis.ConnectionPool(connection_class=YourConnectionClass, + your_arg='...', ...) + +Parsers +^^^^^^^ + +Parser classes provide a way to control how responses from the Redis server +are parsed. redis-py ships with two parser classes, the PythonParser and the +HiredisParser. By default, redis-py will attempt to use the HiredisParser if +you have the hiredis module installed and will fallback to the PythonParser +otherwise. + +Hiredis is a C library maintained by the core Redis team. Pieter Noordhuis was +kind enough to create Python bindings. Using Hiredis can provide up to a +10x speed improvement in parsing responses from the Redis server. The +performance increase is most noticeable when retrieving many pieces of data, +such as from LRANGE or SMEMBERS operations. + +Hiredis is available on PyPI, and can be installed via pip or easy_install +just like redis-py. + +.. code-block:: bash + + $ pip install hiredis + +or + +.. code-block:: bash + + $ easy_install hiredis + +Response Callbacks +^^^^^^^^^^^^^^^^^^ + +The client class uses a set of callbacks to cast Redis responses to the +appropriate Python type. There are a number of these callbacks defined on +the Redis client class in a dictionary called RESPONSE_CALLBACKS. + +Custom callbacks can be added on a per-instance basis using the +set_response_callback method. This method accepts two arguments: a command +name and the callback. 
Callbacks added in this manner are only valid on the +instance the callback is added to. If you want to define or override a callback +globally, you should make a subclass of the Redis client and add your callback +to its REDIS_CALLBACKS class dictionary. + +Response callbacks take at least one parameter: the response from the Redis +server. Keyword arguments may also be accepted in order to further control +how to interpret the response. These keyword arguments are specified during the +command's call to execute_command. The ZRANGE implementation demonstrates the +use of response callback keyword arguments with its "withscores" argument. + +Thread Safety +^^^^^^^^^^^^^ + +Redis client instances can safely be shared between threads. Internally, +connection instances are only retrieved from the connection pool during +command execution, and returned to the pool directly after. Command execution +never modifies state on the client instance. + +However, there is one caveat: the Redis SELECT command. The SELECT command +allows you to switch the database currently in use by the connection. That +database remains selected until another is selected or until the connection is +closed. This creates an issue in that connections could be returned to the pool +that are connected to a different database. + +As a result, redis-py does not implement the SELECT command on client +instances. If you use multiple Redis databases within the same application, you +should create a separate client instance (and possibly a separate connection +pool) for each database. + +It is not safe to pass PubSub or Pipeline objects between threads. + +Pipelines +^^^^^^^^^ + +Pipelines are a subclass of the base Redis class that provide support for +buffering multiple commands to the server in a single request. They can be used +to dramatically increase the performance of groups of commands by reducing the +number of back-and-forth TCP packets between the client and server. 
+ +Pipelines are quite simple to use: + +.. code-block:: pycon + + >>> r = redis.Redis(...) + >>> r.set('bing', 'baz') + >>> # Use the pipeline() method to create a pipeline instance + >>> pipe = r.pipeline() + >>> # The following SET commands are buffered + >>> pipe.set('foo', 'bar') + >>> pipe.get('bing') + >>> # the EXECUTE call sends all buffered commands to the server, returning + >>> # a list of responses, one for each command. + >>> pipe.execute() + [True, 'baz'] + +For ease of use, all commands being buffered into the pipeline return the +pipeline object itself. Therefore calls can be chained like: + +.. code-block:: pycon + + >>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute() + [True, True, 6] + +In addition, pipelines can also ensure the buffered commands are executed +atomically as a group. This happens by default. If you want to disable the +atomic nature of a pipeline but still want to buffer commands, you can turn +off transactions. + +.. code-block:: pycon + + >>> pipe = r.pipeline(transaction=False) + +A common issue occurs when requiring atomic transactions but needing to +retrieve values in Redis prior for use within the transaction. For instance, +let's assume that the INCR command didn't exist and we need to build an atomic +version of INCR in Python. + +The completely naive implementation could GET the value, increment it in +Python, and SET the new value back. However, this is not atomic because +multiple clients could be doing this at the same time, each getting the same +value from GET. + +Enter the WATCH command. WATCH provides the ability to monitor one or more keys +prior to starting a transaction. If any of those keys change prior the +execution of that transaction, the entire transaction will be canceled and a +WatchError will be raised. To implement our own client-side INCR command, we +could do something like this: + +.. code-block:: pycon + + >>> with r.pipeline() as pipe: + ... while 1: + ... try: + ... 
# put a WATCH on the key that holds our sequence value + ... pipe.watch('OUR-SEQUENCE-KEY') + ... # after WATCHing, the pipeline is put into immediate execution + ... # mode until we tell it to start buffering commands again. + ... # this allows us to get the current value of our sequence + ... current_value = pipe.get('OUR-SEQUENCE-KEY') + ... next_value = int(current_value) + 1 + ... # now we can put the pipeline back into buffered mode with MULTI + ... pipe.multi() + ... pipe.set('OUR-SEQUENCE-KEY', next_value) + ... # and finally, execute the pipeline (the set command) + ... pipe.execute() + ... # if a WatchError wasn't raised during execution, everything + ... # we just did happened atomically. + ... break + ... except WatchError: + ... # another client must have changed 'OUR-SEQUENCE-KEY' between + ... # the time we started WATCHing it and the pipeline's execution. + ... # our best bet is to just retry. + ... continue + +Note that, because the Pipeline must bind to a single connection for the +duration of a WATCH, care must be taken to ensure that the connection is +returned to the connection pool by calling the reset() method. If the +Pipeline is used as a context manager (as in the example above) reset() +will be called automatically. Of course you can do this the manual way by +explicity calling reset(): + +.. code-block:: pycon + + >>> pipe = r.pipeline() + >>> while 1: + ... try: + ... pipe.watch('OUR-SEQUENCE-KEY') + ... ... + ... pipe.execute() + ... break + ... except WatchError: + ... continue + ... finally: + ... pipe.reset() + +A convenience method named "transaction" exists for handling all the +boilerplate of handling and retrying watch errors. It takes a callable that +should expect a single parameter, a pipeline object, and any number of keys to +be WATCHed. Our client-side INCR command above can be written like this, +which is much easier to read: + +.. code-block:: pycon + + >>> def client_side_incr(pipe): + ... 
current_value = pipe.get('OUR-SEQUENCE-KEY') + ... next_value = int(current_value) + 1 + ... pipe.multi() + ... pipe.set('OUR-SEQUENCE-KEY', next_value) + >>> + >>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY') + [True] + +Publish / Subscribe +^^^^^^^^^^^^^^^^^^^ + +redis-py includes a `PubSub` object that subscribes to channels and listens +for new messages. Creating a `PubSub` object is easy. + +.. code-block:: pycon + + >>> r = redis.StrictRedis(...) + >>> p = r.pubsub() + +Once a `PubSub` instance is created, channels and patterns can be subscribed +to. + +.. code-block:: pycon + + >>> p.subscribe('my-first-channel', 'my-second-channel', ...) + >>> p.psubscribe('my-*', ...) + +The `PubSub` instance is now subscribed to those channels/patterns. The +subscription confirmations can be seen by reading messages from the `PubSub` +instance. + +.. code-block:: pycon + + >>> p.get_message() + {'pattern': None, 'type': 'subscribe', 'channel': 'my-second-channel', 'data': 1L} + >>> p.get_message() + {'pattern': None, 'type': 'subscribe', 'channel': 'my-first-channel', 'data': 2L} + >>> p.get_message() + {'pattern': None, 'type': 'psubscribe', 'channel': 'my-*', 'data': 3L} + +Every message read from a `PubSub` instance will be a dictionary with the +following keys. + +* **type**: One of the following: 'subscribe', 'unsubscribe', 'psubscribe', + 'punsubscribe', 'message', 'pmessage' +* **channel**: The channel [un]subscribed to or the channel a message was + published to +* **pattern**: The pattern that matched a published message's channel. Will be + `None` in all cases except for 'pmessage' types. +* **data**: The message data. With [un]subscribe messages, this value will be + the number of channels and patterns the connection is currently subscribed + to. With [p]message messages, this value will be the actual published + message. + +Let's send a message now. + +.. 
code-block:: pycon + + # the publish method returns the number matching channel and pattern + # subscriptions. 'my-first-channel' matches both the 'my-first-channel' + # subscription and the 'my-*' pattern subscription, so this message will + # be delivered to 2 channels/patterns + >>> r.publish('my-first-channel', 'some data') + 2 + >>> p.get_message() + {'channel': 'my-first-channel', 'data': 'some data', 'pattern': None, 'type': 'message'} + >>> p.get_message() + {'channel': 'my-first-channel', 'data': 'some data', 'pattern': 'my-*', 'type': 'pmessage'} + +Unsubscribing works just like subscribing. If no arguments are passed to +[p]unsubscribe, all channels or patterns will be unsubscribed from. + +.. code-block:: pycon + + >>> p.unsubscribe() + >>> p.punsubscribe('my-*') + >>> p.get_message() + {'channel': 'my-second-channel', 'data': 2L, 'pattern': None, 'type': 'unsubscribe'} + >>> p.get_message() + {'channel': 'my-first-channel', 'data': 1L, 'pattern': None, 'type': 'unsubscribe'} + >>> p.get_message() + {'channel': 'my-*', 'data': 0L, 'pattern': None, 'type': 'punsubscribe'} + +redis-py also allows you to register callback functions to handle published +messages. Message handlers take a single argument, the message, which is a +dictionary just like the examples above. To subscribe to a channel or pattern +with a message handler, pass the channel or pattern name as a keyword argument +with its value being the callback function. + +When a message is read on a channel or pattern with a message handler, the +message dictionary is created and passed to the message handler. In this case, +a `None` value is returned from get_message() since the message was already +handled. + +.. code-block:: pycon + + >>> def my_handler(message): + ... 
print 'MY HANDLER: ', message['data'] + >>> p.subscribe(**{'my-channel': my_handler}) + # read the subscribe confirmation message + >>> p.get_message() + {'pattern': None, 'type': 'subscribe', 'channel': 'my-channel', 'data': 1L} + >>> r.publish('my-channel', 'awesome data') + 1 + # for the message handler to work, we need tell the instance to read data. + # this can be done in several ways (read more below). we'll just use + # the familiar get_message() function for now + >>> message = p.get_message() + MY HANDLER: awesome data + # note here that the my_handler callback printed the string above. + # `message` is None because the message was handled by our handler. + >>> print message + None + +If your application is not interested in the (sometimes noisy) +subscribe/unsubscribe confirmation messages, you can ignore them by passing +`ignore_subscribe_messages=True` to `r.pubsub()`. This will cause all +subscribe/unsubscribe messages to be read, but they won't bubble up to your +application. + +.. code-block:: pycon + + >>> p = r.pubsub(ignore_subscribe_messages=True) + >>> p.subscribe('my-channel') + >>> p.get_message() # hides the subscribe message and returns None + >>> r.publish('my-channel') + 1 + >>> p.get_message() + {'channel': 'my-channel', data': 'my data', 'pattern': None, 'type': 'message'} + +There are three different strategies for reading messages. + +The examples above have been using `pubsub.get_message()`. Behind the scenes, +`get_message()` uses the system's 'select' module to quickly poll the +connection's socket. If there's data available to be read, `get_message()` will +read it, format the message and return it or pass it to a message handler. If +there's no data to be read, `get_message()` will immediately return None. This +makes it trivial to integrate into an existing event loop inside your +application. + +.. 
code-block:: pycon + + >>> while True: + >>> message = p.get_message() + >>> if message: + >>> # do something with the message + >>> time.sleep(0.001) # be nice to the system :) + +Older versions of redis-py only read messages with `pubsub.listen()`. listen() +is a generator that blocks until a message is available. If your application +doesn't need to do anything else but receive and act on messages received from +redis, listen() is an easy way to get up an running. + +.. code-block:: pycon + + >>> for message in p.listen(): + ... # do something with the message + +The third option runs an event loop in a separate thread. +`pubsub.run_in_thread()` creates a new thread and starts the event loop. The +thread object is returned to the caller of `run_in_thread()`. The caller can +use the `thread.stop()` method to shut down the event loop and thread. Behind +the scenes, this is simply a wrapper around `get_message()` that runs in a +separate thread, essentially creating a tiny non-blocking event loop for you. +`run_in_thread()` takes an optional `sleep_time` argument. If specified, the +event loop will call `time.sleep()` with the value in each iteration of the +loop. + +Note: Since we're running in a separate thread, there's no way to handle +messages that aren't automatically handled with registered message handlers. +Therefore, redis-py prevents you from calling `run_in_thread()` if you're +subscribed to patterns or channels that don't have message handlers attached. + +.. code-block:: pycon + + >>> p.subscribe(**{'my-channel': my_handler}) + >>> thread = p.run_in_thread(sleep_time=0.001) + # the event loop is now running in the background processing messages + # when it's time to shut it down... + >>> thread.stop() + +A PubSub object adheres to the same encoding semantics as the client instance +it was created from. Any channel or pattern that's unicode will be encoded +using the `charset` specified on the client before being sent to Redis. 
If the +client's `decode_responses` flag is set the False (the default), the +'channel', 'pattern' and 'data' values in message dictionaries will be byte +strings (str on Python 2, bytes on Python 3). If the client's +`decode_responses` is True, then the 'channel', 'pattern' and 'data' values +will be automatically decoded to unicode strings using the client's `charset`. + +PubSub objects remember what channels and patterns they are subscribed to. In +the event of a disconnection such as a network error or timeout, the +PubSub object will re-subscribe to all prior channels and patterns when +reconnecting. Messages that were published while the client was disconnected +cannot be delivered. When you're finished with a PubSub object, call its +`.close()` method to shutdown the connection. + +.. code-block:: pycon + + >>> p = r.pubsub() + >>> ... + >>> p.close() + +LUA Scripting +^^^^^^^^^^^^^ + +redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there are +a number of edge cases that make these commands tedious to use in real world +scenarios. Therefore, redis-py exposes a Script object that makes scripting +much easier to use. + +To create a Script instance, use the `register_script` function on a client +instance passing the LUA code as the first argument. `register_script` returns +a Script instance that you can use throughout your code. + +The following trivial LUA script accepts two parameters: the name of a key and +a multiplier value. The script fetches the value stored in the key, multiplies +it with the multiplier value and returns the result. + +.. code-block:: pycon + + >>> r = redis.StrictRedis() + >>> lua = """ + ... local value = redis.call('GET', KEYS[1]) + ... value = tonumber(value) + ... return value * ARGV[1]""" + >>> multiply = r.register_script(lua) + +`multiply` is now a Script instance that is invoked by calling it like a +function. 
Script instances accept the following optional arguments: + +* **keys**: A list of key names that the script will access. This becomes the + KEYS list in LUA. +* **args**: A list of argument values. This becomes the ARGV list in LUA. +* **client**: A redis-py Client or Pipeline instance that will invoke the + script. If client isn't specified, the client that intiially + created the Script instance (the one that `register_script` was + invoked from) will be used. + +Continuing the example from above: + +.. code-block:: pycon + + >>> r.set('foo', 2) + >>> multiply(keys=['foo'], args=[5]) + 10 + +The value of key 'foo' is set to 2. When multiply is invoked, the 'foo' key is +passed to the script along with the multiplier value of 5. LUA executes the +script and returns the result, 10. + +Script instances can be executed using a different client instance, even one +that points to a completely different Redis server. + +.. code-block:: pycon + + >>> r2 = redis.StrictRedis('redis2.example.com') + >>> r2.set('foo', 3) + >>> multiply(keys=['foo'], args=[5], client=r2) + 15 + +The Script object ensures that the LUA script is loaded into Redis's script +cache. In the event of a NOSCRIPT error, it will load the script and retry +executing it. + +Script objects can also be used in pipelines. The pipeline instance should be +passed as the client argument when calling the script. Care is taken to ensure +that the script is registered in Redis's script cache just prior to pipeline +execution. + +.. code-block:: pycon + + >>> pipe = r.pipeline() + >>> pipe.set('foo', 5) + >>> multiply(keys=['foo'], args=[5], client=pipe) + >>> pipe.execute() + [True, 25] + +Sentinel support +^^^^^^^^^^^^^^^^ + +redis-py can be used together with `Redis Sentinel `_ +to discover Redis nodes. You need to have at least one Sentinel daemon running +in order to use redis-py's Sentinel support. + +Connecting redis-py to the Sentinel instance(s) is easy. 
You can use a +Sentinel connection to discover the master and slaves network addresses: + +.. code-block:: pycon + + >>> from redis.sentinel import Sentinel + >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) + >>> sentinel.discover_master('mymaster') + ('127.0.0.1', 6379) + >>> sentinel.discover_slaves('mymaster') + [('127.0.0.1', 6380)] + +You can also create Redis client connections from a Sentinel instnace. You can +connect to either the master (for write operations) or a slave (for read-only +operations). + +.. code-block:: pycon + + >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) + >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1) + >>> master.set('foo', 'bar') + >>> slave.get('foo') + 'bar' + +The master and slave objects are normal StrictRedis instances with their +connection pool bound to the Sentinel instance. When a Sentinel backed client +attempts to establish a connection, it first queries the Sentinel servers to +determine an appropriate host to connect to. If no server is found, +a MasterNotFoundError or SlaveNotFoundError is raised. Both exceptions are +subclasses of ConnectionError. + +When trying to connect to a slave client, the Sentinel connection pool will +iterate over the list of slaves until it finds one that can be connected to. +If no slaves can be connected to, a connection will be established with the +master. + +See `Guidelines for Redis clients with support for Redis Sentinel +`_ to learn more about Redis Sentinel. + +Scan Iterators +^^^^^^^^^^^^^^ + +The *SCAN commands introduced in Redis 2.8 can be cumbersome to use. While +these commands are fully supported, redis-py also exposes the following methods +that return Python iterators for convenience: `scan_iter`, `hscan_iter`, +`sscan_iter` and `zscan_iter`. + +.. code-block:: pycon + + >>> for key, value in (('A', '1'), ('B', '2'), ('C', '3')): + ... r.set(key, value) + >>> for key in r.scan_iter(): + ... 
print key, r.get(key) + A 1 + B 2 + C 3 + +Author +^^^^^^ + +redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com). +It can be found here: http://github.com/andymccurdy/redis-py + +Special thanks to: + +* Ludovico Magnocavallo, author of the original Python Redis client, from + which some of the socket code is still used. +* Alexander Solovyov for ideas on the generic response callback system. +* Paul Hubbard for initial packaging support. + diff --git a/awx/lib/site-packages/redis/redis/__init__.py b/awx/lib/site-packages/redis/redis/__init__.py new file mode 100644 index 0000000000..3b0995db6d --- /dev/null +++ b/awx/lib/site-packages/redis/redis/__init__.py @@ -0,0 +1,34 @@ +from redis.client import Redis, StrictRedis +from redis.connection import ( + BlockingConnectionPool, + ConnectionPool, + Connection, + SSLConnection, + UnixDomainSocketConnection +) +from redis.utils import from_url +from redis.exceptions import ( + AuthenticationError, + BusyLoadingError, + ConnectionError, + DataError, + InvalidResponse, + PubSubError, + ReadOnlyError, + RedisError, + ResponseError, + TimeoutError, + WatchError +) + + +__version__ = '2.10.3' +VERSION = tuple(map(int, __version__.split('.'))) + +__all__ = [ + 'Redis', 'StrictRedis', 'ConnectionPool', 'BlockingConnectionPool', + 'Connection', 'SSLConnection', 'UnixDomainSocketConnection', 'from_url', + 'AuthenticationError', 'BusyLoadingError', 'ConnectionError', 'DataError', + 'InvalidResponse', 'PubSubError', 'ReadOnlyError', 'RedisError', + 'ResponseError', 'TimeoutError', 'WatchError' +] diff --git a/awx/lib/site-packages/redis/redis/_compat.py b/awx/lib/site-packages/redis/redis/_compat.py new file mode 100644 index 0000000000..c7859b571b --- /dev/null +++ b/awx/lib/site-packages/redis/redis/_compat.py @@ -0,0 +1,79 @@ +"""Internal module for Python 2 backwards compatibility.""" +import sys + + +if sys.version_info[0] < 3: + from urlparse import parse_qs, urlparse + from itertools import imap, izip + 
from string import letters as ascii_letters + from Queue import Queue + try: + from cStringIO import StringIO as BytesIO + except ImportError: + from StringIO import StringIO as BytesIO + + iteritems = lambda x: x.iteritems() + iterkeys = lambda x: x.iterkeys() + itervalues = lambda x: x.itervalues() + nativestr = lambda x: \ + x if isinstance(x, str) else x.encode('utf-8', 'replace') + u = lambda x: x.decode() + b = lambda x: x + next = lambda x: x.next() + byte_to_chr = lambda x: x + unichr = unichr + xrange = xrange + basestring = basestring + unicode = unicode + bytes = str + long = long +else: + from urllib.parse import parse_qs, urlparse + from io import BytesIO + from string import ascii_letters + from queue import Queue + + iteritems = lambda x: iter(x.items()) + iterkeys = lambda x: iter(x.keys()) + itervalues = lambda x: iter(x.values()) + byte_to_chr = lambda x: chr(x) + nativestr = lambda x: \ + x if isinstance(x, str) else x.decode('utf-8', 'replace') + u = lambda x: x + b = lambda x: x.encode('latin-1') if not isinstance(x, bytes) else x + next = next + unichr = chr + imap = map + izip = zip + xrange = range + basestring = str + unicode = str + bytes = bytes + long = int + +try: # Python 3 + from queue import LifoQueue, Empty, Full +except ImportError: + from Queue import Empty, Full + try: # Python 2.6 - 2.7 + from Queue import LifoQueue + except ImportError: # Python 2.5 + from Queue import Queue + # From the Python 2.7 lib. Python 2.5 already extracted the core + # methods to aid implementating different queue organisations. + + class LifoQueue(Queue): + "Override queue methods to implement a last-in first-out queue." 
+ + def _init(self, maxsize): + self.maxsize = maxsize + self.queue = [] + + def _qsize(self, len=len): + return len(self.queue) + + def _put(self, item): + self.queue.append(item) + + def _get(self): + return self.queue.pop() diff --git a/awx/lib/site-packages/redis/redis/client.py b/awx/lib/site-packages/redis/redis/client.py new file mode 100755 index 0000000000..74cca861ff --- /dev/null +++ b/awx/lib/site-packages/redis/redis/client.py @@ -0,0 +1,2651 @@ +from __future__ import with_statement +from itertools import chain +import datetime +import sys +import warnings +import threading +import time as mod_time +from redis._compat import (b, basestring, bytes, imap, iteritems, iterkeys, + itervalues, izip, long, nativestr, unicode) +from redis.connection import (ConnectionPool, UnixDomainSocketConnection, + SSLConnection, Token) +from redis.lock import Lock, LuaLock +from redis.exceptions import ( + ConnectionError, + DataError, + ExecAbortError, + NoScriptError, + PubSubError, + RedisError, + ResponseError, + TimeoutError, + WatchError, +) + +SYM_EMPTY = b('') + + +def list_or_args(keys, args): + # returns a single list combining keys and args + try: + iter(keys) + # a string or bytes instance can be iterated, but indicates + # keys wasn't passed as a list + if isinstance(keys, (basestring, bytes)): + keys = [keys] + except TypeError: + keys = [keys] + if args: + keys.extend(args) + return keys + + +def timestamp_to_datetime(response): + "Converts a unix timestamp to a Python datetime object" + if not response: + return None + try: + response = int(response) + except ValueError: + return None + return datetime.datetime.fromtimestamp(response) + + +def string_keys_to_dict(key_string, callback): + return dict.fromkeys(key_string.split(), callback) + + +def dict_merge(*dicts): + merged = {} + [merged.update(d) for d in dicts] + return merged + + +def parse_debug_object(response): + "Parse the results of Redis's DEBUG OBJECT command into a Python dict" + # The 'type' 
of the object is the first item in the response, but isn't + # prefixed with a name + response = nativestr(response) + response = 'type:' + response + response = dict([kv.split(':') for kv in response.split()]) + + # parse some expected int values from the string response + # note: this cmd isn't spec'd so these may not appear in all redis versions + int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle') + for field in int_fields: + if field in response: + response[field] = int(response[field]) + + return response + + +def parse_object(response, infotype): + "Parse the results of an OBJECT command" + if infotype in ('idletime', 'refcount'): + return int_or_none(response) + return response + + +def parse_info(response): + "Parse the result of Redis's INFO command into a Python dict" + info = {} + response = nativestr(response) + + def get_value(value): + if ',' not in value or '=' not in value: + try: + if '.' in value: + return float(value) + else: + return int(value) + except ValueError: + return value + else: + sub_dict = {} + for item in value.split(','): + k, v = item.rsplit('=', 1) + sub_dict[k] = get_value(v) + return sub_dict + + for line in response.splitlines(): + if line and not line.startswith('#'): + if line.find(':') != -1: + key, value = line.split(':', 1) + info[key] = get_value(value) + else: + # if the line isn't splittable, append it to the "__raw__" key + info.setdefault('__raw__', []).append(line) + + return info + + +SENTINEL_STATE_TYPES = { + 'can-failover-its-master': int, + 'config-epoch': int, + 'down-after-milliseconds': int, + 'failover-timeout': int, + 'info-refresh': int, + 'last-hello-message': int, + 'last-ok-ping-reply': int, + 'last-ping-reply': int, + 'last-ping-sent': int, + 'master-link-down-time': int, + 'master-port': int, + 'num-other-sentinels': int, + 'num-slaves': int, + 'o-down-time': int, + 'pending-commands': int, + 'parallel-syncs': int, + 'port': int, + 'quorum': int, + 'role-reported-time': int, + 
's-down-time': int, + 'slave-priority': int, + 'slave-repl-offset': int, + 'voted-leader-epoch': int +} + + +def parse_sentinel_state(item): + result = pairs_to_dict_typed(item, SENTINEL_STATE_TYPES) + flags = set(result['flags'].split(',')) + for name, flag in (('is_master', 'master'), ('is_slave', 'slave'), + ('is_sdown', 's_down'), ('is_odown', 'o_down'), + ('is_sentinel', 'sentinel'), + ('is_disconnected', 'disconnected'), + ('is_master_down', 'master_down')): + result[name] = flag in flags + return result + + +def parse_sentinel_master(response): + return parse_sentinel_state(imap(nativestr, response)) + + +def parse_sentinel_masters(response): + result = {} + for item in response: + state = parse_sentinel_state(imap(nativestr, item)) + result[state['name']] = state + return result + + +def parse_sentinel_slaves_and_sentinels(response): + return [parse_sentinel_state(imap(nativestr, item)) for item in response] + + +def parse_sentinel_get_master(response): + return response and (response[0], int(response[1])) or None + + +def pairs_to_dict(response): + "Create a dict given a list of key/value pairs" + it = iter(response) + return dict(izip(it, it)) + + +def pairs_to_dict_typed(response, type_info): + it = iter(response) + result = {} + for key, value in izip(it, it): + if key in type_info: + try: + value = type_info[key](value) + except: + # if for some reason the value can't be coerced, just use + # the string value + pass + result[key] = value + return result + + +def zset_score_pairs(response, **options): + """ + If ``withscores`` is specified in the options, return the response as + a list of (value, score) pairs + """ + if not response or not options['withscores']: + return response + score_cast_func = options.get('score_cast_func', float) + it = iter(response) + return list(izip(it, imap(score_cast_func, it))) + + +def sort_return_tuples(response, **options): + """ + If ``groups`` is specified, return the response as a list of + n-element tuples with n 
being the value found in options['groups'] + """ + if not response or not options['groups']: + return response + n = options['groups'] + return list(izip(*[response[i::n] for i in range(n)])) + + +def int_or_none(response): + if response is None: + return None + return int(response) + + +def float_or_none(response): + if response is None: + return None + return float(response) + + +def bool_ok(response): + return nativestr(response) == 'OK' + + +def parse_client_list(response, **options): + clients = [] + for c in nativestr(response).splitlines(): + clients.append(dict([pair.split('=') for pair in c.split(' ')])) + return clients + + +def parse_config_get(response, **options): + response = [nativestr(i) if i is not None else None for i in response] + return response and pairs_to_dict(response) or {} + + +def parse_scan(response, **options): + cursor, r = response + return long(cursor), r + + +def parse_hscan(response, **options): + cursor, r = response + return long(cursor), r and pairs_to_dict(r) or {} + + +def parse_zscan(response, **options): + score_cast_func = options.get('score_cast_func', float) + cursor, r = response + it = iter(r) + return long(cursor), list(izip(it, imap(score_cast_func, it))) + + +def parse_slowlog_get(response, **options): + return [{ + 'id': item[0], + 'start_time': int(item[1]), + 'duration': int(item[2]), + 'command': b(' ').join(item[3]) + } for item in response] + + +class StrictRedis(object): + """ + Implementation of the Redis protocol. + + This abstract class provides a Python interface to all Redis commands + and an implementation of the Redis protocol. 
+ + Connection and Pipeline derive from this, implementing how + the commands are sent and received to the Redis server + """ + RESPONSE_CALLBACKS = dict_merge( + string_keys_to_dict( + 'AUTH EXISTS EXPIRE EXPIREAT HEXISTS HMSET MOVE MSETNX PERSIST ' + 'PSETEX RENAMENX SISMEMBER SMOVE SETEX SETNX', + bool + ), + string_keys_to_dict( + 'BITCOUNT BITPOS DECRBY DEL GETBIT HDEL HLEN INCRBY LINSERT LLEN ' + 'LPUSHX PFADD PFCOUNT RPUSHX SADD SCARD SDIFFSTORE SETBIT ' + 'SETRANGE SINTERSTORE SREM STRLEN SUNIONSTORE ZADD ZCARD ' + 'ZLEXCOUNT ZREM ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE', + int + ), + string_keys_to_dict('INCRBYFLOAT HINCRBYFLOAT', float), + string_keys_to_dict( + # these return OK, or int if redis-server is >=1.3.4 + 'LPUSH RPUSH', + lambda r: isinstance(r, long) and r or nativestr(r) == 'OK' + ), + string_keys_to_dict('SORT', sort_return_tuples), + string_keys_to_dict('ZSCORE ZINCRBY', float_or_none), + string_keys_to_dict( + 'FLUSHALL FLUSHDB LSET LTRIM MSET PFMERGE RENAME ' + 'SAVE SELECT SHUTDOWN SLAVEOF WATCH UNWATCH', + bool_ok + ), + string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None), + string_keys_to_dict( + 'SDIFF SINTER SMEMBERS SUNION', + lambda r: r and set(r) or set() + ), + string_keys_to_dict( + 'ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE', + zset_score_pairs + ), + string_keys_to_dict('ZRANK ZREVRANK', int_or_none), + string_keys_to_dict('BGREWRITEAOF BGSAVE', lambda r: True), + { + 'CLIENT GETNAME': lambda r: r and nativestr(r), + 'CLIENT KILL': bool_ok, + 'CLIENT LIST': parse_client_list, + 'CLIENT SETNAME': bool_ok, + 'CONFIG GET': parse_config_get, + 'CONFIG RESETSTAT': bool_ok, + 'CONFIG SET': bool_ok, + 'DEBUG OBJECT': parse_debug_object, + 'HGETALL': lambda r: r and pairs_to_dict(r) or {}, + 'HSCAN': parse_hscan, + 'INFO': parse_info, + 'LASTSAVE': timestamp_to_datetime, + 'OBJECT': parse_object, + 'PING': lambda r: nativestr(r) == 'PONG', + 'RANDOMKEY': lambda r: r and r or None, + 'SCAN': parse_scan, 
+ 'SCRIPT EXISTS': lambda r: list(imap(bool, r)), + 'SCRIPT FLUSH': bool_ok, + 'SCRIPT KILL': bool_ok, + 'SCRIPT LOAD': nativestr, + 'SENTINEL GET-MASTER-ADDR-BY-NAME': parse_sentinel_get_master, + 'SENTINEL MASTER': parse_sentinel_master, + 'SENTINEL MASTERS': parse_sentinel_masters, + 'SENTINEL MONITOR': bool_ok, + 'SENTINEL REMOVE': bool_ok, + 'SENTINEL SENTINELS': parse_sentinel_slaves_and_sentinels, + 'SENTINEL SET': bool_ok, + 'SENTINEL SLAVES': parse_sentinel_slaves_and_sentinels, + 'SET': lambda r: r and nativestr(r) == 'OK', + 'SLOWLOG GET': parse_slowlog_get, + 'SLOWLOG LEN': int, + 'SLOWLOG RESET': bool_ok, + 'SSCAN': parse_scan, + 'TIME': lambda x: (int(x[0]), int(x[1])), + 'ZSCAN': parse_zscan + } + ) + + @classmethod + def from_url(cls, url, db=None, **kwargs): + """ + Return a Redis client object configured from the given URL. + + For example:: + + redis://[:password]@localhost:6379/0 + unix://[:password]@/path/to/socket.sock?db=0 + + There are several ways to specify a database number. The parse function + will return the first specified option: + 1. A ``db`` querystring option, e.g. redis://localhost?db=0 + 2. If using the redis:// scheme, the path argument of the url, e.g. + redis://localhost/0 + 3. The ``db`` argument to this function. + + If none of these options are specified, db=0 is used. + + Any additional querystring arguments and keyword arguments will be + passed along to the ConnectionPool class's initializer. In the case + of conflicting arguments, querystring arguments always win. 
+ """ + connection_pool = ConnectionPool.from_url(url, db=db, **kwargs) + return cls(connection_pool=connection_pool) + + def __init__(self, host='localhost', port=6379, + db=0, password=None, socket_timeout=None, + socket_connect_timeout=None, + socket_keepalive=None, socket_keepalive_options=None, + connection_pool=None, unix_socket_path=None, + encoding='utf-8', encoding_errors='strict', + charset=None, errors=None, + decode_responses=False, retry_on_timeout=False, + ssl=False, ssl_keyfile=None, ssl_certfile=None, + ssl_cert_reqs=None, ssl_ca_certs=None): + if not connection_pool: + if charset is not None: + warnings.warn(DeprecationWarning( + '"charset" is deprecated. Use "encoding" instead')) + encoding = charset + if errors is not None: + warnings.warn(DeprecationWarning( + '"errors" is deprecated. Use "encoding_errors" instead')) + encoding_errors = errors + + kwargs = { + 'db': db, + 'password': password, + 'socket_timeout': socket_timeout, + 'encoding': encoding, + 'encoding_errors': encoding_errors, + 'decode_responses': decode_responses, + 'retry_on_timeout': retry_on_timeout + } + # based on input, setup appropriate connection args + if unix_socket_path is not None: + kwargs.update({ + 'path': unix_socket_path, + 'connection_class': UnixDomainSocketConnection + }) + else: + # TCP specific options + kwargs.update({ + 'host': host, + 'port': port, + 'socket_connect_timeout': socket_connect_timeout, + 'socket_keepalive': socket_keepalive, + 'socket_keepalive_options': socket_keepalive_options, + }) + + if ssl: + kwargs.update({ + 'connection_class': SSLConnection, + 'ssl_keyfile': ssl_keyfile, + 'ssl_certfile': ssl_certfile, + 'ssl_cert_reqs': ssl_cert_reqs, + 'ssl_ca_certs': ssl_ca_certs, + }) + connection_pool = ConnectionPool(**kwargs) + self.connection_pool = connection_pool + self._use_lua_lock = None + + self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy() + + def __repr__(self): + return "%s<%s>" % (type(self).__name__, 
repr(self.connection_pool)) + + def set_response_callback(self, command, callback): + "Set a custom Response Callback" + self.response_callbacks[command] = callback + + def pipeline(self, transaction=True, shard_hint=None): + """ + Return a new pipeline object that can queue multiple commands for + later execution. ``transaction`` indicates whether all commands + should be executed atomically. Apart from making a group of operations + atomic, pipelines are useful for reducing the back-and-forth overhead + between the client and server. + """ + return StrictPipeline( + self.connection_pool, + self.response_callbacks, + transaction, + shard_hint) + + def transaction(self, func, *watches, **kwargs): + """ + Convenience method for executing the callable `func` as a transaction + while watching all keys specified in `watches`. The 'func' callable + should expect a single argument which is a Pipeline object. + """ + shard_hint = kwargs.pop('shard_hint', None) + value_from_callable = kwargs.pop('value_from_callable', False) + with self.pipeline(True, shard_hint) as pipe: + while 1: + try: + if watches: + pipe.watch(*watches) + func_value = func(pipe) + exec_value = pipe.execute() + return func_value if value_from_callable else exec_value + except WatchError: + continue + + def lock(self, name, timeout=None, sleep=0.1, blocking_timeout=None, + lock_class=None, thread_local=True): + """ + Return a new Lock object using key ``name`` that mimics + the behavior of threading.Lock. + + If specified, ``timeout`` indicates a maximum life for the lock. + By default, it will remain locked until release() is called. + + ``sleep`` indicates the amount of time to sleep per loop iteration + when the lock is in blocking mode and another client is currently + holding the lock. + + ``blocking_timeout`` indicates the maximum amount of time in seconds to + spend trying to acquire the lock. A value of ``None`` indicates + continue trying forever. 
``blocking_timeout`` can be specified as a + float or integer, both representing the number of seconds to wait. + + ``lock_class`` forces the specified lock implementation. + + ``thread_local`` indicates whether the lock token is placed in + thread-local storage. By default, the token is placed in thread local + storage so that a thread only sees its token, not a token set by + another thread. Consider the following timeline: + + time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds. + thread-1 sets the token to "abc" + time: 1, thread-2 blocks trying to acquire `my-lock` using the + Lock instance. + time: 5, thread-1 has not yet completed. redis expires the lock + key. + time: 5, thread-2 acquired `my-lock` now that it's available. + thread-2 sets the token to "xyz" + time: 6, thread-1 finishes its work and calls release(). if the + token is *not* stored in thread local storage, then + thread-1 would see the token value as "xyz" and would be + able to successfully release the thread-2's lock. + + In some use cases it's necessary to disable thread local storage. For + example, if you have code where one thread acquires a lock and passes + that lock instance to a worker thread to release later. If thread + local storage isn't disabled in this case, the worker thread won't see + the token set by the thread that acquired the lock. Our assumption + is that these cases aren't common and as such default to using + thread local storage. 
""" + if lock_class is None: + if self._use_lua_lock is None: + # the first time .lock() is called, determine if we can use + # Lua by attempting to register the necessary scripts + try: + LuaLock.register_scripts(self) + self._use_lua_lock = True + except ResponseError: + self._use_lua_lock = False + lock_class = self._use_lua_lock and LuaLock or Lock + return lock_class(self, name, timeout=timeout, sleep=sleep, + blocking_timeout=blocking_timeout, + thread_local=thread_local) + + def pubsub(self, **kwargs): + """ + Return a Publish/Subscribe object. With this object, you can + subscribe to channels and listen for messages that get published to + them. + """ + return PubSub(self.connection_pool, **kwargs) + + # COMMAND EXECUTION AND PROTOCOL PARSING + def execute_command(self, *args, **options): + "Execute a command and return a parsed response" + pool = self.connection_pool + command_name = args[0] + connection = pool.get_connection(command_name, **options) + try: + connection.send_command(*args) + return self.parse_response(connection, command_name, **options) + except (ConnectionError, TimeoutError) as e: + connection.disconnect() + if not connection.retry_on_timeout and isinstance(e, TimeoutError): + raise + connection.send_command(*args) + return self.parse_response(connection, command_name, **options) + finally: + pool.release(connection) + + def parse_response(self, connection, command_name, **options): + "Parses a response from the Redis server" + response = connection.read_response() + if command_name in self.response_callbacks: + return self.response_callbacks[command_name](response, **options) + return response + + # SERVER INFORMATION + def bgrewriteaof(self): + "Tell the Redis server to rewrite the AOF file from data in memory." + return self.execute_command('BGREWRITEAOF') + + def bgsave(self): + """ + Tell the Redis server to save its data to disk. Unlike save(), + this method is asynchronous and returns immediately. 
+ """ + return self.execute_command('BGSAVE') + + def client_kill(self, address): + "Disconnects the client at ``address`` (ip:port)" + return self.execute_command('CLIENT KILL', address) + + def client_list(self): + "Returns a list of currently connected clients" + return self.execute_command('CLIENT LIST') + + def client_getname(self): + "Returns the current connection name" + return self.execute_command('CLIENT GETNAME') + + def client_setname(self, name): + "Sets the current connection name" + return self.execute_command('CLIENT SETNAME', name) + + def config_get(self, pattern="*"): + "Return a dictionary of configuration based on the ``pattern``" + return self.execute_command('CONFIG GET', pattern) + + def config_set(self, name, value): + "Set config item ``name`` with ``value``" + return self.execute_command('CONFIG SET', name, value) + + def config_resetstat(self): + "Reset runtime statistics" + return self.execute_command('CONFIG RESETSTAT') + + def config_rewrite(self): + "Rewrite config file with the minimal change to reflect running config" + return self.execute_command('CONFIG REWRITE') + + def dbsize(self): + "Returns the number of keys in the current database" + return self.execute_command('DBSIZE') + + def debug_object(self, key): + "Returns version specific meta information about a given key" + return self.execute_command('DEBUG OBJECT', key) + + def echo(self, value): + "Echo the string back from the server" + return self.execute_command('ECHO', value) + + def flushall(self): + "Delete all keys in all databases on the current host" + return self.execute_command('FLUSHALL') + + def flushdb(self): + "Delete all keys in the current database" + return self.execute_command('FLUSHDB') + + def info(self, section=None): + """ + Returns a dictionary containing information about the Redis server + + The ``section`` option can be used to select a specific section + of information + + The section option is not supported by older versions of Redis Server, + and 
will generate ResponseError + """ + if section is None: + return self.execute_command('INFO') + else: + return self.execute_command('INFO', section) + + def lastsave(self): + """ + Return a Python datetime object representing the last time the + Redis database was saved to disk + """ + return self.execute_command('LASTSAVE') + + def object(self, infotype, key): + "Return the encoding, idletime, or refcount about the key" + return self.execute_command('OBJECT', infotype, key, infotype=infotype) + + def ping(self): + "Ping the Redis server" + return self.execute_command('PING') + + def save(self): + """ + Tell the Redis server to save its data to disk, + blocking until the save is complete + """ + return self.execute_command('SAVE') + + def sentinel(self, *args): + "Redis Sentinel's SENTINEL command." + warnings.warn( + DeprecationWarning('Use the individual sentinel_* methods')) + + def sentinel_get_master_addr_by_name(self, service_name): + "Returns a (host, port) pair for the given ``service_name``" + return self.execute_command('SENTINEL GET-MASTER-ADDR-BY-NAME', + service_name) + + def sentinel_master(self, service_name): + "Returns a dictionary containing the specified masters state." + return self.execute_command('SENTINEL MASTER', service_name) + + def sentinel_masters(self): + "Returns a list of dictionaries containing each master's state." 
+ return self.execute_command('SENTINEL MASTERS') + + def sentinel_monitor(self, name, ip, port, quorum): + "Add a new master to Sentinel to be monitored" + return self.execute_command('SENTINEL MONITOR', name, ip, port, quorum) + + def sentinel_remove(self, name): + "Remove a master from Sentinel's monitoring" + return self.execute_command('SENTINEL REMOVE', name) + + def sentinel_sentinels(self, service_name): + "Returns a list of sentinels for ``service_name``" + return self.execute_command('SENTINEL SENTINELS', service_name) + + def sentinel_set(self, name, option, value): + "Set Sentinel monitoring parameters for a given master" + return self.execute_command('SENTINEL SET', name, option, value) + + def sentinel_slaves(self, service_name): + "Returns a list of slaves for ``service_name``" + return self.execute_command('SENTINEL SLAVES', service_name) + + def shutdown(self): + "Shutdown the server" + try: + self.execute_command('SHUTDOWN') + except ConnectionError: + # a ConnectionError here is expected + return + raise RedisError("SHUTDOWN seems to have failed.") + + def slaveof(self, host=None, port=None): + """ + Set the server to be a replicated slave of the instance identified + by the ``host`` and ``port``. If called without arguments, the + instance is promoted to a master instead. + """ + if host is None and port is None: + return self.execute_command('SLAVEOF', Token('NO'), Token('ONE')) + return self.execute_command('SLAVEOF', host, port) + + def slowlog_get(self, num=None): + """ + Get the entries from the slowlog. If ``num`` is specified, get the + most recent ``num`` items. 

        """
        args = ['SLOWLOG GET']
        if num is not None:
            args.append(num)
        return self.execute_command(*args)

    def slowlog_len(self):
        "Get the number of items in the slowlog"
        return self.execute_command('SLOWLOG LEN')

    def slowlog_reset(self):
        "Remove all items in the slowlog"
        return self.execute_command('SLOWLOG RESET')

    def time(self):
        """
        Returns the server time as a 2-item tuple of ints:
        (seconds since epoch, microseconds into this second).
        """
        return self.execute_command('TIME')

    # BASIC KEY COMMANDS
    def append(self, key, value):
        """
        Appends the string ``value`` to the value at ``key``. If ``key``
        doesn't already exist, create it with a value of ``value``.
        Returns the new length of the value at ``key``.
        """
        return self.execute_command('APPEND', key, value)

    def bitcount(self, key, start=None, end=None):
        """
        Returns the count of set bits in the value of ``key``. Optional
        ``start`` and ``end`` parameters indicate which bytes to consider
        """
        params = [key]
        if start is not None and end is not None:
            params.append(start)
            params.append(end)
        elif (start is not None and end is None) or \
                (end is not None and start is None):
            raise RedisError("Both start and end must be specified")
        return self.execute_command('BITCOUNT', *params)

    def bitop(self, operation, dest, *keys):
        """
        Perform a bitwise operation using ``operation`` between ``keys`` and
        store the result in ``dest``.
        """
        return self.execute_command('BITOP', operation, dest, *keys)

    def bitpos(self, key, bit, start=None, end=None):
        """
        Return the position of the first bit set to 1 or 0 in a string.
        ``start`` and ``end`` defines search range. The range is interpreted
        as a range of bytes and not a range of bits, so start=0 and end=2
        means to look at the first three bytes.
+ """ + if bit not in (0, 1): + raise RedisError('bit must be 0 or 1') + params = [key, bit] + + start is not None and params.append(start) + + if start is not None and end is not None: + params.append(end) + elif start is None and end is not None: + raise RedisError("start argument is not set, " + "when end is specified") + return self.execute_command('BITPOS', *params) + + def decr(self, name, amount=1): + """ + Decrements the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as 0 - ``amount`` + """ + return self.execute_command('DECRBY', name, amount) + + def delete(self, *names): + "Delete one or more keys specified by ``names``" + return self.execute_command('DEL', *names) + + def __delitem__(self, name): + self.delete(name) + + def dump(self, name): + """ + Return a serialized version of the value stored at the specified key. + If key does not exist a nil bulk reply is returned. + """ + return self.execute_command('DUMP', name) + + def exists(self, name): + "Returns a boolean indicating whether key ``name`` exists" + return self.execute_command('EXISTS', name) + __contains__ = exists + + def expire(self, name, time): + """ + Set an expire flag on key ``name`` for ``time`` seconds. ``time`` + can be represented by an integer or a Python timedelta object. + """ + if isinstance(time, datetime.timedelta): + time = time.seconds + time.days * 24 * 3600 + return self.execute_command('EXPIRE', name, time) + + def expireat(self, name, when): + """ + Set an expire flag on key ``name``. ``when`` can be represented + as an integer indicating unix time or a Python datetime object. 
+ """ + if isinstance(when, datetime.datetime): + when = int(mod_time.mktime(when.timetuple())) + return self.execute_command('EXPIREAT', name, when) + + def get(self, name): + """ + Return the value at key ``name``, or None if the key doesn't exist + """ + return self.execute_command('GET', name) + + def __getitem__(self, name): + """ + Return the value at key ``name``, raises a KeyError if the key + doesn't exist. + """ + value = self.get(name) + if value: + return value + raise KeyError(name) + + def getbit(self, name, offset): + "Returns a boolean indicating the value of ``offset`` in ``name``" + return self.execute_command('GETBIT', name, offset) + + def getrange(self, key, start, end): + """ + Returns the substring of the string value stored at ``key``, + determined by the offsets ``start`` and ``end`` (both are inclusive) + """ + return self.execute_command('GETRANGE', key, start, end) + + def getset(self, name, value): + """ + Sets the value at key ``name`` to ``value`` + and returns the old value at key ``name`` atomically. + """ + return self.execute_command('GETSET', name, value) + + def incr(self, name, amount=1): + """ + Increments the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as ``amount`` + """ + return self.execute_command('INCRBY', name, amount) + + def incrby(self, name, amount=1): + """ + Increments the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as ``amount`` + """ + + # An alias for ``incr()``, because it is already implemented + # as INCRBY redis command. + return self.incr(name, amount) + + def incrbyfloat(self, name, amount=1.0): + """ + Increments the value at key ``name`` by floating ``amount``. 
+ If no key exists, the value will be initialized as ``amount`` + """ + return self.execute_command('INCRBYFLOAT', name, amount) + + def keys(self, pattern='*'): + "Returns a list of keys matching ``pattern``" + return self.execute_command('KEYS', pattern) + + def mget(self, keys, *args): + """ + Returns a list of values ordered identically to ``keys`` + """ + args = list_or_args(keys, args) + return self.execute_command('MGET', *args) + + def mset(self, *args, **kwargs): + """ + Sets key/values based on a mapping. Mapping can be supplied as a single + dictionary argument or as kwargs. + """ + if args: + if len(args) != 1 or not isinstance(args[0], dict): + raise RedisError('MSET requires **kwargs or a single dict arg') + kwargs.update(args[0]) + items = [] + for pair in iteritems(kwargs): + items.extend(pair) + return self.execute_command('MSET', *items) + + def msetnx(self, *args, **kwargs): + """ + Sets key/values based on a mapping if none of the keys are already set. + Mapping can be supplied as a single dictionary argument or as kwargs. + Returns a boolean indicating if the operation was successful. + """ + if args: + if len(args) != 1 or not isinstance(args[0], dict): + raise RedisError('MSETNX requires **kwargs or a single ' + 'dict arg') + kwargs.update(args[0]) + items = [] + for pair in iteritems(kwargs): + items.extend(pair) + return self.execute_command('MSETNX', *items) + + def move(self, name, db): + "Moves the key ``name`` to a different Redis database ``db``" + return self.execute_command('MOVE', name, db) + + def persist(self, name): + "Removes an expiration on ``name``" + return self.execute_command('PERSIST', name) + + def pexpire(self, name, time): + """ + Set an expire flag on key ``name`` for ``time`` milliseconds. + ``time`` can be represented by an integer or a Python timedelta + object. 
+ """ + if isinstance(time, datetime.timedelta): + ms = int(time.microseconds / 1000) + time = (time.seconds + time.days * 24 * 3600) * 1000 + ms + return self.execute_command('PEXPIRE', name, time) + + def pexpireat(self, name, when): + """ + Set an expire flag on key ``name``. ``when`` can be represented + as an integer representing unix time in milliseconds (unix time * 1000) + or a Python datetime object. + """ + if isinstance(when, datetime.datetime): + ms = int(when.microsecond / 1000) + when = int(mod_time.mktime(when.timetuple())) * 1000 + ms + return self.execute_command('PEXPIREAT', name, when) + + def psetex(self, name, time_ms, value): + """ + Set the value of key ``name`` to ``value`` that expires in ``time_ms`` + milliseconds. ``time_ms`` can be represented by an integer or a Python + timedelta object + """ + if isinstance(time_ms, datetime.timedelta): + ms = int(time_ms.microseconds / 1000) + time_ms = (time_ms.seconds + time_ms.days * 24 * 3600) * 1000 + ms + return self.execute_command('PSETEX', name, time_ms, value) + + def pttl(self, name): + "Returns the number of milliseconds until the key ``name`` will expire" + return self.execute_command('PTTL', name) + + def randomkey(self): + "Returns the name of a random key" + return self.execute_command('RANDOMKEY') + + def rename(self, src, dst): + """ + Rename key ``src`` to ``dst`` + """ + return self.execute_command('RENAME', src, dst) + + def renamenx(self, src, dst): + "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist" + return self.execute_command('RENAMENX', src, dst) + + def restore(self, name, ttl, value): + """ + Create a key using the provided serialized value, previously obtained + using DUMP. + """ + return self.execute_command('RESTORE', name, ttl, value) + + def set(self, name, value, ex=None, px=None, nx=False, xx=False): + """ + Set the value at key ``name`` to ``value`` + + ``ex`` sets an expire flag on key ``name`` for ``ex`` seconds. 
+ + ``px`` sets an expire flag on key ``name`` for ``px`` milliseconds. + + ``nx`` if set to True, set the value at key ``name`` to ``value`` if it + does not already exist. + + ``xx`` if set to True, set the value at key ``name`` to ``value`` if it + already exists. + """ + pieces = [name, value] + if ex: + pieces.append('EX') + if isinstance(ex, datetime.timedelta): + ex = ex.seconds + ex.days * 24 * 3600 + pieces.append(ex) + if px: + pieces.append('PX') + if isinstance(px, datetime.timedelta): + ms = int(px.microseconds / 1000) + px = (px.seconds + px.days * 24 * 3600) * 1000 + ms + pieces.append(px) + + if nx: + pieces.append('NX') + if xx: + pieces.append('XX') + return self.execute_command('SET', *pieces) + + def __setitem__(self, name, value): + self.set(name, value) + + def setbit(self, name, offset, value): + """ + Flag the ``offset`` in ``name`` as ``value``. Returns a boolean + indicating the previous value of ``offset``. + """ + value = value and 1 or 0 + return self.execute_command('SETBIT', name, offset, value) + + def setex(self, name, time, value): + """ + Set the value of key ``name`` to ``value`` that expires in ``time`` + seconds. ``time`` can be represented by an integer or a Python + timedelta object. + """ + if isinstance(time, datetime.timedelta): + time = time.seconds + time.days * 24 * 3600 + return self.execute_command('SETEX', name, time, value) + + def setnx(self, name, value): + "Set the value of key ``name`` to ``value`` if key doesn't exist" + return self.execute_command('SETNX', name, value) + + def setrange(self, name, offset, value): + """ + Overwrite bytes in the value of ``name`` starting at ``offset`` with + ``value``. If ``offset`` plus the length of ``value`` exceeds the + length of the original value, the new value will be larger than before. + If ``offset`` exceeds the length of the original value, null bytes + will be used to pad between the end of the previous value and the start + of what's being injected. 

        Returns the length of the new string.
        """
        return self.execute_command('SETRANGE', name, offset, value)

    def strlen(self, name):
        "Return the number of bytes stored in the value of ``name``"
        return self.execute_command('STRLEN', name)

    def substr(self, name, start, end=-1):
        """
        Return a substring of the string at key ``name``. ``start`` and ``end``
        are 0-based integers specifying the portion of the string to return.
        """
        return self.execute_command('SUBSTR', name, start, end)

    def ttl(self, name):
        "Returns the number of seconds until the key ``name`` will expire"
        return self.execute_command('TTL', name)

    def type(self, name):
        "Returns the type of key ``name``"
        return self.execute_command('TYPE', name)

    def watch(self, *names):
        """
        Watches the values at keys ``names``, or None if the key doesn't exist
        """
        warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))

    def unwatch(self):
        """
        Unwatches the value at key ``name``, or None if the key doesn't exist
        """
        warnings.warn(
            DeprecationWarning('Call UNWATCH from a Pipeline object'))

    # LIST COMMANDS
    def blpop(self, keys, timeout=0):
        """
        LPOP a value off of the first non-empty list
        named in the ``keys`` list.

        If none of the lists in ``keys`` has a value to LPOP, then block
        for ``timeout`` seconds, or until a value gets pushed on to one
        of the lists.

        If timeout is 0, then block indefinitely.
        """
        if timeout is None:
            timeout = 0
        if isinstance(keys, basestring):
            keys = [keys]
        else:
            keys = list(keys)
        keys.append(timeout)
        return self.execute_command('BLPOP', *keys)

    def brpop(self, keys, timeout=0):
        """
        RPOP a value off of the first non-empty list
        named in the ``keys`` list.

        If none of the lists in ``keys`` has a value to RPOP, then block
        for ``timeout`` seconds, or until a value gets pushed on to one
        of the lists.

        If timeout is 0, then block indefinitely.
+ """ + if timeout is None: + timeout = 0 + if isinstance(keys, basestring): + keys = [keys] + else: + keys = list(keys) + keys.append(timeout) + return self.execute_command('BRPOP', *keys) + + def brpoplpush(self, src, dst, timeout=0): + """ + Pop a value off the tail of ``src``, push it on the head of ``dst`` + and then return it. + + This command blocks until a value is in ``src`` or until ``timeout`` + seconds elapse, whichever is first. A ``timeout`` value of 0 blocks + forever. + """ + if timeout is None: + timeout = 0 + return self.execute_command('BRPOPLPUSH', src, dst, timeout) + + def lindex(self, name, index): + """ + Return the item from list ``name`` at position ``index`` + + Negative indexes are supported and will return an item at the + end of the list + """ + return self.execute_command('LINDEX', name, index) + + def linsert(self, name, where, refvalue, value): + """ + Insert ``value`` in list ``name`` either immediately before or after + [``where``] ``refvalue`` + + Returns the new length of the list on success or -1 if ``refvalue`` + is not in the list. 
+ """ + return self.execute_command('LINSERT', name, where, refvalue, value) + + def llen(self, name): + "Return the length of the list ``name``" + return self.execute_command('LLEN', name) + + def lpop(self, name): + "Remove and return the first item of the list ``name``" + return self.execute_command('LPOP', name) + + def lpush(self, name, *values): + "Push ``values`` onto the head of the list ``name``" + return self.execute_command('LPUSH', name, *values) + + def lpushx(self, name, value): + "Push ``value`` onto the head of the list ``name`` if ``name`` exists" + return self.execute_command('LPUSHX', name, value) + + def lrange(self, name, start, end): + """ + Return a slice of the list ``name`` between + position ``start`` and ``end`` + + ``start`` and ``end`` can be negative numbers just like + Python slicing notation + """ + return self.execute_command('LRANGE', name, start, end) + + def lrem(self, name, count, value): + """ + Remove the first ``count`` occurrences of elements equal to ``value`` + from the list stored at ``name``. + + The count argument influences the operation in the following ways: + count > 0: Remove elements equal to value moving from head to tail. + count < 0: Remove elements equal to value moving from tail to head. + count = 0: Remove all elements equal to value. 
+ """ + return self.execute_command('LREM', name, count, value) + + def lset(self, name, index, value): + "Set ``position`` of list ``name`` to ``value``" + return self.execute_command('LSET', name, index, value) + + def ltrim(self, name, start, end): + """ + Trim the list ``name``, removing all values not within the slice + between ``start`` and ``end`` + + ``start`` and ``end`` can be negative numbers just like + Python slicing notation + """ + return self.execute_command('LTRIM', name, start, end) + + def rpop(self, name): + "Remove and return the last item of the list ``name``" + return self.execute_command('RPOP', name) + + def rpoplpush(self, src, dst): + """ + RPOP a value off of the ``src`` list and atomically LPUSH it + on to the ``dst`` list. Returns the value. + """ + return self.execute_command('RPOPLPUSH', src, dst) + + def rpush(self, name, *values): + "Push ``values`` onto the tail of the list ``name``" + return self.execute_command('RPUSH', name, *values) + + def rpushx(self, name, value): + "Push ``value`` onto the tail of the list ``name`` if ``name`` exists" + return self.execute_command('RPUSHX', name, value) + + def sort(self, name, start=None, num=None, by=None, get=None, + desc=False, alpha=False, store=None, groups=False): + """ + Sort and return the list, set or sorted set at ``name``. + + ``start`` and ``num`` allow for paging through the sorted data + + ``by`` allows using an external key to weight and sort the items. + Use an "*" to indicate where in the key the item value is located + + ``get`` allows for returning items from external keys rather than the + sorted data itself. 
Use an "*" to indicate where in the key
        the item value is located

        ``desc`` allows for reversing the sort

        ``alpha`` allows for sorting lexicographically rather than numerically

        ``store`` allows for storing the result of the sort into
        the key ``store``

        ``groups`` if set to True and if ``get`` contains at least two
        elements, sort will return a list of tuples, each containing the
        values fetched from the arguments to ``get``.

        """
        if (start is not None and num is None) or \
                (num is not None and start is None):
            raise RedisError("``start`` and ``num`` must both be specified")

        pieces = [name]
        if by is not None:
            pieces.append(Token('BY'))
            pieces.append(by)
        if start is not None and num is not None:
            pieces.append(Token('LIMIT'))
            pieces.append(start)
            pieces.append(num)
        if get is not None:
            # If get is a string assume we want to get a single value.
            # Otherwise assume it's an iterable and we want to get multiple
            # values. We can't just iterate blindly because strings are
            # iterable.
            if isinstance(get, basestring):
                pieces.append(Token('GET'))
                pieces.append(get)
            else:
                for g in get:
                    pieces.append(Token('GET'))
                    pieces.append(g)
        if desc:
            pieces.append(Token('DESC'))
        if alpha:
            pieces.append(Token('ALPHA'))
        if store is not None:
            pieces.append(Token('STORE'))
            pieces.append(store)

        if groups:
            if not get or isinstance(get, basestring) or len(get) < 2:
                raise DataError('when using "groups" the "get" argument '
                                'must be specified and contain at least '
                                'two keys')

        options = {'groups': len(get) if groups else None}
        return self.execute_command('SORT', *pieces, **options)

    # SCAN COMMANDS
    def scan(self, cursor=0, match=None, count=None):
        """
        Incrementally return lists of key names. Also return a cursor
        indicating the scan position.
+ + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + pieces = [cursor] + if match is not None: + pieces.extend([Token('MATCH'), match]) + if count is not None: + pieces.extend([Token('COUNT'), count]) + return self.execute_command('SCAN', *pieces) + + def scan_iter(self, match=None, count=None): + """ + Make an iterator using the SCAN command so that the client doesn't + need to remember the cursor position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + cursor = '0' + while cursor != 0: + cursor, data = self.scan(cursor=cursor, match=match, count=count) + for item in data: + yield item + + def sscan(self, name, cursor=0, match=None, count=None): + """ + Incrementally return lists of elements in a set. Also return a cursor + indicating the scan position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + pieces = [name, cursor] + if match is not None: + pieces.extend([Token('MATCH'), match]) + if count is not None: + pieces.extend([Token('COUNT'), count]) + return self.execute_command('SSCAN', *pieces) + + def sscan_iter(self, name, match=None, count=None): + """ + Make an iterator using the SSCAN command so that the client doesn't + need to remember the cursor position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + cursor = '0' + while cursor != 0: + cursor, data = self.sscan(name, cursor=cursor, + match=match, count=count) + for item in data: + yield item + + def hscan(self, name, cursor=0, match=None, count=None): + """ + Incrementally return key/value slices in a hash. Also return a cursor + indicating the scan position. 
+ + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + pieces = [name, cursor] + if match is not None: + pieces.extend([Token('MATCH'), match]) + if count is not None: + pieces.extend([Token('COUNT'), count]) + return self.execute_command('HSCAN', *pieces) + + def hscan_iter(self, name, match=None, count=None): + """ + Make an iterator using the HSCAN command so that the client doesn't + need to remember the cursor position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + """ + cursor = '0' + while cursor != 0: + cursor, data = self.hscan(name, cursor=cursor, + match=match, count=count) + for item in data.items(): + yield item + + def zscan(self, name, cursor=0, match=None, count=None, + score_cast_func=float): + """ + Incrementally return lists of elements in a sorted set. Also return a + cursor indicating the scan position. + + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + + ``score_cast_func`` a callable used to cast the score return value + """ + pieces = [name, cursor] + if match is not None: + pieces.extend([Token('MATCH'), match]) + if count is not None: + pieces.extend([Token('COUNT'), count]) + options = {'score_cast_func': score_cast_func} + return self.execute_command('ZSCAN', *pieces, **options) + + def zscan_iter(self, name, match=None, count=None, + score_cast_func=float): + """ + Make an iterator using the ZSCAN command so that the client doesn't + need to remember the cursor position. 
+ + ``match`` allows for filtering the keys by pattern + + ``count`` allows for hint the minimum number of returns + + ``score_cast_func`` a callable used to cast the score return value + """ + cursor = '0' + while cursor != 0: + cursor, data = self.zscan(name, cursor=cursor, match=match, + count=count, + score_cast_func=score_cast_func) + for item in data: + yield item + + # SET COMMANDS + def sadd(self, name, *values): + "Add ``value(s)`` to set ``name``" + return self.execute_command('SADD', name, *values) + + def scard(self, name): + "Return the number of elements in set ``name``" + return self.execute_command('SCARD', name) + + def sdiff(self, keys, *args): + "Return the difference of sets specified by ``keys``" + args = list_or_args(keys, args) + return self.execute_command('SDIFF', *args) + + def sdiffstore(self, dest, keys, *args): + """ + Store the difference of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. + """ + args = list_or_args(keys, args) + return self.execute_command('SDIFFSTORE', dest, *args) + + def sinter(self, keys, *args): + "Return the intersection of sets specified by ``keys``" + args = list_or_args(keys, args) + return self.execute_command('SINTER', *args) + + def sinterstore(self, dest, keys, *args): + """ + Store the intersection of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. 
+ """ + args = list_or_args(keys, args) + return self.execute_command('SINTERSTORE', dest, *args) + + def sismember(self, name, value): + "Return a boolean indicating if ``value`` is a member of set ``name``" + return self.execute_command('SISMEMBER', name, value) + + def smembers(self, name): + "Return all members of the set ``name``" + return self.execute_command('SMEMBERS', name) + + def smove(self, src, dst, value): + "Move ``value`` from set ``src`` to set ``dst`` atomically" + return self.execute_command('SMOVE', src, dst, value) + + def spop(self, name): + "Remove and return a random member of set ``name``" + return self.execute_command('SPOP', name) + + def srandmember(self, name, number=None): + """ + If ``number`` is None, returns a random member of set ``name``. + + If ``number`` is supplied, returns a list of ``number`` random + memebers of set ``name``. Note this is only available when running + Redis 2.6+. + """ + args = number and [number] or [] + return self.execute_command('SRANDMEMBER', name, *args) + + def srem(self, name, *values): + "Remove ``values`` from set ``name``" + return self.execute_command('SREM', name, *values) + + def sunion(self, keys, *args): + "Return the union of sets specified by ``keys``" + args = list_or_args(keys, args) + return self.execute_command('SUNION', *args) + + def sunionstore(self, dest, keys, *args): + """ + Store the union of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. + """ + args = list_or_args(keys, args) + return self.execute_command('SUNIONSTORE', dest, *args) + + # SORTED SET COMMANDS + def zadd(self, name, *args, **kwargs): + """ + Set any number of score, element-name pairs to the key ``name``. Pairs + can be specified in two ways: + + As *args, in the form of: score1, name1, score2, name2, ... + or as **kwargs, in the form of: name1=score1, name2=score2, ... 
+ + The following example would add four values to the 'my-key' key: + redis.zadd('my-key', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4) + """ + pieces = [] + if args: + if len(args) % 2 != 0: + raise RedisError("ZADD requires an equal number of " + "values and scores") + pieces.extend(args) + for pair in iteritems(kwargs): + pieces.append(pair[1]) + pieces.append(pair[0]) + return self.execute_command('ZADD', name, *pieces) + + def zcard(self, name): + "Return the number of elements in the sorted set ``name``" + return self.execute_command('ZCARD', name) + + def zcount(self, name, min, max): + """ + Returns the number of elements in the sorted set at key ``name`` with + a score between ``min`` and ``max``. + """ + return self.execute_command('ZCOUNT', name, min, max) + + def zincrby(self, name, value, amount=1): + "Increment the score of ``value`` in sorted set ``name`` by ``amount``" + return self.execute_command('ZINCRBY', name, amount, value) + + def zinterstore(self, dest, keys, aggregate=None): + """ + Intersect multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. + """ + return self._zaggregate('ZINTERSTORE', dest, keys, aggregate) + + def zlexcount(self, name, min, max): + """ + Return the number of items in the sorted set ``name`` between the + lexicographical range ``min`` and ``max``. + """ + return self.execute_command('ZLEXCOUNT', name, min, max) + + def zrange(self, name, start, end, desc=False, withscores=False, + score_cast_func=float): + """ + Return a range of values from sorted set ``name`` between + ``start`` and ``end`` sorted in ascending order. + + ``start`` and ``end`` can be negative, indicating the end of the range. + + ``desc`` a boolean indicating whether to sort the results descendingly + + ``withscores`` indicates to return the scores along with the values. 
+ The return type is a list of (value, score) pairs + + ``score_cast_func`` a callable used to cast the score return value + """ + if desc: + return self.zrevrange(name, start, end, withscores, + score_cast_func) + pieces = ['ZRANGE', name, start, end] + if withscores: + pieces.append(Token('WITHSCORES')) + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrangebylex(self, name, min, max, start=None, num=None): + """ + Return the lexicographical range of values from sorted set ``name`` + between ``min`` and ``max``. + + If ``start`` and ``num`` are specified, then return a slice of the + range. + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise RedisError("``start`` and ``num`` must both be specified") + pieces = ['ZRANGEBYLEX', name, min, max] + if start is not None and num is not None: + pieces.extend([Token('LIMIT'), start, num]) + return self.execute_command(*pieces) + + def zrangebyscore(self, name, min, max, start=None, num=None, + withscores=False, score_cast_func=float): + """ + Return a range of values from the sorted set ``name`` with scores + between ``min`` and ``max``. + + If ``start`` and ``num`` are specified, then return a slice + of the range. + + ``withscores`` indicates to return the scores along with the values. 
+ The return type is a list of (value, score) pairs + + `score_cast_func`` a callable used to cast the score return value + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise RedisError("``start`` and ``num`` must both be specified") + pieces = ['ZRANGEBYSCORE', name, min, max] + if start is not None and num is not None: + pieces.extend([Token('LIMIT'), start, num]) + if withscores: + pieces.append(Token('WITHSCORES')) + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrank(self, name, value): + """ + Returns a 0-based value indicating the rank of ``value`` in sorted set + ``name`` + """ + return self.execute_command('ZRANK', name, value) + + def zrem(self, name, *values): + "Remove member ``values`` from sorted set ``name``" + return self.execute_command('ZREM', name, *values) + + def zremrangebylex(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` between the + lexicographical range specified by ``min`` and ``max``. + + Returns the number of elements removed. + """ + return self.execute_command('ZREMRANGEBYLEX', name, min, max) + + def zremrangebyrank(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` with ranks between + ``min`` and ``max``. Values are 0-based, ordered from smallest score + to largest. Values can be negative indicating the highest scores. + Returns the number of elements removed + """ + return self.execute_command('ZREMRANGEBYRANK', name, min, max) + + def zremrangebyscore(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` with scores + between ``min`` and ``max``. Returns the number of elements removed. 
+ """ + return self.execute_command('ZREMRANGEBYSCORE', name, min, max) + + def zrevrange(self, name, start, end, withscores=False, + score_cast_func=float): + """ + Return a range of values from sorted set ``name`` between + ``start`` and ``end`` sorted in descending order. + + ``start`` and ``end`` can be negative, indicating the end of the range. + + ``withscores`` indicates to return the scores along with the values + The return type is a list of (value, score) pairs + + ``score_cast_func`` a callable used to cast the score return value + """ + pieces = ['ZREVRANGE', name, start, end] + if withscores: + pieces.append(Token('WITHSCORES')) + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrevrangebyscore(self, name, max, min, start=None, num=None, + withscores=False, score_cast_func=float): + """ + Return a range of values from the sorted set ``name`` with scores + between ``min`` and ``max`` in descending order. + + If ``start`` and ``num`` are specified, then return a slice + of the range. + + ``withscores`` indicates to return the scores along with the values. 
+ The return type is a list of (value, score) pairs + + ``score_cast_func`` a callable used to cast the score return value + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise RedisError("``start`` and ``num`` must both be specified") + pieces = ['ZREVRANGEBYSCORE', name, max, min] + if start is not None and num is not None: + pieces.extend([Token('LIMIT'), start, num]) + if withscores: + pieces.append(Token('WITHSCORES')) + options = { + 'withscores': withscores, + 'score_cast_func': score_cast_func + } + return self.execute_command(*pieces, **options) + + def zrevrank(self, name, value): + """ + Returns a 0-based value indicating the descending rank of + ``value`` in sorted set ``name`` + """ + return self.execute_command('ZREVRANK', name, value) + + def zscore(self, name, value): + "Return the score of element ``value`` in sorted set ``name``" + return self.execute_command('ZSCORE', name, value) + + def zunionstore(self, dest, keys, aggregate=None): + """ + Union multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. + """ + return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate) + + def _zaggregate(self, command, dest, keys, aggregate=None): + pieces = [command, dest, len(keys)] + if isinstance(keys, dict): + keys, weights = iterkeys(keys), itervalues(keys) + else: + weights = None + pieces.extend(keys) + if weights: + pieces.append(Token('WEIGHTS')) + pieces.extend(weights) + if aggregate: + pieces.append(Token('AGGREGATE')) + pieces.append(aggregate) + return self.execute_command(*pieces) + + # HYPERLOGLOG COMMANDS + def pfadd(self, name, *values): + "Adds the specified elements to the specified HyperLogLog." + return self.execute_command('PFADD', name, *values) + + def pfcount(self, name): + """ + Return the approximated cardinality of + the set observed by the HyperLogLog at key. 
+ """ + return self.execute_command('PFCOUNT', name) + + def pfmerge(self, dest, *sources): + "Merge N different HyperLogLogs into a single one." + return self.execute_command('PFMERGE', dest, *sources) + + # HASH COMMANDS + def hdel(self, name, *keys): + "Delete ``keys`` from hash ``name``" + return self.execute_command('HDEL', name, *keys) + + def hexists(self, name, key): + "Returns a boolean indicating if ``key`` exists within hash ``name``" + return self.execute_command('HEXISTS', name, key) + + def hget(self, name, key): + "Return the value of ``key`` within the hash ``name``" + return self.execute_command('HGET', name, key) + + def hgetall(self, name): + "Return a Python dict of the hash's name/value pairs" + return self.execute_command('HGETALL', name) + + def hincrby(self, name, key, amount=1): + "Increment the value of ``key`` in hash ``name`` by ``amount``" + return self.execute_command('HINCRBY', name, key, amount) + + def hincrbyfloat(self, name, key, amount=1.0): + """ + Increment the value of ``key`` in hash ``name`` by floating ``amount`` + """ + return self.execute_command('HINCRBYFLOAT', name, key, amount) + + def hkeys(self, name): + "Return the list of keys within hash ``name``" + return self.execute_command('HKEYS', name) + + def hlen(self, name): + "Return the number of elements in hash ``name``" + return self.execute_command('HLEN', name) + + def hset(self, name, key, value): + """ + Set ``key`` to ``value`` within hash ``name`` + Returns 1 if HSET created a new field, otherwise 0 + """ + return self.execute_command('HSET', name, key, value) + + def hsetnx(self, name, key, value): + """ + Set ``key`` to ``value`` within hash ``name`` if ``key`` does not + exist. Returns 1 if HSETNX created a field, otherwise 0. + """ + return self.execute_command('HSETNX', name, key, value) + + def hmset(self, name, mapping): + """ + Set key to value within hash ``name`` for each corresponding + key and value from the ``mapping`` dict. 
+ """ + if not mapping: + raise DataError("'hmset' with 'mapping' of length 0") + items = [] + for pair in iteritems(mapping): + items.extend(pair) + return self.execute_command('HMSET', name, *items) + + def hmget(self, name, keys, *args): + "Returns a list of values ordered identically to ``keys``" + args = list_or_args(keys, args) + return self.execute_command('HMGET', name, *args) + + def hvals(self, name): + "Return the list of values within hash ``name``" + return self.execute_command('HVALS', name) + + def publish(self, channel, message): + """ + Publish ``message`` on ``channel``. + Returns the number of subscribers the message was delivered to. + """ + return self.execute_command('PUBLISH', channel, message) + + def eval(self, script, numkeys, *keys_and_args): + """ + Execute the Lua ``script``, specifying the ``numkeys`` the script + will touch and the key names and argument values in ``keys_and_args``. + Returns the result of the script. + + In practice, use the object returned by ``register_script``. This + function exists purely for Redis API completion. + """ + return self.execute_command('EVAL', script, numkeys, *keys_and_args) + + def evalsha(self, sha, numkeys, *keys_and_args): + """ + Use the ``sha`` to execute a Lua script already registered via EVAL + or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the + key names and argument values in ``keys_and_args``. Returns the result + of the script. + + In practice, use the object returned by ``register_script``. This + function exists purely for Redis API completion. + """ + return self.execute_command('EVALSHA', sha, numkeys, *keys_and_args) + + def script_exists(self, *args): + """ + Check if a script exists in the script cache by specifying the SHAs of + each script as ``args``. Returns a list of boolean values indicating if + if each already script exists in the cache. 
+ """ + return self.execute_command('SCRIPT EXISTS', *args) + + def script_flush(self): + "Flush all scripts from the script cache" + return self.execute_command('SCRIPT FLUSH') + + def script_kill(self): + "Kill the currently executing Lua script" + return self.execute_command('SCRIPT KILL') + + def script_load(self, script): + "Load a Lua ``script`` into the script cache. Returns the SHA." + return self.execute_command('SCRIPT LOAD', script) + + def register_script(self, script): + """ + Register a Lua ``script`` specifying the ``keys`` it will touch. + Returns a Script object that is callable and hides the complexity of + deal with scripts, keys, and shas. This is the preferred way to work + with Lua scripts. + """ + return Script(self, script) + + +class Redis(StrictRedis): + """ + Provides backwards compatibility with older versions of redis-py that + changed arguments to some commands to be more Pythonic, sane, or by + accident. + """ + + # Overridden callbacks + RESPONSE_CALLBACKS = dict_merge( + StrictRedis.RESPONSE_CALLBACKS, + { + 'TTL': lambda r: r >= 0 and r or None, + 'PTTL': lambda r: r >= 0 and r or None, + } + ) + + def pipeline(self, transaction=True, shard_hint=None): + """ + Return a new pipeline object that can queue multiple commands for + later execution. ``transaction`` indicates whether all commands + should be executed atomically. Apart from making a group of operations + atomic, pipelines are useful for reducing the back-and-forth overhead + between the client and server. + """ + return Pipeline( + self.connection_pool, + self.response_callbacks, + transaction, + shard_hint) + + def setex(self, name, value, time): + """ + Set the value of key ``name`` to ``value`` that expires in ``time`` + seconds. ``time`` can be represented by an integer or a Python + timedelta object. 
+ """ + if isinstance(time, datetime.timedelta): + time = time.seconds + time.days * 24 * 3600 + return self.execute_command('SETEX', name, time, value) + + def lrem(self, name, value, num=0): + """ + Remove the first ``num`` occurrences of elements equal to ``value`` + from the list stored at ``name``. + + The ``num`` argument influences the operation in the following ways: + num > 0: Remove elements equal to value moving from head to tail. + num < 0: Remove elements equal to value moving from tail to head. + num = 0: Remove all elements equal to value. + """ + return self.execute_command('LREM', name, num, value) + + def zadd(self, name, *args, **kwargs): + """ + NOTE: The order of arguments differs from that of the official ZADD + command. For backwards compatability, this method accepts arguments + in the form of name1, score1, name2, score2, while the official Redis + documents expects score1, name1, score2, name2. + + If you're looking to use the standard syntax, consider using the + StrictRedis class. See the API Reference section of the docs for more + information. + + Set any number of element-name, score pairs to the key ``name``. Pairs + can be specified in two ways: + + As *args, in the form of: name1, score1, name2, score2, ... + or as **kwargs, in the form of: name1=score1, name2=score2, ... + + The following example would add four values to the 'my-key' key: + redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4) + """ + pieces = [] + if args: + if len(args) % 2 != 0: + raise RedisError("ZADD requires an equal number of " + "values and scores") + pieces.extend(reversed(args)) + for pair in iteritems(kwargs): + pieces.append(pair[1]) + pieces.append(pair[0]) + return self.execute_command('ZADD', name, *pieces) + + +class PubSub(object): + """ + PubSub provides publish, subscribe and listen support to Redis channels. 
+ + After subscribing to one or more channels, the listen() method will block + until a message arrives on one of the subscribed channels. That message + will be returned and it's safe to start listening again. + """ + PUBLISH_MESSAGE_TYPES = ('message', 'pmessage') + UNSUBSCRIBE_MESSAGE_TYPES = ('unsubscribe', 'punsubscribe') + + def __init__(self, connection_pool, shard_hint=None, + ignore_subscribe_messages=False): + self.connection_pool = connection_pool + self.shard_hint = shard_hint + self.ignore_subscribe_messages = ignore_subscribe_messages + self.connection = None + # we need to know the encoding options for this connection in order + # to lookup channel and pattern names for callback handlers. + conn = connection_pool.get_connection('pubsub', shard_hint) + try: + self.encoding = conn.encoding + self.encoding_errors = conn.encoding_errors + self.decode_responses = conn.decode_responses + finally: + connection_pool.release(conn) + self.reset() + + def __del__(self): + try: + # if this object went out of scope prior to shutting down + # subscriptions, close the connection manually before + # returning it to the connection pool + self.reset() + except Exception: + pass + + def reset(self): + if self.connection: + self.connection.disconnect() + self.connection.clear_connect_callbacks() + self.connection_pool.release(self.connection) + self.connection = None + self.channels = {} + self.patterns = {} + + def close(self): + self.reset() + + def on_connect(self, connection): + "Re-subscribe to any channels and patterns previously subscribed to" + # NOTE: for python3, we can't pass bytestrings as keyword arguments + # so we need to decode channel/pattern names back to unicode strings + # before passing them to [p]subscribe. 
+ if self.channels: + channels = {} + for k, v in iteritems(self.channels): + if not self.decode_responses: + k = k.decode(self.encoding, self.encoding_errors) + channels[k] = v + self.subscribe(**channels) + if self.patterns: + patterns = {} + for k, v in iteritems(self.patterns): + if not self.decode_responses: + k = k.decode(self.encoding, self.encoding_errors) + patterns[k] = v + self.psubscribe(**patterns) + + def encode(self, value): + """ + Encode the value so that it's identical to what we'll + read off the connection + """ + if self.decode_responses and isinstance(value, bytes): + value = value.decode(self.encoding, self.encoding_errors) + elif not self.decode_responses and isinstance(value, unicode): + value = value.encode(self.encoding, self.encoding_errors) + return value + + @property + def subscribed(self): + "Indicates if there are subscriptions to any channels or patterns" + return bool(self.channels or self.patterns) + + def execute_command(self, *args, **kwargs): + "Execute a publish/subscribe command" + + # NOTE: don't parse the response in this function. it could pull a + # legitmate message off the stack if the connection is already + # subscribed to one or more channels + + if self.connection is None: + self.connection = self.connection_pool.get_connection( + 'pubsub', + self.shard_hint + ) + # register a callback that re-subscribes to any channels we + # were listening to when we were disconnected + self.connection.register_connect_callback(self.on_connect) + connection = self.connection + self._execute(connection, connection.send_command, *args) + + def _execute(self, connection, command, *args): + try: + return command(*args) + except (ConnectionError, TimeoutError) as e: + connection.disconnect() + if not connection.retry_on_timeout and isinstance(e, TimeoutError): + raise + # Connect manually here. If the Redis server is down, this will + # fail and raise a ConnectionError as desired. 
+ connection.connect() + # the ``on_connect`` callback should haven been called by the + # connection to resubscribe us to any channels and patterns we were + # previously listening to + return command(*args) + + def parse_response(self, block=True): + "Parse the response from a publish/subscribe command" + connection = self.connection + if not block and not connection.can_read(): + return None + return self._execute(connection, connection.read_response) + + def psubscribe(self, *args, **kwargs): + """ + Subscribe to channel patterns. Patterns supplied as keyword arguments + expect a pattern name as the key and a callable as the value. A + pattern's callable will be invoked automatically when a message is + received on that pattern rather than producing a message via + ``listen()``. + """ + if args: + args = list_or_args(args[0], args[1:]) + new_patterns = {} + new_patterns.update(dict.fromkeys(imap(self.encode, args))) + for pattern, handler in iteritems(kwargs): + new_patterns[self.encode(pattern)] = handler + ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns)) + # update the patterns dict AFTER we send the command. we don't want to + # subscribe twice to these patterns, once for the command and again + # for the reconnection. + self.patterns.update(new_patterns) + return ret_val + + def punsubscribe(self, *args): + """ + Unsubscribe from the supplied patterns. If empy, unsubscribe from + all patterns. + """ + if args: + args = list_or_args(args[0], args[1:]) + return self.execute_command('PUNSUBSCRIBE', *args) + + def subscribe(self, *args, **kwargs): + """ + Subscribe to channels. Channels supplied as keyword arguments expect + a channel name as the key and a callable as the value. A channel's + callable will be invoked automatically when a message is received on + that channel rather than producing a message via ``listen()`` or + ``get_message()``. 
+ """ + if args: + args = list_or_args(args[0], args[1:]) + new_channels = {} + new_channels.update(dict.fromkeys(imap(self.encode, args))) + for channel, handler in iteritems(kwargs): + new_channels[self.encode(channel)] = handler + ret_val = self.execute_command('SUBSCRIBE', *iterkeys(new_channels)) + # update the channels dict AFTER we send the command. we don't want to + # subscribe twice to these channels, once for the command and again + # for the reconnection. + self.channels.update(new_channels) + return ret_val + + def unsubscribe(self, *args): + """ + Unsubscribe from the supplied channels. If empty, unsubscribe from + all channels + """ + if args: + args = list_or_args(args[0], args[1:]) + return self.execute_command('UNSUBSCRIBE', *args) + + def listen(self): + "Listen for messages on channels this client has been subscribed to" + while self.subscribed: + response = self.handle_message(self.parse_response(block=True)) + if response is not None: + yield response + + def get_message(self, ignore_subscribe_messages=False): + "Get the next message if one is available, otherwise None" + response = self.parse_response(block=False) + if response: + return self.handle_message(response, ignore_subscribe_messages) + return None + + def handle_message(self, response, ignore_subscribe_messages=False): + """ + Parses a pub/sub message. If the channel or pattern was subscribed to + with a message handler, the handler is invoked instead of a parsed + message being returned. 
+ """ + message_type = nativestr(response[0]) + if message_type == 'pmessage': + message = { + 'type': message_type, + 'pattern': response[1], + 'channel': response[2], + 'data': response[3] + } + else: + message = { + 'type': message_type, + 'pattern': None, + 'channel': response[1], + 'data': response[2] + } + + # if this is an unsubscribe message, remove it from memory + if message_type in self.UNSUBSCRIBE_MESSAGE_TYPES: + subscribed_dict = None + if message_type == 'punsubscribe': + subscribed_dict = self.patterns + else: + subscribed_dict = self.channels + try: + del subscribed_dict[message['channel']] + except KeyError: + pass + + if message_type in self.PUBLISH_MESSAGE_TYPES: + # if there's a message handler, invoke it + handler = None + if message_type == 'pmessage': + handler = self.patterns.get(message['pattern'], None) + else: + handler = self.channels.get(message['channel'], None) + if handler: + handler(message) + return None + else: + # this is a subscribe/unsubscribe message. 
ignore if we don't + # want them + if ignore_subscribe_messages or self.ignore_subscribe_messages: + return None + + return message + + def run_in_thread(self, sleep_time=0): + for channel, handler in iteritems(self.channels): + if handler is None: + raise PubSubError("Channel: '%s' has no handler registered") + for pattern, handler in iteritems(self.patterns): + if handler is None: + raise PubSubError("Pattern: '%s' has no handler registered") + pubsub = self + + class WorkerThread(threading.Thread): + def __init__(self, *args, **kwargs): + super(WorkerThread, self).__init__(*args, **kwargs) + self._running = False + + def run(self): + if self._running: + return + self._running = True + while self._running and pubsub.subscribed: + pubsub.get_message(ignore_subscribe_messages=True) + mod_time.sleep(sleep_time) + + def stop(self): + self._running = False + self.join() + + thread = WorkerThread() + thread.start() + return thread + + +class BasePipeline(object): + """ + Pipelines provide a way to transmit multiple commands to the Redis server + in one transmission. This is convenient for batch processing, such as + saving all the values in a list to Redis. + + All commands executed within a pipeline are wrapped with MULTI and EXEC + calls. This guarantees all commands executed in the pipeline will be + executed atomically. + + Any command raising an exception does *not* halt the execution of + subsequent commands in the pipeline. Instead, the exception is caught + and its instance is placed into the response list returned by execute(). + Code iterating over the response list should be able to deal with an + instance of an exception as a potential value. In general, these will be + ResponseError exceptions, such as those raised when issuing a command + on a key of a different datatype. 
+ """ + + UNWATCH_COMMANDS = set(('DISCARD', 'EXEC', 'UNWATCH')) + + def __init__(self, connection_pool, response_callbacks, transaction, + shard_hint): + self.connection_pool = connection_pool + self.connection = None + self.response_callbacks = response_callbacks + self.transaction = transaction + self.shard_hint = shard_hint + + self.watching = False + self.reset() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.reset() + + def __del__(self): + try: + self.reset() + except Exception: + pass + + def __len__(self): + return len(self.command_stack) + + def reset(self): + self.command_stack = [] + self.scripts = set() + # make sure to reset the connection state in the event that we were + # watching something + if self.watching and self.connection: + try: + # call this manually since our unwatch or + # immediate_execute_command methods can call reset() + self.connection.send_command('UNWATCH') + self.connection.read_response() + except ConnectionError: + # disconnect will also remove any previous WATCHes + self.connection.disconnect() + # clean up the other instance attributes + self.watching = False + self.explicit_transaction = False + # we can safely return the connection to the pool here since we're + # sure we're no longer WATCHing anything + if self.connection: + self.connection_pool.release(self.connection) + self.connection = None + + def multi(self): + """ + Start a transactional block of the pipeline after WATCH commands + are issued. End the transactional block with `execute`. 
+ """ + if self.explicit_transaction: + raise RedisError('Cannot issue nested calls to MULTI') + if self.command_stack: + raise RedisError('Commands without an initial WATCH have already ' + 'been issued') + self.explicit_transaction = True + + def execute_command(self, *args, **kwargs): + if (self.watching or args[0] == 'WATCH') and \ + not self.explicit_transaction: + return self.immediate_execute_command(*args, **kwargs) + return self.pipeline_execute_command(*args, **kwargs) + + def immediate_execute_command(self, *args, **options): + """ + Execute a command immediately, but don't auto-retry on a + ConnectionError if we're already WATCHing a variable. Used when + issuing WATCH or subsequent commands retrieving their values but before + MULTI is called. + """ + command_name = args[0] + conn = self.connection + # if this is the first call, we need a connection + if not conn: + conn = self.connection_pool.get_connection(command_name, + self.shard_hint) + self.connection = conn + try: + conn.send_command(*args) + return self.parse_response(conn, command_name, **options) + except (ConnectionError, TimeoutError) as e: + conn.disconnect() + if not conn.retry_on_timeout and isinstance(e, TimeoutError): + raise + # if we're not already watching, we can safely retry the command + try: + if not self.watching: + conn.send_command(*args) + return self.parse_response(conn, command_name, **options) + except ConnectionError: + # the retry failed so cleanup. + conn.disconnect() + self.reset() + raise + + def pipeline_execute_command(self, *args, **options): + """ + Stage a command to be executed when execute() is next called + + Returns the current Pipeline object back so commands can be + chained together, such as: + + pipe = pipe.set('foo', 'bar').incr('baz').decr('bang') + + At some other point, you can then run: pipe.execute(), + which will execute all commands queued in the pipe. 
+ """ + self.command_stack.append((args, options)) + return self + + def _execute_transaction(self, connection, commands, raise_on_error): + cmds = chain([(('MULTI', ), {})], commands, [(('EXEC', ), {})]) + all_cmds = connection.pack_commands([args for args, _ in cmds]) + connection.send_packed_command(all_cmds) + errors = [] + + # parse off the response for MULTI + # NOTE: we need to handle ResponseErrors here and continue + # so that we read all the additional command messages from + # the socket + try: + self.parse_response(connection, '_') + except ResponseError: + errors.append((0, sys.exc_info()[1])) + + # and all the other commands + for i, command in enumerate(commands): + try: + self.parse_response(connection, '_') + except ResponseError: + ex = sys.exc_info()[1] + self.annotate_exception(ex, i + 1, command[0]) + errors.append((i, ex)) + + # parse the EXEC. + try: + response = self.parse_response(connection, '_') + except ExecAbortError: + if self.explicit_transaction: + self.immediate_execute_command('DISCARD') + if errors: + raise errors[0][1] + raise sys.exc_info()[1] + + if response is None: + raise WatchError("Watched variable changed.") + + # put any parse errors into the response + for i, e in errors: + response.insert(i, e) + + if len(response) != len(commands): + self.connection.disconnect() + raise ResponseError("Wrong number of response items from " + "pipeline execution") + + # find any errors in the response and raise if necessary + if raise_on_error: + self.raise_first_error(commands, response) + + # We have to run response callbacks manually + data = [] + for r, cmd in izip(response, commands): + if not isinstance(r, Exception): + args, options = cmd + command_name = args[0] + if command_name in self.response_callbacks: + r = self.response_callbacks[command_name](r, **options) + data.append(r) + return data + + def _execute_pipeline(self, connection, commands, raise_on_error): + # build up all commands into a single request to increase 
network perf + all_cmds = connection.pack_commands([args for args, _ in commands]) + connection.send_packed_command(all_cmds) + + response = [] + for args, options in commands: + try: + response.append( + self.parse_response(connection, args[0], **options)) + except ResponseError: + response.append(sys.exc_info()[1]) + + if raise_on_error: + self.raise_first_error(commands, response) + return response + + def raise_first_error(self, commands, response): + for i, r in enumerate(response): + if isinstance(r, ResponseError): + self.annotate_exception(r, i + 1, commands[i][0]) + raise r + + def annotate_exception(self, exception, number, command): + cmd = unicode(' ').join(imap(unicode, command)) + msg = unicode('Command # %d (%s) of pipeline caused error: %s') % ( + number, cmd, unicode(exception.args[0])) + exception.args = (msg,) + exception.args[1:] + + def parse_response(self, connection, command_name, **options): + result = StrictRedis.parse_response( + self, connection, command_name, **options) + if command_name in self.UNWATCH_COMMANDS: + self.watching = False + elif command_name == 'WATCH': + self.watching = True + return result + + def load_scripts(self): + # make sure all scripts that are about to be run on this pipeline exist + scripts = list(self.scripts) + immediate = self.immediate_execute_command + shas = [s.sha for s in scripts] + # we can't use the normal script_* methods because they would just + # get buffered in the pipeline. 
+ exists = immediate('SCRIPT', 'EXISTS', *shas, **{'parse': 'EXISTS'}) + if not all(exists): + for s, exist in izip(scripts, exists): + if not exist: + s.sha = immediate('SCRIPT', 'LOAD', s.script, + **{'parse': 'LOAD'}) + + def execute(self, raise_on_error=True): + "Execute all the commands in the current pipeline" + stack = self.command_stack + if not stack: + return [] + if self.scripts: + self.load_scripts() + if self.transaction or self.explicit_transaction: + execute = self._execute_transaction + else: + execute = self._execute_pipeline + + conn = self.connection + if not conn: + conn = self.connection_pool.get_connection('MULTI', + self.shard_hint) + # assign to self.connection so reset() releases the connection + # back to the pool after we're done + self.connection = conn + + try: + return execute(conn, stack, raise_on_error) + except (ConnectionError, TimeoutError) as e: + conn.disconnect() + if not conn.retry_on_timeout and isinstance(e, TimeoutError): + raise + # if we were watching a variable, the watch is no longer valid + # since this connection has died. raise a WatchError, which + # indicates the user should retry his transaction. 
If this is more + # than a temporary failure, the WATCH that the user next issues + # will fail, propegating the real ConnectionError + if self.watching: + raise WatchError("A ConnectionError occured on while watching " + "one or more keys") + # otherwise, it's safe to retry since the transaction isn't + # predicated on any state + return execute(conn, stack, raise_on_error) + finally: + self.reset() + + def watch(self, *names): + "Watches the values at keys ``names``" + if self.explicit_transaction: + raise RedisError('Cannot issue a WATCH after a MULTI') + return self.execute_command('WATCH', *names) + + def unwatch(self): + "Unwatches all previously specified keys" + return self.watching and self.execute_command('UNWATCH') or True + + def script_load_for_pipeline(self, script): + "Make sure scripts are loaded prior to pipeline execution" + # we need the sha now so that Script.__call__ can use it to run + # evalsha. + if not script.sha: + script.sha = self.immediate_execute_command('SCRIPT', 'LOAD', + script.script, + **{'parse': 'LOAD'}) + self.scripts.add(script) + + +class StrictPipeline(BasePipeline, StrictRedis): + "Pipeline for the StrictRedis class" + pass + + +class Pipeline(BasePipeline, Redis): + "Pipeline for the Redis class" + pass + + +class Script(object): + "An executable Lua script object returned by ``register_script``" + + def __init__(self, registered_client, script): + self.registered_client = registered_client + self.script = script + self.sha = '' + + def __call__(self, keys=[], args=[], client=None): + "Execute the script, passing any required ``args``" + if client is None: + client = self.registered_client + args = tuple(keys) + tuple(args) + # make sure the Redis server knows about the script + if isinstance(client, BasePipeline): + # make sure this script is good to go on pipeline + client.script_load_for_pipeline(self) + try: + return client.evalsha(self.sha, len(keys), *args) + except NoScriptError: + # Maybe the client is pointed to a 
differnet server than the client + # that created this instance? + self.sha = client.script_load(self.script) + return client.evalsha(self.sha, len(keys), *args) diff --git a/awx/lib/site-packages/redis/redis/connection.py b/awx/lib/site-packages/redis/redis/connection.py new file mode 100755 index 0000000000..4c8b68130c --- /dev/null +++ b/awx/lib/site-packages/redis/redis/connection.py @@ -0,0 +1,1017 @@ +from __future__ import with_statement +from distutils.version import StrictVersion +from itertools import chain +from select import select +import os +import socket +import sys +import threading +import warnings + +try: + import ssl + ssl_available = True +except ImportError: + ssl_available = False + +from redis._compat import (b, xrange, imap, byte_to_chr, unicode, bytes, long, + BytesIO, nativestr, basestring, iteritems, + LifoQueue, Empty, Full, urlparse, parse_qs) +from redis.exceptions import ( + RedisError, + ConnectionError, + TimeoutError, + BusyLoadingError, + ResponseError, + InvalidResponse, + AuthenticationError, + NoScriptError, + ExecAbortError, + ReadOnlyError +) +from redis.utils import HIREDIS_AVAILABLE +if HIREDIS_AVAILABLE: + import hiredis + + hiredis_version = StrictVersion(hiredis.__version__) + HIREDIS_SUPPORTS_CALLABLE_ERRORS = \ + hiredis_version >= StrictVersion('0.1.3') + HIREDIS_SUPPORTS_BYTE_BUFFER = \ + hiredis_version >= StrictVersion('0.1.4') + + if not HIREDIS_SUPPORTS_BYTE_BUFFER: + msg = ("redis-py works best with hiredis >= 0.1.4. You're running " + "hiredis %s. Please consider upgrading." 
% hiredis.__version__) + warnings.warn(msg) + + HIREDIS_USE_BYTE_BUFFER = True + # only use byte buffer if hiredis supports it and the Python version + # is >= 2.7 + if not HIREDIS_SUPPORTS_BYTE_BUFFER or ( + sys.version_info[0] == 2 and sys.version_info[1] < 7): + HIREDIS_USE_BYTE_BUFFER = False + +SYM_STAR = b('*') +SYM_DOLLAR = b('$') +SYM_CRLF = b('\r\n') +SYM_EMPTY = b('') + +SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server." + + +class Token(object): + """ + Literal strings in Redis commands, such as the command names and any + hard-coded arguments are wrapped in this class so we know not to apply + and encoding rules on them. + """ + def __init__(self, value): + if isinstance(value, Token): + value = value.value + self.value = value + + def __repr__(self): + return self.value + + def __str__(self): + return self.value + + +class BaseParser(object): + EXCEPTION_CLASSES = { + 'ERR': ResponseError, + 'EXECABORT': ExecAbortError, + 'LOADING': BusyLoadingError, + 'NOSCRIPT': NoScriptError, + 'READONLY': ReadOnlyError, + } + + def parse_error(self, response): + "Parse an error response" + error_code = response.split(' ')[0] + if error_code in self.EXCEPTION_CLASSES: + response = response[len(error_code) + 1:] + return self.EXCEPTION_CLASSES[error_code](response) + return ResponseError(response) + + +class SocketBuffer(object): + def __init__(self, socket, socket_read_size): + self._sock = socket + self.socket_read_size = socket_read_size + self._buffer = BytesIO() + # number of bytes written to the buffer from the socket + self.bytes_written = 0 + # number of bytes read from the buffer + self.bytes_read = 0 + + @property + def length(self): + return self.bytes_written - self.bytes_read + + def _read_from_socket(self, length=None): + socket_read_size = self.socket_read_size + buf = self._buffer + buf.seek(self.bytes_written) + marker = 0 + + try: + while True: + data = self._sock.recv(socket_read_size) + # an empty string indicates the server shutdown 
the socket + if isinstance(data, bytes) and len(data) == 0: + raise socket.error(SERVER_CLOSED_CONNECTION_ERROR) + buf.write(data) + data_length = len(data) + self.bytes_written += data_length + marker += data_length + + if length is not None and length > marker: + continue + break + except socket.timeout: + raise TimeoutError("Timeout reading from socket") + except socket.error: + e = sys.exc_info()[1] + raise ConnectionError("Error while reading from socket: %s" % + (e.args,)) + + def read(self, length): + length = length + 2 # make sure to read the \r\n terminator + # make sure we've read enough data from the socket + if length > self.length: + self._read_from_socket(length - self.length) + + self._buffer.seek(self.bytes_read) + data = self._buffer.read(length) + self.bytes_read += len(data) + + # purge the buffer when we've consumed it all so it doesn't + # grow forever + if self.bytes_read == self.bytes_written: + self.purge() + + return data[:-2] + + def readline(self): + buf = self._buffer + buf.seek(self.bytes_read) + data = buf.readline() + while not data.endswith(SYM_CRLF): + # there's more data in the socket that we need + self._read_from_socket() + buf.seek(self.bytes_read) + data = buf.readline() + + self.bytes_read += len(data) + + # purge the buffer when we've consumed it all so it doesn't + # grow forever + if self.bytes_read == self.bytes_written: + self.purge() + + return data[:-2] + + def purge(self): + self._buffer.seek(0) + self._buffer.truncate() + self.bytes_written = 0 + self.bytes_read = 0 + + def close(self): + self.purge() + self._buffer.close() + self._buffer = None + self._sock = None + + +class PythonParser(BaseParser): + "Plain Python parsing class" + encoding = None + + def __init__(self, socket_read_size): + self.socket_read_size = socket_read_size + self._sock = None + self._buffer = None + + def __del__(self): + try: + self.on_disconnect() + except Exception: + pass + + def on_connect(self, connection): + "Called when the socket 
connects" + self._sock = connection._sock + self._buffer = SocketBuffer(self._sock, self.socket_read_size) + if connection.decode_responses: + self.encoding = connection.encoding + + def on_disconnect(self): + "Called when the socket disconnects" + if self._sock is not None: + self._sock.close() + self._sock = None + if self._buffer is not None: + self._buffer.close() + self._buffer = None + self.encoding = None + + def can_read(self): + return self._buffer and bool(self._buffer.length) + + def read_response(self): + response = self._buffer.readline() + if not response: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + + byte, response = byte_to_chr(response[0]), response[1:] + + if byte not in ('-', '+', ':', '$', '*'): + raise InvalidResponse("Protocol Error: %s, %s" % + (str(byte), str(response))) + + # server returned an error + if byte == '-': + response = nativestr(response) + error = self.parse_error(response) + # if the error is a ConnectionError, raise immediately so the user + # is notified + if isinstance(error, ConnectionError): + raise error + # otherwise, we're dealing with a ResponseError that might belong + # inside a pipeline response. the connection's read_response() + # and/or the pipeline's execute() will raise this error if + # necessary, so just return the exception instance here. 
+ return error + # single value + elif byte == '+': + pass + # int value + elif byte == ':': + response = long(response) + # bulk response + elif byte == '$': + length = int(response) + if length == -1: + return None + response = self._buffer.read(length) + # multi-bulk response + elif byte == '*': + length = int(response) + if length == -1: + return None + response = [self.read_response() for i in xrange(length)] + if isinstance(response, bytes) and self.encoding: + response = response.decode(self.encoding) + return response + + +class HiredisParser(BaseParser): + "Parser class for connections using Hiredis" + def __init__(self, socket_read_size): + if not HIREDIS_AVAILABLE: + raise RedisError("Hiredis is not installed") + self.socket_read_size = socket_read_size + + if HIREDIS_USE_BYTE_BUFFER: + self._buffer = bytearray(socket_read_size) + + def __del__(self): + try: + self.on_disconnect() + except Exception: + pass + + def on_connect(self, connection): + self._sock = connection._sock + kwargs = { + 'protocolError': InvalidResponse, + 'replyError': self.parse_error, + } + + # hiredis < 0.1.3 doesn't support functions that create exceptions + if not HIREDIS_SUPPORTS_CALLABLE_ERRORS: + kwargs['replyError'] = ResponseError + + if connection.decode_responses: + kwargs['encoding'] = connection.encoding + self._reader = hiredis.Reader(**kwargs) + self._next_response = False + + def on_disconnect(self): + self._sock = None + self._reader = None + self._next_response = False + + def can_read(self): + if not self._reader: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + + if self._next_response is False: + self._next_response = self._reader.gets() + return self._next_response is not False + + def read_response(self): + if not self._reader: + raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) + + # _next_response might be cached from a can_read() call + if self._next_response is not False: + response = self._next_response + self._next_response = False + return 
response + + response = self._reader.gets() + socket_read_size = self.socket_read_size + while response is False: + try: + if HIREDIS_USE_BYTE_BUFFER: + bufflen = self._sock.recv_into(self._buffer) + if bufflen == 0: + raise socket.error(SERVER_CLOSED_CONNECTION_ERROR) + else: + buffer = self._sock.recv(socket_read_size) + # an empty string indicates the server shutdown the socket + if not isinstance(buffer, bytes) or len(buffer) == 0: + raise socket.error(SERVER_CLOSED_CONNECTION_ERROR) + except socket.timeout: + raise TimeoutError("Timeout reading from socket") + except socket.error: + e = sys.exc_info()[1] + raise ConnectionError("Error while reading from socket: %s" % + (e.args,)) + if HIREDIS_USE_BYTE_BUFFER: + self._reader.feed(self._buffer, 0, bufflen) + else: + self._reader.feed(buffer) + # proactively, but not conclusively, check if more data is in the + # buffer. if the data received doesn't end with \r\n, there's more. + if HIREDIS_USE_BYTE_BUFFER: + if bufflen > 2 and self._buffer[bufflen - 2:bufflen] != SYM_CRLF: + continue + else: + if not buffer.endswith(SYM_CRLF): + continue + response = self._reader.gets() + # if an older version of hiredis is installed, we need to attempt + # to convert ResponseErrors to their appropriate types. 
+ if not HIREDIS_SUPPORTS_CALLABLE_ERRORS: + if isinstance(response, ResponseError): + response = self.parse_error(response.args[0]) + elif isinstance(response, list) and response and \ + isinstance(response[0], ResponseError): + response[0] = self.parse_error(response[0].args[0]) + # if the response is a ConnectionError or the response is a list and + # the first item is a ConnectionError, raise it as something bad + # happened + if isinstance(response, ConnectionError): + raise response + elif isinstance(response, list) and response and \ + isinstance(response[0], ConnectionError): + raise response[0] + return response + +if HIREDIS_AVAILABLE: + DefaultParser = HiredisParser +else: + DefaultParser = PythonParser + + +class Connection(object): + "Manages TCP communication to and from a Redis server" + description_format = "Connection" + + def __init__(self, host='localhost', port=6379, db=0, password=None, + socket_timeout=None, socket_connect_timeout=None, + socket_keepalive=False, socket_keepalive_options=None, + retry_on_timeout=False, encoding='utf-8', + encoding_errors='strict', decode_responses=False, + parser_class=DefaultParser, socket_read_size=65536): + self.pid = os.getpid() + self.host = host + self.port = int(port) + self.db = db + self.password = password + self.socket_timeout = socket_timeout + self.socket_connect_timeout = socket_connect_timeout or socket_timeout + self.socket_keepalive = socket_keepalive + self.socket_keepalive_options = socket_keepalive_options or {} + self.retry_on_timeout = retry_on_timeout + self.encoding = encoding + self.encoding_errors = encoding_errors + self.decode_responses = decode_responses + self._sock = None + self._parser = parser_class(socket_read_size=socket_read_size) + self._description_args = { + 'host': self.host, + 'port': self.port, + 'db': self.db, + } + self._connect_callbacks = [] + + def __repr__(self): + return self.description_format % self._description_args + + def __del__(self): + try: + 
self.disconnect() + except Exception: + pass + + def register_connect_callback(self, callback): + self._connect_callbacks.append(callback) + + def clear_connect_callbacks(self): + self._connect_callbacks = [] + + def connect(self): + "Connects to the Redis server if not already connected" + if self._sock: + return + try: + sock = self._connect() + except socket.error: + e = sys.exc_info()[1] + raise ConnectionError(self._error_message(e)) + + self._sock = sock + try: + self.on_connect() + except RedisError: + # clean up after any error in on_connect + self.disconnect() + raise + + # run any user callbacks. right now the only internal callback + # is for pubsub channel/pattern resubscription + for callback in self._connect_callbacks: + callback(self) + + def _connect(self): + "Create a TCP socket connection" + # we want to mimic what socket.create_connection does to support + # ipv4/ipv6, but we want to set options prior to calling + # socket.connect() + err = None + for res in socket.getaddrinfo(self.host, self.port, 0, + socket.SOCK_STREAM): + family, socktype, proto, canonname, socket_address = res + sock = None + try: + sock = socket.socket(family, socktype, proto) + # TCP_NODELAY + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + + # TCP_KEEPALIVE + if self.socket_keepalive: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + for k, v in iteritems(self.socket_keepalive_options): + sock.setsockopt(socket.SOL_TCP, k, v) + + # set the socket_connect_timeout before we connect + sock.settimeout(self.socket_connect_timeout) + + # connect + sock.connect(socket_address) + + # set the socket_timeout now that we're connected + sock.settimeout(self.socket_timeout) + return sock + + except socket.error as _: + err = _ + if sock is not None: + sock.close() + + if err is not None: + raise err + raise socket.error("socket.getaddrinfo returned an empty list") + + def _error_message(self, exception): + # args for socket.error can either be (errno, 
"message") + # or just "message" + if len(exception.args) == 1: + return "Error connecting to %s:%s. %s." % \ + (self.host, self.port, exception.args[0]) + else: + return "Error %s connecting to %s:%s. %s." % \ + (exception.args[0], self.host, self.port, exception.args[1]) + + def on_connect(self): + "Initialize the connection, authenticate and select a database" + self._parser.on_connect(self) + + # if a password is specified, authenticate + if self.password: + self.send_command('AUTH', self.password) + if nativestr(self.read_response()) != 'OK': + raise AuthenticationError('Invalid Password') + + # if a database is specified, switch to it + if self.db: + self.send_command('SELECT', self.db) + if nativestr(self.read_response()) != 'OK': + raise ConnectionError('Invalid Database') + + def disconnect(self): + "Disconnects from the Redis server" + self._parser.on_disconnect() + if self._sock is None: + return + try: + self._sock.shutdown(socket.SHUT_RDWR) + self._sock.close() + except socket.error: + pass + self._sock = None + + def send_packed_command(self, command): + "Send an already packed command to the Redis server" + if not self._sock: + self.connect() + try: + if isinstance(command, str): + command = [command] + for item in command: + self._sock.sendall(item) + except socket.timeout: + self.disconnect() + raise TimeoutError("Timeout writing to socket") + except socket.error: + e = sys.exc_info()[1] + self.disconnect() + if len(e.args) == 1: + _errno, errmsg = 'UNKNOWN', e.args[0] + else: + _errno, errmsg = e.args + raise ConnectionError("Error %s while writing to socket. %s." % + (_errno, errmsg)) + except: + self.disconnect() + raise + + def send_command(self, *args): + "Pack and send a command to the Redis server" + self.send_packed_command(self.pack_command(*args)) + + def can_read(self): + "Poll the socket to see if there's data that can be read." 
+ sock = self._sock + if not sock: + self.connect() + sock = self._sock + return bool(select([sock], [], [], 0)[0]) or self._parser.can_read() + + def read_response(self): + "Read the response from a previously sent command" + try: + response = self._parser.read_response() + except: + self.disconnect() + raise + if isinstance(response, ResponseError): + raise response + return response + + def encode(self, value): + "Return a bytestring representation of the value" + if isinstance(value, Token): + return b(value.value) + elif isinstance(value, bytes): + return value + elif isinstance(value, (int, long)): + value = b(str(value)) + elif isinstance(value, float): + value = b(repr(value)) + elif not isinstance(value, basestring): + value = str(value) + if isinstance(value, unicode): + value = value.encode(self.encoding, self.encoding_errors) + return value + + def pack_command(self, *args): + "Pack a series of arguments into the Redis protocol" + output = [] + # the client might have included 1 or more literal arguments in + # the command name, e.g., 'CONFIG GET'. The Redis server expects these + # arguments to be sent separately, so split the first argument + # manually. All of these arguements get wrapped in the Token class + # to prevent them from being encoded. 
+ command = args[0] + if ' ' in command: + args = tuple([Token(s) for s in command.split(' ')]) + args[1:] + else: + args = (Token(command),) + args[1:] + + buff = SYM_EMPTY.join( + (SYM_STAR, b(str(len(args))), SYM_CRLF)) + + for arg in imap(self.encode, args): + # to avoid large string mallocs, chunk the command into the + # output list if we're sending large values + if len(buff) > 6000 or len(arg) > 6000: + buff = SYM_EMPTY.join( + (buff, SYM_DOLLAR, b(str(len(arg))), SYM_CRLF)) + output.append(buff) + output.append(arg) + buff = SYM_CRLF + else: + buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(arg))), + SYM_CRLF, arg, SYM_CRLF)) + output.append(buff) + return output + + def pack_commands(self, commands): + "Pack multiple commands into the Redis protocol" + output = [] + pieces = [] + buffer_length = 0 + + for cmd in commands: + for chunk in self.pack_command(*cmd): + pieces.append(chunk) + buffer_length += len(chunk) + + if buffer_length > 6000: + output.append(SYM_EMPTY.join(pieces)) + buffer_length = 0 + pieces = [] + + if pieces: + output.append(SYM_EMPTY.join(pieces)) + return output + + +class SSLConnection(Connection): + description_format = "SSLConnection" + + def __init__(self, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None, + ssl_ca_certs=None, **kwargs): + if not ssl_available: + raise RedisError("Python wasn't built with SSL support") + + super(SSLConnection, self).__init__(**kwargs) + + self.keyfile = ssl_keyfile + self.certfile = ssl_certfile + if ssl_cert_reqs is None: + ssl_cert_reqs = ssl.CERT_NONE + elif isinstance(ssl_cert_reqs, basestring): + CERT_REQS = { + 'none': ssl.CERT_NONE, + 'optional': ssl.CERT_OPTIONAL, + 'required': ssl.CERT_REQUIRED + } + if ssl_cert_reqs not in CERT_REQS: + raise RedisError( + "Invalid SSL Certificate Requirements Flag: %s" % + ssl_cert_reqs) + ssl_cert_reqs = CERT_REQS[ssl_cert_reqs] + self.cert_reqs = ssl_cert_reqs + self.ca_certs = ssl_ca_certs + + def _connect(self): + "Wrap the socket with SSL 
support" + sock = super(SSLConnection, self)._connect() + sock = ssl.wrap_socket(sock, + cert_reqs=self.cert_reqs, + keyfile=self.keyfile, + certfile=self.certfile, + ca_certs=self.ca_certs) + return sock + + +class UnixDomainSocketConnection(Connection): + description_format = "UnixDomainSocketConnection" + + def __init__(self, path='', db=0, password=None, + socket_timeout=None, encoding='utf-8', + encoding_errors='strict', decode_responses=False, + retry_on_timeout=False, + parser_class=DefaultParser, socket_read_size=65536): + self.pid = os.getpid() + self.path = path + self.db = db + self.password = password + self.socket_timeout = socket_timeout + self.retry_on_timeout = retry_on_timeout + self.encoding = encoding + self.encoding_errors = encoding_errors + self.decode_responses = decode_responses + self._sock = None + self._parser = parser_class(socket_read_size=socket_read_size) + self._description_args = { + 'path': self.path, + 'db': self.db, + } + self._connect_callbacks = [] + + def _connect(self): + "Create a Unix domain socket connection" + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.settimeout(self.socket_timeout) + sock.connect(self.path) + return sock + + def _error_message(self, exception): + # args for socket.error can either be (errno, "message") + # or just "message" + if len(exception.args) == 1: + return "Error connecting to unix socket: %s. %s." % \ + (self.path, exception.args[0]) + else: + return "Error %s connecting to unix socket: %s. %s." % \ + (exception.args[0], self.path, exception.args[1]) + + +class ConnectionPool(object): + "Generic connection pool" + @classmethod + def from_url(cls, url, db=None, **kwargs): + """ + Return a connection pool configured from the given URL. 
+ + For example:: + + redis://[:password]@localhost:6379/0 + rediss://[:password]@localhost:6379/0 + unix://[:password]@/path/to/socket.sock?db=0 + + Three URL schemes are supported: + redis:// creates a normal TCP socket connection + rediss:// creates a SSL wrapped TCP socket connection + unix:// creates a Unix Domain Socket connection + + There are several ways to specify a database number. The parse function + will return the first specified option: + 1. A ``db`` querystring option, e.g. redis://localhost?db=0 + 2. If using the redis:// scheme, the path argument of the url, e.g. + redis://localhost/0 + 3. The ``db`` argument to this function. + + If none of these options are specified, db=0 is used. + + Any additional querystring arguments and keyword arguments will be + passed along to the ConnectionPool class's initializer. In the case + of conflicting arguments, querystring arguments always win. + """ + url_string = url + url = urlparse(url) + qs = '' + + # in python2.6, custom URL schemes don't recognize querystring values + # they're left as part of the url.path. + if '?' in url.path and not url.query: + # chop the querystring including the ? off the end of the url + # and reparse it. + qs = url.path.split('?', 1)[1] + url = urlparse(url_string[:-(len(qs) + 1)]) + else: + qs = url.query + + url_options = {} + + for name, value in iteritems(parse_qs(qs)): + if value and len(value) > 0: + url_options[name] = value[0] + + # We only support redis:// and unix:// schemes. 
+ if url.scheme == 'unix': + url_options.update({ + 'password': url.password, + 'path': url.path, + 'connection_class': UnixDomainSocketConnection, + }) + + else: + url_options.update({ + 'host': url.hostname, + 'port': int(url.port or 6379), + 'password': url.password, + }) + + # If there's a path argument, use it as the db argument if a + # querystring value wasn't specified + if 'db' not in url_options and url.path: + try: + url_options['db'] = int(url.path.replace('/', '')) + except (AttributeError, ValueError): + pass + + if url.scheme == 'rediss': + url_options['connection_class'] = SSLConnection + + # last shot at the db value + url_options['db'] = int(url_options.get('db', db or 0)) + + # update the arguments from the URL values + kwargs.update(url_options) + + # backwards compatability + if 'charset' in kwargs: + warnings.warn(DeprecationWarning( + '"charset" is deprecated. Use "encoding" instead')) + kwargs['encoding'] = kwargs.pop('charset') + if 'errors' in kwargs: + warnings.warn(DeprecationWarning( + '"errors" is deprecated. Use "encoding_errors" instead')) + kwargs['encoding_errors'] = kwargs.pop('errors') + + return cls(**kwargs) + + def __init__(self, connection_class=Connection, max_connections=None, + **connection_kwargs): + """ + Create a connection pool. If max_connections is set, then this + object raises redis.ConnectionError when the pool's limit is reached. + + By default, TCP connections are created connection_class is specified. + Use redis.UnixDomainSocketConnection for unix sockets. + + Any additional keyword arguments are passed to the constructor of + connection_class. 
+ """ + max_connections = max_connections or 2 ** 31 + if not isinstance(max_connections, (int, long)) or max_connections < 0: + raise ValueError('"max_connections" must be a positive integer') + + self.connection_class = connection_class + self.connection_kwargs = connection_kwargs + self.max_connections = max_connections + + self.reset() + + def __repr__(self): + return "%s<%s>" % ( + type(self).__name__, + self.connection_class.description_format % self.connection_kwargs, + ) + + def reset(self): + self.pid = os.getpid() + self._created_connections = 0 + self._available_connections = [] + self._in_use_connections = set() + self._check_lock = threading.Lock() + + def _checkpid(self): + if self.pid != os.getpid(): + with self._check_lock: + if self.pid == os.getpid(): + # another thread already did the work while we waited + # on the lock. + return + self.disconnect() + self.reset() + + def get_connection(self, command_name, *keys, **options): + "Get a connection from the pool" + self._checkpid() + try: + connection = self._available_connections.pop() + except IndexError: + connection = self.make_connection() + self._in_use_connections.add(connection) + return connection + + def make_connection(self): + "Create a new connection" + if self._created_connections >= self.max_connections: + raise ConnectionError("Too many connections") + self._created_connections += 1 + return self.connection_class(**self.connection_kwargs) + + def release(self, connection): + "Releases the connection back to the pool" + self._checkpid() + if connection.pid != self.pid: + return + self._in_use_connections.remove(connection) + self._available_connections.append(connection) + + def disconnect(self): + "Disconnects all connections in the pool" + all_conns = chain(self._available_connections, + self._in_use_connections) + for connection in all_conns: + connection.disconnect() + + +class BlockingConnectionPool(ConnectionPool): + """ + Thread-safe blocking connection pool:: + + >>> from 
redis.client import Redis + >>> client = Redis(connection_pool=BlockingConnectionPool()) + + It performs the same function as the default + ``:py:class: ~redis.connection.ConnectionPool`` implementation, in that, + it maintains a pool of reusable connections that can be shared by + multiple redis clients (safely across threads if required). + + The difference is that, in the event that a client tries to get a + connection from the pool when all of connections are in use, rather than + raising a ``:py:class: ~redis.exceptions.ConnectionError`` (as the default + ``:py:class: ~redis.connection.ConnectionPool`` implementation does), it + makes the client wait ("blocks") for a specified number of seconds until + a connection becomes available. + + Use ``max_connections`` to increase / decrease the pool size:: + + >>> pool = BlockingConnectionPool(max_connections=10) + + Use ``timeout`` to tell it either how many seconds to wait for a connection + to become available, or to block forever: + + # Block forever. + >>> pool = BlockingConnectionPool(timeout=None) + + # Raise a ``ConnectionError`` after five seconds if a connection is + # not available. + >>> pool = BlockingConnectionPool(timeout=5) + """ + def __init__(self, max_connections=50, timeout=20, + connection_class=Connection, queue_class=LifoQueue, + **connection_kwargs): + + self.queue_class = queue_class + self.timeout = timeout + super(BlockingConnectionPool, self).__init__( + connection_class=connection_class, + max_connections=max_connections, + **connection_kwargs) + + def reset(self): + self.pid = os.getpid() + self._check_lock = threading.Lock() + + # Create and fill up a thread safe queue with ``None`` values. + self.pool = self.queue_class(self.max_connections) + while True: + try: + self.pool.put_nowait(None) + except Full: + break + + # Keep a list of actual connection instances so that we can + # disconnect them later. + self._connections = [] + + def make_connection(self): + "Make a fresh connection." 
+ connection = self.connection_class(**self.connection_kwargs) + self._connections.append(connection) + return connection + + def get_connection(self, command_name, *keys, **options): + """ + Get a connection, blocking for ``self.timeout`` until a connection + is available from the pool. + + If the connection returned is ``None`` then creates a new connection. + Because we use a last-in first-out queue, the existing connections + (having been returned to the pool after the initial ``None`` values + were added) will be returned before ``None`` values. This means we only + create new connections when we need to, i.e.: the actual number of + connections will only increase in response to demand. + """ + # Make sure we haven't changed process. + self._checkpid() + + # Try and get a connection from the pool. If one isn't available within + # self.timeout then raise a ``ConnectionError``. + connection = None + try: + connection = self.pool.get(block=True, timeout=self.timeout) + except Empty: + # Note that this is not caught by the redis client and will be + # raised unless handled by application code. If you want never to + raise ConnectionError("No connection available.") + + # If the ``connection`` is actually ``None`` then that's a cue to make + # a new connection to add to the pool. + if connection is None: + connection = self.make_connection() + + return connection + + def release(self, connection): + "Releases the connection back to the pool." + # Make sure we haven't changed process. + self._checkpid() + if connection.pid != self.pid: + return + + # Put the connection back into the pool. + try: + self.pool.put_nowait(connection) + except Full: + # perhaps the pool has been reset() after a fork? regardless, + # we don't want this connection + pass + + def disconnect(self): + "Disconnects all connections in the pool." 
+ for connection in self._connections: + connection.disconnect() diff --git a/awx/lib/site-packages/redis/redis/exceptions.py b/awx/lib/site-packages/redis/redis/exceptions.py new file mode 100644 index 0000000000..a8518c708a --- /dev/null +++ b/awx/lib/site-packages/redis/redis/exceptions.py @@ -0,0 +1,71 @@ +"Core exceptions raised by the Redis client" +from redis._compat import unicode + + +class RedisError(Exception): + pass + + +# python 2.5 doesn't implement Exception.__unicode__. Add it here to all +# our exception types +if not hasattr(RedisError, '__unicode__'): + def __unicode__(self): + if isinstance(self.args[0], unicode): + return self.args[0] + return unicode(self.args[0]) + RedisError.__unicode__ = __unicode__ + + +class AuthenticationError(RedisError): + pass + + +class ConnectionError(RedisError): + pass + + +class TimeoutError(RedisError): + pass + + +class BusyLoadingError(ConnectionError): + pass + + +class InvalidResponse(RedisError): + pass + + +class ResponseError(RedisError): + pass + + +class DataError(RedisError): + pass + + +class PubSubError(RedisError): + pass + + +class WatchError(RedisError): + pass + + +class NoScriptError(ResponseError): + pass + + +class ExecAbortError(ResponseError): + pass + + +class ReadOnlyError(ResponseError): + pass + + +class LockError(RedisError, ValueError): + "Errors acquiring or releasing a lock" + # NOTE: For backwards compatability, this class derives from ValueError. + # This was originally chosen to behave like threading.Lock. + pass diff --git a/awx/lib/site-packages/redis/redis/lock.py b/awx/lib/site-packages/redis/redis/lock.py new file mode 100644 index 0000000000..90f0e7af30 --- /dev/null +++ b/awx/lib/site-packages/redis/redis/lock.py @@ -0,0 +1,272 @@ +import threading +import time as mod_time +import uuid +from redis.exceptions import LockError, WatchError +from redis.utils import dummy +from redis._compat import b + + +class Lock(object): + """ + A shared, distributed Lock. 
Using Redis for locking allows the Lock + to be shared across processes and/or machines. + + It's left to the user to resolve deadlock issues and make sure + multiple clients play nicely together. + """ + def __init__(self, redis, name, timeout=None, sleep=0.1, + blocking=True, blocking_timeout=None, thread_local=True): + """ + Create a new Lock instance named ``name`` using the Redis client + supplied by ``redis``. + + ``timeout`` indicates a maximum life for the lock. + By default, it will remain locked until release() is called. + ``timeout`` can be specified as a float or integer, both representing + the number of seconds to wait. + + ``sleep`` indicates the amount of time to sleep per loop iteration + when the lock is in blocking mode and another client is currently + holding the lock. + + ``blocking`` indicates whether calling ``acquire`` should block until + the lock has been acquired or to fail immediately, causing ``acquire`` + to return False and the lock not being acquired. Defaults to True. + Note this value can be overridden by passing a ``blocking`` + argument to ``acquire``. + + ``blocking_timeout`` indicates the maximum amount of time in seconds to + spend trying to acquire the lock. A value of ``None`` indicates + continue trying forever. ``blocking_timeout`` can be specified as a + float or integer, both representing the number of seconds to wait. + + ``thread_local`` indicates whether the lock token is placed in + thread-local storage. By default, the token is placed in thread local + storage so that a thread only sees its token, not a token set by + another thread. Consider the following timeline: + + time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds. + thread-1 sets the token to "abc" + time: 1, thread-2 blocks trying to acquire `my-lock` using the + Lock instance. + time: 5, thread-1 has not yet completed. redis expires the lock + key. + time: 5, thread-2 acquired `my-lock` now that it's available. 
+ thread-2 sets the token to "xyz" + time: 6, thread-1 finishes its work and calls release(). if the + token is *not* stored in thread local storage, then + thread-1 would see the token value as "xyz" and would be + able to successfully release the thread-2's lock. + + In some use cases it's necessary to disable thread local storage. For + example, if you have code where one thread acquires a lock and passes + that lock instance to a worker thread to release later. If thread + local storage isn't disabled in this case, the worker thread won't see + the token set by the thread that acquired the lock. Our assumption + is that these cases aren't common and as such default to using + thread local storage. + """ + self.redis = redis + self.name = name + self.timeout = timeout + self.sleep = sleep + self.blocking = blocking + self.blocking_timeout = blocking_timeout + self.thread_local = bool(thread_local) + self.local = threading.local() if self.thread_local else dummy() + self.local.token = None + if self.timeout and self.sleep > self.timeout: + raise LockError("'sleep' must be less than 'timeout'") + + def __enter__(self): + # force blocking, as otherwise the user would have to check whether + # the lock was actually acquired or not. + self.acquire(blocking=True) + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.release() + + def acquire(self, blocking=None, blocking_timeout=None): + """ + Use Redis to hold a shared, distributed lock named ``name``. + Returns True once the lock is acquired. + + If ``blocking`` is False, always return immediately. If the lock + was acquired, return True, otherwise return False. + + ``blocking_timeout`` specifies the maximum number of seconds to + wait trying to acquire the lock. 
+ """ + sleep = self.sleep + token = b(uuid.uuid1().hex) + if blocking is None: + blocking = self.blocking + if blocking_timeout is None: + blocking_timeout = self.blocking_timeout + stop_trying_at = None + if blocking_timeout is not None: + stop_trying_at = mod_time.time() + blocking_timeout + while 1: + if self.do_acquire(token): + self.local.token = token + return True + if not blocking: + return False + if stop_trying_at is not None and mod_time.time() > stop_trying_at: + return False + mod_time.sleep(sleep) + + def do_acquire(self, token): + if self.redis.setnx(self.name, token): + if self.timeout: + # convert to milliseconds + timeout = int(self.timeout * 1000) + self.redis.pexpire(self.name, timeout) + return True + return False + + def release(self): + "Releases the already acquired lock" + expected_token = self.local.token + if expected_token is None: + raise LockError("Cannot release an unlocked lock") + self.local.token = None + self.do_release(expected_token) + + def do_release(self, expected_token): + name = self.name + + def execute_release(pipe): + lock_value = pipe.get(name) + if lock_value != expected_token: + raise LockError("Cannot release a lock that's no longer owned") + pipe.delete(name) + + self.redis.transaction(execute_release, name) + + def extend(self, additional_time): + """ + Adds more time to an already acquired lock. + + ``additional_time`` can be specified as an integer or a float, both + representing the number of seconds to add. 
+ """ + if self.local.token is None: + raise LockError("Cannot extend an unlocked lock") + if self.timeout is None: + raise LockError("Cannot extend a lock with no timeout") + return self.do_extend(additional_time) + + def do_extend(self, additional_time): + pipe = self.redis.pipeline() + pipe.watch(self.name) + lock_value = pipe.get(self.name) + if lock_value != self.local.token: + raise LockError("Cannot extend a lock that's no longer owned") + expiration = pipe.pttl(self.name) + if expiration is None or expiration < 0: + # Redis evicted the lock key between the previous get() and now + # we'll handle this when we call pexpire() + expiration = 0 + pipe.multi() + pipe.pexpire(self.name, expiration + int(additional_time * 1000)) + + try: + response = pipe.execute() + except WatchError: + # someone else acquired the lock + raise LockError("Cannot extend a lock that's no longer owned") + if not response[0]: + # pexpire returns False if the key doesn't exist + raise LockError("Cannot extend a lock that's no longer owned") + return True + + +class LuaLock(Lock): + """ + A lock implementation that uses Lua scripts rather than pipelines + and watches. 
+ """ + lua_acquire = None + lua_release = None + lua_extend = None + + # KEYS[1] - lock name + # ARGV[1] - token + # ARGV[2] - timeout in milliseconds + # return 1 if lock was acquired, otherwise 0 + LUA_ACQUIRE_SCRIPT = """ + if redis.call('setnx', KEYS[1], ARGV[1]) == 1 then + if ARGV[2] ~= '' then + redis.call('pexpire', KEYS[1], ARGV[2]) + end + return 1 + end + return 0 + """ + + # KEYS[1] - lock name + # ARGS[1] - token + # return 1 if the lock was released, otherwise 0 + LUA_RELEASE_SCRIPT = """ + local token = redis.call('get', KEYS[1]) + if not token or token ~= ARGV[1] then + return 0 + end + redis.call('del', KEYS[1]) + return 1 + """ + + # KEYS[1] - lock name + # ARGS[1] - token + # ARGS[2] - additional milliseconds + # return 1 if the locks time was extended, otherwise 0 + LUA_EXTEND_SCRIPT = """ + local token = redis.call('get', KEYS[1]) + if not token or token ~= ARGV[1] then + return 0 + end + local expiration = redis.call('pttl', KEYS[1]) + if not expiration then + expiration = 0 + end + if expiration < 0 then + return 0 + end + redis.call('pexpire', KEYS[1], expiration + ARGV[2]) + return 1 + """ + + def __init__(self, *args, **kwargs): + super(LuaLock, self).__init__(*args, **kwargs) + LuaLock.register_scripts(self.redis) + + @classmethod + def register_scripts(cls, redis): + if cls.lua_acquire is None: + cls.lua_acquire = redis.register_script(cls.LUA_ACQUIRE_SCRIPT) + if cls.lua_release is None: + cls.lua_release = redis.register_script(cls.LUA_RELEASE_SCRIPT) + if cls.lua_extend is None: + cls.lua_extend = redis.register_script(cls.LUA_EXTEND_SCRIPT) + + def do_acquire(self, token): + timeout = self.timeout and int(self.timeout * 1000) or '' + return bool(self.lua_acquire(keys=[self.name], + args=[token, timeout], + client=self.redis)) + + def do_release(self, expected_token): + if not bool(self.lua_release(keys=[self.name], + args=[expected_token], + client=self.redis)): + raise LockError("Cannot release a lock that's no longer owned") + + 
def do_extend(self, additional_time): + additional_time = int(additional_time * 1000) + if not bool(self.lua_extend(keys=[self.name], + args=[self.local.token, additional_time], + client=self.redis)): + raise LockError("Cannot extend a lock that's no longer owned") + return True diff --git a/awx/lib/site-packages/redis/redis/sentinel.py b/awx/lib/site-packages/redis/redis/sentinel.py new file mode 100644 index 0000000000..2f30062cd2 --- /dev/null +++ b/awx/lib/site-packages/redis/redis/sentinel.py @@ -0,0 +1,294 @@ +import os +import random +import weakref + +from redis.client import StrictRedis +from redis.connection import ConnectionPool, Connection +from redis.exceptions import ConnectionError, ResponseError, ReadOnlyError +from redis._compat import iteritems, nativestr, xrange + + +class MasterNotFoundError(ConnectionError): + pass + + +class SlaveNotFoundError(ConnectionError): + pass + + +class SentinelManagedConnection(Connection): + def __init__(self, **kwargs): + self.connection_pool = kwargs.pop('connection_pool') + super(SentinelManagedConnection, self).__init__(**kwargs) + + def __repr__(self): + pool = self.connection_pool + s = '%s' % (type(self).__name__, pool.service_name) + if self.host: + host_info = ',host=%s,port=%s' % (self.host, self.port) + s = s % host_info + return s + + def connect_to(self, address): + self.host, self.port = address + super(SentinelManagedConnection, self).connect() + if self.connection_pool.check_connection: + self.send_command('PING') + if nativestr(self.read_response()) != 'PONG': + raise ConnectionError('PING failed') + + def connect(self): + if self._sock: + return # already connected + if self.connection_pool.is_master: + self.connect_to(self.connection_pool.get_master_address()) + else: + for slave in self.connection_pool.rotate_slaves(): + try: + return self.connect_to(slave) + except ConnectionError: + continue + raise SlaveNotFoundError # Never be here + + def read_response(self): + try: + return 
super(SentinelManagedConnection, self).read_response() + except ReadOnlyError: + if self.connection_pool.is_master: + # When talking to a master, a ReadOnlyError when likely + # indicates that the previous master that we're still connected + # to has been demoted to a slave and there's a new master. + # calling disconnect will force the connection to re-query + # sentinel during the next connect() attempt. + self.disconnect() + raise ConnectionError('The previous master is now a slave') + raise + + +class SentinelConnectionPool(ConnectionPool): + """ + Sentinel backed connection pool. + + If ``check_connection`` flag is set to True, SentinelManagedConnection + sends a PING command right after establishing the connection. + """ + + def __init__(self, service_name, sentinel_manager, **kwargs): + kwargs['connection_class'] = kwargs.get( + 'connection_class', SentinelManagedConnection) + self.is_master = kwargs.pop('is_master', True) + self.check_connection = kwargs.pop('check_connection', False) + super(SentinelConnectionPool, self).__init__(**kwargs) + self.connection_kwargs['connection_pool'] = weakref.proxy(self) + self.service_name = service_name + self.sentinel_manager = sentinel_manager + + def __repr__(self): + return "%s>> from redis.sentinel import Sentinel + >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1) + >>> master = sentinel.master_for('mymaster', socket_timeout=0.1) + >>> master.set('foo', 'bar') + >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1) + >>> slave.get('foo') + 'bar' + + ``sentinels`` is a list of sentinel nodes. Each node is represented by + a pair (hostname, port). + + ``min_other_sentinels`` defined a minimum number of peers for a sentinel. + When querying a sentinel, if it doesn't meet this threshold, responses + from that sentinel won't be considered valid. + + ``sentinel_kwargs`` is a dictionary of connection arguments used when + connecting to sentinel instances. 
Any argument that can be passed to + a normal Redis connection can be specified here. If ``sentinel_kwargs`` is + not specified, any socket_timeout and socket_keepalive options specified + in ``connection_kwargs`` will be used. + + ``connection_kwargs`` are keyword arguments that will be used when + establishing a connection to a Redis server. + """ + + def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None, + **connection_kwargs): + # if sentinel_kwargs isn't defined, use the socket_* options from + # connection_kwargs + if sentinel_kwargs is None: + sentinel_kwargs = dict([(k, v) + for k, v in iteritems(connection_kwargs) + if k.startswith('socket_') + ]) + self.sentinel_kwargs = sentinel_kwargs + + self.sentinels = [StrictRedis(hostname, port, **self.sentinel_kwargs) + for hostname, port in sentinels] + self.min_other_sentinels = min_other_sentinels + self.connection_kwargs = connection_kwargs + + def __repr__(self): + sentinel_addresses = [] + for sentinel in self.sentinels: + sentinel_addresses.append('%s:%s' % ( + sentinel.connection_pool.connection_kwargs['host'], + sentinel.connection_pool.connection_kwargs['port'], + )) + return '%s' % ( + type(self).__name__, + ','.join(sentinel_addresses)) + + def check_master_state(self, state, service_name): + if not state['is_master'] or state['is_sdown'] or state['is_odown']: + return False + # Check if our sentinel doesn't see other nodes + if state['num-other-sentinels'] < self.min_other_sentinels: + return False + return True + + def discover_master(self, service_name): + """ + Asks sentinel servers for the Redis master's address corresponding + to the service labeled ``service_name``. + + Returns a pair (address, port) or raises MasterNotFoundError if no + master is found. 
+ """ + for sentinel_no, sentinel in enumerate(self.sentinels): + try: + masters = sentinel.sentinel_masters() + except ConnectionError: + continue + state = masters.get(service_name) + if state and self.check_master_state(state, service_name): + # Put this sentinel at the top of the list + self.sentinels[0], self.sentinels[sentinel_no] = ( + sentinel, self.sentinels[0]) + return state['ip'], state['port'] + raise MasterNotFoundError("No master found for %r" % (service_name,)) + + def filter_slaves(self, slaves): + "Remove slaves that are in an ODOWN or SDOWN state" + slaves_alive = [] + for slave in slaves: + if slave['is_odown'] or slave['is_sdown']: + continue + slaves_alive.append((slave['ip'], slave['port'])) + return slaves_alive + + def discover_slaves(self, service_name): + "Returns a list of alive slaves for service ``service_name``" + for sentinel in self.sentinels: + try: + slaves = sentinel.sentinel_slaves(service_name) + except (ConnectionError, ResponseError): + continue + slaves = self.filter_slaves(slaves) + if slaves: + return slaves + return [] + + def master_for(self, service_name, redis_class=StrictRedis, + connection_pool_class=SentinelConnectionPool, **kwargs): + """ + Returns a redis client instance for the ``service_name`` master. + + A SentinelConnectionPool class is used to retrive the master's + address before establishing a new connection. + + NOTE: If the master's address has changed, any cached connections to + the old master are closed. + + By default clients will be a redis.StrictRedis instance. Specify a + different class to the ``redis_class`` argument if you desire + something different. + + The ``connection_pool_class`` specifies the connection pool to use. + The SentinelConnectionPool will be used by default. + + All other keyword arguments are merged with any connection_kwargs + passed to this class and passed to the connection pool as keyword + arguments to be used to initialize Redis connections. 
+ """ + kwargs['is_master'] = True + connection_kwargs = dict(self.connection_kwargs) + connection_kwargs.update(kwargs) + return redis_class(connection_pool=connection_pool_class( + service_name, self, **connection_kwargs)) + + def slave_for(self, service_name, redis_class=StrictRedis, + connection_pool_class=SentinelConnectionPool, **kwargs): + """ + Returns redis client instance for the ``service_name`` slave(s). + + A SentinelConnectionPool class is used to retrive the slave's + address before establishing a new connection. + + By default clients will be a redis.StrictRedis instance. Specify a + different class to the ``redis_class`` argument if you desire + something different. + + The ``connection_pool_class`` specifies the connection pool to use. + The SentinelConnectionPool will be used by default. + + All other keyword arguments are merged with any connection_kwargs + passed to this class and passed to the connection pool as keyword + arguments to be used to initialize Redis connections. + """ + kwargs['is_master'] = False + connection_kwargs = dict(self.connection_kwargs) + connection_kwargs.update(kwargs) + return redis_class(connection_pool=connection_pool_class( + service_name, self, **connection_kwargs)) diff --git a/awx/lib/site-packages/redis/redis/utils.py b/awx/lib/site-packages/redis/redis/utils.py new file mode 100644 index 0000000000..0b0067ed37 --- /dev/null +++ b/awx/lib/site-packages/redis/redis/utils.py @@ -0,0 +1,33 @@ +from contextlib import contextmanager + + +try: + import hiredis + HIREDIS_AVAILABLE = True +except ImportError: + HIREDIS_AVAILABLE = False + + +def from_url(url, db=None, **kwargs): + """ + Returns an active Redis client generated from the given database URL. + + Will attempt to extract the database id from the path url fragment, if + none is provided. 
+ """ + from redis.client import Redis + return Redis.from_url(url, db, **kwargs) + + +@contextmanager +def pipeline(redis_obj): + p = redis_obj.pipeline() + yield p + p.execute() + + +class dummy(object): + """ + Instances of this class can be used as an attribute container. + """ + pass diff --git a/awx/lib/site-packages/redis/setup.cfg b/awx/lib/site-packages/redis/setup.cfg new file mode 100644 index 0000000000..861a9f5542 --- /dev/null +++ b/awx/lib/site-packages/redis/setup.cfg @@ -0,0 +1,5 @@ +[egg_info] +tag_build = +tag_date = 0 +tag_svn_revision = 0 + diff --git a/awx/lib/site-packages/redis/setup.py b/awx/lib/site-packages/redis/setup.py new file mode 100644 index 0000000000..67706f15ab --- /dev/null +++ b/awx/lib/site-packages/redis/setup.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python +import os +import sys + +from redis import __version__ + +try: + from setuptools import setup + from setuptools.command.test import test as TestCommand + + class PyTest(TestCommand): + def finalize_options(self): + TestCommand.finalize_options(self) + self.test_args = [] + self.test_suite = True + + def run_tests(self): + # import here, because outside the eggs aren't loaded + import pytest + errno = pytest.main(self.test_args) + sys.exit(errno) + +except ImportError: + + from distutils.core import setup + PyTest = lambda x: x + +f = open(os.path.join(os.path.dirname(__file__), 'README.rst')) +long_description = f.read() +f.close() + +setup( + name='redis', + version=__version__, + description='Python client for Redis key-value store', + long_description=long_description, + url='http://github.com/andymccurdy/redis-py', + author='Andy McCurdy', + author_email='sedrik@gmail.com', + maintainer='Andy McCurdy', + maintainer_email='sedrik@gmail.com', + keywords=['Redis', 'key-value store'], + license='MIT', + packages=['redis'], + tests_require=['pytest>=2.5.0'], + cmdclass={'test': PyTest}, + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Environment :: 
Console', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', + ] +) diff --git a/awx/lib/site-packages/redis/tests/__init__.py b/awx/lib/site-packages/redis/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awx/lib/site-packages/redis/tests/conftest.py b/awx/lib/site-packages/redis/tests/conftest.py new file mode 100644 index 0000000000..bd0116bc5e --- /dev/null +++ b/awx/lib/site-packages/redis/tests/conftest.py @@ -0,0 +1,46 @@ +import pytest +import redis + +from distutils.version import StrictVersion + + +_REDIS_VERSIONS = {} + + +def get_version(**kwargs): + params = {'host': 'localhost', 'port': 6379, 'db': 9} + params.update(kwargs) + key = '%s:%s' % (params['host'], params['port']) + if key not in _REDIS_VERSIONS: + client = redis.Redis(**params) + _REDIS_VERSIONS[key] = client.info()['redis_version'] + client.connection_pool.disconnect() + return _REDIS_VERSIONS[key] + + +def _get_client(cls, request=None, **kwargs): + params = {'host': 'localhost', 'port': 6379, 'db': 9} + params.update(kwargs) + client = cls(**params) + client.flushdb() + if request: + def teardown(): + client.flushdb() + client.connection_pool.disconnect() + request.addfinalizer(teardown) + return client + + +def skip_if_server_version_lt(min_version): + check = StrictVersion(get_version()) < StrictVersion(min_version) + return pytest.mark.skipif(check, reason="") + + +@pytest.fixture() +def r(request, **kwargs): + return _get_client(redis.Redis, request, **kwargs) + + +@pytest.fixture() +def sr(request, **kwargs): + return _get_client(redis.StrictRedis, request, **kwargs) diff --git 
a/awx/lib/site-packages/redis/tests/test_commands.py b/awx/lib/site-packages/redis/tests/test_commands.py new file mode 100644 index 0000000000..286ea04b25 --- /dev/null +++ b/awx/lib/site-packages/redis/tests/test_commands.py @@ -0,0 +1,1441 @@ +from __future__ import with_statement +import binascii +import datetime +import pytest +import redis +import time + +from redis._compat import (unichr, u, b, ascii_letters, iteritems, iterkeys, + itervalues) +from redis.client import parse_info +from redis import exceptions + +from .conftest import skip_if_server_version_lt + + +@pytest.fixture() +def slowlog(request, r): + current_config = r.config_get() + old_slower_than_value = current_config['slowlog-log-slower-than'] + old_max_legnth_value = current_config['slowlog-max-len'] + + def cleanup(): + r.config_set('slowlog-log-slower-than', old_slower_than_value) + r.config_set('slowlog-max-len', old_max_legnth_value) + request.addfinalizer(cleanup) + + r.config_set('slowlog-log-slower-than', 0) + r.config_set('slowlog-max-len', 128) + + +def redis_server_time(client): + seconds, milliseconds = client.time() + timestamp = float('%s.%s' % (seconds, milliseconds)) + return datetime.datetime.fromtimestamp(timestamp) + + +# RESPONSE CALLBACKS +class TestResponseCallbacks(object): + "Tests for the response callback system" + + def test_response_callbacks(self, r): + assert r.response_callbacks == redis.Redis.RESPONSE_CALLBACKS + assert id(r.response_callbacks) != id(redis.Redis.RESPONSE_CALLBACKS) + r.set_response_callback('GET', lambda x: 'static') + r['a'] = 'foo' + assert r['a'] == 'static' + + +class TestRedisCommands(object): + + def test_command_on_invalid_key_type(self, r): + r.lpush('a', '1') + with pytest.raises(redis.ResponseError): + r['a'] + + # SERVER INFORMATION + def test_client_list(self, r): + clients = r.client_list() + assert isinstance(clients[0], dict) + assert 'addr' in clients[0] + + @skip_if_server_version_lt('2.6.9') + def test_client_getname(self, r): + 
assert r.client_getname() is None + + @skip_if_server_version_lt('2.6.9') + def test_client_setname(self, r): + assert r.client_setname('redis_py_test') + assert r.client_getname() == 'redis_py_test' + + def test_config_get(self, r): + data = r.config_get() + assert 'maxmemory' in data + assert data['maxmemory'].isdigit() + + def test_config_resetstat(self, r): + r.ping() + prior_commands_processed = int(r.info()['total_commands_processed']) + assert prior_commands_processed >= 1 + r.config_resetstat() + reset_commands_processed = int(r.info()['total_commands_processed']) + assert reset_commands_processed < prior_commands_processed + + def test_config_set(self, r): + data = r.config_get() + rdbname = data['dbfilename'] + try: + assert r.config_set('dbfilename', 'redis_py_test.rdb') + assert r.config_get()['dbfilename'] == 'redis_py_test.rdb' + finally: + assert r.config_set('dbfilename', rdbname) + + def test_dbsize(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + assert r.dbsize() == 2 + + def test_echo(self, r): + assert r.echo('foo bar') == b('foo bar') + + def test_info(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + info = r.info() + assert isinstance(info, dict) + assert info['db9']['keys'] == 2 + + def test_lastsave(self, r): + assert isinstance(r.lastsave(), datetime.datetime) + + def test_object(self, r): + r['a'] = 'foo' + assert isinstance(r.object('refcount', 'a'), int) + assert isinstance(r.object('idletime', 'a'), int) + assert r.object('encoding', 'a') == b('raw') + assert r.object('idletime', 'invalid-key') is None + + def test_ping(self, r): + assert r.ping() + + def test_slowlog_get(self, r, slowlog): + assert r.slowlog_reset() + unicode_string = unichr(3456) + u('abcd') + unichr(3421) + r.get(unicode_string) + slowlog = r.slowlog_get() + assert isinstance(slowlog, list) + commands = [log['command'] for log in slowlog] + + get_command = b(' ').join((b('GET'), unicode_string.encode('utf-8'))) + assert get_command in commands + assert b('SLOWLOG RESET') 
in commands + # the order should be ['GET ', 'SLOWLOG RESET'], + # but if other clients are executing commands at the same time, there + # could be commands, before, between, or after, so just check that + # the two we care about are in the appropriate order. + assert commands.index(get_command) < commands.index(b('SLOWLOG RESET')) + + # make sure other attributes are typed correctly + assert isinstance(slowlog[0]['start_time'], int) + assert isinstance(slowlog[0]['duration'], int) + + def test_slowlog_get_limit(self, r, slowlog): + assert r.slowlog_reset() + r.get('foo') + r.get('bar') + slowlog = r.slowlog_get(1) + assert isinstance(slowlog, list) + commands = [log['command'] for log in slowlog] + assert b('GET foo') not in commands + assert b('GET bar') in commands + + def test_slowlog_length(self, r, slowlog): + r.get('foo') + assert isinstance(r.slowlog_len(), int) + + @skip_if_server_version_lt('2.6.0') + def test_time(self, r): + t = r.time() + assert len(t) == 2 + assert isinstance(t[0], int) + assert isinstance(t[1], int) + + # BASIC KEY COMMANDS + def test_append(self, r): + assert r.append('a', 'a1') == 2 + assert r['a'] == b('a1') + assert r.append('a', 'a2') == 4 + assert r['a'] == b('a1a2') + + @skip_if_server_version_lt('2.6.0') + def test_bitcount(self, r): + r.setbit('a', 5, True) + assert r.bitcount('a') == 1 + r.setbit('a', 6, True) + assert r.bitcount('a') == 2 + r.setbit('a', 5, False) + assert r.bitcount('a') == 1 + r.setbit('a', 9, True) + r.setbit('a', 17, True) + r.setbit('a', 25, True) + r.setbit('a', 33, True) + assert r.bitcount('a') == 5 + assert r.bitcount('a', 0, -1) == 5 + assert r.bitcount('a', 2, 3) == 2 + assert r.bitcount('a', 2, -1) == 3 + assert r.bitcount('a', -2, -1) == 2 + assert r.bitcount('a', 1, 1) == 1 + + @skip_if_server_version_lt('2.6.0') + def test_bitop_not_empty_string(self, r): + r['a'] = '' + r.bitop('not', 'r', 'a') + assert r.get('r') is None + + @skip_if_server_version_lt('2.6.0') + def test_bitop_not(self, 
r): + test_str = b('\xAA\x00\xFF\x55') + correct = ~0xAA00FF55 & 0xFFFFFFFF + r['a'] = test_str + r.bitop('not', 'r', 'a') + assert int(binascii.hexlify(r['r']), 16) == correct + + @skip_if_server_version_lt('2.6.0') + def test_bitop_not_in_place(self, r): + test_str = b('\xAA\x00\xFF\x55') + correct = ~0xAA00FF55 & 0xFFFFFFFF + r['a'] = test_str + r.bitop('not', 'a', 'a') + assert int(binascii.hexlify(r['a']), 16) == correct + + @skip_if_server_version_lt('2.6.0') + def test_bitop_single_string(self, r): + test_str = b('\x01\x02\xFF') + r['a'] = test_str + r.bitop('and', 'res1', 'a') + r.bitop('or', 'res2', 'a') + r.bitop('xor', 'res3', 'a') + assert r['res1'] == test_str + assert r['res2'] == test_str + assert r['res3'] == test_str + + @skip_if_server_version_lt('2.6.0') + def test_bitop_string_operands(self, r): + r['a'] = b('\x01\x02\xFF\xFF') + r['b'] = b('\x01\x02\xFF') + r.bitop('and', 'res1', 'a', 'b') + r.bitop('or', 'res2', 'a', 'b') + r.bitop('xor', 'res3', 'a', 'b') + assert int(binascii.hexlify(r['res1']), 16) == 0x0102FF00 + assert int(binascii.hexlify(r['res2']), 16) == 0x0102FFFF + assert int(binascii.hexlify(r['res3']), 16) == 0x000000FF + + @skip_if_server_version_lt('2.8.7') + def test_bitpos(self, r): + key = 'key:bitpos' + r.set(key, b('\xff\xf0\x00')) + assert r.bitpos(key, 0) == 12 + assert r.bitpos(key, 0, 2, -1) == 16 + assert r.bitpos(key, 0, -2, -1) == 12 + r.set(key, b('\x00\xff\xf0')) + assert r.bitpos(key, 1, 0) == 8 + assert r.bitpos(key, 1, 1) == 8 + r.set(key, b('\x00\x00\x00')) + assert r.bitpos(key, 1) == -1 + + @skip_if_server_version_lt('2.8.7') + def test_bitpos_wrong_arguments(self, r): + key = 'key:bitpos:wrong:args' + r.set(key, b('\xff\xf0\x00')) + with pytest.raises(exceptions.RedisError): + r.bitpos(key, 0, end=1) == 12 + with pytest.raises(exceptions.RedisError): + r.bitpos(key, 7) == 12 + + def test_decr(self, r): + assert r.decr('a') == -1 + assert r['a'] == b('-1') + assert r.decr('a') == -2 + assert r['a'] == b('-2') 
+ assert r.decr('a', amount=5) == -7 + assert r['a'] == b('-7') + + def test_delete(self, r): + assert r.delete('a') == 0 + r['a'] = 'foo' + assert r.delete('a') == 1 + + def test_delete_with_multiple_keys(self, r): + r['a'] = 'foo' + r['b'] = 'bar' + assert r.delete('a', 'b') == 2 + assert r.get('a') is None + assert r.get('b') is None + + def test_delitem(self, r): + r['a'] = 'foo' + del r['a'] + assert r.get('a') is None + + @skip_if_server_version_lt('2.6.0') + def test_dump_and_restore(self, r): + r['a'] = 'foo' + dumped = r.dump('a') + del r['a'] + r.restore('a', 0, dumped) + assert r['a'] == b('foo') + + def test_exists(self, r): + assert not r.exists('a') + r['a'] = 'foo' + assert r.exists('a') + + def test_exists_contains(self, r): + assert 'a' not in r + r['a'] = 'foo' + assert 'a' in r + + def test_expire(self, r): + assert not r.expire('a', 10) + r['a'] = 'foo' + assert r.expire('a', 10) + assert 0 < r.ttl('a') <= 10 + assert r.persist('a') + assert not r.ttl('a') + + def test_expireat_datetime(self, r): + expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + r['a'] = 'foo' + assert r.expireat('a', expire_at) + assert 0 < r.ttl('a') <= 61 + + def test_expireat_no_key(self, r): + expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + assert not r.expireat('a', expire_at) + + def test_expireat_unixtime(self, r): + expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + r['a'] = 'foo' + expire_at_seconds = int(time.mktime(expire_at.timetuple())) + assert r.expireat('a', expire_at_seconds) + assert 0 < r.ttl('a') <= 61 + + def test_get_and_set(self, r): + # get and set can't be tested independently of each other + assert r.get('a') is None + byte_string = b('value') + integer = 5 + unicode_string = unichr(3456) + u('abcd') + unichr(3421) + assert r.set('byte_string', byte_string) + assert r.set('integer', 5) + assert r.set('unicode_string', unicode_string) + assert r.get('byte_string') == byte_string + assert 
r.get('integer') == b(str(integer)) + assert r.get('unicode_string').decode('utf-8') == unicode_string + + def test_getitem_and_setitem(self, r): + r['a'] = 'bar' + assert r['a'] == b('bar') + + def test_getitem_raises_keyerror_for_missing_key(self, r): + with pytest.raises(KeyError): + r['a'] + + def test_get_set_bit(self, r): + # no value + assert not r.getbit('a', 5) + # set bit 5 + assert not r.setbit('a', 5, True) + assert r.getbit('a', 5) + # unset bit 4 + assert not r.setbit('a', 4, False) + assert not r.getbit('a', 4) + # set bit 4 + assert not r.setbit('a', 4, True) + assert r.getbit('a', 4) + # set bit 5 again + assert r.setbit('a', 5, True) + assert r.getbit('a', 5) + + def test_getrange(self, r): + r['a'] = 'foo' + assert r.getrange('a', 0, 0) == b('f') + assert r.getrange('a', 0, 2) == b('foo') + assert r.getrange('a', 3, 4) == b('') + + def test_getset(self, r): + assert r.getset('a', 'foo') is None + assert r.getset('a', 'bar') == b('foo') + assert r.get('a') == b('bar') + + def test_incr(self, r): + assert r.incr('a') == 1 + assert r['a'] == b('1') + assert r.incr('a') == 2 + assert r['a'] == b('2') + assert r.incr('a', amount=5) == 7 + assert r['a'] == b('7') + + def test_incrby(self, r): + assert r.incrby('a') == 1 + assert r.incrby('a', 4) == 5 + assert r['a'] == b('5') + + @skip_if_server_version_lt('2.6.0') + def test_incrbyfloat(self, r): + assert r.incrbyfloat('a') == 1.0 + assert r['a'] == b('1') + assert r.incrbyfloat('a', 1.1) == 2.1 + assert float(r['a']) == float(2.1) + + def test_keys(self, r): + assert r.keys() == [] + keys_with_underscores = set([b('test_a'), b('test_b')]) + keys = keys_with_underscores.union(set([b('testc')])) + for key in keys: + r[key] = 1 + assert set(r.keys(pattern='test_*')) == keys_with_underscores + assert set(r.keys(pattern='test*')) == keys + + def test_mget(self, r): + assert r.mget(['a', 'b']) == [None, None] + r['a'] = '1' + r['b'] = '2' + r['c'] = '3' + assert r.mget('a', 'other', 'b', 'c') == [b('1'), 
None, b('2'), b('3')] + + def test_mset(self, r): + d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + assert r.mset(d) + for k, v in iteritems(d): + assert r[k] == v + + def test_mset_kwargs(self, r): + d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + assert r.mset(**d) + for k, v in iteritems(d): + assert r[k] == v + + def test_msetnx(self, r): + d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + assert r.msetnx(d) + d2 = {'a': b('x'), 'd': b('4')} + assert not r.msetnx(d2) + for k, v in iteritems(d): + assert r[k] == v + assert r.get('d') is None + + def test_msetnx_kwargs(self, r): + d = {'a': b('1'), 'b': b('2'), 'c': b('3')} + assert r.msetnx(**d) + d2 = {'a': b('x'), 'd': b('4')} + assert not r.msetnx(**d2) + for k, v in iteritems(d): + assert r[k] == v + assert r.get('d') is None + + @skip_if_server_version_lt('2.6.0') + def test_pexpire(self, r): + assert not r.pexpire('a', 60000) + r['a'] = 'foo' + assert r.pexpire('a', 60000) + assert 0 < r.pttl('a') <= 60000 + assert r.persist('a') + assert r.pttl('a') is None + + @skip_if_server_version_lt('2.6.0') + def test_pexpireat_datetime(self, r): + expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + r['a'] = 'foo' + assert r.pexpireat('a', expire_at) + assert 0 < r.pttl('a') <= 61000 + + @skip_if_server_version_lt('2.6.0') + def test_pexpireat_no_key(self, r): + expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + assert not r.pexpireat('a', expire_at) + + @skip_if_server_version_lt('2.6.0') + def test_pexpireat_unixtime(self, r): + expire_at = redis_server_time(r) + datetime.timedelta(minutes=1) + r['a'] = 'foo' + expire_at_seconds = int(time.mktime(expire_at.timetuple())) * 1000 + assert r.pexpireat('a', expire_at_seconds) + assert 0 < r.pttl('a') <= 61000 + + @skip_if_server_version_lt('2.6.0') + def test_psetex(self, r): + assert r.psetex('a', 1000, 'value') + assert r['a'] == b('value') + assert 0 < r.pttl('a') <= 1000 + + @skip_if_server_version_lt('2.6.0') + def 
test_psetex_timedelta(self, r): + expire_at = datetime.timedelta(milliseconds=1000) + assert r.psetex('a', expire_at, 'value') + assert r['a'] == b('value') + assert 0 < r.pttl('a') <= 1000 + + def test_randomkey(self, r): + assert r.randomkey() is None + for key in ('a', 'b', 'c'): + r[key] = 1 + assert r.randomkey() in (b('a'), b('b'), b('c')) + + def test_rename(self, r): + r['a'] = '1' + assert r.rename('a', 'b') + assert r.get('a') is None + assert r['b'] == b('1') + + def test_renamenx(self, r): + r['a'] = '1' + r['b'] = '2' + assert not r.renamenx('a', 'b') + assert r['a'] == b('1') + assert r['b'] == b('2') + + @skip_if_server_version_lt('2.6.0') + def test_set_nx(self, r): + assert r.set('a', '1', nx=True) + assert not r.set('a', '2', nx=True) + assert r['a'] == b('1') + + @skip_if_server_version_lt('2.6.0') + def test_set_xx(self, r): + assert not r.set('a', '1', xx=True) + assert r.get('a') is None + r['a'] = 'bar' + assert r.set('a', '2', xx=True) + assert r.get('a') == b('2') + + @skip_if_server_version_lt('2.6.0') + def test_set_px(self, r): + assert r.set('a', '1', px=10000) + assert r['a'] == b('1') + assert 0 < r.pttl('a') <= 10000 + assert 0 < r.ttl('a') <= 10 + + @skip_if_server_version_lt('2.6.0') + def test_set_px_timedelta(self, r): + expire_at = datetime.timedelta(milliseconds=1000) + assert r.set('a', '1', px=expire_at) + assert 0 < r.pttl('a') <= 1000 + assert 0 < r.ttl('a') <= 1 + + @skip_if_server_version_lt('2.6.0') + def test_set_ex(self, r): + assert r.set('a', '1', ex=10) + assert 0 < r.ttl('a') <= 10 + + @skip_if_server_version_lt('2.6.0') + def test_set_ex_timedelta(self, r): + expire_at = datetime.timedelta(seconds=60) + assert r.set('a', '1', ex=expire_at) + assert 0 < r.ttl('a') <= 60 + + @skip_if_server_version_lt('2.6.0') + def test_set_multipleoptions(self, r): + r['a'] = 'val' + assert r.set('a', '1', xx=True, px=10000) + assert 0 < r.ttl('a') <= 10 + + def test_setex(self, r): + assert r.setex('a', '1', 60) + assert r['a'] 
== b('1') + assert 0 < r.ttl('a') <= 60 + + def test_setnx(self, r): + assert r.setnx('a', '1') + assert r['a'] == b('1') + assert not r.setnx('a', '2') + assert r['a'] == b('1') + + def test_setrange(self, r): + assert r.setrange('a', 5, 'foo') == 8 + assert r['a'] == b('\0\0\0\0\0foo') + r['a'] = 'abcdefghijh' + assert r.setrange('a', 6, '12345') == 11 + assert r['a'] == b('abcdef12345') + + def test_strlen(self, r): + r['a'] = 'foo' + assert r.strlen('a') == 3 + + def test_substr(self, r): + r['a'] = '0123456789' + assert r.substr('a', 0) == b('0123456789') + assert r.substr('a', 2) == b('23456789') + assert r.substr('a', 3, 5) == b('345') + assert r.substr('a', 3, -2) == b('345678') + + def test_type(self, r): + assert r.type('a') == b('none') + r['a'] = '1' + assert r.type('a') == b('string') + del r['a'] + r.lpush('a', '1') + assert r.type('a') == b('list') + del r['a'] + r.sadd('a', '1') + assert r.type('a') == b('set') + del r['a'] + r.zadd('a', **{'1': 1}) + assert r.type('a') == b('zset') + + # LIST COMMANDS + def test_blpop(self, r): + r.rpush('a', '1', '2') + r.rpush('b', '3', '4') + assert r.blpop(['b', 'a'], timeout=1) == (b('b'), b('3')) + assert r.blpop(['b', 'a'], timeout=1) == (b('b'), b('4')) + assert r.blpop(['b', 'a'], timeout=1) == (b('a'), b('1')) + assert r.blpop(['b', 'a'], timeout=1) == (b('a'), b('2')) + assert r.blpop(['b', 'a'], timeout=1) is None + r.rpush('c', '1') + assert r.blpop('c', timeout=1) == (b('c'), b('1')) + + def test_brpop(self, r): + r.rpush('a', '1', '2') + r.rpush('b', '3', '4') + assert r.brpop(['b', 'a'], timeout=1) == (b('b'), b('4')) + assert r.brpop(['b', 'a'], timeout=1) == (b('b'), b('3')) + assert r.brpop(['b', 'a'], timeout=1) == (b('a'), b('2')) + assert r.brpop(['b', 'a'], timeout=1) == (b('a'), b('1')) + assert r.brpop(['b', 'a'], timeout=1) is None + r.rpush('c', '1') + assert r.brpop('c', timeout=1) == (b('c'), b('1')) + + def test_brpoplpush(self, r): + r.rpush('a', '1', '2') + r.rpush('b', '3', '4') + 
assert r.brpoplpush('a', 'b') == b('2') + assert r.brpoplpush('a', 'b') == b('1') + assert r.brpoplpush('a', 'b', timeout=1) is None + assert r.lrange('a', 0, -1) == [] + assert r.lrange('b', 0, -1) == [b('1'), b('2'), b('3'), b('4')] + + def test_brpoplpush_empty_string(self, r): + r.rpush('a', '') + assert r.brpoplpush('a', 'b') == b('') + + def test_lindex(self, r): + r.rpush('a', '1', '2', '3') + assert r.lindex('a', '0') == b('1') + assert r.lindex('a', '1') == b('2') + assert r.lindex('a', '2') == b('3') + + def test_linsert(self, r): + r.rpush('a', '1', '2', '3') + assert r.linsert('a', 'after', '2', '2.5') == 4 + assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('2.5'), b('3')] + assert r.linsert('a', 'before', '2', '1.5') == 5 + assert r.lrange('a', 0, -1) == \ + [b('1'), b('1.5'), b('2'), b('2.5'), b('3')] + + def test_llen(self, r): + r.rpush('a', '1', '2', '3') + assert r.llen('a') == 3 + + def test_lpop(self, r): + r.rpush('a', '1', '2', '3') + assert r.lpop('a') == b('1') + assert r.lpop('a') == b('2') + assert r.lpop('a') == b('3') + assert r.lpop('a') is None + + def test_lpush(self, r): + assert r.lpush('a', '1') == 1 + assert r.lpush('a', '2') == 2 + assert r.lpush('a', '3', '4') == 4 + assert r.lrange('a', 0, -1) == [b('4'), b('3'), b('2'), b('1')] + + def test_lpushx(self, r): + assert r.lpushx('a', '1') == 0 + assert r.lrange('a', 0, -1) == [] + r.rpush('a', '1', '2', '3') + assert r.lpushx('a', '4') == 4 + assert r.lrange('a', 0, -1) == [b('4'), b('1'), b('2'), b('3')] + + def test_lrange(self, r): + r.rpush('a', '1', '2', '3', '4', '5') + assert r.lrange('a', 0, 2) == [b('1'), b('2'), b('3')] + assert r.lrange('a', 2, 10) == [b('3'), b('4'), b('5')] + assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4'), b('5')] + + def test_lrem(self, r): + r.rpush('a', '1', '1', '1', '1') + assert r.lrem('a', '1', 1) == 1 + assert r.lrange('a', 0, -1) == [b('1'), b('1'), b('1')] + assert r.lrem('a', '1') == 3 + assert r.lrange('a', 0, -1) == [] + 
+ def test_lset(self, r): + r.rpush('a', '1', '2', '3') + assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3')] + assert r.lset('a', 1, '4') + assert r.lrange('a', 0, 2) == [b('1'), b('4'), b('3')] + + def test_ltrim(self, r): + r.rpush('a', '1', '2', '3') + assert r.ltrim('a', 0, 1) + assert r.lrange('a', 0, -1) == [b('1'), b('2')] + + def test_rpop(self, r): + r.rpush('a', '1', '2', '3') + assert r.rpop('a') == b('3') + assert r.rpop('a') == b('2') + assert r.rpop('a') == b('1') + assert r.rpop('a') is None + + def test_rpoplpush(self, r): + r.rpush('a', 'a1', 'a2', 'a3') + r.rpush('b', 'b1', 'b2', 'b3') + assert r.rpoplpush('a', 'b') == b('a3') + assert r.lrange('a', 0, -1) == [b('a1'), b('a2')] + assert r.lrange('b', 0, -1) == [b('a3'), b('b1'), b('b2'), b('b3')] + + def test_rpush(self, r): + assert r.rpush('a', '1') == 1 + assert r.rpush('a', '2') == 2 + assert r.rpush('a', '3', '4') == 4 + assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')] + + def test_rpushx(self, r): + assert r.rpushx('a', 'b') == 0 + assert r.lrange('a', 0, -1) == [] + r.rpush('a', '1', '2', '3') + assert r.rpushx('a', '4') == 4 + assert r.lrange('a', 0, -1) == [b('1'), b('2'), b('3'), b('4')] + + # SCAN COMMANDS + @skip_if_server_version_lt('2.8.0') + def test_scan(self, r): + r.set('a', 1) + r.set('b', 2) + r.set('c', 3) + cursor, keys = r.scan() + assert cursor == 0 + assert set(keys) == set([b('a'), b('b'), b('c')]) + _, keys = r.scan(match='a') + assert set(keys) == set([b('a')]) + + @skip_if_server_version_lt('2.8.0') + def test_scan_iter(self, r): + r.set('a', 1) + r.set('b', 2) + r.set('c', 3) + keys = list(r.scan_iter()) + assert set(keys) == set([b('a'), b('b'), b('c')]) + keys = list(r.scan_iter(match='a')) + assert set(keys) == set([b('a')]) + + @skip_if_server_version_lt('2.8.0') + def test_sscan(self, r): + r.sadd('a', 1, 2, 3) + cursor, members = r.sscan('a') + assert cursor == 0 + assert set(members) == set([b('1'), b('2'), b('3')]) + _, members = 
r.sscan('a', match=b('1')) + assert set(members) == set([b('1')]) + + @skip_if_server_version_lt('2.8.0') + def test_sscan_iter(self, r): + r.sadd('a', 1, 2, 3) + members = list(r.sscan_iter('a')) + assert set(members) == set([b('1'), b('2'), b('3')]) + members = list(r.sscan_iter('a', match=b('1'))) + assert set(members) == set([b('1')]) + + @skip_if_server_version_lt('2.8.0') + def test_hscan(self, r): + r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) + cursor, dic = r.hscan('a') + assert cursor == 0 + assert dic == {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} + _, dic = r.hscan('a', match='a') + assert dic == {b('a'): b('1')} + + @skip_if_server_version_lt('2.8.0') + def test_hscan_iter(self, r): + r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) + dic = dict(r.hscan_iter('a')) + assert dic == {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} + dic = dict(r.hscan_iter('a', match='a')) + assert dic == {b('a'): b('1')} + + @skip_if_server_version_lt('2.8.0') + def test_zscan(self, r): + r.zadd('a', 'a', 1, 'b', 2, 'c', 3) + cursor, pairs = r.zscan('a') + assert cursor == 0 + assert set(pairs) == set([(b('a'), 1), (b('b'), 2), (b('c'), 3)]) + _, pairs = r.zscan('a', match='a') + assert set(pairs) == set([(b('a'), 1)]) + + @skip_if_server_version_lt('2.8.0') + def test_zscan_iter(self, r): + r.zadd('a', 'a', 1, 'b', 2, 'c', 3) + pairs = list(r.zscan_iter('a')) + assert set(pairs) == set([(b('a'), 1), (b('b'), 2), (b('c'), 3)]) + pairs = list(r.zscan_iter('a', match='a')) + assert set(pairs) == set([(b('a'), 1)]) + + # SET COMMANDS + def test_sadd(self, r): + members = set([b('1'), b('2'), b('3')]) + r.sadd('a', *members) + assert r.smembers('a') == members + + def test_scard(self, r): + r.sadd('a', '1', '2', '3') + assert r.scard('a') == 3 + + def test_sdiff(self, r): + r.sadd('a', '1', '2', '3') + assert r.sdiff('a', 'b') == set([b('1'), b('2'), b('3')]) + r.sadd('b', '2', '3') + assert r.sdiff('a', 'b') == set([b('1')]) + + def test_sdiffstore(self, r): + r.sadd('a', '1', '2', 
'3') + assert r.sdiffstore('c', 'a', 'b') == 3 + assert r.smembers('c') == set([b('1'), b('2'), b('3')]) + r.sadd('b', '2', '3') + assert r.sdiffstore('c', 'a', 'b') == 1 + assert r.smembers('c') == set([b('1')]) + + def test_sinter(self, r): + r.sadd('a', '1', '2', '3') + assert r.sinter('a', 'b') == set() + r.sadd('b', '2', '3') + assert r.sinter('a', 'b') == set([b('2'), b('3')]) + + def test_sinterstore(self, r): + r.sadd('a', '1', '2', '3') + assert r.sinterstore('c', 'a', 'b') == 0 + assert r.smembers('c') == set() + r.sadd('b', '2', '3') + assert r.sinterstore('c', 'a', 'b') == 2 + assert r.smembers('c') == set([b('2'), b('3')]) + + def test_sismember(self, r): + r.sadd('a', '1', '2', '3') + assert r.sismember('a', '1') + assert r.sismember('a', '2') + assert r.sismember('a', '3') + assert not r.sismember('a', '4') + + def test_smembers(self, r): + r.sadd('a', '1', '2', '3') + assert r.smembers('a') == set([b('1'), b('2'), b('3')]) + + def test_smove(self, r): + r.sadd('a', 'a1', 'a2') + r.sadd('b', 'b1', 'b2') + assert r.smove('a', 'b', 'a1') + assert r.smembers('a') == set([b('a2')]) + assert r.smembers('b') == set([b('b1'), b('b2'), b('a1')]) + + def test_spop(self, r): + s = [b('1'), b('2'), b('3')] + r.sadd('a', *s) + value = r.spop('a') + assert value in s + assert r.smembers('a') == set(s) - set([value]) + + def test_srandmember(self, r): + s = [b('1'), b('2'), b('3')] + r.sadd('a', *s) + assert r.srandmember('a') in s + + @skip_if_server_version_lt('2.6.0') + def test_srandmember_multi_value(self, r): + s = [b('1'), b('2'), b('3')] + r.sadd('a', *s) + randoms = r.srandmember('a', number=2) + assert len(randoms) == 2 + assert set(randoms).intersection(s) == set(randoms) + + def test_srem(self, r): + r.sadd('a', '1', '2', '3', '4') + assert r.srem('a', '5') == 0 + assert r.srem('a', '2', '4') == 2 + assert r.smembers('a') == set([b('1'), b('3')]) + + def test_sunion(self, r): + r.sadd('a', '1', '2') + r.sadd('b', '2', '3') + assert r.sunion('a', 'b') 
== set([b('1'), b('2'), b('3')]) + + def test_sunionstore(self, r): + r.sadd('a', '1', '2') + r.sadd('b', '2', '3') + assert r.sunionstore('c', 'a', 'b') == 3 + assert r.smembers('c') == set([b('1'), b('2'), b('3')]) + + # SORTED SET COMMANDS + def test_zadd(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + assert r.zrange('a', 0, -1) == [b('a1'), b('a2'), b('a3')] + + def test_zcard(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + assert r.zcard('a') == 3 + + def test_zcount(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + assert r.zcount('a', '-inf', '+inf') == 3 + assert r.zcount('a', 1, 2) == 2 + assert r.zcount('a', 10, 20) == 0 + + def test_zincrby(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + assert r.zincrby('a', 'a2') == 3.0 + assert r.zincrby('a', 'a3', amount=5) == 8.0 + assert r.zscore('a', 'a2') == 3.0 + assert r.zscore('a', 'a3') == 8.0 + + @skip_if_server_version_lt('2.8.9') + def test_zlexcount(self, r): + r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + assert r.zlexcount('a', '-', '+') == 7 + assert r.zlexcount('a', '[b', '[f') == 5 + + def test_zinterstore_sum(self, r): + r.zadd('a', a1=1, a2=1, a3=1) + r.zadd('b', a1=2, a2=2, a3=2) + r.zadd('c', a1=6, a3=5, a4=4) + assert r.zinterstore('d', ['a', 'b', 'c']) == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ + [(b('a3'), 8), (b('a1'), 9)] + + def test_zinterstore_max(self, r): + r.zadd('a', a1=1, a2=1, a3=1) + r.zadd('b', a1=2, a2=2, a3=2) + r.zadd('c', a1=6, a3=5, a4=4) + assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MAX') == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ + [(b('a3'), 5), (b('a1'), 6)] + + def test_zinterstore_min(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('b', a1=2, a2=3, a3=5) + r.zadd('c', a1=6, a3=5, a4=4) + assert r.zinterstore('d', ['a', 'b', 'c'], aggregate='MIN') == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ + [(b('a1'), 1), (b('a3'), 3)] + + def test_zinterstore_with_weight(self, r): + r.zadd('a', a1=1, a2=1, a3=1) + r.zadd('b', a1=2, a2=2, a3=2) + 
r.zadd('c', a1=6, a3=5, a4=4) + assert r.zinterstore('d', {'a': 1, 'b': 2, 'c': 3}) == 2 + assert r.zrange('d', 0, -1, withscores=True) == \ + [(b('a3'), 20), (b('a1'), 23)] + + def test_zrange(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + assert r.zrange('a', 0, 1) == [b('a1'), b('a2')] + assert r.zrange('a', 1, 2) == [b('a2'), b('a3')] + + # withscores + assert r.zrange('a', 0, 1, withscores=True) == \ + [(b('a1'), 1.0), (b('a2'), 2.0)] + assert r.zrange('a', 1, 2, withscores=True) == \ + [(b('a2'), 2.0), (b('a3'), 3.0)] + + # custom score function + assert r.zrange('a', 0, 1, withscores=True, score_cast_func=int) == \ + [(b('a1'), 1), (b('a2'), 2)] + + @skip_if_server_version_lt('2.8.9') + def test_zrangebylex(self, r): + r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + assert r.zrangebylex('a', '-', '[c') == [b('a'), b('b'), b('c')] + assert r.zrangebylex('a', '-', '(c') == [b('a'), b('b')] + assert r.zrangebylex('a', '[aaa', '(g') == \ + [b('b'), b('c'), b('d'), b('e'), b('f')] + assert r.zrangebylex('a', '[f', '+') == [b('f'), b('g')] + assert r.zrangebylex('a', '-', '+', start=3, num=2) == [b('d'), b('e')] + + def test_zrangebyscore(self, r): + r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + assert r.zrangebyscore('a', 2, 4) == [b('a2'), b('a3'), b('a4')] + + # slicing with start/num + assert r.zrangebyscore('a', 2, 4, start=1, num=2) == \ + [b('a3'), b('a4')] + + # withscores + assert r.zrangebyscore('a', 2, 4, withscores=True) == \ + [(b('a2'), 2.0), (b('a3'), 3.0), (b('a4'), 4.0)] + + # custom score function + assert r.zrangebyscore('a', 2, 4, withscores=True, + score_cast_func=int) == \ + [(b('a2'), 2), (b('a3'), 3), (b('a4'), 4)] + + def test_zrank(self, r): + r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + assert r.zrank('a', 'a1') == 0 + assert r.zrank('a', 'a2') == 1 + assert r.zrank('a', 'a6') is None + + def test_zrem(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + assert r.zrem('a', 'a2') == 1 + assert r.zrange('a', 0, -1) == [b('a1'), b('a3')] + assert 
r.zrem('a', 'b') == 0 + assert r.zrange('a', 0, -1) == [b('a1'), b('a3')] + + def test_zrem_multiple_keys(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + assert r.zrem('a', 'a1', 'a2') == 2 + assert r.zrange('a', 0, 5) == [b('a3')] + + @skip_if_server_version_lt('2.8.9') + def test_zremrangebylex(self, r): + r.zadd('a', a=0, b=0, c=0, d=0, e=0, f=0, g=0) + assert r.zremrangebylex('a', '-', '[c') == 3 + assert r.zrange('a', 0, -1) == [b('d'), b('e'), b('f'), b('g')] + assert r.zremrangebylex('a', '[f', '+') == 2 + assert r.zrange('a', 0, -1) == [b('d'), b('e')] + assert r.zremrangebylex('a', '[h', '+') == 0 + assert r.zrange('a', 0, -1) == [b('d'), b('e')] + + def test_zremrangebyrank(self, r): + r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + assert r.zremrangebyrank('a', 1, 3) == 3 + assert r.zrange('a', 0, 5) == [b('a1'), b('a5')] + + def test_zremrangebyscore(self, r): + r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + assert r.zremrangebyscore('a', 2, 4) == 3 + assert r.zrange('a', 0, -1) == [b('a1'), b('a5')] + assert r.zremrangebyscore('a', 2, 4) == 0 + assert r.zrange('a', 0, -1) == [b('a1'), b('a5')] + + def test_zrevrange(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + assert r.zrevrange('a', 0, 1) == [b('a3'), b('a2')] + assert r.zrevrange('a', 1, 2) == [b('a2'), b('a1')] + + # withscores + assert r.zrevrange('a', 0, 1, withscores=True) == \ + [(b('a3'), 3.0), (b('a2'), 2.0)] + assert r.zrevrange('a', 1, 2, withscores=True) == \ + [(b('a2'), 2.0), (b('a1'), 1.0)] + + # custom score function + assert r.zrevrange('a', 0, 1, withscores=True, + score_cast_func=int) == \ + [(b('a3'), 3.0), (b('a2'), 2.0)] + + def test_zrevrangebyscore(self, r): + r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + assert r.zrevrangebyscore('a', 4, 2) == [b('a4'), b('a3'), b('a2')] + + # slicing with start/num + assert r.zrevrangebyscore('a', 4, 2, start=1, num=2) == \ + [b('a3'), b('a2')] + + # withscores + assert r.zrevrangebyscore('a', 4, 2, withscores=True) == \ + [(b('a4'), 4.0), (b('a3'), 3.0), 
(b('a2'), 2.0)] + + # custom score function + assert r.zrevrangebyscore('a', 4, 2, withscores=True, + score_cast_func=int) == \ + [(b('a4'), 4), (b('a3'), 3), (b('a2'), 2)] + + def test_zrevrank(self, r): + r.zadd('a', a1=1, a2=2, a3=3, a4=4, a5=5) + assert r.zrevrank('a', 'a1') == 4 + assert r.zrevrank('a', 'a2') == 3 + assert r.zrevrank('a', 'a6') is None + + def test_zscore(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + assert r.zscore('a', 'a1') == 1.0 + assert r.zscore('a', 'a2') == 2.0 + assert r.zscore('a', 'a4') is None + + def test_zunionstore_sum(self, r): + r.zadd('a', a1=1, a2=1, a3=1) + r.zadd('b', a1=2, a2=2, a3=2) + r.zadd('c', a1=6, a3=5, a4=4) + assert r.zunionstore('d', ['a', 'b', 'c']) == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ + [(b('a2'), 3), (b('a4'), 4), (b('a3'), 8), (b('a1'), 9)] + + def test_zunionstore_max(self, r): + r.zadd('a', a1=1, a2=1, a3=1) + r.zadd('b', a1=2, a2=2, a3=2) + r.zadd('c', a1=6, a3=5, a4=4) + assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MAX') == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ + [(b('a2'), 2), (b('a4'), 4), (b('a3'), 5), (b('a1'), 6)] + + def test_zunionstore_min(self, r): + r.zadd('a', a1=1, a2=2, a3=3) + r.zadd('b', a1=2, a2=2, a3=4) + r.zadd('c', a1=6, a3=5, a4=4) + assert r.zunionstore('d', ['a', 'b', 'c'], aggregate='MIN') == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ + [(b('a1'), 1), (b('a2'), 2), (b('a3'), 3), (b('a4'), 4)] + + def test_zunionstore_with_weight(self, r): + r.zadd('a', a1=1, a2=1, a3=1) + r.zadd('b', a1=2, a2=2, a3=2) + r.zadd('c', a1=6, a3=5, a4=4) + assert r.zunionstore('d', {'a': 1, 'b': 2, 'c': 3}) == 4 + assert r.zrange('d', 0, -1, withscores=True) == \ + [(b('a2'), 5), (b('a4'), 12), (b('a3'), 20), (b('a1'), 23)] + + # HYPERLOGLOG TESTS + @skip_if_server_version_lt('2.8.9') + def test_pfadd(self, r): + members = set([b('1'), b('2'), b('3')]) + assert r.pfadd('a', *members) == 1 + assert r.pfadd('a', *members) == 0 + assert r.pfcount('a') 
== len(members) + + @skip_if_server_version_lt('2.8.9') + def test_pfcount(self, r): + members = set([b('1'), b('2'), b('3')]) + r.pfadd('a', *members) + assert r.pfcount('a') == len(members) + + @skip_if_server_version_lt('2.8.9') + def test_pfmerge(self, r): + mema = set([b('1'), b('2'), b('3')]) + memb = set([b('2'), b('3'), b('4')]) + memc = set([b('5'), b('6'), b('7')]) + r.pfadd('a', *mema) + r.pfadd('b', *memb) + r.pfadd('c', *memc) + r.pfmerge('d', 'c', 'a') + assert r.pfcount('d') == 6 + r.pfmerge('d', 'b') + assert r.pfcount('d') == 7 + + # HASH COMMANDS + def test_hget_and_hset(self, r): + r.hmset('a', {'1': 1, '2': 2, '3': 3}) + assert r.hget('a', '1') == b('1') + assert r.hget('a', '2') == b('2') + assert r.hget('a', '3') == b('3') + + # field was updated, redis returns 0 + assert r.hset('a', '2', 5) == 0 + assert r.hget('a', '2') == b('5') + + # field is new, redis returns 1 + assert r.hset('a', '4', 4) == 1 + assert r.hget('a', '4') == b('4') + + # key inside of hash that doesn't exist returns null value + assert r.hget('a', 'b') is None + + def test_hdel(self, r): + r.hmset('a', {'1': 1, '2': 2, '3': 3}) + assert r.hdel('a', '2') == 1 + assert r.hget('a', '2') is None + assert r.hdel('a', '1', '3') == 2 + assert r.hlen('a') == 0 + + def test_hexists(self, r): + r.hmset('a', {'1': 1, '2': 2, '3': 3}) + assert r.hexists('a', '1') + assert not r.hexists('a', '4') + + def test_hgetall(self, r): + h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} + r.hmset('a', h) + assert r.hgetall('a') == h + + def test_hincrby(self, r): + assert r.hincrby('a', '1') == 1 + assert r.hincrby('a', '1', amount=2) == 3 + assert r.hincrby('a', '1', amount=-2) == 1 + + @skip_if_server_version_lt('2.6.0') + def test_hincrbyfloat(self, r): + assert r.hincrbyfloat('a', '1') == 1.0 + assert r.hincrbyfloat('a', '1') == 2.0 + assert r.hincrbyfloat('a', '1', 1.2) == 3.2 + + def test_hkeys(self, r): + h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} + r.hmset('a', h) + 
local_keys = list(iterkeys(h)) + remote_keys = r.hkeys('a') + assert (sorted(local_keys) == sorted(remote_keys)) + + def test_hlen(self, r): + r.hmset('a', {'1': 1, '2': 2, '3': 3}) + assert r.hlen('a') == 3 + + def test_hmget(self, r): + assert r.hmset('a', {'a': 1, 'b': 2, 'c': 3}) + assert r.hmget('a', 'a', 'b', 'c') == [b('1'), b('2'), b('3')] + + def test_hmset(self, r): + h = {b('a'): b('1'), b('b'): b('2'), b('c'): b('3')} + assert r.hmset('a', h) + assert r.hgetall('a') == h + + def test_hsetnx(self, r): + # Initially set the hash field + assert r.hsetnx('a', '1', 1) + assert r.hget('a', '1') == b('1') + assert not r.hsetnx('a', '1', 2) + assert r.hget('a', '1') == b('1') + + def test_hvals(self, r): + h = {b('a1'): b('1'), b('a2'): b('2'), b('a3'): b('3')} + r.hmset('a', h) + local_vals = list(itervalues(h)) + remote_vals = r.hvals('a') + assert sorted(local_vals) == sorted(remote_vals) + + # SORT + def test_sort_basic(self, r): + r.rpush('a', '3', '2', '1', '4') + assert r.sort('a') == [b('1'), b('2'), b('3'), b('4')] + + def test_sort_limited(self, r): + r.rpush('a', '3', '2', '1', '4') + assert r.sort('a', start=1, num=2) == [b('2'), b('3')] + + def test_sort_by(self, r): + r['score:1'] = 8 + r['score:2'] = 3 + r['score:3'] = 5 + r.rpush('a', '3', '2', '1') + assert r.sort('a', by='score:*') == [b('2'), b('3'), b('1')] + + def test_sort_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get='user:*') == [b('u1'), b('u2'), b('u3')] + + def test_sort_get_multi(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', '#')) == \ + [b('u1'), b('1'), b('u2'), b('2'), b('u3'), b('3')] + + def test_sort_get_groups_two(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', '#'), groups=True) == \ + [(b('u1'), b('1')), 
(b('u2'), b('2')), (b('u3'), b('3'))] + + def test_sort_groups_string_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + with pytest.raises(exceptions.DataError): + r.sort('a', get='user:*', groups=True) + + def test_sort_groups_just_one_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + with pytest.raises(exceptions.DataError): + r.sort('a', get=['user:*'], groups=True) + + def test_sort_groups_no_get(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r.rpush('a', '2', '3', '1') + with pytest.raises(exceptions.DataError): + r.sort('a', groups=True) + + def test_sort_groups_three_gets(self, r): + r['user:1'] = 'u1' + r['user:2'] = 'u2' + r['user:3'] = 'u3' + r['door:1'] = 'd1' + r['door:2'] = 'd2' + r['door:3'] = 'd3' + r.rpush('a', '2', '3', '1') + assert r.sort('a', get=('user:*', 'door:*', '#'), groups=True) == \ + [ + (b('u1'), b('d1'), b('1')), + (b('u2'), b('d2'), b('2')), + (b('u3'), b('d3'), b('3')) + ] + + def test_sort_desc(self, r): + r.rpush('a', '2', '3', '1') + assert r.sort('a', desc=True) == [b('3'), b('2'), b('1')] + + def test_sort_alpha(self, r): + r.rpush('a', 'e', 'c', 'b', 'd', 'a') + assert r.sort('a', alpha=True) == \ + [b('a'), b('b'), b('c'), b('d'), b('e')] + + def test_sort_store(self, r): + r.rpush('a', '2', '3', '1') + assert r.sort('a', store='sorted_values') == 3 + assert r.lrange('sorted_values', 0, -1) == [b('1'), b('2'), b('3')] + + def test_sort_all_options(self, r): + r['user:1:username'] = 'zeus' + r['user:2:username'] = 'titan' + r['user:3:username'] = 'hermes' + r['user:4:username'] = 'hercules' + r['user:5:username'] = 'apollo' + r['user:6:username'] = 'athena' + r['user:7:username'] = 'hades' + r['user:8:username'] = 'dionysus' + + r['user:1:favorite_drink'] = 'yuengling' + r['user:2:favorite_drink'] = 'rum' + r['user:3:favorite_drink'] = 'vodka' + r['user:4:favorite_drink'] = 'milk' + 
r['user:5:favorite_drink'] = 'pinot noir' + r['user:6:favorite_drink'] = 'water' + r['user:7:favorite_drink'] = 'gin' + r['user:8:favorite_drink'] = 'apple juice' + + r.rpush('gods', '5', '8', '3', '1', '2', '7', '6', '4') + num = r.sort('gods', start=2, num=4, by='user:*:username', + get='user:*:favorite_drink', desc=True, alpha=True, + store='sorted') + assert num == 4 + assert r.lrange('sorted', 0, 10) == \ + [b('vodka'), b('milk'), b('gin'), b('apple juice')] + + +class TestStrictCommands(object): + + def test_strict_zadd(self, sr): + sr.zadd('a', 1.0, 'a1', 2.0, 'a2', a3=3.0) + assert sr.zrange('a', 0, -1, withscores=True) == \ + [(b('a1'), 1.0), (b('a2'), 2.0), (b('a3'), 3.0)] + + def test_strict_lrem(self, sr): + sr.rpush('a', 'a1', 'a2', 'a3', 'a1') + sr.lrem('a', 0, 'a1') + assert sr.lrange('a', 0, -1) == [b('a2'), b('a3')] + + def test_strict_setex(self, sr): + assert sr.setex('a', 60, '1') + assert sr['a'] == b('1') + assert 0 < sr.ttl('a') <= 60 + + def test_strict_ttl(self, sr): + assert not sr.expire('a', 10) + sr['a'] = '1' + assert sr.expire('a', 10) + assert 0 < sr.ttl('a') <= 10 + assert sr.persist('a') + assert sr.ttl('a') == -1 + + @skip_if_server_version_lt('2.6.0') + def test_strict_pttl(self, sr): + assert not sr.pexpire('a', 10000) + sr['a'] = '1' + assert sr.pexpire('a', 10000) + assert 0 < sr.pttl('a') <= 10000 + assert sr.persist('a') + assert sr.pttl('a') == -1 + + +class TestBinarySave(object): + def test_binary_get_set(self, r): + assert r.set(' foo bar ', '123') + assert r.get(' foo bar ') == b('123') + + assert r.set(' foo\r\nbar\r\n ', '456') + assert r.get(' foo\r\nbar\r\n ') == b('456') + + assert r.set(' \r\n\t\x07\x13 ', '789') + assert r.get(' \r\n\t\x07\x13 ') == b('789') + + assert sorted(r.keys('*')) == \ + [b(' \r\n\t\x07\x13 '), b(' foo\r\nbar\r\n '), b(' foo bar ')] + + assert r.delete(' foo bar ') + assert r.delete(' foo\r\nbar\r\n ') + assert r.delete(' \r\n\t\x07\x13 ') + + def test_binary_lists(self, r): + mapping = { 
+ b('foo bar'): [b('1'), b('2'), b('3')], + b('foo\r\nbar\r\n'): [b('4'), b('5'), b('6')], + b('foo\tbar\x07'): [b('7'), b('8'), b('9')], + } + # fill in lists + for key, value in iteritems(mapping): + r.rpush(key, *value) + + # check that KEYS returns all the keys as they are + assert sorted(r.keys('*')) == sorted(list(iterkeys(mapping))) + + # check that it is possible to get list content by key name + for key, value in iteritems(mapping): + assert r.lrange(key, 0, -1) == value + + def test_22_info(self, r): + """ + Older Redis versions contained 'allocation_stats' in INFO that + was the cause of a number of bugs when parsing. + """ + info = "allocation_stats:6=1,7=1,8=7141,9=180,10=92,11=116,12=5330," \ + "13=123,14=3091,15=11048,16=225842,17=1784,18=814,19=12020," \ + "20=2530,21=645,22=15113,23=8695,24=142860,25=318,26=3303," \ + "27=20561,28=54042,29=37390,30=1884,31=18071,32=31367,33=160," \ + "34=169,35=201,36=10155,37=1045,38=15078,39=22985,40=12523," \ + "41=15588,42=265,43=1287,44=142,45=382,46=945,47=426,48=171," \ + "49=56,50=516,51=43,52=41,53=46,54=54,55=75,56=647,57=332," \ + "58=32,59=39,60=48,61=35,62=62,63=32,64=221,65=26,66=30," \ + "67=36,68=41,69=44,70=26,71=144,72=169,73=24,74=37,75=25," \ + "76=42,77=21,78=126,79=374,80=27,81=40,82=43,83=47,84=46," \ + "85=114,86=34,87=37,88=7240,89=34,90=38,91=18,92=99,93=20," \ + "94=18,95=17,96=15,97=22,98=18,99=69,100=17,101=22,102=15," \ + "103=29,104=39,105=30,106=70,107=22,108=21,109=26,110=52," \ + "111=45,112=33,113=67,114=41,115=44,116=48,117=53,118=54," \ + "119=51,120=75,121=44,122=57,123=44,124=66,125=56,126=52," \ + "127=81,128=108,129=70,130=50,131=51,132=53,133=45,134=62," \ + "135=12,136=13,137=7,138=15,139=21,140=11,141=20,142=6,143=7," \ + "144=11,145=6,146=16,147=19,148=1112,149=1,151=83,154=1," \ + "155=1,156=1,157=1,160=1,161=1,162=2,166=1,169=1,170=1,171=2," \ + "172=1,174=1,176=2,177=9,178=34,179=73,180=30,181=1,185=3," \ + 
"187=1,188=1,189=1,192=1,196=1,198=1,200=1,201=1,204=1,205=1," \ + "207=1,208=1,209=1,214=2,215=31,216=78,217=28,218=5,219=2," \ + "220=1,222=1,225=1,227=1,234=1,242=1,250=1,252=1,253=1," \ + ">=256=203" + parsed = parse_info(info) + assert 'allocation_stats' in parsed + assert '6' in parsed['allocation_stats'] + assert '>=256' in parsed['allocation_stats'] + + def test_large_responses(self, r): + "The PythonParser has some special cases for return values > 1MB" + # load up 5MB of data into a key + data = ''.join([ascii_letters] * (5000000 // len(ascii_letters))) + r['a'] = data + assert r['a'] == b(data) + + def test_floating_point_encoding(self, r): + """ + High precision floating point values sent to the server should keep + precision. + """ + timestamp = 1349673917.939762 + r.zadd('a', 'a1', timestamp) + assert r.zscore('a', 'a1') == timestamp diff --git a/awx/lib/site-packages/redis/tests/test_connection_pool.py b/awx/lib/site-packages/redis/tests/test_connection_pool.py new file mode 100644 index 0000000000..55ccce19ff --- /dev/null +++ b/awx/lib/site-packages/redis/tests/test_connection_pool.py @@ -0,0 +1,402 @@ +from __future__ import with_statement +import os +import pytest +import redis +import time +import re + +from threading import Thread +from redis.connection import ssl_available +from .conftest import skip_if_server_version_lt + + +class DummyConnection(object): + description_format = "DummyConnection<>" + + def __init__(self, **kwargs): + self.kwargs = kwargs + self.pid = os.getpid() + + +class TestConnectionPool(object): + def get_pool(self, connection_kwargs=None, max_connections=None, + connection_class=DummyConnection): + connection_kwargs = connection_kwargs or {} + pool = redis.ConnectionPool( + connection_class=connection_class, + max_connections=max_connections, + **connection_kwargs) + return pool + + def test_connection_creation(self): + connection_kwargs = {'foo': 'bar', 'biz': 'baz'} + pool = 
self.get_pool(connection_kwargs=connection_kwargs) + connection = pool.get_connection('_') + assert isinstance(connection, DummyConnection) + assert connection.kwargs == connection_kwargs + + def test_multiple_connections(self): + pool = self.get_pool() + c1 = pool.get_connection('_') + c2 = pool.get_connection('_') + assert c1 != c2 + + def test_max_connections(self): + pool = self.get_pool(max_connections=2) + pool.get_connection('_') + pool.get_connection('_') + with pytest.raises(redis.ConnectionError): + pool.get_connection('_') + + def test_reuse_previously_released_connection(self): + pool = self.get_pool() + c1 = pool.get_connection('_') + pool.release(c1) + c2 = pool.get_connection('_') + assert c1 == c2 + + def test_repr_contains_db_info_tcp(self): + connection_kwargs = {'host': 'localhost', 'port': 6379, 'db': 1} + pool = self.get_pool(connection_kwargs=connection_kwargs, + connection_class=redis.Connection) + expected = 'ConnectionPool>' + assert repr(pool) == expected + + def test_repr_contains_db_info_unix(self): + connection_kwargs = {'path': '/abc', 'db': 1} + pool = self.get_pool(connection_kwargs=connection_kwargs, + connection_class=redis.UnixDomainSocketConnection) + expected = 'ConnectionPool>' + assert repr(pool) == expected + + +class TestBlockingConnectionPool(object): + def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20): + connection_kwargs = connection_kwargs or {} + pool = redis.BlockingConnectionPool(connection_class=DummyConnection, + max_connections=max_connections, + timeout=timeout, + **connection_kwargs) + return pool + + def test_connection_creation(self): + connection_kwargs = {'foo': 'bar', 'biz': 'baz'} + pool = self.get_pool(connection_kwargs=connection_kwargs) + connection = pool.get_connection('_') + assert isinstance(connection, DummyConnection) + assert connection.kwargs == connection_kwargs + + def test_multiple_connections(self): + pool = self.get_pool() + c1 = pool.get_connection('_') + c2 = 
pool.get_connection('_') + assert c1 != c2 + + def test_connection_pool_blocks_until_timeout(self): + "When out of connections, block for timeout seconds, then raise" + pool = self.get_pool(max_connections=1, timeout=0.1) + pool.get_connection('_') + + start = time.time() + with pytest.raises(redis.ConnectionError): + pool.get_connection('_') + # we should have waited at least 0.1 seconds + assert time.time() - start >= 0.1 + + def connection_pool_blocks_until_another_connection_released(self): + """ + When out of connections, block until another connection is released + to the pool + """ + pool = self.get_pool(max_connections=1, timeout=2) + c1 = pool.get_connection('_') + + def target(): + time.sleep(0.1) + pool.release(c1) + + Thread(target=target).start() + start = time.time() + pool.get_connection('_') + assert time.time() - start >= 0.1 + + def test_reuse_previously_released_connection(self): + pool = self.get_pool() + c1 = pool.get_connection('_') + pool.release(c1) + c2 = pool.get_connection('_') + assert c1 == c2 + + def test_repr_contains_db_info_tcp(self): + pool = redis.ConnectionPool(host='localhost', port=6379, db=0) + expected = 'ConnectionPool>' + assert repr(pool) == expected + + def test_repr_contains_db_info_unix(self): + pool = redis.ConnectionPool( + connection_class=redis.UnixDomainSocketConnection, + path='abc', + db=0, + ) + expected = 'ConnectionPool>' + assert repr(pool) == expected + + +class TestConnectionPoolURLParsing(object): + def test_defaults(self): + pool = redis.ConnectionPool.from_url('redis://localhost') + assert pool.connection_class == redis.Connection + assert pool.connection_kwargs == { + 'host': 'localhost', + 'port': 6379, + 'db': 0, + 'password': None, + } + + def test_hostname(self): + pool = redis.ConnectionPool.from_url('redis://myhost') + assert pool.connection_class == redis.Connection + assert pool.connection_kwargs == { + 'host': 'myhost', + 'port': 6379, + 'db': 0, + 'password': None, + } + + def test_port(self): 
+ pool = redis.ConnectionPool.from_url('redis://localhost:6380') + assert pool.connection_class == redis.Connection + assert pool.connection_kwargs == { + 'host': 'localhost', + 'port': 6380, + 'db': 0, + 'password': None, + } + + def test_password(self): + pool = redis.ConnectionPool.from_url('redis://:mypassword@localhost') + assert pool.connection_class == redis.Connection + assert pool.connection_kwargs == { + 'host': 'localhost', + 'port': 6379, + 'db': 0, + 'password': 'mypassword', + } + + def test_db_as_argument(self): + pool = redis.ConnectionPool.from_url('redis://localhost', db='1') + assert pool.connection_class == redis.Connection + assert pool.connection_kwargs == { + 'host': 'localhost', + 'port': 6379, + 'db': 1, + 'password': None, + } + + def test_db_in_path(self): + pool = redis.ConnectionPool.from_url('redis://localhost/2', db='1') + assert pool.connection_class == redis.Connection + assert pool.connection_kwargs == { + 'host': 'localhost', + 'port': 6379, + 'db': 2, + 'password': None, + } + + def test_db_in_querystring(self): + pool = redis.ConnectionPool.from_url('redis://localhost/2?db=3', + db='1') + assert pool.connection_class == redis.Connection + assert pool.connection_kwargs == { + 'host': 'localhost', + 'port': 6379, + 'db': 3, + 'password': None, + } + + def test_extra_querystring_options(self): + pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2') + assert pool.connection_class == redis.Connection + assert pool.connection_kwargs == { + 'host': 'localhost', + 'port': 6379, + 'db': 0, + 'password': None, + 'a': '1', + 'b': '2' + } + + def test_calling_from_subclass_returns_correct_instance(self): + pool = redis.BlockingConnectionPool.from_url('redis://localhost') + assert isinstance(pool, redis.BlockingConnectionPool) + + def test_client_creates_connection_pool(self): + r = redis.StrictRedis.from_url('redis://myhost') + assert r.connection_pool.connection_class == redis.Connection + assert 
r.connection_pool.connection_kwargs == { + 'host': 'myhost', + 'port': 6379, + 'db': 0, + 'password': None, + } + + +class TestConnectionPoolUnixSocketURLParsing(object): + def test_defaults(self): + pool = redis.ConnectionPool.from_url('unix:///socket') + assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_kwargs == { + 'path': '/socket', + 'db': 0, + 'password': None, + } + + def test_password(self): + pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket') + assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_kwargs == { + 'path': '/socket', + 'db': 0, + 'password': 'mypassword', + } + + def test_db_as_argument(self): + pool = redis.ConnectionPool.from_url('unix:///socket', db=1) + assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_kwargs == { + 'path': '/socket', + 'db': 1, + 'password': None, + } + + def test_db_in_querystring(self): + pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1) + assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_kwargs == { + 'path': '/socket', + 'db': 2, + 'password': None, + } + + def test_extra_querystring_options(self): + pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2') + assert pool.connection_class == redis.UnixDomainSocketConnection + assert pool.connection_kwargs == { + 'path': '/socket', + 'db': 0, + 'password': None, + 'a': '1', + 'b': '2' + } + + +class TestSSLConnectionURLParsing(object): + @pytest.mark.skipif(not ssl_available, reason="SSL not installed") + def test_defaults(self): + pool = redis.ConnectionPool.from_url('rediss://localhost') + assert pool.connection_class == redis.SSLConnection + assert pool.connection_kwargs == { + 'host': 'localhost', + 'port': 6379, + 'db': 0, + 'password': None, + } + + @pytest.mark.skipif(not ssl_available, reason="SSL not installed") + def test_cert_reqs_options(self): + import ssl + pool = 
redis.ConnectionPool.from_url('rediss://?ssl_cert_reqs=none') + assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE + + pool = redis.ConnectionPool.from_url( + 'rediss://?ssl_cert_reqs=optional') + assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL + + pool = redis.ConnectionPool.from_url( + 'rediss://?ssl_cert_reqs=required') + assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED + + +class TestConnection(object): + def test_on_connect_error(self): + """ + An error in Connection.on_connect should disconnect from the server + see for details: https://github.com/andymccurdy/redis-py/issues/368 + """ + # this assumes the Redis server being tested against doesn't have + # 9999 databases ;) + bad_connection = redis.Redis(db=9999) + # an error should be raised on connect + with pytest.raises(redis.RedisError): + bad_connection.info() + pool = bad_connection.connection_pool + assert len(pool._available_connections) == 1 + assert not pool._available_connections[0]._sock + + @skip_if_server_version_lt('2.8.8') + def test_busy_loading_disconnects_socket(self, r): + """ + If Redis raises a LOADING error, the connection should be + disconnected and a BusyLoadingError raised + """ + with pytest.raises(redis.BusyLoadingError): + r.execute_command('DEBUG', 'ERROR', 'LOADING fake message') + pool = r.connection_pool + assert len(pool._available_connections) == 1 + assert not pool._available_connections[0]._sock + + @skip_if_server_version_lt('2.8.8') + def test_busy_loading_from_pipeline_immediate_command(self, r): + """ + BusyLoadingErrors should raise from Pipelines that execute a + command immediately, like WATCH does. 
+ """ + pipe = r.pipeline() + with pytest.raises(redis.BusyLoadingError): + pipe.immediate_execute_command('DEBUG', 'ERROR', + 'LOADING fake message') + pool = r.connection_pool + assert not pipe.connection + assert len(pool._available_connections) == 1 + assert not pool._available_connections[0]._sock + + @skip_if_server_version_lt('2.8.8') + def test_busy_loading_from_pipeline(self, r): + """ + BusyLoadingErrors should be raised from a pipeline execution + regardless of the raise_on_error flag. + """ + pipe = r.pipeline() + pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message') + with pytest.raises(redis.BusyLoadingError): + pipe.execute() + pool = r.connection_pool + assert not pipe.connection + assert len(pool._available_connections) == 1 + assert not pool._available_connections[0]._sock + + @skip_if_server_version_lt('2.8.8') + def test_read_only_error(self, r): + "READONLY errors get turned in ReadOnlyError exceptions" + with pytest.raises(redis.ReadOnlyError): + r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah') + + def test_connect_from_url_tcp(self): + connection = redis.Redis.from_url('redis://localhost') + pool = connection.connection_pool + + assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == ( + 'ConnectionPool', + 'Connection', + 'host=localhost,port=6379,db=0', + ) + + def test_connect_from_url_unix(self): + connection = redis.Redis.from_url('unix:///path/to/socket') + pool = connection.connection_pool + + assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == ( + 'ConnectionPool', + 'UnixDomainSocketConnection', + 'path=/path/to/socket,db=0', + ) diff --git a/awx/lib/site-packages/redis/tests/test_encoding.py b/awx/lib/site-packages/redis/tests/test_encoding.py new file mode 100644 index 0000000000..b1df0a56c2 --- /dev/null +++ b/awx/lib/site-packages/redis/tests/test_encoding.py @@ -0,0 +1,33 @@ +from __future__ import with_statement +import pytest + +from redis._compat import unichr, u, unicode +from .conftest import r 
as _redis_client + + +class TestEncoding(object): + @pytest.fixture() + def r(self, request): + return _redis_client(request=request, decode_responses=True) + + def test_simple_encoding(self, r): + unicode_string = unichr(3456) + u('abcd') + unichr(3421) + r['unicode-string'] = unicode_string + cached_val = r['unicode-string'] + assert isinstance(cached_val, unicode) + assert unicode_string == cached_val + + def test_list_encoding(self, r): + unicode_string = unichr(3456) + u('abcd') + unichr(3421) + result = [unicode_string, unicode_string, unicode_string] + r.rpush('a', *result) + assert r.lrange('a', 0, -1) == result + + +class TestCommandsAndTokensArentEncoded(object): + @pytest.fixture() + def r(self, request): + return _redis_client(request=request, charset='utf-16') + + def test_basic_command(self, r): + r.set('hello', 'world') diff --git a/awx/lib/site-packages/redis/tests/test_lock.py b/awx/lib/site-packages/redis/tests/test_lock.py new file mode 100644 index 0000000000..d732ae1f1e --- /dev/null +++ b/awx/lib/site-packages/redis/tests/test_lock.py @@ -0,0 +1,167 @@ +from __future__ import with_statement +import pytest +import time + +from redis.exceptions import LockError, ResponseError +from redis.lock import Lock, LuaLock + + +class TestLock(object): + lock_class = Lock + + def get_lock(self, redis, *args, **kwargs): + kwargs['lock_class'] = self.lock_class + return redis.lock(*args, **kwargs) + + def test_lock(self, sr): + lock = self.get_lock(sr, 'foo') + assert lock.acquire(blocking=False) + assert sr.get('foo') == lock.local.token + assert sr.ttl('foo') == -1 + lock.release() + assert sr.get('foo') is None + + def test_competing_locks(self, sr): + lock1 = self.get_lock(sr, 'foo') + lock2 = self.get_lock(sr, 'foo') + assert lock1.acquire(blocking=False) + assert not lock2.acquire(blocking=False) + lock1.release() + assert lock2.acquire(blocking=False) + assert not lock1.acquire(blocking=False) + lock2.release() + + def test_timeout(self, sr): + lock = 
self.get_lock(sr, 'foo', timeout=10) + assert lock.acquire(blocking=False) + assert 8 < sr.ttl('foo') <= 10 + lock.release() + + def test_float_timeout(self, sr): + lock = self.get_lock(sr, 'foo', timeout=9.5) + assert lock.acquire(blocking=False) + assert 8 < sr.pttl('foo') <= 9500 + lock.release() + + def test_blocking_timeout(self, sr): + lock1 = self.get_lock(sr, 'foo') + assert lock1.acquire(blocking=False) + lock2 = self.get_lock(sr, 'foo', blocking_timeout=0.2) + start = time.time() + assert not lock2.acquire() + assert (time.time() - start) > 0.2 + lock1.release() + + def test_context_manager(self, sr): + # blocking_timeout prevents a deadlock if the lock can't be acquired + # for some reason + with self.get_lock(sr, 'foo', blocking_timeout=0.2) as lock: + assert sr.get('foo') == lock.local.token + assert sr.get('foo') is None + + def test_high_sleep_raises_error(self, sr): + "If sleep is higher than timeout, it should raise an error" + with pytest.raises(LockError): + self.get_lock(sr, 'foo', timeout=1, sleep=2) + + def test_releasing_unlocked_lock_raises_error(self, sr): + lock = self.get_lock(sr, 'foo') + with pytest.raises(LockError): + lock.release() + + def test_releasing_lock_no_longer_owned_raises_error(self, sr): + lock = self.get_lock(sr, 'foo') + lock.acquire(blocking=False) + # manually change the token + sr.set('foo', 'a') + with pytest.raises(LockError): + lock.release() + # even though we errored, the token is still cleared + assert lock.local.token is None + + def test_extend_lock(self, sr): + lock = self.get_lock(sr, 'foo', timeout=10) + assert lock.acquire(blocking=False) + assert 8000 < sr.pttl('foo') <= 10000 + assert lock.extend(10) + assert 16000 < sr.pttl('foo') <= 20000 + lock.release() + + def test_extend_lock_float(self, sr): + lock = self.get_lock(sr, 'foo', timeout=10.0) + assert lock.acquire(blocking=False) + assert 8000 < sr.pttl('foo') <= 10000 + assert lock.extend(10.0) + assert 16000 < sr.pttl('foo') <= 20000 + 
lock.release() + + def test_extending_unlocked_lock_raises_error(self, sr): + lock = self.get_lock(sr, 'foo', timeout=10) + with pytest.raises(LockError): + lock.extend(10) + + def test_extending_lock_with_no_timeout_raises_error(self, sr): + lock = self.get_lock(sr, 'foo') + assert lock.acquire(blocking=False) + with pytest.raises(LockError): + lock.extend(10) + lock.release() + + def test_extending_lock_no_longer_owned_raises_error(self, sr): + lock = self.get_lock(sr, 'foo') + assert lock.acquire(blocking=False) + sr.set('foo', 'a') + with pytest.raises(LockError): + lock.extend(10) + + +class TestLuaLock(TestLock): + lock_class = LuaLock + + +class TestLockClassSelection(object): + def test_lock_class_argument(self, sr): + lock = sr.lock('foo', lock_class=Lock) + assert type(lock) == Lock + lock = sr.lock('foo', lock_class=LuaLock) + assert type(lock) == LuaLock + + def test_cached_lualock_flag(self, sr): + try: + sr._use_lua_lock = True + lock = sr.lock('foo') + assert type(lock) == LuaLock + finally: + sr._use_lua_lock = None + + def test_cached_lock_flag(self, sr): + try: + sr._use_lua_lock = False + lock = sr.lock('foo') + assert type(lock) == Lock + finally: + sr._use_lua_lock = None + + def test_lua_compatible_server(self, sr, monkeypatch): + @classmethod + def mock_register(cls, redis): + return + monkeypatch.setattr(LuaLock, 'register_scripts', mock_register) + try: + lock = sr.lock('foo') + assert type(lock) == LuaLock + assert sr._use_lua_lock is True + finally: + sr._use_lua_lock = None + + def test_lua_unavailable(self, sr, monkeypatch): + @classmethod + def mock_register(cls, redis): + raise ResponseError() + monkeypatch.setattr(LuaLock, 'register_scripts', mock_register) + try: + lock = sr.lock('foo') + assert type(lock) == Lock + assert sr._use_lua_lock is False + finally: + sr._use_lua_lock = None diff --git a/awx/lib/site-packages/redis/tests/test_pipeline.py b/awx/lib/site-packages/redis/tests/test_pipeline.py new file mode 100644 index 
0000000000..46fc994e4d --- /dev/null +++ b/awx/lib/site-packages/redis/tests/test_pipeline.py @@ -0,0 +1,226 @@ +from __future__ import with_statement +import pytest + +import redis +from redis._compat import b, u, unichr, unicode + + +class TestPipeline(object): + def test_pipeline(self, r): + with r.pipeline() as pipe: + pipe.set('a', 'a1').get('a').zadd('z', z1=1).zadd('z', z2=4) + pipe.zincrby('z', 'z1').zrange('z', 0, 5, withscores=True) + assert pipe.execute() == \ + [ + True, + b('a1'), + True, + True, + 2.0, + [(b('z1'), 2.0), (b('z2'), 4)], + ] + + def test_pipeline_length(self, r): + with r.pipeline() as pipe: + # Initially empty. + assert len(pipe) == 0 + assert not pipe + + # Fill 'er up! + pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') + assert len(pipe) == 3 + assert pipe + + # Execute calls reset(), so empty once again. + pipe.execute() + assert len(pipe) == 0 + assert not pipe + + def test_pipeline_no_transaction(self, r): + with r.pipeline(transaction=False) as pipe: + pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') + assert pipe.execute() == [True, True, True] + assert r['a'] == b('a1') + assert r['b'] == b('b1') + assert r['c'] == b('c1') + + def test_pipeline_no_transaction_watch(self, r): + r['a'] = 0 + + with r.pipeline(transaction=False) as pipe: + pipe.watch('a') + a = pipe.get('a') + + pipe.multi() + pipe.set('a', int(a) + 1) + assert pipe.execute() == [True] + + def test_pipeline_no_transaction_watch_failure(self, r): + r['a'] = 0 + + with r.pipeline(transaction=False) as pipe: + pipe.watch('a') + a = pipe.get('a') + + r['a'] = 'bad' + + pipe.multi() + pipe.set('a', int(a) + 1) + + with pytest.raises(redis.WatchError): + pipe.execute() + + assert r['a'] == b('bad') + + def test_exec_error_in_response(self, r): + """ + an invalid pipeline command at exec time adds the exception instance + to the list of returned values + """ + r['c'] = 'a' + with r.pipeline() as pipe: + pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4) + result = 
pipe.execute(raise_on_error=False) + + assert result[0] + assert r['a'] == b('1') + assert result[1] + assert r['b'] == b('2') + + # we can't lpush to a key that's a string value, so this should + # be a ResponseError exception + assert isinstance(result[2], redis.ResponseError) + assert r['c'] == b('a') + + # since this isn't a transaction, the other commands after the + # error are still executed + assert result[3] + assert r['d'] == b('4') + + # make sure the pipe was restored to a working state + assert pipe.set('z', 'zzz').execute() == [True] + assert r['z'] == b('zzz') + + def test_exec_error_raised(self, r): + r['c'] = 'a' + with r.pipeline() as pipe: + pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4) + with pytest.raises(redis.ResponseError) as ex: + pipe.execute() + assert unicode(ex.value).startswith('Command # 3 (LPUSH c 3) of ' + 'pipeline caused error: ') + + # make sure the pipe was restored to a working state + assert pipe.set('z', 'zzz').execute() == [True] + assert r['z'] == b('zzz') + + def test_parse_error_raised(self, r): + with r.pipeline() as pipe: + # the zrem is invalid because we don't pass any keys to it + pipe.set('a', 1).zrem('b').set('b', 2) + with pytest.raises(redis.ResponseError) as ex: + pipe.execute() + + assert unicode(ex.value).startswith('Command # 2 (ZREM b) of ' + 'pipeline caused error: ') + + # make sure the pipe was restored to a working state + assert pipe.set('z', 'zzz').execute() == [True] + assert r['z'] == b('zzz') + + def test_watch_succeed(self, r): + r['a'] = 1 + r['b'] = 2 + + with r.pipeline() as pipe: + pipe.watch('a', 'b') + assert pipe.watching + a_value = pipe.get('a') + b_value = pipe.get('b') + assert a_value == b('1') + assert b_value == b('2') + pipe.multi() + + pipe.set('c', 3) + assert pipe.execute() == [True] + assert not pipe.watching + + def test_watch_failure(self, r): + r['a'] = 1 + r['b'] = 2 + + with r.pipeline() as pipe: + pipe.watch('a', 'b') + r['b'] = 3 + pipe.multi() + pipe.get('a') + 
with pytest.raises(redis.WatchError): + pipe.execute() + + assert not pipe.watching + + def test_unwatch(self, r): + r['a'] = 1 + r['b'] = 2 + + with r.pipeline() as pipe: + pipe.watch('a', 'b') + r['b'] = 3 + pipe.unwatch() + assert not pipe.watching + pipe.get('a') + assert pipe.execute() == [b('1')] + + def test_transaction_callable(self, r): + r['a'] = 1 + r['b'] = 2 + has_run = [] + + def my_transaction(pipe): + a_value = pipe.get('a') + assert a_value in (b('1'), b('2')) + b_value = pipe.get('b') + assert b_value == b('2') + + # silly run-once code... incr's "a" so WatchError should be raised + # forcing this all to run again. this should incr "a" once to "2" + if not has_run: + r.incr('a') + has_run.append('it has') + + pipe.multi() + pipe.set('c', int(a_value) + int(b_value)) + + result = r.transaction(my_transaction, 'a', 'b') + assert result == [True] + assert r['c'] == b('4') + + def test_exec_error_in_no_transaction_pipeline(self, r): + r['a'] = 1 + with r.pipeline(transaction=False) as pipe: + pipe.llen('a') + pipe.expire('a', 100) + + with pytest.raises(redis.ResponseError) as ex: + pipe.execute() + + assert unicode(ex.value).startswith('Command # 1 (LLEN a) of ' + 'pipeline caused error: ') + + assert r['a'] == b('1') + + def test_exec_error_in_no_transaction_pipeline_unicode_command(self, r): + key = unichr(3456) + u('abcd') + unichr(3421) + r[key] = 1 + with r.pipeline(transaction=False) as pipe: + pipe.llen(key) + pipe.expire(key, 100) + + with pytest.raises(redis.ResponseError) as ex: + pipe.execute() + + expected = unicode('Command # 1 (LLEN %s) of pipeline caused ' + 'error: ') % key + assert unicode(ex.value).startswith(expected) + + assert r[key] == b('1') diff --git a/awx/lib/site-packages/redis/tests/test_pubsub.py b/awx/lib/site-packages/redis/tests/test_pubsub.py new file mode 100644 index 0000000000..5486b75a86 --- /dev/null +++ b/awx/lib/site-packages/redis/tests/test_pubsub.py @@ -0,0 +1,392 @@ +from __future__ import with_statement 
+import pytest +import time + +import redis +from redis.exceptions import ConnectionError +from redis._compat import basestring, u, unichr + +from .conftest import r as _redis_client + + +def wait_for_message(pubsub, timeout=0.1, ignore_subscribe_messages=False): + now = time.time() + timeout = now + timeout + while now < timeout: + message = pubsub.get_message( + ignore_subscribe_messages=ignore_subscribe_messages) + if message is not None: + return message + time.sleep(0.01) + now = time.time() + return None + + +def make_message(type, channel, data, pattern=None): + return { + 'type': type, + 'pattern': pattern and pattern.encode('utf-8') or None, + 'channel': channel.encode('utf-8'), + 'data': data.encode('utf-8') if isinstance(data, basestring) else data + } + + +def make_subscribe_test_data(pubsub, type): + if type == 'channel': + return { + 'p': pubsub, + 'sub_type': 'subscribe', + 'unsub_type': 'unsubscribe', + 'sub_func': pubsub.subscribe, + 'unsub_func': pubsub.unsubscribe, + 'keys': ['foo', 'bar', u('uni') + unichr(4456) + u('code')] + } + elif type == 'pattern': + return { + 'p': pubsub, + 'sub_type': 'psubscribe', + 'unsub_type': 'punsubscribe', + 'sub_func': pubsub.psubscribe, + 'unsub_func': pubsub.punsubscribe, + 'keys': ['f*', 'b*', u('uni') + unichr(4456) + u('*')] + } + assert False, 'invalid subscribe type: %s' % type + + +class TestPubSubSubscribeUnsubscribe(object): + + def _test_subscribe_unsubscribe(self, p, sub_type, unsub_type, sub_func, + unsub_func, keys): + for key in keys: + assert sub_func(key) is None + + # should be a message for each channel/pattern we just subscribed to + for i, key in enumerate(keys): + assert wait_for_message(p) == make_message(sub_type, key, i + 1) + + for key in keys: + assert unsub_func(key) is None + + # should be a message for each channel/pattern we just unsubscribed + # from + for i, key in enumerate(keys): + i = len(keys) - 1 - i + assert wait_for_message(p) == make_message(unsub_type, key, i) + + def 
test_channel_subscribe_unsubscribe(self, r): + kwargs = make_subscribe_test_data(r.pubsub(), 'channel') + self._test_subscribe_unsubscribe(**kwargs) + + def test_pattern_subscribe_unsubscribe(self, r): + kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') + self._test_subscribe_unsubscribe(**kwargs) + + def _test_resubscribe_on_reconnection(self, p, sub_type, unsub_type, + sub_func, unsub_func, keys): + + for key in keys: + assert sub_func(key) is None + + # should be a message for each channel/pattern we just subscribed to + for i, key in enumerate(keys): + assert wait_for_message(p) == make_message(sub_type, key, i + 1) + + # manually disconnect + p.connection.disconnect() + + # calling get_message again reconnects and resubscribes + # note, we may not re-subscribe to channels in exactly the same order + # so we have to do some extra checks to make sure we got them all + messages = [] + for i in range(len(keys)): + messages.append(wait_for_message(p)) + + unique_channels = set() + assert len(messages) == len(keys) + for i, message in enumerate(messages): + assert message['type'] == sub_type + assert message['data'] == i + 1 + assert isinstance(message['channel'], bytes) + channel = message['channel'].decode('utf-8') + unique_channels.add(channel) + + assert len(unique_channels) == len(keys) + for channel in unique_channels: + assert channel in keys + + def test_resubscribe_to_channels_on_reconnection(self, r): + kwargs = make_subscribe_test_data(r.pubsub(), 'channel') + self._test_resubscribe_on_reconnection(**kwargs) + + def test_resubscribe_to_patterns_on_reconnection(self, r): + kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') + self._test_resubscribe_on_reconnection(**kwargs) + + def _test_subscribed_property(self, p, sub_type, unsub_type, sub_func, + unsub_func, keys): + + assert p.subscribed is False + sub_func(keys[0]) + # we're now subscribed even though we haven't processed the + # reply from the server just yet + assert p.subscribed is True 
+ assert wait_for_message(p) == make_message(sub_type, keys[0], 1) + # we're still subscribed + assert p.subscribed is True + + # unsubscribe from all channels + unsub_func() + # we're still technically subscribed until we process the + # response messages from the server + assert p.subscribed is True + assert wait_for_message(p) == make_message(unsub_type, keys[0], 0) + # now we're no longer subscribed as no more messages can be delivered + # to any channels we were listening to + assert p.subscribed is False + + # subscribing again flips the flag back + sub_func(keys[0]) + assert p.subscribed is True + assert wait_for_message(p) == make_message(sub_type, keys[0], 1) + + # unsubscribe again + unsub_func() + assert p.subscribed is True + # subscribe to another channel before reading the unsubscribe response + sub_func(keys[1]) + assert p.subscribed is True + # read the unsubscribe for key1 + assert wait_for_message(p) == make_message(unsub_type, keys[0], 0) + # we're still subscribed to key2, so subscribed should still be True + assert p.subscribed is True + # read the key2 subscribe message + assert wait_for_message(p) == make_message(sub_type, keys[1], 1) + unsub_func() + # haven't read the message yet, so we're still subscribed + assert p.subscribed is True + assert wait_for_message(p) == make_message(unsub_type, keys[1], 0) + # now we're finally unsubscribed + assert p.subscribed is False + + def test_subscribe_property_with_channels(self, r): + kwargs = make_subscribe_test_data(r.pubsub(), 'channel') + self._test_subscribed_property(**kwargs) + + def test_subscribe_property_with_patterns(self, r): + kwargs = make_subscribe_test_data(r.pubsub(), 'pattern') + self._test_subscribed_property(**kwargs) + + def test_ignore_all_subscribe_messages(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + + checks = ( + (p.subscribe, 'foo'), + (p.unsubscribe, 'foo'), + (p.psubscribe, 'f*'), + (p.punsubscribe, 'f*'), + ) + + assert p.subscribed is False + for func, 
channel in checks: + assert func(channel) is None + assert p.subscribed is True + assert wait_for_message(p) is None + assert p.subscribed is False + + def test_ignore_individual_subscribe_messages(self, r): + p = r.pubsub() + + checks = ( + (p.subscribe, 'foo'), + (p.unsubscribe, 'foo'), + (p.psubscribe, 'f*'), + (p.punsubscribe, 'f*'), + ) + + assert p.subscribed is False + for func, channel in checks: + assert func(channel) is None + assert p.subscribed is True + message = wait_for_message(p, ignore_subscribe_messages=True) + assert message is None + assert p.subscribed is False + + +class TestPubSubMessages(object): + def setup_method(self, method): + self.message = None + + def message_handler(self, message): + self.message = message + + def test_published_message_to_channel(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.subscribe('foo') + assert r.publish('foo', 'test message') == 1 + + message = wait_for_message(p) + assert isinstance(message, dict) + assert message == make_message('message', 'foo', 'test message') + + def test_published_message_to_pattern(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.subscribe('foo') + p.psubscribe('f*') + # 1 to pattern, 1 to channel + assert r.publish('foo', 'test message') == 2 + + message1 = wait_for_message(p) + message2 = wait_for_message(p) + assert isinstance(message1, dict) + assert isinstance(message2, dict) + + expected = [ + make_message('message', 'foo', 'test message'), + make_message('pmessage', 'foo', 'test message', pattern='f*') + ] + + assert message1 in expected + assert message2 in expected + assert message1 != message2 + + def test_channel_message_handler(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.subscribe(foo=self.message_handler) + assert r.publish('foo', 'test message') == 1 + assert wait_for_message(p) is None + assert self.message == make_message('message', 'foo', 'test message') + + def test_pattern_message_handler(self, r): + p = 
r.pubsub(ignore_subscribe_messages=True) + p.psubscribe(**{'f*': self.message_handler}) + assert r.publish('foo', 'test message') == 1 + assert wait_for_message(p) is None + assert self.message == make_message('pmessage', 'foo', 'test message', + pattern='f*') + + def test_unicode_channel_message_handler(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + channel = u('uni') + unichr(4456) + u('code') + channels = {channel: self.message_handler} + p.subscribe(**channels) + assert r.publish(channel, 'test message') == 1 + assert wait_for_message(p) is None + assert self.message == make_message('message', channel, 'test message') + + def test_unicode_pattern_message_handler(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + pattern = u('uni') + unichr(4456) + u('*') + channel = u('uni') + unichr(4456) + u('code') + p.psubscribe(**{pattern: self.message_handler}) + assert r.publish(channel, 'test message') == 1 + assert wait_for_message(p) is None + assert self.message == make_message('pmessage', channel, + 'test message', pattern=pattern) + + +class TestPubSubAutoDecoding(object): + "These tests only validate that we get unicode values back" + + channel = u('uni') + unichr(4456) + u('code') + pattern = u('uni') + unichr(4456) + u('*') + data = u('abc') + unichr(4458) + u('123') + + def make_message(self, type, channel, data, pattern=None): + return { + 'type': type, + 'channel': channel, + 'pattern': pattern, + 'data': data + } + + def setup_method(self, method): + self.message = None + + def message_handler(self, message): + self.message = message + + @pytest.fixture() + def r(self, request): + return _redis_client(request=request, decode_responses=True) + + def test_channel_subscribe_unsubscribe(self, r): + p = r.pubsub() + p.subscribe(self.channel) + assert wait_for_message(p) == self.make_message('subscribe', + self.channel, 1) + + p.unsubscribe(self.channel) + assert wait_for_message(p) == self.make_message('unsubscribe', + self.channel, 0) + + def 
test_pattern_subscribe_unsubscribe(self, r): + p = r.pubsub() + p.psubscribe(self.pattern) + assert wait_for_message(p) == self.make_message('psubscribe', + self.pattern, 1) + + p.punsubscribe(self.pattern) + assert wait_for_message(p) == self.make_message('punsubscribe', + self.pattern, 0) + + def test_channel_publish(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.subscribe(self.channel) + r.publish(self.channel, self.data) + assert wait_for_message(p) == self.make_message('message', + self.channel, + self.data) + + def test_pattern_publish(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.psubscribe(self.pattern) + r.publish(self.channel, self.data) + assert wait_for_message(p) == self.make_message('pmessage', + self.channel, + self.data, + pattern=self.pattern) + + def test_channel_message_handler(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.subscribe(**{self.channel: self.message_handler}) + r.publish(self.channel, self.data) + assert wait_for_message(p) is None + assert self.message == self.make_message('message', self.channel, + self.data) + + # test that we reconnected to the correct channel + p.connection.disconnect() + assert wait_for_message(p) is None # should reconnect + new_data = self.data + u('new data') + r.publish(self.channel, new_data) + assert wait_for_message(p) is None + assert self.message == self.make_message('message', self.channel, + new_data) + + def test_pattern_message_handler(self, r): + p = r.pubsub(ignore_subscribe_messages=True) + p.psubscribe(**{self.pattern: self.message_handler}) + r.publish(self.channel, self.data) + assert wait_for_message(p) is None + assert self.message == self.make_message('pmessage', self.channel, + self.data, + pattern=self.pattern) + + # test that we reconnected to the correct pattern + p.connection.disconnect() + assert wait_for_message(p) is None # should reconnect + new_data = self.data + u('new data') + r.publish(self.channel, new_data) + assert 
wait_for_message(p) is None + assert self.message == self.make_message('pmessage', self.channel, + new_data, + pattern=self.pattern) + + +class TestPubSubRedisDown(object): + + def test_channel_subscribe(self, r): + r = redis.Redis(host='localhost', port=6390) + p = r.pubsub() + with pytest.raises(ConnectionError): + p.subscribe('foo') diff --git a/awx/lib/site-packages/redis/tests/test_scripting.py b/awx/lib/site-packages/redis/tests/test_scripting.py new file mode 100644 index 0000000000..4849c81b7c --- /dev/null +++ b/awx/lib/site-packages/redis/tests/test_scripting.py @@ -0,0 +1,82 @@ +from __future__ import with_statement +import pytest + +from redis import exceptions +from redis._compat import b + + +multiply_script = """ +local value = redis.call('GET', KEYS[1]) +value = tonumber(value) +return value * ARGV[1]""" + + +class TestScripting(object): + @pytest.fixture(autouse=True) + def reset_scripts(self, r): + r.script_flush() + + def test_eval(self, r): + r.set('a', 2) + # 2 * 3 == 6 + assert r.eval(multiply_script, 1, 'a', 3) == 6 + + def test_evalsha(self, r): + r.set('a', 2) + sha = r.script_load(multiply_script) + # 2 * 3 == 6 + assert r.evalsha(sha, 1, 'a', 3) == 6 + + def test_evalsha_script_not_loaded(self, r): + r.set('a', 2) + sha = r.script_load(multiply_script) + # remove the script from Redis's cache + r.script_flush() + with pytest.raises(exceptions.NoScriptError): + r.evalsha(sha, 1, 'a', 3) + + def test_script_loading(self, r): + # get the sha, then clear the cache + sha = r.script_load(multiply_script) + r.script_flush() + assert r.script_exists(sha) == [False] + r.script_load(multiply_script) + assert r.script_exists(sha) == [True] + + def test_script_object(self, r): + r.set('a', 2) + multiply = r.register_script(multiply_script) + assert not multiply.sha + # test evalsha fail -> script load + retry + assert multiply(keys=['a'], args=[3]) == 6 + assert multiply.sha + assert r.script_exists(multiply.sha) == [True] + # test first evalsha + 
assert multiply(keys=['a'], args=[3]) == 6 + + def test_script_object_in_pipeline(self, r): + multiply = r.register_script(multiply_script) + assert not multiply.sha + pipe = r.pipeline() + pipe.set('a', 2) + pipe.get('a') + multiply(keys=['a'], args=[3], client=pipe) + # even though the pipeline wasn't executed yet, we made sure the + # script was loaded and got a valid sha + assert multiply.sha + assert r.script_exists(multiply.sha) == [True] + # [SET worked, GET 'a', result of multiple script] + assert pipe.execute() == [True, b('2'), 6] + + # purge the script from redis's cache and re-run the pipeline + # the multiply script object knows it's sha, so it shouldn't get + # reloaded until pipe.execute() + r.script_flush() + pipe = r.pipeline() + pipe.set('a', 2) + pipe.get('a') + assert multiply.sha + multiply(keys=['a'], args=[3], client=pipe) + assert r.script_exists(multiply.sha) == [False] + # [SET worked, GET 'a', result of multiple script] + assert pipe.execute() == [True, b('2'), 6] diff --git a/awx/lib/site-packages/redis/tests/test_sentinel.py b/awx/lib/site-packages/redis/tests/test_sentinel.py new file mode 100644 index 0000000000..0a6e98b273 --- /dev/null +++ b/awx/lib/site-packages/redis/tests/test_sentinel.py @@ -0,0 +1,173 @@ +from __future__ import with_statement +import pytest + +from redis import exceptions +from redis.sentinel import (Sentinel, SentinelConnectionPool, + MasterNotFoundError, SlaveNotFoundError) +from redis._compat import next +import redis.sentinel + + +class SentinelTestClient(object): + def __init__(self, cluster, id): + self.cluster = cluster + self.id = id + + def sentinel_masters(self): + self.cluster.connection_error_if_down(self) + return {self.cluster.service_name: self.cluster.master} + + def sentinel_slaves(self, master_name): + self.cluster.connection_error_if_down(self) + if master_name != self.cluster.service_name: + return [] + return self.cluster.slaves + + +class SentinelTestCluster(object): + def __init__(self, 
service_name='mymaster', ip='127.0.0.1', port=6379): + self.clients = {} + self.master = { + 'ip': ip, + 'port': port, + 'is_master': True, + 'is_sdown': False, + 'is_odown': False, + 'num-other-sentinels': 0, + } + self.service_name = service_name + self.slaves = [] + self.nodes_down = set() + + def connection_error_if_down(self, node): + if node.id in self.nodes_down: + raise exceptions.ConnectionError + + def client(self, host, port, **kwargs): + return SentinelTestClient(self, (host, port)) + + +@pytest.fixture() +def cluster(request): + def teardown(): + redis.sentinel.StrictRedis = saved_StrictRedis + cluster = SentinelTestCluster() + saved_StrictRedis = redis.sentinel.StrictRedis + redis.sentinel.StrictRedis = cluster.client + request.addfinalizer(teardown) + return cluster + + +@pytest.fixture() +def sentinel(request, cluster): + return Sentinel([('foo', 26379), ('bar', 26379)]) + + +def test_discover_master(sentinel): + address = sentinel.discover_master('mymaster') + assert address == ('127.0.0.1', 6379) + + +def test_discover_master_error(sentinel): + with pytest.raises(MasterNotFoundError): + sentinel.discover_master('xxx') + + +def test_discover_master_sentinel_down(cluster, sentinel): + # Put first sentinel 'foo' down + cluster.nodes_down.add(('foo', 26379)) + address = sentinel.discover_master('mymaster') + assert address == ('127.0.0.1', 6379) + # 'bar' is now first sentinel + assert sentinel.sentinels[0].id == ('bar', 26379) + + +def test_master_min_other_sentinels(cluster): + sentinel = Sentinel([('foo', 26379)], min_other_sentinels=1) + # min_other_sentinels + with pytest.raises(MasterNotFoundError): + sentinel.discover_master('mymaster') + cluster.master['num-other-sentinels'] = 2 + address = sentinel.discover_master('mymaster') + assert address == ('127.0.0.1', 6379) + + +def test_master_odown(cluster, sentinel): + cluster.master['is_odown'] = True + with pytest.raises(MasterNotFoundError): + sentinel.discover_master('mymaster') + + +def 
test_master_sdown(cluster, sentinel): + cluster.master['is_sdown'] = True + with pytest.raises(MasterNotFoundError): + sentinel.discover_master('mymaster') + + +def test_discover_slaves(cluster, sentinel): + assert sentinel.discover_slaves('mymaster') == [] + + cluster.slaves = [ + {'ip': 'slave0', 'port': 1234, 'is_odown': False, 'is_sdown': False}, + {'ip': 'slave1', 'port': 1234, 'is_odown': False, 'is_sdown': False}, + ] + assert sentinel.discover_slaves('mymaster') == [ + ('slave0', 1234), ('slave1', 1234)] + + # slave0 -> ODOWN + cluster.slaves[0]['is_odown'] = True + assert sentinel.discover_slaves('mymaster') == [ + ('slave1', 1234)] + + # slave1 -> SDOWN + cluster.slaves[1]['is_sdown'] = True + assert sentinel.discover_slaves('mymaster') == [] + + cluster.slaves[0]['is_odown'] = False + cluster.slaves[1]['is_sdown'] = False + + # node0 -> DOWN + cluster.nodes_down.add(('foo', 26379)) + assert sentinel.discover_slaves('mymaster') == [ + ('slave0', 1234), ('slave1', 1234)] + + +def test_master_for(cluster, sentinel): + master = sentinel.master_for('mymaster', db=9) + assert master.ping() + assert master.connection_pool.master_address == ('127.0.0.1', 6379) + + # Use internal connection check + master = sentinel.master_for('mymaster', db=9, check_connection=True) + assert master.ping() + + +def test_slave_for(cluster, sentinel): + cluster.slaves = [ + {'ip': '127.0.0.1', 'port': 6379, + 'is_odown': False, 'is_sdown': False}, + ] + slave = sentinel.slave_for('mymaster', db=9) + assert slave.ping() + + +def test_slave_for_slave_not_found_error(cluster, sentinel): + cluster.master['is_odown'] = True + slave = sentinel.slave_for('mymaster', db=9) + with pytest.raises(SlaveNotFoundError): + slave.ping() + + +def test_slave_round_robin(cluster, sentinel): + cluster.slaves = [ + {'ip': 'slave0', 'port': 6379, 'is_odown': False, 'is_sdown': False}, + {'ip': 'slave1', 'port': 6379, 'is_odown': False, 'is_sdown': False}, + ] + pool = SentinelConnectionPool('mymaster', 
sentinel) + rotator = pool.rotate_slaves() + assert next(rotator) in (('slave0', 6379), ('slave1', 6379)) + assert next(rotator) in (('slave0', 6379), ('slave1', 6379)) + # Fallback to master + assert next(rotator) == ('127.0.0.1', 6379) + with pytest.raises(SlaveNotFoundError): + next(rotator) diff --git a/requirements/dev.txt b/requirements/dev.txt index 052555f80a..5f92477cab 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -22,6 +22,7 @@ Django>=1.6.7,<1.7 #pexpect #pyrax #python-dateutil + #redis #requests #South>=0.8,<2.0 diff --git a/requirements/dev_local.txt b/requirements/dev_local.txt index 84bb96111b..315c1e3869 100644 --- a/requirements/dev_local.txt +++ b/requirements/dev_local.txt @@ -72,6 +72,7 @@ Django-1.6.7.tar.gz #Markdown-2.4.tar.gz #pexpect-3.1.tar.gz #pyrax-1.7.2.tar.gz + #redis-2.10.3.tar.gz #South-0.8.4.tar.gz # Dev-only packages: diff --git a/requirements/prod.txt b/requirements/prod.txt index 34cdf96ccd..660f1abf53 100644 --- a/requirements/prod.txt +++ b/requirements/prod.txt @@ -20,6 +20,7 @@ Django>=1.4 #pexpect #pyrax #python-dateutil + #redis #requests #South>=0.8,<2.0 diff --git a/requirements/prod_local.txt b/requirements/prod_local.txt index 428b1c51b8..c387f53c0f 100644 --- a/requirements/prod_local.txt +++ b/requirements/prod_local.txt @@ -70,6 +70,7 @@ Django-1.5.5.tar.gz #Markdown-2.4.tar.gz #pexpect-3.1.tar.gz #pyrax-1.7.2.tar.gz + #redis-2.10.3.tar.gz #South-0.8.4.tar.gz # You may also need to install the following extra packages using the OS diff --git a/requirements/redis-2.10.3.tar.gz b/requirements/redis-2.10.3.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..12f957fddaa7861e1669fd4a9e0eb04d9422625f GIT binary patch literal 86532 zcmV(wKnF&{QHFlgo#KNJv761egG6Salu_ zdjLREvJuf;eZo~4l1N~WYp=cLKZ}NOzP{DoeA<4t@y)OE?Up!@J*Nu?-xA>k6m3YnIuj`fBqNw zm%01D|91D}O{e>7?ElN>&*$v_%crkiy)gU#;^h|X|8scoo8am1`~R!|dlK;8Hv;(g zBzPTviiSZLT+Xh8yLfsNOmCv#@_rhHSr*<0`D`*tvuO|~(<~j%2Jmd0a0^@Qpm!4{ 
zw>h;G1b?RF3Mjyl2K?*4Z_?4BQVgTs^k)8n(<9)8MHzHR0@N%K6u z97RDE4x(U?Cc}6dr-}4-cM~PScs82G6L@5LlSSb$5AvHZi)eB3a2y3k>EISt0%#c93bOxixZ>6l-OkS03V04O_{gn16%)6*kwu+vnU3zHP) zlj&R2G{~Y+6y{M7Pg~S?5=HbmY*lz^7Q-KNTUtAhuCT3?)@Yax;QK62u5%a|eMBoZ z7{RcD@Df&uKI8`b)S!+kyPl1s1W67kWgOzpvq&fd-3VMv#>t-)mkq&AG;{Xqy#wu3WR9_Uuuj$+=QH0Z)w z4yNaLX4?IJoW#?9KY(*&r;bD80p6wAEga`5eWs%vCAW{19ddbO*Es z#DP0I%bCXg^^iu&tD@^y*d92blW-bd#-n(8Z#m1^{qzQQ>=~ahI!aOY5hgeorAVux zA@d}O2JXHy1KB(5!9mB?$1o`65mnsbmuNbTukPDbJP!K~r_*Q*V^6Meaq|c^XasZ@ zAz94u)T9BeHBwg?oW+x96ep44Yn#>W0*Q2iaN)_3GY4ud)T!cmM6Q>@Axf^{1D*6Z zgdNC;R$zCc_#++e^Um(U>B-Sg0j)UiGK@=F*Qez#bFgt~xji%Fr!wRNdKtzzn-*BI z?9NvEMLT#mxz562)DC`1XHDqxG6H&?Ok!-%Quy1L7iofYE`viJH_6gTl#T9zy~aRK z`(`ORL^j&QK2XUhnnw5_@Vo(Y#RjOb6}3c>~^NzeusOM<-i@MH`>9I?*8tHZ4vi0i(oAix^Cdmtb#aze4T0L*;+7| zWjW9g9Us_sWLt(7Lpkeo9Y8}c88~rMX6!H~80a{@zM10g)Fp84yCKZ?D|l#PFVs@((*f^J#N?#oxu~(=-c#1+YiqhhPdc0p|&hK@J4i$2MBQjq`i%{BG&_ zv-y+*!|6>N&EQr|uHiJd=So`LqthRbPT!a*nHJ7LfPxK6g15vC{T4>ir+AP8 z6PVn@c-NZ+`S~10slA_$IzP7HkIu3CHxlE|@EZ}j%ml{(>p|QmnmUJHnL?0W{sO-- znd!)3SbVoY-zpHmT7d`=P5UG=3y9`AlKII~t1=u+@{_^>43pjs_{beD_ zD%jq=4r>gY5cxQ;1rlXQRK)w|Dw+;%a76B{(^8@Z-kl&%%%Lkw|0uW0MT9qhDDSTR zW%GD%O-~oKq-%W0{qxW#aG?6KjkHmC>-F)|XlOpa0{&_4ESgFHY9WR}3Y5l_Oo3t^JuJQ~rxiBp6dgT-68 zS>QHxZ)78yTX|kbl=6{RmM$O5-9f#5j2uCaB>*!`kg%r_-G{g^hCv3;7M#z{N$>Ea za}*q(o*ed0&s+8rI9;dTclcPD54)Y75t9lh`VEN1!SO7c+!)E{@Z|Mrkj^F~Zo|BW zX_SKy3_XuVu;?V}-A4D@v~4Q^3*7c5ztqi#%O7@|m4PsTH-ZFK4aC}hKMU{r{T01Y zsY5u8bau0QGq$s6n5lSj1iD^&@5#y^L*O+D$jU>|YETlm$NidyF{(}^Poks{$YVeT zALA@dNKLph^ud;m393*ZqY=s=v^J6Vcs8RZ`GVx_ZJEFG5lH1NN3wrr_7<9YJwtgy zm@$#ETmaK32c9sS#t|UIQ zPzmD^5*tZgQDQbd!vz}I9cFRz&MLLAMR;(hF3H2U>By*nIq3~B>v!izs=m1zhH4eS zAF<^bE0VB2B2qbBgvCw_zY=W|SCO@fP);|!iL(vqhz&+DYkT~{a@8L==V%m9;v|(N z_`z$n&|Z$ra@j&8xDw_zsanwq>8(RH6u_mVYMk(pVh1RMNppXjMcKWuXT8eu_FN4j zPsdT9iWW$ZL<_U&)%xbkHPT^0TAi^r?(i2S*<85-oloY3BO=%lNQUWbGz_kzDYtLA z7;`09g>LuAxtLj5XL|#(i<~d>Xn-W(DL6M-I=j{h=hIn&%n+Vs20V^J)UlyuScaLX 
zEzp&_YR2@Gk48JaY^Dc?*Eyik(7w{3@Ump#4_~2kk3<487F+_6x2b_)5103}i~3gO zT+=c^H}jHO+7~m1?B*g(6qrDQOslZeD#}QkeX$(rbn$$pXmJ~nKtnP&e0CScc!off zRRaN#F=;7$oO=6b?&vS0D`Y#+Hr+@@bWdY*kF?W(ay1 ziF2gUvS@u9f%;+$7a=peQIyc0>Fkhv?7hrO5FnopJu?EY5l-eiXmvdt;q|R|rn8G` ztdO?~m4;%Zct2l%K9t^|G?S zPsqE1H|cZ=dwf}>z6(%kvim+zvTV(Tp*%e8vOqZzpNvBHkWD|vR)>axWH>_G)t<3k z0t-c94di_!nAgC2_#U9kaS-ah$%!^*3pN)38FN=;Ex<*@uFa7#1M#InG?z*<6_c}r zLDFcsz0;WJ4Hh(rdEzSQzQri#(AmW6jMy>L?}ZGAR^(!dS;JUBbGQpY+d{bnMFPHQ z!r*8YS_7$7o$)G~q4r@+W+JHY*^AMvG2zSL3Pl^V=3!d_8Z0JAmB>5Pu+3KLD>#Cv zUJK7d;&B_zgFgpb?UyV$;RTJ>V|Xn71P^?~n?#St>~iYZ(DQgG_AMG`IvTRwc|f-g zEJTE6M&}M~8QT|0+7aAj!&a~YdN_!hD>%3CBk&}>>O3R1y_UoRuE+f@VS_2RLL&*s zNF*eyAtAv>_iTN|FR4{n0&{Q-HFicZa*bOM{cfhweL@txP#mG9q_Y!W&V-|(`Rz}} zV0d`;@nzA+QPn1>lE~AcX=y;scV~_gChDaM!KC4l0AwsdNHg}DWLe>ijCCsYspc?) zIfQVT1>eW%R4KXh39fpU)RF)%gL|uiPT^k5L%wIMF%wn7JUD)L)H~eh{Mgy|O_EGs zX5O&C?)sWuiNVMvG=ulhc*UaKDhbvo|I!_BmuAB?(Pc+?=g}pIivOimTk&|2ec8Dx zjPx}F`mxHJ+%Qum1XzseFJW*T-iENyI_^Kn%X`rjRYSCAc?zBI6&%)Fv9loLqCqY= zPcNg)N`}n+K!(f1YbAHbWX>zC(x-GOJ|kwFb zZ4*smpqBEFxBP=lJ)W)1wDnimhrvyl25)ED)_Igq;5bCGx7SgEcRn5x>!@isN38rB*l}V^Ch}L$UXOI24hu(p6vbD!1%bdA7mBWx-6RQvzk&oixJ^HX^OY7| zwqreZ+I zNz#|;vrKUl;SH|r;gZamF#M}g8cuUD*R!EMM%~mWRi6nGjlOEX^yp{*^yKy7o8Y|D z?ex06-Cl`~s-;nvj*9UGuiDQ<77a=M0R;~ob42$n+o;{x^}{L|nvVCwY=V>s^E->t zMZ5kQF0MfuyM}1sr$}yk-azCMgAORpxZEnQhIbKZ=ac(3Js{inu<5+u=}a;@ru1OJ zF-B^uF+!8Cgbwh0+3)Cd_u%m44PALf5r1!%-yfwRvPo_|`hl8qbMr#drz}6sNPC#Ao!olsH(Cn3y78sDrZu zI{#bvFJDpTo#RSJ7M;@B_!8w^xHw2N8%8L%4;>4ZIC*}V!EU4GutB(p88#s@ciHFr{PPrs zc93Ri{@K{uJ3ZDXXoFbEN5{+={O{yPT9F@rG`1@;lc=ZgWqXqXdZOG{opaWZlq-w= zC#)L|hCYSd#cC?T>dj)3-M}N?KRbQ03>u~AM}XbEiHD)C%G@xez4Novv!ko)Jo~vqeX_XT!VZ}KBz)#tUf!Qy`$Lx<$^unQ&B#c1;_CXYC+yav<+2* zBr=IzMx+Y5OD$VQR7z1yVL|#aGc&Ji_+mqD@mRAFkui9%!6DaPBd1 zxM)oNFNd+R2<E_vXCw!}(#avwQj)SD%Cxni?wD zxf}NKtiZ)iLb`B&2n)X3{i8#W#)3DU-pTGU_aYy44^M8Yh!?<*%}ajC+*-C&S&lK^zj*s@Rmz9i34Q-Vo?EJejr z0_#Xl!96B1N7;6Pbap{qKTT#?@a{g$=I`BMkDSxQkjMpgK^NQwE%XI&u 
z>u_GLr#CIpy%Hn`f7m3=nC4Uyeq%hxuHz!kUFXgKrC~POXX)KKs|v>9S=gY%G@1S9 z|9}bq=l}S>|FRSPr?ZSqqr*s^ zep(D?JU!!UM7dJQmO_@HUi$fSTsF(}4((;&&|nVh`CWoxRP%!lj^6Ex7DdsYRfF1?uGtmh0>WPWfaKp>+~&Pm~={fEh(G z`quv%U0r3-eefOV1=G)Nkki4(I19c{N4Mg@$WZPF&IUr4xGp_oDqFj(7uslWF#794 zfDT)^NVYRpP0nwoBkF0nE#nqaL9=0X=Xj2oku&v#D_jyuPDopai16gnIaxluv>SzQnX7T@{t z?C`v^AwPF}8(zetBRQk}ihevjCi1WNO4EaKJ)(hi$GCr1<ym`7vZ7gbT1`*s7PmwZ*+Klk`t$qiD^(0 zWAelgSVb}(ir`uMlW~&B_%}WA!hPZo!Crjr*z9*l(Wl@|_8GgtWM>`S(1_ULn*oBPL3)-{r_G7_FyNx$ zd={D)4s`w%4bmEg*0z+k{|c9?y=wQvY!)V=zD~DMj#&?|8!N*Ig$_f_`HB$+iP_U2 zWn#j;91WQnlF{lgT!`BnqB|}%L9UKxkxxOB78FxnT%E?1F3|RR8%MzbS*z0`*TB>B zyuJC%^?$grXn0{x!`nzgdu%8V9|8I9%t`0akPKQ|*QSg53OQZTRv9KUqzLWiI2=Yc7*vf+ zBs)(*3#JH8kYr@P=nUo(ddl)BaBD~C{&s8gm02t0t$VYpE23gdu^@IV;Y1bOof_oe zqu>YNq@e7q%Yhi^y$tsPF!84^*2=QY>E2&E`#mC{gU;T&H~N8ZhtW6s{b4lf_eB90 zt!m^Xln_6ua4s!iIoYuLK#gqY6$Tf6tHT5?kC)A(r; zTXkCnM2^*w)Gfa8gqxkDi5bYxy9WmxU5URq?hZAW3cFuyH?=ucj&b8{I)+DJrXZ$R zw?0RKQ_3*rjk@AuA#JfG=5_4T4D$$WyRq@$h96nSVF5b5g7X=R`wM60mr9>+) zcT?H!pUj4H8^UWjY;KfZ#{-;-y%Qb5krh#tL&O&7MMRPpAoGpPaUqTa*dE-dY!6_S z8T*Y(&I7}#KH4-zk*g@Cn)17@XE;|NDM|xWn*(NP(RMqY71sspW`=9p3q@RUq6hh3 zOAPa)C~pT*0(01lM^SQ{>Wco+=IC&86mE5&T%a7#g;!d?e?=(=aGm%Wn$^4XYH?7x zYs{oTax}qSi_9pQ-$SAY-O1n1F3CNSLVXLaxCu*GL|e8NzF^;#*2F ziPWIk+}l$;W6OxdU}%V{nGA}k_Np}Lr}M}oD$z%1?Vf{3P3Pjg@@@N)%TCPBVL{eM zfJr6%euM_)1okuMgfuZ;UZy|@8F0d>>n~+l+L&al*Yu8DklYoTb;-=BDfTmawvj&I z2!T9D`pAr;yM!(@SmS%3_7E7qO+(-eBuY<3HydU2ov{176_9h4BfZY?*TQ<9qb>eF z{?8GnsoI#q)&29s-r@f4(VCKBVr^=Ukf}+-w3+DuZd0-!k<)c%Pn?a}bIA}rG@z1< zBehUTHkf8c2vNZ^z!FUyAPClx1d|R*85d{I~Ijq2x*_}PsxnWzsdG3}#oMPnmkGRufhC~(&Xov69UH2AJ~cZ~UuHCIYN z?K!qlw4kC1t6gB~BsRa>J38#XRT{RI8Iin**!^riaX@SO>)D6yyS*+@!Cr^>H8L3r z-Z5>f+>8P47%8qfX=7Zg97Eyk1nH86XON4P`3ql`lqMGZu-n_`P=I$Q^g9d_77dRT ze+48c-xU49Wzj|aFXT-pTkz1@X0N*9?r-O71^wT9K<0b@!PTW09>qa%hC9{l8ZB=|P zYMz3|kMbC{s>fi%shvkTFNY-3sAC% z(o{M;NqXA1CUTO z5+3O@i$L2#pB%Cf=G5k9meLKtMA0Ke8Lr77wnkUrJK<38e3PvL=PB5I*Ly2@xks)o 
zAV?FtQVTQPiL-@z4T9QBey*s^K2vd*isX!o=BVC!L@32w{RiyDxJ#u)v=zh?K8>p1 zY@5IzZ{syE+ArtPwrUaatfKAD4F)L`0*P(mBG;^cBR4;=wb@7_OpwA_oKJ%>2X^79 zPs~(t1=505=O^d{MNC+e0Wn?jL}6PLE`c@@!@4LTB%_-*iHfO!7d&uI>+@Y#zMZcq zHZI^F;^#tNT9~Lk115ayIev=SvYNt^a;Xh0y)ef7G`v(26lrzNyaArQvz+D_8!yvE zF?rFMrJSVdnfme5>ZAZQ9CPwSJP>+p(|A=t^**#R7^=*p^sYLt9!MrtD%p2sgIdHDac!?{_9m+any41D zAeXQ`NV&dy;!RZ5R8cfIwrc(AMm2 zM1lBn%#kIy%AclrNEDBAg%zYc&(T^;v2aqDOcqd7VD9$tah)X{KY3al8sm0?YXk@y<8 z+M8IHXJqluBMfes7LtSwqeAvZHGx7p=|Dn4GUsZ3e;q^k;vil%apDYiCJXW>R( zp4>@_@gVmeEvas#3RkhBg{Pg!{W^^D*lI<_H0SsbX&^S9-WHm3X2$w2Tgw3VVs*Wc67c;_M_A>vYZ8>rEju+H@4kj$>v@*?a43+W9VA9m zp?OARBvXMzP4se-{z^h;3xil`nf~<3iE4ISJ1A;Zp(~+-kWkO;tt1n$ROl7^LW0r43HoSYjsm#sv{6Qh)lurv5vZ zJ7uIrD3RdzGP;#!THMg1spD zB?O%^*u_eM;_257i<6;A%_CdUs53!{?7%9T zC8_+@l#;H}+}vuM73&9ocvfx9D@@$NeCvKmKE9LgA`w4v#^dl4O&AvjSfI4#u!NjM z7=8TmUMlI)S1qLZ8wuW|b4w-vl%ZJ`B(XnkGV}C5{*TEGMUKPABXc5P;eE$5#V1f` zlN>~jfwa;hY1lUGg8XHe8h{)3$)T35_OpL}O!t(Q>D9fPrkMr~YJm%3z{nTaUh79| zQ}U)=1INeWHISpgVWKRkGD4h7?%psW^GNIzG-ETPm(XTPA+!I%mJfvEI=GpkerhG^ zy)4B78PK~h5Up5@p=!o~_1vTAb}`l2x$k*eD%8Q7Eh=qGhv`PriHLNOG)V-&#JaL; z5WSOB&a;v|o=&hx&S!NMI>ja9&oSpc#yJSfFx0LkKs4U~51rlz*J-MQ25Fz&P*}18 zE8}HMMO6II`UrJAZNHj8A#Rx+j_D9uri4!YUfe&&x#dwRzPUF?-#!} zi$pHZn4+ssQs5Xjvp6@~Cr<;-s3gK{HaF(12{XCW5SN>=GbmPmqDUm*hlx#()=0S< z8!}~VR#rtr-xtoCuPVpsZ|zn4j-V?m!>J?E3Ta~~?94Gb*y31oOHi5mUwM3;h&Gs( zx_6zG#Y9uJ9~AEkCnotm*aO*fSbcDwKqlmAmEBL2I!}@Q% z>GbvvdmCN)QSrS^Zmt?k@BFB9Qt`zZu0gk|y{-fkH;VWlhbP@0C{({K{>Rg&o6nwq zv$?hP^u?>K7cXBvBm9r8r@!NW{1*QjXKbt>6^zS~pl~vqv@rsXk71+123+DwlCMG8 zVZ;02c(6aq;O8pv^en#p*XwZ@k1)AztWCxy*-J=0oEtpIlr#rHo8y?6VqCRU5bbwT`}xBY4{hx~H#uKkS}&g2Qfbc7FQ(;X&sh zSlR8u-z%-)hr{06(|0{Ic!5mP`zZiHb@$|_;Jd?<1K5bpk7wtdZZ|kR4-Svdjt)Bq zEzI9MdPf$@J$Uit6!RUA4|~umCi+8<($-<8+rZ|IJLd@b)Z5)VJUZam@%S3S4{tm48T7ad|G(b@Sr%sk;w4CK`@I&-^t@-@{o%0NX$8CIhg}$KBY1s& z3LW60!YilL3cPsI;bw5b{e6K)@b7nB{Iwy&Iq2*jL2HPdDoP{@?S>SI;-$0z&!!1^oE^{{IdB36B?0ylzAP7FE#vEux|Mn_r`Wma18{(j{Gwpm|tvEI^{@aSf|`C%~6*9mHdt 
z?Y5~r#2^{%X%>Ejkqlxg_*ymd_9VIfmrJZkdb7iZSktWj+n%DLH8pFwywUOy58190 zI&q>wulPFF%sMb=xVITd= z1!uU$&M@f@gbT63N&e2I#y-D-wPLmue z_-nI8itWzJXRrPQ8vo_a)3tI#ZJ7FM^D0f@>E>maHT`EC;1TzwYkkRoni?oiY4_}q zQ+gsB3tCvPYCd*R$P`vjoS!LJI*l6SgNG1vu}qdgz{x$qFX<)c#!g0J+M&o#r_efQ z9y)$}c$Oetr+29zKpObP5KPdoR4s4|GDTi)wD>!@JOCs04AC@Y+7+g4*1-57?_uVj z6mWwKP{CDn=cM{poQx;IlP4UD^yJAlMr_)VVsL@xVA7H#%C5L78$%>_!>b6Z(0ZZy z8Y6wz_BuM~9APibVKi!Tx*xjaGmPfcU>PrA$Da~q=ae{qz(Jc$u4&vf6@9#p#d-Cn z)BEq;4$YJc29YJoaUW?cFt9b>(Zv-5#n0upZ9yRBw;NK*h{-_E#6FGF?cKPC$(AO~ zgS!kd(T3KcCrNQw%NAiLOdfUaAZknO_0(xIHU-*-nr>;K{?NfXv;o|lfn~FScbr<# zQm_)Jf|BYO-`eIWR_wV+lH9=3U7ZtLq;5QpMM5qNcEi4S1l@m=I~V3WyZ{$jedTrbwgIWwP=5Qp~v`&e|Iuy0qN2ypRN&F{jBS zlE?b(=8H||sXtfYw^Rsa8(hME1K$3YewBvTH(xw|@pSVAu;0Q&B%gb&Kso!nCmRID zu|ZfI8wA54hpdQSTXEtL|Hj*2%$14V67b%A;eqzFe9n*QIv7*{g4{=-TyKLFveqeG zt}I{zqOnBNxq0__s^K%4$q9Di{DjAIdBhB(5ESr(QGGd6QfifAdhR2-ee_X{IImu*;kf%JZi zD|$d|s7eqmzu?505m1i#;=dLCm0#V9Zej`D!*YUL09eGy8HdLgE0ItrGu9FD3pJ;v z&9I=Ux|LsvlS$njy?5xwVPI1Mat-K)^1}6yx8;(YP)Nu!7ml1=pLG~4R3{M2ip7Pw zIcc1jmLg;&C8y^iOZk;sJj_b2-BL*UbZeN<=*Qtb212G>ab=ilCLjlyyTY~=-R1T~ z065MIl=3(fBYRZ=ya=7&n7qxv+dRAnj_%`s*)Ze@%RYE zDJd8f9b2(;+|q)4Xt&5X(M#?oJG(SXMLUPlNAg{f8a{ZUbMR5&njJcHxFjzhtE1uYC z&m<%iUzV3_eQ6ZSByUOcH<=Vs|B}WL;KA9NvD{;*i&!n_>rYkj7!((3r@1j5PsF}} zpEoNGg>p*awiO}JE^JVU6{|)g$$%IW4jS(A)eKnQm1)#XK~-MkR1h+JSh8S|uYxK) z0BrzP`HoTA?18aax{=M%gIQ&XmF-oj9CyMYANU`zx~ z+7GohaT*g$YAHR#+2IB|&=$-K9MAyC;fj+OV?N@~p)K+CYq8|wly`tgGW*OviOwbS z_@aNf5PevEA~_GHQLmD#FZL9wdaymsMLF}Uz!pxIsp&DVn1{vry(*+vx&yXKj#mDt zpe(0UZ5Zq;+M22Jz_uAaDw!_sGz!P3)csrxRgNg)L zM}=vU0M!Ue*-y^-V(&ynqEVGU?gBtJ$Yy93vR0xk^wKkLle+#k(fTt?Xy9 zrL_DTlai&ef(GEGt_qQxl5oZzZVSMQ8Q%1wLWjfCOJ+04ph|oQbSWQIk+@wU@d7;n zW!i2Pnw?UV7?~={){T|}yAT9`+Cd7*9S%hdDK<+*TXNXRgTt=fM%i||uJgX0GQF~jYBO- zr4kFY$t8iw*S?9Cnq1k3lW)nI-gjVoA}J#wmKhse-86s^k}z;N~huND>6@ z-3tCjmZN;7LXvd2kK?_n=37;}?2wWz=0K)dVFZ@_~y+Fqj0FQpA`KUQ)t3Q>Dc*(D$3)Rg`IKEFu@utM`j6(YweJOB~HpU zeg_&&QMGB#)g)+`27i=O9AS&dpFXwK8!G(n;(7JOWb%k7X~9)p*a8ccQ{wTt;pMpA 
zCYRpU7w`%*8PJkqaVE?Dn5&D}TB_4z@0L|UL}sYRd+)(w#khbLajjg_smj+em2xCp zlpB2zcngRFkCQpsO{?V%iYLB8ttyl~b{NirhedQL-fP9u%nG~Avb&SDZd$w~C<&6G zb$47Ji(`LceF@}VhR$vUAx)H)5Qw`HlT)G zbA-+3X8ECS)wO5b$xd6gt6@TSDAi1_mXXWNx+bK z?AlDbY6Ohq+}VHE>j;89LC;g#j`TiwokYNRdQZR*4i#StQ zwf7hXCA;^bOc1YGD~zPFdrLBnlOaH6%>w_@l{W15;Q2pFyML$W~Z=- z3zxpr7I>Uab#M*;a8OVY4dBWR!pYmU?4QpANH^5g7 zL~)C6OSYsr#W%EcaZ8FX_q67@gb}cE?#R}PsxsBhKu`c%eJpV9eDO*dNpqOmvRwr@ zIq@DVc!qvwSBM5f>IqD%Vp%!~fri3+TyV+OhNVA6TwzC)pnQBFpS2yjT9Nb}Q z7=~R;4GBBaxxK%x;r?zfX?E(=N}G{TB!wka3ATYp?m{kXRo?9Ln>&u=_Q(ZIqEEIJ zShrQL<+b3SgUuz=Bk+WX{Sty}X75(ACMM=i#VSv@$V9~yDZ89dh%K(JEq^etoPNuh z;!@h8nuy{O*KkvBLaQv#PL%{6Fsy31s@yq3D0&=;Ysv}0qS*8tyAc&gBmT0aVb~n< za=-;E@)I&bBV;Wm-%C(oZs$G0f$Pqox0f!kL=LhdIL2X-BX1y8mCF043gps5#`hu4 zR`GKT5(<~d^>)$#Vtc;i>bCJzULw-oWi%BCT3S#79b5de!sL}1I@Vq$9@hqq zwYiKFu^Wl5Kk&=?yEi0INT?U2uxqRlpJ`JO8N8DseARr@Skb;!5f?UaqOPfUpHYA5 zes_m==J04O4;dCgxPsNt#34qXLelyvaCJ?aZ+KzbmkK@OgF>`}Q#4=9vfPGlYXBKG z%`l+hw(cNqr zd)45vL)F$E=R0C)P)s5Z1>1W=p+?Q_)uR5{U$2-0j#hXJWmg-VN8XCdL(DXS$hWtGi)#RZ)u-kI0$3Nh9 zdP`s!fLLDns=q4@$j2P7X3!-K$q8Pl;M&BuWk{0`I?zSqabXY4f;#QVyA@PWXtI3BE5?6S#7wk^Bvh5lC%&HXOas{gK!)j zb}QaqJ@N#iE+<~krSml3Fm=cZPNS=pQkMGwFWA~wZPHv}V(VxTg}67gvtRX!W?Jwi|zD!|N`kZ5YnPhP?4VuS4 z1#fpx4vsqKaBx~W&i8!KJ}gv3pFH_Ho=ba5ZQuV`D_qF5pHmNo(bWL8-`_whdYCrm z3j@wO;axOO64)%vSz%2Iq(zmQ;Tmk&(1?gllwQ^~tr!W$jS5GkR+UL98qHL6xC`%d zMB536?8~P@F>7t;sR>;W7ja`1teAKej^j}rW(5M)Gs~tOC&gOVN$0k+aHm`%3)~fK z6rGUMC=e(UacWqavYdoSSHQL|y*6T)eBd^GNnlp~0?2XqrD6DX4- z9hxd)#pWx*s;)}Vs2V;($pM5drLK}y3N?tw6dxSt_iIJV$z`C^F*zEIs7-#lL2m? 
z#iTh-;n1>U8j&6x_Ttc)(O(-*J-ZM@f<#Jh(XC9&o@>62E3_LTZ&zJ>RqSdv`8~|xRmcM8~3Hg{bogM?WW{AG1 zl|sIrxSaZMl%Z`_vs&nWC}nsI5S|5%i&3?4D1JUCOgA=VfJE=K;<(06#;CW}tpMI& zh+Rcult9@Sz58jx#8lvWNI7ssoQ*2&ki;A$viHDMFNj0*ky%{Wv`ai~CU4YNgPjgV za)bX|ILwCFyilR&vRO<0m5opW3iR66peN{54 zm@GU3XLTcljNH!gv|_rkZVY3CY5UR%rf@hZc?fAw21gW~NsZUgq4J(EE^#x9%Azl$ z@~HI@>k_F|k|r=rwE$shcvv*; zcaK{1-|pL8Nt4Wt$7N2g1y)QDzmUy{!>UzS&yNeWK!3G$ zWn0L-OWCMZg$_J>n?$?UT*A8uE|tKxUX@4ARv>z>p}&H98Bd*3Y$;jBzj=z z?K1cB!`YNXP@-P5aU9jfrT01UKsl!-oNy>1nv82GHR0ytZ>`NeF{?;jJMZbi4=vnh z`0GXC&~84hX0axH5DAi+Fj9FMY>6ym&Za#`ygB>=zUo+IeHXWK>rv{No{BsYM)0Cd z>|HWPd~(-TDmiHjW>>LAr86w2eJemP^l{hOq(6^oC&_;M zAsh}fw2;?OH-Rp7DFklj!ANa7#?m|H!5#C=>ibo%QvViK>0kTM68CLitROp2+nZ~1 z+iUCe`#g2@(DQEv-oM)1+gv~8 zb!y^C_Jv0wNjQiMSrmQV!~|nHM-B5l zVVuoJ5g`lyJXX@yNKW$&n*FgKafJ(l&6K<6dP z4$n)jqDBHoFZP-mcKB?4f8@u*rgW*hH0xtDjb`A`n2+B zi!=I~);V*EV7A>DH(W|?q2kGwk5gbgAG+@j?4A=&LWiVE4v2z_H?w#s1qVf67R?4oh7KegbHm_!P^${+BuEkhgcsLt4 zQY0vZe>96tb{~qV6vHJDPdsu3Q_sYTeBf$En6kV~s6+ZO@dj?wGqMPVnJ@5)T}d&I zb-qP3w(a1;fT3Nqf{PpHFKb4!FlraXz%DF;+C@FV8e}#!j_%df<}PyJ&E{GQ?&&@J zeT#qJ$KRhd*XBfWFbB7-tL7bI>dLInlT|P=^h{B+tcNBdJ5g;?@rla)Zefp8XsD~h9s}Glgs^URK)oqUQa@#u#XQSZl z>=N$$p}<-bhzeC_4X?@lJ69EGNL}+>VDDN*aP_lMIK>dd?cm+XcPFPmoHPg+1r2}M zc5uMdK$>xP8ct_<00-?wupXQdPePu)(M47sdGB?Sk8zeJ^d7tb_g$VA9>Gf83ECWX zXG5;+v2FT8_!Re9!Dh!NPP>P}?u39bhWPjKVNbvIUOAlza%0eyxH)_mdL2d+>=)z_ z_i>ocbrz1t_`*?`T+h&Py{#Dak9ZV++iJgj^c$}p`Np%yKHuJY^c#=w<9Xwo-~atj z^^e*1R(tbl``N~MXZPT^(+0V{);^xXzn9OS)6Z8gU(nB|Tl_bC^X%oz=ih8@J$nw{ zJ$ttG^qZ%f&!4|~{!Q@ow^)D~222LQH+YvVdJZ1Dx>_R_(FZP*(Sr;O~eys<{(kRf?EYI)toCv6x(iHq`5kby?J`4~*3si?)5k_RZR zwlPodu#cd|ai#TlWf=5FHq#o#4J>Rbz(G1BMI79wwJd`WH5ha#2Pw&eH)7bK2AXF^ z1f4OBKcWw16+;Cu_yQFLwV{ir2H%J6on7CoJ)}Di=*ad*_NF|YWuP8BWY7Xu4+_Vm zO_hrn1gg<^BXx7TTr$wxGupOlMWLahx|k3x0MmS*{sUn?OI}0A$b)8X=b?gyhb=pe z-Lpd=C~{&AqDEmlRGBBex{66E77`{NKBs zo|H}zbi@}o$%0UTi7Aw|ONA(AH z)iKjpAPQ&iy6^VTVY%1YI6Cb1Is|s#jmPmQ^qg>cOM?W_OzJO<4QOu!O1_i;1++AX z6!8^wz|lR|8g>wf2;P-2p&6=&bmb;4fmW-#-4vfAP1s 
zteI}Zn&Q+DC>ib!B6%ByCo^%A3`AvtC4sY5#0hT|KzE#bq18}yy4<9=OX*<;oJ6sB z(0XzgW<#Q`3CvIG+!H@aF+r|Tl^`b<#nE}^*ay2$X5(d8_Z&)1mmNROPBIJRs$9;5 z)_jh#lu7aD-Gc+{P{c-wwPMcEBxp6;qsIEnNR&P(SUSHWwhP^cgF%dT4HDUiuX1jH zN5e25jFVPBqeb4LitLjposi|2npTu}7W!faC-T6MxZ-IZ{5yrY>4`vd5=<7YbQc&} zw+XT-P6&SDpIh?h7F8a+kbAn<`O#m0bfYcX03f)=F|N!39rK1KCOO#81bp5o1q(FF zS;AJl5unVXM+IRM7(m_iolet6SkM4mlw9HfzQ2t*FS}$VHjki56y+If)W5@c z6}izng0x`dzPI_djf$er8f{F@Vcup9;441umZd;mQNXFUR9b+7Bsl>`i$IEln-ptA za=s@g;z@`@W9p(b$ZbJM!HDrkDLGWn_anvd194nuh=Z=+$XLTSy=%CeLFKY(kB04~ z!v7)!>BmU>1vsk0BJB_)noE-&Il5mEttyQfpw|5g48yqy&kZWP=f5Umw;{=W*V#(R zAnY4P*e?Y*4-tRN2lI=9cS-!|fUxMh^q4a;sij1hmH6XSI~K@NO0^nBA2F?20o_?P?l-Iey{A$~9GbFV5Uv+lGCfUkqawC^!u8SXnON};47_Z{bi9bupgRfy(^?}}3g55V1+}<#*IzjAf z1+T9aw7yQTdIzMwQ8OrgZD92EfY8?pKK~fd`9_T(^Ht#T3$2w^;PH(#Tl`Wb7<{7w z{Jo;Od!sIp_xixyzaG@R@wFiCHG;R-3EEyaSbNPN?GFZLZ`29KULy#5-Qer>g09yH zw%+i;)e9`4HW2l?z|-pmO@9jnq^B2e%-Sz*;UaG0r2JWpM6?toKsz?M7W- z+qHsh*9@+`093nC8;EvY;Mui;W*5P-8+C(Y*93}PD;RdIAlQq+ua}*{CE(VTpw^XO z)-{4ye*t*)uK-$IGgx)KAk|+DPW`2z)V~TC^->V(uL7U04|MtgVAHjOOn)J`^n6h1 z*x=|ZAbZwx}^?*Uw1p-|k`11nL=f_KoRu`(gqAIVaN8Tq| z&Tk1=b2Uiw5^(0Zpv;wE%yU7Q8?}Hg*9o>()m4EPIrdkpw*qdu_TBFJx}UQpkMf%(=6;_HF; z)(+bHXt3V8L3(Qi=dBBr*MRXh=7aAp;sv#V>(&XXTPv9E!$EYv06ceV3D5ZzL2`es zP8WgVHfjUGtpoga`5jjVw_O5i`_M(L4aD|qcX9Jq>hFs|Y8!Qe($)w@TQ3N0&ET_- z0iA8s2r^qUxNM!EvIb1{A$sRru-M0g#6BK-=5e5~^-9BwrS8Xoziu{u1+drJKwcjY z?z$Y*bupN0O(3pyfwxwJwi484<14{g>jY)32aL5&5LOR@ajC@cSa8+lpsIC&sV)Oi z-F(#b{7!0o5xn$apr!M`N*@bS`eopx zjhewoYXu>#8GQ7MKu4Et`Vvslhk}VVz8pNXR?yH|!9wc=39S(vv|doqTERf;1Oa96 z&qi%vpS6N~)(7rcFR15ZJTA)>r!N5OtPiB~q2QclP|ilZAe=RVZ`KUDSqs=^ogkZE z2d-HssAkPznvHtFGiw3OtP3o&R*=lvz%d^OiuowL(K0uehgguyh7W2v&-GsicxA1i zm34wuR)JJD>I9{%8I1CaK`0wFgHF~AHu*&$lV27aVvmIn9{Fg{$Yo%W%RwR^1`gS% z4Ggjl5XeT|ppRv+$NE4X>jZcFN>InI2XkBm;@EfqXyel3QUS{NU@*qbr_5q%1YN8X zZ1Djgi;Y@A6~6>bap?v<7A$ctNaB}(BW}Ln9j*%mu}1JiAM~(MFUVod;D)t=8amc)8nmwlu)d`reRIM2q;z|2Abj0p5i#hmD441UI_3YKN&6lrU;^&*sH=k~O6TJ8x|LM2I|Kk1c_mTMe 
z{r2Skx4{2;y7}_yOSAvaUu?ntzuek<@jL$4Z}HDjyVSnKN*6;oxMFU@eb!0$;pbQR z29dA`zpOD=EMEK>|K05#6+fC^E4;Dh4rvCAIoHwP=lxkWavyM{PH)V3(V?uvF-9bG za?g9S{QfAVNIvJ&f_%y!9)#1-`SUQLqM=%2lRspyob%K<>zH{C^JUv1G!E7>$R>$FFHO8CIdA^-bMlonb zZ4f@zuqC*usFnAuBC)v$q!WiM@K1kh@snyoX!3xa=czGw)A57(TaMrA-fCkN`$r=v z?4CbP^O4y(ZLY9)_>8l2_@}pT{DJP8Hsb9YKH}~g{%!ZH+4#`-{}%se0G8U=|1$pn z{P~vRKfZeUe2e%$Joh{Q|1JKltgL9cSdA1$@l%6L=E3r`(W@w26r@<-I`I|$(pFXy zCs*nFryqhp2hUgwAw~?RmI}Zt1ExRve}j8Qzp>Av3Ns}z4S6^Qj@1g{&+)`QB)JFj z#3-6lQGX2d$Om!Umk;do|2B(erg!?I;Z+4O4-UH2^5Lm5Lh9ep_#Ot{9G*6uIv$7g zlvBvVwDgdM=nb4O{BN8Cza53+%V8LN+73Ro&F5m=#=ofyM)@)P*m+9AjLOgP7xE-y zMq+f4`{2KuS$x9Cx;Qt+;!iDH_B9T;{nX|-tkvdhdbR#XWNleQC~9I>n-#j}a3SgX z(*Lrd6)y_!;79rftXO}V_6Ijv#k&}QL|^bP^x3D3D!1*(FWd@7gfQvsIDeQnP}keh z-%P)_lJFq?O`gFW`q99J@QHjBU$!4k=&3&C(p3d{&!OqRm1%kbJ&|RAr~nCS0Y%A! zOi~MJN}g1rTD(#4_v)uw2u~>KfrM8@Z-#aerZrjLY_4ICltSe7N}I3!xD;(nbS}TN@ zw_hPLN84%5fM&t|W#K?WC9p`l(~tt?p`ke*dL=et$*p6Z*EY?r!z9iPhJ~r(M5Ue0 zbXV!WYug@(%gU)j18`eT&$jqu2&9`&*W-k({Of3{rN`SV4X-s!4kaCa6>H%lw$Z0N z{#@)6|52@6`I@Meeye_rM+U0Ehl<|*hVMytqbS*d|5bD+hFJJ=)i$|S(cEOFV@EzU zZ{R$6DtI?sr3WjTLPIA$J%2b(TN8?P{+|~Ao#lVF1Joz}FSh?Zd%E@N71@7YKHGY+ z`SSTokpEx2da?Pt{QsNTfBOBa85Vp7RbO~2x|sR~^0!efkW(>j)dGg_EP7*Itw0+K zsi?U5n==b2q(MH2xkvygAmq_?tu~^K8k5as%egA$YHq1+pb}5Zh%^hqvaCGAdPR#a z)0P!XT6zPoEirx-Y+DPAR?E=4d<$66OK*Xdy1k=e#ayEBdn-7ah4{-uY+VmAUUfc2 zgWXHLhg*_!&2di7M-}F*ud-dW(0JAT>A2rHKI{F2BF<_P;q4m8#?edLPYKX0r|2M# z3KFLNt%Q@T9+So3xsc(Dtq+FieYKh3#3OASt?Cf4eruI!^*7325?2-3X8UVD+m^mU1^ zSkaN81gHB3sDeO}D4YtDPp55CbKN7;3>`UHR16F?tfM%mxF5XO7{ zpgo)51XhQPd_vB6dhLTd41QL6U`=%xUCyriye*~8B_KQ}q?N)pG?MV3v-j>zaJu(b z7bAwVtIf8@y4MVh!KwDfO=~huZMFh3b&clN(a5388GD`Ti)YbQ{K=-lV2F65Kpcj( z7f8f;god--4F1`CSbT!E_xnGS@-nXjM%I zGo=lM?|9vFX^w5FEixWD0pZP|0E#2pPI%8H`avoVGc6p)XnLB{&BrX6_OIdytFG?^ z7}k3=NN35E>V3g2ig=dM5G3{M=?#81%4YJT&t;(UehgoWu8otY#;`-%^0v$3%=hJ! 
z?Jny)SGz5>n{(bMtmxG!4X19xi~G+S;O4xIq*U?lf_tIRfW;!H?k}HR z64SQ}ZR`~o%pvTHQ(6lh!WZ-{N6$_G_(CrR2)f z4Rc;41iiqnDCTU{-|i7d3NrLO{?X=o0mx=oo8K1aA#b!Xf{uCPA9jNE&5Fs|fT$g5 zSNUn>jLD|}FBRrnv9Fc%K$Z77-2j+opriMJ?uXO0dxH- zj7J!zxE@dQbqRdkMp70ZBNZ#UUXLc};Kus`vpLrZ?qYo$k3iI9hV4CL8kY!K?@Juw zI30~V=zYagUx$^vU8kuL1Fm=i z^&Ic>WHw$WoPu=?eauVWbM&V7aXP0P+)x~$h9~-8h8fnm7_B25cHyhyxc)Xvv)S1D zC`(5XE(^stmb=d942ed+PPrQK^uFRNTIw+e`}Sjs{fs~lht+XNsPPZ)O44;> zTfnMgjg#AG2Ob0wO@-b*UX^T&YXwy#F7m=Cye4l-L^m}w^u9@-H9t7)(aIJm$+aZ? zow-K2&sA@|pLXJ%$G^$NG~`@QXdhmQ^Je6X%HBiQ_eq#vH_Z;yi$^7tR2xY zeZkgXiG+DxJRBl;*%zvg#Ak%XWT6iBr#4ApI!XKThl-bFWf@_MtnYFD>4c#O_0(pVHbsd2hI0yuF=#V17%? z7M=5}$=V0A!j*dcwvOA3S!$jxwKDpaO74Qlqs~rPb1rssnWm$DdOPQus4xu#J3B%1 z^t+~a@dz3WE#`bu)4V0}9V@RL;0jmtGe-@THdL)*3ml&7w$YOLro_*%>4AO`G-(5! z*-2aeOh}m`Z}I$zIL4FrcIYuhjl^1gma5%SsK|-JC@u$i4DnA6`Zss+8Q*YZ<6RVoh=Ox{K4V#@iWQYH*&7Z?#*|2QU zD7_n{%PETG!M__qZ^pwWg;b4EDzhJx$)#@^LPII17q&fGAE$<0YUEzUCIvMR> z(m3G@+~I!H$nA464D!poe3a3oU30J<7m zCe?Gk3J0eAmSk>;8qRk=A%zuTa2NG$U7_@{Da`^3F;)*1V|+}`WoZ;Iv>Pe4&GAA` zo?@>hXE zF?i;wMniTd@Xa_z$-#vK)Umu?IV72(s^UiV5?VV%4-FGL4u!r>d&)D$K)NrAh zpN~2}@)&-`ckw^e?A}lCh5IM`_|E;iyAKT)78$KH4_xCWFV*X#Q=|ju55piIu}9yE zi%EFa>36Mw5*4Q`mS7x_%@Y61&Sv{r`+3E#@HQ0YBtev z)#J`N&>J7V5;^YD@jF6CRrl??-ofb)K!Qi`>FH|$R(^LvKPwIGmnx(fL!{CJqg;Lwr}>!=^v~`I!z1^- zelEXNAHJf3``{J*S&ufh{#~=>>Jm$LJhx5Z;XBW14?+sFcw<}bd-?s60f5Q7m zhaKQNz($dz;OZ|ft9zZfz%wPqp!(^q8O@wVPfuPSzQIA; zHjRj0{VZIkogSK+mo?h$%zwlUUobtp(GZ2_-@*uYX>p3HS}pVzU&pcurXP~M3s0br z*F1ohqr&$`yKuK7L2Ort<`f<;%&c-wXNMUom;9&BX$Y*Ded?D2rcGz+JYsR{bt9+PT{;4=to`c?Pb{gQl|<#izvCfh4Me0}eyb)odR#@cyJaWmyNAM#dr-fQ{X_fAg^L5`Wf4*DR7C#TMdc4Dryv8JUad1=+tLYj@mcpX^7+}Qqnj-!32wFM(*lh#2HSl zV0gL1I4Vz`VE&*9)pYeA4iY6{GW1JcNemcs{8^?@T2xJgTnq2ckDL^X*T7w*AmR3Q zqY(Z8&2DdOyx&HfHUf}+_*YXWY#S?iZ9Mf}z;MOF^9Y!DvoTFK1R@20{0mI*sqM-l z!+1DPSYDaTsfb8`wX`v+Q_2%dF+d9M(ORjdJ*hlx%&!~<=ae-kD}wJO*lY*8!NtY! z^5TLjK;3Kjs(?jC?d!H#53>e55x1Q!s`+GVck3%bKDdd-TsIucUEkQkgraBD&SN)? 
zQWQ<0*TSMd|7K>U}DKwNn^b+niiIw8Km1$QDh@aJ*LwP3{!_c0$Osj)-LP;I{90 zHTp2vVF!<3g{vF)jgbX$`GuDhioTTv3KpUHDH+%UpT3QvNr=VO{jSW%eW9;;%@mh~ zTR~>hm!|N<%zImb6Lw58qkht-x}^C|GbgiKv#M*Zs4rd0^K&{Xq5

gh7i-oAzPw zx?22s#d75%eFuI;J>@DMnMH$722nP(A5}I$&%hY|mb>qT^f7^pAoK4kZYJ zHHni6qE-MYAwZQ=#L-DlGTAN;p%r=3Vv^&{)I5T}R}&aE1M?`hTphvGbdEA#jN6N9 z{1jjR$HPKBd;4c91`Jd-__E2-WwlM$*PIDSd>{Up^F;NPWXZSi#o|`Xon+rPA3g9* zz0E3G(63rmD_<4D4a~bb_WB{KJC7hL>OfUb%-%sb7yH+ecl4H$m-NpEyhZnx^BVoV z>^-^L^fmdj`mF+iFB9Qb%hDPpG?(8ieBwXe2LZW1nuUEzFWiy9FYgiMYJE*g;s{QB znfI;VZ4$byrW!gA zWSWIZ9?}GMxL|n>vca%_18RCv464+b5yj6`lXsD4Fh5Z$32;eu$5(lkW({j#$2!Nh zusdjA&Y&USCd_)*^Gd6s1?(+;K?n=T2vMMxf|koQ*$@+laSbRa2NfP$ zV%jvv@3w5=eA4QGLa$~c!B-v52CARp8n44}*5Mnt&M>O|1_T(>+GRAoiy{vPjGYM# zgiy7@LW}I`R?o06T~$$FtNcWnL|TPtL9^V2w+$&cWbHgHG{oTtPwqm3z*%Z;*|+6x z6$0uiH5B0BuM^{Azx0{)=ELpRn#|aDv$Z?HZy z?!Gzj#pAt(i;Ca+S0^uMMXF3?P^nzNvcB7M?;rw5pr=-qP}V9R#;xgVs4D)kYLOO7 zx&V-jdA0(AVO}1W?E;2gwcr#wWm_*RA)K^j6UVD3K46TR$Q~ZN+2qu4K7nT$2nc%- z3(2`^vf5;)cP+7Z*6d3qdIz$G{9Nb0n4* z-Gm=wEp05-6x!IBZz6LHVHX!N8IP-nSo0+w&&I(hzT#jt%8;U#w4uFwg*UYLC|QKk zIz5PBHB>b{ix9*jVjx{YeRWN47mRr7b&q|8V@#r?nJ6M9J{KT(E2CX1imAhQgq?FO z1OietfUIM}I-_tP%KfnbN*D-+1$#y|nd>GU4VCSU{N=m2D3U>G=(=`=Da2S)hElGu z*uG&%lwD@a*oT9^&0G9{SS3EW!}@I;Vo22jsV4EPQK;Mn!DM*=#AVGR_M+;pzBE1VcDuLbuE zRZ1Ub)2CP$1NW3le&I#q91y~ z1_h&{yBE%!Z$h?J+*3hz#<&`aQv*eZI1`e?uB-b4{<^rAEuF*AnFQkCG0GZ(m|B$b zg^RjXwWy!&KUXd4OJ|k>=gEY{I6A;NkfG+5$F?P#b?7PbU5no*b-UYI<2jQhd(=z7jV<~$tRfoLTT$3Xxm zmyo&43GSUa@%WKWmGmwF>&;wU#T&BZD9fy5`{-)T#S7~utlD%;4&j;jfI;j zsoPuVbOuUZp3lY;@6L@a?3&pD0sKaFP@-so76@urSS_P_Xk|@HPsOdN5R>97N5m>8 zJE-xgY`ds@)aE_0CzbALDy8H~E3LpHCJ7EeT2}VV^Vy%SOLtda0D`idNoM2Si6j!}Yjt&9f7SG|Bgo4? 
zgd)yWk7=xJLmXaH&F6l|uh+*%r$^6@ z4wKh=$0rA`zB5b5YT}TMNO}#S$uK_x2e?mfP+iL@iT1j*ruq;=D8?pJ!On*5p0Q~N zX&PHZ>!NxVM)7v*EaE^1Ef+lIm)2YlRv7ud9R5b_85bqCm54ny>u}#S;`#uU6q%zZ z*_Cy_R*r#BeAh>RbMV;EB$3sQ)h4HV3Vz_A9r*q%+%6HoOzh$-C8|`+Qbs ztlX(x*?QYIws{s!daBLYWL9#rfBfVAG2$8>zuW^}VwUy%vdpk@VOm`Hu7l0fY%&Q} zhc0BQc8C?5k6t9;b-F&_S&eC4X!N?$V+ifs33<>sk;Mf2| z>fCT+1QMnCI3sku@nFDvI1F_`zyYWo39bu`L0&KL&U*dpfc|g@l^6nvQl5+J4o9H_ z`MDo47MoE^gJ_ za|1kHch;=!5`9yQC!uLy2j#XL$@8Zt2Y+)%AdxbOdZ4aiWH1cBXv0zz7f%rmteg^+ zQ~Ax5W1LkpZAT@OR;YhFz-)9?@W29s-dEpAG!U#xXkjP=A){do`_H~V>Vy=0F2+Rgr=5&iV`)sZ#HUsh+4e3gT~C4n-%~b^LLR&)W>+6Uc+J`eerl>PX995tCi* z+OJ%02p0d8aEwE02{MdjOgcDSGVRMJAWVden{H$)9L&;3B zR`?<>nVjHo`@%jxCf5?y%cLsU!a}i8V{bbb)+7)GLJl1;&?x)Igvpl{(&QCfgY_-t%yW<;*O&WDHkp`^!`n0*Y7Lje z!J0VBM8li$ z1>?&kzbf)#e4{d6kM+0K-&UOtB@Hm{hRQ0?oyw$hY28uiOm1MZbBH2Q%AGfC?Fawf zhkU#QQ?O3hy043$bb67&iln(AC<4Rto1qYOVTJQiHuOba%T~J>$BD6;gbQVS-gM~# z!?bO*mgQDv|E+@k*9KQH{>bbZ+pX&?)Ya7LYl1q3E~*ilO}C2(MnLr<9xNEU7U^mA*B`mn5Z#f)hK>oZ>#sGw zb~g)6skrs4e6?HvP-(ymPWX$c@)6k4YevDS7@ffS?HRv!yu6Cke*g>9ClptJR-%Y( zxZxPAMFo`Zrh(_EGAO~P7y|m30e3l5SEpPLSb8>Ua+(}Plwp2cgR%g`XP1h34%m$6 z6AYUfQ`0&-)A*+IF%S!Ie=Bj^uw@0hhZIaDjK6ya19g&)D#vA(57Y!`v#OhiZ3r` zWxaLs*3X|n3-UIricMG=wf(sbO@6)}LgJ;PP`#)*? 
zNlTP5rs|Yxjx_>GSM4!Z;@?d}<08Y58C%{F0BDjCffMHo9Uv#-EV-LObQeInqZ%lh zi3Y=1e-ZayzutfKyyL8k@Fe96)RN`eA@-ln@12d?7?22_j-ygB_y^R@TtWsH7j`X^ zqdBF*%Wz%j)!YTDU-di6H7Uw9IyjRf^Ql4>1SfD@V{~}Yo zN210wPr?Rz4G*aQNo$Ylv)kAyt}+-d{yWg!y56`bp+q<4ZBJZ2cnX2G^os6t3W&`z zSa-AK9i_qZ6iwZyIlBZ^%$b@@t_?&w!O6IfMbF}-^g1iS>Zgv2YT`Oaj0Jw3gF4^6 zGoL8h(0MjUR23196GrQlv4PBxcQA-sogp=2L?)}5kTd&vQOUz}4k(^$pC|&oD5_lI zN>VJDY7kGDqWx_kmo2nqGb0m@ZYM?YKA`|V9VB;EW*IeJE3m=jx$IAO z3#zuPp!*KIpb32omTOyx>JEFrtrEp%8qOy4?mxKo!g|%=9!|* z&1OTMc6LY2x7FBHrg@>w$iwMzXN+oocnV;o$ zu2XiZ*4py(eTZe?&SY||8Vm?}1~$c0-LW+uu%}m#m(zKT`6uRjuvOlSXVQ^zIA)wV zf}N}rKz~5h5J2ui62`p+vskV+ayfaA%IFQyDy>E~z6G;?{^sTD(7;4wRBP|j1`dGH z$^aRd#1*) zF>}XDmKtb7{h@O_%qKR0k-UAzp+wZ0&UD64g4QW^L&3X>wian)W~E!Ml-xU}Or~kV zy9iK`*~K3Nt&#jG;S!tbFx^iceWm{M_{r8*+bsZiltf*x$ObU@wiP0K`By{NE_;-bBS_xOKjT?)EwPvE;rB82T z&YoG8_WPscl8!eHXRK6-!`FyWbZ=uzS)OYsyo;drjkxDnbg`0&$p#r~11SxzsuAY9 zm^}$t1^H|=Q99BGv3KjxD*_OA!nrz&vNBxF4FoLb4XbLFXaDB&jdfKi@jqZCs3 za@2aXozA2Li>ve=^u$pAr7ZX`h^{bp(YTmY2q(6pgROPe)Te&6u-G(NM9d1iP?d6_ ze+|mgDE~Dpf(6Iju-wl5PP2;;m7b{v`&d5+_lNzTPom+sQ!6?^ACXv;XoQ>|7gx_N z%5jECuISxDZbW>ItWixa5oigp*x}DE4Izqu8K2in5-@yM$JFv?libvpCx$%QRLZMt z%rkh;<`B#@NM6z}Car%tpHFmFKSQS2+sj6OSfEzO?bL`Sb> z4F!M__((U{5A(L#<^>pk;GMQ~K-buPeBUF4*~dF31xmXz0JwBGLGsWnRds4NxCPm) zK`xDQAA=o!=X2Lb09*ak?=>fB`IN@@4UX5fyxS z5u(d3E;5Xg_To?p#<}IKAfkt`a}mA?R;g;y7|~Q2t(^-#t*w1<4Wv**oPS=EjjYTq zodo>#?d&!-X8W$~H%PQ#P9Rf)bSytiwzsyndNFfPw~f80r+z{U>icQccGDWIKl9&) z`NxUkwgwB!;Nq;;JG<(YcE@*!ubFq;LkyqX z#W}`Oa-Euu6q~+SOjcO>-N+&5fY~T|XU|dhqHS(_SDc<6h8IPtzAL7x>E<*vNVHZX zOZoXVavyAE9ocIpew%V$#e(Oo?+Cxh;>u^(x1XLygK$D-Q4>FP4?kM2mCq4T-CQ z8UWYU;5Ive6$d+!u{#*Jp?$EgR#AtS>R)CbHKhMX{;d@cZ8RbZi6;Aq9;!vvPCnwt zSuhgq%-3rNA9tlJ(b()G(HaMCFg|DN`5`ZMf!d7^)FD@=(~o!Fsnb`DMnBI1h=^(y z8#-`eudWdS{0n>!1?L6lnTi%x8Jmw$WaF$u~tY67ltH(b!KmJ$w_=@m;3NOR= z9YqZMUVafehhPksUCpCZ>|9w@S<>JZP+a3KQM9mket+%XSVv*V6W zu@ass7OeH+hAyz^B1J^s&n))}2_XnGxEo>jw;W~=3aHU-nSET19MN(BM?kp07xIhH 
z>D@X7hparcM@;%r+aI!{4_QfI4-{w^MiKKo&{K&>;0t*mA?RA%g623LIF94_WXaf_ zCg9pG3gqx)6d`W05}m;8qRcM=ut|$KI(*ypvrN+R65;ufZUH*i@zaRmh#pt;G+RR3 zHxi>)cc+u==jqsip@;ZAZp~vgSY8)h)M_bZ3jVb^oSjOwLNEY-$CM-?T}WNS9WShc zAB@^QYuGxYSMa7qsgpYDlr&FABVv-!q!F5GJ4~#`9mzsO*S|yY)}6llV0&edTutbJ zC#DVJ0Rh6#lqFja$coRLQO6J7m^-H^Wo$8bgwQE4nA_~#o2P1BI^o1+PRg2!^_}a~ zQI*;h{%9T+Jmxdgw%z{xI+CWg|gv8OyY?$X5rdk#Mzd z%pMCWA~>n@%bA^3&1~MxuKoGc2WAzwpUg^N0Gt_DFkdyddkDZ4rXHP7<|f~Pe7PC~ zw9dr{vfkjGCOV{vk5G;XHi{EZ?B8FLvDCurfLsSyh6ikAGZ*Ku^4UZ z3M(621h!!oOJ{l#PUVNwD;u1pD+0LGk~2Aywbu}nAT?jdRLh>uD|G+l>SlsmSSSPx zM}^89TOu_w;}&-=$vLa7@pKzFybexqmMBK81F?4ei8wg zHa88T?Z#k4wHx#*yToed)D4%@vLIasc}qho�r+O*a=x%j8yp*oOzNo(pA~=Iks> zm6d_dw#4$Z7p!bG>8S_!zEq`W-a2JDCVmKPRMrB~PI0jiS*vf+-&?A;&eZ#Q0CH8E z6NM={ZKgwCpo0h7nMtm9u)}~^GI_zZTFf{(U9-*GkBPX3+p{;i6(%M* zxK?8$V!TcI6AriD&@>6nOfBQ}LF1m}qbxpNwx9@ncf;~x%J+)QTQ*(JG;bsww@UsF z$*BOnY%fv@z<`l~O0Ev7TaK#z0d-9&QH6X`W>aVBFpIyL@$qr7XXM?x5rq#Y#fYFR z!2_fgHJflabM_BFU5m#38$=RZeVHD?#ts((k}>^EF6-<pKKf;^EO-Q2uj!c zaP!dAx5?Iyo>Dkoy6x4MSo;cwdd0<6K(-k~+WYgG?V*eWcs&kKjcspNv~~3-3J1-ETWWnX2LREJmng>*;!@|5UvZ9(cIA zcw=Cl+0Fv@Bv_OUHjCmHL9Nf<4M#XVK6u&85XyD0NyG~M)LZg6dP~+RBWr5BS`om3 zVLcC%K$PI!BHi4iUD9h57lPuED}UYg#KlB*XTF~z6=-Ga_IuDQ6_lSK71WOH_EN!d zvs6$Oo@_5MI5cRM(#~)Am&+_#cXOOoBOR`iPUcss{sS*6{Vo(2u9{<63PxU$CkN4j zARt3Y>lG0%hW)(5@Het@m2-rl^w2`>qd9z?U78QZNEl+|GsBK8;HNWDIp{)PvV(-V%{|9(wwwnXaKIc>mFq`w4~)Y6 zL~6XNP<}9j`g_;u4Cxx4FxffYWlzlCDt?vY`Fy~KHooiCYy&GF<{UwUfnx<{r#TzE&-6O zcN!?jcExE>&AJhK#FlqrMba)wv}o9U_!_KouyaateN>my_FP?|oY96E86-ArQs0}% za5q&On^rMjozvC{0O+XjEb)4KeY9vv-7qK(bi~qd5irFc|EELa?CjXNnuVhR@UTPw zY#HWobSy#>opNKktk|=?SD~Z0Qb${tXIqx)=G)!sQ+_i`k*I+;LBpB!aFPhPlqGuK zI4S`{0S3w5q$=!TDUY0D8eacr(Uagk3TV~}4(`Ac0joMJ8C)%GCq?iI*Z zrdp4>Dr}So!#=DCC#HEg_Z$v!|bU@eZD~ zjc`>|5)E;M9U0~pcz-P#d6~Y?*rTE@4{&`M`4h=f`^|KHaZxsn!z-*WILo+-@HE4n z;z~6YC>y_Bg5eSFCY>$IIj*muU=)A#A7D~*Ep7NkpEL!A!@={)Cslt>TY>-8dod!Y z1OI+QZ6U`rx-?{}H+eQ0<1=F01%qId+~;5u#(YSqk=f$km0id+{P$rR^<2Ye8#Mnr 
zV;+BSkM+0*o@VD9^JA?lvm~FD)m(j6R%3u9$XL#t<|?hO{$Mq~BUbZ$EvvcuysYN? zKUmFLtmf)p!)gWu9MWsYa(-Ex<@lqoMoRS^j+90?yjadgy9k1H7b1``ynF1)->Ba( zdy-aQcbT9mtfqn{DV-oPLbEJR0?ZUqM8|(GRwu0i-V$|oeA;Sh3HY1KAwyGuA^f(? zGJ0ayA^Zlm`nP{Ww)(m0{;pW`FSRV%VZJT-wP4Y0U-4q+!TTI>&w;9f}z2oPR{4fBqYQ`QEqtS?0 z6rhGs1Re-H#oOO8z~IUAgBLF%2!l9y31mSfFjiwIo|s}BZD&N)gRtD8h}2$idV(qB z;RVG;D{o^uVzwB3K0roXFAb2vWkLLa&F>knR0ea-j;K5g^c&+7NJ z{kUeU_P?S#v^jdlM5V@o4n?FncrW=@=7LM>GN^--m-{cD?jP3&&`|r$B@ZxCjvq9k z1-BRtjpZc5F$H3vO>ziU4uAv2`d}0Qjl&@R(4Q32Wdp!k50G#J{UL~ft`j$BP?7 z4PW^MWb9_Z>RlHcy>Cl6R(g0Q&cB2JWLD+3X%6ZK{`rXhR6hoTfrEBw0eQogu!UXO z-lJXJ9JJ%4CD=TJ>|+Y>^2v3vR63GcRQ27m)NyLO1fA*41GNktSc@I8yNXKH7D zoMVV6#ryT0vsVD)j&#u~uJ9_E#L>FxGeZ`C7PZB6HYbCl7* ze*Vj+D5D~-_ZIiT-{N}dFR}(G8>sRb7$-iqD0tFppgz)^gVK|-LB{4&sP0Wbq#DJo zsy6;jSE=-7B<3Bdd%?+zvGNzjtSoFu4sH+_3&Oo|YhknrBV!#bP%J_$enOoQrPW(Lp)f z|Lf|r(v)V|cM>JwY!mzDPc}Zmq%A>r`MhmL0s%c&3i`Z0;1iH^|e~*UmN>cu%%svoZh7@U6rj3=n)*@~tr_SH% z5^5cp%J$nL=J%(A)9+87DGqcZ1Am3L%4wow+wSbtPH?q@%RJuB)79+Y!u3DDnRplz znuWwOuU7NRIKx|g{3FSk5gzHVWkiTe;s;eGZS zxqm*__4Ds>YVn5oO`O~Ro%iGW)|E3YX0!(I!5|V?VCD`L{wANbp|jpaGos!ctJ?afaAbz_~0Af`MSz4 zuS`3UT6E3Z^GUnooV(`jsTXJNaDUbiIer!Djo)SN0)1$ECj^XRQM3~&G7yYEj=Y9Lj(eQwT0e$ngM2L0u`?~eDs z+dEx>+#+^&eH-?@ zn7xzlLrS?CXA_rD@OX&)AUgXSYt?ghmjgeSCLREHRLvdRKc?O`*^DWs_(r3DlwRF*r8csx<-8hppY=lwS^kv6^uzwokvxv8HLJBr z^W?3D5v>7mRb`P~URBRh@1|2XF}6g3+JO}qzK3w@5ms{^v(S10`IAbhG$4Xow;+L& zeQ)c13H>Up6nVGvm26noBevq+M;ABQti`Lpiq!~LVsN{q5S1w7i!t^0eyXGk7Xi^< z2!_JdH60@h05QjmP+BU5V_u*VDGT(V>Q5~gQx%w-uy0tmyrCi~*b4a3n2p1t@@;l- zejPFjb@-hQg0NFPjV)o=pa4WC7Cat29WIf1U5n4~Qd>lkmy%@fV>8YUMQ+;qj1+0q zVq2j01u3;`wqW2~B;wDKoAUijc|H3HDpzVuYp`9O!KX0ZANLL;jCZ~~SAA$y@zGT} zn`IM4{8?3{mvvm=HN6CcO3xvdH>By)`!dS3rf}o(yc(4`rf*>mKedxwrx40G$v@ym zEsER7IDmVN3L2YyDV+bmnBp)0MGTne3h=d!*6&&)XpLfx8f8$D#ck~%V{j8=)GPeFe2t@ z0t1%0L?_RV4_==pN+NqcNKS}C8ptC;7S6S?CV522!uL7+?#bii`@PPJotB6XFw&u; zXEvn;KD)}GEZ{|A_MPXV{p0lENL}_&nA+z7YHXFXN}y1n2?(mR`uk)rFRk91?G;+C z6b7(KO^neo;^1ymxtB=SsKNK+Yk4uzkZ|_A=IJ>hwwgU 
zomYy6-t1EuAL15nD_*tu8aKn~TH%scFLK{g(LIot!Q zsAsU=@PN%2b}5T=xF@xCsE)mjUI|AY+;iFtFS*ws;%qsgnKCaypdqN)sNIOb)vlC~ zMw2JL@X<5tjk9#3{kKR~#?}q)KP4a$3xamJuK7h)0&l)fseVOKzTc3(XF&;7;~6=L ztlY-N#%PjO6|uAK$)Z$ZJO0j#s@r)@UV$n(Pe+h?IL1P8XR1+)? z0j@VhG*WFnnBU-HVM`#~TxMgpkU>>UGwlMv56+>ORGG1%Cm*R1r{IuRocEk+Sfry7 z%EJRQPPPypss77+tiHpYfdLaoKi)riee~*NfB03O&an)ZpGpCh7EioF?T?QYPQ-CR>D$pzA(+UdGq!y+f`>DFt&?XWrFCna-*d zudH3DVS%?wIUWM!REzWkQsU=srD!5^h`ag6GJ_gl+O1`_>q1~%PEt5=%%;?h2qi}C zpqvl@ofQNuR<~u7(XTMgSdjs93R&G~knGLD6O9acn!d+k2ZVbLG-ryX+2-iozL^^Z z+bP-LVmaZGCgbHuGXx-+2WmJE)HguYilCs1SrBR=K*sR&f}Or>JcGIlw_C#-IpU~L zGC!`;aIS97e)t3_;uZLynXKK1#6o*-8zPX5KU-uUV;Q1QX|=(4;1~p*)K|x@f>c8# z>{+yfcEbv_1M`iJ?JA+7d8HPWq%^KQahc(H(57GOCQ#l=NF%GH!00wB&Q_=@Mm#0OaTF7%8sReB(6^gn*->}Fq$uznQJbh$ zM&K4>HN~X5nJpk5O0j?{hgCG_lQEGYL5WJ~${95SI^uDr@!t~WCV8Z>N&c5Zl2FOFjIl|6N8lu^-@IO) zpDfS2BqR8`4SuHspDJxy?ptFot`b~`*-RIF2lC?MPS)Cr$#f6upPN^ir9f;IA9C}2 z-(0hb2zr`jQXJQ$7?GB@oGsLilo~s8s#Kc~aGTGlO*Vn)fsXBH)LBsso!{Fq2HW$@ z+Q6{F9n|-#(u)k$FbwddA)!!xndY;ByI5qU9=_Z^IobPee|Y-y>wVCp)G?y2;2h2M z@3ePsUY)#o3JU(y{m6UEnHfI5EB}@uW5axwFNQ-CFXoENzNQn2*v>C!5W#6$AMz-} z3YRf~T#eQH#C_cM59qk&5BH(kAwa-c-FJ1v^5e_yL0LA9Zv1{$TysXmsGzWK4v2*# ziZ$QWF^Pc?7-q9@*#?){VrW0> zsvSR9lkW5#J;f?DH#?y!WF|$eo4@Oy9A>~fEB^)lIDBuc1paY0)H;`W(f{0XLWtp- z-1w!q-Km!u)c(r4@Of{;*Y2`K*CBYNlrX&u+6)%t9$X~2p{%f9gB)Hg7Ytf92P;xu z5d6l~a)IWg!j$?3|fk-qybk zGGnrjKh$g9C8C(Qva>Xq=Btn21|U~fi<^HVaZ8JA!Z5KEkW z$ctsA2u55{i(*so8PZTtjzG=+F$Cdqy-3*Yq3?V!u~ABr?>~v7?C)Rfu;Q^yzq8@X67;oiH_I7V*}A$)AG}MrR}-$lhahB z5(S4)L>X1Q^5lEf^?QD{>oCTCluw5BDD`mOzCTlKeF!PntV;L(eKYo8fi!LS$6C^E z%LURe{YDl@52tp4=5mG1;DzURW;$;UX4=OGg(Xf`JwRd3M2Vog06xCXI+KZzqSG;! 
zk$2g1o!u;c8=~lFEoy9}!lUYK2aRx-O(VBi;B3;goi#(@bKN)?vEZ!-Kj@4%S9@zRFc+?Zu2@7{o?qK-x664;Z#%j)tAth(-;4?$v{)+GGL zaZ}R1i5y_9=%Ho(+{orez|F@6aqp5P2)$XP_$x_8JJtsvDU48kpD&eml8UA#*=4?f z5P75gXrfg+O+IZ;z<`86@*{5qVhvq9QAKQ{L3Gf$csna@>gcWS3AzEtFp_WZji*sK z-}eHk%lJ8sERNiu6!&yURM)=rb`8X6bw6YtO5cT2&+v^ElLQbzOiVJaT3=EkGPOcR zZzB|w!=-d(mLXGc*FGS{ltGov#zT`6t&SI3(g1V5^VPc6NA^*1(?*)pNV#?H1Sp@!N|EPFEh{LE+y~iYTRS*y`?C0 zBJmMm58_q&Ap<)ndxK3Ljn|T(Kuk@xTy9x4%eZ_#y!M3TE00tQLbASHV4vomW0FP7 z5c+l{mwp{`TD0jma_zMaA5$K7Pg%b%9YQfE;Jkj_G@hfQ$T&HKE6y{tY1Tk4j+LcW z-eJ;#DfJd@*U(!D26En14eZ~naFDYBR#~VP@QOCQW0;$W52>uoHc0k1Jjt;mLP@l( zE}wlU-orSSivk=16#R>f*%}3fC#1|qS^hz%d!`FBv@#_K&MTvm%dX#w4!Znd4`h38P7?G`ZK^&#VPvl{e|9K7oDxfAUt;9<5}Go_ z|L6t6RHwZcr~Ajiap0QMK}O`0yUtkZx=tZ$r-(^F$>JK|9mz6N*~a^dT1FWg22Gnp zyu^V|Dtqj&wiXj@3yPCz53$ zMu~^kg4H=JE)nrd@7{sX&QE@R{ueh4$!|V||Cv03_4uE$^ho~4AKr@p)w4fiC#qq8 zW_>;f`?HR?yJUa<#yQhm$^Pz=``N=z+;6dp{TYS$U&8&GnI9il-=1LlimVI1U?s}0h3W%vYC36^Ic+vpnu0?d-)bB=Q&h_Bu-lFOk z8m1Re80s4~XD!^Ne$t}ZaMffk8BDh#6${iWy@*Cl*4L30L@`UuAwD;f+g?%3Vf&of z&fL2)cO>ZdWX47910pj-c`<6^JVV?ccJ{~bF@AQ4cv`#33_MQR z-05uM(Fw6>Y`2-lBYzqjp<_75C?U!Fd9;1Tt^_kIMN_Lk%bVIA&-sYfMs#>~QbX%Q zyudw=W-U$05Ri}`hOIh!hx2^6mklih_g~chM_gs%H}aOJ@z(6*nY%M=8T=u;LA%#Z z#DP?~w&>{6M+0!85dm45b*dOsAugi!+<8m0nYNo0KlpA-u0jZT&}NnNflz`1W+t@Q zHm_YH-#G1>)sMBC>&u|5ZM|!fZUakc#FiBf^qxI(mP8&#&fb789>RVi@zpoUr-Z;U z^(RpuN2HxUwNLCc&da|@b^MqOxmeDI`E0njLTeu<6HT&gjuvPqM18di(&Xp1TNU@g z*w9Fd-{aUw)@yf?&X-jOMZQ^K8nUUbo9kGLn!L@o941>D*BB=GikSB8K2dn%PleK{ z%2V1D+8cHG59kAL#F1dSdP{=km~(YBWH;m}?F~#*ej{s)?XJ>ZM^ zzJE?P5_jbj>8|V^MDpMvp#zvl(cF)#U#l0mn>3J;dA)RrIzhCVX)zvB+6kPju03D9 zaJi}l7wC=cT6D#uPyM~fXI;@e5s`YwA+4VG{Qx6IyMg>o-eP~{vx%xb>Q^BE+NpFzD-`fIXpc; zQrrLQ{<95jA=Bxgm&;U5W|1LEZmbIqsW%-+JjhrI9S;}WT5RtCE=A>o>5P%|)ZJW4Afm8H*3=ngP{c6oV4S06n{`fwJ)hh%FZ(gk^wGGS?7?(7iqv|8lp84Ru`t(^xmrJB!Fx060wn&f62bMNH*G;LLe?9@9Dd5M?CnCzd5gqLK)=G@qvZ_Vw#s3`{vb8d#BI7A8PPCyh;|`Zs+;I376080PVp~eion~TL)lA z(i7`b68*fyPm8@?5i}W&Og$EU=)NR|4oQIauXbVI?o-#mV`4nGF7y}DAImWKSC~nh 
zr$35~Q6xo5DE_9CI!kqkV3sR`K--2Le{%5da5%|kwNuo%Y6GVDAqM^QR>b~>Z@76l zv}KIvK=dyy&(rih*iXxhg8HxbO$9?i12SsD5&g4dd=ewaRcN4%RejuKyB^LL8gL&8!j*>lS2Q#v=w zid>kOh0_f220$KzTQ(aHl6`hA#TrGi;2octdgoy6#;$<$?>tl5Yq6lABuX46fZ)jl zWnGjJJH5!p2!^+^2Q8t&R48IBfr}jk7+M?%nKFPpo#s@+4mwn5zOHF-_1k7$>U;E7NLLeRhI=!T zqiuL{(@zw$V!tVP-5+2yA8thq&7mz$JYePgO$4rR1mtS7nVops^cM1iJRY$4-1IUJ;Q2-i#GY zXL3>?b72`IVK!`toUD9?j^s-I)V`T@Uj|jE5ur$KP$;DV3qhqE z^HrRchk}lV3Fy+U={@VMP?q)9i+jI59YD2xME*n{rdZns0?SwHFZ zlmGc=?@TrVU$bq6zwOD$o1t#YsvmPTLrtDl_bmq6T(GFqPlm8y@X}cKtbnWc!GeOY&g2Hm-_P5jZJoh>Lu-%-obstw5u} zSsPl@@i=G6+iIrl<8lI*?s1hgHQB`qYej^+>HY2uJHZX)%))Jsmd;S%mO6MhfR!;C z0(48a2gnvZxu_US##5t{6sRDuzIZzF<@hY7l-RC7-$+xcV+7EVV_b1OOl_*6#TPmmzN(x-@V0pPUBJS#8m zK)B&=ZzJHuLR39UbO}ZD*W!XTT#0vPqgL~7xN=OCwB7pK+{*q~LNR3HGw_BWiro)Q z4x4eR{K>|4{M(>(MW4unA)==`Le4_f#X>{`lhT(VmC=CpQFfg}+Keq4L;Y_Pb`5xPI)xrI8oOHjedT=?JTV|AEZCl#z z`s9?>omw{!0$8}$jxkg>x8uYp+0zD95Nu{dk&YP_z4Cn} z(xoY(>`~3~3*pxnYFJzc0m9?m_;9y6iA&B8Zb8pH0s~gVj^Ogsv?BQvizkpAJ%FXUBZ`&z^4>h6;DouE(fw=6&kZh)D)0KKhDi z7Qc^6BebwJlhC01yp?UO&CXo0jemwvBRbo{xp;yE_I+U*5;jp=0fG%=dV7Rjh@JeQ zeaK(*n0Jg$lnc};i8^vRo#Phz;-s%j$KtM56SZk<*UgF_#==g*kmI_y^=fUj^?2Nc z-$I+gK8wrZUIJ)bm+0yC-4|7%LlqKsK38=RM=al!F*d5jSPLlVkHS$f)lCN-d`Ktx*vBEnm<1o95-x~w4(@r4BVx@H zeX=t9Lo!fH(7Cz_N*(J*-UsMFoG;GXB5&U`1UJNV;xtjp{=6tt_FkxamF7lnO}ti3 zZ)o;M*LdZ}^@BMa-tQ*}9(8F;wwDlm=xu-0_Ehfy3avMLfg2Z*;Rz;!LY~d8wZRUS zC`ypayXl`&lQY@>RB6y`|Hxz*N$>%Hz))|k7!-dff zBE$}q7Z=&ZDj6G0MVTwc-FE8p-C1Nv$^=IoD-9w;8WU|v@HDfL$;WoGa*}W8bB;Zh zppFJR9H}7MeHi2x$^6wp8;Yph@DNd+XsCoE-?$ML0D5WUT`jAoi0+<_0S`jsphm24 zF&JHNEGV%FpmepLf~x2YhG@+h4$%v?L@OYh$3#0^%54xxBGiWZ$|FGY`AW$-6yI)p z-JO1dXL1B&!azTXlqE8rQk{_>0Pxd>Qpl+W0gpG|+;DS~ zTvW-;Y1q@;2rcEJ9_zL(%QWwkl zB*W^cV7Z{b+#m@j9@xh{1AOgDtIuI^OxHZ6H%ro8&rchaok2geACUV|6zE*N1o%S3 zx*~HQG;+({YDhb3MOL63U|B+Ns^Cf)Tn0~IeO1swh*&A_B`?#P^Gp^?0{a5Q5s=SV zr!LI0g0@4ij% zZEWQ*zbO6>gediseL~QAv4rr?WjR4%r_Xlne<(H}=4R8n%=9?Dp 
zB+jk-IC=B>_0jR^3D#1HFr{a(KHyXkBVwg-^E;l$r@Lv2X@Z{>EZh~b7~MJ6j)YzY$=5`7)_pb`0-ht}%dLzz@Xp(-V7hvhOXKacSIB(u~p}L3x5R&8B!v zsOPb9shtXxD_vro%KzyaqudZp{n6l?wrdNDrspcUF$@`_7RlXUB&x=#&30|9i$C?!SC}`ZIkB zBy_TW{Nw)d@Y&(f$^P@|9K9lfSI=!)&O#L+P}$%Lm77L=IW68RX(pPH zeTbQ}6I~bqEb*)g86U1qShc8WqQ3Wx!a=~4`v7>i#1)kJ&l(h*hD0?SuzU^dA_$1{ zIY*X2v!?V*xttK#7XW0qMT#P-laHtG3h10AiObOl8Be(Hj)dtvAQdDiWZ9U)IS5`+ zi{0uCnO6_@PwDRA{$HQ%zeaj^cDQ$Pf`yhWa#2SSNoOZ^ebBdm-G8?CRGk?0Ozb+q zhd{wUc=ernBz`yHL$8k5NU;;UYVeWc{k`W$uMU64@rY}N9{H#5K9;crk-z{3LQXlF z%sjH`#=H)(e1c5@9#r#5zCahej0DmVc=Wh-1P5zd8+B+Pnm}W)ErGfTp7FXE{Gfxk z_T95?Z4ibVt|sERw50+i!KcV%U22J(77`ovPegVeR{1YMjjSO&04lc|eoM-d_pW)& ze&jXAH^8oz-JHZr8+nHkpyD3+0-*ocfy3`w6vW7g@Wr8iQQegd&0{dKX^mQswZcY3Q#Fjs{Ew-xh^1^r!z4@v?2UV6a zg1<9h5totzP>5ZNsHK6N4Il$VK(#1l&=S_5%tjx&0S!iSO5ntUc+R(#=lfx+0u;Xj ztoj&z#IFSyk*;b&H2~E~h7Wrn8*J4C{!#cXwm%?-)qGut6N2@I^`#N?qAN9|ZBKUX zpyDx4E`9xJSH2y*!JFtk6pq;X0N94NIYD>xm!_jI`KCVAM1(j#MlFf1ikw=lGKGmN9^_Z-** z1G_~_EsngT2>9hsN_-7b4N?-MLAXqmh&RoZf?SkQtLJsr`M`M@*{oPzUeOt?W3rYi zzU5d6{43&R3t1Vz_&~nc+lXEe0V_g~tBoid;Wu#Abfa}#{7>>IIYI5^WhO5fu8RU| z13HruX~(FdK!8;NzX58FbY|Xz;`Ge7eTfO*~i*h-Gi?`crD|*$+Qq9E+^^~Hcpx1hJ;$?^s zq~%zGpV00?G&Qh(WmPTXDV(H=NZB3%>Iv|9;U^R|N?lZosH|NtW3Mkq%gHn?!R;`A zirFw4XtWOYPs>V^&!}P#o*=;u_~B|ZK5yZ9!H??Jz|n|o3;p=Adm+L$i>R@@V60j# zzi4XK%2g37>qlYKEQWof3Ik1ONovu>g{bWxnPzG-?Q5f5a}PDgvenV7Jw~SFP=juz z8wSbLR?5@kaik1=Rk|VSZuGg6Pbg&z@BU*8WDV4LRBBwMBG|iGvC6(2jBt}Uu-$zD zB)#Ki|DnAVqlsooxAOpkNd5%>*@6H3C;aDrrxzQw=U?bzRob{w-;{# zXdH&pgLo}rvv3nWS$h*z%gHPgObCMNA9QNXF(J5lFU;w6lSMPnmJ$P5=JycqHwfG0 zOg#&^LUBYCAm}%TX9&Zo8ZuonYS`^YNs0LI+;8vSc4LsQ1X@!Tqictni*gXRIdeCMP_y0AMc*_Ar4(U?18Zc0D zTI-3O)p!_euB$L7ITzeYJ94ymZbLF2i?QF6H?3Zd#lP75f|%Xr#8!$=0b}4MHg6%q zdd@h{4O|=Uy2)K?$pY6Af$AbD6 zojG6cxG9~Tczl6*y3FS6zEO|**QkBgDN&()y?;i}^<%9JYu~S_6kN%$F9DldM`(4i z0Pc#?yx+=^LxC&j2Hd!n;ICWO=M>wGN&lfG18Gis-H-8uZip{O&qI#EQ1A+YslHO9 zSfwnQu|&RG(Qln>E+MSA{W}$ojb7r8Ptg|+Fm}rQnaHt0Zd28bht-E&z|6dLXjCXZ 
z_hQ^jyfJDiPHhi@d2o$bTe}qz9a=1=nQ1fPiu+=VwGZ*>qZ#JWzT!K!wiisB?Hehl z4xX*m;rH(<8qb^W{x*TR4GX+Er0eyn!BtY5>Q<@vO3dw87levxHk=dARTt-d}NZHfwo zt;Nn9q@op&sP)0%rgBE*Xrg3f`cyGhu(n-DH-=7O_H$23CDJwoeAW~;)iMxNxkQ~D ze06=tNiw+g>Its#tqyM!%lB4)`5<|;!@r*au#)!Otl`m(FSsrxPAjm=H`Lfc%S!-F zHHys?6OdVW-X9rb!rgQ0infk;@2djj?sV#S-Sd<%$$G-&_Hc`6&aLVzA&+N)dPX3HL9&_wMd zpSEFjo3+k(Wb|Ahxzz`QKhG}GO^22-`nj71d(*%P#!rRejJ~)nD*K{WU&U zzmLy*)wlI|efWN7QYi5oe(m%VY~#t}Z~p&&q8iwu966}Xy&EqdsRdmYGUzG&A?ttc zm$gm2&$78v@;_u${rhN+r`mn;_1BM|1g_;AywC;K6;!|beq`7oP|%CY3ok@_1sNl8 z@7-_5P`hn>?!Cgh3IM(QYt+1@u^M*0eU3itZMS{N{{rwyRySw%-8N)CYJOp8f%s~_ zQezO6&)L%IAyn)JBvtDojGI+FvJ%hnC*D?hv<4gJebm~BGS|#X(q*6KjvY&)o^1{&(pcrx@7~wrbN%J`Tw3bfQD_2^b(G$otrzniVWwZ!O!Ah-)lasp znv86JzS_{re+IU+V0|0$5o*Q~TvDQu57K84YZ_9lAl14=ByE0d!|^&}0aU6k%C%S@>VoBf zHFs_frCZsR+cJp}A(49d`SK7+$4M>mlC3{QiOmiD2?0B(RBn+>^JzXxuEAHp#6>Ox z0Z~Oo4FN%ld<=3m<{!R#sQ&YWRD97c)B@9YmVpt0Dap&`p6N5Bm@=@WUr>e$E+D<8 z1den(E|K0XBNt3N*=h^W!7ZK~J^NvJa(cYK_tJ-&OQ-qdhGZft7l+Os;BGTDdc;TM zTM5RbjuN>dW2Zdup2mU6Wv&Pd3*DMw#De@484ONgv`#IV+lsAB%YMc0i9UQ!V zeSCC!G*qwZ*$hN8{bakhQPa~8`}?o=4iA3ZudPDs%QQ8}d-0K-932i}bM{aBcCT!w zII>uCKs4|B$%jaV;yM|tS8w2K7#uDc^b}noT^g@OvZP`?2-X!SvowqnXJgEtyHFgF z=jFWBzipz5HX3Z?n;|=M!?)1f50;5dc@MakwuhwafSMY>XlJ1SiYd#nxWajsK%~@6 zv9nEXa8ufd)YBLjWWMw<4HF;mz&u5TAWaVwH9s~yOzR{w(p>d@9w!`wsA z-3HJ{xBoAzb%zji)=yfE%0Nm-Vi4r*eLywkz~|lsd-Awg4 z&!;1Ztsyr0xebE0*-4=xFGf&Ni93j&j&Q<({ZnDH1c&E2cR$>cPuEyUB88XblKS#!5^T71oS9|Bd zzaihN>IdPHbl$xB;nmSkuN0Rs?xYrOz@Qy^xqX7Ay7Cyh)L%1w?X`(PcZU(6-0$2@ zMK?z+Qee)-D^O>YZot{tEh)ExgVh~mr?v){{p+m`%-VciF!Z+0xC50pV`mz=ozX{4 zL3mh&4r2iZn;TJ!k7HQTyr3-?Xl{)KADzwtufBx`lmPRV|F;D=4n2dvt+~AobZQx) zwRxMQuZ{Uh$T^Feg1ZgK$>Uq#arFax>mqBN&d@e&b^4M?Ee<2&IR7<1np8 zUJ2YV2i%_vfhJ@sj1j}sQOlO1)YUpiN7CCf&$-W#loT3?RjzUf=>fE&ZG&+=VJsxs zqu-9eKZ1+!a?npY&yHTbIQTC4ZvV71NKSqGd-icow)v~fsq}7pp}HK%V5&^<3$EH+ zDc%f-H6l7ezvHCwQ%$ZxvZq)(oiy*>d*YAv}Es{A4epd<<nolyX{@c1NcAi5jp8FZa{aN&y zE%%ggeZY7*HhcgnKQ{%drL?PFj;Mt+3ge1}_v3V6hsEFy{Om>~e$qZ$cNW||^2mk8 
z)LS(U36v09RLNWEb9Wr;jlrWF4IFlIC}OIf`AJ6c;3(&%PmYfQFxmIn&4tqTbYzqI zWR#VQ$VW<;{zq+W5gd&e ztz_%G)gY5%OUL{F=Ol<`!I+ep8V*FI$Ueg@?U0u%oybRz5I}G5F!Hztn3s>MF}#7$ zMCa*$24z>A#o+XLGlx18r=XE2|KHRmPoR@$s`>na4CBW%P!iQ%Ow!AyKznCwy~aH> zBYx|Aa~9H%*me7y_ZXig{S^P%!n^;I5E&O?& zF0O#Bs~;`dI=<2Ad{T((uM<>S6Vm!Gh(3ii!)QVQ-uG+8m;Mo$d7HpY*eaV3y}0^3 zk(VojsNm!#V?4S|!Bo*89sXhil`oFLND(PzWAA$as z<)k~BR9J}tFOXVcUW0idV{|&@=GjNs0d~7|s&wO> zoegiHm7urt@ZsAX&1%p7%cOMdKrY9_t?-4l-hj>Wo`+D)tA~r?p*XRBf&0`B?)0k6 zGMGnFjjl3;X9pV)iCNirCprs8T7Y~fuCN=8!i-NyybKbZwlS`5fhQ>5N47;02Lrar zcf}l`^d!y;Koiw)uhSa}^2BI}8yO=(SkGjUMec7?QBsjnCh`Oa8IU${RM?6R{k32h%0oJ2W5TYa@3 z1vk}NIUauZuJ*LIr!JB5$uYrP=;K* zzBf}lG9Krox;e`wiUiECupkOB_EX%Nb9mX1kMyMMqX zrcz;iFEEt;Vv-YoG@bVAMmRw-yUu5WD8Eq~GUS(b;Sc!*eS-AYRlissjyjjFwWeXC zk~!KJ9u1!KlaacS6pxZ8dR&0jp;W@qsHU?K9x_=9uooNppcwfZ6|HSzK8e7LO7U)E?UZyB#|6$7X zIpT1^qh3gt7{XvV9|Hnx#G<3GEhX{Nw$Y#c*pnzU>&=ZkA<9tG@!n}rZ{7F*DeP(C zCbydkr(3}Q;aA+_$TTSH3Q_`bDp;JWOyFwo^>4X8|MU#f_o#3&1!<)2#n9KaNx4dk zQW6bwhqytnp@3^>bEaYFK9Pd@qD^7)6d*MS0d3$>qNf&Ms@0K7jS|0$>lhZ7yqUymh zRt1`ae3dIEeB<05qbp!@3%o(7Rj(w3Q$p%F1c*@d>reDWXg2L^a+{kVS&aD&s15!` zQTwJgA#5V3gHqgmGKpey@r38aylaN+#7K0=B5|klpeCEAF*Fagn+-o7Oo~4aQNWcN z<$D?;#yX#2ZipE}V81oF(Y8C{IoLef^1$V&` zUvbSk9}2<%Dnt8TAd$Yc& zShye5-&`m`2qnF$7otN^8OFroF$>0kK))!LqXl4KT!^3zcffIJ)?fGC;1kvABel!P z!KN*(hcFSD}ayVHX0 z+6@+@_638S7<}fwvU2Sflg);OqlF2Llh`{LK9R!O`E!1{7+No_8SEVB!J)*F+IYKfxErp1RWN*7Jkt3#s1 zYwKoPK^YrI7Mn$uLOq22;yno^th)`PYVv&7EkMtWA+y_)&ur&)htcI&rnV+=#I_Bg;oX`M zYen#2k<7Ca?s7)fRD9J~c8!66#9Msf-MgLn%?_@{PV#?wh<4T7Zr;6vaZfN@B_*W* zNg-vV86{GKc)L*lLn{TCWeYfZ%L*%mOB#|%CY4gfuy`%DV&W1Bw~rR2O1iM(6D16f z%A%_H0<7Sm>1dxGi_Ku=9_J8ZmjUvBPM!5N+3FbQ6fYE7?ghr1t0NBK(qu8IyJ|?6 zNGaUfR}R2J0tpyp%C~e%2OC%;T9u;u`zmmoh}MDMQ&V8G@Qc_onpLKHRhzJ!EpjsII#=|$`WjtPng#F%fll=9op+b0VL}S)3~mDT z{X_8j)30Xb!8h+8E&O7)?j-v<>-JVp4uO5u)b%1$d#Yy6UgN9cnyxK9N%aV>3@wM} z+`=e+n5~^`KOq1STt{8$Ye!mEp2}|qYfMXq$2hi{W4wZa%0faSl$r}rlJt}?typqW z=|Mo;AiHzxkM8TYYT;YnTod5 
zM8#k3V>U=8h>gKWw)hGKF`yfbbM!xOr{u4puX%5*n);pSbF1|#A}TG00u&?o_y9dg zD>Hbd;!>0T^zI$dx1wA!TMm3AKBPf^x4JPo)vsC@)~1bv+30=-^W|bVE8zHd>C8o} z(Jz*h$xcnRQ?UX+WZ7KfkV1mfXt7Kut~f(Bl#1#IQBlNEDwC0{FhjXI6Sb!0ARbO+ zb>`N(!b>O)3MD|Z>PpH`n;Z37&Kfd#?J#4qY75op6E#a={PLB|zLoHR>%V&^54|Ex zy!XP(0A(R@?YRPk>tOd(4FjQ~-Z%7*Hct_&;b?Gn+n;8m6kI{qSb7R*EUgv~lt_Ys z4G(}%;X&zhjU+{ZuIy@=TlXscfcUmqCdMRGAihoDPjOtVzc?lpNdE$tBtaaW z!Nbi!MpIPK#^Uj+kSk(ks7+^^`S8DcXP*U8y<&1zgy=E=m_B-cM@u9};Nmh2(mN5D zT{w0Q?I3QRh^K|LF=Vyj&Ua~e8QkH4yS=4GjNvt%s9?+{lg?fCIDm{VVZ^S#S{dUW zDEwFHyt1tMN@MoV1!4J{L>%)6Ga? zV?*OkASbjl@_cc}|B$~tcr@7F8a#f8e>@b5`~2oE`q)zco_zHc{`}_2*Z6bm5&aFn zJ>K5>V*AnKufBQm&DK|s9(}R3{p87`ufIsPzW5dXEtOnaCdn6yWj0P$J$7-iVv_2g z`T0BfxA_bt6}#p7CgJPnSX@7w8^Yo@q_7EX@?+2iV}lgAWBXowJDswPdmI;-JQ{p$ z>V_DTj)gl``Xo0#91fDbF_s|5m|3_ACX)@w6fZ?_Oe|cn!OmO7HPXeR?Ar14la6Sy zV{(Q-jW-`hJY+IU5RR^iB#+}wNXxztC&1tFi%xUI8|`T2>;W~={$-Xr2E1k)a2MT= zYH#zj1NYSY=+k9&b6B93OpnJ)FgWk{g?XEMa4=w2JPSx{ZJv9*JU>~Uw|VlXbTPVW z^WdxE1bxu@*$}wKd3^t4HrhK^=cwiL0Pb@%nvz|QPWN{(sh}p3s?S#~j$oXX0JcZ6zU_FxjiFw2okA2wewoiuOdMSm zRW>6NB%+&4^7ol%g&Ekne(}e@ySo3;d44xHf1CUN(N|w@J%;=L>#x6hy#3WT>I&Wd z`m3$&Kkolu;~(GO=3H&)4|1^jL&>f&Cco*=m&<%CqGHdXAgudNR!;%*$zN^#dL&)t&a}BD>bE z3eG3#O``63a11Vr%M3g>gH>J!T)jSr?0dy@CRsM$-5P91+y>ed$i@u)rX8>G1y2_Es8R|%mxk@fmXr>v;z=dw2Mv+ z7%!Od6HX-(fcJlHkDVJcIRK@u%EvsXivq4UTr9Ql>X2n#$}fDp z1NBCEcH4UQtK{AE1}6LdU7sA5X=18wT_j)Y^^QkZ+UB4%gIE}~JDchGXfra1Z8L~R zbnL4LO`1X3iz7Y&=h7KOy8Bwp85jvH@eStYg6#p57|7VWxH_L9>DU28_5sr$ZK zuKP&DA&qO=rgVlC^l^=B9)No7d^m5gqFIr*gKmSGVWDRT%i)YbNV`~q;>c5?c6sGM zMsu&GgBcdQM4#dat@OARGXNH=(((pfV>v~5#U?ob-B%d0F|oc6iwMpYR9L}I1DvMJ z(4(b;oAo&(eHv$pnGTiUIEpeRj`Rda0r$>EN#p%RQzcJ1`Ci~V(~>h}_Nno#c>7Sc zsn~n!c5sbWbG1dz4J?_FSYzg9n$+zbaaOVu~UfjDbnwGcOE5KWD8b_z8%#7m)@bqm&e~ZmgjJZN29nm-oik@Z zHI29n9#*J{g+;JgH1I%j@ic;k%D6G5?5UWd=ab`ayN-I*i^Y9A)A=YAI>5rGx>E2P zgBveOC0)cS_!&lS+tXv9=FCV)ql+qfHCo*p4F(6R1mZpfPDV6tlAIkqZ{Pu#R46$s zemKe;?ZZgfP8f-+KU4{qV`c0DUPK}A548K)xvy#4sQtcvi_~396H*meC39=p)s?wp 
zBB>YyfF9S2+HsbpS&KPN8}5Y+INM;58Nh$G)g^wFeRO=lh66>N;etOo>?qcPVNs1O zNjR<;gOg(Ld`RmtM0_@%r^Nb=7j_Do(k3fpI$Z!qs2v6c&TR(>29yp|4D}IOL?GO2 zm?QqItD6N>7ZjoaRg(v}Q}-g$WiU#2=3<4&%H*xXx84hDbY_}DD(vEaWyXbFDPlJM zdg&!46Q2?uW*@ue)(szz+$%frN@`fn9*ZYVCOMzGFlpZqCFGtvx7Gb*duwZ}HroqE zJ10fJ8E>pST5OA*1&s!e)CP>`Bsg==Snmr-oO3MeHq>t(~G&axYzJC^` zFw{i>;Cqbgf~LaCGWOB`*!6}~hn?E6j%;wCxTjfx*~4&eT+g!crrSA=Yn*{v%0r8Y zvjZg)%&L^4m%N#iJy@)8_HmJ!LIpMh8xMEQ?Y}o3S5!Mk!tx zm*_G@uPFQ{@QIRY<>ttt$z=&5hI%z2oPP^}Cn+u>bSP+wHUDfz=)Po4w=jexz@#G{c`h!cR4;orSXBJ-t}ln%#Ih zpjG?mq#VB48}2KRXRNZ3l|MhhL)$`xxTC=rL-dGOptE;(T6ym+0+nQTYcJ5yA>MH*PLW!JG z=MD{yY@CE5(HvMqED8!tDVWUp!@3*Zd*(HMLCJTQ>VnG3Av6E3mHPNvk0KWbuOWgl zz2>O->hpoT9O79R-a~r-yp;x6`SsNO>W5r3=}GyKjwrc-FEC6wux7TXtY`_if*$IFnsLlee-C zXsentE&c8%P0Q`_6Q;GM@nwaOcnhV^m+g0(B>v`=!)?zh3-rQRx8RQY}O=)qh=Q>H9Lf*x0BA z`#HgqGz9r09}>xK^q`LxAuwedtCLs5DYAn4vhR9RgLeVBgI*r; zE_QoO*!)=KXF>&>lTC@9{_0}a3k#RpVzB5X>FnaOxr8qb~*!_7(WCFw%l*Ym~T7I3XK%;hw) zmKZm2t(U`NaoA39W3L0PsNcavNe5zE0_R|~lh2diy-O}8=_SL2>%4=0z2LAxivupv zG(kqHi)@!zCY~@86uVB(CwXgUdD%Ix~{v7{fH`Y$<#(l{&z%MdRPG4S6h|j`_l{(_xQ9M@P zDNNknEG$(y(peDH1wzn+vN#9f4)5FZbOJH`r400hS6AU@4(@+Xa@f^`n*bQ#OF0a(v zXgg=YTQ_+I87uId2o8V2WleSZFLR{`!cZW~a)Z!&Ol9z;)re_gCxkCrC5d-CuL@p( z3-}Os=*!aTz7Ad8W%Yn&nazRM4}r6#3^MRVkjR;7vK9wWq*18HrWSm3(il2t)B6p` z?w+6-Pk5(jBO_RjCQBkZ%Awwf0CXE+TU+h;Z}Y5qVAuBjw%ajf7nQK{2#bH6_5E_z zTLV7y(2kM2j1`&b`T}TgdSm3T)r~-$aJO?&6i_hhJS{u+0dsy-O;#1HC zeSzI{1=N9Ka-%7O9Be%`li}Sy^caYrO{88tR~^a#60a{uWcy@`Z_0nr0b;p+O2z z1z8>a#H~fmz>Mj_PMA`{&Z!y?WSgq-A$-rkUo@s@y>#Oo=^PU49Ds3A@(1c{qGId- zm6IlI$ydY34ycBZNtzbG>!xlHFFsd+Wt;)qbr6pVI`DurvDh@QIq(&UBB2PS?N%bk z1StCGeU{DD>U^*@M%{9B>RyyUiMBJ1C@N}?#vcTD_ON;qSqDTLc3mCaFZGcXd69s@ zO2~q8xapr7_xk}ju@)5s4|hAf1f6!TpY>|g zK{Gw?la2WE2*)g(K_0j3>r2mTt!a?3Ol~V=A>|IedPxxq1l1$_^fnc-Rw}?|S}eb1 zUP&i3@>(wDtz8X;nYJ{Bbmz-zhaD9W;hFD&z|tRD6!1omxYXBa11p4jZS(@AiCj9G zBfR~xI{UV>zDc0d?+pH4VD%M$X{3xw78({fD$vR_qk`XE+f^7Yf)BkFMxryYC)iJ` zadAEKKP>3OsFm8yqq>(o!vs^+8vqts^C#cPkPAk|XuepLfps`VV4-p6(qBB>)g@CN8Oc 
zZ<6*cv|7p4F7nC3HU9M3bm1lsLLn_}N+L7{s@kR;Xc z)-tZi8p9HNEO}v~Uy@w2EisCr=u5Aw8>1zsKWNFr`$jep;b`t6)^Ie-NlZNpTpC_m zI{3twe15cfs|!C~r^6ZreJ)^TQFlspDe30enciBPqRo>YIYw-3HAcu_m;AkRt9cl! zLCo)jRt+}a2%$4-bn#>v{Z@^^KjOcVT;I}$gUoM$TJ&Y-6Nd4r+RL)Wo0yowyGt&A ztCk1gQ-yu8Q^`0^yhgd^gs0#6RW&SR8REAxTtaXsgS*%m)3M4S78G`CofQr)48THY zmkbmXo)lG01!7~4*BUCXXtvP-VUmAYr-;+JcvgWd+6wr2R$R*w%{&P#z^z z<)v`Y6cFQ1&TMW^4ENM4VjxH-K!ET(x`a|AU}&JNn!R?E^mDUBl1!k02a1L9D#=RB zfYJa2)#cQq^c7rLPuU>z8u%euXxW*KA_eX_z^bblGE);O7fs23$pULjO<`uOOh}=7 z*Uh1iDQKv1ud2ioe5Gr``wuev5!4e@D2x((*3AiL)`4vYWayg?z6DCH5nXE zrd#zsnZ=O<_mer?9}MRYhSQeexJvQAAIDiM{vYv~?-BrfMg5QMM~@$Q@&8{x*?RIv z{Qs}<51l+9^=BU!ll)x!2l*2d|FdCK6n@|;w-|cg9~|#LKR6lg{kV5n0NGV}Wma7PXU^N8kqoe??@9=f=i=dD{hRXdpa@$rR^**C_}(ArU_Fkv=FU zfpEs=0=z-{r3oD{>Xbe(6&}%YV;DZW*^p%u;tNz{LC82HvsCr&!JGe4(cxd1|24=i zFCRc&!^6Ae|KEK5bshglcB%OPH;=cUeDef8-~RgRZ@&5>`T7t3|1aJDaR&CcY&Nr3UcET_tnxqP zmp|Ha&;Rz5Cr|!3|G&z=m)Rl($@amISy@3EV6r{f-guP)h&)O7<_jW{jT0pTrb-(~ zUQ_)(mS4dxL~^%M-X9RW8{))od|ynn2XiHu?j%=>#eC=C!^?bewLDjv^zmbzZm@baygsX>Pj3mmYif+@(w@hREc_a^d5tgOiO>}ffKXIi-(J1o{y@B_R(+O z4UwF3F--iUT>7RRyK{wp+zhp!Q~-Y_Tx)WV0Gzn~@-0XK)bQtFa86mC#?729d`P zSKD>soiQyOVxG|i;eiBWLp2e~h9-TqXL`wtIW{p)7<(K{Y47!cI$&rn9%bZDFlvTQXT{82=UXioN(Dvj%#2 z)s33@M{Kan(4L(p7umI_fEzg(_mlhgPxcS@pPk;nzXR2t>`0+r%n4U9!6lh#c7-3E zQhkb(^deh8c^|*%^m#TZt_My>&-V|Z7et|>;Aw~_SG;v>4D#GK-&D?&&_xn({)9ju zJA@_N!IKJAgPe|^9sR%d-o3wVBS{p^-})3-`!|QAm!=7jlw{vwb{$7a{2Ryiv7DVV ztI?rB5|S|HYXH)coSXaEZ$0{r2KbQVII~h>i3IvlU0q#WU0sjYKfL@g`0?od|9gBy zBYjV^KxQO*yfBJIoKd`e)aSrDJWJ09K-2-}xW;d=F6_2{{^iyCmrsuV&(VwWcA+t7 zcqt8k-U`hW&ovEi&kl`DZ!tV-hL$4YN&>4=Z*G8 zaT7zsBUQ8VO65#}`g|3`k^FV!<-W)qpj$(#ps>mD5Y58naB|> z$M26`Ba)8OX*!81v6%|W*%ZlYDPfz9x>(K2o*52_mI)@e;lxyYf3$*AeKw&X*h$G_ zVcf-8Yf>_)ubC(k_(ARF40WYxEqR`9r27TeQ~nUTE`0IheE^@^ZA^k!@%D}1C(itLg~K!f9=367kNds zD&DVJl=I^G>nF$b`zQMS-}qZBvVdNz8SImd@j`$6tUd~;r73Hgb57!Pp0#|! 
zwL*}(;UrU<967-!k!s1<#xOht5H+t=7=`z)U*r-@P|k*rvAm;IbBk<(uNn#&nN!Zl z%=~da85^gHPkgweekEy^Y=E!c9sP`S^(Gs$6B61Z*VzA>PDC~1nwByEqQ;-nH__(K z#Q~)z7Z(u6iYu#xgP)UZZj<_dKmY!F1V9`&tQ+xKJ_1&&A&|xb0y0T&m&i!+OWH0B zFyhe&pqMR5RHuD`b3a;Zg<^Ftl_{ah6Us3a7RemSnkRFIGKt3ec2R{VA{`7LY)xKG&sS|1#F<^9qBvF8gtl-5mHt3rDJ z88P}ju_2!zTCAXL4+1nk(4zSNb$*Ifze_Gsk&rWogMA{SjiNY7vP|r=g5V3VGBY*D zy~)bqNSw^J1mPWrGuqMyg_DCj6hG4j`hdRq<*X3cLvJ`@?b@t^CSt{zHHht+=I`#< z%>1D``vL<}2q|KvjWcDF(qQ3=hW0#7;>Z-1lZ&vXo;ajcd>h- zdmv?r8jDQwlx7RPwK6zJV76Vn{L26e=={P6f^SY9*_&{%kJgP3g(39ZMe{;KWSlV6%peUlNDoDa3d;-NDqdUMI{vP zK@wrl6zLueZC;~4kqWzbCK|`gudwI|EzLg*LpdCjvZ zB&QVImIj0@)tDS9AtiL4xlAS#Br{z93r$X!J7XjLMSNXctkIq)LpUULYM|1l1!I* zu`WnM@dptx0L1lcAT8BMX7bSD+Zg1atiYVh3MvhjmSr#FYm??+bJL#n?pc$0nZZqP zMo17VW8wT>_1iR5K>{KX&`mY^6WeO>TU+c=eOx-0Ptu>-s_U*U&YTIQgf$rEiCf{Z zixt5_YUTiOPMj1IV13Aui3~iHB>-Xm`U1}S!wjPdAX6n1qOr_JivqDsSDD1qR`50@ zp80wX>*8XC`l49R?njg|lNoB_^%|9$wP8w5i8-Dgk^ChL$g1RYuNI_zR1JphD_l3o znWtEWg(BcDIBnR-8d9(oU5g~cvj7Qm=?tOc823{UQt&teyCS71rb7!EQNvCNolMa( zcabK-91Cywn#SG&`&2CYfA#M9>mQHEH|*!5pT9eLcO1+Y38yf~*UW>=TaA16+v~S4 znC7|{;O>kU9+*YRxTsJ~{sYs!Vl+fWj;)*BPMEhfrlF#9A9Ax!tUH&|UZlen zDaUXDlplh8Jmx*|WNWWUaxMFfWTQm8aW3o!4H|P!0C4in=8G&fh#VzBTOY3DjL7WF zvKJ|Im|=}vwVAN(pV?7AMobFtj*j2Hd3}5|c=7zztM8t__{*_ps$T#-pjE2sR!~$t z9#h}w;+tfT)hP#NDa}2J;Q>i1`sYBlW@w;r9kG5g9-;Q1FDTTR_!(`{q+YS}4hsIN z0Bq^W6)?&`V^h>lvO^SU9Vu3dE8*(Xn$ae$FKTN!rq#9gV^vdM*G%R^ zmMpQZxPe3=)6q8-W{@&(kH zB})qqsY&wtMic}l`}9q;RLfcUOy$;Q`3JlLv}KEVcb@{0T*Fmb*s2hlk(2N ztp`GqAaL#;;3v`?C6TIcl9Ms7UpC`auw-Jm1Y z`g9?>Iklw3re~UWyXp|03wIs__DM{q2&r`aphaP>3VeOE#G5YE^1(uP>uSzG;f+B6 zVx=VCqwq?1k>T4_=a*Q&$$@xr@#9(;fqWG&pBXEZZy^JVVC-j_)$*FHv2f+-(e z*oN$*RfbMT6TG=6I#Lppd5({@9$(eW&%Ps?XVa0onRaOQ< zi{Vrh7NU#0uMxGH`gWoTDZA&Z1=)$58M5GTg~UNJw45FDj>XVF@qH|3atKfl>(iRLM#zS!)LdyHnPs*j)4sv!_L{l=5wuB=%KvY z(=IO!Dc9)}U2Iqkh&55vMsOb7%$NRF_x7IdXOOv)CDYM+`+k+_$BDx(TVG=^q>7!Z zVf!G!v7%waFYk|7ln@cWQK2JxFQM6{V3_81sN2=?F4XyTj2 z1*;G60D5z}_Rn%F2YDJE0SY7?w~ztUYw>D1AMhfEXO$S_-}|SPSI$jsEJ6b$2{@T4 
zb#jqFkZ7gZixwW>045j}*_IGT5ofe>nQ4TRw{B(rY#vZB^6}^lpA(?UCS;Dr9=TM?puH?z+%W=7 zcxm9MALiQv(?6JmTD}ga(==~laCApIP86!}I$MhgMyoAaQt&5@XigB}#ogT&0sSyYJyRToVGEQEDrl3C%*QM4d6mjfBj{W-)qe zIwu{L?S2Hlir^6fn!H=GCJluey;D(7EWjU&Di{%f%?S8#ZphZGw?cVEOvW%7#Vf=0 zTkrYeR!T{l<2i4=crIUR$qet*#qIIAFm9FaG=>NmI-bC=JZL$ZaZZmU`0MlcFMiVE zJ%1JA3SUIU!k7bZyXUhMZE8e@axylUM=D<}qC_ zo3z^mS~ZS%IBVm%5p#j`i#UBZ=kauoHl+kIU4(CTydw~4*b;`dhN|G?;al?{qJ9nUcWec{FkG@ zIYLJf8a8<$XaKcMdq+ktMTxn|Dqsq13+1okf!W{m6vHUycSagvm0)7R>`I868wTmnf>B$pbuF#ld61a+` z-dxUPE!VhQw}5d~4&j82pdgC@awdeLq_hTog_w3a2-Yrp3q(S@Y)bDP|=0*jU1Nf^VuCp_QkVxlT_Byhs}d4-7%^0UtZqC=6K9n5Lg@N(>Sn&S8eK#e}r$XGS^J z;B(ponuj34A)~@|NvkdwcVyyVYN)k^5|vGH^*UEKQBCux%1Q-3aiHw16>uax8;7`| z5a&^peY46+C0AZuQXO%brOTzr4oIs-AK;XM*%UAElQ>WLA`3OCH(wtVK$}1uAczCf zPvEc{{)xY___M3nvsNP6NfH4Rc=V43eb!y%G=p^`CB=O%#d74$$^u)wpw#LN?k726U?R9Q$% zSs27L7PoN%dw9h__Y^Vdvm_zui&+Uz;acoAa7;P0xJ!pG{8|%3syz!hl2{FmUf^3z zAp)mCBLA6xMrwEw1~0=`zkPx33q&G%k8ZE`MMd|Ij4nGyms)3qM;|%IH0M0ez6Ge` zZWNDJRB6Rpe(227xK* zWGB7`C#%`1y|6^6R9PTa=rS!31N4=|?Lg-&k{yYelO2h#%-N?X<4}4iU0p((%Pw0S z(Qat@F(MUwpxaBlX`%aOAt~Y(pvTNU0=5qpQiKFY4hElLM=+fh3KRnnnpfWHFAD&X z&Q?iLeg<7QAsNOC9mr$^$-S=+|XG5m>q+1t7sOAY!_WK1+MVp<*Y+=Bt z6JI7}l0fK;S>`!#NDCeT*lXVP^YSN zmrsFW8ZGFl23-&rF>MvJn0OXX(@7d<4gt$G%c>p2#aiM?j;&ugQznrGCIWHwWCBHUq($Q+R&3s5 zR&~ZM5Qg=wG#?@7fFxfgDi~9pObya0*$1cjb;H%143j6u&gNY}hM7}(KAW@2*3@wR zS8aaa_XE|QoV&b_I>!%4gCDfA?1M1fBB6?rXgXl|AyoPs&bfee*(8G{j2R|s~&n|t@=&!gS8SJQsIR%yeS~Y@EXW%3aaKqReWb#u}bqc zDfLgUcR6^NYD3`%h8Eh1VJpwhC+DfifaYrXO02zXX@@VGho~rIcdZXRr}l_}T8rU7 zH0j%A*&~h$Uc-8v!dbk=&IdK=jtZnn!6nh3f9~8TX{7=l;ziH^zBbKJk%G81939Q2 z-~wWuWQmKXASaSt_tN57nr`q`d@h``*~}&O$TDZ9vmHSN2Q30@9XQ$mhdpJmOQZ#6 z2S&b2ydFaUvh)*1%MBO_Orq}Q&Ozn z!er_6Ojhu>#ygK?H>QHhl?gdCFTnR(`6Nje^>(Y>4jV#^F-vv)SSHWlneXNTV={ST zF-ae%c&ynmK_nLr-{R1kkstSrcy_T&e?ig=&>^8cm;O4Y(5{HQ<=go5xkkj|l4hY} zqz$%#3E&LRw!B#~O3<{EZZ}61Elq(h-i__d+%({(8Uq0|qdO+)PLBp#u+P9#5h>*6 z#AU>XBM)tbO~25+DP>p;5X!=di?fj<=vU(CZ`!H4$|3xU*qalZc=n- 
z$IscRgEE2hNmKP=U*(9>`YNwrWrFq`q>cUHcFZnkqJSC9OA2Nm#1Qi3e1U*!PmKHm zCsmt_aN%i@1;IUQ4-DT?R<>P8dfV8y=$;uA2d-k;cFt zXzV`HSDx_an&KQ1F*$q>DF>E_F;RsX5;g}jG55e#KM;o)AhXcP!f3TAzft=bY;-6j zH~96zU^c|&1r>@j?Tn3)MdUkatE<~2a`2l%Va!Rou1RVbL{!A%@eyd#+=;~csdQWSm1 z?7b$mj~JIoY9(m^hW>9*}+r25?{D6)%#hS)#&>pp>xjt`Ap7W#0j`)Ju zn0$7l8+|pXFwz!3YlYZs9){c zg!-*y26#rG@??r(dPcd77Ug(C8e!Y$@F!lj8;%MY)OP)2-=&*2Qb%s3VZMV5$~ zR4g|hbsBPXYE+tq7DopIW3Q*>g>YZo4E%zHqRn`?BU@=Y>`;@sSa_8!rARc>eCk z|AO{hP)MiGSi2^U5Xz*r*o)h!OdPFy=U1604-BO9~qM zOex>-Jc58s=Up>ZfRKYJoQX5}Bu?TD4Am}jFWa0=k_bxF4J+Yv%NyMm9Sgu0R!P6$rGkohgi#BZgS*&s&M1rIylvF+sA|cDDwW${p zPYzeWs;NJHwxa5IGPhJuE{_BVdM#q_!gItXciD=UlQv;=o+&D`&m@-q4wJ&Ia__Aq6G=WZyAOfu?hO-4&9#7Xw} zBIdYSjsu=ekj7|9Evi&fG5~`@E2EPy<&~Sg1i$Dp-})l%Yi{t7-tJUNjz2qud5L-+ zf0tv_ucAstoK3dK1E6+DGmZ3eex8sID_@0(&}g!gvIKLvC}5^m;aDY<&_OVp8O(`@ zelM>$Nb6?>DO7!pV~oou)n&%1K!+Jz99Q{F_Cv_|yUHazSP5{%_sO2XE=-a^RhI39}8*R~potomp8-($qF1Aed z_ujJ*)$WPgmq@^1Cc@KFnUo3BtUQYdY4FQfNlRLCsyC?i$52tW%EQdMmq1l^QHZU$ z_vun7J^r~$GMS`Dvpy2&EN9sh2cYc6d0Fj=J_*W z@%4Q91HL(u`q5*=sQnbTc@b{rJM?pa%n2~+J%9%lpl-a}zZ)6L2wTc+2O&c-p zgjpsfe5IEJifPLeso>arr%#wiU7R|v$v9_55X_co5bp2a;6xb7S&2y3V zuIVOMe>;hnXkz#*_~rFqUcdS4>pe=0f{MTFS@1nm1D%YIm+^9y2e8n3!QWA5O)C$|Jj*zC3^Yh2)>}>wz_{}fx zUK|~_mRHLg_0fi({ce~39_;t%Z#&}e@DsM9&NpGyIp~D@2mAOuJm~hi-vsS1fq)en zOa{R>xR+IxgJNfAYX%8F>hB-q=fzK$ZQ^+EnfXu&Q|r1E4iz|CwhFrIMAS^a=FXXCpzyHL;cp-VqQ zp-_KQM|bsP1)H}nu8H>TUw(Z2^7RjI_R5LbeI>V)(e(h%B_DHN88}}ICb+xN%O#wD z?9uZRbn3xR`U(CXC|e8nv6V5>c`Gg2oW~L`#v`c_b|16pu>DY(PPorCg72B)J^wG# z{N3??d~5Lk&NuC--HF0`{{O}1|6c05U;nuNd3PV3o!@RBYuEpQEB|%(!`}V+{}MkZ z!0iXf?N9fX@%ey*{PzR;GDdB4KWNjJ{L^fZC7)7?^^Xtlna029`oDDvaLxMfbUPug z|L(#5LAx78xc+%T9TJF*=?Pl=hB;wjlQfFMEwQ2-7Kv)NtVs#=pRO}T6i;1 zw+G}5_8LwCSLw1&6&rhdB1Yg#>Oq8)*0TvjvE(B;mTz}e;KD4n)cQQ1I%bY7&MweP3t0Rjm91tLk`A?4nfxs&RXp3 zw|Ax-ELdw%-;)Vk`)u(iqCg4??i;ksFN}8kHK8RHb{G0)($U9@HeI;-?gK8i$|n8V zX4fjBVZILO*UYON>(u}=GeWEG?T#p|2n{Pf6tj#kaiqgUuaK@wrUNWtI+mBkkR$CStO~;%M{?{75tvwU8^;=W32U2-M 
z^V;iei<=N5I%v&*5n`5Da}Qz`-Rv5~tg-$U#4KH{;orQZxC7a?!ohYGI$IWRMcY=` zx`(c)aJtvn`=2QPu|DwD`yjcm*Q?O~wB7Sx*xCOk=-tbIzsLL=YV9v{{^$7*+ucs* z00jSbxF3c4_wxVmH2=mW#uu6YDB7p_?}i86upQ$0Z@+V||M@aM_JKisW2Nw(BiPGC zkJ4nDtWf`pAMWk#4c;An|MGb7Uq|nbU%q*L45Ik2zrkZX^k85XQM>-=(Z@?VZ9EgA zAZ*Hbmio0u=G@N$%eK3QUBN@NyT+K0L-_pkH=|~2wLmphT{J3%Q)saEr~KKU@>=kx zpw2~4xQEkbAiq=N!oe#QzJwIqFK`C)%!$R|BrnED?bxC|K{@qLpy^YnBrZcy-&Su3 z;a6hDh}S8Kx%Y$Gb6)sf2FlQ=`)eb)rf+sL+vWMpy*?|3|xx`3=Md$sT_?4cRM`NwoINY4gr zjjYa)lVu%5P5D*8Ji16mAJLP+Tsn1gbu=2mpPkocGbR-3AfJ6~ArR?Vo!ZAaj&nG3 zK6n5}-@X47<<8P8OuP#NCN)G|47Wf7lBNv8F-TnzkWsI+lYEC>4BK{ZU-uf@JKz6Z zlZCt8|HE#l3ugn^|MxplFWm3$qy0~3|Nj2>i~P_YKNy^?aNQ0Da&t#J`~eJkNy&Gm zTrdQ_n2*v_en)1H?lOB7QiN3JVlhL5+!BUDvcozC;3hfgN4{VzhU+%-I4{g7}baFzC4<3ZIUFQZb`9N={Cz4e>p##%r7VN zbIlYpxi5&sXA~3ooV+~wBl1VFMa?JU0X6_#p&_+OoMzWOsfkV=<3BF{7|RRk0$Z8J zSAf~`8O-pyMxf~9NrIL4MU425x9|rG^j^Bc`coe*N{_53t&tLrIc#p4E zP?`Ep7`DFK7k#HuL!OzA41d4uq!!mu_9s8U z!YIYtzN!gxV@?A=CT^&VDX_6Bw(7wg!~VglNXG+UkOK_%>_KnVnk-frkvbNxGj7S`R?cE@8LA!fXXQp(Q(ZKnsfpq{%4eI z6lOJ7?$4smNgIxJ7^tpLwuTPOH9XVlq73eTmW%GSsI}jEDsbaM4?LN!r7B3*DcO)A z>~Gd_zH!I0q&Q(NuIbLP*4O}2Bt^Scf!x&2R0M>gQhWC=b4q}ZPLt_8yQX=uQVED6 z`0T_eciKwxaeAI&YhG|D`z$UYFUc>`*|`A+1#D0v?m&dJOvWM)X{HY7%lR^%cna1| z3xg_c^V`1R`lS$~;Wba7$YpL9tkO3BTne!t!TApJosEYSL98$4b3DY0Tzq6GI|^x) zHEkMOuu-+?5NHd!aU87OCI~FM|01DNJ35AyFX{1m(8Q2kDjg4V5K4Ts>E9mU@sJ-| zuHveH(HI&L189IY^P01jDa;UutspwVf$d5{t~U3Q zraUG28jp-NF=?8q6tarM-eas`h0%>PB8rSHvd=-VJe1{;|%@_$>xLYD% z6XdGd66H9eeZz>zAl$h7!3n%Rsfhxf|1Dd{|)?j0q`jDki zAOjoz7Dp$p*H+83$A`5>qX4eiD9T3yHn$jbaQy1cUthiX5zh?RmACXm;h~G3vnpy+ zaDu=Ee+JM*h~00IqU`ogn?@s;J|H~{<`kDiN`&b2u7gc!U-+dC0FA^dLYNw@jU*hh zPE!P4#=pzUB$+iSw8Kvld1^bRTD8)a{~Mj!Ce9M1Klr4gm2lu|(~ zKB-ai06N6tHOU%=WL6xnIQEOGR=UtZq@SiYTke@Ju8MeQM`I~lx#8U}-Qps`g+MUN zOQwnihh=$pHp}^u3bM!f@}df(UWEB}7^BI0!<@-=N0zpGWFfsqaX+A&GgIkdK!Tkv zg+x;h5?X}#F!=8I@yiz&>oa)q=I5WEzyAK%h8HtQ%*Itam?**L<~XeR zA=L+Zn8i5sZLxMk5jWIb(-4;1aVm!Ck^~L!$p}^!yk}r~&A`r}r9f6qNC6@&X<~o# 
zCZcjED2V+{zi3<_=ELk&muUv=X7D)NtZ0ORC;ensEGZi8tSkCh zmtEUz2>%Pj?@WvH#Q^x<07FP!3sI#~pf(N{EvjCxL47&}3W>G|HL<&_wx!;s99kbI zBNn{OSRDKN)$`|nziPMte)Yo-f4}PWYUaK{doYyy|JqQh-TvW+UN87xK^uzlPjS$) z@vVoBdIEV%)4E8mCh6IA9h%K*r@+s{eVRN0(T%4toedU~c$9o~*on8qPJG+g;cUU> z)a|0D9m2l|(c{w`myZ|44^oHZp*v(%W}b;6QE8DYRi;z6q)xaK+K>wHScRfZ3U!=9 zH$paHc|h?8QB7XKOdMnv5kB-4^P#MOd5i?O3Hr;*73RPxxNd>d>}V=(UtwI0AAV@J zw`d{qwP38k^(}Oq7U&0B*kFDSTL+%`UCezGDlEL<&oeGlvv+C^?M2ggb_W02$bD)J z|9*{KQ291*ZPA)i=I$EquPtosuVGXQCNf6`buE~og!FK2xzH)+>$SZZa=59-;fH=-t=shNb#f45_(Hrkeu}73AgZdT4gr zp2w@{Vt_Z(m??XnZE^j9HR;|95C|XZmPq1(obLFk3wfmC78;eb$odfH@&uFIR+uHA z>LzQ6Qnoa;h8E2_zCQ7y4Snw_I)}Paca&||FIJ)AZkLNROV$sQO7YRJz0FYl^+bVo z@Mly4EhkK5f9{)EHS~=El2^X0`@~TOPqyG4Tf7{|TO7|Yxy&F|mfV&BPlgu->M{s_ z0nIIAG{r>)Yd_o;6|p!!t8O>I$$nTiuGt)!)Lo9sLj|58lz_LIrB}Zb+Ex1lZD@tk zDm6I$NWG{s)WdQGJip%XNk1jymQC+6g{c2VhUmXce@HowDfD+j5m;-K5f5up@L=Wi zIk~2UY?$Yjl>b}oCzy~?-;)6oBr`f=g;Cyb3ccRDz56w-(x+Wa!%%CQ5S3LBFxeR` z>>@UZHTN>COh;sb!Z)M(K}i(}jJQaJvUTyOQPztCT%*MaDAm;*wq%zn(hctJl6Kba z^j=xS-1>Ys{jw7lW|75ELKuvbKbWSJ-L8c1ctqj9B-iWA#jI6$a-(BB+w@jy9975MOgw49xUuU$N9SaRN)3LQj_sNh=~ewJ-EmL~!U++n3C zugf|%IHlTbZMosM(y2Z$();4AiH|jSkBe6Q>eFIWRo^)*R-Y9Qyu%z`uNx?oC?Bd( zLe6-v5{ll=7jN7Xn{uxOSy+5axLfVrCdp_BWbc-^*J%~-z@>!l8_V#62$P=JqbNBK zt6u3nHDQFq!H0aVUSTNp9D~B-l!9ehKLPmp&J~STuNM8o-Vtv%M>7V$5H`!r0vs;6sx%Ad5<*klhErQXA>|W)OUih_zcKU! 
zJ(<-+;hlZ$tJx2IoXy67YVHovpq(+}*?2znuUqYdsR|X(vxbzd?zdP(+*IIp+J|;mJDh;%VnO*h0>LR7M5@n-`vW21T4*J#AIuXVO|Edh6auQjieRamX zK?k=nWs5DQY;i?As=YOdmGGPPM)!U<8QWd+<^q1UlC8qcP;dhbBwNgGg0tJW<0-k> zb}46LfBjq}>*vBgE!<#G(lW#h3|+?DVMng5gIz5BiUn6bD<)uTXJ3jH)a(AP>)MA$ z6O6UH!tBZuY+yqW6mM3IM70g~2@}S$D+m)z<1V15bogE?JF~mLx8;A1JCY&^yctc- z&M(q`T-4TeypLFlI^CYo0#|G5xn)t_9rQ9of12%YT|`B>19jYXS3|tG&;bw`ife za|`ka72ddoX!{llaP;}`#tnO~-++?Ri>m;^K`EbMXmFQFzC$@2e@F+MrEiXB=OUkPb`?5|F?`bKn*Lvz% zh~E-Nh0S%s&hHz}FDH5TpnzN9vhC^_vC4+kvu%})RN0Pi7+B>N#zD;pk2Z`_pejVI zUYYA~8L@asfLhTvw`(Ba*a-@RP=e6g6okK9nn4qwPytc`nuFjH>2@pZRtdV>ND`$T z5TA?J(4SK%+N98$?)=xHWvtU^z!D=%3~0gaJPt*e6a3vN zmB@!wY)Hk1rGxVek@1!U8_T(hqe{k86rf|tX@;_ip``j@$2ak7Hr#BZ4L93p!_97E z!`s=UU#d*B!Y1P2=V0qVF@qDr^0; zv9~)W2WTsIC+HNzqCmtvHtws@xCOM8HLkJu1<+SciAq}=ir7O1BAX`DA#}j=P7V2{ zd=0IMMywI}gbIt?ygkEX!d!_etD>`;p|67c%5kViAm?8y*ozV=Tbt{H&yD)GURh?*?zl-Q^Aft z@5%_N3BAgcW}>Z%|TUr15jak!!fh$>>N?sVt%{kfVdH^;3Uj^JFMH(-!A#9?KR zm6W*|E-^yl zdUkfx9;glLq_G{chBs;dwBZ|4Y0Q&t$0Z`0jLf)TB?=)usiV8XLK!sScF+`YhFp_VjsQox~a6t^<QN0jsq|2Hris6tI;H?-55!-oeD!0w@|c6 zp-!nWnp3b_w7rrTx#8Gc23@_DT^Qz{)>5F-&6QAzQp$Ou!ww9)e}Ay!(m{~!t+Il% z>WV72*vm%z%2o>7y-rze?VQG*Dbu9Ed#M+70p}tZ0DMCnfSB-@$3|6)<3QJl(lv6U z;b_2&K3Lg{@b73TtJ`9WSx(S)+px34h&cA!;5UQW3P1NFZ)kB*)6d4pX6P<%T_`dXoY4;4!^<_KYyBpp*?w!`*0+$-e9&#P zx9$ky-3eUs4DjaN`=jqGjslv*^JZ5dO?ipps`x*b z%KqDdQ7Qg^E)Pz>WiL8TdF^pJJHx;Kj(^MM?z0`)JStmCRyn+wY>VaT45TzV$+jag zLzJit0Ub|B5VvT@4X|?Ptwez0rm z5EV;}xYRzq@4@eGzw0L#EwN)%>J!Tw^R$$#1$P)r?$D$_UL4hgz0*7$|{WXlnNur=3bHQUIUJBt5@ae&sY-s`+h&**L(@jc zC&;j{rBSdRj%~@Vr7%X_NAP_Epf~SDwQ7+!^hR91_ayidXik-g>YcBZiojbMmQ_qMg7JN)%^`^)WAs`)8D#? zKw31BHbA1kWAQf;f6q4U>CBLcPPig9aK6o0{~dl&o;ch+;Kbe6YtR79&p3wHEKqL8 zKbT>{?_R%GF}a4U8p+Up1GQ@sM0_W!%I=ML%6jOU7GR#AZ8%S4-r>nD-x`gV{yyG5-FEi%RcGtYZ76 zhL@kqv`t%Xze|TCjVTap>;bUoySp z9nk8)vk7*#nIX>>RJoIQI=6PCXvfYs?(D+oti5w*rIU6S+{JEO3Xh!)C#&aKO})x7 z+{xDLv~KUto!yv1Z{586`fA^xuL8QinqX5m82guH* z=b-}VyNfgGAhw(MvVO>wqRTd7O9hHbDD18l<)`98>00#DEr`s~&LVSJZ(B>v(Jr*? 
z>2^~}Q-X1c;Ox%dZj`qkz_qgTmUJjYWUu&aALCt${CFpn=nB!@^(CVUnZ+(yAxiJE zwi@d$Ny72~?*5CHXno0U`55$l&VnYPuf4P|5c>69TC72+ z|I*?PFdS82cE9929|#|h>+yomFQV0jTQ zgYi7cF)1P?OjW?lR+EW8L7dfFdG6ecaWc70g2Co2adFh* z34FBNgyWMM)YlMFG2bA^XO)6RRkT zgZ(ggIZKx*)V-#3o6C!Y_cPjE{k!Pw%8D!Ot#UQZ#wL_{SCBSOxz`D&35mY4HC7FOUPL0+i5W3zdT~D)RHlLA z)%^_rVB$VrU!Rjz?pf$gsb#v1XP!mx6uU^WY2sRq7H{+zYOM=kkm9e0!;vWcD&0KrAM=hFL z+4lC^{w*xuB;8)VF5ezQgk{R%L5>3rnR(UC)Q@>p2C|BJe_f&p9Xb3ZWbmeIfk9NhYFAqBd=EG zlg2eTo6mz`oCP(`Jy@C+Sr=6L;mEqOF1Gvk?C&$Efq$s^Zg0P`d8@p-8>UKSyUE%c@2a4{6@wue!vF zFcewwKvXysf1g?vf3r**CKHTxMdNhynKyMTFT8Akf784!MqW@4@Lol=9#J*Rj$w3> zj6SmI!e5U5cC0KG;21$SL3D2z=lJ*0rRBuu%p6$jSaBsOs;zfjw$!lcE{X=}?98WXeemFc`ut`xPO{(~ zS)z!oAt%`5be4=^^Pk|Y4xY|nV~Ce|4WWJc`iD1!0IdpQj0d9__;JfE8Fu@f_S3K#_Mk?)d)N&3`%%<8>_Q{Z_`zYf86NfzL-+$7 zMD57mTGZ}GJ?JS4`}^HqGm83QFAO`)sM9~(e+mz}{jeJyg1{2>`knBw8SVE0vW~y8 zgMQTR?T5|iu;1&p0hH)zzjM%e+HQ8*FiL=_6XIwO!e%Fe<~sWa%?^xb-w#{|I^TcV z?DU{nr~!CuhrM321LJE0u$@BywtaBe>^y~`KRxU-4+uqzfo$_Imw;UbD9k$USKG4ghtLx3S(~AKvr;TTh$&?LJKBe%SByn)^|IKWgrG z@a=vVMj1Bu0UP_xeV7J6()OVjcz=kX>_6>yyUl|();NIS;o2>2QaNbv49F}24}R}JcKq6nujp#cJna8!5_k~;Yk+6o@EmOtnNK(rd+&r?Dau0;G3;tay_>H~DJ;12xl;S$8sJt_vR;vZP$FdMi6L+lLJ z8*;m_e+XT|>hiBjSm9J`pME2k1?ED(L;8(I>ES-T-KPc*= zaPr{WApH5e2Zk&*naAT`wFr7Ye}|IBI9|p8Ey!Up3RpTb0zQXxXsz{+c{;0~#Q7*q z2Oz*>Y^c-VQBd!-=_h#dge=*u0%iseb*9B8t1`*sd-o_nwt0B1y8tMbgJd?EkNpW? 
zO}Xf&^!y@NWPllz&H~v;K#fSulYWq2%vX~!;2~LPx{}TKMKDa73)4DX zzyN`tJ>3UcBs^$6?L0l$k4mx)gd*84)!WpSPgU6a=3noJ?7AOEt+3tdJi&?2>GuG{ z>KRAC1p=5)T8r!5`#@sd?{?|$!G4eawj=)DZg;v-ulo(`dEJBk1CT4DZ$O?s=ytvd z+Ft?zD^yko!8h@2TscH2c6PRAknp4a{y~1uviUR^49-@-x{|>lNT&-pU6LA@m}WAC z#kwb+&U5*RZ_?#AA|m+(my3R}w?{qTt$2+5Lw4|<9`H-5-qI8&%jEfF5Pyo(Nel;8 zE{dhjuxnMOOk=2&X5X)-)9V+iaaU`WkFu0*^ajAfr*KY&Q=L=g&)+`K%y9r4T)G?# z1S5R(=;I~qBDtx;h8Hb<3XSmx`{g3V?Q))@P6|HP8>D za$kM)B*0W9SH>z9Pm6aYMs)(A?+o4;SfM8vMDZDHguiNK3j&H4L^Q?D$o4AXa39#P zyQ6cy+l3!JDu8L4j?%7;fI3^C2?0>+QijG}gOXoSJ5_rY)KDLW7x3w4{`R?6zzdOs zn$Y4@R={nnmp(N|?dYfV+MuWlg#B~|%X&6SjLw?w8Y$*2Qm+(Yd#2t&pNi3B%!`XZ zFB*(7hMSCS6m5Y4X!73xcGtncp`VT5bZO8qtX>Pv+5#SfC3g||ES5^!b&_``D-hK| z!upiXSNY@`9!wGtWXEQ`yO=8~AUdm$!rdpa@0r9bS!4sD5X=YTA)Q4A%h6)9WqUEt zm$*{NHk`h|>cw3K9`1Jzo}wKM`Z|O+U&d=fdK8$UJM|hP$rYSKmURBA+5Gj-#tw#~ z>8o)yaZr64(s4+6>=|czJ>Nei9GKQt9_a?k@oM z_wy-?<#;~&m@KzMc6s$b<2d^`$ZfH0Ui-o1eFXXGj)VgYYmb84e6@tb{cY~~C4jFJ zJ2r`Vv!gVy?nmN5{N1kdy{K_7YOE18oTK$%h$|!qzPU^%13`2y@DKiqS5oi-HAg08 z2UMHXPogS}5DJXp724|sBs)CVPLK&TGibNM4TKhbyg|#afORG<{8hg@iN**7mPiHC zMf@oVE@O0u3S!Lr6T?vv#vqa6PLTm8reh-LzwKiUJ!zHm70>2?=FFT_DM}UQ&`@5j znNDgu5N(^wk9_dCd94UzO&W0(Util!d9>-G&?-wLZPcYM>(1m6e)CC^Eb0~|c|}3> zmX!l_ZnDpR8ZDX~gPy8rZPO*X$^737%+<$uk}WNvS&NqeNIdP^6js}JFRbpavREfb z+M)!Bg*B3Wt>-vO{p41JPa%7@cauHs&VG6K>Mff_lo+7FhZy$bd<(p`P`Ni)Ws`c1 zUO#*CMAK7&n2G3J&3=mQ z)C~V-7ykem`V9^Uh52eoSQrRvY&aBT8H{qE)KyiJ;#X&GC%+n+F>Je8*|d9eGn%BZ zt+19b$z5fV3wx#jXKynyp58)Y%v!D?$S%ZG*~;%POO_vX;8?{6syjaP;4u_t;Ym)N zZM}@kkAFKaYjrmxK>CVAPJt{_;{4Y^+?SIv9hxJ-DExyUF#MfGAscrn1x0@>1V&$p zDB~bkNd0GCw#2lV>0qWCu{;I514ms2GQiH@@e1xufHR<883f(o^^$Ux*>OZ2UiZhK8|u zkEbYsdaA>=jy25-=U8G+AZVEIJ^MI-+M=QL_ukaJ@5f56~+H#RyGf`GB|FHLfNv&jckf zPJ=jj_2&8aFJJ%2Q`2N0Up=X6s`RzB>Jn*Z2LFQZ0Hjw85lxJkRxbdh^XfKI@ET99 zlF@33xTM%G_1gDG-~I9<8t@#wd-vuYevo51i$5j-P<|etC$$0%WPkW+C+@mP7s(`@ zC4+Q21q{XpYN~0##dI!bndXC;)mzbc&TjAm_d#q}H&IuC>U?tD3??by{jbm8zxau` ziQfeY;ig%T1K7mcWes&;Q!R~$wWP9*?)A2)iU1gzW%!#Vvj?H_?*(5&Sp3Z0 
z$7O!I8OFUDP#fY()6#Ps$#4T+Mdw+1h2YlpuB^7;q!zZh8* zVb!4^{9(QUc90_~LLrDTi-`EZj~O5JPPez;2>v}-)zRJ)liK~eeiemQ$Oh5IV{U`u zH`vYUeIp)Sa7<&|H;Of@JTg>OHBkk9l{=We(V-hiBh8n0i7Ed(^O4@rh~ZhJz5717wj~ zXJL1uH5rfSeyKhb)?)o1;z^$Pq6kyKTWW}M9%E%X!M2vTUzLSqRuvwH7M#ilX-!xD z6sssDCKOJJoaf4r5pIGFd>(Cyws2Ftq2h=-SX+UcEyc=ajX1rA8}%9)z1nCty+^mA zHSPW9iTJ$@-5nay!I>cJ_}~sHd18*;#vq=ydbcr(g*}X?z4nF!kC- z2U7#Ie!CTI=2Ka^LW;>wJ#Lka;M)MYDqTJWz1cFk!muo}_&mvc_Dy(Ghnv6);uO$-!m-J;3M?n>RX+UXsQ^m=~NWN^^! z*j2J4Z6I=2Gpr3KPuh&vjd0Ulb&F9{E_Mwkfj67OUpPZ%gZbqQV7@JIH{xFp16VOD zN;eB8^+pq6hwv%*HzkSGTv}VI)r9ZsE=SKrJ)ibx+6Pepzdg)mkMz8h9C)dA@hVPV4u> zPwP%GblaT8Pg2`1DT6r5fF@Y!ga*7u^m%J+W}7L#!PvH(r#t1W+YI>@nWs6^=1G@N zzjb-)^&WRiPRK9t+c99X%0y7NWP1>lU@zk%t{Abd+$Z1)0X41{2`1P6NU{w9xp{*; zs&#G3u{$naq?(?<-p70|KKBi3nO!^bN!A)*WDlq~5CEd;zc5=a;0>Q_=Lb|}(nx2x zF|Y04cYGG3yXnw~m~Ullw=^((%Qr}vltkEHsL?fp>3sGvxnAHswK3HHBzl=9AZ3iL zAT(3r;>mfMFQJ*yBp1pb*Gz&GI6AG3?gSTiyj*5=F?mfi^yTIT94Qv_+gaT-%4gt~ z2**AQ*jNS8$fDO9&c$k`>}BqL7U*-iz@~fMaEoQ`CW?N0k*P{QImS%T<{|^$~ z0mc6g+ucs*pxvSS|8DPI|NqtA|65_d%Z&ks$_{``0)#z)DYHo1+!9Hp!aS3NEz=+p zl`iu`A0^J#-Y$q6&jWo8mU4A z=0=DdT#}upH{m!Zcxmt54vM)W>r_kNW28ZRFIneau_}dNtv4#BEroMtj21rOHq&#u z4UPt9AiS>p$>zTfT!lUl z8n23Y21EH2Ptvh=n~=UZ!cm7bY5{D^?D?k*3<7D$<_1Bfbe8ioal%W+BIS^`f4Le_ zi*qx%A~d*B)M!oE&dAreaU@`L7s4#3(B={^UwC>h=NW1=0}|^?IWUnc%eS9D!@la{$bJYv zD+sA(Ig4PIDL;??Jy&Ff%Z*7?*ne`hmXUgpB#uD!#b?1El6E+;;2PrD_{@OA{G26I)YQlgXqa#| zU|?(GIWBq(_5eH7HRoss9Hh%?a>Evp-YmkqL-M)_eMLe2zuz7G91QP?YbgZBd^H*+ zWlV`Bw$OY;n`02|?Zl>(=!l1xio*((GcEq9z<@JxyJDuL3-FXF=}0wKB`OT9LQ$mViTyAg(_%d^;+U{&2=X)pg&O`xm5RUKrbr@aE)*e28e_To+ z8=H-AGnAK;C$AItU}_#lQ8{DWp=^#e`RF3f2iXe!K~GNYJPXrnOUFVAn`c`y2xsx9 zGt%LaN{<)#&itgvDhl#7s_S5Ik4aU98&<2;3Q)BQ2Zsmo13WBfvM#vVJI$TVvk`@W zp)fHZWW$z%m}3=~dFr4pr~~R4$~}l4SgoaJL@fgO#r2f6u!$lzjTvjjiv`~5*HUU{ z2UW>P9`PBGc+khM0ma^Efvik3u*0Mdq?28_XBp}Oc5#1% zdbdp)&}2Djz{@X88b%rqNgf_b_)|`#;Lht>(x|DK5B_AO72lL9C3rr{;mJ{ybMHO( zuj~J}SPk>l@OC@^*W~~1g(ClVc+d^oA^HEeI|ujo|9=SozcO2ZqBEeA=B>(b0ORvX 
ze{N|(u(wSl0o>0p4IC!*9*Ag1b*5=6w;hOD=d(GC9A2O)(J+yvbFsTHbDzyGJzg== z@1#%f|6P8kcb6CGB%$Yjme2J*A^Jdhn%5bcr%Etwk+c-N(l>gu19?{V1X3`HwrFGI zvxSr8^nHq%xUFq$(SA&bR@kbHu#cJ_H6%2pB5!q|fJu{LFg7v&)iTLul#x`RSu~>q zxz?~|?HSo(X~Tk=s0uH{Cko#uKDQ_k0qm;i6n_XCAsiy?fUPwF3Dg&#+&V^C1QrV7 zpF+!kVB<1^hoysT~QDu zQ*`WVsS;-ePvklxFG*Sx6MKqgZUcLZ4bivQ3V(}j5V+U`gO3P-kJjPuQ4x7U=p!Zw zZAeAA>XH!q4CK@r2%7lWHtbprkLiCd{EG%vn|i5(2d@CVAdG>Dlf|MLNUY*^pg&8r zY!(Jlqp?YPedD}>?tge@NoVxN@46={(}}`%N)oCKrK*S(vhE<$D1fd1;-aahKx!h93$# zwNx#fvRDKH?{@{M!eyO=L=gXP&7yIaEE*LV6q3pxm1Tv))5Ge zh=VQtzHT)jGBE{)yvv%}K+SGWvYavZxltcvSbz?2GmWoFpv{uU#+EdfXv_&MSMg{$ zx#n=?@iYNJaBPGue~5}^Xo_-1L)Gz|9l$BY1vHe5KGMyvzQcsV=g=p#H$_#mE6(Sv zuD9k0;b0SC#f_Z#k(H(9*fUWE3Ssa5=PDVfVWNB6(i43GI#cHX@-mmbyCkFz2qqb+ zs$NnOZ%9(Y5#PL}InwLH>x-G6XcP19Cbj*M;TJknwnG)60i z>>mB(LH_(i2bdwp~+0Hkk zdUMMbudzA{4rh~e!REo}85;~^!%we#nS2q-Eg?FByPD880qUz8_C4AS4-<;3_=b78vZP(2J0p!@HqZKR-ZZK>3KQj*Z7c{N~~Sn{Tc+9V51H zpx@P=?oK(O&GSmQkYwKq9x25Eb8fO&yECV}jhPC|Ig9Xy^`H0Ga{(QUS8eKYgXWMJ zlHGi%L$@Iyl*vJ5ph|)0OP#tm099NnzzlgV7Cdk+I4uQ3QhrxqL{ny&oz7NnIQhCU z?=nb7(=B0YJTF=TQq%Nde9kI^gd({h$a4W%gX*`8Ic&oyh* zZFY;LCFU`mjnhx*7!Le9(8B#}-~MdtTf^dnS6yV5x*;2vgImx_LCycOq=|wTcb+U) zivfp-5ER2-46-3krH5t*rV!5ufWGlWMFtU1%NvpVv7`E24MFzAKq|6XeV)E4Z_`>Z zt=RO38KNc;4i!X*ZnITN9?oa8x5=beWfm8On!z|76$72CR-K)^Cc#iKtsLk!L8;h{ zz5q(i#b2j&ZO9xU3W5TbCVvx?y~~tDg((Tw%!zmu?K*W~$=nr7MzZ9d$H>f%Be30U zH!JEc)>PIP{Px70?v)T@$U0jFyrfLDL76c4H^l*>l6x9IU%6FM1)TIvwj}%7ks<89 z<=edRrVCzNAKe)O7v{y#uSh!20v{TG`^xaRb~$Wi)N3yt*Cj(3-`*`PYsLBorWbB4 z6!(4=pG)S?yJ#LgQtI{c=~Z?_ztch~>%U~4?t00H3&Z|e6OS8Ct-Cfw$J;N@-)H_7 z#R=SMNtW$y;%~zv7(ZVv=iev3OhymhUnF@#bdm>{NH8Gtns}LTmQuWFOq8Xzh+;sx z%?C_n+fU(^bb?9-E*lR=PT50IHjFwZ)!TY4qV7!%|H{p=yas@NBfer%Ion9kP4cb{ z``lFSU(MO&cg}dVtyzns0`PHo8m+o#-AviY7_wG#nN{63KPD))zTQ$i+!n~?rhvNx z-M*=LPw9*;obf9*xAXWHw>ErzsI%s5QJAz_0QweZO@ZAmTW4~Iyo2M2pNRFQY||Be zL(X^u)a@qumeTsoTH&$y*UGJ9eeK%0OSCV(I=t`Rgp?F)EdFjsdJuX|)U1_!Z)ch} zly<9D;7w=LT|(l59|dA5t}444zJSF`$g#4-? 
zXNeWwkib`4KS{EYKgt|wcz;)ojk#36`USGM{S~r9-3l(Z-9i&}?-$zL&b0*i_NKa^ zIiURS-_I{+9(Ni4$;=mM!A>V%rzG|@mBk))7@WucsbQ;8av<+t6WRfV0Ea$V;bO=%m2e8-^Ixl|xD0u$v z$Nz%z*tE%8k0nu8=jV=B%lRr#3_*t!ZWwA?=9}>6(x!KVvk3;9ECf~o*vd0-$9A|t z4AJ)}=-^JaU(};hQw}1VK`0{qbQ)Z(*nNHxZwvvHrHw$0X)=$;ZkOADOI=^Y8$u~M z7|iC}b27dwxU*zB|HQ!w1wj(|(tcUXIpKZz3|DN9dO@^a&zbIgyJRVLToXb)93DB)BNb zYkFf!ksuH%oP08C;X~ImS*8WRxx%`b)c`{g29Lj$#D!xA`53rWc3rSu5Y3VOb)TX% zH3PKK?4NW_8{W0$g`fn3W?5g9MFIF(nrZNN0kL9gFrrjNT(G;D5Q;h4w;~Yk`GdGd z&Wl3eyvpG>1HNhLQ$)U!e3&u_Dur~qj48ad3<}M*+vM6f)-h8}Yk#JpOUf>bOpzQV z(IGJogmZrRl-&-AoE#s$r*N&9&=a<1=2jw(-26=TCYfm=qFiYT1`ZY24E9gWbMIo6 zofnyNW-@2tEczHj{+Za_v*Z0L+7b;hK?WaZ^GlFnFk(5e&3vx2-{Rby8@^7sSqQwd zhfhho=Te4wib;E-~{;%Ht{rT?? zfBut1Bumwg#e-%bKWQb>+xdKrGVbRXC4$%U#SwItTrI7T zWeP@hP?V72f$;?!PZaFh2O!F%%fUcMF(csg09i9g3&tVh74UuW$$p)pg>E`FtxmZ2 z!ITEe2hEa34ehQB*?Be?qtcKn6%1>vy69_~j*`JFo+i)iX1VL%JV51$YSR$tsB z)2eOp#Ye3;qv2b9n%u2*yv)+klJpvgPQdwiU_4`+LdC0mON5tep{CN;`MTO$3yIIZ5hHhNXg<;~L$0Zs{h)5~76PII1eFQh0>q=7 z@@%9QyDEeC>guXi34q!th#J+l=s@%^c)OCC1|=d8<^!x}(xS0YDL{)oL5;h`G{Z_H z!0|IL9iaED8%WW&;*TfoQ`+DA4jGvU6nLAavjK5eg~7aTld8f~zm#0zaOY{KAIgDv zUfwo=IB2#7iWBc9qVpbni4`O8J+Gus7i_gPo|@U=?*?r}So3XR&3^}2RWTtUmVxUo z7Pp?!Jm~_2I>?sfET^_#Sx&0at2Vmb2nh*q6q_EorYH&h*7Ua)-lVnDoyMyGsp|n@ z*R&TYpy-J}&BK5TqL%s1_iz6C+Ji9QL}gTbQvgfA2@#y*GH}AvGD5u-`X~-pdvLFV z#PElINKzvqwlI)Qwxv6r`U0CcgPp%@)u|JP0>-Fho@U0w{!G5Wp;?~f5b|#NT$Jm%Ehn3MlviUHb-DDmM zdkIbAt?eaSx5ku!3(OGzY|WX4UNvGq<;<7ya*nHrmMDEqBp0%12DM{#QM^G(p$uN6 zjznraMP|2q0}pY)6LfInkef